diff --git a/.gitmodules b/.gitmodules
index 238c8f9..17449c8 100644
--- a/.gitmodules
+++ b/.gitmodules
@@ -6,3 +6,7 @@
 	path = mros2-asp3-f767zi
 	url = git@github.com:mROS-base/mros2-asp3-f767zi
 	branch = eval/v0.3.1
+[submodule "micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/micro_ros_stm32cubemx_utils"]
+	path = micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/micro_ros_stm32cubemx_utils
+	url = https://github.com/mROS-base/micro_ros_stm32cubemx_utils
+	branch = galactic
diff --git a/micro-ros-stm32_embeddedrtps/main_twist.c b/micro-ros-stm32_embeddedrtps/main_twist.c
new file mode 100644
index 0000000..90e1eef
--- /dev/null
+++ b/micro-ros-stm32_embeddedrtps/main_twist.c
@@ -0,0 +1,505 @@
+/* USER CODE BEGIN Header */
+/**
+  ******************************************************************************
+  * @file : main.c
+  * @brief : Main program body
+  ******************************************************************************
+  * @attention
+  *

+  * © Copyright (c) 2022 STMicroelectronics.
+  * All rights reserved.

+ * + * This software component is licensed by ST under BSD 3-Clause license, + * the "License"; You may not use this file except in compliance with the + * License. You may obtain a copy of the License at: + * opensource.org/licenses/BSD-3-Clause + * + ****************************************************************************** + */ +/* USER CODE END Header */ +/* Includes ------------------------------------------------------------------*/ +#include "main.h" +#include "cmsis_os.h" +#include "lwip.h" + +/* Private includes ----------------------------------------------------------*/ +/* USER CODE BEGIN Includes */ +#include +#include +#include +#include + +#include +/* USER CODE END Includes */ + +/* Private typedef -----------------------------------------------------------*/ +/* USER CODE BEGIN PTD */ + +/* USER CODE END PTD */ + +/* Private define ------------------------------------------------------------*/ +/* USER CODE BEGIN PD */ +/* USER CODE END PD */ + +/* Private macro -------------------------------------------------------------*/ +/* USER CODE BEGIN PM */ + +/* USER CODE END PM */ + +/* Private variables ---------------------------------------------------------*/ + +UART_HandleTypeDef huart3; + +PCD_HandleTypeDef hpcd_USB_OTG_FS; + +/* Definitions for defaultTask */ +osThreadId_t defaultTaskHandle; +const osThreadAttr_t defaultTask_attributes = { + .name = "defaultTask", + .stack_size = 8000 * 4, + .priority = (osPriority_t) osPriorityBelowNormal3, +}; +/* USER CODE BEGIN PV */ + +/* USER CODE END PV */ + +/* Private function prototypes -----------------------------------------------*/ +void SystemClock_Config(void); +static void MX_GPIO_Init(void); +static void MX_USART3_UART_Init(void); +static void MX_USB_OTG_FS_PCD_Init(void); +void StartDefaultTask(void *argument); + +/* USER CODE BEGIN PFP */ + +/* USER CODE END PFP */ + +/* Private user code ---------------------------------------------------------*/ +/* USER CODE BEGIN 0 */ + +/* USER CODE END 0 */ + +/** + * @brief The application entry point. + * @retval int + */ +int main(void) +{ + /* USER CODE BEGIN 1 */ + + /* USER CODE END 1 */ + + /* MCU Configuration--------------------------------------------------------*/ + + /* Reset of all peripherals, Initializes the Flash interface and the Systick. */ + HAL_Init(); + + /* USER CODE BEGIN Init */ + + /* USER CODE END Init */ + + /* Configure the system clock */ + SystemClock_Config(); + + /* USER CODE BEGIN SysInit */ + + /* USER CODE END SysInit */ + + /* Initialize all configured peripherals */ + MX_GPIO_Init(); + MX_USART3_UART_Init(); + MX_USB_OTG_FS_PCD_Init(); + /* USER CODE BEGIN 2 */ + + /* USER CODE END 2 */ + + /* Init scheduler */ + osKernelInitialize(); + + /* USER CODE BEGIN RTOS_MUTEX */ + /* add mutexes, ... */ + /* USER CODE END RTOS_MUTEX */ + + /* USER CODE BEGIN RTOS_SEMAPHORES */ + /* add semaphores, ... */ + /* USER CODE END RTOS_SEMAPHORES */ + + /* USER CODE BEGIN RTOS_TIMERS */ + /* start timers, add new ones, ... */ + /* USER CODE END RTOS_TIMERS */ + + /* USER CODE BEGIN RTOS_QUEUES */ + /* add queues, ... */ + /* USER CODE END RTOS_QUEUES */ + + /* Create the thread(s) */ + /* creation of defaultTask */ + defaultTaskHandle = osThreadNew(StartDefaultTask, NULL, &defaultTask_attributes); + + /* USER CODE BEGIN RTOS_THREADS */ + /* add threads, ... */ + /* USER CODE END RTOS_THREADS */ + + /* USER CODE BEGIN RTOS_EVENTS */ + /* add events, ... 
*/ + /* USER CODE END RTOS_EVENTS */ + + /* Start scheduler */ + osKernelStart(); + + /* We should never get here as control is now taken by the scheduler */ + /* Infinite loop */ + /* USER CODE BEGIN WHILE */ + while (1) + { + /* USER CODE END WHILE */ + + /* USER CODE BEGIN 3 */ + } + /* USER CODE END 3 */ +} + +/** + * @brief System Clock Configuration + * @retval None + */ +void SystemClock_Config(void) +{ + RCC_OscInitTypeDef RCC_OscInitStruct = {0}; + RCC_ClkInitTypeDef RCC_ClkInitStruct = {0}; + RCC_PeriphCLKInitTypeDef PeriphClkInitStruct = {0}; + + /** Configure LSE Drive Capability + */ + HAL_PWR_EnableBkUpAccess(); + /** Configure the main internal regulator output voltage + */ + __HAL_RCC_PWR_CLK_ENABLE(); + __HAL_PWR_VOLTAGESCALING_CONFIG(PWR_REGULATOR_VOLTAGE_SCALE3); + /** Initializes the RCC Oscillators according to the specified parameters + * in the RCC_OscInitTypeDef structure. + */ + RCC_OscInitStruct.OscillatorType = RCC_OSCILLATORTYPE_HSE; + RCC_OscInitStruct.HSEState = RCC_HSE_BYPASS; + RCC_OscInitStruct.PLL.PLLState = RCC_PLL_ON; + RCC_OscInitStruct.PLL.PLLSource = RCC_PLLSOURCE_HSE; + RCC_OscInitStruct.PLL.PLLM = 4; + RCC_OscInitStruct.PLL.PLLN = 96; + RCC_OscInitStruct.PLL.PLLP = RCC_PLLP_DIV2; + RCC_OscInitStruct.PLL.PLLQ = 4; + RCC_OscInitStruct.PLL.PLLR = 2; + if (HAL_RCC_OscConfig(&RCC_OscInitStruct) != HAL_OK) + { + Error_Handler(); + } + /** Activate the Over-Drive mode + */ + if (HAL_PWREx_EnableOverDrive() != HAL_OK) + { + Error_Handler(); + } + /** Initializes the CPU, AHB and APB buses clocks + */ + RCC_ClkInitStruct.ClockType = RCC_CLOCKTYPE_HCLK|RCC_CLOCKTYPE_SYSCLK + |RCC_CLOCKTYPE_PCLK1|RCC_CLOCKTYPE_PCLK2; + RCC_ClkInitStruct.SYSCLKSource = RCC_SYSCLKSOURCE_PLLCLK; + RCC_ClkInitStruct.AHBCLKDivider = RCC_SYSCLK_DIV1; + RCC_ClkInitStruct.APB1CLKDivider = RCC_HCLK_DIV2; + RCC_ClkInitStruct.APB2CLKDivider = RCC_HCLK_DIV1; + + if (HAL_RCC_ClockConfig(&RCC_ClkInitStruct, FLASH_LATENCY_3) != HAL_OK) + { + Error_Handler(); + } + PeriphClkInitStruct.PeriphClockSelection = RCC_PERIPHCLK_USART3|RCC_PERIPHCLK_CLK48; + PeriphClkInitStruct.Usart3ClockSelection = RCC_USART3CLKSOURCE_PCLK1; + PeriphClkInitStruct.Clk48ClockSelection = RCC_CLK48SOURCE_PLL; + if (HAL_RCCEx_PeriphCLKConfig(&PeriphClkInitStruct) != HAL_OK) + { + Error_Handler(); + } +} + +/** + * @brief USART3 Initialization Function + * @param None + * @retval None + */ +static void MX_USART3_UART_Init(void) +{ + + /* USER CODE BEGIN USART3_Init 0 */ + + /* USER CODE END USART3_Init 0 */ + + /* USER CODE BEGIN USART3_Init 1 */ + + /* USER CODE END USART3_Init 1 */ + huart3.Instance = USART3; + huart3.Init.BaudRate = 115200; + huart3.Init.WordLength = UART_WORDLENGTH_8B; + huart3.Init.StopBits = UART_STOPBITS_1; + huart3.Init.Parity = UART_PARITY_NONE; + huart3.Init.Mode = UART_MODE_TX_RX; + huart3.Init.HwFlowCtl = UART_HWCONTROL_NONE; + huart3.Init.OverSampling = UART_OVERSAMPLING_16; + huart3.Init.OneBitSampling = UART_ONE_BIT_SAMPLE_DISABLE; + huart3.AdvancedInit.AdvFeatureInit = UART_ADVFEATURE_NO_INIT; + if (HAL_UART_Init(&huart3) != HAL_OK) + { + Error_Handler(); + } + /* USER CODE BEGIN USART3_Init 2 */ + + /* USER CODE END USART3_Init 2 */ + +} + +/** + * @brief USB_OTG_FS Initialization Function + * @param None + * @retval None + */ +static void MX_USB_OTG_FS_PCD_Init(void) +{ + + /* USER CODE BEGIN USB_OTG_FS_Init 0 */ + + /* USER CODE END USB_OTG_FS_Init 0 */ + + /* USER CODE BEGIN USB_OTG_FS_Init 1 */ + + /* USER CODE END USB_OTG_FS_Init 1 */ + hpcd_USB_OTG_FS.Instance = USB_OTG_FS; + 
hpcd_USB_OTG_FS.Init.dev_endpoints = 6; + hpcd_USB_OTG_FS.Init.speed = PCD_SPEED_FULL; + hpcd_USB_OTG_FS.Init.dma_enable = DISABLE; + hpcd_USB_OTG_FS.Init.phy_itface = PCD_PHY_EMBEDDED; + hpcd_USB_OTG_FS.Init.Sof_enable = ENABLE; + hpcd_USB_OTG_FS.Init.low_power_enable = DISABLE; + hpcd_USB_OTG_FS.Init.lpm_enable = DISABLE; + hpcd_USB_OTG_FS.Init.vbus_sensing_enable = ENABLE; + hpcd_USB_OTG_FS.Init.use_dedicated_ep1 = DISABLE; + if (HAL_PCD_Init(&hpcd_USB_OTG_FS) != HAL_OK) + { + Error_Handler(); + } + /* USER CODE BEGIN USB_OTG_FS_Init 2 */ + + /* USER CODE END USB_OTG_FS_Init 2 */ + +} + +/** + * @brief GPIO Initialization Function + * @param None + * @retval None + */ +static void MX_GPIO_Init(void) +{ + GPIO_InitTypeDef GPIO_InitStruct = {0}; + + /* GPIO Ports Clock Enable */ + __HAL_RCC_GPIOC_CLK_ENABLE(); + __HAL_RCC_GPIOH_CLK_ENABLE(); + __HAL_RCC_GPIOA_CLK_ENABLE(); + __HAL_RCC_GPIOB_CLK_ENABLE(); + __HAL_RCC_GPIOD_CLK_ENABLE(); + __HAL_RCC_GPIOG_CLK_ENABLE(); + + /*Configure GPIO pin Output Level */ + HAL_GPIO_WritePin(GPIOB, LD1_Pin|LD3_Pin|LD2_Pin, GPIO_PIN_RESET); + + /*Configure GPIO pin Output Level */ + HAL_GPIO_WritePin(USB_PowerSwitchOn_GPIO_Port, USB_PowerSwitchOn_Pin, GPIO_PIN_RESET); + + /*Configure GPIO pin : USER_Btn_Pin */ + GPIO_InitStruct.Pin = USER_Btn_Pin; + GPIO_InitStruct.Mode = GPIO_MODE_IT_RISING; + GPIO_InitStruct.Pull = GPIO_NOPULL; + HAL_GPIO_Init(USER_Btn_GPIO_Port, &GPIO_InitStruct); + + /*Configure GPIO pins : LD1_Pin LD3_Pin LD2_Pin */ + GPIO_InitStruct.Pin = LD1_Pin|LD3_Pin|LD2_Pin; + GPIO_InitStruct.Mode = GPIO_MODE_OUTPUT_PP; + GPIO_InitStruct.Pull = GPIO_NOPULL; + GPIO_InitStruct.Speed = GPIO_SPEED_FREQ_LOW; + HAL_GPIO_Init(GPIOB, &GPIO_InitStruct); + + /*Configure GPIO pin : USB_PowerSwitchOn_Pin */ + GPIO_InitStruct.Pin = USB_PowerSwitchOn_Pin; + GPIO_InitStruct.Mode = GPIO_MODE_OUTPUT_PP; + GPIO_InitStruct.Pull = GPIO_NOPULL; + GPIO_InitStruct.Speed = GPIO_SPEED_FREQ_LOW; + HAL_GPIO_Init(USB_PowerSwitchOn_GPIO_Port, &GPIO_InitStruct); + + /*Configure GPIO pin : USB_OverCurrent_Pin */ + GPIO_InitStruct.Pin = USB_OverCurrent_Pin; + GPIO_InitStruct.Mode = GPIO_MODE_INPUT; + GPIO_InitStruct.Pull = GPIO_NOPULL; + HAL_GPIO_Init(USB_OverCurrent_GPIO_Port, &GPIO_InitStruct); + +} + +/* USER CODE BEGIN 4 */ +#ifdef __GNUC__ +int __io_putchar(int ch) +#else +int fputc(int ch, FILE *f) +#endif /* __GNUC__ */ +{ + HAL_UART_Transmit(&huart3, (uint8_t *)&ch, 1, 0xFFFF); + return ch; +} +/* USER CODE END 4 */ + +/* USER CODE BEGIN Header_StartDefaultTask */ +void * microros_allocate(size_t size, void * state); +void microros_deallocate(void * pointer, void * state); +void * microros_reallocate(void * pointer, size_t size, void * state); +void * microros_zero_allocate(size_t number_of_elements, size_t size_of_element, void * state); + +rcl_publisher_t publisher; +rcl_subscription_t subscriber; + +geometry_msgs__msg__Twist msg; + +void subscription_callback(const void * msgin) +{ + //printf("sub\r\n"); + + const geometry_msgs__msg__Twist * msg = (const geometry_msgs__msg__Twist *)msgin; + + rcl_publish(&publisher, (const void*)msg, NULL); +} + +/** + * @brief Function implementing the defaultTask thread. 
+ * @param argument: Not used + * @retval None + */ +/* USER CODE END Header_StartDefaultTask */ +void StartDefaultTask(void *argument) +{ + /* init code for LWIP */ + MX_LWIP_Init(); + /* USER CODE BEGIN 5 */ + rcl_allocator_t freeRTOS_allocator = rcutils_get_zero_initialized_allocator(); + freeRTOS_allocator.allocate = microros_allocate; + freeRTOS_allocator.deallocate = microros_deallocate; + freeRTOS_allocator.reallocate = microros_reallocate; + freeRTOS_allocator.zero_allocate = microros_zero_allocate; + + if (!rcutils_set_default_allocator(&freeRTOS_allocator)) { + printf("Error on default allocators (line %d)\n", __LINE__); + } + + printf("main start\r\n"); + + // micro-ROS app + + rclc_support_t support; + rcl_allocator_t allocator; + rcl_node_t node; + + allocator = rcl_get_default_allocator(); + + //create init_options + rclc_support_init(&support, 0, NULL, &allocator); + + // create node + rclc_node_init_default(&node, "cubemx_node", "", &support); + + // create publisher + rclc_publisher_init_default( + &publisher, + &node, + ROSIDL_GET_MSG_TYPE_SUPPORT(geometry_msgs, msg, Twist), + "to_linux"); + + // create subscriber + rclc_subscription_init_default( + &subscriber, + &node, + ROSIDL_GET_MSG_TYPE_SUPPORT(geometry_msgs, msg, Twist), + "to_stm"); + + // Create executor + rclc_executor_t executor; + rclc_executor_init(&executor, &support.context, 3, &allocator); + rclc_executor_add_subscription(&executor, &subscriber, &msg, + &subscription_callback, ON_NEW_DATA); + + // TO print this enable FreeRTOS -> USE_STATS_FORMATTING_FUNCTIONS + + // static char ptrTaskList[1000]; + + // vTaskList(ptrTaskList); + // printf("**********************************"); + // printf("Task State Prio Stack Num"); + // printf("**********************************"); + // printf(ptrTaskList); + // printf("**********************************"); + + + for(;;) { + rclc_executor_spin_some(&executor, RCL_MS_TO_NS(10)); + osDelay(10); + } + /* USER CODE END 5 */ +} + +/** + * @brief Period elapsed callback in non blocking mode + * @note This function is called when TIM14 interrupt took place, inside + * HAL_TIM_IRQHandler(). It makes a direct call to HAL_IncTick() to increment + * a global variable "uwTick" used as application time base. + * @param htim : TIM handle + * @retval None + */ +void HAL_TIM_PeriodElapsedCallback(TIM_HandleTypeDef *htim) +{ + /* USER CODE BEGIN Callback 0 */ + + /* USER CODE END Callback 0 */ + if (htim->Instance == TIM14) { + HAL_IncTick(); + } + /* USER CODE BEGIN Callback 1 */ + + /* USER CODE END Callback 1 */ +} + +/** + * @brief This function is executed in case of error occurrence. + * @retval None + */ +void Error_Handler(void) +{ + /* USER CODE BEGIN Error_Handler_Debug */ + /* User can add his own implementation to report the HAL error return state */ + __disable_irq(); + while (1) + { + } + /* USER CODE END Error_Handler_Debug */ +} + +#ifdef USE_FULL_ASSERT +/** + * @brief Reports the name of the source file and the source line number + * where the assert_param error has occurred. 
+  * @param file: pointer to the source file name
+  * @param line: assert_param error line source number
+  * @retval None
+  */
+void assert_failed(uint8_t *file, uint32_t line)
+{
+  /* USER CODE BEGIN 6 */
+  /* User can add his own implementation to report the file name and line number,
+     ex: printf("Wrong parameters value: file %s on line %d\r\n", file, line) */
+  /* USER CODE END 6 */
+}
+#endif /* USE_FULL_ASSERT */
+
+/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/
diff --git a/micro-ros-stm32_embeddedrtps/main_uint16.c b/micro-ros-stm32_embeddedrtps/main_uint16.c
new file mode 100644
index 0000000..e3d7d28
--- /dev/null
+++ b/micro-ros-stm32_embeddedrtps/main_uint16.c
@@ -0,0 +1,505 @@
+/* USER CODE BEGIN Header */
+/**
+  ******************************************************************************
+  * @file : main.c
+  * @brief : Main program body
+  ******************************************************************************
+  * @attention
+  *

+  * © Copyright (c) 2022 STMicroelectronics.
+  * All rights reserved.

+ * + * This software component is licensed by ST under BSD 3-Clause license, + * the "License"; You may not use this file except in compliance with the + * License. You may obtain a copy of the License at: + * opensource.org/licenses/BSD-3-Clause + * + ****************************************************************************** + */ +/* USER CODE END Header */ +/* Includes ------------------------------------------------------------------*/ +#include "main.h" +#include "cmsis_os.h" +#include "lwip.h" + +/* Private includes ----------------------------------------------------------*/ +/* USER CODE BEGIN Includes */ +#include +#include +#include +#include + +#include +/* USER CODE END Includes */ + +/* Private typedef -----------------------------------------------------------*/ +/* USER CODE BEGIN PTD */ + +/* USER CODE END PTD */ + +/* Private define ------------------------------------------------------------*/ +/* USER CODE BEGIN PD */ +/* USER CODE END PD */ + +/* Private macro -------------------------------------------------------------*/ +/* USER CODE BEGIN PM */ + +/* USER CODE END PM */ + +/* Private variables ---------------------------------------------------------*/ + +UART_HandleTypeDef huart3; + +PCD_HandleTypeDef hpcd_USB_OTG_FS; + +/* Definitions for defaultTask */ +osThreadId_t defaultTaskHandle; +const osThreadAttr_t defaultTask_attributes = { + .name = "defaultTask", + .stack_size = 8000 * 4, + .priority = (osPriority_t) osPriorityBelowNormal3, +}; +/* USER CODE BEGIN PV */ + +/* USER CODE END PV */ + +/* Private function prototypes -----------------------------------------------*/ +void SystemClock_Config(void); +static void MX_GPIO_Init(void); +static void MX_USART3_UART_Init(void); +static void MX_USB_OTG_FS_PCD_Init(void); +void StartDefaultTask(void *argument); + +/* USER CODE BEGIN PFP */ + +/* USER CODE END PFP */ + +/* Private user code ---------------------------------------------------------*/ +/* USER CODE BEGIN 0 */ + +/* USER CODE END 0 */ + +/** + * @brief The application entry point. + * @retval int + */ +int main(void) +{ + /* USER CODE BEGIN 1 */ + + /* USER CODE END 1 */ + + /* MCU Configuration--------------------------------------------------------*/ + + /* Reset of all peripherals, Initializes the Flash interface and the Systick. */ + HAL_Init(); + + /* USER CODE BEGIN Init */ + + /* USER CODE END Init */ + + /* Configure the system clock */ + SystemClock_Config(); + + /* USER CODE BEGIN SysInit */ + + /* USER CODE END SysInit */ + + /* Initialize all configured peripherals */ + MX_GPIO_Init(); + MX_USART3_UART_Init(); + MX_USB_OTG_FS_PCD_Init(); + /* USER CODE BEGIN 2 */ + + /* USER CODE END 2 */ + + /* Init scheduler */ + osKernelInitialize(); + + /* USER CODE BEGIN RTOS_MUTEX */ + /* add mutexes, ... */ + /* USER CODE END RTOS_MUTEX */ + + /* USER CODE BEGIN RTOS_SEMAPHORES */ + /* add semaphores, ... */ + /* USER CODE END RTOS_SEMAPHORES */ + + /* USER CODE BEGIN RTOS_TIMERS */ + /* start timers, add new ones, ... */ + /* USER CODE END RTOS_TIMERS */ + + /* USER CODE BEGIN RTOS_QUEUES */ + /* add queues, ... */ + /* USER CODE END RTOS_QUEUES */ + + /* Create the thread(s) */ + /* creation of defaultTask */ + defaultTaskHandle = osThreadNew(StartDefaultTask, NULL, &defaultTask_attributes); + + /* USER CODE BEGIN RTOS_THREADS */ + /* add threads, ... */ + /* USER CODE END RTOS_THREADS */ + + /* USER CODE BEGIN RTOS_EVENTS */ + /* add events, ... 
*/ + /* USER CODE END RTOS_EVENTS */ + + /* Start scheduler */ + osKernelStart(); + + /* We should never get here as control is now taken by the scheduler */ + /* Infinite loop */ + /* USER CODE BEGIN WHILE */ + while (1) + { + /* USER CODE END WHILE */ + + /* USER CODE BEGIN 3 */ + } + /* USER CODE END 3 */ +} + +/** + * @brief System Clock Configuration + * @retval None + */ +void SystemClock_Config(void) +{ + RCC_OscInitTypeDef RCC_OscInitStruct = {0}; + RCC_ClkInitTypeDef RCC_ClkInitStruct = {0}; + RCC_PeriphCLKInitTypeDef PeriphClkInitStruct = {0}; + + /** Configure LSE Drive Capability + */ + HAL_PWR_EnableBkUpAccess(); + /** Configure the main internal regulator output voltage + */ + __HAL_RCC_PWR_CLK_ENABLE(); + __HAL_PWR_VOLTAGESCALING_CONFIG(PWR_REGULATOR_VOLTAGE_SCALE3); + /** Initializes the RCC Oscillators according to the specified parameters + * in the RCC_OscInitTypeDef structure. + */ + RCC_OscInitStruct.OscillatorType = RCC_OSCILLATORTYPE_HSE; + RCC_OscInitStruct.HSEState = RCC_HSE_BYPASS; + RCC_OscInitStruct.PLL.PLLState = RCC_PLL_ON; + RCC_OscInitStruct.PLL.PLLSource = RCC_PLLSOURCE_HSE; + RCC_OscInitStruct.PLL.PLLM = 4; + RCC_OscInitStruct.PLL.PLLN = 96; + RCC_OscInitStruct.PLL.PLLP = RCC_PLLP_DIV2; + RCC_OscInitStruct.PLL.PLLQ = 4; + RCC_OscInitStruct.PLL.PLLR = 2; + if (HAL_RCC_OscConfig(&RCC_OscInitStruct) != HAL_OK) + { + Error_Handler(); + } + /** Activate the Over-Drive mode + */ + if (HAL_PWREx_EnableOverDrive() != HAL_OK) + { + Error_Handler(); + } + /** Initializes the CPU, AHB and APB buses clocks + */ + RCC_ClkInitStruct.ClockType = RCC_CLOCKTYPE_HCLK|RCC_CLOCKTYPE_SYSCLK + |RCC_CLOCKTYPE_PCLK1|RCC_CLOCKTYPE_PCLK2; + RCC_ClkInitStruct.SYSCLKSource = RCC_SYSCLKSOURCE_PLLCLK; + RCC_ClkInitStruct.AHBCLKDivider = RCC_SYSCLK_DIV1; + RCC_ClkInitStruct.APB1CLKDivider = RCC_HCLK_DIV2; + RCC_ClkInitStruct.APB2CLKDivider = RCC_HCLK_DIV1; + + if (HAL_RCC_ClockConfig(&RCC_ClkInitStruct, FLASH_LATENCY_3) != HAL_OK) + { + Error_Handler(); + } + PeriphClkInitStruct.PeriphClockSelection = RCC_PERIPHCLK_USART3|RCC_PERIPHCLK_CLK48; + PeriphClkInitStruct.Usart3ClockSelection = RCC_USART3CLKSOURCE_PCLK1; + PeriphClkInitStruct.Clk48ClockSelection = RCC_CLK48SOURCE_PLL; + if (HAL_RCCEx_PeriphCLKConfig(&PeriphClkInitStruct) != HAL_OK) + { + Error_Handler(); + } +} + +/** + * @brief USART3 Initialization Function + * @param None + * @retval None + */ +static void MX_USART3_UART_Init(void) +{ + + /* USER CODE BEGIN USART3_Init 0 */ + + /* USER CODE END USART3_Init 0 */ + + /* USER CODE BEGIN USART3_Init 1 */ + + /* USER CODE END USART3_Init 1 */ + huart3.Instance = USART3; + huart3.Init.BaudRate = 115200; + huart3.Init.WordLength = UART_WORDLENGTH_8B; + huart3.Init.StopBits = UART_STOPBITS_1; + huart3.Init.Parity = UART_PARITY_NONE; + huart3.Init.Mode = UART_MODE_TX_RX; + huart3.Init.HwFlowCtl = UART_HWCONTROL_NONE; + huart3.Init.OverSampling = UART_OVERSAMPLING_16; + huart3.Init.OneBitSampling = UART_ONE_BIT_SAMPLE_DISABLE; + huart3.AdvancedInit.AdvFeatureInit = UART_ADVFEATURE_NO_INIT; + if (HAL_UART_Init(&huart3) != HAL_OK) + { + Error_Handler(); + } + /* USER CODE BEGIN USART3_Init 2 */ + + /* USER CODE END USART3_Init 2 */ + +} + +/** + * @brief USB_OTG_FS Initialization Function + * @param None + * @retval None + */ +static void MX_USB_OTG_FS_PCD_Init(void) +{ + + /* USER CODE BEGIN USB_OTG_FS_Init 0 */ + + /* USER CODE END USB_OTG_FS_Init 0 */ + + /* USER CODE BEGIN USB_OTG_FS_Init 1 */ + + /* USER CODE END USB_OTG_FS_Init 1 */ + hpcd_USB_OTG_FS.Instance = USB_OTG_FS; + 
hpcd_USB_OTG_FS.Init.dev_endpoints = 6; + hpcd_USB_OTG_FS.Init.speed = PCD_SPEED_FULL; + hpcd_USB_OTG_FS.Init.dma_enable = DISABLE; + hpcd_USB_OTG_FS.Init.phy_itface = PCD_PHY_EMBEDDED; + hpcd_USB_OTG_FS.Init.Sof_enable = ENABLE; + hpcd_USB_OTG_FS.Init.low_power_enable = DISABLE; + hpcd_USB_OTG_FS.Init.lpm_enable = DISABLE; + hpcd_USB_OTG_FS.Init.vbus_sensing_enable = ENABLE; + hpcd_USB_OTG_FS.Init.use_dedicated_ep1 = DISABLE; + if (HAL_PCD_Init(&hpcd_USB_OTG_FS) != HAL_OK) + { + Error_Handler(); + } + /* USER CODE BEGIN USB_OTG_FS_Init 2 */ + + /* USER CODE END USB_OTG_FS_Init 2 */ + +} + +/** + * @brief GPIO Initialization Function + * @param None + * @retval None + */ +static void MX_GPIO_Init(void) +{ + GPIO_InitTypeDef GPIO_InitStruct = {0}; + + /* GPIO Ports Clock Enable */ + __HAL_RCC_GPIOC_CLK_ENABLE(); + __HAL_RCC_GPIOH_CLK_ENABLE(); + __HAL_RCC_GPIOA_CLK_ENABLE(); + __HAL_RCC_GPIOB_CLK_ENABLE(); + __HAL_RCC_GPIOD_CLK_ENABLE(); + __HAL_RCC_GPIOG_CLK_ENABLE(); + + /*Configure GPIO pin Output Level */ + HAL_GPIO_WritePin(GPIOB, LD1_Pin|LD3_Pin|LD2_Pin, GPIO_PIN_RESET); + + /*Configure GPIO pin Output Level */ + HAL_GPIO_WritePin(USB_PowerSwitchOn_GPIO_Port, USB_PowerSwitchOn_Pin, GPIO_PIN_RESET); + + /*Configure GPIO pin : USER_Btn_Pin */ + GPIO_InitStruct.Pin = USER_Btn_Pin; + GPIO_InitStruct.Mode = GPIO_MODE_IT_RISING; + GPIO_InitStruct.Pull = GPIO_NOPULL; + HAL_GPIO_Init(USER_Btn_GPIO_Port, &GPIO_InitStruct); + + /*Configure GPIO pins : LD1_Pin LD3_Pin LD2_Pin */ + GPIO_InitStruct.Pin = LD1_Pin|LD3_Pin|LD2_Pin; + GPIO_InitStruct.Mode = GPIO_MODE_OUTPUT_PP; + GPIO_InitStruct.Pull = GPIO_NOPULL; + GPIO_InitStruct.Speed = GPIO_SPEED_FREQ_LOW; + HAL_GPIO_Init(GPIOB, &GPIO_InitStruct); + + /*Configure GPIO pin : USB_PowerSwitchOn_Pin */ + GPIO_InitStruct.Pin = USB_PowerSwitchOn_Pin; + GPIO_InitStruct.Mode = GPIO_MODE_OUTPUT_PP; + GPIO_InitStruct.Pull = GPIO_NOPULL; + GPIO_InitStruct.Speed = GPIO_SPEED_FREQ_LOW; + HAL_GPIO_Init(USB_PowerSwitchOn_GPIO_Port, &GPIO_InitStruct); + + /*Configure GPIO pin : USB_OverCurrent_Pin */ + GPIO_InitStruct.Pin = USB_OverCurrent_Pin; + GPIO_InitStruct.Mode = GPIO_MODE_INPUT; + GPIO_InitStruct.Pull = GPIO_NOPULL; + HAL_GPIO_Init(USB_OverCurrent_GPIO_Port, &GPIO_InitStruct); + +} + +/* USER CODE BEGIN 4 */ +#ifdef __GNUC__ +int __io_putchar(int ch) +#else +int fputc(int ch, FILE *f) +#endif /* __GNUC__ */ +{ + HAL_UART_Transmit(&huart3, (uint8_t *)&ch, 1, 0xFFFF); + return ch; +} +/* USER CODE END 4 */ + +/* USER CODE BEGIN Header_StartDefaultTask */ +void * microros_allocate(size_t size, void * state); +void microros_deallocate(void * pointer, void * state); +void * microros_reallocate(void * pointer, size_t size, void * state); +void * microros_zero_allocate(size_t number_of_elements, size_t size_of_element, void * state); + +rcl_publisher_t publisher; +rcl_subscription_t subscriber; + +std_msgs__msg__UInt16 msg; + +void subscription_callback(const void * msgin) +{ + //printf("sub\r\n"); + + const std_msgs__msg__UInt16 * msg = (const std_msgs__msg__UInt16 *)msgin; + + rcl_publish(&publisher, (const void*)msg, NULL); +} + +/** + * @brief Function implementing the defaultTask thread. 
+ * @param argument: Not used + * @retval None + */ +/* USER CODE END Header_StartDefaultTask */ +void StartDefaultTask(void *argument) +{ + /* init code for LWIP */ + MX_LWIP_Init(); + /* USER CODE BEGIN 5 */ + rcl_allocator_t freeRTOS_allocator = rcutils_get_zero_initialized_allocator(); + freeRTOS_allocator.allocate = microros_allocate; + freeRTOS_allocator.deallocate = microros_deallocate; + freeRTOS_allocator.reallocate = microros_reallocate; + freeRTOS_allocator.zero_allocate = microros_zero_allocate; + + if (!rcutils_set_default_allocator(&freeRTOS_allocator)) { + printf("Error on default allocators (line %d)\n", __LINE__); + } + + printf("main start\r\n"); + + // micro-ROS app + + rclc_support_t support; + rcl_allocator_t allocator; + rcl_node_t node; + + allocator = rcl_get_default_allocator(); + + //create init_options + rclc_support_init(&support, 0, NULL, &allocator); + + // create node + rclc_node_init_default(&node, "cubemx_node", "", &support); + + // create publisher + rclc_publisher_init_default( + &publisher, + &node, + ROSIDL_GET_MSG_TYPE_SUPPORT(std_msgs, msg, UInt16), + "to_linux"); + + // create subscriber + rclc_subscription_init_default( + &subscriber, + &node, + ROSIDL_GET_MSG_TYPE_SUPPORT(std_msgs, msg, UInt16), + "to_stm"); + + // Create executor + rclc_executor_t executor; + rclc_executor_init(&executor, &support.context, 3, &allocator); + rclc_executor_add_subscription(&executor, &subscriber, &msg, + &subscription_callback, ON_NEW_DATA); + + // TO print this enable FreeRTOS -> USE_STATS_FORMATTING_FUNCTIONS + + // static char ptrTaskList[1000]; + + // vTaskList(ptrTaskList); + // printf("**********************************"); + // printf("Task State Prio Stack Num"); + // printf("**********************************"); + // printf(ptrTaskList); + // printf("**********************************"); + + + for(;;) { + rclc_executor_spin_some(&executor, RCL_MS_TO_NS(10)); + osDelay(10); + } + /* USER CODE END 5 */ +} + +/** + * @brief Period elapsed callback in non blocking mode + * @note This function is called when TIM14 interrupt took place, inside + * HAL_TIM_IRQHandler(). It makes a direct call to HAL_IncTick() to increment + * a global variable "uwTick" used as application time base. + * @param htim : TIM handle + * @retval None + */ +void HAL_TIM_PeriodElapsedCallback(TIM_HandleTypeDef *htim) +{ + /* USER CODE BEGIN Callback 0 */ + + /* USER CODE END Callback 0 */ + if (htim->Instance == TIM14) { + HAL_IncTick(); + } + /* USER CODE BEGIN Callback 1 */ + + /* USER CODE END Callback 1 */ +} + +/** + * @brief This function is executed in case of error occurrence. + * @retval None + */ +void Error_Handler(void) +{ + /* USER CODE BEGIN Error_Handler_Debug */ + /* User can add his own implementation to report the HAL error return state */ + __disable_irq(); + while (1) + { + } + /* USER CODE END Error_Handler_Debug */ +} + +#ifdef USE_FULL_ASSERT +/** + * @brief Reports the name of the source file and the source line number + * where the assert_param error has occurred. 
+ * @param file: pointer to the source file name + * @param line: assert_param error line source number + * @retval None + */ +void assert_failed(uint8_t *file, uint32_t line) +{ + /* USER CODE BEGIN 6 */ + /* User can add his own implementation to report the file name and line number, + ex: printf("Wrong parameters value: file %s on line %d\r\n", file, line) */ + /* USER CODE END 6 */ +} +#endif /* USE_FULL_ASSERT */ + +/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/.cproject b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/.cproject new file mode 100644 index 0000000..fce279f --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/.cproject @@ -0,0 +1,301 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/.mxproject b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/.mxproject new file mode 100644 index 0000000..c27c367 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/.mxproject @@ -0,0 +1,37 @@ +[PreviousLibFiles] +LibFiles=Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_eth.h;Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_rcc.h;Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_rcc_ex.h;Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_flash.h;Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_flash_ex.h;Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_gpio.h;Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_gpio_ex.h;Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_dma.h;Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_dma_ex.h;Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_pwr.h;Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_pwr_ex.h;Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_cortex.h;Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal.h;Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_def.h;Drivers/STM32F7xx_HAL_Driver/Inc/Legacy/stm32_hal_legacy.h;Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_i2c.h;Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_i2c_ex.h;Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_exti.h;Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_tim.h;Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_tim_ex.h;Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_uart.h;Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_uart_ex.h;Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_pcd.h;Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_pcd_ex.h;Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_ll_usb.h;Middlewares/Third_Party/FreeRTOS/Source/include/croutine.h;Middlewares/Third_Party/FreeRTOS/Source/include/deprecated_definitions.h;Middlewares/Third_Party/FreeRTOS/Source/include/event_groups.h;Middlewares/Third_Party/FreeRTOS/Source/include/FreeRTOS.h;Middlewares/Third_Party/FreeRTOS/Source/include/list.h;Middlewares/Third_Party/FreeRTOS/Source/include/message_buffer.h;Middlewares/Third_Party/FreeRTOS/Source/include/mpu_prototypes.h;Middlewares/Third_Party/FreeRTOS/Source/include/mpu_wrappers.h;Middlewares/Third_Party/FreeRTOS/Source/include/portable.h;Middlewares/Third_Party/FreeRTOS/Source/include/projdefs.h;Middlewares/Third_Party/FreeRTOS/Source/include/queue.h;Middlewares/Third_Party/FreeRTOS/Source/include/semphr.h;Middlewares/Third_Party/FreeRTOS/Source/include/stack_macros.h;Middlewares/Third_Party/FreeRTOS/Source/include/StackMacros.h;Middlewares/Third_Pa
rty/FreeRTOS/Source/include/stream_buffer.h;Middlewares/Third_Party/FreeRTOS/Source/include/task.h;Middlewares/Third_Party/FreeRTOS/Source/include/timers.h;Middlewares/Third_Party/FreeRTOS/Source/CMSIS_RTOS_V2/cmsis_os2.h;Middlewares/Third_Party/FreeRTOS/Source/CMSIS_RTOS_V2/cmsis_os.h;Middlewares/Third_Party/FreeRTOS/Source/portable/GCC/ARM_CM7/r0p1/portmacro.h;Middlewares/Third_Party/LwIP/src/include/netif/ppp/ccp.h;Middlewares/Third_Party/LwIP/src/include/netif/ppp/chap_ms.h;Middlewares/Third_Party/LwIP/src/include/netif/ppp/chap-md5.h;Middlewares/Third_Party/LwIP/src/include/netif/ppp/chap-new.h;Middlewares/Third_Party/LwIP/src/include/netif/ppp/eap.h;Middlewares/Third_Party/LwIP/src/include/netif/ppp/eui64.h;Middlewares/Third_Party/LwIP/src/include/netif/ppp/fsm.h;Middlewares/Third_Party/LwIP/src/include/netif/ppp/ipcp.h;Middlewares/Third_Party/LwIP/src/include/netif/ppp/ipv6cp.h;Middlewares/Third_Party/LwIP/src/include/netif/ppp/lcp.h;Middlewares/Third_Party/LwIP/src/include/netif/ppp/magic.h;Middlewares/Third_Party/LwIP/src/include/netif/ppp/mppe.h;Middlewares/Third_Party/LwIP/src/include/netif/ppp/ppp.h;Middlewares/Third_Party/LwIP/src/include/netif/ppp/ppp_impl.h;Middlewares/Third_Party/LwIP/src/include/netif/ppp/ppp_opts.h;Middlewares/Third_Party/LwIP/src/include/netif/ppp/pppapi.h;Middlewares/Third_Party/LwIP/src/include/netif/ppp/pppcrypt.h;Middlewares/Third_Party/LwIP/src/include/netif/ppp/pppdebug.h;Middlewares/Third_Party/LwIP/src/include/netif/ppp/pppoe.h;Middlewares/Third_Party/LwIP/src/include/netif/ppp/pppol2tp.h;Middlewares/Third_Party/LwIP/src/include/netif/ppp/pppos.h;Middlewares/Third_Party/LwIP/src/include/netif/ppp/upap.h;Middlewares/Third_Party/LwIP/src/include/netif/ppp/vj.h;Middlewares/Third_Party/LwIP/src/include/netif/ppp/ecp.h;Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_eth.c;Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_rcc.c;Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_rcc_ex.c;Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_flash.c;Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_flash_ex.c;Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_gpio.c;Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_dma.c;Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_dma_ex.c;Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_pwr.c;Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_pwr_ex.c;Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_cortex.c;Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal.c;Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_i2c.c;Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_i2c_ex.c;Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_exti.c;Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_tim.c;Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_tim_ex.c;Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_uart.c;Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_uart_ex.c;Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_pcd.c;Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_pcd_ex.c;Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_ll_usb.c;Middlewares/Third_Party/FreeRTOS/Source/croutine.c;Middlewares/Third_Party/FreeRTOS/Source/event_groups.c;Middlewares/Third_Party/FreeRTOS/Source/list.c;Middlewares/Third_Party/FreeRTOS/Source/queue.c;Middlewares/Third_Party/FreeRTOS/Source/stream_buffer.c;Middlewares/Third_Party/FreeRTOS/Source/tasks.c;Middlewares/Third_Party/FreeRTOS/Source/timers.c;Middlewares/Third_Party/FreeRTOS/Source/CMSIS_RTOS_V2/cmsis_os2.c;Middlewares/Third_Party/FreeRTOS/Source/portable/MemMang/heap_4.c;Middlewares/Third_Party/FreeRTOS/Source/portable/GCC/ARM_CM7/r0p1/port.c;Middlewares/
Third_Party/LwIP/src/netif/ppp/auth.c;Middlewares/Third_Party/LwIP/src/netif/ppp/ccp.c;Middlewares/Third_Party/LwIP/src/netif/ppp/chap_ms.c;Middlewares/Third_Party/LwIP/src/netif/ppp/chap-md5.c;Middlewares/Third_Party/LwIP/src/netif/ppp/chap-new.c;Middlewares/Third_Party/LwIP/src/netif/ppp/demand.c;Middlewares/Third_Party/LwIP/src/netif/ppp/eap.c;Middlewares/Third_Party/LwIP/src/netif/ppp/eui64.c;Middlewares/Third_Party/LwIP/src/netif/ppp/fsm.c;Middlewares/Third_Party/LwIP/src/netif/ppp/ipcp.c;Middlewares/Third_Party/LwIP/src/netif/ppp/ipv6cp.c;Middlewares/Third_Party/LwIP/src/netif/ppp/lcp.c;Middlewares/Third_Party/LwIP/src/netif/ppp/magic.c;Middlewares/Third_Party/LwIP/src/netif/ppp/mppe.c;Middlewares/Third_Party/LwIP/src/netif/ppp/multilink.c;Middlewares/Third_Party/LwIP/src/netif/ppp/ppp.c;Middlewares/Third_Party/LwIP/src/netif/ppp/pppapi.c;Middlewares/Third_Party/LwIP/src/netif/ppp/pppcrypt.c;Middlewares/Third_Party/LwIP/src/netif/ppp/pppoe.c;Middlewares/Third_Party/LwIP/src/netif/ppp/pppol2tp.c;Middlewares/Third_Party/LwIP/src/netif/ppp/pppos.c;Middlewares/Third_Party/LwIP/src/netif/ppp/upap.c;Middlewares/Third_Party/LwIP/src/netif/ppp/utils.c;Middlewares/Third_Party/LwIP/src/netif/ppp/vj.c;Middlewares/Third_Party/LwIP/src/netif/bridgeif.c;Middlewares/Third_Party/LwIP/src/netif/bridgeif_fdb.c;Middlewares/Third_Party/LwIP/src/netif/ethernet.c;Middlewares/Third_Party/LwIP/src/netif/lowpan6.c;Middlewares/Third_Party/LwIP/src/netif/lowpan6_ble.c;Middlewares/Third_Party/LwIP/src/netif/lowpan6_common.c;Middlewares/Third_Party/LwIP/src/netif/slipif.c;Middlewares/Third_Party/LwIP/src/netif/zepif.c;Middlewares/Third_Party/LwIP/src/netif/ppp/ecp.c;Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_eth.h;Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_rcc.h;Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_rcc_ex.h;Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_flash.h;Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_flash_ex.h;Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_gpio.h;Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_gpio_ex.h;Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_dma.h;Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_dma_ex.h;Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_pwr.h;Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_pwr_ex.h;Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_cortex.h;Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal.h;Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_def.h;Drivers/STM32F7xx_HAL_Driver/Inc/Legacy/stm32_hal_legacy.h;Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_i2c.h;Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_i2c_ex.h;Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_exti.h;Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_tim.h;Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_tim_ex.h;Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_uart.h;Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_uart_ex.h;Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_pcd.h;Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_pcd_ex.h;Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_ll_usb.h;Middlewares/Third_Party/FreeRTOS/Source/include/croutine.h;Middlewares/Third_Party/FreeRTOS/Source/include/deprecated_definitions.h;Middlewares/Third_Party/FreeRTOS/Source/include/event_groups.h;Middlewares/Third_Party/FreeRTOS/Source/include/FreeRTOS.h;Middlewares/Third_Party/FreeRTOS/Source/include/list.h;Middlewares/Third_Party/FreeRTOS/Source/include/message_buffer.h;Middlewares/Third_Party/FreeRTOS/Source/include/mpu_prototypes.h;Middlewares/Third_Party/FreeRTOS/Source/include/mpu_wrappers.h;Middlewares/Thir
d_Party/FreeRTOS/Source/include/portable.h;Middlewares/Third_Party/FreeRTOS/Source/include/projdefs.h;Middlewares/Third_Party/FreeRTOS/Source/include/queue.h;Middlewares/Third_Party/FreeRTOS/Source/include/semphr.h;Middlewares/Third_Party/FreeRTOS/Source/include/stack_macros.h;Middlewares/Third_Party/FreeRTOS/Source/include/StackMacros.h;Middlewares/Third_Party/FreeRTOS/Source/include/stream_buffer.h;Middlewares/Third_Party/FreeRTOS/Source/include/task.h;Middlewares/Third_Party/FreeRTOS/Source/include/timers.h;Middlewares/Third_Party/FreeRTOS/Source/CMSIS_RTOS_V2/cmsis_os2.h;Middlewares/Third_Party/FreeRTOS/Source/CMSIS_RTOS_V2/cmsis_os.h;Middlewares/Third_Party/FreeRTOS/Source/portable/GCC/ARM_CM7/r0p1/portmacro.h;Middlewares/Third_Party/LwIP/src/include/netif/ppp/ccp.h;Middlewares/Third_Party/LwIP/src/include/netif/ppp/chap_ms.h;Middlewares/Third_Party/LwIP/src/include/netif/ppp/chap-md5.h;Middlewares/Third_Party/LwIP/src/include/netif/ppp/chap-new.h;Middlewares/Third_Party/LwIP/src/include/netif/ppp/eap.h;Middlewares/Third_Party/LwIP/src/include/netif/ppp/eui64.h;Middlewares/Third_Party/LwIP/src/include/netif/ppp/fsm.h;Middlewares/Third_Party/LwIP/src/include/netif/ppp/ipcp.h;Middlewares/Third_Party/LwIP/src/include/netif/ppp/ipv6cp.h;Middlewares/Third_Party/LwIP/src/include/netif/ppp/lcp.h;Middlewares/Third_Party/LwIP/src/include/netif/ppp/magic.h;Middlewares/Third_Party/LwIP/src/include/netif/ppp/mppe.h;Middlewares/Third_Party/LwIP/src/include/netif/ppp/ppp.h;Middlewares/Third_Party/LwIP/src/include/netif/ppp/ppp_impl.h;Middlewares/Third_Party/LwIP/src/include/netif/ppp/ppp_opts.h;Middlewares/Third_Party/LwIP/src/include/netif/ppp/pppapi.h;Middlewares/Third_Party/LwIP/src/include/netif/ppp/pppcrypt.h;Middlewares/Third_Party/LwIP/src/include/netif/ppp/pppdebug.h;Middlewares/Third_Party/LwIP/src/include/netif/ppp/pppoe.h;Middlewares/Third_Party/LwIP/src/include/netif/ppp/pppol2tp.h;Middlewares/Third_Party/LwIP/src/include/netif/ppp/pppos.h;Middlewares/Third_Party/LwIP/src/include/netif/ppp/upap.h;Middlewares/Third_Party/LwIP/src/include/netif/ppp/vj.h;Middlewares/Third_Party/LwIP/src/include/netif/ppp/ecp.h;Drivers/CMSIS/Device/ST/STM32F7xx/Include/stm32f767xx.h;Drivers/CMSIS/Device/ST/STM32F7xx/Include/stm32f7xx.h;Drivers/CMSIS/Device/ST/STM32F7xx/Include/system_stm32f7xx.h;Drivers/CMSIS/Device/ST/STM32F7xx/Source/Templates/system_stm32f7xx.c;Middlewares/Third_Party/LwIP/src/include/lwip/memp.h;Middlewares/Third_Party/LwIP/src/include/lwip/igmp.h;Middlewares/Third_Party/LwIP/src/include/lwip/opt.h;Middlewares/Third_Party/LwIP/src/include/lwip/if_api.h;Middlewares/Third_Party/LwIP/src/include/lwip/ip6_zone.h;Middlewares/Third_Party/LwIP/src/include/lwip/ip.h;Middlewares/Third_Party/LwIP/src/include/lwip/mld6.h;Middlewares/Third_Party/LwIP/src/include/lwip/tcp.h;Middlewares/Third_Party/LwIP/src/include/lwip/icmp.h;Middlewares/Third_Party/LwIP/src/include/lwip/netif.h;Middlewares/Third_Party/LwIP/src/include/lwip/icmp6.h;Middlewares/Third_Party/LwIP/src/include/lwip/inet_chksum.h;Middlewares/Third_Party/LwIP/src/include/lwip/ip_addr.h;Middlewares/Third_Party/LwIP/src/include/lwip/snmp.h;Middlewares/Third_Party/LwIP/src/include/lwip/tcpbase.h;Middlewares/Third_Party/LwIP/src/include/lwip/altcp_tls.h;Middlewares/Third_Party/LwIP/src/include/lwip/arch.h;Middlewares/Third_Party/LwIP/src/include/lwip/mem.h;Middlewares/Third_Party/LwIP/src/include/lwip/ip6_addr.h;Middlewares/Third_Party/LwIP/src/include/lwip/ethip6.h;Middlewares/Third_Party/LwIP/src/include/lwip/tcpip.h;Middlewares/Third_Party/L
wIP/src/include/lwip/raw.h;Middlewares/Third_Party/LwIP/src/include/lwip/def.h;Middlewares/Third_Party/LwIP/src/include/lwip/err.h;Middlewares/Third_Party/LwIP/src/include/lwip/nd6.h;Middlewares/Third_Party/LwIP/src/include/lwip/netbuf.h;Middlewares/Third_Party/LwIP/src/include/lwip/ip6.h;Middlewares/Third_Party/LwIP/src/include/lwip/dhcp6.h;Middlewares/Third_Party/LwIP/src/include/lwip/inet.h;Middlewares/Third_Party/LwIP/src/include/lwip/timeouts.h;Middlewares/Third_Party/LwIP/src/include/lwip/altcp_tcp.h;Middlewares/Third_Party/LwIP/src/include/lwip/dhcp.h;Middlewares/Third_Party/LwIP/src/include/lwip/debug.h;Middlewares/Third_Party/LwIP/src/include/lwip/ip4.h;Middlewares/Third_Party/LwIP/src/include/lwip/sio.h;Middlewares/Third_Party/LwIP/src/include/lwip/altcp.h;Middlewares/Third_Party/LwIP/src/include/lwip/netdb.h;Middlewares/Third_Party/LwIP/src/include/lwip/netifapi.h;Middlewares/Third_Party/LwIP/src/include/lwip/stats.h;Middlewares/Third_Party/LwIP/src/include/lwip/sockets.h;Middlewares/Third_Party/LwIP/src/include/lwip/dns.h;Middlewares/Third_Party/LwIP/src/include/lwip/ip6_frag.h;Middlewares/Third_Party/LwIP/src/include/lwip/udp.h;Middlewares/Third_Party/LwIP/src/include/lwip/pbuf.h;Middlewares/Third_Party/LwIP/src/include/lwip/etharp.h;Middlewares/Third_Party/LwIP/src/include/lwip/ip4_frag.h;Middlewares/Third_Party/LwIP/src/include/lwip/api.h;Middlewares/Third_Party/LwIP/src/include/lwip/autoip.h;Middlewares/Third_Party/LwIP/src/include/lwip/errno.h;Middlewares/Third_Party/LwIP/src/include/lwip/ip4_addr.h;Middlewares/Third_Party/LwIP/src/include/lwip/init.h;Middlewares/Third_Party/LwIP/src/include/lwip/sys.h;Middlewares/Third_Party/LwIP/src/include/lwip/apps/altcp_proxyconnect.h;Middlewares/Third_Party/LwIP/src/include/lwip/apps/mqtt_opts.h;Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmp_scalar.h;Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmpv3.h;Middlewares/Third_Party/LwIP/src/include/lwip/apps/httpd_opts.h;Middlewares/Third_Party/LwIP/src/include/lwip/apps/lwiperf.h;Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmp.h;Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmp_core.h;Middlewares/Third_Party/LwIP/src/include/lwip/apps/sntp_opts.h;Middlewares/Third_Party/LwIP/src/include/lwip/apps/mqtt_priv.h;Middlewares/Third_Party/LwIP/src/include/lwip/apps/httpd.h;Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmp_opts.h;Middlewares/Third_Party/LwIP/src/include/lwip/apps/smtp_opts.h;Middlewares/Third_Party/LwIP/src/include/lwip/apps/mdns_opts.h;Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmp_snmpv2_framework.h;Middlewares/Third_Party/LwIP/src/include/lwip/apps/tftp_opts.h;Middlewares/Third_Party/LwIP/src/include/lwip/apps/altcp_tls_mbedtls_opts.h;Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmp_table.h;Middlewares/Third_Party/LwIP/src/include/lwip/apps/smtp.h;Middlewares/Third_Party/LwIP/src/include/lwip/apps/mdns.h;Middlewares/Third_Party/LwIP/src/include/lwip/apps/mqtt.h;Middlewares/Third_Party/LwIP/src/include/lwip/apps/mdns_priv.h;Middlewares/Third_Party/LwIP/src/include/lwip/apps/http_client.h;Middlewares/Third_Party/LwIP/src/include/lwip/apps/netbiosns.h;Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmp_mib2.h;Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmp_snmpv2_usm.h;Middlewares/Third_Party/LwIP/src/include/lwip/apps/netbiosns_opts.h;Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmp_threadsync.h;Middlewares/Third_Party/LwIP/src/include/lwip/apps/tftp_server.h;Middlewares/Third_Party/LwIP/src/include/lwip/apps/
fs.h;Middlewares/Third_Party/LwIP/src/include/lwip/apps/sntp.h;Middlewares/Third_Party/LwIP/src/include/lwip/priv/tcpip_priv.h;Middlewares/Third_Party/LwIP/src/include/lwip/priv/raw_priv.h;Middlewares/Third_Party/LwIP/src/include/lwip/priv/api_msg.h;Middlewares/Third_Party/LwIP/src/include/lwip/priv/altcp_priv.h;Middlewares/Third_Party/LwIP/src/include/lwip/priv/tcp_priv.h;Middlewares/Third_Party/LwIP/src/include/lwip/priv/nd6_priv.h;Middlewares/Third_Party/LwIP/src/include/lwip/priv/sockets_priv.h;Middlewares/Third_Party/LwIP/src/include/lwip/priv/memp_std.h;Middlewares/Third_Party/LwIP/src/include/lwip/priv/memp_priv.h;Middlewares/Third_Party/LwIP/src/include/lwip/priv/mem_priv.h;Middlewares/Third_Party/LwIP/src/include/lwip/prot/igmp.h;Middlewares/Third_Party/LwIP/src/include/lwip/prot/ip.h;Middlewares/Third_Party/LwIP/src/include/lwip/prot/mld6.h;Middlewares/Third_Party/LwIP/src/include/lwip/prot/tcp.h;Middlewares/Third_Party/LwIP/src/include/lwip/prot/icmp.h;Middlewares/Third_Party/LwIP/src/include/lwip/prot/icmp6.h;Middlewares/Third_Party/LwIP/src/include/lwip/prot/ethernet.h;Middlewares/Third_Party/LwIP/src/include/lwip/prot/iana.h;Middlewares/Third_Party/LwIP/src/include/lwip/prot/nd6.h;Middlewares/Third_Party/LwIP/src/include/lwip/prot/ip6.h;Middlewares/Third_Party/LwIP/src/include/lwip/prot/dhcp6.h;Middlewares/Third_Party/LwIP/src/include/lwip/prot/dhcp.h;Middlewares/Third_Party/LwIP/src/include/lwip/prot/ip4.h;Middlewares/Third_Party/LwIP/src/include/lwip/prot/ieee.h;Middlewares/Third_Party/LwIP/src/include/lwip/prot/dns.h;Middlewares/Third_Party/LwIP/src/include/lwip/prot/udp.h;Middlewares/Third_Party/LwIP/src/include/lwip/prot/etharp.h;Middlewares/Third_Party/LwIP/src/include/lwip/prot/autoip.h;Middlewares/Third_Party/LwIP/src/include/netif/bridgeif_opts.h;Middlewares/Third_Party/LwIP/src/include/netif/zepif.h;Middlewares/Third_Party/LwIP/src/include/netif/lowpan6.h;Middlewares/Third_Party/LwIP/src/include/netif/ethernet.h;Middlewares/Third_Party/LwIP/src/include/netif/lowpan6_common.h;Middlewares/Third_Party/LwIP/src/include/netif/lowpan6_opts.h;Middlewares/Third_Party/LwIP/src/include/netif/ieee802154.h;Middlewares/Third_Party/LwIP/src/include/netif/bridgeif.h;Middlewares/Third_Party/LwIP/src/include/netif/slipif.h;Middlewares/Third_Party/LwIP/src/include/netif/lowpan6_ble.h;Middlewares/Third_Party/LwIP/src/include/netif/etharp.h;Middlewares/Third_Party/LwIP/src/include/compat/posix/netdb.h;Middlewares/Third_Party/LwIP/src/include/compat/posix/arpa/inet.h;Middlewares/Third_Party/LwIP/src/include/compat/posix/net/if.h;Middlewares/Third_Party/LwIP/src/include/compat/posix/sys/socket.h;Middlewares/Third_Party/LwIP/src/include/compat/stdc/errno.h;Middlewares/Third_Party/LwIP/system/arch/perf.h;Middlewares/Third_Party/LwIP/system/arch/cc.h;Middlewares/Third_Party/LwIP/system/arch/cpu.h;Middlewares/Third_Party/LwIP/system/arch/bpstruct.h;Middlewares/Third_Party/LwIP/system/arch/epstruct.h;Middlewares/Third_Party/LwIP/system/arch/sys_arch.h;Middlewares/Third_Party/LwIP/system/arch/lib.h;Middlewares/Third_Party/LwIP/system/arch/init.h;Middlewares/Third_Party/LwIP/src/api/netdb.c;Middlewares/Third_Party/LwIP/src/api/netifapi.c;Middlewares/Third_Party/LwIP/src/api/err.c;Middlewares/Third_Party/LwIP/src/api/netbuf.c;Middlewares/Third_Party/LwIP/src/api/tcpip.c;Middlewares/Third_Party/LwIP/src/api/sockets.c;Middlewares/Third_Party/LwIP/src/api/api_msg.c;Middlewares/Third_Party/LwIP/src/api/api_lib.c;Middlewares/Third_Party/LwIP/src/api/if_api.c;Middlewares/Third_Party/LwIP/src/core/mem.c
;Middlewares/Third_Party/LwIP/src/core/def.c;Middlewares/Third_Party/LwIP/src/core/raw.c;Middlewares/Third_Party/LwIP/src/core/init.c;Middlewares/Third_Party/LwIP/src/core/altcp.c;Middlewares/Third_Party/LwIP/src/core/memp.c;Middlewares/Third_Party/LwIP/src/core/tcp_out.c;Middlewares/Third_Party/LwIP/src/core/netif.c;Middlewares/Third_Party/LwIP/src/core/inet_chksum.c;Middlewares/Third_Party/LwIP/src/core/tcp_in.c;Middlewares/Third_Party/LwIP/src/core/altcp_alloc.c;Middlewares/Third_Party/LwIP/src/core/udp.c;Middlewares/Third_Party/LwIP/src/core/timeouts.c;Middlewares/Third_Party/LwIP/src/core/altcp_tcp.c;Middlewares/Third_Party/LwIP/src/core/ip.c;Middlewares/Third_Party/LwIP/src/core/tcp.c;Middlewares/Third_Party/LwIP/src/core/sys.c;Middlewares/Third_Party/LwIP/src/core/pbuf.c;Middlewares/Third_Party/LwIP/src/core/stats.c;Middlewares/Third_Party/LwIP/src/core/dns.c;Middlewares/Third_Party/LwIP/src/core/ipv4/autoip.c;Middlewares/Third_Party/LwIP/src/core/ipv4/ip4_addr.c;Middlewares/Third_Party/LwIP/src/core/ipv4/dhcp.c;Middlewares/Third_Party/LwIP/src/core/ipv4/icmp.c;Middlewares/Third_Party/LwIP/src/core/ipv4/ip4.c;Middlewares/Third_Party/LwIP/src/core/ipv4/etharp.c;Middlewares/Third_Party/LwIP/src/core/ipv4/ip4_frag.c;Middlewares/Third_Party/LwIP/src/core/ipv4/igmp.c;Middlewares/Third_Party/LwIP/src/core/ipv6/icmp6.c;Middlewares/Third_Party/LwIP/src/core/ipv6/inet6.c;Middlewares/Third_Party/LwIP/src/core/ipv6/dhcp6.c;Middlewares/Third_Party/LwIP/src/core/ipv6/mld6.c;Middlewares/Third_Party/LwIP/src/core/ipv6/ip6.c;Middlewares/Third_Party/LwIP/src/core/ipv6/ip6_addr.c;Middlewares/Third_Party/LwIP/src/core/ipv6/ip6_frag.c;Middlewares/Third_Party/LwIP/src/core/ipv6/nd6.c;Middlewares/Third_Party/LwIP/src/core/ipv6/ethip6.c;Middlewares/Third_Party/LwIP/system/OS/sys_arch.c;Middlewares/Third_Party/LwIP/src/apps/mqtt/mqtt.c;Middlewares/Third_Party/LwIP/src/include/lwip/memp.h;Middlewares/Third_Party/LwIP/src/include/lwip/igmp.h;Middlewares/Third_Party/LwIP/src/include/lwip/opt.h;Middlewares/Third_Party/LwIP/src/include/lwip/if_api.h;Middlewares/Third_Party/LwIP/src/include/lwip/ip6_zone.h;Middlewares/Third_Party/LwIP/src/include/lwip/ip.h;Middlewares/Third_Party/LwIP/src/include/lwip/mld6.h;Middlewares/Third_Party/LwIP/src/include/lwip/tcp.h;Middlewares/Third_Party/LwIP/src/include/lwip/icmp.h;Middlewares/Third_Party/LwIP/src/include/lwip/netif.h;Middlewares/Third_Party/LwIP/src/include/lwip/icmp6.h;Middlewares/Third_Party/LwIP/src/include/lwip/inet_chksum.h;Middlewares/Third_Party/LwIP/src/include/lwip/ip_addr.h;Middlewares/Third_Party/LwIP/src/include/lwip/snmp.h;Middlewares/Third_Party/LwIP/src/include/lwip/tcpbase.h;Middlewares/Third_Party/LwIP/src/include/lwip/altcp_tls.h;Middlewares/Third_Party/LwIP/src/include/lwip/arch.h;Middlewares/Third_Party/LwIP/src/include/lwip/mem.h;Middlewares/Third_Party/LwIP/src/include/lwip/ip6_addr.h;Middlewares/Third_Party/LwIP/src/include/lwip/ethip6.h;Middlewares/Third_Party/LwIP/src/include/lwip/tcpip.h;Middlewares/Third_Party/LwIP/src/include/lwip/raw.h;Middlewares/Third_Party/LwIP/src/include/lwip/def.h;Middlewares/Third_Party/LwIP/src/include/lwip/err.h;Middlewares/Third_Party/LwIP/src/include/lwip/nd6.h;Middlewares/Third_Party/LwIP/src/include/lwip/netbuf.h;Middlewares/Third_Party/LwIP/src/include/lwip/ip6.h;Middlewares/Third_Party/LwIP/src/include/lwip/dhcp6.h;Middlewares/Third_Party/LwIP/src/include/lwip/inet.h;Middlewares/Third_Party/LwIP/src/include/lwip/timeouts.h;Middlewares/Third_Party/LwIP/src/include/lwip/altcp_tcp.h;Middlewares/Third_Party/Lw
IP/src/include/lwip/dhcp.h;Middlewares/Third_Party/LwIP/src/include/lwip/debug.h;Middlewares/Third_Party/LwIP/src/include/lwip/ip4.h;Middlewares/Third_Party/LwIP/src/include/lwip/sio.h;Middlewares/Third_Party/LwIP/src/include/lwip/altcp.h;Middlewares/Third_Party/LwIP/src/include/lwip/netdb.h;Middlewares/Third_Party/LwIP/src/include/lwip/netifapi.h;Middlewares/Third_Party/LwIP/src/include/lwip/stats.h;Middlewares/Third_Party/LwIP/src/include/lwip/sockets.h;Middlewares/Third_Party/LwIP/src/include/lwip/dns.h;Middlewares/Third_Party/LwIP/src/include/lwip/ip6_frag.h;Middlewares/Third_Party/LwIP/src/include/lwip/udp.h;Middlewares/Third_Party/LwIP/src/include/lwip/pbuf.h;Middlewares/Third_Party/LwIP/src/include/lwip/etharp.h;Middlewares/Third_Party/LwIP/src/include/lwip/ip4_frag.h;Middlewares/Third_Party/LwIP/src/include/lwip/api.h;Middlewares/Third_Party/LwIP/src/include/lwip/autoip.h;Middlewares/Third_Party/LwIP/src/include/lwip/errno.h;Middlewares/Third_Party/LwIP/src/include/lwip/ip4_addr.h;Middlewares/Third_Party/LwIP/src/include/lwip/init.h;Middlewares/Third_Party/LwIP/src/include/lwip/sys.h;Middlewares/Third_Party/LwIP/src/include/lwip/apps/altcp_proxyconnect.h;Middlewares/Third_Party/LwIP/src/include/lwip/apps/mqtt_opts.h;Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmp_scalar.h;Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmpv3.h;Middlewares/Third_Party/LwIP/src/include/lwip/apps/httpd_opts.h;Middlewares/Third_Party/LwIP/src/include/lwip/apps/lwiperf.h;Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmp.h;Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmp_core.h;Middlewares/Third_Party/LwIP/src/include/lwip/apps/sntp_opts.h;Middlewares/Third_Party/LwIP/src/include/lwip/apps/mqtt_priv.h;Middlewares/Third_Party/LwIP/src/include/lwip/apps/httpd.h;Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmp_opts.h;Middlewares/Third_Party/LwIP/src/include/lwip/apps/smtp_opts.h;Middlewares/Third_Party/LwIP/src/include/lwip/apps/mdns_opts.h;Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmp_snmpv2_framework.h;Middlewares/Third_Party/LwIP/src/include/lwip/apps/tftp_opts.h;Middlewares/Third_Party/LwIP/src/include/lwip/apps/altcp_tls_mbedtls_opts.h;Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmp_table.h;Middlewares/Third_Party/LwIP/src/include/lwip/apps/smtp.h;Middlewares/Third_Party/LwIP/src/include/lwip/apps/mdns.h;Middlewares/Third_Party/LwIP/src/include/lwip/apps/mqtt.h;Middlewares/Third_Party/LwIP/src/include/lwip/apps/mdns_priv.h;Middlewares/Third_Party/LwIP/src/include/lwip/apps/http_client.h;Middlewares/Third_Party/LwIP/src/include/lwip/apps/netbiosns.h;Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmp_mib2.h;Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmp_snmpv2_usm.h;Middlewares/Third_Party/LwIP/src/include/lwip/apps/netbiosns_opts.h;Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmp_threadsync.h;Middlewares/Third_Party/LwIP/src/include/lwip/apps/tftp_server.h;Middlewares/Third_Party/LwIP/src/include/lwip/apps/fs.h;Middlewares/Third_Party/LwIP/src/include/lwip/apps/sntp.h;Middlewares/Third_Party/LwIP/src/include/lwip/priv/tcpip_priv.h;Middlewares/Third_Party/LwIP/src/include/lwip/priv/raw_priv.h;Middlewares/Third_Party/LwIP/src/include/lwip/priv/api_msg.h;Middlewares/Third_Party/LwIP/src/include/lwip/priv/altcp_priv.h;Middlewares/Third_Party/LwIP/src/include/lwip/priv/tcp_priv.h;Middlewares/Third_Party/LwIP/src/include/lwip/priv/nd6_priv.h;Middlewares/Third_Party/LwIP/src/include/lwip/priv/sockets_priv.h;Middlewares/Third_Party/LwIP/src/i
nclude/lwip/priv/memp_std.h;Middlewares/Third_Party/LwIP/src/include/lwip/priv/memp_priv.h;Middlewares/Third_Party/LwIP/src/include/lwip/priv/mem_priv.h;Middlewares/Third_Party/LwIP/src/include/lwip/prot/igmp.h;Middlewares/Third_Party/LwIP/src/include/lwip/prot/ip.h;Middlewares/Third_Party/LwIP/src/include/lwip/prot/mld6.h;Middlewares/Third_Party/LwIP/src/include/lwip/prot/tcp.h;Middlewares/Third_Party/LwIP/src/include/lwip/prot/icmp.h;Middlewares/Third_Party/LwIP/src/include/lwip/prot/icmp6.h;Middlewares/Third_Party/LwIP/src/include/lwip/prot/ethernet.h;Middlewares/Third_Party/LwIP/src/include/lwip/prot/iana.h;Middlewares/Third_Party/LwIP/src/include/lwip/prot/nd6.h;Middlewares/Third_Party/LwIP/src/include/lwip/prot/ip6.h;Middlewares/Third_Party/LwIP/src/include/lwip/prot/dhcp6.h;Middlewares/Third_Party/LwIP/src/include/lwip/prot/dhcp.h;Middlewares/Third_Party/LwIP/src/include/lwip/prot/ip4.h;Middlewares/Third_Party/LwIP/src/include/lwip/prot/ieee.h;Middlewares/Third_Party/LwIP/src/include/lwip/prot/dns.h;Middlewares/Third_Party/LwIP/src/include/lwip/prot/udp.h;Middlewares/Third_Party/LwIP/src/include/lwip/prot/etharp.h;Middlewares/Third_Party/LwIP/src/include/lwip/prot/autoip.h;Middlewares/Third_Party/LwIP/src/include/netif/bridgeif_opts.h;Middlewares/Third_Party/LwIP/src/include/netif/zepif.h;Middlewares/Third_Party/LwIP/src/include/netif/lowpan6.h;Middlewares/Third_Party/LwIP/src/include/netif/ethernet.h;Middlewares/Third_Party/LwIP/src/include/netif/lowpan6_common.h;Middlewares/Third_Party/LwIP/src/include/netif/lowpan6_opts.h;Middlewares/Third_Party/LwIP/src/include/netif/ieee802154.h;Middlewares/Third_Party/LwIP/src/include/netif/bridgeif.h;Middlewares/Third_Party/LwIP/src/include/netif/slipif.h;Middlewares/Third_Party/LwIP/src/include/netif/lowpan6_ble.h;Middlewares/Third_Party/LwIP/src/include/netif/etharp.h;Middlewares/Third_Party/LwIP/src/include/compat/posix/netdb.h;Middlewares/Third_Party/LwIP/src/include/compat/posix/arpa/inet.h;Middlewares/Third_Party/LwIP/src/include/compat/posix/net/if.h;Middlewares/Third_Party/LwIP/src/include/compat/posix/sys/socket.h;Middlewares/Third_Party/LwIP/src/include/compat/stdc/errno.h;Middlewares/Third_Party/LwIP/system/arch/perf.h;Middlewares/Third_Party/LwIP/system/arch/cc.h;Middlewares/Third_Party/LwIP/system/arch/cpu.h;Middlewares/Third_Party/LwIP/system/arch/bpstruct.h;Middlewares/Third_Party/LwIP/system/arch/epstruct.h;Middlewares/Third_Party/LwIP/system/arch/sys_arch.h;Middlewares/Third_Party/LwIP/system/arch/lib.h;Middlewares/Third_Party/LwIP/system/arch/init.h;Drivers/CMSIS/Include/core_cm0.h;Drivers/CMSIS/Include/core_cm0plus.h;Drivers/CMSIS/Include/core_sc300.h;Drivers/CMSIS/Include/core_cm7.h;Drivers/CMSIS/Include/mpu_armv8.h;Drivers/CMSIS/Include/core_cm33.h;Drivers/CMSIS/Include/cmsis_armclang.h;Drivers/CMSIS/Include/core_sc000.h;Drivers/CMSIS/Include/core_armv8mml.h;Drivers/CMSIS/Include/cmsis_gcc.h;Drivers/CMSIS/Include/cmsis_compiler.h;Drivers/CMSIS/Include/core_cm3.h;Drivers/CMSIS/Include/cmsis_iccarm.h;Drivers/CMSIS/Include/mpu_armv7.h;Drivers/CMSIS/Include/core_cm1.h;Drivers/CMSIS/Include/cmsis_version.h;Drivers/CMSIS/Include/core_armv8mbl.h;Drivers/CMSIS/Include/core_cm23.h;Drivers/CMSIS/Include/cmsis_armcc.h;Drivers/CMSIS/Include/tz_context.h;Drivers/CMSIS/Include/core_cm4.h; + +[PreviousUsedCubeIDEFiles] 
+SourceFiles=Core/Src/main.c;Core/Src/freertos.c;LWIP/App/lwip.c;LWIP/Target/ethernetif.c;Core/Src/stm32f7xx_it.c;Core/Src/stm32f7xx_hal_msp.c;Core/Src/stm32f7xx_hal_timebase_tim.c;Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_eth.c;Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_rcc.c;Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_rcc_ex.c;Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_flash.c;Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_flash_ex.c;Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_gpio.c;Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_dma.c;Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_dma_ex.c;Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_pwr.c;Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_pwr_ex.c;Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_cortex.c;Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal.c;Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_i2c.c;Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_i2c_ex.c;Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_exti.c;Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_tim.c;Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_tim_ex.c;Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_uart.c;Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_uart_ex.c;Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_pcd.c;Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_pcd_ex.c;Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_ll_usb.c;Middlewares/Third_Party/FreeRTOS/Source/croutine.c;Middlewares/Third_Party/FreeRTOS/Source/event_groups.c;Middlewares/Third_Party/FreeRTOS/Source/list.c;Middlewares/Third_Party/FreeRTOS/Source/queue.c;Middlewares/Third_Party/FreeRTOS/Source/stream_buffer.c;Middlewares/Third_Party/FreeRTOS/Source/tasks.c;Middlewares/Third_Party/FreeRTOS/Source/timers.c;Middlewares/Third_Party/FreeRTOS/Source/CMSIS_RTOS_V2/cmsis_os2.c;Middlewares/Third_Party/FreeRTOS/Source/portable/MemMang/heap_4.c;Middlewares/Third_Party/FreeRTOS/Source/portable/GCC/ARM_CM7/r0p1/port.c;Middlewares/Third_Party/LwIP/src/netif/ppp/auth.c;Middlewares/Third_Party/LwIP/src/netif/ppp/ccp.c;Middlewares/Third_Party/LwIP/src/netif/ppp/chap_ms.c;Middlewares/Third_Party/LwIP/src/netif/ppp/chap-md5.c;Middlewares/Third_Party/LwIP/src/netif/ppp/chap-new.c;Middlewares/Third_Party/LwIP/src/netif/ppp/demand.c;Middlewares/Third_Party/LwIP/src/netif/ppp/eap.c;Middlewares/Third_Party/LwIP/src/netif/ppp/eui64.c;Middlewares/Third_Party/LwIP/src/netif/ppp/fsm.c;Middlewares/Third_Party/LwIP/src/netif/ppp/ipcp.c;Middlewares/Third_Party/LwIP/src/netif/ppp/ipv6cp.c;Middlewares/Third_Party/LwIP/src/netif/ppp/lcp.c;Middlewares/Third_Party/LwIP/src/netif/ppp/magic.c;Middlewares/Third_Party/LwIP/src/netif/ppp/mppe.c;Middlewares/Third_Party/LwIP/src/netif/ppp/multilink.c;Middlewares/Third_Party/LwIP/src/netif/ppp/ppp.c;Middlewares/Third_Party/LwIP/src/netif/ppp/pppapi.c;Middlewares/Third_Party/LwIP/src/netif/ppp/pppcrypt.c;Middlewares/Third_Party/LwIP/src/netif/ppp/pppoe.c;Middlewares/Third_Party/LwIP/src/netif/ppp/pppol2tp.c;Middlewares/Third_Party/LwIP/src/netif/ppp/pppos.c;Middlewares/Third_Party/LwIP/src/netif/ppp/upap.c;Middlewares/Third_Party/LwIP/src/netif/ppp/utils.c;Middlewares/Third_Party/LwIP/src/netif/ppp/vj.c;Middlewares/Third_Party/LwIP/src/netif/bridgeif.c;Middlewares/Third_Party/LwIP/src/netif/bridgeif_fdb.c;Middlewares/Third_Party/LwIP/src/netif/ethernet.c;Middlewares/Third_Party/LwIP/src/netif/lowpan6.c;Middlewares/Third_Party/LwIP/src/netif/lowpan6_ble.c;Middlewares/Third_Party/LwIP/src/netif/lowpan6_common.c;Middlewares/Third_Party/LwIP/src/netif/slipif.c;Middlewares/Third_Party/LwIP/sr
c/netif/zepif.c;Middlewares/Third_Party/LwIP/src/netif/ppp/ecp.c;Drivers/CMSIS/Device/ST/STM32F7xx/Source/Templates/system_stm32f7xx.c;Middlewares/Third_Party/LwIP/src/api/netdb.c;Middlewares/Third_Party/LwIP/src/api/netifapi.c;Middlewares/Third_Party/LwIP/src/api/err.c;Middlewares/Third_Party/LwIP/src/api/netbuf.c;Middlewares/Third_Party/LwIP/src/api/tcpip.c;Middlewares/Third_Party/LwIP/src/api/sockets.c;Middlewares/Third_Party/LwIP/src/api/api_msg.c;Middlewares/Third_Party/LwIP/src/api/api_lib.c;Middlewares/Third_Party/LwIP/src/api/if_api.c;Middlewares/Third_Party/LwIP/src/core/mem.c;Middlewares/Third_Party/LwIP/src/core/def.c;Middlewares/Third_Party/LwIP/src/core/raw.c;Middlewares/Third_Party/LwIP/src/core/init.c;Middlewares/Third_Party/LwIP/src/core/altcp.c;Middlewares/Third_Party/LwIP/src/core/memp.c;Middlewares/Third_Party/LwIP/src/core/tcp_out.c;Middlewares/Third_Party/LwIP/src/core/netif.c;Middlewares/Third_Party/LwIP/src/core/inet_chksum.c;Middlewares/Third_Party/LwIP/src/core/tcp_in.c;Middlewares/Third_Party/LwIP/src/core/altcp_alloc.c;Middlewares/Third_Party/LwIP/src/core/udp.c;Middlewares/Third_Party/LwIP/src/core/timeouts.c;Middlewares/Third_Party/LwIP/src/core/altcp_tcp.c;Middlewares/Third_Party/LwIP/src/core/ip.c;Middlewares/Third_Party/LwIP/src/core/tcp.c;Middlewares/Third_Party/LwIP/src/core/sys.c;Middlewares/Third_Party/LwIP/src/core/pbuf.c;Middlewares/Third_Party/LwIP/src/core/stats.c;Middlewares/Third_Party/LwIP/src/core/dns.c;Middlewares/Third_Party/LwIP/src/core/ipv4/autoip.c;Middlewares/Third_Party/LwIP/src/core/ipv4/ip4_addr.c;Middlewares/Third_Party/LwIP/src/core/ipv4/dhcp.c;Middlewares/Third_Party/LwIP/src/core/ipv4/icmp.c;Middlewares/Third_Party/LwIP/src/core/ipv4/ip4.c;Middlewares/Third_Party/LwIP/src/core/ipv4/etharp.c;Middlewares/Third_Party/LwIP/src/core/ipv4/ip4_frag.c;Middlewares/Third_Party/LwIP/src/core/ipv4/igmp.c;Middlewares/Third_Party/LwIP/src/core/ipv6/icmp6.c;Middlewares/Third_Party/LwIP/src/core/ipv6/inet6.c;Middlewares/Third_Party/LwIP/src/core/ipv6/dhcp6.c;Middlewares/Third_Party/LwIP/src/core/ipv6/mld6.c;Middlewares/Third_Party/LwIP/src/core/ipv6/ip6.c;Middlewares/Third_Party/LwIP/src/core/ipv6/ip6_addr.c;Middlewares/Third_Party/LwIP/src/core/ipv6/ip6_frag.c;Middlewares/Third_Party/LwIP/src/core/ipv6/nd6.c;Middlewares/Third_Party/LwIP/src/core/ipv6/ethip6.c;Middlewares/Third_Party/LwIP/system/OS/sys_arch.c;Middlewares/Third_Party/LwIP/src/apps/mqtt/mqtt.c;Core/Src/system_stm32f7xx.c;Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_eth.c;Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_rcc.c;Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_rcc_ex.c;Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_flash.c;Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_flash_ex.c;Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_gpio.c;Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_dma.c;Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_dma_ex.c;Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_pwr.c;Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_pwr_ex.c;Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_cortex.c;Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal.c;Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_i2c.c;Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_i2c_ex.c;Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_exti.c;Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_tim.c;Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_tim_ex.c;Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_uart.c;Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_uart_ex.c;Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_pc
d.c;Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_pcd_ex.c;Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_ll_usb.c;Middlewares/Third_Party/FreeRTOS/Source/croutine.c;Middlewares/Third_Party/FreeRTOS/Source/event_groups.c;Middlewares/Third_Party/FreeRTOS/Source/list.c;Middlewares/Third_Party/FreeRTOS/Source/queue.c;Middlewares/Third_Party/FreeRTOS/Source/stream_buffer.c;Middlewares/Third_Party/FreeRTOS/Source/tasks.c;Middlewares/Third_Party/FreeRTOS/Source/timers.c;Middlewares/Third_Party/FreeRTOS/Source/CMSIS_RTOS_V2/cmsis_os2.c;Middlewares/Third_Party/FreeRTOS/Source/portable/MemMang/heap_4.c;Middlewares/Third_Party/FreeRTOS/Source/portable/GCC/ARM_CM7/r0p1/port.c;Middlewares/Third_Party/LwIP/src/netif/ppp/auth.c;Middlewares/Third_Party/LwIP/src/netif/ppp/ccp.c;Middlewares/Third_Party/LwIP/src/netif/ppp/chap_ms.c;Middlewares/Third_Party/LwIP/src/netif/ppp/chap-md5.c;Middlewares/Third_Party/LwIP/src/netif/ppp/chap-new.c;Middlewares/Third_Party/LwIP/src/netif/ppp/demand.c;Middlewares/Third_Party/LwIP/src/netif/ppp/eap.c;Middlewares/Third_Party/LwIP/src/netif/ppp/eui64.c;Middlewares/Third_Party/LwIP/src/netif/ppp/fsm.c;Middlewares/Third_Party/LwIP/src/netif/ppp/ipcp.c;Middlewares/Third_Party/LwIP/src/netif/ppp/ipv6cp.c;Middlewares/Third_Party/LwIP/src/netif/ppp/lcp.c;Middlewares/Third_Party/LwIP/src/netif/ppp/magic.c;Middlewares/Third_Party/LwIP/src/netif/ppp/mppe.c;Middlewares/Third_Party/LwIP/src/netif/ppp/multilink.c;Middlewares/Third_Party/LwIP/src/netif/ppp/ppp.c;Middlewares/Third_Party/LwIP/src/netif/ppp/pppapi.c;Middlewares/Third_Party/LwIP/src/netif/ppp/pppcrypt.c;Middlewares/Third_Party/LwIP/src/netif/ppp/pppoe.c;Middlewares/Third_Party/LwIP/src/netif/ppp/pppol2tp.c;Middlewares/Third_Party/LwIP/src/netif/ppp/pppos.c;Middlewares/Third_Party/LwIP/src/netif/ppp/upap.c;Middlewares/Third_Party/LwIP/src/netif/ppp/utils.c;Middlewares/Third_Party/LwIP/src/netif/ppp/vj.c;Middlewares/Third_Party/LwIP/src/netif/bridgeif.c;Middlewares/Third_Party/LwIP/src/netif/bridgeif_fdb.c;Middlewares/Third_Party/LwIP/src/netif/ethernet.c;Middlewares/Third_Party/LwIP/src/netif/lowpan6.c;Middlewares/Third_Party/LwIP/src/netif/lowpan6_ble.c;Middlewares/Third_Party/LwIP/src/netif/lowpan6_common.c;Middlewares/Third_Party/LwIP/src/netif/slipif.c;Middlewares/Third_Party/LwIP/src/netif/zepif.c;Middlewares/Third_Party/LwIP/src/netif/ppp/ecp.c;Drivers/CMSIS/Device/ST/STM32F7xx/Source/Templates/system_stm32f7xx.c;Middlewares/Third_Party/LwIP/src/api/netdb.c;Middlewares/Third_Party/LwIP/src/api/netifapi.c;Middlewares/Third_Party/LwIP/src/api/err.c;Middlewares/Third_Party/LwIP/src/api/netbuf.c;Middlewares/Third_Party/LwIP/src/api/tcpip.c;Middlewares/Third_Party/LwIP/src/api/sockets.c;Middlewares/Third_Party/LwIP/src/api/api_msg.c;Middlewares/Third_Party/LwIP/src/api/api_lib.c;Middlewares/Third_Party/LwIP/src/api/if_api.c;Middlewares/Third_Party/LwIP/src/core/mem.c;Middlewares/Third_Party/LwIP/src/core/def.c;Middlewares/Third_Party/LwIP/src/core/raw.c;Middlewares/Third_Party/LwIP/src/core/init.c;Middlewares/Third_Party/LwIP/src/core/altcp.c;Middlewares/Third_Party/LwIP/src/core/memp.c;Middlewares/Third_Party/LwIP/src/core/tcp_out.c;Middlewares/Third_Party/LwIP/src/core/netif.c;Middlewares/Third_Party/LwIP/src/core/inet_chksum.c;Middlewares/Third_Party/LwIP/src/core/tcp_in.c;Middlewares/Third_Party/LwIP/src/core/altcp_alloc.c;Middlewares/Third_Party/LwIP/src/core/udp.c;Middlewares/Third_Party/LwIP/src/core/timeouts.c;Middlewares/Third_Party/LwIP/src/core/altcp_tcp.c;Middlewares/Third_Party/LwIP/src/core/ip.c;Middlewares/Third
_Party/LwIP/src/core/tcp.c;Middlewares/Third_Party/LwIP/src/core/sys.c;Middlewares/Third_Party/LwIP/src/core/pbuf.c;Middlewares/Third_Party/LwIP/src/core/stats.c;Middlewares/Third_Party/LwIP/src/core/dns.c;Middlewares/Third_Party/LwIP/src/core/ipv4/autoip.c;Middlewares/Third_Party/LwIP/src/core/ipv4/ip4_addr.c;Middlewares/Third_Party/LwIP/src/core/ipv4/dhcp.c;Middlewares/Third_Party/LwIP/src/core/ipv4/icmp.c;Middlewares/Third_Party/LwIP/src/core/ipv4/ip4.c;Middlewares/Third_Party/LwIP/src/core/ipv4/etharp.c;Middlewares/Third_Party/LwIP/src/core/ipv4/ip4_frag.c;Middlewares/Third_Party/LwIP/src/core/ipv4/igmp.c;Middlewares/Third_Party/LwIP/src/core/ipv6/icmp6.c;Middlewares/Third_Party/LwIP/src/core/ipv6/inet6.c;Middlewares/Third_Party/LwIP/src/core/ipv6/dhcp6.c;Middlewares/Third_Party/LwIP/src/core/ipv6/mld6.c;Middlewares/Third_Party/LwIP/src/core/ipv6/ip6.c;Middlewares/Third_Party/LwIP/src/core/ipv6/ip6_addr.c;Middlewares/Third_Party/LwIP/src/core/ipv6/ip6_frag.c;Middlewares/Third_Party/LwIP/src/core/ipv6/nd6.c;Middlewares/Third_Party/LwIP/src/core/ipv6/ethip6.c;Middlewares/Third_Party/LwIP/system/OS/sys_arch.c;Middlewares/Third_Party/LwIP/src/apps/mqtt/mqtt.c;Core/Src/system_stm32f7xx.c;;;Middlewares/Third_Party/FreeRTOS/Source/croutine.c;Middlewares/Third_Party/FreeRTOS/Source/event_groups.c;Middlewares/Third_Party/FreeRTOS/Source/list.c;Middlewares/Third_Party/FreeRTOS/Source/queue.c;Middlewares/Third_Party/FreeRTOS/Source/stream_buffer.c;Middlewares/Third_Party/FreeRTOS/Source/tasks.c;Middlewares/Third_Party/FreeRTOS/Source/timers.c;Middlewares/Third_Party/FreeRTOS/Source/CMSIS_RTOS_V2/cmsis_os2.c;Middlewares/Third_Party/FreeRTOS/Source/portable/MemMang/heap_4.c;Middlewares/Third_Party/FreeRTOS/Source/portable/GCC/ARM_CM7/r0p1/port.c;Middlewares/Third_Party/LwIP/src/netif/ppp/auth.c;Middlewares/Third_Party/LwIP/src/netif/ppp/ccp.c;Middlewares/Third_Party/LwIP/src/netif/ppp/chap_ms.c;Middlewares/Third_Party/LwIP/src/netif/ppp/chap-md5.c;Middlewares/Third_Party/LwIP/src/netif/ppp/chap-new.c;Middlewares/Third_Party/LwIP/src/netif/ppp/demand.c;Middlewares/Third_Party/LwIP/src/netif/ppp/eap.c;Middlewares/Third_Party/LwIP/src/netif/ppp/eui64.c;Middlewares/Third_Party/LwIP/src/netif/ppp/fsm.c;Middlewares/Third_Party/LwIP/src/netif/ppp/ipcp.c;Middlewares/Third_Party/LwIP/src/netif/ppp/ipv6cp.c;Middlewares/Third_Party/LwIP/src/netif/ppp/lcp.c;Middlewares/Third_Party/LwIP/src/netif/ppp/magic.c;Middlewares/Third_Party/LwIP/src/netif/ppp/mppe.c;Middlewares/Third_Party/LwIP/src/netif/ppp/multilink.c;Middlewares/Third_Party/LwIP/src/netif/ppp/ppp.c;Middlewares/Third_Party/LwIP/src/netif/ppp/pppapi.c;Middlewares/Third_Party/LwIP/src/netif/ppp/pppcrypt.c;Middlewares/Third_Party/LwIP/src/netif/ppp/pppoe.c;Middlewares/Third_Party/LwIP/src/netif/ppp/pppol2tp.c;Middlewares/Third_Party/LwIP/src/netif/ppp/pppos.c;Middlewares/Third_Party/LwIP/src/netif/ppp/upap.c;Middlewares/Third_Party/LwIP/src/netif/ppp/utils.c;Middlewares/Third_Party/LwIP/src/netif/ppp/vj.c;Middlewares/Third_Party/LwIP/src/netif/bridgeif.c;Middlewares/Third_Party/LwIP/src/netif/bridgeif_fdb.c;Middlewares/Third_Party/LwIP/src/netif/ethernet.c;Middlewares/Third_Party/LwIP/src/netif/lowpan6.c;Middlewares/Third_Party/LwIP/src/netif/lowpan6_ble.c;Middlewares/Third_Party/LwIP/src/netif/lowpan6_common.c;Middlewares/Third_Party/LwIP/src/netif/slipif.c;Middlewares/Third_Party/LwIP/src/netif/zepif.c;Middlewares/Third_Party/LwIP/src/netif/ppp/ecp.c;Middlewares/Third_Party/LwIP/src/api/netdb.c;Middlewares/Third_Party/LwIP/src/api/netifapi.c;Middlewares
/Third_Party/LwIP/src/api/err.c;Middlewares/Third_Party/LwIP/src/api/netbuf.c;Middlewares/Third_Party/LwIP/src/api/tcpip.c;Middlewares/Third_Party/LwIP/src/api/sockets.c;Middlewares/Third_Party/LwIP/src/api/api_msg.c;Middlewares/Third_Party/LwIP/src/api/api_lib.c;Middlewares/Third_Party/LwIP/src/api/if_api.c;Middlewares/Third_Party/LwIP/src/core/mem.c;Middlewares/Third_Party/LwIP/src/core/def.c;Middlewares/Third_Party/LwIP/src/core/raw.c;Middlewares/Third_Party/LwIP/src/core/init.c;Middlewares/Third_Party/LwIP/src/core/altcp.c;Middlewares/Third_Party/LwIP/src/core/memp.c;Middlewares/Third_Party/LwIP/src/core/tcp_out.c;Middlewares/Third_Party/LwIP/src/core/netif.c;Middlewares/Third_Party/LwIP/src/core/inet_chksum.c;Middlewares/Third_Party/LwIP/src/core/tcp_in.c;Middlewares/Third_Party/LwIP/src/core/altcp_alloc.c;Middlewares/Third_Party/LwIP/src/core/udp.c;Middlewares/Third_Party/LwIP/src/core/timeouts.c;Middlewares/Third_Party/LwIP/src/core/altcp_tcp.c;Middlewares/Third_Party/LwIP/src/core/ip.c;Middlewares/Third_Party/LwIP/src/core/tcp.c;Middlewares/Third_Party/LwIP/src/core/sys.c;Middlewares/Third_Party/LwIP/src/core/pbuf.c;Middlewares/Third_Party/LwIP/src/core/stats.c;Middlewares/Third_Party/LwIP/src/core/dns.c;Middlewares/Third_Party/LwIP/src/core/ipv4/autoip.c;Middlewares/Third_Party/LwIP/src/core/ipv4/ip4_addr.c;Middlewares/Third_Party/LwIP/src/core/ipv4/dhcp.c;Middlewares/Third_Party/LwIP/src/core/ipv4/icmp.c;Middlewares/Third_Party/LwIP/src/core/ipv4/ip4.c;Middlewares/Third_Party/LwIP/src/core/ipv4/etharp.c;Middlewares/Third_Party/LwIP/src/core/ipv4/ip4_frag.c;Middlewares/Third_Party/LwIP/src/core/ipv4/igmp.c;Middlewares/Third_Party/LwIP/src/core/ipv6/icmp6.c;Middlewares/Third_Party/LwIP/src/core/ipv6/inet6.c;Middlewares/Third_Party/LwIP/src/core/ipv6/dhcp6.c;Middlewares/Third_Party/LwIP/src/core/ipv6/mld6.c;Middlewares/Third_Party/LwIP/src/core/ipv6/ip6.c;Middlewares/Third_Party/LwIP/src/core/ipv6/ip6_addr.c;Middlewares/Third_Party/LwIP/src/core/ipv6/ip6_frag.c;Middlewares/Third_Party/LwIP/src/core/ipv6/nd6.c;Middlewares/Third_Party/LwIP/src/core/ipv6/ethip6.c;Middlewares/Third_Party/LwIP/system/OS/sys_arch.c;Middlewares/Third_Party/LwIP/src/apps/mqtt/mqtt.c; +HeaderPath=Middlewares/Third_Party/LwIP/src/include;Middlewares/Third_Party/LwIP/system;Middlewares/Third_Party/LwIP/src/include;Middlewares/Third_Party/LwIP/system;Drivers/STM32F7xx_HAL_Driver/Inc;Drivers/STM32F7xx_HAL_Driver/Inc/Legacy;Middlewares/Third_Party/FreeRTOS/Source/include;Middlewares/Third_Party/FreeRTOS/Source/CMSIS_RTOS_V2;Middlewares/Third_Party/FreeRTOS/Source/portable/GCC/ARM_CM7/r0p1;Middlewares/Third_Party/LwIP/src/include/netif/ppp;Drivers/CMSIS/Device/ST/STM32F7xx/Include;Middlewares/Third_Party/LwIP/src/include/lwip;Middlewares/Third_Party/LwIP/src/include/lwip/apps;Middlewares/Third_Party/LwIP/src/include/lwip/priv;Middlewares/Third_Party/LwIP/src/include/lwip/prot;Middlewares/Third_Party/LwIP/src/include/netif;Middlewares/Third_Party/LwIP/src/include/compat/posix;Middlewares/Third_Party/LwIP/src/include/compat/posix/arpa;Middlewares/Third_Party/LwIP/src/include/compat/posix/net;Middlewares/Third_Party/LwIP/src/include/compat/posix/sys;Middlewares/Third_Party/LwIP/src/include/compat/stdc;Middlewares/Third_Party/LwIP/system/arch;Drivers/CMSIS/Include;Core/Inc;LWIP/App;LWIP/Target; +CDefines=USE_HAL_DRIVER;STM32F767xx;USE_HAL_DRIVER;USE_HAL_DRIVER; + +[PreviousGenFiles] +AdvancedFolderStructure=true +HeaderFileListSize=7 
+HeaderFiles#0=/home/takase/ros/mROS2/github/eval/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Core/Inc/FreeRTOSConfig.h +HeaderFiles#1=/home/takase/ros/mROS2/github/eval/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/LWIP/App/lwip.h +HeaderFiles#2=/home/takase/ros/mROS2/github/eval/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/LWIP/Target/lwipopts.h +HeaderFiles#3=/home/takase/ros/mROS2/github/eval/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/LWIP/Target/ethernetif.h +HeaderFiles#4=/home/takase/ros/mROS2/github/eval/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Core/Inc/stm32f7xx_it.h +HeaderFiles#5=/home/takase/ros/mROS2/github/eval/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Core/Inc/stm32f7xx_hal_conf.h +HeaderFiles#6=/home/takase/ros/mROS2/github/eval/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Core/Inc/main.h +HeaderFolderListSize=3 +HeaderPath#0=/home/takase/ros/mROS2/github/eval/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Core/Inc +HeaderPath#1=/home/takase/ros/mROS2/github/eval/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/LWIP/App +HeaderPath#2=/home/takase/ros/mROS2/github/eval/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/LWIP/Target +HeaderFiles=; +SourceFileListSize=7 +SourceFiles#0=/home/takase/ros/mROS2/github/eval/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Core/Src/freertos.c +SourceFiles#1=/home/takase/ros/mROS2/github/eval/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/LWIP/App/lwip.c +SourceFiles#2=/home/takase/ros/mROS2/github/eval/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/LWIP/Target/ethernetif.c +SourceFiles#3=/home/takase/ros/mROS2/github/eval/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Core/Src/stm32f7xx_it.c +SourceFiles#4=/home/takase/ros/mROS2/github/eval/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Core/Src/stm32f7xx_hal_msp.c +SourceFiles#5=/home/takase/ros/mROS2/github/eval/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Core/Src/stm32f7xx_hal_timebase_tim.c +SourceFiles#6=/home/takase/ros/mROS2/github/eval/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Core/Src/main.c +SourceFolderListSize=3 +SourcePath#0=/home/takase/ros/mROS2/github/eval/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Core/Src +SourcePath#1=/home/takase/ros/mROS2/github/eval/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/LWIP/App +SourcePath#2=/home/takase/ros/mROS2/github/eval/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/LWIP/Target +SourceFiles=; + diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/.project b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/.project new file mode 100644 index 0000000..03f2438 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/.project @@ -0,0 +1,33 @@ + + + micro-ros_embeddedrtps + + + + + + org.eclipse.cdt.managedbuilder.core.genmakebuilder + clean,full,incremental, + + + + + org.eclipse.cdt.managedbuilder.core.ScannerConfigBuilder + full,incremental, + + + + + + com.st.stm32cube.ide.mcu.MCUProjectNature + com.st.stm32cube.ide.mcu.MCUCubeProjectNature + org.eclipse.cdt.core.cnature + org.eclipse.cdt.core.ccnature + com.st.stm32cube.ide.mcu.MCUCubeIdeServicesRevAev2ProjectNature + com.st.stm32cube.ide.mcu.MCUAdvancedStructureProjectNature + com.st.stm32cube.ide.mcu.MCUSingleCpuProjectNature + com.st.stm32cube.ide.mcu.MCURootProjectNature + org.eclipse.cdt.managedbuilder.core.managedBuildNature + org.eclipse.cdt.managedbuilder.core.ScannerConfigNature + + diff --git 
a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/.settings/com.st.stm32cube.ide.mcu.sfrview.prefs b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/.settings/com.st.stm32cube.ide.mcu.sfrview.prefs new file mode 100644 index 0000000..233019a --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/.settings/com.st.stm32cube.ide.mcu.sfrview.prefs @@ -0,0 +1,3 @@ +eclipse.preferences.version=1 +svd_custom_file_path= +svd_file_path=platform\:/plugin/com.st.stm32cube.ide.mcu.productdb.debug/resources/cmsis/STMicroelectronics_CMSIS_SVD/STM32F767.svd diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/.settings/language.settings.xml b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/.settings/language.settings.xml new file mode 100644 index 0000000..aba0be9 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/.settings/language.settings.xml @@ -0,0 +1,27 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/.settings/org.eclipse.cdt.codan.core.prefs b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/.settings/org.eclipse.cdt.codan.core.prefs new file mode 100644 index 0000000..2e279ce --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/.settings/org.eclipse.cdt.codan.core.prefs @@ -0,0 +1,74 @@ +com.st.stm32cube.ide.mcu.ide.oss.source.checker.libnano.problem.params={launchModes\=>{RUN_ON_FULL_BUILD\=>true,RUN_ON_INC_BUILD\=>true,RUN_ON_FILE_OPEN\=>false,RUN_ON_FILE_SAVE\=>false,RUN_AS_YOU_TYPE\=>true,RUN_ON_DEMAND\=>true},suppression_comment\=>"@suppress(\\"Float formatting support\\")"} +eclipse.preferences.version=1 +org.eclipse.cdt.codan.checkers.errnoreturn.params={launchModes\=>{RUN_ON_FULL_BUILD\=>true,RUN_ON_INC_BUILD\=>true,RUN_ON_FILE_OPEN\=>false,RUN_ON_FILE_SAVE\=>false,RUN_AS_YOU_TYPE\=>true,RUN_ON_DEMAND\=>true},suppression_comment\=>"@suppress(\\"No return\\")",implicit\=>false} +org.eclipse.cdt.codan.checkers.errreturnvalue.params={launchModes\=>{RUN_ON_FULL_BUILD\=>true,RUN_ON_INC_BUILD\=>true,RUN_ON_FILE_OPEN\=>false,RUN_ON_FILE_SAVE\=>false,RUN_AS_YOU_TYPE\=>true,RUN_ON_DEMAND\=>true},suppression_comment\=>"@suppress(\\"Unused return value\\")"} +org.eclipse.cdt.codan.checkers.localvarreturn=-Warning +org.eclipse.cdt.codan.checkers.localvarreturn.params={launchModes\=>{RUN_ON_FULL_BUILD\=>true,RUN_ON_INC_BUILD\=>true,RUN_ON_FILE_OPEN\=>false,RUN_ON_FILE_SAVE\=>false,RUN_AS_YOU_TYPE\=>true,RUN_ON_DEMAND\=>true},suppression_comment\=>"@suppress(\\"Returning the address of a local variable\\")"} +org.eclipse.cdt.codan.checkers.nocommentinside.params={launchModes\=>{RUN_ON_FULL_BUILD\=>true,RUN_ON_INC_BUILD\=>true,RUN_ON_FILE_OPEN\=>false,RUN_ON_FILE_SAVE\=>false,RUN_AS_YOU_TYPE\=>true,RUN_ON_DEMAND\=>true},suppression_comment\=>"@suppress(\\"Nesting comments\\")"} +org.eclipse.cdt.codan.checkers.nolinecomment.params={launchModes\=>{RUN_ON_FULL_BUILD\=>true,RUN_ON_INC_BUILD\=>true,RUN_ON_FILE_OPEN\=>false,RUN_ON_FILE_SAVE\=>false,RUN_AS_YOU_TYPE\=>true,RUN_ON_DEMAND\=>true},suppression_comment\=>"@suppress(\\"Line comments\\")"} +org.eclipse.cdt.codan.checkers.noreturn.params={launchModes\=>{RUN_ON_FULL_BUILD\=>true,RUN_ON_INC_BUILD\=>true,RUN_ON_FILE_OPEN\=>false,RUN_ON_FILE_SAVE\=>false,RUN_AS_YOU_TYPE\=>true,RUN_ON_DEMAND\=>true},suppression_comment\=>"@suppress(\\"No return value\\")",implicit\=>false} 
+org.eclipse.cdt.codan.internal.checkers.AbstractClassCreation.params={launchModes\=>{RUN_ON_FULL_BUILD\=>true,RUN_ON_INC_BUILD\=>true,RUN_ON_FILE_OPEN\=>false,RUN_ON_FILE_SAVE\=>false,RUN_AS_YOU_TYPE\=>true,RUN_ON_DEMAND\=>true},suppression_comment\=>"@suppress(\\"Abstract class cannot be instantiated\\")"} +org.eclipse.cdt.codan.internal.checkers.AmbiguousProblem.params={launchModes\=>{RUN_ON_FULL_BUILD\=>true,RUN_ON_INC_BUILD\=>true,RUN_ON_FILE_OPEN\=>false,RUN_ON_FILE_SAVE\=>false,RUN_AS_YOU_TYPE\=>true,RUN_ON_DEMAND\=>true},suppression_comment\=>"@suppress(\\"Ambiguous problem\\")"} +org.eclipse.cdt.codan.internal.checkers.AssignmentInConditionProblem.params={launchModes\=>{RUN_ON_FULL_BUILD\=>true,RUN_ON_INC_BUILD\=>true,RUN_ON_FILE_OPEN\=>false,RUN_ON_FILE_SAVE\=>false,RUN_AS_YOU_TYPE\=>true,RUN_ON_DEMAND\=>true},suppression_comment\=>"@suppress(\\"Assignment in condition\\")"} +org.eclipse.cdt.codan.internal.checkers.AssignmentToItselfProblem.params={launchModes\=>{RUN_ON_FULL_BUILD\=>true,RUN_ON_INC_BUILD\=>true,RUN_ON_FILE_OPEN\=>false,RUN_ON_FILE_SAVE\=>false,RUN_AS_YOU_TYPE\=>true,RUN_ON_DEMAND\=>true},suppression_comment\=>"@suppress(\\"Assignment to itself\\")"} +org.eclipse.cdt.codan.internal.checkers.BlacklistProblem=-Warning +org.eclipse.cdt.codan.internal.checkers.BlacklistProblem.params={launchModes\=>{RUN_ON_FULL_BUILD\=>true,RUN_ON_INC_BUILD\=>true,RUN_ON_FILE_OPEN\=>false,RUN_ON_FILE_SAVE\=>false,RUN_AS_YOU_TYPE\=>true,RUN_ON_DEMAND\=>true},suppression_comment\=>"@suppress(\\"Function or method is blacklisted\\")",blacklist\=>()} +org.eclipse.cdt.codan.internal.checkers.CStyleCastProblem=-Warning +org.eclipse.cdt.codan.internal.checkers.CStyleCastProblem.params={launchModes\=>{RUN_ON_FULL_BUILD\=>true,RUN_ON_INC_BUILD\=>true,RUN_ON_FILE_OPEN\=>false,RUN_ON_FILE_SAVE\=>false,RUN_AS_YOU_TYPE\=>true,RUN_ON_DEMAND\=>true},suppression_comment\=>"@suppress(\\"C-Style cast instead of C++ cast\\")",checkMacro\=>true} +org.eclipse.cdt.codan.internal.checkers.CaseBreakProblem.params={launchModes\=>{RUN_ON_FULL_BUILD\=>true,RUN_ON_INC_BUILD\=>true,RUN_ON_FILE_OPEN\=>false,RUN_ON_FILE_SAVE\=>false,RUN_AS_YOU_TYPE\=>true,RUN_ON_DEMAND\=>true},suppression_comment\=>"@suppress(\\"No break at end of case\\")",no_break_comment\=>"no break",last_case_param\=>false,empty_case_param\=>false,enable_fallthrough_quickfix_param\=>false} +org.eclipse.cdt.codan.internal.checkers.CatchByReference.params={launchModes\=>{RUN_ON_FULL_BUILD\=>true,RUN_ON_INC_BUILD\=>true,RUN_ON_FILE_OPEN\=>false,RUN_ON_FILE_SAVE\=>false,RUN_AS_YOU_TYPE\=>true,RUN_ON_DEMAND\=>true},suppression_comment\=>"@suppress(\\"Catching by reference is recommended\\")",unknown\=>false,exceptions\=>()} +org.eclipse.cdt.codan.internal.checkers.CircularReferenceProblem.params={launchModes\=>{RUN_ON_FULL_BUILD\=>true,RUN_ON_INC_BUILD\=>true,RUN_ON_FILE_OPEN\=>false,RUN_ON_FILE_SAVE\=>false,RUN_AS_YOU_TYPE\=>true,RUN_ON_DEMAND\=>true},suppression_comment\=>"@suppress(\\"Circular inheritance\\")"} +org.eclipse.cdt.codan.internal.checkers.ClassMembersInitialization.params={launchModes\=>{RUN_ON_FULL_BUILD\=>true,RUN_ON_INC_BUILD\=>true,RUN_ON_FILE_OPEN\=>false,RUN_ON_FILE_SAVE\=>false,RUN_AS_YOU_TYPE\=>true,RUN_ON_DEMAND\=>true},suppression_comment\=>"@suppress(\\"Class members should be properly initialized\\")",skip\=>true} +org.eclipse.cdt.codan.internal.checkers.CopyrightProblem=-Warning 
+org.eclipse.cdt.codan.internal.checkers.CopyrightProblem.params={launchModes\=>{RUN_ON_FULL_BUILD\=>true,RUN_ON_INC_BUILD\=>true,RUN_ON_FILE_OPEN\=>false,RUN_ON_FILE_SAVE\=>false,RUN_AS_YOU_TYPE\=>true,RUN_ON_DEMAND\=>true},suppression_comment\=>"@suppress(\\"Lack of copyright information\\")",regex\=>".*Copyright.*"} +org.eclipse.cdt.codan.internal.checkers.DecltypeAutoProblem.params={launchModes\=>{RUN_ON_FULL_BUILD\=>true,RUN_ON_INC_BUILD\=>true,RUN_ON_FILE_OPEN\=>false,RUN_ON_FILE_SAVE\=>false,RUN_AS_YOU_TYPE\=>true,RUN_ON_DEMAND\=>true},suppression_comment\=>"@suppress(\\"Invalid 'decltype(auto)' specifier\\")"} +org.eclipse.cdt.codan.internal.checkers.FieldResolutionProblem.params={launchModes\=>{RUN_ON_FULL_BUILD\=>true,RUN_ON_INC_BUILD\=>true,RUN_ON_FILE_OPEN\=>false,RUN_ON_FILE_SAVE\=>false,RUN_AS_YOU_TYPE\=>true,RUN_ON_DEMAND\=>true},suppression_comment\=>"@suppress(\\"Field cannot be resolved\\")"} +org.eclipse.cdt.codan.internal.checkers.FloatCompareProblem=-Warning +org.eclipse.cdt.codan.internal.checkers.FloatCompareProblem.params={launchModes\=>{RUN_ON_FULL_BUILD\=>true,RUN_ON_INC_BUILD\=>true,RUN_ON_FILE_OPEN\=>false,RUN_ON_FILE_SAVE\=>false,RUN_AS_YOU_TYPE\=>true,RUN_ON_DEMAND\=>true},suppression_comment\=>"@suppress(\\"Direct float comparison\\")"} +org.eclipse.cdt.codan.internal.checkers.FunctionResolutionProblem.params={launchModes\=>{RUN_ON_FULL_BUILD\=>true,RUN_ON_INC_BUILD\=>true,RUN_ON_FILE_OPEN\=>false,RUN_ON_FILE_SAVE\=>false,RUN_AS_YOU_TYPE\=>true,RUN_ON_DEMAND\=>true},suppression_comment\=>"@suppress(\\"Function cannot be resolved\\")"} +org.eclipse.cdt.codan.internal.checkers.GotoStatementProblem=-Warning +org.eclipse.cdt.codan.internal.checkers.GotoStatementProblem.params={launchModes\=>{RUN_ON_FULL_BUILD\=>true,RUN_ON_INC_BUILD\=>true,RUN_ON_FILE_OPEN\=>false,RUN_ON_FILE_SAVE\=>false,RUN_AS_YOU_TYPE\=>true,RUN_ON_DEMAND\=>true},suppression_comment\=>"@suppress(\\"Goto statement used\\")"} +org.eclipse.cdt.codan.internal.checkers.InvalidArguments.params={launchModes\=>{RUN_ON_FULL_BUILD\=>true,RUN_ON_INC_BUILD\=>true,RUN_ON_FILE_OPEN\=>false,RUN_ON_FILE_SAVE\=>false,RUN_AS_YOU_TYPE\=>true,RUN_ON_DEMAND\=>true},suppression_comment\=>"@suppress(\\"Invalid arguments\\")"} +org.eclipse.cdt.codan.internal.checkers.InvalidTemplateArgumentsProblem.params={launchModes\=>{RUN_ON_FULL_BUILD\=>true,RUN_ON_INC_BUILD\=>true,RUN_ON_FILE_OPEN\=>false,RUN_ON_FILE_SAVE\=>false,RUN_AS_YOU_TYPE\=>true,RUN_ON_DEMAND\=>true},suppression_comment\=>"@suppress(\\"Invalid template argument\\")"} +org.eclipse.cdt.codan.internal.checkers.LabelStatementNotFoundProblem.params={launchModes\=>{RUN_ON_FULL_BUILD\=>true,RUN_ON_INC_BUILD\=>true,RUN_ON_FILE_OPEN\=>false,RUN_ON_FILE_SAVE\=>false,RUN_AS_YOU_TYPE\=>true,RUN_ON_DEMAND\=>true},suppression_comment\=>"@suppress(\\"Label statement not found\\")"} +org.eclipse.cdt.codan.internal.checkers.MagicNumberProblem=-Warning +org.eclipse.cdt.codan.internal.checkers.MagicNumberProblem.params={launchModes\=>{RUN_ON_FULL_BUILD\=>true,RUN_ON_INC_BUILD\=>true,RUN_ON_FILE_OPEN\=>false,RUN_ON_FILE_SAVE\=>false,RUN_AS_YOU_TYPE\=>true,RUN_ON_DEMAND\=>true},suppression_comment\=>"@suppress(\\"Avoid magic numbers\\")",checkArray\=>true,checkOperatorParen\=>true,exceptions\=>(1,0,-1,2,1.0,0.0,-1.0)} 
+org.eclipse.cdt.codan.internal.checkers.MemberDeclarationNotFoundProblem.params={launchModes\=>{RUN_ON_FULL_BUILD\=>true,RUN_ON_INC_BUILD\=>true,RUN_ON_FILE_OPEN\=>false,RUN_ON_FILE_SAVE\=>false,RUN_AS_YOU_TYPE\=>true,RUN_ON_DEMAND\=>true},suppression_comment\=>"@suppress(\\"Member declaration not found\\")"} +org.eclipse.cdt.codan.internal.checkers.MethodResolutionProblem.params={launchModes\=>{RUN_ON_FULL_BUILD\=>true,RUN_ON_INC_BUILD\=>true,RUN_ON_FILE_OPEN\=>false,RUN_ON_FILE_SAVE\=>false,RUN_AS_YOU_TYPE\=>true,RUN_ON_DEMAND\=>true},suppression_comment\=>"@suppress(\\"Method cannot be resolved\\")"} +org.eclipse.cdt.codan.internal.checkers.MissCaseProblem=-Warning +org.eclipse.cdt.codan.internal.checkers.MissCaseProblem.params={launchModes\=>{RUN_ON_FULL_BUILD\=>true,RUN_ON_INC_BUILD\=>true,RUN_ON_FILE_OPEN\=>false,RUN_ON_FILE_SAVE\=>false,RUN_AS_YOU_TYPE\=>true,RUN_ON_DEMAND\=>true},suppression_comment\=>"@suppress(\\"Missing cases in switch\\")"} +org.eclipse.cdt.codan.internal.checkers.MissDefaultProblem=-Warning +org.eclipse.cdt.codan.internal.checkers.MissDefaultProblem.params={launchModes\=>{RUN_ON_FULL_BUILD\=>true,RUN_ON_INC_BUILD\=>true,RUN_ON_FILE_OPEN\=>false,RUN_ON_FILE_SAVE\=>false,RUN_AS_YOU_TYPE\=>true,RUN_ON_DEMAND\=>true},suppression_comment\=>"@suppress(\\"Missing default in switch\\")",defaultWithAllEnums\=>false} +org.eclipse.cdt.codan.internal.checkers.MissReferenceProblem=-Warning +org.eclipse.cdt.codan.internal.checkers.MissReferenceProblem.params={launchModes\=>{RUN_ON_FULL_BUILD\=>true,RUN_ON_INC_BUILD\=>true,RUN_ON_FILE_OPEN\=>false,RUN_ON_FILE_SAVE\=>false,RUN_AS_YOU_TYPE\=>true,RUN_ON_DEMAND\=>true},suppression_comment\=>"@suppress(\\"Missing reference return value in assignment operator\\")"} +org.eclipse.cdt.codan.internal.checkers.MissSelfCheckProblem=-Warning +org.eclipse.cdt.codan.internal.checkers.MissSelfCheckProblem.params={launchModes\=>{RUN_ON_FULL_BUILD\=>true,RUN_ON_INC_BUILD\=>true,RUN_ON_FILE_OPEN\=>false,RUN_ON_FILE_SAVE\=>false,RUN_AS_YOU_TYPE\=>true,RUN_ON_DEMAND\=>true},suppression_comment\=>"@suppress(\\"Missing self check in assignment operator\\")"} +org.eclipse.cdt.codan.internal.checkers.MultipleDeclarationsProblem=-Warning +org.eclipse.cdt.codan.internal.checkers.MultipleDeclarationsProblem.params={launchModes\=>{RUN_ON_FULL_BUILD\=>true,RUN_ON_INC_BUILD\=>true,RUN_ON_FILE_OPEN\=>false,RUN_ON_FILE_SAVE\=>false,RUN_AS_YOU_TYPE\=>true,RUN_ON_DEMAND\=>true},suppression_comment\=>"@suppress(\\"Multiple variable declaration\\")"} +org.eclipse.cdt.codan.internal.checkers.NamingConventionFunctionChecker.params={launchModes\=>{RUN_ON_FULL_BUILD\=>true,RUN_ON_INC_BUILD\=>true,RUN_ON_FILE_OPEN\=>false,RUN_ON_FILE_SAVE\=>false,RUN_AS_YOU_TYPE\=>true,RUN_ON_DEMAND\=>true},suppression_comment\=>"@suppress(\\"Name convention for function\\")",pattern\=>"^[a-z]",macro\=>true,exceptions\=>()} +org.eclipse.cdt.codan.internal.checkers.NoDiscardProblem=Warning +org.eclipse.cdt.codan.internal.checkers.NoDiscardProblem.params={launchModes\=>{RUN_ON_FULL_BUILD\=>true,RUN_ON_INC_BUILD\=>true,RUN_ON_FILE_OPEN\=>false,RUN_ON_FILE_SAVE\=>false,RUN_AS_YOU_TYPE\=>true,RUN_ON_DEMAND\=>true},suppression_comment\=>"@suppress(\\"Return value not evaluated\\")",macro\=>true} +org.eclipse.cdt.codan.internal.checkers.NonVirtualDestructorProblem.params={launchModes\=>{RUN_ON_FULL_BUILD\=>true,RUN_ON_INC_BUILD\=>true,RUN_ON_FILE_OPEN\=>false,RUN_ON_FILE_SAVE\=>false,RUN_AS_YOU_TYPE\=>true,RUN_ON_DEMAND\=>true},suppression_comment\=>"@suppress(\\"Class has a virtual 
method and non-virtual destructor\\")"} +org.eclipse.cdt.codan.internal.checkers.OverloadProblem.params={launchModes\=>{RUN_ON_FULL_BUILD\=>true,RUN_ON_INC_BUILD\=>true,RUN_ON_FILE_OPEN\=>false,RUN_ON_FILE_SAVE\=>false,RUN_AS_YOU_TYPE\=>true,RUN_ON_DEMAND\=>true},suppression_comment\=>"@suppress(\\"Invalid overload\\")"} +org.eclipse.cdt.codan.internal.checkers.RedeclarationProblem.params={launchModes\=>{RUN_ON_FULL_BUILD\=>true,RUN_ON_INC_BUILD\=>true,RUN_ON_FILE_OPEN\=>false,RUN_ON_FILE_SAVE\=>false,RUN_AS_YOU_TYPE\=>true,RUN_ON_DEMAND\=>true},suppression_comment\=>"@suppress(\\"Invalid redeclaration\\")"} +org.eclipse.cdt.codan.internal.checkers.RedefinitionProblem.params={launchModes\=>{RUN_ON_FULL_BUILD\=>true,RUN_ON_INC_BUILD\=>true,RUN_ON_FILE_OPEN\=>false,RUN_ON_FILE_SAVE\=>false,RUN_AS_YOU_TYPE\=>true,RUN_ON_DEMAND\=>true},suppression_comment\=>"@suppress(\\"Invalid redefinition\\")"} +org.eclipse.cdt.codan.internal.checkers.ReturnStyleProblem.params={launchModes\=>{RUN_ON_FULL_BUILD\=>true,RUN_ON_INC_BUILD\=>true,RUN_ON_FILE_OPEN\=>false,RUN_ON_FILE_SAVE\=>false,RUN_AS_YOU_TYPE\=>true,RUN_ON_DEMAND\=>true},suppression_comment\=>"@suppress(\\"Return with parenthesis\\")"} +org.eclipse.cdt.codan.internal.checkers.ScanfFormatStringSecurityProblem.params={launchModes\=>{RUN_ON_FULL_BUILD\=>true,RUN_ON_INC_BUILD\=>true,RUN_ON_FILE_OPEN\=>false,RUN_ON_FILE_SAVE\=>false,RUN_AS_YOU_TYPE\=>true,RUN_ON_DEMAND\=>true},suppression_comment\=>"@suppress(\\"Format String Vulnerability\\")"} +org.eclipse.cdt.codan.internal.checkers.ShallowCopyProblem=-Warning +org.eclipse.cdt.codan.internal.checkers.ShallowCopyProblem.params={launchModes\=>{RUN_ON_FULL_BUILD\=>true,RUN_ON_INC_BUILD\=>true,RUN_ON_FILE_OPEN\=>false,RUN_ON_FILE_SAVE\=>false,RUN_AS_YOU_TYPE\=>true,RUN_ON_DEMAND\=>true},suppression_comment\=>"@suppress(\\"Miss copy constructor or assignment operator\\")",onlynew\=>false} +org.eclipse.cdt.codan.internal.checkers.StatementHasNoEffectProblem.params={launchModes\=>{RUN_ON_FULL_BUILD\=>true,RUN_ON_INC_BUILD\=>true,RUN_ON_FILE_OPEN\=>false,RUN_ON_FILE_SAVE\=>false,RUN_AS_YOU_TYPE\=>true,RUN_ON_DEMAND\=>true},suppression_comment\=>"@suppress(\\"Statement has no effect\\")",macro\=>true,exceptions\=>()} +org.eclipse.cdt.codan.internal.checkers.StaticVariableInHeaderProblem=-Warning +org.eclipse.cdt.codan.internal.checkers.StaticVariableInHeaderProblem.params={launchModes\=>{RUN_ON_FULL_BUILD\=>true,RUN_ON_INC_BUILD\=>true,RUN_ON_FILE_OPEN\=>false,RUN_ON_FILE_SAVE\=>false,RUN_AS_YOU_TYPE\=>true,RUN_ON_DEMAND\=>true},suppression_comment\=>"@suppress(\\"Static variable in header file\\")"} +org.eclipse.cdt.codan.internal.checkers.SuggestedParenthesisProblem.params={launchModes\=>{RUN_ON_FULL_BUILD\=>true,RUN_ON_INC_BUILD\=>true,RUN_ON_FILE_OPEN\=>false,RUN_ON_FILE_SAVE\=>false,RUN_AS_YOU_TYPE\=>true,RUN_ON_DEMAND\=>true},suppression_comment\=>"@suppress(\\"Suggested parenthesis around expression\\")",paramNot\=>false} +org.eclipse.cdt.codan.internal.checkers.SuspiciousSemicolonProblem.params={launchModes\=>{RUN_ON_FULL_BUILD\=>true,RUN_ON_INC_BUILD\=>true,RUN_ON_FILE_OPEN\=>false,RUN_ON_FILE_SAVE\=>false,RUN_AS_YOU_TYPE\=>true,RUN_ON_DEMAND\=>true},suppression_comment\=>"@suppress(\\"Suspicious semicolon\\")",else\=>false,afterelse\=>false} +org.eclipse.cdt.codan.internal.checkers.SymbolShadowingProblem=-Warning 
+org.eclipse.cdt.codan.internal.checkers.SymbolShadowingProblem.params={launchModes\=>{RUN_ON_FULL_BUILD\=>true,RUN_ON_INC_BUILD\=>true,RUN_ON_FILE_OPEN\=>false,RUN_ON_FILE_SAVE\=>false,RUN_AS_YOU_TYPE\=>true,RUN_ON_DEMAND\=>true},suppression_comment\=>"@suppress(\\"Symbol shadowing\\")",paramFuncParameters\=>true} +org.eclipse.cdt.codan.internal.checkers.TypeResolutionProblem.params={launchModes\=>{RUN_ON_FULL_BUILD\=>true,RUN_ON_INC_BUILD\=>true,RUN_ON_FILE_OPEN\=>false,RUN_ON_FILE_SAVE\=>false,RUN_AS_YOU_TYPE\=>true,RUN_ON_DEMAND\=>true},suppression_comment\=>"@suppress(\\"Type cannot be resolved\\")"} +org.eclipse.cdt.codan.internal.checkers.UnusedFunctionDeclarationProblem.params={launchModes\=>{RUN_ON_FULL_BUILD\=>true,RUN_ON_INC_BUILD\=>true,RUN_ON_FILE_OPEN\=>false,RUN_ON_FILE_SAVE\=>false,RUN_AS_YOU_TYPE\=>true,RUN_ON_DEMAND\=>true},suppression_comment\=>"@suppress(\\"Unused function declaration\\")",macro\=>true} +org.eclipse.cdt.codan.internal.checkers.UnusedStaticFunctionProblem.params={launchModes\=>{RUN_ON_FULL_BUILD\=>true,RUN_ON_INC_BUILD\=>true,RUN_ON_FILE_OPEN\=>false,RUN_ON_FILE_SAVE\=>false,RUN_AS_YOU_TYPE\=>true,RUN_ON_DEMAND\=>true},suppression_comment\=>"@suppress(\\"Unused static function\\")",macro\=>true} +org.eclipse.cdt.codan.internal.checkers.UnusedVariableDeclarationProblem.params={launchModes\=>{RUN_ON_FULL_BUILD\=>true,RUN_ON_INC_BUILD\=>true,RUN_ON_FILE_OPEN\=>false,RUN_ON_FILE_SAVE\=>false,RUN_AS_YOU_TYPE\=>true,RUN_ON_DEMAND\=>true},suppression_comment\=>"@suppress(\\"Unused variable declaration in file scope\\")",macro\=>true,exceptions\=>("@(\#)","$Id")} +org.eclipse.cdt.codan.internal.checkers.UsingInHeaderProblem=-Warning +org.eclipse.cdt.codan.internal.checkers.UsingInHeaderProblem.params={launchModes\=>{RUN_ON_FULL_BUILD\=>true,RUN_ON_INC_BUILD\=>true,RUN_ON_FILE_OPEN\=>false,RUN_ON_FILE_SAVE\=>false,RUN_AS_YOU_TYPE\=>true,RUN_ON_DEMAND\=>true},suppression_comment\=>"@suppress(\\"Using directive in header\\")"} +org.eclipse.cdt.codan.internal.checkers.VariableResolutionProblem.params={launchModes\=>{RUN_ON_FULL_BUILD\=>true,RUN_ON_INC_BUILD\=>true,RUN_ON_FILE_OPEN\=>false,RUN_ON_FILE_SAVE\=>false,RUN_AS_YOU_TYPE\=>true,RUN_ON_DEMAND\=>true},suppression_comment\=>"@suppress(\\"Symbol is not resolved\\")"} +org.eclipse.cdt.codan.internal.checkers.VirtualMethodCallProblem=-Error +org.eclipse.cdt.codan.internal.checkers.VirtualMethodCallProblem.params={launchModes\=>{RUN_ON_FULL_BUILD\=>true,RUN_ON_INC_BUILD\=>true,RUN_ON_FILE_OPEN\=>false,RUN_ON_FILE_SAVE\=>false,RUN_AS_YOU_TYPE\=>true,RUN_ON_DEMAND\=>true},suppression_comment\=>"@suppress(\\"Virtual method call in constructor/destructor\\")"} diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/.settings/org.eclipse.cdt.core.prefs b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/.settings/org.eclipse.cdt.core.prefs new file mode 100644 index 0000000..c8ec5df --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/.settings/org.eclipse.cdt.core.prefs @@ -0,0 +1,6 @@ +doxygen/doxygen_new_line_after_brief=true +doxygen/doxygen_use_brief_tag=false +doxygen/doxygen_use_javadoc_tags=true +doxygen/doxygen_use_pre_tag=false +doxygen/doxygen_use_structural_commands=false +eclipse.preferences.version=1 diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/.settings/stm32cubeide.project.prefs b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/.settings/stm32cubeide.project.prefs new file mode 100644 index 0000000..d10936a --- /dev/null +++ 
b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/.settings/stm32cubeide.project.prefs @@ -0,0 +1,5 @@ +635E684B79701B039C64EA45C3F84D30=55901144B141C49B20296CF6A4A0554C +66BE74F758C12D739921AEA421D593D3=1 +8DF89ED150041C4CBC7CB9A9CAA90856=5D92697FE42B65B09AFA450577DD3FA4 +DC22A860405A8BF2F2C095E5B6529F12=5D92697FE42B65B09AFA450577DD3FA4 +eclipse.preferences.version=1 diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Core/Inc/FreeRTOSConfig.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Core/Inc/FreeRTOSConfig.h new file mode 100644 index 0000000..ed4f173 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Core/Inc/FreeRTOSConfig.h @@ -0,0 +1,164 @@ +/* USER CODE BEGIN Header */ +/* + * FreeRTOS Kernel V10.2.1 + * Portion Copyright (C) 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Portion Copyright (C) 2019 StMicroelectronics, Inc. All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * http://www.FreeRTOS.org + * http://aws.amazon.com/freertos + * + * 1 tab == 4 spaces! + */ +/* USER CODE END Header */ + +#ifndef FREERTOS_CONFIG_H +#define FREERTOS_CONFIG_H + +/*----------------------------------------------------------- + * Application specific definitions. + * + * These definitions should be adjusted for your particular hardware and + * application requirements. + * + * These parameters and more are described within the 'configuration' section of the + * FreeRTOS API documentation available on the FreeRTOS.org web site. + * + * See http://www.freertos.org/a00110.html + *----------------------------------------------------------*/ + +/* USER CODE BEGIN Includes */ +/* Section where include file can be added */ +/* USER CODE END Includes */ + +/* Ensure definitions are only used by the compiler, and not by the assembler. 
*/ +#if defined(__ICCARM__) || defined(__CC_ARM) || defined(__GNUC__) + #include + extern uint32_t SystemCoreClock; +#endif +#define configENABLE_FPU 0 +#define configENABLE_MPU 0 + +#define configUSE_PREEMPTION 1 +#define configSUPPORT_STATIC_ALLOCATION 1 +#define configSUPPORT_DYNAMIC_ALLOCATION 1 +#define configUSE_IDLE_HOOK 0 +#define configUSE_TICK_HOOK 0 +#define configCPU_CLOCK_HZ ( SystemCoreClock ) +#define configTICK_RATE_HZ ((TickType_t)1000) +#define configMAX_PRIORITIES ( 56 ) +#define configMINIMAL_STACK_SIZE ((uint16_t)128) +#define configTOTAL_HEAP_SIZE ((size_t)100000) +#define configMAX_TASK_NAME_LEN ( 30 ) +#define configUSE_TRACE_FACILITY 1 +#define configUSE_16_BIT_TICKS 0 +#define configUSE_MUTEXES 1 +#define configQUEUE_REGISTRY_SIZE 8 +#define configUSE_RECURSIVE_MUTEXES 1 +#define configUSE_COUNTING_SEMAPHORES 1 +#define configUSE_PORT_OPTIMISED_TASK_SELECTION 0 +/* USER CODE BEGIN MESSAGE_BUFFER_LENGTH_TYPE */ +/* Defaults to size_t for backward compatibility, but can be changed + if lengths will always be less than the number of bytes in a size_t. */ +#define configMESSAGE_BUFFER_LENGTH_TYPE size_t +/* USER CODE END MESSAGE_BUFFER_LENGTH_TYPE */ + +/* Co-routine definitions. */ +#define configUSE_CO_ROUTINES 0 +#define configMAX_CO_ROUTINE_PRIORITIES ( 2 ) + +/* Software timer definitions. */ +#define configUSE_TIMERS 1 +#define configTIMER_TASK_PRIORITY ( 2 ) +#define configTIMER_QUEUE_LENGTH 10 +#define configTIMER_TASK_STACK_DEPTH 256 + +/* The following flag must be enabled only when using newlib */ +#define configUSE_NEWLIB_REENTRANT 1 + +/* Set the following definitions to 1 to include the API function, or zero +to exclude the API function. */ +#define INCLUDE_vTaskPrioritySet 1 +#define INCLUDE_uxTaskPriorityGet 1 +#define INCLUDE_vTaskDelete 1 +#define INCLUDE_vTaskCleanUpResources 0 +#define INCLUDE_vTaskSuspend 1 +#define INCLUDE_vTaskDelayUntil 1 +#define INCLUDE_vTaskDelay 1 +#define INCLUDE_xTaskGetSchedulerState 1 +#define INCLUDE_xTimerPendFunctionCall 1 +#define INCLUDE_xQueueGetMutexHolder 1 +#define INCLUDE_uxTaskGetStackHighWaterMark 1 +#define INCLUDE_eTaskGetState 1 + +/* + * The CMSIS-RTOS V2 FreeRTOS wrapper is dependent on the heap implementation used + * by the application thus the correct define need to be enabled below + */ +#define USE_FreeRTOS_HEAP_4 + +/* Cortex-M specific definitions. */ +#ifdef __NVIC_PRIO_BITS + /* __BVIC_PRIO_BITS will be specified when CMSIS is being used. */ + #define configPRIO_BITS __NVIC_PRIO_BITS +#else + #define configPRIO_BITS 4 +#endif + +/* The lowest interrupt priority that can be used in a call to a "set priority" +function. */ +#define configLIBRARY_LOWEST_INTERRUPT_PRIORITY 15 + +/* The highest interrupt priority that can be used by any interrupt service +routine that makes calls to interrupt safe FreeRTOS API functions. DO NOT CALL +INTERRUPT SAFE FREERTOS API FUNCTIONS FROM ANY INTERRUPT THAT HAS A HIGHER +PRIORITY THAN THIS! (higher priorities are lower numeric values. */ +#define configLIBRARY_MAX_SYSCALL_INTERRUPT_PRIORITY 5 + +/* Interrupt priorities used by the kernel port layer itself. These are generic +to all Cortex-M ports, and do not rely on any particular library functions. */ +#define configKERNEL_INTERRUPT_PRIORITY ( configLIBRARY_LOWEST_INTERRUPT_PRIORITY << (8 - configPRIO_BITS) ) +/* !!!! configMAX_SYSCALL_INTERRUPT_PRIORITY must not be set to zero !!!! +See http://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html. 
*/ +#define configMAX_SYSCALL_INTERRUPT_PRIORITY ( configLIBRARY_MAX_SYSCALL_INTERRUPT_PRIORITY << (8 - configPRIO_BITS) ) + +/* Normal assert() semantics without relying on the provision of an assert.h +header file. */ +/* USER CODE BEGIN 1 */ +// FROM THIS: +// #define configASSERT( x ) if ((x) == 0) {taskDISABLE_INTERRUPTS(); for( ;; );} +// TO THIS: +#define configASSERT( x ) +/* USER CODE END 1 */ + +/* Definitions that map the FreeRTOS port interrupt handlers to their CMSIS +standard names. */ +#define vPortSVCHandler SVC_Handler +#define xPortPendSVHandler PendSV_Handler + +/* IMPORTANT: This define is commented when used with STM32Cube firmware, when the timebase source is SysTick, + to prevent overwriting SysTick_Handler defined within STM32Cube HAL */ + +#define xPortSysTickHandler SysTick_Handler + +/* USER CODE BEGIN Defines */ +/* Section where parameter definitions can be added (for instance, to override default ones in FreeRTOS.h) */ +/* USER CODE END Defines */ + +#endif /* FREERTOS_CONFIG_H */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Core/Inc/main.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Core/Inc/main.h new file mode 100644 index 0000000..b42596a --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Core/Inc/main.h @@ -0,0 +1,123 @@ +/* USER CODE BEGIN Header */ +/** + ****************************************************************************** + * @file : main.h + * @brief : Header for main.c file. + * This file contains the common defines of the application. + ****************************************************************************** + * @attention + * + *
© Copyright (c) 2022 STMicroelectronics. + * All rights reserved.
+ * + * This software component is licensed by ST under BSD 3-Clause license, + * the "License"; You may not use this file except in compliance with the + * License. You may obtain a copy of the License at: + * opensource.org/licenses/BSD-3-Clause + * + ****************************************************************************** + */ +/* USER CODE END Header */ + +/* Define to prevent recursive inclusion -------------------------------------*/ +#ifndef __MAIN_H +#define __MAIN_H + +#ifdef __cplusplus +extern "C" { +#endif + +/* Includes ------------------------------------------------------------------*/ +#include "stm32f7xx_hal.h" + +/* Private includes ----------------------------------------------------------*/ +/* USER CODE BEGIN Includes */ + +/* USER CODE END Includes */ + +/* Exported types ------------------------------------------------------------*/ +/* USER CODE BEGIN ET */ + +/* USER CODE END ET */ + +/* Exported constants --------------------------------------------------------*/ +/* USER CODE BEGIN EC */ + +/* USER CODE END EC */ + +/* Exported macro ------------------------------------------------------------*/ +/* USER CODE BEGIN EM */ + +/* USER CODE END EM */ + +/* Exported functions prototypes ---------------------------------------------*/ +void Error_Handler(void); + +/* USER CODE BEGIN EFP */ + +/* USER CODE END EFP */ + +/* Private defines -----------------------------------------------------------*/ +#define USER_Btn_Pin GPIO_PIN_13 +#define USER_Btn_GPIO_Port GPIOC +#define MCO_Pin GPIO_PIN_0 +#define MCO_GPIO_Port GPIOH +#define RMII_MDC_Pin GPIO_PIN_1 +#define RMII_MDC_GPIO_Port GPIOC +#define RMII_REF_CLK_Pin GPIO_PIN_1 +#define RMII_REF_CLK_GPIO_Port GPIOA +#define RMII_MDIO_Pin GPIO_PIN_2 +#define RMII_MDIO_GPIO_Port GPIOA +#define RMII_CRS_DV_Pin GPIO_PIN_7 +#define RMII_CRS_DV_GPIO_Port GPIOA +#define RMII_RXD0_Pin GPIO_PIN_4 +#define RMII_RXD0_GPIO_Port GPIOC +#define RMII_RXD1_Pin GPIO_PIN_5 +#define RMII_RXD1_GPIO_Port GPIOC +#define LD1_Pin GPIO_PIN_0 +#define LD1_GPIO_Port GPIOB +#define RMII_TXD1_Pin GPIO_PIN_13 +#define RMII_TXD1_GPIO_Port GPIOB +#define LD3_Pin GPIO_PIN_14 +#define LD3_GPIO_Port GPIOB +#define STLK_RX_Pin GPIO_PIN_8 +#define STLK_RX_GPIO_Port GPIOD +#define STLK_TX_Pin GPIO_PIN_9 +#define STLK_TX_GPIO_Port GPIOD +#define USB_PowerSwitchOn_Pin GPIO_PIN_6 +#define USB_PowerSwitchOn_GPIO_Port GPIOG +#define USB_OverCurrent_Pin GPIO_PIN_7 +#define USB_OverCurrent_GPIO_Port GPIOG +#define USB_SOF_Pin GPIO_PIN_8 +#define USB_SOF_GPIO_Port GPIOA +#define USB_VBUS_Pin GPIO_PIN_9 +#define USB_VBUS_GPIO_Port GPIOA +#define USB_ID_Pin GPIO_PIN_10 +#define USB_ID_GPIO_Port GPIOA +#define USB_DM_Pin GPIO_PIN_11 +#define USB_DM_GPIO_Port GPIOA +#define USB_DP_Pin GPIO_PIN_12 +#define USB_DP_GPIO_Port GPIOA +#define TMS_Pin GPIO_PIN_13 +#define TMS_GPIO_Port GPIOA +#define TCK_Pin GPIO_PIN_14 +#define TCK_GPIO_Port GPIOA +#define RMII_TX_EN_Pin GPIO_PIN_11 +#define RMII_TX_EN_GPIO_Port GPIOG +#define RMII_TXD0_Pin GPIO_PIN_13 +#define RMII_TXD0_GPIO_Port GPIOG +#define SWO_Pin GPIO_PIN_3 +#define SWO_GPIO_Port GPIOB +#define LD2_Pin GPIO_PIN_7 +#define LD2_GPIO_Port GPIOB +/* USER CODE BEGIN Private defines */ + +/* USER CODE END Private defines */ + +#ifdef __cplusplus +} +#endif + +#endif /* __MAIN_H */ + +/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Core/Inc/stm32f7xx_hal_conf.h 
b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Core/Inc/stm32f7xx_hal_conf.h new file mode 100644 index 0000000..59aee49 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Core/Inc/stm32f7xx_hal_conf.h @@ -0,0 +1,483 @@ +/** + ****************************************************************************** + * @file stm32f7xx_hal_conf_template.h + * @author MCD Application Team + * @brief HAL configuration template file. + * This file should be copied to the application folder and renamed + * to stm32f7xx_hal_conf.h. + ****************************************************************************** + * @attention + * + *
© Copyright (c) 2017 STMicroelectronics. + * All rights reserved.
+ * + * This software component is licensed by ST under BSD 3-Clause license, + * the "License"; You may not use this file except in compliance with the + * License. You may obtain a copy of the License at: + * opensource.org/licenses/BSD-3-Clause + * + ****************************************************************************** + */ + +/* Define to prevent recursive inclusion -------------------------------------*/ +#ifndef __STM32F7xx_HAL_CONF_H +#define __STM32F7xx_HAL_CONF_H + +#ifdef __cplusplus + extern "C" { +#endif + +/* Exported types ------------------------------------------------------------*/ +/* Exported constants --------------------------------------------------------*/ + +/* ########################## Module Selection ############################## */ +/** + * @brief This is the list of modules to be used in the HAL driver + */ +#define HAL_MODULE_ENABLED + + /* #define HAL_ADC_MODULE_ENABLED */ +/* #define HAL_CRYP_MODULE_ENABLED */ +/* #define HAL_CAN_MODULE_ENABLED */ +/* #define HAL_CEC_MODULE_ENABLED */ +/* #define HAL_CRC_MODULE_ENABLED */ +/* #define HAL_CRYP_MODULE_ENABLED */ +/* #define HAL_DAC_MODULE_ENABLED */ +/* #define HAL_DCMI_MODULE_ENABLED */ +/* #define HAL_DMA2D_MODULE_ENABLED */ +#define HAL_ETH_MODULE_ENABLED +/* #define HAL_NAND_MODULE_ENABLED */ +/* #define HAL_NOR_MODULE_ENABLED */ +/* #define HAL_SRAM_MODULE_ENABLED */ +/* #define HAL_SDRAM_MODULE_ENABLED */ +/* #define HAL_HASH_MODULE_ENABLED */ +/* #define HAL_I2S_MODULE_ENABLED */ +/* #define HAL_IWDG_MODULE_ENABLED */ +/* #define HAL_LPTIM_MODULE_ENABLED */ +/* #define HAL_LTDC_MODULE_ENABLED */ +/* #define HAL_QSPI_MODULE_ENABLED */ +/* #define HAL_RNG_MODULE_ENABLED */ +/* #define HAL_RTC_MODULE_ENABLED */ +/* #define HAL_SAI_MODULE_ENABLED */ +/* #define HAL_SD_MODULE_ENABLED */ +/* #define HAL_MMC_MODULE_ENABLED */ +/* #define HAL_SPDIFRX_MODULE_ENABLED */ +/* #define HAL_SPI_MODULE_ENABLED */ +#define HAL_TIM_MODULE_ENABLED +#define HAL_UART_MODULE_ENABLED +/* #define HAL_USART_MODULE_ENABLED */ +/* #define HAL_IRDA_MODULE_ENABLED */ +/* #define HAL_SMARTCARD_MODULE_ENABLED */ +/* #define HAL_WWDG_MODULE_ENABLED */ +#define HAL_PCD_MODULE_ENABLED +/* #define HAL_HCD_MODULE_ENABLED */ +/* #define HAL_DFSDM_MODULE_ENABLED */ +/* #define HAL_DSI_MODULE_ENABLED */ +/* #define HAL_JPEG_MODULE_ENABLED */ +/* #define HAL_MDIOS_MODULE_ENABLED */ +/* #define HAL_SMBUS_MODULE_ENABLED */ +/* #define HAL_EXTI_MODULE_ENABLED */ +#define HAL_GPIO_MODULE_ENABLED +#define HAL_EXTI_MODULE_ENABLED +#define HAL_DMA_MODULE_ENABLED +#define HAL_RCC_MODULE_ENABLED +#define HAL_FLASH_MODULE_ENABLED +#define HAL_PWR_MODULE_ENABLED +#define HAL_I2C_MODULE_ENABLED +#define HAL_CORTEX_MODULE_ENABLED + +/* ########################## HSE/HSI Values adaptation ##################### */ +/** + * @brief Adjust the value of External High Speed oscillator (HSE) used in your application. + * This value is used by the RCC HAL module to compute the system frequency + * (when HSE is used as system clock source, directly or through the PLL). + */ +#if !defined (HSE_VALUE) + #define HSE_VALUE ((uint32_t)8000000U) /*!< Value of the External oscillator in Hz */ +#endif /* HSE_VALUE */ + +#if !defined (HSE_STARTUP_TIMEOUT) + #define HSE_STARTUP_TIMEOUT ((uint32_t)100U) /*!< Time out for HSE start up, in ms */ +#endif /* HSE_STARTUP_TIMEOUT */ + +/** + * @brief Internal High Speed oscillator (HSI) value. 
+ * This value is used by the RCC HAL module to compute the system frequency + * (when HSI is used as system clock source, directly or through the PLL). + */ +#if !defined (HSI_VALUE) + #define HSI_VALUE ((uint32_t)16000000U) /*!< Value of the Internal oscillator in Hz*/ +#endif /* HSI_VALUE */ + +/** + * @brief Internal Low Speed oscillator (LSI) value. + */ +#if !defined (LSI_VALUE) + #define LSI_VALUE ((uint32_t)32000U) /*!< LSI Typical Value in Hz*/ +#endif /* LSI_VALUE */ /*!< Value of the Internal Low Speed oscillator in Hz + The real value may vary depending on the variations + in voltage and temperature. */ +/** + * @brief External Low Speed oscillator (LSE) value. + */ +#if !defined (LSE_VALUE) + #define LSE_VALUE ((uint32_t)32768U) /*!< Value of the External Low Speed oscillator in Hz */ +#endif /* LSE_VALUE */ + +#if !defined (LSE_STARTUP_TIMEOUT) + #define LSE_STARTUP_TIMEOUT ((uint32_t)5000U) /*!< Time out for LSE start up, in ms */ +#endif /* LSE_STARTUP_TIMEOUT */ + +/** + * @brief External clock source for I2S peripheral + * This value is used by the I2S HAL module to compute the I2S clock source + * frequency, this source is inserted directly through I2S_CKIN pad. + */ +#if !defined (EXTERNAL_CLOCK_VALUE) + #define EXTERNAL_CLOCK_VALUE ((uint32_t)12288000U) /*!< Value of the Internal oscillator in Hz*/ +#endif /* EXTERNAL_CLOCK_VALUE */ + +/* Tip: To avoid modifying this file each time you need to use different HSE, + === you can define the HSE value in your toolchain compiler preprocessor. */ + +/* ########################### System Configuration ######################### */ +/** + * @brief This is the HAL system configuration section + */ +#define VDD_VALUE ((uint32_t)3300U) /*!< Value of VDD in mv */ +#define TICK_INT_PRIORITY ((uint32_t)15U) /*!< tick interrupt priority */ +#define USE_RTOS 0U +#define PREFETCH_ENABLE 0U +#define ART_ACCLERATOR_ENABLE 0U /* To enable instruction cache and prefetch */ + +#define USE_HAL_ADC_REGISTER_CALLBACKS 0U /* ADC register callback disabled */ +#define USE_HAL_CAN_REGISTER_CALLBACKS 0U /* CAN register callback disabled */ +#define USE_HAL_CEC_REGISTER_CALLBACKS 0U /* CEC register callback disabled */ +#define USE_HAL_CRYP_REGISTER_CALLBACKS 0U /* CRYP register callback disabled */ +#define USE_HAL_DAC_REGISTER_CALLBACKS 0U /* DAC register callback disabled */ +#define USE_HAL_DCMI_REGISTER_CALLBACKS 0U /* DCMI register callback disabled */ +#define USE_HAL_DFSDM_REGISTER_CALLBACKS 0U /* DFSDM register callback disabled */ +#define USE_HAL_DMA2D_REGISTER_CALLBACKS 0U /* DMA2D register callback disabled */ +#define USE_HAL_DSI_REGISTER_CALLBACKS 0U /* DSI register callback disabled */ +#define USE_HAL_ETH_REGISTER_CALLBACKS 0U /* ETH register callback disabled */ +#define USE_HAL_HASH_REGISTER_CALLBACKS 0U /* HASH register callback disabled */ +#define USE_HAL_HCD_REGISTER_CALLBACKS 0U /* HCD register callback disabled */ +#define USE_HAL_I2C_REGISTER_CALLBACKS 0U /* I2C register callback disabled */ +#define USE_HAL_I2S_REGISTER_CALLBACKS 0U /* I2S register callback disabled */ +#define USE_HAL_IRDA_REGISTER_CALLBACKS 0U /* IRDA register callback disabled */ +#define USE_HAL_JPEG_REGISTER_CALLBACKS 0U /* JPEG register callback disabled */ +#define USE_HAL_LPTIM_REGISTER_CALLBACKS 0U /* LPTIM register callback disabled */ +#define USE_HAL_LTDC_REGISTER_CALLBACKS 0U /* LTDC register callback disabled */ +#define USE_HAL_MDIOS_REGISTER_CALLBACKS 0U /* MDIOS register callback disabled */ +#define USE_HAL_MMC_REGISTER_CALLBACKS 0U /* MMC 
register callback disabled */ +#define USE_HAL_NAND_REGISTER_CALLBACKS 0U /* NAND register callback disabled */ +#define USE_HAL_NOR_REGISTER_CALLBACKS 0U /* NOR register callback disabled */ +#define USE_HAL_PCD_REGISTER_CALLBACKS 0U /* PCD register callback disabled */ +#define USE_HAL_QSPI_REGISTER_CALLBACKS 0U /* QSPI register callback disabled */ +#define USE_HAL_RNG_REGISTER_CALLBACKS 0U /* RNG register callback disabled */ +#define USE_HAL_RTC_REGISTER_CALLBACKS 0U /* RTC register callback disabled */ +#define USE_HAL_SAI_REGISTER_CALLBACKS 0U /* SAI register callback disabled */ +#define USE_HAL_SD_REGISTER_CALLBACKS 0U /* SD register callback disabled */ +#define USE_HAL_SMARTCARD_REGISTER_CALLBACKS 0U /* SMARTCARD register callback disabled */ +#define USE_HAL_SDRAM_REGISTER_CALLBACKS 0U /* SDRAM register callback disabled */ +#define USE_HAL_SRAM_REGISTER_CALLBACKS 0U /* SRAM register callback disabled */ +#define USE_HAL_SPDIFRX_REGISTER_CALLBACKS 0U /* SPDIFRX register callback disabled */ +#define USE_HAL_SMBUS_REGISTER_CALLBACKS 0U /* SMBUS register callback disabled */ +#define USE_HAL_SPI_REGISTER_CALLBACKS 0U /* SPI register callback disabled */ +#define USE_HAL_TIM_REGISTER_CALLBACKS 0U /* TIM register callback disabled */ +#define USE_HAL_UART_REGISTER_CALLBACKS 0U /* UART register callback disabled */ +#define USE_HAL_USART_REGISTER_CALLBACKS 0U /* USART register callback disabled */ +#define USE_HAL_WWDG_REGISTER_CALLBACKS 0U /* WWDG register callback disabled */ + +/* ########################## Assert Selection ############################## */ +/** + * @brief Uncomment the line below to expanse the "assert_param" macro in the + * HAL drivers code + */ +/* #define USE_FULL_ASSERT 1U */ + +/* ################## Ethernet peripheral configuration ##################### */ + +/* Section 1 : Ethernet peripheral configuration */ + +/* MAC ADDRESS: MAC_ADDR0:MAC_ADDR1:MAC_ADDR2:MAC_ADDR3:MAC_ADDR4:MAC_ADDR5 */ +#define MAC_ADDR0 2U +#define MAC_ADDR1 0U +#define MAC_ADDR2 0U +#define MAC_ADDR3 0U +#define MAC_ADDR4 0U +#define MAC_ADDR5 0U + +/* Definition of the Ethernet driver buffers size and count */ +#define ETH_RX_BUF_SIZE ETH_MAX_PACKET_SIZE /* buffer size for receive */ +#define ETH_TX_BUF_SIZE ETH_MAX_PACKET_SIZE /* buffer size for transmit */ +#define ETH_RXBUFNB ((uint32_t)4U) /* 4 Rx buffers of size ETH_RX_BUF_SIZE */ +#define ETH_TXBUFNB ((uint32_t)4U) /* 4 Tx buffers of size ETH_TX_BUF_SIZE */ + +/* Section 2: PHY configuration section */ + +/* LAN8742A_PHY_ADDRESS Address*/ +#define LAN8742A_PHY_ADDRESS 0 +/* PHY Reset delay these values are based on a 1 ms Systick interrupt*/ +#define PHY_RESET_DELAY ((uint32_t)0x000000FFU) +/* PHY Configuration delay */ +#define PHY_CONFIG_DELAY ((uint32_t)0x00000FFFU) + +#define PHY_READ_TO ((uint32_t)0x0000FFFFU) +#define PHY_WRITE_TO ((uint32_t)0x0000FFFFU) + +/* Section 3: Common PHY Registers */ + +#define PHY_BCR ((uint16_t)0x00U) /*!< Transceiver Basic Control Register */ +#define PHY_BSR ((uint16_t)0x01U) /*!< Transceiver Basic Status Register */ + +#define PHY_RESET ((uint16_t)0x8000U) /*!< PHY Reset */ +#define PHY_LOOPBACK ((uint16_t)0x4000U) /*!< Select loop-back mode */ +#define PHY_FULLDUPLEX_100M ((uint16_t)0x2100U) /*!< Set the full-duplex mode at 100 Mb/s */ +#define PHY_HALFDUPLEX_100M ((uint16_t)0x2000U) /*!< Set the half-duplex mode at 100 Mb/s */ +#define PHY_FULLDUPLEX_10M ((uint16_t)0x0100U) /*!< Set the full-duplex mode at 10 Mb/s */ +#define PHY_HALFDUPLEX_10M ((uint16_t)0x0000U) /*!< Set the 
half-duplex mode at 10 Mb/s */ +#define PHY_AUTONEGOTIATION ((uint16_t)0x1000U) /*!< Enable auto-negotiation function */ +#define PHY_RESTART_AUTONEGOTIATION ((uint16_t)0x0200U) /*!< Restart auto-negotiation function */ +#define PHY_POWERDOWN ((uint16_t)0x0800U) /*!< Select the power down mode */ +#define PHY_ISOLATE ((uint16_t)0x0400U) /*!< Isolate PHY from MII */ + +#define PHY_AUTONEGO_COMPLETE ((uint16_t)0x0020U) /*!< Auto-Negotiation process completed */ +#define PHY_LINKED_STATUS ((uint16_t)0x0004U) /*!< Valid link established */ +#define PHY_JABBER_DETECTION ((uint16_t)0x0002U) /*!< Jabber condition detected */ + +/* Section 4: Extended PHY Registers */ +#define PHY_SR ((uint16_t)0x10U) /*!< PHY status register Offset */ + +#define PHY_SPEED_STATUS ((uint16_t)0x0002U) /*!< PHY Speed mask */ +#define PHY_DUPLEX_STATUS ((uint16_t)0x0004U) /*!< PHY Duplex mask */ + +#define PHY_ISFR ((uint16_t)0x001DU) /*!< PHY Interrupt Source Flag register Offset */ +#define PHY_ISFR_INT4 ((uint16_t)0x000BU) /*!< PHY Link down inturrupt */ + +/* ################## SPI peripheral configuration ########################## */ + +/* CRC FEATURE: Use to activate CRC feature inside HAL SPI Driver +* Activated: CRC code is present inside driver +* Deactivated: CRC code cleaned from driver +*/ + +#define USE_SPI_CRC 0U + +/* Includes ------------------------------------------------------------------*/ +/** + * @brief Include module's header file + */ + +#ifdef HAL_RCC_MODULE_ENABLED + #include "stm32f7xx_hal_rcc.h" +#endif /* HAL_RCC_MODULE_ENABLED */ + +#ifdef HAL_EXTI_MODULE_ENABLED + #include "stm32f7xx_hal_exti.h" +#endif /* HAL_EXTI_MODULE_ENABLED */ + +#ifdef HAL_GPIO_MODULE_ENABLED + #include "stm32f7xx_hal_gpio.h" +#endif /* HAL_GPIO_MODULE_ENABLED */ + +#ifdef HAL_DMA_MODULE_ENABLED + #include "stm32f7xx_hal_dma.h" +#endif /* HAL_DMA_MODULE_ENABLED */ + +#ifdef HAL_CORTEX_MODULE_ENABLED + #include "stm32f7xx_hal_cortex.h" +#endif /* HAL_CORTEX_MODULE_ENABLED */ + +#ifdef HAL_ADC_MODULE_ENABLED + #include "stm32f7xx_hal_adc.h" +#endif /* HAL_ADC_MODULE_ENABLED */ + +#ifdef HAL_CAN_MODULE_ENABLED + #include "stm32f7xx_hal_can.h" +#endif /* HAL_CAN_MODULE_ENABLED */ + +#ifdef HAL_CEC_MODULE_ENABLED + #include "stm32f7xx_hal_cec.h" +#endif /* HAL_CEC_MODULE_ENABLED */ + +#ifdef HAL_CRC_MODULE_ENABLED + #include "stm32f7xx_hal_crc.h" +#endif /* HAL_CRC_MODULE_ENABLED */ + +#ifdef HAL_CRYP_MODULE_ENABLED + #include "stm32f7xx_hal_cryp.h" +#endif /* HAL_CRYP_MODULE_ENABLED */ + +#ifdef HAL_DMA2D_MODULE_ENABLED + #include "stm32f7xx_hal_dma2d.h" +#endif /* HAL_DMA2D_MODULE_ENABLED */ + +#ifdef HAL_DAC_MODULE_ENABLED + #include "stm32f7xx_hal_dac.h" +#endif /* HAL_DAC_MODULE_ENABLED */ + +#ifdef HAL_DCMI_MODULE_ENABLED + #include "stm32f7xx_hal_dcmi.h" +#endif /* HAL_DCMI_MODULE_ENABLED */ + +#ifdef HAL_ETH_MODULE_ENABLED + #include "stm32f7xx_hal_eth.h" +#endif /* HAL_ETH_MODULE_ENABLED */ + +#ifdef HAL_FLASH_MODULE_ENABLED + #include "stm32f7xx_hal_flash.h" +#endif /* HAL_FLASH_MODULE_ENABLED */ + +#ifdef HAL_SRAM_MODULE_ENABLED + #include "stm32f7xx_hal_sram.h" +#endif /* HAL_SRAM_MODULE_ENABLED */ + +#ifdef HAL_NOR_MODULE_ENABLED + #include "stm32f7xx_hal_nor.h" +#endif /* HAL_NOR_MODULE_ENABLED */ + +#ifdef HAL_NAND_MODULE_ENABLED + #include "stm32f7xx_hal_nand.h" +#endif /* HAL_NAND_MODULE_ENABLED */ + +#ifdef HAL_SDRAM_MODULE_ENABLED + #include "stm32f7xx_hal_sdram.h" +#endif /* HAL_SDRAM_MODULE_ENABLED */ + +#ifdef HAL_HASH_MODULE_ENABLED + #include "stm32f7xx_hal_hash.h" +#endif /* 
HAL_HASH_MODULE_ENABLED */ + +#ifdef HAL_I2C_MODULE_ENABLED + #include "stm32f7xx_hal_i2c.h" +#endif /* HAL_I2C_MODULE_ENABLED */ + +#ifdef HAL_I2S_MODULE_ENABLED + #include "stm32f7xx_hal_i2s.h" +#endif /* HAL_I2S_MODULE_ENABLED */ + +#ifdef HAL_IWDG_MODULE_ENABLED + #include "stm32f7xx_hal_iwdg.h" +#endif /* HAL_IWDG_MODULE_ENABLED */ + +#ifdef HAL_LPTIM_MODULE_ENABLED + #include "stm32f7xx_hal_lptim.h" +#endif /* HAL_LPTIM_MODULE_ENABLED */ + +#ifdef HAL_LTDC_MODULE_ENABLED + #include "stm32f7xx_hal_ltdc.h" +#endif /* HAL_LTDC_MODULE_ENABLED */ + +#ifdef HAL_PWR_MODULE_ENABLED + #include "stm32f7xx_hal_pwr.h" +#endif /* HAL_PWR_MODULE_ENABLED */ + +#ifdef HAL_QSPI_MODULE_ENABLED + #include "stm32f7xx_hal_qspi.h" +#endif /* HAL_QSPI_MODULE_ENABLED */ + +#ifdef HAL_RNG_MODULE_ENABLED + #include "stm32f7xx_hal_rng.h" +#endif /* HAL_RNG_MODULE_ENABLED */ + +#ifdef HAL_RTC_MODULE_ENABLED + #include "stm32f7xx_hal_rtc.h" +#endif /* HAL_RTC_MODULE_ENABLED */ + +#ifdef HAL_SAI_MODULE_ENABLED + #include "stm32f7xx_hal_sai.h" +#endif /* HAL_SAI_MODULE_ENABLED */ + +#ifdef HAL_SD_MODULE_ENABLED + #include "stm32f7xx_hal_sd.h" +#endif /* HAL_SD_MODULE_ENABLED */ + +#ifdef HAL_MMC_MODULE_ENABLED + #include "stm32f7xx_hal_mmc.h" +#endif /* HAL_MMC_MODULE_ENABLED */ + +#ifdef HAL_SPDIFRX_MODULE_ENABLED + #include "stm32f7xx_hal_spdifrx.h" +#endif /* HAL_SPDIFRX_MODULE_ENABLED */ + +#ifdef HAL_SPI_MODULE_ENABLED + #include "stm32f7xx_hal_spi.h" +#endif /* HAL_SPI_MODULE_ENABLED */ + +#ifdef HAL_TIM_MODULE_ENABLED + #include "stm32f7xx_hal_tim.h" +#endif /* HAL_TIM_MODULE_ENABLED */ + +#ifdef HAL_UART_MODULE_ENABLED + #include "stm32f7xx_hal_uart.h" +#endif /* HAL_UART_MODULE_ENABLED */ + +#ifdef HAL_USART_MODULE_ENABLED + #include "stm32f7xx_hal_usart.h" +#endif /* HAL_USART_MODULE_ENABLED */ + +#ifdef HAL_IRDA_MODULE_ENABLED + #include "stm32f7xx_hal_irda.h" +#endif /* HAL_IRDA_MODULE_ENABLED */ + +#ifdef HAL_SMARTCARD_MODULE_ENABLED + #include "stm32f7xx_hal_smartcard.h" +#endif /* HAL_SMARTCARD_MODULE_ENABLED */ + +#ifdef HAL_WWDG_MODULE_ENABLED + #include "stm32f7xx_hal_wwdg.h" +#endif /* HAL_WWDG_MODULE_ENABLED */ + +#ifdef HAL_PCD_MODULE_ENABLED + #include "stm32f7xx_hal_pcd.h" +#endif /* HAL_PCD_MODULE_ENABLED */ + +#ifdef HAL_HCD_MODULE_ENABLED + #include "stm32f7xx_hal_hcd.h" +#endif /* HAL_HCD_MODULE_ENABLED */ + +#ifdef HAL_DFSDM_MODULE_ENABLED + #include "stm32f7xx_hal_dfsdm.h" +#endif /* HAL_DFSDM_MODULE_ENABLED */ + +#ifdef HAL_DSI_MODULE_ENABLED + #include "stm32f7xx_hal_dsi.h" +#endif /* HAL_DSI_MODULE_ENABLED */ + +#ifdef HAL_JPEG_MODULE_ENABLED + #include "stm32f7xx_hal_jpeg.h" +#endif /* HAL_JPEG_MODULE_ENABLED */ + +#ifdef HAL_MDIOS_MODULE_ENABLED + #include "stm32f7xx_hal_mdios.h" +#endif /* HAL_MDIOS_MODULE_ENABLED */ + +#ifdef HAL_SMBUS_MODULE_ENABLED + #include "stm32f7xx_hal_smbus.h" +#endif /* HAL_SMBUS_MODULE_ENABLED */ + +/* Exported macro ------------------------------------------------------------*/ +#ifdef USE_FULL_ASSERT +/** + * @brief The assert_param macro is used for function's parameters check. + * @param expr: If expr is false, it calls assert_failed function + * which reports the name of the source file and the source + * line number of the call that failed. + * If expr is true, it returns no value. + * @retval None + */ + #define assert_param(expr) ((expr) ? 
(void)0U : assert_failed((uint8_t *)__FILE__, __LINE__)) +/* Exported functions ------------------------------------------------------- */ + void assert_failed(uint8_t* file, uint32_t line); +#else + #define assert_param(expr) ((void)0U) +#endif /* USE_FULL_ASSERT */ + +#ifdef __cplusplus +} +#endif + +#endif /* __STM32F7xx_HAL_CONF_H */ + +/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Core/Inc/stm32f7xx_it.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Core/Inc/stm32f7xx_it.h new file mode 100644 index 0000000..ff5bb75 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Core/Inc/stm32f7xx_it.h @@ -0,0 +1,68 @@ +/* USER CODE BEGIN Header */ +/** + ****************************************************************************** + * @file stm32f7xx_it.h + * @brief This file contains the headers of the interrupt handlers. + ****************************************************************************** + * @attention + * + *
© Copyright (c) 2022 STMicroelectronics. + * All rights reserved.
+ * + * This software component is licensed by ST under BSD 3-Clause license, + * the "License"; You may not use this file except in compliance with the + * License. You may obtain a copy of the License at: + * opensource.org/licenses/BSD-3-Clause + * + ****************************************************************************** + */ +/* USER CODE END Header */ + +/* Define to prevent recursive inclusion -------------------------------------*/ +#ifndef __STM32F7xx_IT_H +#define __STM32F7xx_IT_H + +#ifdef __cplusplus + extern "C" { +#endif + +/* Private includes ----------------------------------------------------------*/ +/* USER CODE BEGIN Includes */ + +/* USER CODE END Includes */ + +/* Exported types ------------------------------------------------------------*/ +/* USER CODE BEGIN ET */ + +/* USER CODE END ET */ + +/* Exported constants --------------------------------------------------------*/ +/* USER CODE BEGIN EC */ + +/* USER CODE END EC */ + +/* Exported macro ------------------------------------------------------------*/ +/* USER CODE BEGIN EM */ + +/* USER CODE END EM */ + +/* Exported functions prototypes ---------------------------------------------*/ +void NMI_Handler(void); +void HardFault_Handler(void); +void MemManage_Handler(void); +void BusFault_Handler(void); +void UsageFault_Handler(void); +void DebugMon_Handler(void); +void TIM8_TRG_COM_TIM14_IRQHandler(void); +void ETH_IRQHandler(void); +/* USER CODE BEGIN EFP */ + +/* USER CODE END EFP */ + +#ifdef __cplusplus +} +#endif + +#endif /* __STM32F7xx_IT_H */ + +/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Core/Src/custom_memory_manager.c b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Core/Src/custom_memory_manager.c new file mode 100644 index 0000000..724a25c --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Core/Src/custom_memory_manager.c @@ -0,0 +1,459 @@ +/* + * A custom implementation of pvPortMallocMicroROS() and vPortFreeMicroROS() with realloc and + * calloc features based on FreeRTOS heap4.c. + */ + +#include + +/* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE prevents task.h from redefining +all the API functions to use the MPU wrappers. That should only be done when +task.h is included from an application file. */ +#define MPU_WRAPPERS_INCLUDED_FROM_API_FILE + +#include "FreeRTOS.h" +#include "task.h" + +#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE + +#if( configSUPPORT_DYNAMIC_ALLOCATION == 0 ) + #error This file must not be used if configSUPPORT_DYNAMIC_ALLOCATION is 0 +#endif + +/* Block sizes must not get too small. */ +#define heapMINIMUM_BLOCK_SIZE ( ( size_t ) ( xHeapStructSize << 1 ) ) + +/* Assumes 8bit bytes! */ +#define heapBITS_PER_BYTE ( ( size_t ) 8 ) + +/* Allocate the memory for the heap. */ +static uint8_t ucHeap[ configTOTAL_HEAP_SIZE ]; + +/* Define the linked list structure. This is used to link free blocks in order +of their memory address. */ +typedef struct A_BLOCK_LINK +{ + struct A_BLOCK_LINK *pxNextFreeBlock; /*<< The next free block in the list. */ + size_t xBlockSize; /*<< The size of the free block. */ +} BlockLink_t; + +/*-----------------------------------------------------------*/ + +/* + * Inserts a block of memory that is being freed into the correct position in + * the list of free memory blocks. The block being freed will be merged with + * the block in front it and/or the block behind it if the memory blocks are + * adjacent to each other. 
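 *
 * The merge test is purely address based: an existing free block pxIterator is
 * contiguous with the freed block pxBlockToInsert exactly when
 *
 *     ( ( uint8_t * ) pxIterator ) + pxIterator->xBlockSize == ( uint8_t * ) pxBlockToInsert
 *
 * in which case the two xBlockSize values are simply added together (see the
 * implementation of this function further below).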
+ */ +static void prvInsertBlockIntoFreeList( BlockLink_t *pxBlockToInsert ); + +/* + * Called automatically to setup the required heap structures the first time + * pvPortMallocMicroROS() is called. + */ +static void prvHeapInit( void ); + +/*-----------------------------------------------------------*/ + +/* The size of the structure placed at the beginning of each allocated memory +block must by correctly byte aligned. */ +static const size_t xHeapStructSize = ( sizeof( BlockLink_t ) + ( ( size_t ) ( portBYTE_ALIGNMENT - 1 ) ) ) & ~( ( size_t ) portBYTE_ALIGNMENT_MASK ); + +/* Create a couple of list links to mark the start and end of the list. */ +static BlockLink_t xStart, *pxEnd = NULL; + +/* Keeps track of the number of free bytes remaining, but says nothing about +fragmentation. */ +static size_t xFreeBytesRemaining = 0U; +static size_t xMinimumEverFreeBytesRemaining = 0U; + +/* Gets set to the top bit of an size_t type. When this bit in the xBlockSize +member of an BlockLink_t structure is set then the block belongs to the +application. When the bit is free the block is still part of the free heap +space. */ +static size_t xBlockAllocatedBit = 0; + +/*-----------------------------------------------------------*/ + +void *pvPortMallocMicroROS( size_t xWantedSize ) +{ +BlockLink_t *pxBlock, *pxPreviousBlock, *pxNewBlockLink; +void *pvReturn = NULL; + + vTaskSuspendAll(); + { + /* If this is the first call to malloc then the heap will require + initialisation to setup the list of free blocks. */ + if( pxEnd == NULL ) + { + prvHeapInit(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + /* Check the requested block size is not so large that the top bit is + set. The top bit of the block size member of the BlockLink_t structure + is used to determine who owns the block - the application or the + kernel, so it must be free. */ + if( ( xWantedSize & xBlockAllocatedBit ) == 0 ) + { + /* The wanted size is increased so it can contain a BlockLink_t + structure in addition to the requested amount of bytes. */ + if( xWantedSize > 0 ) + { + xWantedSize += xHeapStructSize; + + /* Ensure that blocks are always aligned to the required number + of bytes. */ + if( ( xWantedSize & portBYTE_ALIGNMENT_MASK ) != 0x00 ) + { + /* Byte alignment required. */ + xWantedSize += ( portBYTE_ALIGNMENT - ( xWantedSize & portBYTE_ALIGNMENT_MASK ) ); + configASSERT( ( xWantedSize & portBYTE_ALIGNMENT_MASK ) == 0 ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + if( ( xWantedSize > 0 ) && ( xWantedSize <= xFreeBytesRemaining ) ) + { + /* Traverse the list from the start (lowest address) block until + one of adequate size is found. */ + pxPreviousBlock = &xStart; + pxBlock = xStart.pxNextFreeBlock; + while( ( pxBlock->xBlockSize < xWantedSize ) && ( pxBlock->pxNextFreeBlock != NULL ) ) + { + pxPreviousBlock = pxBlock; + pxBlock = pxBlock->pxNextFreeBlock; + } + + /* If the end marker was reached then a block of adequate size + was not found. */ + if( pxBlock != pxEnd ) + { + /* Return the memory space pointed to - jumping over the + BlockLink_t structure at its start. */ + pvReturn = ( void * ) ( ( ( uint8_t * ) pxPreviousBlock->pxNextFreeBlock ) + xHeapStructSize ); + + /* This block is being returned for use so must be taken out + of the list of free blocks. */ + pxPreviousBlock->pxNextFreeBlock = pxBlock->pxNextFreeBlock; + + /* If the block is larger than required it can be split into + two. 
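 For example (sizes purely illustrative): a request that rounds up to 96 bytes
 (xWantedSize already includes the BlockLink_t header and alignment padding)
 taken from a 512 byte free block leaves a 96 byte allocated block plus a new
 416 byte free block, provided that remainder exceeds heapMINIMUM_BLOCK_SIZE.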
*/ + if( ( pxBlock->xBlockSize - xWantedSize ) > heapMINIMUM_BLOCK_SIZE ) + { + /* This block is to be split into two. Create a new + block following the number of bytes requested. The void + cast is used to prevent byte alignment warnings from the + compiler. */ + pxNewBlockLink = ( void * ) ( ( ( uint8_t * ) pxBlock ) + xWantedSize ); + configASSERT( ( ( ( size_t ) pxNewBlockLink ) & portBYTE_ALIGNMENT_MASK ) == 0 ); + + /* Calculate the sizes of two blocks split from the + single block. */ + pxNewBlockLink->xBlockSize = pxBlock->xBlockSize - xWantedSize; + pxBlock->xBlockSize = xWantedSize; + + /* Insert the new block into the list of free blocks. */ + prvInsertBlockIntoFreeList( pxNewBlockLink ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + xFreeBytesRemaining -= pxBlock->xBlockSize; + + if( xFreeBytesRemaining < xMinimumEverFreeBytesRemaining ) + { + xMinimumEverFreeBytesRemaining = xFreeBytesRemaining; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + /* The block is being returned - it is allocated and owned + by the application and has no "next" block. */ + pxBlock->xBlockSize |= xBlockAllocatedBit; + pxBlock->pxNextFreeBlock = NULL; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + traceMALLOC( pvReturn, xWantedSize ); + } + ( void ) xTaskResumeAll(); + + #if( configUSE_MALLOC_FAILED_HOOK == 1 ) + { + if( pvReturn == NULL ) + { + extern void vApplicationMallocFailedHook( void ); + vApplicationMallocFailedHook(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + #endif + + configASSERT( ( ( ( size_t ) pvReturn ) & ( size_t ) portBYTE_ALIGNMENT_MASK ) == 0 ); + return pvReturn; +} +/*-----------------------------------------------------------*/ + +void vPortFreeMicroROS( void *pv ) +{ +uint8_t *puc = ( uint8_t * ) pv; +BlockLink_t *pxLink; + + if( pv != NULL ) + { + /* The memory being freed will have an BlockLink_t structure immediately + before it. */ + puc -= xHeapStructSize; + + /* This casting is to keep the compiler from issuing warnings. */ + pxLink = ( void * ) puc; + + /* Check the block is actually allocated. */ + configASSERT( ( pxLink->xBlockSize & xBlockAllocatedBit ) != 0 ); + configASSERT( pxLink->pxNextFreeBlock == NULL ); + + if( ( pxLink->xBlockSize & xBlockAllocatedBit ) != 0 ) + { + if( pxLink->pxNextFreeBlock == NULL ) + { + /* The block is being returned to the heap - it is no longer + allocated. */ + pxLink->xBlockSize &= ~xBlockAllocatedBit; + + vTaskSuspendAll(); + { + /* Add this block to the list of free blocks. 
*/ + xFreeBytesRemaining += pxLink->xBlockSize; + traceFREE( pv, pxLink->xBlockSize ); + prvInsertBlockIntoFreeList( ( ( BlockLink_t * ) pxLink ) ); + } + ( void ) xTaskResumeAll(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } +} +/*-----------------------------------------------------------*/ + +/*-----------------------------------------------------------*/ +size_t getBlockSize( void *pv ) +{ + + uint8_t *puc = ( uint8_t * ) pv; + BlockLink_t *pxLink; + + puc -= xHeapStructSize; + pxLink = ( void * ) puc; + + size_t count = pxLink->xBlockSize & ~xBlockAllocatedBit; + + return count; +} +/*-----------------------------------------------------------*/ + +void *pvPortReallocMicroROS( void *pv, size_t xWantedSize ) +{ + vTaskSuspendAll(); + + void * newmem = pvPortMallocMicroROS(xWantedSize); + + uint8_t *puc = ( uint8_t * ) pv; + BlockLink_t *pxLink; + + puc -= xHeapStructSize; + pxLink = ( void * ) puc; + + + char *in_src = (char*)pv; + char *in_dest = (char*)newmem; + size_t count = pxLink->xBlockSize & ~xBlockAllocatedBit; + + while(count--) + *in_dest++ = *in_src++; + + vPortFreeMicroROS(pv); + + ( void ) xTaskResumeAll(); + + return newmem; +} +/*-----------------------------------------------------------*/ + +void *pvPortCallocMicroROS( size_t num, size_t xWantedSize ) +{ + vTaskSuspendAll(); + size_t count = xWantedSize*num; + + void * mem = pvPortMallocMicroROS(count); + char *in_dest = (char*)mem; + + while(count--) + *in_dest++ = 0; + + ( void ) xTaskResumeAll(); + return mem; +} +/*-----------------------------------------------------------*/ + +size_t xPortGetFreeHeapSizeMicroROS( void ) +{ + return xFreeBytesRemaining; +} +/*-----------------------------------------------------------*/ + +size_t xPortGetMinimumEverFreeHeapSizeMicroROS( void ) +{ + return xMinimumEverFreeBytesRemaining; +} +/*-----------------------------------------------------------*/ + +void vPortInitialiseBlocksMicroROS( void ) +{ + /* This just exists to keep the linker quiet. */ +} +/*-----------------------------------------------------------*/ + +static void prvHeapInit( void ) +{ +BlockLink_t *pxFirstFreeBlock; +uint8_t *pucAlignedHeap; +size_t uxAddress; +size_t xTotalHeapSize = configTOTAL_HEAP_SIZE; + + /* Ensure the heap starts on a correctly aligned boundary. */ + uxAddress = ( size_t ) ucHeap; + + if( ( uxAddress & portBYTE_ALIGNMENT_MASK ) != 0 ) + { + uxAddress += ( portBYTE_ALIGNMENT - 1 ); + uxAddress &= ~( ( size_t ) portBYTE_ALIGNMENT_MASK ); + xTotalHeapSize -= uxAddress - ( size_t ) ucHeap; + } + + pucAlignedHeap = ( uint8_t * ) uxAddress; + + /* xStart is used to hold a pointer to the first item in the list of free + blocks. The void cast is used to prevent compiler warnings. */ + xStart.pxNextFreeBlock = ( void * ) pucAlignedHeap; + xStart.xBlockSize = ( size_t ) 0; + + /* pxEnd is used to mark the end of the list of free blocks and is inserted + at the end of the heap space. */ + uxAddress = ( ( size_t ) pucAlignedHeap ) + xTotalHeapSize; + uxAddress -= xHeapStructSize; + uxAddress &= ~( ( size_t ) portBYTE_ALIGNMENT_MASK ); + pxEnd = ( void * ) uxAddress; + pxEnd->xBlockSize = 0; + pxEnd->pxNextFreeBlock = NULL; + + /* To start with there is a single free block that is sized to take up the + entire heap space, minus the space taken by pxEnd. 
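 The heap itself is the static ucHeap array declared at the top of this file, so
 its total size is fixed at build time by configTOTAL_HEAP_SIZE (normally set in
 the project's FreeRTOSConfig.h); purely as an illustrative sketch, not this
 project's actual figure:

     #define configTOTAL_HEAP_SIZE    ( ( size_t ) ( 70 * 1024 ) )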
*/ + pxFirstFreeBlock = ( void * ) pucAlignedHeap; + pxFirstFreeBlock->xBlockSize = uxAddress - ( size_t ) pxFirstFreeBlock; + pxFirstFreeBlock->pxNextFreeBlock = pxEnd; + + /* Only one block exists - and it covers the entire usable heap space. */ + xMinimumEverFreeBytesRemaining = pxFirstFreeBlock->xBlockSize; + xFreeBytesRemaining = pxFirstFreeBlock->xBlockSize; + + /* Work out the position of the top bit in a size_t variable. */ + xBlockAllocatedBit = ( ( size_t ) 1 ) << ( ( sizeof( size_t ) * heapBITS_PER_BYTE ) - 1 ); +} +/*-----------------------------------------------------------*/ + +static void prvInsertBlockIntoFreeList( BlockLink_t *pxBlockToInsert ) +{ +BlockLink_t *pxIterator; +uint8_t *puc; + + /* Iterate through the list until a block is found that has a higher address + than the block being inserted. */ + for( pxIterator = &xStart; pxIterator->pxNextFreeBlock < pxBlockToInsert; pxIterator = pxIterator->pxNextFreeBlock ) + { + /* Nothing to do here, just iterate to the right position. */ + } + + /* Do the block being inserted, and the block it is being inserted after + make a contiguous block of memory? */ + puc = ( uint8_t * ) pxIterator; + if( ( puc + pxIterator->xBlockSize ) == ( uint8_t * ) pxBlockToInsert ) + { + pxIterator->xBlockSize += pxBlockToInsert->xBlockSize; + pxBlockToInsert = pxIterator; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + /* Do the block being inserted, and the block it is being inserted before + make a contiguous block of memory? */ + puc = ( uint8_t * ) pxBlockToInsert; + if( ( puc + pxBlockToInsert->xBlockSize ) == ( uint8_t * ) pxIterator->pxNextFreeBlock ) + { + if( pxIterator->pxNextFreeBlock != pxEnd ) + { + /* Form one big block from the two blocks. */ + pxBlockToInsert->xBlockSize += pxIterator->pxNextFreeBlock->xBlockSize; + pxBlockToInsert->pxNextFreeBlock = pxIterator->pxNextFreeBlock->pxNextFreeBlock; + } + else + { + pxBlockToInsert->pxNextFreeBlock = pxEnd; + } + } + else + { + pxBlockToInsert->pxNextFreeBlock = pxIterator->pxNextFreeBlock; + } + + /* If the block being inserted plugged a gab, so was merged with the block + before and the block after, then it's pxNextFreeBlock pointer will have + already been set, and should not be set here as that would make it point + to itself. */ + if( pxIterator != pxBlockToInsert ) + { + pxIterator->pxNextFreeBlock = pxBlockToInsert; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } +} \ No newline at end of file diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Core/Src/freertos.c b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Core/Src/freertos.c new file mode 100644 index 0000000..635163a --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Core/Src/freertos.c @@ -0,0 +1,61 @@ +/* USER CODE BEGIN Header */ +/** + ****************************************************************************** + * File Name : freertos.c + * Description : Code for freertos applications + ****************************************************************************** + * @attention + * + *
© Copyright (c) 2022 STMicroelectronics. + * All rights reserved.
+ * + * This software component is licensed by ST under Ultimate Liberty license + * SLA0044, the "License"; You may not use this file except in compliance with + * the License. You may obtain a copy of the License at: + * www.st.com/SLA0044 + * + ****************************************************************************** + */ +/* USER CODE END Header */ + +/* Includes ------------------------------------------------------------------*/ +#include "FreeRTOS.h" +#include "task.h" +#include "main.h" + +/* Private includes ----------------------------------------------------------*/ +/* USER CODE BEGIN Includes */ + +/* USER CODE END Includes */ + +/* Private typedef -----------------------------------------------------------*/ +/* USER CODE BEGIN PTD */ + +/* USER CODE END PTD */ + +/* Private define ------------------------------------------------------------*/ +/* USER CODE BEGIN PD */ + +/* USER CODE END PD */ + +/* Private macro -------------------------------------------------------------*/ +/* USER CODE BEGIN PM */ + +/* USER CODE END PM */ + +/* Private variables ---------------------------------------------------------*/ +/* USER CODE BEGIN Variables */ + +/* USER CODE END Variables */ + +/* Private function prototypes -----------------------------------------------*/ +/* USER CODE BEGIN FunctionPrototypes */ + +/* USER CODE END FunctionPrototypes */ + +/* Private application code --------------------------------------------------*/ +/* USER CODE BEGIN Application */ + +/* USER CODE END Application */ + +/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Core/Src/main.c b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Core/Src/main.c new file mode 100644 index 0000000..e3d7d28 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Core/Src/main.c @@ -0,0 +1,505 @@ +/* USER CODE BEGIN Header */ +/** + ****************************************************************************** + * @file : main.c + * @brief : Main program body + ****************************************************************************** + * @attention + * + *
© Copyright (c) 2022 STMicroelectronics. + * All rights reserved.
+ * + * This software component is licensed by ST under BSD 3-Clause license, + * the "License"; You may not use this file except in compliance with the + * License. You may obtain a copy of the License at: + * opensource.org/licenses/BSD-3-Clause + * + ****************************************************************************** + */ +/* USER CODE END Header */ +/* Includes ------------------------------------------------------------------*/ +#include "main.h" +#include "cmsis_os.h" +#include "lwip.h" + +/* Private includes ----------------------------------------------------------*/ +/* USER CODE BEGIN Includes */ +#include +#include +#include +#include + +#include +/* USER CODE END Includes */ + +/* Private typedef -----------------------------------------------------------*/ +/* USER CODE BEGIN PTD */ + +/* USER CODE END PTD */ + +/* Private define ------------------------------------------------------------*/ +/* USER CODE BEGIN PD */ +/* USER CODE END PD */ + +/* Private macro -------------------------------------------------------------*/ +/* USER CODE BEGIN PM */ + +/* USER CODE END PM */ + +/* Private variables ---------------------------------------------------------*/ + +UART_HandleTypeDef huart3; + +PCD_HandleTypeDef hpcd_USB_OTG_FS; + +/* Definitions for defaultTask */ +osThreadId_t defaultTaskHandle; +const osThreadAttr_t defaultTask_attributes = { + .name = "defaultTask", + .stack_size = 8000 * 4, + .priority = (osPriority_t) osPriorityBelowNormal3, +}; +/* USER CODE BEGIN PV */ + +/* USER CODE END PV */ + +/* Private function prototypes -----------------------------------------------*/ +void SystemClock_Config(void); +static void MX_GPIO_Init(void); +static void MX_USART3_UART_Init(void); +static void MX_USB_OTG_FS_PCD_Init(void); +void StartDefaultTask(void *argument); + +/* USER CODE BEGIN PFP */ + +/* USER CODE END PFP */ + +/* Private user code ---------------------------------------------------------*/ +/* USER CODE BEGIN 0 */ + +/* USER CODE END 0 */ + +/** + * @brief The application entry point. + * @retval int + */ +int main(void) +{ + /* USER CODE BEGIN 1 */ + + /* USER CODE END 1 */ + + /* MCU Configuration--------------------------------------------------------*/ + + /* Reset of all peripherals, Initializes the Flash interface and the Systick. */ + HAL_Init(); + + /* USER CODE BEGIN Init */ + + /* USER CODE END Init */ + + /* Configure the system clock */ + SystemClock_Config(); + + /* USER CODE BEGIN SysInit */ + + /* USER CODE END SysInit */ + + /* Initialize all configured peripherals */ + MX_GPIO_Init(); + MX_USART3_UART_Init(); + MX_USB_OTG_FS_PCD_Init(); + /* USER CODE BEGIN 2 */ + + /* USER CODE END 2 */ + + /* Init scheduler */ + osKernelInitialize(); + + /* USER CODE BEGIN RTOS_MUTEX */ + /* add mutexes, ... */ + /* USER CODE END RTOS_MUTEX */ + + /* USER CODE BEGIN RTOS_SEMAPHORES */ + /* add semaphores, ... */ + /* USER CODE END RTOS_SEMAPHORES */ + + /* USER CODE BEGIN RTOS_TIMERS */ + /* start timers, add new ones, ... */ + /* USER CODE END RTOS_TIMERS */ + + /* USER CODE BEGIN RTOS_QUEUES */ + /* add queues, ... */ + /* USER CODE END RTOS_QUEUES */ + + /* Create the thread(s) */ + /* creation of defaultTask */ + defaultTaskHandle = osThreadNew(StartDefaultTask, NULL, &defaultTask_attributes); + + /* USER CODE BEGIN RTOS_THREADS */ + /* add threads, ... */ + /* USER CODE END RTOS_THREADS */ + + /* USER CODE BEGIN RTOS_EVENTS */ + /* add events, ... 
*/ + /* USER CODE END RTOS_EVENTS */ + + /* Start scheduler */ + osKernelStart(); + + /* We should never get here as control is now taken by the scheduler */ + /* Infinite loop */ + /* USER CODE BEGIN WHILE */ + while (1) + { + /* USER CODE END WHILE */ + + /* USER CODE BEGIN 3 */ + } + /* USER CODE END 3 */ +} + +/** + * @brief System Clock Configuration + * @retval None + */ +void SystemClock_Config(void) +{ + RCC_OscInitTypeDef RCC_OscInitStruct = {0}; + RCC_ClkInitTypeDef RCC_ClkInitStruct = {0}; + RCC_PeriphCLKInitTypeDef PeriphClkInitStruct = {0}; + + /** Configure LSE Drive Capability + */ + HAL_PWR_EnableBkUpAccess(); + /** Configure the main internal regulator output voltage + */ + __HAL_RCC_PWR_CLK_ENABLE(); + __HAL_PWR_VOLTAGESCALING_CONFIG(PWR_REGULATOR_VOLTAGE_SCALE3); + /** Initializes the RCC Oscillators according to the specified parameters + * in the RCC_OscInitTypeDef structure. + */ + RCC_OscInitStruct.OscillatorType = RCC_OSCILLATORTYPE_HSE; + RCC_OscInitStruct.HSEState = RCC_HSE_BYPASS; + RCC_OscInitStruct.PLL.PLLState = RCC_PLL_ON; + RCC_OscInitStruct.PLL.PLLSource = RCC_PLLSOURCE_HSE; + RCC_OscInitStruct.PLL.PLLM = 4; + RCC_OscInitStruct.PLL.PLLN = 96; + RCC_OscInitStruct.PLL.PLLP = RCC_PLLP_DIV2; + RCC_OscInitStruct.PLL.PLLQ = 4; + RCC_OscInitStruct.PLL.PLLR = 2; + if (HAL_RCC_OscConfig(&RCC_OscInitStruct) != HAL_OK) + { + Error_Handler(); + } + /** Activate the Over-Drive mode + */ + if (HAL_PWREx_EnableOverDrive() != HAL_OK) + { + Error_Handler(); + } + /** Initializes the CPU, AHB and APB buses clocks + */ + RCC_ClkInitStruct.ClockType = RCC_CLOCKTYPE_HCLK|RCC_CLOCKTYPE_SYSCLK + |RCC_CLOCKTYPE_PCLK1|RCC_CLOCKTYPE_PCLK2; + RCC_ClkInitStruct.SYSCLKSource = RCC_SYSCLKSOURCE_PLLCLK; + RCC_ClkInitStruct.AHBCLKDivider = RCC_SYSCLK_DIV1; + RCC_ClkInitStruct.APB1CLKDivider = RCC_HCLK_DIV2; + RCC_ClkInitStruct.APB2CLKDivider = RCC_HCLK_DIV1; + + if (HAL_RCC_ClockConfig(&RCC_ClkInitStruct, FLASH_LATENCY_3) != HAL_OK) + { + Error_Handler(); + } + PeriphClkInitStruct.PeriphClockSelection = RCC_PERIPHCLK_USART3|RCC_PERIPHCLK_CLK48; + PeriphClkInitStruct.Usart3ClockSelection = RCC_USART3CLKSOURCE_PCLK1; + PeriphClkInitStruct.Clk48ClockSelection = RCC_CLK48SOURCE_PLL; + if (HAL_RCCEx_PeriphCLKConfig(&PeriphClkInitStruct) != HAL_OK) + { + Error_Handler(); + } +} + +/** + * @brief USART3 Initialization Function + * @param None + * @retval None + */ +static void MX_USART3_UART_Init(void) +{ + + /* USER CODE BEGIN USART3_Init 0 */ + + /* USER CODE END USART3_Init 0 */ + + /* USER CODE BEGIN USART3_Init 1 */ + + /* USER CODE END USART3_Init 1 */ + huart3.Instance = USART3; + huart3.Init.BaudRate = 115200; + huart3.Init.WordLength = UART_WORDLENGTH_8B; + huart3.Init.StopBits = UART_STOPBITS_1; + huart3.Init.Parity = UART_PARITY_NONE; + huart3.Init.Mode = UART_MODE_TX_RX; + huart3.Init.HwFlowCtl = UART_HWCONTROL_NONE; + huart3.Init.OverSampling = UART_OVERSAMPLING_16; + huart3.Init.OneBitSampling = UART_ONE_BIT_SAMPLE_DISABLE; + huart3.AdvancedInit.AdvFeatureInit = UART_ADVFEATURE_NO_INIT; + if (HAL_UART_Init(&huart3) != HAL_OK) + { + Error_Handler(); + } + /* USER CODE BEGIN USART3_Init 2 */ + + /* USER CODE END USART3_Init 2 */ + +} + +/** + * @brief USB_OTG_FS Initialization Function + * @param None + * @retval None + */ +static void MX_USB_OTG_FS_PCD_Init(void) +{ + + /* USER CODE BEGIN USB_OTG_FS_Init 0 */ + + /* USER CODE END USB_OTG_FS_Init 0 */ + + /* USER CODE BEGIN USB_OTG_FS_Init 1 */ + + /* USER CODE END USB_OTG_FS_Init 1 */ + hpcd_USB_OTG_FS.Instance = USB_OTG_FS; + 
hpcd_USB_OTG_FS.Init.dev_endpoints = 6; + hpcd_USB_OTG_FS.Init.speed = PCD_SPEED_FULL; + hpcd_USB_OTG_FS.Init.dma_enable = DISABLE; + hpcd_USB_OTG_FS.Init.phy_itface = PCD_PHY_EMBEDDED; + hpcd_USB_OTG_FS.Init.Sof_enable = ENABLE; + hpcd_USB_OTG_FS.Init.low_power_enable = DISABLE; + hpcd_USB_OTG_FS.Init.lpm_enable = DISABLE; + hpcd_USB_OTG_FS.Init.vbus_sensing_enable = ENABLE; + hpcd_USB_OTG_FS.Init.use_dedicated_ep1 = DISABLE; + if (HAL_PCD_Init(&hpcd_USB_OTG_FS) != HAL_OK) + { + Error_Handler(); + } + /* USER CODE BEGIN USB_OTG_FS_Init 2 */ + + /* USER CODE END USB_OTG_FS_Init 2 */ + +} + +/** + * @brief GPIO Initialization Function + * @param None + * @retval None + */ +static void MX_GPIO_Init(void) +{ + GPIO_InitTypeDef GPIO_InitStruct = {0}; + + /* GPIO Ports Clock Enable */ + __HAL_RCC_GPIOC_CLK_ENABLE(); + __HAL_RCC_GPIOH_CLK_ENABLE(); + __HAL_RCC_GPIOA_CLK_ENABLE(); + __HAL_RCC_GPIOB_CLK_ENABLE(); + __HAL_RCC_GPIOD_CLK_ENABLE(); + __HAL_RCC_GPIOG_CLK_ENABLE(); + + /*Configure GPIO pin Output Level */ + HAL_GPIO_WritePin(GPIOB, LD1_Pin|LD3_Pin|LD2_Pin, GPIO_PIN_RESET); + + /*Configure GPIO pin Output Level */ + HAL_GPIO_WritePin(USB_PowerSwitchOn_GPIO_Port, USB_PowerSwitchOn_Pin, GPIO_PIN_RESET); + + /*Configure GPIO pin : USER_Btn_Pin */ + GPIO_InitStruct.Pin = USER_Btn_Pin; + GPIO_InitStruct.Mode = GPIO_MODE_IT_RISING; + GPIO_InitStruct.Pull = GPIO_NOPULL; + HAL_GPIO_Init(USER_Btn_GPIO_Port, &GPIO_InitStruct); + + /*Configure GPIO pins : LD1_Pin LD3_Pin LD2_Pin */ + GPIO_InitStruct.Pin = LD1_Pin|LD3_Pin|LD2_Pin; + GPIO_InitStruct.Mode = GPIO_MODE_OUTPUT_PP; + GPIO_InitStruct.Pull = GPIO_NOPULL; + GPIO_InitStruct.Speed = GPIO_SPEED_FREQ_LOW; + HAL_GPIO_Init(GPIOB, &GPIO_InitStruct); + + /*Configure GPIO pin : USB_PowerSwitchOn_Pin */ + GPIO_InitStruct.Pin = USB_PowerSwitchOn_Pin; + GPIO_InitStruct.Mode = GPIO_MODE_OUTPUT_PP; + GPIO_InitStruct.Pull = GPIO_NOPULL; + GPIO_InitStruct.Speed = GPIO_SPEED_FREQ_LOW; + HAL_GPIO_Init(USB_PowerSwitchOn_GPIO_Port, &GPIO_InitStruct); + + /*Configure GPIO pin : USB_OverCurrent_Pin */ + GPIO_InitStruct.Pin = USB_OverCurrent_Pin; + GPIO_InitStruct.Mode = GPIO_MODE_INPUT; + GPIO_InitStruct.Pull = GPIO_NOPULL; + HAL_GPIO_Init(USB_OverCurrent_GPIO_Port, &GPIO_InitStruct); + +} + +/* USER CODE BEGIN 4 */ +#ifdef __GNUC__ +int __io_putchar(int ch) +#else +int fputc(int ch, FILE *f) +#endif /* __GNUC__ */ +{ + HAL_UART_Transmit(&huart3, (uint8_t *)&ch, 1, 0xFFFF); + return ch; +} +/* USER CODE END 4 */ + +/* USER CODE BEGIN Header_StartDefaultTask */ +void * microros_allocate(size_t size, void * state); +void microros_deallocate(void * pointer, void * state); +void * microros_reallocate(void * pointer, size_t size, void * state); +void * microros_zero_allocate(size_t number_of_elements, size_t size_of_element, void * state); + +rcl_publisher_t publisher; +rcl_subscription_t subscriber; + +std_msgs__msg__UInt16 msg; + +void subscription_callback(const void * msgin) +{ + //printf("sub\r\n"); + + const std_msgs__msg__UInt16 * msg = (const std_msgs__msg__UInt16 *)msgin; + + rcl_publish(&publisher, (const void*)msg, NULL); +} + +/** + * @brief Function implementing the defaultTask thread. 
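  *         The task brings up lwIP, installs the custom micro-ROS allocators and
  *         then republishes on "to_linux" every std_msgs/UInt16 received on
  *         "to_stm".  One way to exercise it from a ROS 2 host reachable over the
  *         same network/domain (host-side commands shown as an assumed example,
  *         not part of this patch):
  * @code
  *   ros2 topic echo /to_linux
  *   ros2 topic pub --once /to_stm std_msgs/msg/UInt16 "{data: 42}"
  * @endcode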
+ * @param argument: Not used + * @retval None + */ +/* USER CODE END Header_StartDefaultTask */ +void StartDefaultTask(void *argument) +{ + /* init code for LWIP */ + MX_LWIP_Init(); + /* USER CODE BEGIN 5 */ + rcl_allocator_t freeRTOS_allocator = rcutils_get_zero_initialized_allocator(); + freeRTOS_allocator.allocate = microros_allocate; + freeRTOS_allocator.deallocate = microros_deallocate; + freeRTOS_allocator.reallocate = microros_reallocate; + freeRTOS_allocator.zero_allocate = microros_zero_allocate; + + if (!rcutils_set_default_allocator(&freeRTOS_allocator)) { + printf("Error on default allocators (line %d)\n", __LINE__); + } + + printf("main start\r\n"); + + // micro-ROS app + + rclc_support_t support; + rcl_allocator_t allocator; + rcl_node_t node; + + allocator = rcl_get_default_allocator(); + + //create init_options + rclc_support_init(&support, 0, NULL, &allocator); + + // create node + rclc_node_init_default(&node, "cubemx_node", "", &support); + + // create publisher + rclc_publisher_init_default( + &publisher, + &node, + ROSIDL_GET_MSG_TYPE_SUPPORT(std_msgs, msg, UInt16), + "to_linux"); + + // create subscriber + rclc_subscription_init_default( + &subscriber, + &node, + ROSIDL_GET_MSG_TYPE_SUPPORT(std_msgs, msg, UInt16), + "to_stm"); + + // Create executor + rclc_executor_t executor; + rclc_executor_init(&executor, &support.context, 3, &allocator); + rclc_executor_add_subscription(&executor, &subscriber, &msg, + &subscription_callback, ON_NEW_DATA); + + // TO print this enable FreeRTOS -> USE_STATS_FORMATTING_FUNCTIONS + + // static char ptrTaskList[1000]; + + // vTaskList(ptrTaskList); + // printf("**********************************"); + // printf("Task State Prio Stack Num"); + // printf("**********************************"); + // printf(ptrTaskList); + // printf("**********************************"); + + + for(;;) { + rclc_executor_spin_some(&executor, RCL_MS_TO_NS(10)); + osDelay(10); + } + /* USER CODE END 5 */ +} + +/** + * @brief Period elapsed callback in non blocking mode + * @note This function is called when TIM14 interrupt took place, inside + * HAL_TIM_IRQHandler(). It makes a direct call to HAL_IncTick() to increment + * a global variable "uwTick" used as application time base. + * @param htim : TIM handle + * @retval None + */ +void HAL_TIM_PeriodElapsedCallback(TIM_HandleTypeDef *htim) +{ + /* USER CODE BEGIN Callback 0 */ + + /* USER CODE END Callback 0 */ + if (htim->Instance == TIM14) { + HAL_IncTick(); + } + /* USER CODE BEGIN Callback 1 */ + + /* USER CODE END Callback 1 */ +} + +/** + * @brief This function is executed in case of error occurrence. + * @retval None + */ +void Error_Handler(void) +{ + /* USER CODE BEGIN Error_Handler_Debug */ + /* User can add his own implementation to report the HAL error return state */ + __disable_irq(); + while (1) + { + } + /* USER CODE END Error_Handler_Debug */ +} + +#ifdef USE_FULL_ASSERT +/** + * @brief Reports the name of the source file and the source line number + * where the assert_param error has occurred. 
+ * @param file: pointer to the source file name + * @param line: assert_param error line source number + * @retval None + */ +void assert_failed(uint8_t *file, uint32_t line) +{ + /* USER CODE BEGIN 6 */ + /* User can add his own implementation to report the file name and line number, + ex: printf("Wrong parameters value: file %s on line %d\r\n", file, line) */ + /* USER CODE END 6 */ +} +#endif /* USE_FULL_ASSERT */ + +/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Core/Src/microros_allocators.c b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Core/Src/microros_allocators.c new file mode 100644 index 0000000..aaf7a17 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Core/Src/microros_allocators.c @@ -0,0 +1,50 @@ + +#include +#include "cmsis_os.h" + +int absoluteUsedMemory = 0; +int usedMemory = 0; + +void *pvPortMallocMicroROS( size_t xWantedSize ); +void vPortFreeMicroROS( void *pv ); +void *pvPortReallocMicroROS( void *pv, size_t xWantedSize ); +size_t getBlockSize( void *pv ); +void *pvPortCallocMicroROS( size_t num, size_t xWantedSize ); + +void * microros_allocate(size_t size, void * state){ + (void) state; + // printf("-- Alloc %d (prev: %d B)\n",size, xPortGetFreeHeapSize()); + absoluteUsedMemory += size; + usedMemory += size; + return pvPortMallocMicroROS(size); +} + +void microros_deallocate(void * pointer, void * state){ + (void) state; + // printf("-- Free %d (prev: %d B)\n",getBlockSize(pointer), xPortGetFreeHeapSize()); + if (NULL != pointer){ + usedMemory -= getBlockSize(pointer); + vPortFreeMicroROS(pointer); + } +} + +void * microros_reallocate(void * pointer, size_t size, void * state){ + (void) state; + // printf("-- Realloc %d -> %d (prev: %d B)\n",getBlockSize(pointer),size, xPortGetFreeHeapSize()); + absoluteUsedMemory += size; + usedMemory += size; + if (NULL == pointer){ + return pvPortMallocMicroROS(size); + } else { + usedMemory -= getBlockSize(pointer); + return pvPortReallocMicroROS(pointer,size); + } +} + +void * microros_zero_allocate(size_t number_of_elements, size_t size_of_element, void * state){ + (void) state; + // printf("-- Calloc %d x %d = %d -> (prev: %d B)\n",number_of_elements,size_of_element, number_of_elements*size_of_element, xPortGetFreeHeapSize()); + absoluteUsedMemory += number_of_elements*size_of_element; + usedMemory += number_of_elements*size_of_element; + return pvPortCallocMicroROS(number_of_elements,size_of_element); +} \ No newline at end of file diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Core/Src/microros_time.c b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Core/Src/microros_time.c new file mode 100644 index 0000000..1022171 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Core/Src/microros_time.c @@ -0,0 +1,57 @@ +#include +#include +#include "cmsis_os.h" + +#define MICROSECONDS_PER_SECOND ( 1000000LL ) /**< Microseconds per second. */ +#define NANOSECONDS_PER_SECOND ( 1000000000LL ) /**< Nanoseconds per second. */ +#define NANOSECONDS_PER_TICK ( NANOSECONDS_PER_SECOND / configTICK_RATE_HZ ) /**< Nanoseconds per FreeRTOS tick. */ + +void UTILS_NanosecondsToTimespec( int64_t llSource, + struct timespec * const pxDestination ) +{ + long lCarrySec = 0; + + /* Convert to timespec. 
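    For instance (illustrative figures): llSource = 1500000000 ns gives
    tv_sec = 1 and tv_nsec = 500000000; the correction below only has work to
    do when llSource is negative.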
*/ + pxDestination->tv_sec = ( time_t ) ( llSource / NANOSECONDS_PER_SECOND ); + pxDestination->tv_nsec = ( long ) ( llSource % NANOSECONDS_PER_SECOND ); + + /* Subtract from tv_sec if tv_nsec < 0. */ + if( pxDestination->tv_nsec < 0L ) + { + /* Compute the number of seconds to carry. */ + lCarrySec = ( pxDestination->tv_nsec / ( long ) NANOSECONDS_PER_SECOND ) + 1L; + + pxDestination->tv_sec -= ( time_t ) ( lCarrySec ); + pxDestination->tv_nsec += lCarrySec * ( long ) NANOSECONDS_PER_SECOND; + } +} + +int clock_gettime( int clock_id, + struct timespec * tp ) +{ + TimeOut_t xCurrentTime = { 0 }; + + /* Intermediate variable used to convert TimeOut_t to struct timespec. + * Also used to detect overflow issues. It must be unsigned because the + * behavior of signed integer overflow is undefined. */ + uint64_t ullTickCount = 0ULL; + + /* Silence warnings about unused parameters. */ + ( void ) clock_id; + + /* Get the current tick count and overflow count. vTaskSetTimeOutState() + * is used to get these values because they are both static in tasks.c. */ + vTaskSetTimeOutState( &xCurrentTime ); + + /* Adjust the tick count for the number of times a TickType_t has overflowed. + * portMAX_DELAY should be the maximum value of a TickType_t. */ + ullTickCount = ( uint64_t ) ( xCurrentTime.xOverflowCount ) << ( sizeof( TickType_t ) * 8 ); + + /* Add the current tick count. */ + ullTickCount += xCurrentTime.xTimeOnEntering; + + /* Convert ullTickCount to timespec. */ + UTILS_NanosecondsToTimespec( ( int64_t ) ullTickCount * NANOSECONDS_PER_TICK, tp ); + + return 0; +} \ No newline at end of file diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Core/Src/stm32f7xx_hal_msp.c b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Core/Src/stm32f7xx_hal_msp.c new file mode 100644 index 0000000..20daa59 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Core/Src/stm32f7xx_hal_msp.c @@ -0,0 +1,228 @@ +/* USER CODE BEGIN Header */ +/** + ****************************************************************************** + * @file stm32f7xx_hal_msp.c + * @brief This file provides code for the MSP Initialization + * and de-Initialization codes. + ****************************************************************************** + * @attention + * + *
© Copyright (c) 2022 STMicroelectronics. + * All rights reserved.
+ * + * This software component is licensed by ST under BSD 3-Clause license, + * the "License"; You may not use this file except in compliance with the + * License. You may obtain a copy of the License at: + * opensource.org/licenses/BSD-3-Clause + * + ****************************************************************************** + */ +/* USER CODE END Header */ + +/* Includes ------------------------------------------------------------------*/ +#include "main.h" +/* USER CODE BEGIN Includes */ + +/* USER CODE END Includes */ + +/* Private typedef -----------------------------------------------------------*/ +/* USER CODE BEGIN TD */ + +/* USER CODE END TD */ + +/* Private define ------------------------------------------------------------*/ +/* USER CODE BEGIN Define */ + +/* USER CODE END Define */ + +/* Private macro -------------------------------------------------------------*/ +/* USER CODE BEGIN Macro */ + +/* USER CODE END Macro */ + +/* Private variables ---------------------------------------------------------*/ +/* USER CODE BEGIN PV */ + +/* USER CODE END PV */ + +/* Private function prototypes -----------------------------------------------*/ +/* USER CODE BEGIN PFP */ + +/* USER CODE END PFP */ + +/* External functions --------------------------------------------------------*/ +/* USER CODE BEGIN ExternalFunctions */ + +/* USER CODE END ExternalFunctions */ + +/* USER CODE BEGIN 0 */ + +/* USER CODE END 0 */ +/** + * Initializes the Global MSP. + */ +void HAL_MspInit(void) +{ + /* USER CODE BEGIN MspInit 0 */ + + /* USER CODE END MspInit 0 */ + + __HAL_RCC_PWR_CLK_ENABLE(); + __HAL_RCC_SYSCFG_CLK_ENABLE(); + + /* System interrupt init*/ + /* PendSV_IRQn interrupt configuration */ + HAL_NVIC_SetPriority(PendSV_IRQn, 15, 0); + + /* USER CODE BEGIN MspInit 1 */ + + /* USER CODE END MspInit 1 */ +} + +/** +* @brief UART MSP Initialization +* This function configures the hardware resources used in this example +* @param huart: UART handle pointer +* @retval None +*/ +void HAL_UART_MspInit(UART_HandleTypeDef* huart) +{ + GPIO_InitTypeDef GPIO_InitStruct = {0}; + if(huart->Instance==USART3) + { + /* USER CODE BEGIN USART3_MspInit 0 */ + + /* USER CODE END USART3_MspInit 0 */ + /* Peripheral clock enable */ + __HAL_RCC_USART3_CLK_ENABLE(); + + __HAL_RCC_GPIOD_CLK_ENABLE(); + /**USART3 GPIO Configuration + PD8 ------> USART3_TX + PD9 ------> USART3_RX + */ + GPIO_InitStruct.Pin = STLK_RX_Pin|STLK_TX_Pin; + GPIO_InitStruct.Mode = GPIO_MODE_AF_PP; + GPIO_InitStruct.Pull = GPIO_NOPULL; + GPIO_InitStruct.Speed = GPIO_SPEED_FREQ_VERY_HIGH; + GPIO_InitStruct.Alternate = GPIO_AF7_USART3; + HAL_GPIO_Init(GPIOD, &GPIO_InitStruct); + + /* USER CODE BEGIN USART3_MspInit 1 */ + + /* USER CODE END USART3_MspInit 1 */ + } + +} + +/** +* @brief UART MSP De-Initialization +* This function freeze the hardware resources used in this example +* @param huart: UART handle pointer +* @retval None +*/ +void HAL_UART_MspDeInit(UART_HandleTypeDef* huart) +{ + if(huart->Instance==USART3) + { + /* USER CODE BEGIN USART3_MspDeInit 0 */ + + /* USER CODE END USART3_MspDeInit 0 */ + /* Peripheral clock disable */ + __HAL_RCC_USART3_CLK_DISABLE(); + + /**USART3 GPIO Configuration + PD8 ------> USART3_TX + PD9 ------> USART3_RX + */ + HAL_GPIO_DeInit(GPIOD, STLK_RX_Pin|STLK_TX_Pin); + + /* USER CODE BEGIN USART3_MspDeInit 1 */ + + /* USER CODE END USART3_MspDeInit 1 */ + } + +} + +/** +* @brief PCD MSP Initialization +* This function configures the hardware resources used in this example +* @param hpcd: PCD handle 
pointer +* @retval None +*/ +void HAL_PCD_MspInit(PCD_HandleTypeDef* hpcd) +{ + GPIO_InitTypeDef GPIO_InitStruct = {0}; + if(hpcd->Instance==USB_OTG_FS) + { + /* USER CODE BEGIN USB_OTG_FS_MspInit 0 */ + + /* USER CODE END USB_OTG_FS_MspInit 0 */ + + __HAL_RCC_GPIOA_CLK_ENABLE(); + /**USB_OTG_FS GPIO Configuration + PA8 ------> USB_OTG_FS_SOF + PA9 ------> USB_OTG_FS_VBUS + PA10 ------> USB_OTG_FS_ID + PA11 ------> USB_OTG_FS_DM + PA12 ------> USB_OTG_FS_DP + */ + GPIO_InitStruct.Pin = USB_SOF_Pin|USB_ID_Pin|USB_DM_Pin|USB_DP_Pin; + GPIO_InitStruct.Mode = GPIO_MODE_AF_PP; + GPIO_InitStruct.Pull = GPIO_NOPULL; + GPIO_InitStruct.Speed = GPIO_SPEED_FREQ_VERY_HIGH; + GPIO_InitStruct.Alternate = GPIO_AF10_OTG_FS; + HAL_GPIO_Init(GPIOA, &GPIO_InitStruct); + + GPIO_InitStruct.Pin = USB_VBUS_Pin; + GPIO_InitStruct.Mode = GPIO_MODE_INPUT; + GPIO_InitStruct.Pull = GPIO_NOPULL; + HAL_GPIO_Init(USB_VBUS_GPIO_Port, &GPIO_InitStruct); + + /* Peripheral clock enable */ + __HAL_RCC_USB_OTG_FS_CLK_ENABLE(); + /* USER CODE BEGIN USB_OTG_FS_MspInit 1 */ + + /* USER CODE END USB_OTG_FS_MspInit 1 */ + } + +} + +/** +* @brief PCD MSP De-Initialization +* This function freeze the hardware resources used in this example +* @param hpcd: PCD handle pointer +* @retval None +*/ +void HAL_PCD_MspDeInit(PCD_HandleTypeDef* hpcd) +{ + if(hpcd->Instance==USB_OTG_FS) + { + /* USER CODE BEGIN USB_OTG_FS_MspDeInit 0 */ + + /* USER CODE END USB_OTG_FS_MspDeInit 0 */ + /* Peripheral clock disable */ + __HAL_RCC_USB_OTG_FS_CLK_DISABLE(); + + /**USB_OTG_FS GPIO Configuration + PA8 ------> USB_OTG_FS_SOF + PA9 ------> USB_OTG_FS_VBUS + PA10 ------> USB_OTG_FS_ID + PA11 ------> USB_OTG_FS_DM + PA12 ------> USB_OTG_FS_DP + */ + HAL_GPIO_DeInit(GPIOA, USB_SOF_Pin|USB_VBUS_Pin|USB_ID_Pin|USB_DM_Pin + |USB_DP_Pin); + + /* USER CODE BEGIN USB_OTG_FS_MspDeInit 1 */ + + /* USER CODE END USB_OTG_FS_MspDeInit 1 */ + } + +} + +/* USER CODE BEGIN 1 */ + +/* USER CODE END 1 */ + +/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Core/Src/stm32f7xx_hal_timebase_tim.c b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Core/Src/stm32f7xx_hal_timebase_tim.c new file mode 100644 index 0000000..571f6f5 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Core/Src/stm32f7xx_hal_timebase_tim.c @@ -0,0 +1,113 @@ +/* USER CODE BEGIN Header */ +/** + ****************************************************************************** + * @file stm32f7xx_hal_timebase_TIM.c + * @brief HAL time base based on the hardware TIM. + ****************************************************************************** + * @attention + * + *

© Copyright (c) 2022 STMicroelectronics. + * All rights reserved.

+ * + * This software component is licensed by ST under Ultimate Liberty license + * SLA0044, the "License"; You may not use this file except in compliance with + * the License. You may obtain a copy of the License at: + * www.st.com/SLA0044 + * + ****************************************************************************** + */ +/* USER CODE END Header */ + +/* Includes ------------------------------------------------------------------*/ +#include "stm32f7xx_hal.h" +#include "stm32f7xx_hal_tim.h" + +/* Private typedef -----------------------------------------------------------*/ +/* Private define ------------------------------------------------------------*/ +/* Private macro -------------------------------------------------------------*/ +/* Private variables ---------------------------------------------------------*/ +TIM_HandleTypeDef htim14; +/* Private function prototypes -----------------------------------------------*/ +/* Private functions ---------------------------------------------------------*/ + +/** + * @brief This function configures the TIM14 as a time base source. + * The time source is configured to have 1ms time base with a dedicated + * Tick interrupt priority. + * @note This function is called automatically at the beginning of program after + * reset by HAL_Init() or at any time when clock is configured, by HAL_RCC_ClockConfig(). + * @param TickPriority: Tick interrupt priority. + * @retval HAL status + */ +HAL_StatusTypeDef HAL_InitTick(uint32_t TickPriority) +{ + RCC_ClkInitTypeDef clkconfig; + uint32_t uwTimclock = 0; + uint32_t uwPrescalerValue = 0; + uint32_t pFLatency; + /*Configure the TIM14 IRQ priority */ + HAL_NVIC_SetPriority(TIM8_TRG_COM_TIM14_IRQn, TickPriority ,0); + + /* Enable the TIM14 global Interrupt */ + HAL_NVIC_EnableIRQ(TIM8_TRG_COM_TIM14_IRQn); + + /* Enable TIM14 clock */ + __HAL_RCC_TIM14_CLK_ENABLE(); + + /* Get clock configuration */ + HAL_RCC_GetClockConfig(&clkconfig, &pFLatency); + + /* Compute TIM14 clock */ + uwTimclock = 2*HAL_RCC_GetPCLK1Freq(); + /* Compute the prescaler value to have TIM14 counter clock equal to 1MHz */ + uwPrescalerValue = (uint32_t) ((uwTimclock / 1000000U) - 1U); + + /* Initialize TIM14 */ + htim14.Instance = TIM14; + + /* Initialize TIMx peripheral as follow: + + Period = [(TIM14CLK/1000) - 1]. to have a (1/1000) s time base. + + Prescaler = (uwTimclock/1000000 - 1) to have a 1MHz counter clock. + + ClockDivision = 0 + + Counter direction = Up + */ + htim14.Init.Period = (1000000U / 1000U) - 1U; + htim14.Init.Prescaler = uwPrescalerValue; + htim14.Init.ClockDivision = 0; + htim14.Init.CounterMode = TIM_COUNTERMODE_UP; + + if(HAL_TIM_Base_Init(&htim14) == HAL_OK) + { + /* Start the TIM time Base generation in interrupt mode */ + return HAL_TIM_Base_Start_IT(&htim14); + } + + /* Return function status */ + return HAL_ERROR; +} + +/** + * @brief Suspend Tick increment. + * @note Disable the tick increment by disabling TIM14 update interrupt. + * @param None + * @retval None + */ +void HAL_SuspendTick(void) +{ + /* Disable TIM14 update Interrupt */ + __HAL_TIM_DISABLE_IT(&htim14, TIM_IT_UPDATE); +} + +/** + * @brief Resume Tick increment. + * @note Enable the tick increment by Enabling TIM14 update interrupt. 
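The prescaler/period arithmetic in HAL_InitTick() above resolves to concrete values once the clock tree is known. A small sketch of that calculation, assuming the common 216 MHz SYSCLK / 54 MHz PCLK1 configuration for this board (an assumption; these figures are not stated in this diff):

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative host-side check of the timebase math; the clock figures are assumed. */
    int main(void)
    {
        uint32_t pclk1     = 54000000U;                    /* assumed PCLK1 */
        uint32_t timclock  = 2U * pclk1;                   /* mirrors uwTimclock = 2*HAL_RCC_GetPCLK1Freq() */
        uint32_t prescaler = (timclock / 1000000U) - 1U;   /* 107 -> 1 MHz counter clock */
        uint32_t period    = (1000000U / 1000U) - 1U;      /* 999 -> 1 kHz update, i.e. a 1 ms tick */

        printf("PSC=%lu ARR=%lu tick=%lu Hz\n",
               (unsigned long)prescaler, (unsigned long)period,
               (unsigned long)(timclock / (prescaler + 1U) / (period + 1U)));
        return 0;
    }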
+ * @param None + * @retval None + */ +void HAL_ResumeTick(void) +{ + /* Enable TIM14 Update interrupt */ + __HAL_TIM_ENABLE_IT(&htim14, TIM_IT_UPDATE); +} + +/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Core/Src/stm32f7xx_it.c b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Core/Src/stm32f7xx_it.c new file mode 100644 index 0000000..6a4bd43 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Core/Src/stm32f7xx_it.c @@ -0,0 +1,195 @@ +/* USER CODE BEGIN Header */ +/** + ****************************************************************************** + * @file stm32f7xx_it.c + * @brief Interrupt Service Routines. + ****************************************************************************** + * @attention + * + *

© Copyright (c) 2022 STMicroelectronics. + * All rights reserved.

+ * + * This software component is licensed by ST under BSD 3-Clause license, + * the "License"; You may not use this file except in compliance with the + * License. You may obtain a copy of the License at: + * opensource.org/licenses/BSD-3-Clause + * + ****************************************************************************** + */ +/* USER CODE END Header */ + +/* Includes ------------------------------------------------------------------*/ +#include "main.h" +#include "stm32f7xx_it.h" +/* Private includes ----------------------------------------------------------*/ +/* USER CODE BEGIN Includes */ +/* USER CODE END Includes */ + +/* Private typedef -----------------------------------------------------------*/ +/* USER CODE BEGIN TD */ + +/* USER CODE END TD */ + +/* Private define ------------------------------------------------------------*/ +/* USER CODE BEGIN PD */ + +/* USER CODE END PD */ + +/* Private macro -------------------------------------------------------------*/ +/* USER CODE BEGIN PM */ + +/* USER CODE END PM */ + +/* Private variables ---------------------------------------------------------*/ +/* USER CODE BEGIN PV */ + +/* USER CODE END PV */ + +/* Private function prototypes -----------------------------------------------*/ +/* USER CODE BEGIN PFP */ + +/* USER CODE END PFP */ + +/* Private user code ---------------------------------------------------------*/ +/* USER CODE BEGIN 0 */ + +/* USER CODE END 0 */ + +/* External variables --------------------------------------------------------*/ +extern ETH_HandleTypeDef heth; +extern TIM_HandleTypeDef htim14; + +/* USER CODE BEGIN EV */ + +/* USER CODE END EV */ + +/******************************************************************************/ +/* Cortex-M7 Processor Interruption and Exception Handlers */ +/******************************************************************************/ +/** + * @brief This function handles Non maskable interrupt. + */ +void NMI_Handler(void) +{ + /* USER CODE BEGIN NonMaskableInt_IRQn 0 */ + + /* USER CODE END NonMaskableInt_IRQn 0 */ + /* USER CODE BEGIN NonMaskableInt_IRQn 1 */ + while (1) + { + } + /* USER CODE END NonMaskableInt_IRQn 1 */ +} + +/** + * @brief This function handles Hard fault interrupt. + */ +void HardFault_Handler(void) +{ + /* USER CODE BEGIN HardFault_IRQn 0 */ + + /* USER CODE END HardFault_IRQn 0 */ + while (1) + { + /* USER CODE BEGIN W1_HardFault_IRQn 0 */ + /* USER CODE END W1_HardFault_IRQn 0 */ + } +} + +/** + * @brief This function handles Memory management fault. + */ +void MemManage_Handler(void) +{ + /* USER CODE BEGIN MemoryManagement_IRQn 0 */ + + /* USER CODE END MemoryManagement_IRQn 0 */ + while (1) + { + /* USER CODE BEGIN W1_MemoryManagement_IRQn 0 */ + /* USER CODE END W1_MemoryManagement_IRQn 0 */ + } +} + +/** + * @brief This function handles Pre-fetch fault, memory access fault. + */ +void BusFault_Handler(void) +{ + /* USER CODE BEGIN BusFault_IRQn 0 */ + + /* USER CODE END BusFault_IRQn 0 */ + while (1) + { + /* USER CODE BEGIN W1_BusFault_IRQn 0 */ + /* USER CODE END W1_BusFault_IRQn 0 */ + } +} + +/** + * @brief This function handles Undefined instruction or illegal state. + */ +void UsageFault_Handler(void) +{ + /* USER CODE BEGIN UsageFault_IRQn 0 */ + + /* USER CODE END UsageFault_IRQn 0 */ + while (1) + { + /* USER CODE BEGIN W1_UsageFault_IRQn 0 */ + /* USER CODE END W1_UsageFault_IRQn 0 */ + } +} + +/** + * @brief This function handles Debug monitor. 
+ */ +void DebugMon_Handler(void) +{ + /* USER CODE BEGIN DebugMonitor_IRQn 0 */ + + /* USER CODE END DebugMonitor_IRQn 0 */ + /* USER CODE BEGIN DebugMonitor_IRQn 1 */ + + /* USER CODE END DebugMonitor_IRQn 1 */ +} + +/******************************************************************************/ +/* STM32F7xx Peripheral Interrupt Handlers */ +/* Add here the Interrupt Handlers for the used peripherals. */ +/* For the available peripheral interrupt handler names, */ +/* please refer to the startup file (startup_stm32f7xx.s). */ +/******************************************************************************/ + +/** + * @brief This function handles TIM8 trigger and commutation interrupts and TIM14 global interrupt. + */ +void TIM8_TRG_COM_TIM14_IRQHandler(void) +{ + /* USER CODE BEGIN TIM8_TRG_COM_TIM14_IRQn 0 */ + + /* USER CODE END TIM8_TRG_COM_TIM14_IRQn 0 */ + HAL_TIM_IRQHandler(&htim14); + /* USER CODE BEGIN TIM8_TRG_COM_TIM14_IRQn 1 */ + + /* USER CODE END TIM8_TRG_COM_TIM14_IRQn 1 */ +} + +/** + * @brief This function handles Ethernet global interrupt. + */ +void ETH_IRQHandler(void) +{ + /* USER CODE BEGIN ETH_IRQn 0 */ + + /* USER CODE END ETH_IRQn 0 */ + HAL_ETH_IRQHandler(&heth); + /* USER CODE BEGIN ETH_IRQn 1 */ + + /* USER CODE END ETH_IRQn 1 */ +} + +/* USER CODE BEGIN 1 */ + +/* USER CODE END 1 */ +/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Core/Src/syscalls.c b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Core/Src/syscalls.c new file mode 100644 index 0000000..bc0dd6c --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Core/Src/syscalls.c @@ -0,0 +1,156 @@ +/** + ****************************************************************************** + * @file syscalls.c + * @author Auto-generated by STM32CubeIDE + * @brief STM32CubeIDE Minimal System calls file + * + * For more information about which c-functions + * need which of these lowlevel functions + * please consult the Newlib libc-manual + ****************************************************************************** + * @attention + * + *

© Copyright (c) 2020 STMicroelectronics. + * All rights reserved.

+ * + * This software component is licensed by ST under BSD 3-Clause license, + * the "License"; You may not use this file except in compliance with the + * License. You may obtain a copy of the License at: + * opensource.org/licenses/BSD-3-Clause + * + ****************************************************************************** + */ + +/* Includes */ +#include +#include +#include +#include +#include +#include +#include +#include + + +/* Variables */ +extern int __io_putchar(int ch) __attribute__((weak)); +extern int __io_getchar(void) __attribute__((weak)); + + +char *__env[1] = { 0 }; +char **environ = __env; + + +/* Functions */ +void initialise_monitor_handles() +{ +} + +int _getpid(void) +{ + return 1; +} + +int _kill(int pid, int sig) +{ + errno = EINVAL; + return -1; +} + +void _exit (int status) +{ + _kill(status, -1); + while (1) {} /* Make sure we hang here */ +} + +__attribute__((weak)) int _read(int file, char *ptr, int len) +{ + int DataIdx; + + for (DataIdx = 0; DataIdx < len; DataIdx++) + { + *ptr++ = __io_getchar(); + } + +return len; +} + +__attribute__((weak)) int _write(int file, char *ptr, int len) +{ + int DataIdx; + + for (DataIdx = 0; DataIdx < len; DataIdx++) + { + __io_putchar(*ptr++); + } + return len; +} + +int _close(int file) +{ + return -1; +} + + +int _fstat(int file, struct stat *st) +{ + st->st_mode = S_IFCHR; + return 0; +} + +int _isatty(int file) +{ + return 1; +} + +int _lseek(int file, int ptr, int dir) +{ + return 0; +} + +int _open(char *path, int flags, ...) +{ + /* Pretend like we always fail */ + return -1; +} + +int _wait(int *status) +{ + errno = ECHILD; + return -1; +} + +int _unlink(char *name) +{ + errno = ENOENT; + return -1; +} + +int _times(struct tms *buf) +{ + return -1; +} + +int _stat(char *file, struct stat *st) +{ + st->st_mode = S_IFCHR; + return 0; +} + +int _link(char *old, char *new) +{ + errno = EMLINK; + return -1; +} + +int _fork(void) +{ + errno = EAGAIN; + return -1; +} + +int _execve(char *name, char **argv, char **env) +{ + errno = ENOMEM; + return -1; +} diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Core/Src/sysmem.c b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Core/Src/sysmem.c new file mode 100644 index 0000000..d7cc52c --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Core/Src/sysmem.c @@ -0,0 +1,80 @@ +/** + ****************************************************************************** + * @file sysmem.c + * @author Generated by STM32CubeIDE + * @brief STM32CubeIDE System Memory calls file + * + * For more information about which C functions + * need which of these lowlevel functions + * please consult the newlib libc manual + ****************************************************************************** + * @attention + * + *
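Since _read() and _write() above simply forward to the weak __io_getchar()/__io_putchar() hooks, retargeting printf() to the board's console only requires supplying __io_putchar(). A minimal sketch, assuming the project exposes the USART3 handle as huart3 (the handle name is an assumption, not confirmed by this file):

    /* Sketch: route newlib's _write() to USART3 through the weak __io_putchar() hook. */
    #include "stm32f7xx_hal.h"

    extern UART_HandleTypeDef huart3;   /* assumed to be defined elsewhere in the project */

    int __io_putchar(int ch)
    {
        /* Blocking one-byte transmit keeps the sketch simple. */
        HAL_UART_Transmit(&huart3, (uint8_t *)&ch, 1, HAL_MAX_DELAY);
        return ch;
    }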

© Copyright (c) 2020 STMicroelectronics. + * All rights reserved.

+ * + * This software component is licensed by ST under BSD 3-Clause license, + * the "License"; You may not use this file except in compliance with the + * License. You may obtain a copy of the License at: + * opensource.org/licenses/BSD-3-Clause + * + ****************************************************************************** + */ + +/* Includes */ +#include +#include + +/** + * Pointer to the current high watermark of the heap usage + */ +static uint8_t *__sbrk_heap_end = NULL; + +/** + * @brief _sbrk() allocates memory to the newlib heap and is used by malloc + * and others from the C library + * + * @verbatim + * ############################################################################ + * # .data # .bss # newlib heap # MSP stack # + * # # # # Reserved by _Min_Stack_Size # + * ############################################################################ + * ^-- RAM start ^-- _end _estack, RAM end --^ + * @endverbatim + * + * This implementation starts allocating at the '_end' linker symbol + * The '_Min_Stack_Size' linker symbol reserves a memory for the MSP stack + * The implementation considers '_estack' linker symbol to be RAM end + * NOTE: If the MSP stack, at any point during execution, grows larger than the + * reserved size, please increase the '_Min_Stack_Size'. + * + * @param incr Memory size + * @return Pointer to allocated memory + */ +void *_sbrk(ptrdiff_t incr) +{ + extern uint8_t _end; /* Symbol defined in the linker script */ + extern uint8_t _estack; /* Symbol defined in the linker script */ + extern uint32_t _Min_Stack_Size; /* Symbol defined in the linker script */ + const uint32_t stack_limit = (uint32_t)&_estack - (uint32_t)&_Min_Stack_Size; + const uint8_t *max_heap = (uint8_t *)stack_limit; + uint8_t *prev_heap_end; + + /* Initialize heap end at first call */ + if (NULL == __sbrk_heap_end) + { + __sbrk_heap_end = &_end; + } + + /* Protect heap from growing into the reserved MSP stack */ + if (__sbrk_heap_end + incr > max_heap) + { + errno = ENOMEM; + return (void *)-1; + } + + prev_heap_end = __sbrk_heap_end; + __sbrk_heap_end += incr; + + return (void *)prev_heap_end; +} diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Core/Src/system_stm32f7xx.c b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Core/Src/system_stm32f7xx.c new file mode 100644 index 0000000..32e7fa5 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Core/Src/system_stm32f7xx.c @@ -0,0 +1,259 @@ +/** + ****************************************************************************** + * @file system_stm32f7xx.c + * @author MCD Application Team + * @brief CMSIS Cortex-M7 Device Peripheral Access Layer System Source File. + * + * This file provides two functions and one global variable to be called from + * user application: + * - SystemInit(): This function is called at startup just after reset and + * before branch to main program. This call is made inside + * the "startup_stm32f7xx.s" file. + * + * - SystemCoreClock variable: Contains the core clock (HCLK), it can be used + * by the user application to setup the SysTick + * timer or configure other parameters. + * + * - SystemCoreClockUpdate(): Updates the variable SystemCoreClock and must + * be called whenever the core clock is changed + * during program execution. + * + * + ****************************************************************************** + * @attention + * + * Copyright (c) 2016 STMicroelectronics. + * All rights reserved. 
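The memory layout sketched in the _sbrk() comment above also gives the total heap available to newlib directly from the same linker symbols. An illustrative helper (not part of the diff):

    #include <stddef.h>
    #include <stdint.h>

    /* Sketch: total heap that _sbrk() can hand out, using the symbols it already relies on. */
    size_t newlib_heap_size(void)
    {
        extern uint8_t  _end;            /* end of .bss, start of the heap */
        extern uint8_t  _estack;         /* RAM end / initial MSP */
        extern uint32_t _Min_Stack_Size; /* space reserved for the MSP stack */

        uint32_t heap_limit = (uint32_t)&_estack - (uint32_t)&_Min_Stack_Size;
        return (size_t)(heap_limit - (uint32_t)&_end);
    }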
+ * + * This software is licensed under terms that can be found in the LICENSE file + * in the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. + * + ****************************************************************************** + */ + +/** @addtogroup CMSIS + * @{ + */ + +/** @addtogroup stm32f7xx_system + * @{ + */ + +/** @addtogroup STM32F7xx_System_Private_Includes + * @{ + */ + +#include "stm32f7xx.h" + +#if !defined (HSE_VALUE) + #define HSE_VALUE ((uint32_t)25000000) /*!< Default value of the External oscillator in Hz */ +#endif /* HSE_VALUE */ + +#if !defined (HSI_VALUE) + #define HSI_VALUE ((uint32_t)16000000) /*!< Value of the Internal oscillator in Hz*/ +#endif /* HSI_VALUE */ + +/** + * @} + */ + +/** @addtogroup STM32F7xx_System_Private_TypesDefinitions + * @{ + */ + +/** + * @} + */ + +/** @addtogroup STM32F7xx_System_Private_Defines + * @{ + */ + +/************************* Miscellaneous Configuration ************************/ + +/* Note: Following vector table addresses must be defined in line with linker + configuration. */ +/*!< Uncomment the following line if you need to relocate the vector table + anywhere in Flash or Sram, else the vector table is kept at the automatic + remap of boot address selected */ +/* #define USER_VECT_TAB_ADDRESS */ + +#if defined(USER_VECT_TAB_ADDRESS) +/*!< Uncomment the following line if you need to relocate your vector Table + in Sram else user remap will be done in Flash. */ +/* #define VECT_TAB_SRAM */ +#if defined(VECT_TAB_SRAM) +#define VECT_TAB_BASE_ADDRESS RAMDTCM_BASE /*!< Vector Table base address field. + This value must be a multiple of 0x200. */ +#define VECT_TAB_OFFSET 0x00000000U /*!< Vector Table base offset field. + This value must be a multiple of 0x200. */ +#else +#define VECT_TAB_BASE_ADDRESS FLASH_BASE /*!< Vector Table base address field. + This value must be a multiple of 0x200. */ +#define VECT_TAB_OFFSET 0x00000000U /*!< Vector Table base offset field. + This value must be a multiple of 0x200. */ +#endif /* VECT_TAB_SRAM */ +#endif /* USER_VECT_TAB_ADDRESS */ +/******************************************************************************/ + +/** + * @} + */ + +/** @addtogroup STM32F7xx_System_Private_Macros + * @{ + */ + +/** + * @} + */ + +/** @addtogroup STM32F7xx_System_Private_Variables + * @{ + */ + + /* This variable is updated in three ways: + 1) by calling CMSIS function SystemCoreClockUpdate() + 2) by calling HAL API function HAL_RCC_GetHCLKFreq() + 3) each time HAL_RCC_ClockConfig() is called to configure the system clock frequency + Note: If you use this function to configure the system clock; then there + is no need to call the 2 first functions listed above, since SystemCoreClock + variable is updated automatically. + */ + uint32_t SystemCoreClock = 16000000; + const uint8_t AHBPrescTable[16] = {0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 3, 4, 6, 7, 8, 9}; + const uint8_t APBPrescTable[8] = {0, 0, 0, 0, 1, 2, 3, 4}; + +/** + * @} + */ + +/** @addtogroup STM32F7xx_System_Private_FunctionPrototypes + * @{ + */ + +/** + * @} + */ + +/** @addtogroup STM32F7xx_System_Private_Functions + * @{ + */ + +/** + * @brief Setup the microcontroller system + * Initialize the Embedded Flash Interface, the PLL and update the + * SystemFrequency variable. 
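Relocating the vector table as described above comes down to uncommenting USER_VECT_TAB_ADDRESS and choosing VECT_TAB_OFFSET; SystemInit() then writes the result into SCB->VTOR. A sketch of the net effect, with a placeholder offset that is not taken from this project:

    #include "stm32f7xx.h"

    /* Sketch: with USER_VECT_TAB_ADDRESS uncommented and VECT_TAB_OFFSET set to a
     * placeholder 0x00020000U (e.g. an application placed behind a bootloader),
     * SystemInit() effectively performs this write. The offset must stay a
     * multiple of 0x200. */
    void relocate_vector_table_sketch(void)
    {
        SCB->VTOR = FLASH_BASE | 0x00020000U;
    }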
+ * @param None + * @retval None + */ +void SystemInit(void) +{ + /* FPU settings ------------------------------------------------------------*/ +#if (__FPU_PRESENT == 1) && (__FPU_USED == 1) + SCB->CPACR |= ((3UL << 10*2)|(3UL << 11*2)); /* set CP10 and CP11 Full Access */ +#endif + + /* Configure the Vector Table location -------------------------------------*/ +#if defined(USER_VECT_TAB_ADDRESS) + SCB->VTOR = VECT_TAB_BASE_ADDRESS | VECT_TAB_OFFSET; /* Vector Table Relocation in Internal SRAM */ +#endif /* USER_VECT_TAB_ADDRESS */ +} + +/** + * @brief Update SystemCoreClock variable according to Clock Register Values. + * The SystemCoreClock variable contains the core clock (HCLK), it can + * be used by the user application to setup the SysTick timer or configure + * other parameters. + * + * @note Each time the core clock (HCLK) changes, this function must be called + * to update SystemCoreClock variable value. Otherwise, any configuration + * based on this variable will be incorrect. + * + * @note - The system frequency computed by this function is not the real + * frequency in the chip. It is calculated based on the predefined + * constant and the selected clock source: + * + * - If SYSCLK source is HSI, SystemCoreClock will contain the HSI_VALUE(*) + * + * - If SYSCLK source is HSE, SystemCoreClock will contain the HSE_VALUE(**) + * + * - If SYSCLK source is PLL, SystemCoreClock will contain the HSE_VALUE(**) + * or HSI_VALUE(*) multiplied/divided by the PLL factors. + * + * (*) HSI_VALUE is a constant defined in stm32f7xx_hal_conf.h file (default value + * 16 MHz) but the real value may vary depending on the variations + * in voltage and temperature. + * + * (**) HSE_VALUE is a constant defined in stm32f7xx_hal_conf.h file (default value + * 25 MHz), user has to ensure that HSE_VALUE is same as the real + * frequency of the crystal used. Otherwise, this function may + * have wrong result. + * + * - The result of this function could be not correct when using fractional + * value for HSE crystal. 
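The note above fixes the formula; plugging in concrete numbers makes it easy to check. A worked sketch, assuming an 8 MHz HSE with PLLM = 4, PLLN = 216, PLLP = 2 (illustrative values, not read from this diff):

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative only: the HSE and PLL settings below are assumptions. */
    int main(void)
    {
        uint32_t hse  = 8000000U;
        uint32_t pllm = 4U, plln = 216U, pllp = 2U;

        uint32_t vco    = (hse / pllm) * plln;   /* PLL_VCO = (HSE / PLL_M) * PLL_N = 432 MHz */
        uint32_t sysclk = vco / pllp;            /* SYSCLK  = PLL_VCO / PLL_P      = 216 MHz */

        printf("VCO=%lu Hz, SYSCLK=%lu Hz\n", (unsigned long)vco, (unsigned long)sysclk);
        return 0;
    }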
+ * + * @param None + * @retval None + */ +void SystemCoreClockUpdate(void) +{ + uint32_t tmp = 0, pllvco = 0, pllp = 2, pllsource = 0, pllm = 2; + + /* Get SYSCLK source -------------------------------------------------------*/ + tmp = RCC->CFGR & RCC_CFGR_SWS; + + switch (tmp) + { + case 0x00: /* HSI used as system clock source */ + SystemCoreClock = HSI_VALUE; + break; + case 0x04: /* HSE used as system clock source */ + SystemCoreClock = HSE_VALUE; + break; + case 0x08: /* PLL used as system clock source */ + + /* PLL_VCO = (HSE_VALUE or HSI_VALUE / PLL_M) * PLL_N + SYSCLK = PLL_VCO / PLL_P + */ + pllsource = (RCC->PLLCFGR & RCC_PLLCFGR_PLLSRC) >> 22; + pllm = RCC->PLLCFGR & RCC_PLLCFGR_PLLM; + + if (pllsource != 0) + { + /* HSE used as PLL clock source */ + pllvco = (HSE_VALUE / pllm) * ((RCC->PLLCFGR & RCC_PLLCFGR_PLLN) >> 6); + } + else + { + /* HSI used as PLL clock source */ + pllvco = (HSI_VALUE / pllm) * ((RCC->PLLCFGR & RCC_PLLCFGR_PLLN) >> 6); + } + + pllp = (((RCC->PLLCFGR & RCC_PLLCFGR_PLLP) >>16) + 1 ) *2; + SystemCoreClock = pllvco/pllp; + break; + default: + SystemCoreClock = HSI_VALUE; + break; + } + /* Compute HCLK frequency --------------------------------------------------*/ + /* Get HCLK prescaler */ + tmp = AHBPrescTable[((RCC->CFGR & RCC_CFGR_HPRE) >> 4)]; + /* HCLK frequency */ + SystemCoreClock >>= tmp; +} + +/** + * @} + */ + +/** + * @} + */ + +/** + * @} + */ +/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Core/Startup/startup_stm32f767zitx.s b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Core/Startup/startup_stm32f767zitx.s new file mode 100644 index 0000000..cd7170c --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Core/Startup/startup_stm32f767zitx.s @@ -0,0 +1,620 @@ +/** + ****************************************************************************** + * @file startup_stm32f767xx.s + * @author MCD Application Team + * @brief STM32F767xx Devices vector table for GCC based toolchain. + * This module performs: + * - Set the initial SP + * - Set the initial PC == Reset_Handler, + * - Set the vector table entries with the exceptions ISR address + * - Branches to main in the C library (which eventually + * calls main()). + * After Reset the Cortex-M7 processor is in Thread mode, + * priority is Privileged, and the Stack is set to Main. + ****************************************************************************** + * @attention + * + * Copyright (c) 2016 STMicroelectronics. + * All rights reserved. + * + * This software is licensed under terms that can be found in the LICENSE file + * in the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. + * + ****************************************************************************** + */ + + .syntax unified + .cpu cortex-m7 + .fpu softvfp + .thumb + +.global g_pfnVectors +.global Default_Handler + +/* start address for the initialization values of the .data section. +defined in linker script */ +.word _sidata +/* start address for the .data section. defined in linker script */ +.word _sdata +/* end address for the .data section. defined in linker script */ +.word _edata +/* start address for the .bss section. defined in linker script */ +.word _sbss +/* end address for the .bss section. 
defined in linker script */ +.word _ebss +/* stack used for SystemInit_ExtMemCtl; always internal RAM used */ + +/** + * @brief This is the code that gets called when the processor first + * starts execution following a reset event. Only the absolutely + * necessary set is performed, after which the application + * supplied main() routine is called. + * @param None + * @retval : None +*/ + + .section .text.Reset_Handler + .weak Reset_Handler + .type Reset_Handler, %function +Reset_Handler: + ldr sp, =_estack /* set stack pointer */ + +/* Copy the data segment initializers from flash to SRAM */ + ldr r0, =_sdata + ldr r1, =_edata + ldr r2, =_sidata + movs r3, #0 + b LoopCopyDataInit + +CopyDataInit: + ldr r4, [r2, r3] + str r4, [r0, r3] + adds r3, r3, #4 + +LoopCopyDataInit: + adds r4, r0, r3 + cmp r4, r1 + bcc CopyDataInit + +/* Zero fill the bss segment. */ + ldr r2, =_sbss + ldr r4, =_ebss + movs r3, #0 + b LoopFillZerobss + +FillZerobss: + str r3, [r2] + adds r2, r2, #4 + +LoopFillZerobss: + cmp r2, r4 + bcc FillZerobss + +/* Call the clock system initialization function.*/ + bl SystemInit +/* Call static constructors */ + bl __libc_init_array +/* Call the application's entry point.*/ + bl main + bx lr +.size Reset_Handler, .-Reset_Handler + +/** + * @brief This is the code that gets called when the processor receives an + * unexpected interrupt. This simply enters an infinite loop, preserving + * the system state for examination by a debugger. + * @param None + * @retval None +*/ + .section .text.Default_Handler,"ax",%progbits +Default_Handler: +Infinite_Loop: + b Infinite_Loop + .size Default_Handler, .-Default_Handler +/****************************************************************************** +* +* The minimal vector table for a Cortex M7. Note that the proper constructs +* must be placed on this to ensure that it ends up at physical address +* 0x0000.0000. 
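The startup sequence above is easier to audit against a C rendering of the same steps. A rough, non-normative sketch of what Reset_Handler does after the stack pointer is set (symbol types simplified):

    #include <stdint.h>

    extern uint32_t _sidata, _sdata, _edata, _sbss, _ebss;  /* linker-script symbols */
    extern void SystemInit(void);
    extern void __libc_init_array(void);
    extern int  main(void);

    /* Sketch: copy .data from flash, zero .bss, init clocks/FPU, run constructors, enter main(). */
    void reset_handler_sketch(void)
    {
        const uint32_t *src = &_sidata;
        for (uint32_t *dst = &_sdata; dst < &_edata; )   /* copy initialized data */
            *dst++ = *src++;
        for (uint32_t *dst = &_sbss; dst < &_ebss; )     /* zero the bss segment */
            *dst++ = 0U;

        SystemInit();
        __libc_init_array();
        (void)main();
    }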
+* +*******************************************************************************/ + .section .isr_vector,"a",%progbits + .type g_pfnVectors, %object + .size g_pfnVectors, .-g_pfnVectors + + +g_pfnVectors: + .word _estack + .word Reset_Handler + + .word NMI_Handler + .word HardFault_Handler + .word MemManage_Handler + .word BusFault_Handler + .word UsageFault_Handler + .word 0 + .word 0 + .word 0 + .word 0 + .word SVC_Handler + .word DebugMon_Handler + .word 0 + .word PendSV_Handler + .word SysTick_Handler + + /* External Interrupts */ + .word WWDG_IRQHandler /* Window WatchDog */ + .word PVD_IRQHandler /* PVD through EXTI Line detection */ + .word TAMP_STAMP_IRQHandler /* Tamper and TimeStamps through the EXTI line */ + .word RTC_WKUP_IRQHandler /* RTC Wakeup through the EXTI line */ + .word FLASH_IRQHandler /* FLASH */ + .word RCC_IRQHandler /* RCC */ + .word EXTI0_IRQHandler /* EXTI Line0 */ + .word EXTI1_IRQHandler /* EXTI Line1 */ + .word EXTI2_IRQHandler /* EXTI Line2 */ + .word EXTI3_IRQHandler /* EXTI Line3 */ + .word EXTI4_IRQHandler /* EXTI Line4 */ + .word DMA1_Stream0_IRQHandler /* DMA1 Stream 0 */ + .word DMA1_Stream1_IRQHandler /* DMA1 Stream 1 */ + .word DMA1_Stream2_IRQHandler /* DMA1 Stream 2 */ + .word DMA1_Stream3_IRQHandler /* DMA1 Stream 3 */ + .word DMA1_Stream4_IRQHandler /* DMA1 Stream 4 */ + .word DMA1_Stream5_IRQHandler /* DMA1 Stream 5 */ + .word DMA1_Stream6_IRQHandler /* DMA1 Stream 6 */ + .word ADC_IRQHandler /* ADC1, ADC2 and ADC3s */ + .word CAN1_TX_IRQHandler /* CAN1 TX */ + .word CAN1_RX0_IRQHandler /* CAN1 RX0 */ + .word CAN1_RX1_IRQHandler /* CAN1 RX1 */ + .word CAN1_SCE_IRQHandler /* CAN1 SCE */ + .word EXTI9_5_IRQHandler /* External Line[9:5]s */ + .word TIM1_BRK_TIM9_IRQHandler /* TIM1 Break and TIM9 */ + .word TIM1_UP_TIM10_IRQHandler /* TIM1 Update and TIM10 */ + .word TIM1_TRG_COM_TIM11_IRQHandler /* TIM1 Trigger and Commutation and TIM11 */ + .word TIM1_CC_IRQHandler /* TIM1 Capture Compare */ + .word TIM2_IRQHandler /* TIM2 */ + .word TIM3_IRQHandler /* TIM3 */ + .word TIM4_IRQHandler /* TIM4 */ + .word I2C1_EV_IRQHandler /* I2C1 Event */ + .word I2C1_ER_IRQHandler /* I2C1 Error */ + .word I2C2_EV_IRQHandler /* I2C2 Event */ + .word I2C2_ER_IRQHandler /* I2C2 Error */ + .word SPI1_IRQHandler /* SPI1 */ + .word SPI2_IRQHandler /* SPI2 */ + .word USART1_IRQHandler /* USART1 */ + .word USART2_IRQHandler /* USART2 */ + .word USART3_IRQHandler /* USART3 */ + .word EXTI15_10_IRQHandler /* External Line[15:10]s */ + .word RTC_Alarm_IRQHandler /* RTC Alarm (A and B) through EXTI Line */ + .word OTG_FS_WKUP_IRQHandler /* USB OTG FS Wakeup through EXTI line */ + .word TIM8_BRK_TIM12_IRQHandler /* TIM8 Break and TIM12 */ + .word TIM8_UP_TIM13_IRQHandler /* TIM8 Update and TIM13 */ + .word TIM8_TRG_COM_TIM14_IRQHandler /* TIM8 Trigger and Commutation and TIM14 */ + .word TIM8_CC_IRQHandler /* TIM8 Capture Compare */ + .word DMA1_Stream7_IRQHandler /* DMA1 Stream7 */ + .word FMC_IRQHandler /* FMC */ + .word SDMMC1_IRQHandler /* SDMMC1 */ + .word TIM5_IRQHandler /* TIM5 */ + .word SPI3_IRQHandler /* SPI3 */ + .word UART4_IRQHandler /* UART4 */ + .word UART5_IRQHandler /* UART5 */ + .word TIM6_DAC_IRQHandler /* TIM6 and DAC1&2 underrun errors */ + .word TIM7_IRQHandler /* TIM7 */ + .word DMA2_Stream0_IRQHandler /* DMA2 Stream 0 */ + .word DMA2_Stream1_IRQHandler /* DMA2 Stream 1 */ + .word DMA2_Stream2_IRQHandler /* DMA2 Stream 2 */ + .word DMA2_Stream3_IRQHandler /* DMA2 Stream 3 */ + .word DMA2_Stream4_IRQHandler /* DMA2 Stream 4 */ + .word ETH_IRQHandler 
/* Ethernet */ + .word ETH_WKUP_IRQHandler /* Ethernet Wakeup through EXTI line */ + .word CAN2_TX_IRQHandler /* CAN2 TX */ + .word CAN2_RX0_IRQHandler /* CAN2 RX0 */ + .word CAN2_RX1_IRQHandler /* CAN2 RX1 */ + .word CAN2_SCE_IRQHandler /* CAN2 SCE */ + .word OTG_FS_IRQHandler /* USB OTG FS */ + .word DMA2_Stream5_IRQHandler /* DMA2 Stream 5 */ + .word DMA2_Stream6_IRQHandler /* DMA2 Stream 6 */ + .word DMA2_Stream7_IRQHandler /* DMA2 Stream 7 */ + .word USART6_IRQHandler /* USART6 */ + .word I2C3_EV_IRQHandler /* I2C3 event */ + .word I2C3_ER_IRQHandler /* I2C3 error */ + .word OTG_HS_EP1_OUT_IRQHandler /* USB OTG HS End Point 1 Out */ + .word OTG_HS_EP1_IN_IRQHandler /* USB OTG HS End Point 1 In */ + .word OTG_HS_WKUP_IRQHandler /* USB OTG HS Wakeup through EXTI */ + .word OTG_HS_IRQHandler /* USB OTG HS */ + .word DCMI_IRQHandler /* DCMI */ + .word 0 /* Reserved */ + .word RNG_IRQHandler /* RNG */ + .word FPU_IRQHandler /* FPU */ + .word UART7_IRQHandler /* UART7 */ + .word UART8_IRQHandler /* UART8 */ + .word SPI4_IRQHandler /* SPI4 */ + .word SPI5_IRQHandler /* SPI5 */ + .word SPI6_IRQHandler /* SPI6 */ + .word SAI1_IRQHandler /* SAI1 */ + .word LTDC_IRQHandler /* LTDC */ + .word LTDC_ER_IRQHandler /* LTDC error */ + .word DMA2D_IRQHandler /* DMA2D */ + .word SAI2_IRQHandler /* SAI2 */ + .word QUADSPI_IRQHandler /* QUADSPI */ + .word LPTIM1_IRQHandler /* LPTIM1 */ + .word CEC_IRQHandler /* HDMI_CEC */ + .word I2C4_EV_IRQHandler /* I2C4 Event */ + .word I2C4_ER_IRQHandler /* I2C4 Error */ + .word SPDIF_RX_IRQHandler /* SPDIF_RX */ + .word 0 /* Reserved */ + .word DFSDM1_FLT0_IRQHandler /* DFSDM1 Filter 0 global Interrupt */ + .word DFSDM1_FLT1_IRQHandler /* DFSDM1 Filter 1 global Interrupt */ + .word DFSDM1_FLT2_IRQHandler /* DFSDM1 Filter 2 global Interrupt */ + .word DFSDM1_FLT3_IRQHandler /* DFSDM1 Filter 3 global Interrupt */ + .word SDMMC2_IRQHandler /* SDMMC2 */ + .word CAN3_TX_IRQHandler /* CAN3 TX */ + .word CAN3_RX0_IRQHandler /* CAN3 RX0 */ + .word CAN3_RX1_IRQHandler /* CAN3 RX1 */ + .word CAN3_SCE_IRQHandler /* CAN3 SCE */ + .word JPEG_IRQHandler /* JPEG */ + .word MDIOS_IRQHandler /* MDIOS */ + +/******************************************************************************* +* +* Provide weak aliases for each Exception handler to the Default_Handler. +* As they are weak aliases, any function with the same name will override +* this definition. 
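Because every entry above is only a weak alias of Default_Handler, an application overrides a handler simply by defining a function with the same name; the vector table itself never needs editing. A minimal sketch (TIM2 and its handle are chosen arbitrarily and are not part of this project):

    #include "stm32f7xx_hal.h"

    extern TIM_HandleTypeDef htim2;   /* hypothetical handle, for illustration only */

    /* Defining this symbol replaces the weak alias to Default_Handler at link time. */
    void TIM2_IRQHandler(void)
    {
        HAL_TIM_IRQHandler(&htim2);
    }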
+* +*******************************************************************************/ + .weak NMI_Handler + .thumb_set NMI_Handler,Default_Handler + + .weak HardFault_Handler + .thumb_set HardFault_Handler,Default_Handler + + .weak MemManage_Handler + .thumb_set MemManage_Handler,Default_Handler + + .weak BusFault_Handler + .thumb_set BusFault_Handler,Default_Handler + + .weak UsageFault_Handler + .thumb_set UsageFault_Handler,Default_Handler + + .weak SVC_Handler + .thumb_set SVC_Handler,Default_Handler + + .weak DebugMon_Handler + .thumb_set DebugMon_Handler,Default_Handler + + .weak PendSV_Handler + .thumb_set PendSV_Handler,Default_Handler + + .weak SysTick_Handler + .thumb_set SysTick_Handler,Default_Handler + + .weak WWDG_IRQHandler + .thumb_set WWDG_IRQHandler,Default_Handler + + .weak PVD_IRQHandler + .thumb_set PVD_IRQHandler,Default_Handler + + .weak TAMP_STAMP_IRQHandler + .thumb_set TAMP_STAMP_IRQHandler,Default_Handler + + .weak RTC_WKUP_IRQHandler + .thumb_set RTC_WKUP_IRQHandler,Default_Handler + + .weak FLASH_IRQHandler + .thumb_set FLASH_IRQHandler,Default_Handler + + .weak RCC_IRQHandler + .thumb_set RCC_IRQHandler,Default_Handler + + .weak EXTI0_IRQHandler + .thumb_set EXTI0_IRQHandler,Default_Handler + + .weak EXTI1_IRQHandler + .thumb_set EXTI1_IRQHandler,Default_Handler + + .weak EXTI2_IRQHandler + .thumb_set EXTI2_IRQHandler,Default_Handler + + .weak EXTI3_IRQHandler + .thumb_set EXTI3_IRQHandler,Default_Handler + + .weak EXTI4_IRQHandler + .thumb_set EXTI4_IRQHandler,Default_Handler + + .weak DMA1_Stream0_IRQHandler + .thumb_set DMA1_Stream0_IRQHandler,Default_Handler + + .weak DMA1_Stream1_IRQHandler + .thumb_set DMA1_Stream1_IRQHandler,Default_Handler + + .weak DMA1_Stream2_IRQHandler + .thumb_set DMA1_Stream2_IRQHandler,Default_Handler + + .weak DMA1_Stream3_IRQHandler + .thumb_set DMA1_Stream3_IRQHandler,Default_Handler + + .weak DMA1_Stream4_IRQHandler + .thumb_set DMA1_Stream4_IRQHandler,Default_Handler + + .weak DMA1_Stream5_IRQHandler + .thumb_set DMA1_Stream5_IRQHandler,Default_Handler + + .weak DMA1_Stream6_IRQHandler + .thumb_set DMA1_Stream6_IRQHandler,Default_Handler + + .weak ADC_IRQHandler + .thumb_set ADC_IRQHandler,Default_Handler + + .weak CAN1_TX_IRQHandler + .thumb_set CAN1_TX_IRQHandler,Default_Handler + + .weak CAN1_RX0_IRQHandler + .thumb_set CAN1_RX0_IRQHandler,Default_Handler + + .weak CAN1_RX1_IRQHandler + .thumb_set CAN1_RX1_IRQHandler,Default_Handler + + .weak CAN1_SCE_IRQHandler + .thumb_set CAN1_SCE_IRQHandler,Default_Handler + + .weak EXTI9_5_IRQHandler + .thumb_set EXTI9_5_IRQHandler,Default_Handler + + .weak TIM1_BRK_TIM9_IRQHandler + .thumb_set TIM1_BRK_TIM9_IRQHandler,Default_Handler + + .weak TIM1_UP_TIM10_IRQHandler + .thumb_set TIM1_UP_TIM10_IRQHandler,Default_Handler + + .weak TIM1_TRG_COM_TIM11_IRQHandler + .thumb_set TIM1_TRG_COM_TIM11_IRQHandler,Default_Handler + + .weak TIM1_CC_IRQHandler + .thumb_set TIM1_CC_IRQHandler,Default_Handler + + .weak TIM2_IRQHandler + .thumb_set TIM2_IRQHandler,Default_Handler + + .weak TIM3_IRQHandler + .thumb_set TIM3_IRQHandler,Default_Handler + + .weak TIM4_IRQHandler + .thumb_set TIM4_IRQHandler,Default_Handler + + .weak I2C1_EV_IRQHandler + .thumb_set I2C1_EV_IRQHandler,Default_Handler + + .weak I2C1_ER_IRQHandler + .thumb_set I2C1_ER_IRQHandler,Default_Handler + + .weak I2C2_EV_IRQHandler + .thumb_set I2C2_EV_IRQHandler,Default_Handler + + .weak I2C2_ER_IRQHandler + .thumb_set I2C2_ER_IRQHandler,Default_Handler + + .weak SPI1_IRQHandler + .thumb_set SPI1_IRQHandler,Default_Handler + + 
.weak SPI2_IRQHandler + .thumb_set SPI2_IRQHandler,Default_Handler + + .weak USART1_IRQHandler + .thumb_set USART1_IRQHandler,Default_Handler + + .weak USART2_IRQHandler + .thumb_set USART2_IRQHandler,Default_Handler + + .weak USART3_IRQHandler + .thumb_set USART3_IRQHandler,Default_Handler + + .weak EXTI15_10_IRQHandler + .thumb_set EXTI15_10_IRQHandler,Default_Handler + + .weak RTC_Alarm_IRQHandler + .thumb_set RTC_Alarm_IRQHandler,Default_Handler + + .weak OTG_FS_WKUP_IRQHandler + .thumb_set OTG_FS_WKUP_IRQHandler,Default_Handler + + .weak TIM8_BRK_TIM12_IRQHandler + .thumb_set TIM8_BRK_TIM12_IRQHandler,Default_Handler + + .weak TIM8_UP_TIM13_IRQHandler + .thumb_set TIM8_UP_TIM13_IRQHandler,Default_Handler + + .weak TIM8_TRG_COM_TIM14_IRQHandler + .thumb_set TIM8_TRG_COM_TIM14_IRQHandler,Default_Handler + + .weak TIM8_CC_IRQHandler + .thumb_set TIM8_CC_IRQHandler,Default_Handler + + .weak DMA1_Stream7_IRQHandler + .thumb_set DMA1_Stream7_IRQHandler,Default_Handler + + .weak FMC_IRQHandler + .thumb_set FMC_IRQHandler,Default_Handler + + .weak SDMMC1_IRQHandler + .thumb_set SDMMC1_IRQHandler,Default_Handler + + .weak TIM5_IRQHandler + .thumb_set TIM5_IRQHandler,Default_Handler + + .weak SPI3_IRQHandler + .thumb_set SPI3_IRQHandler,Default_Handler + + .weak UART4_IRQHandler + .thumb_set UART4_IRQHandler,Default_Handler + + .weak UART5_IRQHandler + .thumb_set UART5_IRQHandler,Default_Handler + + .weak TIM6_DAC_IRQHandler + .thumb_set TIM6_DAC_IRQHandler,Default_Handler + + .weak TIM7_IRQHandler + .thumb_set TIM7_IRQHandler,Default_Handler + + .weak DMA2_Stream0_IRQHandler + .thumb_set DMA2_Stream0_IRQHandler,Default_Handler + + .weak DMA2_Stream1_IRQHandler + .thumb_set DMA2_Stream1_IRQHandler,Default_Handler + + .weak DMA2_Stream2_IRQHandler + .thumb_set DMA2_Stream2_IRQHandler,Default_Handler + + .weak DMA2_Stream3_IRQHandler + .thumb_set DMA2_Stream3_IRQHandler,Default_Handler + + .weak DMA2_Stream4_IRQHandler + .thumb_set DMA2_Stream4_IRQHandler,Default_Handler + + .weak DMA2_Stream4_IRQHandler + .thumb_set DMA2_Stream4_IRQHandler,Default_Handler + + .weak ETH_IRQHandler + .thumb_set ETH_IRQHandler,Default_Handler + + .weak ETH_WKUP_IRQHandler + .thumb_set ETH_WKUP_IRQHandler,Default_Handler + + .weak CAN2_TX_IRQHandler + .thumb_set CAN2_TX_IRQHandler,Default_Handler + + .weak CAN2_RX0_IRQHandler + .thumb_set CAN2_RX0_IRQHandler,Default_Handler + + .weak CAN2_RX1_IRQHandler + .thumb_set CAN2_RX1_IRQHandler,Default_Handler + + .weak CAN2_SCE_IRQHandler + .thumb_set CAN2_SCE_IRQHandler,Default_Handler + + .weak OTG_FS_IRQHandler + .thumb_set OTG_FS_IRQHandler,Default_Handler + + .weak DMA2_Stream5_IRQHandler + .thumb_set DMA2_Stream5_IRQHandler,Default_Handler + + .weak DMA2_Stream6_IRQHandler + .thumb_set DMA2_Stream6_IRQHandler,Default_Handler + + .weak DMA2_Stream7_IRQHandler + .thumb_set DMA2_Stream7_IRQHandler,Default_Handler + + .weak USART6_IRQHandler + .thumb_set USART6_IRQHandler,Default_Handler + + .weak I2C3_EV_IRQHandler + .thumb_set I2C3_EV_IRQHandler,Default_Handler + + .weak I2C3_ER_IRQHandler + .thumb_set I2C3_ER_IRQHandler,Default_Handler + + .weak OTG_HS_EP1_OUT_IRQHandler + .thumb_set OTG_HS_EP1_OUT_IRQHandler,Default_Handler + + .weak OTG_HS_EP1_IN_IRQHandler + .thumb_set OTG_HS_EP1_IN_IRQHandler,Default_Handler + + .weak OTG_HS_WKUP_IRQHandler + .thumb_set OTG_HS_WKUP_IRQHandler,Default_Handler + + .weak OTG_HS_IRQHandler + .thumb_set OTG_HS_IRQHandler,Default_Handler + + .weak DCMI_IRQHandler + .thumb_set DCMI_IRQHandler,Default_Handler + + .weak RNG_IRQHandler + 
.thumb_set RNG_IRQHandler,Default_Handler + + .weak FPU_IRQHandler + .thumb_set FPU_IRQHandler,Default_Handler + + .weak UART7_IRQHandler + .thumb_set UART7_IRQHandler,Default_Handler + + .weak UART8_IRQHandler + .thumb_set UART8_IRQHandler,Default_Handler + + .weak SPI4_IRQHandler + .thumb_set SPI4_IRQHandler,Default_Handler + + .weak SPI5_IRQHandler + .thumb_set SPI5_IRQHandler,Default_Handler + + .weak SPI6_IRQHandler + .thumb_set SPI6_IRQHandler,Default_Handler + + .weak SAI1_IRQHandler + .thumb_set SAI1_IRQHandler,Default_Handler + + .weak LTDC_IRQHandler + .thumb_set LTDC_IRQHandler,Default_Handler + + .weak LTDC_ER_IRQHandler + .thumb_set LTDC_ER_IRQHandler,Default_Handler + + .weak DMA2D_IRQHandler + .thumb_set DMA2D_IRQHandler,Default_Handler + + .weak SAI2_IRQHandler + .thumb_set SAI2_IRQHandler,Default_Handler + + .weak QUADSPI_IRQHandler + .thumb_set QUADSPI_IRQHandler,Default_Handler + + .weak LPTIM1_IRQHandler + .thumb_set LPTIM1_IRQHandler,Default_Handler + + .weak CEC_IRQHandler + .thumb_set CEC_IRQHandler,Default_Handler + + .weak I2C4_EV_IRQHandler + .thumb_set I2C4_EV_IRQHandler,Default_Handler + + .weak I2C4_ER_IRQHandler + .thumb_set I2C4_ER_IRQHandler,Default_Handler + + .weak SPDIF_RX_IRQHandler + .thumb_set SPDIF_RX_IRQHandler,Default_Handler + + .weak DFSDM1_FLT0_IRQHandler + .thumb_set DFSDM1_FLT0_IRQHandler,Default_Handler + + .weak DFSDM1_FLT1_IRQHandler + .thumb_set DFSDM1_FLT1_IRQHandler,Default_Handler + + .weak DFSDM1_FLT2_IRQHandler + .thumb_set DFSDM1_FLT2_IRQHandler,Default_Handler + + .weak DFSDM1_FLT3_IRQHandler + .thumb_set DFSDM1_FLT3_IRQHandler,Default_Handler + + .weak SDMMC2_IRQHandler + .thumb_set SDMMC2_IRQHandler,Default_Handler + + .weak CAN3_TX_IRQHandler + .thumb_set CAN3_TX_IRQHandler,Default_Handler + + .weak CAN3_RX0_IRQHandler + .thumb_set CAN3_RX0_IRQHandler,Default_Handler + + .weak CAN3_RX1_IRQHandler + .thumb_set CAN3_RX1_IRQHandler,Default_Handler + + .weak CAN3_SCE_IRQHandler + .thumb_set CAN3_SCE_IRQHandler,Default_Handler + + .weak JPEG_IRQHandler + .thumb_set JPEG_IRQHandler,Default_Handler + + .weak MDIOS_IRQHandler + .thumb_set MDIOS_IRQHandler,Default_Handler + + + diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Drivers/CMSIS/Device/ST/STM32F7xx/Include/stm32f767xx.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Drivers/CMSIS/Device/ST/STM32F7xx/Include/stm32f767xx.h new file mode 100644 index 0000000..44daec2 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Drivers/CMSIS/Device/ST/STM32F7xx/Include/stm32f767xx.h @@ -0,0 +1,18941 @@ +/** + ****************************************************************************** + * @file stm32f767xx.h + * @author MCD Application Team + * @brief CMSIS Cortex-M7 Device Peripheral Access Layer Header File. + * + * This file contains: + * - Data structures and the address mapping for all peripherals + * - Peripheral's registers declarations and bits definition + * - Macros to access peripheral's registers hardware + * + ****************************************************************************** + * @attention + * + * Copyright (c) 2016 STMicroelectronics. + * All rights reserved. + * + * This software is licensed under terms that can be found in the LICENSE file + * in the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. 
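The device header that follows models every peripheral as a struct of __IO registers placed at a fixed base address and enumerates the interrupt lines as IRQn_Type values, so register and NVIC access reduce to ordinary C. A short usage sketch (ADC1 and ETH_IRQn are just convenient picks from the declarations below):

    #include "stm32f7xx.h"   /* pulls in stm32f767xx.h for this device */

    /* Sketch: read a peripheral register through its typedef'd struct. */
    uint32_t read_adc1_status(void)
    {
        return ADC1->SR;
    }

    /* Sketch: IRQn_Type values index the NVIC via the CMSIS helpers. */
    void enable_eth_irq(void)
    {
        NVIC_EnableIRQ(ETH_IRQn);
    }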
+ * + ****************************************************************************** + */ + +/** @addtogroup CMSIS_Device + * @{ + */ + +/** @addtogroup stm32f767xx + * @{ + */ + +#ifndef __STM32F767xx_H +#define __STM32F767xx_H + +#ifdef __cplusplus + extern "C" { +#endif /* __cplusplus */ + +/** @addtogroup Configuration_section_for_CMSIS + * @{ + */ + +/** + * @brief STM32F7xx Interrupt Number Definition, according to the selected device + * in @ref Library_configuration_section + */ +typedef enum +{ +/****** Cortex-M7 Processor Exceptions Numbers ****************************************************************/ + NonMaskableInt_IRQn = -14, /*!< 2 Non Maskable Interrupt */ + MemoryManagement_IRQn = -12, /*!< 4 Cortex-M7 Memory Management Interrupt */ + BusFault_IRQn = -11, /*!< 5 Cortex-M7 Bus Fault Interrupt */ + UsageFault_IRQn = -10, /*!< 6 Cortex-M7 Usage Fault Interrupt */ + SVCall_IRQn = -5, /*!< 11 Cortex-M7 SV Call Interrupt */ + DebugMonitor_IRQn = -4, /*!< 12 Cortex-M7 Debug Monitor Interrupt */ + PendSV_IRQn = -2, /*!< 14 Cortex-M7 Pend SV Interrupt */ + SysTick_IRQn = -1, /*!< 15 Cortex-M7 System Tick Interrupt */ +/****** STM32 specific Interrupt Numbers **********************************************************************/ + WWDG_IRQn = 0, /*!< Window WatchDog Interrupt */ + PVD_IRQn = 1, /*!< PVD through EXTI Line detection Interrupt */ + TAMP_STAMP_IRQn = 2, /*!< Tamper and TimeStamp interrupts through the EXTI line */ + RTC_WKUP_IRQn = 3, /*!< RTC Wakeup interrupt through the EXTI line */ + FLASH_IRQn = 4, /*!< FLASH global Interrupt */ + RCC_IRQn = 5, /*!< RCC global Interrupt */ + EXTI0_IRQn = 6, /*!< EXTI Line0 Interrupt */ + EXTI1_IRQn = 7, /*!< EXTI Line1 Interrupt */ + EXTI2_IRQn = 8, /*!< EXTI Line2 Interrupt */ + EXTI3_IRQn = 9, /*!< EXTI Line3 Interrupt */ + EXTI4_IRQn = 10, /*!< EXTI Line4 Interrupt */ + DMA1_Stream0_IRQn = 11, /*!< DMA1 Stream 0 global Interrupt */ + DMA1_Stream1_IRQn = 12, /*!< DMA1 Stream 1 global Interrupt */ + DMA1_Stream2_IRQn = 13, /*!< DMA1 Stream 2 global Interrupt */ + DMA1_Stream3_IRQn = 14, /*!< DMA1 Stream 3 global Interrupt */ + DMA1_Stream4_IRQn = 15, /*!< DMA1 Stream 4 global Interrupt */ + DMA1_Stream5_IRQn = 16, /*!< DMA1 Stream 5 global Interrupt */ + DMA1_Stream6_IRQn = 17, /*!< DMA1 Stream 6 global Interrupt */ + ADC_IRQn = 18, /*!< ADC1, ADC2 and ADC3 global Interrupts */ + CAN1_TX_IRQn = 19, /*!< CAN1 TX Interrupt */ + CAN1_RX0_IRQn = 20, /*!< CAN1 RX0 Interrupt */ + CAN1_RX1_IRQn = 21, /*!< CAN1 RX1 Interrupt */ + CAN1_SCE_IRQn = 22, /*!< CAN1 SCE Interrupt */ + EXTI9_5_IRQn = 23, /*!< External Line[9:5] Interrupts */ + TIM1_BRK_TIM9_IRQn = 24, /*!< TIM1 Break interrupt and TIM9 global interrupt */ + TIM1_UP_TIM10_IRQn = 25, /*!< TIM1 Update Interrupt and TIM10 global interrupt */ + TIM1_TRG_COM_TIM11_IRQn = 26, /*!< TIM1 Trigger and Commutation Interrupt and TIM11 global interrupt */ + TIM1_CC_IRQn = 27, /*!< TIM1 Capture Compare Interrupt */ + TIM2_IRQn = 28, /*!< TIM2 global Interrupt */ + TIM3_IRQn = 29, /*!< TIM3 global Interrupt */ + TIM4_IRQn = 30, /*!< TIM4 global Interrupt */ + I2C1_EV_IRQn = 31, /*!< I2C1 Event Interrupt */ + I2C1_ER_IRQn = 32, /*!< I2C1 Error Interrupt */ + I2C2_EV_IRQn = 33, /*!< I2C2 Event Interrupt */ + I2C2_ER_IRQn = 34, /*!< I2C2 Error Interrupt */ + SPI1_IRQn = 35, /*!< SPI1 global Interrupt */ + SPI2_IRQn = 36, /*!< SPI2 global Interrupt */ + USART1_IRQn = 37, /*!< USART1 global Interrupt */ + USART2_IRQn = 38, /*!< USART2 global Interrupt */ + USART3_IRQn = 39, /*!< USART3 global 
Interrupt */ + EXTI15_10_IRQn = 40, /*!< External Line[15:10] Interrupts */ + RTC_Alarm_IRQn = 41, /*!< RTC Alarm (A and B) through EXTI Line Interrupt */ + OTG_FS_WKUP_IRQn = 42, /*!< USB OTG FS Wakeup through EXTI line interrupt */ + TIM8_BRK_TIM12_IRQn = 43, /*!< TIM8 Break Interrupt and TIM12 global interrupt */ + TIM8_UP_TIM13_IRQn = 44, /*!< TIM8 Update Interrupt and TIM13 global interrupt */ + TIM8_TRG_COM_TIM14_IRQn = 45, /*!< TIM8 Trigger and Commutation Interrupt and TIM14 global interrupt */ + TIM8_CC_IRQn = 46, /*!< TIM8 Capture Compare Interrupt */ + DMA1_Stream7_IRQn = 47, /*!< DMA1 Stream7 Interrupt */ + FMC_IRQn = 48, /*!< FMC global Interrupt */ + SDMMC1_IRQn = 49, /*!< SDMMC1 global Interrupt */ + TIM5_IRQn = 50, /*!< TIM5 global Interrupt */ + SPI3_IRQn = 51, /*!< SPI3 global Interrupt */ + UART4_IRQn = 52, /*!< UART4 global Interrupt */ + UART5_IRQn = 53, /*!< UART5 global Interrupt */ + TIM6_DAC_IRQn = 54, /*!< TIM6 global and DAC1&2 underrun error interrupts */ + TIM7_IRQn = 55, /*!< TIM7 global interrupt */ + DMA2_Stream0_IRQn = 56, /*!< DMA2 Stream 0 global Interrupt */ + DMA2_Stream1_IRQn = 57, /*!< DMA2 Stream 1 global Interrupt */ + DMA2_Stream2_IRQn = 58, /*!< DMA2 Stream 2 global Interrupt */ + DMA2_Stream3_IRQn = 59, /*!< DMA2 Stream 3 global Interrupt */ + DMA2_Stream4_IRQn = 60, /*!< DMA2 Stream 4 global Interrupt */ + ETH_IRQn = 61, /*!< Ethernet global Interrupt */ + ETH_WKUP_IRQn = 62, /*!< Ethernet Wakeup through EXTI line Interrupt */ + CAN2_TX_IRQn = 63, /*!< CAN2 TX Interrupt */ + CAN2_RX0_IRQn = 64, /*!< CAN2 RX0 Interrupt */ + CAN2_RX1_IRQn = 65, /*!< CAN2 RX1 Interrupt */ + CAN2_SCE_IRQn = 66, /*!< CAN2 SCE Interrupt */ + OTG_FS_IRQn = 67, /*!< USB OTG FS global Interrupt */ + DMA2_Stream5_IRQn = 68, /*!< DMA2 Stream 5 global interrupt */ + DMA2_Stream6_IRQn = 69, /*!< DMA2 Stream 6 global interrupt */ + DMA2_Stream7_IRQn = 70, /*!< DMA2 Stream 7 global interrupt */ + USART6_IRQn = 71, /*!< USART6 global interrupt */ + I2C3_EV_IRQn = 72, /*!< I2C3 event interrupt */ + I2C3_ER_IRQn = 73, /*!< I2C3 error interrupt */ + OTG_HS_EP1_OUT_IRQn = 74, /*!< USB OTG HS End Point 1 Out global interrupt */ + OTG_HS_EP1_IN_IRQn = 75, /*!< USB OTG HS End Point 1 In global interrupt */ + OTG_HS_WKUP_IRQn = 76, /*!< USB OTG HS Wakeup through EXTI interrupt */ + OTG_HS_IRQn = 77, /*!< USB OTG HS global interrupt */ + DCMI_IRQn = 78, /*!< DCMI global interrupt */ + RNG_IRQn = 80, /*!< RNG global interrupt */ + FPU_IRQn = 81, /*!< FPU global interrupt */ + UART7_IRQn = 82, /*!< UART7 global interrupt */ + UART8_IRQn = 83, /*!< UART8 global interrupt */ + SPI4_IRQn = 84, /*!< SPI4 global Interrupt */ + SPI5_IRQn = 85, /*!< SPI5 global Interrupt */ + SPI6_IRQn = 86, /*!< SPI6 global Interrupt */ + SAI1_IRQn = 87, /*!< SAI1 global Interrupt */ + LTDC_IRQn = 88, /*!< LTDC global Interrupt */ + LTDC_ER_IRQn = 89, /*!< LTDC Error global Interrupt */ + DMA2D_IRQn = 90, /*!< DMA2D global Interrupt */ + SAI2_IRQn = 91, /*!< SAI2 global Interrupt */ + QUADSPI_IRQn = 92, /*!< Quad SPI global interrupt */ + LPTIM1_IRQn = 93, /*!< LP TIM1 interrupt */ + CEC_IRQn = 94, /*!< HDMI-CEC global Interrupt */ + I2C4_EV_IRQn = 95, /*!< I2C4 Event Interrupt */ + I2C4_ER_IRQn = 96, /*!< I2C4 Error Interrupt */ + SPDIF_RX_IRQn = 97, /*!< SPDIF-RX global Interrupt */ + DFSDM1_FLT0_IRQn = 99, /*!< DFSDM1 Filter 0 global Interrupt */ + DFSDM1_FLT1_IRQn = 100, /*!< DFSDM1 Filter 1 global Interrupt */ + DFSDM1_FLT2_IRQn = 101, /*!< DFSDM1 Filter 2 global Interrupt */ + DFSDM1_FLT3_IRQn = 102, /*!< 
DFSDM1 Filter 3 global Interrupt */ + SDMMC2_IRQn = 103, /*!< SDMMC2 global Interrupt */ + CAN3_TX_IRQn = 104, /*!< CAN3 TX Interrupt */ + CAN3_RX0_IRQn = 105, /*!< CAN3 RX0 Interrupt */ + CAN3_RX1_IRQn = 106, /*!< CAN3 RX1 Interrupt */ + CAN3_SCE_IRQn = 107, /*!< CAN3 SCE Interrupt */ + JPEG_IRQn = 108, /*!< JPEG global Interrupt */ + MDIOS_IRQn = 109 /*!< MDIO Slave global Interrupt */ +} IRQn_Type; + +/** + * @} + */ + +/** + * @brief Configuration of the Cortex-M7 Processor and Core Peripherals + */ +#define __CM7_REV 0x0100U /*!< Cortex-M7 revision r1p0 */ +#define __MPU_PRESENT 1U /*!< CM7 provides an MPU */ +#define __NVIC_PRIO_BITS 4U /*!< CM7 uses 4 Bits for the Priority Levels */ +#define __Vendor_SysTickConfig 0U /*!< Set to 1 if different SysTick Config is used */ +#define __FPU_PRESENT 1U /*!< FPU present */ +#define __ICACHE_PRESENT 1U /*!< CM7 instruction cache present */ +#define __DCACHE_PRESENT 1U /*!< CM7 data cache present */ +#include "core_cm7.h" /*!< Cortex-M7 processor and core peripherals */ + + +#include "system_stm32f7xx.h" +#include + +/** @addtogroup Peripheral_registers_structures + * @{ + */ + +/** + * @brief Analog to Digital Converter + */ + +typedef struct +{ + __IO uint32_t SR; /*!< ADC status register, Address offset: 0x00 */ + __IO uint32_t CR1; /*!< ADC control register 1, Address offset: 0x04 */ + __IO uint32_t CR2; /*!< ADC control register 2, Address offset: 0x08 */ + __IO uint32_t SMPR1; /*!< ADC sample time register 1, Address offset: 0x0C */ + __IO uint32_t SMPR2; /*!< ADC sample time register 2, Address offset: 0x10 */ + __IO uint32_t JOFR1; /*!< ADC injected channel data offset register 1, Address offset: 0x14 */ + __IO uint32_t JOFR2; /*!< ADC injected channel data offset register 2, Address offset: 0x18 */ + __IO uint32_t JOFR3; /*!< ADC injected channel data offset register 3, Address offset: 0x1C */ + __IO uint32_t JOFR4; /*!< ADC injected channel data offset register 4, Address offset: 0x20 */ + __IO uint32_t HTR; /*!< ADC watchdog higher threshold register, Address offset: 0x24 */ + __IO uint32_t LTR; /*!< ADC watchdog lower threshold register, Address offset: 0x28 */ + __IO uint32_t SQR1; /*!< ADC regular sequence register 1, Address offset: 0x2C */ + __IO uint32_t SQR2; /*!< ADC regular sequence register 2, Address offset: 0x30 */ + __IO uint32_t SQR3; /*!< ADC regular sequence register 3, Address offset: 0x34 */ + __IO uint32_t JSQR; /*!< ADC injected sequence register, Address offset: 0x38*/ + __IO uint32_t JDR1; /*!< ADC injected data register 1, Address offset: 0x3C */ + __IO uint32_t JDR2; /*!< ADC injected data register 2, Address offset: 0x40 */ + __IO uint32_t JDR3; /*!< ADC injected data register 3, Address offset: 0x44 */ + __IO uint32_t JDR4; /*!< ADC injected data register 4, Address offset: 0x48 */ + __IO uint32_t DR; /*!< ADC regular data register, Address offset: 0x4C */ +} ADC_TypeDef; + +typedef struct +{ + __IO uint32_t CSR; /*!< ADC Common status register, Address offset: ADC1 base address + 0x300 */ + __IO uint32_t CCR; /*!< ADC common control register, Address offset: ADC1 base address + 0x304 */ + __IO uint32_t CDR; /*!< ADC common regular data register for dual + AND triple modes, Address offset: ADC1 base address + 0x308 */ +} ADC_Common_TypeDef; + + +/** + * @brief Controller Area Network TxMailBox + */ + +typedef struct +{ + __IO uint32_t TIR; /*!< CAN TX mailbox identifier register */ + __IO uint32_t TDTR; /*!< CAN mailbox data length control and time stamp register */ + __IO uint32_t TDLR; /*!< CAN mailbox 
data low register */ + __IO uint32_t TDHR; /*!< CAN mailbox data high register */ +} CAN_TxMailBox_TypeDef; + +/** + * @brief Controller Area Network FIFOMailBox + */ + +typedef struct +{ + __IO uint32_t RIR; /*!< CAN receive FIFO mailbox identifier register */ + __IO uint32_t RDTR; /*!< CAN receive FIFO mailbox data length control and time stamp register */ + __IO uint32_t RDLR; /*!< CAN receive FIFO mailbox data low register */ + __IO uint32_t RDHR; /*!< CAN receive FIFO mailbox data high register */ +} CAN_FIFOMailBox_TypeDef; + +/** + * @brief Controller Area Network FilterRegister + */ + +typedef struct +{ + __IO uint32_t FR1; /*!< CAN Filter bank register 1 */ + __IO uint32_t FR2; /*!< CAN Filter bank register 1 */ +} CAN_FilterRegister_TypeDef; + +/** + * @brief Controller Area Network + */ + +typedef struct +{ + __IO uint32_t MCR; /*!< CAN master control register, Address offset: 0x00 */ + __IO uint32_t MSR; /*!< CAN master status register, Address offset: 0x04 */ + __IO uint32_t TSR; /*!< CAN transmit status register, Address offset: 0x08 */ + __IO uint32_t RF0R; /*!< CAN receive FIFO 0 register, Address offset: 0x0C */ + __IO uint32_t RF1R; /*!< CAN receive FIFO 1 register, Address offset: 0x10 */ + __IO uint32_t IER; /*!< CAN interrupt enable register, Address offset: 0x14 */ + __IO uint32_t ESR; /*!< CAN error status register, Address offset: 0x18 */ + __IO uint32_t BTR; /*!< CAN bit timing register, Address offset: 0x1C */ + uint32_t RESERVED0[88]; /*!< Reserved, 0x020 - 0x17F */ + CAN_TxMailBox_TypeDef sTxMailBox[3]; /*!< CAN Tx MailBox, Address offset: 0x180 - 0x1AC */ + CAN_FIFOMailBox_TypeDef sFIFOMailBox[2]; /*!< CAN FIFO MailBox, Address offset: 0x1B0 - 0x1CC */ + uint32_t RESERVED1[12]; /*!< Reserved, 0x1D0 - 0x1FF */ + __IO uint32_t FMR; /*!< CAN filter master register, Address offset: 0x200 */ + __IO uint32_t FM1R; /*!< CAN filter mode register, Address offset: 0x204 */ + uint32_t RESERVED2; /*!< Reserved, 0x208 */ + __IO uint32_t FS1R; /*!< CAN filter scale register, Address offset: 0x20C */ + uint32_t RESERVED3; /*!< Reserved, 0x210 */ + __IO uint32_t FFA1R; /*!< CAN filter FIFO assignment register, Address offset: 0x214 */ + uint32_t RESERVED4; /*!< Reserved, 0x218 */ + __IO uint32_t FA1R; /*!< CAN filter activation register, Address offset: 0x21C */ + uint32_t RESERVED5[8]; /*!< Reserved, 0x220-0x23F */ + CAN_FilterRegister_TypeDef sFilterRegister[28]; /*!< CAN Filter Register, Address offset: 0x240-0x31C */ +} CAN_TypeDef; + +/** + * @brief HDMI-CEC + */ + +typedef struct +{ + __IO uint32_t CR; /*!< CEC control register, Address offset:0x00 */ + __IO uint32_t CFGR; /*!< CEC configuration register, Address offset:0x04 */ + __IO uint32_t TXDR; /*!< CEC Tx data register , Address offset:0x08 */ + __IO uint32_t RXDR; /*!< CEC Rx Data Register, Address offset:0x0C */ + __IO uint32_t ISR; /*!< CEC Interrupt and Status Register, Address offset:0x10 */ + __IO uint32_t IER; /*!< CEC interrupt enable register, Address offset:0x14 */ +}CEC_TypeDef; + +/** + * @brief CRC calculation unit + */ + +typedef struct +{ + __IO uint32_t DR; /*!< CRC Data register, Address offset: 0x00 */ + __IO uint8_t IDR; /*!< CRC Independent data register, Address offset: 0x04 */ + uint8_t RESERVED0; /*!< Reserved, 0x05 */ + uint16_t RESERVED1; /*!< Reserved, 0x06 */ + __IO uint32_t CR; /*!< CRC Control register, Address offset: 0x08 */ + uint32_t RESERVED2; /*!< Reserved, 0x0C */ + __IO uint32_t INIT; /*!< Initial CRC value register, Address offset: 0x10 */ + __IO uint32_t POL; /*!< CRC 
polynomial register, Address offset: 0x14 */ +} CRC_TypeDef; + +/** + * @brief Digital to Analog Converter + */ + +typedef struct +{ + __IO uint32_t CR; /*!< DAC control register, Address offset: 0x00 */ + __IO uint32_t SWTRIGR; /*!< DAC software trigger register, Address offset: 0x04 */ + __IO uint32_t DHR12R1; /*!< DAC channel1 12-bit right-aligned data holding register, Address offset: 0x08 */ + __IO uint32_t DHR12L1; /*!< DAC channel1 12-bit left aligned data holding register, Address offset: 0x0C */ + __IO uint32_t DHR8R1; /*!< DAC channel1 8-bit right aligned data holding register, Address offset: 0x10 */ + __IO uint32_t DHR12R2; /*!< DAC channel2 12-bit right aligned data holding register, Address offset: 0x14 */ + __IO uint32_t DHR12L2; /*!< DAC channel2 12-bit left aligned data holding register, Address offset: 0x18 */ + __IO uint32_t DHR8R2; /*!< DAC channel2 8-bit right-aligned data holding register, Address offset: 0x1C */ + __IO uint32_t DHR12RD; /*!< Dual DAC 12-bit right-aligned data holding register, Address offset: 0x20 */ + __IO uint32_t DHR12LD; /*!< DUAL DAC 12-bit left aligned data holding register, Address offset: 0x24 */ + __IO uint32_t DHR8RD; /*!< DUAL DAC 8-bit right aligned data holding register, Address offset: 0x28 */ + __IO uint32_t DOR1; /*!< DAC channel1 data output register, Address offset: 0x2C */ + __IO uint32_t DOR2; /*!< DAC channel2 data output register, Address offset: 0x30 */ + __IO uint32_t SR; /*!< DAC status register, Address offset: 0x34 */ +} DAC_TypeDef; + +/** + * @brief DFSDM module registers + */ +typedef struct +{ + __IO uint32_t FLTCR1; /*!< DFSDM control register1, Address offset: 0x100 */ + __IO uint32_t FLTCR2; /*!< DFSDM control register2, Address offset: 0x104 */ + __IO uint32_t FLTISR; /*!< DFSDM interrupt and status register, Address offset: 0x108 */ + __IO uint32_t FLTICR; /*!< DFSDM interrupt flag clear register, Address offset: 0x10C */ + __IO uint32_t FLTJCHGR; /*!< DFSDM injected channel group selection register, Address offset: 0x110 */ + __IO uint32_t FLTFCR; /*!< DFSDM filter control register, Address offset: 0x114 */ + __IO uint32_t FLTJDATAR; /*!< DFSDM data register for injected group, Address offset: 0x118 */ + __IO uint32_t FLTRDATAR; /*!< DFSDM data register for regular group, Address offset: 0x11C */ + __IO uint32_t FLTAWHTR; /*!< DFSDM analog watchdog high threshold register, Address offset: 0x120 */ + __IO uint32_t FLTAWLTR; /*!< DFSDM analog watchdog low threshold register, Address offset: 0x124 */ + __IO uint32_t FLTAWSR; /*!< DFSDM analog watchdog status register Address offset: 0x128 */ + __IO uint32_t FLTAWCFR; /*!< DFSDM analog watchdog clear flag register Address offset: 0x12C */ + __IO uint32_t FLTEXMAX; /*!< DFSDM extreme detector maximum register, Address offset: 0x130 */ + __IO uint32_t FLTEXMIN; /*!< DFSDM extreme detector minimum register Address offset: 0x134 */ + __IO uint32_t FLTCNVTIMR; /*!< DFSDM conversion timer, Address offset: 0x138 */ +} DFSDM_Filter_TypeDef; + +/** + * @brief DFSDM channel configuration registers + */ +typedef struct +{ + __IO uint32_t CHCFGR1; /*!< DFSDM channel configuration register1, Address offset: 0x00 */ + __IO uint32_t CHCFGR2; /*!< DFSDM channel configuration register2, Address offset: 0x04 */ + __IO uint32_t CHAWSCDR; /*!< DFSDM channel analog watchdog and + short circuit detector register, Address offset: 0x08 */ + __IO uint32_t CHWDATAR; /*!< DFSDM channel watchdog filter data register, Address offset: 0x0C */ + __IO uint32_t CHDATINR; /*!< DFSDM channel data input 
register, Address offset: 0x10 */ +} DFSDM_Channel_TypeDef; + +/** + * @brief Debug MCU + */ + +typedef struct +{ + __IO uint32_t IDCODE; /*!< MCU device ID code, Address offset: 0x00 */ + __IO uint32_t CR; /*!< Debug MCU configuration register, Address offset: 0x04 */ + __IO uint32_t APB1FZ; /*!< Debug MCU APB1 freeze register, Address offset: 0x08 */ + __IO uint32_t APB2FZ; /*!< Debug MCU APB2 freeze register, Address offset: 0x0C */ +}DBGMCU_TypeDef; + +/** + * @brief DCMI + */ + +typedef struct +{ + __IO uint32_t CR; /*!< DCMI control register 1, Address offset: 0x00 */ + __IO uint32_t SR; /*!< DCMI status register, Address offset: 0x04 */ + __IO uint32_t RISR; /*!< DCMI raw interrupt status register, Address offset: 0x08 */ + __IO uint32_t IER; /*!< DCMI interrupt enable register, Address offset: 0x0C */ + __IO uint32_t MISR; /*!< DCMI masked interrupt status register, Address offset: 0x10 */ + __IO uint32_t ICR; /*!< DCMI interrupt clear register, Address offset: 0x14 */ + __IO uint32_t ESCR; /*!< DCMI embedded synchronization code register, Address offset: 0x18 */ + __IO uint32_t ESUR; /*!< DCMI embedded synchronization unmask register, Address offset: 0x1C */ + __IO uint32_t CWSTRTR; /*!< DCMI crop window start, Address offset: 0x20 */ + __IO uint32_t CWSIZER; /*!< DCMI crop window size, Address offset: 0x24 */ + __IO uint32_t DR; /*!< DCMI data register, Address offset: 0x28 */ +} DCMI_TypeDef; + +/** + * @brief DMA Controller + */ + +typedef struct +{ + __IO uint32_t CR; /*!< DMA stream x configuration register */ + __IO uint32_t NDTR; /*!< DMA stream x number of data register */ + __IO uint32_t PAR; /*!< DMA stream x peripheral address register */ + __IO uint32_t M0AR; /*!< DMA stream x memory 0 address register */ + __IO uint32_t M1AR; /*!< DMA stream x memory 1 address register */ + __IO uint32_t FCR; /*!< DMA stream x FIFO control register */ +} DMA_Stream_TypeDef; + +typedef struct +{ + __IO uint32_t LISR; /*!< DMA low interrupt status register, Address offset: 0x00 */ + __IO uint32_t HISR; /*!< DMA high interrupt status register, Address offset: 0x04 */ + __IO uint32_t LIFCR; /*!< DMA low interrupt flag clear register, Address offset: 0x08 */ + __IO uint32_t HIFCR; /*!< DMA high interrupt flag clear register, Address offset: 0x0C */ +} DMA_TypeDef; + +/** + * @brief DMA2D Controller + */ + +typedef struct +{ + __IO uint32_t CR; /*!< DMA2D Control Register, Address offset: 0x00 */ + __IO uint32_t ISR; /*!< DMA2D Interrupt Status Register, Address offset: 0x04 */ + __IO uint32_t IFCR; /*!< DMA2D Interrupt Flag Clear Register, Address offset: 0x08 */ + __IO uint32_t FGMAR; /*!< DMA2D Foreground Memory Address Register, Address offset: 0x0C */ + __IO uint32_t FGOR; /*!< DMA2D Foreground Offset Register, Address offset: 0x10 */ + __IO uint32_t BGMAR; /*!< DMA2D Background Memory Address Register, Address offset: 0x14 */ + __IO uint32_t BGOR; /*!< DMA2D Background Offset Register, Address offset: 0x18 */ + __IO uint32_t FGPFCCR; /*!< DMA2D Foreground PFC Control Register, Address offset: 0x1C */ + __IO uint32_t FGCOLR; /*!< DMA2D Foreground Color Register, Address offset: 0x20 */ + __IO uint32_t BGPFCCR; /*!< DMA2D Background PFC Control Register, Address offset: 0x24 */ + __IO uint32_t BGCOLR; /*!< DMA2D Background Color Register, Address offset: 0x28 */ + __IO uint32_t FGCMAR; /*!< DMA2D Foreground CLUT Memory Address Register, Address offset: 0x2C */ + __IO uint32_t BGCMAR; /*!< DMA2D Background CLUT Memory Address Register, Address offset: 0x30 */ + __IO uint32_t OPFCCR; /*!< 
DMA2D Output PFC Control Register, Address offset: 0x34 */ + __IO uint32_t OCOLR; /*!< DMA2D Output Color Register, Address offset: 0x38 */ + __IO uint32_t OMAR; /*!< DMA2D Output Memory Address Register, Address offset: 0x3C */ + __IO uint32_t OOR; /*!< DMA2D Output Offset Register, Address offset: 0x40 */ + __IO uint32_t NLR; /*!< DMA2D Number of Line Register, Address offset: 0x44 */ + __IO uint32_t LWR; /*!< DMA2D Line Watermark Register, Address offset: 0x48 */ + __IO uint32_t AMTCR; /*!< DMA2D AHB Master Timer Configuration Register, Address offset: 0x4C */ + uint32_t RESERVED[236]; /*!< Reserved, 0x50-0x3FF */ + __IO uint32_t FGCLUT[256]; /*!< DMA2D Foreground CLUT, Address offset:400-7FF */ + __IO uint32_t BGCLUT[256]; /*!< DMA2D Background CLUT, Address offset:800-BFF */ +} DMA2D_TypeDef; + + +/** + * @brief Ethernet MAC + */ + +typedef struct +{ + __IO uint32_t MACCR; + __IO uint32_t MACFFR; + __IO uint32_t MACHTHR; + __IO uint32_t MACHTLR; + __IO uint32_t MACMIIAR; + __IO uint32_t MACMIIDR; + __IO uint32_t MACFCR; + __IO uint32_t MACVLANTR; /* 8 */ + uint32_t RESERVED0[2]; + __IO uint32_t MACRWUFFR; /* 11 */ + __IO uint32_t MACPMTCSR; + uint32_t RESERVED1; + __IO uint32_t MACDBGR; + __IO uint32_t MACSR; /* 15 */ + __IO uint32_t MACIMR; + __IO uint32_t MACA0HR; + __IO uint32_t MACA0LR; + __IO uint32_t MACA1HR; + __IO uint32_t MACA1LR; + __IO uint32_t MACA2HR; + __IO uint32_t MACA2LR; + __IO uint32_t MACA3HR; + __IO uint32_t MACA3LR; /* 24 */ + uint32_t RESERVED2[40]; + __IO uint32_t MMCCR; /* 65 */ + __IO uint32_t MMCRIR; + __IO uint32_t MMCTIR; + __IO uint32_t MMCRIMR; + __IO uint32_t MMCTIMR; /* 69 */ + uint32_t RESERVED3[14]; + __IO uint32_t MMCTGFSCCR; /* 84 */ + __IO uint32_t MMCTGFMSCCR; + uint32_t RESERVED4[5]; + __IO uint32_t MMCTGFCR; + uint32_t RESERVED5[10]; + __IO uint32_t MMCRFCECR; + __IO uint32_t MMCRFAECR; + uint32_t RESERVED6[10]; + __IO uint32_t MMCRGUFCR; + uint32_t RESERVED7[334]; + __IO uint32_t PTPTSCR; + __IO uint32_t PTPSSIR; + __IO uint32_t PTPTSHR; + __IO uint32_t PTPTSLR; + __IO uint32_t PTPTSHUR; + __IO uint32_t PTPTSLUR; + __IO uint32_t PTPTSAR; + __IO uint32_t PTPTTHR; + __IO uint32_t PTPTTLR; + __IO uint32_t RESERVED8; + __IO uint32_t PTPTSSR; + __IO uint32_t PTPPPSCR; + uint32_t RESERVED9[564]; + __IO uint32_t DMABMR; + __IO uint32_t DMATPDR; + __IO uint32_t DMARPDR; + __IO uint32_t DMARDLAR; + __IO uint32_t DMATDLAR; + __IO uint32_t DMASR; + __IO uint32_t DMAOMR; + __IO uint32_t DMAIER; + __IO uint32_t DMAMFBOCR; + __IO uint32_t DMARSWTR; + uint32_t RESERVED10[8]; + __IO uint32_t DMACHTDR; + __IO uint32_t DMACHRDR; + __IO uint32_t DMACHTBAR; + __IO uint32_t DMACHRBAR; +} ETH_TypeDef; + +/** + * @brief External Interrupt/Event Controller + */ + +typedef struct +{ + __IO uint32_t IMR; /*!< EXTI Interrupt mask register, Address offset: 0x00 */ + __IO uint32_t EMR; /*!< EXTI Event mask register, Address offset: 0x04 */ + __IO uint32_t RTSR; /*!< EXTI Rising trigger selection register, Address offset: 0x08 */ + __IO uint32_t FTSR; /*!< EXTI Falling trigger selection register, Address offset: 0x0C */ + __IO uint32_t SWIER; /*!< EXTI Software interrupt event register, Address offset: 0x10 */ + __IO uint32_t PR; /*!< EXTI Pending register, Address offset: 0x14 */ +} EXTI_TypeDef; + +/** + * @brief FLASH Registers + */ + +typedef struct +{ + __IO uint32_t ACR; /*!< FLASH access control register, Address offset: 0x00 */ + __IO uint32_t KEYR; /*!< FLASH key register, Address offset: 0x04 */ + __IO uint32_t OPTKEYR; /*!< FLASH option key register, Address 
offset: 0x08 */ + __IO uint32_t SR; /*!< FLASH status register, Address offset: 0x0C */ + __IO uint32_t CR; /*!< FLASH control register, Address offset: 0x10 */ + __IO uint32_t OPTCR; /*!< FLASH option control register , Address offset: 0x14 */ + __IO uint32_t OPTCR1; /*!< FLASH option control register 1 , Address offset: 0x18 */ +} FLASH_TypeDef; + + + +/** + * @brief Flexible Memory Controller + */ + +typedef struct +{ + __IO uint32_t BTCR[8]; /*!< NOR/PSRAM chip-select control register(BCR) and chip-select timing register(BTR), Address offset: 0x00-1C */ +} FMC_Bank1_TypeDef; + +/** + * @brief Flexible Memory Controller Bank1E + */ + +typedef struct +{ + __IO uint32_t BWTR[7]; /*!< NOR/PSRAM write timing registers, Address offset: 0x104-0x11C */ +} FMC_Bank1E_TypeDef; + +/** + * @brief Flexible Memory Controller Bank3 + */ + +typedef struct +{ + __IO uint32_t PCR; /*!< NAND Flash control register, Address offset: 0x80 */ + __IO uint32_t SR; /*!< NAND Flash FIFO status and interrupt register, Address offset: 0x84 */ + __IO uint32_t PMEM; /*!< NAND Flash Common memory space timing register, Address offset: 0x88 */ + __IO uint32_t PATT; /*!< NAND Flash Attribute memory space timing register, Address offset: 0x8C */ + uint32_t RESERVED0; /*!< Reserved, 0x90 */ + __IO uint32_t ECCR; /*!< NAND Flash ECC result registers, Address offset: 0x94 */ +} FMC_Bank3_TypeDef; + +/** + * @brief Flexible Memory Controller Bank5_6 + */ + +typedef struct +{ + __IO uint32_t SDCR[2]; /*!< SDRAM Control registers , Address offset: 0x140-0x144 */ + __IO uint32_t SDTR[2]; /*!< SDRAM Timing registers , Address offset: 0x148-0x14C */ + __IO uint32_t SDCMR; /*!< SDRAM Command Mode register, Address offset: 0x150 */ + __IO uint32_t SDRTR; /*!< SDRAM Refresh Timer register, Address offset: 0x154 */ + __IO uint32_t SDSR; /*!< SDRAM Status register, Address offset: 0x158 */ +} FMC_Bank5_6_TypeDef; + + +/** + * @brief General Purpose I/O + */ + +typedef struct +{ + __IO uint32_t MODER; /*!< GPIO port mode register, Address offset: 0x00 */ + __IO uint32_t OTYPER; /*!< GPIO port output type register, Address offset: 0x04 */ + __IO uint32_t OSPEEDR; /*!< GPIO port output speed register, Address offset: 0x08 */ + __IO uint32_t PUPDR; /*!< GPIO port pull-up/pull-down register, Address offset: 0x0C */ + __IO uint32_t IDR; /*!< GPIO port input data register, Address offset: 0x10 */ + __IO uint32_t ODR; /*!< GPIO port output data register, Address offset: 0x14 */ + __IO uint32_t BSRR; /*!< GPIO port bit set/reset register, Address offset: 0x18 */ + __IO uint32_t LCKR; /*!< GPIO port configuration lock register, Address offset: 0x1C */ + __IO uint32_t AFR[2]; /*!< GPIO alternate function registers, Address offset: 0x20-0x24 */ +} GPIO_TypeDef; + +/** + * @brief System configuration controller + */ + +typedef struct +{ + __IO uint32_t MEMRMP; /*!< SYSCFG memory remap register, Address offset: 0x00 */ + __IO uint32_t PMC; /*!< SYSCFG peripheral mode configuration register, Address offset: 0x04 */ + __IO uint32_t EXTICR[4]; /*!< SYSCFG external interrupt configuration registers, Address offset: 0x08-0x14 */ + uint32_t RESERVED; /*!< Reserved, 0x18 */ + __IO uint32_t CBR; /*!< SYSCFG Class B register, Address offset: 0x1C */ + __IO uint32_t CMPCR; /*!< SYSCFG Compensation cell control register, Address offset: 0x20 */ +} SYSCFG_TypeDef; + +/** + * @brief Inter-integrated Circuit Interface + */ + +typedef struct +{ + __IO uint32_t CR1; /*!< I2C Control register 1, Address offset: 0x00 */ + __IO uint32_t CR2; /*!< I2C Control 
register 2, Address offset: 0x04 */ + __IO uint32_t OAR1; /*!< I2C Own address 1 register, Address offset: 0x08 */ + __IO uint32_t OAR2; /*!< I2C Own address 2 register, Address offset: 0x0C */ + __IO uint32_t TIMINGR; /*!< I2C Timing register, Address offset: 0x10 */ + __IO uint32_t TIMEOUTR; /*!< I2C Timeout register, Address offset: 0x14 */ + __IO uint32_t ISR; /*!< I2C Interrupt and status register, Address offset: 0x18 */ + __IO uint32_t ICR; /*!< I2C Interrupt clear register, Address offset: 0x1C */ + __IO uint32_t PECR; /*!< I2C PEC register, Address offset: 0x20 */ + __IO uint32_t RXDR; /*!< I2C Receive data register, Address offset: 0x24 */ + __IO uint32_t TXDR; /*!< I2C Transmit data register, Address offset: 0x28 */ +} I2C_TypeDef; + +/** + * @brief Independent WATCHDOG + */ + +typedef struct +{ + __IO uint32_t KR; /*!< IWDG Key register, Address offset: 0x00 */ + __IO uint32_t PR; /*!< IWDG Prescaler register, Address offset: 0x04 */ + __IO uint32_t RLR; /*!< IWDG Reload register, Address offset: 0x08 */ + __IO uint32_t SR; /*!< IWDG Status register, Address offset: 0x0C */ + __IO uint32_t WINR; /*!< IWDG Window register, Address offset: 0x10 */ +} IWDG_TypeDef; + + +/** + * @brief LCD-TFT Display Controller + */ + +typedef struct +{ + uint32_t RESERVED0[2]; /*!< Reserved, 0x00-0x04 */ + __IO uint32_t SSCR; /*!< LTDC Synchronization Size Configuration Register, Address offset: 0x08 */ + __IO uint32_t BPCR; /*!< LTDC Back Porch Configuration Register, Address offset: 0x0C */ + __IO uint32_t AWCR; /*!< LTDC Active Width Configuration Register, Address offset: 0x10 */ + __IO uint32_t TWCR; /*!< LTDC Total Width Configuration Register, Address offset: 0x14 */ + __IO uint32_t GCR; /*!< LTDC Global Control Register, Address offset: 0x18 */ + uint32_t RESERVED1[2]; /*!< Reserved, 0x1C-0x20 */ + __IO uint32_t SRCR; /*!< LTDC Shadow Reload Configuration Register, Address offset: 0x24 */ + uint32_t RESERVED2[1]; /*!< Reserved, 0x28 */ + __IO uint32_t BCCR; /*!< LTDC Background Color Configuration Register, Address offset: 0x2C */ + uint32_t RESERVED3[1]; /*!< Reserved, 0x30 */ + __IO uint32_t IER; /*!< LTDC Interrupt Enable Register, Address offset: 0x34 */ + __IO uint32_t ISR; /*!< LTDC Interrupt Status Register, Address offset: 0x38 */ + __IO uint32_t ICR; /*!< LTDC Interrupt Clear Register, Address offset: 0x3C */ + __IO uint32_t LIPCR; /*!< LTDC Line Interrupt Position Configuration Register, Address offset: 0x40 */ + __IO uint32_t CPSR; /*!< LTDC Current Position Status Register, Address offset: 0x44 */ + __IO uint32_t CDSR; /*!< LTDC Current Display Status Register, Address offset: 0x48 */ +} LTDC_TypeDef; + +/** + * @brief LCD-TFT Display layer x Controller + */ + +typedef struct +{ + __IO uint32_t CR; /*!< LTDC Layerx Control Register Address offset: 0x84 */ + __IO uint32_t WHPCR; /*!< LTDC Layerx Window Horizontal Position Configuration Register Address offset: 0x88 */ + __IO uint32_t WVPCR; /*!< LTDC Layerx Window Vertical Position Configuration Register Address offset: 0x8C */ + __IO uint32_t CKCR; /*!< LTDC Layerx Color Keying Configuration Register Address offset: 0x90 */ + __IO uint32_t PFCR; /*!< LTDC Layerx Pixel Format Configuration Register Address offset: 0x94 */ + __IO uint32_t CACR; /*!< LTDC Layerx Constant Alpha Configuration Register Address offset: 0x98 */ + __IO uint32_t DCCR; /*!< LTDC Layerx Default Color Configuration Register Address offset: 0x9C */ + __IO uint32_t BFCR; /*!< LTDC Layerx Blending Factors Configuration Register Address offset: 0xA0 */ + 
uint32_t RESERVED0[2]; /*!< Reserved */ + __IO uint32_t CFBAR; /*!< LTDC Layerx Color Frame Buffer Address Register Address offset: 0xAC */ + __IO uint32_t CFBLR; /*!< LTDC Layerx Color Frame Buffer Length Register Address offset: 0xB0 */ + __IO uint32_t CFBLNR; /*!< LTDC Layerx ColorFrame Buffer Line Number Register Address offset: 0xB4 */ + uint32_t RESERVED1[3]; /*!< Reserved */ + __IO uint32_t CLUTWR; /*!< LTDC Layerx CLUT Write Register Address offset: 0x144 */ + +} LTDC_Layer_TypeDef; + +/** + * @brief Power Control + */ + +typedef struct +{ + __IO uint32_t CR1; /*!< PWR power control register 1, Address offset: 0x00 */ + __IO uint32_t CSR1; /*!< PWR power control/status register 2, Address offset: 0x04 */ + __IO uint32_t CR2; /*!< PWR power control register 2, Address offset: 0x08 */ + __IO uint32_t CSR2; /*!< PWR power control/status register 2, Address offset: 0x0C */ +} PWR_TypeDef; + + +/** + * @brief Reset and Clock Control + */ + +typedef struct +{ + __IO uint32_t CR; /*!< RCC clock control register, Address offset: 0x00 */ + __IO uint32_t PLLCFGR; /*!< RCC PLL configuration register, Address offset: 0x04 */ + __IO uint32_t CFGR; /*!< RCC clock configuration register, Address offset: 0x08 */ + __IO uint32_t CIR; /*!< RCC clock interrupt register, Address offset: 0x0C */ + __IO uint32_t AHB1RSTR; /*!< RCC AHB1 peripheral reset register, Address offset: 0x10 */ + __IO uint32_t AHB2RSTR; /*!< RCC AHB2 peripheral reset register, Address offset: 0x14 */ + __IO uint32_t AHB3RSTR; /*!< RCC AHB3 peripheral reset register, Address offset: 0x18 */ + uint32_t RESERVED0; /*!< Reserved, 0x1C */ + __IO uint32_t APB1RSTR; /*!< RCC APB1 peripheral reset register, Address offset: 0x20 */ + __IO uint32_t APB2RSTR; /*!< RCC APB2 peripheral reset register, Address offset: 0x24 */ + uint32_t RESERVED1[2]; /*!< Reserved, 0x28-0x2C */ + __IO uint32_t AHB1ENR; /*!< RCC AHB1 peripheral clock register, Address offset: 0x30 */ + __IO uint32_t AHB2ENR; /*!< RCC AHB2 peripheral clock register, Address offset: 0x34 */ + __IO uint32_t AHB3ENR; /*!< RCC AHB3 peripheral clock register, Address offset: 0x38 */ + uint32_t RESERVED2; /*!< Reserved, 0x3C */ + __IO uint32_t APB1ENR; /*!< RCC APB1 peripheral clock enable register, Address offset: 0x40 */ + __IO uint32_t APB2ENR; /*!< RCC APB2 peripheral clock enable register, Address offset: 0x44 */ + uint32_t RESERVED3[2]; /*!< Reserved, 0x48-0x4C */ + __IO uint32_t AHB1LPENR; /*!< RCC AHB1 peripheral clock enable in low power mode register, Address offset: 0x50 */ + __IO uint32_t AHB2LPENR; /*!< RCC AHB2 peripheral clock enable in low power mode register, Address offset: 0x54 */ + __IO uint32_t AHB3LPENR; /*!< RCC AHB3 peripheral clock enable in low power mode register, Address offset: 0x58 */ + uint32_t RESERVED4; /*!< Reserved, 0x5C */ + __IO uint32_t APB1LPENR; /*!< RCC APB1 peripheral clock enable in low power mode register, Address offset: 0x60 */ + __IO uint32_t APB2LPENR; /*!< RCC APB2 peripheral clock enable in low power mode register, Address offset: 0x64 */ + uint32_t RESERVED5[2]; /*!< Reserved, 0x68-0x6C */ + __IO uint32_t BDCR; /*!< RCC Backup domain control register, Address offset: 0x70 */ + __IO uint32_t CSR; /*!< RCC clock control & status register, Address offset: 0x74 */ + uint32_t RESERVED6[2]; /*!< Reserved, 0x78-0x7C */ + __IO uint32_t SSCGR; /*!< RCC spread spectrum clock generation register, Address offset: 0x80 */ + __IO uint32_t PLLI2SCFGR; /*!< RCC PLLI2S configuration register, Address offset: 0x84 */ + __IO uint32_t PLLSAICFGR; /*!< 
RCC PLLSAI configuration register, Address offset: 0x88 */ + __IO uint32_t DCKCFGR1; /*!< RCC Dedicated Clocks configuration register1, Address offset: 0x8C */ + __IO uint32_t DCKCFGR2; /*!< RCC Dedicated Clocks configuration register 2, Address offset: 0x90 */ + +} RCC_TypeDef; + +/** + * @brief Real-Time Clock + */ + +typedef struct +{ + __IO uint32_t TR; /*!< RTC time register, Address offset: 0x00 */ + __IO uint32_t DR; /*!< RTC date register, Address offset: 0x04 */ + __IO uint32_t CR; /*!< RTC control register, Address offset: 0x08 */ + __IO uint32_t ISR; /*!< RTC initialization and status register, Address offset: 0x0C */ + __IO uint32_t PRER; /*!< RTC prescaler register, Address offset: 0x10 */ + __IO uint32_t WUTR; /*!< RTC wakeup timer register, Address offset: 0x14 */ + uint32_t reserved; /*!< Reserved */ + __IO uint32_t ALRMAR; /*!< RTC alarm A register, Address offset: 0x1C */ + __IO uint32_t ALRMBR; /*!< RTC alarm B register, Address offset: 0x20 */ + __IO uint32_t WPR; /*!< RTC write protection register, Address offset: 0x24 */ + __IO uint32_t SSR; /*!< RTC sub second register, Address offset: 0x28 */ + __IO uint32_t SHIFTR; /*!< RTC shift control register, Address offset: 0x2C */ + __IO uint32_t TSTR; /*!< RTC time stamp time register, Address offset: 0x30 */ + __IO uint32_t TSDR; /*!< RTC time stamp date register, Address offset: 0x34 */ + __IO uint32_t TSSSR; /*!< RTC time-stamp sub second register, Address offset: 0x38 */ + __IO uint32_t CALR; /*!< RTC calibration register, Address offset: 0x3C */ + __IO uint32_t TAMPCR; /*!< RTC tamper configuration register, Address offset: 0x40 */ + __IO uint32_t ALRMASSR; /*!< RTC alarm A sub second register, Address offset: 0x44 */ + __IO uint32_t ALRMBSSR; /*!< RTC alarm B sub second register, Address offset: 0x48 */ + __IO uint32_t OR; /*!< RTC option register, Address offset: 0x4C */ + __IO uint32_t BKP0R; /*!< RTC backup register 0, Address offset: 0x50 */ + __IO uint32_t BKP1R; /*!< RTC backup register 1, Address offset: 0x54 */ + __IO uint32_t BKP2R; /*!< RTC backup register 2, Address offset: 0x58 */ + __IO uint32_t BKP3R; /*!< RTC backup register 3, Address offset: 0x5C */ + __IO uint32_t BKP4R; /*!< RTC backup register 4, Address offset: 0x60 */ + __IO uint32_t BKP5R; /*!< RTC backup register 5, Address offset: 0x64 */ + __IO uint32_t BKP6R; /*!< RTC backup register 6, Address offset: 0x68 */ + __IO uint32_t BKP7R; /*!< RTC backup register 7, Address offset: 0x6C */ + __IO uint32_t BKP8R; /*!< RTC backup register 8, Address offset: 0x70 */ + __IO uint32_t BKP9R; /*!< RTC backup register 9, Address offset: 0x74 */ + __IO uint32_t BKP10R; /*!< RTC backup register 10, Address offset: 0x78 */ + __IO uint32_t BKP11R; /*!< RTC backup register 11, Address offset: 0x7C */ + __IO uint32_t BKP12R; /*!< RTC backup register 12, Address offset: 0x80 */ + __IO uint32_t BKP13R; /*!< RTC backup register 13, Address offset: 0x84 */ + __IO uint32_t BKP14R; /*!< RTC backup register 14, Address offset: 0x88 */ + __IO uint32_t BKP15R; /*!< RTC backup register 15, Address offset: 0x8C */ + __IO uint32_t BKP16R; /*!< RTC backup register 16, Address offset: 0x90 */ + __IO uint32_t BKP17R; /*!< RTC backup register 17, Address offset: 0x94 */ + __IO uint32_t BKP18R; /*!< RTC backup register 18, Address offset: 0x98 */ + __IO uint32_t BKP19R; /*!< RTC backup register 19, Address offset: 0x9C */ + __IO uint32_t BKP20R; /*!< RTC backup register 20, Address offset: 0xA0 */ + __IO uint32_t BKP21R; /*!< RTC backup register 21, Address offset: 0xA4 */ + 
__IO uint32_t BKP22R; /*!< RTC backup register 22, Address offset: 0xA8 */ + __IO uint32_t BKP23R; /*!< RTC backup register 23, Address offset: 0xAC */ + __IO uint32_t BKP24R; /*!< RTC backup register 24, Address offset: 0xB0 */ + __IO uint32_t BKP25R; /*!< RTC backup register 25, Address offset: 0xB4 */ + __IO uint32_t BKP26R; /*!< RTC backup register 26, Address offset: 0xB8 */ + __IO uint32_t BKP27R; /*!< RTC backup register 27, Address offset: 0xBC */ + __IO uint32_t BKP28R; /*!< RTC backup register 28, Address offset: 0xC0 */ + __IO uint32_t BKP29R; /*!< RTC backup register 29, Address offset: 0xC4 */ + __IO uint32_t BKP30R; /*!< RTC backup register 30, Address offset: 0xC8 */ + __IO uint32_t BKP31R; /*!< RTC backup register 31, Address offset: 0xCC */ +} RTC_TypeDef; + + +/** + * @brief Serial Audio Interface + */ + +typedef struct +{ + __IO uint32_t GCR; /*!< SAI global configuration register, Address offset: 0x00 */ +} SAI_TypeDef; + +typedef struct +{ + __IO uint32_t CR1; /*!< SAI block x configuration register 1, Address offset: 0x04 */ + __IO uint32_t CR2; /*!< SAI block x configuration register 2, Address offset: 0x08 */ + __IO uint32_t FRCR; /*!< SAI block x frame configuration register, Address offset: 0x0C */ + __IO uint32_t SLOTR; /*!< SAI block x slot register, Address offset: 0x10 */ + __IO uint32_t IMR; /*!< SAI block x interrupt mask register, Address offset: 0x14 */ + __IO uint32_t SR; /*!< SAI block x status register, Address offset: 0x18 */ + __IO uint32_t CLRFR; /*!< SAI block x clear flag register, Address offset: 0x1C */ + __IO uint32_t DR; /*!< SAI block x data register, Address offset: 0x20 */ +} SAI_Block_TypeDef; + +/** + * @brief SPDIF-RX Interface + */ + +typedef struct +{ + __IO uint32_t CR; /*!< Control register, Address offset: 0x00 */ + __IO uint32_t IMR; /*!< Interrupt mask register, Address offset: 0x04 */ + __IO uint32_t SR; /*!< Status register, Address offset: 0x08 */ + __IO uint32_t IFCR; /*!< Interrupt Flag Clear register, Address offset: 0x0C */ + __IO uint32_t DR; /*!< Data input register, Address offset: 0x10 */ + __IO uint32_t CSR; /*!< Channel Status register, Address offset: 0x14 */ + __IO uint32_t DIR; /*!< Debug Information register, Address offset: 0x18 */ +} SPDIFRX_TypeDef; + +/** + * @brief SD host Interface + */ + +typedef struct +{ + __IO uint32_t POWER; /*!< SDMMC power control register, Address offset: 0x00 */ + __IO uint32_t CLKCR; /*!< SDMMClock control register, Address offset: 0x04 */ + __IO uint32_t ARG; /*!< SDMMC argument register, Address offset: 0x08 */ + __IO uint32_t CMD; /*!< SDMMC command register, Address offset: 0x0C */ + __I uint32_t RESPCMD; /*!< SDMMC command response register, Address offset: 0x10 */ + __I uint32_t RESP1; /*!< SDMMC response 1 register, Address offset: 0x14 */ + __I uint32_t RESP2; /*!< SDMMC response 2 register, Address offset: 0x18 */ + __I uint32_t RESP3; /*!< SDMMC response 3 register, Address offset: 0x1C */ + __I uint32_t RESP4; /*!< SDMMC response 4 register, Address offset: 0x20 */ + __IO uint32_t DTIMER; /*!< SDMMC data timer register, Address offset: 0x24 */ + __IO uint32_t DLEN; /*!< SDMMC data length register, Address offset: 0x28 */ + __IO uint32_t DCTRL; /*!< SDMMC data control register, Address offset: 0x2C */ + __I uint32_t DCOUNT; /*!< SDMMC data counter register, Address offset: 0x30 */ + __I uint32_t STA; /*!< SDMMC status register, Address offset: 0x34 */ + __IO uint32_t ICR; /*!< SDMMC interrupt clear register, Address offset: 0x38 */ + __IO uint32_t MASK; /*!< SDMMC mask 
register, Address offset: 0x3C */ + uint32_t RESERVED0[2]; /*!< Reserved, 0x40-0x44 */ + __I uint32_t FIFOCNT; /*!< SDMMC FIFO counter register, Address offset: 0x48 */ + uint32_t RESERVED1[13]; /*!< Reserved, 0x4C-0x7C */ + __IO uint32_t FIFO; /*!< SDMMC data FIFO register, Address offset: 0x80 */ +} SDMMC_TypeDef; + +/** + * @brief Serial Peripheral Interface + */ + +typedef struct +{ + __IO uint32_t CR1; /*!< SPI control register 1 (not used in I2S mode), Address offset: 0x00 */ + __IO uint32_t CR2; /*!< SPI control register 2, Address offset: 0x04 */ + __IO uint32_t SR; /*!< SPI status register, Address offset: 0x08 */ + __IO uint32_t DR; /*!< SPI data register, Address offset: 0x0C */ + __IO uint32_t CRCPR; /*!< SPI CRC polynomial register (not used in I2S mode), Address offset: 0x10 */ + __IO uint32_t RXCRCR; /*!< SPI RX CRC register (not used in I2S mode), Address offset: 0x14 */ + __IO uint32_t TXCRCR; /*!< SPI TX CRC register (not used in I2S mode), Address offset: 0x18 */ + __IO uint32_t I2SCFGR; /*!< SPI_I2S configuration register, Address offset: 0x1C */ + __IO uint32_t I2SPR; /*!< SPI_I2S prescaler register, Address offset: 0x20 */ +} SPI_TypeDef; + +/** + * @brief QUAD Serial Peripheral Interface + */ + +typedef struct +{ + __IO uint32_t CR; /*!< QUADSPI Control register, Address offset: 0x00 */ + __IO uint32_t DCR; /*!< QUADSPI Device Configuration register, Address offset: 0x04 */ + __IO uint32_t SR; /*!< QUADSPI Status register, Address offset: 0x08 */ + __IO uint32_t FCR; /*!< QUADSPI Flag Clear register, Address offset: 0x0C */ + __IO uint32_t DLR; /*!< QUADSPI Data Length register, Address offset: 0x10 */ + __IO uint32_t CCR; /*!< QUADSPI Communication Configuration register, Address offset: 0x14 */ + __IO uint32_t AR; /*!< QUADSPI Address register, Address offset: 0x18 */ + __IO uint32_t ABR; /*!< QUADSPI Alternate Bytes register, Address offset: 0x1C */ + __IO uint32_t DR; /*!< QUADSPI Data register, Address offset: 0x20 */ + __IO uint32_t PSMKR; /*!< QUADSPI Polling Status Mask register, Address offset: 0x24 */ + __IO uint32_t PSMAR; /*!< QUADSPI Polling Status Match register, Address offset: 0x28 */ + __IO uint32_t PIR; /*!< QUADSPI Polling Interval register, Address offset: 0x2C */ + __IO uint32_t LPTR; /*!< QUADSPI Low Power Timeout register, Address offset: 0x30 */ +} QUADSPI_TypeDef; + +/** + * @brief TIM + */ + +typedef struct +{ + __IO uint32_t CR1; /*!< TIM control register 1, Address offset: 0x00 */ + __IO uint32_t CR2; /*!< TIM control register 2, Address offset: 0x04 */ + __IO uint32_t SMCR; /*!< TIM slave mode control register, Address offset: 0x08 */ + __IO uint32_t DIER; /*!< TIM DMA/interrupt enable register, Address offset: 0x0C */ + __IO uint32_t SR; /*!< TIM status register, Address offset: 0x10 */ + __IO uint32_t EGR; /*!< TIM event generation register, Address offset: 0x14 */ + __IO uint32_t CCMR1; /*!< TIM capture/compare mode register 1, Address offset: 0x18 */ + __IO uint32_t CCMR2; /*!< TIM capture/compare mode register 2, Address offset: 0x1C */ + __IO uint32_t CCER; /*!< TIM capture/compare enable register, Address offset: 0x20 */ + __IO uint32_t CNT; /*!< TIM counter register, Address offset: 0x24 */ + __IO uint32_t PSC; /*!< TIM prescaler, Address offset: 0x28 */ + __IO uint32_t ARR; /*!< TIM auto-reload register, Address offset: 0x2C */ + __IO uint32_t RCR; /*!< TIM repetition counter register, Address offset: 0x30 */ + __IO uint32_t CCR1; /*!< TIM capture/compare register 1, Address offset: 0x34 */ + __IO uint32_t CCR2; /*!< TIM 
capture/compare register 2, Address offset: 0x38 */ + __IO uint32_t CCR3; /*!< TIM capture/compare register 3, Address offset: 0x3C */ + __IO uint32_t CCR4; /*!< TIM capture/compare register 4, Address offset: 0x40 */ + __IO uint32_t BDTR; /*!< TIM break and dead-time register, Address offset: 0x44 */ + __IO uint32_t DCR; /*!< TIM DMA control register, Address offset: 0x48 */ + __IO uint32_t DMAR; /*!< TIM DMA address for full transfer, Address offset: 0x4C */ + __IO uint32_t OR; /*!< TIM option register, Address offset: 0x50 */ + __IO uint32_t CCMR3; /*!< TIM capture/compare mode register 3, Address offset: 0x54 */ + __IO uint32_t CCR5; /*!< TIM capture/compare mode register5, Address offset: 0x58 */ + __IO uint32_t CCR6; /*!< TIM capture/compare mode register6, Address offset: 0x5C */ + __IO uint32_t AF1; /*!< TIM Alternate function option register 1, Address offset: 0x60 */ + __IO uint32_t AF2; /*!< TIM Alternate function option register 2, Address offset: 0x64 */ + +} TIM_TypeDef; + +/** + * @brief LPTIMIMER + */ +typedef struct +{ + __IO uint32_t ISR; /*!< LPTIM Interrupt and Status register, Address offset: 0x00 */ + __IO uint32_t ICR; /*!< LPTIM Interrupt Clear register, Address offset: 0x04 */ + __IO uint32_t IER; /*!< LPTIM Interrupt Enable register, Address offset: 0x08 */ + __IO uint32_t CFGR; /*!< LPTIM Configuration register, Address offset: 0x0C */ + __IO uint32_t CR; /*!< LPTIM Control register, Address offset: 0x10 */ + __IO uint32_t CMP; /*!< LPTIM Compare register, Address offset: 0x14 */ + __IO uint32_t ARR; /*!< LPTIM Autoreload register, Address offset: 0x18 */ + __IO uint32_t CNT; /*!< LPTIM Counter register, Address offset: 0x1C */ +} LPTIM_TypeDef; + + +/** + * @brief Universal Synchronous Asynchronous Receiver Transmitter + */ + +typedef struct +{ + __IO uint32_t CR1; /*!< USART Control register 1, Address offset: 0x00 */ + __IO uint32_t CR2; /*!< USART Control register 2, Address offset: 0x04 */ + __IO uint32_t CR3; /*!< USART Control register 3, Address offset: 0x08 */ + __IO uint32_t BRR; /*!< USART Baud rate register, Address offset: 0x0C */ + __IO uint32_t GTPR; /*!< USART Guard time and prescaler register, Address offset: 0x10 */ + __IO uint32_t RTOR; /*!< USART Receiver Time Out register, Address offset: 0x14 */ + __IO uint32_t RQR; /*!< USART Request register, Address offset: 0x18 */ + __IO uint32_t ISR; /*!< USART Interrupt and status register, Address offset: 0x1C */ + __IO uint32_t ICR; /*!< USART Interrupt flag Clear register, Address offset: 0x20 */ + __IO uint32_t RDR; /*!< USART Receive Data register, Address offset: 0x24 */ + __IO uint32_t TDR; /*!< USART Transmit Data register, Address offset: 0x28 */ +} USART_TypeDef; + + +/** + * @brief Window WATCHDOG + */ + +typedef struct +{ + __IO uint32_t CR; /*!< WWDG Control register, Address offset: 0x00 */ + __IO uint32_t CFR; /*!< WWDG Configuration register, Address offset: 0x04 */ + __IO uint32_t SR; /*!< WWDG Status register, Address offset: 0x08 */ +} WWDG_TypeDef; + + +/** + * @brief RNG + */ + +typedef struct +{ + __IO uint32_t CR; /*!< RNG control register, Address offset: 0x00 */ + __IO uint32_t SR; /*!< RNG status register, Address offset: 0x04 */ + __IO uint32_t DR; /*!< RNG data register, Address offset: 0x08 */ +} RNG_TypeDef; + +/** + * @} + */ + +/** + * @brief USB_OTG_Core_Registers + */ +typedef struct +{ + __IO uint32_t GOTGCTL; /*!< USB_OTG Control and Status Register 000h */ + __IO uint32_t GOTGINT; /*!< USB_OTG Interrupt Register 004h */ + __IO uint32_t GAHBCFG; /*!< Core AHB 
Configuration Register 008h */ + __IO uint32_t GUSBCFG; /*!< Core USB Configuration Register 00Ch */ + __IO uint32_t GRSTCTL; /*!< Core Reset Register 010h */ + __IO uint32_t GINTSTS; /*!< Core Interrupt Register 014h */ + __IO uint32_t GINTMSK; /*!< Core Interrupt Mask Register 018h */ + __IO uint32_t GRXSTSR; /*!< Receive Sts Q Read Register 01Ch */ + __IO uint32_t GRXSTSP; /*!< Receive Sts Q Read & POP Register 020h */ + __IO uint32_t GRXFSIZ; /*!< Receive FIFO Size Register 024h */ + __IO uint32_t DIEPTXF0_HNPTXFSIZ; /*!< EP0 / Non Periodic Tx FIFO Size Register 028h */ + __IO uint32_t HNPTXSTS; /*!< Non Periodic Tx FIFO/Queue Sts reg 02Ch */ + uint32_t Reserved30[2]; /*!< Reserved 030h */ + __IO uint32_t GCCFG; /*!< General Purpose IO Register 038h */ + __IO uint32_t CID; /*!< User ID Register 03Ch */ + uint32_t Reserved5[3]; /*!< Reserved 040h-048h */ + __IO uint32_t GHWCFG3; /*!< User HW config3 04Ch */ + uint32_t Reserved6; /*!< Reserved 050h */ + __IO uint32_t GLPMCFG; /*!< LPM Register 054h */ + uint32_t Reserved7; /*!< Reserved 058h */ + __IO uint32_t GDFIFOCFG; /*!< DFIFO Software Config Register 05Ch */ + uint32_t Reserved43[40]; /*!< Reserved 60h-0FFh */ + __IO uint32_t HPTXFSIZ; /*!< Host Periodic Tx FIFO Size Reg 100h */ + __IO uint32_t DIEPTXF[0x0F]; /*!< dev Periodic Transmit FIFO 104h-13Ch */ +} USB_OTG_GlobalTypeDef; + + +/** + * @brief USB_OTG_device_Registers + */ +typedef struct +{ + __IO uint32_t DCFG; /*!< dev Configuration Register 800h */ + __IO uint32_t DCTL; /*!< dev Control Register 804h */ + __IO uint32_t DSTS; /*!< dev Status Register (RO) 808h */ + uint32_t Reserved0C; /*!< Reserved 80Ch */ + __IO uint32_t DIEPMSK; /*!< dev IN Endpoint Mask 810h */ + __IO uint32_t DOEPMSK; /*!< dev OUT Endpoint Mask 814h */ + __IO uint32_t DAINT; /*!< dev All Endpoints Itr Reg 818h */ + __IO uint32_t DAINTMSK; /*!< dev All Endpoints Itr Mask 81Ch */ + uint32_t Reserved20; /*!< Reserved 820h */ + uint32_t Reserved9; /*!< Reserved 824h */ + __IO uint32_t DVBUSDIS; /*!< dev VBUS discharge Register 828h */ + __IO uint32_t DVBUSPULSE; /*!< dev VBUS Pulse Register 82Ch */ + __IO uint32_t DTHRCTL; /*!< dev threshold 830h */ + __IO uint32_t DIEPEMPMSK; /*!< dev empty msk 834h */ + __IO uint32_t DEACHINT; /*!< dedicated EP interrupt 838h */ + __IO uint32_t DEACHMSK; /*!< dedicated EP msk 83Ch */ + uint32_t Reserved40; /*!< dedicated EP mask 840h */ + __IO uint32_t DINEP1MSK; /*!< dedicated EP mask 844h */ + uint32_t Reserved44[15]; /*!< Reserved 844-87Ch */ + __IO uint32_t DOUTEP1MSK; /*!< dedicated EP msk 884h */ +} USB_OTG_DeviceTypeDef; + + +/** + * @brief USB_OTG_IN_Endpoint-Specific_Register + */ +typedef struct +{ + __IO uint32_t DIEPCTL; /*!< dev IN Endpoint Control Reg 900h + (ep_num * 20h) + 00h */ + uint32_t Reserved04; /*!< Reserved 900h + (ep_num * 20h) + 04h */ + __IO uint32_t DIEPINT; /*!< dev IN Endpoint Itr Reg 900h + (ep_num * 20h) + 08h */ + uint32_t Reserved0C; /*!< Reserved 900h + (ep_num * 20h) + 0Ch */ + __IO uint32_t DIEPTSIZ; /*!< IN Endpoint Txfer Size 900h + (ep_num * 20h) + 10h */ + __IO uint32_t DIEPDMA; /*!< IN Endpoint DMA Address Reg 900h + (ep_num * 20h) + 14h */ + __IO uint32_t DTXFSTS; /*!< IN Endpoint Tx FIFO Status Reg 900h + (ep_num * 20h) + 18h */ + uint32_t Reserved18; /*!< Reserved 900h+(ep_num*20h)+1Ch-900h+ (ep_num * 20h) + 1Ch */ +} USB_OTG_INEndpointTypeDef; + + +/** + * @brief USB_OTG_OUT_Endpoint-Specific_Registers + */ +typedef struct +{ + __IO uint32_t DOEPCTL; /*!< dev OUT Endpoint Control Reg B00h + (ep_num * 20h) + 00h */ + uint32_t 
Reserved04; /*!< Reserved B00h + (ep_num * 20h) + 04h */ + __IO uint32_t DOEPINT; /*!< dev OUT Endpoint Itr Reg B00h + (ep_num * 20h) + 08h */ + uint32_t Reserved0C; /*!< Reserved B00h + (ep_num * 20h) + 0Ch */ + __IO uint32_t DOEPTSIZ; /*!< dev OUT Endpoint Txfer Size B00h + (ep_num * 20h) + 10h */ + __IO uint32_t DOEPDMA; /*!< dev OUT Endpoint DMA Address B00h + (ep_num * 20h) + 14h */ + uint32_t Reserved18[2]; /*!< Reserved B00h + (ep_num * 20h) + 18h - B00h + (ep_num * 20h) + 1Ch */ +} USB_OTG_OUTEndpointTypeDef; + + +/** + * @brief USB_OTG_Host_Mode_Register_Structures + */ +typedef struct +{ + __IO uint32_t HCFG; /*!< Host Configuration Register 400h */ + __IO uint32_t HFIR; /*!< Host Frame Interval Register 404h */ + __IO uint32_t HFNUM; /*!< Host Frame Nbr/Frame Remaining 408h */ + uint32_t Reserved40C; /*!< Reserved 40Ch */ + __IO uint32_t HPTXSTS; /*!< Host Periodic Tx FIFO/ Queue Status 410h */ + __IO uint32_t HAINT; /*!< Host All Channels Interrupt Register 414h */ + __IO uint32_t HAINTMSK; /*!< Host All Channels Interrupt Mask 418h */ +} USB_OTG_HostTypeDef; + +/** + * @brief USB_OTG_Host_Channel_Specific_Registers + */ +typedef struct +{ + __IO uint32_t HCCHAR; /*!< Host Channel Characteristics Register 500h */ + __IO uint32_t HCSPLT; /*!< Host Channel Split Control Register 504h */ + __IO uint32_t HCINT; /*!< Host Channel Interrupt Register 508h */ + __IO uint32_t HCINTMSK; /*!< Host Channel Interrupt Mask Register 50Ch */ + __IO uint32_t HCTSIZ; /*!< Host Channel Transfer Size Register 510h */ + __IO uint32_t HCDMA; /*!< Host Channel DMA Address Register 514h */ + uint32_t Reserved[2]; /*!< Reserved */ +} USB_OTG_HostChannelTypeDef; +/** + * @} + */ + +/** + * @brief JPEG Codec + */ +typedef struct +{ + __IO uint32_t CONFR0; /*!< JPEG Codec Control Register (JPEG_CONFR0), Address offset: 00h */ + __IO uint32_t CONFR1; /*!< JPEG Codec Control Register (JPEG_CONFR1), Address offset: 04h */ + __IO uint32_t CONFR2; /*!< JPEG Codec Control Register (JPEG_CONFR2), Address offset: 08h */ + __IO uint32_t CONFR3; /*!< JPEG Codec Control Register (JPEG_CONFR3), Address offset: 0Ch */ + __IO uint32_t CONFR4; /*!< JPEG Codec Control Register (JPEG_CONFR4), Address offset: 10h */ + __IO uint32_t CONFR5; /*!< JPEG Codec Control Register (JPEG_CONFR5), Address offset: 14h */ + __IO uint32_t CONFR6; /*!< JPEG Codec Control Register (JPEG_CONFR6), Address offset: 18h */ + __IO uint32_t CONFR7; /*!< JPEG Codec Control Register (JPEG_CONFR7), Address offset: 1Ch */ + uint32_t Reserved20[4]; /* Reserved Address offset: 20h-2Ch */ + __IO uint32_t CR; /*!< JPEG Control Register (JPEG_CR), Address offset: 30h */ + __IO uint32_t SR; /*!< JPEG Status Register (JPEG_SR), Address offset: 34h */ + __IO uint32_t CFR; /*!< JPEG Clear Flag Register (JPEG_CFR), Address offset: 38h */ + uint32_t Reserved3c; /* Reserved Address offset: 3Ch */ + __IO uint32_t DIR; /*!< JPEG Data Input Register (JPEG_DIR), Address offset: 40h */ + __IO uint32_t DOR; /*!< JPEG Data Output Register (JPEG_DOR), Address offset: 44h */ + uint32_t Reserved48[2]; /* Reserved Address offset: 48h-4Ch */ + __IO uint32_t QMEM0[16]; /*!< JPEG quantization tables 0, Address offset: 50h-8Ch */ + __IO uint32_t QMEM1[16]; /*!< JPEG quantization tables 1, Address offset: 90h-CCh */ + __IO uint32_t QMEM2[16]; /*!< JPEG quantization tables 2, Address offset: D0h-10Ch */ + __IO uint32_t QMEM3[16]; /*!< JPEG quantization tables 3, Address offset: 110h-14Ch */ + __IO uint32_t HUFFMIN[16]; /*!< JPEG HuffMin tables, Address offset: 150h-18Ch */ + 
__IO uint32_t HUFFBASE[32]; /*!< JPEG HuffSymb tables, Address offset: 190h-20Ch */ + __IO uint32_t HUFFSYMB[84]; /*!< JPEG HUFFSYMB tables, Address offset: 210h-35Ch */ + __IO uint32_t DHTMEM[103]; /*!< JPEG DHTMem tables, Address offset: 360h-4F8h */ + uint32_t Reserved4FC; /* Reserved Address offset: 4FCh */ + __IO uint32_t HUFFENC_AC0[88]; /*!< JPEG encoder, AC Huffman table 0, Address offset: 500h-65Ch */ + __IO uint32_t HUFFENC_AC1[88]; /*!< JPEG encoder, AC Huffman table 1, Address offset: 660h-7BCh */ + __IO uint32_t HUFFENC_DC0[8]; /*!< JPEG encoder, DC Huffman table 0, Address offset: 7C0h-7DCh */ + __IO uint32_t HUFFENC_DC1[8]; /*!< JPEG encoder, DC Huffman table 1, Address offset: 7E0h-7FCh */ + +} JPEG_TypeDef; + +/** + * @brief MDIOS + */ + +typedef struct +{ + __IO uint32_t CR; /*!< MDIOS Configuration Register (MDIOS_CR), Address offset: 00h */ + __IO uint32_t WRFR; /*!< MDIOS Configuration Register (MDIOS_CR), Address offset: 04h */ + __IO uint32_t CWRFR; /*!< MDIOS Configuration Register (MDIOS_CR), Address offset: 08h */ + __IO uint32_t RDFR; /*!< MDIOS Configuration Register (MDIOS_CR), Address offset: 0Ch */ + __IO uint32_t CRDFR; /*!< MDIOS Configuration Register (MDIOS_CR), Address offset: 10h */ + __IO uint32_t SR; /*!< MDIOS Configuration Register (MDIOS_CR), Address offset: 14h */ + __IO uint32_t CLRFR; /*!< MDIOS Configuration Register (MDIOS_CR), Address offset: 18h */ + uint32_t RESERVED0[57]; /* Reserved Address offset: 1Ch */ + __IO uint32_t DINR0; /*!< MDIOS Input Data Register (MDIOS_DINR0), Address offset: 100h */ + __IO uint32_t DINR1; /*!< MDIOS Input Data Register (MDIOS_DINR1), Address offset: 104h */ + __IO uint32_t DINR2; /*!< MDIOS Input Data Register (MDIOS_DINR2), Address offset: 108h */ + __IO uint32_t DINR3; /*!< MDIOS Input Data Register (MDIOS_DINR3), Address offset: 10Ch */ + __IO uint32_t DINR4; /*!< MDIOS Input Data Register (MDIOS_DINR4), Address offset: 110h */ + __IO uint32_t DINR5; /*!< MDIOS Input Data Register (MDIOS_DINR5), Address offset: 114h */ + __IO uint32_t DINR6; /*!< MDIOS Input Data Register (MDIOS_DINR6), Address offset: 118h */ + __IO uint32_t DINR7; /*!< MDIOS Input Data Register (MDIOS_DINR7), Address offset: 11Ch */ + __IO uint32_t DINR8; /*!< MDIOS Input Data Register (MDIOS_DINR8), Address offset: 120h */ + __IO uint32_t DINR9; /*!< MDIOS Input Data Register (MDIOS_DINR9), Address offset: 124h */ + __IO uint32_t DINR10; /*!< MDIOS Input Data Register (MDIOS_DINR10), Address offset: 128h */ + __IO uint32_t DINR11; /*!< MDIOS Input Data Register (MDIOS_DINR11), Address offset: 12Ch */ + __IO uint32_t DINR12; /*!< MDIOS Input Data Register (MDIOS_DINR12), Address offset: 130h */ + __IO uint32_t DINR13; /*!< MDIOS Input Data Register (MDIOS_DINR13), Address offset: 134h */ + __IO uint32_t DINR14; /*!< MDIOS Input Data Register (MDIOS_DINR14), Address offset: 138h */ + __IO uint32_t DINR15; /*!< MDIOS Input Data Register (MDIOS_DINR15), Address offset: 13Ch */ + __IO uint32_t DINR16; /*!< MDIOS Input Data Register (MDIOS_DINR16), Address offset: 140h */ + __IO uint32_t DINR17; /*!< MDIOS Input Data Register (MDIOS_DINR17), Address offset: 144h */ + __IO uint32_t DINR18; /*!< MDIOS Input Data Register (MDIOS_DINR18), Address offset: 148h */ + __IO uint32_t DINR19; /*!< MDIOS Input Data Register (MDIOS_DINR19), Address offset: 14Ch */ + __IO uint32_t DINR20; /*!< MDIOS Input Data Register (MDIOS_DINR20), Address offset: 150h */ + __IO uint32_t DINR21; /*!< MDIOS Input Data Register (MDIOS_DINR21), Address offset: 154h */ + 
__IO uint32_t DINR22; /*!< MDIOS Input Data Register (MDIOS_DINR22), Address offset: 158h */ + __IO uint32_t DINR23; /*!< MDIOS Input Data Register (MDIOS_DINR23), Address offset: 15Ch */ + __IO uint32_t DINR24; /*!< MDIOS Input Data Register (MDIOS_DINR24), Address offset: 160h */ + __IO uint32_t DINR25; /*!< MDIOS Input Data Register (MDIOS_DINR25), Address offset: 164h */ + __IO uint32_t DINR26; /*!< MDIOS Input Data Register (MDIOS_DINR26), Address offset: 168h */ + __IO uint32_t DINR27; /*!< MDIOS Input Data Register (MDIOS_DINR27), Address offset: 16Ch */ + __IO uint32_t DINR28; /*!< MDIOS Input Data Register (MDIOS_DINR28), Address offset: 170h */ + __IO uint32_t DINR29; /*!< MDIOS Input Data Register (MDIOS_DINR29), Address offset: 174h */ + __IO uint32_t DINR30; /*!< MDIOS Input Data Register (MDIOS_DINR30), Address offset: 178h */ + __IO uint32_t DINR31; /*!< MDIOS Input Data Register (MDIOS_DINR31), Address offset: 17Ch */ + __IO uint32_t DOUTR0; /*!< MDIOS Output Data Register (MDIOS_DOUTR0), Address offset: 180h */ + __IO uint32_t DOUTR1; /*!< MDIOS Output Data Register (MDIOS_DOUTR1), Address offset: 184h */ + __IO uint32_t DOUTR2; /*!< MDIOS Output Data Register (MDIOS_DOUTR2), Address offset: 188h */ + __IO uint32_t DOUTR3; /*!< MDIOS Output Data Register (MDIOS_DOUTR3), Address offset: 18Ch */ + __IO uint32_t DOUTR4; /*!< MDIOS Output Data Register (MDIOS_DOUTR4), Address offset: 190h */ + __IO uint32_t DOUTR5; /*!< MDIOS Output Data Register (MDIOS_DOUTR5), Address offset: 194h */ + __IO uint32_t DOUTR6; /*!< MDIOS Output Data Register (MDIOS_DOUTR6), Address offset: 198h */ + __IO uint32_t DOUTR7; /*!< MDIOS Output Data Register (MDIOS_DOUTR7), Address offset: 19Ch */ + __IO uint32_t DOUTR8; /*!< MDIOS Output Data Register (MDIOS_DOUTR8), Address offset: 1A0h */ + __IO uint32_t DOUTR9; /*!< MDIOS Output Data Register (MDIOS_DOUTR9), Address offset: 1A4h */ + __IO uint32_t DOUTR10; /*!< MDIOS Output Data Register (MDIOS_DOUTR10), Address offset: 1A8h */ + __IO uint32_t DOUTR11; /*!< MDIOS Output Data Register (MDIOS_DOUTR11), Address offset: 1ACh */ + __IO uint32_t DOUTR12; /*!< MDIOS Output Data Register (MDIOS_DOUTR12), Address offset: 1B0h */ + __IO uint32_t DOUTR13; /*!< MDIOS Output Data Register (MDIOS_DOUTR13), Address offset: 1B4h */ + __IO uint32_t DOUTR14; /*!< MDIOS Output Data Register (MDIOS_DOUTR14), Address offset: 1B8h */ + __IO uint32_t DOUTR15; /*!< MDIOS Output Data Register (MDIOS_DOUTR15), Address offset: 1BCh */ + __IO uint32_t DOUTR16; /*!< MDIOS Output Data Register (MDIOS_DOUTR16), Address offset: 1C0h */ + __IO uint32_t DOUTR17; /*!< MDIOS Output Data Register (MDIOS_DOUTR17), Address offset: 1C4h */ + __IO uint32_t DOUTR18; /*!< MDIOS Output Data Register (MDIOS_DOUTR18), Address offset: 1C8h */ + __IO uint32_t DOUTR19; /*!< MDIOS Output Data Register (MDIOS_DOUTR19), Address offset: 1CCh */ + __IO uint32_t DOUTR20; /*!< MDIOS Output Data Register (MDIOS_DOUTR20), Address offset: 1D0h */ + __IO uint32_t DOUTR21; /*!< MDIOS Output Data Register (MDIOS_DOUTR21), Address offset: 1D4h */ + __IO uint32_t DOUTR22; /*!< MDIOS Output Data Register (MDIOS_DOUTR22), Address offset: 1D8h */ + __IO uint32_t DOUTR23; /*!< MDIOS Output Data Register (MDIOS_DOUTR23), Address offset: 1DCh */ + __IO uint32_t DOUTR24; /*!< MDIOS Output Data Register (MDIOS_DOUTR24), Address offset: 1E0h */ + __IO uint32_t DOUTR25; /*!< MDIOS Output Data Register (MDIOS_DOUTR25), Address offset: 1E4h */ + __IO uint32_t DOUTR26; /*!< MDIOS Output Data Register (MDIOS_DOUTR26), 
Address offset: 1E8h */ + __IO uint32_t DOUTR27; /*!< MDIOS Output Data Register (MDIOS_DOUTR27), Address offset: 1ECh */ + __IO uint32_t DOUTR28; /*!< MDIOS Output Data Register (MDIOS_DOUTR28), Address offset: 1F0h */ + __IO uint32_t DOUTR29; /*!< MDIOS Output Data Register (MDIOS_DOUTR29), Address offset: 1F4h */ + __IO uint32_t DOUTR30; /*!< MDIOS Output Data Register (MDIOS_DOUTR30), Address offset: 1F8h */ + __IO uint32_t DOUTR31; /*!< MDIOS Output Data Register (MDIOS_DOUTR31), Address offset: 1FCh */ +} MDIOS_TypeDef; + + +/** @addtogroup Peripheral_memory_map + * @{ + */ +#define RAMITCM_BASE 0x00000000UL /*!< Base address of : 16KB RAM reserved for CPU execution/instruction accessible over ITCM */ +#define FLASHITCM_BASE 0x00200000UL /*!< Base address of : (up to 2 MB) embedded FLASH memory accessible over ITCM */ +#define FLASHAXI_BASE 0x08000000UL /*!< Base address of : (up to 2 MB) embedded FLASH memory accessible over AXI */ +#define RAMDTCM_BASE 0x20000000UL /*!< Base address of : 128KB system data RAM accessible over DTCM */ +#define PERIPH_BASE 0x40000000UL /*!< Base address of : AHB/ABP Peripherals */ +#define BKPSRAM_BASE 0x40024000UL /*!< Base address of : Backup SRAM(4 KB) */ +#define QSPI_BASE 0x90000000UL /*!< Base address of : QSPI memories accessible over AXI */ +#define FMC_R_BASE 0xA0000000UL /*!< Base address of : FMC Control registers */ +#define QSPI_R_BASE 0xA0001000UL /*!< Base address of : QSPI Control registers */ +#define SRAM1_BASE 0x20020000UL /*!< Base address of : 368KB RAM1 accessible over AXI/AHB */ +#define SRAM2_BASE 0x2007C000UL /*!< Base address of : 16KB RAM2 accessible over AXI/AHB */ +#define FLASH_END 0x081FFFFFUL /*!< FLASH end address */ +#define FLASH_OTP_BASE 0x1FF0F000UL /*!< Base address of : (up to 1024 Bytes) embedded FLASH OTP Area */ +#define FLASH_OTP_END 0x1FF0F41FUL /*!< End address of : (up to 1024 Bytes) embedded FLASH OTP Area */ + +/* Legacy define */ +#define FLASH_BASE FLASHAXI_BASE + +/*!< Peripheral memory map */ +#define APB1PERIPH_BASE PERIPH_BASE +#define APB2PERIPH_BASE (PERIPH_BASE + 0x00010000UL) +#define AHB1PERIPH_BASE (PERIPH_BASE + 0x00020000UL) +#define AHB2PERIPH_BASE (PERIPH_BASE + 0x10000000UL) + +/*!< APB1 peripherals */ +#define TIM2_BASE (APB1PERIPH_BASE + 0x0000UL) +#define TIM3_BASE (APB1PERIPH_BASE + 0x0400UL) +#define TIM4_BASE (APB1PERIPH_BASE + 0x0800UL) +#define TIM5_BASE (APB1PERIPH_BASE + 0x0C00UL) +#define TIM6_BASE (APB1PERIPH_BASE + 0x1000UL) +#define TIM7_BASE (APB1PERIPH_BASE + 0x1400UL) +#define TIM12_BASE (APB1PERIPH_BASE + 0x1800UL) +#define TIM13_BASE (APB1PERIPH_BASE + 0x1C00UL) +#define TIM14_BASE (APB1PERIPH_BASE + 0x2000UL) +#define LPTIM1_BASE (APB1PERIPH_BASE + 0x2400UL) +#define RTC_BASE (APB1PERIPH_BASE + 0x2800UL) +#define WWDG_BASE (APB1PERIPH_BASE + 0x2C00UL) +#define IWDG_BASE (APB1PERIPH_BASE + 0x3000UL) +#define CAN3_BASE (APB1PERIPH_BASE + 0x3400UL) +#define SPI2_BASE (APB1PERIPH_BASE + 0x3800UL) +#define SPI3_BASE (APB1PERIPH_BASE + 0x3C00UL) +#define SPDIFRX_BASE (APB1PERIPH_BASE + 0x4000UL) +#define USART2_BASE (APB1PERIPH_BASE + 0x4400UL) +#define USART3_BASE (APB1PERIPH_BASE + 0x4800UL) +#define UART4_BASE (APB1PERIPH_BASE + 0x4C00UL) +#define UART5_BASE (APB1PERIPH_BASE + 0x5000UL) +#define I2C1_BASE (APB1PERIPH_BASE + 0x5400UL) +#define I2C2_BASE (APB1PERIPH_BASE + 0x5800UL) +#define I2C3_BASE (APB1PERIPH_BASE + 0x5C00UL) +#define I2C4_BASE (APB1PERIPH_BASE + 0x6000UL) +#define CAN1_BASE (APB1PERIPH_BASE + 0x6400UL) +#define CAN2_BASE (APB1PERIPH_BASE + 0x6800UL) 
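+/* Usage sketch (editor's illustration, not part of the ST header): each base address
+   composes as bus base + fixed offset, e.g. TIM3_BASE == 0x40000000UL + 0x0400UL.
+   Together with the typed peripheral macros defined further below (RCC, GPIOB, ...),
+   register access looks like this, assuming the NUCLEO-F767ZI wiring where LD2 is PB7:
+     RCC->AHB1ENR |= (1UL << 1);    enable the GPIOB clock (GPIOBEN)
+     GPIOB->MODER |= (1UL << 14);   PB7 as general-purpose output (MODER7 = 01)
+     GPIOB->ODR   ^= (1UL << 7);    toggle the LED
+*/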
+#define CEC_BASE (APB1PERIPH_BASE + 0x6C00UL) +#define PWR_BASE (APB1PERIPH_BASE + 0x7000UL) +#define DAC_BASE (APB1PERIPH_BASE + 0x7400UL) +#define UART7_BASE (APB1PERIPH_BASE + 0x7800UL) +#define UART8_BASE (APB1PERIPH_BASE + 0x7C00UL) + +/*!< APB2 peripherals */ +#define TIM1_BASE (APB2PERIPH_BASE + 0x0000UL) +#define TIM8_BASE (APB2PERIPH_BASE + 0x0400UL) +#define USART1_BASE (APB2PERIPH_BASE + 0x1000UL) +#define USART6_BASE (APB2PERIPH_BASE + 0x1400UL) +#define SDMMC2_BASE (APB2PERIPH_BASE + 0x1C00UL) +#define ADC1_BASE (APB2PERIPH_BASE + 0x2000UL) +#define ADC2_BASE (APB2PERIPH_BASE + 0x2100UL) +#define ADC3_BASE (APB2PERIPH_BASE + 0x2200UL) +#define ADC_BASE (APB2PERIPH_BASE + 0x2300UL) +#define SDMMC1_BASE (APB2PERIPH_BASE + 0x2C00UL) +#define SPI1_BASE (APB2PERIPH_BASE + 0x3000UL) +#define SPI4_BASE (APB2PERIPH_BASE + 0x3400UL) +#define SYSCFG_BASE (APB2PERIPH_BASE + 0x3800UL) +#define EXTI_BASE (APB2PERIPH_BASE + 0x3C00UL) +#define TIM9_BASE (APB2PERIPH_BASE + 0x4000UL) +#define TIM10_BASE (APB2PERIPH_BASE + 0x4400UL) +#define TIM11_BASE (APB2PERIPH_BASE + 0x4800UL) +#define SPI5_BASE (APB2PERIPH_BASE + 0x5000UL) +#define SPI6_BASE (APB2PERIPH_BASE + 0x5400UL) +#define SAI1_BASE (APB2PERIPH_BASE + 0x5800UL) +#define SAI2_BASE (APB2PERIPH_BASE + 0x5C00UL) +#define SAI1_Block_A_BASE (SAI1_BASE + 0x004UL) +#define SAI1_Block_B_BASE (SAI1_BASE + 0x024UL) +#define SAI2_Block_A_BASE (SAI2_BASE + 0x004UL) +#define SAI2_Block_B_BASE (SAI2_BASE + 0x024UL) +#define LTDC_BASE (APB2PERIPH_BASE + 0x6800UL) +#define LTDC_Layer1_BASE (LTDC_BASE + 0x0084UL) +#define LTDC_Layer2_BASE (LTDC_BASE + 0x0104UL) +#define DFSDM1_BASE (APB2PERIPH_BASE + 0x7400UL) +#define DFSDM1_Channel0_BASE (DFSDM1_BASE + 0x00UL) +#define DFSDM1_Channel1_BASE (DFSDM1_BASE + 0x20UL) +#define DFSDM1_Channel2_BASE (DFSDM1_BASE + 0x40UL) +#define DFSDM1_Channel3_BASE (DFSDM1_BASE + 0x60UL) +#define DFSDM1_Channel4_BASE (DFSDM1_BASE + 0x80UL) +#define DFSDM1_Channel5_BASE (DFSDM1_BASE + 0xA0UL) +#define DFSDM1_Channel6_BASE (DFSDM1_BASE + 0xC0UL) +#define DFSDM1_Channel7_BASE (DFSDM1_BASE + 0xE0UL) +#define DFSDM1_Filter0_BASE (DFSDM1_BASE + 0x100UL) +#define DFSDM1_Filter1_BASE (DFSDM1_BASE + 0x180UL) +#define DFSDM1_Filter2_BASE (DFSDM1_BASE + 0x200UL) +#define DFSDM1_Filter3_BASE (DFSDM1_BASE + 0x280UL) +#define MDIOS_BASE (APB2PERIPH_BASE + 0x7800UL) +/*!< AHB1 peripherals */ +#define GPIOA_BASE (AHB1PERIPH_BASE + 0x0000UL) +#define GPIOB_BASE (AHB1PERIPH_BASE + 0x0400UL) +#define GPIOC_BASE (AHB1PERIPH_BASE + 0x0800UL) +#define GPIOD_BASE (AHB1PERIPH_BASE + 0x0C00UL) +#define GPIOE_BASE (AHB1PERIPH_BASE + 0x1000UL) +#define GPIOF_BASE (AHB1PERIPH_BASE + 0x1400UL) +#define GPIOG_BASE (AHB1PERIPH_BASE + 0x1800UL) +#define GPIOH_BASE (AHB1PERIPH_BASE + 0x1C00UL) +#define GPIOI_BASE (AHB1PERIPH_BASE + 0x2000UL) +#define GPIOJ_BASE (AHB1PERIPH_BASE + 0x2400UL) +#define GPIOK_BASE (AHB1PERIPH_BASE + 0x2800UL) +#define CRC_BASE (AHB1PERIPH_BASE + 0x3000UL) +#define RCC_BASE (AHB1PERIPH_BASE + 0x3800UL) +#define FLASH_R_BASE (AHB1PERIPH_BASE + 0x3C00UL) +#define UID_BASE 0x1FF0F420UL /*!< Unique device ID register base address */ +#define FLASHSIZE_BASE 0x1FF0F442UL /*!< FLASH Size register base address */ +#define PACKAGE_BASE 0x1FF0F7E0UL /*!< Package size register base address */ +/* Legacy define */ +#define PACKAGESIZE_BASE PACKAGE_BASE + +#define DMA1_BASE (AHB1PERIPH_BASE + 0x6000UL) +#define DMA1_Stream0_BASE (DMA1_BASE + 0x010UL) +#define DMA1_Stream1_BASE (DMA1_BASE + 0x028UL) +#define DMA1_Stream2_BASE (DMA1_BASE + 
0x040UL) +#define DMA1_Stream3_BASE (DMA1_BASE + 0x058UL) +#define DMA1_Stream4_BASE (DMA1_BASE + 0x070UL) +#define DMA1_Stream5_BASE (DMA1_BASE + 0x088UL) +#define DMA1_Stream6_BASE (DMA1_BASE + 0x0A0UL) +#define DMA1_Stream7_BASE (DMA1_BASE + 0x0B8UL) +#define DMA2_BASE (AHB1PERIPH_BASE + 0x6400UL) +#define DMA2_Stream0_BASE (DMA2_BASE + 0x010UL) +#define DMA2_Stream1_BASE (DMA2_BASE + 0x028UL) +#define DMA2_Stream2_BASE (DMA2_BASE + 0x040UL) +#define DMA2_Stream3_BASE (DMA2_BASE + 0x058UL) +#define DMA2_Stream4_BASE (DMA2_BASE + 0x070UL) +#define DMA2_Stream5_BASE (DMA2_BASE + 0x088UL) +#define DMA2_Stream6_BASE (DMA2_BASE + 0x0A0UL) +#define DMA2_Stream7_BASE (DMA2_BASE + 0x0B8UL) +#define ETH_BASE (AHB1PERIPH_BASE + 0x8000UL) +#define ETH_MAC_BASE (ETH_BASE) +#define ETH_MMC_BASE (ETH_BASE + 0x0100UL) +#define ETH_PTP_BASE (ETH_BASE + 0x0700UL) +#define ETH_DMA_BASE (ETH_BASE + 0x1000UL) +#define DMA2D_BASE (AHB1PERIPH_BASE + 0xB000UL) +/*!< AHB2 peripherals */ +#define DCMI_BASE (AHB2PERIPH_BASE + 0x50000UL) +#define JPEG_BASE (AHB2PERIPH_BASE + 0x51000UL) +#define RNG_BASE (AHB2PERIPH_BASE + 0x60800UL) +/*!< FMC Bankx registers base address */ +#define FMC_Bank1_R_BASE (FMC_R_BASE + 0x0000UL) +#define FMC_Bank1E_R_BASE (FMC_R_BASE + 0x0104UL) +#define FMC_Bank3_R_BASE (FMC_R_BASE + 0x0080UL) +#define FMC_Bank5_6_R_BASE (FMC_R_BASE + 0x0140UL) + +/* Debug MCU registers base address */ +#define DBGMCU_BASE 0xE0042000UL + +/*!< USB registers base address */ +#define USB_OTG_HS_PERIPH_BASE 0x40040000UL +#define USB_OTG_FS_PERIPH_BASE 0x50000000UL + +#define USB_OTG_GLOBAL_BASE 0x0000UL +#define USB_OTG_DEVICE_BASE 0x0800UL +#define USB_OTG_IN_ENDPOINT_BASE 0x0900UL +#define USB_OTG_OUT_ENDPOINT_BASE 0x0B00UL +#define USB_OTG_EP_REG_SIZE 0x0020UL +#define USB_OTG_HOST_BASE 0x0400UL +#define USB_OTG_HOST_PORT_BASE 0x0440UL +#define USB_OTG_HOST_CHANNEL_BASE 0x0500UL +#define USB_OTG_HOST_CHANNEL_SIZE 0x0020UL +#define USB_OTG_PCGCCTL_BASE 0x0E00UL +#define USB_OTG_FIFO_BASE 0x1000UL +#define USB_OTG_FIFO_SIZE 0x1000UL + +/** + * @} + */ + +/** @addtogroup Peripheral_declaration + * @{ + */ +#define TIM2 ((TIM_TypeDef *) TIM2_BASE) +#define TIM3 ((TIM_TypeDef *) TIM3_BASE) +#define TIM4 ((TIM_TypeDef *) TIM4_BASE) +#define TIM5 ((TIM_TypeDef *) TIM5_BASE) +#define TIM6 ((TIM_TypeDef *) TIM6_BASE) +#define TIM7 ((TIM_TypeDef *) TIM7_BASE) +#define TIM12 ((TIM_TypeDef *) TIM12_BASE) +#define TIM13 ((TIM_TypeDef *) TIM13_BASE) +#define TIM14 ((TIM_TypeDef *) TIM14_BASE) +#define LPTIM1 ((LPTIM_TypeDef *) LPTIM1_BASE) +#define RTC ((RTC_TypeDef *) RTC_BASE) +#define WWDG ((WWDG_TypeDef *) WWDG_BASE) +#define IWDG ((IWDG_TypeDef *) IWDG_BASE) +#define SPI2 ((SPI_TypeDef *) SPI2_BASE) +#define SPI3 ((SPI_TypeDef *) SPI3_BASE) +#define SPDIFRX ((SPDIFRX_TypeDef *) SPDIFRX_BASE) +#define USART2 ((USART_TypeDef *) USART2_BASE) +#define USART3 ((USART_TypeDef *) USART3_BASE) +#define UART4 ((USART_TypeDef *) UART4_BASE) +#define UART5 ((USART_TypeDef *) UART5_BASE) +#define I2C1 ((I2C_TypeDef *) I2C1_BASE) +#define I2C2 ((I2C_TypeDef *) I2C2_BASE) +#define I2C3 ((I2C_TypeDef *) I2C3_BASE) +#define I2C4 ((I2C_TypeDef *) I2C4_BASE) +#define CAN1 ((CAN_TypeDef *) CAN1_BASE) +#define CAN2 ((CAN_TypeDef *) CAN2_BASE) +#define CEC ((CEC_TypeDef *) CEC_BASE) +#define PWR ((PWR_TypeDef *) PWR_BASE) +#define DAC1 ((DAC_TypeDef *) DAC_BASE) +#define DAC ((DAC_TypeDef *) DAC_BASE) /* Kept for legacy purpose */ +#define UART7 ((USART_TypeDef *) UART7_BASE) +#define UART8 ((USART_TypeDef *) UART8_BASE) +#define 
TIM1 ((TIM_TypeDef *) TIM1_BASE) +#define TIM8 ((TIM_TypeDef *) TIM8_BASE) +#define USART1 ((USART_TypeDef *) USART1_BASE) +#define USART6 ((USART_TypeDef *) USART6_BASE) +#define ADC ((ADC_Common_TypeDef *) ADC_BASE) +#define ADC1 ((ADC_TypeDef *) ADC1_BASE) +#define ADC2 ((ADC_TypeDef *) ADC2_BASE) +#define ADC3 ((ADC_TypeDef *) ADC3_BASE) +#define ADC123_COMMON ((ADC_Common_TypeDef *) ADC_BASE) +#define SDMMC1 ((SDMMC_TypeDef *) SDMMC1_BASE) +#define SPI1 ((SPI_TypeDef *) SPI1_BASE) +#define SPI4 ((SPI_TypeDef *) SPI4_BASE) +#define SYSCFG ((SYSCFG_TypeDef *) SYSCFG_BASE) +#define EXTI ((EXTI_TypeDef *) EXTI_BASE) +#define TIM9 ((TIM_TypeDef *) TIM9_BASE) +#define TIM10 ((TIM_TypeDef *) TIM10_BASE) +#define TIM11 ((TIM_TypeDef *) TIM11_BASE) +#define SPI5 ((SPI_TypeDef *) SPI5_BASE) +#define SPI6 ((SPI_TypeDef *) SPI6_BASE) +#define SAI1 ((SAI_TypeDef *) SAI1_BASE) +#define SAI2 ((SAI_TypeDef *) SAI2_BASE) +#define SAI1_Block_A ((SAI_Block_TypeDef *)SAI1_Block_A_BASE) +#define SAI1_Block_B ((SAI_Block_TypeDef *)SAI1_Block_B_BASE) +#define SAI2_Block_A ((SAI_Block_TypeDef *)SAI2_Block_A_BASE) +#define SAI2_Block_B ((SAI_Block_TypeDef *)SAI2_Block_B_BASE) +#define LTDC ((LTDC_TypeDef *)LTDC_BASE) +#define LTDC_Layer1 ((LTDC_Layer_TypeDef *)LTDC_Layer1_BASE) +#define LTDC_Layer2 ((LTDC_Layer_TypeDef *)LTDC_Layer2_BASE) +#define GPIOA ((GPIO_TypeDef *) GPIOA_BASE) +#define GPIOB ((GPIO_TypeDef *) GPIOB_BASE) +#define GPIOC ((GPIO_TypeDef *) GPIOC_BASE) +#define GPIOD ((GPIO_TypeDef *) GPIOD_BASE) +#define GPIOE ((GPIO_TypeDef *) GPIOE_BASE) +#define GPIOF ((GPIO_TypeDef *) GPIOF_BASE) +#define GPIOG ((GPIO_TypeDef *) GPIOG_BASE) +#define GPIOH ((GPIO_TypeDef *) GPIOH_BASE) +#define GPIOI ((GPIO_TypeDef *) GPIOI_BASE) +#define GPIOJ ((GPIO_TypeDef *) GPIOJ_BASE) +#define GPIOK ((GPIO_TypeDef *) GPIOK_BASE) +#define CRC ((CRC_TypeDef *) CRC_BASE) +#define RCC ((RCC_TypeDef *) RCC_BASE) +#define FLASH ((FLASH_TypeDef *) FLASH_R_BASE) +#define DMA1 ((DMA_TypeDef *) DMA1_BASE) +#define DMA1_Stream0 ((DMA_Stream_TypeDef *) DMA1_Stream0_BASE) +#define DMA1_Stream1 ((DMA_Stream_TypeDef *) DMA1_Stream1_BASE) +#define DMA1_Stream2 ((DMA_Stream_TypeDef *) DMA1_Stream2_BASE) +#define DMA1_Stream3 ((DMA_Stream_TypeDef *) DMA1_Stream3_BASE) +#define DMA1_Stream4 ((DMA_Stream_TypeDef *) DMA1_Stream4_BASE) +#define DMA1_Stream5 ((DMA_Stream_TypeDef *) DMA1_Stream5_BASE) +#define DMA1_Stream6 ((DMA_Stream_TypeDef *) DMA1_Stream6_BASE) +#define DMA1_Stream7 ((DMA_Stream_TypeDef *) DMA1_Stream7_BASE) +#define DMA2 ((DMA_TypeDef *) DMA2_BASE) +#define DMA2_Stream0 ((DMA_Stream_TypeDef *) DMA2_Stream0_BASE) +#define DMA2_Stream1 ((DMA_Stream_TypeDef *) DMA2_Stream1_BASE) +#define DMA2_Stream2 ((DMA_Stream_TypeDef *) DMA2_Stream2_BASE) +#define DMA2_Stream3 ((DMA_Stream_TypeDef *) DMA2_Stream3_BASE) +#define DMA2_Stream4 ((DMA_Stream_TypeDef *) DMA2_Stream4_BASE) +#define DMA2_Stream5 ((DMA_Stream_TypeDef *) DMA2_Stream5_BASE) +#define DMA2_Stream6 ((DMA_Stream_TypeDef *) DMA2_Stream6_BASE) +#define DMA2_Stream7 ((DMA_Stream_TypeDef *) DMA2_Stream7_BASE) +#define ETH ((ETH_TypeDef *) ETH_BASE) +#define DMA2D ((DMA2D_TypeDef *)DMA2D_BASE) +#define DCMI ((DCMI_TypeDef *) DCMI_BASE) +#define RNG ((RNG_TypeDef *) RNG_BASE) +#define FMC_Bank1 ((FMC_Bank1_TypeDef *) FMC_Bank1_R_BASE) +#define FMC_Bank1E ((FMC_Bank1E_TypeDef *) FMC_Bank1E_R_BASE) +#define FMC_Bank3 ((FMC_Bank3_TypeDef *) FMC_Bank3_R_BASE) +#define FMC_Bank5_6 ((FMC_Bank5_6_TypeDef *) FMC_Bank5_6_R_BASE) +#define QUADSPI ((QUADSPI_TypeDef *) 
QSPI_R_BASE) +#define DBGMCU ((DBGMCU_TypeDef *) DBGMCU_BASE) +#define USB_OTG_FS ((USB_OTG_GlobalTypeDef *) USB_OTG_FS_PERIPH_BASE) +#define USB_OTG_HS ((USB_OTG_GlobalTypeDef *) USB_OTG_HS_PERIPH_BASE) +#define CAN3 ((CAN_TypeDef *) CAN3_BASE) +#define SDMMC2 ((SDMMC_TypeDef *) SDMMC2_BASE) +#define MDIOS ((MDIOS_TypeDef *) MDIOS_BASE) +#define DFSDM1_Channel0 ((DFSDM_Channel_TypeDef *) DFSDM1_Channel0_BASE) +#define DFSDM1_Channel1 ((DFSDM_Channel_TypeDef *) DFSDM1_Channel1_BASE) +#define DFSDM1_Channel2 ((DFSDM_Channel_TypeDef *) DFSDM1_Channel2_BASE) +#define DFSDM1_Channel3 ((DFSDM_Channel_TypeDef *) DFSDM1_Channel3_BASE) +#define DFSDM1_Channel4 ((DFSDM_Channel_TypeDef *) DFSDM1_Channel4_BASE) +#define DFSDM1_Channel5 ((DFSDM_Channel_TypeDef *) DFSDM1_Channel5_BASE) +#define DFSDM1_Channel6 ((DFSDM_Channel_TypeDef *) DFSDM1_Channel6_BASE) +#define DFSDM1_Channel7 ((DFSDM_Channel_TypeDef *) DFSDM1_Channel7_BASE) +#define DFSDM1_Filter0 ((DFSDM_Filter_TypeDef *) DFSDM1_Filter0_BASE) +#define DFSDM1_Filter1 ((DFSDM_Filter_TypeDef *) DFSDM1_Filter1_BASE) +#define DFSDM1_Filter2 ((DFSDM_Filter_TypeDef *) DFSDM1_Filter2_BASE) +#define DFSDM1_Filter3 ((DFSDM_Filter_TypeDef *) DFSDM1_Filter3_BASE) +#define JPEG ((JPEG_TypeDef *) JPEG_BASE) + +/** + * @} + */ + +/** @addtogroup Exported_constants + * @{ + */ + + /** @addtogroup Hardware_Constant_Definition + * @{ + */ +#define LSI_STARTUP_TIME 40U /*!< LSI Maximum startup time in us */ + + /** + * @} + */ + + /** @addtogroup Peripheral_Registers_Bits_Definition + * @{ + */ + +/******************************************************************************/ +/* Peripheral Registers_Bits_Definition */ +/******************************************************************************/ + +/******************************************************************************/ +/* */ +/* Analog to Digital Converter */ +/* */ +/******************************************************************************/ +#define VREFINT_CAL_ADDR_CMSIS ((uint16_t*) (0x1FF0F44A)) /*!>= 1U; value != 0U; value >>= 1U) + { + result <<= 1U; + result |= value & 1U; + s--; + } + result <<= s; /* shift when v's highest bits are zero */ + return result; +} +#endif + + +/** + \brief Count leading zeros + \details Counts the number of leading zeros of a data value. + \param [in] value Value to count the leading zeros + \return number of leading zeros in value + */ +#define __CLZ __clz + + +#if ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ + (defined (__ARM_ARCH_7EM__) && (__ARM_ARCH_7EM__ == 1)) ) + +/** + \brief LDR Exclusive (8 bit) + \details Executes a exclusive LDR instruction for 8 bit value. + \param [in] ptr Pointer to data + \return value of type uint8_t at (*ptr) + */ +#if defined(__ARMCC_VERSION) && (__ARMCC_VERSION < 5060020) + #define __LDREXB(ptr) ((uint8_t ) __ldrex(ptr)) +#else + #define __LDREXB(ptr) _Pragma("push") _Pragma("diag_suppress 3731") ((uint8_t ) __ldrex(ptr)) _Pragma("pop") +#endif + + +/** + \brief LDR Exclusive (16 bit) + \details Executes a exclusive LDR instruction for 16 bit values. + \param [in] ptr Pointer to data + \return value of type uint16_t at (*ptr) + */ +#if defined(__ARMCC_VERSION) && (__ARMCC_VERSION < 5060020) + #define __LDREXH(ptr) ((uint16_t) __ldrex(ptr)) +#else + #define __LDREXH(ptr) _Pragma("push") _Pragma("diag_suppress 3731") ((uint16_t) __ldrex(ptr)) _Pragma("pop") +#endif + + +/** + \brief LDR Exclusive (32 bit) + \details Executes a exclusive LDR instruction for 32 bit values. 
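+           For illustration, a minimal lock-free increment sketch (the shared
+           counter 'counter' is a hypothetical variable, not part of this header):
+           \code
+           static volatile uint32_t counter;
+
+           void counter_increment(void)
+           {
+             uint32_t val;
+             do {
+               val = __LDREXW(&counter);                     // exclusive read
+             } while (__STREXW(val + 1U, &counter) != 0U);   // retry if the exclusive access was lost
+           }
+           \endcode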
+ \param [in] ptr Pointer to data + \return value of type uint32_t at (*ptr) + */ +#if defined(__ARMCC_VERSION) && (__ARMCC_VERSION < 5060020) + #define __LDREXW(ptr) ((uint32_t ) __ldrex(ptr)) +#else + #define __LDREXW(ptr) _Pragma("push") _Pragma("diag_suppress 3731") ((uint32_t ) __ldrex(ptr)) _Pragma("pop") +#endif + + +/** + \brief STR Exclusive (8 bit) + \details Executes a exclusive STR instruction for 8 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + \return 0 Function succeeded + \return 1 Function failed + */ +#if defined(__ARMCC_VERSION) && (__ARMCC_VERSION < 5060020) + #define __STREXB(value, ptr) __strex(value, ptr) +#else + #define __STREXB(value, ptr) _Pragma("push") _Pragma("diag_suppress 3731") __strex(value, ptr) _Pragma("pop") +#endif + + +/** + \brief STR Exclusive (16 bit) + \details Executes a exclusive STR instruction for 16 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + \return 0 Function succeeded + \return 1 Function failed + */ +#if defined(__ARMCC_VERSION) && (__ARMCC_VERSION < 5060020) + #define __STREXH(value, ptr) __strex(value, ptr) +#else + #define __STREXH(value, ptr) _Pragma("push") _Pragma("diag_suppress 3731") __strex(value, ptr) _Pragma("pop") +#endif + + +/** + \brief STR Exclusive (32 bit) + \details Executes a exclusive STR instruction for 32 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + \return 0 Function succeeded + \return 1 Function failed + */ +#if defined(__ARMCC_VERSION) && (__ARMCC_VERSION < 5060020) + #define __STREXW(value, ptr) __strex(value, ptr) +#else + #define __STREXW(value, ptr) _Pragma("push") _Pragma("diag_suppress 3731") __strex(value, ptr) _Pragma("pop") +#endif + + +/** + \brief Remove the exclusive lock + \details Removes the exclusive lock which is created by LDREX. + */ +#define __CLREX __clrex + + +/** + \brief Signed Saturate + \details Saturates a signed value. + \param [in] value Value to be saturated + \param [in] sat Bit position to saturate to (1..32) + \return Saturated value + */ +#define __SSAT __ssat + + +/** + \brief Unsigned Saturate + \details Saturates an unsigned value. + \param [in] value Value to be saturated + \param [in] sat Bit position to saturate to (0..31) + \return Saturated value + */ +#define __USAT __usat + + +/** + \brief Rotate Right with Extend (32 bit) + \details Moves each bit of a bitstring right by one bit. + The carry input is shifted in at the left end of the bitstring. + \param [in] value Value to rotate + \return Rotated value + */ +#ifndef __NO_EMBEDDED_ASM +__attribute__((section(".rrx_text"))) __STATIC_INLINE __ASM uint32_t __RRX(uint32_t value) +{ + rrx r0, r0 + bx lr +} +#endif + + +/** + \brief LDRT Unprivileged (8 bit) + \details Executes a Unprivileged LDRT instruction for 8 bit value. + \param [in] ptr Pointer to data + \return value of type uint8_t at (*ptr) + */ +#define __LDRBT(ptr) ((uint8_t ) __ldrt(ptr)) + + +/** + \brief LDRT Unprivileged (16 bit) + \details Executes a Unprivileged LDRT instruction for 16 bit values. + \param [in] ptr Pointer to data + \return value of type uint16_t at (*ptr) + */ +#define __LDRHT(ptr) ((uint16_t) __ldrt(ptr)) + + +/** + \brief LDRT Unprivileged (32 bit) + \details Executes a Unprivileged LDRT instruction for 32 bit values. 
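+           For illustration, a short sketch of a privileged handler reading a
+           word through the caller's unprivileged access permissions; the helper
+           name and pointer are hypothetical:
+           \code
+           uint32_t read_user_word(volatile uint32_t *user_ptr)
+           {
+             // LDRT performs the load as if executed unprivileged, so the
+             // MPU permissions granted to the unprivileged caller apply.
+             return __LDRT(user_ptr);
+           }
+           \endcode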
+ \param [in] ptr Pointer to data + \return value of type uint32_t at (*ptr) + */ +#define __LDRT(ptr) ((uint32_t ) __ldrt(ptr)) + + +/** + \brief STRT Unprivileged (8 bit) + \details Executes a Unprivileged STRT instruction for 8 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + */ +#define __STRBT(value, ptr) __strt(value, ptr) + + +/** + \brief STRT Unprivileged (16 bit) + \details Executes a Unprivileged STRT instruction for 16 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + */ +#define __STRHT(value, ptr) __strt(value, ptr) + + +/** + \brief STRT Unprivileged (32 bit) + \details Executes a Unprivileged STRT instruction for 32 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + */ +#define __STRT(value, ptr) __strt(value, ptr) + +#else /* ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ + (defined (__ARM_ARCH_7EM__) && (__ARM_ARCH_7EM__ == 1)) ) */ + +/** + \brief Signed Saturate + \details Saturates a signed value. + \param [in] value Value to be saturated + \param [in] sat Bit position to saturate to (1..32) + \return Saturated value + */ +__attribute__((always_inline)) __STATIC_INLINE int32_t __SSAT(int32_t val, uint32_t sat) +{ + if ((sat >= 1U) && (sat <= 32U)) + { + const int32_t max = (int32_t)((1U << (sat - 1U)) - 1U); + const int32_t min = -1 - max ; + if (val > max) + { + return max; + } + else if (val < min) + { + return min; + } + } + return val; +} + +/** + \brief Unsigned Saturate + \details Saturates an unsigned value. + \param [in] value Value to be saturated + \param [in] sat Bit position to saturate to (0..31) + \return Saturated value + */ +__attribute__((always_inline)) __STATIC_INLINE uint32_t __USAT(int32_t val, uint32_t sat) +{ + if (sat <= 31U) + { + const uint32_t max = ((1U << sat) - 1U); + if (val > (int32_t)max) + { + return max; + } + else if (val < 0) + { + return 0U; + } + } + return (uint32_t)val; +} + +#endif /* ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ + (defined (__ARM_ARCH_7EM__) && (__ARM_ARCH_7EM__ == 1)) ) */ + +/*@}*/ /* end of group CMSIS_Core_InstructionInterface */ + + +/* ################### Compiler specific Intrinsics ########################### */ +/** \defgroup CMSIS_SIMD_intrinsics CMSIS SIMD Intrinsics + Access to dedicated SIMD instructions + @{ +*/ + +#if ((defined (__ARM_ARCH_7EM__) && (__ARM_ARCH_7EM__ == 1)) ) + +#define __SADD8 __sadd8 +#define __QADD8 __qadd8 +#define __SHADD8 __shadd8 +#define __UADD8 __uadd8 +#define __UQADD8 __uqadd8 +#define __UHADD8 __uhadd8 +#define __SSUB8 __ssub8 +#define __QSUB8 __qsub8 +#define __SHSUB8 __shsub8 +#define __USUB8 __usub8 +#define __UQSUB8 __uqsub8 +#define __UHSUB8 __uhsub8 +#define __SADD16 __sadd16 +#define __QADD16 __qadd16 +#define __SHADD16 __shadd16 +#define __UADD16 __uadd16 +#define __UQADD16 __uqadd16 +#define __UHADD16 __uhadd16 +#define __SSUB16 __ssub16 +#define __QSUB16 __qsub16 +#define __SHSUB16 __shsub16 +#define __USUB16 __usub16 +#define __UQSUB16 __uqsub16 +#define __UHSUB16 __uhsub16 +#define __SASX __sasx +#define __QASX __qasx +#define __SHASX __shasx +#define __UASX __uasx +#define __UQASX __uqasx +#define __UHASX __uhasx +#define __SSAX __ssax +#define __QSAX __qsax +#define __SHSAX __shsax +#define __USAX __usax +#define __UQSAX __uqsax +#define __UHSAX __uhsax +#define __USAD8 __usad8 +#define __USADA8 __usada8 +#define __SSAT16 __ssat16 +#define __USAT16 __usat16 +#define __UXTB16 __uxtb16 +#define __UXTAB16 __uxtab16 
+#define __SXTB16 __sxtb16 +#define __SXTAB16 __sxtab16 +#define __SMUAD __smuad +#define __SMUADX __smuadx +#define __SMLAD __smlad +#define __SMLADX __smladx +#define __SMLALD __smlald +#define __SMLALDX __smlaldx +#define __SMUSD __smusd +#define __SMUSDX __smusdx +#define __SMLSD __smlsd +#define __SMLSDX __smlsdx +#define __SMLSLD __smlsld +#define __SMLSLDX __smlsldx +#define __SEL __sel +#define __QADD __qadd +#define __QSUB __qsub + +#define __PKHBT(ARG1,ARG2,ARG3) ( ((((uint32_t)(ARG1)) ) & 0x0000FFFFUL) | \ + ((((uint32_t)(ARG2)) << (ARG3)) & 0xFFFF0000UL) ) + +#define __PKHTB(ARG1,ARG2,ARG3) ( ((((uint32_t)(ARG1)) ) & 0xFFFF0000UL) | \ + ((((uint32_t)(ARG2)) >> (ARG3)) & 0x0000FFFFUL) ) + +#define __SMMLA(ARG1,ARG2,ARG3) ( (int32_t)((((int64_t)(ARG1) * (ARG2)) + \ + ((int64_t)(ARG3) << 32U) ) >> 32U)) + +#endif /* ((defined (__ARM_ARCH_7EM__) && (__ARM_ARCH_7EM__ == 1)) ) */ +/*@} end of group CMSIS_SIMD_intrinsics */ + + +#endif /* __CMSIS_ARMCC_H */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Drivers/CMSIS/Include/cmsis_armclang.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Drivers/CMSIS/Include/cmsis_armclang.h new file mode 100644 index 0000000..d8031b0 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Drivers/CMSIS/Include/cmsis_armclang.h @@ -0,0 +1,1869 @@ +/**************************************************************************//** + * @file cmsis_armclang.h + * @brief CMSIS compiler armclang (Arm Compiler 6) header file + * @version V5.0.4 + * @date 10. January 2018 + ******************************************************************************/ +/* + * Copyright (c) 2009-2018 Arm Limited. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +/*lint -esym(9058, IRQn)*/ /* disable MISRA 2012 Rule 2.4 for IRQn */ + +#ifndef __CMSIS_ARMCLANG_H +#define __CMSIS_ARMCLANG_H + +#pragma clang system_header /* treat file as system include file */ + +#ifndef __ARM_COMPAT_H +#include /* Compatibility header for Arm Compiler 5 intrinsics */ +#endif + +/* CMSIS compiler specific defines */ +#ifndef __ASM + #define __ASM __asm +#endif +#ifndef __INLINE + #define __INLINE __inline +#endif +#ifndef __STATIC_INLINE + #define __STATIC_INLINE static __inline +#endif +#ifndef __STATIC_FORCEINLINE + #define __STATIC_FORCEINLINE __attribute__((always_inline)) static __inline +#endif +#ifndef __NO_RETURN + #define __NO_RETURN __attribute__((__noreturn__)) +#endif +#ifndef __USED + #define __USED __attribute__((used)) +#endif +#ifndef __WEAK + #define __WEAK __attribute__((weak)) +#endif +#ifndef __PACKED + #define __PACKED __attribute__((packed, aligned(1))) +#endif +#ifndef __PACKED_STRUCT + #define __PACKED_STRUCT struct __attribute__((packed, aligned(1))) +#endif +#ifndef __PACKED_UNION + #define __PACKED_UNION union __attribute__((packed, aligned(1))) +#endif +#ifndef __UNALIGNED_UINT32 /* deprecated */ + #pragma clang diagnostic push + #pragma clang diagnostic ignored "-Wpacked" +/*lint -esym(9058, T_UINT32)*/ /* disable MISRA 2012 Rule 2.4 for T_UINT32 */ + struct __attribute__((packed)) T_UINT32 { uint32_t v; }; + #pragma clang diagnostic pop + #define __UNALIGNED_UINT32(x) (((struct T_UINT32 *)(x))->v) +#endif +#ifndef __UNALIGNED_UINT16_WRITE + #pragma clang diagnostic push + #pragma clang diagnostic ignored "-Wpacked" +/*lint -esym(9058, T_UINT16_WRITE)*/ /* disable MISRA 2012 Rule 2.4 for T_UINT16_WRITE */ + __PACKED_STRUCT T_UINT16_WRITE { uint16_t v; }; + #pragma clang diagnostic pop + #define __UNALIGNED_UINT16_WRITE(addr, val) (void)((((struct T_UINT16_WRITE *)(void *)(addr))->v) = (val)) +#endif +#ifndef __UNALIGNED_UINT16_READ + #pragma clang diagnostic push + #pragma clang diagnostic ignored "-Wpacked" +/*lint -esym(9058, T_UINT16_READ)*/ /* disable MISRA 2012 Rule 2.4 for T_UINT16_READ */ + __PACKED_STRUCT T_UINT16_READ { uint16_t v; }; + #pragma clang diagnostic pop + #define __UNALIGNED_UINT16_READ(addr) (((const struct T_UINT16_READ *)(const void *)(addr))->v) +#endif +#ifndef __UNALIGNED_UINT32_WRITE + #pragma clang diagnostic push + #pragma clang diagnostic ignored "-Wpacked" +/*lint -esym(9058, T_UINT32_WRITE)*/ /* disable MISRA 2012 Rule 2.4 for T_UINT32_WRITE */ + __PACKED_STRUCT T_UINT32_WRITE { uint32_t v; }; + #pragma clang diagnostic pop + #define __UNALIGNED_UINT32_WRITE(addr, val) (void)((((struct T_UINT32_WRITE *)(void *)(addr))->v) = (val)) +#endif +#ifndef __UNALIGNED_UINT32_READ + #pragma clang diagnostic push + #pragma clang diagnostic ignored "-Wpacked" +/*lint -esym(9058, T_UINT32_READ)*/ /* disable MISRA 2012 Rule 2.4 for T_UINT32_READ */ + __PACKED_STRUCT T_UINT32_READ { uint32_t v; }; + #pragma clang diagnostic pop + #define __UNALIGNED_UINT32_READ(addr) (((const struct T_UINT32_READ *)(const void *)(addr))->v) +#endif +#ifndef __ALIGNED + #define __ALIGNED(x) __attribute__((aligned(x))) +#endif +#ifndef __RESTRICT + #define __RESTRICT __restrict +#endif + + +/* ########################### Core Function Access ########################### */ +/** \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_RegAccFunctions CMSIS Core Register Access Functions + @{ + */ + +/** + \brief Enable IRQ Interrupts + \details Enables IRQ interrupts by clearing the I-bit in the CPSR. 
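+           A typical pairing with __disable_irq() is a short critical section
+           that saves and restores PRIMASK; a minimal sketch (the protected
+           data is hypothetical):
+           \code
+           uint32_t primask = __get_PRIMASK();  // remember current mask state
+           __disable_irq();                     // mask IRQs
+           // ... briefly touch data shared with interrupt handlers ...
+           __set_PRIMASK(primask);              // restore the previous state
+           \endcode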
+ Can only be executed in Privileged modes. + */ +/* intrinsic void __enable_irq(); see arm_compat.h */ + + +/** + \brief Disable IRQ Interrupts + \details Disables IRQ interrupts by setting the I-bit in the CPSR. + Can only be executed in Privileged modes. + */ +/* intrinsic void __disable_irq(); see arm_compat.h */ + + +/** + \brief Get Control Register + \details Returns the content of the Control Register. + \return Control Register value + */ +__STATIC_FORCEINLINE uint32_t __get_CONTROL(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, control" : "=r" (result) ); + return(result); +} + + +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) +/** + \brief Get Control Register (non-secure) + \details Returns the content of the non-secure Control Register when in secure mode. + \return non-secure Control Register value + */ +__STATIC_FORCEINLINE uint32_t __TZ_get_CONTROL_NS(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, control_ns" : "=r" (result) ); + return(result); +} +#endif + + +/** + \brief Set Control Register + \details Writes the given value to the Control Register. + \param [in] control Control Register value to set + */ +__STATIC_FORCEINLINE void __set_CONTROL(uint32_t control) +{ + __ASM volatile ("MSR control, %0" : : "r" (control) : "memory"); +} + + +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) +/** + \brief Set Control Register (non-secure) + \details Writes the given value to the non-secure Control Register when in secure state. + \param [in] control Control Register value to set + */ +__STATIC_FORCEINLINE void __TZ_set_CONTROL_NS(uint32_t control) +{ + __ASM volatile ("MSR control_ns, %0" : : "r" (control) : "memory"); +} +#endif + + +/** + \brief Get IPSR Register + \details Returns the content of the IPSR Register. + \return IPSR Register value + */ +__STATIC_FORCEINLINE uint32_t __get_IPSR(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, ipsr" : "=r" (result) ); + return(result); +} + + +/** + \brief Get APSR Register + \details Returns the content of the APSR Register. + \return APSR Register value + */ +__STATIC_FORCEINLINE uint32_t __get_APSR(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, apsr" : "=r" (result) ); + return(result); +} + + +/** + \brief Get xPSR Register + \details Returns the content of the xPSR Register. + \return xPSR Register value + */ +__STATIC_FORCEINLINE uint32_t __get_xPSR(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, xpsr" : "=r" (result) ); + return(result); +} + + +/** + \brief Get Process Stack Pointer + \details Returns the current value of the Process Stack Pointer (PSP). + \return PSP Register value + */ +__STATIC_FORCEINLINE uint32_t __get_PSP(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, psp" : "=r" (result) ); + return(result); +} + + +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) +/** + \brief Get Process Stack Pointer (non-secure) + \details Returns the current value of the non-secure Process Stack Pointer (PSP) when in secure state. + \return PSP Register value + */ +__STATIC_FORCEINLINE uint32_t __TZ_get_PSP_NS(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, psp_ns" : "=r" (result) ); + return(result); +} +#endif + + +/** + \brief Set Process Stack Pointer + \details Assigns the given value to the Process Stack Pointer (PSP). 
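+           For illustration, a minimal sketch of pointing PSP at a freshly
+           prepared task stack before switching thread mode onto it (the stack
+           array is hypothetical; __ISB() is defined later in this header):
+           \code
+           extern uint32_t task_stack[256];
+
+           __set_PSP((uint32_t)&task_stack[256]);   // stack grows downwards from the top
+           __set_CONTROL(__get_CONTROL() | 0x2U);   // CONTROL.SPSEL = 1: thread mode uses PSP
+           __ISB();                                 // ensure the new stack pointer takes effect
+           \endcode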
+ \param [in] topOfProcStack Process Stack Pointer value to set + */ +__STATIC_FORCEINLINE void __set_PSP(uint32_t topOfProcStack) +{ + __ASM volatile ("MSR psp, %0" : : "r" (topOfProcStack) : ); +} + + +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) +/** + \brief Set Process Stack Pointer (non-secure) + \details Assigns the given value to the non-secure Process Stack Pointer (PSP) when in secure state. + \param [in] topOfProcStack Process Stack Pointer value to set + */ +__STATIC_FORCEINLINE void __TZ_set_PSP_NS(uint32_t topOfProcStack) +{ + __ASM volatile ("MSR psp_ns, %0" : : "r" (topOfProcStack) : ); +} +#endif + + +/** + \brief Get Main Stack Pointer + \details Returns the current value of the Main Stack Pointer (MSP). + \return MSP Register value + */ +__STATIC_FORCEINLINE uint32_t __get_MSP(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, msp" : "=r" (result) ); + return(result); +} + + +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) +/** + \brief Get Main Stack Pointer (non-secure) + \details Returns the current value of the non-secure Main Stack Pointer (MSP) when in secure state. + \return MSP Register value + */ +__STATIC_FORCEINLINE uint32_t __TZ_get_MSP_NS(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, msp_ns" : "=r" (result) ); + return(result); +} +#endif + + +/** + \brief Set Main Stack Pointer + \details Assigns the given value to the Main Stack Pointer (MSP). + \param [in] topOfMainStack Main Stack Pointer value to set + */ +__STATIC_FORCEINLINE void __set_MSP(uint32_t topOfMainStack) +{ + __ASM volatile ("MSR msp, %0" : : "r" (topOfMainStack) : ); +} + + +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) +/** + \brief Set Main Stack Pointer (non-secure) + \details Assigns the given value to the non-secure Main Stack Pointer (MSP) when in secure state. + \param [in] topOfMainStack Main Stack Pointer value to set + */ +__STATIC_FORCEINLINE void __TZ_set_MSP_NS(uint32_t topOfMainStack) +{ + __ASM volatile ("MSR msp_ns, %0" : : "r" (topOfMainStack) : ); +} +#endif + + +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) +/** + \brief Get Stack Pointer (non-secure) + \details Returns the current value of the non-secure Stack Pointer (SP) when in secure state. + \return SP Register value + */ +__STATIC_FORCEINLINE uint32_t __TZ_get_SP_NS(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, sp_ns" : "=r" (result) ); + return(result); +} + + +/** + \brief Set Stack Pointer (non-secure) + \details Assigns the given value to the non-secure Stack Pointer (SP) when in secure state. + \param [in] topOfStack Stack Pointer value to set + */ +__STATIC_FORCEINLINE void __TZ_set_SP_NS(uint32_t topOfStack) +{ + __ASM volatile ("MSR sp_ns, %0" : : "r" (topOfStack) : ); +} +#endif + + +/** + \brief Get Priority Mask + \details Returns the current state of the priority mask bit from the Priority Mask Register. + \return Priority Mask value + */ +__STATIC_FORCEINLINE uint32_t __get_PRIMASK(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, primask" : "=r" (result) ); + return(result); +} + + +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) +/** + \brief Get Priority Mask (non-secure) + \details Returns the current state of the non-secure priority mask bit from the Priority Mask Register when in secure state. 
+ \return Priority Mask value + */ +__STATIC_FORCEINLINE uint32_t __TZ_get_PRIMASK_NS(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, primask_ns" : "=r" (result) ); + return(result); +} +#endif + + +/** + \brief Set Priority Mask + \details Assigns the given value to the Priority Mask Register. + \param [in] priMask Priority Mask + */ +__STATIC_FORCEINLINE void __set_PRIMASK(uint32_t priMask) +{ + __ASM volatile ("MSR primask, %0" : : "r" (priMask) : "memory"); +} + + +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) +/** + \brief Set Priority Mask (non-secure) + \details Assigns the given value to the non-secure Priority Mask Register when in secure state. + \param [in] priMask Priority Mask + */ +__STATIC_FORCEINLINE void __TZ_set_PRIMASK_NS(uint32_t priMask) +{ + __ASM volatile ("MSR primask_ns, %0" : : "r" (priMask) : "memory"); +} +#endif + + +#if ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ + (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \ + (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) ) +/** + \brief Enable FIQ + \details Enables FIQ interrupts by clearing the F-bit in the CPSR. + Can only be executed in Privileged modes. + */ +#define __enable_fault_irq __enable_fiq /* see arm_compat.h */ + + +/** + \brief Disable FIQ + \details Disables FIQ interrupts by setting the F-bit in the CPSR. + Can only be executed in Privileged modes. + */ +#define __disable_fault_irq __disable_fiq /* see arm_compat.h */ + + +/** + \brief Get Base Priority + \details Returns the current value of the Base Priority register. + \return Base Priority register value + */ +__STATIC_FORCEINLINE uint32_t __get_BASEPRI(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, basepri" : "=r" (result) ); + return(result); +} + + +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) +/** + \brief Get Base Priority (non-secure) + \details Returns the current value of the non-secure Base Priority register when in secure state. + \return Base Priority register value + */ +__STATIC_FORCEINLINE uint32_t __TZ_get_BASEPRI_NS(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, basepri_ns" : "=r" (result) ); + return(result); +} +#endif + + +/** + \brief Set Base Priority + \details Assigns the given value to the Base Priority register. + \param [in] basePri Base Priority value to set + */ +__STATIC_FORCEINLINE void __set_BASEPRI(uint32_t basePri) +{ + __ASM volatile ("MSR basepri, %0" : : "r" (basePri) : "memory"); +} + + +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) +/** + \brief Set Base Priority (non-secure) + \details Assigns the given value to the non-secure Base Priority register when in secure state. + \param [in] basePri Base Priority value to set + */ +__STATIC_FORCEINLINE void __TZ_set_BASEPRI_NS(uint32_t basePri) +{ + __ASM volatile ("MSR basepri_ns, %0" : : "r" (basePri) : "memory"); +} +#endif + + +/** + \brief Set Base Priority with condition + \details Assigns the given value to the Base Priority register only if BASEPRI masking is disabled, + or the new value increases the BASEPRI priority level. + \param [in] basePri Base Priority value to set + */ +__STATIC_FORCEINLINE void __set_BASEPRI_MAX(uint32_t basePri) +{ + __ASM volatile ("MSR basepri_max, %0" : : "r" (basePri) : "memory"); +} + + +/** + \brief Get Fault Mask + \details Returns the current value of the Fault Mask register. 
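+           For illustration, a minimal sketch pairing this with __set_FAULTMASK()
+           (defined further below) to guard a very short sequence:
+           \code
+           uint32_t fm = __get_FAULTMASK();   // remember the current state
+           __set_FAULTMASK(1U);               // raise execution priority; only NMI can preempt
+           // ... very short critical sequence ...
+           __set_FAULTMASK(fm);               // restore the previous state
+           \endcode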
+ \return Fault Mask register value + */ +__STATIC_FORCEINLINE uint32_t __get_FAULTMASK(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, faultmask" : "=r" (result) ); + return(result); +} + + +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) +/** + \brief Get Fault Mask (non-secure) + \details Returns the current value of the non-secure Fault Mask register when in secure state. + \return Fault Mask register value + */ +__STATIC_FORCEINLINE uint32_t __TZ_get_FAULTMASK_NS(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, faultmask_ns" : "=r" (result) ); + return(result); +} +#endif + + +/** + \brief Set Fault Mask + \details Assigns the given value to the Fault Mask register. + \param [in] faultMask Fault Mask value to set + */ +__STATIC_FORCEINLINE void __set_FAULTMASK(uint32_t faultMask) +{ + __ASM volatile ("MSR faultmask, %0" : : "r" (faultMask) : "memory"); +} + + +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) +/** + \brief Set Fault Mask (non-secure) + \details Assigns the given value to the non-secure Fault Mask register when in secure state. + \param [in] faultMask Fault Mask value to set + */ +__STATIC_FORCEINLINE void __TZ_set_FAULTMASK_NS(uint32_t faultMask) +{ + __ASM volatile ("MSR faultmask_ns, %0" : : "r" (faultMask) : "memory"); +} +#endif + +#endif /* ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ + (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \ + (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) ) */ + + +#if ((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ + (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1)) ) + +/** + \brief Get Process Stack Pointer Limit + Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure + Stack Pointer Limit register hence zero is returned always in non-secure + mode. + + \details Returns the current value of the Process Stack Pointer Limit (PSPLIM). + \return PSPLIM Register value + */ +__STATIC_FORCEINLINE uint32_t __get_PSPLIM(void) +{ +#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) && \ + (!defined (__ARM_FEATURE_CMSE) || (__ARM_FEATURE_CMSE < 3))) + // without main extensions, the non-secure PSPLIM is RAZ/WI + return 0U; +#else + uint32_t result; + __ASM volatile ("MRS %0, psplim" : "=r" (result) ); + return result; +#endif +} + +#if (defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3)) +/** + \brief Get Process Stack Pointer Limit (non-secure) + Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure + Stack Pointer Limit register hence zero is returned always in non-secure + mode. + + \details Returns the current value of the non-secure Process Stack Pointer Limit (PSPLIM) when in secure state. + \return PSPLIM Register value + */ +__STATIC_FORCEINLINE uint32_t __TZ_get_PSPLIM_NS(void) +{ +#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1))) + // without main extensions, the non-secure PSPLIM is RAZ/WI + return 0U; +#else + uint32_t result; + __ASM volatile ("MRS %0, psplim_ns" : "=r" (result) ); + return result; +#endif +} +#endif + + +/** + \brief Set Process Stack Pointer Limit + Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure + Stack Pointer Limit register hence the write is silently ignored in non-secure + mode. + + \details Assigns the given value to the Process Stack Pointer Limit (PSPLIM). 
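+           For illustration, a minimal ARMv8-M sketch that guards a task stack
+           so an overflow is trapped instead of corrupting adjacent memory
+           (the stack array is hypothetical):
+           \code
+           extern uint32_t task_stack[256];
+
+           __set_PSPLIM((uint32_t)&task_stack[0]);  // lowest address the PSP may reach
+           __set_PSP((uint32_t)&task_stack[256]);   // initial top of the task stack
+           \endcode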
+ \param [in] ProcStackPtrLimit Process Stack Pointer Limit value to set + */ +__STATIC_FORCEINLINE void __set_PSPLIM(uint32_t ProcStackPtrLimit) +{ +#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) && \ + (!defined (__ARM_FEATURE_CMSE) || (__ARM_FEATURE_CMSE < 3))) + // without main extensions, the non-secure PSPLIM is RAZ/WI + (void)ProcStackPtrLimit; +#else + __ASM volatile ("MSR psplim, %0" : : "r" (ProcStackPtrLimit)); +#endif +} + + +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) +/** + \brief Set Process Stack Pointer (non-secure) + Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure + Stack Pointer Limit register hence the write is silently ignored in non-secure + mode. + + \details Assigns the given value to the non-secure Process Stack Pointer Limit (PSPLIM) when in secure state. + \param [in] ProcStackPtrLimit Process Stack Pointer Limit value to set + */ +__STATIC_FORCEINLINE void __TZ_set_PSPLIM_NS(uint32_t ProcStackPtrLimit) +{ +#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1))) + // without main extensions, the non-secure PSPLIM is RAZ/WI + (void)ProcStackPtrLimit; +#else + __ASM volatile ("MSR psplim_ns, %0\n" : : "r" (ProcStackPtrLimit)); +#endif +} +#endif + + +/** + \brief Get Main Stack Pointer Limit + Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure + Stack Pointer Limit register hence zero is returned always. + + \details Returns the current value of the Main Stack Pointer Limit (MSPLIM). + \return MSPLIM Register value + */ +__STATIC_FORCEINLINE uint32_t __get_MSPLIM(void) +{ +#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) && \ + (!defined (__ARM_FEATURE_CMSE) || (__ARM_FEATURE_CMSE < 3))) + // without main extensions, the non-secure MSPLIM is RAZ/WI + return 0U; +#else + uint32_t result; + __ASM volatile ("MRS %0, msplim" : "=r" (result) ); + return result; +#endif +} + + +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) +/** + \brief Get Main Stack Pointer Limit (non-secure) + Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure + Stack Pointer Limit register hence zero is returned always. + + \details Returns the current value of the non-secure Main Stack Pointer Limit(MSPLIM) when in secure state. + \return MSPLIM Register value + */ +__STATIC_FORCEINLINE uint32_t __TZ_get_MSPLIM_NS(void) +{ +#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1))) + // without main extensions, the non-secure MSPLIM is RAZ/WI + return 0U; +#else + uint32_t result; + __ASM volatile ("MRS %0, msplim_ns" : "=r" (result) ); + return result; +#endif +} +#endif + + +/** + \brief Set Main Stack Pointer Limit + Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure + Stack Pointer Limit register hence the write is silently ignored. + + \details Assigns the given value to the Main Stack Pointer Limit (MSPLIM). 
+ \param [in] MainStackPtrLimit Main Stack Pointer Limit value to set + */ +__STATIC_FORCEINLINE void __set_MSPLIM(uint32_t MainStackPtrLimit) +{ +#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) && \ + (!defined (__ARM_FEATURE_CMSE) || (__ARM_FEATURE_CMSE < 3))) + // without main extensions, the non-secure MSPLIM is RAZ/WI + (void)MainStackPtrLimit; +#else + __ASM volatile ("MSR msplim, %0" : : "r" (MainStackPtrLimit)); +#endif +} + + +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) +/** + \brief Set Main Stack Pointer Limit (non-secure) + Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure + Stack Pointer Limit register hence the write is silently ignored. + + \details Assigns the given value to the non-secure Main Stack Pointer Limit (MSPLIM) when in secure state. + \param [in] MainStackPtrLimit Main Stack Pointer value to set + */ +__STATIC_FORCEINLINE void __TZ_set_MSPLIM_NS(uint32_t MainStackPtrLimit) +{ +#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1))) + // without main extensions, the non-secure MSPLIM is RAZ/WI + (void)MainStackPtrLimit; +#else + __ASM volatile ("MSR msplim_ns, %0" : : "r" (MainStackPtrLimit)); +#endif +} +#endif + +#endif /* ((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ + (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1)) ) */ + +/** + \brief Get FPSCR + \details Returns the current value of the Floating Point Status/Control register. + \return Floating Point Status/Control register value + */ +#if ((defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U)) && \ + (defined (__FPU_USED ) && (__FPU_USED == 1U)) ) +#define __get_FPSCR (uint32_t)__builtin_arm_get_fpscr +#else +#define __get_FPSCR() ((uint32_t)0U) +#endif + +/** + \brief Set FPSCR + \details Assigns the given value to the Floating Point Status/Control register. + \param [in] fpscr Floating Point Status/Control value to set + */ +#if ((defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U)) && \ + (defined (__FPU_USED ) && (__FPU_USED == 1U)) ) +#define __set_FPSCR __builtin_arm_set_fpscr +#else +#define __set_FPSCR(x) ((void)(x)) +#endif + + +/*@} end of CMSIS_Core_RegAccFunctions */ + + +/* ########################## Core Instruction Access ######################### */ +/** \defgroup CMSIS_Core_InstructionInterface CMSIS Core Instruction Interface + Access to dedicated instructions + @{ +*/ + +/* Define macros for porting to both thumb1 and thumb2. + * For thumb1, use low register (r0-r7), specified by constraint "l" + * Otherwise, use general registers, specified by constraint "r" */ +#if defined (__thumb__) && !defined (__thumb2__) +#define __CMSIS_GCC_OUT_REG(r) "=l" (r) +#define __CMSIS_GCC_USE_REG(r) "l" (r) +#else +#define __CMSIS_GCC_OUT_REG(r) "=r" (r) +#define __CMSIS_GCC_USE_REG(r) "r" (r) +#endif + +/** + \brief No Operation + \details No Operation does nothing. This instruction can be used for code alignment purposes. + */ +#define __NOP __builtin_arm_nop + +/** + \brief Wait For Interrupt + \details Wait For Interrupt is a hint instruction that suspends execution until one of a number of events occurs. + */ +#define __WFI __builtin_arm_wfi + + +/** + \brief Wait For Event + \details Wait For Event is a hint instruction that permits the processor to enter + a low-power state until one of a number of events occurs. + */ +#define __WFE __builtin_arm_wfe + + +/** + \brief Send Event + \details Send Event is a hint instruction. It causes an event to be signaled to the CPU. 
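+           A common pattern pairing SEV with __WFE() (defined above) is a
+           low-power polling loop; a minimal sketch with a hypothetical flag
+           set by another core or an interrupt handler:
+           \code
+           volatile uint32_t data_ready;
+
+           while (data_ready == 0U)
+           {
+             __WFE();   // sleep until an event (SEV, interrupt, debug) arrives
+           }
+           \endcode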
+ */ +#define __SEV __builtin_arm_sev + + +/** + \brief Instruction Synchronization Barrier + \details Instruction Synchronization Barrier flushes the pipeline in the processor, + so that all instructions following the ISB are fetched from cache or memory, + after the instruction has been completed. + */ +#define __ISB() __builtin_arm_isb(0xF); + +/** + \brief Data Synchronization Barrier + \details Acts as a special kind of Data Memory Barrier. + It completes when all explicit memory accesses before this instruction complete. + */ +#define __DSB() __builtin_arm_dsb(0xF); + + +/** + \brief Data Memory Barrier + \details Ensures the apparent order of the explicit memory operations before + and after the instruction, without ensuring their completion. + */ +#define __DMB() __builtin_arm_dmb(0xF); + + +/** + \brief Reverse byte order (32 bit) + \details Reverses the byte order in unsigned integer value. For example, 0x12345678 becomes 0x78563412. + \param [in] value Value to reverse + \return Reversed value + */ +#define __REV(value) __builtin_bswap32(value) + + +/** + \brief Reverse byte order (16 bit) + \details Reverses the byte order within each halfword of a word. For example, 0x12345678 becomes 0x34127856. + \param [in] value Value to reverse + \return Reversed value + */ +#define __REV16(value) __ROR(__REV(value), 16) + + +/** + \brief Reverse byte order (16 bit) + \details Reverses the byte order in a 16-bit value and returns the signed 16-bit result. For example, 0x0080 becomes 0x8000. + \param [in] value Value to reverse + \return Reversed value + */ +#define __REVSH(value) (int16_t)__builtin_bswap16(value) + + +/** + \brief Rotate Right in unsigned value (32 bit) + \details Rotate Right (immediate) provides the value of the contents of a register rotated by a variable number of bits. + \param [in] op1 Value to rotate + \param [in] op2 Number of Bits to rotate + \return Rotated value + */ +__STATIC_FORCEINLINE uint32_t __ROR(uint32_t op1, uint32_t op2) +{ + op2 %= 32U; + if (op2 == 0U) + { + return op1; + } + return (op1 >> op2) | (op1 << (32U - op2)); +} + + +/** + \brief Breakpoint + \details Causes the processor to enter Debug state. + Debug tools can use this to investigate system state when the instruction at a particular address is reached. + \param [in] value is ignored by the processor. + If required, a debugger can use it to store additional information about the breakpoint. + */ +#define __BKPT(value) __ASM volatile ("bkpt "#value) + + +/** + \brief Reverse bit order of value + \details Reverses the bit order of the given value. + \param [in] value Value to reverse + \return Reversed value + */ +#define __RBIT __builtin_arm_rbit + +/** + \brief Count leading zeros + \details Counts the number of leading zeros of a data value. + \param [in] value Value to count the leading zeros + \return number of leading zeros in value + */ +#define __CLZ (uint8_t)__builtin_clz + + +#if ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ + (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \ + (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ + (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1)) ) +/** + \brief LDR Exclusive (8 bit) + \details Executes a exclusive LDR instruction for 8 bit value. + \param [in] ptr Pointer to data + \return value of type uint8_t at (*ptr) + */ +#define __LDREXB (uint8_t)__builtin_arm_ldrex + + +/** + \brief LDR Exclusive (16 bit) + \details Executes a exclusive LDR instruction for 16 bit values. 
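+           For illustration, a minimal sketch that atomically sets bits in a
+           16-bit status word (the variable 'status' is hypothetical):
+           \code
+           static volatile uint16_t status;
+
+           void status_set_bits(uint16_t mask)
+           {
+             uint16_t v;
+             do {
+               v = __LDREXH(&status);                                  // exclusive read
+             } while (__STREXH((uint16_t)(v | mask), &status) != 0U);  // retry if lost
+           }
+           \endcode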
+ \param [in] ptr Pointer to data + \return value of type uint16_t at (*ptr) + */ +#define __LDREXH (uint16_t)__builtin_arm_ldrex + + +/** + \brief LDR Exclusive (32 bit) + \details Executes a exclusive LDR instruction for 32 bit values. + \param [in] ptr Pointer to data + \return value of type uint32_t at (*ptr) + */ +#define __LDREXW (uint32_t)__builtin_arm_ldrex + + +/** + \brief STR Exclusive (8 bit) + \details Executes a exclusive STR instruction for 8 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + \return 0 Function succeeded + \return 1 Function failed + */ +#define __STREXB (uint32_t)__builtin_arm_strex + + +/** + \brief STR Exclusive (16 bit) + \details Executes a exclusive STR instruction for 16 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + \return 0 Function succeeded + \return 1 Function failed + */ +#define __STREXH (uint32_t)__builtin_arm_strex + + +/** + \brief STR Exclusive (32 bit) + \details Executes a exclusive STR instruction for 32 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + \return 0 Function succeeded + \return 1 Function failed + */ +#define __STREXW (uint32_t)__builtin_arm_strex + + +/** + \brief Remove the exclusive lock + \details Removes the exclusive lock which is created by LDREX. + */ +#define __CLREX __builtin_arm_clrex + +#endif /* ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ + (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \ + (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ + (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1)) ) */ + + +#if ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ + (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \ + (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) ) + +/** + \brief Signed Saturate + \details Saturates a signed value. + \param [in] value Value to be saturated + \param [in] sat Bit position to saturate to (1..32) + \return Saturated value + */ +#define __SSAT __builtin_arm_ssat + + +/** + \brief Unsigned Saturate + \details Saturates an unsigned value. + \param [in] value Value to be saturated + \param [in] sat Bit position to saturate to (0..31) + \return Saturated value + */ +#define __USAT __builtin_arm_usat + + +/** + \brief Rotate Right with Extend (32 bit) + \details Moves each bit of a bitstring right by one bit. + The carry input is shifted in at the left end of the bitstring. + \param [in] value Value to rotate + \return Rotated value + */ +__STATIC_FORCEINLINE uint32_t __RRX(uint32_t value) +{ + uint32_t result; + + __ASM volatile ("rrx %0, %1" : __CMSIS_GCC_OUT_REG (result) : __CMSIS_GCC_USE_REG (value) ); + return(result); +} + + +/** + \brief LDRT Unprivileged (8 bit) + \details Executes a Unprivileged LDRT instruction for 8 bit value. + \param [in] ptr Pointer to data + \return value of type uint8_t at (*ptr) + */ +__STATIC_FORCEINLINE uint8_t __LDRBT(volatile uint8_t *ptr) +{ + uint32_t result; + + __ASM volatile ("ldrbt %0, %1" : "=r" (result) : "Q" (*ptr) ); + return ((uint8_t) result); /* Add explicit type cast here */ +} + + +/** + \brief LDRT Unprivileged (16 bit) + \details Executes a Unprivileged LDRT instruction for 16 bit values. 
+ \param [in] ptr Pointer to data + \return value of type uint16_t at (*ptr) + */ +__STATIC_FORCEINLINE uint16_t __LDRHT(volatile uint16_t *ptr) +{ + uint32_t result; + + __ASM volatile ("ldrht %0, %1" : "=r" (result) : "Q" (*ptr) ); + return ((uint16_t) result); /* Add explicit type cast here */ +} + + +/** + \brief LDRT Unprivileged (32 bit) + \details Executes a Unprivileged LDRT instruction for 32 bit values. + \param [in] ptr Pointer to data + \return value of type uint32_t at (*ptr) + */ +__STATIC_FORCEINLINE uint32_t __LDRT(volatile uint32_t *ptr) +{ + uint32_t result; + + __ASM volatile ("ldrt %0, %1" : "=r" (result) : "Q" (*ptr) ); + return(result); +} + + +/** + \brief STRT Unprivileged (8 bit) + \details Executes a Unprivileged STRT instruction for 8 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + */ +__STATIC_FORCEINLINE void __STRBT(uint8_t value, volatile uint8_t *ptr) +{ + __ASM volatile ("strbt %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) ); +} + + +/** + \brief STRT Unprivileged (16 bit) + \details Executes a Unprivileged STRT instruction for 16 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + */ +__STATIC_FORCEINLINE void __STRHT(uint16_t value, volatile uint16_t *ptr) +{ + __ASM volatile ("strht %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) ); +} + + +/** + \brief STRT Unprivileged (32 bit) + \details Executes a Unprivileged STRT instruction for 32 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + */ +__STATIC_FORCEINLINE void __STRT(uint32_t value, volatile uint32_t *ptr) +{ + __ASM volatile ("strt %1, %0" : "=Q" (*ptr) : "r" (value) ); +} + +#else /* ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ + (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \ + (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) ) */ + +/** + \brief Signed Saturate + \details Saturates a signed value. + \param [in] value Value to be saturated + \param [in] sat Bit position to saturate to (1..32) + \return Saturated value + */ +__STATIC_FORCEINLINE int32_t __SSAT(int32_t val, uint32_t sat) +{ + if ((sat >= 1U) && (sat <= 32U)) + { + const int32_t max = (int32_t)((1U << (sat - 1U)) - 1U); + const int32_t min = -1 - max ; + if (val > max) + { + return max; + } + else if (val < min) + { + return min; + } + } + return val; +} + +/** + \brief Unsigned Saturate + \details Saturates an unsigned value. + \param [in] value Value to be saturated + \param [in] sat Bit position to saturate to (0..31) + \return Saturated value + */ +__STATIC_FORCEINLINE uint32_t __USAT(int32_t val, uint32_t sat) +{ + if (sat <= 31U) + { + const uint32_t max = ((1U << sat) - 1U); + if (val > (int32_t)max) + { + return max; + } + else if (val < 0) + { + return 0U; + } + } + return (uint32_t)val; +} + +#endif /* ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ + (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \ + (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) ) */ + + +#if ((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ + (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1)) ) +/** + \brief Load-Acquire (8 bit) + \details Executes a LDAB instruction for 8 bit value. 
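+           For illustration, a minimal consumer-side sketch of a flag handshake,
+           where the producer publishes data and then sets the (hypothetical)
+           flag with __STLB(), defined below:
+           \code
+           static volatile uint8_t ready;
+
+           while (__LDAB(&ready) == 0U)
+           {
+             // spin; the acquire semantics order the flag read before any
+             // subsequent reads of the published data
+           }
+           \endcode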
+ \param [in] ptr Pointer to data + \return value of type uint8_t at (*ptr) + */ +__STATIC_FORCEINLINE uint8_t __LDAB(volatile uint8_t *ptr) +{ + uint32_t result; + + __ASM volatile ("ldab %0, %1" : "=r" (result) : "Q" (*ptr) ); + return ((uint8_t) result); +} + + +/** + \brief Load-Acquire (16 bit) + \details Executes a LDAH instruction for 16 bit values. + \param [in] ptr Pointer to data + \return value of type uint16_t at (*ptr) + */ +__STATIC_FORCEINLINE uint16_t __LDAH(volatile uint16_t *ptr) +{ + uint32_t result; + + __ASM volatile ("ldah %0, %1" : "=r" (result) : "Q" (*ptr) ); + return ((uint16_t) result); +} + + +/** + \brief Load-Acquire (32 bit) + \details Executes a LDA instruction for 32 bit values. + \param [in] ptr Pointer to data + \return value of type uint32_t at (*ptr) + */ +__STATIC_FORCEINLINE uint32_t __LDA(volatile uint32_t *ptr) +{ + uint32_t result; + + __ASM volatile ("lda %0, %1" : "=r" (result) : "Q" (*ptr) ); + return(result); +} + + +/** + \brief Store-Release (8 bit) + \details Executes a STLB instruction for 8 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + */ +__STATIC_FORCEINLINE void __STLB(uint8_t value, volatile uint8_t *ptr) +{ + __ASM volatile ("stlb %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) ); +} + + +/** + \brief Store-Release (16 bit) + \details Executes a STLH instruction for 16 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + */ +__STATIC_FORCEINLINE void __STLH(uint16_t value, volatile uint16_t *ptr) +{ + __ASM volatile ("stlh %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) ); +} + + +/** + \brief Store-Release (32 bit) + \details Executes a STL instruction for 32 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + */ +__STATIC_FORCEINLINE void __STL(uint32_t value, volatile uint32_t *ptr) +{ + __ASM volatile ("stl %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) ); +} + + +/** + \brief Load-Acquire Exclusive (8 bit) + \details Executes a LDAB exclusive instruction for 8 bit value. + \param [in] ptr Pointer to data + \return value of type uint8_t at (*ptr) + */ +#define __LDAEXB (uint8_t)__builtin_arm_ldaex + + +/** + \brief Load-Acquire Exclusive (16 bit) + \details Executes a LDAH exclusive instruction for 16 bit values. + \param [in] ptr Pointer to data + \return value of type uint16_t at (*ptr) + */ +#define __LDAEXH (uint16_t)__builtin_arm_ldaex + + +/** + \brief Load-Acquire Exclusive (32 bit) + \details Executes a LDA exclusive instruction for 32 bit values. + \param [in] ptr Pointer to data + \return value of type uint32_t at (*ptr) + */ +#define __LDAEX (uint32_t)__builtin_arm_ldaex + + +/** + \brief Store-Release Exclusive (8 bit) + \details Executes a STLB exclusive instruction for 8 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + \return 0 Function succeeded + \return 1 Function failed + */ +#define __STLEXB (uint32_t)__builtin_arm_stlex + + +/** + \brief Store-Release Exclusive (16 bit) + \details Executes a STLH exclusive instruction for 16 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + \return 0 Function succeeded + \return 1 Function failed + */ +#define __STLEXH (uint32_t)__builtin_arm_stlex + + +/** + \brief Store-Release Exclusive (32 bit) + \details Executes a STL exclusive instruction for 32 bit values. 
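+           For illustration, a minimal lock-acquire sketch built on the
+           acquire/release exclusives (the 'lock' variable is hypothetical,
+           0 = free, 1 = taken):
+           \code
+           static volatile uint32_t lock;
+
+           void lock_acquire(void)
+           {
+             do {
+               while (__LDAEX(&lock) != 0U) { }   // wait until the lock looks free
+             } while (__STLEX(1U, &lock) != 0U);  // try to claim it; retry if the exclusive was lost
+           }
+           \endcode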
+ \param [in] value Value to store + \param [in] ptr Pointer to location + \return 0 Function succeeded + \return 1 Function failed + */ +#define __STLEX (uint32_t)__builtin_arm_stlex + +#endif /* ((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ + (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1)) ) */ + +/*@}*/ /* end of group CMSIS_Core_InstructionInterface */ + + +/* ################### Compiler specific Intrinsics ########################### */ +/** \defgroup CMSIS_SIMD_intrinsics CMSIS SIMD Intrinsics + Access to dedicated SIMD instructions + @{ +*/ + +#if (defined (__ARM_FEATURE_DSP) && (__ARM_FEATURE_DSP == 1)) + +__STATIC_FORCEINLINE uint32_t __SADD8(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("sadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __QADD8(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("qadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __SHADD8(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("shadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __UADD8(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("uadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __UQADD8(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("uqadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __UHADD8(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("uhadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + + +__STATIC_FORCEINLINE uint32_t __SSUB8(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("ssub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __QSUB8(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("qsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __SHSUB8(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("shsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __USUB8(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("usub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __UQSUB8(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("uqsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __UHSUB8(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("uhsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + + +__STATIC_FORCEINLINE uint32_t __SADD16(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("sadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __QADD16(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("qadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __SHADD16(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("shadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" 
(op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __UADD16(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("uadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __UQADD16(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("uqadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __UHADD16(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("uhadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __SSUB16(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("ssub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __QSUB16(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("qsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __SHSUB16(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("shsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __USUB16(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("usub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __UQSUB16(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("uqsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __UHSUB16(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("uhsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __SASX(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("sasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __QASX(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("qasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __SHASX(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("shasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __UASX(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("uasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __UQASX(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("uqasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __UHASX(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("uhasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __SSAX(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("ssax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __QSAX(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("qsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __SHSAX(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("shsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t 
__USAX(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("usax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __UQSAX(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("uqsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __UHSAX(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("uhsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __USAD8(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("usad8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __USADA8(uint32_t op1, uint32_t op2, uint32_t op3) +{ + uint32_t result; + + __ASM volatile ("usada8 %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) ); + return(result); +} + +#define __SSAT16(ARG1,ARG2) \ +({ \ + int32_t __RES, __ARG1 = (ARG1); \ + __ASM ("ssat16 %0, %1, %2" : "=r" (__RES) : "I" (ARG2), "r" (__ARG1) ); \ + __RES; \ + }) + +#define __USAT16(ARG1,ARG2) \ +({ \ + uint32_t __RES, __ARG1 = (ARG1); \ + __ASM ("usat16 %0, %1, %2" : "=r" (__RES) : "I" (ARG2), "r" (__ARG1) ); \ + __RES; \ + }) + +__STATIC_FORCEINLINE uint32_t __UXTB16(uint32_t op1) +{ + uint32_t result; + + __ASM volatile ("uxtb16 %0, %1" : "=r" (result) : "r" (op1)); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __UXTAB16(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("uxtab16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __SXTB16(uint32_t op1) +{ + uint32_t result; + + __ASM volatile ("sxtb16 %0, %1" : "=r" (result) : "r" (op1)); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __SXTAB16(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("sxtab16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __SMUAD (uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("smuad %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __SMUADX (uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("smuadx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __SMLAD (uint32_t op1, uint32_t op2, uint32_t op3) +{ + uint32_t result; + + __ASM volatile ("smlad %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __SMLADX (uint32_t op1, uint32_t op2, uint32_t op3) +{ + uint32_t result; + + __ASM volatile ("smladx %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) ); + return(result); +} + +__STATIC_FORCEINLINE uint64_t __SMLALD (uint32_t op1, uint32_t op2, uint64_t acc) +{ + union llreg_u{ + uint32_t w32[2]; + uint64_t w64; + } llr; + llr.w64 = acc; + +#ifndef __ARMEB__ /* Little endian */ + __ASM volatile ("smlald %0, %1, %2, %3" : "=r" (llr.w32[0]), "=r" (llr.w32[1]): "r" (op1), "r" (op2) , "0" (llr.w32[0]), "1" (llr.w32[1]) ); +#else /* Big endian */ + __ASM volatile ("smlald %0, %1, %2, %3" : "=r" (llr.w32[1]), "=r" (llr.w32[0]): "r" (op1), "r" (op2) , "0" (llr.w32[1]), "1" (llr.w32[0]) ); +#endif + + return(llr.w64); +} + +__STATIC_FORCEINLINE uint64_t __SMLALDX (uint32_t op1, uint32_t op2, uint64_t acc) +{ + union llreg_u{ + uint32_t w32[2]; + uint64_t w64; + } 
llr; + llr.w64 = acc; + +#ifndef __ARMEB__ /* Little endian */ + __ASM volatile ("smlaldx %0, %1, %2, %3" : "=r" (llr.w32[0]), "=r" (llr.w32[1]): "r" (op1), "r" (op2) , "0" (llr.w32[0]), "1" (llr.w32[1]) ); +#else /* Big endian */ + __ASM volatile ("smlaldx %0, %1, %2, %3" : "=r" (llr.w32[1]), "=r" (llr.w32[0]): "r" (op1), "r" (op2) , "0" (llr.w32[1]), "1" (llr.w32[0]) ); +#endif + + return(llr.w64); +} + +__STATIC_FORCEINLINE uint32_t __SMUSD (uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("smusd %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __SMUSDX (uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("smusdx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __SMLSD (uint32_t op1, uint32_t op2, uint32_t op3) +{ + uint32_t result; + + __ASM volatile ("smlsd %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __SMLSDX (uint32_t op1, uint32_t op2, uint32_t op3) +{ + uint32_t result; + + __ASM volatile ("smlsdx %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) ); + return(result); +} + +__STATIC_FORCEINLINE uint64_t __SMLSLD (uint32_t op1, uint32_t op2, uint64_t acc) +{ + union llreg_u{ + uint32_t w32[2]; + uint64_t w64; + } llr; + llr.w64 = acc; + +#ifndef __ARMEB__ /* Little endian */ + __ASM volatile ("smlsld %0, %1, %2, %3" : "=r" (llr.w32[0]), "=r" (llr.w32[1]): "r" (op1), "r" (op2) , "0" (llr.w32[0]), "1" (llr.w32[1]) ); +#else /* Big endian */ + __ASM volatile ("smlsld %0, %1, %2, %3" : "=r" (llr.w32[1]), "=r" (llr.w32[0]): "r" (op1), "r" (op2) , "0" (llr.w32[1]), "1" (llr.w32[0]) ); +#endif + + return(llr.w64); +} + +__STATIC_FORCEINLINE uint64_t __SMLSLDX (uint32_t op1, uint32_t op2, uint64_t acc) +{ + union llreg_u{ + uint32_t w32[2]; + uint64_t w64; + } llr; + llr.w64 = acc; + +#ifndef __ARMEB__ /* Little endian */ + __ASM volatile ("smlsldx %0, %1, %2, %3" : "=r" (llr.w32[0]), "=r" (llr.w32[1]): "r" (op1), "r" (op2) , "0" (llr.w32[0]), "1" (llr.w32[1]) ); +#else /* Big endian */ + __ASM volatile ("smlsldx %0, %1, %2, %3" : "=r" (llr.w32[1]), "=r" (llr.w32[0]): "r" (op1), "r" (op2) , "0" (llr.w32[1]), "1" (llr.w32[0]) ); +#endif + + return(llr.w64); +} + +__STATIC_FORCEINLINE uint32_t __SEL (uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("sel %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE int32_t __QADD( int32_t op1, int32_t op2) +{ + int32_t result; + + __ASM volatile ("qadd %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE int32_t __QSUB( int32_t op1, int32_t op2) +{ + int32_t result; + + __ASM volatile ("qsub %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +#if 0 +#define __PKHBT(ARG1,ARG2,ARG3) \ +({ \ + uint32_t __RES, __ARG1 = (ARG1), __ARG2 = (ARG2); \ + __ASM ("pkhbt %0, %1, %2, lsl %3" : "=r" (__RES) : "r" (__ARG1), "r" (__ARG2), "I" (ARG3) ); \ + __RES; \ + }) + +#define __PKHTB(ARG1,ARG2,ARG3) \ +({ \ + uint32_t __RES, __ARG1 = (ARG1), __ARG2 = (ARG2); \ + if (ARG3 == 0) \ + __ASM ("pkhtb %0, %1, %2" : "=r" (__RES) : "r" (__ARG1), "r" (__ARG2) ); \ + else \ + __ASM ("pkhtb %0, %1, %2, asr %3" : "=r" (__RES) : "r" (__ARG1), "r" (__ARG2), "I" (ARG3) ); \ + __RES; \ + }) +#endif + +#define __PKHBT(ARG1,ARG2,ARG3) ( ((((uint32_t)(ARG1)) ) & 0x0000FFFFUL) | \ + ((((uint32_t)(ARG2)) 
<< (ARG3)) & 0xFFFF0000UL) ) + +#define __PKHTB(ARG1,ARG2,ARG3) ( ((((uint32_t)(ARG1)) ) & 0xFFFF0000UL) | \ + ((((uint32_t)(ARG2)) >> (ARG3)) & 0x0000FFFFUL) ) + +__STATIC_FORCEINLINE int32_t __SMMLA (int32_t op1, int32_t op2, int32_t op3) +{ + int32_t result; + + __ASM volatile ("smmla %0, %1, %2, %3" : "=r" (result): "r" (op1), "r" (op2), "r" (op3) ); + return(result); +} + +#endif /* (__ARM_FEATURE_DSP == 1) */ +/*@} end of group CMSIS_SIMD_intrinsics */ + + +#endif /* __CMSIS_ARMCLANG_H */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Drivers/CMSIS/Include/cmsis_compiler.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Drivers/CMSIS/Include/cmsis_compiler.h new file mode 100644 index 0000000..79a2cac --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Drivers/CMSIS/Include/cmsis_compiler.h @@ -0,0 +1,266 @@ +/**************************************************************************//** + * @file cmsis_compiler.h + * @brief CMSIS compiler generic header file + * @version V5.0.4 + * @date 10. January 2018 + ******************************************************************************/ +/* + * Copyright (c) 2009-2018 Arm Limited. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef __CMSIS_COMPILER_H +#define __CMSIS_COMPILER_H + +#include + +/* + * Arm Compiler 4/5 + */ +#if defined ( __CC_ARM ) + #include "cmsis_armcc.h" + + +/* + * Arm Compiler 6 (armclang) + */ +#elif defined (__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050) + #include "cmsis_armclang.h" + + +/* + * GNU Compiler + */ +#elif defined ( __GNUC__ ) + #include "cmsis_gcc.h" + + +/* + * IAR Compiler + */ +#elif defined ( __ICCARM__ ) + #include + + +/* + * TI Arm Compiler + */ +#elif defined ( __TI_ARM__ ) + #include + + #ifndef __ASM + #define __ASM __asm + #endif + #ifndef __INLINE + #define __INLINE inline + #endif + #ifndef __STATIC_INLINE + #define __STATIC_INLINE static inline + #endif + #ifndef __STATIC_FORCEINLINE + #define __STATIC_FORCEINLINE __STATIC_INLINE + #endif + #ifndef __NO_RETURN + #define __NO_RETURN __attribute__((noreturn)) + #endif + #ifndef __USED + #define __USED __attribute__((used)) + #endif + #ifndef __WEAK + #define __WEAK __attribute__((weak)) + #endif + #ifndef __PACKED + #define __PACKED __attribute__((packed)) + #endif + #ifndef __PACKED_STRUCT + #define __PACKED_STRUCT struct __attribute__((packed)) + #endif + #ifndef __PACKED_UNION + #define __PACKED_UNION union __attribute__((packed)) + #endif + #ifndef __UNALIGNED_UINT32 /* deprecated */ + struct __attribute__((packed)) T_UINT32 { uint32_t v; }; + #define __UNALIGNED_UINT32(x) (((struct T_UINT32 *)(x))->v) + #endif + #ifndef __UNALIGNED_UINT16_WRITE + __PACKED_STRUCT T_UINT16_WRITE { uint16_t v; }; + #define __UNALIGNED_UINT16_WRITE(addr, val) (void)((((struct T_UINT16_WRITE *)(void*)(addr))->v) = (val)) + #endif + #ifndef __UNALIGNED_UINT16_READ + __PACKED_STRUCT T_UINT16_READ { uint16_t v; }; + #define __UNALIGNED_UINT16_READ(addr) (((const struct T_UINT16_READ *)(const void *)(addr))->v) + #endif + #ifndef __UNALIGNED_UINT32_WRITE + __PACKED_STRUCT T_UINT32_WRITE { uint32_t v; }; + #define __UNALIGNED_UINT32_WRITE(addr, val) (void)((((struct T_UINT32_WRITE *)(void *)(addr))->v) = (val)) + #endif + #ifndef __UNALIGNED_UINT32_READ + __PACKED_STRUCT T_UINT32_READ { uint32_t v; }; + #define __UNALIGNED_UINT32_READ(addr) (((const struct T_UINT32_READ *)(const void *)(addr))->v) + #endif + #ifndef __ALIGNED + #define __ALIGNED(x) __attribute__((aligned(x))) + #endif + #ifndef __RESTRICT + #warning No compiler specific solution for __RESTRICT. __RESTRICT is ignored. + #define __RESTRICT + #endif + + +/* + * TASKING Compiler + */ +#elif defined ( __TASKING__ ) + /* + * The CMSIS functions have been implemented as intrinsics in the compiler. + * Please use "carm -?i" to get an up to date list of all intrinsics, + * Including the CMSIS ones. 
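/* Usage sketch (editorial, not part of cmsis_compiler.h or of this patch): the
 * __UNALIGNED_UINT*_READ/WRITE helpers that each compiler branch of this header
 * defines let portable code access multi-byte fields at odd offsets without
 * compiler-specific packed pointers. Buffer layout and names are hypothetical. */
#include <stdint.h>

static uint32_t parse_frame_length(const uint8_t *frame)
{
  /* 32-bit length field starting at byte 3 (not 4-byte aligned) */
  return __UNALIGNED_UINT32_READ(&frame[3]);
}

static void write_frame_crc(uint8_t *frame, uint16_t crc)
{
  /* 16-bit CRC field at byte 1 (not 2-byte aligned) */
  __UNALIGNED_UINT16_WRITE(&frame[1], crc);
}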
+ */ + + #ifndef __ASM + #define __ASM __asm + #endif + #ifndef __INLINE + #define __INLINE inline + #endif + #ifndef __STATIC_INLINE + #define __STATIC_INLINE static inline + #endif + #ifndef __STATIC_FORCEINLINE + #define __STATIC_FORCEINLINE __STATIC_INLINE + #endif + #ifndef __NO_RETURN + #define __NO_RETURN __attribute__((noreturn)) + #endif + #ifndef __USED + #define __USED __attribute__((used)) + #endif + #ifndef __WEAK + #define __WEAK __attribute__((weak)) + #endif + #ifndef __PACKED + #define __PACKED __packed__ + #endif + #ifndef __PACKED_STRUCT + #define __PACKED_STRUCT struct __packed__ + #endif + #ifndef __PACKED_UNION + #define __PACKED_UNION union __packed__ + #endif + #ifndef __UNALIGNED_UINT32 /* deprecated */ + struct __packed__ T_UINT32 { uint32_t v; }; + #define __UNALIGNED_UINT32(x) (((struct T_UINT32 *)(x))->v) + #endif + #ifndef __UNALIGNED_UINT16_WRITE + __PACKED_STRUCT T_UINT16_WRITE { uint16_t v; }; + #define __UNALIGNED_UINT16_WRITE(addr, val) (void)((((struct T_UINT16_WRITE *)(void *)(addr))->v) = (val)) + #endif + #ifndef __UNALIGNED_UINT16_READ + __PACKED_STRUCT T_UINT16_READ { uint16_t v; }; + #define __UNALIGNED_UINT16_READ(addr) (((const struct T_UINT16_READ *)(const void *)(addr))->v) + #endif + #ifndef __UNALIGNED_UINT32_WRITE + __PACKED_STRUCT T_UINT32_WRITE { uint32_t v; }; + #define __UNALIGNED_UINT32_WRITE(addr, val) (void)((((struct T_UINT32_WRITE *)(void *)(addr))->v) = (val)) + #endif + #ifndef __UNALIGNED_UINT32_READ + __PACKED_STRUCT T_UINT32_READ { uint32_t v; }; + #define __UNALIGNED_UINT32_READ(addr) (((const struct T_UINT32_READ *)(const void *)(addr))->v) + #endif + #ifndef __ALIGNED + #define __ALIGNED(x) __align(x) + #endif + #ifndef __RESTRICT + #warning No compiler specific solution for __RESTRICT. __RESTRICT is ignored. + #define __RESTRICT + #endif + + +/* + * COSMIC Compiler + */ +#elif defined ( __CSMC__ ) + #include + + #ifndef __ASM + #define __ASM _asm + #endif + #ifndef __INLINE + #define __INLINE inline + #endif + #ifndef __STATIC_INLINE + #define __STATIC_INLINE static inline + #endif + #ifndef __STATIC_FORCEINLINE + #define __STATIC_FORCEINLINE __STATIC_INLINE + #endif + #ifndef __NO_RETURN + // NO RETURN is automatically detected hence no warning here + #define __NO_RETURN + #endif + #ifndef __USED + #warning No compiler specific solution for __USED. __USED is ignored. 
+ #define __USED + #endif + #ifndef __WEAK + #define __WEAK __weak + #endif + #ifndef __PACKED + #define __PACKED @packed + #endif + #ifndef __PACKED_STRUCT + #define __PACKED_STRUCT @packed struct + #endif + #ifndef __PACKED_UNION + #define __PACKED_UNION @packed union + #endif + #ifndef __UNALIGNED_UINT32 /* deprecated */ + @packed struct T_UINT32 { uint32_t v; }; + #define __UNALIGNED_UINT32(x) (((struct T_UINT32 *)(x))->v) + #endif + #ifndef __UNALIGNED_UINT16_WRITE + __PACKED_STRUCT T_UINT16_WRITE { uint16_t v; }; + #define __UNALIGNED_UINT16_WRITE(addr, val) (void)((((struct T_UINT16_WRITE *)(void *)(addr))->v) = (val)) + #endif + #ifndef __UNALIGNED_UINT16_READ + __PACKED_STRUCT T_UINT16_READ { uint16_t v; }; + #define __UNALIGNED_UINT16_READ(addr) (((const struct T_UINT16_READ *)(const void *)(addr))->v) + #endif + #ifndef __UNALIGNED_UINT32_WRITE + __PACKED_STRUCT T_UINT32_WRITE { uint32_t v; }; + #define __UNALIGNED_UINT32_WRITE(addr, val) (void)((((struct T_UINT32_WRITE *)(void *)(addr))->v) = (val)) + #endif + #ifndef __UNALIGNED_UINT32_READ + __PACKED_STRUCT T_UINT32_READ { uint32_t v; }; + #define __UNALIGNED_UINT32_READ(addr) (((const struct T_UINT32_READ *)(const void *)(addr))->v) + #endif + #ifndef __ALIGNED + #warning No compiler specific solution for __ALIGNED. __ALIGNED is ignored. + #define __ALIGNED(x) + #endif + #ifndef __RESTRICT + #warning No compiler specific solution for __RESTRICT. __RESTRICT is ignored. + #define __RESTRICT + #endif + + +#else + #error Unknown compiler. +#endif + + +#endif /* __CMSIS_COMPILER_H */ + diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Drivers/CMSIS/Include/cmsis_gcc.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Drivers/CMSIS/Include/cmsis_gcc.h new file mode 100644 index 0000000..1bd41a4 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Drivers/CMSIS/Include/cmsis_gcc.h @@ -0,0 +1,2085 @@ +/**************************************************************************//** + * @file cmsis_gcc.h + * @brief CMSIS compiler GCC header file + * @version V5.0.4 + * @date 09. April 2018 + ******************************************************************************/ +/* + * Copyright (c) 2009-2018 Arm Limited. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef __CMSIS_GCC_H +#define __CMSIS_GCC_H + +/* ignore some GCC warnings */ +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wsign-conversion" +#pragma GCC diagnostic ignored "-Wconversion" +#pragma GCC diagnostic ignored "-Wunused-parameter" + +/* Fallback for __has_builtin */ +#ifndef __has_builtin + #define __has_builtin(x) (0) +#endif + +/* CMSIS compiler specific defines */ +#ifndef __ASM + #define __ASM __asm +#endif +#ifndef __INLINE + #define __INLINE inline +#endif +#ifndef __STATIC_INLINE + #define __STATIC_INLINE static inline +#endif +#ifndef __STATIC_FORCEINLINE + #define __STATIC_FORCEINLINE __attribute__((always_inline)) static inline +#endif +#ifndef __NO_RETURN + #define __NO_RETURN __attribute__((__noreturn__)) +#endif +#ifndef __USED + #define __USED __attribute__((used)) +#endif +#ifndef __WEAK + #define __WEAK __attribute__((weak)) +#endif +#ifndef __PACKED + #define __PACKED __attribute__((packed, aligned(1))) +#endif +#ifndef __PACKED_STRUCT + #define __PACKED_STRUCT struct __attribute__((packed, aligned(1))) +#endif +#ifndef __PACKED_UNION + #define __PACKED_UNION union __attribute__((packed, aligned(1))) +#endif +#ifndef __UNALIGNED_UINT32 /* deprecated */ + #pragma GCC diagnostic push + #pragma GCC diagnostic ignored "-Wpacked" + #pragma GCC diagnostic ignored "-Wattributes" + struct __attribute__((packed)) T_UINT32 { uint32_t v; }; + #pragma GCC diagnostic pop + #define __UNALIGNED_UINT32(x) (((struct T_UINT32 *)(x))->v) +#endif +#ifndef __UNALIGNED_UINT16_WRITE + #pragma GCC diagnostic push + #pragma GCC diagnostic ignored "-Wpacked" + #pragma GCC diagnostic ignored "-Wattributes" + __PACKED_STRUCT T_UINT16_WRITE { uint16_t v; }; + #pragma GCC diagnostic pop + #define __UNALIGNED_UINT16_WRITE(addr, val) (void)((((struct T_UINT16_WRITE *)(void *)(addr))->v) = (val)) +#endif +#ifndef __UNALIGNED_UINT16_READ + #pragma GCC diagnostic push + #pragma GCC diagnostic ignored "-Wpacked" + #pragma GCC diagnostic ignored "-Wattributes" + __PACKED_STRUCT T_UINT16_READ { uint16_t v; }; + #pragma GCC diagnostic pop + #define __UNALIGNED_UINT16_READ(addr) (((const struct T_UINT16_READ *)(const void *)(addr))->v) +#endif +#ifndef __UNALIGNED_UINT32_WRITE + #pragma GCC diagnostic push + #pragma GCC diagnostic ignored "-Wpacked" + #pragma GCC diagnostic ignored "-Wattributes" + __PACKED_STRUCT T_UINT32_WRITE { uint32_t v; }; + #pragma GCC diagnostic pop + #define __UNALIGNED_UINT32_WRITE(addr, val) (void)((((struct T_UINT32_WRITE *)(void *)(addr))->v) = (val)) +#endif +#ifndef __UNALIGNED_UINT32_READ + #pragma GCC diagnostic push + #pragma GCC diagnostic ignored "-Wpacked" + #pragma GCC diagnostic ignored "-Wattributes" + __PACKED_STRUCT T_UINT32_READ { uint32_t v; }; + #pragma GCC diagnostic pop + #define __UNALIGNED_UINT32_READ(addr) (((const struct T_UINT32_READ *)(const void *)(addr))->v) +#endif +#ifndef __ALIGNED + #define __ALIGNED(x) __attribute__((aligned(x))) +#endif +#ifndef __RESTRICT + #define __RESTRICT __restrict +#endif + + +/* ########################### Core Function Access ########################### */ +/** \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_RegAccFunctions CMSIS Core Register Access Functions + @{ + */ + +/** + \brief Enable IRQ Interrupts + \details Enables IRQ interrupts by clearing the I-bit in the CPSR. + Can only be executed in Privileged modes. 
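/* Usage sketch (editorial, not part of cmsis_gcc.h or of this patch): typical
 * use of the attribute macros defined above. __PACKED_STRUCT removes padding so
 * the in-memory layout matches a wire format, and __ALIGNED places a buffer on a
 * DMA/cache-friendly boundary. Struct and buffer names are hypothetical. */
#include <stdint.h>

__PACKED_STRUCT sensor_record {          /* 7 bytes, no padding between members */
  uint8_t  id;
  uint32_t timestamp;
  uint16_t value;
};

__ALIGNED(32) static uint8_t dma_rx_buffer[256];   /* 32-byte aligned for DMA use */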
+ */ +__STATIC_FORCEINLINE void __enable_irq(void) +{ + __ASM volatile ("cpsie i" : : : "memory"); +} + + +/** + \brief Disable IRQ Interrupts + \details Disables IRQ interrupts by setting the I-bit in the CPSR. + Can only be executed in Privileged modes. + */ +__STATIC_FORCEINLINE void __disable_irq(void) +{ + __ASM volatile ("cpsid i" : : : "memory"); +} + + +/** + \brief Get Control Register + \details Returns the content of the Control Register. + \return Control Register value + */ +__STATIC_FORCEINLINE uint32_t __get_CONTROL(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, control" : "=r" (result) ); + return(result); +} + + +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) +/** + \brief Get Control Register (non-secure) + \details Returns the content of the non-secure Control Register when in secure mode. + \return non-secure Control Register value + */ +__STATIC_FORCEINLINE uint32_t __TZ_get_CONTROL_NS(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, control_ns" : "=r" (result) ); + return(result); +} +#endif + + +/** + \brief Set Control Register + \details Writes the given value to the Control Register. + \param [in] control Control Register value to set + */ +__STATIC_FORCEINLINE void __set_CONTROL(uint32_t control) +{ + __ASM volatile ("MSR control, %0" : : "r" (control) : "memory"); +} + + +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) +/** + \brief Set Control Register (non-secure) + \details Writes the given value to the non-secure Control Register when in secure state. + \param [in] control Control Register value to set + */ +__STATIC_FORCEINLINE void __TZ_set_CONTROL_NS(uint32_t control) +{ + __ASM volatile ("MSR control_ns, %0" : : "r" (control) : "memory"); +} +#endif + + +/** + \brief Get IPSR Register + \details Returns the content of the IPSR Register. + \return IPSR Register value + */ +__STATIC_FORCEINLINE uint32_t __get_IPSR(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, ipsr" : "=r" (result) ); + return(result); +} + + +/** + \brief Get APSR Register + \details Returns the content of the APSR Register. + \return APSR Register value + */ +__STATIC_FORCEINLINE uint32_t __get_APSR(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, apsr" : "=r" (result) ); + return(result); +} + + +/** + \brief Get xPSR Register + \details Returns the content of the xPSR Register. + \return xPSR Register value + */ +__STATIC_FORCEINLINE uint32_t __get_xPSR(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, xpsr" : "=r" (result) ); + return(result); +} + + +/** + \brief Get Process Stack Pointer + \details Returns the current value of the Process Stack Pointer (PSP). + \return PSP Register value + */ +__STATIC_FORCEINLINE uint32_t __get_PSP(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, psp" : "=r" (result) ); + return(result); +} + + +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) +/** + \brief Get Process Stack Pointer (non-secure) + \details Returns the current value of the non-secure Process Stack Pointer (PSP) when in secure state. + \return PSP Register value + */ +__STATIC_FORCEINLINE uint32_t __TZ_get_PSP_NS(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, psp_ns" : "=r" (result) ); + return(result); +} +#endif + + +/** + \brief Set Process Stack Pointer + \details Assigns the given value to the Process Stack Pointer (PSP). 
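/* Usage sketch (editorial, not part of the header or of this patch):
 * __get_IPSR() returns the number of the currently active exception, so a zero
 * value means Thread mode. This is a common way to assert that an API is (or is
 * not) being called from interrupt context. The function name is hypothetical. */
#include <stdbool.h>
#include <stdint.h>

static inline bool in_handler_mode(void)
{
  return (__get_IPSR() != 0U);   /* 0 = Thread mode, otherwise the active exception number */
}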
+ \param [in] topOfProcStack Process Stack Pointer value to set + */ +__STATIC_FORCEINLINE void __set_PSP(uint32_t topOfProcStack) +{ + __ASM volatile ("MSR psp, %0" : : "r" (topOfProcStack) : ); +} + + +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) +/** + \brief Set Process Stack Pointer (non-secure) + \details Assigns the given value to the non-secure Process Stack Pointer (PSP) when in secure state. + \param [in] topOfProcStack Process Stack Pointer value to set + */ +__STATIC_FORCEINLINE void __TZ_set_PSP_NS(uint32_t topOfProcStack) +{ + __ASM volatile ("MSR psp_ns, %0" : : "r" (topOfProcStack) : ); +} +#endif + + +/** + \brief Get Main Stack Pointer + \details Returns the current value of the Main Stack Pointer (MSP). + \return MSP Register value + */ +__STATIC_FORCEINLINE uint32_t __get_MSP(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, msp" : "=r" (result) ); + return(result); +} + + +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) +/** + \brief Get Main Stack Pointer (non-secure) + \details Returns the current value of the non-secure Main Stack Pointer (MSP) when in secure state. + \return MSP Register value + */ +__STATIC_FORCEINLINE uint32_t __TZ_get_MSP_NS(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, msp_ns" : "=r" (result) ); + return(result); +} +#endif + + +/** + \brief Set Main Stack Pointer + \details Assigns the given value to the Main Stack Pointer (MSP). + \param [in] topOfMainStack Main Stack Pointer value to set + */ +__STATIC_FORCEINLINE void __set_MSP(uint32_t topOfMainStack) +{ + __ASM volatile ("MSR msp, %0" : : "r" (topOfMainStack) : ); +} + + +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) +/** + \brief Set Main Stack Pointer (non-secure) + \details Assigns the given value to the non-secure Main Stack Pointer (MSP) when in secure state. + \param [in] topOfMainStack Main Stack Pointer value to set + */ +__STATIC_FORCEINLINE void __TZ_set_MSP_NS(uint32_t topOfMainStack) +{ + __ASM volatile ("MSR msp_ns, %0" : : "r" (topOfMainStack) : ); +} +#endif + + +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) +/** + \brief Get Stack Pointer (non-secure) + \details Returns the current value of the non-secure Stack Pointer (SP) when in secure state. + \return SP Register value + */ +__STATIC_FORCEINLINE uint32_t __TZ_get_SP_NS(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, sp_ns" : "=r" (result) ); + return(result); +} + + +/** + \brief Set Stack Pointer (non-secure) + \details Assigns the given value to the non-secure Stack Pointer (SP) when in secure state. + \param [in] topOfStack Stack Pointer value to set + */ +__STATIC_FORCEINLINE void __TZ_set_SP_NS(uint32_t topOfStack) +{ + __ASM volatile ("MSR sp_ns, %0" : : "r" (topOfStack) : ); +} +#endif + + +/** + \brief Get Priority Mask + \details Returns the current state of the priority mask bit from the Priority Mask Register. + \return Priority Mask value + */ +__STATIC_FORCEINLINE uint32_t __get_PRIMASK(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, primask" : "=r" (result) :: "memory"); + return(result); +} + + +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) +/** + \brief Get Priority Mask (non-secure) + \details Returns the current state of the non-secure priority mask bit from the Priority Mask Register when in secure state. 
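/* Usage sketch (editorial, not part of the header or of this patch): a
 * nesting-safe critical section built from __get_PRIMASK(), __disable_irq() and
 * __set_PRIMASK(). Saving and restoring PRIMASK, instead of unconditionally
 * re-enabling IRQs, keeps the pattern correct when the caller already runs with
 * interrupts masked. */
#include <stdint.h>

static void update_shared_state(void)
{
  uint32_t primask = __get_PRIMASK();  /* remember the current mask state              */
  __disable_irq();                     /* PRIMASK = 1: mask all configurable IRQs      */

  /* ... touch data shared with interrupt handlers ... */

  __set_PRIMASK(primask);              /* restore; re-enables only if enabled before   */
}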
+ \return Priority Mask value + */ +__STATIC_FORCEINLINE uint32_t __TZ_get_PRIMASK_NS(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, primask_ns" : "=r" (result) :: "memory"); + return(result); +} +#endif + + +/** + \brief Set Priority Mask + \details Assigns the given value to the Priority Mask Register. + \param [in] priMask Priority Mask + */ +__STATIC_FORCEINLINE void __set_PRIMASK(uint32_t priMask) +{ + __ASM volatile ("MSR primask, %0" : : "r" (priMask) : "memory"); +} + + +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) +/** + \brief Set Priority Mask (non-secure) + \details Assigns the given value to the non-secure Priority Mask Register when in secure state. + \param [in] priMask Priority Mask + */ +__STATIC_FORCEINLINE void __TZ_set_PRIMASK_NS(uint32_t priMask) +{ + __ASM volatile ("MSR primask_ns, %0" : : "r" (priMask) : "memory"); +} +#endif + + +#if ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ + (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \ + (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) ) +/** + \brief Enable FIQ + \details Enables FIQ interrupts by clearing the F-bit in the CPSR. + Can only be executed in Privileged modes. + */ +__STATIC_FORCEINLINE void __enable_fault_irq(void) +{ + __ASM volatile ("cpsie f" : : : "memory"); +} + + +/** + \brief Disable FIQ + \details Disables FIQ interrupts by setting the F-bit in the CPSR. + Can only be executed in Privileged modes. + */ +__STATIC_FORCEINLINE void __disable_fault_irq(void) +{ + __ASM volatile ("cpsid f" : : : "memory"); +} + + +/** + \brief Get Base Priority + \details Returns the current value of the Base Priority register. + \return Base Priority register value + */ +__STATIC_FORCEINLINE uint32_t __get_BASEPRI(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, basepri" : "=r" (result) ); + return(result); +} + + +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) +/** + \brief Get Base Priority (non-secure) + \details Returns the current value of the non-secure Base Priority register when in secure state. + \return Base Priority register value + */ +__STATIC_FORCEINLINE uint32_t __TZ_get_BASEPRI_NS(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, basepri_ns" : "=r" (result) ); + return(result); +} +#endif + + +/** + \brief Set Base Priority + \details Assigns the given value to the Base Priority register. + \param [in] basePri Base Priority value to set + */ +__STATIC_FORCEINLINE void __set_BASEPRI(uint32_t basePri) +{ + __ASM volatile ("MSR basepri, %0" : : "r" (basePri) : "memory"); +} + + +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) +/** + \brief Set Base Priority (non-secure) + \details Assigns the given value to the non-secure Base Priority register when in secure state. + \param [in] basePri Base Priority value to set + */ +__STATIC_FORCEINLINE void __TZ_set_BASEPRI_NS(uint32_t basePri) +{ + __ASM volatile ("MSR basepri_ns, %0" : : "r" (basePri) : "memory"); +} +#endif + + +/** + \brief Set Base Priority with condition + \details Assigns the given value to the Base Priority register only if BASEPRI masking is disabled, + or the new value increases the BASEPRI priority level. + \param [in] basePri Base Priority value to set + */ +__STATIC_FORCEINLINE void __set_BASEPRI_MAX(uint32_t basePri) +{ + __ASM volatile ("MSR basepri_max, %0" : : "r" (basePri) : "memory"); +} + + +/** + \brief Get Fault Mask + \details Returns the current value of the Fault Mask register. 
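/* Usage sketch (editorial, not part of the header or of this patch):
 * __set_BASEPRI() masks only exceptions whose priority value is numerically
 * greater than or equal to BASEPRI, which is how RTOS ports keep high-priority
 * ("zero latency") interrupts running inside kernel critical sections. The four
 * implemented priority bits below are a device-specific assumption (normally
 * __NVIC_PRIO_BITS from the device header). */
#include <stdint.h>

#define EXAMPLE_PRIO_BITS      4U                        /* assumed priority bit count     */
#define MASK_PRIO_5_AND_BELOW  (5U << (8U - EXAMPLE_PRIO_BITS))

static void kernel_critical_section(void)
{
  __set_BASEPRI(MASK_PRIO_5_AND_BELOW);  /* block priorities 5..15, leave 0..4 live */

  /* ... work that must not be preempted by low-priority IRQs ... */

  __set_BASEPRI(0U);                     /* 0 disables BASEPRI masking entirely     */
}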
+ \return Fault Mask register value + */ +__STATIC_FORCEINLINE uint32_t __get_FAULTMASK(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, faultmask" : "=r" (result) ); + return(result); +} + + +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) +/** + \brief Get Fault Mask (non-secure) + \details Returns the current value of the non-secure Fault Mask register when in secure state. + \return Fault Mask register value + */ +__STATIC_FORCEINLINE uint32_t __TZ_get_FAULTMASK_NS(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, faultmask_ns" : "=r" (result) ); + return(result); +} +#endif + + +/** + \brief Set Fault Mask + \details Assigns the given value to the Fault Mask register. + \param [in] faultMask Fault Mask value to set + */ +__STATIC_FORCEINLINE void __set_FAULTMASK(uint32_t faultMask) +{ + __ASM volatile ("MSR faultmask, %0" : : "r" (faultMask) : "memory"); +} + + +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) +/** + \brief Set Fault Mask (non-secure) + \details Assigns the given value to the non-secure Fault Mask register when in secure state. + \param [in] faultMask Fault Mask value to set + */ +__STATIC_FORCEINLINE void __TZ_set_FAULTMASK_NS(uint32_t faultMask) +{ + __ASM volatile ("MSR faultmask_ns, %0" : : "r" (faultMask) : "memory"); +} +#endif + +#endif /* ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ + (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \ + (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) ) */ + + +#if ((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ + (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1)) ) + +/** + \brief Get Process Stack Pointer Limit + Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure + Stack Pointer Limit register hence zero is returned always in non-secure + mode. + + \details Returns the current value of the Process Stack Pointer Limit (PSPLIM). + \return PSPLIM Register value + */ +__STATIC_FORCEINLINE uint32_t __get_PSPLIM(void) +{ +#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) && \ + (!defined (__ARM_FEATURE_CMSE) || (__ARM_FEATURE_CMSE < 3))) + // without main extensions, the non-secure PSPLIM is RAZ/WI + return 0U; +#else + uint32_t result; + __ASM volatile ("MRS %0, psplim" : "=r" (result) ); + return result; +#endif +} + +#if (defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3)) +/** + \brief Get Process Stack Pointer Limit (non-secure) + Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure + Stack Pointer Limit register hence zero is returned always. + + \details Returns the current value of the non-secure Process Stack Pointer Limit (PSPLIM) when in secure state. + \return PSPLIM Register value + */ +__STATIC_FORCEINLINE uint32_t __TZ_get_PSPLIM_NS(void) +{ +#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1))) + // without main extensions, the non-secure PSPLIM is RAZ/WI + return 0U; +#else + uint32_t result; + __ASM volatile ("MRS %0, psplim_ns" : "=r" (result) ); + return result; +#endif +} +#endif + + +/** + \brief Set Process Stack Pointer Limit + Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure + Stack Pointer Limit register hence the write is silently ignored in non-secure + mode. + + \details Assigns the given value to the Process Stack Pointer Limit (PSPLIM). 
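/* Usage sketch (editorial, not part of the header or of this patch): on Armv8-M,
 * programming PSPLIM before pointing PSP at a task stack turns a stack overflow
 * into a stack-limit fault (STKOF) instead of silent memory corruption. The
 * stack size and names are hypothetical; the limit must be the lowest valid
 * stack address, since stacks grow downwards. */
#include <stdint.h>

#define TASK_STACK_WORDS 256U
__ALIGNED(8) static uint32_t task_stack[TASK_STACK_WORDS];

static void start_task_stack(void)
{
  __set_PSPLIM((uint32_t)&task_stack[0]);               /* lowest address the task may use   */
  __set_PSP((uint32_t)&task_stack[TASK_STACK_WORDS]);   /* initial top of the (empty) stack  */
}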
+ \param [in] ProcStackPtrLimit Process Stack Pointer Limit value to set + */ +__STATIC_FORCEINLINE void __set_PSPLIM(uint32_t ProcStackPtrLimit) +{ +#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) && \ + (!defined (__ARM_FEATURE_CMSE) || (__ARM_FEATURE_CMSE < 3))) + // without main extensions, the non-secure PSPLIM is RAZ/WI + (void)ProcStackPtrLimit; +#else + __ASM volatile ("MSR psplim, %0" : : "r" (ProcStackPtrLimit)); +#endif +} + + +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) +/** + \brief Set Process Stack Pointer (non-secure) + Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure + Stack Pointer Limit register hence the write is silently ignored. + + \details Assigns the given value to the non-secure Process Stack Pointer Limit (PSPLIM) when in secure state. + \param [in] ProcStackPtrLimit Process Stack Pointer Limit value to set + */ +__STATIC_FORCEINLINE void __TZ_set_PSPLIM_NS(uint32_t ProcStackPtrLimit) +{ +#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1))) + // without main extensions, the non-secure PSPLIM is RAZ/WI + (void)ProcStackPtrLimit; +#else + __ASM volatile ("MSR psplim_ns, %0\n" : : "r" (ProcStackPtrLimit)); +#endif +} +#endif + + +/** + \brief Get Main Stack Pointer Limit + Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure + Stack Pointer Limit register hence zero is returned always in non-secure + mode. + + \details Returns the current value of the Main Stack Pointer Limit (MSPLIM). + \return MSPLIM Register value + */ +__STATIC_FORCEINLINE uint32_t __get_MSPLIM(void) +{ +#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) && \ + (!defined (__ARM_FEATURE_CMSE) || (__ARM_FEATURE_CMSE < 3))) + // without main extensions, the non-secure MSPLIM is RAZ/WI + return 0U; +#else + uint32_t result; + __ASM volatile ("MRS %0, msplim" : "=r" (result) ); + return result; +#endif +} + + +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) +/** + \brief Get Main Stack Pointer Limit (non-secure) + Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure + Stack Pointer Limit register hence zero is returned always. + + \details Returns the current value of the non-secure Main Stack Pointer Limit(MSPLIM) when in secure state. + \return MSPLIM Register value + */ +__STATIC_FORCEINLINE uint32_t __TZ_get_MSPLIM_NS(void) +{ +#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1))) + // without main extensions, the non-secure MSPLIM is RAZ/WI + return 0U; +#else + uint32_t result; + __ASM volatile ("MRS %0, msplim_ns" : "=r" (result) ); + return result; +#endif +} +#endif + + +/** + \brief Set Main Stack Pointer Limit + Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure + Stack Pointer Limit register hence the write is silently ignored in non-secure + mode. + + \details Assigns the given value to the Main Stack Pointer Limit (MSPLIM). 
+ \param [in] MainStackPtrLimit Main Stack Pointer Limit value to set + */ +__STATIC_FORCEINLINE void __set_MSPLIM(uint32_t MainStackPtrLimit) +{ +#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) && \ + (!defined (__ARM_FEATURE_CMSE) || (__ARM_FEATURE_CMSE < 3))) + // without main extensions, the non-secure MSPLIM is RAZ/WI + (void)MainStackPtrLimit; +#else + __ASM volatile ("MSR msplim, %0" : : "r" (MainStackPtrLimit)); +#endif +} + + +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) +/** + \brief Set Main Stack Pointer Limit (non-secure) + Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure + Stack Pointer Limit register hence the write is silently ignored. + + \details Assigns the given value to the non-secure Main Stack Pointer Limit (MSPLIM) when in secure state. + \param [in] MainStackPtrLimit Main Stack Pointer value to set + */ +__STATIC_FORCEINLINE void __TZ_set_MSPLIM_NS(uint32_t MainStackPtrLimit) +{ +#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1))) + // without main extensions, the non-secure MSPLIM is RAZ/WI + (void)MainStackPtrLimit; +#else + __ASM volatile ("MSR msplim_ns, %0" : : "r" (MainStackPtrLimit)); +#endif +} +#endif + +#endif /* ((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ + (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1)) ) */ + + +/** + \brief Get FPSCR + \details Returns the current value of the Floating Point Status/Control register. + \return Floating Point Status/Control register value + */ +__STATIC_FORCEINLINE uint32_t __get_FPSCR(void) +{ +#if ((defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U)) && \ + (defined (__FPU_USED ) && (__FPU_USED == 1U)) ) +#if __has_builtin(__builtin_arm_get_fpscr) +// Re-enable using built-in when GCC has been fixed +// || (__GNUC__ > 7) || (__GNUC__ == 7 && __GNUC_MINOR__ >= 2) + /* see https://gcc.gnu.org/ml/gcc-patches/2017-04/msg00443.html */ + return __builtin_arm_get_fpscr(); +#else + uint32_t result; + + __ASM volatile ("VMRS %0, fpscr" : "=r" (result) ); + return(result); +#endif +#else + return(0U); +#endif +} + + +/** + \brief Set FPSCR + \details Assigns the given value to the Floating Point Status/Control register. + \param [in] fpscr Floating Point Status/Control value to set + */ +__STATIC_FORCEINLINE void __set_FPSCR(uint32_t fpscr) +{ +#if ((defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U)) && \ + (defined (__FPU_USED ) && (__FPU_USED == 1U)) ) +#if __has_builtin(__builtin_arm_set_fpscr) +// Re-enable using built-in when GCC has been fixed +// || (__GNUC__ > 7) || (__GNUC__ == 7 && __GNUC_MINOR__ >= 2) + /* see https://gcc.gnu.org/ml/gcc-patches/2017-04/msg00443.html */ + __builtin_arm_set_fpscr(fpscr); +#else + __ASM volatile ("VMSR fpscr, %0" : : "r" (fpscr) : "vfpcc", "memory"); +#endif +#else + (void)fpscr; +#endif +} + + +/*@} end of CMSIS_Core_RegAccFunctions */ + + +/* ########################## Core Instruction Access ######################### */ +/** \defgroup CMSIS_Core_InstructionInterface CMSIS Core Instruction Interface + Access to dedicated instructions + @{ +*/ + +/* Define macros for porting to both thumb1 and thumb2. 
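/* Usage sketch (editorial, not part of the header or of this patch): polling and
 * clearing the cumulative floating-point exception flags via
 * __get_FPSCR()/__set_FPSCR(). The mask assumes the usual FPSCR layout
 * (IOC/DZC/OFC/UFC/IXC in bits 4:0); confirm against the core's reference
 * manual for a given device. */
#include <stdbool.h>
#include <stdint.h>

#define FPSCR_CUMULATIVE_FLAGS 0x0000001FU   /* assumption: flag bits 4:0 */

static bool fp_exception_seen_and_clear(void)
{
  uint32_t fpscr = __get_FPSCR();            /* reads as 0 when no FPU is present/used */
  __set_FPSCR(fpscr & ~FPSCR_CUMULATIVE_FLAGS);
  return (fpscr & FPSCR_CUMULATIVE_FLAGS) != 0U;
}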
+ * For thumb1, use low register (r0-r7), specified by constraint "l" + * Otherwise, use general registers, specified by constraint "r" */ +#if defined (__thumb__) && !defined (__thumb2__) +#define __CMSIS_GCC_OUT_REG(r) "=l" (r) +#define __CMSIS_GCC_RW_REG(r) "+l" (r) +#define __CMSIS_GCC_USE_REG(r) "l" (r) +#else +#define __CMSIS_GCC_OUT_REG(r) "=r" (r) +#define __CMSIS_GCC_RW_REG(r) "+r" (r) +#define __CMSIS_GCC_USE_REG(r) "r" (r) +#endif + +/** + \brief No Operation + \details No Operation does nothing. This instruction can be used for code alignment purposes. + */ +#define __NOP() __ASM volatile ("nop") + +/** + \brief Wait For Interrupt + \details Wait For Interrupt is a hint instruction that suspends execution until one of a number of events occurs. + */ +#define __WFI() __ASM volatile ("wfi") + + +/** + \brief Wait For Event + \details Wait For Event is a hint instruction that permits the processor to enter + a low-power state until one of a number of events occurs. + */ +#define __WFE() __ASM volatile ("wfe") + + +/** + \brief Send Event + \details Send Event is a hint instruction. It causes an event to be signaled to the CPU. + */ +#define __SEV() __ASM volatile ("sev") + + +/** + \brief Instruction Synchronization Barrier + \details Instruction Synchronization Barrier flushes the pipeline in the processor, + so that all instructions following the ISB are fetched from cache or memory, + after the instruction has been completed. + */ +__STATIC_FORCEINLINE void __ISB(void) +{ + __ASM volatile ("isb 0xF":::"memory"); +} + + +/** + \brief Data Synchronization Barrier + \details Acts as a special kind of Data Memory Barrier. + It completes when all explicit memory accesses before this instruction complete. + */ +__STATIC_FORCEINLINE void __DSB(void) +{ + __ASM volatile ("dsb 0xF":::"memory"); +} + + +/** + \brief Data Memory Barrier + \details Ensures the apparent order of the explicit memory operations before + and after the instruction, without ensuring their completion. + */ +__STATIC_FORCEINLINE void __DMB(void) +{ + __ASM volatile ("dmb 0xF":::"memory"); +} + + +/** + \brief Reverse byte order (32 bit) + \details Reverses the byte order in unsigned integer value. For example, 0x12345678 becomes 0x78563412. + \param [in] value Value to reverse + \return Reversed value + */ +__STATIC_FORCEINLINE uint32_t __REV(uint32_t value) +{ +#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 5) + return __builtin_bswap32(value); +#else + uint32_t result; + + __ASM volatile ("rev %0, %1" : __CMSIS_GCC_OUT_REG (result) : __CMSIS_GCC_USE_REG (value) ); + return result; +#endif +} + + +/** + \brief Reverse byte order (16 bit) + \details Reverses the byte order within each halfword of a word. For example, 0x12345678 becomes 0x34127856. + \param [in] value Value to reverse + \return Reversed value + */ +__STATIC_FORCEINLINE uint32_t __REV16(uint32_t value) +{ + uint32_t result; + + __ASM volatile ("rev16 %0, %1" : __CMSIS_GCC_OUT_REG (result) : __CMSIS_GCC_USE_REG (value) ); + return result; +} + + +/** + \brief Reverse byte order (16 bit) + \details Reverses the byte order in a 16-bit value and returns the signed 16-bit result. For example, 0x0080 becomes 0x8000. 
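/* Usage sketch (editorial, not part of the header or of this patch): the
 * byte-reverse intrinsics documented above are the usual way to convert between
 * the core's (typically little-endian) byte order and big-endian wire formats. */
#include <stdint.h>

static uint32_t to_big_endian32(uint32_t host_value)
{
  return __REV(host_value);          /* e.g. 0x12345678 -> 0x78563412 */
}

static uint32_t swap_halfword_bytes(uint32_t packed)
{
  return __REV16(packed);            /* e.g. 0xAABBCCDD -> 0xBBAADDCC */
}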
+ \param [in] value Value to reverse + \return Reversed value + */ +__STATIC_FORCEINLINE int16_t __REVSH(int16_t value) +{ +#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8) + return (int16_t)__builtin_bswap16(value); +#else + int16_t result; + + __ASM volatile ("revsh %0, %1" : __CMSIS_GCC_OUT_REG (result) : __CMSIS_GCC_USE_REG (value) ); + return result; +#endif +} + + +/** + \brief Rotate Right in unsigned value (32 bit) + \details Rotate Right (immediate) provides the value of the contents of a register rotated by a variable number of bits. + \param [in] op1 Value to rotate + \param [in] op2 Number of Bits to rotate + \return Rotated value + */ +__STATIC_FORCEINLINE uint32_t __ROR(uint32_t op1, uint32_t op2) +{ + op2 %= 32U; + if (op2 == 0U) + { + return op1; + } + return (op1 >> op2) | (op1 << (32U - op2)); +} + + +/** + \brief Breakpoint + \details Causes the processor to enter Debug state. + Debug tools can use this to investigate system state when the instruction at a particular address is reached. + \param [in] value is ignored by the processor. + If required, a debugger can use it to store additional information about the breakpoint. + */ +#define __BKPT(value) __ASM volatile ("bkpt "#value) + + +/** + \brief Reverse bit order of value + \details Reverses the bit order of the given value. + \param [in] value Value to reverse + \return Reversed value + */ +__STATIC_FORCEINLINE uint32_t __RBIT(uint32_t value) +{ + uint32_t result; + +#if ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ + (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \ + (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) ) + __ASM volatile ("rbit %0, %1" : "=r" (result) : "r" (value) ); +#else + uint32_t s = (4U /*sizeof(v)*/ * 8U) - 1U; /* extra shift needed at end */ + + result = value; /* r will be reversed bits of v; first get LSB of v */ + for (value >>= 1U; value != 0U; value >>= 1U) + { + result <<= 1U; + result |= value & 1U; + s--; + } + result <<= s; /* shift when v's highest bits are zero */ +#endif + return result; +} + + +/** + \brief Count leading zeros + \details Counts the number of leading zeros of a data value. + \param [in] value Value to count the leading zeros + \return number of leading zeros in value + */ +#define __CLZ (uint8_t)__builtin_clz + + +#if ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ + (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \ + (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ + (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1)) ) +/** + \brief LDR Exclusive (8 bit) + \details Executes a exclusive LDR instruction for 8 bit value. + \param [in] ptr Pointer to data + \return value of type uint8_t at (*ptr) + */ +__STATIC_FORCEINLINE uint8_t __LDREXB(volatile uint8_t *addr) +{ + uint32_t result; + +#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8) + __ASM volatile ("ldrexb %0, %1" : "=r" (result) : "Q" (*addr) ); +#else + /* Prior to GCC 4.8, "Q" will be expanded to [rx, #0] which is not + accepted by assembler. So has to use following less efficient pattern. + */ + __ASM volatile ("ldrexb %0, [%1]" : "=r" (result) : "r" (addr) : "memory" ); +#endif + return ((uint8_t) result); /* Add explicit type cast here */ +} + + +/** + \brief LDR Exclusive (16 bit) + \details Executes a exclusive LDR instruction for 16 bit values. 
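/* Usage sketch (editorial, not part of the header or of this patch): combining
 * __RBIT and __CLZ gives a count-trailing-zeros, i.e. the index of the lowest
 * set bit, a common way to pick the next pending flag out of an event mask. The
 * zero check matters because __CLZ maps to __builtin_clz here, whose result is
 * undefined for 0. */
#include <stdint.h>

static uint32_t lowest_set_bit_index(uint32_t mask)
{
  return (mask == 0U) ? 32U : (uint32_t)__CLZ(__RBIT(mask));
}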
+ \param [in] ptr Pointer to data + \return value of type uint16_t at (*ptr) + */ +__STATIC_FORCEINLINE uint16_t __LDREXH(volatile uint16_t *addr) +{ + uint32_t result; + +#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8) + __ASM volatile ("ldrexh %0, %1" : "=r" (result) : "Q" (*addr) ); +#else + /* Prior to GCC 4.8, "Q" will be expanded to [rx, #0] which is not + accepted by assembler. So has to use following less efficient pattern. + */ + __ASM volatile ("ldrexh %0, [%1]" : "=r" (result) : "r" (addr) : "memory" ); +#endif + return ((uint16_t) result); /* Add explicit type cast here */ +} + + +/** + \brief LDR Exclusive (32 bit) + \details Executes a exclusive LDR instruction for 32 bit values. + \param [in] ptr Pointer to data + \return value of type uint32_t at (*ptr) + */ +__STATIC_FORCEINLINE uint32_t __LDREXW(volatile uint32_t *addr) +{ + uint32_t result; + + __ASM volatile ("ldrex %0, %1" : "=r" (result) : "Q" (*addr) ); + return(result); +} + + +/** + \brief STR Exclusive (8 bit) + \details Executes a exclusive STR instruction for 8 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + \return 0 Function succeeded + \return 1 Function failed + */ +__STATIC_FORCEINLINE uint32_t __STREXB(uint8_t value, volatile uint8_t *addr) +{ + uint32_t result; + + __ASM volatile ("strexb %0, %2, %1" : "=&r" (result), "=Q" (*addr) : "r" ((uint32_t)value) ); + return(result); +} + + +/** + \brief STR Exclusive (16 bit) + \details Executes a exclusive STR instruction for 16 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + \return 0 Function succeeded + \return 1 Function failed + */ +__STATIC_FORCEINLINE uint32_t __STREXH(uint16_t value, volatile uint16_t *addr) +{ + uint32_t result; + + __ASM volatile ("strexh %0, %2, %1" : "=&r" (result), "=Q" (*addr) : "r" ((uint32_t)value) ); + return(result); +} + + +/** + \brief STR Exclusive (32 bit) + \details Executes a exclusive STR instruction for 32 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + \return 0 Function succeeded + \return 1 Function failed + */ +__STATIC_FORCEINLINE uint32_t __STREXW(uint32_t value, volatile uint32_t *addr) +{ + uint32_t result; + + __ASM volatile ("strex %0, %2, %1" : "=&r" (result), "=Q" (*addr) : "r" (value) ); + return(result); +} + + +/** + \brief Remove the exclusive lock + \details Removes the exclusive lock which is created by LDREX. + */ +__STATIC_FORCEINLINE void __CLREX(void) +{ + __ASM volatile ("clrex" ::: "memory"); +} + +#endif /* ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ + (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \ + (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ + (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1)) ) */ + + +#if ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ + (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \ + (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) ) +/** + \brief Signed Saturate + \details Saturates a signed value. + \param [in] ARG1 Value to be saturated + \param [in] ARG2 Bit position to saturate to (1..32) + \return Saturated value + */ +#define __SSAT(ARG1,ARG2) \ +__extension__ \ +({ \ + int32_t __RES, __ARG1 = (ARG1); \ + __ASM ("ssat %0, %1, %2" : "=r" (__RES) : "I" (ARG2), "r" (__ARG1) ); \ + __RES; \ + }) + + +/** + \brief Unsigned Saturate + \details Saturates an unsigned value. 
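/* Usage sketch (editorial, not part of the header or of this patch): the
 * canonical LDREX/STREX retry loop, shown as an atomic add usable from thread
 * and handler context alike. __STREXW returns 0 only if the exclusive monitor
 * still held the reservation taken by __LDREXW. */
#include <stdint.h>

static uint32_t atomic_fetch_add(volatile uint32_t *addr, uint32_t inc)
{
  uint32_t old;

  do {
    old = __LDREXW(addr);                      /* take an exclusive reservation          */
  } while (__STREXW(old + inc, addr) != 0U);   /* retry if the reservation was lost      */

  __DMB();                                     /* order the update against later accesses */
  return old;
}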
+ \param [in] ARG1 Value to be saturated + \param [in] ARG2 Bit position to saturate to (0..31) + \return Saturated value + */ +#define __USAT(ARG1,ARG2) \ + __extension__ \ +({ \ + uint32_t __RES, __ARG1 = (ARG1); \ + __ASM ("usat %0, %1, %2" : "=r" (__RES) : "I" (ARG2), "r" (__ARG1) ); \ + __RES; \ + }) + + +/** + \brief Rotate Right with Extend (32 bit) + \details Moves each bit of a bitstring right by one bit. + The carry input is shifted in at the left end of the bitstring. + \param [in] value Value to rotate + \return Rotated value + */ +__STATIC_FORCEINLINE uint32_t __RRX(uint32_t value) +{ + uint32_t result; + + __ASM volatile ("rrx %0, %1" : __CMSIS_GCC_OUT_REG (result) : __CMSIS_GCC_USE_REG (value) ); + return(result); +} + + +/** + \brief LDRT Unprivileged (8 bit) + \details Executes a Unprivileged LDRT instruction for 8 bit value. + \param [in] ptr Pointer to data + \return value of type uint8_t at (*ptr) + */ +__STATIC_FORCEINLINE uint8_t __LDRBT(volatile uint8_t *ptr) +{ + uint32_t result; + +#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8) + __ASM volatile ("ldrbt %0, %1" : "=r" (result) : "Q" (*ptr) ); +#else + /* Prior to GCC 4.8, "Q" will be expanded to [rx, #0] which is not + accepted by assembler. So has to use following less efficient pattern. + */ + __ASM volatile ("ldrbt %0, [%1]" : "=r" (result) : "r" (ptr) : "memory" ); +#endif + return ((uint8_t) result); /* Add explicit type cast here */ +} + + +/** + \brief LDRT Unprivileged (16 bit) + \details Executes a Unprivileged LDRT instruction for 16 bit values. + \param [in] ptr Pointer to data + \return value of type uint16_t at (*ptr) + */ +__STATIC_FORCEINLINE uint16_t __LDRHT(volatile uint16_t *ptr) +{ + uint32_t result; + +#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8) + __ASM volatile ("ldrht %0, %1" : "=r" (result) : "Q" (*ptr) ); +#else + /* Prior to GCC 4.8, "Q" will be expanded to [rx, #0] which is not + accepted by assembler. So has to use following less efficient pattern. + */ + __ASM volatile ("ldrht %0, [%1]" : "=r" (result) : "r" (ptr) : "memory" ); +#endif + return ((uint16_t) result); /* Add explicit type cast here */ +} + + +/** + \brief LDRT Unprivileged (32 bit) + \details Executes a Unprivileged LDRT instruction for 32 bit values. + \param [in] ptr Pointer to data + \return value of type uint32_t at (*ptr) + */ +__STATIC_FORCEINLINE uint32_t __LDRT(volatile uint32_t *ptr) +{ + uint32_t result; + + __ASM volatile ("ldrt %0, %1" : "=r" (result) : "Q" (*ptr) ); + return(result); +} + + +/** + \brief STRT Unprivileged (8 bit) + \details Executes a Unprivileged STRT instruction for 8 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + */ +__STATIC_FORCEINLINE void __STRBT(uint8_t value, volatile uint8_t *ptr) +{ + __ASM volatile ("strbt %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) ); +} + + +/** + \brief STRT Unprivileged (16 bit) + \details Executes a Unprivileged STRT instruction for 16 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + */ +__STATIC_FORCEINLINE void __STRHT(uint16_t value, volatile uint16_t *ptr) +{ + __ASM volatile ("strht %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) ); +} + + +/** + \brief STRT Unprivileged (32 bit) + \details Executes a Unprivileged STRT instruction for 32 bit values. 
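/* Usage sketch (editorial, not part of the header or of this patch): typical
 * saturation use when narrowing a wide accumulator, e.g. mixing two 16-bit
 * audio samples without wrap-around. Note that the bit-position argument of
 * __SSAT/__USAT must be a compile-time constant. */
#include <stdint.h>

static int16_t mix_samples_q15(int16_t a, int16_t b)
{
  int32_t acc = (int32_t)a + (int32_t)b;        /* may exceed the int16_t range   */
  return (int16_t)__SSAT(acc, 16);              /* clamp to [-32768, 32767]       */
}

static uint8_t clamp_to_byte(int32_t level)
{
  return (uint8_t)__USAT(level, 8);             /* clamp to [0, 255]              */
}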
+ \param [in] value Value to store + \param [in] ptr Pointer to location + */ +__STATIC_FORCEINLINE void __STRT(uint32_t value, volatile uint32_t *ptr) +{ + __ASM volatile ("strt %1, %0" : "=Q" (*ptr) : "r" (value) ); +} + +#else /* ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ + (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \ + (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) ) */ + +/** + \brief Signed Saturate + \details Saturates a signed value. + \param [in] value Value to be saturated + \param [in] sat Bit position to saturate to (1..32) + \return Saturated value + */ +__STATIC_FORCEINLINE int32_t __SSAT(int32_t val, uint32_t sat) +{ + if ((sat >= 1U) && (sat <= 32U)) + { + const int32_t max = (int32_t)((1U << (sat - 1U)) - 1U); + const int32_t min = -1 - max ; + if (val > max) + { + return max; + } + else if (val < min) + { + return min; + } + } + return val; +} + +/** + \brief Unsigned Saturate + \details Saturates an unsigned value. + \param [in] value Value to be saturated + \param [in] sat Bit position to saturate to (0..31) + \return Saturated value + */ +__STATIC_FORCEINLINE uint32_t __USAT(int32_t val, uint32_t sat) +{ + if (sat <= 31U) + { + const uint32_t max = ((1U << sat) - 1U); + if (val > (int32_t)max) + { + return max; + } + else if (val < 0) + { + return 0U; + } + } + return (uint32_t)val; +} + +#endif /* ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ + (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \ + (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) ) */ + + +#if ((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ + (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1)) ) +/** + \brief Load-Acquire (8 bit) + \details Executes a LDAB instruction for 8 bit value. + \param [in] ptr Pointer to data + \return value of type uint8_t at (*ptr) + */ +__STATIC_FORCEINLINE uint8_t __LDAB(volatile uint8_t *ptr) +{ + uint32_t result; + + __ASM volatile ("ldab %0, %1" : "=r" (result) : "Q" (*ptr) ); + return ((uint8_t) result); +} + + +/** + \brief Load-Acquire (16 bit) + \details Executes a LDAH instruction for 16 bit values. + \param [in] ptr Pointer to data + \return value of type uint16_t at (*ptr) + */ +__STATIC_FORCEINLINE uint16_t __LDAH(volatile uint16_t *ptr) +{ + uint32_t result; + + __ASM volatile ("ldah %0, %1" : "=r" (result) : "Q" (*ptr) ); + return ((uint16_t) result); +} + + +/** + \brief Load-Acquire (32 bit) + \details Executes a LDA instruction for 32 bit values. + \param [in] ptr Pointer to data + \return value of type uint32_t at (*ptr) + */ +__STATIC_FORCEINLINE uint32_t __LDA(volatile uint32_t *ptr) +{ + uint32_t result; + + __ASM volatile ("lda %0, %1" : "=r" (result) : "Q" (*ptr) ); + return(result); +} + + +/** + \brief Store-Release (8 bit) + \details Executes a STLB instruction for 8 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + */ +__STATIC_FORCEINLINE void __STLB(uint8_t value, volatile uint8_t *ptr) +{ + __ASM volatile ("stlb %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) ); +} + + +/** + \brief Store-Release (16 bit) + \details Executes a STLH instruction for 16 bit values. 
+ \param [in] value Value to store + \param [in] ptr Pointer to location + */ +__STATIC_FORCEINLINE void __STLH(uint16_t value, volatile uint16_t *ptr) +{ + __ASM volatile ("stlh %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) ); +} + + +/** + \brief Store-Release (32 bit) + \details Executes a STL instruction for 32 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + */ +__STATIC_FORCEINLINE void __STL(uint32_t value, volatile uint32_t *ptr) +{ + __ASM volatile ("stl %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) ); +} + + +/** + \brief Load-Acquire Exclusive (8 bit) + \details Executes a LDAB exclusive instruction for 8 bit value. + \param [in] ptr Pointer to data + \return value of type uint8_t at (*ptr) + */ +__STATIC_FORCEINLINE uint8_t __LDAEXB(volatile uint8_t *ptr) +{ + uint32_t result; + + __ASM volatile ("ldaexb %0, %1" : "=r" (result) : "Q" (*ptr) ); + return ((uint8_t) result); +} + + +/** + \brief Load-Acquire Exclusive (16 bit) + \details Executes a LDAH exclusive instruction for 16 bit values. + \param [in] ptr Pointer to data + \return value of type uint16_t at (*ptr) + */ +__STATIC_FORCEINLINE uint16_t __LDAEXH(volatile uint16_t *ptr) +{ + uint32_t result; + + __ASM volatile ("ldaexh %0, %1" : "=r" (result) : "Q" (*ptr) ); + return ((uint16_t) result); +} + + +/** + \brief Load-Acquire Exclusive (32 bit) + \details Executes a LDA exclusive instruction for 32 bit values. + \param [in] ptr Pointer to data + \return value of type uint32_t at (*ptr) + */ +__STATIC_FORCEINLINE uint32_t __LDAEX(volatile uint32_t *ptr) +{ + uint32_t result; + + __ASM volatile ("ldaex %0, %1" : "=r" (result) : "Q" (*ptr) ); + return(result); +} + + +/** + \brief Store-Release Exclusive (8 bit) + \details Executes a STLB exclusive instruction for 8 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + \return 0 Function succeeded + \return 1 Function failed + */ +__STATIC_FORCEINLINE uint32_t __STLEXB(uint8_t value, volatile uint8_t *ptr) +{ + uint32_t result; + + __ASM volatile ("stlexb %0, %2, %1" : "=&r" (result), "=Q" (*ptr) : "r" ((uint32_t)value) ); + return(result); +} + + +/** + \brief Store-Release Exclusive (16 bit) + \details Executes a STLH exclusive instruction for 16 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + \return 0 Function succeeded + \return 1 Function failed + */ +__STATIC_FORCEINLINE uint32_t __STLEXH(uint16_t value, volatile uint16_t *ptr) +{ + uint32_t result; + + __ASM volatile ("stlexh %0, %2, %1" : "=&r" (result), "=Q" (*ptr) : "r" ((uint32_t)value) ); + return(result); +} + + +/** + \brief Store-Release Exclusive (32 bit) + \details Executes a STL exclusive instruction for 32 bit values. 
+ \param [in] value Value to store + \param [in] ptr Pointer to location + \return 0 Function succeeded + \return 1 Function failed + */ +__STATIC_FORCEINLINE uint32_t __STLEX(uint32_t value, volatile uint32_t *ptr) +{ + uint32_t result; + + __ASM volatile ("stlex %0, %2, %1" : "=&r" (result), "=Q" (*ptr) : "r" ((uint32_t)value) ); + return(result); +} + +#endif /* ((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ + (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1)) ) */ + +/*@}*/ /* end of group CMSIS_Core_InstructionInterface */ + + +/* ################### Compiler specific Intrinsics ########################### */ +/** \defgroup CMSIS_SIMD_intrinsics CMSIS SIMD Intrinsics + Access to dedicated SIMD instructions + @{ +*/ + +#if (defined (__ARM_FEATURE_DSP) && (__ARM_FEATURE_DSP == 1)) + +__STATIC_FORCEINLINE uint32_t __SADD8(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("sadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __QADD8(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("qadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __SHADD8(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("shadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __UADD8(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("uadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __UQADD8(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("uqadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __UHADD8(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("uhadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + + +__STATIC_FORCEINLINE uint32_t __SSUB8(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("ssub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __QSUB8(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("qsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __SHSUB8(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("shsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __USUB8(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("usub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __UQSUB8(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("uqsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __UHSUB8(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("uhsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + + +__STATIC_FORCEINLINE uint32_t __SADD16(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("sadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __QADD16(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("qadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} 
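/*
 * Illustrative usage sketch (not part of the upstream CMSIS header): the GCC
 * SIMD intrinsics above operate on uint32_t values holding four 8-bit or two
 * 16-bit lanes. For example, a hypothetical helper that averages two packed
 * byte vectors could be built on the unsigned halving add defined earlier:
 *
 *   static inline uint32_t average_packed_bytes(uint32_t a, uint32_t b)
 *   {
 *     return __UHADD8(a, b);  // each result byte = (a_byte + b_byte) >> 1, no wrap-around
 *   }
 */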
+ +__STATIC_FORCEINLINE uint32_t __SHADD16(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("shadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __UADD16(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("uadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __UQADD16(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("uqadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __UHADD16(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("uhadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __SSUB16(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("ssub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __QSUB16(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("qsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __SHSUB16(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("shsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __USUB16(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("usub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __UQSUB16(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("uqsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __UHSUB16(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("uhsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __SASX(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("sasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __QASX(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("qasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __SHASX(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("shasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __UASX(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("uasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __UQASX(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("uqasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __UHASX(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("uhasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __SSAX(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("ssax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __QSAX(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("qsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __SHSAX(uint32_t op1, 
uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("shsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __USAX(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("usax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __UQSAX(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("uqsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __UHSAX(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("uhsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __USAD8(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("usad8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __USADA8(uint32_t op1, uint32_t op2, uint32_t op3) +{ + uint32_t result; + + __ASM volatile ("usada8 %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) ); + return(result); +} + +#define __SSAT16(ARG1,ARG2) \ +({ \ + int32_t __RES, __ARG1 = (ARG1); \ + __ASM ("ssat16 %0, %1, %2" : "=r" (__RES) : "I" (ARG2), "r" (__ARG1) ); \ + __RES; \ + }) + +#define __USAT16(ARG1,ARG2) \ +({ \ + uint32_t __RES, __ARG1 = (ARG1); \ + __ASM ("usat16 %0, %1, %2" : "=r" (__RES) : "I" (ARG2), "r" (__ARG1) ); \ + __RES; \ + }) + +__STATIC_FORCEINLINE uint32_t __UXTB16(uint32_t op1) +{ + uint32_t result; + + __ASM volatile ("uxtb16 %0, %1" : "=r" (result) : "r" (op1)); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __UXTAB16(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("uxtab16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __SXTB16(uint32_t op1) +{ + uint32_t result; + + __ASM volatile ("sxtb16 %0, %1" : "=r" (result) : "r" (op1)); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __SXTAB16(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("sxtab16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __SMUAD (uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("smuad %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __SMUADX (uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("smuadx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __SMLAD (uint32_t op1, uint32_t op2, uint32_t op3) +{ + uint32_t result; + + __ASM volatile ("smlad %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __SMLADX (uint32_t op1, uint32_t op2, uint32_t op3) +{ + uint32_t result; + + __ASM volatile ("smladx %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) ); + return(result); +} + +__STATIC_FORCEINLINE uint64_t __SMLALD (uint32_t op1, uint32_t op2, uint64_t acc) +{ + union llreg_u{ + uint32_t w32[2]; + uint64_t w64; + } llr; + llr.w64 = acc; + +#ifndef __ARMEB__ /* Little endian */ + __ASM volatile ("smlald %0, %1, %2, %3" : "=r" (llr.w32[0]), "=r" (llr.w32[1]): "r" (op1), "r" (op2) , "0" (llr.w32[0]), "1" (llr.w32[1]) ); +#else /* Big endian */ + __ASM volatile ("smlald %0, %1, %2, %3" : "=r" (llr.w32[1]), "=r" (llr.w32[0]): "r" (op1), "r" (op2) , "0" (llr.w32[1]), "1" (llr.w32[0]) ); +#endif 
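  /* The llreg_u union exposes the 64-bit accumulator as the 32-bit register pair
     that SMLALD accumulates into; the operand order is swapped under __ARMEB__ so
     that the low/high words of acc still map to RdLo/RdHi on big-endian builds. */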
+ + return(llr.w64); +} + +__STATIC_FORCEINLINE uint64_t __SMLALDX (uint32_t op1, uint32_t op2, uint64_t acc) +{ + union llreg_u{ + uint32_t w32[2]; + uint64_t w64; + } llr; + llr.w64 = acc; + +#ifndef __ARMEB__ /* Little endian */ + __ASM volatile ("smlaldx %0, %1, %2, %3" : "=r" (llr.w32[0]), "=r" (llr.w32[1]): "r" (op1), "r" (op2) , "0" (llr.w32[0]), "1" (llr.w32[1]) ); +#else /* Big endian */ + __ASM volatile ("smlaldx %0, %1, %2, %3" : "=r" (llr.w32[1]), "=r" (llr.w32[0]): "r" (op1), "r" (op2) , "0" (llr.w32[1]), "1" (llr.w32[0]) ); +#endif + + return(llr.w64); +} + +__STATIC_FORCEINLINE uint32_t __SMUSD (uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("smusd %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __SMUSDX (uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("smusdx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __SMLSD (uint32_t op1, uint32_t op2, uint32_t op3) +{ + uint32_t result; + + __ASM volatile ("smlsd %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __SMLSDX (uint32_t op1, uint32_t op2, uint32_t op3) +{ + uint32_t result; + + __ASM volatile ("smlsdx %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) ); + return(result); +} + +__STATIC_FORCEINLINE uint64_t __SMLSLD (uint32_t op1, uint32_t op2, uint64_t acc) +{ + union llreg_u{ + uint32_t w32[2]; + uint64_t w64; + } llr; + llr.w64 = acc; + +#ifndef __ARMEB__ /* Little endian */ + __ASM volatile ("smlsld %0, %1, %2, %3" : "=r" (llr.w32[0]), "=r" (llr.w32[1]): "r" (op1), "r" (op2) , "0" (llr.w32[0]), "1" (llr.w32[1]) ); +#else /* Big endian */ + __ASM volatile ("smlsld %0, %1, %2, %3" : "=r" (llr.w32[1]), "=r" (llr.w32[0]): "r" (op1), "r" (op2) , "0" (llr.w32[1]), "1" (llr.w32[0]) ); +#endif + + return(llr.w64); +} + +__STATIC_FORCEINLINE uint64_t __SMLSLDX (uint32_t op1, uint32_t op2, uint64_t acc) +{ + union llreg_u{ + uint32_t w32[2]; + uint64_t w64; + } llr; + llr.w64 = acc; + +#ifndef __ARMEB__ /* Little endian */ + __ASM volatile ("smlsldx %0, %1, %2, %3" : "=r" (llr.w32[0]), "=r" (llr.w32[1]): "r" (op1), "r" (op2) , "0" (llr.w32[0]), "1" (llr.w32[1]) ); +#else /* Big endian */ + __ASM volatile ("smlsldx %0, %1, %2, %3" : "=r" (llr.w32[1]), "=r" (llr.w32[0]): "r" (op1), "r" (op2) , "0" (llr.w32[1]), "1" (llr.w32[0]) ); +#endif + + return(llr.w64); +} + +__STATIC_FORCEINLINE uint32_t __SEL (uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("sel %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE int32_t __QADD( int32_t op1, int32_t op2) +{ + int32_t result; + + __ASM volatile ("qadd %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE int32_t __QSUB( int32_t op1, int32_t op2) +{ + int32_t result; + + __ASM volatile ("qsub %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +#if 0 +#define __PKHBT(ARG1,ARG2,ARG3) \ +({ \ + uint32_t __RES, __ARG1 = (ARG1), __ARG2 = (ARG2); \ + __ASM ("pkhbt %0, %1, %2, lsl %3" : "=r" (__RES) : "r" (__ARG1), "r" (__ARG2), "I" (ARG3) ); \ + __RES; \ + }) + +#define __PKHTB(ARG1,ARG2,ARG3) \ +({ \ + uint32_t __RES, __ARG1 = (ARG1), __ARG2 = (ARG2); \ + if (ARG3 == 0) \ + __ASM ("pkhtb %0, %1, %2" : "=r" (__RES) : "r" (__ARG1), "r" (__ARG2) ); \ + else \ + __ASM ("pkhtb %0, %1, %2, asr %3" : "=r" (__RES) : 
"r" (__ARG1), "r" (__ARG2), "I" (ARG3) ); \ + __RES; \ + }) +#endif + +#define __PKHBT(ARG1,ARG2,ARG3) ( ((((uint32_t)(ARG1)) ) & 0x0000FFFFUL) | \ + ((((uint32_t)(ARG2)) << (ARG3)) & 0xFFFF0000UL) ) + +#define __PKHTB(ARG1,ARG2,ARG3) ( ((((uint32_t)(ARG1)) ) & 0xFFFF0000UL) | \ + ((((uint32_t)(ARG2)) >> (ARG3)) & 0x0000FFFFUL) ) + +__STATIC_FORCEINLINE int32_t __SMMLA (int32_t op1, int32_t op2, int32_t op3) +{ + int32_t result; + + __ASM volatile ("smmla %0, %1, %2, %3" : "=r" (result): "r" (op1), "r" (op2), "r" (op3) ); + return(result); +} + +#endif /* (__ARM_FEATURE_DSP == 1) */ +/*@} end of group CMSIS_SIMD_intrinsics */ + + +#pragma GCC diagnostic pop + +#endif /* __CMSIS_GCC_H */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Drivers/CMSIS/Include/cmsis_iccarm.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Drivers/CMSIS/Include/cmsis_iccarm.h new file mode 100644 index 0000000..3c90a2c --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Drivers/CMSIS/Include/cmsis_iccarm.h @@ -0,0 +1,935 @@ +/**************************************************************************//** + * @file cmsis_iccarm.h + * @brief CMSIS compiler ICCARM (IAR Compiler for Arm) header file + * @version V5.0.7 + * @date 19. June 2018 + ******************************************************************************/ + +//------------------------------------------------------------------------------ +// +// Copyright (c) 2017-2018 IAR Systems +// +// Licensed under the Apache License, Version 2.0 (the "License") +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +//------------------------------------------------------------------------------ + + +#ifndef __CMSIS_ICCARM_H__ +#define __CMSIS_ICCARM_H__ + +#ifndef __ICCARM__ + #error This file should only be compiled by ICCARM +#endif + +#pragma system_include + +#define __IAR_FT _Pragma("inline=forced") __intrinsic + +#if (__VER__ >= 8000000) + #define __ICCARM_V8 1 +#else + #define __ICCARM_V8 0 +#endif + +#ifndef __ALIGNED + #if __ICCARM_V8 + #define __ALIGNED(x) __attribute__((aligned(x))) + #elif (__VER__ >= 7080000) + /* Needs IAR language extensions */ + #define __ALIGNED(x) __attribute__((aligned(x))) + #else + #warning No compiler specific solution for __ALIGNED.__ALIGNED is ignored. + #define __ALIGNED(x) + #endif +#endif + + +/* Define compiler macros for CPU architecture, used in CMSIS 5. 
+ */ +#if __ARM_ARCH_6M__ || __ARM_ARCH_7M__ || __ARM_ARCH_7EM__ || __ARM_ARCH_8M_BASE__ || __ARM_ARCH_8M_MAIN__ +/* Macros already defined */ +#else + #if defined(__ARM8M_MAINLINE__) || defined(__ARM8EM_MAINLINE__) + #define __ARM_ARCH_8M_MAIN__ 1 + #elif defined(__ARM8M_BASELINE__) + #define __ARM_ARCH_8M_BASE__ 1 + #elif defined(__ARM_ARCH_PROFILE) && __ARM_ARCH_PROFILE == 'M' + #if __ARM_ARCH == 6 + #define __ARM_ARCH_6M__ 1 + #elif __ARM_ARCH == 7 + #if __ARM_FEATURE_DSP + #define __ARM_ARCH_7EM__ 1 + #else + #define __ARM_ARCH_7M__ 1 + #endif + #endif /* __ARM_ARCH */ + #endif /* __ARM_ARCH_PROFILE == 'M' */ +#endif + +/* Alternativ core deduction for older ICCARM's */ +#if !defined(__ARM_ARCH_6M__) && !defined(__ARM_ARCH_7M__) && !defined(__ARM_ARCH_7EM__) && \ + !defined(__ARM_ARCH_8M_BASE__) && !defined(__ARM_ARCH_8M_MAIN__) + #if defined(__ARM6M__) && (__CORE__ == __ARM6M__) + #define __ARM_ARCH_6M__ 1 + #elif defined(__ARM7M__) && (__CORE__ == __ARM7M__) + #define __ARM_ARCH_7M__ 1 + #elif defined(__ARM7EM__) && (__CORE__ == __ARM7EM__) + #define __ARM_ARCH_7EM__ 1 + #elif defined(__ARM8M_BASELINE__) && (__CORE == __ARM8M_BASELINE__) + #define __ARM_ARCH_8M_BASE__ 1 + #elif defined(__ARM8M_MAINLINE__) && (__CORE == __ARM8M_MAINLINE__) + #define __ARM_ARCH_8M_MAIN__ 1 + #elif defined(__ARM8EM_MAINLINE__) && (__CORE == __ARM8EM_MAINLINE__) + #define __ARM_ARCH_8M_MAIN__ 1 + #else + #error "Unknown target." + #endif +#endif + + + +#if defined(__ARM_ARCH_6M__) && __ARM_ARCH_6M__==1 + #define __IAR_M0_FAMILY 1 +#elif defined(__ARM_ARCH_8M_BASE__) && __ARM_ARCH_8M_BASE__==1 + #define __IAR_M0_FAMILY 1 +#else + #define __IAR_M0_FAMILY 0 +#endif + + +#ifndef __ASM + #define __ASM __asm +#endif + +#ifndef __INLINE + #define __INLINE inline +#endif + +#ifndef __NO_RETURN + #if __ICCARM_V8 + #define __NO_RETURN __attribute__((__noreturn__)) + #else + #define __NO_RETURN _Pragma("object_attribute=__noreturn") + #endif +#endif + +#ifndef __PACKED + #if __ICCARM_V8 + #define __PACKED __attribute__((packed, aligned(1))) + #else + /* Needs IAR language extensions */ + #define __PACKED __packed + #endif +#endif + +#ifndef __PACKED_STRUCT + #if __ICCARM_V8 + #define __PACKED_STRUCT struct __attribute__((packed, aligned(1))) + #else + /* Needs IAR language extensions */ + #define __PACKED_STRUCT __packed struct + #endif +#endif + +#ifndef __PACKED_UNION + #if __ICCARM_V8 + #define __PACKED_UNION union __attribute__((packed, aligned(1))) + #else + /* Needs IAR language extensions */ + #define __PACKED_UNION __packed union + #endif +#endif + +#ifndef __RESTRICT + #define __RESTRICT __restrict +#endif + +#ifndef __STATIC_INLINE + #define __STATIC_INLINE static inline +#endif + +#ifndef __FORCEINLINE + #define __FORCEINLINE _Pragma("inline=forced") +#endif + +#ifndef __STATIC_FORCEINLINE + #define __STATIC_FORCEINLINE __FORCEINLINE __STATIC_INLINE +#endif + +#ifndef __UNALIGNED_UINT16_READ +#pragma language=save +#pragma language=extended +__IAR_FT uint16_t __iar_uint16_read(void const *ptr) +{ + return *(__packed uint16_t*)(ptr); +} +#pragma language=restore +#define __UNALIGNED_UINT16_READ(PTR) __iar_uint16_read(PTR) +#endif + + +#ifndef __UNALIGNED_UINT16_WRITE +#pragma language=save +#pragma language=extended +__IAR_FT void __iar_uint16_write(void const *ptr, uint16_t val) +{ + *(__packed uint16_t*)(ptr) = val;; +} +#pragma language=restore +#define __UNALIGNED_UINT16_WRITE(PTR,VAL) __iar_uint16_write(PTR,VAL) +#endif + +#ifndef __UNALIGNED_UINT32_READ +#pragma language=save +#pragma 
language=extended +__IAR_FT uint32_t __iar_uint32_read(void const *ptr) +{ + return *(__packed uint32_t*)(ptr); +} +#pragma language=restore +#define __UNALIGNED_UINT32_READ(PTR) __iar_uint32_read(PTR) +#endif + +#ifndef __UNALIGNED_UINT32_WRITE +#pragma language=save +#pragma language=extended +__IAR_FT void __iar_uint32_write(void const *ptr, uint32_t val) +{ + *(__packed uint32_t*)(ptr) = val;; +} +#pragma language=restore +#define __UNALIGNED_UINT32_WRITE(PTR,VAL) __iar_uint32_write(PTR,VAL) +#endif + +#ifndef __UNALIGNED_UINT32 /* deprecated */ +#pragma language=save +#pragma language=extended +__packed struct __iar_u32 { uint32_t v; }; +#pragma language=restore +#define __UNALIGNED_UINT32(PTR) (((struct __iar_u32 *)(PTR))->v) +#endif + +#ifndef __USED + #if __ICCARM_V8 + #define __USED __attribute__((used)) + #else + #define __USED _Pragma("__root") + #endif +#endif + +#ifndef __WEAK + #if __ICCARM_V8 + #define __WEAK __attribute__((weak)) + #else + #define __WEAK _Pragma("__weak") + #endif +#endif + + +#ifndef __ICCARM_INTRINSICS_VERSION__ + #define __ICCARM_INTRINSICS_VERSION__ 0 +#endif + +#if __ICCARM_INTRINSICS_VERSION__ == 2 + + #if defined(__CLZ) + #undef __CLZ + #endif + #if defined(__REVSH) + #undef __REVSH + #endif + #if defined(__RBIT) + #undef __RBIT + #endif + #if defined(__SSAT) + #undef __SSAT + #endif + #if defined(__USAT) + #undef __USAT + #endif + + #include "iccarm_builtin.h" + + #define __disable_fault_irq __iar_builtin_disable_fiq + #define __disable_irq __iar_builtin_disable_interrupt + #define __enable_fault_irq __iar_builtin_enable_fiq + #define __enable_irq __iar_builtin_enable_interrupt + #define __arm_rsr __iar_builtin_rsr + #define __arm_wsr __iar_builtin_wsr + + + #define __get_APSR() (__arm_rsr("APSR")) + #define __get_BASEPRI() (__arm_rsr("BASEPRI")) + #define __get_CONTROL() (__arm_rsr("CONTROL")) + #define __get_FAULTMASK() (__arm_rsr("FAULTMASK")) + + #if ((defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U)) && \ + (defined (__FPU_USED ) && (__FPU_USED == 1U)) ) + #define __get_FPSCR() (__arm_rsr("FPSCR")) + #define __set_FPSCR(VALUE) (__arm_wsr("FPSCR", (VALUE))) + #else + #define __get_FPSCR() ( 0 ) + #define __set_FPSCR(VALUE) ((void)VALUE) + #endif + + #define __get_IPSR() (__arm_rsr("IPSR")) + #define __get_MSP() (__arm_rsr("MSP")) + #if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) && \ + (!defined (__ARM_FEATURE_CMSE) || (__ARM_FEATURE_CMSE < 3))) + // without main extensions, the non-secure MSPLIM is RAZ/WI + #define __get_MSPLIM() (0U) + #else + #define __get_MSPLIM() (__arm_rsr("MSPLIM")) + #endif + #define __get_PRIMASK() (__arm_rsr("PRIMASK")) + #define __get_PSP() (__arm_rsr("PSP")) + + #if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) && \ + (!defined (__ARM_FEATURE_CMSE) || (__ARM_FEATURE_CMSE < 3))) + // without main extensions, the non-secure PSPLIM is RAZ/WI + #define __get_PSPLIM() (0U) + #else + #define __get_PSPLIM() (__arm_rsr("PSPLIM")) + #endif + + #define __get_xPSR() (__arm_rsr("xPSR")) + + #define __set_BASEPRI(VALUE) (__arm_wsr("BASEPRI", (VALUE))) + #define __set_BASEPRI_MAX(VALUE) (__arm_wsr("BASEPRI_MAX", (VALUE))) + #define __set_CONTROL(VALUE) (__arm_wsr("CONTROL", (VALUE))) + #define __set_FAULTMASK(VALUE) (__arm_wsr("FAULTMASK", (VALUE))) + #define __set_MSP(VALUE) (__arm_wsr("MSP", (VALUE))) + + #if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) && \ + (!defined (__ARM_FEATURE_CMSE) || (__ARM_FEATURE_CMSE < 3))) + // without main extensions, the non-secure 
MSPLIM is RAZ/WI + #define __set_MSPLIM(VALUE) ((void)(VALUE)) + #else + #define __set_MSPLIM(VALUE) (__arm_wsr("MSPLIM", (VALUE))) + #endif + #define __set_PRIMASK(VALUE) (__arm_wsr("PRIMASK", (VALUE))) + #define __set_PSP(VALUE) (__arm_wsr("PSP", (VALUE))) + #if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) && \ + (!defined (__ARM_FEATURE_CMSE) || (__ARM_FEATURE_CMSE < 3))) + // without main extensions, the non-secure PSPLIM is RAZ/WI + #define __set_PSPLIM(VALUE) ((void)(VALUE)) + #else + #define __set_PSPLIM(VALUE) (__arm_wsr("PSPLIM", (VALUE))) + #endif + + #define __TZ_get_CONTROL_NS() (__arm_rsr("CONTROL_NS")) + #define __TZ_set_CONTROL_NS(VALUE) (__arm_wsr("CONTROL_NS", (VALUE))) + #define __TZ_get_PSP_NS() (__arm_rsr("PSP_NS")) + #define __TZ_set_PSP_NS(VALUE) (__arm_wsr("PSP_NS", (VALUE))) + #define __TZ_get_MSP_NS() (__arm_rsr("MSP_NS")) + #define __TZ_set_MSP_NS(VALUE) (__arm_wsr("MSP_NS", (VALUE))) + #define __TZ_get_SP_NS() (__arm_rsr("SP_NS")) + #define __TZ_set_SP_NS(VALUE) (__arm_wsr("SP_NS", (VALUE))) + #define __TZ_get_PRIMASK_NS() (__arm_rsr("PRIMASK_NS")) + #define __TZ_set_PRIMASK_NS(VALUE) (__arm_wsr("PRIMASK_NS", (VALUE))) + #define __TZ_get_BASEPRI_NS() (__arm_rsr("BASEPRI_NS")) + #define __TZ_set_BASEPRI_NS(VALUE) (__arm_wsr("BASEPRI_NS", (VALUE))) + #define __TZ_get_FAULTMASK_NS() (__arm_rsr("FAULTMASK_NS")) + #define __TZ_set_FAULTMASK_NS(VALUE)(__arm_wsr("FAULTMASK_NS", (VALUE))) + + #if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) && \ + (!defined (__ARM_FEATURE_CMSE) || (__ARM_FEATURE_CMSE < 3))) + // without main extensions, the non-secure PSPLIM is RAZ/WI + #define __TZ_get_PSPLIM_NS() (0U) + #define __TZ_set_PSPLIM_NS(VALUE) ((void)(VALUE)) + #else + #define __TZ_get_PSPLIM_NS() (__arm_rsr("PSPLIM_NS")) + #define __TZ_set_PSPLIM_NS(VALUE) (__arm_wsr("PSPLIM_NS", (VALUE))) + #endif + + #define __TZ_get_MSPLIM_NS() (__arm_rsr("MSPLIM_NS")) + #define __TZ_set_MSPLIM_NS(VALUE) (__arm_wsr("MSPLIM_NS", (VALUE))) + + #define __NOP __iar_builtin_no_operation + + #define __CLZ __iar_builtin_CLZ + #define __CLREX __iar_builtin_CLREX + + #define __DMB __iar_builtin_DMB + #define __DSB __iar_builtin_DSB + #define __ISB __iar_builtin_ISB + + #define __LDREXB __iar_builtin_LDREXB + #define __LDREXH __iar_builtin_LDREXH + #define __LDREXW __iar_builtin_LDREX + + #define __RBIT __iar_builtin_RBIT + #define __REV __iar_builtin_REV + #define __REV16 __iar_builtin_REV16 + + __IAR_FT int16_t __REVSH(int16_t val) + { + return (int16_t) __iar_builtin_REVSH(val); + } + + #define __ROR __iar_builtin_ROR + #define __RRX __iar_builtin_RRX + + #define __SEV __iar_builtin_SEV + + #if !__IAR_M0_FAMILY + #define __SSAT __iar_builtin_SSAT + #endif + + #define __STREXB __iar_builtin_STREXB + #define __STREXH __iar_builtin_STREXH + #define __STREXW __iar_builtin_STREX + + #if !__IAR_M0_FAMILY + #define __USAT __iar_builtin_USAT + #endif + + #define __WFE __iar_builtin_WFE + #define __WFI __iar_builtin_WFI + + #if __ARM_MEDIA__ + #define __SADD8 __iar_builtin_SADD8 + #define __QADD8 __iar_builtin_QADD8 + #define __SHADD8 __iar_builtin_SHADD8 + #define __UADD8 __iar_builtin_UADD8 + #define __UQADD8 __iar_builtin_UQADD8 + #define __UHADD8 __iar_builtin_UHADD8 + #define __SSUB8 __iar_builtin_SSUB8 + #define __QSUB8 __iar_builtin_QSUB8 + #define __SHSUB8 __iar_builtin_SHSUB8 + #define __USUB8 __iar_builtin_USUB8 + #define __UQSUB8 __iar_builtin_UQSUB8 + #define __UHSUB8 __iar_builtin_UHSUB8 + #define __SADD16 __iar_builtin_SADD16 + #define __QADD16 
__iar_builtin_QADD16 + #define __SHADD16 __iar_builtin_SHADD16 + #define __UADD16 __iar_builtin_UADD16 + #define __UQADD16 __iar_builtin_UQADD16 + #define __UHADD16 __iar_builtin_UHADD16 + #define __SSUB16 __iar_builtin_SSUB16 + #define __QSUB16 __iar_builtin_QSUB16 + #define __SHSUB16 __iar_builtin_SHSUB16 + #define __USUB16 __iar_builtin_USUB16 + #define __UQSUB16 __iar_builtin_UQSUB16 + #define __UHSUB16 __iar_builtin_UHSUB16 + #define __SASX __iar_builtin_SASX + #define __QASX __iar_builtin_QASX + #define __SHASX __iar_builtin_SHASX + #define __UASX __iar_builtin_UASX + #define __UQASX __iar_builtin_UQASX + #define __UHASX __iar_builtin_UHASX + #define __SSAX __iar_builtin_SSAX + #define __QSAX __iar_builtin_QSAX + #define __SHSAX __iar_builtin_SHSAX + #define __USAX __iar_builtin_USAX + #define __UQSAX __iar_builtin_UQSAX + #define __UHSAX __iar_builtin_UHSAX + #define __USAD8 __iar_builtin_USAD8 + #define __USADA8 __iar_builtin_USADA8 + #define __SSAT16 __iar_builtin_SSAT16 + #define __USAT16 __iar_builtin_USAT16 + #define __UXTB16 __iar_builtin_UXTB16 + #define __UXTAB16 __iar_builtin_UXTAB16 + #define __SXTB16 __iar_builtin_SXTB16 + #define __SXTAB16 __iar_builtin_SXTAB16 + #define __SMUAD __iar_builtin_SMUAD + #define __SMUADX __iar_builtin_SMUADX + #define __SMMLA __iar_builtin_SMMLA + #define __SMLAD __iar_builtin_SMLAD + #define __SMLADX __iar_builtin_SMLADX + #define __SMLALD __iar_builtin_SMLALD + #define __SMLALDX __iar_builtin_SMLALDX + #define __SMUSD __iar_builtin_SMUSD + #define __SMUSDX __iar_builtin_SMUSDX + #define __SMLSD __iar_builtin_SMLSD + #define __SMLSDX __iar_builtin_SMLSDX + #define __SMLSLD __iar_builtin_SMLSLD + #define __SMLSLDX __iar_builtin_SMLSLDX + #define __SEL __iar_builtin_SEL + #define __QADD __iar_builtin_QADD + #define __QSUB __iar_builtin_QSUB + #define __PKHBT __iar_builtin_PKHBT + #define __PKHTB __iar_builtin_PKHTB + #endif + +#else /* __ICCARM_INTRINSICS_VERSION__ == 2 */ + + #if __IAR_M0_FAMILY + /* Avoid clash between intrinsics.h and arm_math.h when compiling for Cortex-M0. */ + #define __CLZ __cmsis_iar_clz_not_active + #define __SSAT __cmsis_iar_ssat_not_active + #define __USAT __cmsis_iar_usat_not_active + #define __RBIT __cmsis_iar_rbit_not_active + #define __get_APSR __cmsis_iar_get_APSR_not_active + #endif + + + #if (!((defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U)) && \ + (defined (__FPU_USED ) && (__FPU_USED == 1U)) )) + #define __get_FPSCR __cmsis_iar_get_FPSR_not_active + #define __set_FPSCR __cmsis_iar_set_FPSR_not_active + #endif + + #ifdef __INTRINSICS_INCLUDED + #error intrinsics.h is already included previously! + #endif + + #include + + #if __IAR_M0_FAMILY + /* Avoid clash between intrinsics.h and arm_math.h when compiling for Cortex-M0. 
*/ + #undef __CLZ + #undef __SSAT + #undef __USAT + #undef __RBIT + #undef __get_APSR + + __STATIC_INLINE uint8_t __CLZ(uint32_t data) + { + if (data == 0U) { return 32U; } + + uint32_t count = 0U; + uint32_t mask = 0x80000000U; + + while ((data & mask) == 0U) + { + count += 1U; + mask = mask >> 1U; + } + return count; + } + + __STATIC_INLINE uint32_t __RBIT(uint32_t v) + { + uint8_t sc = 31U; + uint32_t r = v; + for (v >>= 1U; v; v >>= 1U) + { + r <<= 1U; + r |= v & 1U; + sc--; + } + return (r << sc); + } + + __STATIC_INLINE uint32_t __get_APSR(void) + { + uint32_t res; + __asm("MRS %0,APSR" : "=r" (res)); + return res; + } + + #endif + + #if (!((defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U)) && \ + (defined (__FPU_USED ) && (__FPU_USED == 1U)) )) + #undef __get_FPSCR + #undef __set_FPSCR + #define __get_FPSCR() (0) + #define __set_FPSCR(VALUE) ((void)VALUE) + #endif + + #pragma diag_suppress=Pe940 + #pragma diag_suppress=Pe177 + + #define __enable_irq __enable_interrupt + #define __disable_irq __disable_interrupt + #define __NOP __no_operation + + #define __get_xPSR __get_PSR + + #if (!defined(__ARM_ARCH_6M__) || __ARM_ARCH_6M__==0) + + __IAR_FT uint32_t __LDREXW(uint32_t volatile *ptr) + { + return __LDREX((unsigned long *)ptr); + } + + __IAR_FT uint32_t __STREXW(uint32_t value, uint32_t volatile *ptr) + { + return __STREX(value, (unsigned long *)ptr); + } + #endif + + + /* __CORTEX_M is defined in core_cm0.h, core_cm3.h and core_cm4.h. */ + #if (__CORTEX_M >= 0x03) + + __IAR_FT uint32_t __RRX(uint32_t value) + { + uint32_t result; + __ASM("RRX %0, %1" : "=r"(result) : "r" (value) : "cc"); + return(result); + } + + __IAR_FT void __set_BASEPRI_MAX(uint32_t value) + { + __asm volatile("MSR BASEPRI_MAX,%0"::"r" (value)); + } + + + #define __enable_fault_irq __enable_fiq + #define __disable_fault_irq __disable_fiq + + + #endif /* (__CORTEX_M >= 0x03) */ + + __IAR_FT uint32_t __ROR(uint32_t op1, uint32_t op2) + { + return (op1 >> op2) | (op1 << ((sizeof(op1)*8)-op2)); + } + + #if ((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ + (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1)) ) + + __IAR_FT uint32_t __get_MSPLIM(void) + { + uint32_t res; + #if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) && \ + (!defined (__ARM_FEATURE_CMSE ) || (__ARM_FEATURE_CMSE < 3))) + // without main extensions, the non-secure MSPLIM is RAZ/WI + res = 0U; + #else + __asm volatile("MRS %0,MSPLIM" : "=r" (res)); + #endif + return res; + } + + __IAR_FT void __set_MSPLIM(uint32_t value) + { + #if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) && \ + (!defined (__ARM_FEATURE_CMSE ) || (__ARM_FEATURE_CMSE < 3))) + // without main extensions, the non-secure MSPLIM is RAZ/WI + (void)value; + #else + __asm volatile("MSR MSPLIM,%0" :: "r" (value)); + #endif + } + + __IAR_FT uint32_t __get_PSPLIM(void) + { + uint32_t res; + #if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) && \ + (!defined (__ARM_FEATURE_CMSE ) || (__ARM_FEATURE_CMSE < 3))) + // without main extensions, the non-secure PSPLIM is RAZ/WI + res = 0U; + #else + __asm volatile("MRS %0,PSPLIM" : "=r" (res)); + #endif + return res; + } + + __IAR_FT void __set_PSPLIM(uint32_t value) + { + #if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) && \ + (!defined (__ARM_FEATURE_CMSE ) || (__ARM_FEATURE_CMSE < 3))) + // without main extensions, the non-secure PSPLIM is RAZ/WI + (void)value; + #else + __asm volatile("MSR PSPLIM,%0" :: "r" (value)); + #endif 
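    /* Where PSPLIM is implemented (Main Extension, or the Secure state on Baseline),
       the MSR above arms hardware stack-limit checking: a later PSP push below this
       limit raises a stack-overflow fault. */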
+ } + + __IAR_FT uint32_t __TZ_get_CONTROL_NS(void) + { + uint32_t res; + __asm volatile("MRS %0,CONTROL_NS" : "=r" (res)); + return res; + } + + __IAR_FT void __TZ_set_CONTROL_NS(uint32_t value) + { + __asm volatile("MSR CONTROL_NS,%0" :: "r" (value)); + } + + __IAR_FT uint32_t __TZ_get_PSP_NS(void) + { + uint32_t res; + __asm volatile("MRS %0,PSP_NS" : "=r" (res)); + return res; + } + + __IAR_FT void __TZ_set_PSP_NS(uint32_t value) + { + __asm volatile("MSR PSP_NS,%0" :: "r" (value)); + } + + __IAR_FT uint32_t __TZ_get_MSP_NS(void) + { + uint32_t res; + __asm volatile("MRS %0,MSP_NS" : "=r" (res)); + return res; + } + + __IAR_FT void __TZ_set_MSP_NS(uint32_t value) + { + __asm volatile("MSR MSP_NS,%0" :: "r" (value)); + } + + __IAR_FT uint32_t __TZ_get_SP_NS(void) + { + uint32_t res; + __asm volatile("MRS %0,SP_NS" : "=r" (res)); + return res; + } + __IAR_FT void __TZ_set_SP_NS(uint32_t value) + { + __asm volatile("MSR SP_NS,%0" :: "r" (value)); + } + + __IAR_FT uint32_t __TZ_get_PRIMASK_NS(void) + { + uint32_t res; + __asm volatile("MRS %0,PRIMASK_NS" : "=r" (res)); + return res; + } + + __IAR_FT void __TZ_set_PRIMASK_NS(uint32_t value) + { + __asm volatile("MSR PRIMASK_NS,%0" :: "r" (value)); + } + + __IAR_FT uint32_t __TZ_get_BASEPRI_NS(void) + { + uint32_t res; + __asm volatile("MRS %0,BASEPRI_NS" : "=r" (res)); + return res; + } + + __IAR_FT void __TZ_set_BASEPRI_NS(uint32_t value) + { + __asm volatile("MSR BASEPRI_NS,%0" :: "r" (value)); + } + + __IAR_FT uint32_t __TZ_get_FAULTMASK_NS(void) + { + uint32_t res; + __asm volatile("MRS %0,FAULTMASK_NS" : "=r" (res)); + return res; + } + + __IAR_FT void __TZ_set_FAULTMASK_NS(uint32_t value) + { + __asm volatile("MSR FAULTMASK_NS,%0" :: "r" (value)); + } + + __IAR_FT uint32_t __TZ_get_PSPLIM_NS(void) + { + uint32_t res; + #if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) && \ + (!defined (__ARM_FEATURE_CMSE ) || (__ARM_FEATURE_CMSE < 3))) + // without main extensions, the non-secure PSPLIM is RAZ/WI + res = 0U; + #else + __asm volatile("MRS %0,PSPLIM_NS" : "=r" (res)); + #endif + return res; + } + + __IAR_FT void __TZ_set_PSPLIM_NS(uint32_t value) + { + #if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) && \ + (!defined (__ARM_FEATURE_CMSE ) || (__ARM_FEATURE_CMSE < 3))) + // without main extensions, the non-secure PSPLIM is RAZ/WI + (void)value; + #else + __asm volatile("MSR PSPLIM_NS,%0" :: "r" (value)); + #endif + } + + __IAR_FT uint32_t __TZ_get_MSPLIM_NS(void) + { + uint32_t res; + __asm volatile("MRS %0,MSPLIM_NS" : "=r" (res)); + return res; + } + + __IAR_FT void __TZ_set_MSPLIM_NS(uint32_t value) + { + __asm volatile("MSR MSPLIM_NS,%0" :: "r" (value)); + } + + #endif /* __ARM_ARCH_8M_MAIN__ or __ARM_ARCH_8M_BASE__ */ + +#endif /* __ICCARM_INTRINSICS_VERSION__ == 2 */ + +#define __BKPT(value) __asm volatile ("BKPT %0" : : "i"(value)) + +#if __IAR_M0_FAMILY + __STATIC_INLINE int32_t __SSAT(int32_t val, uint32_t sat) + { + if ((sat >= 1U) && (sat <= 32U)) + { + const int32_t max = (int32_t)((1U << (sat - 1U)) - 1U); + const int32_t min = -1 - max ; + if (val > max) + { + return max; + } + else if (val < min) + { + return min; + } + } + return val; + } + + __STATIC_INLINE uint32_t __USAT(int32_t val, uint32_t sat) + { + if (sat <= 31U) + { + const uint32_t max = ((1U << sat) - 1U); + if (val > (int32_t)max) + { + return max; + } + else if (val < 0) + { + return 0U; + } + } + return (uint32_t)val; + } +#endif + +#if (__CORTEX_M >= 0x03) /* __CORTEX_M is defined in core_cm0.h, core_cm3.h and 
core_cm4.h. */ + + __IAR_FT uint8_t __LDRBT(volatile uint8_t *addr) + { + uint32_t res; + __ASM("LDRBT %0, [%1]" : "=r" (res) : "r" (addr) : "memory"); + return ((uint8_t)res); + } + + __IAR_FT uint16_t __LDRHT(volatile uint16_t *addr) + { + uint32_t res; + __ASM("LDRHT %0, [%1]" : "=r" (res) : "r" (addr) : "memory"); + return ((uint16_t)res); + } + + __IAR_FT uint32_t __LDRT(volatile uint32_t *addr) + { + uint32_t res; + __ASM("LDRT %0, [%1]" : "=r" (res) : "r" (addr) : "memory"); + return res; + } + + __IAR_FT void __STRBT(uint8_t value, volatile uint8_t *addr) + { + __ASM("STRBT %1, [%0]" : : "r" (addr), "r" ((uint32_t)value) : "memory"); + } + + __IAR_FT void __STRHT(uint16_t value, volatile uint16_t *addr) + { + __ASM("STRHT %1, [%0]" : : "r" (addr), "r" ((uint32_t)value) : "memory"); + } + + __IAR_FT void __STRT(uint32_t value, volatile uint32_t *addr) + { + __ASM("STRT %1, [%0]" : : "r" (addr), "r" (value) : "memory"); + } + +#endif /* (__CORTEX_M >= 0x03) */ + +#if ((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ + (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1)) ) + + + __IAR_FT uint8_t __LDAB(volatile uint8_t *ptr) + { + uint32_t res; + __ASM volatile ("LDAB %0, [%1]" : "=r" (res) : "r" (ptr) : "memory"); + return ((uint8_t)res); + } + + __IAR_FT uint16_t __LDAH(volatile uint16_t *ptr) + { + uint32_t res; + __ASM volatile ("LDAH %0, [%1]" : "=r" (res) : "r" (ptr) : "memory"); + return ((uint16_t)res); + } + + __IAR_FT uint32_t __LDA(volatile uint32_t *ptr) + { + uint32_t res; + __ASM volatile ("LDA %0, [%1]" : "=r" (res) : "r" (ptr) : "memory"); + return res; + } + + __IAR_FT void __STLB(uint8_t value, volatile uint8_t *ptr) + { + __ASM volatile ("STLB %1, [%0]" :: "r" (ptr), "r" (value) : "memory"); + } + + __IAR_FT void __STLH(uint16_t value, volatile uint16_t *ptr) + { + __ASM volatile ("STLH %1, [%0]" :: "r" (ptr), "r" (value) : "memory"); + } + + __IAR_FT void __STL(uint32_t value, volatile uint32_t *ptr) + { + __ASM volatile ("STL %1, [%0]" :: "r" (ptr), "r" (value) : "memory"); + } + + __IAR_FT uint8_t __LDAEXB(volatile uint8_t *ptr) + { + uint32_t res; + __ASM volatile ("LDAEXB %0, [%1]" : "=r" (res) : "r" (ptr) : "memory"); + return ((uint8_t)res); + } + + __IAR_FT uint16_t __LDAEXH(volatile uint16_t *ptr) + { + uint32_t res; + __ASM volatile ("LDAEXH %0, [%1]" : "=r" (res) : "r" (ptr) : "memory"); + return ((uint16_t)res); + } + + __IAR_FT uint32_t __LDAEX(volatile uint32_t *ptr) + { + uint32_t res; + __ASM volatile ("LDAEX %0, [%1]" : "=r" (res) : "r" (ptr) : "memory"); + return res; + } + + __IAR_FT uint32_t __STLEXB(uint8_t value, volatile uint8_t *ptr) + { + uint32_t res; + __ASM volatile ("STLEXB %0, %2, [%1]" : "=r" (res) : "r" (ptr), "r" (value) : "memory"); + return res; + } + + __IAR_FT uint32_t __STLEXH(uint16_t value, volatile uint16_t *ptr) + { + uint32_t res; + __ASM volatile ("STLEXH %0, %2, [%1]" : "=r" (res) : "r" (ptr), "r" (value) : "memory"); + return res; + } + + __IAR_FT uint32_t __STLEX(uint32_t value, volatile uint32_t *ptr) + { + uint32_t res; + __ASM volatile ("STLEX %0, %2, [%1]" : "=r" (res) : "r" (ptr), "r" (value) : "memory"); + return res; + } + +#endif /* __ARM_ARCH_8M_MAIN__ or __ARM_ARCH_8M_BASE__ */ + +#undef __IAR_FT +#undef __IAR_M0_FAMILY +#undef __ICCARM_V8 + +#pragma diag_default=Pe940 +#pragma diag_default=Pe177 + +#endif /* __CMSIS_ICCARM_H__ */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Drivers/CMSIS/Include/cmsis_version.h 
b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Drivers/CMSIS/Include/cmsis_version.h new file mode 100644 index 0000000..ae3f2e3 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Drivers/CMSIS/Include/cmsis_version.h @@ -0,0 +1,39 @@ +/**************************************************************************//** + * @file cmsis_version.h + * @brief CMSIS Core(M) Version definitions + * @version V5.0.2 + * @date 19. April 2017 + ******************************************************************************/ +/* + * Copyright (c) 2009-2017 ARM Limited. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#if defined ( __ICCARM__ ) + #pragma system_include /* treat file as system include file for MISRA check */ +#elif defined (__clang__) + #pragma clang system_header /* treat file as system include file */ +#endif + +#ifndef __CMSIS_VERSION_H +#define __CMSIS_VERSION_H + +/* CMSIS Version definitions */ +#define __CM_CMSIS_VERSION_MAIN ( 5U) /*!< [31:16] CMSIS Core(M) main version */ +#define __CM_CMSIS_VERSION_SUB ( 1U) /*!< [15:0] CMSIS Core(M) sub version */ +#define __CM_CMSIS_VERSION ((__CM_CMSIS_VERSION_MAIN << 16U) | \ + __CM_CMSIS_VERSION_SUB ) /*!< CMSIS Core(M) version number */ +#endif diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Drivers/CMSIS/Include/core_armv8mbl.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Drivers/CMSIS/Include/core_armv8mbl.h new file mode 100644 index 0000000..ec76ab2 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Drivers/CMSIS/Include/core_armv8mbl.h @@ -0,0 +1,1918 @@ +/**************************************************************************//** + * @file core_armv8mbl.h + * @brief CMSIS Armv8-M Baseline Core Peripheral Access Layer Header File + * @version V5.0.7 + * @date 22. June 2018 + ******************************************************************************/ +/* + * Copyright (c) 2009-2018 Arm Limited. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#if defined ( __ICCARM__ ) + #pragma system_include /* treat file as system include file for MISRA check */ +#elif defined (__clang__) + #pragma clang system_header /* treat file as system include file */ +#endif + +#ifndef __CORE_ARMV8MBL_H_GENERIC +#define __CORE_ARMV8MBL_H_GENERIC + +#include + +#ifdef __cplusplus + extern "C" { +#endif + +/** + \page CMSIS_MISRA_Exceptions MISRA-C:2004 Compliance Exceptions + CMSIS violates the following MISRA-C:2004 rules: + + \li Required Rule 8.5, object/function definition in header file.
+ Function definitions in header files are used to allow 'inlining'. + + \li Required Rule 18.4, declaration of union type or object of union type: '{...}'.
+ Unions are used for effective representation of core registers. + + \li Advisory Rule 19.7, Function-like macro defined.
+ Function-like macros are used to allow more efficient code. + */ + + +/******************************************************************************* + * CMSIS definitions + ******************************************************************************/ +/** + \ingroup Cortex_ARMv8MBL + @{ + */ + +#include "cmsis_version.h" + +/* CMSIS definitions */ +#define __ARMv8MBL_CMSIS_VERSION_MAIN (__CM_CMSIS_VERSION_MAIN) /*!< \deprecated [31:16] CMSIS HAL main version */ +#define __ARMv8MBL_CMSIS_VERSION_SUB (__CM_CMSIS_VERSION_SUB) /*!< \deprecated [15:0] CMSIS HAL sub version */ +#define __ARMv8MBL_CMSIS_VERSION ((__ARMv8MBL_CMSIS_VERSION_MAIN << 16U) | \ + __ARMv8MBL_CMSIS_VERSION_SUB ) /*!< \deprecated CMSIS HAL version number */ + +#define __CORTEX_M ( 2U) /*!< Cortex-M Core */ + +/** __FPU_USED indicates whether an FPU is used or not. + This core does not support an FPU at all +*/ +#define __FPU_USED 0U + +#if defined ( __CC_ARM ) + #if defined __TARGET_FPU_VFP + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined (__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050) + #if defined __ARM_PCS_VFP + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined ( __GNUC__ ) + #if defined (__VFP_FP__) && !defined(__SOFTFP__) + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined ( __ICCARM__ ) + #if defined __ARMVFP__ + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined ( __TI_ARM__ ) + #if defined __TI_VFP_SUPPORT__ + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined ( __TASKING__ ) + #if defined __FPU_VFP__ + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined ( __CSMC__ ) + #if ( __CSMC__ & 0x400U) + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#endif + +#include "cmsis_compiler.h" /* CMSIS compiler specific defines */ + + +#ifdef __cplusplus +} +#endif + +#endif /* __CORE_ARMV8MBL_H_GENERIC */ + +#ifndef __CMSIS_GENERIC + +#ifndef __CORE_ARMV8MBL_H_DEPENDANT +#define __CORE_ARMV8MBL_H_DEPENDANT + +#ifdef __cplusplus + extern "C" { +#endif + +/* check device defines and use defaults */ +#if defined __CHECK_DEVICE_DEFINES + #ifndef __ARMv8MBL_REV + #define __ARMv8MBL_REV 0x0000U + #warning "__ARMv8MBL_REV not defined in device header file; using default!" + #endif + + #ifndef __FPU_PRESENT + #define __FPU_PRESENT 0U + #warning "__FPU_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __MPU_PRESENT + #define __MPU_PRESENT 0U + #warning "__MPU_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __SAUREGION_PRESENT + #define __SAUREGION_PRESENT 0U + #warning "__SAUREGION_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __VTOR_PRESENT + #define __VTOR_PRESENT 0U + #warning "__VTOR_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __NVIC_PRIO_BITS + #define __NVIC_PRIO_BITS 2U + #warning "__NVIC_PRIO_BITS not defined in device header file; using default!" + #endif + + #ifndef __Vendor_SysTickConfig + #define __Vendor_SysTickConfig 0U + #warning "__Vendor_SysTickConfig not defined in device header file; using default!" 
+ #endif + + #ifndef __ETM_PRESENT + #define __ETM_PRESENT 0U + #warning "__ETM_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __MTB_PRESENT + #define __MTB_PRESENT 0U + #warning "__MTB_PRESENT not defined in device header file; using default!" + #endif + +#endif + +/* IO definitions (access restrictions to peripheral registers) */ +/** + \defgroup CMSIS_glob_defs CMSIS Global Defines + + IO Type Qualifiers are used + \li to specify the access to peripheral variables. + \li for automatic generation of peripheral register debug information. +*/ +#ifdef __cplusplus + #define __I volatile /*!< Defines 'read only' permissions */ +#else + #define __I volatile const /*!< Defines 'read only' permissions */ +#endif +#define __O volatile /*!< Defines 'write only' permissions */ +#define __IO volatile /*!< Defines 'read / write' permissions */ + +/* following defines should be used for structure members */ +#define __IM volatile const /*! Defines 'read only' structure member permissions */ +#define __OM volatile /*! Defines 'write only' structure member permissions */ +#define __IOM volatile /*! Defines 'read / write' structure member permissions */ + +/*@} end of group ARMv8MBL */ + + + +/******************************************************************************* + * Register Abstraction + Core Register contain: + - Core Register + - Core NVIC Register + - Core SCB Register + - Core SysTick Register + - Core Debug Register + - Core MPU Register + - Core SAU Register + ******************************************************************************/ +/** + \defgroup CMSIS_core_register Defines and Type Definitions + \brief Type definitions and defines for Cortex-M processor based devices. +*/ + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_CORE Status and Control Registers + \brief Core Register type definitions. + @{ + */ + +/** + \brief Union type to access the Application Program Status Register (APSR). + */ +typedef union +{ + struct + { + uint32_t _reserved0:28; /*!< bit: 0..27 Reserved */ + uint32_t V:1; /*!< bit: 28 Overflow condition code flag */ + uint32_t C:1; /*!< bit: 29 Carry condition code flag */ + uint32_t Z:1; /*!< bit: 30 Zero condition code flag */ + uint32_t N:1; /*!< bit: 31 Negative condition code flag */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} APSR_Type; + +/* APSR Register Definitions */ +#define APSR_N_Pos 31U /*!< APSR: N Position */ +#define APSR_N_Msk (1UL << APSR_N_Pos) /*!< APSR: N Mask */ + +#define APSR_Z_Pos 30U /*!< APSR: Z Position */ +#define APSR_Z_Msk (1UL << APSR_Z_Pos) /*!< APSR: Z Mask */ + +#define APSR_C_Pos 29U /*!< APSR: C Position */ +#define APSR_C_Msk (1UL << APSR_C_Pos) /*!< APSR: C Mask */ + +#define APSR_V_Pos 28U /*!< APSR: V Position */ +#define APSR_V_Msk (1UL << APSR_V_Pos) /*!< APSR: V Mask */ + + +/** + \brief Union type to access the Interrupt Program Status Register (IPSR). + */ +typedef union +{ + struct + { + uint32_t ISR:9; /*!< bit: 0.. 8 Exception number */ + uint32_t _reserved0:23; /*!< bit: 9..31 Reserved */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} IPSR_Type; + +/* IPSR Register Definitions */ +#define IPSR_ISR_Pos 0U /*!< IPSR: ISR Position */ +#define IPSR_ISR_Msk (0x1FFUL /*<< IPSR_ISR_Pos*/) /*!< IPSR: ISR Mask */ + + +/** + \brief Union type to access the Special-Purpose Program Status Registers (xPSR). + */ +typedef union +{ + struct + { + uint32_t ISR:9; /*!< bit: 0.. 
8 Exception number */ + uint32_t _reserved0:15; /*!< bit: 9..23 Reserved */ + uint32_t T:1; /*!< bit: 24 Thumb bit (read 0) */ + uint32_t _reserved1:3; /*!< bit: 25..27 Reserved */ + uint32_t V:1; /*!< bit: 28 Overflow condition code flag */ + uint32_t C:1; /*!< bit: 29 Carry condition code flag */ + uint32_t Z:1; /*!< bit: 30 Zero condition code flag */ + uint32_t N:1; /*!< bit: 31 Negative condition code flag */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} xPSR_Type; + +/* xPSR Register Definitions */ +#define xPSR_N_Pos 31U /*!< xPSR: N Position */ +#define xPSR_N_Msk (1UL << xPSR_N_Pos) /*!< xPSR: N Mask */ + +#define xPSR_Z_Pos 30U /*!< xPSR: Z Position */ +#define xPSR_Z_Msk (1UL << xPSR_Z_Pos) /*!< xPSR: Z Mask */ + +#define xPSR_C_Pos 29U /*!< xPSR: C Position */ +#define xPSR_C_Msk (1UL << xPSR_C_Pos) /*!< xPSR: C Mask */ + +#define xPSR_V_Pos 28U /*!< xPSR: V Position */ +#define xPSR_V_Msk (1UL << xPSR_V_Pos) /*!< xPSR: V Mask */ + +#define xPSR_T_Pos 24U /*!< xPSR: T Position */ +#define xPSR_T_Msk (1UL << xPSR_T_Pos) /*!< xPSR: T Mask */ + +#define xPSR_ISR_Pos 0U /*!< xPSR: ISR Position */ +#define xPSR_ISR_Msk (0x1FFUL /*<< xPSR_ISR_Pos*/) /*!< xPSR: ISR Mask */ + + +/** + \brief Union type to access the Control Registers (CONTROL). + */ +typedef union +{ + struct + { + uint32_t nPRIV:1; /*!< bit: 0 Execution privilege in Thread mode */ + uint32_t SPSEL:1; /*!< bit: 1 Stack-pointer select */ + uint32_t _reserved1:30; /*!< bit: 2..31 Reserved */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} CONTROL_Type; + +/* CONTROL Register Definitions */ +#define CONTROL_SPSEL_Pos 1U /*!< CONTROL: SPSEL Position */ +#define CONTROL_SPSEL_Msk (1UL << CONTROL_SPSEL_Pos) /*!< CONTROL: SPSEL Mask */ + +#define CONTROL_nPRIV_Pos 0U /*!< CONTROL: nPRIV Position */ +#define CONTROL_nPRIV_Msk (1UL /*<< CONTROL_nPRIV_Pos*/) /*!< CONTROL: nPRIV Mask */ + +/*@} end of group CMSIS_CORE */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_NVIC Nested Vectored Interrupt Controller (NVIC) + \brief Type definitions for the NVIC Registers + @{ + */ + +/** + \brief Structure type to access the Nested Vectored Interrupt Controller (NVIC). + */ +typedef struct +{ + __IOM uint32_t ISER[16U]; /*!< Offset: 0x000 (R/W) Interrupt Set Enable Register */ + uint32_t RESERVED0[16U]; + __IOM uint32_t ICER[16U]; /*!< Offset: 0x080 (R/W) Interrupt Clear Enable Register */ + uint32_t RSERVED1[16U]; + __IOM uint32_t ISPR[16U]; /*!< Offset: 0x100 (R/W) Interrupt Set Pending Register */ + uint32_t RESERVED2[16U]; + __IOM uint32_t ICPR[16U]; /*!< Offset: 0x180 (R/W) Interrupt Clear Pending Register */ + uint32_t RESERVED3[16U]; + __IOM uint32_t IABR[16U]; /*!< Offset: 0x200 (R/W) Interrupt Active bit Register */ + uint32_t RESERVED4[16U]; + __IOM uint32_t ITNS[16U]; /*!< Offset: 0x280 (R/W) Interrupt Non-Secure State Register */ + uint32_t RESERVED5[16U]; + __IOM uint32_t IPR[124U]; /*!< Offset: 0x300 (R/W) Interrupt Priority Register */ +} NVIC_Type; + +/*@} end of group CMSIS_NVIC */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SCB System Control Block (SCB) + \brief Type definitions for the System Control Block Registers + @{ + */ + +/** + \brief Structure type to access the System Control Block (SCB). 
+ */ +typedef struct +{ + __IM uint32_t CPUID; /*!< Offset: 0x000 (R/ ) CPUID Base Register */ + __IOM uint32_t ICSR; /*!< Offset: 0x004 (R/W) Interrupt Control and State Register */ +#if defined (__VTOR_PRESENT) && (__VTOR_PRESENT == 1U) + __IOM uint32_t VTOR; /*!< Offset: 0x008 (R/W) Vector Table Offset Register */ +#else + uint32_t RESERVED0; +#endif + __IOM uint32_t AIRCR; /*!< Offset: 0x00C (R/W) Application Interrupt and Reset Control Register */ + __IOM uint32_t SCR; /*!< Offset: 0x010 (R/W) System Control Register */ + __IOM uint32_t CCR; /*!< Offset: 0x014 (R/W) Configuration Control Register */ + uint32_t RESERVED1; + __IOM uint32_t SHPR[2U]; /*!< Offset: 0x01C (R/W) System Handlers Priority Registers. [0] is RESERVED */ + __IOM uint32_t SHCSR; /*!< Offset: 0x024 (R/W) System Handler Control and State Register */ +} SCB_Type; + +/* SCB CPUID Register Definitions */ +#define SCB_CPUID_IMPLEMENTER_Pos 24U /*!< SCB CPUID: IMPLEMENTER Position */ +#define SCB_CPUID_IMPLEMENTER_Msk (0xFFUL << SCB_CPUID_IMPLEMENTER_Pos) /*!< SCB CPUID: IMPLEMENTER Mask */ + +#define SCB_CPUID_VARIANT_Pos 20U /*!< SCB CPUID: VARIANT Position */ +#define SCB_CPUID_VARIANT_Msk (0xFUL << SCB_CPUID_VARIANT_Pos) /*!< SCB CPUID: VARIANT Mask */ + +#define SCB_CPUID_ARCHITECTURE_Pos 16U /*!< SCB CPUID: ARCHITECTURE Position */ +#define SCB_CPUID_ARCHITECTURE_Msk (0xFUL << SCB_CPUID_ARCHITECTURE_Pos) /*!< SCB CPUID: ARCHITECTURE Mask */ + +#define SCB_CPUID_PARTNO_Pos 4U /*!< SCB CPUID: PARTNO Position */ +#define SCB_CPUID_PARTNO_Msk (0xFFFUL << SCB_CPUID_PARTNO_Pos) /*!< SCB CPUID: PARTNO Mask */ + +#define SCB_CPUID_REVISION_Pos 0U /*!< SCB CPUID: REVISION Position */ +#define SCB_CPUID_REVISION_Msk (0xFUL /*<< SCB_CPUID_REVISION_Pos*/) /*!< SCB CPUID: REVISION Mask */ + +/* SCB Interrupt Control State Register Definitions */ +#define SCB_ICSR_PENDNMISET_Pos 31U /*!< SCB ICSR: PENDNMISET Position */ +#define SCB_ICSR_PENDNMISET_Msk (1UL << SCB_ICSR_PENDNMISET_Pos) /*!< SCB ICSR: PENDNMISET Mask */ + +#define SCB_ICSR_NMIPENDSET_Pos SCB_ICSR_PENDNMISET_Pos /*!< SCB ICSR: NMIPENDSET Position, backward compatibility */ +#define SCB_ICSR_NMIPENDSET_Msk SCB_ICSR_PENDNMISET_Msk /*!< SCB ICSR: NMIPENDSET Mask, backward compatibility */ + +#define SCB_ICSR_PENDNMICLR_Pos 30U /*!< SCB ICSR: PENDNMICLR Position */ +#define SCB_ICSR_PENDNMICLR_Msk (1UL << SCB_ICSR_PENDNMICLR_Pos) /*!< SCB ICSR: PENDNMICLR Mask */ + +#define SCB_ICSR_PENDSVSET_Pos 28U /*!< SCB ICSR: PENDSVSET Position */ +#define SCB_ICSR_PENDSVSET_Msk (1UL << SCB_ICSR_PENDSVSET_Pos) /*!< SCB ICSR: PENDSVSET Mask */ + +#define SCB_ICSR_PENDSVCLR_Pos 27U /*!< SCB ICSR: PENDSVCLR Position */ +#define SCB_ICSR_PENDSVCLR_Msk (1UL << SCB_ICSR_PENDSVCLR_Pos) /*!< SCB ICSR: PENDSVCLR Mask */ + +#define SCB_ICSR_PENDSTSET_Pos 26U /*!< SCB ICSR: PENDSTSET Position */ +#define SCB_ICSR_PENDSTSET_Msk (1UL << SCB_ICSR_PENDSTSET_Pos) /*!< SCB ICSR: PENDSTSET Mask */ + +#define SCB_ICSR_PENDSTCLR_Pos 25U /*!< SCB ICSR: PENDSTCLR Position */ +#define SCB_ICSR_PENDSTCLR_Msk (1UL << SCB_ICSR_PENDSTCLR_Pos) /*!< SCB ICSR: PENDSTCLR Mask */ + +#define SCB_ICSR_STTNS_Pos 24U /*!< SCB ICSR: STTNS Position (Security Extension) */ +#define SCB_ICSR_STTNS_Msk (1UL << SCB_ICSR_STTNS_Pos) /*!< SCB ICSR: STTNS Mask (Security Extension) */ + +#define SCB_ICSR_ISRPREEMPT_Pos 23U /*!< SCB ICSR: ISRPREEMPT Position */ +#define SCB_ICSR_ISRPREEMPT_Msk (1UL << SCB_ICSR_ISRPREEMPT_Pos) /*!< SCB ICSR: ISRPREEMPT Mask */ + +#define SCB_ICSR_ISRPENDING_Pos 22U /*!< SCB ICSR: ISRPENDING 
Position */ +#define SCB_ICSR_ISRPENDING_Msk (1UL << SCB_ICSR_ISRPENDING_Pos) /*!< SCB ICSR: ISRPENDING Mask */ + +#define SCB_ICSR_VECTPENDING_Pos 12U /*!< SCB ICSR: VECTPENDING Position */ +#define SCB_ICSR_VECTPENDING_Msk (0x1FFUL << SCB_ICSR_VECTPENDING_Pos) /*!< SCB ICSR: VECTPENDING Mask */ + +#define SCB_ICSR_RETTOBASE_Pos 11U /*!< SCB ICSR: RETTOBASE Position */ +#define SCB_ICSR_RETTOBASE_Msk (1UL << SCB_ICSR_RETTOBASE_Pos) /*!< SCB ICSR: RETTOBASE Mask */ + +#define SCB_ICSR_VECTACTIVE_Pos 0U /*!< SCB ICSR: VECTACTIVE Position */ +#define SCB_ICSR_VECTACTIVE_Msk (0x1FFUL /*<< SCB_ICSR_VECTACTIVE_Pos*/) /*!< SCB ICSR: VECTACTIVE Mask */ + +#if defined (__VTOR_PRESENT) && (__VTOR_PRESENT == 1U) +/* SCB Vector Table Offset Register Definitions */ +#define SCB_VTOR_TBLOFF_Pos 7U /*!< SCB VTOR: TBLOFF Position */ +#define SCB_VTOR_TBLOFF_Msk (0x1FFFFFFUL << SCB_VTOR_TBLOFF_Pos) /*!< SCB VTOR: TBLOFF Mask */ +#endif + +/* SCB Application Interrupt and Reset Control Register Definitions */ +#define SCB_AIRCR_VECTKEY_Pos 16U /*!< SCB AIRCR: VECTKEY Position */ +#define SCB_AIRCR_VECTKEY_Msk (0xFFFFUL << SCB_AIRCR_VECTKEY_Pos) /*!< SCB AIRCR: VECTKEY Mask */ + +#define SCB_AIRCR_VECTKEYSTAT_Pos 16U /*!< SCB AIRCR: VECTKEYSTAT Position */ +#define SCB_AIRCR_VECTKEYSTAT_Msk (0xFFFFUL << SCB_AIRCR_VECTKEYSTAT_Pos) /*!< SCB AIRCR: VECTKEYSTAT Mask */ + +#define SCB_AIRCR_ENDIANESS_Pos 15U /*!< SCB AIRCR: ENDIANESS Position */ +#define SCB_AIRCR_ENDIANESS_Msk (1UL << SCB_AIRCR_ENDIANESS_Pos) /*!< SCB AIRCR: ENDIANESS Mask */ + +#define SCB_AIRCR_PRIS_Pos 14U /*!< SCB AIRCR: PRIS Position */ +#define SCB_AIRCR_PRIS_Msk (1UL << SCB_AIRCR_PRIS_Pos) /*!< SCB AIRCR: PRIS Mask */ + +#define SCB_AIRCR_BFHFNMINS_Pos 13U /*!< SCB AIRCR: BFHFNMINS Position */ +#define SCB_AIRCR_BFHFNMINS_Msk (1UL << SCB_AIRCR_BFHFNMINS_Pos) /*!< SCB AIRCR: BFHFNMINS Mask */ + +#define SCB_AIRCR_SYSRESETREQS_Pos 3U /*!< SCB AIRCR: SYSRESETREQS Position */ +#define SCB_AIRCR_SYSRESETREQS_Msk (1UL << SCB_AIRCR_SYSRESETREQS_Pos) /*!< SCB AIRCR: SYSRESETREQS Mask */ + +#define SCB_AIRCR_SYSRESETREQ_Pos 2U /*!< SCB AIRCR: SYSRESETREQ Position */ +#define SCB_AIRCR_SYSRESETREQ_Msk (1UL << SCB_AIRCR_SYSRESETREQ_Pos) /*!< SCB AIRCR: SYSRESETREQ Mask */ + +#define SCB_AIRCR_VECTCLRACTIVE_Pos 1U /*!< SCB AIRCR: VECTCLRACTIVE Position */ +#define SCB_AIRCR_VECTCLRACTIVE_Msk (1UL << SCB_AIRCR_VECTCLRACTIVE_Pos) /*!< SCB AIRCR: VECTCLRACTIVE Mask */ + +/* SCB System Control Register Definitions */ +#define SCB_SCR_SEVONPEND_Pos 4U /*!< SCB SCR: SEVONPEND Position */ +#define SCB_SCR_SEVONPEND_Msk (1UL << SCB_SCR_SEVONPEND_Pos) /*!< SCB SCR: SEVONPEND Mask */ + +#define SCB_SCR_SLEEPDEEPS_Pos 3U /*!< SCB SCR: SLEEPDEEPS Position */ +#define SCB_SCR_SLEEPDEEPS_Msk (1UL << SCB_SCR_SLEEPDEEPS_Pos) /*!< SCB SCR: SLEEPDEEPS Mask */ + +#define SCB_SCR_SLEEPDEEP_Pos 2U /*!< SCB SCR: SLEEPDEEP Position */ +#define SCB_SCR_SLEEPDEEP_Msk (1UL << SCB_SCR_SLEEPDEEP_Pos) /*!< SCB SCR: SLEEPDEEP Mask */ + +#define SCB_SCR_SLEEPONEXIT_Pos 1U /*!< SCB SCR: SLEEPONEXIT Position */ +#define SCB_SCR_SLEEPONEXIT_Msk (1UL << SCB_SCR_SLEEPONEXIT_Pos) /*!< SCB SCR: SLEEPONEXIT Mask */ + +/* SCB Configuration Control Register Definitions */ +#define SCB_CCR_BP_Pos 18U /*!< SCB CCR: BP Position */ +#define SCB_CCR_BP_Msk (1UL << SCB_CCR_BP_Pos) /*!< SCB CCR: BP Mask */ + +#define SCB_CCR_IC_Pos 17U /*!< SCB CCR: IC Position */ +#define SCB_CCR_IC_Msk (1UL << SCB_CCR_IC_Pos) /*!< SCB CCR: IC Mask */ + +#define SCB_CCR_DC_Pos 16U /*!< SCB CCR: DC Position */ 
+#define SCB_CCR_DC_Msk (1UL << SCB_CCR_DC_Pos) /*!< SCB CCR: DC Mask */ + +#define SCB_CCR_STKOFHFNMIGN_Pos 10U /*!< SCB CCR: STKOFHFNMIGN Position */ +#define SCB_CCR_STKOFHFNMIGN_Msk (1UL << SCB_CCR_STKOFHFNMIGN_Pos) /*!< SCB CCR: STKOFHFNMIGN Mask */ + +#define SCB_CCR_BFHFNMIGN_Pos 8U /*!< SCB CCR: BFHFNMIGN Position */ +#define SCB_CCR_BFHFNMIGN_Msk (1UL << SCB_CCR_BFHFNMIGN_Pos) /*!< SCB CCR: BFHFNMIGN Mask */ + +#define SCB_CCR_DIV_0_TRP_Pos 4U /*!< SCB CCR: DIV_0_TRP Position */ +#define SCB_CCR_DIV_0_TRP_Msk (1UL << SCB_CCR_DIV_0_TRP_Pos) /*!< SCB CCR: DIV_0_TRP Mask */ + +#define SCB_CCR_UNALIGN_TRP_Pos 3U /*!< SCB CCR: UNALIGN_TRP Position */ +#define SCB_CCR_UNALIGN_TRP_Msk (1UL << SCB_CCR_UNALIGN_TRP_Pos) /*!< SCB CCR: UNALIGN_TRP Mask */ + +#define SCB_CCR_USERSETMPEND_Pos 1U /*!< SCB CCR: USERSETMPEND Position */ +#define SCB_CCR_USERSETMPEND_Msk (1UL << SCB_CCR_USERSETMPEND_Pos) /*!< SCB CCR: USERSETMPEND Mask */ + +/* SCB System Handler Control and State Register Definitions */ +#define SCB_SHCSR_HARDFAULTPENDED_Pos 21U /*!< SCB SHCSR: HARDFAULTPENDED Position */ +#define SCB_SHCSR_HARDFAULTPENDED_Msk (1UL << SCB_SHCSR_HARDFAULTPENDED_Pos) /*!< SCB SHCSR: HARDFAULTPENDED Mask */ + +#define SCB_SHCSR_SVCALLPENDED_Pos 15U /*!< SCB SHCSR: SVCALLPENDED Position */ +#define SCB_SHCSR_SVCALLPENDED_Msk (1UL << SCB_SHCSR_SVCALLPENDED_Pos) /*!< SCB SHCSR: SVCALLPENDED Mask */ + +#define SCB_SHCSR_SYSTICKACT_Pos 11U /*!< SCB SHCSR: SYSTICKACT Position */ +#define SCB_SHCSR_SYSTICKACT_Msk (1UL << SCB_SHCSR_SYSTICKACT_Pos) /*!< SCB SHCSR: SYSTICKACT Mask */ + +#define SCB_SHCSR_PENDSVACT_Pos 10U /*!< SCB SHCSR: PENDSVACT Position */ +#define SCB_SHCSR_PENDSVACT_Msk (1UL << SCB_SHCSR_PENDSVACT_Pos) /*!< SCB SHCSR: PENDSVACT Mask */ + +#define SCB_SHCSR_SVCALLACT_Pos 7U /*!< SCB SHCSR: SVCALLACT Position */ +#define SCB_SHCSR_SVCALLACT_Msk (1UL << SCB_SHCSR_SVCALLACT_Pos) /*!< SCB SHCSR: SVCALLACT Mask */ + +#define SCB_SHCSR_NMIACT_Pos 5U /*!< SCB SHCSR: NMIACT Position */ +#define SCB_SHCSR_NMIACT_Msk (1UL << SCB_SHCSR_NMIACT_Pos) /*!< SCB SHCSR: NMIACT Mask */ + +#define SCB_SHCSR_HARDFAULTACT_Pos 2U /*!< SCB SHCSR: HARDFAULTACT Position */ +#define SCB_SHCSR_HARDFAULTACT_Msk (1UL << SCB_SHCSR_HARDFAULTACT_Pos) /*!< SCB SHCSR: HARDFAULTACT Mask */ + +/*@} end of group CMSIS_SCB */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SysTick System Tick Timer (SysTick) + \brief Type definitions for the System Timer Registers. + @{ + */ + +/** + \brief Structure type to access the System Timer (SysTick). 
+ */ +typedef struct +{ + __IOM uint32_t CTRL; /*!< Offset: 0x000 (R/W) SysTick Control and Status Register */ + __IOM uint32_t LOAD; /*!< Offset: 0x004 (R/W) SysTick Reload Value Register */ + __IOM uint32_t VAL; /*!< Offset: 0x008 (R/W) SysTick Current Value Register */ + __IM uint32_t CALIB; /*!< Offset: 0x00C (R/ ) SysTick Calibration Register */ +} SysTick_Type; + +/* SysTick Control / Status Register Definitions */ +#define SysTick_CTRL_COUNTFLAG_Pos 16U /*!< SysTick CTRL: COUNTFLAG Position */ +#define SysTick_CTRL_COUNTFLAG_Msk (1UL << SysTick_CTRL_COUNTFLAG_Pos) /*!< SysTick CTRL: COUNTFLAG Mask */ + +#define SysTick_CTRL_CLKSOURCE_Pos 2U /*!< SysTick CTRL: CLKSOURCE Position */ +#define SysTick_CTRL_CLKSOURCE_Msk (1UL << SysTick_CTRL_CLKSOURCE_Pos) /*!< SysTick CTRL: CLKSOURCE Mask */ + +#define SysTick_CTRL_TICKINT_Pos 1U /*!< SysTick CTRL: TICKINT Position */ +#define SysTick_CTRL_TICKINT_Msk (1UL << SysTick_CTRL_TICKINT_Pos) /*!< SysTick CTRL: TICKINT Mask */ + +#define SysTick_CTRL_ENABLE_Pos 0U /*!< SysTick CTRL: ENABLE Position */ +#define SysTick_CTRL_ENABLE_Msk (1UL /*<< SysTick_CTRL_ENABLE_Pos*/) /*!< SysTick CTRL: ENABLE Mask */ + +/* SysTick Reload Register Definitions */ +#define SysTick_LOAD_RELOAD_Pos 0U /*!< SysTick LOAD: RELOAD Position */ +#define SysTick_LOAD_RELOAD_Msk (0xFFFFFFUL /*<< SysTick_LOAD_RELOAD_Pos*/) /*!< SysTick LOAD: RELOAD Mask */ + +/* SysTick Current Register Definitions */ +#define SysTick_VAL_CURRENT_Pos 0U /*!< SysTick VAL: CURRENT Position */ +#define SysTick_VAL_CURRENT_Msk (0xFFFFFFUL /*<< SysTick_VAL_CURRENT_Pos*/) /*!< SysTick VAL: CURRENT Mask */ + +/* SysTick Calibration Register Definitions */ +#define SysTick_CALIB_NOREF_Pos 31U /*!< SysTick CALIB: NOREF Position */ +#define SysTick_CALIB_NOREF_Msk (1UL << SysTick_CALIB_NOREF_Pos) /*!< SysTick CALIB: NOREF Mask */ + +#define SysTick_CALIB_SKEW_Pos 30U /*!< SysTick CALIB: SKEW Position */ +#define SysTick_CALIB_SKEW_Msk (1UL << SysTick_CALIB_SKEW_Pos) /*!< SysTick CALIB: SKEW Mask */ + +#define SysTick_CALIB_TENMS_Pos 0U /*!< SysTick CALIB: TENMS Position */ +#define SysTick_CALIB_TENMS_Msk (0xFFFFFFUL /*<< SysTick_CALIB_TENMS_Pos*/) /*!< SysTick CALIB: TENMS Mask */ + +/*@} end of group CMSIS_SysTick */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_DWT Data Watchpoint and Trace (DWT) + \brief Type definitions for the Data Watchpoint and Trace (DWT) + @{ + */ + +/** + \brief Structure type to access the Data Watchpoint and Trace Register (DWT). 
+ */ +typedef struct +{ + __IOM uint32_t CTRL; /*!< Offset: 0x000 (R/W) Control Register */ + uint32_t RESERVED0[6U]; + __IM uint32_t PCSR; /*!< Offset: 0x01C (R/ ) Program Counter Sample Register */ + __IOM uint32_t COMP0; /*!< Offset: 0x020 (R/W) Comparator Register 0 */ + uint32_t RESERVED1[1U]; + __IOM uint32_t FUNCTION0; /*!< Offset: 0x028 (R/W) Function Register 0 */ + uint32_t RESERVED2[1U]; + __IOM uint32_t COMP1; /*!< Offset: 0x030 (R/W) Comparator Register 1 */ + uint32_t RESERVED3[1U]; + __IOM uint32_t FUNCTION1; /*!< Offset: 0x038 (R/W) Function Register 1 */ + uint32_t RESERVED4[1U]; + __IOM uint32_t COMP2; /*!< Offset: 0x040 (R/W) Comparator Register 2 */ + uint32_t RESERVED5[1U]; + __IOM uint32_t FUNCTION2; /*!< Offset: 0x048 (R/W) Function Register 2 */ + uint32_t RESERVED6[1U]; + __IOM uint32_t COMP3; /*!< Offset: 0x050 (R/W) Comparator Register 3 */ + uint32_t RESERVED7[1U]; + __IOM uint32_t FUNCTION3; /*!< Offset: 0x058 (R/W) Function Register 3 */ + uint32_t RESERVED8[1U]; + __IOM uint32_t COMP4; /*!< Offset: 0x060 (R/W) Comparator Register 4 */ + uint32_t RESERVED9[1U]; + __IOM uint32_t FUNCTION4; /*!< Offset: 0x068 (R/W) Function Register 4 */ + uint32_t RESERVED10[1U]; + __IOM uint32_t COMP5; /*!< Offset: 0x070 (R/W) Comparator Register 5 */ + uint32_t RESERVED11[1U]; + __IOM uint32_t FUNCTION5; /*!< Offset: 0x078 (R/W) Function Register 5 */ + uint32_t RESERVED12[1U]; + __IOM uint32_t COMP6; /*!< Offset: 0x080 (R/W) Comparator Register 6 */ + uint32_t RESERVED13[1U]; + __IOM uint32_t FUNCTION6; /*!< Offset: 0x088 (R/W) Function Register 6 */ + uint32_t RESERVED14[1U]; + __IOM uint32_t COMP7; /*!< Offset: 0x090 (R/W) Comparator Register 7 */ + uint32_t RESERVED15[1U]; + __IOM uint32_t FUNCTION7; /*!< Offset: 0x098 (R/W) Function Register 7 */ + uint32_t RESERVED16[1U]; + __IOM uint32_t COMP8; /*!< Offset: 0x0A0 (R/W) Comparator Register 8 */ + uint32_t RESERVED17[1U]; + __IOM uint32_t FUNCTION8; /*!< Offset: 0x0A8 (R/W) Function Register 8 */ + uint32_t RESERVED18[1U]; + __IOM uint32_t COMP9; /*!< Offset: 0x0B0 (R/W) Comparator Register 9 */ + uint32_t RESERVED19[1U]; + __IOM uint32_t FUNCTION9; /*!< Offset: 0x0B8 (R/W) Function Register 9 */ + uint32_t RESERVED20[1U]; + __IOM uint32_t COMP10; /*!< Offset: 0x0C0 (R/W) Comparator Register 10 */ + uint32_t RESERVED21[1U]; + __IOM uint32_t FUNCTION10; /*!< Offset: 0x0C8 (R/W) Function Register 10 */ + uint32_t RESERVED22[1U]; + __IOM uint32_t COMP11; /*!< Offset: 0x0D0 (R/W) Comparator Register 11 */ + uint32_t RESERVED23[1U]; + __IOM uint32_t FUNCTION11; /*!< Offset: 0x0D8 (R/W) Function Register 11 */ + uint32_t RESERVED24[1U]; + __IOM uint32_t COMP12; /*!< Offset: 0x0E0 (R/W) Comparator Register 12 */ + uint32_t RESERVED25[1U]; + __IOM uint32_t FUNCTION12; /*!< Offset: 0x0E8 (R/W) Function Register 12 */ + uint32_t RESERVED26[1U]; + __IOM uint32_t COMP13; /*!< Offset: 0x0F0 (R/W) Comparator Register 13 */ + uint32_t RESERVED27[1U]; + __IOM uint32_t FUNCTION13; /*!< Offset: 0x0F8 (R/W) Function Register 13 */ + uint32_t RESERVED28[1U]; + __IOM uint32_t COMP14; /*!< Offset: 0x100 (R/W) Comparator Register 14 */ + uint32_t RESERVED29[1U]; + __IOM uint32_t FUNCTION14; /*!< Offset: 0x108 (R/W) Function Register 14 */ + uint32_t RESERVED30[1U]; + __IOM uint32_t COMP15; /*!< Offset: 0x110 (R/W) Comparator Register 15 */ + uint32_t RESERVED31[1U]; + __IOM uint32_t FUNCTION15; /*!< Offset: 0x118 (R/W) Function Register 15 */ +} DWT_Type; + +/* DWT Control Register Definitions */ +#define DWT_CTRL_NUMCOMP_Pos 28U /*!< DWT 
CTRL: NUMCOMP Position */ +#define DWT_CTRL_NUMCOMP_Msk (0xFUL << DWT_CTRL_NUMCOMP_Pos) /*!< DWT CTRL: NUMCOMP Mask */ + +#define DWT_CTRL_NOTRCPKT_Pos 27U /*!< DWT CTRL: NOTRCPKT Position */ +#define DWT_CTRL_NOTRCPKT_Msk (0x1UL << DWT_CTRL_NOTRCPKT_Pos) /*!< DWT CTRL: NOTRCPKT Mask */ + +#define DWT_CTRL_NOEXTTRIG_Pos 26U /*!< DWT CTRL: NOEXTTRIG Position */ +#define DWT_CTRL_NOEXTTRIG_Msk (0x1UL << DWT_CTRL_NOEXTTRIG_Pos) /*!< DWT CTRL: NOEXTTRIG Mask */ + +#define DWT_CTRL_NOCYCCNT_Pos 25U /*!< DWT CTRL: NOCYCCNT Position */ +#define DWT_CTRL_NOCYCCNT_Msk (0x1UL << DWT_CTRL_NOCYCCNT_Pos) /*!< DWT CTRL: NOCYCCNT Mask */ + +#define DWT_CTRL_NOPRFCNT_Pos 24U /*!< DWT CTRL: NOPRFCNT Position */ +#define DWT_CTRL_NOPRFCNT_Msk (0x1UL << DWT_CTRL_NOPRFCNT_Pos) /*!< DWT CTRL: NOPRFCNT Mask */ + +/* DWT Comparator Function Register Definitions */ +#define DWT_FUNCTION_ID_Pos 27U /*!< DWT FUNCTION: ID Position */ +#define DWT_FUNCTION_ID_Msk (0x1FUL << DWT_FUNCTION_ID_Pos) /*!< DWT FUNCTION: ID Mask */ + +#define DWT_FUNCTION_MATCHED_Pos 24U /*!< DWT FUNCTION: MATCHED Position */ +#define DWT_FUNCTION_MATCHED_Msk (0x1UL << DWT_FUNCTION_MATCHED_Pos) /*!< DWT FUNCTION: MATCHED Mask */ + +#define DWT_FUNCTION_DATAVSIZE_Pos 10U /*!< DWT FUNCTION: DATAVSIZE Position */ +#define DWT_FUNCTION_DATAVSIZE_Msk (0x3UL << DWT_FUNCTION_DATAVSIZE_Pos) /*!< DWT FUNCTION: DATAVSIZE Mask */ + +#define DWT_FUNCTION_ACTION_Pos 4U /*!< DWT FUNCTION: ACTION Position */ +#define DWT_FUNCTION_ACTION_Msk (0x3UL << DWT_FUNCTION_ACTION_Pos) /*!< DWT FUNCTION: ACTION Mask */ + +#define DWT_FUNCTION_MATCH_Pos 0U /*!< DWT FUNCTION: MATCH Position */ +#define DWT_FUNCTION_MATCH_Msk (0xFUL /*<< DWT_FUNCTION_MATCH_Pos*/) /*!< DWT FUNCTION: MATCH Mask */ + +/*@}*/ /* end of group CMSIS_DWT */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_TPI Trace Port Interface (TPI) + \brief Type definitions for the Trace Port Interface (TPI) + @{ + */ + +/** + \brief Structure type to access the Trace Port Interface Register (TPI). 
+ */ +typedef struct +{ + __IM uint32_t SSPSR; /*!< Offset: 0x000 (R/ ) Supported Parallel Port Sizes Register */ + __IOM uint32_t CSPSR; /*!< Offset: 0x004 (R/W) Current Parallel Port Sizes Register */ + uint32_t RESERVED0[2U]; + __IOM uint32_t ACPR; /*!< Offset: 0x010 (R/W) Asynchronous Clock Prescaler Register */ + uint32_t RESERVED1[55U]; + __IOM uint32_t SPPR; /*!< Offset: 0x0F0 (R/W) Selected Pin Protocol Register */ + uint32_t RESERVED2[131U]; + __IM uint32_t FFSR; /*!< Offset: 0x300 (R/ ) Formatter and Flush Status Register */ + __IOM uint32_t FFCR; /*!< Offset: 0x304 (R/W) Formatter and Flush Control Register */ + __IOM uint32_t PSCR; /*!< Offset: 0x308 (R/W) Periodic Synchronization Control Register */ + uint32_t RESERVED3[809U]; + __OM uint32_t LAR; /*!< Offset: 0xFB0 ( /W) Software Lock Access Register */ + __IM uint32_t LSR; /*!< Offset: 0xFB4 (R/ ) Software Lock Status Register */ + uint32_t RESERVED4[4U]; + __IM uint32_t TYPE; /*!< Offset: 0xFC8 (R/ ) Device Identifier Register */ + __IM uint32_t DEVTYPE; /*!< Offset: 0xFCC (R/ ) Device Type Register */ +} TPI_Type; + +/* TPI Asynchronous Clock Prescaler Register Definitions */ +#define TPI_ACPR_SWOSCALER_Pos 0U /*!< TPI ACPR: SWOSCALER Position */ +#define TPI_ACPR_SWOSCALER_Msk (0xFFFFUL /*<< TPI_ACPR_SWOSCALER_Pos*/) /*!< TPI ACPR: SWOSCALER Mask */ + +/* TPI Selected Pin Protocol Register Definitions */ +#define TPI_SPPR_TXMODE_Pos 0U /*!< TPI SPPR: TXMODE Position */ +#define TPI_SPPR_TXMODE_Msk (0x3UL /*<< TPI_SPPR_TXMODE_Pos*/) /*!< TPI SPPR: TXMODE Mask */ + +/* TPI Formatter and Flush Status Register Definitions */ +#define TPI_FFSR_FtNonStop_Pos 3U /*!< TPI FFSR: FtNonStop Position */ +#define TPI_FFSR_FtNonStop_Msk (0x1UL << TPI_FFSR_FtNonStop_Pos) /*!< TPI FFSR: FtNonStop Mask */ + +#define TPI_FFSR_TCPresent_Pos 2U /*!< TPI FFSR: TCPresent Position */ +#define TPI_FFSR_TCPresent_Msk (0x1UL << TPI_FFSR_TCPresent_Pos) /*!< TPI FFSR: TCPresent Mask */ + +#define TPI_FFSR_FtStopped_Pos 1U /*!< TPI FFSR: FtStopped Position */ +#define TPI_FFSR_FtStopped_Msk (0x1UL << TPI_FFSR_FtStopped_Pos) /*!< TPI FFSR: FtStopped Mask */ + +#define TPI_FFSR_FlInProg_Pos 0U /*!< TPI FFSR: FlInProg Position */ +#define TPI_FFSR_FlInProg_Msk (0x1UL /*<< TPI_FFSR_FlInProg_Pos*/) /*!< TPI FFSR: FlInProg Mask */ + +/* TPI Formatter and Flush Control Register Definitions */ +#define TPI_FFCR_TrigIn_Pos 8U /*!< TPI FFCR: TrigIn Position */ +#define TPI_FFCR_TrigIn_Msk (0x1UL << TPI_FFCR_TrigIn_Pos) /*!< TPI FFCR: TrigIn Mask */ + +#define TPI_FFCR_FOnMan_Pos 6U /*!< TPI FFCR: FOnMan Position */ +#define TPI_FFCR_FOnMan_Msk (0x1UL << TPI_FFCR_FOnMan_Pos) /*!< TPI FFCR: FOnMan Mask */ + +#define TPI_FFCR_EnFCont_Pos 1U /*!< TPI FFCR: EnFCont Position */ +#define TPI_FFCR_EnFCont_Msk (0x1UL << TPI_FFCR_EnFCont_Pos) /*!< TPI FFCR: EnFCont Mask */ + +/* TPI Periodic Synchronization Control Register Definitions */ +#define TPI_PSCR_PSCount_Pos 0U /*!< TPI PSCR: PSCount Position */ +#define TPI_PSCR_PSCount_Msk (0x1FUL /*<< TPI_PSCR_PSCount_Pos*/) /*!< TPI PSCR: TPSCount Mask */ + +/* TPI Software Lock Status Register Definitions */ +#define TPI_LSR_nTT_Pos 1U /*!< TPI LSR: Not thirty-two bit. Position */ +#define TPI_LSR_nTT_Msk (0x1UL << TPI_LSR_nTT_Pos) /*!< TPI LSR: Not thirty-two bit. 
Mask */ + +#define TPI_LSR_SLK_Pos 1U /*!< TPI LSR: Software Lock status Position */ +#define TPI_LSR_SLK_Msk (0x1UL << TPI_LSR_SLK_Pos) /*!< TPI LSR: Software Lock status Mask */ + +#define TPI_LSR_SLI_Pos 0U /*!< TPI LSR: Software Lock implemented Position */ +#define TPI_LSR_SLI_Msk (0x1UL /*<< TPI_LSR_SLI_Pos*/) /*!< TPI LSR: Software Lock implemented Mask */ + +/* TPI DEVID Register Definitions */ +#define TPI_DEVID_NRZVALID_Pos 11U /*!< TPI DEVID: NRZVALID Position */ +#define TPI_DEVID_NRZVALID_Msk (0x1UL << TPI_DEVID_NRZVALID_Pos) /*!< TPI DEVID: NRZVALID Mask */ + +#define TPI_DEVID_MANCVALID_Pos 10U /*!< TPI DEVID: MANCVALID Position */ +#define TPI_DEVID_MANCVALID_Msk (0x1UL << TPI_DEVID_MANCVALID_Pos) /*!< TPI DEVID: MANCVALID Mask */ + +#define TPI_DEVID_PTINVALID_Pos 9U /*!< TPI DEVID: PTINVALID Position */ +#define TPI_DEVID_PTINVALID_Msk (0x1UL << TPI_DEVID_PTINVALID_Pos) /*!< TPI DEVID: PTINVALID Mask */ + +#define TPI_DEVID_FIFOSZ_Pos 6U /*!< TPI DEVID: FIFO depth Position */ +#define TPI_DEVID_FIFOSZ_Msk (0x7UL << TPI_DEVID_FIFOSZ_Pos) /*!< TPI DEVID: FIFO depth Mask */ + +/* TPI DEVTYPE Register Definitions */ +#define TPI_DEVTYPE_SubType_Pos 4U /*!< TPI DEVTYPE: SubType Position */ +#define TPI_DEVTYPE_SubType_Msk (0xFUL /*<< TPI_DEVTYPE_SubType_Pos*/) /*!< TPI DEVTYPE: SubType Mask */ + +#define TPI_DEVTYPE_MajorType_Pos 0U /*!< TPI DEVTYPE: MajorType Position */ +#define TPI_DEVTYPE_MajorType_Msk (0xFUL << TPI_DEVTYPE_MajorType_Pos) /*!< TPI DEVTYPE: MajorType Mask */ + +/*@}*/ /* end of group CMSIS_TPI */ + + +#if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_MPU Memory Protection Unit (MPU) + \brief Type definitions for the Memory Protection Unit (MPU) + @{ + */ + +/** + \brief Structure type to access the Memory Protection Unit (MPU). 
+ */ +typedef struct +{ + __IM uint32_t TYPE; /*!< Offset: 0x000 (R/ ) MPU Type Register */ + __IOM uint32_t CTRL; /*!< Offset: 0x004 (R/W) MPU Control Register */ + __IOM uint32_t RNR; /*!< Offset: 0x008 (R/W) MPU Region Number Register */ + __IOM uint32_t RBAR; /*!< Offset: 0x00C (R/W) MPU Region Base Address Register */ + __IOM uint32_t RLAR; /*!< Offset: 0x010 (R/W) MPU Region Limit Address Register */ + uint32_t RESERVED0[7U]; + union { + __IOM uint32_t MAIR[2]; + struct { + __IOM uint32_t MAIR0; /*!< Offset: 0x030 (R/W) MPU Memory Attribute Indirection Register 0 */ + __IOM uint32_t MAIR1; /*!< Offset: 0x034 (R/W) MPU Memory Attribute Indirection Register 1 */ + }; + }; +} MPU_Type; + +#define MPU_TYPE_RALIASES 1U + +/* MPU Type Register Definitions */ +#define MPU_TYPE_IREGION_Pos 16U /*!< MPU TYPE: IREGION Position */ +#define MPU_TYPE_IREGION_Msk (0xFFUL << MPU_TYPE_IREGION_Pos) /*!< MPU TYPE: IREGION Mask */ + +#define MPU_TYPE_DREGION_Pos 8U /*!< MPU TYPE: DREGION Position */ +#define MPU_TYPE_DREGION_Msk (0xFFUL << MPU_TYPE_DREGION_Pos) /*!< MPU TYPE: DREGION Mask */ + +#define MPU_TYPE_SEPARATE_Pos 0U /*!< MPU TYPE: SEPARATE Position */ +#define MPU_TYPE_SEPARATE_Msk (1UL /*<< MPU_TYPE_SEPARATE_Pos*/) /*!< MPU TYPE: SEPARATE Mask */ + +/* MPU Control Register Definitions */ +#define MPU_CTRL_PRIVDEFENA_Pos 2U /*!< MPU CTRL: PRIVDEFENA Position */ +#define MPU_CTRL_PRIVDEFENA_Msk (1UL << MPU_CTRL_PRIVDEFENA_Pos) /*!< MPU CTRL: PRIVDEFENA Mask */ + +#define MPU_CTRL_HFNMIENA_Pos 1U /*!< MPU CTRL: HFNMIENA Position */ +#define MPU_CTRL_HFNMIENA_Msk (1UL << MPU_CTRL_HFNMIENA_Pos) /*!< MPU CTRL: HFNMIENA Mask */ + +#define MPU_CTRL_ENABLE_Pos 0U /*!< MPU CTRL: ENABLE Position */ +#define MPU_CTRL_ENABLE_Msk (1UL /*<< MPU_CTRL_ENABLE_Pos*/) /*!< MPU CTRL: ENABLE Mask */ + +/* MPU Region Number Register Definitions */ +#define MPU_RNR_REGION_Pos 0U /*!< MPU RNR: REGION Position */ +#define MPU_RNR_REGION_Msk (0xFFUL /*<< MPU_RNR_REGION_Pos*/) /*!< MPU RNR: REGION Mask */ + +/* MPU Region Base Address Register Definitions */ +#define MPU_RBAR_BASE_Pos 5U /*!< MPU RBAR: BASE Position */ +#define MPU_RBAR_BASE_Msk (0x7FFFFFFUL << MPU_RBAR_BASE_Pos) /*!< MPU RBAR: BASE Mask */ + +#define MPU_RBAR_SH_Pos 3U /*!< MPU RBAR: SH Position */ +#define MPU_RBAR_SH_Msk (0x3UL << MPU_RBAR_SH_Pos) /*!< MPU RBAR: SH Mask */ + +#define MPU_RBAR_AP_Pos 1U /*!< MPU RBAR: AP Position */ +#define MPU_RBAR_AP_Msk (0x3UL << MPU_RBAR_AP_Pos) /*!< MPU RBAR: AP Mask */ + +#define MPU_RBAR_XN_Pos 0U /*!< MPU RBAR: XN Position */ +#define MPU_RBAR_XN_Msk (01UL /*<< MPU_RBAR_XN_Pos*/) /*!< MPU RBAR: XN Mask */ + +/* MPU Region Limit Address Register Definitions */ +#define MPU_RLAR_LIMIT_Pos 5U /*!< MPU RLAR: LIMIT Position */ +#define MPU_RLAR_LIMIT_Msk (0x7FFFFFFUL << MPU_RLAR_LIMIT_Pos) /*!< MPU RLAR: LIMIT Mask */ + +#define MPU_RLAR_AttrIndx_Pos 1U /*!< MPU RLAR: AttrIndx Position */ +#define MPU_RLAR_AttrIndx_Msk (0x7UL << MPU_RLAR_AttrIndx_Pos) /*!< MPU RLAR: AttrIndx Mask */ + +#define MPU_RLAR_EN_Pos 0U /*!< MPU RLAR: EN Position */ +#define MPU_RLAR_EN_Msk (1UL /*<< MPU_RLAR_EN_Pos*/) /*!< MPU RLAR: EN Mask */ + +/* MPU Memory Attribute Indirection Register 0 Definitions */ +#define MPU_MAIR0_Attr3_Pos 24U /*!< MPU MAIR0: Attr3 Position */ +#define MPU_MAIR0_Attr3_Msk (0xFFUL << MPU_MAIR0_Attr3_Pos) /*!< MPU MAIR0: Attr3 Mask */ + +#define MPU_MAIR0_Attr2_Pos 16U /*!< MPU MAIR0: Attr2 Position */ +#define MPU_MAIR0_Attr2_Msk (0xFFUL << MPU_MAIR0_Attr2_Pos) /*!< MPU MAIR0: Attr2 Mask */ + +#define 
MPU_MAIR0_Attr1_Pos 8U /*!< MPU MAIR0: Attr1 Position */ +#define MPU_MAIR0_Attr1_Msk (0xFFUL << MPU_MAIR0_Attr1_Pos) /*!< MPU MAIR0: Attr1 Mask */ + +#define MPU_MAIR0_Attr0_Pos 0U /*!< MPU MAIR0: Attr0 Position */ +#define MPU_MAIR0_Attr0_Msk (0xFFUL /*<< MPU_MAIR0_Attr0_Pos*/) /*!< MPU MAIR0: Attr0 Mask */ + +/* MPU Memory Attribute Indirection Register 1 Definitions */ +#define MPU_MAIR1_Attr7_Pos 24U /*!< MPU MAIR1: Attr7 Position */ +#define MPU_MAIR1_Attr7_Msk (0xFFUL << MPU_MAIR1_Attr7_Pos) /*!< MPU MAIR1: Attr7 Mask */ + +#define MPU_MAIR1_Attr6_Pos 16U /*!< MPU MAIR1: Attr6 Position */ +#define MPU_MAIR1_Attr6_Msk (0xFFUL << MPU_MAIR1_Attr6_Pos) /*!< MPU MAIR1: Attr6 Mask */ + +#define MPU_MAIR1_Attr5_Pos 8U /*!< MPU MAIR1: Attr5 Position */ +#define MPU_MAIR1_Attr5_Msk (0xFFUL << MPU_MAIR1_Attr5_Pos) /*!< MPU MAIR1: Attr5 Mask */ + +#define MPU_MAIR1_Attr4_Pos 0U /*!< MPU MAIR1: Attr4 Position */ +#define MPU_MAIR1_Attr4_Msk (0xFFUL /*<< MPU_MAIR1_Attr4_Pos*/) /*!< MPU MAIR1: Attr4 Mask */ + +/*@} end of group CMSIS_MPU */ +#endif + + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SAU Security Attribution Unit (SAU) + \brief Type definitions for the Security Attribution Unit (SAU) + @{ + */ + +/** + \brief Structure type to access the Security Attribution Unit (SAU). + */ +typedef struct +{ + __IOM uint32_t CTRL; /*!< Offset: 0x000 (R/W) SAU Control Register */ + __IM uint32_t TYPE; /*!< Offset: 0x004 (R/ ) SAU Type Register */ +#if defined (__SAUREGION_PRESENT) && (__SAUREGION_PRESENT == 1U) + __IOM uint32_t RNR; /*!< Offset: 0x008 (R/W) SAU Region Number Register */ + __IOM uint32_t RBAR; /*!< Offset: 0x00C (R/W) SAU Region Base Address Register */ + __IOM uint32_t RLAR; /*!< Offset: 0x010 (R/W) SAU Region Limit Address Register */ +#endif +} SAU_Type; + +/* SAU Control Register Definitions */ +#define SAU_CTRL_ALLNS_Pos 1U /*!< SAU CTRL: ALLNS Position */ +#define SAU_CTRL_ALLNS_Msk (1UL << SAU_CTRL_ALLNS_Pos) /*!< SAU CTRL: ALLNS Mask */ + +#define SAU_CTRL_ENABLE_Pos 0U /*!< SAU CTRL: ENABLE Position */ +#define SAU_CTRL_ENABLE_Msk (1UL /*<< SAU_CTRL_ENABLE_Pos*/) /*!< SAU CTRL: ENABLE Mask */ + +/* SAU Type Register Definitions */ +#define SAU_TYPE_SREGION_Pos 0U /*!< SAU TYPE: SREGION Position */ +#define SAU_TYPE_SREGION_Msk (0xFFUL /*<< SAU_TYPE_SREGION_Pos*/) /*!< SAU TYPE: SREGION Mask */ + +#if defined (__SAUREGION_PRESENT) && (__SAUREGION_PRESENT == 1U) +/* SAU Region Number Register Definitions */ +#define SAU_RNR_REGION_Pos 0U /*!< SAU RNR: REGION Position */ +#define SAU_RNR_REGION_Msk (0xFFUL /*<< SAU_RNR_REGION_Pos*/) /*!< SAU RNR: REGION Mask */ + +/* SAU Region Base Address Register Definitions */ +#define SAU_RBAR_BADDR_Pos 5U /*!< SAU RBAR: BADDR Position */ +#define SAU_RBAR_BADDR_Msk (0x7FFFFFFUL << SAU_RBAR_BADDR_Pos) /*!< SAU RBAR: BADDR Mask */ + +/* SAU Region Limit Address Register Definitions */ +#define SAU_RLAR_LADDR_Pos 5U /*!< SAU RLAR: LADDR Position */ +#define SAU_RLAR_LADDR_Msk (0x7FFFFFFUL << SAU_RLAR_LADDR_Pos) /*!< SAU RLAR: LADDR Mask */ + +#define SAU_RLAR_NSC_Pos 1U /*!< SAU RLAR: NSC Position */ +#define SAU_RLAR_NSC_Msk (1UL << SAU_RLAR_NSC_Pos) /*!< SAU RLAR: NSC Mask */ + +#define SAU_RLAR_ENABLE_Pos 0U /*!< SAU RLAR: ENABLE Position */ +#define SAU_RLAR_ENABLE_Msk (1UL /*<< SAU_RLAR_ENABLE_Pos*/) /*!< SAU RLAR: ENABLE Mask */ + +#endif /* defined (__SAUREGION_PRESENT) && (__SAUREGION_PRESENT == 1U) */ + +/*@} end of group CMSIS_SAU */ +#endif /* defined 
(__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_CoreDebug Core Debug Registers (CoreDebug) + \brief Type definitions for the Core Debug Registers + @{ + */ + +/** + \brief Structure type to access the Core Debug Register (CoreDebug). + */ +typedef struct +{ + __IOM uint32_t DHCSR; /*!< Offset: 0x000 (R/W) Debug Halting Control and Status Register */ + __OM uint32_t DCRSR; /*!< Offset: 0x004 ( /W) Debug Core Register Selector Register */ + __IOM uint32_t DCRDR; /*!< Offset: 0x008 (R/W) Debug Core Register Data Register */ + __IOM uint32_t DEMCR; /*!< Offset: 0x00C (R/W) Debug Exception and Monitor Control Register */ + uint32_t RESERVED4[1U]; + __IOM uint32_t DAUTHCTRL; /*!< Offset: 0x014 (R/W) Debug Authentication Control Register */ + __IOM uint32_t DSCSR; /*!< Offset: 0x018 (R/W) Debug Security Control and Status Register */ +} CoreDebug_Type; + +/* Debug Halting Control and Status Register Definitions */ +#define CoreDebug_DHCSR_DBGKEY_Pos 16U /*!< CoreDebug DHCSR: DBGKEY Position */ +#define CoreDebug_DHCSR_DBGKEY_Msk (0xFFFFUL << CoreDebug_DHCSR_DBGKEY_Pos) /*!< CoreDebug DHCSR: DBGKEY Mask */ + +#define CoreDebug_DHCSR_S_RESTART_ST_Pos 26U /*!< CoreDebug DHCSR: S_RESTART_ST Position */ +#define CoreDebug_DHCSR_S_RESTART_ST_Msk (1UL << CoreDebug_DHCSR_S_RESTART_ST_Pos) /*!< CoreDebug DHCSR: S_RESTART_ST Mask */ + +#define CoreDebug_DHCSR_S_RESET_ST_Pos 25U /*!< CoreDebug DHCSR: S_RESET_ST Position */ +#define CoreDebug_DHCSR_S_RESET_ST_Msk (1UL << CoreDebug_DHCSR_S_RESET_ST_Pos) /*!< CoreDebug DHCSR: S_RESET_ST Mask */ + +#define CoreDebug_DHCSR_S_RETIRE_ST_Pos 24U /*!< CoreDebug DHCSR: S_RETIRE_ST Position */ +#define CoreDebug_DHCSR_S_RETIRE_ST_Msk (1UL << CoreDebug_DHCSR_S_RETIRE_ST_Pos) /*!< CoreDebug DHCSR: S_RETIRE_ST Mask */ + +#define CoreDebug_DHCSR_S_LOCKUP_Pos 19U /*!< CoreDebug DHCSR: S_LOCKUP Position */ +#define CoreDebug_DHCSR_S_LOCKUP_Msk (1UL << CoreDebug_DHCSR_S_LOCKUP_Pos) /*!< CoreDebug DHCSR: S_LOCKUP Mask */ + +#define CoreDebug_DHCSR_S_SLEEP_Pos 18U /*!< CoreDebug DHCSR: S_SLEEP Position */ +#define CoreDebug_DHCSR_S_SLEEP_Msk (1UL << CoreDebug_DHCSR_S_SLEEP_Pos) /*!< CoreDebug DHCSR: S_SLEEP Mask */ + +#define CoreDebug_DHCSR_S_HALT_Pos 17U /*!< CoreDebug DHCSR: S_HALT Position */ +#define CoreDebug_DHCSR_S_HALT_Msk (1UL << CoreDebug_DHCSR_S_HALT_Pos) /*!< CoreDebug DHCSR: S_HALT Mask */ + +#define CoreDebug_DHCSR_S_REGRDY_Pos 16U /*!< CoreDebug DHCSR: S_REGRDY Position */ +#define CoreDebug_DHCSR_S_REGRDY_Msk (1UL << CoreDebug_DHCSR_S_REGRDY_Pos) /*!< CoreDebug DHCSR: S_REGRDY Mask */ + +#define CoreDebug_DHCSR_C_MASKINTS_Pos 3U /*!< CoreDebug DHCSR: C_MASKINTS Position */ +#define CoreDebug_DHCSR_C_MASKINTS_Msk (1UL << CoreDebug_DHCSR_C_MASKINTS_Pos) /*!< CoreDebug DHCSR: C_MASKINTS Mask */ + +#define CoreDebug_DHCSR_C_STEP_Pos 2U /*!< CoreDebug DHCSR: C_STEP Position */ +#define CoreDebug_DHCSR_C_STEP_Msk (1UL << CoreDebug_DHCSR_C_STEP_Pos) /*!< CoreDebug DHCSR: C_STEP Mask */ + +#define CoreDebug_DHCSR_C_HALT_Pos 1U /*!< CoreDebug DHCSR: C_HALT Position */ +#define CoreDebug_DHCSR_C_HALT_Msk (1UL << CoreDebug_DHCSR_C_HALT_Pos) /*!< CoreDebug DHCSR: C_HALT Mask */ + +#define CoreDebug_DHCSR_C_DEBUGEN_Pos 0U /*!< CoreDebug DHCSR: C_DEBUGEN Position */ +#define CoreDebug_DHCSR_C_DEBUGEN_Msk (1UL /*<< CoreDebug_DHCSR_C_DEBUGEN_Pos*/) /*!< CoreDebug DHCSR: C_DEBUGEN Mask */ + +/* Debug Core Register Selector Register Definitions */ +#define CoreDebug_DCRSR_REGWnR_Pos 16U /*!< CoreDebug DCRSR: REGWnR 
Position */
+#define CoreDebug_DCRSR_REGWnR_Msk (1UL << CoreDebug_DCRSR_REGWnR_Pos) /*!< CoreDebug DCRSR: REGWnR Mask */
+
+#define CoreDebug_DCRSR_REGSEL_Pos 0U /*!< CoreDebug DCRSR: REGSEL Position */
+#define CoreDebug_DCRSR_REGSEL_Msk (0x1FUL /*<< CoreDebug_DCRSR_REGSEL_Pos*/) /*!< CoreDebug DCRSR: REGSEL Mask */
+
+/* Debug Exception and Monitor Control Register */
+#define CoreDebug_DEMCR_DWTENA_Pos 24U /*!< CoreDebug DEMCR: DWTENA Position */
+#define CoreDebug_DEMCR_DWTENA_Msk (1UL << CoreDebug_DEMCR_DWTENA_Pos) /*!< CoreDebug DEMCR: DWTENA Mask */
+
+#define CoreDebug_DEMCR_VC_HARDERR_Pos 10U /*!< CoreDebug DEMCR: VC_HARDERR Position */
+#define CoreDebug_DEMCR_VC_HARDERR_Msk (1UL << CoreDebug_DEMCR_VC_HARDERR_Pos) /*!< CoreDebug DEMCR: VC_HARDERR Mask */
+
+#define CoreDebug_DEMCR_VC_CORERESET_Pos 0U /*!< CoreDebug DEMCR: VC_CORERESET Position */
+#define CoreDebug_DEMCR_VC_CORERESET_Msk (1UL /*<< CoreDebug_DEMCR_VC_CORERESET_Pos*/) /*!< CoreDebug DEMCR: VC_CORERESET Mask */
+
+/* Debug Authentication Control Register Definitions */
+#define CoreDebug_DAUTHCTRL_INTSPNIDEN_Pos 3U /*!< CoreDebug DAUTHCTRL: INTSPNIDEN Position */
+#define CoreDebug_DAUTHCTRL_INTSPNIDEN_Msk (1UL << CoreDebug_DAUTHCTRL_INTSPNIDEN_Pos) /*!< CoreDebug DAUTHCTRL: INTSPNIDEN Mask */
+
+#define CoreDebug_DAUTHCTRL_SPNIDENSEL_Pos 2U /*!< CoreDebug DAUTHCTRL: SPNIDENSEL Position */
+#define CoreDebug_DAUTHCTRL_SPNIDENSEL_Msk (1UL << CoreDebug_DAUTHCTRL_SPNIDENSEL_Pos) /*!< CoreDebug DAUTHCTRL: SPNIDENSEL Mask */
+
+#define CoreDebug_DAUTHCTRL_INTSPIDEN_Pos 1U /*!< CoreDebug DAUTHCTRL: INTSPIDEN Position */
+#define CoreDebug_DAUTHCTRL_INTSPIDEN_Msk (1UL << CoreDebug_DAUTHCTRL_INTSPIDEN_Pos) /*!< CoreDebug DAUTHCTRL: INTSPIDEN Mask */
+
+#define CoreDebug_DAUTHCTRL_SPIDENSEL_Pos 0U /*!< CoreDebug DAUTHCTRL: SPIDENSEL Position */
+#define CoreDebug_DAUTHCTRL_SPIDENSEL_Msk (1UL /*<< CoreDebug_DAUTHCTRL_SPIDENSEL_Pos*/) /*!< CoreDebug DAUTHCTRL: SPIDENSEL Mask */
+
+/* Debug Security Control and Status Register Definitions */
+#define CoreDebug_DSCSR_CDS_Pos 16U /*!< CoreDebug DSCSR: CDS Position */
+#define CoreDebug_DSCSR_CDS_Msk (1UL << CoreDebug_DSCSR_CDS_Pos) /*!< CoreDebug DSCSR: CDS Mask */
+
+#define CoreDebug_DSCSR_SBRSEL_Pos 1U /*!< CoreDebug DSCSR: SBRSEL Position */
+#define CoreDebug_DSCSR_SBRSEL_Msk (1UL << CoreDebug_DSCSR_SBRSEL_Pos) /*!< CoreDebug DSCSR: SBRSEL Mask */
+
+#define CoreDebug_DSCSR_SBRSELEN_Pos 0U /*!< CoreDebug DSCSR: SBRSELEN Position */
+#define CoreDebug_DSCSR_SBRSELEN_Msk (1UL /*<< CoreDebug_DSCSR_SBRSELEN_Pos*/) /*!< CoreDebug DSCSR: SBRSELEN Mask */
+
+/*@} end of group CMSIS_CoreDebug */
+
+
+/**
+  \ingroup    CMSIS_core_register
+  \defgroup   CMSIS_core_bitfield     Core register bit field macros
+  \brief      Macros for use with bit field definitions (xxx_Pos, xxx_Msk).
+  @{
+ */
+
+/**
+  \brief   Mask and shift a bit field value for use in a register bit range.
+  \param[in] field  Name of the register bit field.
+  \param[in] value  Value of the bit field. This parameter is interpreted as an uint32_t type.
+  \return           Masked and shifted value.
+*/
+#define _VAL2FLD(field, value)    (((uint32_t)(value) << field ## _Pos) & field ## _Msk)
+
+/**
+  \brief   Mask and shift a register value to extract a bit field value.
+  \param[in] field  Name of the register bit field.
+  \param[in] value  Value of register. This parameter is interpreted as an uint32_t type.
+  \return           Masked and shifted bit field value.
+*/ +#define _FLD2VAL(field, value) (((uint32_t)(value) & field ## _Msk) >> field ## _Pos) + +/*@} end of group CMSIS_core_bitfield */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_core_base Core Definitions + \brief Definitions for base addresses, unions, and structures. + @{ + */ + +/* Memory mapping of Core Hardware */ + #define SCS_BASE (0xE000E000UL) /*!< System Control Space Base Address */ + #define DWT_BASE (0xE0001000UL) /*!< DWT Base Address */ + #define TPI_BASE (0xE0040000UL) /*!< TPI Base Address */ + #define CoreDebug_BASE (0xE000EDF0UL) /*!< Core Debug Base Address */ + #define SysTick_BASE (SCS_BASE + 0x0010UL) /*!< SysTick Base Address */ + #define NVIC_BASE (SCS_BASE + 0x0100UL) /*!< NVIC Base Address */ + #define SCB_BASE (SCS_BASE + 0x0D00UL) /*!< System Control Block Base Address */ + + + #define SCB ((SCB_Type *) SCB_BASE ) /*!< SCB configuration struct */ + #define SysTick ((SysTick_Type *) SysTick_BASE ) /*!< SysTick configuration struct */ + #define NVIC ((NVIC_Type *) NVIC_BASE ) /*!< NVIC configuration struct */ + #define DWT ((DWT_Type *) DWT_BASE ) /*!< DWT configuration struct */ + #define TPI ((TPI_Type *) TPI_BASE ) /*!< TPI configuration struct */ + #define CoreDebug ((CoreDebug_Type *) CoreDebug_BASE ) /*!< Core Debug configuration struct */ + + #if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) + #define MPU_BASE (SCS_BASE + 0x0D90UL) /*!< Memory Protection Unit */ + #define MPU ((MPU_Type *) MPU_BASE ) /*!< Memory Protection Unit */ + #endif + + #if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) + #define SAU_BASE (SCS_BASE + 0x0DD0UL) /*!< Security Attribution Unit */ + #define SAU ((SAU_Type *) SAU_BASE ) /*!< Security Attribution Unit */ + #endif + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) + #define SCS_BASE_NS (0xE002E000UL) /*!< System Control Space Base Address (non-secure address space) */ + #define CoreDebug_BASE_NS (0xE002EDF0UL) /*!< Core Debug Base Address (non-secure address space) */ + #define SysTick_BASE_NS (SCS_BASE_NS + 0x0010UL) /*!< SysTick Base Address (non-secure address space) */ + #define NVIC_BASE_NS (SCS_BASE_NS + 0x0100UL) /*!< NVIC Base Address (non-secure address space) */ + #define SCB_BASE_NS (SCS_BASE_NS + 0x0D00UL) /*!< System Control Block Base Address (non-secure address space) */ + + #define SCB_NS ((SCB_Type *) SCB_BASE_NS ) /*!< SCB configuration struct (non-secure address space) */ + #define SysTick_NS ((SysTick_Type *) SysTick_BASE_NS ) /*!< SysTick configuration struct (non-secure address space) */ + #define NVIC_NS ((NVIC_Type *) NVIC_BASE_NS ) /*!< NVIC configuration struct (non-secure address space) */ + #define CoreDebug_NS ((CoreDebug_Type *) CoreDebug_BASE_NS) /*!< Core Debug configuration struct (non-secure address space) */ + + #if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) + #define MPU_BASE_NS (SCS_BASE_NS + 0x0D90UL) /*!< Memory Protection Unit (non-secure address space) */ + #define MPU_NS ((MPU_Type *) MPU_BASE_NS ) /*!< Memory Protection Unit (non-secure address space) */ + #endif + +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ +/*@} */ + + + +/******************************************************************************* + * Hardware Abstraction Layer + Core Function Interface contains: + - Core NVIC Functions + - Core SysTick Functions + - Core Register Access Functions + ******************************************************************************/ +/** + \defgroup CMSIS_Core_FunctionInterface Functions and 
Instructions Reference +*/ + + + +/* ########################## NVIC functions #################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_NVICFunctions NVIC Functions + \brief Functions that manage interrupts and exceptions via the NVIC. + @{ + */ + +#ifdef CMSIS_NVIC_VIRTUAL + #ifndef CMSIS_NVIC_VIRTUAL_HEADER_FILE + #define CMSIS_NVIC_VIRTUAL_HEADER_FILE "cmsis_nvic_virtual.h" + #endif + #include CMSIS_NVIC_VIRTUAL_HEADER_FILE +#else + #define NVIC_SetPriorityGrouping __NVIC_SetPriorityGrouping + #define NVIC_GetPriorityGrouping __NVIC_GetPriorityGrouping + #define NVIC_EnableIRQ __NVIC_EnableIRQ + #define NVIC_GetEnableIRQ __NVIC_GetEnableIRQ + #define NVIC_DisableIRQ __NVIC_DisableIRQ + #define NVIC_GetPendingIRQ __NVIC_GetPendingIRQ + #define NVIC_SetPendingIRQ __NVIC_SetPendingIRQ + #define NVIC_ClearPendingIRQ __NVIC_ClearPendingIRQ + #define NVIC_GetActive __NVIC_GetActive + #define NVIC_SetPriority __NVIC_SetPriority + #define NVIC_GetPriority __NVIC_GetPriority + #define NVIC_SystemReset __NVIC_SystemReset +#endif /* CMSIS_NVIC_VIRTUAL */ + +#ifdef CMSIS_VECTAB_VIRTUAL + #ifndef CMSIS_VECTAB_VIRTUAL_HEADER_FILE + #define CMSIS_VECTAB_VIRTUAL_HEADER_FILE "cmsis_vectab_virtual.h" + #endif + #include CMSIS_VECTAB_VIRTUAL_HEADER_FILE +#else + #define NVIC_SetVector __NVIC_SetVector + #define NVIC_GetVector __NVIC_GetVector +#endif /* (CMSIS_VECTAB_VIRTUAL) */ + +#define NVIC_USER_IRQ_OFFSET 16 + + +/* Special LR values for Secure/Non-Secure call handling and exception handling */ + +/* Function Return Payload (from ARMv8-M Architecture Reference Manual) LR value on entry from Secure BLXNS */ +#define FNC_RETURN (0xFEFFFFFFUL) /* bit [0] ignored when processing a branch */ + +/* The following EXC_RETURN mask values are used to evaluate the LR on exception entry */ +#define EXC_RETURN_PREFIX (0xFF000000UL) /* bits [31:24] set to indicate an EXC_RETURN value */ +#define EXC_RETURN_S (0x00000040UL) /* bit [6] stack used to push registers: 0=Non-secure 1=Secure */ +#define EXC_RETURN_DCRS (0x00000020UL) /* bit [5] stacking rules for called registers: 0=skipped 1=saved */ +#define EXC_RETURN_FTYPE (0x00000010UL) /* bit [4] allocate stack for floating-point context: 0=done 1=skipped */ +#define EXC_RETURN_MODE (0x00000008UL) /* bit [3] processor mode for return: 0=Handler mode 1=Thread mode */ +#define EXC_RETURN_SPSEL (0x00000002UL) /* bit [1] stack pointer used to restore context: 0=MSP 1=PSP */ +#define EXC_RETURN_ES (0x00000001UL) /* bit [0] security state exception was taken to: 0=Non-secure 1=Secure */ + +/* Integrity Signature (from ARMv8-M Architecture Reference Manual) for exception context stacking */ +#if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) /* Value for processors with floating-point extension: */ +#define EXC_INTEGRITY_SIGNATURE (0xFEFA125AUL) /* bit [0] SFTC must match LR bit[4] EXC_RETURN_FTYPE */ +#else +#define EXC_INTEGRITY_SIGNATURE (0xFEFA125BUL) /* Value for processors without floating-point extension */ +#endif + + +/* Interrupt Priorities are WORD accessible only under Armv6-M */ +/* The following MACROS handle generation of the register offset and byte masks */ +#define _BIT_SHIFT(IRQn) ( ((((uint32_t)(int32_t)(IRQn)) ) & 0x03UL) * 8UL) +#define _SHP_IDX(IRQn) ( (((((uint32_t)(int32_t)(IRQn)) & 0x0FUL)-8UL) >> 2UL) ) +#define _IP_IDX(IRQn) ( (((uint32_t)(int32_t)(IRQn)) >> 2UL) ) + +#define __NVIC_SetPriorityGrouping(X) (void)(X) +#define __NVIC_GetPriorityGrouping() (0U) + +/** + \brief Enable Interrupt + 
\details Enables a device specific interrupt in the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_EnableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ISER[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Get Interrupt Enable status + \details Returns a device specific interrupt enable status from the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt is not enabled. + \return 1 Interrupt is enabled. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t __NVIC_GetEnableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->ISER[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Disable Interrupt + \details Disables a device specific interrupt in the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_DisableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ICER[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + __DSB(); + __ISB(); + } +} + + +/** + \brief Get Pending Interrupt + \details Reads the NVIC pending register and returns the pending bit for the specified device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt status is not pending. + \return 1 Interrupt status is pending. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t __NVIC_GetPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->ISPR[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Set Pending Interrupt + \details Sets the pending bit of a device specific interrupt in the NVIC pending register. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_SetPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ISPR[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Clear Pending Interrupt + \details Clears the pending bit of a device specific interrupt in the NVIC pending register. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_ClearPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ICPR[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Get Active Interrupt + \details Reads the active register in the NVIC and returns the active bit for the device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt status is not active. + \return 1 Interrupt status is active. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t __NVIC_GetActive(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->IABR[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 
1UL : 0UL)); + } + else + { + return(0U); + } +} + + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \brief Get Interrupt Target State + \details Reads the interrupt target field in the NVIC and returns the interrupt target bit for the device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 if interrupt is assigned to Secure + \return 1 if interrupt is assigned to Non Secure + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t NVIC_GetTargetState(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->ITNS[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Set Interrupt Target State + \details Sets the interrupt target field in the NVIC and returns the interrupt target bit for the device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 if interrupt is assigned to Secure + 1 if interrupt is assigned to Non Secure + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t NVIC_SetTargetState(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ITNS[(((uint32_t)IRQn) >> 5UL)] |= ((uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL))); + return((uint32_t)(((NVIC->ITNS[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Clear Interrupt Target State + \details Clears the interrupt target field in the NVIC and returns the interrupt target bit for the device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 if interrupt is assigned to Secure + 1 if interrupt is assigned to Non Secure + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t NVIC_ClearTargetState(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ITNS[(((uint32_t)IRQn) >> 5UL)] &= ~((uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL))); + return((uint32_t)(((NVIC->ITNS[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + + +/** + \brief Set Interrupt Priority + \details Sets the priority of a device specific interrupt or a processor exception. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \param [in] priority Priority to set. + \note The priority cannot be set for every processor exception. + */ +__STATIC_INLINE void __NVIC_SetPriority(IRQn_Type IRQn, uint32_t priority) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->IPR[_IP_IDX(IRQn)] = ((uint32_t)(NVIC->IPR[_IP_IDX(IRQn)] & ~(0xFFUL << _BIT_SHIFT(IRQn))) | + (((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL) << _BIT_SHIFT(IRQn))); + } + else + { + SCB->SHPR[_SHP_IDX(IRQn)] = ((uint32_t)(SCB->SHPR[_SHP_IDX(IRQn)] & ~(0xFFUL << _BIT_SHIFT(IRQn))) | + (((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL) << _BIT_SHIFT(IRQn))); + } +} + + +/** + \brief Get Interrupt Priority + \details Reads the priority of a device specific interrupt or a processor exception. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \return Interrupt Priority. + Value is aligned automatically to the implemented priority bits of the microcontroller. 
+ */
+__STATIC_INLINE uint32_t __NVIC_GetPriority(IRQn_Type IRQn)
+{
+
+  if ((int32_t)(IRQn) >= 0)
+  {
+    return((uint32_t)(((NVIC->IPR[ _IP_IDX(IRQn)] >> _BIT_SHIFT(IRQn) ) & (uint32_t)0xFFUL) >> (8U - __NVIC_PRIO_BITS)));
+  }
+  else
+  {
+    return((uint32_t)(((SCB->SHPR[_SHP_IDX(IRQn)] >> _BIT_SHIFT(IRQn) ) & (uint32_t)0xFFUL) >> (8U - __NVIC_PRIO_BITS)));
+  }
+}
+
+
+/**
+  \brief   Encode Priority
+  \details Encodes the priority for an interrupt with the given priority group,
+           preemptive priority value, and subpriority value.
+           In case of a conflict between priority grouping and available
+           priority bits (__NVIC_PRIO_BITS), the smallest possible priority group is set.
+  \param [in]     PriorityGroup  Used priority group.
+  \param [in]   PreemptPriority  Preemptive priority value (starting from 0).
+  \param [in]       SubPriority  Subpriority value (starting from 0).
+  \return                        Encoded priority. Value can be used in the function \ref NVIC_SetPriority().
+ */
+__STATIC_INLINE uint32_t NVIC_EncodePriority (uint32_t PriorityGroup, uint32_t PreemptPriority, uint32_t SubPriority)
+{
+  uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL);   /* only values 0..7 are used */
+  uint32_t PreemptPriorityBits;
+  uint32_t SubPriorityBits;
+
+  PreemptPriorityBits = ((7UL - PriorityGroupTmp) > (uint32_t)(__NVIC_PRIO_BITS)) ? (uint32_t)(__NVIC_PRIO_BITS) : (uint32_t)(7UL - PriorityGroupTmp);
+  SubPriorityBits     = ((PriorityGroupTmp + (uint32_t)(__NVIC_PRIO_BITS)) < (uint32_t)7UL) ? (uint32_t)0UL : (uint32_t)((PriorityGroupTmp - 7UL) + (uint32_t)(__NVIC_PRIO_BITS));
+
+  return (
+           ((PreemptPriority & (uint32_t)((1UL << (PreemptPriorityBits)) - 1UL)) << SubPriorityBits) |
+           ((SubPriority     & (uint32_t)((1UL << (SubPriorityBits    )) - 1UL)))
+         );
+}
+
+
+/**
+  \brief   Decode Priority
+  \details Decodes an interrupt priority value with a given priority group to
+           preemptive priority value and subpriority value.
+           In case of a conflict between priority grouping and available
+           priority bits (__NVIC_PRIO_BITS), the smallest possible priority group is set.
+  \param [in]         Priority   Priority value, which can be retrieved with the function \ref NVIC_GetPriority().
+  \param [in]     PriorityGroup  Used priority group.
+  \param [out] pPreemptPriority  Preemptive priority value (starting from 0).
+  \param [out]     pSubPriority  Subpriority value (starting from 0).
+ */
+__STATIC_INLINE void NVIC_DecodePriority (uint32_t Priority, uint32_t PriorityGroup, uint32_t* const pPreemptPriority, uint32_t* const pSubPriority)
+{
+  uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL);   /* only values 0..7 are used */
+  uint32_t PreemptPriorityBits;
+  uint32_t SubPriorityBits;
+
+  PreemptPriorityBits = ((7UL - PriorityGroupTmp) > (uint32_t)(__NVIC_PRIO_BITS)) ? (uint32_t)(__NVIC_PRIO_BITS) : (uint32_t)(7UL - PriorityGroupTmp);
+  SubPriorityBits     = ((PriorityGroupTmp + (uint32_t)(__NVIC_PRIO_BITS)) < (uint32_t)7UL) ? (uint32_t)0UL : (uint32_t)((PriorityGroupTmp - 7UL) + (uint32_t)(__NVIC_PRIO_BITS));
+
+  *pPreemptPriority = (Priority >> SubPriorityBits) & (uint32_t)((1UL << (PreemptPriorityBits)) - 1UL);
+  *pSubPriority     = (Priority                   ) & (uint32_t)((1UL << (SubPriorityBits    )) - 1UL);
+}
+
+
+/**
+  \brief   Set Interrupt Vector
+  \details Sets an interrupt vector in the SRAM-based interrupt vector table.
+           The interrupt number can be positive to specify a device specific interrupt,
+           or negative to specify a processor exception.
+           VTOR must have been relocated to SRAM before.
+           If VTOR is not present, address 0 must be mapped to SRAM.
+ \param [in] IRQn Interrupt number + \param [in] vector Address of interrupt handler function + */ +__STATIC_INLINE void __NVIC_SetVector(IRQn_Type IRQn, uint32_t vector) +{ +#if defined (__VTOR_PRESENT) && (__VTOR_PRESENT == 1U) + uint32_t *vectors = (uint32_t *)SCB->VTOR; +#else + uint32_t *vectors = (uint32_t *)0x0U; +#endif + vectors[(int32_t)IRQn + NVIC_USER_IRQ_OFFSET] = vector; +} + + +/** + \brief Get Interrupt Vector + \details Reads an interrupt vector from interrupt vector table. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \return Address of interrupt handler function + */ +__STATIC_INLINE uint32_t __NVIC_GetVector(IRQn_Type IRQn) +{ +#if defined (__VTOR_PRESENT) && (__VTOR_PRESENT == 1U) + uint32_t *vectors = (uint32_t *)SCB->VTOR; +#else + uint32_t *vectors = (uint32_t *)0x0U; +#endif + return vectors[(int32_t)IRQn + NVIC_USER_IRQ_OFFSET]; +} + + +/** + \brief System Reset + \details Initiates a system reset request to reset the MCU. + */ +__NO_RETURN __STATIC_INLINE void __NVIC_SystemReset(void) +{ + __DSB(); /* Ensure all outstanding memory accesses included + buffered write are completed before reset */ + SCB->AIRCR = ((0x5FAUL << SCB_AIRCR_VECTKEY_Pos) | + SCB_AIRCR_SYSRESETREQ_Msk); + __DSB(); /* Ensure completion of memory access */ + + for(;;) /* wait until reset */ + { + __NOP(); + } +} + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \brief Enable Interrupt (non-secure) + \details Enables a device specific interrupt in the non-secure NVIC interrupt controller when in secure state. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void TZ_NVIC_EnableIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC_NS->ISER[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Get Interrupt Enable status (non-secure) + \details Returns a device specific interrupt enable status from the non-secure NVIC interrupt controller when in secure state. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt is not enabled. + \return 1 Interrupt is enabled. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t TZ_NVIC_GetEnableIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC_NS->ISER[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Disable Interrupt (non-secure) + \details Disables a device specific interrupt in the non-secure NVIC interrupt controller when in secure state. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void TZ_NVIC_DisableIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC_NS->ICER[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Get Pending Interrupt (non-secure) + \details Reads the NVIC pending register in the non-secure NVIC when in secure state and returns the pending bit for the specified device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt status is not pending. + \return 1 Interrupt status is pending. + \note IRQn must not be negative. 
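__NVIC_SetVector()/__NVIC_GetVector() above index the active vector table with NVIC_USER_IRQ_OFFSET, so they work for device interrupts and (negative-numbered) system exceptions alike, but only once the table lives in SRAM as the description notes. A sketch of replacing a handler at run time; the handler and the IRQ argument are placeholders, and the vector table is assumed to have been relocated already:

#include "device.h"   /* placeholder for the vendor device header */

static void my_timer_handler(void)
{
  /* acknowledge the peripheral, do the work ... */
}

void install_timer_handler(IRQn_Type timer_irq)
{
  uint32_t previous = __NVIC_GetVector(timer_irq);       /* keep if chaining is needed */
  (void)previous;

  __NVIC_SetVector(timer_irq, (uint32_t)&my_timer_handler);
  __DSB();                                               /* make the table update visible */
  __NVIC_EnableIRQ(timer_irq);                           /* enable helper defined earlier in this header */
}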
+ */ +__STATIC_INLINE uint32_t TZ_NVIC_GetPendingIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC_NS->ISPR[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Set Pending Interrupt (non-secure) + \details Sets the pending bit of a device specific interrupt in the non-secure NVIC pending register when in secure state. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void TZ_NVIC_SetPendingIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC_NS->ISPR[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Clear Pending Interrupt (non-secure) + \details Clears the pending bit of a device specific interrupt in the non-secure NVIC pending register when in secure state. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void TZ_NVIC_ClearPendingIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC_NS->ICPR[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Get Active Interrupt (non-secure) + \details Reads the active register in non-secure NVIC when in secure state and returns the active bit for the device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt status is not active. + \return 1 Interrupt status is active. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t TZ_NVIC_GetActive_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC_NS->IABR[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Set Interrupt Priority (non-secure) + \details Sets the priority of a non-secure device specific interrupt or a non-secure processor exception when in secure state. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \param [in] priority Priority to set. + \note The priority cannot be set for every non-secure processor exception. + */ +__STATIC_INLINE void TZ_NVIC_SetPriority_NS(IRQn_Type IRQn, uint32_t priority) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC_NS->IPR[_IP_IDX(IRQn)] = ((uint32_t)(NVIC_NS->IPR[_IP_IDX(IRQn)] & ~(0xFFUL << _BIT_SHIFT(IRQn))) | + (((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL) << _BIT_SHIFT(IRQn))); + } + else + { + SCB_NS->SHPR[_SHP_IDX(IRQn)] = ((uint32_t)(SCB_NS->SHPR[_SHP_IDX(IRQn)] & ~(0xFFUL << _BIT_SHIFT(IRQn))) | + (((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL) << _BIT_SHIFT(IRQn))); + } +} + + +/** + \brief Get Interrupt Priority (non-secure) + \details Reads the priority of a non-secure device specific interrupt or a non-secure processor exception when in secure state. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \return Interrupt Priority. Value is aligned automatically to the implemented priority bits of the microcontroller. 
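Combined with NVIC_SetTargetState() further up, these TZ_NVIC_*_NS helpers let secure firmware route an interrupt to the non-secure world and pre-configure it there before handing over. A sketch of that hand-over, compiled only when CMSE is available (__ARM_FEATURE_CMSE == 3U); the priority value is an arbitrary example:

#include "device.h"   /* placeholder for the vendor device header */

#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U)
void give_irq_to_nonsecure(IRQn_Type irq)
{
  NVIC_SetTargetState(irq);             /* set the ITNS bit: IRQ targets Non-secure */
  TZ_NVIC_SetPriority_NS(irq, 2U);      /* priority as seen from the NS NVIC view */
  TZ_NVIC_ClearPendingIRQ_NS(irq);      /* start from a clean pending state */
  TZ_NVIC_EnableIRQ_NS(irq);
}
#endif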
+ */ +__STATIC_INLINE uint32_t TZ_NVIC_GetPriority_NS(IRQn_Type IRQn) +{ + + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC_NS->IPR[ _IP_IDX(IRQn)] >> _BIT_SHIFT(IRQn) ) & (uint32_t)0xFFUL) >> (8U - __NVIC_PRIO_BITS))); + } + else + { + return((uint32_t)(((SCB_NS->SHPR[_SHP_IDX(IRQn)] >> _BIT_SHIFT(IRQn) ) & (uint32_t)0xFFUL) >> (8U - __NVIC_PRIO_BITS))); + } +} +#endif /* defined (__ARM_FEATURE_CMSE) &&(__ARM_FEATURE_CMSE == 3U) */ + +/*@} end of CMSIS_Core_NVICFunctions */ + +/* ########################## MPU functions #################################### */ + +#if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) + +#include "mpu_armv8.h" + +#endif + +/* ########################## FPU functions #################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_FpuFunctions FPU Functions + \brief Function that provides FPU type. + @{ + */ + +/** + \brief get FPU type + \details returns the FPU type + \returns + - \b 0: No FPU + - \b 1: Single precision FPU + - \b 2: Double + Single precision FPU + */ +__STATIC_INLINE uint32_t SCB_GetFPUType(void) +{ + return 0U; /* No FPU */ +} + + +/*@} end of CMSIS_Core_FpuFunctions */ + + + +/* ########################## SAU functions #################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_SAUFunctions SAU Functions + \brief Functions that configure the SAU. + @{ + */ + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) + +/** + \brief Enable SAU + \details Enables the Security Attribution Unit (SAU). + */ +__STATIC_INLINE void TZ_SAU_Enable(void) +{ + SAU->CTRL |= (SAU_CTRL_ENABLE_Msk); +} + + + +/** + \brief Disable SAU + \details Disables the Security Attribution Unit (SAU). + */ +__STATIC_INLINE void TZ_SAU_Disable(void) +{ + SAU->CTRL &= ~(SAU_CTRL_ENABLE_Msk); +} + +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + +/*@} end of CMSIS_Core_SAUFunctions */ + + + + +/* ################################## SysTick function ############################################ */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_SysTickFunctions SysTick Functions + \brief Functions that configure the System. + @{ + */ + +#if defined (__Vendor_SysTickConfig) && (__Vendor_SysTickConfig == 0U) + +/** + \brief System Tick Configuration + \details Initializes the System Timer and its interrupt, and starts the System Tick Timer. + Counter is in free running mode to generate periodic interrupts. + \param [in] ticks Number of ticks between two interrupts. + \return 0 Function succeeded. + \return 1 Function failed. + \note When the variable __Vendor_SysTickConfig is set to 1, then the + function SysTick_Config is not included. In this case, the file device.h + must contain a vendor-specific implementation of this function. 
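SysTick_Config(), implemented directly below, programs LOAD and VAL, gives the SysTick exception the lowest priority and starts the timer on the processor clock. A usage sketch for a 1 ms tick; SystemCoreClock and the SysTick_Handler name come from the usual CMSIS device support files and are assumptions here:

#include "device.h"   /* placeholder for the vendor device header */

volatile uint32_t tick_ms;

void SysTick_Handler(void)               /* handler name used by typical CMSIS startup code */
{
  tick_ms++;
}

int start_1ms_tick(void)
{
  /* SystemCoreClock is assumed to be maintained by SystemCoreClockUpdate(). */
  return (SysTick_Config(SystemCoreClock / 1000U) == 0UL) ? 0 : -1;
}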
+ */ +__STATIC_INLINE uint32_t SysTick_Config(uint32_t ticks) +{ + if ((ticks - 1UL) > SysTick_LOAD_RELOAD_Msk) + { + return (1UL); /* Reload value impossible */ + } + + SysTick->LOAD = (uint32_t)(ticks - 1UL); /* set reload register */ + NVIC_SetPriority (SysTick_IRQn, (1UL << __NVIC_PRIO_BITS) - 1UL); /* set Priority for Systick Interrupt */ + SysTick->VAL = 0UL; /* Load the SysTick Counter Value */ + SysTick->CTRL = SysTick_CTRL_CLKSOURCE_Msk | + SysTick_CTRL_TICKINT_Msk | + SysTick_CTRL_ENABLE_Msk; /* Enable SysTick IRQ and SysTick Timer */ + return (0UL); /* Function successful */ +} + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \brief System Tick Configuration (non-secure) + \details Initializes the non-secure System Timer and its interrupt when in secure state, and starts the System Tick Timer. + Counter is in free running mode to generate periodic interrupts. + \param [in] ticks Number of ticks between two interrupts. + \return 0 Function succeeded. + \return 1 Function failed. + \note When the variable __Vendor_SysTickConfig is set to 1, then the + function TZ_SysTick_Config_NS is not included. In this case, the file device.h + must contain a vendor-specific implementation of this function. + + */ +__STATIC_INLINE uint32_t TZ_SysTick_Config_NS(uint32_t ticks) +{ + if ((ticks - 1UL) > SysTick_LOAD_RELOAD_Msk) + { + return (1UL); /* Reload value impossible */ + } + + SysTick_NS->LOAD = (uint32_t)(ticks - 1UL); /* set reload register */ + TZ_NVIC_SetPriority_NS (SysTick_IRQn, (1UL << __NVIC_PRIO_BITS) - 1UL); /* set Priority for Systick Interrupt */ + SysTick_NS->VAL = 0UL; /* Load the SysTick Counter Value */ + SysTick_NS->CTRL = SysTick_CTRL_CLKSOURCE_Msk | + SysTick_CTRL_TICKINT_Msk | + SysTick_CTRL_ENABLE_Msk; /* Enable SysTick IRQ and SysTick Timer */ + return (0UL); /* Function successful */ +} +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + +#endif + +/*@} end of CMSIS_Core_SysTickFunctions */ + + + + +#ifdef __cplusplus +} +#endif + +#endif /* __CORE_ARMV8MBL_H_DEPENDANT */ + +#endif /* __CMSIS_GENERIC */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Drivers/CMSIS/Include/core_armv8mml.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Drivers/CMSIS/Include/core_armv8mml.h new file mode 100644 index 0000000..2d0f106 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Drivers/CMSIS/Include/core_armv8mml.h @@ -0,0 +1,2927 @@ +/**************************************************************************//** + * @file core_armv8mml.h + * @brief CMSIS Armv8-M Mainline Core Peripheral Access Layer Header File + * @version V5.0.7 + * @date 06. July 2018 + ******************************************************************************/ +/* + * Copyright (c) 2009-2018 Arm Limited. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+#if defined ( __ICCARM__ )
+ #pragma system_include /* treat file as system include file for MISRA check */
+#elif defined (__clang__)
+ #pragma clang system_header /* treat file as system include file */
+#endif
+
+#ifndef __CORE_ARMV8MML_H_GENERIC
+#define __CORE_ARMV8MML_H_GENERIC
+
+#include <stdint.h>
+
+#ifdef __cplusplus
+ extern "C" {
+#endif
+
+/**
+ \page CMSIS_MISRA_Exceptions MISRA-C:2004 Compliance Exceptions
+ CMSIS violates the following MISRA-C:2004 rules:
+
+ \li Required Rule 8.5, object/function definition in header file.
+ Function definitions in header files are used to allow 'inlining'. + + \li Required Rule 18.4, declaration of union type or object of union type: '{...}'.
+ Unions are used for effective representation of core registers. + + \li Advisory Rule 19.7, Function-like macro defined.
+ Function-like macros are used to allow more efficient code. + */ + + +/******************************************************************************* + * CMSIS definitions + ******************************************************************************/ +/** + \ingroup Cortex_ARMv8MML + @{ + */ + +#include "cmsis_version.h" + +/* CMSIS Armv8MML definitions */ +#define __ARMv8MML_CMSIS_VERSION_MAIN (__CM_CMSIS_VERSION_MAIN) /*!< \deprecated [31:16] CMSIS HAL main version */ +#define __ARMv8MML_CMSIS_VERSION_SUB (__CM_CMSIS_VERSION_SUB) /*!< \deprecated [15:0] CMSIS HAL sub version */ +#define __ARMv8MML_CMSIS_VERSION ((__ARMv8MML_CMSIS_VERSION_MAIN << 16U) | \ + __ARMv8MML_CMSIS_VERSION_SUB ) /*!< \deprecated CMSIS HAL version number */ + +#define __CORTEX_M (81U) /*!< Cortex-M Core */ + +/** __FPU_USED indicates whether an FPU is used or not. + For this, __FPU_PRESENT has to be checked prior to making use of FPU specific registers and functions. +*/ +#if defined ( __CC_ARM ) + #if defined __TARGET_FPU_VFP + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + + #if defined(__ARM_FEATURE_DSP) + #if defined(__DSP_PRESENT) && (__DSP_PRESENT == 1U) + #define __DSP_USED 1U + #else + #error "Compiler generates DSP (SIMD) instructions for a devices without DSP extensions (check __DSP_PRESENT)" + #define __DSP_USED 0U + #endif + #else + #define __DSP_USED 0U + #endif + +#elif defined (__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050) + #if defined __ARM_PCS_VFP + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #warning "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + + #if defined(__ARM_FEATURE_DSP) + #if defined(__DSP_PRESENT) && (__DSP_PRESENT == 1U) + #define __DSP_USED 1U + #else + #error "Compiler generates DSP (SIMD) instructions for a devices without DSP extensions (check __DSP_PRESENT)" + #define __DSP_USED 0U + #endif + #else + #define __DSP_USED 0U + #endif + +#elif defined ( __GNUC__ ) + #if defined (__VFP_FP__) && !defined(__SOFTFP__) + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + + #if defined(__ARM_FEATURE_DSP) + #if defined(__DSP_PRESENT) && (__DSP_PRESENT == 1U) + #define __DSP_USED 1U + #else + #error "Compiler generates DSP (SIMD) instructions for a devices without DSP extensions (check __DSP_PRESENT)" + #define __DSP_USED 0U + #endif + #else + #define __DSP_USED 0U + #endif + +#elif defined ( __ICCARM__ ) + #if defined __ARMVFP__ + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + + #if defined(__ARM_FEATURE_DSP) + #if defined(__DSP_PRESENT) && (__DSP_PRESENT == 1U) + #define __DSP_USED 1U + #else + #error "Compiler generates DSP (SIMD) instructions for a devices without DSP extensions (check __DSP_PRESENT)" + #define __DSP_USED 0U + #endif + #else + #define __DSP_USED 0U + #endif + +#elif defined ( __TI_ARM__ ) 
+ #if defined __TI_VFP_SUPPORT__ + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + +#elif defined ( __TASKING__ ) + #if defined __FPU_VFP__ + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + +#elif defined ( __CSMC__ ) + #if ( __CSMC__ & 0x400U) + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + +#endif + +#include "cmsis_compiler.h" /* CMSIS compiler specific defines */ + + +#ifdef __cplusplus +} +#endif + +#endif /* __CORE_ARMV8MML_H_GENERIC */ + +#ifndef __CMSIS_GENERIC + +#ifndef __CORE_ARMV8MML_H_DEPENDANT +#define __CORE_ARMV8MML_H_DEPENDANT + +#ifdef __cplusplus + extern "C" { +#endif + +/* check device defines and use defaults */ +#if defined __CHECK_DEVICE_DEFINES + #ifndef __ARMv8MML_REV + #define __ARMv8MML_REV 0x0000U + #warning "__ARMv8MML_REV not defined in device header file; using default!" + #endif + + #ifndef __FPU_PRESENT + #define __FPU_PRESENT 0U + #warning "__FPU_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __MPU_PRESENT + #define __MPU_PRESENT 0U + #warning "__MPU_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __SAUREGION_PRESENT + #define __SAUREGION_PRESENT 0U + #warning "__SAUREGION_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __DSP_PRESENT + #define __DSP_PRESENT 0U + #warning "__DSP_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __NVIC_PRIO_BITS + #define __NVIC_PRIO_BITS 3U + #warning "__NVIC_PRIO_BITS not defined in device header file; using default!" + #endif + + #ifndef __Vendor_SysTickConfig + #define __Vendor_SysTickConfig 0U + #warning "__Vendor_SysTickConfig not defined in device header file; using default!" + #endif +#endif + +/* IO definitions (access restrictions to peripheral registers) */ +/** + \defgroup CMSIS_glob_defs CMSIS Global Defines + + IO Type Qualifiers are used + \li to specify the access to peripheral variables. + \li for automatic generation of peripheral register debug information. +*/ +#ifdef __cplusplus + #define __I volatile /*!< Defines 'read only' permissions */ +#else + #define __I volatile const /*!< Defines 'read only' permissions */ +#endif +#define __O volatile /*!< Defines 'write only' permissions */ +#define __IO volatile /*!< Defines 'read / write' permissions */ + +/* following defines should be used for structure members */ +#define __IM volatile const /*! Defines 'read only' structure member permissions */ +#define __OM volatile /*! Defines 'write only' structure member permissions */ +#define __IOM volatile /*! 
Defines 'read / write' structure member permissions */ + +/*@} end of group ARMv8MML */ + + + +/******************************************************************************* + * Register Abstraction + Core Register contain: + - Core Register + - Core NVIC Register + - Core SCB Register + - Core SysTick Register + - Core Debug Register + - Core MPU Register + - Core SAU Register + - Core FPU Register + ******************************************************************************/ +/** + \defgroup CMSIS_core_register Defines and Type Definitions + \brief Type definitions and defines for Cortex-M processor based devices. +*/ + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_CORE Status and Control Registers + \brief Core Register type definitions. + @{ + */ + +/** + \brief Union type to access the Application Program Status Register (APSR). + */ +typedef union +{ + struct + { + uint32_t _reserved0:16; /*!< bit: 0..15 Reserved */ + uint32_t GE:4; /*!< bit: 16..19 Greater than or Equal flags */ + uint32_t _reserved1:7; /*!< bit: 20..26 Reserved */ + uint32_t Q:1; /*!< bit: 27 Saturation condition flag */ + uint32_t V:1; /*!< bit: 28 Overflow condition code flag */ + uint32_t C:1; /*!< bit: 29 Carry condition code flag */ + uint32_t Z:1; /*!< bit: 30 Zero condition code flag */ + uint32_t N:1; /*!< bit: 31 Negative condition code flag */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} APSR_Type; + +/* APSR Register Definitions */ +#define APSR_N_Pos 31U /*!< APSR: N Position */ +#define APSR_N_Msk (1UL << APSR_N_Pos) /*!< APSR: N Mask */ + +#define APSR_Z_Pos 30U /*!< APSR: Z Position */ +#define APSR_Z_Msk (1UL << APSR_Z_Pos) /*!< APSR: Z Mask */ + +#define APSR_C_Pos 29U /*!< APSR: C Position */ +#define APSR_C_Msk (1UL << APSR_C_Pos) /*!< APSR: C Mask */ + +#define APSR_V_Pos 28U /*!< APSR: V Position */ +#define APSR_V_Msk (1UL << APSR_V_Pos) /*!< APSR: V Mask */ + +#define APSR_Q_Pos 27U /*!< APSR: Q Position */ +#define APSR_Q_Msk (1UL << APSR_Q_Pos) /*!< APSR: Q Mask */ + +#define APSR_GE_Pos 16U /*!< APSR: GE Position */ +#define APSR_GE_Msk (0xFUL << APSR_GE_Pos) /*!< APSR: GE Mask */ + + +/** + \brief Union type to access the Interrupt Program Status Register (IPSR). + */ +typedef union +{ + struct + { + uint32_t ISR:9; /*!< bit: 0.. 8 Exception number */ + uint32_t _reserved0:23; /*!< bit: 9..31 Reserved */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} IPSR_Type; + +/* IPSR Register Definitions */ +#define IPSR_ISR_Pos 0U /*!< IPSR: ISR Position */ +#define IPSR_ISR_Msk (0x1FFUL /*<< IPSR_ISR_Pos*/) /*!< IPSR: ISR Mask */ + + +/** + \brief Union type to access the Special-Purpose Program Status Registers (xPSR). + */ +typedef union +{ + struct + { + uint32_t ISR:9; /*!< bit: 0.. 
8 Exception number */ + uint32_t _reserved0:7; /*!< bit: 9..15 Reserved */ + uint32_t GE:4; /*!< bit: 16..19 Greater than or Equal flags */ + uint32_t _reserved1:4; /*!< bit: 20..23 Reserved */ + uint32_t T:1; /*!< bit: 24 Thumb bit (read 0) */ + uint32_t IT:2; /*!< bit: 25..26 saved IT state (read 0) */ + uint32_t Q:1; /*!< bit: 27 Saturation condition flag */ + uint32_t V:1; /*!< bit: 28 Overflow condition code flag */ + uint32_t C:1; /*!< bit: 29 Carry condition code flag */ + uint32_t Z:1; /*!< bit: 30 Zero condition code flag */ + uint32_t N:1; /*!< bit: 31 Negative condition code flag */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} xPSR_Type; + +/* xPSR Register Definitions */ +#define xPSR_N_Pos 31U /*!< xPSR: N Position */ +#define xPSR_N_Msk (1UL << xPSR_N_Pos) /*!< xPSR: N Mask */ + +#define xPSR_Z_Pos 30U /*!< xPSR: Z Position */ +#define xPSR_Z_Msk (1UL << xPSR_Z_Pos) /*!< xPSR: Z Mask */ + +#define xPSR_C_Pos 29U /*!< xPSR: C Position */ +#define xPSR_C_Msk (1UL << xPSR_C_Pos) /*!< xPSR: C Mask */ + +#define xPSR_V_Pos 28U /*!< xPSR: V Position */ +#define xPSR_V_Msk (1UL << xPSR_V_Pos) /*!< xPSR: V Mask */ + +#define xPSR_Q_Pos 27U /*!< xPSR: Q Position */ +#define xPSR_Q_Msk (1UL << xPSR_Q_Pos) /*!< xPSR: Q Mask */ + +#define xPSR_IT_Pos 25U /*!< xPSR: IT Position */ +#define xPSR_IT_Msk (3UL << xPSR_IT_Pos) /*!< xPSR: IT Mask */ + +#define xPSR_T_Pos 24U /*!< xPSR: T Position */ +#define xPSR_T_Msk (1UL << xPSR_T_Pos) /*!< xPSR: T Mask */ + +#define xPSR_GE_Pos 16U /*!< xPSR: GE Position */ +#define xPSR_GE_Msk (0xFUL << xPSR_GE_Pos) /*!< xPSR: GE Mask */ + +#define xPSR_ISR_Pos 0U /*!< xPSR: ISR Position */ +#define xPSR_ISR_Msk (0x1FFUL /*<< xPSR_ISR_Pos*/) /*!< xPSR: ISR Mask */ + + +/** + \brief Union type to access the Control Registers (CONTROL). + */ +typedef union +{ + struct + { + uint32_t nPRIV:1; /*!< bit: 0 Execution privilege in Thread mode */ + uint32_t SPSEL:1; /*!< bit: 1 Stack-pointer select */ + uint32_t FPCA:1; /*!< bit: 2 Floating-point context active */ + uint32_t SFPA:1; /*!< bit: 3 Secure floating-point active */ + uint32_t _reserved1:28; /*!< bit: 4..31 Reserved */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} CONTROL_Type; + +/* CONTROL Register Definitions */ +#define CONTROL_SFPA_Pos 3U /*!< CONTROL: SFPA Position */ +#define CONTROL_SFPA_Msk (1UL << CONTROL_SFPA_Pos) /*!< CONTROL: SFPA Mask */ + +#define CONTROL_FPCA_Pos 2U /*!< CONTROL: FPCA Position */ +#define CONTROL_FPCA_Msk (1UL << CONTROL_FPCA_Pos) /*!< CONTROL: FPCA Mask */ + +#define CONTROL_SPSEL_Pos 1U /*!< CONTROL: SPSEL Position */ +#define CONTROL_SPSEL_Msk (1UL << CONTROL_SPSEL_Pos) /*!< CONTROL: SPSEL Mask */ + +#define CONTROL_nPRIV_Pos 0U /*!< CONTROL: nPRIV Position */ +#define CONTROL_nPRIV_Msk (1UL /*<< CONTROL_nPRIV_Pos*/) /*!< CONTROL: nPRIV Mask */ + +/*@} end of group CMSIS_CORE */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_NVIC Nested Vectored Interrupt Controller (NVIC) + \brief Type definitions for the NVIC Registers + @{ + */ + +/** + \brief Structure type to access the Nested Vectored Interrupt Controller (NVIC). 
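The APSR/IPSR/xPSR/CONTROL unions above are decoding helpers for values read with the CMSIS core-register intrinsics from cmsis_compiler.h. A small sketch using CONTROL_Type with __get_CONTROL(), which is assumed to be provided as it is for the other Cortex-M cores:

#include "device.h"   /* placeholder for the vendor device header */

int thread_mode_is_unprivileged_on_psp(void)
{
  CONTROL_Type ctrl;
  ctrl.w = __get_CONTROL();                            /* CMSIS core-register intrinsic */

  /* nPRIV: unprivileged thread mode, SPSEL: process stack selected */
  return (ctrl.b.nPRIV != 0U) && (ctrl.b.SPSEL != 0U);
}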
+ */ +typedef struct +{ + __IOM uint32_t ISER[16U]; /*!< Offset: 0x000 (R/W) Interrupt Set Enable Register */ + uint32_t RESERVED0[16U]; + __IOM uint32_t ICER[16U]; /*!< Offset: 0x080 (R/W) Interrupt Clear Enable Register */ + uint32_t RSERVED1[16U]; + __IOM uint32_t ISPR[16U]; /*!< Offset: 0x100 (R/W) Interrupt Set Pending Register */ + uint32_t RESERVED2[16U]; + __IOM uint32_t ICPR[16U]; /*!< Offset: 0x180 (R/W) Interrupt Clear Pending Register */ + uint32_t RESERVED3[16U]; + __IOM uint32_t IABR[16U]; /*!< Offset: 0x200 (R/W) Interrupt Active bit Register */ + uint32_t RESERVED4[16U]; + __IOM uint32_t ITNS[16U]; /*!< Offset: 0x280 (R/W) Interrupt Non-Secure State Register */ + uint32_t RESERVED5[16U]; + __IOM uint8_t IPR[496U]; /*!< Offset: 0x300 (R/W) Interrupt Priority Register (8Bit wide) */ + uint32_t RESERVED6[580U]; + __OM uint32_t STIR; /*!< Offset: 0xE00 ( /W) Software Trigger Interrupt Register */ +} NVIC_Type; + +/* Software Triggered Interrupt Register Definitions */ +#define NVIC_STIR_INTID_Pos 0U /*!< STIR: INTLINESNUM Position */ +#define NVIC_STIR_INTID_Msk (0x1FFUL /*<< NVIC_STIR_INTID_Pos*/) /*!< STIR: INTLINESNUM Mask */ + +/*@} end of group CMSIS_NVIC */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SCB System Control Block (SCB) + \brief Type definitions for the System Control Block Registers + @{ + */ + +/** + \brief Structure type to access the System Control Block (SCB). + */ +typedef struct +{ + __IM uint32_t CPUID; /*!< Offset: 0x000 (R/ ) CPUID Base Register */ + __IOM uint32_t ICSR; /*!< Offset: 0x004 (R/W) Interrupt Control and State Register */ + __IOM uint32_t VTOR; /*!< Offset: 0x008 (R/W) Vector Table Offset Register */ + __IOM uint32_t AIRCR; /*!< Offset: 0x00C (R/W) Application Interrupt and Reset Control Register */ + __IOM uint32_t SCR; /*!< Offset: 0x010 (R/W) System Control Register */ + __IOM uint32_t CCR; /*!< Offset: 0x014 (R/W) Configuration Control Register */ + __IOM uint8_t SHPR[12U]; /*!< Offset: 0x018 (R/W) System Handlers Priority Registers (4-7, 8-11, 12-15) */ + __IOM uint32_t SHCSR; /*!< Offset: 0x024 (R/W) System Handler Control and State Register */ + __IOM uint32_t CFSR; /*!< Offset: 0x028 (R/W) Configurable Fault Status Register */ + __IOM uint32_t HFSR; /*!< Offset: 0x02C (R/W) HardFault Status Register */ + __IOM uint32_t DFSR; /*!< Offset: 0x030 (R/W) Debug Fault Status Register */ + __IOM uint32_t MMFAR; /*!< Offset: 0x034 (R/W) MemManage Fault Address Register */ + __IOM uint32_t BFAR; /*!< Offset: 0x038 (R/W) BusFault Address Register */ + __IOM uint32_t AFSR; /*!< Offset: 0x03C (R/W) Auxiliary Fault Status Register */ + __IM uint32_t ID_PFR[2U]; /*!< Offset: 0x040 (R/ ) Processor Feature Register */ + __IM uint32_t ID_DFR; /*!< Offset: 0x048 (R/ ) Debug Feature Register */ + __IM uint32_t ID_ADR; /*!< Offset: 0x04C (R/ ) Auxiliary Feature Register */ + __IM uint32_t ID_MMFR[4U]; /*!< Offset: 0x050 (R/ ) Memory Model Feature Register */ + __IM uint32_t ID_ISAR[6U]; /*!< Offset: 0x060 (R/ ) Instruction Set Attributes Register */ + __IM uint32_t CLIDR; /*!< Offset: 0x078 (R/ ) Cache Level ID register */ + __IM uint32_t CTR; /*!< Offset: 0x07C (R/ ) Cache Type register */ + __IM uint32_t CCSIDR; /*!< Offset: 0x080 (R/ ) Cache Size ID Register */ + __IOM uint32_t CSSELR; /*!< Offset: 0x084 (R/W) Cache Size Selection Register */ + __IOM uint32_t CPACR; /*!< Offset: 0x088 (R/W) Coprocessor Access Control Register */ + __IOM uint32_t NSACR; /*!< Offset: 0x08C (R/W) Non-Secure Access Control Register */ + uint32_t 
RESERVED3[92U]; + __OM uint32_t STIR; /*!< Offset: 0x200 ( /W) Software Triggered Interrupt Register */ + uint32_t RESERVED4[15U]; + __IM uint32_t MVFR0; /*!< Offset: 0x240 (R/ ) Media and VFP Feature Register 0 */ + __IM uint32_t MVFR1; /*!< Offset: 0x244 (R/ ) Media and VFP Feature Register 1 */ + __IM uint32_t MVFR2; /*!< Offset: 0x248 (R/ ) Media and VFP Feature Register 2 */ + uint32_t RESERVED5[1U]; + __OM uint32_t ICIALLU; /*!< Offset: 0x250 ( /W) I-Cache Invalidate All to PoU */ + uint32_t RESERVED6[1U]; + __OM uint32_t ICIMVAU; /*!< Offset: 0x258 ( /W) I-Cache Invalidate by MVA to PoU */ + __OM uint32_t DCIMVAC; /*!< Offset: 0x25C ( /W) D-Cache Invalidate by MVA to PoC */ + __OM uint32_t DCISW; /*!< Offset: 0x260 ( /W) D-Cache Invalidate by Set-way */ + __OM uint32_t DCCMVAU; /*!< Offset: 0x264 ( /W) D-Cache Clean by MVA to PoU */ + __OM uint32_t DCCMVAC; /*!< Offset: 0x268 ( /W) D-Cache Clean by MVA to PoC */ + __OM uint32_t DCCSW; /*!< Offset: 0x26C ( /W) D-Cache Clean by Set-way */ + __OM uint32_t DCCIMVAC; /*!< Offset: 0x270 ( /W) D-Cache Clean and Invalidate by MVA to PoC */ + __OM uint32_t DCCISW; /*!< Offset: 0x274 ( /W) D-Cache Clean and Invalidate by Set-way */ + uint32_t RESERVED7[6U]; + __IOM uint32_t ITCMCR; /*!< Offset: 0x290 (R/W) Instruction Tightly-Coupled Memory Control Register */ + __IOM uint32_t DTCMCR; /*!< Offset: 0x294 (R/W) Data Tightly-Coupled Memory Control Registers */ + __IOM uint32_t AHBPCR; /*!< Offset: 0x298 (R/W) AHBP Control Register */ + __IOM uint32_t CACR; /*!< Offset: 0x29C (R/W) L1 Cache Control Register */ + __IOM uint32_t AHBSCR; /*!< Offset: 0x2A0 (R/W) AHB Slave Control Register */ + uint32_t RESERVED8[1U]; + __IOM uint32_t ABFSR; /*!< Offset: 0x2A8 (R/W) Auxiliary Bus Fault Status Register */ +} SCB_Type; + +/* SCB CPUID Register Definitions */ +#define SCB_CPUID_IMPLEMENTER_Pos 24U /*!< SCB CPUID: IMPLEMENTER Position */ +#define SCB_CPUID_IMPLEMENTER_Msk (0xFFUL << SCB_CPUID_IMPLEMENTER_Pos) /*!< SCB CPUID: IMPLEMENTER Mask */ + +#define SCB_CPUID_VARIANT_Pos 20U /*!< SCB CPUID: VARIANT Position */ +#define SCB_CPUID_VARIANT_Msk (0xFUL << SCB_CPUID_VARIANT_Pos) /*!< SCB CPUID: VARIANT Mask */ + +#define SCB_CPUID_ARCHITECTURE_Pos 16U /*!< SCB CPUID: ARCHITECTURE Position */ +#define SCB_CPUID_ARCHITECTURE_Msk (0xFUL << SCB_CPUID_ARCHITECTURE_Pos) /*!< SCB CPUID: ARCHITECTURE Mask */ + +#define SCB_CPUID_PARTNO_Pos 4U /*!< SCB CPUID: PARTNO Position */ +#define SCB_CPUID_PARTNO_Msk (0xFFFUL << SCB_CPUID_PARTNO_Pos) /*!< SCB CPUID: PARTNO Mask */ + +#define SCB_CPUID_REVISION_Pos 0U /*!< SCB CPUID: REVISION Position */ +#define SCB_CPUID_REVISION_Msk (0xFUL /*<< SCB_CPUID_REVISION_Pos*/) /*!< SCB CPUID: REVISION Mask */ + +/* SCB Interrupt Control State Register Definitions */ +#define SCB_ICSR_PENDNMISET_Pos 31U /*!< SCB ICSR: PENDNMISET Position */ +#define SCB_ICSR_PENDNMISET_Msk (1UL << SCB_ICSR_PENDNMISET_Pos) /*!< SCB ICSR: PENDNMISET Mask */ + +#define SCB_ICSR_NMIPENDSET_Pos SCB_ICSR_PENDNMISET_Pos /*!< SCB ICSR: NMIPENDSET Position, backward compatibility */ +#define SCB_ICSR_NMIPENDSET_Msk SCB_ICSR_PENDNMISET_Msk /*!< SCB ICSR: NMIPENDSET Mask, backward compatibility */ + +#define SCB_ICSR_PENDNMICLR_Pos 30U /*!< SCB ICSR: PENDNMICLR Position */ +#define SCB_ICSR_PENDNMICLR_Msk (1UL << SCB_ICSR_PENDNMICLR_Pos) /*!< SCB ICSR: PENDNMICLR Mask */ + +#define SCB_ICSR_PENDSVSET_Pos 28U /*!< SCB ICSR: PENDSVSET Position */ +#define SCB_ICSR_PENDSVSET_Msk (1UL << SCB_ICSR_PENDSVSET_Pos) /*!< SCB ICSR: PENDSVSET Mask */ + +#define 
SCB_ICSR_PENDSVCLR_Pos 27U /*!< SCB ICSR: PENDSVCLR Position */ +#define SCB_ICSR_PENDSVCLR_Msk (1UL << SCB_ICSR_PENDSVCLR_Pos) /*!< SCB ICSR: PENDSVCLR Mask */ + +#define SCB_ICSR_PENDSTSET_Pos 26U /*!< SCB ICSR: PENDSTSET Position */ +#define SCB_ICSR_PENDSTSET_Msk (1UL << SCB_ICSR_PENDSTSET_Pos) /*!< SCB ICSR: PENDSTSET Mask */ + +#define SCB_ICSR_PENDSTCLR_Pos 25U /*!< SCB ICSR: PENDSTCLR Position */ +#define SCB_ICSR_PENDSTCLR_Msk (1UL << SCB_ICSR_PENDSTCLR_Pos) /*!< SCB ICSR: PENDSTCLR Mask */ + +#define SCB_ICSR_STTNS_Pos 24U /*!< SCB ICSR: STTNS Position (Security Extension) */ +#define SCB_ICSR_STTNS_Msk (1UL << SCB_ICSR_STTNS_Pos) /*!< SCB ICSR: STTNS Mask (Security Extension) */ + +#define SCB_ICSR_ISRPREEMPT_Pos 23U /*!< SCB ICSR: ISRPREEMPT Position */ +#define SCB_ICSR_ISRPREEMPT_Msk (1UL << SCB_ICSR_ISRPREEMPT_Pos) /*!< SCB ICSR: ISRPREEMPT Mask */ + +#define SCB_ICSR_ISRPENDING_Pos 22U /*!< SCB ICSR: ISRPENDING Position */ +#define SCB_ICSR_ISRPENDING_Msk (1UL << SCB_ICSR_ISRPENDING_Pos) /*!< SCB ICSR: ISRPENDING Mask */ + +#define SCB_ICSR_VECTPENDING_Pos 12U /*!< SCB ICSR: VECTPENDING Position */ +#define SCB_ICSR_VECTPENDING_Msk (0x1FFUL << SCB_ICSR_VECTPENDING_Pos) /*!< SCB ICSR: VECTPENDING Mask */ + +#define SCB_ICSR_RETTOBASE_Pos 11U /*!< SCB ICSR: RETTOBASE Position */ +#define SCB_ICSR_RETTOBASE_Msk (1UL << SCB_ICSR_RETTOBASE_Pos) /*!< SCB ICSR: RETTOBASE Mask */ + +#define SCB_ICSR_VECTACTIVE_Pos 0U /*!< SCB ICSR: VECTACTIVE Position */ +#define SCB_ICSR_VECTACTIVE_Msk (0x1FFUL /*<< SCB_ICSR_VECTACTIVE_Pos*/) /*!< SCB ICSR: VECTACTIVE Mask */ + +/* SCB Vector Table Offset Register Definitions */ +#define SCB_VTOR_TBLOFF_Pos 7U /*!< SCB VTOR: TBLOFF Position */ +#define SCB_VTOR_TBLOFF_Msk (0x1FFFFFFUL << SCB_VTOR_TBLOFF_Pos) /*!< SCB VTOR: TBLOFF Mask */ + +/* SCB Application Interrupt and Reset Control Register Definitions */ +#define SCB_AIRCR_VECTKEY_Pos 16U /*!< SCB AIRCR: VECTKEY Position */ +#define SCB_AIRCR_VECTKEY_Msk (0xFFFFUL << SCB_AIRCR_VECTKEY_Pos) /*!< SCB AIRCR: VECTKEY Mask */ + +#define SCB_AIRCR_VECTKEYSTAT_Pos 16U /*!< SCB AIRCR: VECTKEYSTAT Position */ +#define SCB_AIRCR_VECTKEYSTAT_Msk (0xFFFFUL << SCB_AIRCR_VECTKEYSTAT_Pos) /*!< SCB AIRCR: VECTKEYSTAT Mask */ + +#define SCB_AIRCR_ENDIANESS_Pos 15U /*!< SCB AIRCR: ENDIANESS Position */ +#define SCB_AIRCR_ENDIANESS_Msk (1UL << SCB_AIRCR_ENDIANESS_Pos) /*!< SCB AIRCR: ENDIANESS Mask */ + +#define SCB_AIRCR_PRIS_Pos 14U /*!< SCB AIRCR: PRIS Position */ +#define SCB_AIRCR_PRIS_Msk (1UL << SCB_AIRCR_PRIS_Pos) /*!< SCB AIRCR: PRIS Mask */ + +#define SCB_AIRCR_BFHFNMINS_Pos 13U /*!< SCB AIRCR: BFHFNMINS Position */ +#define SCB_AIRCR_BFHFNMINS_Msk (1UL << SCB_AIRCR_BFHFNMINS_Pos) /*!< SCB AIRCR: BFHFNMINS Mask */ + +#define SCB_AIRCR_PRIGROUP_Pos 8U /*!< SCB AIRCR: PRIGROUP Position */ +#define SCB_AIRCR_PRIGROUP_Msk (7UL << SCB_AIRCR_PRIGROUP_Pos) /*!< SCB AIRCR: PRIGROUP Mask */ + +#define SCB_AIRCR_SYSRESETREQS_Pos 3U /*!< SCB AIRCR: SYSRESETREQS Position */ +#define SCB_AIRCR_SYSRESETREQS_Msk (1UL << SCB_AIRCR_SYSRESETREQS_Pos) /*!< SCB AIRCR: SYSRESETREQS Mask */ + +#define SCB_AIRCR_SYSRESETREQ_Pos 2U /*!< SCB AIRCR: SYSRESETREQ Position */ +#define SCB_AIRCR_SYSRESETREQ_Msk (1UL << SCB_AIRCR_SYSRESETREQ_Pos) /*!< SCB AIRCR: SYSRESETREQ Mask */ + +#define SCB_AIRCR_VECTCLRACTIVE_Pos 1U /*!< SCB AIRCR: VECTCLRACTIVE Position */ +#define SCB_AIRCR_VECTCLRACTIVE_Msk (1UL << SCB_AIRCR_VECTCLRACTIVE_Pos) /*!< SCB AIRCR: VECTCLRACTIVE Mask */ + +/* SCB System Control Register Definitions */ +#define 
SCB_SCR_SEVONPEND_Pos 4U /*!< SCB SCR: SEVONPEND Position */ +#define SCB_SCR_SEVONPEND_Msk (1UL << SCB_SCR_SEVONPEND_Pos) /*!< SCB SCR: SEVONPEND Mask */ + +#define SCB_SCR_SLEEPDEEPS_Pos 3U /*!< SCB SCR: SLEEPDEEPS Position */ +#define SCB_SCR_SLEEPDEEPS_Msk (1UL << SCB_SCR_SLEEPDEEPS_Pos) /*!< SCB SCR: SLEEPDEEPS Mask */ + +#define SCB_SCR_SLEEPDEEP_Pos 2U /*!< SCB SCR: SLEEPDEEP Position */ +#define SCB_SCR_SLEEPDEEP_Msk (1UL << SCB_SCR_SLEEPDEEP_Pos) /*!< SCB SCR: SLEEPDEEP Mask */ + +#define SCB_SCR_SLEEPONEXIT_Pos 1U /*!< SCB SCR: SLEEPONEXIT Position */ +#define SCB_SCR_SLEEPONEXIT_Msk (1UL << SCB_SCR_SLEEPONEXIT_Pos) /*!< SCB SCR: SLEEPONEXIT Mask */ + +/* SCB Configuration Control Register Definitions */ +#define SCB_CCR_BP_Pos 18U /*!< SCB CCR: BP Position */ +#define SCB_CCR_BP_Msk (1UL << SCB_CCR_BP_Pos) /*!< SCB CCR: BP Mask */ + +#define SCB_CCR_IC_Pos 17U /*!< SCB CCR: IC Position */ +#define SCB_CCR_IC_Msk (1UL << SCB_CCR_IC_Pos) /*!< SCB CCR: IC Mask */ + +#define SCB_CCR_DC_Pos 16U /*!< SCB CCR: DC Position */ +#define SCB_CCR_DC_Msk (1UL << SCB_CCR_DC_Pos) /*!< SCB CCR: DC Mask */ + +#define SCB_CCR_STKOFHFNMIGN_Pos 10U /*!< SCB CCR: STKOFHFNMIGN Position */ +#define SCB_CCR_STKOFHFNMIGN_Msk (1UL << SCB_CCR_STKOFHFNMIGN_Pos) /*!< SCB CCR: STKOFHFNMIGN Mask */ + +#define SCB_CCR_BFHFNMIGN_Pos 8U /*!< SCB CCR: BFHFNMIGN Position */ +#define SCB_CCR_BFHFNMIGN_Msk (1UL << SCB_CCR_BFHFNMIGN_Pos) /*!< SCB CCR: BFHFNMIGN Mask */ + +#define SCB_CCR_DIV_0_TRP_Pos 4U /*!< SCB CCR: DIV_0_TRP Position */ +#define SCB_CCR_DIV_0_TRP_Msk (1UL << SCB_CCR_DIV_0_TRP_Pos) /*!< SCB CCR: DIV_0_TRP Mask */ + +#define SCB_CCR_UNALIGN_TRP_Pos 3U /*!< SCB CCR: UNALIGN_TRP Position */ +#define SCB_CCR_UNALIGN_TRP_Msk (1UL << SCB_CCR_UNALIGN_TRP_Pos) /*!< SCB CCR: UNALIGN_TRP Mask */ + +#define SCB_CCR_USERSETMPEND_Pos 1U /*!< SCB CCR: USERSETMPEND Position */ +#define SCB_CCR_USERSETMPEND_Msk (1UL << SCB_CCR_USERSETMPEND_Pos) /*!< SCB CCR: USERSETMPEND Mask */ + +/* SCB System Handler Control and State Register Definitions */ +#define SCB_SHCSR_HARDFAULTPENDED_Pos 21U /*!< SCB SHCSR: HARDFAULTPENDED Position */ +#define SCB_SHCSR_HARDFAULTPENDED_Msk (1UL << SCB_SHCSR_HARDFAULTPENDED_Pos) /*!< SCB SHCSR: HARDFAULTPENDED Mask */ + +#define SCB_SHCSR_SECUREFAULTPENDED_Pos 20U /*!< SCB SHCSR: SECUREFAULTPENDED Position */ +#define SCB_SHCSR_SECUREFAULTPENDED_Msk (1UL << SCB_SHCSR_SECUREFAULTPENDED_Pos) /*!< SCB SHCSR: SECUREFAULTPENDED Mask */ + +#define SCB_SHCSR_SECUREFAULTENA_Pos 19U /*!< SCB SHCSR: SECUREFAULTENA Position */ +#define SCB_SHCSR_SECUREFAULTENA_Msk (1UL << SCB_SHCSR_SECUREFAULTENA_Pos) /*!< SCB SHCSR: SECUREFAULTENA Mask */ + +#define SCB_SHCSR_USGFAULTENA_Pos 18U /*!< SCB SHCSR: USGFAULTENA Position */ +#define SCB_SHCSR_USGFAULTENA_Msk (1UL << SCB_SHCSR_USGFAULTENA_Pos) /*!< SCB SHCSR: USGFAULTENA Mask */ + +#define SCB_SHCSR_BUSFAULTENA_Pos 17U /*!< SCB SHCSR: BUSFAULTENA Position */ +#define SCB_SHCSR_BUSFAULTENA_Msk (1UL << SCB_SHCSR_BUSFAULTENA_Pos) /*!< SCB SHCSR: BUSFAULTENA Mask */ + +#define SCB_SHCSR_MEMFAULTENA_Pos 16U /*!< SCB SHCSR: MEMFAULTENA Position */ +#define SCB_SHCSR_MEMFAULTENA_Msk (1UL << SCB_SHCSR_MEMFAULTENA_Pos) /*!< SCB SHCSR: MEMFAULTENA Mask */ + +#define SCB_SHCSR_SVCALLPENDED_Pos 15U /*!< SCB SHCSR: SVCALLPENDED Position */ +#define SCB_SHCSR_SVCALLPENDED_Msk (1UL << SCB_SHCSR_SVCALLPENDED_Pos) /*!< SCB SHCSR: SVCALLPENDED Mask */ + +#define SCB_SHCSR_BUSFAULTPENDED_Pos 14U /*!< SCB SHCSR: BUSFAULTPENDED Position */ +#define 
SCB_SHCSR_BUSFAULTPENDED_Msk (1UL << SCB_SHCSR_BUSFAULTPENDED_Pos) /*!< SCB SHCSR: BUSFAULTPENDED Mask */ + +#define SCB_SHCSR_MEMFAULTPENDED_Pos 13U /*!< SCB SHCSR: MEMFAULTPENDED Position */ +#define SCB_SHCSR_MEMFAULTPENDED_Msk (1UL << SCB_SHCSR_MEMFAULTPENDED_Pos) /*!< SCB SHCSR: MEMFAULTPENDED Mask */ + +#define SCB_SHCSR_USGFAULTPENDED_Pos 12U /*!< SCB SHCSR: USGFAULTPENDED Position */ +#define SCB_SHCSR_USGFAULTPENDED_Msk (1UL << SCB_SHCSR_USGFAULTPENDED_Pos) /*!< SCB SHCSR: USGFAULTPENDED Mask */ + +#define SCB_SHCSR_SYSTICKACT_Pos 11U /*!< SCB SHCSR: SYSTICKACT Position */ +#define SCB_SHCSR_SYSTICKACT_Msk (1UL << SCB_SHCSR_SYSTICKACT_Pos) /*!< SCB SHCSR: SYSTICKACT Mask */ + +#define SCB_SHCSR_PENDSVACT_Pos 10U /*!< SCB SHCSR: PENDSVACT Position */ +#define SCB_SHCSR_PENDSVACT_Msk (1UL << SCB_SHCSR_PENDSVACT_Pos) /*!< SCB SHCSR: PENDSVACT Mask */ + +#define SCB_SHCSR_MONITORACT_Pos 8U /*!< SCB SHCSR: MONITORACT Position */ +#define SCB_SHCSR_MONITORACT_Msk (1UL << SCB_SHCSR_MONITORACT_Pos) /*!< SCB SHCSR: MONITORACT Mask */ + +#define SCB_SHCSR_SVCALLACT_Pos 7U /*!< SCB SHCSR: SVCALLACT Position */ +#define SCB_SHCSR_SVCALLACT_Msk (1UL << SCB_SHCSR_SVCALLACT_Pos) /*!< SCB SHCSR: SVCALLACT Mask */ + +#define SCB_SHCSR_NMIACT_Pos 5U /*!< SCB SHCSR: NMIACT Position */ +#define SCB_SHCSR_NMIACT_Msk (1UL << SCB_SHCSR_NMIACT_Pos) /*!< SCB SHCSR: NMIACT Mask */ + +#define SCB_SHCSR_SECUREFAULTACT_Pos 4U /*!< SCB SHCSR: SECUREFAULTACT Position */ +#define SCB_SHCSR_SECUREFAULTACT_Msk (1UL << SCB_SHCSR_SECUREFAULTACT_Pos) /*!< SCB SHCSR: SECUREFAULTACT Mask */ + +#define SCB_SHCSR_USGFAULTACT_Pos 3U /*!< SCB SHCSR: USGFAULTACT Position */ +#define SCB_SHCSR_USGFAULTACT_Msk (1UL << SCB_SHCSR_USGFAULTACT_Pos) /*!< SCB SHCSR: USGFAULTACT Mask */ + +#define SCB_SHCSR_HARDFAULTACT_Pos 2U /*!< SCB SHCSR: HARDFAULTACT Position */ +#define SCB_SHCSR_HARDFAULTACT_Msk (1UL << SCB_SHCSR_HARDFAULTACT_Pos) /*!< SCB SHCSR: HARDFAULTACT Mask */ + +#define SCB_SHCSR_BUSFAULTACT_Pos 1U /*!< SCB SHCSR: BUSFAULTACT Position */ +#define SCB_SHCSR_BUSFAULTACT_Msk (1UL << SCB_SHCSR_BUSFAULTACT_Pos) /*!< SCB SHCSR: BUSFAULTACT Mask */ + +#define SCB_SHCSR_MEMFAULTACT_Pos 0U /*!< SCB SHCSR: MEMFAULTACT Position */ +#define SCB_SHCSR_MEMFAULTACT_Msk (1UL /*<< SCB_SHCSR_MEMFAULTACT_Pos*/) /*!< SCB SHCSR: MEMFAULTACT Mask */ + +/* SCB Configurable Fault Status Register Definitions */ +#define SCB_CFSR_USGFAULTSR_Pos 16U /*!< SCB CFSR: Usage Fault Status Register Position */ +#define SCB_CFSR_USGFAULTSR_Msk (0xFFFFUL << SCB_CFSR_USGFAULTSR_Pos) /*!< SCB CFSR: Usage Fault Status Register Mask */ + +#define SCB_CFSR_BUSFAULTSR_Pos 8U /*!< SCB CFSR: Bus Fault Status Register Position */ +#define SCB_CFSR_BUSFAULTSR_Msk (0xFFUL << SCB_CFSR_BUSFAULTSR_Pos) /*!< SCB CFSR: Bus Fault Status Register Mask */ + +#define SCB_CFSR_MEMFAULTSR_Pos 0U /*!< SCB CFSR: Memory Manage Fault Status Register Position */ +#define SCB_CFSR_MEMFAULTSR_Msk (0xFFUL /*<< SCB_CFSR_MEMFAULTSR_Pos*/) /*!< SCB CFSR: Memory Manage Fault Status Register Mask */ + +/* MemManage Fault Status Register (part of SCB Configurable Fault Status Register) */ +#define SCB_CFSR_MMARVALID_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 7U) /*!< SCB CFSR (MMFSR): MMARVALID Position */ +#define SCB_CFSR_MMARVALID_Msk (1UL << SCB_CFSR_MMARVALID_Pos) /*!< SCB CFSR (MMFSR): MMARVALID Mask */ + +#define SCB_CFSR_MLSPERR_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 5U) /*!< SCB CFSR (MMFSR): MLSPERR Position */ +#define SCB_CFSR_MLSPERR_Msk (1UL << SCB_CFSR_MLSPERR_Pos) /*!< SCB CFSR (MMFSR): 
MLSPERR Mask */ + +#define SCB_CFSR_MSTKERR_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 4U) /*!< SCB CFSR (MMFSR): MSTKERR Position */ +#define SCB_CFSR_MSTKERR_Msk (1UL << SCB_CFSR_MSTKERR_Pos) /*!< SCB CFSR (MMFSR): MSTKERR Mask */ + +#define SCB_CFSR_MUNSTKERR_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 3U) /*!< SCB CFSR (MMFSR): MUNSTKERR Position */ +#define SCB_CFSR_MUNSTKERR_Msk (1UL << SCB_CFSR_MUNSTKERR_Pos) /*!< SCB CFSR (MMFSR): MUNSTKERR Mask */ + +#define SCB_CFSR_DACCVIOL_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 1U) /*!< SCB CFSR (MMFSR): DACCVIOL Position */ +#define SCB_CFSR_DACCVIOL_Msk (1UL << SCB_CFSR_DACCVIOL_Pos) /*!< SCB CFSR (MMFSR): DACCVIOL Mask */ + +#define SCB_CFSR_IACCVIOL_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 0U) /*!< SCB CFSR (MMFSR): IACCVIOL Position */ +#define SCB_CFSR_IACCVIOL_Msk (1UL /*<< SCB_CFSR_IACCVIOL_Pos*/) /*!< SCB CFSR (MMFSR): IACCVIOL Mask */ + +/* BusFault Status Register (part of SCB Configurable Fault Status Register) */ +#define SCB_CFSR_BFARVALID_Pos (SCB_CFSR_BUSFAULTSR_Pos + 7U) /*!< SCB CFSR (BFSR): BFARVALID Position */ +#define SCB_CFSR_BFARVALID_Msk (1UL << SCB_CFSR_BFARVALID_Pos) /*!< SCB CFSR (BFSR): BFARVALID Mask */ + +#define SCB_CFSR_LSPERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 5U) /*!< SCB CFSR (BFSR): LSPERR Position */ +#define SCB_CFSR_LSPERR_Msk (1UL << SCB_CFSR_LSPERR_Pos) /*!< SCB CFSR (BFSR): LSPERR Mask */ + +#define SCB_CFSR_STKERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 4U) /*!< SCB CFSR (BFSR): STKERR Position */ +#define SCB_CFSR_STKERR_Msk (1UL << SCB_CFSR_STKERR_Pos) /*!< SCB CFSR (BFSR): STKERR Mask */ + +#define SCB_CFSR_UNSTKERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 3U) /*!< SCB CFSR (BFSR): UNSTKERR Position */ +#define SCB_CFSR_UNSTKERR_Msk (1UL << SCB_CFSR_UNSTKERR_Pos) /*!< SCB CFSR (BFSR): UNSTKERR Mask */ + +#define SCB_CFSR_IMPRECISERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 2U) /*!< SCB CFSR (BFSR): IMPRECISERR Position */ +#define SCB_CFSR_IMPRECISERR_Msk (1UL << SCB_CFSR_IMPRECISERR_Pos) /*!< SCB CFSR (BFSR): IMPRECISERR Mask */ + +#define SCB_CFSR_PRECISERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 1U) /*!< SCB CFSR (BFSR): PRECISERR Position */ +#define SCB_CFSR_PRECISERR_Msk (1UL << SCB_CFSR_PRECISERR_Pos) /*!< SCB CFSR (BFSR): PRECISERR Mask */ + +#define SCB_CFSR_IBUSERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 0U) /*!< SCB CFSR (BFSR): IBUSERR Position */ +#define SCB_CFSR_IBUSERR_Msk (1UL << SCB_CFSR_IBUSERR_Pos) /*!< SCB CFSR (BFSR): IBUSERR Mask */ + +/* UsageFault Status Register (part of SCB Configurable Fault Status Register) */ +#define SCB_CFSR_DIVBYZERO_Pos (SCB_CFSR_USGFAULTSR_Pos + 9U) /*!< SCB CFSR (UFSR): DIVBYZERO Position */ +#define SCB_CFSR_DIVBYZERO_Msk (1UL << SCB_CFSR_DIVBYZERO_Pos) /*!< SCB CFSR (UFSR): DIVBYZERO Mask */ + +#define SCB_CFSR_UNALIGNED_Pos (SCB_CFSR_USGFAULTSR_Pos + 8U) /*!< SCB CFSR (UFSR): UNALIGNED Position */ +#define SCB_CFSR_UNALIGNED_Msk (1UL << SCB_CFSR_UNALIGNED_Pos) /*!< SCB CFSR (UFSR): UNALIGNED Mask */ + +#define SCB_CFSR_STKOF_Pos (SCB_CFSR_USGFAULTSR_Pos + 4U) /*!< SCB CFSR (UFSR): STKOF Position */ +#define SCB_CFSR_STKOF_Msk (1UL << SCB_CFSR_STKOF_Pos) /*!< SCB CFSR (UFSR): STKOF Mask */ + +#define SCB_CFSR_NOCP_Pos (SCB_CFSR_USGFAULTSR_Pos + 3U) /*!< SCB CFSR (UFSR): NOCP Position */ +#define SCB_CFSR_NOCP_Msk (1UL << SCB_CFSR_NOCP_Pos) /*!< SCB CFSR (UFSR): NOCP Mask */ + +#define SCB_CFSR_INVPC_Pos (SCB_CFSR_USGFAULTSR_Pos + 2U) /*!< SCB CFSR (UFSR): INVPC Position */ +#define SCB_CFSR_INVPC_Msk (1UL << SCB_CFSR_INVPC_Pos) /*!< SCB CFSR (UFSR): INVPC Mask */ + +#define SCB_CFSR_INVSTATE_Pos (SCB_CFSR_USGFAULTSR_Pos + 1U) 
/*!< SCB CFSR (UFSR): INVSTATE Position */ +#define SCB_CFSR_INVSTATE_Msk (1UL << SCB_CFSR_INVSTATE_Pos) /*!< SCB CFSR (UFSR): INVSTATE Mask */ + +#define SCB_CFSR_UNDEFINSTR_Pos (SCB_CFSR_USGFAULTSR_Pos + 0U) /*!< SCB CFSR (UFSR): UNDEFINSTR Position */ +#define SCB_CFSR_UNDEFINSTR_Msk (1UL << SCB_CFSR_UNDEFINSTR_Pos) /*!< SCB CFSR (UFSR): UNDEFINSTR Mask */ + +/* SCB Hard Fault Status Register Definitions */ +#define SCB_HFSR_DEBUGEVT_Pos 31U /*!< SCB HFSR: DEBUGEVT Position */ +#define SCB_HFSR_DEBUGEVT_Msk (1UL << SCB_HFSR_DEBUGEVT_Pos) /*!< SCB HFSR: DEBUGEVT Mask */ + +#define SCB_HFSR_FORCED_Pos 30U /*!< SCB HFSR: FORCED Position */ +#define SCB_HFSR_FORCED_Msk (1UL << SCB_HFSR_FORCED_Pos) /*!< SCB HFSR: FORCED Mask */ + +#define SCB_HFSR_VECTTBL_Pos 1U /*!< SCB HFSR: VECTTBL Position */ +#define SCB_HFSR_VECTTBL_Msk (1UL << SCB_HFSR_VECTTBL_Pos) /*!< SCB HFSR: VECTTBL Mask */ + +/* SCB Debug Fault Status Register Definitions */ +#define SCB_DFSR_EXTERNAL_Pos 4U /*!< SCB DFSR: EXTERNAL Position */ +#define SCB_DFSR_EXTERNAL_Msk (1UL << SCB_DFSR_EXTERNAL_Pos) /*!< SCB DFSR: EXTERNAL Mask */ + +#define SCB_DFSR_VCATCH_Pos 3U /*!< SCB DFSR: VCATCH Position */ +#define SCB_DFSR_VCATCH_Msk (1UL << SCB_DFSR_VCATCH_Pos) /*!< SCB DFSR: VCATCH Mask */ + +#define SCB_DFSR_DWTTRAP_Pos 2U /*!< SCB DFSR: DWTTRAP Position */ +#define SCB_DFSR_DWTTRAP_Msk (1UL << SCB_DFSR_DWTTRAP_Pos) /*!< SCB DFSR: DWTTRAP Mask */ + +#define SCB_DFSR_BKPT_Pos 1U /*!< SCB DFSR: BKPT Position */ +#define SCB_DFSR_BKPT_Msk (1UL << SCB_DFSR_BKPT_Pos) /*!< SCB DFSR: BKPT Mask */ + +#define SCB_DFSR_HALTED_Pos 0U /*!< SCB DFSR: HALTED Position */ +#define SCB_DFSR_HALTED_Msk (1UL /*<< SCB_DFSR_HALTED_Pos*/) /*!< SCB DFSR: HALTED Mask */ + +/* SCB Non-Secure Access Control Register Definitions */ +#define SCB_NSACR_CP11_Pos 11U /*!< SCB NSACR: CP11 Position */ +#define SCB_NSACR_CP11_Msk (1UL << SCB_NSACR_CP11_Pos) /*!< SCB NSACR: CP11 Mask */ + +#define SCB_NSACR_CP10_Pos 10U /*!< SCB NSACR: CP10 Position */ +#define SCB_NSACR_CP10_Msk (1UL << SCB_NSACR_CP10_Pos) /*!< SCB NSACR: CP10 Mask */ + +#define SCB_NSACR_CPn_Pos 0U /*!< SCB NSACR: CPn Position */ +#define SCB_NSACR_CPn_Msk (1UL /*<< SCB_NSACR_CPn_Pos*/) /*!< SCB NSACR: CPn Mask */ + +/* SCB Cache Level ID Register Definitions */ +#define SCB_CLIDR_LOUU_Pos 27U /*!< SCB CLIDR: LoUU Position */ +#define SCB_CLIDR_LOUU_Msk (7UL << SCB_CLIDR_LOUU_Pos) /*!< SCB CLIDR: LoUU Mask */ + +#define SCB_CLIDR_LOC_Pos 24U /*!< SCB CLIDR: LoC Position */ +#define SCB_CLIDR_LOC_Msk (7UL << SCB_CLIDR_LOC_Pos) /*!< SCB CLIDR: LoC Mask */ + +/* SCB Cache Type Register Definitions */ +#define SCB_CTR_FORMAT_Pos 29U /*!< SCB CTR: Format Position */ +#define SCB_CTR_FORMAT_Msk (7UL << SCB_CTR_FORMAT_Pos) /*!< SCB CTR: Format Mask */ + +#define SCB_CTR_CWG_Pos 24U /*!< SCB CTR: CWG Position */ +#define SCB_CTR_CWG_Msk (0xFUL << SCB_CTR_CWG_Pos) /*!< SCB CTR: CWG Mask */ + +#define SCB_CTR_ERG_Pos 20U /*!< SCB CTR: ERG Position */ +#define SCB_CTR_ERG_Msk (0xFUL << SCB_CTR_ERG_Pos) /*!< SCB CTR: ERG Mask */ + +#define SCB_CTR_DMINLINE_Pos 16U /*!< SCB CTR: DminLine Position */ +#define SCB_CTR_DMINLINE_Msk (0xFUL << SCB_CTR_DMINLINE_Pos) /*!< SCB CTR: DminLine Mask */ + +#define SCB_CTR_IMINLINE_Pos 0U /*!< SCB CTR: ImInLine Position */ +#define SCB_CTR_IMINLINE_Msk (0xFUL /*<< SCB_CTR_IMINLINE_Pos*/) /*!< SCB CTR: ImInLine Mask */ + +/* SCB Cache Size ID Register Definitions */ +#define SCB_CCSIDR_WT_Pos 31U /*!< SCB CCSIDR: WT Position */ +#define SCB_CCSIDR_WT_Msk (1UL << 
SCB_CCSIDR_WT_Pos) /*!< SCB CCSIDR: WT Mask */ + +#define SCB_CCSIDR_WB_Pos 30U /*!< SCB CCSIDR: WB Position */ +#define SCB_CCSIDR_WB_Msk (1UL << SCB_CCSIDR_WB_Pos) /*!< SCB CCSIDR: WB Mask */ + +#define SCB_CCSIDR_RA_Pos 29U /*!< SCB CCSIDR: RA Position */ +#define SCB_CCSIDR_RA_Msk (1UL << SCB_CCSIDR_RA_Pos) /*!< SCB CCSIDR: RA Mask */ + +#define SCB_CCSIDR_WA_Pos 28U /*!< SCB CCSIDR: WA Position */ +#define SCB_CCSIDR_WA_Msk (1UL << SCB_CCSIDR_WA_Pos) /*!< SCB CCSIDR: WA Mask */ + +#define SCB_CCSIDR_NUMSETS_Pos 13U /*!< SCB CCSIDR: NumSets Position */ +#define SCB_CCSIDR_NUMSETS_Msk (0x7FFFUL << SCB_CCSIDR_NUMSETS_Pos) /*!< SCB CCSIDR: NumSets Mask */ + +#define SCB_CCSIDR_ASSOCIATIVITY_Pos 3U /*!< SCB CCSIDR: Associativity Position */ +#define SCB_CCSIDR_ASSOCIATIVITY_Msk (0x3FFUL << SCB_CCSIDR_ASSOCIATIVITY_Pos) /*!< SCB CCSIDR: Associativity Mask */ + +#define SCB_CCSIDR_LINESIZE_Pos 0U /*!< SCB CCSIDR: LineSize Position */ +#define SCB_CCSIDR_LINESIZE_Msk (7UL /*<< SCB_CCSIDR_LINESIZE_Pos*/) /*!< SCB CCSIDR: LineSize Mask */ + +/* SCB Cache Size Selection Register Definitions */ +#define SCB_CSSELR_LEVEL_Pos 1U /*!< SCB CSSELR: Level Position */ +#define SCB_CSSELR_LEVEL_Msk (7UL << SCB_CSSELR_LEVEL_Pos) /*!< SCB CSSELR: Level Mask */ + +#define SCB_CSSELR_IND_Pos 0U /*!< SCB CSSELR: InD Position */ +#define SCB_CSSELR_IND_Msk (1UL /*<< SCB_CSSELR_IND_Pos*/) /*!< SCB CSSELR: InD Mask */ + +/* SCB Software Triggered Interrupt Register Definitions */ +#define SCB_STIR_INTID_Pos 0U /*!< SCB STIR: INTID Position */ +#define SCB_STIR_INTID_Msk (0x1FFUL /*<< SCB_STIR_INTID_Pos*/) /*!< SCB STIR: INTID Mask */ + +/* SCB D-Cache Invalidate by Set-way Register Definitions */ +#define SCB_DCISW_WAY_Pos 30U /*!< SCB DCISW: Way Position */ +#define SCB_DCISW_WAY_Msk (3UL << SCB_DCISW_WAY_Pos) /*!< SCB DCISW: Way Mask */ + +#define SCB_DCISW_SET_Pos 5U /*!< SCB DCISW: Set Position */ +#define SCB_DCISW_SET_Msk (0x1FFUL << SCB_DCISW_SET_Pos) /*!< SCB DCISW: Set Mask */ + +/* SCB D-Cache Clean by Set-way Register Definitions */ +#define SCB_DCCSW_WAY_Pos 30U /*!< SCB DCCSW: Way Position */ +#define SCB_DCCSW_WAY_Msk (3UL << SCB_DCCSW_WAY_Pos) /*!< SCB DCCSW: Way Mask */ + +#define SCB_DCCSW_SET_Pos 5U /*!< SCB DCCSW: Set Position */ +#define SCB_DCCSW_SET_Msk (0x1FFUL << SCB_DCCSW_SET_Pos) /*!< SCB DCCSW: Set Mask */ + +/* SCB D-Cache Clean and Invalidate by Set-way Register Definitions */ +#define SCB_DCCISW_WAY_Pos 30U /*!< SCB DCCISW: Way Position */ +#define SCB_DCCISW_WAY_Msk (3UL << SCB_DCCISW_WAY_Pos) /*!< SCB DCCISW: Way Mask */ + +#define SCB_DCCISW_SET_Pos 5U /*!< SCB DCCISW: Set Position */ +#define SCB_DCCISW_SET_Msk (0x1FFUL << SCB_DCCISW_SET_Pos) /*!< SCB DCCISW: Set Mask */ + +/* Instruction Tightly-Coupled Memory Control Register Definitions */ +#define SCB_ITCMCR_SZ_Pos 3U /*!< SCB ITCMCR: SZ Position */ +#define SCB_ITCMCR_SZ_Msk (0xFUL << SCB_ITCMCR_SZ_Pos) /*!< SCB ITCMCR: SZ Mask */ + +#define SCB_ITCMCR_RETEN_Pos 2U /*!< SCB ITCMCR: RETEN Position */ +#define SCB_ITCMCR_RETEN_Msk (1UL << SCB_ITCMCR_RETEN_Pos) /*!< SCB ITCMCR: RETEN Mask */ + +#define SCB_ITCMCR_RMW_Pos 1U /*!< SCB ITCMCR: RMW Position */ +#define SCB_ITCMCR_RMW_Msk (1UL << SCB_ITCMCR_RMW_Pos) /*!< SCB ITCMCR: RMW Mask */ + +#define SCB_ITCMCR_EN_Pos 0U /*!< SCB ITCMCR: EN Position */ +#define SCB_ITCMCR_EN_Msk (1UL /*<< SCB_ITCMCR_EN_Pos*/) /*!< SCB ITCMCR: EN Mask */ + +/* Data Tightly-Coupled Memory Control Register Definitions */ +#define SCB_DTCMCR_SZ_Pos 3U /*!< SCB DTCMCR: SZ Position */ +#define 
SCB_DTCMCR_SZ_Msk (0xFUL << SCB_DTCMCR_SZ_Pos) /*!< SCB DTCMCR: SZ Mask */ + +#define SCB_DTCMCR_RETEN_Pos 2U /*!< SCB DTCMCR: RETEN Position */ +#define SCB_DTCMCR_RETEN_Msk (1UL << SCB_DTCMCR_RETEN_Pos) /*!< SCB DTCMCR: RETEN Mask */ + +#define SCB_DTCMCR_RMW_Pos 1U /*!< SCB DTCMCR: RMW Position */ +#define SCB_DTCMCR_RMW_Msk (1UL << SCB_DTCMCR_RMW_Pos) /*!< SCB DTCMCR: RMW Mask */ + +#define SCB_DTCMCR_EN_Pos 0U /*!< SCB DTCMCR: EN Position */ +#define SCB_DTCMCR_EN_Msk (1UL /*<< SCB_DTCMCR_EN_Pos*/) /*!< SCB DTCMCR: EN Mask */ + +/* AHBP Control Register Definitions */ +#define SCB_AHBPCR_SZ_Pos 1U /*!< SCB AHBPCR: SZ Position */ +#define SCB_AHBPCR_SZ_Msk (7UL << SCB_AHBPCR_SZ_Pos) /*!< SCB AHBPCR: SZ Mask */ + +#define SCB_AHBPCR_EN_Pos 0U /*!< SCB AHBPCR: EN Position */ +#define SCB_AHBPCR_EN_Msk (1UL /*<< SCB_AHBPCR_EN_Pos*/) /*!< SCB AHBPCR: EN Mask */ + +/* L1 Cache Control Register Definitions */ +#define SCB_CACR_FORCEWT_Pos 2U /*!< SCB CACR: FORCEWT Position */ +#define SCB_CACR_FORCEWT_Msk (1UL << SCB_CACR_FORCEWT_Pos) /*!< SCB CACR: FORCEWT Mask */ + +#define SCB_CACR_ECCEN_Pos 1U /*!< SCB CACR: ECCEN Position */ +#define SCB_CACR_ECCEN_Msk (1UL << SCB_CACR_ECCEN_Pos) /*!< SCB CACR: ECCEN Mask */ + +#define SCB_CACR_SIWT_Pos 0U /*!< SCB CACR: SIWT Position */ +#define SCB_CACR_SIWT_Msk (1UL /*<< SCB_CACR_SIWT_Pos*/) /*!< SCB CACR: SIWT Mask */ + +/* AHBS Control Register Definitions */ +#define SCB_AHBSCR_INITCOUNT_Pos 11U /*!< SCB AHBSCR: INITCOUNT Position */ +#define SCB_AHBSCR_INITCOUNT_Msk (0x1FUL << SCB_AHBPCR_INITCOUNT_Pos) /*!< SCB AHBSCR: INITCOUNT Mask */ + +#define SCB_AHBSCR_TPRI_Pos 2U /*!< SCB AHBSCR: TPRI Position */ +#define SCB_AHBSCR_TPRI_Msk (0x1FFUL << SCB_AHBPCR_TPRI_Pos) /*!< SCB AHBSCR: TPRI Mask */ + +#define SCB_AHBSCR_CTL_Pos 0U /*!< SCB AHBSCR: CTL Position*/ +#define SCB_AHBSCR_CTL_Msk (3UL /*<< SCB_AHBPCR_CTL_Pos*/) /*!< SCB AHBSCR: CTL Mask */ + +/* Auxiliary Bus Fault Status Register Definitions */ +#define SCB_ABFSR_AXIMTYPE_Pos 8U /*!< SCB ABFSR: AXIMTYPE Position*/ +#define SCB_ABFSR_AXIMTYPE_Msk (3UL << SCB_ABFSR_AXIMTYPE_Pos) /*!< SCB ABFSR: AXIMTYPE Mask */ + +#define SCB_ABFSR_EPPB_Pos 4U /*!< SCB ABFSR: EPPB Position*/ +#define SCB_ABFSR_EPPB_Msk (1UL << SCB_ABFSR_EPPB_Pos) /*!< SCB ABFSR: EPPB Mask */ + +#define SCB_ABFSR_AXIM_Pos 3U /*!< SCB ABFSR: AXIM Position*/ +#define SCB_ABFSR_AXIM_Msk (1UL << SCB_ABFSR_AXIM_Pos) /*!< SCB ABFSR: AXIM Mask */ + +#define SCB_ABFSR_AHBP_Pos 2U /*!< SCB ABFSR: AHBP Position*/ +#define SCB_ABFSR_AHBP_Msk (1UL << SCB_ABFSR_AHBP_Pos) /*!< SCB ABFSR: AHBP Mask */ + +#define SCB_ABFSR_DTCM_Pos 1U /*!< SCB ABFSR: DTCM Position*/ +#define SCB_ABFSR_DTCM_Msk (1UL << SCB_ABFSR_DTCM_Pos) /*!< SCB ABFSR: DTCM Mask */ + +#define SCB_ABFSR_ITCM_Pos 0U /*!< SCB ABFSR: ITCM Position*/ +#define SCB_ABFSR_ITCM_Msk (1UL /*<< SCB_ABFSR_ITCM_Pos*/) /*!< SCB ABFSR: ITCM Mask */ + +/*@} end of group CMSIS_SCB */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SCnSCB System Controls not in SCB (SCnSCB) + \brief Type definitions for the System Control and ID Register not in the SCB + @{ + */ + +/** + \brief Structure type to access the System Control and ID Register not in the SCB. 
+ */ +typedef struct +{ + uint32_t RESERVED0[1U]; + __IM uint32_t ICTR; /*!< Offset: 0x004 (R/ ) Interrupt Controller Type Register */ + __IOM uint32_t ACTLR; /*!< Offset: 0x008 (R/W) Auxiliary Control Register */ + __IOM uint32_t CPPWR; /*!< Offset: 0x00C (R/W) Coprocessor Power Control Register */ +} SCnSCB_Type; + +/* Interrupt Controller Type Register Definitions */ +#define SCnSCB_ICTR_INTLINESNUM_Pos 0U /*!< ICTR: INTLINESNUM Position */ +#define SCnSCB_ICTR_INTLINESNUM_Msk (0xFUL /*<< SCnSCB_ICTR_INTLINESNUM_Pos*/) /*!< ICTR: INTLINESNUM Mask */ + +/*@} end of group CMSIS_SCnotSCB */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SysTick System Tick Timer (SysTick) + \brief Type definitions for the System Timer Registers. + @{ + */ + +/** + \brief Structure type to access the System Timer (SysTick). + */ +typedef struct +{ + __IOM uint32_t CTRL; /*!< Offset: 0x000 (R/W) SysTick Control and Status Register */ + __IOM uint32_t LOAD; /*!< Offset: 0x004 (R/W) SysTick Reload Value Register */ + __IOM uint32_t VAL; /*!< Offset: 0x008 (R/W) SysTick Current Value Register */ + __IM uint32_t CALIB; /*!< Offset: 0x00C (R/ ) SysTick Calibration Register */ +} SysTick_Type; + +/* SysTick Control / Status Register Definitions */ +#define SysTick_CTRL_COUNTFLAG_Pos 16U /*!< SysTick CTRL: COUNTFLAG Position */ +#define SysTick_CTRL_COUNTFLAG_Msk (1UL << SysTick_CTRL_COUNTFLAG_Pos) /*!< SysTick CTRL: COUNTFLAG Mask */ + +#define SysTick_CTRL_CLKSOURCE_Pos 2U /*!< SysTick CTRL: CLKSOURCE Position */ +#define SysTick_CTRL_CLKSOURCE_Msk (1UL << SysTick_CTRL_CLKSOURCE_Pos) /*!< SysTick CTRL: CLKSOURCE Mask */ + +#define SysTick_CTRL_TICKINT_Pos 1U /*!< SysTick CTRL: TICKINT Position */ +#define SysTick_CTRL_TICKINT_Msk (1UL << SysTick_CTRL_TICKINT_Pos) /*!< SysTick CTRL: TICKINT Mask */ + +#define SysTick_CTRL_ENABLE_Pos 0U /*!< SysTick CTRL: ENABLE Position */ +#define SysTick_CTRL_ENABLE_Msk (1UL /*<< SysTick_CTRL_ENABLE_Pos*/) /*!< SysTick CTRL: ENABLE Mask */ + +/* SysTick Reload Register Definitions */ +#define SysTick_LOAD_RELOAD_Pos 0U /*!< SysTick LOAD: RELOAD Position */ +#define SysTick_LOAD_RELOAD_Msk (0xFFFFFFUL /*<< SysTick_LOAD_RELOAD_Pos*/) /*!< SysTick LOAD: RELOAD Mask */ + +/* SysTick Current Register Definitions */ +#define SysTick_VAL_CURRENT_Pos 0U /*!< SysTick VAL: CURRENT Position */ +#define SysTick_VAL_CURRENT_Msk (0xFFFFFFUL /*<< SysTick_VAL_CURRENT_Pos*/) /*!< SysTick VAL: CURRENT Mask */ + +/* SysTick Calibration Register Definitions */ +#define SysTick_CALIB_NOREF_Pos 31U /*!< SysTick CALIB: NOREF Position */ +#define SysTick_CALIB_NOREF_Msk (1UL << SysTick_CALIB_NOREF_Pos) /*!< SysTick CALIB: NOREF Mask */ + +#define SysTick_CALIB_SKEW_Pos 30U /*!< SysTick CALIB: SKEW Position */ +#define SysTick_CALIB_SKEW_Msk (1UL << SysTick_CALIB_SKEW_Pos) /*!< SysTick CALIB: SKEW Mask */ + +#define SysTick_CALIB_TENMS_Pos 0U /*!< SysTick CALIB: TENMS Position */ +#define SysTick_CALIB_TENMS_Msk (0xFFFFFFUL /*<< SysTick_CALIB_TENMS_Pos*/) /*!< SysTick CALIB: TENMS Mask */ + +/*@} end of group CMSIS_SysTick */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_ITM Instrumentation Trace Macrocell (ITM) + \brief Type definitions for the Instrumentation Trace Macrocell (ITM) + @{ + */ + +/** + \brief Structure type to access the Instrumentation Trace Macrocell Register (ITM). 
+ */ +typedef struct +{ + __OM union + { + __OM uint8_t u8; /*!< Offset: 0x000 ( /W) ITM Stimulus Port 8-bit */ + __OM uint16_t u16; /*!< Offset: 0x000 ( /W) ITM Stimulus Port 16-bit */ + __OM uint32_t u32; /*!< Offset: 0x000 ( /W) ITM Stimulus Port 32-bit */ + } PORT [32U]; /*!< Offset: 0x000 ( /W) ITM Stimulus Port Registers */ + uint32_t RESERVED0[864U]; + __IOM uint32_t TER; /*!< Offset: 0xE00 (R/W) ITM Trace Enable Register */ + uint32_t RESERVED1[15U]; + __IOM uint32_t TPR; /*!< Offset: 0xE40 (R/W) ITM Trace Privilege Register */ + uint32_t RESERVED2[15U]; + __IOM uint32_t TCR; /*!< Offset: 0xE80 (R/W) ITM Trace Control Register */ + uint32_t RESERVED3[29U]; + __OM uint32_t IWR; /*!< Offset: 0xEF8 ( /W) ITM Integration Write Register */ + __IM uint32_t IRR; /*!< Offset: 0xEFC (R/ ) ITM Integration Read Register */ + __IOM uint32_t IMCR; /*!< Offset: 0xF00 (R/W) ITM Integration Mode Control Register */ + uint32_t RESERVED4[43U]; + __OM uint32_t LAR; /*!< Offset: 0xFB0 ( /W) ITM Lock Access Register */ + __IM uint32_t LSR; /*!< Offset: 0xFB4 (R/ ) ITM Lock Status Register */ + uint32_t RESERVED5[1U]; + __IM uint32_t DEVARCH; /*!< Offset: 0xFBC (R/ ) ITM Device Architecture Register */ + uint32_t RESERVED6[4U]; + __IM uint32_t PID4; /*!< Offset: 0xFD0 (R/ ) ITM Peripheral Identification Register #4 */ + __IM uint32_t PID5; /*!< Offset: 0xFD4 (R/ ) ITM Peripheral Identification Register #5 */ + __IM uint32_t PID6; /*!< Offset: 0xFD8 (R/ ) ITM Peripheral Identification Register #6 */ + __IM uint32_t PID7; /*!< Offset: 0xFDC (R/ ) ITM Peripheral Identification Register #7 */ + __IM uint32_t PID0; /*!< Offset: 0xFE0 (R/ ) ITM Peripheral Identification Register #0 */ + __IM uint32_t PID1; /*!< Offset: 0xFE4 (R/ ) ITM Peripheral Identification Register #1 */ + __IM uint32_t PID2; /*!< Offset: 0xFE8 (R/ ) ITM Peripheral Identification Register #2 */ + __IM uint32_t PID3; /*!< Offset: 0xFEC (R/ ) ITM Peripheral Identification Register #3 */ + __IM uint32_t CID0; /*!< Offset: 0xFF0 (R/ ) ITM Component Identification Register #0 */ + __IM uint32_t CID1; /*!< Offset: 0xFF4 (R/ ) ITM Component Identification Register #1 */ + __IM uint32_t CID2; /*!< Offset: 0xFF8 (R/ ) ITM Component Identification Register #2 */ + __IM uint32_t CID3; /*!< Offset: 0xFFC (R/ ) ITM Component Identification Register #3 */ +} ITM_Type; + +/* ITM Stimulus Port Register Definitions */ +#define ITM_STIM_DISABLED_Pos 1U /*!< ITM STIM: DISABLED Position */ +#define ITM_STIM_DISABLED_Msk (0x1UL << ITM_STIM_DISABLED_Pos) /*!< ITM STIM: DISABLED Mask */ + +#define ITM_STIM_FIFOREADY_Pos 0U /*!< ITM STIM: FIFOREADY Position */ +#define ITM_STIM_FIFOREADY_Msk (0x1UL /*<< ITM_STIM_FIFOREADY_Pos*/) /*!< ITM STIM: FIFOREADY Mask */ + +/* ITM Trace Privilege Register Definitions */ +#define ITM_TPR_PRIVMASK_Pos 0U /*!< ITM TPR: PRIVMASK Position */ +#define ITM_TPR_PRIVMASK_Msk (0xFUL /*<< ITM_TPR_PRIVMASK_Pos*/) /*!< ITM TPR: PRIVMASK Mask */ + +/* ITM Trace Control Register Definitions */ +#define ITM_TCR_BUSY_Pos 23U /*!< ITM TCR: BUSY Position */ +#define ITM_TCR_BUSY_Msk (1UL << ITM_TCR_BUSY_Pos) /*!< ITM TCR: BUSY Mask */ + +#define ITM_TCR_TRACEBUSID_Pos 16U /*!< ITM TCR: ATBID Position */ +#define ITM_TCR_TRACEBUSID_Msk (0x7FUL << ITM_TCR_TRACEBUSID_Pos) /*!< ITM TCR: ATBID Mask */ + +#define ITM_TCR_GTSFREQ_Pos 10U /*!< ITM TCR: Global timestamp frequency Position */ +#define ITM_TCR_GTSFREQ_Msk (3UL << ITM_TCR_GTSFREQ_Pos) /*!< ITM TCR: Global timestamp frequency Mask */ + +#define ITM_TCR_TSPRESCALE_Pos 8U /*!< ITM TCR: 
TSPRESCALE Position */ +#define ITM_TCR_TSPRESCALE_Msk (3UL << ITM_TCR_TSPRESCALE_Pos) /*!< ITM TCR: TSPRESCALE Mask */ + +#define ITM_TCR_STALLENA_Pos 5U /*!< ITM TCR: STALLENA Position */ +#define ITM_TCR_STALLENA_Msk (1UL << ITM_TCR_STALLENA_Pos) /*!< ITM TCR: STALLENA Mask */ + +#define ITM_TCR_SWOENA_Pos 4U /*!< ITM TCR: SWOENA Position */ +#define ITM_TCR_SWOENA_Msk (1UL << ITM_TCR_SWOENA_Pos) /*!< ITM TCR: SWOENA Mask */ + +#define ITM_TCR_DWTENA_Pos 3U /*!< ITM TCR: DWTENA Position */ +#define ITM_TCR_DWTENA_Msk (1UL << ITM_TCR_DWTENA_Pos) /*!< ITM TCR: DWTENA Mask */ + +#define ITM_TCR_SYNCENA_Pos 2U /*!< ITM TCR: SYNCENA Position */ +#define ITM_TCR_SYNCENA_Msk (1UL << ITM_TCR_SYNCENA_Pos) /*!< ITM TCR: SYNCENA Mask */ + +#define ITM_TCR_TSENA_Pos 1U /*!< ITM TCR: TSENA Position */ +#define ITM_TCR_TSENA_Msk (1UL << ITM_TCR_TSENA_Pos) /*!< ITM TCR: TSENA Mask */ + +#define ITM_TCR_ITMENA_Pos 0U /*!< ITM TCR: ITM Enable bit Position */ +#define ITM_TCR_ITMENA_Msk (1UL /*<< ITM_TCR_ITMENA_Pos*/) /*!< ITM TCR: ITM Enable bit Mask */ + +/* ITM Integration Write Register Definitions */ +#define ITM_IWR_ATVALIDM_Pos 0U /*!< ITM IWR: ATVALIDM Position */ +#define ITM_IWR_ATVALIDM_Msk (1UL /*<< ITM_IWR_ATVALIDM_Pos*/) /*!< ITM IWR: ATVALIDM Mask */ + +/* ITM Integration Read Register Definitions */ +#define ITM_IRR_ATREADYM_Pos 0U /*!< ITM IRR: ATREADYM Position */ +#define ITM_IRR_ATREADYM_Msk (1UL /*<< ITM_IRR_ATREADYM_Pos*/) /*!< ITM IRR: ATREADYM Mask */ + +/* ITM Integration Mode Control Register Definitions */ +#define ITM_IMCR_INTEGRATION_Pos 0U /*!< ITM IMCR: INTEGRATION Position */ +#define ITM_IMCR_INTEGRATION_Msk (1UL /*<< ITM_IMCR_INTEGRATION_Pos*/) /*!< ITM IMCR: INTEGRATION Mask */ + +/* ITM Lock Status Register Definitions */ +#define ITM_LSR_ByteAcc_Pos 2U /*!< ITM LSR: ByteAcc Position */ +#define ITM_LSR_ByteAcc_Msk (1UL << ITM_LSR_ByteAcc_Pos) /*!< ITM LSR: ByteAcc Mask */ + +#define ITM_LSR_Access_Pos 1U /*!< ITM LSR: Access Position */ +#define ITM_LSR_Access_Msk (1UL << ITM_LSR_Access_Pos) /*!< ITM LSR: Access Mask */ + +#define ITM_LSR_Present_Pos 0U /*!< ITM LSR: Present Position */ +#define ITM_LSR_Present_Msk (1UL /*<< ITM_LSR_Present_Pos*/) /*!< ITM LSR: Present Mask */ + +/*@}*/ /* end of group CMSIS_ITM */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_DWT Data Watchpoint and Trace (DWT) + \brief Type definitions for the Data Watchpoint and Trace (DWT) + @{ + */ + +/** + \brief Structure type to access the Data Watchpoint and Trace Register (DWT). 
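/*
 * Illustrative usage sketch for the ITM stimulus ports defined above; it is not part
 * of the vendored CMSIS header. It mirrors the common "send one byte over stimulus
 * port 0" pattern and assumes the debug tools have already set TCR.ITMENA and enabled
 * port 0 in ITM->TER; the parameter ch is a placeholder for the data to emit.
 */
__STATIC_INLINE void itm_putc_sketch(uint8_t ch)
{
  if (((ITM->TCR & ITM_TCR_ITMENA_Msk) != 0UL) &&   /* ITM enabled?               */
      ((ITM->TER & 1UL)                != 0UL))     /* stimulus port 0 enabled?   */
  {
    while (ITM->PORT[0U].u32 == 0UL)                /* wait until the port FIFO is ready */
    {
      __NOP();
    }
    ITM->PORT[0U].u8 = ch;                          /* 8-bit write emits one trace byte */
  }
}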
+ */ +typedef struct +{ + __IOM uint32_t CTRL; /*!< Offset: 0x000 (R/W) Control Register */ + __IOM uint32_t CYCCNT; /*!< Offset: 0x004 (R/W) Cycle Count Register */ + __IOM uint32_t CPICNT; /*!< Offset: 0x008 (R/W) CPI Count Register */ + __IOM uint32_t EXCCNT; /*!< Offset: 0x00C (R/W) Exception Overhead Count Register */ + __IOM uint32_t SLEEPCNT; /*!< Offset: 0x010 (R/W) Sleep Count Register */ + __IOM uint32_t LSUCNT; /*!< Offset: 0x014 (R/W) LSU Count Register */ + __IOM uint32_t FOLDCNT; /*!< Offset: 0x018 (R/W) Folded-instruction Count Register */ + __IM uint32_t PCSR; /*!< Offset: 0x01C (R/ ) Program Counter Sample Register */ + __IOM uint32_t COMP0; /*!< Offset: 0x020 (R/W) Comparator Register 0 */ + uint32_t RESERVED1[1U]; + __IOM uint32_t FUNCTION0; /*!< Offset: 0x028 (R/W) Function Register 0 */ + uint32_t RESERVED2[1U]; + __IOM uint32_t COMP1; /*!< Offset: 0x030 (R/W) Comparator Register 1 */ + uint32_t RESERVED3[1U]; + __IOM uint32_t FUNCTION1; /*!< Offset: 0x038 (R/W) Function Register 1 */ + uint32_t RESERVED4[1U]; + __IOM uint32_t COMP2; /*!< Offset: 0x040 (R/W) Comparator Register 2 */ + uint32_t RESERVED5[1U]; + __IOM uint32_t FUNCTION2; /*!< Offset: 0x048 (R/W) Function Register 2 */ + uint32_t RESERVED6[1U]; + __IOM uint32_t COMP3; /*!< Offset: 0x050 (R/W) Comparator Register 3 */ + uint32_t RESERVED7[1U]; + __IOM uint32_t FUNCTION3; /*!< Offset: 0x058 (R/W) Function Register 3 */ + uint32_t RESERVED8[1U]; + __IOM uint32_t COMP4; /*!< Offset: 0x060 (R/W) Comparator Register 4 */ + uint32_t RESERVED9[1U]; + __IOM uint32_t FUNCTION4; /*!< Offset: 0x068 (R/W) Function Register 4 */ + uint32_t RESERVED10[1U]; + __IOM uint32_t COMP5; /*!< Offset: 0x070 (R/W) Comparator Register 5 */ + uint32_t RESERVED11[1U]; + __IOM uint32_t FUNCTION5; /*!< Offset: 0x078 (R/W) Function Register 5 */ + uint32_t RESERVED12[1U]; + __IOM uint32_t COMP6; /*!< Offset: 0x080 (R/W) Comparator Register 6 */ + uint32_t RESERVED13[1U]; + __IOM uint32_t FUNCTION6; /*!< Offset: 0x088 (R/W) Function Register 6 */ + uint32_t RESERVED14[1U]; + __IOM uint32_t COMP7; /*!< Offset: 0x090 (R/W) Comparator Register 7 */ + uint32_t RESERVED15[1U]; + __IOM uint32_t FUNCTION7; /*!< Offset: 0x098 (R/W) Function Register 7 */ + uint32_t RESERVED16[1U]; + __IOM uint32_t COMP8; /*!< Offset: 0x0A0 (R/W) Comparator Register 8 */ + uint32_t RESERVED17[1U]; + __IOM uint32_t FUNCTION8; /*!< Offset: 0x0A8 (R/W) Function Register 8 */ + uint32_t RESERVED18[1U]; + __IOM uint32_t COMP9; /*!< Offset: 0x0B0 (R/W) Comparator Register 9 */ + uint32_t RESERVED19[1U]; + __IOM uint32_t FUNCTION9; /*!< Offset: 0x0B8 (R/W) Function Register 9 */ + uint32_t RESERVED20[1U]; + __IOM uint32_t COMP10; /*!< Offset: 0x0C0 (R/W) Comparator Register 10 */ + uint32_t RESERVED21[1U]; + __IOM uint32_t FUNCTION10; /*!< Offset: 0x0C8 (R/W) Function Register 10 */ + uint32_t RESERVED22[1U]; + __IOM uint32_t COMP11; /*!< Offset: 0x0D0 (R/W) Comparator Register 11 */ + uint32_t RESERVED23[1U]; + __IOM uint32_t FUNCTION11; /*!< Offset: 0x0D8 (R/W) Function Register 11 */ + uint32_t RESERVED24[1U]; + __IOM uint32_t COMP12; /*!< Offset: 0x0E0 (R/W) Comparator Register 12 */ + uint32_t RESERVED25[1U]; + __IOM uint32_t FUNCTION12; /*!< Offset: 0x0E8 (R/W) Function Register 12 */ + uint32_t RESERVED26[1U]; + __IOM uint32_t COMP13; /*!< Offset: 0x0F0 (R/W) Comparator Register 13 */ + uint32_t RESERVED27[1U]; + __IOM uint32_t FUNCTION13; /*!< Offset: 0x0F8 (R/W) Function Register 13 */ + uint32_t RESERVED28[1U]; + __IOM uint32_t COMP14; /*!< Offset: 0x100 
(R/W) Comparator Register 14 */ + uint32_t RESERVED29[1U]; + __IOM uint32_t FUNCTION14; /*!< Offset: 0x108 (R/W) Function Register 14 */ + uint32_t RESERVED30[1U]; + __IOM uint32_t COMP15; /*!< Offset: 0x110 (R/W) Comparator Register 15 */ + uint32_t RESERVED31[1U]; + __IOM uint32_t FUNCTION15; /*!< Offset: 0x118 (R/W) Function Register 15 */ + uint32_t RESERVED32[934U]; + __IM uint32_t LSR; /*!< Offset: 0xFB4 (R ) Lock Status Register */ + uint32_t RESERVED33[1U]; + __IM uint32_t DEVARCH; /*!< Offset: 0xFBC (R/ ) Device Architecture Register */ +} DWT_Type; + +/* DWT Control Register Definitions */ +#define DWT_CTRL_NUMCOMP_Pos 28U /*!< DWT CTRL: NUMCOMP Position */ +#define DWT_CTRL_NUMCOMP_Msk (0xFUL << DWT_CTRL_NUMCOMP_Pos) /*!< DWT CTRL: NUMCOMP Mask */ + +#define DWT_CTRL_NOTRCPKT_Pos 27U /*!< DWT CTRL: NOTRCPKT Position */ +#define DWT_CTRL_NOTRCPKT_Msk (0x1UL << DWT_CTRL_NOTRCPKT_Pos) /*!< DWT CTRL: NOTRCPKT Mask */ + +#define DWT_CTRL_NOEXTTRIG_Pos 26U /*!< DWT CTRL: NOEXTTRIG Position */ +#define DWT_CTRL_NOEXTTRIG_Msk (0x1UL << DWT_CTRL_NOEXTTRIG_Pos) /*!< DWT CTRL: NOEXTTRIG Mask */ + +#define DWT_CTRL_NOCYCCNT_Pos 25U /*!< DWT CTRL: NOCYCCNT Position */ +#define DWT_CTRL_NOCYCCNT_Msk (0x1UL << DWT_CTRL_NOCYCCNT_Pos) /*!< DWT CTRL: NOCYCCNT Mask */ + +#define DWT_CTRL_NOPRFCNT_Pos 24U /*!< DWT CTRL: NOPRFCNT Position */ +#define DWT_CTRL_NOPRFCNT_Msk (0x1UL << DWT_CTRL_NOPRFCNT_Pos) /*!< DWT CTRL: NOPRFCNT Mask */ + +#define DWT_CTRL_CYCDISS_Pos 23U /*!< DWT CTRL: CYCDISS Position */ +#define DWT_CTRL_CYCDISS_Msk (0x1UL << DWT_CTRL_CYCDISS_Pos) /*!< DWT CTRL: CYCDISS Mask */ + +#define DWT_CTRL_CYCEVTENA_Pos 22U /*!< DWT CTRL: CYCEVTENA Position */ +#define DWT_CTRL_CYCEVTENA_Msk (0x1UL << DWT_CTRL_CYCEVTENA_Pos) /*!< DWT CTRL: CYCEVTENA Mask */ + +#define DWT_CTRL_FOLDEVTENA_Pos 21U /*!< DWT CTRL: FOLDEVTENA Position */ +#define DWT_CTRL_FOLDEVTENA_Msk (0x1UL << DWT_CTRL_FOLDEVTENA_Pos) /*!< DWT CTRL: FOLDEVTENA Mask */ + +#define DWT_CTRL_LSUEVTENA_Pos 20U /*!< DWT CTRL: LSUEVTENA Position */ +#define DWT_CTRL_LSUEVTENA_Msk (0x1UL << DWT_CTRL_LSUEVTENA_Pos) /*!< DWT CTRL: LSUEVTENA Mask */ + +#define DWT_CTRL_SLEEPEVTENA_Pos 19U /*!< DWT CTRL: SLEEPEVTENA Position */ +#define DWT_CTRL_SLEEPEVTENA_Msk (0x1UL << DWT_CTRL_SLEEPEVTENA_Pos) /*!< DWT CTRL: SLEEPEVTENA Mask */ + +#define DWT_CTRL_EXCEVTENA_Pos 18U /*!< DWT CTRL: EXCEVTENA Position */ +#define DWT_CTRL_EXCEVTENA_Msk (0x1UL << DWT_CTRL_EXCEVTENA_Pos) /*!< DWT CTRL: EXCEVTENA Mask */ + +#define DWT_CTRL_CPIEVTENA_Pos 17U /*!< DWT CTRL: CPIEVTENA Position */ +#define DWT_CTRL_CPIEVTENA_Msk (0x1UL << DWT_CTRL_CPIEVTENA_Pos) /*!< DWT CTRL: CPIEVTENA Mask */ + +#define DWT_CTRL_EXCTRCENA_Pos 16U /*!< DWT CTRL: EXCTRCENA Position */ +#define DWT_CTRL_EXCTRCENA_Msk (0x1UL << DWT_CTRL_EXCTRCENA_Pos) /*!< DWT CTRL: EXCTRCENA Mask */ + +#define DWT_CTRL_PCSAMPLENA_Pos 12U /*!< DWT CTRL: PCSAMPLENA Position */ +#define DWT_CTRL_PCSAMPLENA_Msk (0x1UL << DWT_CTRL_PCSAMPLENA_Pos) /*!< DWT CTRL: PCSAMPLENA Mask */ + +#define DWT_CTRL_SYNCTAP_Pos 10U /*!< DWT CTRL: SYNCTAP Position */ +#define DWT_CTRL_SYNCTAP_Msk (0x3UL << DWT_CTRL_SYNCTAP_Pos) /*!< DWT CTRL: SYNCTAP Mask */ + +#define DWT_CTRL_CYCTAP_Pos 9U /*!< DWT CTRL: CYCTAP Position */ +#define DWT_CTRL_CYCTAP_Msk (0x1UL << DWT_CTRL_CYCTAP_Pos) /*!< DWT CTRL: CYCTAP Mask */ + +#define DWT_CTRL_POSTINIT_Pos 5U /*!< DWT CTRL: POSTINIT Position */ +#define DWT_CTRL_POSTINIT_Msk (0xFUL << DWT_CTRL_POSTINIT_Pos) /*!< DWT CTRL: POSTINIT Mask */ + +#define DWT_CTRL_POSTPRESET_Pos 1U 
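/*
 * Illustrative usage sketch, not part of the header: cycle-accurate timing with the
 * DWT cycle counter. It uses DWT_CTRL_CYCCNTENA_Msk / DWT_CTRL_NOCYCCNT_Msk defined
 * in this group and CoreDebug_DEMCR_TRCENA_Msk defined further down in this file;
 * do_work() stands in for whatever code is being measured.
 */
extern void do_work(void);

__STATIC_INLINE uint32_t cycles_for_do_work_sketch(void)
{
  uint32_t start;

  CoreDebug->DEMCR |= CoreDebug_DEMCR_TRCENA_Msk;   /* power up the trace/debug blocks (DWT, ITM) */
  if ((DWT->CTRL & DWT_CTRL_NOCYCCNT_Msk) != 0UL)
  {
    return 0UL;                                     /* cycle counter not implemented on this core */
  }
  DWT->CYCCNT = 0UL;
  DWT->CTRL  |= DWT_CTRL_CYCCNTENA_Msk;             /* start counting core clock cycles */

  start = DWT->CYCCNT;
  do_work();                                        /* hypothetical workload */
  return DWT->CYCCNT - start;                       /* unsigned subtraction tolerates one wrap */
}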
/*!< DWT CTRL: POSTPRESET Position */ +#define DWT_CTRL_POSTPRESET_Msk (0xFUL << DWT_CTRL_POSTPRESET_Pos) /*!< DWT CTRL: POSTPRESET Mask */ + +#define DWT_CTRL_CYCCNTENA_Pos 0U /*!< DWT CTRL: CYCCNTENA Position */ +#define DWT_CTRL_CYCCNTENA_Msk (0x1UL /*<< DWT_CTRL_CYCCNTENA_Pos*/) /*!< DWT CTRL: CYCCNTENA Mask */ + +/* DWT CPI Count Register Definitions */ +#define DWT_CPICNT_CPICNT_Pos 0U /*!< DWT CPICNT: CPICNT Position */ +#define DWT_CPICNT_CPICNT_Msk (0xFFUL /*<< DWT_CPICNT_CPICNT_Pos*/) /*!< DWT CPICNT: CPICNT Mask */ + +/* DWT Exception Overhead Count Register Definitions */ +#define DWT_EXCCNT_EXCCNT_Pos 0U /*!< DWT EXCCNT: EXCCNT Position */ +#define DWT_EXCCNT_EXCCNT_Msk (0xFFUL /*<< DWT_EXCCNT_EXCCNT_Pos*/) /*!< DWT EXCCNT: EXCCNT Mask */ + +/* DWT Sleep Count Register Definitions */ +#define DWT_SLEEPCNT_SLEEPCNT_Pos 0U /*!< DWT SLEEPCNT: SLEEPCNT Position */ +#define DWT_SLEEPCNT_SLEEPCNT_Msk (0xFFUL /*<< DWT_SLEEPCNT_SLEEPCNT_Pos*/) /*!< DWT SLEEPCNT: SLEEPCNT Mask */ + +/* DWT LSU Count Register Definitions */ +#define DWT_LSUCNT_LSUCNT_Pos 0U /*!< DWT LSUCNT: LSUCNT Position */ +#define DWT_LSUCNT_LSUCNT_Msk (0xFFUL /*<< DWT_LSUCNT_LSUCNT_Pos*/) /*!< DWT LSUCNT: LSUCNT Mask */ + +/* DWT Folded-instruction Count Register Definitions */ +#define DWT_FOLDCNT_FOLDCNT_Pos 0U /*!< DWT FOLDCNT: FOLDCNT Position */ +#define DWT_FOLDCNT_FOLDCNT_Msk (0xFFUL /*<< DWT_FOLDCNT_FOLDCNT_Pos*/) /*!< DWT FOLDCNT: FOLDCNT Mask */ + +/* DWT Comparator Function Register Definitions */ +#define DWT_FUNCTION_ID_Pos 27U /*!< DWT FUNCTION: ID Position */ +#define DWT_FUNCTION_ID_Msk (0x1FUL << DWT_FUNCTION_ID_Pos) /*!< DWT FUNCTION: ID Mask */ + +#define DWT_FUNCTION_MATCHED_Pos 24U /*!< DWT FUNCTION: MATCHED Position */ +#define DWT_FUNCTION_MATCHED_Msk (0x1UL << DWT_FUNCTION_MATCHED_Pos) /*!< DWT FUNCTION: MATCHED Mask */ + +#define DWT_FUNCTION_DATAVSIZE_Pos 10U /*!< DWT FUNCTION: DATAVSIZE Position */ +#define DWT_FUNCTION_DATAVSIZE_Msk (0x3UL << DWT_FUNCTION_DATAVSIZE_Pos) /*!< DWT FUNCTION: DATAVSIZE Mask */ + +#define DWT_FUNCTION_ACTION_Pos 4U /*!< DWT FUNCTION: ACTION Position */ +#define DWT_FUNCTION_ACTION_Msk (0x1UL << DWT_FUNCTION_ACTION_Pos) /*!< DWT FUNCTION: ACTION Mask */ + +#define DWT_FUNCTION_MATCH_Pos 0U /*!< DWT FUNCTION: MATCH Position */ +#define DWT_FUNCTION_MATCH_Msk (0xFUL /*<< DWT_FUNCTION_MATCH_Pos*/) /*!< DWT FUNCTION: MATCH Mask */ + +/*@}*/ /* end of group CMSIS_DWT */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_TPI Trace Port Interface (TPI) + \brief Type definitions for the Trace Port Interface (TPI) + @{ + */ + +/** + \brief Structure type to access the Trace Port Interface Register (TPI). 
+ */ +typedef struct +{ + __IM uint32_t SSPSR; /*!< Offset: 0x000 (R/ ) Supported Parallel Port Sizes Register */ + __IOM uint32_t CSPSR; /*!< Offset: 0x004 (R/W) Current Parallel Port Sizes Register */ + uint32_t RESERVED0[2U]; + __IOM uint32_t ACPR; /*!< Offset: 0x010 (R/W) Asynchronous Clock Prescaler Register */ + uint32_t RESERVED1[55U]; + __IOM uint32_t SPPR; /*!< Offset: 0x0F0 (R/W) Selected Pin Protocol Register */ + uint32_t RESERVED2[131U]; + __IM uint32_t FFSR; /*!< Offset: 0x300 (R/ ) Formatter and Flush Status Register */ + __IOM uint32_t FFCR; /*!< Offset: 0x304 (R/W) Formatter and Flush Control Register */ + __IOM uint32_t PSCR; /*!< Offset: 0x308 (R/W) Periodic Synchronization Control Register */ + uint32_t RESERVED3[809U]; + __OM uint32_t LAR; /*!< Offset: 0xFB0 ( /W) Software Lock Access Register */ + __IM uint32_t LSR; /*!< Offset: 0xFB4 (R/ ) Software Lock Status Register */ + uint32_t RESERVED4[4U]; + __IM uint32_t TYPE; /*!< Offset: 0xFC8 (R/ ) Device Identifier Register */ + __IM uint32_t DEVTYPE; /*!< Offset: 0xFCC (R/ ) Device Type Register */ +} TPI_Type; + +/* TPI Asynchronous Clock Prescaler Register Definitions */ +#define TPI_ACPR_SWOSCALER_Pos 0U /*!< TPI ACPR: SWOSCALER Position */ +#define TPI_ACPR_SWOSCALER_Msk (0xFFFFUL /*<< TPI_ACPR_SWOSCALER_Pos*/) /*!< TPI ACPR: SWOSCALER Mask */ + +/* TPI Selected Pin Protocol Register Definitions */ +#define TPI_SPPR_TXMODE_Pos 0U /*!< TPI SPPR: TXMODE Position */ +#define TPI_SPPR_TXMODE_Msk (0x3UL /*<< TPI_SPPR_TXMODE_Pos*/) /*!< TPI SPPR: TXMODE Mask */ + +/* TPI Formatter and Flush Status Register Definitions */ +#define TPI_FFSR_FtNonStop_Pos 3U /*!< TPI FFSR: FtNonStop Position */ +#define TPI_FFSR_FtNonStop_Msk (0x1UL << TPI_FFSR_FtNonStop_Pos) /*!< TPI FFSR: FtNonStop Mask */ + +#define TPI_FFSR_TCPresent_Pos 2U /*!< TPI FFSR: TCPresent Position */ +#define TPI_FFSR_TCPresent_Msk (0x1UL << TPI_FFSR_TCPresent_Pos) /*!< TPI FFSR: TCPresent Mask */ + +#define TPI_FFSR_FtStopped_Pos 1U /*!< TPI FFSR: FtStopped Position */ +#define TPI_FFSR_FtStopped_Msk (0x1UL << TPI_FFSR_FtStopped_Pos) /*!< TPI FFSR: FtStopped Mask */ + +#define TPI_FFSR_FlInProg_Pos 0U /*!< TPI FFSR: FlInProg Position */ +#define TPI_FFSR_FlInProg_Msk (0x1UL /*<< TPI_FFSR_FlInProg_Pos*/) /*!< TPI FFSR: FlInProg Mask */ + +/* TPI Formatter and Flush Control Register Definitions */ +#define TPI_FFCR_TrigIn_Pos 8U /*!< TPI FFCR: TrigIn Position */ +#define TPI_FFCR_TrigIn_Msk (0x1UL << TPI_FFCR_TrigIn_Pos) /*!< TPI FFCR: TrigIn Mask */ + +#define TPI_FFCR_FOnMan_Pos 6U /*!< TPI FFCR: FOnMan Position */ +#define TPI_FFCR_FOnMan_Msk (0x1UL << TPI_FFCR_FOnMan_Pos) /*!< TPI FFCR: FOnMan Mask */ + +#define TPI_FFCR_EnFCont_Pos 1U /*!< TPI FFCR: EnFCont Position */ +#define TPI_FFCR_EnFCont_Msk (0x1UL << TPI_FFCR_EnFCont_Pos) /*!< TPI FFCR: EnFCont Mask */ + +/* TPI Periodic Synchronization Control Register Definitions */ +#define TPI_PSCR_PSCount_Pos 0U /*!< TPI PSCR: PSCount Position */ +#define TPI_PSCR_PSCount_Msk (0x1FUL /*<< TPI_PSCR_PSCount_Pos*/) /*!< TPI PSCR: TPSCount Mask */ + +/* TPI Software Lock Status Register Definitions */ +#define TPI_LSR_nTT_Pos 1U /*!< TPI LSR: Not thirty-two bit. Position */ +#define TPI_LSR_nTT_Msk (0x1UL << TPI_LSR_nTT_Pos) /*!< TPI LSR: Not thirty-two bit. 
Mask */ + +#define TPI_LSR_SLK_Pos 1U /*!< TPI LSR: Software Lock status Position */ +#define TPI_LSR_SLK_Msk (0x1UL << TPI_LSR_SLK_Pos) /*!< TPI LSR: Software Lock status Mask */ + +#define TPI_LSR_SLI_Pos 0U /*!< TPI LSR: Software Lock implemented Position */ +#define TPI_LSR_SLI_Msk (0x1UL /*<< TPI_LSR_SLI_Pos*/) /*!< TPI LSR: Software Lock implemented Mask */ + +/* TPI DEVID Register Definitions */ +#define TPI_DEVID_NRZVALID_Pos 11U /*!< TPI DEVID: NRZVALID Position */ +#define TPI_DEVID_NRZVALID_Msk (0x1UL << TPI_DEVID_NRZVALID_Pos) /*!< TPI DEVID: NRZVALID Mask */ + +#define TPI_DEVID_MANCVALID_Pos 10U /*!< TPI DEVID: MANCVALID Position */ +#define TPI_DEVID_MANCVALID_Msk (0x1UL << TPI_DEVID_MANCVALID_Pos) /*!< TPI DEVID: MANCVALID Mask */ + +#define TPI_DEVID_PTINVALID_Pos 9U /*!< TPI DEVID: PTINVALID Position */ +#define TPI_DEVID_PTINVALID_Msk (0x1UL << TPI_DEVID_PTINVALID_Pos) /*!< TPI DEVID: PTINVALID Mask */ + +#define TPI_DEVID_FIFOSZ_Pos 6U /*!< TPI DEVID: FIFO depth Position */ +#define TPI_DEVID_FIFOSZ_Msk (0x7UL << TPI_DEVID_FIFOSZ_Pos) /*!< TPI DEVID: FIFO depth Mask */ + +/* TPI DEVTYPE Register Definitions */ +#define TPI_DEVTYPE_SubType_Pos 4U /*!< TPI DEVTYPE: SubType Position */ +#define TPI_DEVTYPE_SubType_Msk (0xFUL /*<< TPI_DEVTYPE_SubType_Pos*/) /*!< TPI DEVTYPE: SubType Mask */ + +#define TPI_DEVTYPE_MajorType_Pos 0U /*!< TPI DEVTYPE: MajorType Position */ +#define TPI_DEVTYPE_MajorType_Msk (0xFUL << TPI_DEVTYPE_MajorType_Pos) /*!< TPI DEVTYPE: MajorType Mask */ + +/*@}*/ /* end of group CMSIS_TPI */ + + +#if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_MPU Memory Protection Unit (MPU) + \brief Type definitions for the Memory Protection Unit (MPU) + @{ + */ + +/** + \brief Structure type to access the Memory Protection Unit (MPU). 
+ */ +typedef struct +{ + __IM uint32_t TYPE; /*!< Offset: 0x000 (R/ ) MPU Type Register */ + __IOM uint32_t CTRL; /*!< Offset: 0x004 (R/W) MPU Control Register */ + __IOM uint32_t RNR; /*!< Offset: 0x008 (R/W) MPU Region Number Register */ + __IOM uint32_t RBAR; /*!< Offset: 0x00C (R/W) MPU Region Base Address Register */ + __IOM uint32_t RLAR; /*!< Offset: 0x010 (R/W) MPU Region Limit Address Register */ + __IOM uint32_t RBAR_A1; /*!< Offset: 0x014 (R/W) MPU Region Base Address Register Alias 1 */ + __IOM uint32_t RLAR_A1; /*!< Offset: 0x018 (R/W) MPU Region Limit Address Register Alias 1 */ + __IOM uint32_t RBAR_A2; /*!< Offset: 0x01C (R/W) MPU Region Base Address Register Alias 2 */ + __IOM uint32_t RLAR_A2; /*!< Offset: 0x020 (R/W) MPU Region Limit Address Register Alias 2 */ + __IOM uint32_t RBAR_A3; /*!< Offset: 0x024 (R/W) MPU Region Base Address Register Alias 3 */ + __IOM uint32_t RLAR_A3; /*!< Offset: 0x028 (R/W) MPU Region Limit Address Register Alias 3 */ + uint32_t RESERVED0[1]; + union { + __IOM uint32_t MAIR[2]; + struct { + __IOM uint32_t MAIR0; /*!< Offset: 0x030 (R/W) MPU Memory Attribute Indirection Register 0 */ + __IOM uint32_t MAIR1; /*!< Offset: 0x034 (R/W) MPU Memory Attribute Indirection Register 1 */ + }; + }; +} MPU_Type; + +#define MPU_TYPE_RALIASES 4U + +/* MPU Type Register Definitions */ +#define MPU_TYPE_IREGION_Pos 16U /*!< MPU TYPE: IREGION Position */ +#define MPU_TYPE_IREGION_Msk (0xFFUL << MPU_TYPE_IREGION_Pos) /*!< MPU TYPE: IREGION Mask */ + +#define MPU_TYPE_DREGION_Pos 8U /*!< MPU TYPE: DREGION Position */ +#define MPU_TYPE_DREGION_Msk (0xFFUL << MPU_TYPE_DREGION_Pos) /*!< MPU TYPE: DREGION Mask */ + +#define MPU_TYPE_SEPARATE_Pos 0U /*!< MPU TYPE: SEPARATE Position */ +#define MPU_TYPE_SEPARATE_Msk (1UL /*<< MPU_TYPE_SEPARATE_Pos*/) /*!< MPU TYPE: SEPARATE Mask */ + +/* MPU Control Register Definitions */ +#define MPU_CTRL_PRIVDEFENA_Pos 2U /*!< MPU CTRL: PRIVDEFENA Position */ +#define MPU_CTRL_PRIVDEFENA_Msk (1UL << MPU_CTRL_PRIVDEFENA_Pos) /*!< MPU CTRL: PRIVDEFENA Mask */ + +#define MPU_CTRL_HFNMIENA_Pos 1U /*!< MPU CTRL: HFNMIENA Position */ +#define MPU_CTRL_HFNMIENA_Msk (1UL << MPU_CTRL_HFNMIENA_Pos) /*!< MPU CTRL: HFNMIENA Mask */ + +#define MPU_CTRL_ENABLE_Pos 0U /*!< MPU CTRL: ENABLE Position */ +#define MPU_CTRL_ENABLE_Msk (1UL /*<< MPU_CTRL_ENABLE_Pos*/) /*!< MPU CTRL: ENABLE Mask */ + +/* MPU Region Number Register Definitions */ +#define MPU_RNR_REGION_Pos 0U /*!< MPU RNR: REGION Position */ +#define MPU_RNR_REGION_Msk (0xFFUL /*<< MPU_RNR_REGION_Pos*/) /*!< MPU RNR: REGION Mask */ + +/* MPU Region Base Address Register Definitions */ +#define MPU_RBAR_BASE_Pos 5U /*!< MPU RBAR: BASE Position */ +#define MPU_RBAR_BASE_Msk (0x7FFFFFFUL << MPU_RBAR_BASE_Pos) /*!< MPU RBAR: BASE Mask */ + +#define MPU_RBAR_SH_Pos 3U /*!< MPU RBAR: SH Position */ +#define MPU_RBAR_SH_Msk (0x3UL << MPU_RBAR_SH_Pos) /*!< MPU RBAR: SH Mask */ + +#define MPU_RBAR_AP_Pos 1U /*!< MPU RBAR: AP Position */ +#define MPU_RBAR_AP_Msk (0x3UL << MPU_RBAR_AP_Pos) /*!< MPU RBAR: AP Mask */ + +#define MPU_RBAR_XN_Pos 0U /*!< MPU RBAR: XN Position */ +#define MPU_RBAR_XN_Msk (01UL /*<< MPU_RBAR_XN_Pos*/) /*!< MPU RBAR: XN Mask */ + +/* MPU Region Limit Address Register Definitions */ +#define MPU_RLAR_LIMIT_Pos 5U /*!< MPU RLAR: LIMIT Position */ +#define MPU_RLAR_LIMIT_Msk (0x7FFFFFFUL << MPU_RLAR_LIMIT_Pos) /*!< MPU RLAR: LIMIT Mask */ + +#define MPU_RLAR_AttrIndx_Pos 1U /*!< MPU RLAR: AttrIndx Position */ +#define MPU_RLAR_AttrIndx_Msk (0x7UL << MPU_RLAR_AttrIndx_Pos) 
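/*
 * Illustrative sketch, not part of the header: programming one PMSAv8 MPU region
 * through the raw registers defined in this group. The base/limit arguments, the
 * use of region 0, and the 0xFF attribute (normal memory, write-back cacheable)
 * are assumptions for the example; projects typically use the ARM_MPU_* helpers.
 */
__STATIC_INLINE void mpu_region0_sketch(uint32_t base, uint32_t limit)
{
  MPU->CTRL  = 0UL;                                            /* disable the MPU while reprogramming */
  __DSB();
  __ISB();

  MPU->MAIR0 = (0xFFUL << MPU_MAIR0_Attr0_Pos);                /* Attr0: normal memory, write-back */
  MPU->RNR   = 0UL;                                            /* select region 0 */
  MPU->RBAR  = (base  & MPU_RBAR_BASE_Msk)                     /* base address bits [31:5] */
             | (1UL << MPU_RBAR_AP_Pos)                        /* read/write at any privilege level */
             | MPU_RBAR_XN_Msk;                                /* no instruction fetches */
  MPU->RLAR  = (limit & MPU_RLAR_LIMIT_Msk)                    /* limit address bits [31:5] */
             | (0UL << MPU_RLAR_AttrIndx_Pos)                  /* use MAIR Attr0 */
             | MPU_RLAR_EN_Msk;                                /* enable the region */

  MPU->CTRL  = MPU_CTRL_PRIVDEFENA_Msk | MPU_CTRL_ENABLE_Msk;  /* enable MPU, keep default map for privileged code */
  __DSB();
  __ISB();
}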
/*!< MPU RLAR: AttrIndx Mask */ + +#define MPU_RLAR_EN_Pos 0U /*!< MPU RLAR: Region enable bit Position */ +#define MPU_RLAR_EN_Msk (1UL /*<< MPU_RLAR_EN_Pos*/) /*!< MPU RLAR: Region enable bit Disable Mask */ + +/* MPU Memory Attribute Indirection Register 0 Definitions */ +#define MPU_MAIR0_Attr3_Pos 24U /*!< MPU MAIR0: Attr3 Position */ +#define MPU_MAIR0_Attr3_Msk (0xFFUL << MPU_MAIR0_Attr3_Pos) /*!< MPU MAIR0: Attr3 Mask */ + +#define MPU_MAIR0_Attr2_Pos 16U /*!< MPU MAIR0: Attr2 Position */ +#define MPU_MAIR0_Attr2_Msk (0xFFUL << MPU_MAIR0_Attr2_Pos) /*!< MPU MAIR0: Attr2 Mask */ + +#define MPU_MAIR0_Attr1_Pos 8U /*!< MPU MAIR0: Attr1 Position */ +#define MPU_MAIR0_Attr1_Msk (0xFFUL << MPU_MAIR0_Attr1_Pos) /*!< MPU MAIR0: Attr1 Mask */ + +#define MPU_MAIR0_Attr0_Pos 0U /*!< MPU MAIR0: Attr0 Position */ +#define MPU_MAIR0_Attr0_Msk (0xFFUL /*<< MPU_MAIR0_Attr0_Pos*/) /*!< MPU MAIR0: Attr0 Mask */ + +/* MPU Memory Attribute Indirection Register 1 Definitions */ +#define MPU_MAIR1_Attr7_Pos 24U /*!< MPU MAIR1: Attr7 Position */ +#define MPU_MAIR1_Attr7_Msk (0xFFUL << MPU_MAIR1_Attr7_Pos) /*!< MPU MAIR1: Attr7 Mask */ + +#define MPU_MAIR1_Attr6_Pos 16U /*!< MPU MAIR1: Attr6 Position */ +#define MPU_MAIR1_Attr6_Msk (0xFFUL << MPU_MAIR1_Attr6_Pos) /*!< MPU MAIR1: Attr6 Mask */ + +#define MPU_MAIR1_Attr5_Pos 8U /*!< MPU MAIR1: Attr5 Position */ +#define MPU_MAIR1_Attr5_Msk (0xFFUL << MPU_MAIR1_Attr5_Pos) /*!< MPU MAIR1: Attr5 Mask */ + +#define MPU_MAIR1_Attr4_Pos 0U /*!< MPU MAIR1: Attr4 Position */ +#define MPU_MAIR1_Attr4_Msk (0xFFUL /*<< MPU_MAIR1_Attr4_Pos*/) /*!< MPU MAIR1: Attr4 Mask */ + +/*@} end of group CMSIS_MPU */ +#endif + + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SAU Security Attribution Unit (SAU) + \brief Type definitions for the Security Attribution Unit (SAU) + @{ + */ + +/** + \brief Structure type to access the Security Attribution Unit (SAU). 
+ */ +typedef struct +{ + __IOM uint32_t CTRL; /*!< Offset: 0x000 (R/W) SAU Control Register */ + __IM uint32_t TYPE; /*!< Offset: 0x004 (R/ ) SAU Type Register */ +#if defined (__SAUREGION_PRESENT) && (__SAUREGION_PRESENT == 1U) + __IOM uint32_t RNR; /*!< Offset: 0x008 (R/W) SAU Region Number Register */ + __IOM uint32_t RBAR; /*!< Offset: 0x00C (R/W) SAU Region Base Address Register */ + __IOM uint32_t RLAR; /*!< Offset: 0x010 (R/W) SAU Region Limit Address Register */ +#else + uint32_t RESERVED0[3]; +#endif + __IOM uint32_t SFSR; /*!< Offset: 0x014 (R/W) Secure Fault Status Register */ + __IOM uint32_t SFAR; /*!< Offset: 0x018 (R/W) Secure Fault Address Register */ +} SAU_Type; + +/* SAU Control Register Definitions */ +#define SAU_CTRL_ALLNS_Pos 1U /*!< SAU CTRL: ALLNS Position */ +#define SAU_CTRL_ALLNS_Msk (1UL << SAU_CTRL_ALLNS_Pos) /*!< SAU CTRL: ALLNS Mask */ + +#define SAU_CTRL_ENABLE_Pos 0U /*!< SAU CTRL: ENABLE Position */ +#define SAU_CTRL_ENABLE_Msk (1UL /*<< SAU_CTRL_ENABLE_Pos*/) /*!< SAU CTRL: ENABLE Mask */ + +/* SAU Type Register Definitions */ +#define SAU_TYPE_SREGION_Pos 0U /*!< SAU TYPE: SREGION Position */ +#define SAU_TYPE_SREGION_Msk (0xFFUL /*<< SAU_TYPE_SREGION_Pos*/) /*!< SAU TYPE: SREGION Mask */ + +#if defined (__SAUREGION_PRESENT) && (__SAUREGION_PRESENT == 1U) +/* SAU Region Number Register Definitions */ +#define SAU_RNR_REGION_Pos 0U /*!< SAU RNR: REGION Position */ +#define SAU_RNR_REGION_Msk (0xFFUL /*<< SAU_RNR_REGION_Pos*/) /*!< SAU RNR: REGION Mask */ + +/* SAU Region Base Address Register Definitions */ +#define SAU_RBAR_BADDR_Pos 5U /*!< SAU RBAR: BADDR Position */ +#define SAU_RBAR_BADDR_Msk (0x7FFFFFFUL << SAU_RBAR_BADDR_Pos) /*!< SAU RBAR: BADDR Mask */ + +/* SAU Region Limit Address Register Definitions */ +#define SAU_RLAR_LADDR_Pos 5U /*!< SAU RLAR: LADDR Position */ +#define SAU_RLAR_LADDR_Msk (0x7FFFFFFUL << SAU_RLAR_LADDR_Pos) /*!< SAU RLAR: LADDR Mask */ + +#define SAU_RLAR_NSC_Pos 1U /*!< SAU RLAR: NSC Position */ +#define SAU_RLAR_NSC_Msk (1UL << SAU_RLAR_NSC_Pos) /*!< SAU RLAR: NSC Mask */ + +#define SAU_RLAR_ENABLE_Pos 0U /*!< SAU RLAR: ENABLE Position */ +#define SAU_RLAR_ENABLE_Msk (1UL /*<< SAU_RLAR_ENABLE_Pos*/) /*!< SAU RLAR: ENABLE Mask */ + +#endif /* defined (__SAUREGION_PRESENT) && (__SAUREGION_PRESENT == 1U) */ + +/* Secure Fault Status Register Definitions */ +#define SAU_SFSR_LSERR_Pos 7U /*!< SAU SFSR: LSERR Position */ +#define SAU_SFSR_LSERR_Msk (1UL << SAU_SFSR_LSERR_Pos) /*!< SAU SFSR: LSERR Mask */ + +#define SAU_SFSR_SFARVALID_Pos 6U /*!< SAU SFSR: SFARVALID Position */ +#define SAU_SFSR_SFARVALID_Msk (1UL << SAU_SFSR_SFARVALID_Pos) /*!< SAU SFSR: SFARVALID Mask */ + +#define SAU_SFSR_LSPERR_Pos 5U /*!< SAU SFSR: LSPERR Position */ +#define SAU_SFSR_LSPERR_Msk (1UL << SAU_SFSR_LSPERR_Pos) /*!< SAU SFSR: LSPERR Mask */ + +#define SAU_SFSR_INVTRAN_Pos 4U /*!< SAU SFSR: INVTRAN Position */ +#define SAU_SFSR_INVTRAN_Msk (1UL << SAU_SFSR_INVTRAN_Pos) /*!< SAU SFSR: INVTRAN Mask */ + +#define SAU_SFSR_AUVIOL_Pos 3U /*!< SAU SFSR: AUVIOL Position */ +#define SAU_SFSR_AUVIOL_Msk (1UL << SAU_SFSR_AUVIOL_Pos) /*!< SAU SFSR: AUVIOL Mask */ + +#define SAU_SFSR_INVER_Pos 2U /*!< SAU SFSR: INVER Position */ +#define SAU_SFSR_INVER_Msk (1UL << SAU_SFSR_INVER_Pos) /*!< SAU SFSR: INVER Mask */ + +#define SAU_SFSR_INVIS_Pos 1U /*!< SAU SFSR: INVIS Position */ +#define SAU_SFSR_INVIS_Msk (1UL << SAU_SFSR_INVIS_Pos) /*!< SAU SFSR: INVIS Mask */ + +#define SAU_SFSR_INVEP_Pos 0U /*!< SAU SFSR: INVEP Position */ +#define 
SAU_SFSR_INVEP_Msk (1UL /*<< SAU_SFSR_INVEP_Pos*/) /*!< SAU SFSR: INVEP Mask */ + +/*@} end of group CMSIS_SAU */ +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_FPU Floating Point Unit (FPU) + \brief Type definitions for the Floating Point Unit (FPU) + @{ + */ + +/** + \brief Structure type to access the Floating Point Unit (FPU). + */ +typedef struct +{ + uint32_t RESERVED0[1U]; + __IOM uint32_t FPCCR; /*!< Offset: 0x004 (R/W) Floating-Point Context Control Register */ + __IOM uint32_t FPCAR; /*!< Offset: 0x008 (R/W) Floating-Point Context Address Register */ + __IOM uint32_t FPDSCR; /*!< Offset: 0x00C (R/W) Floating-Point Default Status Control Register */ + __IM uint32_t MVFR0; /*!< Offset: 0x010 (R/ ) Media and FP Feature Register 0 */ + __IM uint32_t MVFR1; /*!< Offset: 0x014 (R/ ) Media and FP Feature Register 1 */ +} FPU_Type; + +/* Floating-Point Context Control Register Definitions */ +#define FPU_FPCCR_ASPEN_Pos 31U /*!< FPCCR: ASPEN bit Position */ +#define FPU_FPCCR_ASPEN_Msk (1UL << FPU_FPCCR_ASPEN_Pos) /*!< FPCCR: ASPEN bit Mask */ + +#define FPU_FPCCR_LSPEN_Pos 30U /*!< FPCCR: LSPEN Position */ +#define FPU_FPCCR_LSPEN_Msk (1UL << FPU_FPCCR_LSPEN_Pos) /*!< FPCCR: LSPEN bit Mask */ + +#define FPU_FPCCR_LSPENS_Pos 29U /*!< FPCCR: LSPENS Position */ +#define FPU_FPCCR_LSPENS_Msk (1UL << FPU_FPCCR_LSPENS_Pos) /*!< FPCCR: LSPENS bit Mask */ + +#define FPU_FPCCR_CLRONRET_Pos 28U /*!< FPCCR: CLRONRET Position */ +#define FPU_FPCCR_CLRONRET_Msk (1UL << FPU_FPCCR_CLRONRET_Pos) /*!< FPCCR: CLRONRET bit Mask */ + +#define FPU_FPCCR_CLRONRETS_Pos 27U /*!< FPCCR: CLRONRETS Position */ +#define FPU_FPCCR_CLRONRETS_Msk (1UL << FPU_FPCCR_CLRONRETS_Pos) /*!< FPCCR: CLRONRETS bit Mask */ + +#define FPU_FPCCR_TS_Pos 26U /*!< FPCCR: TS Position */ +#define FPU_FPCCR_TS_Msk (1UL << FPU_FPCCR_TS_Pos) /*!< FPCCR: TS bit Mask */ + +#define FPU_FPCCR_UFRDY_Pos 10U /*!< FPCCR: UFRDY Position */ +#define FPU_FPCCR_UFRDY_Msk (1UL << FPU_FPCCR_UFRDY_Pos) /*!< FPCCR: UFRDY bit Mask */ + +#define FPU_FPCCR_SPLIMVIOL_Pos 9U /*!< FPCCR: SPLIMVIOL Position */ +#define FPU_FPCCR_SPLIMVIOL_Msk (1UL << FPU_FPCCR_SPLIMVIOL_Pos) /*!< FPCCR: SPLIMVIOL bit Mask */ + +#define FPU_FPCCR_MONRDY_Pos 8U /*!< FPCCR: MONRDY Position */ +#define FPU_FPCCR_MONRDY_Msk (1UL << FPU_FPCCR_MONRDY_Pos) /*!< FPCCR: MONRDY bit Mask */ + +#define FPU_FPCCR_SFRDY_Pos 7U /*!< FPCCR: SFRDY Position */ +#define FPU_FPCCR_SFRDY_Msk (1UL << FPU_FPCCR_SFRDY_Pos) /*!< FPCCR: SFRDY bit Mask */ + +#define FPU_FPCCR_BFRDY_Pos 6U /*!< FPCCR: BFRDY Position */ +#define FPU_FPCCR_BFRDY_Msk (1UL << FPU_FPCCR_BFRDY_Pos) /*!< FPCCR: BFRDY bit Mask */ + +#define FPU_FPCCR_MMRDY_Pos 5U /*!< FPCCR: MMRDY Position */ +#define FPU_FPCCR_MMRDY_Msk (1UL << FPU_FPCCR_MMRDY_Pos) /*!< FPCCR: MMRDY bit Mask */ + +#define FPU_FPCCR_HFRDY_Pos 4U /*!< FPCCR: HFRDY Position */ +#define FPU_FPCCR_HFRDY_Msk (1UL << FPU_FPCCR_HFRDY_Pos) /*!< FPCCR: HFRDY bit Mask */ + +#define FPU_FPCCR_THREAD_Pos 3U /*!< FPCCR: processor mode bit Position */ +#define FPU_FPCCR_THREAD_Msk (1UL << FPU_FPCCR_THREAD_Pos) /*!< FPCCR: processor mode active bit Mask */ + +#define FPU_FPCCR_S_Pos 2U /*!< FPCCR: Security status of the FP context bit Position */ +#define FPU_FPCCR_S_Msk (1UL << FPU_FPCCR_S_Pos) /*!< FPCCR: Security status of the FP context bit Mask */ + +#define FPU_FPCCR_USER_Pos 1U /*!< FPCCR: privilege level bit Position */ +#define FPU_FPCCR_USER_Msk (1UL << FPU_FPCCR_USER_Pos) /*!< FPCCR: 
privilege level bit Mask */ + +#define FPU_FPCCR_LSPACT_Pos 0U /*!< FPCCR: Lazy state preservation active bit Position */ +#define FPU_FPCCR_LSPACT_Msk (1UL /*<< FPU_FPCCR_LSPACT_Pos*/) /*!< FPCCR: Lazy state preservation active bit Mask */ + +/* Floating-Point Context Address Register Definitions */ +#define FPU_FPCAR_ADDRESS_Pos 3U /*!< FPCAR: ADDRESS bit Position */ +#define FPU_FPCAR_ADDRESS_Msk (0x1FFFFFFFUL << FPU_FPCAR_ADDRESS_Pos) /*!< FPCAR: ADDRESS bit Mask */ + +/* Floating-Point Default Status Control Register Definitions */ +#define FPU_FPDSCR_AHP_Pos 26U /*!< FPDSCR: AHP bit Position */ +#define FPU_FPDSCR_AHP_Msk (1UL << FPU_FPDSCR_AHP_Pos) /*!< FPDSCR: AHP bit Mask */ + +#define FPU_FPDSCR_DN_Pos 25U /*!< FPDSCR: DN bit Position */ +#define FPU_FPDSCR_DN_Msk (1UL << FPU_FPDSCR_DN_Pos) /*!< FPDSCR: DN bit Mask */ + +#define FPU_FPDSCR_FZ_Pos 24U /*!< FPDSCR: FZ bit Position */ +#define FPU_FPDSCR_FZ_Msk (1UL << FPU_FPDSCR_FZ_Pos) /*!< FPDSCR: FZ bit Mask */ + +#define FPU_FPDSCR_RMode_Pos 22U /*!< FPDSCR: RMode bit Position */ +#define FPU_FPDSCR_RMode_Msk (3UL << FPU_FPDSCR_RMode_Pos) /*!< FPDSCR: RMode bit Mask */ + +/* Media and FP Feature Register 0 Definitions */ +#define FPU_MVFR0_FP_rounding_modes_Pos 28U /*!< MVFR0: FP rounding modes bits Position */ +#define FPU_MVFR0_FP_rounding_modes_Msk (0xFUL << FPU_MVFR0_FP_rounding_modes_Pos) /*!< MVFR0: FP rounding modes bits Mask */ + +#define FPU_MVFR0_Short_vectors_Pos 24U /*!< MVFR0: Short vectors bits Position */ +#define FPU_MVFR0_Short_vectors_Msk (0xFUL << FPU_MVFR0_Short_vectors_Pos) /*!< MVFR0: Short vectors bits Mask */ + +#define FPU_MVFR0_Square_root_Pos 20U /*!< MVFR0: Square root bits Position */ +#define FPU_MVFR0_Square_root_Msk (0xFUL << FPU_MVFR0_Square_root_Pos) /*!< MVFR0: Square root bits Mask */ + +#define FPU_MVFR0_Divide_Pos 16U /*!< MVFR0: Divide bits Position */ +#define FPU_MVFR0_Divide_Msk (0xFUL << FPU_MVFR0_Divide_Pos) /*!< MVFR0: Divide bits Mask */ + +#define FPU_MVFR0_FP_excep_trapping_Pos 12U /*!< MVFR0: FP exception trapping bits Position */ +#define FPU_MVFR0_FP_excep_trapping_Msk (0xFUL << FPU_MVFR0_FP_excep_trapping_Pos) /*!< MVFR0: FP exception trapping bits Mask */ + +#define FPU_MVFR0_Double_precision_Pos 8U /*!< MVFR0: Double-precision bits Position */ +#define FPU_MVFR0_Double_precision_Msk (0xFUL << FPU_MVFR0_Double_precision_Pos) /*!< MVFR0: Double-precision bits Mask */ + +#define FPU_MVFR0_Single_precision_Pos 4U /*!< MVFR0: Single-precision bits Position */ +#define FPU_MVFR0_Single_precision_Msk (0xFUL << FPU_MVFR0_Single_precision_Pos) /*!< MVFR0: Single-precision bits Mask */ + +#define FPU_MVFR0_A_SIMD_registers_Pos 0U /*!< MVFR0: A_SIMD registers bits Position */ +#define FPU_MVFR0_A_SIMD_registers_Msk (0xFUL /*<< FPU_MVFR0_A_SIMD_registers_Pos*/) /*!< MVFR0: A_SIMD registers bits Mask */ + +/* Media and FP Feature Register 1 Definitions */ +#define FPU_MVFR1_FP_fused_MAC_Pos 28U /*!< MVFR1: FP fused MAC bits Position */ +#define FPU_MVFR1_FP_fused_MAC_Msk (0xFUL << FPU_MVFR1_FP_fused_MAC_Pos) /*!< MVFR1: FP fused MAC bits Mask */ + +#define FPU_MVFR1_FP_HPFP_Pos 24U /*!< MVFR1: FP HPFP bits Position */ +#define FPU_MVFR1_FP_HPFP_Msk (0xFUL << FPU_MVFR1_FP_HPFP_Pos) /*!< MVFR1: FP HPFP bits Mask */ + +#define FPU_MVFR1_D_NaN_mode_Pos 4U /*!< MVFR1: D_NaN mode bits Position */ +#define FPU_MVFR1_D_NaN_mode_Msk (0xFUL << FPU_MVFR1_D_NaN_mode_Pos) /*!< MVFR1: D_NaN mode bits Mask */ + +#define FPU_MVFR1_FtZ_mode_Pos 0U /*!< MVFR1: FtZ mode bits Position */ +#define 
FPU_MVFR1_FtZ_mode_Msk (0xFUL /*<< FPU_MVFR1_FtZ_mode_Pos*/) /*!< MVFR1: FtZ mode bits Mask */ + +/*@} end of group CMSIS_FPU */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_CoreDebug Core Debug Registers (CoreDebug) + \brief Type definitions for the Core Debug Registers + @{ + */ + +/** + \brief Structure type to access the Core Debug Register (CoreDebug). + */ +typedef struct +{ + __IOM uint32_t DHCSR; /*!< Offset: 0x000 (R/W) Debug Halting Control and Status Register */ + __OM uint32_t DCRSR; /*!< Offset: 0x004 ( /W) Debug Core Register Selector Register */ + __IOM uint32_t DCRDR; /*!< Offset: 0x008 (R/W) Debug Core Register Data Register */ + __IOM uint32_t DEMCR; /*!< Offset: 0x00C (R/W) Debug Exception and Monitor Control Register */ + uint32_t RESERVED4[1U]; + __IOM uint32_t DAUTHCTRL; /*!< Offset: 0x014 (R/W) Debug Authentication Control Register */ + __IOM uint32_t DSCSR; /*!< Offset: 0x018 (R/W) Debug Security Control and Status Register */ +} CoreDebug_Type; + +/* Debug Halting Control and Status Register Definitions */ +#define CoreDebug_DHCSR_DBGKEY_Pos 16U /*!< CoreDebug DHCSR: DBGKEY Position */ +#define CoreDebug_DHCSR_DBGKEY_Msk (0xFFFFUL << CoreDebug_DHCSR_DBGKEY_Pos) /*!< CoreDebug DHCSR: DBGKEY Mask */ + +#define CoreDebug_DHCSR_S_RESTART_ST_Pos 26U /*!< CoreDebug DHCSR: S_RESTART_ST Position */ +#define CoreDebug_DHCSR_S_RESTART_ST_Msk (1UL << CoreDebug_DHCSR_S_RESTART_ST_Pos) /*!< CoreDebug DHCSR: S_RESTART_ST Mask */ + +#define CoreDebug_DHCSR_S_RESET_ST_Pos 25U /*!< CoreDebug DHCSR: S_RESET_ST Position */ +#define CoreDebug_DHCSR_S_RESET_ST_Msk (1UL << CoreDebug_DHCSR_S_RESET_ST_Pos) /*!< CoreDebug DHCSR: S_RESET_ST Mask */ + +#define CoreDebug_DHCSR_S_RETIRE_ST_Pos 24U /*!< CoreDebug DHCSR: S_RETIRE_ST Position */ +#define CoreDebug_DHCSR_S_RETIRE_ST_Msk (1UL << CoreDebug_DHCSR_S_RETIRE_ST_Pos) /*!< CoreDebug DHCSR: S_RETIRE_ST Mask */ + +#define CoreDebug_DHCSR_S_LOCKUP_Pos 19U /*!< CoreDebug DHCSR: S_LOCKUP Position */ +#define CoreDebug_DHCSR_S_LOCKUP_Msk (1UL << CoreDebug_DHCSR_S_LOCKUP_Pos) /*!< CoreDebug DHCSR: S_LOCKUP Mask */ + +#define CoreDebug_DHCSR_S_SLEEP_Pos 18U /*!< CoreDebug DHCSR: S_SLEEP Position */ +#define CoreDebug_DHCSR_S_SLEEP_Msk (1UL << CoreDebug_DHCSR_S_SLEEP_Pos) /*!< CoreDebug DHCSR: S_SLEEP Mask */ + +#define CoreDebug_DHCSR_S_HALT_Pos 17U /*!< CoreDebug DHCSR: S_HALT Position */ +#define CoreDebug_DHCSR_S_HALT_Msk (1UL << CoreDebug_DHCSR_S_HALT_Pos) /*!< CoreDebug DHCSR: S_HALT Mask */ + +#define CoreDebug_DHCSR_S_REGRDY_Pos 16U /*!< CoreDebug DHCSR: S_REGRDY Position */ +#define CoreDebug_DHCSR_S_REGRDY_Msk (1UL << CoreDebug_DHCSR_S_REGRDY_Pos) /*!< CoreDebug DHCSR: S_REGRDY Mask */ + +#define CoreDebug_DHCSR_C_SNAPSTALL_Pos 5U /*!< CoreDebug DHCSR: C_SNAPSTALL Position */ +#define CoreDebug_DHCSR_C_SNAPSTALL_Msk (1UL << CoreDebug_DHCSR_C_SNAPSTALL_Pos) /*!< CoreDebug DHCSR: C_SNAPSTALL Mask */ + +#define CoreDebug_DHCSR_C_MASKINTS_Pos 3U /*!< CoreDebug DHCSR: C_MASKINTS Position */ +#define CoreDebug_DHCSR_C_MASKINTS_Msk (1UL << CoreDebug_DHCSR_C_MASKINTS_Pos) /*!< CoreDebug DHCSR: C_MASKINTS Mask */ + +#define CoreDebug_DHCSR_C_STEP_Pos 2U /*!< CoreDebug DHCSR: C_STEP Position */ +#define CoreDebug_DHCSR_C_STEP_Msk (1UL << CoreDebug_DHCSR_C_STEP_Pos) /*!< CoreDebug DHCSR: C_STEP Mask */ + +#define CoreDebug_DHCSR_C_HALT_Pos 1U /*!< CoreDebug DHCSR: C_HALT Position */ +#define CoreDebug_DHCSR_C_HALT_Msk (1UL << CoreDebug_DHCSR_C_HALT_Pos) /*!< CoreDebug DHCSR: C_HALT Mask */ + +#define CoreDebug_DHCSR_C_DEBUGEN_Pos 0U 
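/*
 * Illustrative sketch, not part of the header: DHCSR.C_DEBUGEN indicates whether a
 * debugger has enabled halting debug, which is a common way to choose between a
 * breakpoint and a reset in an error handler. The handler below and its policy are
 * assumptions for the example only.
 */
__STATIC_INLINE void fatal_error_sketch(void)
{
  if ((CoreDebug->DHCSR & CoreDebug_DHCSR_C_DEBUGEN_Msk) != 0UL)
  {
    __BKPT(0);             /* a debugger is attached: halt here for inspection */
  }
  else
  {
    NVIC_SystemReset();    /* no debugger: restart the system */
  }
}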
/*!< CoreDebug DHCSR: C_DEBUGEN Position */ +#define CoreDebug_DHCSR_C_DEBUGEN_Msk (1UL /*<< CoreDebug_DHCSR_C_DEBUGEN_Pos*/) /*!< CoreDebug DHCSR: C_DEBUGEN Mask */ + +/* Debug Core Register Selector Register Definitions */ +#define CoreDebug_DCRSR_REGWnR_Pos 16U /*!< CoreDebug DCRSR: REGWnR Position */ +#define CoreDebug_DCRSR_REGWnR_Msk (1UL << CoreDebug_DCRSR_REGWnR_Pos) /*!< CoreDebug DCRSR: REGWnR Mask */ + +#define CoreDebug_DCRSR_REGSEL_Pos 0U /*!< CoreDebug DCRSR: REGSEL Position */ +#define CoreDebug_DCRSR_REGSEL_Msk (0x1FUL /*<< CoreDebug_DCRSR_REGSEL_Pos*/) /*!< CoreDebug DCRSR: REGSEL Mask */ + +/* Debug Exception and Monitor Control Register Definitions */ +#define CoreDebug_DEMCR_TRCENA_Pos 24U /*!< CoreDebug DEMCR: TRCENA Position */ +#define CoreDebug_DEMCR_TRCENA_Msk (1UL << CoreDebug_DEMCR_TRCENA_Pos) /*!< CoreDebug DEMCR: TRCENA Mask */ + +#define CoreDebug_DEMCR_MON_REQ_Pos 19U /*!< CoreDebug DEMCR: MON_REQ Position */ +#define CoreDebug_DEMCR_MON_REQ_Msk (1UL << CoreDebug_DEMCR_MON_REQ_Pos) /*!< CoreDebug DEMCR: MON_REQ Mask */ + +#define CoreDebug_DEMCR_MON_STEP_Pos 18U /*!< CoreDebug DEMCR: MON_STEP Position */ +#define CoreDebug_DEMCR_MON_STEP_Msk (1UL << CoreDebug_DEMCR_MON_STEP_Pos) /*!< CoreDebug DEMCR: MON_STEP Mask */ + +#define CoreDebug_DEMCR_MON_PEND_Pos 17U /*!< CoreDebug DEMCR: MON_PEND Position */ +#define CoreDebug_DEMCR_MON_PEND_Msk (1UL << CoreDebug_DEMCR_MON_PEND_Pos) /*!< CoreDebug DEMCR: MON_PEND Mask */ + +#define CoreDebug_DEMCR_MON_EN_Pos 16U /*!< CoreDebug DEMCR: MON_EN Position */ +#define CoreDebug_DEMCR_MON_EN_Msk (1UL << CoreDebug_DEMCR_MON_EN_Pos) /*!< CoreDebug DEMCR: MON_EN Mask */ + +#define CoreDebug_DEMCR_VC_HARDERR_Pos 10U /*!< CoreDebug DEMCR: VC_HARDERR Position */ +#define CoreDebug_DEMCR_VC_HARDERR_Msk (1UL << CoreDebug_DEMCR_VC_HARDERR_Pos) /*!< CoreDebug DEMCR: VC_HARDERR Mask */ + +#define CoreDebug_DEMCR_VC_INTERR_Pos 9U /*!< CoreDebug DEMCR: VC_INTERR Position */ +#define CoreDebug_DEMCR_VC_INTERR_Msk (1UL << CoreDebug_DEMCR_VC_INTERR_Pos) /*!< CoreDebug DEMCR: VC_INTERR Mask */ + +#define CoreDebug_DEMCR_VC_BUSERR_Pos 8U /*!< CoreDebug DEMCR: VC_BUSERR Position */ +#define CoreDebug_DEMCR_VC_BUSERR_Msk (1UL << CoreDebug_DEMCR_VC_BUSERR_Pos) /*!< CoreDebug DEMCR: VC_BUSERR Mask */ + +#define CoreDebug_DEMCR_VC_STATERR_Pos 7U /*!< CoreDebug DEMCR: VC_STATERR Position */ +#define CoreDebug_DEMCR_VC_STATERR_Msk (1UL << CoreDebug_DEMCR_VC_STATERR_Pos) /*!< CoreDebug DEMCR: VC_STATERR Mask */ + +#define CoreDebug_DEMCR_VC_CHKERR_Pos 6U /*!< CoreDebug DEMCR: VC_CHKERR Position */ +#define CoreDebug_DEMCR_VC_CHKERR_Msk (1UL << CoreDebug_DEMCR_VC_CHKERR_Pos) /*!< CoreDebug DEMCR: VC_CHKERR Mask */ + +#define CoreDebug_DEMCR_VC_NOCPERR_Pos 5U /*!< CoreDebug DEMCR: VC_NOCPERR Position */ +#define CoreDebug_DEMCR_VC_NOCPERR_Msk (1UL << CoreDebug_DEMCR_VC_NOCPERR_Pos) /*!< CoreDebug DEMCR: VC_NOCPERR Mask */ + +#define CoreDebug_DEMCR_VC_MMERR_Pos 4U /*!< CoreDebug DEMCR: VC_MMERR Position */ +#define CoreDebug_DEMCR_VC_MMERR_Msk (1UL << CoreDebug_DEMCR_VC_MMERR_Pos) /*!< CoreDebug DEMCR: VC_MMERR Mask */ + +#define CoreDebug_DEMCR_VC_CORERESET_Pos 0U /*!< CoreDebug DEMCR: VC_CORERESET Position */ +#define CoreDebug_DEMCR_VC_CORERESET_Msk (1UL /*<< CoreDebug_DEMCR_VC_CORERESET_Pos*/) /*!< CoreDebug DEMCR: VC_CORERESET Mask */ + +/* Debug Authentication Control Register Definitions */ +#define CoreDebug_DAUTHCTRL_INTSPNIDEN_Pos 3U /*!< CoreDebug DAUTHCTRL: INTSPNIDEN, Position */ +#define CoreDebug_DAUTHCTRL_INTSPNIDEN_Msk (1UL << 
CoreDebug_DAUTHCTRL_INTSPNIDEN_Pos) /*!< CoreDebug DAUTHCTRL: INTSPNIDEN, Mask */ + +#define CoreDebug_DAUTHCTRL_SPNIDENSEL_Pos 2U /*!< CoreDebug DAUTHCTRL: SPNIDENSEL Position */ +#define CoreDebug_DAUTHCTRL_SPNIDENSEL_Msk (1UL << CoreDebug_DAUTHCTRL_SPNIDENSEL_Pos) /*!< CoreDebug DAUTHCTRL: SPNIDENSEL Mask */ + +#define CoreDebug_DAUTHCTRL_INTSPIDEN_Pos 1U /*!< CoreDebug DAUTHCTRL: INTSPIDEN Position */ +#define CoreDebug_DAUTHCTRL_INTSPIDEN_Msk (1UL << CoreDebug_DAUTHCTRL_INTSPIDEN_Pos) /*!< CoreDebug DAUTHCTRL: INTSPIDEN Mask */ + +#define CoreDebug_DAUTHCTRL_SPIDENSEL_Pos 0U /*!< CoreDebug DAUTHCTRL: SPIDENSEL Position */ +#define CoreDebug_DAUTHCTRL_SPIDENSEL_Msk (1UL /*<< CoreDebug_DAUTHCTRL_SPIDENSEL_Pos*/) /*!< CoreDebug DAUTHCTRL: SPIDENSEL Mask */ + +/* Debug Security Control and Status Register Definitions */ +#define CoreDebug_DSCSR_CDS_Pos 16U /*!< CoreDebug DSCSR: CDS Position */ +#define CoreDebug_DSCSR_CDS_Msk (1UL << CoreDebug_DSCSR_CDS_Pos) /*!< CoreDebug DSCSR: CDS Mask */ + +#define CoreDebug_DSCSR_SBRSEL_Pos 1U /*!< CoreDebug DSCSR: SBRSEL Position */ +#define CoreDebug_DSCSR_SBRSEL_Msk (1UL << CoreDebug_DSCSR_SBRSEL_Pos) /*!< CoreDebug DSCSR: SBRSEL Mask */ + +#define CoreDebug_DSCSR_SBRSELEN_Pos 0U /*!< CoreDebug DSCSR: SBRSELEN Position */ +#define CoreDebug_DSCSR_SBRSELEN_Msk (1UL /*<< CoreDebug_DSCSR_SBRSELEN_Pos*/) /*!< CoreDebug DSCSR: SBRSELEN Mask */ + +/*@} end of group CMSIS_CoreDebug */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_core_bitfield Core register bit field macros + \brief Macros for use with bit field definitions (xxx_Pos, xxx_Msk). + @{ + */ + +/** + \brief Mask and shift a bit field value for use in a register bit range. + \param[in] field Name of the register bit field. + \param[in] value Value of the bit field. This parameter is interpreted as a uint32_t type. + \return Masked and shifted value. +*/ +#define _VAL2FLD(field, value) (((uint32_t)(value) << field ## _Pos) & field ## _Msk) + +/** + \brief Mask and shift a register value to extract a bit field value. + \param[in] field Name of the register bit field. + \param[in] value Value of the register. This parameter is interpreted as a uint32_t type. + \return Masked and shifted bit field value. +*/ +#define _FLD2VAL(field, value) (((uint32_t)(value) & field ## _Msk) >> field ## _Pos) + +/*@} end of group CMSIS_core_bitfield */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_core_base Core Definitions + \brief Definitions for base addresses, unions, and structures.

+ @{ + */ + +/* Memory mapping of Core Hardware */ + #define SCS_BASE (0xE000E000UL) /*!< System Control Space Base Address */ + #define ITM_BASE (0xE0000000UL) /*!< ITM Base Address */ + #define DWT_BASE (0xE0001000UL) /*!< DWT Base Address */ + #define TPI_BASE (0xE0040000UL) /*!< TPI Base Address */ + #define CoreDebug_BASE (0xE000EDF0UL) /*!< Core Debug Base Address */ + #define SysTick_BASE (SCS_BASE + 0x0010UL) /*!< SysTick Base Address */ + #define NVIC_BASE (SCS_BASE + 0x0100UL) /*!< NVIC Base Address */ + #define SCB_BASE (SCS_BASE + 0x0D00UL) /*!< System Control Block Base Address */ + + #define SCnSCB ((SCnSCB_Type *) SCS_BASE ) /*!< System control Register not in SCB */ + #define SCB ((SCB_Type *) SCB_BASE ) /*!< SCB configuration struct */ + #define SysTick ((SysTick_Type *) SysTick_BASE ) /*!< SysTick configuration struct */ + #define NVIC ((NVIC_Type *) NVIC_BASE ) /*!< NVIC configuration struct */ + #define ITM ((ITM_Type *) ITM_BASE ) /*!< ITM configuration struct */ + #define DWT ((DWT_Type *) DWT_BASE ) /*!< DWT configuration struct */ + #define TPI ((TPI_Type *) TPI_BASE ) /*!< TPI configuration struct */ + #define CoreDebug ((CoreDebug_Type *) CoreDebug_BASE ) /*!< Core Debug configuration struct */ + + #if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) + #define MPU_BASE (SCS_BASE + 0x0D90UL) /*!< Memory Protection Unit */ + #define MPU ((MPU_Type *) MPU_BASE ) /*!< Memory Protection Unit */ + #endif + + #if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) + #define SAU_BASE (SCS_BASE + 0x0DD0UL) /*!< Security Attribution Unit */ + #define SAU ((SAU_Type *) SAU_BASE ) /*!< Security Attribution Unit */ + #endif + + #define FPU_BASE (SCS_BASE + 0x0F30UL) /*!< Floating Point Unit */ + #define FPU ((FPU_Type *) FPU_BASE ) /*!< Floating Point Unit */ + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) + #define SCS_BASE_NS (0xE002E000UL) /*!< System Control Space Base Address (non-secure address space) */ + #define CoreDebug_BASE_NS (0xE002EDF0UL) /*!< Core Debug Base Address (non-secure address space) */ + #define SysTick_BASE_NS (SCS_BASE_NS + 0x0010UL) /*!< SysTick Base Address (non-secure address space) */ + #define NVIC_BASE_NS (SCS_BASE_NS + 0x0100UL) /*!< NVIC Base Address (non-secure address space) */ + #define SCB_BASE_NS (SCS_BASE_NS + 0x0D00UL) /*!< System Control Block Base Address (non-secure address space) */ + + #define SCnSCB_NS ((SCnSCB_Type *) SCS_BASE_NS ) /*!< System control Register not in SCB(non-secure address space) */ + #define SCB_NS ((SCB_Type *) SCB_BASE_NS ) /*!< SCB configuration struct (non-secure address space) */ + #define SysTick_NS ((SysTick_Type *) SysTick_BASE_NS ) /*!< SysTick configuration struct (non-secure address space) */ + #define NVIC_NS ((NVIC_Type *) NVIC_BASE_NS ) /*!< NVIC configuration struct (non-secure address space) */ + #define CoreDebug_NS ((CoreDebug_Type *) CoreDebug_BASE_NS) /*!< Core Debug configuration struct (non-secure address space) */ + + #if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) + #define MPU_BASE_NS (SCS_BASE_NS + 0x0D90UL) /*!< Memory Protection Unit (non-secure address space) */ + #define MPU_NS ((MPU_Type *) MPU_BASE_NS ) /*!< Memory Protection Unit (non-secure address space) */ + #endif + + #define FPU_BASE_NS (SCS_BASE_NS + 0x0F30UL) /*!< Floating Point Unit (non-secure address space) */ + #define FPU_NS ((FPU_Type *) FPU_BASE_NS ) /*!< Floating Point Unit (non-secure address space) */ + +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ +/*@} 
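/*
 * Illustrative sketch, not part of the header: the base-address macros above give
 * direct access to the core peripherals, here a 1 ms SysTick configured with the
 * bit masks defined earlier in this file. core_clock_hz is a placeholder for the
 * real core clock frequency; the _FLD2VAL() line demonstrates the generic bit-field
 * helper on SCB->AIRCR.
 */
__STATIC_INLINE void systick_1ms_sketch(uint32_t core_clock_hz)
{
  uint32_t prigroup;

  SysTick->LOAD = ((core_clock_hz / 1000UL) - 1UL) & SysTick_LOAD_RELOAD_Msk;
  SysTick->VAL  = 0UL;                                  /* clear current value and COUNTFLAG */
  SysTick->CTRL = SysTick_CTRL_CLKSOURCE_Msk            /* clock from the processor clock */
                | SysTick_CTRL_TICKINT_Msk              /* raise the SysTick exception on wrap */
                | SysTick_CTRL_ENABLE_Msk;

  prigroup = _FLD2VAL(SCB_AIRCR_PRIGROUP, SCB->AIRCR);  /* extract the PRIGROUP field */
  (void)prigroup;
}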
*/ + + + +/******************************************************************************* + * Hardware Abstraction Layer + Core Function Interface contains: + - Core NVIC Functions + - Core SysTick Functions + - Core Debug Functions + - Core Register Access Functions + ******************************************************************************/ +/** + \defgroup CMSIS_Core_FunctionInterface Functions and Instructions Reference +*/ + + + +/* ########################## NVIC functions #################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_NVICFunctions NVIC Functions + \brief Functions that manage interrupts and exceptions via the NVIC. + @{ + */ + +#ifdef CMSIS_NVIC_VIRTUAL + #ifndef CMSIS_NVIC_VIRTUAL_HEADER_FILE + #define CMSIS_NVIC_VIRTUAL_HEADER_FILE "cmsis_nvic_virtual.h" + #endif + #include CMSIS_NVIC_VIRTUAL_HEADER_FILE +#else + #define NVIC_SetPriorityGrouping __NVIC_SetPriorityGrouping + #define NVIC_GetPriorityGrouping __NVIC_GetPriorityGrouping + #define NVIC_EnableIRQ __NVIC_EnableIRQ + #define NVIC_GetEnableIRQ __NVIC_GetEnableIRQ + #define NVIC_DisableIRQ __NVIC_DisableIRQ + #define NVIC_GetPendingIRQ __NVIC_GetPendingIRQ + #define NVIC_SetPendingIRQ __NVIC_SetPendingIRQ + #define NVIC_ClearPendingIRQ __NVIC_ClearPendingIRQ + #define NVIC_GetActive __NVIC_GetActive + #define NVIC_SetPriority __NVIC_SetPriority + #define NVIC_GetPriority __NVIC_GetPriority + #define NVIC_SystemReset __NVIC_SystemReset +#endif /* CMSIS_NVIC_VIRTUAL */ + +#ifdef CMSIS_VECTAB_VIRTUAL + #ifndef CMSIS_VECTAB_VIRTUAL_HEADER_FILE + #define CMSIS_VECTAB_VIRTUAL_HEADER_FILE "cmsis_vectab_virtual.h" + #endif + #include CMSIS_VECTAB_VIRTUAL_HEADER_FILE +#else + #define NVIC_SetVector __NVIC_SetVector + #define NVIC_GetVector __NVIC_GetVector +#endif /* (CMSIS_VECTAB_VIRTUAL) */ + +#define NVIC_USER_IRQ_OFFSET 16 + + +/* Special LR values for Secure/Non-Secure call handling and exception handling */ + +/* Function Return Payload (from ARMv8-M Architecture Reference Manual) LR value on entry from Secure BLXNS */ +#define FNC_RETURN (0xFEFFFFFFUL) /* bit [0] ignored when processing a branch */ + +/* The following EXC_RETURN mask values are used to evaluate the LR on exception entry */ +#define EXC_RETURN_PREFIX (0xFF000000UL) /* bits [31:24] set to indicate an EXC_RETURN value */ +#define EXC_RETURN_S (0x00000040UL) /* bit [6] stack used to push registers: 0=Non-secure 1=Secure */ +#define EXC_RETURN_DCRS (0x00000020UL) /* bit [5] stacking rules for called registers: 0=skipped 1=saved */ +#define EXC_RETURN_FTYPE (0x00000010UL) /* bit [4] allocate stack for floating-point context: 0=done 1=skipped */ +#define EXC_RETURN_MODE (0x00000008UL) /* bit [3] processor mode for return: 0=Handler mode 1=Thread mode */ +#define EXC_RETURN_SPSEL (0x00000002UL) /* bit [1] stack pointer used to restore context: 0=MSP 1=PSP */ +#define EXC_RETURN_ES (0x00000001UL) /* bit [0] security state exception was taken to: 0=Non-secure 1=Secure */ + +/* Integrity Signature (from ARMv8-M Architecture Reference Manual) for exception context stacking */ +#if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) /* Value for processors with floating-point extension: */ +#define EXC_INTEGRITY_SIGNATURE (0xFEFA125AUL) /* bit [0] SFTC must match LR bit[4] EXC_RETURN_FTYPE */ +#else +#define EXC_INTEGRITY_SIGNATURE (0xFEFA125BUL) /* Value for processors without floating-point extension */ +#endif + + +/** + \brief Set Priority Grouping + \details Sets the priority grouping field using the 
required unlock sequence. + The parameter PriorityGroup is assigned to the field SCB->AIRCR [10:8] PRIGROUP field. + Only values from 0..7 are used. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS), the smallest possible priority group is set. + \param [in] PriorityGroup Priority grouping field. + */ +__STATIC_INLINE void __NVIC_SetPriorityGrouping(uint32_t PriorityGroup) +{ + uint32_t reg_value; + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + + reg_value = SCB->AIRCR; /* read old register configuration */ + reg_value &= ~((uint32_t)(SCB_AIRCR_VECTKEY_Msk | SCB_AIRCR_PRIGROUP_Msk)); /* clear bits to change */ + reg_value = (reg_value | + ((uint32_t)0x5FAUL << SCB_AIRCR_VECTKEY_Pos) | + (PriorityGroupTmp << 8U) ); /* Insert write key and priorty group */ + SCB->AIRCR = reg_value; +} + + +/** + \brief Get Priority Grouping + \details Reads the priority grouping field from the NVIC Interrupt Controller. + \return Priority grouping field (SCB->AIRCR [10:8] PRIGROUP field). + */ +__STATIC_INLINE uint32_t __NVIC_GetPriorityGrouping(void) +{ + return ((uint32_t)((SCB->AIRCR & SCB_AIRCR_PRIGROUP_Msk) >> SCB_AIRCR_PRIGROUP_Pos)); +} + + +/** + \brief Enable Interrupt + \details Enables a device specific interrupt in the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_EnableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ISER[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Get Interrupt Enable status + \details Returns a device specific interrupt enable status from the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt is not enabled. + \return 1 Interrupt is enabled. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t __NVIC_GetEnableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->ISER[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Disable Interrupt + \details Disables a device specific interrupt in the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_DisableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ICER[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + __DSB(); + __ISB(); + } +} + + +/** + \brief Get Pending Interrupt + \details Reads the NVIC pending register and returns the pending bit for the specified device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt status is not pending. + \return 1 Interrupt status is pending. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t __NVIC_GetPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->ISPR[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Set Pending Interrupt + \details Sets the pending bit of a device specific interrupt in the NVIC pending register. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. 
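+  \par     Example (editorial sketch, not part of the original CMSIS text)
+           Pends an interrupt from software so that its handler runs as soon as
+           priority allows. EXTI0_IRQn is a placeholder; use an IRQn_Type value
+           from the actual device header.
+  \code
+           NVIC_EnableIRQ(EXTI0_IRQn);       /* EXTI0_IRQn is a placeholder IRQ number */
+           NVIC_SetPendingIRQ(EXTI0_IRQn);   /* pend the interrupt from software */
+  \endcode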
+ */ +__STATIC_INLINE void __NVIC_SetPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ISPR[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Clear Pending Interrupt + \details Clears the pending bit of a device specific interrupt in the NVIC pending register. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_ClearPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ICPR[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Get Active Interrupt + \details Reads the active register in the NVIC and returns the active bit for the device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt status is not active. + \return 1 Interrupt status is active. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t __NVIC_GetActive(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->IABR[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \brief Get Interrupt Target State + \details Reads the interrupt target field in the NVIC and returns the interrupt target bit for the device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 if interrupt is assigned to Secure + \return 1 if interrupt is assigned to Non Secure + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t NVIC_GetTargetState(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->ITNS[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Set Interrupt Target State + \details Sets the interrupt target field in the NVIC and returns the interrupt target bit for the device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 if interrupt is assigned to Secure + 1 if interrupt is assigned to Non Secure + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t NVIC_SetTargetState(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ITNS[(((uint32_t)IRQn) >> 5UL)] |= ((uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL))); + return((uint32_t)(((NVIC->ITNS[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Clear Interrupt Target State + \details Clears the interrupt target field in the NVIC and returns the interrupt target bit for the device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 if interrupt is assigned to Secure + 1 if interrupt is assigned to Non Secure + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t NVIC_ClearTargetState(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ITNS[(((uint32_t)IRQn) >> 5UL)] &= ~((uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL))); + return((uint32_t)(((NVIC->ITNS[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + + +/** + \brief Set Interrupt Priority + \details Sets the priority of a device specific interrupt or a processor exception. 
+ The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \param [in] priority Priority to set. + \note The priority cannot be set for every processor exception. + */ +__STATIC_INLINE void __NVIC_SetPriority(IRQn_Type IRQn, uint32_t priority) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->IPR[((uint32_t)IRQn)] = (uint8_t)((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL); + } + else + { + SCB->SHPR[(((uint32_t)IRQn) & 0xFUL)-4UL] = (uint8_t)((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL); + } +} + + +/** + \brief Get Interrupt Priority + \details Reads the priority of a device specific interrupt or a processor exception. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \return Interrupt Priority. + Value is aligned automatically to the implemented priority bits of the microcontroller. + */ +__STATIC_INLINE uint32_t __NVIC_GetPriority(IRQn_Type IRQn) +{ + + if ((int32_t)(IRQn) >= 0) + { + return(((uint32_t)NVIC->IPR[((uint32_t)IRQn)] >> (8U - __NVIC_PRIO_BITS))); + } + else + { + return(((uint32_t)SCB->SHPR[(((uint32_t)IRQn) & 0xFUL)-4UL] >> (8U - __NVIC_PRIO_BITS))); + } +} + + +/** + \brief Encode Priority + \details Encodes the priority for an interrupt with the given priority group, + preemptive priority value, and subpriority value. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS), the smallest possible priority group is set. + \param [in] PriorityGroup Used priority group. + \param [in] PreemptPriority Preemptive priority value (starting from 0). + \param [in] SubPriority Subpriority value (starting from 0). + \return Encoded priority. Value can be used in the function \ref NVIC_SetPriority(). + */ +__STATIC_INLINE uint32_t NVIC_EncodePriority (uint32_t PriorityGroup, uint32_t PreemptPriority, uint32_t SubPriority) +{ + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + uint32_t PreemptPriorityBits; + uint32_t SubPriorityBits; + + PreemptPriorityBits = ((7UL - PriorityGroupTmp) > (uint32_t)(__NVIC_PRIO_BITS)) ? (uint32_t)(__NVIC_PRIO_BITS) : (uint32_t)(7UL - PriorityGroupTmp); + SubPriorityBits = ((PriorityGroupTmp + (uint32_t)(__NVIC_PRIO_BITS)) < (uint32_t)7UL) ? (uint32_t)0UL : (uint32_t)((PriorityGroupTmp - 7UL) + (uint32_t)(__NVIC_PRIO_BITS)); + + return ( + ((PreemptPriority & (uint32_t)((1UL << (PreemptPriorityBits)) - 1UL)) << SubPriorityBits) | + ((SubPriority & (uint32_t)((1UL << (SubPriorityBits )) - 1UL))) + ); +} + + +/** + \brief Decode Priority + \details Decodes an interrupt priority value with a given priority group to + preemptive priority value and subpriority value. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS) the smallest possible priority group is set. + \param [in] Priority Priority value, which can be retrieved with the function \ref NVIC_GetPriority(). + \param [in] PriorityGroup Used priority group. + \param [out] pPreemptPriority Preemptive priority value (starting from 0). + \param [out] pSubPriority Subpriority value (starting from 0). 
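+  \par     Example (editorial sketch, not part of the original CMSIS text)
+           Round-trips a priority through the encode/decode helpers. UART0_IRQn is
+           a placeholder; use an IRQn_Type value from the actual device header.
+  \code
+           uint32_t grouping = NVIC_GetPriorityGrouping();
+           uint32_t preempt;
+           uint32_t sub;
+           NVIC_SetPriority(UART0_IRQn, NVIC_EncodePriority(grouping, 1UL, 0UL));
+           NVIC_DecodePriority(NVIC_GetPriority(UART0_IRQn), grouping, &preempt, &sub);
+  \endcode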
+ */ +__STATIC_INLINE void NVIC_DecodePriority (uint32_t Priority, uint32_t PriorityGroup, uint32_t* const pPreemptPriority, uint32_t* const pSubPriority) +{ + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + uint32_t PreemptPriorityBits; + uint32_t SubPriorityBits; + + PreemptPriorityBits = ((7UL - PriorityGroupTmp) > (uint32_t)(__NVIC_PRIO_BITS)) ? (uint32_t)(__NVIC_PRIO_BITS) : (uint32_t)(7UL - PriorityGroupTmp); + SubPriorityBits = ((PriorityGroupTmp + (uint32_t)(__NVIC_PRIO_BITS)) < (uint32_t)7UL) ? (uint32_t)0UL : (uint32_t)((PriorityGroupTmp - 7UL) + (uint32_t)(__NVIC_PRIO_BITS)); + + *pPreemptPriority = (Priority >> SubPriorityBits) & (uint32_t)((1UL << (PreemptPriorityBits)) - 1UL); + *pSubPriority = (Priority ) & (uint32_t)((1UL << (SubPriorityBits )) - 1UL); +} + + +/** + \brief Set Interrupt Vector + \details Sets an interrupt vector in SRAM based interrupt vector table. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + VTOR must been relocated to SRAM before. + \param [in] IRQn Interrupt number + \param [in] vector Address of interrupt handler function + */ +__STATIC_INLINE void __NVIC_SetVector(IRQn_Type IRQn, uint32_t vector) +{ + uint32_t *vectors = (uint32_t *)SCB->VTOR; + vectors[(int32_t)IRQn + NVIC_USER_IRQ_OFFSET] = vector; +} + + +/** + \brief Get Interrupt Vector + \details Reads an interrupt vector from interrupt vector table. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \return Address of interrupt handler function + */ +__STATIC_INLINE uint32_t __NVIC_GetVector(IRQn_Type IRQn) +{ + uint32_t *vectors = (uint32_t *)SCB->VTOR; + return vectors[(int32_t)IRQn + NVIC_USER_IRQ_OFFSET]; +} + + +/** + \brief System Reset + \details Initiates a system reset request to reset the MCU. + */ +__NO_RETURN __STATIC_INLINE void __NVIC_SystemReset(void) +{ + __DSB(); /* Ensure all outstanding memory accesses included + buffered write are completed before reset */ + SCB->AIRCR = (uint32_t)((0x5FAUL << SCB_AIRCR_VECTKEY_Pos) | + (SCB->AIRCR & SCB_AIRCR_PRIGROUP_Msk) | + SCB_AIRCR_SYSRESETREQ_Msk ); /* Keep priority group unchanged */ + __DSB(); /* Ensure completion of memory access */ + + for(;;) /* wait until reset */ + { + __NOP(); + } +} + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \brief Set Priority Grouping (non-secure) + \details Sets the non-secure priority grouping field when in secure state using the required unlock sequence. + The parameter PriorityGroup is assigned to the field SCB->AIRCR [10:8] PRIGROUP field. + Only values from 0..7 are used. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS), the smallest possible priority group is set. + \param [in] PriorityGroup Priority grouping field. 
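+  \par     Example (editorial sketch, not part of the original CMSIS text)
+           Called from the Secure state to mirror the Secure-side grouping into the
+           Non-secure NVIC.
+  \code
+           TZ_NVIC_SetPriorityGrouping_NS(NVIC_GetPriorityGrouping());
+  \endcode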
+ */ +__STATIC_INLINE void TZ_NVIC_SetPriorityGrouping_NS(uint32_t PriorityGroup) +{ + uint32_t reg_value; + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + + reg_value = SCB_NS->AIRCR; /* read old register configuration */ + reg_value &= ~((uint32_t)(SCB_AIRCR_VECTKEY_Msk | SCB_AIRCR_PRIGROUP_Msk)); /* clear bits to change */ + reg_value = (reg_value | + ((uint32_t)0x5FAUL << SCB_AIRCR_VECTKEY_Pos) | + (PriorityGroupTmp << 8U) ); /* Insert write key and priorty group */ + SCB_NS->AIRCR = reg_value; +} + + +/** + \brief Get Priority Grouping (non-secure) + \details Reads the priority grouping field from the non-secure NVIC when in secure state. + \return Priority grouping field (SCB->AIRCR [10:8] PRIGROUP field). + */ +__STATIC_INLINE uint32_t TZ_NVIC_GetPriorityGrouping_NS(void) +{ + return ((uint32_t)((SCB_NS->AIRCR & SCB_AIRCR_PRIGROUP_Msk) >> SCB_AIRCR_PRIGROUP_Pos)); +} + + +/** + \brief Enable Interrupt (non-secure) + \details Enables a device specific interrupt in the non-secure NVIC interrupt controller when in secure state. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void TZ_NVIC_EnableIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC_NS->ISER[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Get Interrupt Enable status (non-secure) + \details Returns a device specific interrupt enable status from the non-secure NVIC interrupt controller when in secure state. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt is not enabled. + \return 1 Interrupt is enabled. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t TZ_NVIC_GetEnableIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC_NS->ISER[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Disable Interrupt (non-secure) + \details Disables a device specific interrupt in the non-secure NVIC interrupt controller when in secure state. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void TZ_NVIC_DisableIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC_NS->ICER[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Get Pending Interrupt (non-secure) + \details Reads the NVIC pending register in the non-secure NVIC when in secure state and returns the pending bit for the specified device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt status is not pending. + \return 1 Interrupt status is pending. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t TZ_NVIC_GetPendingIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC_NS->ISPR[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Set Pending Interrupt (non-secure) + \details Sets the pending bit of a device specific interrupt in the non-secure NVIC pending register when in secure state. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. 
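+  \par     Example (editorial sketch, not part of the original CMSIS text)
+           Pends a Non-secure interrupt from the Secure state. NS_TIMER_IRQn is a
+           placeholder for an interrupt that is targeted at the Non-secure state.
+  \code
+           TZ_NVIC_EnableIRQ_NS(NS_TIMER_IRQn);       /* NS_TIMER_IRQn is a placeholder IRQ number */
+           TZ_NVIC_SetPendingIRQ_NS(NS_TIMER_IRQn);   /* handler executes in the Non-secure state */
+  \endcode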
+ */ +__STATIC_INLINE void TZ_NVIC_SetPendingIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC_NS->ISPR[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Clear Pending Interrupt (non-secure) + \details Clears the pending bit of a device specific interrupt in the non-secure NVIC pending register when in secure state. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void TZ_NVIC_ClearPendingIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC_NS->ICPR[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Get Active Interrupt (non-secure) + \details Reads the active register in non-secure NVIC when in secure state and returns the active bit for the device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt status is not active. + \return 1 Interrupt status is active. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t TZ_NVIC_GetActive_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC_NS->IABR[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Set Interrupt Priority (non-secure) + \details Sets the priority of a non-secure device specific interrupt or a non-secure processor exception when in secure state. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \param [in] priority Priority to set. + \note The priority cannot be set for every non-secure processor exception. + */ +__STATIC_INLINE void TZ_NVIC_SetPriority_NS(IRQn_Type IRQn, uint32_t priority) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC_NS->IPR[((uint32_t)IRQn)] = (uint8_t)((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL); + } + else + { + SCB_NS->SHPR[(((uint32_t)IRQn) & 0xFUL)-4UL] = (uint8_t)((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL); + } +} + + +/** + \brief Get Interrupt Priority (non-secure) + \details Reads the priority of a non-secure device specific interrupt or a non-secure processor exception when in secure state. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \return Interrupt Priority. Value is aligned automatically to the implemented priority bits of the microcontroller. + */ +__STATIC_INLINE uint32_t TZ_NVIC_GetPriority_NS(IRQn_Type IRQn) +{ + + if ((int32_t)(IRQn) >= 0) + { + return(((uint32_t)NVIC_NS->IPR[((uint32_t)IRQn)] >> (8U - __NVIC_PRIO_BITS))); + } + else + { + return(((uint32_t)SCB_NS->SHPR[(((uint32_t)IRQn) & 0xFUL)-4UL] >> (8U - __NVIC_PRIO_BITS))); + } +} +#endif /* defined (__ARM_FEATURE_CMSE) &&(__ARM_FEATURE_CMSE == 3U) */ + +/*@} end of CMSIS_Core_NVICFunctions */ + +/* ########################## MPU functions #################################### */ + +#if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) + +#include "mpu_armv8.h" + +#endif + +/* ########################## FPU functions #################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_FpuFunctions FPU Functions + \brief Function that provides FPU type. 
+ @{ + */ + +/** + \brief get FPU type + \details returns the FPU type + \returns + - \b 0: No FPU + - \b 1: Single precision FPU + - \b 2: Double + Single precision FPU + */ +__STATIC_INLINE uint32_t SCB_GetFPUType(void) +{ + uint32_t mvfr0; + + mvfr0 = FPU->MVFR0; + if ((mvfr0 & (FPU_MVFR0_Single_precision_Msk | FPU_MVFR0_Double_precision_Msk)) == 0x220U) + { + return 2U; /* Double + Single precision FPU */ + } + else if ((mvfr0 & (FPU_MVFR0_Single_precision_Msk | FPU_MVFR0_Double_precision_Msk)) == 0x020U) + { + return 1U; /* Single precision FPU */ + } + else + { + return 0U; /* No FPU */ + } +} + + +/*@} end of CMSIS_Core_FpuFunctions */ + + + +/* ########################## SAU functions #################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_SAUFunctions SAU Functions + \brief Functions that configure the SAU. + @{ + */ + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) + +/** + \brief Enable SAU + \details Enables the Security Attribution Unit (SAU). + */ +__STATIC_INLINE void TZ_SAU_Enable(void) +{ + SAU->CTRL |= (SAU_CTRL_ENABLE_Msk); +} + + + +/** + \brief Disable SAU + \details Disables the Security Attribution Unit (SAU). + */ +__STATIC_INLINE void TZ_SAU_Disable(void) +{ + SAU->CTRL &= ~(SAU_CTRL_ENABLE_Msk); +} + +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + +/*@} end of CMSIS_Core_SAUFunctions */ + + + + +/* ################################## SysTick function ############################################ */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_SysTickFunctions SysTick Functions + \brief Functions that configure the System. + @{ + */ + +#if defined (__Vendor_SysTickConfig) && (__Vendor_SysTickConfig == 0U) + +/** + \brief System Tick Configuration + \details Initializes the System Timer and its interrupt, and starts the System Tick Timer. + Counter is in free running mode to generate periodic interrupts. + \param [in] ticks Number of ticks between two interrupts. + \return 0 Function succeeded. + \return 1 Function failed. + \note When the variable __Vendor_SysTickConfig is set to 1, then the + function SysTick_Config is not included. In this case, the file device.h + must contain a vendor-specific implementation of this function. + */ +__STATIC_INLINE uint32_t SysTick_Config(uint32_t ticks) +{ + if ((ticks - 1UL) > SysTick_LOAD_RELOAD_Msk) + { + return (1UL); /* Reload value impossible */ + } + + SysTick->LOAD = (uint32_t)(ticks - 1UL); /* set reload register */ + NVIC_SetPriority (SysTick_IRQn, (1UL << __NVIC_PRIO_BITS) - 1UL); /* set Priority for Systick Interrupt */ + SysTick->VAL = 0UL; /* Load the SysTick Counter Value */ + SysTick->CTRL = SysTick_CTRL_CLKSOURCE_Msk | + SysTick_CTRL_TICKINT_Msk | + SysTick_CTRL_ENABLE_Msk; /* Enable SysTick IRQ and SysTick Timer */ + return (0UL); /* Function successful */ +} + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \brief System Tick Configuration (non-secure) + \details Initializes the non-secure System Timer and its interrupt when in secure state, and starts the System Tick Timer. + Counter is in free running mode to generate periodic interrupts. + \param [in] ticks Number of ticks between two interrupts. + \return 0 Function succeeded. + \return 1 Function failed. + \note When the variable __Vendor_SysTickConfig is set to 1, then the + function TZ_SysTick_Config_NS is not included. In this case, the file device.h + must contain a vendor-specific implementation of this function. 
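+  \par     Example (editorial sketch, not part of the original CMSIS text)
+           Starts a roughly 1 ms Non-secure tick from the Secure state, assuming the
+           CMSIS global SystemCoreClock holds the SysTick input clock frequency in Hz.
+  \code
+           if (TZ_SysTick_Config_NS(SystemCoreClock / 1000U) != 0U)
+           {
+             /* reload value out of range for the requested tick rate */
+           }
+  \endcode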
+ + */ +__STATIC_INLINE uint32_t TZ_SysTick_Config_NS(uint32_t ticks) +{ + if ((ticks - 1UL) > SysTick_LOAD_RELOAD_Msk) + { + return (1UL); /* Reload value impossible */ + } + + SysTick_NS->LOAD = (uint32_t)(ticks - 1UL); /* set reload register */ + TZ_NVIC_SetPriority_NS (SysTick_IRQn, (1UL << __NVIC_PRIO_BITS) - 1UL); /* set Priority for Systick Interrupt */ + SysTick_NS->VAL = 0UL; /* Load the SysTick Counter Value */ + SysTick_NS->CTRL = SysTick_CTRL_CLKSOURCE_Msk | + SysTick_CTRL_TICKINT_Msk | + SysTick_CTRL_ENABLE_Msk; /* Enable SysTick IRQ and SysTick Timer */ + return (0UL); /* Function successful */ +} +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + +#endif + +/*@} end of CMSIS_Core_SysTickFunctions */ + + + +/* ##################################### Debug In/Output function ########################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_core_DebugFunctions ITM Functions + \brief Functions that access the ITM debug interface. + @{ + */ + +extern volatile int32_t ITM_RxBuffer; /*!< External variable to receive characters. */ +#define ITM_RXBUFFER_EMPTY ((int32_t)0x5AA55AA5U) /*!< Value identifying \ref ITM_RxBuffer is ready for next character. */ + + +/** + \brief ITM Send Character + \details Transmits a character via the ITM channel 0, and + \li Just returns when no debugger is connected that has booked the output. + \li Is blocking when a debugger is connected, but the previous character sent has not been transmitted. + \param [in] ch Character to transmit. + \returns Character to transmit. + */ +__STATIC_INLINE uint32_t ITM_SendChar (uint32_t ch) +{ + if (((ITM->TCR & ITM_TCR_ITMENA_Msk) != 0UL) && /* ITM enabled */ + ((ITM->TER & 1UL ) != 0UL) ) /* ITM Port #0 enabled */ + { + while (ITM->PORT[0U].u32 == 0UL) + { + __NOP(); + } + ITM->PORT[0U].u8 = (uint8_t)ch; + } + return (ch); +} + + +/** + \brief ITM Receive Character + \details Inputs a character via the external variable \ref ITM_RxBuffer. + \return Received character. + \return -1 No character pending. + */ +__STATIC_INLINE int32_t ITM_ReceiveChar (void) +{ + int32_t ch = -1; /* no character available */ + + if (ITM_RxBuffer != ITM_RXBUFFER_EMPTY) + { + ch = ITM_RxBuffer; + ITM_RxBuffer = ITM_RXBUFFER_EMPTY; /* ready for next character */ + } + + return (ch); +} + + +/** + \brief ITM Check Character + \details Checks whether a character is pending for reading in the variable \ref ITM_RxBuffer. + \return 0 No character available. + \return 1 Character available. + */ +__STATIC_INLINE int32_t ITM_CheckChar (void) +{ + + if (ITM_RxBuffer == ITM_RXBUFFER_EMPTY) + { + return (0); /* no character available */ + } + else + { + return (1); /* character available */ + } +} + +/*@} end of CMSIS_core_DebugFunctions */ + + + + +#ifdef __cplusplus +} +#endif + +#endif /* __CORE_ARMV8MML_H_DEPENDANT */ + +#endif /* __CMSIS_GENERIC */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Drivers/CMSIS/Include/core_cm0.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Drivers/CMSIS/Include/core_cm0.h new file mode 100644 index 0000000..6f82227 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Drivers/CMSIS/Include/core_cm0.h @@ -0,0 +1,949 @@ +/**************************************************************************//** + * @file core_cm0.h + * @brief CMSIS Cortex-M0 Core Peripheral Access Layer Header File + * @version V5.0.5 + * @date 28. 
May 2018
+ ******************************************************************************/
+/*
+ * Copyright (c) 2009-2018 Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the License); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an AS IS BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#if defined ( __ICCARM__ )
+  #pragma system_include         /* treat file as system include file for MISRA check */
+#elif defined (__clang__)
+  #pragma clang system_header    /* treat file as system include file */
+#endif
+
+#ifndef __CORE_CM0_H_GENERIC
+#define __CORE_CM0_H_GENERIC
+
+#include <stdint.h>
+
+#ifdef __cplusplus
+ extern "C" {
+#endif
+
+/**
+  \page CMSIS_MISRA_Exceptions  MISRA-C:2004 Compliance Exceptions
+  CMSIS violates the following MISRA-C:2004 rules:
+
+   \li Required Rule 8.5, object/function definition in header file.
+ Function definitions in header files are used to allow 'inlining'. + + \li Required Rule 18.4, declaration of union type or object of union type: '{...}'.
+ Unions are used for effective representation of core registers. + + \li Advisory Rule 19.7, Function-like macro defined.
+ Function-like macros are used to allow more efficient code. + */ + + +/******************************************************************************* + * CMSIS definitions + ******************************************************************************/ +/** + \ingroup Cortex_M0 + @{ + */ + +#include "cmsis_version.h" + +/* CMSIS CM0 definitions */ +#define __CM0_CMSIS_VERSION_MAIN (__CM_CMSIS_VERSION_MAIN) /*!< \deprecated [31:16] CMSIS HAL main version */ +#define __CM0_CMSIS_VERSION_SUB (__CM_CMSIS_VERSION_SUB) /*!< \deprecated [15:0] CMSIS HAL sub version */ +#define __CM0_CMSIS_VERSION ((__CM0_CMSIS_VERSION_MAIN << 16U) | \ + __CM0_CMSIS_VERSION_SUB ) /*!< \deprecated CMSIS HAL version number */ + +#define __CORTEX_M (0U) /*!< Cortex-M Core */ + +/** __FPU_USED indicates whether an FPU is used or not. + This core does not support an FPU at all +*/ +#define __FPU_USED 0U + +#if defined ( __CC_ARM ) + #if defined __TARGET_FPU_VFP + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined (__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050) + #if defined __ARM_PCS_VFP + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined ( __GNUC__ ) + #if defined (__VFP_FP__) && !defined(__SOFTFP__) + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined ( __ICCARM__ ) + #if defined __ARMVFP__ + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined ( __TI_ARM__ ) + #if defined __TI_VFP_SUPPORT__ + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined ( __TASKING__ ) + #if defined __FPU_VFP__ + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined ( __CSMC__ ) + #if ( __CSMC__ & 0x400U) + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#endif + +#include "cmsis_compiler.h" /* CMSIS compiler specific defines */ + + +#ifdef __cplusplus +} +#endif + +#endif /* __CORE_CM0_H_GENERIC */ + +#ifndef __CMSIS_GENERIC + +#ifndef __CORE_CM0_H_DEPENDANT +#define __CORE_CM0_H_DEPENDANT + +#ifdef __cplusplus + extern "C" { +#endif + +/* check device defines and use defaults */ +#if defined __CHECK_DEVICE_DEFINES + #ifndef __CM0_REV + #define __CM0_REV 0x0000U + #warning "__CM0_REV not defined in device header file; using default!" + #endif + + #ifndef __NVIC_PRIO_BITS + #define __NVIC_PRIO_BITS 2U + #warning "__NVIC_PRIO_BITS not defined in device header file; using default!" + #endif + + #ifndef __Vendor_SysTickConfig + #define __Vendor_SysTickConfig 0U + #warning "__Vendor_SysTickConfig not defined in device header file; using default!" + #endif +#endif + +/* IO definitions (access restrictions to peripheral registers) */ +/** + \defgroup CMSIS_glob_defs CMSIS Global Defines + + IO Type Qualifiers are used + \li to specify the access to peripheral variables. + \li for automatic generation of peripheral register debug information. 
+*/ +#ifdef __cplusplus + #define __I volatile /*!< Defines 'read only' permissions */ +#else + #define __I volatile const /*!< Defines 'read only' permissions */ +#endif +#define __O volatile /*!< Defines 'write only' permissions */ +#define __IO volatile /*!< Defines 'read / write' permissions */ + +/* following defines should be used for structure members */ +#define __IM volatile const /*! Defines 'read only' structure member permissions */ +#define __OM volatile /*! Defines 'write only' structure member permissions */ +#define __IOM volatile /*! Defines 'read / write' structure member permissions */ + +/*@} end of group Cortex_M0 */ + + + +/******************************************************************************* + * Register Abstraction + Core Register contain: + - Core Register + - Core NVIC Register + - Core SCB Register + - Core SysTick Register + ******************************************************************************/ +/** + \defgroup CMSIS_core_register Defines and Type Definitions + \brief Type definitions and defines for Cortex-M processor based devices. +*/ + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_CORE Status and Control Registers + \brief Core Register type definitions. + @{ + */ + +/** + \brief Union type to access the Application Program Status Register (APSR). + */ +typedef union +{ + struct + { + uint32_t _reserved0:28; /*!< bit: 0..27 Reserved */ + uint32_t V:1; /*!< bit: 28 Overflow condition code flag */ + uint32_t C:1; /*!< bit: 29 Carry condition code flag */ + uint32_t Z:1; /*!< bit: 30 Zero condition code flag */ + uint32_t N:1; /*!< bit: 31 Negative condition code flag */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} APSR_Type; + +/* APSR Register Definitions */ +#define APSR_N_Pos 31U /*!< APSR: N Position */ +#define APSR_N_Msk (1UL << APSR_N_Pos) /*!< APSR: N Mask */ + +#define APSR_Z_Pos 30U /*!< APSR: Z Position */ +#define APSR_Z_Msk (1UL << APSR_Z_Pos) /*!< APSR: Z Mask */ + +#define APSR_C_Pos 29U /*!< APSR: C Position */ +#define APSR_C_Msk (1UL << APSR_C_Pos) /*!< APSR: C Mask */ + +#define APSR_V_Pos 28U /*!< APSR: V Position */ +#define APSR_V_Msk (1UL << APSR_V_Pos) /*!< APSR: V Mask */ + + +/** + \brief Union type to access the Interrupt Program Status Register (IPSR). + */ +typedef union +{ + struct + { + uint32_t ISR:9; /*!< bit: 0.. 8 Exception number */ + uint32_t _reserved0:23; /*!< bit: 9..31 Reserved */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} IPSR_Type; + +/* IPSR Register Definitions */ +#define IPSR_ISR_Pos 0U /*!< IPSR: ISR Position */ +#define IPSR_ISR_Msk (0x1FFUL /*<< IPSR_ISR_Pos*/) /*!< IPSR: ISR Mask */ + + +/** + \brief Union type to access the Special-Purpose Program Status Registers (xPSR). + */ +typedef union +{ + struct + { + uint32_t ISR:9; /*!< bit: 0.. 
8 Exception number */ + uint32_t _reserved0:15; /*!< bit: 9..23 Reserved */ + uint32_t T:1; /*!< bit: 24 Thumb bit (read 0) */ + uint32_t _reserved1:3; /*!< bit: 25..27 Reserved */ + uint32_t V:1; /*!< bit: 28 Overflow condition code flag */ + uint32_t C:1; /*!< bit: 29 Carry condition code flag */ + uint32_t Z:1; /*!< bit: 30 Zero condition code flag */ + uint32_t N:1; /*!< bit: 31 Negative condition code flag */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} xPSR_Type; + +/* xPSR Register Definitions */ +#define xPSR_N_Pos 31U /*!< xPSR: N Position */ +#define xPSR_N_Msk (1UL << xPSR_N_Pos) /*!< xPSR: N Mask */ + +#define xPSR_Z_Pos 30U /*!< xPSR: Z Position */ +#define xPSR_Z_Msk (1UL << xPSR_Z_Pos) /*!< xPSR: Z Mask */ + +#define xPSR_C_Pos 29U /*!< xPSR: C Position */ +#define xPSR_C_Msk (1UL << xPSR_C_Pos) /*!< xPSR: C Mask */ + +#define xPSR_V_Pos 28U /*!< xPSR: V Position */ +#define xPSR_V_Msk (1UL << xPSR_V_Pos) /*!< xPSR: V Mask */ + +#define xPSR_T_Pos 24U /*!< xPSR: T Position */ +#define xPSR_T_Msk (1UL << xPSR_T_Pos) /*!< xPSR: T Mask */ + +#define xPSR_ISR_Pos 0U /*!< xPSR: ISR Position */ +#define xPSR_ISR_Msk (0x1FFUL /*<< xPSR_ISR_Pos*/) /*!< xPSR: ISR Mask */ + + +/** + \brief Union type to access the Control Registers (CONTROL). + */ +typedef union +{ + struct + { + uint32_t _reserved0:1; /*!< bit: 0 Reserved */ + uint32_t SPSEL:1; /*!< bit: 1 Stack to be used */ + uint32_t _reserved1:30; /*!< bit: 2..31 Reserved */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} CONTROL_Type; + +/* CONTROL Register Definitions */ +#define CONTROL_SPSEL_Pos 1U /*!< CONTROL: SPSEL Position */ +#define CONTROL_SPSEL_Msk (1UL << CONTROL_SPSEL_Pos) /*!< CONTROL: SPSEL Mask */ + +/*@} end of group CMSIS_CORE */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_NVIC Nested Vectored Interrupt Controller (NVIC) + \brief Type definitions for the NVIC Registers + @{ + */ + +/** + \brief Structure type to access the Nested Vectored Interrupt Controller (NVIC). + */ +typedef struct +{ + __IOM uint32_t ISER[1U]; /*!< Offset: 0x000 (R/W) Interrupt Set Enable Register */ + uint32_t RESERVED0[31U]; + __IOM uint32_t ICER[1U]; /*!< Offset: 0x080 (R/W) Interrupt Clear Enable Register */ + uint32_t RSERVED1[31U]; + __IOM uint32_t ISPR[1U]; /*!< Offset: 0x100 (R/W) Interrupt Set Pending Register */ + uint32_t RESERVED2[31U]; + __IOM uint32_t ICPR[1U]; /*!< Offset: 0x180 (R/W) Interrupt Clear Pending Register */ + uint32_t RESERVED3[31U]; + uint32_t RESERVED4[64U]; + __IOM uint32_t IP[8U]; /*!< Offset: 0x300 (R/W) Interrupt Priority Register */ +} NVIC_Type; + +/*@} end of group CMSIS_NVIC */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SCB System Control Block (SCB) + \brief Type definitions for the System Control Block Registers + @{ + */ + +/** + \brief Structure type to access the System Control Block (SCB). + */ +typedef struct +{ + __IM uint32_t CPUID; /*!< Offset: 0x000 (R/ ) CPUID Base Register */ + __IOM uint32_t ICSR; /*!< Offset: 0x004 (R/W) Interrupt Control and State Register */ + uint32_t RESERVED0; + __IOM uint32_t AIRCR; /*!< Offset: 0x00C (R/W) Application Interrupt and Reset Control Register */ + __IOM uint32_t SCR; /*!< Offset: 0x010 (R/W) System Control Register */ + __IOM uint32_t CCR; /*!< Offset: 0x014 (R/W) Configuration Control Register */ + uint32_t RESERVED1; + __IOM uint32_t SHP[2U]; /*!< Offset: 0x01C (R/W) System Handlers Priority Registers. 
[0] is RESERVED */ + __IOM uint32_t SHCSR; /*!< Offset: 0x024 (R/W) System Handler Control and State Register */ +} SCB_Type; + +/* SCB CPUID Register Definitions */ +#define SCB_CPUID_IMPLEMENTER_Pos 24U /*!< SCB CPUID: IMPLEMENTER Position */ +#define SCB_CPUID_IMPLEMENTER_Msk (0xFFUL << SCB_CPUID_IMPLEMENTER_Pos) /*!< SCB CPUID: IMPLEMENTER Mask */ + +#define SCB_CPUID_VARIANT_Pos 20U /*!< SCB CPUID: VARIANT Position */ +#define SCB_CPUID_VARIANT_Msk (0xFUL << SCB_CPUID_VARIANT_Pos) /*!< SCB CPUID: VARIANT Mask */ + +#define SCB_CPUID_ARCHITECTURE_Pos 16U /*!< SCB CPUID: ARCHITECTURE Position */ +#define SCB_CPUID_ARCHITECTURE_Msk (0xFUL << SCB_CPUID_ARCHITECTURE_Pos) /*!< SCB CPUID: ARCHITECTURE Mask */ + +#define SCB_CPUID_PARTNO_Pos 4U /*!< SCB CPUID: PARTNO Position */ +#define SCB_CPUID_PARTNO_Msk (0xFFFUL << SCB_CPUID_PARTNO_Pos) /*!< SCB CPUID: PARTNO Mask */ + +#define SCB_CPUID_REVISION_Pos 0U /*!< SCB CPUID: REVISION Position */ +#define SCB_CPUID_REVISION_Msk (0xFUL /*<< SCB_CPUID_REVISION_Pos*/) /*!< SCB CPUID: REVISION Mask */ + +/* SCB Interrupt Control State Register Definitions */ +#define SCB_ICSR_NMIPENDSET_Pos 31U /*!< SCB ICSR: NMIPENDSET Position */ +#define SCB_ICSR_NMIPENDSET_Msk (1UL << SCB_ICSR_NMIPENDSET_Pos) /*!< SCB ICSR: NMIPENDSET Mask */ + +#define SCB_ICSR_PENDSVSET_Pos 28U /*!< SCB ICSR: PENDSVSET Position */ +#define SCB_ICSR_PENDSVSET_Msk (1UL << SCB_ICSR_PENDSVSET_Pos) /*!< SCB ICSR: PENDSVSET Mask */ + +#define SCB_ICSR_PENDSVCLR_Pos 27U /*!< SCB ICSR: PENDSVCLR Position */ +#define SCB_ICSR_PENDSVCLR_Msk (1UL << SCB_ICSR_PENDSVCLR_Pos) /*!< SCB ICSR: PENDSVCLR Mask */ + +#define SCB_ICSR_PENDSTSET_Pos 26U /*!< SCB ICSR: PENDSTSET Position */ +#define SCB_ICSR_PENDSTSET_Msk (1UL << SCB_ICSR_PENDSTSET_Pos) /*!< SCB ICSR: PENDSTSET Mask */ + +#define SCB_ICSR_PENDSTCLR_Pos 25U /*!< SCB ICSR: PENDSTCLR Position */ +#define SCB_ICSR_PENDSTCLR_Msk (1UL << SCB_ICSR_PENDSTCLR_Pos) /*!< SCB ICSR: PENDSTCLR Mask */ + +#define SCB_ICSR_ISRPREEMPT_Pos 23U /*!< SCB ICSR: ISRPREEMPT Position */ +#define SCB_ICSR_ISRPREEMPT_Msk (1UL << SCB_ICSR_ISRPREEMPT_Pos) /*!< SCB ICSR: ISRPREEMPT Mask */ + +#define SCB_ICSR_ISRPENDING_Pos 22U /*!< SCB ICSR: ISRPENDING Position */ +#define SCB_ICSR_ISRPENDING_Msk (1UL << SCB_ICSR_ISRPENDING_Pos) /*!< SCB ICSR: ISRPENDING Mask */ + +#define SCB_ICSR_VECTPENDING_Pos 12U /*!< SCB ICSR: VECTPENDING Position */ +#define SCB_ICSR_VECTPENDING_Msk (0x1FFUL << SCB_ICSR_VECTPENDING_Pos) /*!< SCB ICSR: VECTPENDING Mask */ + +#define SCB_ICSR_VECTACTIVE_Pos 0U /*!< SCB ICSR: VECTACTIVE Position */ +#define SCB_ICSR_VECTACTIVE_Msk (0x1FFUL /*<< SCB_ICSR_VECTACTIVE_Pos*/) /*!< SCB ICSR: VECTACTIVE Mask */ + +/* SCB Application Interrupt and Reset Control Register Definitions */ +#define SCB_AIRCR_VECTKEY_Pos 16U /*!< SCB AIRCR: VECTKEY Position */ +#define SCB_AIRCR_VECTKEY_Msk (0xFFFFUL << SCB_AIRCR_VECTKEY_Pos) /*!< SCB AIRCR: VECTKEY Mask */ + +#define SCB_AIRCR_VECTKEYSTAT_Pos 16U /*!< SCB AIRCR: VECTKEYSTAT Position */ +#define SCB_AIRCR_VECTKEYSTAT_Msk (0xFFFFUL << SCB_AIRCR_VECTKEYSTAT_Pos) /*!< SCB AIRCR: VECTKEYSTAT Mask */ + +#define SCB_AIRCR_ENDIANESS_Pos 15U /*!< SCB AIRCR: ENDIANESS Position */ +#define SCB_AIRCR_ENDIANESS_Msk (1UL << SCB_AIRCR_ENDIANESS_Pos) /*!< SCB AIRCR: ENDIANESS Mask */ + +#define SCB_AIRCR_SYSRESETREQ_Pos 2U /*!< SCB AIRCR: SYSRESETREQ Position */ +#define SCB_AIRCR_SYSRESETREQ_Msk (1UL << SCB_AIRCR_SYSRESETREQ_Pos) /*!< SCB AIRCR: SYSRESETREQ Mask */ + +#define SCB_AIRCR_VECTCLRACTIVE_Pos 1U /*!< SCB 
AIRCR: VECTCLRACTIVE Position */ +#define SCB_AIRCR_VECTCLRACTIVE_Msk (1UL << SCB_AIRCR_VECTCLRACTIVE_Pos) /*!< SCB AIRCR: VECTCLRACTIVE Mask */ + +/* SCB System Control Register Definitions */ +#define SCB_SCR_SEVONPEND_Pos 4U /*!< SCB SCR: SEVONPEND Position */ +#define SCB_SCR_SEVONPEND_Msk (1UL << SCB_SCR_SEVONPEND_Pos) /*!< SCB SCR: SEVONPEND Mask */ + +#define SCB_SCR_SLEEPDEEP_Pos 2U /*!< SCB SCR: SLEEPDEEP Position */ +#define SCB_SCR_SLEEPDEEP_Msk (1UL << SCB_SCR_SLEEPDEEP_Pos) /*!< SCB SCR: SLEEPDEEP Mask */ + +#define SCB_SCR_SLEEPONEXIT_Pos 1U /*!< SCB SCR: SLEEPONEXIT Position */ +#define SCB_SCR_SLEEPONEXIT_Msk (1UL << SCB_SCR_SLEEPONEXIT_Pos) /*!< SCB SCR: SLEEPONEXIT Mask */ + +/* SCB Configuration Control Register Definitions */ +#define SCB_CCR_STKALIGN_Pos 9U /*!< SCB CCR: STKALIGN Position */ +#define SCB_CCR_STKALIGN_Msk (1UL << SCB_CCR_STKALIGN_Pos) /*!< SCB CCR: STKALIGN Mask */ + +#define SCB_CCR_UNALIGN_TRP_Pos 3U /*!< SCB CCR: UNALIGN_TRP Position */ +#define SCB_CCR_UNALIGN_TRP_Msk (1UL << SCB_CCR_UNALIGN_TRP_Pos) /*!< SCB CCR: UNALIGN_TRP Mask */ + +/* SCB System Handler Control and State Register Definitions */ +#define SCB_SHCSR_SVCALLPENDED_Pos 15U /*!< SCB SHCSR: SVCALLPENDED Position */ +#define SCB_SHCSR_SVCALLPENDED_Msk (1UL << SCB_SHCSR_SVCALLPENDED_Pos) /*!< SCB SHCSR: SVCALLPENDED Mask */ + +/*@} end of group CMSIS_SCB */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SysTick System Tick Timer (SysTick) + \brief Type definitions for the System Timer Registers. + @{ + */ + +/** + \brief Structure type to access the System Timer (SysTick). + */ +typedef struct +{ + __IOM uint32_t CTRL; /*!< Offset: 0x000 (R/W) SysTick Control and Status Register */ + __IOM uint32_t LOAD; /*!< Offset: 0x004 (R/W) SysTick Reload Value Register */ + __IOM uint32_t VAL; /*!< Offset: 0x008 (R/W) SysTick Current Value Register */ + __IM uint32_t CALIB; /*!< Offset: 0x00C (R/ ) SysTick Calibration Register */ +} SysTick_Type; + +/* SysTick Control / Status Register Definitions */ +#define SysTick_CTRL_COUNTFLAG_Pos 16U /*!< SysTick CTRL: COUNTFLAG Position */ +#define SysTick_CTRL_COUNTFLAG_Msk (1UL << SysTick_CTRL_COUNTFLAG_Pos) /*!< SysTick CTRL: COUNTFLAG Mask */ + +#define SysTick_CTRL_CLKSOURCE_Pos 2U /*!< SysTick CTRL: CLKSOURCE Position */ +#define SysTick_CTRL_CLKSOURCE_Msk (1UL << SysTick_CTRL_CLKSOURCE_Pos) /*!< SysTick CTRL: CLKSOURCE Mask */ + +#define SysTick_CTRL_TICKINT_Pos 1U /*!< SysTick CTRL: TICKINT Position */ +#define SysTick_CTRL_TICKINT_Msk (1UL << SysTick_CTRL_TICKINT_Pos) /*!< SysTick CTRL: TICKINT Mask */ + +#define SysTick_CTRL_ENABLE_Pos 0U /*!< SysTick CTRL: ENABLE Position */ +#define SysTick_CTRL_ENABLE_Msk (1UL /*<< SysTick_CTRL_ENABLE_Pos*/) /*!< SysTick CTRL: ENABLE Mask */ + +/* SysTick Reload Register Definitions */ +#define SysTick_LOAD_RELOAD_Pos 0U /*!< SysTick LOAD: RELOAD Position */ +#define SysTick_LOAD_RELOAD_Msk (0xFFFFFFUL /*<< SysTick_LOAD_RELOAD_Pos*/) /*!< SysTick LOAD: RELOAD Mask */ + +/* SysTick Current Register Definitions */ +#define SysTick_VAL_CURRENT_Pos 0U /*!< SysTick VAL: CURRENT Position */ +#define SysTick_VAL_CURRENT_Msk (0xFFFFFFUL /*<< SysTick_VAL_CURRENT_Pos*/) /*!< SysTick VAL: CURRENT Mask */ + +/* SysTick Calibration Register Definitions */ +#define SysTick_CALIB_NOREF_Pos 31U /*!< SysTick CALIB: NOREF Position */ +#define SysTick_CALIB_NOREF_Msk (1UL << SysTick_CALIB_NOREF_Pos) /*!< SysTick CALIB: NOREF Mask */ + +#define SysTick_CALIB_SKEW_Pos 30U /*!< SysTick CALIB: SKEW Position */ +#define 
SysTick_CALIB_SKEW_Msk (1UL << SysTick_CALIB_SKEW_Pos) /*!< SysTick CALIB: SKEW Mask */ + +#define SysTick_CALIB_TENMS_Pos 0U /*!< SysTick CALIB: TENMS Position */ +#define SysTick_CALIB_TENMS_Msk (0xFFFFFFUL /*<< SysTick_CALIB_TENMS_Pos*/) /*!< SysTick CALIB: TENMS Mask */ + +/*@} end of group CMSIS_SysTick */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_CoreDebug Core Debug Registers (CoreDebug) + \brief Cortex-M0 Core Debug Registers (DCB registers, SHCSR, and DFSR) are only accessible over DAP and not via processor. + Therefore they are not covered by the Cortex-M0 header file. + @{ + */ +/*@} end of group CMSIS_CoreDebug */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_core_bitfield Core register bit field macros + \brief Macros for use with bit field definitions (xxx_Pos, xxx_Msk). + @{ + */ + +/** + \brief Mask and shift a bit field value for use in a register bit range. + \param[in] field Name of the register bit field. + \param[in] value Value of the bit field. This parameter is interpreted as an uint32_t type. + \return Masked and shifted value. +*/ +#define _VAL2FLD(field, value) (((uint32_t)(value) << field ## _Pos) & field ## _Msk) + +/** + \brief Mask and shift a register value to extract a bit filed value. + \param[in] field Name of the register bit field. + \param[in] value Value of register. This parameter is interpreted as an uint32_t type. + \return Masked and shifted bit field value. +*/ +#define _FLD2VAL(field, value) (((uint32_t)(value) & field ## _Msk) >> field ## _Pos) + +/*@} end of group CMSIS_core_bitfield */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_core_base Core Definitions + \brief Definitions for base addresses, unions, and structures. + @{ + */ + +/* Memory mapping of Core Hardware */ +#define SCS_BASE (0xE000E000UL) /*!< System Control Space Base Address */ +#define SysTick_BASE (SCS_BASE + 0x0010UL) /*!< SysTick Base Address */ +#define NVIC_BASE (SCS_BASE + 0x0100UL) /*!< NVIC Base Address */ +#define SCB_BASE (SCS_BASE + 0x0D00UL) /*!< System Control Block Base Address */ + +#define SCB ((SCB_Type *) SCB_BASE ) /*!< SCB configuration struct */ +#define SysTick ((SysTick_Type *) SysTick_BASE ) /*!< SysTick configuration struct */ +#define NVIC ((NVIC_Type *) NVIC_BASE ) /*!< NVIC configuration struct */ + + +/*@} */ + + + +/******************************************************************************* + * Hardware Abstraction Layer + Core Function Interface contains: + - Core NVIC Functions + - Core SysTick Functions + - Core Register Access Functions + ******************************************************************************/ +/** + \defgroup CMSIS_Core_FunctionInterface Functions and Instructions Reference +*/ + + + +/* ########################## NVIC functions #################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_NVICFunctions NVIC Functions + \brief Functions that manage interrupts and exceptions via the NVIC. 
+ @{ + */ + +#ifdef CMSIS_NVIC_VIRTUAL + #ifndef CMSIS_NVIC_VIRTUAL_HEADER_FILE + #define CMSIS_NVIC_VIRTUAL_HEADER_FILE "cmsis_nvic_virtual.h" + #endif + #include CMSIS_NVIC_VIRTUAL_HEADER_FILE +#else + #define NVIC_SetPriorityGrouping __NVIC_SetPriorityGrouping + #define NVIC_GetPriorityGrouping __NVIC_GetPriorityGrouping + #define NVIC_EnableIRQ __NVIC_EnableIRQ + #define NVIC_GetEnableIRQ __NVIC_GetEnableIRQ + #define NVIC_DisableIRQ __NVIC_DisableIRQ + #define NVIC_GetPendingIRQ __NVIC_GetPendingIRQ + #define NVIC_SetPendingIRQ __NVIC_SetPendingIRQ + #define NVIC_ClearPendingIRQ __NVIC_ClearPendingIRQ +/*#define NVIC_GetActive __NVIC_GetActive not available for Cortex-M0 */ + #define NVIC_SetPriority __NVIC_SetPriority + #define NVIC_GetPriority __NVIC_GetPriority + #define NVIC_SystemReset __NVIC_SystemReset +#endif /* CMSIS_NVIC_VIRTUAL */ + +#ifdef CMSIS_VECTAB_VIRTUAL + #ifndef CMSIS_VECTAB_VIRTUAL_HEADER_FILE + #define CMSIS_VECTAB_VIRTUAL_HEADER_FILE "cmsis_vectab_virtual.h" + #endif + #include CMSIS_VECTAB_VIRTUAL_HEADER_FILE +#else + #define NVIC_SetVector __NVIC_SetVector + #define NVIC_GetVector __NVIC_GetVector +#endif /* (CMSIS_VECTAB_VIRTUAL) */ + +#define NVIC_USER_IRQ_OFFSET 16 + + +/* The following EXC_RETURN values are saved the LR on exception entry */ +#define EXC_RETURN_HANDLER (0xFFFFFFF1UL) /* return to Handler mode, uses MSP after return */ +#define EXC_RETURN_THREAD_MSP (0xFFFFFFF9UL) /* return to Thread mode, uses MSP after return */ +#define EXC_RETURN_THREAD_PSP (0xFFFFFFFDUL) /* return to Thread mode, uses PSP after return */ + + +/* Interrupt Priorities are WORD accessible only under Armv6-M */ +/* The following MACROS handle generation of the register offset and byte masks */ +#define _BIT_SHIFT(IRQn) ( ((((uint32_t)(int32_t)(IRQn)) ) & 0x03UL) * 8UL) +#define _SHP_IDX(IRQn) ( (((((uint32_t)(int32_t)(IRQn)) & 0x0FUL)-8UL) >> 2UL) ) +#define _IP_IDX(IRQn) ( (((uint32_t)(int32_t)(IRQn)) >> 2UL) ) + +#define __NVIC_SetPriorityGrouping(X) (void)(X) +#define __NVIC_GetPriorityGrouping() (0U) + +/** + \brief Enable Interrupt + \details Enables a device specific interrupt in the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_EnableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ISER[0U] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Get Interrupt Enable status + \details Returns a device specific interrupt enable status from the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt is not enabled. + \return 1 Interrupt is enabled. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t __NVIC_GetEnableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->ISER[0U] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Disable Interrupt + \details Disables a device specific interrupt in the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_DisableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ICER[0U] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + __DSB(); + __ISB(); + } +} + + +/** + \brief Get Pending Interrupt + \details Reads the NVIC pending register and returns the pending bit for the specified device specific interrupt. 
+ \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt status is not pending. + \return 1 Interrupt status is pending. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t __NVIC_GetPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->ISPR[0U] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Set Pending Interrupt + \details Sets the pending bit of a device specific interrupt in the NVIC pending register. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_SetPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ISPR[0U] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Clear Pending Interrupt + \details Clears the pending bit of a device specific interrupt in the NVIC pending register. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_ClearPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ICPR[0U] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Set Interrupt Priority + \details Sets the priority of a device specific interrupt or a processor exception. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \param [in] priority Priority to set. + \note The priority cannot be set for every processor exception. + */ +__STATIC_INLINE void __NVIC_SetPriority(IRQn_Type IRQn, uint32_t priority) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->IP[_IP_IDX(IRQn)] = ((uint32_t)(NVIC->IP[_IP_IDX(IRQn)] & ~(0xFFUL << _BIT_SHIFT(IRQn))) | + (((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL) << _BIT_SHIFT(IRQn))); + } + else + { + SCB->SHP[_SHP_IDX(IRQn)] = ((uint32_t)(SCB->SHP[_SHP_IDX(IRQn)] & ~(0xFFUL << _BIT_SHIFT(IRQn))) | + (((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL) << _BIT_SHIFT(IRQn))); + } +} + + +/** + \brief Get Interrupt Priority + \details Reads the priority of a device specific interrupt or a processor exception. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \return Interrupt Priority. + Value is aligned automatically to the implemented priority bits of the microcontroller. + */ +__STATIC_INLINE uint32_t __NVIC_GetPriority(IRQn_Type IRQn) +{ + + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->IP[ _IP_IDX(IRQn)] >> _BIT_SHIFT(IRQn) ) & (uint32_t)0xFFUL) >> (8U - __NVIC_PRIO_BITS))); + } + else + { + return((uint32_t)(((SCB->SHP[_SHP_IDX(IRQn)] >> _BIT_SHIFT(IRQn) ) & (uint32_t)0xFFUL) >> (8U - __NVIC_PRIO_BITS))); + } +} + + +/** + \brief Encode Priority + \details Encodes the priority for an interrupt with the given priority group, + preemptive priority value, and subpriority value. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS), the smallest possible priority group is set. + \param [in] PriorityGroup Used priority group. + \param [in] PreemptPriority Preemptive priority value (starting from 0). + \param [in] SubPriority Subpriority value (starting from 0). + \return Encoded priority. Value can be used in the function \ref NVIC_SetPriority(). 
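+  \par     Example (editorial sketch, not part of the original CMSIS text)
+           TIM1_IRQn is a placeholder device interrupt. On Cortex-M0 the grouping
+           returned by NVIC_GetPriorityGrouping() is always 0.
+  \code
+           uint32_t prio = NVIC_EncodePriority(NVIC_GetPriorityGrouping(), 2UL, 0UL);
+           NVIC_SetPriority(TIM1_IRQn, prio);   /* TIM1_IRQn is a placeholder IRQ number */
+  \endcode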
+ */ +__STATIC_INLINE uint32_t NVIC_EncodePriority (uint32_t PriorityGroup, uint32_t PreemptPriority, uint32_t SubPriority) +{ + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + uint32_t PreemptPriorityBits; + uint32_t SubPriorityBits; + + PreemptPriorityBits = ((7UL - PriorityGroupTmp) > (uint32_t)(__NVIC_PRIO_BITS)) ? (uint32_t)(__NVIC_PRIO_BITS) : (uint32_t)(7UL - PriorityGroupTmp); + SubPriorityBits = ((PriorityGroupTmp + (uint32_t)(__NVIC_PRIO_BITS)) < (uint32_t)7UL) ? (uint32_t)0UL : (uint32_t)((PriorityGroupTmp - 7UL) + (uint32_t)(__NVIC_PRIO_BITS)); + + return ( + ((PreemptPriority & (uint32_t)((1UL << (PreemptPriorityBits)) - 1UL)) << SubPriorityBits) | + ((SubPriority & (uint32_t)((1UL << (SubPriorityBits )) - 1UL))) + ); +} + + +/** + \brief Decode Priority + \details Decodes an interrupt priority value with a given priority group to + preemptive priority value and subpriority value. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS) the smallest possible priority group is set. + \param [in] Priority Priority value, which can be retrieved with the function \ref NVIC_GetPriority(). + \param [in] PriorityGroup Used priority group. + \param [out] pPreemptPriority Preemptive priority value (starting from 0). + \param [out] pSubPriority Subpriority value (starting from 0). + */ +__STATIC_INLINE void NVIC_DecodePriority (uint32_t Priority, uint32_t PriorityGroup, uint32_t* const pPreemptPriority, uint32_t* const pSubPriority) +{ + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + uint32_t PreemptPriorityBits; + uint32_t SubPriorityBits; + + PreemptPriorityBits = ((7UL - PriorityGroupTmp) > (uint32_t)(__NVIC_PRIO_BITS)) ? (uint32_t)(__NVIC_PRIO_BITS) : (uint32_t)(7UL - PriorityGroupTmp); + SubPriorityBits = ((PriorityGroupTmp + (uint32_t)(__NVIC_PRIO_BITS)) < (uint32_t)7UL) ? (uint32_t)0UL : (uint32_t)((PriorityGroupTmp - 7UL) + (uint32_t)(__NVIC_PRIO_BITS)); + + *pPreemptPriority = (Priority >> SubPriorityBits) & (uint32_t)((1UL << (PreemptPriorityBits)) - 1UL); + *pSubPriority = (Priority ) & (uint32_t)((1UL << (SubPriorityBits )) - 1UL); +} + + + +/** + \brief Set Interrupt Vector + \details Sets an interrupt vector in SRAM based interrupt vector table. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + Address 0 must be mapped to SRAM. + \param [in] IRQn Interrupt number + \param [in] vector Address of interrupt handler function + */ +__STATIC_INLINE void __NVIC_SetVector(IRQn_Type IRQn, uint32_t vector) +{ + uint32_t *vectors = (uint32_t *)0x0U; + vectors[(int32_t)IRQn + NVIC_USER_IRQ_OFFSET] = vector; +} + + +/** + \brief Get Interrupt Vector + \details Reads an interrupt vector from interrupt vector table. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \return Address of interrupt handler function + */ +__STATIC_INLINE uint32_t __NVIC_GetVector(IRQn_Type IRQn) +{ + uint32_t *vectors = (uint32_t *)0x0U; + return vectors[(int32_t)IRQn + NVIC_USER_IRQ_OFFSET]; +} + + +/** + \brief System Reset + \details Initiates a system reset request to reset the MCU. 
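+  \par     Example (editorial sketch, not part of the original CMSIS text)
+           Typically invoked from fatal-error handling; the call does not return.
+  \code
+           if (unrecoverable_fault)   /* 'unrecoverable_fault' is a placeholder condition */
+           {
+             NVIC_SystemReset();
+           }
+  \endcode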
+ */ +__NO_RETURN __STATIC_INLINE void __NVIC_SystemReset(void) +{ + __DSB(); /* Ensure all outstanding memory accesses included + buffered write are completed before reset */ + SCB->AIRCR = ((0x5FAUL << SCB_AIRCR_VECTKEY_Pos) | + SCB_AIRCR_SYSRESETREQ_Msk); + __DSB(); /* Ensure completion of memory access */ + + for(;;) /* wait until reset */ + { + __NOP(); + } +} + +/*@} end of CMSIS_Core_NVICFunctions */ + + +/* ########################## FPU functions #################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_FpuFunctions FPU Functions + \brief Function that provides FPU type. + @{ + */ + +/** + \brief get FPU type + \details returns the FPU type + \returns + - \b 0: No FPU + - \b 1: Single precision FPU + - \b 2: Double + Single precision FPU + */ +__STATIC_INLINE uint32_t SCB_GetFPUType(void) +{ + return 0U; /* No FPU */ +} + + +/*@} end of CMSIS_Core_FpuFunctions */ + + + +/* ################################## SysTick function ############################################ */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_SysTickFunctions SysTick Functions + \brief Functions that configure the System. + @{ + */ + +#if defined (__Vendor_SysTickConfig) && (__Vendor_SysTickConfig == 0U) + +/** + \brief System Tick Configuration + \details Initializes the System Timer and its interrupt, and starts the System Tick Timer. + Counter is in free running mode to generate periodic interrupts. + \param [in] ticks Number of ticks between two interrupts. + \return 0 Function succeeded. + \return 1 Function failed. + \note When the variable __Vendor_SysTickConfig is set to 1, then the + function SysTick_Config is not included. In this case, the file device.h + must contain a vendor-specific implementation of this function. + */ +__STATIC_INLINE uint32_t SysTick_Config(uint32_t ticks) +{ + if ((ticks - 1UL) > SysTick_LOAD_RELOAD_Msk) + { + return (1UL); /* Reload value impossible */ + } + + SysTick->LOAD = (uint32_t)(ticks - 1UL); /* set reload register */ + NVIC_SetPriority (SysTick_IRQn, (1UL << __NVIC_PRIO_BITS) - 1UL); /* set Priority for Systick Interrupt */ + SysTick->VAL = 0UL; /* Load the SysTick Counter Value */ + SysTick->CTRL = SysTick_CTRL_CLKSOURCE_Msk | + SysTick_CTRL_TICKINT_Msk | + SysTick_CTRL_ENABLE_Msk; /* Enable SysTick IRQ and SysTick Timer */ + return (0UL); /* Function successful */ +} + +#endif + +/*@} end of CMSIS_Core_SysTickFunctions */ + + + + +#ifdef __cplusplus +} +#endif + +#endif /* __CORE_CM0_H_DEPENDANT */ + +#endif /* __CMSIS_GENERIC */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Drivers/CMSIS/Include/core_cm0plus.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Drivers/CMSIS/Include/core_cm0plus.h new file mode 100644 index 0000000..b9377e8 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Drivers/CMSIS/Include/core_cm0plus.h @@ -0,0 +1,1083 @@ +/**************************************************************************//** + * @file core_cm0plus.h + * @brief CMSIS Cortex-M0+ Core Peripheral Access Layer Header File + * @version V5.0.6 + * @date 28. May 2018 + ******************************************************************************/ +/* + * Copyright (c) 2009-2018 Arm Limited. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#if defined ( __ICCARM__ ) + #pragma system_include /* treat file as system include file for MISRA check */ +#elif defined (__clang__) + #pragma clang system_header /* treat file as system include file */ +#endif + +#ifndef __CORE_CM0PLUS_H_GENERIC +#define __CORE_CM0PLUS_H_GENERIC + +#include + +#ifdef __cplusplus + extern "C" { +#endif + +/** + \page CMSIS_MISRA_Exceptions MISRA-C:2004 Compliance Exceptions + CMSIS violates the following MISRA-C:2004 rules: + + \li Required Rule 8.5, object/function definition in header file.
+ Function definitions in header files are used to allow 'inlining'. + + \li Required Rule 18.4, declaration of union type or object of union type: '{...}'.
+ Unions are used for effective representation of core registers. + + \li Advisory Rule 19.7, Function-like macro defined.
+ Function-like macros are used to allow more efficient code. + */ + + +/******************************************************************************* + * CMSIS definitions + ******************************************************************************/ +/** + \ingroup Cortex-M0+ + @{ + */ + +#include "cmsis_version.h" + +/* CMSIS CM0+ definitions */ +#define __CM0PLUS_CMSIS_VERSION_MAIN (__CM_CMSIS_VERSION_MAIN) /*!< \deprecated [31:16] CMSIS HAL main version */ +#define __CM0PLUS_CMSIS_VERSION_SUB (__CM_CMSIS_VERSION_SUB) /*!< \deprecated [15:0] CMSIS HAL sub version */ +#define __CM0PLUS_CMSIS_VERSION ((__CM0PLUS_CMSIS_VERSION_MAIN << 16U) | \ + __CM0PLUS_CMSIS_VERSION_SUB ) /*!< \deprecated CMSIS HAL version number */ + +#define __CORTEX_M (0U) /*!< Cortex-M Core */ + +/** __FPU_USED indicates whether an FPU is used or not. + This core does not support an FPU at all +*/ +#define __FPU_USED 0U + +#if defined ( __CC_ARM ) + #if defined __TARGET_FPU_VFP + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined (__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050) + #if defined __ARM_PCS_VFP + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined ( __GNUC__ ) + #if defined (__VFP_FP__) && !defined(__SOFTFP__) + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined ( __ICCARM__ ) + #if defined __ARMVFP__ + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined ( __TI_ARM__ ) + #if defined __TI_VFP_SUPPORT__ + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined ( __TASKING__ ) + #if defined __FPU_VFP__ + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined ( __CSMC__ ) + #if ( __CSMC__ & 0x400U) + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#endif + +#include "cmsis_compiler.h" /* CMSIS compiler specific defines */ + + +#ifdef __cplusplus +} +#endif + +#endif /* __CORE_CM0PLUS_H_GENERIC */ + +#ifndef __CMSIS_GENERIC + +#ifndef __CORE_CM0PLUS_H_DEPENDANT +#define __CORE_CM0PLUS_H_DEPENDANT + +#ifdef __cplusplus + extern "C" { +#endif + +/* check device defines and use defaults */ +#if defined __CHECK_DEVICE_DEFINES + #ifndef __CM0PLUS_REV + #define __CM0PLUS_REV 0x0000U + #warning "__CM0PLUS_REV not defined in device header file; using default!" + #endif + + #ifndef __MPU_PRESENT + #define __MPU_PRESENT 0U + #warning "__MPU_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __VTOR_PRESENT + #define __VTOR_PRESENT 0U + #warning "__VTOR_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __NVIC_PRIO_BITS + #define __NVIC_PRIO_BITS 2U + #warning "__NVIC_PRIO_BITS not defined in device header file; using default!" + #endif + + #ifndef __Vendor_SysTickConfig + #define __Vendor_SysTickConfig 0U + #warning "__Vendor_SysTickConfig not defined in device header file; using default!" + #endif +#endif + +/* IO definitions (access restrictions to peripheral registers) */ +/** + \defgroup CMSIS_glob_defs CMSIS Global Defines + + IO Type Qualifiers are used + \li to specify the access to peripheral variables. + \li for automatic generation of peripheral register debug information. 
+*/ +#ifdef __cplusplus + #define __I volatile /*!< Defines 'read only' permissions */ +#else + #define __I volatile const /*!< Defines 'read only' permissions */ +#endif +#define __O volatile /*!< Defines 'write only' permissions */ +#define __IO volatile /*!< Defines 'read / write' permissions */ + +/* following defines should be used for structure members */ +#define __IM volatile const /*! Defines 'read only' structure member permissions */ +#define __OM volatile /*! Defines 'write only' structure member permissions */ +#define __IOM volatile /*! Defines 'read / write' structure member permissions */ + +/*@} end of group Cortex-M0+ */ + + + +/******************************************************************************* + * Register Abstraction + Core Register contain: + - Core Register + - Core NVIC Register + - Core SCB Register + - Core SysTick Register + - Core MPU Register + ******************************************************************************/ +/** + \defgroup CMSIS_core_register Defines and Type Definitions + \brief Type definitions and defines for Cortex-M processor based devices. +*/ + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_CORE Status and Control Registers + \brief Core Register type definitions. + @{ + */ + +/** + \brief Union type to access the Application Program Status Register (APSR). + */ +typedef union +{ + struct + { + uint32_t _reserved0:28; /*!< bit: 0..27 Reserved */ + uint32_t V:1; /*!< bit: 28 Overflow condition code flag */ + uint32_t C:1; /*!< bit: 29 Carry condition code flag */ + uint32_t Z:1; /*!< bit: 30 Zero condition code flag */ + uint32_t N:1; /*!< bit: 31 Negative condition code flag */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} APSR_Type; + +/* APSR Register Definitions */ +#define APSR_N_Pos 31U /*!< APSR: N Position */ +#define APSR_N_Msk (1UL << APSR_N_Pos) /*!< APSR: N Mask */ + +#define APSR_Z_Pos 30U /*!< APSR: Z Position */ +#define APSR_Z_Msk (1UL << APSR_Z_Pos) /*!< APSR: Z Mask */ + +#define APSR_C_Pos 29U /*!< APSR: C Position */ +#define APSR_C_Msk (1UL << APSR_C_Pos) /*!< APSR: C Mask */ + +#define APSR_V_Pos 28U /*!< APSR: V Position */ +#define APSR_V_Msk (1UL << APSR_V_Pos) /*!< APSR: V Mask */ + + +/** + \brief Union type to access the Interrupt Program Status Register (IPSR). + */ +typedef union +{ + struct + { + uint32_t ISR:9; /*!< bit: 0.. 8 Exception number */ + uint32_t _reserved0:23; /*!< bit: 9..31 Reserved */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} IPSR_Type; + +/* IPSR Register Definitions */ +#define IPSR_ISR_Pos 0U /*!< IPSR: ISR Position */ +#define IPSR_ISR_Msk (0x1FFUL /*<< IPSR_ISR_Pos*/) /*!< IPSR: ISR Mask */ + + +/** + \brief Union type to access the Special-Purpose Program Status Registers (xPSR). + */ +typedef union +{ + struct + { + uint32_t ISR:9; /*!< bit: 0.. 
8 Exception number */ + uint32_t _reserved0:15; /*!< bit: 9..23 Reserved */ + uint32_t T:1; /*!< bit: 24 Thumb bit (read 0) */ + uint32_t _reserved1:3; /*!< bit: 25..27 Reserved */ + uint32_t V:1; /*!< bit: 28 Overflow condition code flag */ + uint32_t C:1; /*!< bit: 29 Carry condition code flag */ + uint32_t Z:1; /*!< bit: 30 Zero condition code flag */ + uint32_t N:1; /*!< bit: 31 Negative condition code flag */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} xPSR_Type; + +/* xPSR Register Definitions */ +#define xPSR_N_Pos 31U /*!< xPSR: N Position */ +#define xPSR_N_Msk (1UL << xPSR_N_Pos) /*!< xPSR: N Mask */ + +#define xPSR_Z_Pos 30U /*!< xPSR: Z Position */ +#define xPSR_Z_Msk (1UL << xPSR_Z_Pos) /*!< xPSR: Z Mask */ + +#define xPSR_C_Pos 29U /*!< xPSR: C Position */ +#define xPSR_C_Msk (1UL << xPSR_C_Pos) /*!< xPSR: C Mask */ + +#define xPSR_V_Pos 28U /*!< xPSR: V Position */ +#define xPSR_V_Msk (1UL << xPSR_V_Pos) /*!< xPSR: V Mask */ + +#define xPSR_T_Pos 24U /*!< xPSR: T Position */ +#define xPSR_T_Msk (1UL << xPSR_T_Pos) /*!< xPSR: T Mask */ + +#define xPSR_ISR_Pos 0U /*!< xPSR: ISR Position */ +#define xPSR_ISR_Msk (0x1FFUL /*<< xPSR_ISR_Pos*/) /*!< xPSR: ISR Mask */ + + +/** + \brief Union type to access the Control Registers (CONTROL). + */ +typedef union +{ + struct + { + uint32_t nPRIV:1; /*!< bit: 0 Execution privilege in Thread mode */ + uint32_t SPSEL:1; /*!< bit: 1 Stack to be used */ + uint32_t _reserved1:30; /*!< bit: 2..31 Reserved */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} CONTROL_Type; + +/* CONTROL Register Definitions */ +#define CONTROL_SPSEL_Pos 1U /*!< CONTROL: SPSEL Position */ +#define CONTROL_SPSEL_Msk (1UL << CONTROL_SPSEL_Pos) /*!< CONTROL: SPSEL Mask */ + +#define CONTROL_nPRIV_Pos 0U /*!< CONTROL: nPRIV Position */ +#define CONTROL_nPRIV_Msk (1UL /*<< CONTROL_nPRIV_Pos*/) /*!< CONTROL: nPRIV Mask */ + +/*@} end of group CMSIS_CORE */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_NVIC Nested Vectored Interrupt Controller (NVIC) + \brief Type definitions for the NVIC Registers + @{ + */ + +/** + \brief Structure type to access the Nested Vectored Interrupt Controller (NVIC). + */ +typedef struct +{ + __IOM uint32_t ISER[1U]; /*!< Offset: 0x000 (R/W) Interrupt Set Enable Register */ + uint32_t RESERVED0[31U]; + __IOM uint32_t ICER[1U]; /*!< Offset: 0x080 (R/W) Interrupt Clear Enable Register */ + uint32_t RSERVED1[31U]; + __IOM uint32_t ISPR[1U]; /*!< Offset: 0x100 (R/W) Interrupt Set Pending Register */ + uint32_t RESERVED2[31U]; + __IOM uint32_t ICPR[1U]; /*!< Offset: 0x180 (R/W) Interrupt Clear Pending Register */ + uint32_t RESERVED3[31U]; + uint32_t RESERVED4[64U]; + __IOM uint32_t IP[8U]; /*!< Offset: 0x300 (R/W) Interrupt Priority Register */ +} NVIC_Type; + +/*@} end of group CMSIS_NVIC */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SCB System Control Block (SCB) + \brief Type definitions for the System Control Block Registers + @{ + */ + +/** + \brief Structure type to access the System Control Block (SCB). 
+ */ +typedef struct +{ + __IM uint32_t CPUID; /*!< Offset: 0x000 (R/ ) CPUID Base Register */ + __IOM uint32_t ICSR; /*!< Offset: 0x004 (R/W) Interrupt Control and State Register */ +#if defined (__VTOR_PRESENT) && (__VTOR_PRESENT == 1U) + __IOM uint32_t VTOR; /*!< Offset: 0x008 (R/W) Vector Table Offset Register */ +#else + uint32_t RESERVED0; +#endif + __IOM uint32_t AIRCR; /*!< Offset: 0x00C (R/W) Application Interrupt and Reset Control Register */ + __IOM uint32_t SCR; /*!< Offset: 0x010 (R/W) System Control Register */ + __IOM uint32_t CCR; /*!< Offset: 0x014 (R/W) Configuration Control Register */ + uint32_t RESERVED1; + __IOM uint32_t SHP[2U]; /*!< Offset: 0x01C (R/W) System Handlers Priority Registers. [0] is RESERVED */ + __IOM uint32_t SHCSR; /*!< Offset: 0x024 (R/W) System Handler Control and State Register */ +} SCB_Type; + +/* SCB CPUID Register Definitions */ +#define SCB_CPUID_IMPLEMENTER_Pos 24U /*!< SCB CPUID: IMPLEMENTER Position */ +#define SCB_CPUID_IMPLEMENTER_Msk (0xFFUL << SCB_CPUID_IMPLEMENTER_Pos) /*!< SCB CPUID: IMPLEMENTER Mask */ + +#define SCB_CPUID_VARIANT_Pos 20U /*!< SCB CPUID: VARIANT Position */ +#define SCB_CPUID_VARIANT_Msk (0xFUL << SCB_CPUID_VARIANT_Pos) /*!< SCB CPUID: VARIANT Mask */ + +#define SCB_CPUID_ARCHITECTURE_Pos 16U /*!< SCB CPUID: ARCHITECTURE Position */ +#define SCB_CPUID_ARCHITECTURE_Msk (0xFUL << SCB_CPUID_ARCHITECTURE_Pos) /*!< SCB CPUID: ARCHITECTURE Mask */ + +#define SCB_CPUID_PARTNO_Pos 4U /*!< SCB CPUID: PARTNO Position */ +#define SCB_CPUID_PARTNO_Msk (0xFFFUL << SCB_CPUID_PARTNO_Pos) /*!< SCB CPUID: PARTNO Mask */ + +#define SCB_CPUID_REVISION_Pos 0U /*!< SCB CPUID: REVISION Position */ +#define SCB_CPUID_REVISION_Msk (0xFUL /*<< SCB_CPUID_REVISION_Pos*/) /*!< SCB CPUID: REVISION Mask */ + +/* SCB Interrupt Control State Register Definitions */ +#define SCB_ICSR_NMIPENDSET_Pos 31U /*!< SCB ICSR: NMIPENDSET Position */ +#define SCB_ICSR_NMIPENDSET_Msk (1UL << SCB_ICSR_NMIPENDSET_Pos) /*!< SCB ICSR: NMIPENDSET Mask */ + +#define SCB_ICSR_PENDSVSET_Pos 28U /*!< SCB ICSR: PENDSVSET Position */ +#define SCB_ICSR_PENDSVSET_Msk (1UL << SCB_ICSR_PENDSVSET_Pos) /*!< SCB ICSR: PENDSVSET Mask */ + +#define SCB_ICSR_PENDSVCLR_Pos 27U /*!< SCB ICSR: PENDSVCLR Position */ +#define SCB_ICSR_PENDSVCLR_Msk (1UL << SCB_ICSR_PENDSVCLR_Pos) /*!< SCB ICSR: PENDSVCLR Mask */ + +#define SCB_ICSR_PENDSTSET_Pos 26U /*!< SCB ICSR: PENDSTSET Position */ +#define SCB_ICSR_PENDSTSET_Msk (1UL << SCB_ICSR_PENDSTSET_Pos) /*!< SCB ICSR: PENDSTSET Mask */ + +#define SCB_ICSR_PENDSTCLR_Pos 25U /*!< SCB ICSR: PENDSTCLR Position */ +#define SCB_ICSR_PENDSTCLR_Msk (1UL << SCB_ICSR_PENDSTCLR_Pos) /*!< SCB ICSR: PENDSTCLR Mask */ + +#define SCB_ICSR_ISRPREEMPT_Pos 23U /*!< SCB ICSR: ISRPREEMPT Position */ +#define SCB_ICSR_ISRPREEMPT_Msk (1UL << SCB_ICSR_ISRPREEMPT_Pos) /*!< SCB ICSR: ISRPREEMPT Mask */ + +#define SCB_ICSR_ISRPENDING_Pos 22U /*!< SCB ICSR: ISRPENDING Position */ +#define SCB_ICSR_ISRPENDING_Msk (1UL << SCB_ICSR_ISRPENDING_Pos) /*!< SCB ICSR: ISRPENDING Mask */ + +#define SCB_ICSR_VECTPENDING_Pos 12U /*!< SCB ICSR: VECTPENDING Position */ +#define SCB_ICSR_VECTPENDING_Msk (0x1FFUL << SCB_ICSR_VECTPENDING_Pos) /*!< SCB ICSR: VECTPENDING Mask */ + +#define SCB_ICSR_VECTACTIVE_Pos 0U /*!< SCB ICSR: VECTACTIVE Position */ +#define SCB_ICSR_VECTACTIVE_Msk (0x1FFUL /*<< SCB_ICSR_VECTACTIVE_Pos*/) /*!< SCB ICSR: VECTACTIVE Mask */ + +#if defined (__VTOR_PRESENT) && (__VTOR_PRESENT == 1U) +/* SCB Interrupt Control State Register Definitions */ +#define 
SCB_VTOR_TBLOFF_Pos 8U /*!< SCB VTOR: TBLOFF Position */ +#define SCB_VTOR_TBLOFF_Msk (0xFFFFFFUL << SCB_VTOR_TBLOFF_Pos) /*!< SCB VTOR: TBLOFF Mask */ +#endif + +/* SCB Application Interrupt and Reset Control Register Definitions */ +#define SCB_AIRCR_VECTKEY_Pos 16U /*!< SCB AIRCR: VECTKEY Position */ +#define SCB_AIRCR_VECTKEY_Msk (0xFFFFUL << SCB_AIRCR_VECTKEY_Pos) /*!< SCB AIRCR: VECTKEY Mask */ + +#define SCB_AIRCR_VECTKEYSTAT_Pos 16U /*!< SCB AIRCR: VECTKEYSTAT Position */ +#define SCB_AIRCR_VECTKEYSTAT_Msk (0xFFFFUL << SCB_AIRCR_VECTKEYSTAT_Pos) /*!< SCB AIRCR: VECTKEYSTAT Mask */ + +#define SCB_AIRCR_ENDIANESS_Pos 15U /*!< SCB AIRCR: ENDIANESS Position */ +#define SCB_AIRCR_ENDIANESS_Msk (1UL << SCB_AIRCR_ENDIANESS_Pos) /*!< SCB AIRCR: ENDIANESS Mask */ + +#define SCB_AIRCR_SYSRESETREQ_Pos 2U /*!< SCB AIRCR: SYSRESETREQ Position */ +#define SCB_AIRCR_SYSRESETREQ_Msk (1UL << SCB_AIRCR_SYSRESETREQ_Pos) /*!< SCB AIRCR: SYSRESETREQ Mask */ + +#define SCB_AIRCR_VECTCLRACTIVE_Pos 1U /*!< SCB AIRCR: VECTCLRACTIVE Position */ +#define SCB_AIRCR_VECTCLRACTIVE_Msk (1UL << SCB_AIRCR_VECTCLRACTIVE_Pos) /*!< SCB AIRCR: VECTCLRACTIVE Mask */ + +/* SCB System Control Register Definitions */ +#define SCB_SCR_SEVONPEND_Pos 4U /*!< SCB SCR: SEVONPEND Position */ +#define SCB_SCR_SEVONPEND_Msk (1UL << SCB_SCR_SEVONPEND_Pos) /*!< SCB SCR: SEVONPEND Mask */ + +#define SCB_SCR_SLEEPDEEP_Pos 2U /*!< SCB SCR: SLEEPDEEP Position */ +#define SCB_SCR_SLEEPDEEP_Msk (1UL << SCB_SCR_SLEEPDEEP_Pos) /*!< SCB SCR: SLEEPDEEP Mask */ + +#define SCB_SCR_SLEEPONEXIT_Pos 1U /*!< SCB SCR: SLEEPONEXIT Position */ +#define SCB_SCR_SLEEPONEXIT_Msk (1UL << SCB_SCR_SLEEPONEXIT_Pos) /*!< SCB SCR: SLEEPONEXIT Mask */ + +/* SCB Configuration Control Register Definitions */ +#define SCB_CCR_STKALIGN_Pos 9U /*!< SCB CCR: STKALIGN Position */ +#define SCB_CCR_STKALIGN_Msk (1UL << SCB_CCR_STKALIGN_Pos) /*!< SCB CCR: STKALIGN Mask */ + +#define SCB_CCR_UNALIGN_TRP_Pos 3U /*!< SCB CCR: UNALIGN_TRP Position */ +#define SCB_CCR_UNALIGN_TRP_Msk (1UL << SCB_CCR_UNALIGN_TRP_Pos) /*!< SCB CCR: UNALIGN_TRP Mask */ + +/* SCB System Handler Control and State Register Definitions */ +#define SCB_SHCSR_SVCALLPENDED_Pos 15U /*!< SCB SHCSR: SVCALLPENDED Position */ +#define SCB_SHCSR_SVCALLPENDED_Msk (1UL << SCB_SHCSR_SVCALLPENDED_Pos) /*!< SCB SHCSR: SVCALLPENDED Mask */ + +/*@} end of group CMSIS_SCB */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SysTick System Tick Timer (SysTick) + \brief Type definitions for the System Timer Registers. + @{ + */ + +/** + \brief Structure type to access the System Timer (SysTick). 
+ */ +typedef struct +{ + __IOM uint32_t CTRL; /*!< Offset: 0x000 (R/W) SysTick Control and Status Register */ + __IOM uint32_t LOAD; /*!< Offset: 0x004 (R/W) SysTick Reload Value Register */ + __IOM uint32_t VAL; /*!< Offset: 0x008 (R/W) SysTick Current Value Register */ + __IM uint32_t CALIB; /*!< Offset: 0x00C (R/ ) SysTick Calibration Register */ +} SysTick_Type; + +/* SysTick Control / Status Register Definitions */ +#define SysTick_CTRL_COUNTFLAG_Pos 16U /*!< SysTick CTRL: COUNTFLAG Position */ +#define SysTick_CTRL_COUNTFLAG_Msk (1UL << SysTick_CTRL_COUNTFLAG_Pos) /*!< SysTick CTRL: COUNTFLAG Mask */ + +#define SysTick_CTRL_CLKSOURCE_Pos 2U /*!< SysTick CTRL: CLKSOURCE Position */ +#define SysTick_CTRL_CLKSOURCE_Msk (1UL << SysTick_CTRL_CLKSOURCE_Pos) /*!< SysTick CTRL: CLKSOURCE Mask */ + +#define SysTick_CTRL_TICKINT_Pos 1U /*!< SysTick CTRL: TICKINT Position */ +#define SysTick_CTRL_TICKINT_Msk (1UL << SysTick_CTRL_TICKINT_Pos) /*!< SysTick CTRL: TICKINT Mask */ + +#define SysTick_CTRL_ENABLE_Pos 0U /*!< SysTick CTRL: ENABLE Position */ +#define SysTick_CTRL_ENABLE_Msk (1UL /*<< SysTick_CTRL_ENABLE_Pos*/) /*!< SysTick CTRL: ENABLE Mask */ + +/* SysTick Reload Register Definitions */ +#define SysTick_LOAD_RELOAD_Pos 0U /*!< SysTick LOAD: RELOAD Position */ +#define SysTick_LOAD_RELOAD_Msk (0xFFFFFFUL /*<< SysTick_LOAD_RELOAD_Pos*/) /*!< SysTick LOAD: RELOAD Mask */ + +/* SysTick Current Register Definitions */ +#define SysTick_VAL_CURRENT_Pos 0U /*!< SysTick VAL: CURRENT Position */ +#define SysTick_VAL_CURRENT_Msk (0xFFFFFFUL /*<< SysTick_VAL_CURRENT_Pos*/) /*!< SysTick VAL: CURRENT Mask */ + +/* SysTick Calibration Register Definitions */ +#define SysTick_CALIB_NOREF_Pos 31U /*!< SysTick CALIB: NOREF Position */ +#define SysTick_CALIB_NOREF_Msk (1UL << SysTick_CALIB_NOREF_Pos) /*!< SysTick CALIB: NOREF Mask */ + +#define SysTick_CALIB_SKEW_Pos 30U /*!< SysTick CALIB: SKEW Position */ +#define SysTick_CALIB_SKEW_Msk (1UL << SysTick_CALIB_SKEW_Pos) /*!< SysTick CALIB: SKEW Mask */ + +#define SysTick_CALIB_TENMS_Pos 0U /*!< SysTick CALIB: TENMS Position */ +#define SysTick_CALIB_TENMS_Msk (0xFFFFFFUL /*<< SysTick_CALIB_TENMS_Pos*/) /*!< SysTick CALIB: TENMS Mask */ + +/*@} end of group CMSIS_SysTick */ + +#if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_MPU Memory Protection Unit (MPU) + \brief Type definitions for the Memory Protection Unit (MPU) + @{ + */ + +/** + \brief Structure type to access the Memory Protection Unit (MPU). 
+ */ +typedef struct +{ + __IM uint32_t TYPE; /*!< Offset: 0x000 (R/ ) MPU Type Register */ + __IOM uint32_t CTRL; /*!< Offset: 0x004 (R/W) MPU Control Register */ + __IOM uint32_t RNR; /*!< Offset: 0x008 (R/W) MPU Region RNRber Register */ + __IOM uint32_t RBAR; /*!< Offset: 0x00C (R/W) MPU Region Base Address Register */ + __IOM uint32_t RASR; /*!< Offset: 0x010 (R/W) MPU Region Attribute and Size Register */ +} MPU_Type; + +#define MPU_TYPE_RALIASES 1U + +/* MPU Type Register Definitions */ +#define MPU_TYPE_IREGION_Pos 16U /*!< MPU TYPE: IREGION Position */ +#define MPU_TYPE_IREGION_Msk (0xFFUL << MPU_TYPE_IREGION_Pos) /*!< MPU TYPE: IREGION Mask */ + +#define MPU_TYPE_DREGION_Pos 8U /*!< MPU TYPE: DREGION Position */ +#define MPU_TYPE_DREGION_Msk (0xFFUL << MPU_TYPE_DREGION_Pos) /*!< MPU TYPE: DREGION Mask */ + +#define MPU_TYPE_SEPARATE_Pos 0U /*!< MPU TYPE: SEPARATE Position */ +#define MPU_TYPE_SEPARATE_Msk (1UL /*<< MPU_TYPE_SEPARATE_Pos*/) /*!< MPU TYPE: SEPARATE Mask */ + +/* MPU Control Register Definitions */ +#define MPU_CTRL_PRIVDEFENA_Pos 2U /*!< MPU CTRL: PRIVDEFENA Position */ +#define MPU_CTRL_PRIVDEFENA_Msk (1UL << MPU_CTRL_PRIVDEFENA_Pos) /*!< MPU CTRL: PRIVDEFENA Mask */ + +#define MPU_CTRL_HFNMIENA_Pos 1U /*!< MPU CTRL: HFNMIENA Position */ +#define MPU_CTRL_HFNMIENA_Msk (1UL << MPU_CTRL_HFNMIENA_Pos) /*!< MPU CTRL: HFNMIENA Mask */ + +#define MPU_CTRL_ENABLE_Pos 0U /*!< MPU CTRL: ENABLE Position */ +#define MPU_CTRL_ENABLE_Msk (1UL /*<< MPU_CTRL_ENABLE_Pos*/) /*!< MPU CTRL: ENABLE Mask */ + +/* MPU Region Number Register Definitions */ +#define MPU_RNR_REGION_Pos 0U /*!< MPU RNR: REGION Position */ +#define MPU_RNR_REGION_Msk (0xFFUL /*<< MPU_RNR_REGION_Pos*/) /*!< MPU RNR: REGION Mask */ + +/* MPU Region Base Address Register Definitions */ +#define MPU_RBAR_ADDR_Pos 8U /*!< MPU RBAR: ADDR Position */ +#define MPU_RBAR_ADDR_Msk (0xFFFFFFUL << MPU_RBAR_ADDR_Pos) /*!< MPU RBAR: ADDR Mask */ + +#define MPU_RBAR_VALID_Pos 4U /*!< MPU RBAR: VALID Position */ +#define MPU_RBAR_VALID_Msk (1UL << MPU_RBAR_VALID_Pos) /*!< MPU RBAR: VALID Mask */ + +#define MPU_RBAR_REGION_Pos 0U /*!< MPU RBAR: REGION Position */ +#define MPU_RBAR_REGION_Msk (0xFUL /*<< MPU_RBAR_REGION_Pos*/) /*!< MPU RBAR: REGION Mask */ + +/* MPU Region Attribute and Size Register Definitions */ +#define MPU_RASR_ATTRS_Pos 16U /*!< MPU RASR: MPU Region Attribute field Position */ +#define MPU_RASR_ATTRS_Msk (0xFFFFUL << MPU_RASR_ATTRS_Pos) /*!< MPU RASR: MPU Region Attribute field Mask */ + +#define MPU_RASR_XN_Pos 28U /*!< MPU RASR: ATTRS.XN Position */ +#define MPU_RASR_XN_Msk (1UL << MPU_RASR_XN_Pos) /*!< MPU RASR: ATTRS.XN Mask */ + +#define MPU_RASR_AP_Pos 24U /*!< MPU RASR: ATTRS.AP Position */ +#define MPU_RASR_AP_Msk (0x7UL << MPU_RASR_AP_Pos) /*!< MPU RASR: ATTRS.AP Mask */ + +#define MPU_RASR_TEX_Pos 19U /*!< MPU RASR: ATTRS.TEX Position */ +#define MPU_RASR_TEX_Msk (0x7UL << MPU_RASR_TEX_Pos) /*!< MPU RASR: ATTRS.TEX Mask */ + +#define MPU_RASR_S_Pos 18U /*!< MPU RASR: ATTRS.S Position */ +#define MPU_RASR_S_Msk (1UL << MPU_RASR_S_Pos) /*!< MPU RASR: ATTRS.S Mask */ + +#define MPU_RASR_C_Pos 17U /*!< MPU RASR: ATTRS.C Position */ +#define MPU_RASR_C_Msk (1UL << MPU_RASR_C_Pos) /*!< MPU RASR: ATTRS.C Mask */ + +#define MPU_RASR_B_Pos 16U /*!< MPU RASR: ATTRS.B Position */ +#define MPU_RASR_B_Msk (1UL << MPU_RASR_B_Pos) /*!< MPU RASR: ATTRS.B Mask */ + +#define MPU_RASR_SRD_Pos 8U /*!< MPU RASR: Sub-Region Disable Position */ +#define MPU_RASR_SRD_Msk (0xFFUL << MPU_RASR_SRD_Pos) /*!< MPU RASR: 
Sub-Region Disable Mask */ + +#define MPU_RASR_SIZE_Pos 1U /*!< MPU RASR: Region Size Field Position */ +#define MPU_RASR_SIZE_Msk (0x1FUL << MPU_RASR_SIZE_Pos) /*!< MPU RASR: Region Size Field Mask */ + +#define MPU_RASR_ENABLE_Pos 0U /*!< MPU RASR: Region enable bit Position */ +#define MPU_RASR_ENABLE_Msk (1UL /*<< MPU_RASR_ENABLE_Pos*/) /*!< MPU RASR: Region enable bit Disable Mask */ + +/*@} end of group CMSIS_MPU */ +#endif + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_CoreDebug Core Debug Registers (CoreDebug) + \brief Cortex-M0+ Core Debug Registers (DCB registers, SHCSR, and DFSR) are only accessible over DAP and not via processor. + Therefore they are not covered by the Cortex-M0+ header file. + @{ + */ +/*@} end of group CMSIS_CoreDebug */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_core_bitfield Core register bit field macros + \brief Macros for use with bit field definitions (xxx_Pos, xxx_Msk). + @{ + */ + +/** + \brief Mask and shift a bit field value for use in a register bit range. + \param[in] field Name of the register bit field. + \param[in] value Value of the bit field. This parameter is interpreted as an uint32_t type. + \return Masked and shifted value. +*/ +#define _VAL2FLD(field, value) (((uint32_t)(value) << field ## _Pos) & field ## _Msk) + +/** + \brief Mask and shift a register value to extract a bit filed value. + \param[in] field Name of the register bit field. + \param[in] value Value of register. This parameter is interpreted as an uint32_t type. + \return Masked and shifted bit field value. +*/ +#define _FLD2VAL(field, value) (((uint32_t)(value) & field ## _Msk) >> field ## _Pos) + +/*@} end of group CMSIS_core_bitfield */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_core_base Core Definitions + \brief Definitions for base addresses, unions, and structures. + @{ + */ + +/* Memory mapping of Core Hardware */ +#define SCS_BASE (0xE000E000UL) /*!< System Control Space Base Address */ +#define SysTick_BASE (SCS_BASE + 0x0010UL) /*!< SysTick Base Address */ +#define NVIC_BASE (SCS_BASE + 0x0100UL) /*!< NVIC Base Address */ +#define SCB_BASE (SCS_BASE + 0x0D00UL) /*!< System Control Block Base Address */ + +#define SCB ((SCB_Type *) SCB_BASE ) /*!< SCB configuration struct */ +#define SysTick ((SysTick_Type *) SysTick_BASE ) /*!< SysTick configuration struct */ +#define NVIC ((NVIC_Type *) NVIC_BASE ) /*!< NVIC configuration struct */ + +#if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) + #define MPU_BASE (SCS_BASE + 0x0D90UL) /*!< Memory Protection Unit */ + #define MPU ((MPU_Type *) MPU_BASE ) /*!< Memory Protection Unit */ +#endif + +/*@} */ + + + +/******************************************************************************* + * Hardware Abstraction Layer + Core Function Interface contains: + - Core NVIC Functions + - Core SysTick Functions + - Core Register Access Functions + ******************************************************************************/ +/** + \defgroup CMSIS_Core_FunctionInterface Functions and Instructions Reference +*/ + + + +/* ########################## NVIC functions #################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_NVICFunctions NVIC Functions + \brief Functions that manage interrupts and exceptions via the NVIC. 
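(Usage sketch, not part of the vendored header: the _VAL2FLD/_FLD2VAL helpers defined a little earlier pair a register value with the xxx_Pos/xxx_Msk definitions from this file; the enclosing device header that includes core_cm0plus.h is assumed.)

/* Sketch only: assumes the device header that pulls in core_cm0plus.h is already included. */
static uint32_t read_cpuid_partno(void)
{
  /* Extract the PARTNO bit field from SCB->CPUID using its _Msk/_Pos pair. */
  return _FLD2VAL(SCB_CPUID_PARTNO, SCB->CPUID);
}

static uint32_t make_sysresetreq_word(void)
{
  /* Compose an AIRCR value from bit fields: the VECTKEY write key plus SYSRESETREQ. */
  return _VAL2FLD(SCB_AIRCR_VECTKEY, 0x5FAUL) | SCB_AIRCR_SYSRESETREQ_Msk;
}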
+ @{ + */ + +#ifdef CMSIS_NVIC_VIRTUAL + #ifndef CMSIS_NVIC_VIRTUAL_HEADER_FILE + #define CMSIS_NVIC_VIRTUAL_HEADER_FILE "cmsis_nvic_virtual.h" + #endif + #include CMSIS_NVIC_VIRTUAL_HEADER_FILE +#else + #define NVIC_SetPriorityGrouping __NVIC_SetPriorityGrouping + #define NVIC_GetPriorityGrouping __NVIC_GetPriorityGrouping + #define NVIC_EnableIRQ __NVIC_EnableIRQ + #define NVIC_GetEnableIRQ __NVIC_GetEnableIRQ + #define NVIC_DisableIRQ __NVIC_DisableIRQ + #define NVIC_GetPendingIRQ __NVIC_GetPendingIRQ + #define NVIC_SetPendingIRQ __NVIC_SetPendingIRQ + #define NVIC_ClearPendingIRQ __NVIC_ClearPendingIRQ +/*#define NVIC_GetActive __NVIC_GetActive not available for Cortex-M0+ */ + #define NVIC_SetPriority __NVIC_SetPriority + #define NVIC_GetPriority __NVIC_GetPriority + #define NVIC_SystemReset __NVIC_SystemReset +#endif /* CMSIS_NVIC_VIRTUAL */ + +#ifdef CMSIS_VECTAB_VIRTUAL + #ifndef CMSIS_VECTAB_VIRTUAL_HEADER_FILE + #define CMSIS_VECTAB_VIRTUAL_HEADER_FILE "cmsis_vectab_virtual.h" + #endif + #include CMSIS_VECTAB_VIRTUAL_HEADER_FILE +#else + #define NVIC_SetVector __NVIC_SetVector + #define NVIC_GetVector __NVIC_GetVector +#endif /* (CMSIS_VECTAB_VIRTUAL) */ + +#define NVIC_USER_IRQ_OFFSET 16 + + +/* The following EXC_RETURN values are saved the LR on exception entry */ +#define EXC_RETURN_HANDLER (0xFFFFFFF1UL) /* return to Handler mode, uses MSP after return */ +#define EXC_RETURN_THREAD_MSP (0xFFFFFFF9UL) /* return to Thread mode, uses MSP after return */ +#define EXC_RETURN_THREAD_PSP (0xFFFFFFFDUL) /* return to Thread mode, uses PSP after return */ + + +/* Interrupt Priorities are WORD accessible only under Armv6-M */ +/* The following MACROS handle generation of the register offset and byte masks */ +#define _BIT_SHIFT(IRQn) ( ((((uint32_t)(int32_t)(IRQn)) ) & 0x03UL) * 8UL) +#define _SHP_IDX(IRQn) ( (((((uint32_t)(int32_t)(IRQn)) & 0x0FUL)-8UL) >> 2UL) ) +#define _IP_IDX(IRQn) ( (((uint32_t)(int32_t)(IRQn)) >> 2UL) ) + +#define __NVIC_SetPriorityGrouping(X) (void)(X) +#define __NVIC_GetPriorityGrouping() (0U) + +/** + \brief Enable Interrupt + \details Enables a device specific interrupt in the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_EnableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ISER[0U] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Get Interrupt Enable status + \details Returns a device specific interrupt enable status from the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt is not enabled. + \return 1 Interrupt is enabled. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t __NVIC_GetEnableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->ISER[0U] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Disable Interrupt + \details Disables a device specific interrupt in the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_DisableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ICER[0U] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + __DSB(); + __ISB(); + } +} + + +/** + \brief Get Pending Interrupt + \details Reads the NVIC pending register and returns the pending bit for the specified device specific interrupt. 
+ \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt status is not pending. + \return 1 Interrupt status is pending. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t __NVIC_GetPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->ISPR[0U] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Set Pending Interrupt + \details Sets the pending bit of a device specific interrupt in the NVIC pending register. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_SetPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ISPR[0U] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Clear Pending Interrupt + \details Clears the pending bit of a device specific interrupt in the NVIC pending register. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_ClearPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ICPR[0U] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Set Interrupt Priority + \details Sets the priority of a device specific interrupt or a processor exception. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \param [in] priority Priority to set. + \note The priority cannot be set for every processor exception. + */ +__STATIC_INLINE void __NVIC_SetPriority(IRQn_Type IRQn, uint32_t priority) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->IP[_IP_IDX(IRQn)] = ((uint32_t)(NVIC->IP[_IP_IDX(IRQn)] & ~(0xFFUL << _BIT_SHIFT(IRQn))) | + (((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL) << _BIT_SHIFT(IRQn))); + } + else + { + SCB->SHP[_SHP_IDX(IRQn)] = ((uint32_t)(SCB->SHP[_SHP_IDX(IRQn)] & ~(0xFFUL << _BIT_SHIFT(IRQn))) | + (((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL) << _BIT_SHIFT(IRQn))); + } +} + + +/** + \brief Get Interrupt Priority + \details Reads the priority of a device specific interrupt or a processor exception. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \return Interrupt Priority. + Value is aligned automatically to the implemented priority bits of the microcontroller. + */ +__STATIC_INLINE uint32_t __NVIC_GetPriority(IRQn_Type IRQn) +{ + + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->IP[ _IP_IDX(IRQn)] >> _BIT_SHIFT(IRQn) ) & (uint32_t)0xFFUL) >> (8U - __NVIC_PRIO_BITS))); + } + else + { + return((uint32_t)(((SCB->SHP[_SHP_IDX(IRQn)] >> _BIT_SHIFT(IRQn) ) & (uint32_t)0xFFUL) >> (8U - __NVIC_PRIO_BITS))); + } +} + + +/** + \brief Encode Priority + \details Encodes the priority for an interrupt with the given priority group, + preemptive priority value, and subpriority value. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS), the smallest possible priority group is set. + \param [in] PriorityGroup Used priority group. + \param [in] PreemptPriority Preemptive priority value (starting from 0). + \param [in] SubPriority Subpriority value (starting from 0). + \return Encoded priority. Value can be used in the function \ref NVIC_SetPriority(). 
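(Usage sketch, not part of the vendored header: a minimal example of how the NVIC enable/pending/priority helpers defined up to this point are typically combined; the IRQn_Type value comes from the device header and is the caller's choice.)

/* Sketch only: the caller supplies a device-specific IRQn_Type value. */
void example_irq_setup(IRQn_Type irq)
{
  /* Lowest urgency with __NVIC_PRIO_BITS implemented bits is (1 << __NVIC_PRIO_BITS) - 1. */
  NVIC_SetPriority(irq, (1UL << __NVIC_PRIO_BITS) - 1UL);
  NVIC_ClearPendingIRQ(irq);            /* discard any stale pending state */
  NVIC_EnableIRQ(irq);

  if (NVIC_GetPendingIRQ(irq) != 0U)
  {
    /* The interrupt re-pended between the clear and the enable; it will be taken shortly. */
  }
}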
+ */ +__STATIC_INLINE uint32_t NVIC_EncodePriority (uint32_t PriorityGroup, uint32_t PreemptPriority, uint32_t SubPriority) +{ + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + uint32_t PreemptPriorityBits; + uint32_t SubPriorityBits; + + PreemptPriorityBits = ((7UL - PriorityGroupTmp) > (uint32_t)(__NVIC_PRIO_BITS)) ? (uint32_t)(__NVIC_PRIO_BITS) : (uint32_t)(7UL - PriorityGroupTmp); + SubPriorityBits = ((PriorityGroupTmp + (uint32_t)(__NVIC_PRIO_BITS)) < (uint32_t)7UL) ? (uint32_t)0UL : (uint32_t)((PriorityGroupTmp - 7UL) + (uint32_t)(__NVIC_PRIO_BITS)); + + return ( + ((PreemptPriority & (uint32_t)((1UL << (PreemptPriorityBits)) - 1UL)) << SubPriorityBits) | + ((SubPriority & (uint32_t)((1UL << (SubPriorityBits )) - 1UL))) + ); +} + + +/** + \brief Decode Priority + \details Decodes an interrupt priority value with a given priority group to + preemptive priority value and subpriority value. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS) the smallest possible priority group is set. + \param [in] Priority Priority value, which can be retrieved with the function \ref NVIC_GetPriority(). + \param [in] PriorityGroup Used priority group. + \param [out] pPreemptPriority Preemptive priority value (starting from 0). + \param [out] pSubPriority Subpriority value (starting from 0). + */ +__STATIC_INLINE void NVIC_DecodePriority (uint32_t Priority, uint32_t PriorityGroup, uint32_t* const pPreemptPriority, uint32_t* const pSubPriority) +{ + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + uint32_t PreemptPriorityBits; + uint32_t SubPriorityBits; + + PreemptPriorityBits = ((7UL - PriorityGroupTmp) > (uint32_t)(__NVIC_PRIO_BITS)) ? (uint32_t)(__NVIC_PRIO_BITS) : (uint32_t)(7UL - PriorityGroupTmp); + SubPriorityBits = ((PriorityGroupTmp + (uint32_t)(__NVIC_PRIO_BITS)) < (uint32_t)7UL) ? (uint32_t)0UL : (uint32_t)((PriorityGroupTmp - 7UL) + (uint32_t)(__NVIC_PRIO_BITS)); + + *pPreemptPriority = (Priority >> SubPriorityBits) & (uint32_t)((1UL << (PreemptPriorityBits)) - 1UL); + *pSubPriority = (Priority ) & (uint32_t)((1UL << (SubPriorityBits )) - 1UL); +} + + +/** + \brief Set Interrupt Vector + \details Sets an interrupt vector in SRAM based interrupt vector table. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + VTOR must been relocated to SRAM before. + If VTOR is not present address 0 must be mapped to SRAM. + \param [in] IRQn Interrupt number + \param [in] vector Address of interrupt handler function + */ +__STATIC_INLINE void __NVIC_SetVector(IRQn_Type IRQn, uint32_t vector) +{ +#if defined (__VTOR_PRESENT) && (__VTOR_PRESENT == 1U) + uint32_t *vectors = (uint32_t *)SCB->VTOR; +#else + uint32_t *vectors = (uint32_t *)0x0U; +#endif + vectors[(int32_t)IRQn + NVIC_USER_IRQ_OFFSET] = vector; +} + + +/** + \brief Get Interrupt Vector + \details Reads an interrupt vector from interrupt vector table. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. 
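(Usage sketch, not part of the vendored header: NVIC_EncodePriority together with NVIC_SetVector as just defined. EXAMPLE_IRQHandler is a placeholder symbol; on Cortex-M0+ the priority grouping is fixed, so NVIC_GetPriorityGrouping() returns 0 here, and NVIC_SetVector needs a vector table in SRAM.)

/* Sketch only: EXAMPLE_IRQHandler is a placeholder handler, not a symbol from this patch. */
extern void EXAMPLE_IRQHandler(void);

void example_priority_and_vector(IRQn_Type irq)
{
  /* Grouping is fixed on Cortex-M0+, so the encoded value is effectively the preempt level. */
  uint32_t prio = NVIC_EncodePriority(NVIC_GetPriorityGrouping(), 1UL, 0UL);
  NVIC_SetPriority(irq, prio);

  /* Requires the vector table to live in SRAM (VTOR relocated, or address 0 mapped to SRAM). */
  NVIC_SetVector(irq, (uint32_t)EXAMPLE_IRQHandler);
}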
+ \return Address of interrupt handler function + */ +__STATIC_INLINE uint32_t __NVIC_GetVector(IRQn_Type IRQn) +{ +#if defined (__VTOR_PRESENT) && (__VTOR_PRESENT == 1U) + uint32_t *vectors = (uint32_t *)SCB->VTOR; +#else + uint32_t *vectors = (uint32_t *)0x0U; +#endif + return vectors[(int32_t)IRQn + NVIC_USER_IRQ_OFFSET]; + +} + + +/** + \brief System Reset + \details Initiates a system reset request to reset the MCU. + */ +__NO_RETURN __STATIC_INLINE void __NVIC_SystemReset(void) +{ + __DSB(); /* Ensure all outstanding memory accesses included + buffered write are completed before reset */ + SCB->AIRCR = ((0x5FAUL << SCB_AIRCR_VECTKEY_Pos) | + SCB_AIRCR_SYSRESETREQ_Msk); + __DSB(); /* Ensure completion of memory access */ + + for(;;) /* wait until reset */ + { + __NOP(); + } +} + +/*@} end of CMSIS_Core_NVICFunctions */ + +/* ########################## MPU functions #################################### */ + +#if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) + +#include "mpu_armv7.h" + +#endif + +/* ########################## FPU functions #################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_FpuFunctions FPU Functions + \brief Function that provides FPU type. + @{ + */ + +/** + \brief get FPU type + \details returns the FPU type + \returns + - \b 0: No FPU + - \b 1: Single precision FPU + - \b 2: Double + Single precision FPU + */ +__STATIC_INLINE uint32_t SCB_GetFPUType(void) +{ + return 0U; /* No FPU */ +} + + +/*@} end of CMSIS_Core_FpuFunctions */ + + + +/* ################################## SysTick function ############################################ */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_SysTickFunctions SysTick Functions + \brief Functions that configure the System. + @{ + */ + +#if defined (__Vendor_SysTickConfig) && (__Vendor_SysTickConfig == 0U) + +/** + \brief System Tick Configuration + \details Initializes the System Timer and its interrupt, and starts the System Tick Timer. + Counter is in free running mode to generate periodic interrupts. + \param [in] ticks Number of ticks between two interrupts. + \return 0 Function succeeded. + \return 1 Function failed. + \note When the variable __Vendor_SysTickConfig is set to 1, then the + function SysTick_Config is not included. In this case, the file device.h + must contain a vendor-specific implementation of this function. 
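(Usage sketch, not part of the vendored header: SysTick_Config, documented just above and implemented immediately below, is usually driven from the core clock. SystemCoreClock is assumed to be provided by the device's system startup code; it is not defined in this file.)

/* Sketch only: SystemCoreClock is assumed to come from the device support code. */
extern uint32_t SystemCoreClock;

void example_start_1ms_tick(void)
{
  /* Request a tick every 1 ms; a non-zero return means the reload value did not fit in 24 bits. */
  if (SysTick_Config(SystemCoreClock / 1000UL) != 0UL)
  {
    NVIC_SystemReset();  /* last-resort recovery if the tick cannot be configured */
  }
}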
+ */ +__STATIC_INLINE uint32_t SysTick_Config(uint32_t ticks) +{ + if ((ticks - 1UL) > SysTick_LOAD_RELOAD_Msk) + { + return (1UL); /* Reload value impossible */ + } + + SysTick->LOAD = (uint32_t)(ticks - 1UL); /* set reload register */ + NVIC_SetPriority (SysTick_IRQn, (1UL << __NVIC_PRIO_BITS) - 1UL); /* set Priority for Systick Interrupt */ + SysTick->VAL = 0UL; /* Load the SysTick Counter Value */ + SysTick->CTRL = SysTick_CTRL_CLKSOURCE_Msk | + SysTick_CTRL_TICKINT_Msk | + SysTick_CTRL_ENABLE_Msk; /* Enable SysTick IRQ and SysTick Timer */ + return (0UL); /* Function successful */ +} + +#endif + +/*@} end of CMSIS_Core_SysTickFunctions */ + + + + +#ifdef __cplusplus +} +#endif + +#endif /* __CORE_CM0PLUS_H_DEPENDANT */ + +#endif /* __CMSIS_GENERIC */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Drivers/CMSIS/Include/core_cm1.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Drivers/CMSIS/Include/core_cm1.h new file mode 100644 index 0000000..fd1c407 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Drivers/CMSIS/Include/core_cm1.h @@ -0,0 +1,976 @@ +/**************************************************************************//** + * @file core_cm1.h + * @brief CMSIS Cortex-M1 Core Peripheral Access Layer Header File + * @version V1.0.0 + * @date 23. July 2018 + ******************************************************************************/ +/* + * Copyright (c) 2009-2018 Arm Limited. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#if defined ( __ICCARM__ ) + #pragma system_include /* treat file as system include file for MISRA check */ +#elif defined (__clang__) + #pragma clang system_header /* treat file as system include file */ +#endif + +#ifndef __CORE_CM1_H_GENERIC +#define __CORE_CM1_H_GENERIC + +#include + +#ifdef __cplusplus + extern "C" { +#endif + +/** + \page CMSIS_MISRA_Exceptions MISRA-C:2004 Compliance Exceptions + CMSIS violates the following MISRA-C:2004 rules: + + \li Required Rule 8.5, object/function definition in header file.
+ Function definitions in header files are used to allow 'inlining'. + + \li Required Rule 18.4, declaration of union type or object of union type: '{...}'.
+ Unions are used for effective representation of core registers. + + \li Advisory Rule 19.7, Function-like macro defined.
+ Function-like macros are used to allow more efficient code. + */ + + +/******************************************************************************* + * CMSIS definitions + ******************************************************************************/ +/** + \ingroup Cortex_M1 + @{ + */ + +#include "cmsis_version.h" + +/* CMSIS CM1 definitions */ +#define __CM1_CMSIS_VERSION_MAIN (__CM_CMSIS_VERSION_MAIN) /*!< \deprecated [31:16] CMSIS HAL main version */ +#define __CM1_CMSIS_VERSION_SUB (__CM_CMSIS_VERSION_SUB) /*!< \deprecated [15:0] CMSIS HAL sub version */ +#define __CM1_CMSIS_VERSION ((__CM1_CMSIS_VERSION_MAIN << 16U) | \ + __CM1_CMSIS_VERSION_SUB ) /*!< \deprecated CMSIS HAL version number */ + +#define __CORTEX_M (1U) /*!< Cortex-M Core */ + +/** __FPU_USED indicates whether an FPU is used or not. + This core does not support an FPU at all +*/ +#define __FPU_USED 0U + +#if defined ( __CC_ARM ) + #if defined __TARGET_FPU_VFP + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined (__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050) + #if defined __ARM_PCS_VFP + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined ( __GNUC__ ) + #if defined (__VFP_FP__) && !defined(__SOFTFP__) + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined ( __ICCARM__ ) + #if defined __ARMVFP__ + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined ( __TI_ARM__ ) + #if defined __TI_VFP_SUPPORT__ + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined ( __TASKING__ ) + #if defined __FPU_VFP__ + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined ( __CSMC__ ) + #if ( __CSMC__ & 0x400U) + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#endif + +#include "cmsis_compiler.h" /* CMSIS compiler specific defines */ + + +#ifdef __cplusplus +} +#endif + +#endif /* __CORE_CM1_H_GENERIC */ + +#ifndef __CMSIS_GENERIC + +#ifndef __CORE_CM1_H_DEPENDANT +#define __CORE_CM1_H_DEPENDANT + +#ifdef __cplusplus + extern "C" { +#endif + +/* check device defines and use defaults */ +#if defined __CHECK_DEVICE_DEFINES + #ifndef __CM1_REV + #define __CM1_REV 0x0100U + #warning "__CM1_REV not defined in device header file; using default!" + #endif + + #ifndef __NVIC_PRIO_BITS + #define __NVIC_PRIO_BITS 2U + #warning "__NVIC_PRIO_BITS not defined in device header file; using default!" + #endif + + #ifndef __Vendor_SysTickConfig + #define __Vendor_SysTickConfig 0U + #warning "__Vendor_SysTickConfig not defined in device header file; using default!" + #endif +#endif + +/* IO definitions (access restrictions to peripheral registers) */ +/** + \defgroup CMSIS_glob_defs CMSIS Global Defines + + IO Type Qualifiers are used + \li to specify the access to peripheral variables. + \li for automatic generation of peripheral register debug information. 
+*/ +#ifdef __cplusplus + #define __I volatile /*!< Defines 'read only' permissions */ +#else + #define __I volatile const /*!< Defines 'read only' permissions */ +#endif +#define __O volatile /*!< Defines 'write only' permissions */ +#define __IO volatile /*!< Defines 'read / write' permissions */ + +/* following defines should be used for structure members */ +#define __IM volatile const /*! Defines 'read only' structure member permissions */ +#define __OM volatile /*! Defines 'write only' structure member permissions */ +#define __IOM volatile /*! Defines 'read / write' structure member permissions */ + +/*@} end of group Cortex_M1 */ + + + +/******************************************************************************* + * Register Abstraction + Core Register contain: + - Core Register + - Core NVIC Register + - Core SCB Register + - Core SysTick Register + ******************************************************************************/ +/** + \defgroup CMSIS_core_register Defines and Type Definitions + \brief Type definitions and defines for Cortex-M processor based devices. +*/ + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_CORE Status and Control Registers + \brief Core Register type definitions. + @{ + */ + +/** + \brief Union type to access the Application Program Status Register (APSR). + */ +typedef union +{ + struct + { + uint32_t _reserved0:28; /*!< bit: 0..27 Reserved */ + uint32_t V:1; /*!< bit: 28 Overflow condition code flag */ + uint32_t C:1; /*!< bit: 29 Carry condition code flag */ + uint32_t Z:1; /*!< bit: 30 Zero condition code flag */ + uint32_t N:1; /*!< bit: 31 Negative condition code flag */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} APSR_Type; + +/* APSR Register Definitions */ +#define APSR_N_Pos 31U /*!< APSR: N Position */ +#define APSR_N_Msk (1UL << APSR_N_Pos) /*!< APSR: N Mask */ + +#define APSR_Z_Pos 30U /*!< APSR: Z Position */ +#define APSR_Z_Msk (1UL << APSR_Z_Pos) /*!< APSR: Z Mask */ + +#define APSR_C_Pos 29U /*!< APSR: C Position */ +#define APSR_C_Msk (1UL << APSR_C_Pos) /*!< APSR: C Mask */ + +#define APSR_V_Pos 28U /*!< APSR: V Position */ +#define APSR_V_Msk (1UL << APSR_V_Pos) /*!< APSR: V Mask */ + + +/** + \brief Union type to access the Interrupt Program Status Register (IPSR). + */ +typedef union +{ + struct + { + uint32_t ISR:9; /*!< bit: 0.. 8 Exception number */ + uint32_t _reserved0:23; /*!< bit: 9..31 Reserved */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} IPSR_Type; + +/* IPSR Register Definitions */ +#define IPSR_ISR_Pos 0U /*!< IPSR: ISR Position */ +#define IPSR_ISR_Msk (0x1FFUL /*<< IPSR_ISR_Pos*/) /*!< IPSR: ISR Mask */ + + +/** + \brief Union type to access the Special-Purpose Program Status Registers (xPSR). + */ +typedef union +{ + struct + { + uint32_t ISR:9; /*!< bit: 0.. 
8 Exception number */ + uint32_t _reserved0:15; /*!< bit: 9..23 Reserved */ + uint32_t T:1; /*!< bit: 24 Thumb bit (read 0) */ + uint32_t _reserved1:3; /*!< bit: 25..27 Reserved */ + uint32_t V:1; /*!< bit: 28 Overflow condition code flag */ + uint32_t C:1; /*!< bit: 29 Carry condition code flag */ + uint32_t Z:1; /*!< bit: 30 Zero condition code flag */ + uint32_t N:1; /*!< bit: 31 Negative condition code flag */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} xPSR_Type; + +/* xPSR Register Definitions */ +#define xPSR_N_Pos 31U /*!< xPSR: N Position */ +#define xPSR_N_Msk (1UL << xPSR_N_Pos) /*!< xPSR: N Mask */ + +#define xPSR_Z_Pos 30U /*!< xPSR: Z Position */ +#define xPSR_Z_Msk (1UL << xPSR_Z_Pos) /*!< xPSR: Z Mask */ + +#define xPSR_C_Pos 29U /*!< xPSR: C Position */ +#define xPSR_C_Msk (1UL << xPSR_C_Pos) /*!< xPSR: C Mask */ + +#define xPSR_V_Pos 28U /*!< xPSR: V Position */ +#define xPSR_V_Msk (1UL << xPSR_V_Pos) /*!< xPSR: V Mask */ + +#define xPSR_T_Pos 24U /*!< xPSR: T Position */ +#define xPSR_T_Msk (1UL << xPSR_T_Pos) /*!< xPSR: T Mask */ + +#define xPSR_ISR_Pos 0U /*!< xPSR: ISR Position */ +#define xPSR_ISR_Msk (0x1FFUL /*<< xPSR_ISR_Pos*/) /*!< xPSR: ISR Mask */ + + +/** + \brief Union type to access the Control Registers (CONTROL). + */ +typedef union +{ + struct + { + uint32_t _reserved0:1; /*!< bit: 0 Reserved */ + uint32_t SPSEL:1; /*!< bit: 1 Stack to be used */ + uint32_t _reserved1:30; /*!< bit: 2..31 Reserved */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} CONTROL_Type; + +/* CONTROL Register Definitions */ +#define CONTROL_SPSEL_Pos 1U /*!< CONTROL: SPSEL Position */ +#define CONTROL_SPSEL_Msk (1UL << CONTROL_SPSEL_Pos) /*!< CONTROL: SPSEL Mask */ + +/*@} end of group CMSIS_CORE */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_NVIC Nested Vectored Interrupt Controller (NVIC) + \brief Type definitions for the NVIC Registers + @{ + */ + +/** + \brief Structure type to access the Nested Vectored Interrupt Controller (NVIC). + */ +typedef struct +{ + __IOM uint32_t ISER[1U]; /*!< Offset: 0x000 (R/W) Interrupt Set Enable Register */ + uint32_t RESERVED0[31U]; + __IOM uint32_t ICER[1U]; /*!< Offset: 0x080 (R/W) Interrupt Clear Enable Register */ + uint32_t RSERVED1[31U]; + __IOM uint32_t ISPR[1U]; /*!< Offset: 0x100 (R/W) Interrupt Set Pending Register */ + uint32_t RESERVED2[31U]; + __IOM uint32_t ICPR[1U]; /*!< Offset: 0x180 (R/W) Interrupt Clear Pending Register */ + uint32_t RESERVED3[31U]; + uint32_t RESERVED4[64U]; + __IOM uint32_t IP[8U]; /*!< Offset: 0x300 (R/W) Interrupt Priority Register */ +} NVIC_Type; + +/*@} end of group CMSIS_NVIC */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SCB System Control Block (SCB) + \brief Type definitions for the System Control Block Registers + @{ + */ + +/** + \brief Structure type to access the System Control Block (SCB). + */ +typedef struct +{ + __IM uint32_t CPUID; /*!< Offset: 0x000 (R/ ) CPUID Base Register */ + __IOM uint32_t ICSR; /*!< Offset: 0x004 (R/W) Interrupt Control and State Register */ + uint32_t RESERVED0; + __IOM uint32_t AIRCR; /*!< Offset: 0x00C (R/W) Application Interrupt and Reset Control Register */ + __IOM uint32_t SCR; /*!< Offset: 0x010 (R/W) System Control Register */ + __IOM uint32_t CCR; /*!< Offset: 0x014 (R/W) Configuration Control Register */ + uint32_t RESERVED1; + __IOM uint32_t SHP[2U]; /*!< Offset: 0x01C (R/W) System Handlers Priority Registers. 
[0] is RESERVED */ + __IOM uint32_t SHCSR; /*!< Offset: 0x024 (R/W) System Handler Control and State Register */ +} SCB_Type; + +/* SCB CPUID Register Definitions */ +#define SCB_CPUID_IMPLEMENTER_Pos 24U /*!< SCB CPUID: IMPLEMENTER Position */ +#define SCB_CPUID_IMPLEMENTER_Msk (0xFFUL << SCB_CPUID_IMPLEMENTER_Pos) /*!< SCB CPUID: IMPLEMENTER Mask */ + +#define SCB_CPUID_VARIANT_Pos 20U /*!< SCB CPUID: VARIANT Position */ +#define SCB_CPUID_VARIANT_Msk (0xFUL << SCB_CPUID_VARIANT_Pos) /*!< SCB CPUID: VARIANT Mask */ + +#define SCB_CPUID_ARCHITECTURE_Pos 16U /*!< SCB CPUID: ARCHITECTURE Position */ +#define SCB_CPUID_ARCHITECTURE_Msk (0xFUL << SCB_CPUID_ARCHITECTURE_Pos) /*!< SCB CPUID: ARCHITECTURE Mask */ + +#define SCB_CPUID_PARTNO_Pos 4U /*!< SCB CPUID: PARTNO Position */ +#define SCB_CPUID_PARTNO_Msk (0xFFFUL << SCB_CPUID_PARTNO_Pos) /*!< SCB CPUID: PARTNO Mask */ + +#define SCB_CPUID_REVISION_Pos 0U /*!< SCB CPUID: REVISION Position */ +#define SCB_CPUID_REVISION_Msk (0xFUL /*<< SCB_CPUID_REVISION_Pos*/) /*!< SCB CPUID: REVISION Mask */ + +/* SCB Interrupt Control State Register Definitions */ +#define SCB_ICSR_NMIPENDSET_Pos 31U /*!< SCB ICSR: NMIPENDSET Position */ +#define SCB_ICSR_NMIPENDSET_Msk (1UL << SCB_ICSR_NMIPENDSET_Pos) /*!< SCB ICSR: NMIPENDSET Mask */ + +#define SCB_ICSR_PENDSVSET_Pos 28U /*!< SCB ICSR: PENDSVSET Position */ +#define SCB_ICSR_PENDSVSET_Msk (1UL << SCB_ICSR_PENDSVSET_Pos) /*!< SCB ICSR: PENDSVSET Mask */ + +#define SCB_ICSR_PENDSVCLR_Pos 27U /*!< SCB ICSR: PENDSVCLR Position */ +#define SCB_ICSR_PENDSVCLR_Msk (1UL << SCB_ICSR_PENDSVCLR_Pos) /*!< SCB ICSR: PENDSVCLR Mask */ + +#define SCB_ICSR_PENDSTSET_Pos 26U /*!< SCB ICSR: PENDSTSET Position */ +#define SCB_ICSR_PENDSTSET_Msk (1UL << SCB_ICSR_PENDSTSET_Pos) /*!< SCB ICSR: PENDSTSET Mask */ + +#define SCB_ICSR_PENDSTCLR_Pos 25U /*!< SCB ICSR: PENDSTCLR Position */ +#define SCB_ICSR_PENDSTCLR_Msk (1UL << SCB_ICSR_PENDSTCLR_Pos) /*!< SCB ICSR: PENDSTCLR Mask */ + +#define SCB_ICSR_ISRPREEMPT_Pos 23U /*!< SCB ICSR: ISRPREEMPT Position */ +#define SCB_ICSR_ISRPREEMPT_Msk (1UL << SCB_ICSR_ISRPREEMPT_Pos) /*!< SCB ICSR: ISRPREEMPT Mask */ + +#define SCB_ICSR_ISRPENDING_Pos 22U /*!< SCB ICSR: ISRPENDING Position */ +#define SCB_ICSR_ISRPENDING_Msk (1UL << SCB_ICSR_ISRPENDING_Pos) /*!< SCB ICSR: ISRPENDING Mask */ + +#define SCB_ICSR_VECTPENDING_Pos 12U /*!< SCB ICSR: VECTPENDING Position */ +#define SCB_ICSR_VECTPENDING_Msk (0x1FFUL << SCB_ICSR_VECTPENDING_Pos) /*!< SCB ICSR: VECTPENDING Mask */ + +#define SCB_ICSR_VECTACTIVE_Pos 0U /*!< SCB ICSR: VECTACTIVE Position */ +#define SCB_ICSR_VECTACTIVE_Msk (0x1FFUL /*<< SCB_ICSR_VECTACTIVE_Pos*/) /*!< SCB ICSR: VECTACTIVE Mask */ + +/* SCB Application Interrupt and Reset Control Register Definitions */ +#define SCB_AIRCR_VECTKEY_Pos 16U /*!< SCB AIRCR: VECTKEY Position */ +#define SCB_AIRCR_VECTKEY_Msk (0xFFFFUL << SCB_AIRCR_VECTKEY_Pos) /*!< SCB AIRCR: VECTKEY Mask */ + +#define SCB_AIRCR_VECTKEYSTAT_Pos 16U /*!< SCB AIRCR: VECTKEYSTAT Position */ +#define SCB_AIRCR_VECTKEYSTAT_Msk (0xFFFFUL << SCB_AIRCR_VECTKEYSTAT_Pos) /*!< SCB AIRCR: VECTKEYSTAT Mask */ + +#define SCB_AIRCR_ENDIANESS_Pos 15U /*!< SCB AIRCR: ENDIANESS Position */ +#define SCB_AIRCR_ENDIANESS_Msk (1UL << SCB_AIRCR_ENDIANESS_Pos) /*!< SCB AIRCR: ENDIANESS Mask */ + +#define SCB_AIRCR_SYSRESETREQ_Pos 2U /*!< SCB AIRCR: SYSRESETREQ Position */ +#define SCB_AIRCR_SYSRESETREQ_Msk (1UL << SCB_AIRCR_SYSRESETREQ_Pos) /*!< SCB AIRCR: SYSRESETREQ Mask */ + +#define SCB_AIRCR_VECTCLRACTIVE_Pos 1U /*!< SCB 
AIRCR: VECTCLRACTIVE Position */ +#define SCB_AIRCR_VECTCLRACTIVE_Msk (1UL << SCB_AIRCR_VECTCLRACTIVE_Pos) /*!< SCB AIRCR: VECTCLRACTIVE Mask */ + +/* SCB System Control Register Definitions */ +#define SCB_SCR_SEVONPEND_Pos 4U /*!< SCB SCR: SEVONPEND Position */ +#define SCB_SCR_SEVONPEND_Msk (1UL << SCB_SCR_SEVONPEND_Pos) /*!< SCB SCR: SEVONPEND Mask */ + +#define SCB_SCR_SLEEPDEEP_Pos 2U /*!< SCB SCR: SLEEPDEEP Position */ +#define SCB_SCR_SLEEPDEEP_Msk (1UL << SCB_SCR_SLEEPDEEP_Pos) /*!< SCB SCR: SLEEPDEEP Mask */ + +#define SCB_SCR_SLEEPONEXIT_Pos 1U /*!< SCB SCR: SLEEPONEXIT Position */ +#define SCB_SCR_SLEEPONEXIT_Msk (1UL << SCB_SCR_SLEEPONEXIT_Pos) /*!< SCB SCR: SLEEPONEXIT Mask */ + +/* SCB Configuration Control Register Definitions */ +#define SCB_CCR_STKALIGN_Pos 9U /*!< SCB CCR: STKALIGN Position */ +#define SCB_CCR_STKALIGN_Msk (1UL << SCB_CCR_STKALIGN_Pos) /*!< SCB CCR: STKALIGN Mask */ + +#define SCB_CCR_UNALIGN_TRP_Pos 3U /*!< SCB CCR: UNALIGN_TRP Position */ +#define SCB_CCR_UNALIGN_TRP_Msk (1UL << SCB_CCR_UNALIGN_TRP_Pos) /*!< SCB CCR: UNALIGN_TRP Mask */ + +/* SCB System Handler Control and State Register Definitions */ +#define SCB_SHCSR_SVCALLPENDED_Pos 15U /*!< SCB SHCSR: SVCALLPENDED Position */ +#define SCB_SHCSR_SVCALLPENDED_Msk (1UL << SCB_SHCSR_SVCALLPENDED_Pos) /*!< SCB SHCSR: SVCALLPENDED Mask */ + +/*@} end of group CMSIS_SCB */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SCnSCB System Controls not in SCB (SCnSCB) + \brief Type definitions for the System Control and ID Register not in the SCB + @{ + */ + +/** + \brief Structure type to access the System Control and ID Register not in the SCB. + */ +typedef struct +{ + uint32_t RESERVED0[2U]; + __IOM uint32_t ACTLR; /*!< Offset: 0x008 (R/W) Auxiliary Control Register */ +} SCnSCB_Type; + +/* Auxiliary Control Register Definitions */ +#define SCnSCB_ACTLR_ITCMUAEN_Pos 4U /*!< ACTLR: Instruction TCM Upper Alias Enable Position */ +#define SCnSCB_ACTLR_ITCMUAEN_Msk (1UL << SCnSCB_ACTLR_ITCMUAEN_Pos) /*!< ACTLR: Instruction TCM Upper Alias Enable Mask */ + +#define SCnSCB_ACTLR_ITCMLAEN_Pos 3U /*!< ACTLR: Instruction TCM Lower Alias Enable Position */ +#define SCnSCB_ACTLR_ITCMLAEN_Msk (1UL << SCnSCB_ACTLR_ITCMLAEN_Pos) /*!< ACTLR: Instruction TCM Lower Alias Enable Mask */ + +/*@} end of group CMSIS_SCnotSCB */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SysTick System Tick Timer (SysTick) + \brief Type definitions for the System Timer Registers. + @{ + */ + +/** + \brief Structure type to access the System Timer (SysTick). 
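+
+          Illustrative usage sketch (SystemCoreClock is assumed to be provided by the
+          device system file; SysTick_Config() is defined further below in this header):
+          \code
+          SysTick_Config(SystemCoreClock / 1000U);    /* request a 1 ms tick interrupt */
+          \endcode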
+ */ +typedef struct +{ + __IOM uint32_t CTRL; /*!< Offset: 0x000 (R/W) SysTick Control and Status Register */ + __IOM uint32_t LOAD; /*!< Offset: 0x004 (R/W) SysTick Reload Value Register */ + __IOM uint32_t VAL; /*!< Offset: 0x008 (R/W) SysTick Current Value Register */ + __IM uint32_t CALIB; /*!< Offset: 0x00C (R/ ) SysTick Calibration Register */ +} SysTick_Type; + +/* SysTick Control / Status Register Definitions */ +#define SysTick_CTRL_COUNTFLAG_Pos 16U /*!< SysTick CTRL: COUNTFLAG Position */ +#define SysTick_CTRL_COUNTFLAG_Msk (1UL << SysTick_CTRL_COUNTFLAG_Pos) /*!< SysTick CTRL: COUNTFLAG Mask */ + +#define SysTick_CTRL_CLKSOURCE_Pos 2U /*!< SysTick CTRL: CLKSOURCE Position */ +#define SysTick_CTRL_CLKSOURCE_Msk (1UL << SysTick_CTRL_CLKSOURCE_Pos) /*!< SysTick CTRL: CLKSOURCE Mask */ + +#define SysTick_CTRL_TICKINT_Pos 1U /*!< SysTick CTRL: TICKINT Position */ +#define SysTick_CTRL_TICKINT_Msk (1UL << SysTick_CTRL_TICKINT_Pos) /*!< SysTick CTRL: TICKINT Mask */ + +#define SysTick_CTRL_ENABLE_Pos 0U /*!< SysTick CTRL: ENABLE Position */ +#define SysTick_CTRL_ENABLE_Msk (1UL /*<< SysTick_CTRL_ENABLE_Pos*/) /*!< SysTick CTRL: ENABLE Mask */ + +/* SysTick Reload Register Definitions */ +#define SysTick_LOAD_RELOAD_Pos 0U /*!< SysTick LOAD: RELOAD Position */ +#define SysTick_LOAD_RELOAD_Msk (0xFFFFFFUL /*<< SysTick_LOAD_RELOAD_Pos*/) /*!< SysTick LOAD: RELOAD Mask */ + +/* SysTick Current Register Definitions */ +#define SysTick_VAL_CURRENT_Pos 0U /*!< SysTick VAL: CURRENT Position */ +#define SysTick_VAL_CURRENT_Msk (0xFFFFFFUL /*<< SysTick_VAL_CURRENT_Pos*/) /*!< SysTick VAL: CURRENT Mask */ + +/* SysTick Calibration Register Definitions */ +#define SysTick_CALIB_NOREF_Pos 31U /*!< SysTick CALIB: NOREF Position */ +#define SysTick_CALIB_NOREF_Msk (1UL << SysTick_CALIB_NOREF_Pos) /*!< SysTick CALIB: NOREF Mask */ + +#define SysTick_CALIB_SKEW_Pos 30U /*!< SysTick CALIB: SKEW Position */ +#define SysTick_CALIB_SKEW_Msk (1UL << SysTick_CALIB_SKEW_Pos) /*!< SysTick CALIB: SKEW Mask */ + +#define SysTick_CALIB_TENMS_Pos 0U /*!< SysTick CALIB: TENMS Position */ +#define SysTick_CALIB_TENMS_Msk (0xFFFFFFUL /*<< SysTick_CALIB_TENMS_Pos*/) /*!< SysTick CALIB: TENMS Mask */ + +/*@} end of group CMSIS_SysTick */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_CoreDebug Core Debug Registers (CoreDebug) + \brief Cortex-M1 Core Debug Registers (DCB registers, SHCSR, and DFSR) are only accessible over DAP and not via processor. + Therefore they are not covered by the Cortex-M1 header file. + @{ + */ +/*@} end of group CMSIS_CoreDebug */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_core_bitfield Core register bit field macros + \brief Macros for use with bit field definitions (xxx_Pos, xxx_Msk). + @{ + */ + +/** + \brief Mask and shift a bit field value for use in a register bit range. + \param[in] field Name of the register bit field. + \param[in] value Value of the bit field. This parameter is interpreted as an uint32_t type. + \return Masked and shifted value. +*/ +#define _VAL2FLD(field, value) (((uint32_t)(value) << field ## _Pos) & field ## _Msk) + +/** + \brief Mask and shift a register value to extract a bit filed value. + \param[in] field Name of the register bit field. + \param[in] value Value of register. This parameter is interpreted as an uint32_t type. + \return Masked and shifted bit field value. 
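+
+  Illustrative sketch of both bit field macros, using register and field names
+  defined elsewhere in this header:
+  \code
+  uint32_t part = _FLD2VAL(SCB_CPUID_PARTNO, SCB->CPUID);   /* extract the PARTNO field */
+  uint32_t ctrl = _VAL2FLD(SysTick_CTRL_CLKSOURCE, 1U);     /* build a CLKSOURCE field value */
+  \endcode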
+*/ +#define _FLD2VAL(field, value) (((uint32_t)(value) & field ## _Msk) >> field ## _Pos) + +/*@} end of group CMSIS_core_bitfield */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_core_base Core Definitions + \brief Definitions for base addresses, unions, and structures. + @{ + */ + +/* Memory mapping of Core Hardware */ +#define SCS_BASE (0xE000E000UL) /*!< System Control Space Base Address */ +#define SysTick_BASE (SCS_BASE + 0x0010UL) /*!< SysTick Base Address */ +#define NVIC_BASE (SCS_BASE + 0x0100UL) /*!< NVIC Base Address */ +#define SCB_BASE (SCS_BASE + 0x0D00UL) /*!< System Control Block Base Address */ + +#define SCnSCB ((SCnSCB_Type *) SCS_BASE ) /*!< System control Register not in SCB */ +#define SCB ((SCB_Type *) SCB_BASE ) /*!< SCB configuration struct */ +#define SysTick ((SysTick_Type *) SysTick_BASE ) /*!< SysTick configuration struct */ +#define NVIC ((NVIC_Type *) NVIC_BASE ) /*!< NVIC configuration struct */ + + +/*@} */ + + + +/******************************************************************************* + * Hardware Abstraction Layer + Core Function Interface contains: + - Core NVIC Functions + - Core SysTick Functions + - Core Register Access Functions + ******************************************************************************/ +/** + \defgroup CMSIS_Core_FunctionInterface Functions and Instructions Reference +*/ + + + +/* ########################## NVIC functions #################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_NVICFunctions NVIC Functions + \brief Functions that manage interrupts and exceptions via the NVIC. + @{ + */ + +#ifdef CMSIS_NVIC_VIRTUAL + #ifndef CMSIS_NVIC_VIRTUAL_HEADER_FILE + #define CMSIS_NVIC_VIRTUAL_HEADER_FILE "cmsis_nvic_virtual.h" + #endif + #include CMSIS_NVIC_VIRTUAL_HEADER_FILE +#else + #define NVIC_SetPriorityGrouping __NVIC_SetPriorityGrouping + #define NVIC_GetPriorityGrouping __NVIC_GetPriorityGrouping + #define NVIC_EnableIRQ __NVIC_EnableIRQ + #define NVIC_GetEnableIRQ __NVIC_GetEnableIRQ + #define NVIC_DisableIRQ __NVIC_DisableIRQ + #define NVIC_GetPendingIRQ __NVIC_GetPendingIRQ + #define NVIC_SetPendingIRQ __NVIC_SetPendingIRQ + #define NVIC_ClearPendingIRQ __NVIC_ClearPendingIRQ +/*#define NVIC_GetActive __NVIC_GetActive not available for Cortex-M1 */ + #define NVIC_SetPriority __NVIC_SetPriority + #define NVIC_GetPriority __NVIC_GetPriority + #define NVIC_SystemReset __NVIC_SystemReset +#endif /* CMSIS_NVIC_VIRTUAL */ + +#ifdef CMSIS_VECTAB_VIRTUAL + #ifndef CMSIS_VECTAB_VIRTUAL_HEADER_FILE + #define CMSIS_VECTAB_VIRTUAL_HEADER_FILE "cmsis_vectab_virtual.h" + #endif + #include CMSIS_VECTAB_VIRTUAL_HEADER_FILE +#else + #define NVIC_SetVector __NVIC_SetVector + #define NVIC_GetVector __NVIC_GetVector +#endif /* (CMSIS_VECTAB_VIRTUAL) */ + +#define NVIC_USER_IRQ_OFFSET 16 + + +/* The following EXC_RETURN values are saved the LR on exception entry */ +#define EXC_RETURN_HANDLER (0xFFFFFFF1UL) /* return to Handler mode, uses MSP after return */ +#define EXC_RETURN_THREAD_MSP (0xFFFFFFF9UL) /* return to Thread mode, uses MSP after return */ +#define EXC_RETURN_THREAD_PSP (0xFFFFFFFDUL) /* return to Thread mode, uses PSP after return */ + + +/* Interrupt Priorities are WORD accessible only under Armv6-M */ +/* The following MACROS handle generation of the register offset and byte masks */ +#define _BIT_SHIFT(IRQn) ( ((((uint32_t)(int32_t)(IRQn)) ) & 0x03UL) * 8UL) +#define _SHP_IDX(IRQn) ( (((((uint32_t)(int32_t)(IRQn)) & 0x0FUL)-8UL) >> 2UL) ) +#define 
_IP_IDX(IRQn) ( (((uint32_t)(int32_t)(IRQn)) >> 2UL) ) + +#define __NVIC_SetPriorityGrouping(X) (void)(X) +#define __NVIC_GetPriorityGrouping() (0U) + +/** + \brief Enable Interrupt + \details Enables a device specific interrupt in the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_EnableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ISER[0U] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Get Interrupt Enable status + \details Returns a device specific interrupt enable status from the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt is not enabled. + \return 1 Interrupt is enabled. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t __NVIC_GetEnableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->ISER[0U] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Disable Interrupt + \details Disables a device specific interrupt in the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_DisableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ICER[0U] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + __DSB(); + __ISB(); + } +} + + +/** + \brief Get Pending Interrupt + \details Reads the NVIC pending register and returns the pending bit for the specified device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt status is not pending. + \return 1 Interrupt status is pending. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t __NVIC_GetPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->ISPR[0U] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Set Pending Interrupt + \details Sets the pending bit of a device specific interrupt in the NVIC pending register. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_SetPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ISPR[0U] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Clear Pending Interrupt + \details Clears the pending bit of a device specific interrupt in the NVIC pending register. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_ClearPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ICPR[0U] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Set Interrupt Priority + \details Sets the priority of a device specific interrupt or a processor exception. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \param [in] priority Priority to set. + \note The priority cannot be set for every processor exception. 
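+
+  Illustrative call (interrupt numbers are provided by the device header;
+  SysTick_IRQn is shown as a core exception example):
+  \code
+  NVIC_SetPriority(SysTick_IRQn, (1UL << __NVIC_PRIO_BITS) - 1UL);  /* lowest priority */
+  \endcode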
+ */ +__STATIC_INLINE void __NVIC_SetPriority(IRQn_Type IRQn, uint32_t priority) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->IP[_IP_IDX(IRQn)] = ((uint32_t)(NVIC->IP[_IP_IDX(IRQn)] & ~(0xFFUL << _BIT_SHIFT(IRQn))) | + (((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL) << _BIT_SHIFT(IRQn))); + } + else + { + SCB->SHP[_SHP_IDX(IRQn)] = ((uint32_t)(SCB->SHP[_SHP_IDX(IRQn)] & ~(0xFFUL << _BIT_SHIFT(IRQn))) | + (((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL) << _BIT_SHIFT(IRQn))); + } +} + + +/** + \brief Get Interrupt Priority + \details Reads the priority of a device specific interrupt or a processor exception. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \return Interrupt Priority. + Value is aligned automatically to the implemented priority bits of the microcontroller. + */ +__STATIC_INLINE uint32_t __NVIC_GetPriority(IRQn_Type IRQn) +{ + + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->IP[ _IP_IDX(IRQn)] >> _BIT_SHIFT(IRQn) ) & (uint32_t)0xFFUL) >> (8U - __NVIC_PRIO_BITS))); + } + else + { + return((uint32_t)(((SCB->SHP[_SHP_IDX(IRQn)] >> _BIT_SHIFT(IRQn) ) & (uint32_t)0xFFUL) >> (8U - __NVIC_PRIO_BITS))); + } +} + + +/** + \brief Encode Priority + \details Encodes the priority for an interrupt with the given priority group, + preemptive priority value, and subpriority value. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS), the smallest possible priority group is set. + \param [in] PriorityGroup Used priority group. + \param [in] PreemptPriority Preemptive priority value (starting from 0). + \param [in] SubPriority Subpriority value (starting from 0). + \return Encoded priority. Value can be used in the function \ref NVIC_SetPriority(). + */ +__STATIC_INLINE uint32_t NVIC_EncodePriority (uint32_t PriorityGroup, uint32_t PreemptPriority, uint32_t SubPriority) +{ + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + uint32_t PreemptPriorityBits; + uint32_t SubPriorityBits; + + PreemptPriorityBits = ((7UL - PriorityGroupTmp) > (uint32_t)(__NVIC_PRIO_BITS)) ? (uint32_t)(__NVIC_PRIO_BITS) : (uint32_t)(7UL - PriorityGroupTmp); + SubPriorityBits = ((PriorityGroupTmp + (uint32_t)(__NVIC_PRIO_BITS)) < (uint32_t)7UL) ? (uint32_t)0UL : (uint32_t)((PriorityGroupTmp - 7UL) + (uint32_t)(__NVIC_PRIO_BITS)); + + return ( + ((PreemptPriority & (uint32_t)((1UL << (PreemptPriorityBits)) - 1UL)) << SubPriorityBits) | + ((SubPriority & (uint32_t)((1UL << (SubPriorityBits )) - 1UL))) + ); +} + + +/** + \brief Decode Priority + \details Decodes an interrupt priority value with a given priority group to + preemptive priority value and subpriority value. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS) the smallest possible priority group is set. + \param [in] Priority Priority value, which can be retrieved with the function \ref NVIC_GetPriority(). + \param [in] PriorityGroup Used priority group. + \param [out] pPreemptPriority Preemptive priority value (starting from 0). + \param [out] pSubPriority Subpriority value (starting from 0). 
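+
+  Illustrative round trip with NVIC_EncodePriority(); the priority grouping is
+  fixed to 0 on this core, as returned by NVIC_GetPriorityGrouping():
+  \code
+  uint32_t pre, sub;
+  NVIC_SetPriority(SysTick_IRQn, NVIC_EncodePriority(0U, 1U, 0U));
+  NVIC_DecodePriority(NVIC_GetPriority(SysTick_IRQn), 0U, &pre, &sub);
+  \endcode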
+ */ +__STATIC_INLINE void NVIC_DecodePriority (uint32_t Priority, uint32_t PriorityGroup, uint32_t* const pPreemptPriority, uint32_t* const pSubPriority) +{ + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + uint32_t PreemptPriorityBits; + uint32_t SubPriorityBits; + + PreemptPriorityBits = ((7UL - PriorityGroupTmp) > (uint32_t)(__NVIC_PRIO_BITS)) ? (uint32_t)(__NVIC_PRIO_BITS) : (uint32_t)(7UL - PriorityGroupTmp); + SubPriorityBits = ((PriorityGroupTmp + (uint32_t)(__NVIC_PRIO_BITS)) < (uint32_t)7UL) ? (uint32_t)0UL : (uint32_t)((PriorityGroupTmp - 7UL) + (uint32_t)(__NVIC_PRIO_BITS)); + + *pPreemptPriority = (Priority >> SubPriorityBits) & (uint32_t)((1UL << (PreemptPriorityBits)) - 1UL); + *pSubPriority = (Priority ) & (uint32_t)((1UL << (SubPriorityBits )) - 1UL); +} + + + +/** + \brief Set Interrupt Vector + \details Sets an interrupt vector in SRAM based interrupt vector table. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + Address 0 must be mapped to SRAM. + \param [in] IRQn Interrupt number + \param [in] vector Address of interrupt handler function + */ +__STATIC_INLINE void __NVIC_SetVector(IRQn_Type IRQn, uint32_t vector) +{ + uint32_t *vectors = (uint32_t *)0x0U; + vectors[(int32_t)IRQn + NVIC_USER_IRQ_OFFSET] = vector; +} + + +/** + \brief Get Interrupt Vector + \details Reads an interrupt vector from interrupt vector table. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \return Address of interrupt handler function + */ +__STATIC_INLINE uint32_t __NVIC_GetVector(IRQn_Type IRQn) +{ + uint32_t *vectors = (uint32_t *)0x0U; + return vectors[(int32_t)IRQn + NVIC_USER_IRQ_OFFSET]; +} + + +/** + \brief System Reset + \details Initiates a system reset request to reset the MCU. + */ +__NO_RETURN __STATIC_INLINE void __NVIC_SystemReset(void) +{ + __DSB(); /* Ensure all outstanding memory accesses included + buffered write are completed before reset */ + SCB->AIRCR = ((0x5FAUL << SCB_AIRCR_VECTKEY_Pos) | + SCB_AIRCR_SYSRESETREQ_Msk); + __DSB(); /* Ensure completion of memory access */ + + for(;;) /* wait until reset */ + { + __NOP(); + } +} + +/*@} end of CMSIS_Core_NVICFunctions */ + + +/* ########################## FPU functions #################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_FpuFunctions FPU Functions + \brief Function that provides FPU type. + @{ + */ + +/** + \brief get FPU type + \details returns the FPU type + \returns + - \b 0: No FPU + - \b 1: Single precision FPU + - \b 2: Double + Single precision FPU + */ +__STATIC_INLINE uint32_t SCB_GetFPUType(void) +{ + return 0U; /* No FPU */ +} + + +/*@} end of CMSIS_Core_FpuFunctions */ + + + +/* ################################## SysTick function ############################################ */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_SysTickFunctions SysTick Functions + \brief Functions that configure the System. + @{ + */ + +#if defined (__Vendor_SysTickConfig) && (__Vendor_SysTickConfig == 0U) + +/** + \brief System Tick Configuration + \details Initializes the System Timer and its interrupt, and starts the System Tick Timer. + Counter is in free running mode to generate periodic interrupts. + \param [in] ticks Number of ticks between two interrupts. + \return 0 Function succeeded. 
+ \return 1 Function failed. + \note When the variable __Vendor_SysTickConfig is set to 1, then the + function SysTick_Config is not included. In this case, the file device.h + must contain a vendor-specific implementation of this function. + */ +__STATIC_INLINE uint32_t SysTick_Config(uint32_t ticks) +{ + if ((ticks - 1UL) > SysTick_LOAD_RELOAD_Msk) + { + return (1UL); /* Reload value impossible */ + } + + SysTick->LOAD = (uint32_t)(ticks - 1UL); /* set reload register */ + NVIC_SetPriority (SysTick_IRQn, (1UL << __NVIC_PRIO_BITS) - 1UL); /* set Priority for Systick Interrupt */ + SysTick->VAL = 0UL; /* Load the SysTick Counter Value */ + SysTick->CTRL = SysTick_CTRL_CLKSOURCE_Msk | + SysTick_CTRL_TICKINT_Msk | + SysTick_CTRL_ENABLE_Msk; /* Enable SysTick IRQ and SysTick Timer */ + return (0UL); /* Function successful */ +} + +#endif + +/*@} end of CMSIS_Core_SysTickFunctions */ + + + + +#ifdef __cplusplus +} +#endif + +#endif /* __CORE_CM1_H_DEPENDANT */ + +#endif /* __CMSIS_GENERIC */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Drivers/CMSIS/Include/core_cm23.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Drivers/CMSIS/Include/core_cm23.h new file mode 100644 index 0000000..8202a8d --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Drivers/CMSIS/Include/core_cm23.h @@ -0,0 +1,1993 @@ +/**************************************************************************//** + * @file core_cm23.h + * @brief CMSIS Cortex-M23 Core Peripheral Access Layer Header File + * @version V5.0.7 + * @date 22. June 2018 + ******************************************************************************/ +/* + * Copyright (c) 2009-2018 Arm Limited. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#if defined ( __ICCARM__ ) + #pragma system_include /* treat file as system include file for MISRA check */ +#elif defined (__clang__) + #pragma clang system_header /* treat file as system include file */ +#endif + +#ifndef __CORE_CM23_H_GENERIC +#define __CORE_CM23_H_GENERIC + +#include + +#ifdef __cplusplus + extern "C" { +#endif + +/** + \page CMSIS_MISRA_Exceptions MISRA-C:2004 Compliance Exceptions + CMSIS violates the following MISRA-C:2004 rules: + + \li Required Rule 8.5, object/function definition in header file.
+ Function definitions in header files are used to allow 'inlining'. + + \li Required Rule 18.4, declaration of union type or object of union type: '{...}'.
+ Unions are used for effective representation of core registers. + + \li Advisory Rule 19.7, Function-like macro defined.
+ Function-like macros are used to allow more efficient code. + */ + + +/******************************************************************************* + * CMSIS definitions + ******************************************************************************/ +/** + \ingroup Cortex_M23 + @{ + */ + +#include "cmsis_version.h" + +/* CMSIS definitions */ +#define __CM23_CMSIS_VERSION_MAIN (__CM_CMSIS_VERSION_MAIN) /*!< \deprecated [31:16] CMSIS HAL main version */ +#define __CM23_CMSIS_VERSION_SUB (__CM_CMSIS_VERSION_SUB) /*!< \deprecated [15:0] CMSIS HAL sub version */ +#define __CM23_CMSIS_VERSION ((__CM23_CMSIS_VERSION_MAIN << 16U) | \ + __CM23_CMSIS_VERSION_SUB ) /*!< \deprecated CMSIS HAL version number */ + +#define __CORTEX_M (23U) /*!< Cortex-M Core */ + +/** __FPU_USED indicates whether an FPU is used or not. + This core does not support an FPU at all +*/ +#define __FPU_USED 0U + +#if defined ( __CC_ARM ) + #if defined __TARGET_FPU_VFP + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined (__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050) + #if defined __ARM_PCS_VFP + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined ( __GNUC__ ) + #if defined (__VFP_FP__) && !defined(__SOFTFP__) + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined ( __ICCARM__ ) + #if defined __ARMVFP__ + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined ( __TI_ARM__ ) + #if defined __TI_VFP_SUPPORT__ + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined ( __TASKING__ ) + #if defined __FPU_VFP__ + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined ( __CSMC__ ) + #if ( __CSMC__ & 0x400U) + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#endif + +#include "cmsis_compiler.h" /* CMSIS compiler specific defines */ + + +#ifdef __cplusplus +} +#endif + +#endif /* __CORE_CM23_H_GENERIC */ + +#ifndef __CMSIS_GENERIC + +#ifndef __CORE_CM23_H_DEPENDANT +#define __CORE_CM23_H_DEPENDANT + +#ifdef __cplusplus + extern "C" { +#endif + +/* check device defines and use defaults */ +#if defined __CHECK_DEVICE_DEFINES + #ifndef __CM23_REV + #define __CM23_REV 0x0000U + #warning "__CM23_REV not defined in device header file; using default!" + #endif + + #ifndef __FPU_PRESENT + #define __FPU_PRESENT 0U + #warning "__FPU_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __MPU_PRESENT + #define __MPU_PRESENT 0U + #warning "__MPU_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __SAUREGION_PRESENT + #define __SAUREGION_PRESENT 0U + #warning "__SAUREGION_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __VTOR_PRESENT + #define __VTOR_PRESENT 0U + #warning "__VTOR_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __NVIC_PRIO_BITS + #define __NVIC_PRIO_BITS 2U + #warning "__NVIC_PRIO_BITS not defined in device header file; using default!" + #endif + + #ifndef __Vendor_SysTickConfig + #define __Vendor_SysTickConfig 0U + #warning "__Vendor_SysTickConfig not defined in device header file; using default!" 
+ #endif + + #ifndef __ETM_PRESENT + #define __ETM_PRESENT 0U + #warning "__ETM_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __MTB_PRESENT + #define __MTB_PRESENT 0U + #warning "__MTB_PRESENT not defined in device header file; using default!" + #endif + +#endif + +/* IO definitions (access restrictions to peripheral registers) */ +/** + \defgroup CMSIS_glob_defs CMSIS Global Defines + + IO Type Qualifiers are used + \li to specify the access to peripheral variables. + \li for automatic generation of peripheral register debug information. +*/ +#ifdef __cplusplus + #define __I volatile /*!< Defines 'read only' permissions */ +#else + #define __I volatile const /*!< Defines 'read only' permissions */ +#endif +#define __O volatile /*!< Defines 'write only' permissions */ +#define __IO volatile /*!< Defines 'read / write' permissions */ + +/* following defines should be used for structure members */ +#define __IM volatile const /*! Defines 'read only' structure member permissions */ +#define __OM volatile /*! Defines 'write only' structure member permissions */ +#define __IOM volatile /*! Defines 'read / write' structure member permissions */ + +/*@} end of group Cortex_M23 */ + + + +/******************************************************************************* + * Register Abstraction + Core Register contain: + - Core Register + - Core NVIC Register + - Core SCB Register + - Core SysTick Register + - Core Debug Register + - Core MPU Register + - Core SAU Register + ******************************************************************************/ +/** + \defgroup CMSIS_core_register Defines and Type Definitions + \brief Type definitions and defines for Cortex-M processor based devices. +*/ + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_CORE Status and Control Registers + \brief Core Register type definitions. + @{ + */ + +/** + \brief Union type to access the Application Program Status Register (APSR). + */ +typedef union +{ + struct + { + uint32_t _reserved0:28; /*!< bit: 0..27 Reserved */ + uint32_t V:1; /*!< bit: 28 Overflow condition code flag */ + uint32_t C:1; /*!< bit: 29 Carry condition code flag */ + uint32_t Z:1; /*!< bit: 30 Zero condition code flag */ + uint32_t N:1; /*!< bit: 31 Negative condition code flag */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} APSR_Type; + +/* APSR Register Definitions */ +#define APSR_N_Pos 31U /*!< APSR: N Position */ +#define APSR_N_Msk (1UL << APSR_N_Pos) /*!< APSR: N Mask */ + +#define APSR_Z_Pos 30U /*!< APSR: Z Position */ +#define APSR_Z_Msk (1UL << APSR_Z_Pos) /*!< APSR: Z Mask */ + +#define APSR_C_Pos 29U /*!< APSR: C Position */ +#define APSR_C_Msk (1UL << APSR_C_Pos) /*!< APSR: C Mask */ + +#define APSR_V_Pos 28U /*!< APSR: V Position */ +#define APSR_V_Msk (1UL << APSR_V_Pos) /*!< APSR: V Mask */ + + +/** + \brief Union type to access the Interrupt Program Status Register (IPSR). + */ +typedef union +{ + struct + { + uint32_t ISR:9; /*!< bit: 0.. 8 Exception number */ + uint32_t _reserved0:23; /*!< bit: 9..31 Reserved */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} IPSR_Type; + +/* IPSR Register Definitions */ +#define IPSR_ISR_Pos 0U /*!< IPSR: ISR Position */ +#define IPSR_ISR_Msk (0x1FFUL /*<< IPSR_ISR_Pos*/) /*!< IPSR: ISR Mask */ + + +/** + \brief Union type to access the Special-Purpose Program Status Registers (xPSR). + */ +typedef union +{ + struct + { + uint32_t ISR:9; /*!< bit: 0.. 
8 Exception number */ + uint32_t _reserved0:15; /*!< bit: 9..23 Reserved */ + uint32_t T:1; /*!< bit: 24 Thumb bit (read 0) */ + uint32_t _reserved1:3; /*!< bit: 25..27 Reserved */ + uint32_t V:1; /*!< bit: 28 Overflow condition code flag */ + uint32_t C:1; /*!< bit: 29 Carry condition code flag */ + uint32_t Z:1; /*!< bit: 30 Zero condition code flag */ + uint32_t N:1; /*!< bit: 31 Negative condition code flag */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} xPSR_Type; + +/* xPSR Register Definitions */ +#define xPSR_N_Pos 31U /*!< xPSR: N Position */ +#define xPSR_N_Msk (1UL << xPSR_N_Pos) /*!< xPSR: N Mask */ + +#define xPSR_Z_Pos 30U /*!< xPSR: Z Position */ +#define xPSR_Z_Msk (1UL << xPSR_Z_Pos) /*!< xPSR: Z Mask */ + +#define xPSR_C_Pos 29U /*!< xPSR: C Position */ +#define xPSR_C_Msk (1UL << xPSR_C_Pos) /*!< xPSR: C Mask */ + +#define xPSR_V_Pos 28U /*!< xPSR: V Position */ +#define xPSR_V_Msk (1UL << xPSR_V_Pos) /*!< xPSR: V Mask */ + +#define xPSR_T_Pos 24U /*!< xPSR: T Position */ +#define xPSR_T_Msk (1UL << xPSR_T_Pos) /*!< xPSR: T Mask */ + +#define xPSR_ISR_Pos 0U /*!< xPSR: ISR Position */ +#define xPSR_ISR_Msk (0x1FFUL /*<< xPSR_ISR_Pos*/) /*!< xPSR: ISR Mask */ + + +/** + \brief Union type to access the Control Registers (CONTROL). + */ +typedef union +{ + struct + { + uint32_t nPRIV:1; /*!< bit: 0 Execution privilege in Thread mode */ + uint32_t SPSEL:1; /*!< bit: 1 Stack-pointer select */ + uint32_t _reserved1:30; /*!< bit: 2..31 Reserved */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} CONTROL_Type; + +/* CONTROL Register Definitions */ +#define CONTROL_SPSEL_Pos 1U /*!< CONTROL: SPSEL Position */ +#define CONTROL_SPSEL_Msk (1UL << CONTROL_SPSEL_Pos) /*!< CONTROL: SPSEL Mask */ + +#define CONTROL_nPRIV_Pos 0U /*!< CONTROL: nPRIV Position */ +#define CONTROL_nPRIV_Msk (1UL /*<< CONTROL_nPRIV_Pos*/) /*!< CONTROL: nPRIV Mask */ + +/*@} end of group CMSIS_CORE */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_NVIC Nested Vectored Interrupt Controller (NVIC) + \brief Type definitions for the NVIC Registers + @{ + */ + +/** + \brief Structure type to access the Nested Vectored Interrupt Controller (NVIC). + */ +typedef struct +{ + __IOM uint32_t ISER[16U]; /*!< Offset: 0x000 (R/W) Interrupt Set Enable Register */ + uint32_t RESERVED0[16U]; + __IOM uint32_t ICER[16U]; /*!< Offset: 0x080 (R/W) Interrupt Clear Enable Register */ + uint32_t RSERVED1[16U]; + __IOM uint32_t ISPR[16U]; /*!< Offset: 0x100 (R/W) Interrupt Set Pending Register */ + uint32_t RESERVED2[16U]; + __IOM uint32_t ICPR[16U]; /*!< Offset: 0x180 (R/W) Interrupt Clear Pending Register */ + uint32_t RESERVED3[16U]; + __IOM uint32_t IABR[16U]; /*!< Offset: 0x200 (R/W) Interrupt Active bit Register */ + uint32_t RESERVED4[16U]; + __IOM uint32_t ITNS[16U]; /*!< Offset: 0x280 (R/W) Interrupt Non-Secure State Register */ + uint32_t RESERVED5[16U]; + __IOM uint32_t IPR[124U]; /*!< Offset: 0x300 (R/W) Interrupt Priority Register */ +} NVIC_Type; + +/*@} end of group CMSIS_NVIC */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SCB System Control Block (SCB) + \brief Type definitions for the System Control Block Registers + @{ + */ + +/** + \brief Structure type to access the System Control Block (SCB). 
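+
+  Illustrative access sketch, using the register and bit definitions below:
+  \code
+  SCB->ICSR = SCB_ICSR_PENDSVSET_Msk;   /* pend a PendSV exception */
+  uint32_t part = (SCB->CPUID & SCB_CPUID_PARTNO_Msk) >> SCB_CPUID_PARTNO_Pos;
+  \endcode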
+ */ +typedef struct +{ + __IM uint32_t CPUID; /*!< Offset: 0x000 (R/ ) CPUID Base Register */ + __IOM uint32_t ICSR; /*!< Offset: 0x004 (R/W) Interrupt Control and State Register */ +#if defined (__VTOR_PRESENT) && (__VTOR_PRESENT == 1U) + __IOM uint32_t VTOR; /*!< Offset: 0x008 (R/W) Vector Table Offset Register */ +#else + uint32_t RESERVED0; +#endif + __IOM uint32_t AIRCR; /*!< Offset: 0x00C (R/W) Application Interrupt and Reset Control Register */ + __IOM uint32_t SCR; /*!< Offset: 0x010 (R/W) System Control Register */ + __IOM uint32_t CCR; /*!< Offset: 0x014 (R/W) Configuration Control Register */ + uint32_t RESERVED1; + __IOM uint32_t SHPR[2U]; /*!< Offset: 0x01C (R/W) System Handlers Priority Registers. [0] is RESERVED */ + __IOM uint32_t SHCSR; /*!< Offset: 0x024 (R/W) System Handler Control and State Register */ +} SCB_Type; + +/* SCB CPUID Register Definitions */ +#define SCB_CPUID_IMPLEMENTER_Pos 24U /*!< SCB CPUID: IMPLEMENTER Position */ +#define SCB_CPUID_IMPLEMENTER_Msk (0xFFUL << SCB_CPUID_IMPLEMENTER_Pos) /*!< SCB CPUID: IMPLEMENTER Mask */ + +#define SCB_CPUID_VARIANT_Pos 20U /*!< SCB CPUID: VARIANT Position */ +#define SCB_CPUID_VARIANT_Msk (0xFUL << SCB_CPUID_VARIANT_Pos) /*!< SCB CPUID: VARIANT Mask */ + +#define SCB_CPUID_ARCHITECTURE_Pos 16U /*!< SCB CPUID: ARCHITECTURE Position */ +#define SCB_CPUID_ARCHITECTURE_Msk (0xFUL << SCB_CPUID_ARCHITECTURE_Pos) /*!< SCB CPUID: ARCHITECTURE Mask */ + +#define SCB_CPUID_PARTNO_Pos 4U /*!< SCB CPUID: PARTNO Position */ +#define SCB_CPUID_PARTNO_Msk (0xFFFUL << SCB_CPUID_PARTNO_Pos) /*!< SCB CPUID: PARTNO Mask */ + +#define SCB_CPUID_REVISION_Pos 0U /*!< SCB CPUID: REVISION Position */ +#define SCB_CPUID_REVISION_Msk (0xFUL /*<< SCB_CPUID_REVISION_Pos*/) /*!< SCB CPUID: REVISION Mask */ + +/* SCB Interrupt Control State Register Definitions */ +#define SCB_ICSR_PENDNMISET_Pos 31U /*!< SCB ICSR: PENDNMISET Position */ +#define SCB_ICSR_PENDNMISET_Msk (1UL << SCB_ICSR_PENDNMISET_Pos) /*!< SCB ICSR: PENDNMISET Mask */ + +#define SCB_ICSR_NMIPENDSET_Pos SCB_ICSR_PENDNMISET_Pos /*!< SCB ICSR: NMIPENDSET Position, backward compatibility */ +#define SCB_ICSR_NMIPENDSET_Msk SCB_ICSR_PENDNMISET_Msk /*!< SCB ICSR: NMIPENDSET Mask, backward compatibility */ + +#define SCB_ICSR_PENDNMICLR_Pos 30U /*!< SCB ICSR: PENDNMICLR Position */ +#define SCB_ICSR_PENDNMICLR_Msk (1UL << SCB_ICSR_PENDNMICLR_Pos) /*!< SCB ICSR: PENDNMICLR Mask */ + +#define SCB_ICSR_PENDSVSET_Pos 28U /*!< SCB ICSR: PENDSVSET Position */ +#define SCB_ICSR_PENDSVSET_Msk (1UL << SCB_ICSR_PENDSVSET_Pos) /*!< SCB ICSR: PENDSVSET Mask */ + +#define SCB_ICSR_PENDSVCLR_Pos 27U /*!< SCB ICSR: PENDSVCLR Position */ +#define SCB_ICSR_PENDSVCLR_Msk (1UL << SCB_ICSR_PENDSVCLR_Pos) /*!< SCB ICSR: PENDSVCLR Mask */ + +#define SCB_ICSR_PENDSTSET_Pos 26U /*!< SCB ICSR: PENDSTSET Position */ +#define SCB_ICSR_PENDSTSET_Msk (1UL << SCB_ICSR_PENDSTSET_Pos) /*!< SCB ICSR: PENDSTSET Mask */ + +#define SCB_ICSR_PENDSTCLR_Pos 25U /*!< SCB ICSR: PENDSTCLR Position */ +#define SCB_ICSR_PENDSTCLR_Msk (1UL << SCB_ICSR_PENDSTCLR_Pos) /*!< SCB ICSR: PENDSTCLR Mask */ + +#define SCB_ICSR_STTNS_Pos 24U /*!< SCB ICSR: STTNS Position (Security Extension) */ +#define SCB_ICSR_STTNS_Msk (1UL << SCB_ICSR_STTNS_Pos) /*!< SCB ICSR: STTNS Mask (Security Extension) */ + +#define SCB_ICSR_ISRPREEMPT_Pos 23U /*!< SCB ICSR: ISRPREEMPT Position */ +#define SCB_ICSR_ISRPREEMPT_Msk (1UL << SCB_ICSR_ISRPREEMPT_Pos) /*!< SCB ICSR: ISRPREEMPT Mask */ + +#define SCB_ICSR_ISRPENDING_Pos 22U /*!< SCB ICSR: ISRPENDING 
Position */ +#define SCB_ICSR_ISRPENDING_Msk (1UL << SCB_ICSR_ISRPENDING_Pos) /*!< SCB ICSR: ISRPENDING Mask */ + +#define SCB_ICSR_VECTPENDING_Pos 12U /*!< SCB ICSR: VECTPENDING Position */ +#define SCB_ICSR_VECTPENDING_Msk (0x1FFUL << SCB_ICSR_VECTPENDING_Pos) /*!< SCB ICSR: VECTPENDING Mask */ + +#define SCB_ICSR_RETTOBASE_Pos 11U /*!< SCB ICSR: RETTOBASE Position */ +#define SCB_ICSR_RETTOBASE_Msk (1UL << SCB_ICSR_RETTOBASE_Pos) /*!< SCB ICSR: RETTOBASE Mask */ + +#define SCB_ICSR_VECTACTIVE_Pos 0U /*!< SCB ICSR: VECTACTIVE Position */ +#define SCB_ICSR_VECTACTIVE_Msk (0x1FFUL /*<< SCB_ICSR_VECTACTIVE_Pos*/) /*!< SCB ICSR: VECTACTIVE Mask */ + +#if defined (__VTOR_PRESENT) && (__VTOR_PRESENT == 1U) +/* SCB Vector Table Offset Register Definitions */ +#define SCB_VTOR_TBLOFF_Pos 7U /*!< SCB VTOR: TBLOFF Position */ +#define SCB_VTOR_TBLOFF_Msk (0x1FFFFFFUL << SCB_VTOR_TBLOFF_Pos) /*!< SCB VTOR: TBLOFF Mask */ +#endif + +/* SCB Application Interrupt and Reset Control Register Definitions */ +#define SCB_AIRCR_VECTKEY_Pos 16U /*!< SCB AIRCR: VECTKEY Position */ +#define SCB_AIRCR_VECTKEY_Msk (0xFFFFUL << SCB_AIRCR_VECTKEY_Pos) /*!< SCB AIRCR: VECTKEY Mask */ + +#define SCB_AIRCR_VECTKEYSTAT_Pos 16U /*!< SCB AIRCR: VECTKEYSTAT Position */ +#define SCB_AIRCR_VECTKEYSTAT_Msk (0xFFFFUL << SCB_AIRCR_VECTKEYSTAT_Pos) /*!< SCB AIRCR: VECTKEYSTAT Mask */ + +#define SCB_AIRCR_ENDIANESS_Pos 15U /*!< SCB AIRCR: ENDIANESS Position */ +#define SCB_AIRCR_ENDIANESS_Msk (1UL << SCB_AIRCR_ENDIANESS_Pos) /*!< SCB AIRCR: ENDIANESS Mask */ + +#define SCB_AIRCR_PRIS_Pos 14U /*!< SCB AIRCR: PRIS Position */ +#define SCB_AIRCR_PRIS_Msk (1UL << SCB_AIRCR_PRIS_Pos) /*!< SCB AIRCR: PRIS Mask */ + +#define SCB_AIRCR_BFHFNMINS_Pos 13U /*!< SCB AIRCR: BFHFNMINS Position */ +#define SCB_AIRCR_BFHFNMINS_Msk (1UL << SCB_AIRCR_BFHFNMINS_Pos) /*!< SCB AIRCR: BFHFNMINS Mask */ + +#define SCB_AIRCR_SYSRESETREQS_Pos 3U /*!< SCB AIRCR: SYSRESETREQS Position */ +#define SCB_AIRCR_SYSRESETREQS_Msk (1UL << SCB_AIRCR_SYSRESETREQS_Pos) /*!< SCB AIRCR: SYSRESETREQS Mask */ + +#define SCB_AIRCR_SYSRESETREQ_Pos 2U /*!< SCB AIRCR: SYSRESETREQ Position */ +#define SCB_AIRCR_SYSRESETREQ_Msk (1UL << SCB_AIRCR_SYSRESETREQ_Pos) /*!< SCB AIRCR: SYSRESETREQ Mask */ + +#define SCB_AIRCR_VECTCLRACTIVE_Pos 1U /*!< SCB AIRCR: VECTCLRACTIVE Position */ +#define SCB_AIRCR_VECTCLRACTIVE_Msk (1UL << SCB_AIRCR_VECTCLRACTIVE_Pos) /*!< SCB AIRCR: VECTCLRACTIVE Mask */ + +/* SCB System Control Register Definitions */ +#define SCB_SCR_SEVONPEND_Pos 4U /*!< SCB SCR: SEVONPEND Position */ +#define SCB_SCR_SEVONPEND_Msk (1UL << SCB_SCR_SEVONPEND_Pos) /*!< SCB SCR: SEVONPEND Mask */ + +#define SCB_SCR_SLEEPDEEPS_Pos 3U /*!< SCB SCR: SLEEPDEEPS Position */ +#define SCB_SCR_SLEEPDEEPS_Msk (1UL << SCB_SCR_SLEEPDEEPS_Pos) /*!< SCB SCR: SLEEPDEEPS Mask */ + +#define SCB_SCR_SLEEPDEEP_Pos 2U /*!< SCB SCR: SLEEPDEEP Position */ +#define SCB_SCR_SLEEPDEEP_Msk (1UL << SCB_SCR_SLEEPDEEP_Pos) /*!< SCB SCR: SLEEPDEEP Mask */ + +#define SCB_SCR_SLEEPONEXIT_Pos 1U /*!< SCB SCR: SLEEPONEXIT Position */ +#define SCB_SCR_SLEEPONEXIT_Msk (1UL << SCB_SCR_SLEEPONEXIT_Pos) /*!< SCB SCR: SLEEPONEXIT Mask */ + +/* SCB Configuration Control Register Definitions */ +#define SCB_CCR_BP_Pos 18U /*!< SCB CCR: BP Position */ +#define SCB_CCR_BP_Msk (1UL << SCB_CCR_BP_Pos) /*!< SCB CCR: BP Mask */ + +#define SCB_CCR_IC_Pos 17U /*!< SCB CCR: IC Position */ +#define SCB_CCR_IC_Msk (1UL << SCB_CCR_IC_Pos) /*!< SCB CCR: IC Mask */ + +#define SCB_CCR_DC_Pos 16U /*!< SCB CCR: DC Position */ 
+#define SCB_CCR_DC_Msk (1UL << SCB_CCR_DC_Pos) /*!< SCB CCR: DC Mask */ + +#define SCB_CCR_STKOFHFNMIGN_Pos 10U /*!< SCB CCR: STKOFHFNMIGN Position */ +#define SCB_CCR_STKOFHFNMIGN_Msk (1UL << SCB_CCR_STKOFHFNMIGN_Pos) /*!< SCB CCR: STKOFHFNMIGN Mask */ + +#define SCB_CCR_BFHFNMIGN_Pos 8U /*!< SCB CCR: BFHFNMIGN Position */ +#define SCB_CCR_BFHFNMIGN_Msk (1UL << SCB_CCR_BFHFNMIGN_Pos) /*!< SCB CCR: BFHFNMIGN Mask */ + +#define SCB_CCR_DIV_0_TRP_Pos 4U /*!< SCB CCR: DIV_0_TRP Position */ +#define SCB_CCR_DIV_0_TRP_Msk (1UL << SCB_CCR_DIV_0_TRP_Pos) /*!< SCB CCR: DIV_0_TRP Mask */ + +#define SCB_CCR_UNALIGN_TRP_Pos 3U /*!< SCB CCR: UNALIGN_TRP Position */ +#define SCB_CCR_UNALIGN_TRP_Msk (1UL << SCB_CCR_UNALIGN_TRP_Pos) /*!< SCB CCR: UNALIGN_TRP Mask */ + +#define SCB_CCR_USERSETMPEND_Pos 1U /*!< SCB CCR: USERSETMPEND Position */ +#define SCB_CCR_USERSETMPEND_Msk (1UL << SCB_CCR_USERSETMPEND_Pos) /*!< SCB CCR: USERSETMPEND Mask */ + +/* SCB System Handler Control and State Register Definitions */ +#define SCB_SHCSR_HARDFAULTPENDED_Pos 21U /*!< SCB SHCSR: HARDFAULTPENDED Position */ +#define SCB_SHCSR_HARDFAULTPENDED_Msk (1UL << SCB_SHCSR_HARDFAULTPENDED_Pos) /*!< SCB SHCSR: HARDFAULTPENDED Mask */ + +#define SCB_SHCSR_SVCALLPENDED_Pos 15U /*!< SCB SHCSR: SVCALLPENDED Position */ +#define SCB_SHCSR_SVCALLPENDED_Msk (1UL << SCB_SHCSR_SVCALLPENDED_Pos) /*!< SCB SHCSR: SVCALLPENDED Mask */ + +#define SCB_SHCSR_SYSTICKACT_Pos 11U /*!< SCB SHCSR: SYSTICKACT Position */ +#define SCB_SHCSR_SYSTICKACT_Msk (1UL << SCB_SHCSR_SYSTICKACT_Pos) /*!< SCB SHCSR: SYSTICKACT Mask */ + +#define SCB_SHCSR_PENDSVACT_Pos 10U /*!< SCB SHCSR: PENDSVACT Position */ +#define SCB_SHCSR_PENDSVACT_Msk (1UL << SCB_SHCSR_PENDSVACT_Pos) /*!< SCB SHCSR: PENDSVACT Mask */ + +#define SCB_SHCSR_SVCALLACT_Pos 7U /*!< SCB SHCSR: SVCALLACT Position */ +#define SCB_SHCSR_SVCALLACT_Msk (1UL << SCB_SHCSR_SVCALLACT_Pos) /*!< SCB SHCSR: SVCALLACT Mask */ + +#define SCB_SHCSR_NMIACT_Pos 5U /*!< SCB SHCSR: NMIACT Position */ +#define SCB_SHCSR_NMIACT_Msk (1UL << SCB_SHCSR_NMIACT_Pos) /*!< SCB SHCSR: NMIACT Mask */ + +#define SCB_SHCSR_HARDFAULTACT_Pos 2U /*!< SCB SHCSR: HARDFAULTACT Position */ +#define SCB_SHCSR_HARDFAULTACT_Msk (1UL << SCB_SHCSR_HARDFAULTACT_Pos) /*!< SCB SHCSR: HARDFAULTACT Mask */ + +/*@} end of group CMSIS_SCB */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SysTick System Tick Timer (SysTick) + \brief Type definitions for the System Timer Registers. + @{ + */ + +/** + \brief Structure type to access the System Timer (SysTick). 
+ */ +typedef struct +{ + __IOM uint32_t CTRL; /*!< Offset: 0x000 (R/W) SysTick Control and Status Register */ + __IOM uint32_t LOAD; /*!< Offset: 0x004 (R/W) SysTick Reload Value Register */ + __IOM uint32_t VAL; /*!< Offset: 0x008 (R/W) SysTick Current Value Register */ + __IM uint32_t CALIB; /*!< Offset: 0x00C (R/ ) SysTick Calibration Register */ +} SysTick_Type; + +/* SysTick Control / Status Register Definitions */ +#define SysTick_CTRL_COUNTFLAG_Pos 16U /*!< SysTick CTRL: COUNTFLAG Position */ +#define SysTick_CTRL_COUNTFLAG_Msk (1UL << SysTick_CTRL_COUNTFLAG_Pos) /*!< SysTick CTRL: COUNTFLAG Mask */ + +#define SysTick_CTRL_CLKSOURCE_Pos 2U /*!< SysTick CTRL: CLKSOURCE Position */ +#define SysTick_CTRL_CLKSOURCE_Msk (1UL << SysTick_CTRL_CLKSOURCE_Pos) /*!< SysTick CTRL: CLKSOURCE Mask */ + +#define SysTick_CTRL_TICKINT_Pos 1U /*!< SysTick CTRL: TICKINT Position */ +#define SysTick_CTRL_TICKINT_Msk (1UL << SysTick_CTRL_TICKINT_Pos) /*!< SysTick CTRL: TICKINT Mask */ + +#define SysTick_CTRL_ENABLE_Pos 0U /*!< SysTick CTRL: ENABLE Position */ +#define SysTick_CTRL_ENABLE_Msk (1UL /*<< SysTick_CTRL_ENABLE_Pos*/) /*!< SysTick CTRL: ENABLE Mask */ + +/* SysTick Reload Register Definitions */ +#define SysTick_LOAD_RELOAD_Pos 0U /*!< SysTick LOAD: RELOAD Position */ +#define SysTick_LOAD_RELOAD_Msk (0xFFFFFFUL /*<< SysTick_LOAD_RELOAD_Pos*/) /*!< SysTick LOAD: RELOAD Mask */ + +/* SysTick Current Register Definitions */ +#define SysTick_VAL_CURRENT_Pos 0U /*!< SysTick VAL: CURRENT Position */ +#define SysTick_VAL_CURRENT_Msk (0xFFFFFFUL /*<< SysTick_VAL_CURRENT_Pos*/) /*!< SysTick VAL: CURRENT Mask */ + +/* SysTick Calibration Register Definitions */ +#define SysTick_CALIB_NOREF_Pos 31U /*!< SysTick CALIB: NOREF Position */ +#define SysTick_CALIB_NOREF_Msk (1UL << SysTick_CALIB_NOREF_Pos) /*!< SysTick CALIB: NOREF Mask */ + +#define SysTick_CALIB_SKEW_Pos 30U /*!< SysTick CALIB: SKEW Position */ +#define SysTick_CALIB_SKEW_Msk (1UL << SysTick_CALIB_SKEW_Pos) /*!< SysTick CALIB: SKEW Mask */ + +#define SysTick_CALIB_TENMS_Pos 0U /*!< SysTick CALIB: TENMS Position */ +#define SysTick_CALIB_TENMS_Msk (0xFFFFFFUL /*<< SysTick_CALIB_TENMS_Pos*/) /*!< SysTick CALIB: TENMS Mask */ + +/*@} end of group CMSIS_SysTick */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_DWT Data Watchpoint and Trace (DWT) + \brief Type definitions for the Data Watchpoint and Trace (DWT) + @{ + */ + +/** + \brief Structure type to access the Data Watchpoint and Trace Register (DWT). 
+ */ +typedef struct +{ + __IOM uint32_t CTRL; /*!< Offset: 0x000 (R/W) Control Register */ + uint32_t RESERVED0[6U]; + __IM uint32_t PCSR; /*!< Offset: 0x01C (R/ ) Program Counter Sample Register */ + __IOM uint32_t COMP0; /*!< Offset: 0x020 (R/W) Comparator Register 0 */ + uint32_t RESERVED1[1U]; + __IOM uint32_t FUNCTION0; /*!< Offset: 0x028 (R/W) Function Register 0 */ + uint32_t RESERVED2[1U]; + __IOM uint32_t COMP1; /*!< Offset: 0x030 (R/W) Comparator Register 1 */ + uint32_t RESERVED3[1U]; + __IOM uint32_t FUNCTION1; /*!< Offset: 0x038 (R/W) Function Register 1 */ + uint32_t RESERVED4[1U]; + __IOM uint32_t COMP2; /*!< Offset: 0x040 (R/W) Comparator Register 2 */ + uint32_t RESERVED5[1U]; + __IOM uint32_t FUNCTION2; /*!< Offset: 0x048 (R/W) Function Register 2 */ + uint32_t RESERVED6[1U]; + __IOM uint32_t COMP3; /*!< Offset: 0x050 (R/W) Comparator Register 3 */ + uint32_t RESERVED7[1U]; + __IOM uint32_t FUNCTION3; /*!< Offset: 0x058 (R/W) Function Register 3 */ + uint32_t RESERVED8[1U]; + __IOM uint32_t COMP4; /*!< Offset: 0x060 (R/W) Comparator Register 4 */ + uint32_t RESERVED9[1U]; + __IOM uint32_t FUNCTION4; /*!< Offset: 0x068 (R/W) Function Register 4 */ + uint32_t RESERVED10[1U]; + __IOM uint32_t COMP5; /*!< Offset: 0x070 (R/W) Comparator Register 5 */ + uint32_t RESERVED11[1U]; + __IOM uint32_t FUNCTION5; /*!< Offset: 0x078 (R/W) Function Register 5 */ + uint32_t RESERVED12[1U]; + __IOM uint32_t COMP6; /*!< Offset: 0x080 (R/W) Comparator Register 6 */ + uint32_t RESERVED13[1U]; + __IOM uint32_t FUNCTION6; /*!< Offset: 0x088 (R/W) Function Register 6 */ + uint32_t RESERVED14[1U]; + __IOM uint32_t COMP7; /*!< Offset: 0x090 (R/W) Comparator Register 7 */ + uint32_t RESERVED15[1U]; + __IOM uint32_t FUNCTION7; /*!< Offset: 0x098 (R/W) Function Register 7 */ + uint32_t RESERVED16[1U]; + __IOM uint32_t COMP8; /*!< Offset: 0x0A0 (R/W) Comparator Register 8 */ + uint32_t RESERVED17[1U]; + __IOM uint32_t FUNCTION8; /*!< Offset: 0x0A8 (R/W) Function Register 8 */ + uint32_t RESERVED18[1U]; + __IOM uint32_t COMP9; /*!< Offset: 0x0B0 (R/W) Comparator Register 9 */ + uint32_t RESERVED19[1U]; + __IOM uint32_t FUNCTION9; /*!< Offset: 0x0B8 (R/W) Function Register 9 */ + uint32_t RESERVED20[1U]; + __IOM uint32_t COMP10; /*!< Offset: 0x0C0 (R/W) Comparator Register 10 */ + uint32_t RESERVED21[1U]; + __IOM uint32_t FUNCTION10; /*!< Offset: 0x0C8 (R/W) Function Register 10 */ + uint32_t RESERVED22[1U]; + __IOM uint32_t COMP11; /*!< Offset: 0x0D0 (R/W) Comparator Register 11 */ + uint32_t RESERVED23[1U]; + __IOM uint32_t FUNCTION11; /*!< Offset: 0x0D8 (R/W) Function Register 11 */ + uint32_t RESERVED24[1U]; + __IOM uint32_t COMP12; /*!< Offset: 0x0E0 (R/W) Comparator Register 12 */ + uint32_t RESERVED25[1U]; + __IOM uint32_t FUNCTION12; /*!< Offset: 0x0E8 (R/W) Function Register 12 */ + uint32_t RESERVED26[1U]; + __IOM uint32_t COMP13; /*!< Offset: 0x0F0 (R/W) Comparator Register 13 */ + uint32_t RESERVED27[1U]; + __IOM uint32_t FUNCTION13; /*!< Offset: 0x0F8 (R/W) Function Register 13 */ + uint32_t RESERVED28[1U]; + __IOM uint32_t COMP14; /*!< Offset: 0x100 (R/W) Comparator Register 14 */ + uint32_t RESERVED29[1U]; + __IOM uint32_t FUNCTION14; /*!< Offset: 0x108 (R/W) Function Register 14 */ + uint32_t RESERVED30[1U]; + __IOM uint32_t COMP15; /*!< Offset: 0x110 (R/W) Comparator Register 15 */ + uint32_t RESERVED31[1U]; + __IOM uint32_t FUNCTION15; /*!< Offset: 0x118 (R/W) Function Register 15 */ +} DWT_Type; + +/* DWT Control Register Definitions */ +#define DWT_CTRL_NUMCOMP_Pos 28U /*!< DWT 
CTRL: NUMCOMP Position */ +#define DWT_CTRL_NUMCOMP_Msk (0xFUL << DWT_CTRL_NUMCOMP_Pos) /*!< DWT CTRL: NUMCOMP Mask */ + +#define DWT_CTRL_NOTRCPKT_Pos 27U /*!< DWT CTRL: NOTRCPKT Position */ +#define DWT_CTRL_NOTRCPKT_Msk (0x1UL << DWT_CTRL_NOTRCPKT_Pos) /*!< DWT CTRL: NOTRCPKT Mask */ + +#define DWT_CTRL_NOEXTTRIG_Pos 26U /*!< DWT CTRL: NOEXTTRIG Position */ +#define DWT_CTRL_NOEXTTRIG_Msk (0x1UL << DWT_CTRL_NOEXTTRIG_Pos) /*!< DWT CTRL: NOEXTTRIG Mask */ + +#define DWT_CTRL_NOCYCCNT_Pos 25U /*!< DWT CTRL: NOCYCCNT Position */ +#define DWT_CTRL_NOCYCCNT_Msk (0x1UL << DWT_CTRL_NOCYCCNT_Pos) /*!< DWT CTRL: NOCYCCNT Mask */ + +#define DWT_CTRL_NOPRFCNT_Pos 24U /*!< DWT CTRL: NOPRFCNT Position */ +#define DWT_CTRL_NOPRFCNT_Msk (0x1UL << DWT_CTRL_NOPRFCNT_Pos) /*!< DWT CTRL: NOPRFCNT Mask */ + +/* DWT Comparator Function Register Definitions */ +#define DWT_FUNCTION_ID_Pos 27U /*!< DWT FUNCTION: ID Position */ +#define DWT_FUNCTION_ID_Msk (0x1FUL << DWT_FUNCTION_ID_Pos) /*!< DWT FUNCTION: ID Mask */ + +#define DWT_FUNCTION_MATCHED_Pos 24U /*!< DWT FUNCTION: MATCHED Position */ +#define DWT_FUNCTION_MATCHED_Msk (0x1UL << DWT_FUNCTION_MATCHED_Pos) /*!< DWT FUNCTION: MATCHED Mask */ + +#define DWT_FUNCTION_DATAVSIZE_Pos 10U /*!< DWT FUNCTION: DATAVSIZE Position */ +#define DWT_FUNCTION_DATAVSIZE_Msk (0x3UL << DWT_FUNCTION_DATAVSIZE_Pos) /*!< DWT FUNCTION: DATAVSIZE Mask */ + +#define DWT_FUNCTION_ACTION_Pos 4U /*!< DWT FUNCTION: ACTION Position */ +#define DWT_FUNCTION_ACTION_Msk (0x3UL << DWT_FUNCTION_ACTION_Pos) /*!< DWT FUNCTION: ACTION Mask */ + +#define DWT_FUNCTION_MATCH_Pos 0U /*!< DWT FUNCTION: MATCH Position */ +#define DWT_FUNCTION_MATCH_Msk (0xFUL /*<< DWT_FUNCTION_MATCH_Pos*/) /*!< DWT FUNCTION: MATCH Mask */ + +/*@}*/ /* end of group CMSIS_DWT */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_TPI Trace Port Interface (TPI) + \brief Type definitions for the Trace Port Interface (TPI) + @{ + */ + +/** + \brief Structure type to access the Trace Port Interface Register (TPI). 
+ */ +typedef struct +{ + __IM uint32_t SSPSR; /*!< Offset: 0x000 (R/ ) Supported Parallel Port Size Register */ + __IOM uint32_t CSPSR; /*!< Offset: 0x004 (R/W) Current Parallel Port Size Register */ + uint32_t RESERVED0[2U]; + __IOM uint32_t ACPR; /*!< Offset: 0x010 (R/W) Asynchronous Clock Prescaler Register */ + uint32_t RESERVED1[55U]; + __IOM uint32_t SPPR; /*!< Offset: 0x0F0 (R/W) Selected Pin Protocol Register */ + uint32_t RESERVED2[131U]; + __IM uint32_t FFSR; /*!< Offset: 0x300 (R/ ) Formatter and Flush Status Register */ + __IOM uint32_t FFCR; /*!< Offset: 0x304 (R/W) Formatter and Flush Control Register */ + __IOM uint32_t PSCR; /*!< Offset: 0x308 (R/W) Periodic Synchronization Control Register */ + uint32_t RESERVED3[759U]; + __IM uint32_t TRIGGER; /*!< Offset: 0xEE8 (R/ ) TRIGGER Register */ + __IM uint32_t ITFTTD0; /*!< Offset: 0xEEC (R/ ) Integration Test FIFO Test Data 0 Register */ + __IOM uint32_t ITATBCTR2; /*!< Offset: 0xEF0 (R/W) Integration Test ATB Control Register 2 */ + uint32_t RESERVED4[1U]; + __IM uint32_t ITATBCTR0; /*!< Offset: 0xEF8 (R/ ) Integration Test ATB Control Register 0 */ + __IM uint32_t ITFTTD1; /*!< Offset: 0xEFC (R/ ) Integration Test FIFO Test Data 1 Register */ + __IOM uint32_t ITCTRL; /*!< Offset: 0xF00 (R/W) Integration Mode Control */ + uint32_t RESERVED5[39U]; + __IOM uint32_t CLAIMSET; /*!< Offset: 0xFA0 (R/W) Claim tag set */ + __IOM uint32_t CLAIMCLR; /*!< Offset: 0xFA4 (R/W) Claim tag clear */ + uint32_t RESERVED7[8U]; + __IM uint32_t DEVID; /*!< Offset: 0xFC8 (R/ ) Device Configuration Register */ + __IM uint32_t DEVTYPE; /*!< Offset: 0xFCC (R/ ) Device Type Identifier Register */ +} TPI_Type; + +/* TPI Asynchronous Clock Prescaler Register Definitions */ +#define TPI_ACPR_PRESCALER_Pos 0U /*!< TPI ACPR: PRESCALER Position */ +#define TPI_ACPR_PRESCALER_Msk (0x1FFFUL /*<< TPI_ACPR_PRESCALER_Pos*/) /*!< TPI ACPR: PRESCALER Mask */ + +/* TPI Selected Pin Protocol Register Definitions */ +#define TPI_SPPR_TXMODE_Pos 0U /*!< TPI SPPR: TXMODE Position */ +#define TPI_SPPR_TXMODE_Msk (0x3UL /*<< TPI_SPPR_TXMODE_Pos*/) /*!< TPI SPPR: TXMODE Mask */ + +/* TPI Formatter and Flush Status Register Definitions */ +#define TPI_FFSR_FtNonStop_Pos 3U /*!< TPI FFSR: FtNonStop Position */ +#define TPI_FFSR_FtNonStop_Msk (0x1UL << TPI_FFSR_FtNonStop_Pos) /*!< TPI FFSR: FtNonStop Mask */ + +#define TPI_FFSR_TCPresent_Pos 2U /*!< TPI FFSR: TCPresent Position */ +#define TPI_FFSR_TCPresent_Msk (0x1UL << TPI_FFSR_TCPresent_Pos) /*!< TPI FFSR: TCPresent Mask */ + +#define TPI_FFSR_FtStopped_Pos 1U /*!< TPI FFSR: FtStopped Position */ +#define TPI_FFSR_FtStopped_Msk (0x1UL << TPI_FFSR_FtStopped_Pos) /*!< TPI FFSR: FtStopped Mask */ + +#define TPI_FFSR_FlInProg_Pos 0U /*!< TPI FFSR: FlInProg Position */ +#define TPI_FFSR_FlInProg_Msk (0x1UL /*<< TPI_FFSR_FlInProg_Pos*/) /*!< TPI FFSR: FlInProg Mask */ + +/* TPI Formatter and Flush Control Register Definitions */ +#define TPI_FFCR_TrigIn_Pos 8U /*!< TPI FFCR: TrigIn Position */ +#define TPI_FFCR_TrigIn_Msk (0x1UL << TPI_FFCR_TrigIn_Pos) /*!< TPI FFCR: TrigIn Mask */ + +#define TPI_FFCR_FOnMan_Pos 6U /*!< TPI FFCR: FOnMan Position */ +#define TPI_FFCR_FOnMan_Msk (0x1UL << TPI_FFCR_FOnMan_Pos) /*!< TPI FFCR: FOnMan Mask */ + +#define TPI_FFCR_EnFCont_Pos 1U /*!< TPI FFCR: EnFCont Position */ +#define TPI_FFCR_EnFCont_Msk (0x1UL << TPI_FFCR_EnFCont_Pos) /*!< TPI FFCR: EnFCont Mask */ + +/* TPI TRIGGER Register Definitions */ +#define TPI_TRIGGER_TRIGGER_Pos 0U /*!< TPI TRIGGER: TRIGGER Position */ +#define 
TPI_TRIGGER_TRIGGER_Msk (0x1UL /*<< TPI_TRIGGER_TRIGGER_Pos*/) /*!< TPI TRIGGER: TRIGGER Mask */ + +/* TPI Integration Test FIFO Test Data 0 Register Definitions */ +#define TPI_ITFTTD0_ATB_IF2_ATVALID_Pos 29U /*!< TPI ITFTTD0: ATB Interface 2 ATVALIDPosition */ +#define TPI_ITFTTD0_ATB_IF2_ATVALID_Msk (0x3UL << TPI_ITFTTD0_ATB_IF2_ATVALID_Pos) /*!< TPI ITFTTD0: ATB Interface 2 ATVALID Mask */ + +#define TPI_ITFTTD0_ATB_IF2_bytecount_Pos 27U /*!< TPI ITFTTD0: ATB Interface 2 byte count Position */ +#define TPI_ITFTTD0_ATB_IF2_bytecount_Msk (0x3UL << TPI_ITFTTD0_ATB_IF2_bytecount_Pos) /*!< TPI ITFTTD0: ATB Interface 2 byte count Mask */ + +#define TPI_ITFTTD0_ATB_IF1_ATVALID_Pos 26U /*!< TPI ITFTTD0: ATB Interface 1 ATVALID Position */ +#define TPI_ITFTTD0_ATB_IF1_ATVALID_Msk (0x3UL << TPI_ITFTTD0_ATB_IF1_ATVALID_Pos) /*!< TPI ITFTTD0: ATB Interface 1 ATVALID Mask */ + +#define TPI_ITFTTD0_ATB_IF1_bytecount_Pos 24U /*!< TPI ITFTTD0: ATB Interface 1 byte count Position */ +#define TPI_ITFTTD0_ATB_IF1_bytecount_Msk (0x3UL << TPI_ITFTTD0_ATB_IF1_bytecount_Pos) /*!< TPI ITFTTD0: ATB Interface 1 byte countt Mask */ + +#define TPI_ITFTTD0_ATB_IF1_data2_Pos 16U /*!< TPI ITFTTD0: ATB Interface 1 data2 Position */ +#define TPI_ITFTTD0_ATB_IF1_data2_Msk (0xFFUL << TPI_ITFTTD0_ATB_IF1_data1_Pos) /*!< TPI ITFTTD0: ATB Interface 1 data2 Mask */ + +#define TPI_ITFTTD0_ATB_IF1_data1_Pos 8U /*!< TPI ITFTTD0: ATB Interface 1 data1 Position */ +#define TPI_ITFTTD0_ATB_IF1_data1_Msk (0xFFUL << TPI_ITFTTD0_ATB_IF1_data1_Pos) /*!< TPI ITFTTD0: ATB Interface 1 data1 Mask */ + +#define TPI_ITFTTD0_ATB_IF1_data0_Pos 0U /*!< TPI ITFTTD0: ATB Interface 1 data0 Position */ +#define TPI_ITFTTD0_ATB_IF1_data0_Msk (0xFFUL /*<< TPI_ITFTTD0_ATB_IF1_data0_Pos*/) /*!< TPI ITFTTD0: ATB Interface 1 data0 Mask */ + +/* TPI Integration Test ATB Control Register 2 Register Definitions */ +#define TPI_ITATBCTR2_AFVALID2S_Pos 1U /*!< TPI ITATBCTR2: AFVALID2S Position */ +#define TPI_ITATBCTR2_AFVALID2S_Msk (0x1UL << TPI_ITATBCTR2_AFVALID2S_Pos) /*!< TPI ITATBCTR2: AFVALID2SS Mask */ + +#define TPI_ITATBCTR2_AFVALID1S_Pos 1U /*!< TPI ITATBCTR2: AFVALID1S Position */ +#define TPI_ITATBCTR2_AFVALID1S_Msk (0x1UL << TPI_ITATBCTR2_AFVALID1S_Pos) /*!< TPI ITATBCTR2: AFVALID1SS Mask */ + +#define TPI_ITATBCTR2_ATREADY2S_Pos 0U /*!< TPI ITATBCTR2: ATREADY2S Position */ +#define TPI_ITATBCTR2_ATREADY2S_Msk (0x1UL /*<< TPI_ITATBCTR2_ATREADY2S_Pos*/) /*!< TPI ITATBCTR2: ATREADY2S Mask */ + +#define TPI_ITATBCTR2_ATREADY1S_Pos 0U /*!< TPI ITATBCTR2: ATREADY1S Position */ +#define TPI_ITATBCTR2_ATREADY1S_Msk (0x1UL /*<< TPI_ITATBCTR2_ATREADY1S_Pos*/) /*!< TPI ITATBCTR2: ATREADY1S Mask */ + +/* TPI Integration Test FIFO Test Data 1 Register Definitions */ +#define TPI_ITFTTD1_ATB_IF2_ATVALID_Pos 29U /*!< TPI ITFTTD1: ATB Interface 2 ATVALID Position */ +#define TPI_ITFTTD1_ATB_IF2_ATVALID_Msk (0x3UL << TPI_ITFTTD1_ATB_IF2_ATVALID_Pos) /*!< TPI ITFTTD1: ATB Interface 2 ATVALID Mask */ + +#define TPI_ITFTTD1_ATB_IF2_bytecount_Pos 27U /*!< TPI ITFTTD1: ATB Interface 2 byte count Position */ +#define TPI_ITFTTD1_ATB_IF2_bytecount_Msk (0x3UL << TPI_ITFTTD1_ATB_IF2_bytecount_Pos) /*!< TPI ITFTTD1: ATB Interface 2 byte count Mask */ + +#define TPI_ITFTTD1_ATB_IF1_ATVALID_Pos 26U /*!< TPI ITFTTD1: ATB Interface 1 ATVALID Position */ +#define TPI_ITFTTD1_ATB_IF1_ATVALID_Msk (0x3UL << TPI_ITFTTD1_ATB_IF1_ATVALID_Pos) /*!< TPI ITFTTD1: ATB Interface 1 ATVALID Mask */ + +#define TPI_ITFTTD1_ATB_IF1_bytecount_Pos 24U /*!< TPI ITFTTD1: ATB Interface 1 byte 
count Position */ +#define TPI_ITFTTD1_ATB_IF1_bytecount_Msk (0x3UL << TPI_ITFTTD1_ATB_IF1_bytecount_Pos) /*!< TPI ITFTTD1: ATB Interface 1 byte countt Mask */ + +#define TPI_ITFTTD1_ATB_IF2_data2_Pos 16U /*!< TPI ITFTTD1: ATB Interface 2 data2 Position */ +#define TPI_ITFTTD1_ATB_IF2_data2_Msk (0xFFUL << TPI_ITFTTD1_ATB_IF2_data1_Pos) /*!< TPI ITFTTD1: ATB Interface 2 data2 Mask */ + +#define TPI_ITFTTD1_ATB_IF2_data1_Pos 8U /*!< TPI ITFTTD1: ATB Interface 2 data1 Position */ +#define TPI_ITFTTD1_ATB_IF2_data1_Msk (0xFFUL << TPI_ITFTTD1_ATB_IF2_data1_Pos) /*!< TPI ITFTTD1: ATB Interface 2 data1 Mask */ + +#define TPI_ITFTTD1_ATB_IF2_data0_Pos 0U /*!< TPI ITFTTD1: ATB Interface 2 data0 Position */ +#define TPI_ITFTTD1_ATB_IF2_data0_Msk (0xFFUL /*<< TPI_ITFTTD1_ATB_IF2_data0_Pos*/) /*!< TPI ITFTTD1: ATB Interface 2 data0 Mask */ + +/* TPI Integration Test ATB Control Register 0 Definitions */ +#define TPI_ITATBCTR0_AFVALID2S_Pos 1U /*!< TPI ITATBCTR0: AFVALID2S Position */ +#define TPI_ITATBCTR0_AFVALID2S_Msk (0x1UL << TPI_ITATBCTR0_AFVALID2S_Pos) /*!< TPI ITATBCTR0: AFVALID2SS Mask */ + +#define TPI_ITATBCTR0_AFVALID1S_Pos 1U /*!< TPI ITATBCTR0: AFVALID1S Position */ +#define TPI_ITATBCTR0_AFVALID1S_Msk (0x1UL << TPI_ITATBCTR0_AFVALID1S_Pos) /*!< TPI ITATBCTR0: AFVALID1SS Mask */ + +#define TPI_ITATBCTR0_ATREADY2S_Pos 0U /*!< TPI ITATBCTR0: ATREADY2S Position */ +#define TPI_ITATBCTR0_ATREADY2S_Msk (0x1UL /*<< TPI_ITATBCTR0_ATREADY2S_Pos*/) /*!< TPI ITATBCTR0: ATREADY2S Mask */ + +#define TPI_ITATBCTR0_ATREADY1S_Pos 0U /*!< TPI ITATBCTR0: ATREADY1S Position */ +#define TPI_ITATBCTR0_ATREADY1S_Msk (0x1UL /*<< TPI_ITATBCTR0_ATREADY1S_Pos*/) /*!< TPI ITATBCTR0: ATREADY1S Mask */ + +/* TPI Integration Mode Control Register Definitions */ +#define TPI_ITCTRL_Mode_Pos 0U /*!< TPI ITCTRL: Mode Position */ +#define TPI_ITCTRL_Mode_Msk (0x3UL /*<< TPI_ITCTRL_Mode_Pos*/) /*!< TPI ITCTRL: Mode Mask */ + +/* TPI DEVID Register Definitions */ +#define TPI_DEVID_NRZVALID_Pos 11U /*!< TPI DEVID: NRZVALID Position */ +#define TPI_DEVID_NRZVALID_Msk (0x1UL << TPI_DEVID_NRZVALID_Pos) /*!< TPI DEVID: NRZVALID Mask */ + +#define TPI_DEVID_MANCVALID_Pos 10U /*!< TPI DEVID: MANCVALID Position */ +#define TPI_DEVID_MANCVALID_Msk (0x1UL << TPI_DEVID_MANCVALID_Pos) /*!< TPI DEVID: MANCVALID Mask */ + +#define TPI_DEVID_PTINVALID_Pos 9U /*!< TPI DEVID: PTINVALID Position */ +#define TPI_DEVID_PTINVALID_Msk (0x1UL << TPI_DEVID_PTINVALID_Pos) /*!< TPI DEVID: PTINVALID Mask */ + +#define TPI_DEVID_FIFOSZ_Pos 6U /*!< TPI DEVID: FIFOSZ Position */ +#define TPI_DEVID_FIFOSZ_Msk (0x7UL << TPI_DEVID_FIFOSZ_Pos) /*!< TPI DEVID: FIFOSZ Mask */ + +#define TPI_DEVID_NrTraceInput_Pos 0U /*!< TPI DEVID: NrTraceInput Position */ +#define TPI_DEVID_NrTraceInput_Msk (0x3FUL /*<< TPI_DEVID_NrTraceInput_Pos*/) /*!< TPI DEVID: NrTraceInput Mask */ + +/* TPI DEVTYPE Register Definitions */ +#define TPI_DEVTYPE_SubType_Pos 4U /*!< TPI DEVTYPE: SubType Position */ +#define TPI_DEVTYPE_SubType_Msk (0xFUL /*<< TPI_DEVTYPE_SubType_Pos*/) /*!< TPI DEVTYPE: SubType Mask */ + +#define TPI_DEVTYPE_MajorType_Pos 0U /*!< TPI DEVTYPE: MajorType Position */ +#define TPI_DEVTYPE_MajorType_Msk (0xFUL << TPI_DEVTYPE_MajorType_Pos) /*!< TPI DEVTYPE: MajorType Mask */ + +/*@}*/ /* end of group CMSIS_TPI */ + + +#if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_MPU Memory Protection Unit (MPU) + \brief Type definitions for the Memory Protection Unit (MPU) + @{ + */ + +/** + \brief Structure type to 
access the Memory Protection Unit (MPU). + */ +typedef struct +{ + __IM uint32_t TYPE; /*!< Offset: 0x000 (R/ ) MPU Type Register */ + __IOM uint32_t CTRL; /*!< Offset: 0x004 (R/W) MPU Control Register */ + __IOM uint32_t RNR; /*!< Offset: 0x008 (R/W) MPU Region Number Register */ + __IOM uint32_t RBAR; /*!< Offset: 0x00C (R/W) MPU Region Base Address Register */ + __IOM uint32_t RLAR; /*!< Offset: 0x010 (R/W) MPU Region Limit Address Register */ + uint32_t RESERVED0[7U]; + union { + __IOM uint32_t MAIR[2]; + struct { + __IOM uint32_t MAIR0; /*!< Offset: 0x030 (R/W) MPU Memory Attribute Indirection Register 0 */ + __IOM uint32_t MAIR1; /*!< Offset: 0x034 (R/W) MPU Memory Attribute Indirection Register 1 */ + }; + }; +} MPU_Type; + +#define MPU_TYPE_RALIASES 1U + +/* MPU Type Register Definitions */ +#define MPU_TYPE_IREGION_Pos 16U /*!< MPU TYPE: IREGION Position */ +#define MPU_TYPE_IREGION_Msk (0xFFUL << MPU_TYPE_IREGION_Pos) /*!< MPU TYPE: IREGION Mask */ + +#define MPU_TYPE_DREGION_Pos 8U /*!< MPU TYPE: DREGION Position */ +#define MPU_TYPE_DREGION_Msk (0xFFUL << MPU_TYPE_DREGION_Pos) /*!< MPU TYPE: DREGION Mask */ + +#define MPU_TYPE_SEPARATE_Pos 0U /*!< MPU TYPE: SEPARATE Position */ +#define MPU_TYPE_SEPARATE_Msk (1UL /*<< MPU_TYPE_SEPARATE_Pos*/) /*!< MPU TYPE: SEPARATE Mask */ + +/* MPU Control Register Definitions */ +#define MPU_CTRL_PRIVDEFENA_Pos 2U /*!< MPU CTRL: PRIVDEFENA Position */ +#define MPU_CTRL_PRIVDEFENA_Msk (1UL << MPU_CTRL_PRIVDEFENA_Pos) /*!< MPU CTRL: PRIVDEFENA Mask */ + +#define MPU_CTRL_HFNMIENA_Pos 1U /*!< MPU CTRL: HFNMIENA Position */ +#define MPU_CTRL_HFNMIENA_Msk (1UL << MPU_CTRL_HFNMIENA_Pos) /*!< MPU CTRL: HFNMIENA Mask */ + +#define MPU_CTRL_ENABLE_Pos 0U /*!< MPU CTRL: ENABLE Position */ +#define MPU_CTRL_ENABLE_Msk (1UL /*<< MPU_CTRL_ENABLE_Pos*/) /*!< MPU CTRL: ENABLE Mask */ + +/* MPU Region Number Register Definitions */ +#define MPU_RNR_REGION_Pos 0U /*!< MPU RNR: REGION Position */ +#define MPU_RNR_REGION_Msk (0xFFUL /*<< MPU_RNR_REGION_Pos*/) /*!< MPU RNR: REGION Mask */ + +/* MPU Region Base Address Register Definitions */ +#define MPU_RBAR_BASE_Pos 5U /*!< MPU RBAR: BASE Position */ +#define MPU_RBAR_BASE_Msk (0x7FFFFFFUL << MPU_RBAR_BASE_Pos) /*!< MPU RBAR: BASE Mask */ + +#define MPU_RBAR_SH_Pos 3U /*!< MPU RBAR: SH Position */ +#define MPU_RBAR_SH_Msk (0x3UL << MPU_RBAR_SH_Pos) /*!< MPU RBAR: SH Mask */ + +#define MPU_RBAR_AP_Pos 1U /*!< MPU RBAR: AP Position */ +#define MPU_RBAR_AP_Msk (0x3UL << MPU_RBAR_AP_Pos) /*!< MPU RBAR: AP Mask */ + +#define MPU_RBAR_XN_Pos 0U /*!< MPU RBAR: XN Position */ +#define MPU_RBAR_XN_Msk (01UL /*<< MPU_RBAR_XN_Pos*/) /*!< MPU RBAR: XN Mask */ + +/* MPU Region Limit Address Register Definitions */ +#define MPU_RLAR_LIMIT_Pos 5U /*!< MPU RLAR: LIMIT Position */ +#define MPU_RLAR_LIMIT_Msk (0x7FFFFFFUL << MPU_RLAR_LIMIT_Pos) /*!< MPU RLAR: LIMIT Mask */ + +#define MPU_RLAR_AttrIndx_Pos 1U /*!< MPU RLAR: AttrIndx Position */ +#define MPU_RLAR_AttrIndx_Msk (0x7UL << MPU_RLAR_AttrIndx_Pos) /*!< MPU RLAR: AttrIndx Mask */ + +#define MPU_RLAR_EN_Pos 0U /*!< MPU RLAR: EN Position */ +#define MPU_RLAR_EN_Msk (1UL /*<< MPU_RLAR_EN_Pos*/) /*!< MPU RLAR: EN Mask */ + +/* MPU Memory Attribute Indirection Register 0 Definitions */ +#define MPU_MAIR0_Attr3_Pos 24U /*!< MPU MAIR0: Attr3 Position */ +#define MPU_MAIR0_Attr3_Msk (0xFFUL << MPU_MAIR0_Attr3_Pos) /*!< MPU MAIR0: Attr3 Mask */ + +#define MPU_MAIR0_Attr2_Pos 16U /*!< MPU MAIR0: Attr2 Position */ +#define MPU_MAIR0_Attr2_Msk (0xFFUL << MPU_MAIR0_Attr2_Pos) /*!< 
MPU MAIR0: Attr2 Mask */ + +#define MPU_MAIR0_Attr1_Pos 8U /*!< MPU MAIR0: Attr1 Position */ +#define MPU_MAIR0_Attr1_Msk (0xFFUL << MPU_MAIR0_Attr1_Pos) /*!< MPU MAIR0: Attr1 Mask */ + +#define MPU_MAIR0_Attr0_Pos 0U /*!< MPU MAIR0: Attr0 Position */ +#define MPU_MAIR0_Attr0_Msk (0xFFUL /*<< MPU_MAIR0_Attr0_Pos*/) /*!< MPU MAIR0: Attr0 Mask */ + +/* MPU Memory Attribute Indirection Register 1 Definitions */ +#define MPU_MAIR1_Attr7_Pos 24U /*!< MPU MAIR1: Attr7 Position */ +#define MPU_MAIR1_Attr7_Msk (0xFFUL << MPU_MAIR1_Attr7_Pos) /*!< MPU MAIR1: Attr7 Mask */ + +#define MPU_MAIR1_Attr6_Pos 16U /*!< MPU MAIR1: Attr6 Position */ +#define MPU_MAIR1_Attr6_Msk (0xFFUL << MPU_MAIR1_Attr6_Pos) /*!< MPU MAIR1: Attr6 Mask */ + +#define MPU_MAIR1_Attr5_Pos 8U /*!< MPU MAIR1: Attr5 Position */ +#define MPU_MAIR1_Attr5_Msk (0xFFUL << MPU_MAIR1_Attr5_Pos) /*!< MPU MAIR1: Attr5 Mask */ + +#define MPU_MAIR1_Attr4_Pos 0U /*!< MPU MAIR1: Attr4 Position */ +#define MPU_MAIR1_Attr4_Msk (0xFFUL /*<< MPU_MAIR1_Attr4_Pos*/) /*!< MPU MAIR1: Attr4 Mask */ + +/*@} end of group CMSIS_MPU */ +#endif + + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SAU Security Attribution Unit (SAU) + \brief Type definitions for the Security Attribution Unit (SAU) + @{ + */ + +/** + \brief Structure type to access the Security Attribution Unit (SAU). + */ +typedef struct +{ + __IOM uint32_t CTRL; /*!< Offset: 0x000 (R/W) SAU Control Register */ + __IM uint32_t TYPE; /*!< Offset: 0x004 (R/ ) SAU Type Register */ +#if defined (__SAUREGION_PRESENT) && (__SAUREGION_PRESENT == 1U) + __IOM uint32_t RNR; /*!< Offset: 0x008 (R/W) SAU Region Number Register */ + __IOM uint32_t RBAR; /*!< Offset: 0x00C (R/W) SAU Region Base Address Register */ + __IOM uint32_t RLAR; /*!< Offset: 0x010 (R/W) SAU Region Limit Address Register */ +#endif +} SAU_Type; + +/* SAU Control Register Definitions */ +#define SAU_CTRL_ALLNS_Pos 1U /*!< SAU CTRL: ALLNS Position */ +#define SAU_CTRL_ALLNS_Msk (1UL << SAU_CTRL_ALLNS_Pos) /*!< SAU CTRL: ALLNS Mask */ + +#define SAU_CTRL_ENABLE_Pos 0U /*!< SAU CTRL: ENABLE Position */ +#define SAU_CTRL_ENABLE_Msk (1UL /*<< SAU_CTRL_ENABLE_Pos*/) /*!< SAU CTRL: ENABLE Mask */ + +/* SAU Type Register Definitions */ +#define SAU_TYPE_SREGION_Pos 0U /*!< SAU TYPE: SREGION Position */ +#define SAU_TYPE_SREGION_Msk (0xFFUL /*<< SAU_TYPE_SREGION_Pos*/) /*!< SAU TYPE: SREGION Mask */ + +#if defined (__SAUREGION_PRESENT) && (__SAUREGION_PRESENT == 1U) +/* SAU Region Number Register Definitions */ +#define SAU_RNR_REGION_Pos 0U /*!< SAU RNR: REGION Position */ +#define SAU_RNR_REGION_Msk (0xFFUL /*<< SAU_RNR_REGION_Pos*/) /*!< SAU RNR: REGION Mask */ + +/* SAU Region Base Address Register Definitions */ +#define SAU_RBAR_BADDR_Pos 5U /*!< SAU RBAR: BADDR Position */ +#define SAU_RBAR_BADDR_Msk (0x7FFFFFFUL << SAU_RBAR_BADDR_Pos) /*!< SAU RBAR: BADDR Mask */ + +/* SAU Region Limit Address Register Definitions */ +#define SAU_RLAR_LADDR_Pos 5U /*!< SAU RLAR: LADDR Position */ +#define SAU_RLAR_LADDR_Msk (0x7FFFFFFUL << SAU_RLAR_LADDR_Pos) /*!< SAU RLAR: LADDR Mask */ + +#define SAU_RLAR_NSC_Pos 1U /*!< SAU RLAR: NSC Position */ +#define SAU_RLAR_NSC_Msk (1UL << SAU_RLAR_NSC_Pos) /*!< SAU RLAR: NSC Mask */ + +#define SAU_RLAR_ENABLE_Pos 0U /*!< SAU RLAR: ENABLE Position */ +#define SAU_RLAR_ENABLE_Msk (1UL /*<< SAU_RLAR_ENABLE_Pos*/) /*!< SAU RLAR: ENABLE Mask */ + +#endif /* defined (__SAUREGION_PRESENT) && (__SAUREGION_PRESENT == 1U) */ + +/*@} end of group 
CMSIS_SAU */ +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_CoreDebug Core Debug Registers (CoreDebug) + \brief Type definitions for the Core Debug Registers + @{ + */ + +/** + \brief Structure type to access the Core Debug Register (CoreDebug). + */ +typedef struct +{ + __IOM uint32_t DHCSR; /*!< Offset: 0x000 (R/W) Debug Halting Control and Status Register */ + __OM uint32_t DCRSR; /*!< Offset: 0x004 ( /W) Debug Core Register Selector Register */ + __IOM uint32_t DCRDR; /*!< Offset: 0x008 (R/W) Debug Core Register Data Register */ + __IOM uint32_t DEMCR; /*!< Offset: 0x00C (R/W) Debug Exception and Monitor Control Register */ + uint32_t RESERVED4[1U]; + __IOM uint32_t DAUTHCTRL; /*!< Offset: 0x014 (R/W) Debug Authentication Control Register */ + __IOM uint32_t DSCSR; /*!< Offset: 0x018 (R/W) Debug Security Control and Status Register */ +} CoreDebug_Type; + +/* Debug Halting Control and Status Register Definitions */ +#define CoreDebug_DHCSR_DBGKEY_Pos 16U /*!< CoreDebug DHCSR: DBGKEY Position */ +#define CoreDebug_DHCSR_DBGKEY_Msk (0xFFFFUL << CoreDebug_DHCSR_DBGKEY_Pos) /*!< CoreDebug DHCSR: DBGKEY Mask */ + +#define CoreDebug_DHCSR_S_RESTART_ST_Pos 26U /*!< CoreDebug DHCSR: S_RESTART_ST Position */ +#define CoreDebug_DHCSR_S_RESTART_ST_Msk (1UL << CoreDebug_DHCSR_S_RESTART_ST_Pos) /*!< CoreDebug DHCSR: S_RESTART_ST Mask */ + +#define CoreDebug_DHCSR_S_RESET_ST_Pos 25U /*!< CoreDebug DHCSR: S_RESET_ST Position */ +#define CoreDebug_DHCSR_S_RESET_ST_Msk (1UL << CoreDebug_DHCSR_S_RESET_ST_Pos) /*!< CoreDebug DHCSR: S_RESET_ST Mask */ + +#define CoreDebug_DHCSR_S_RETIRE_ST_Pos 24U /*!< CoreDebug DHCSR: S_RETIRE_ST Position */ +#define CoreDebug_DHCSR_S_RETIRE_ST_Msk (1UL << CoreDebug_DHCSR_S_RETIRE_ST_Pos) /*!< CoreDebug DHCSR: S_RETIRE_ST Mask */ + +#define CoreDebug_DHCSR_S_LOCKUP_Pos 19U /*!< CoreDebug DHCSR: S_LOCKUP Position */ +#define CoreDebug_DHCSR_S_LOCKUP_Msk (1UL << CoreDebug_DHCSR_S_LOCKUP_Pos) /*!< CoreDebug DHCSR: S_LOCKUP Mask */ + +#define CoreDebug_DHCSR_S_SLEEP_Pos 18U /*!< CoreDebug DHCSR: S_SLEEP Position */ +#define CoreDebug_DHCSR_S_SLEEP_Msk (1UL << CoreDebug_DHCSR_S_SLEEP_Pos) /*!< CoreDebug DHCSR: S_SLEEP Mask */ + +#define CoreDebug_DHCSR_S_HALT_Pos 17U /*!< CoreDebug DHCSR: S_HALT Position */ +#define CoreDebug_DHCSR_S_HALT_Msk (1UL << CoreDebug_DHCSR_S_HALT_Pos) /*!< CoreDebug DHCSR: S_HALT Mask */ + +#define CoreDebug_DHCSR_S_REGRDY_Pos 16U /*!< CoreDebug DHCSR: S_REGRDY Position */ +#define CoreDebug_DHCSR_S_REGRDY_Msk (1UL << CoreDebug_DHCSR_S_REGRDY_Pos) /*!< CoreDebug DHCSR: S_REGRDY Mask */ + +#define CoreDebug_DHCSR_C_MASKINTS_Pos 3U /*!< CoreDebug DHCSR: C_MASKINTS Position */ +#define CoreDebug_DHCSR_C_MASKINTS_Msk (1UL << CoreDebug_DHCSR_C_MASKINTS_Pos) /*!< CoreDebug DHCSR: C_MASKINTS Mask */ + +#define CoreDebug_DHCSR_C_STEP_Pos 2U /*!< CoreDebug DHCSR: C_STEP Position */ +#define CoreDebug_DHCSR_C_STEP_Msk (1UL << CoreDebug_DHCSR_C_STEP_Pos) /*!< CoreDebug DHCSR: C_STEP Mask */ + +#define CoreDebug_DHCSR_C_HALT_Pos 1U /*!< CoreDebug DHCSR: C_HALT Position */ +#define CoreDebug_DHCSR_C_HALT_Msk (1UL << CoreDebug_DHCSR_C_HALT_Pos) /*!< CoreDebug DHCSR: C_HALT Mask */ + +#define CoreDebug_DHCSR_C_DEBUGEN_Pos 0U /*!< CoreDebug DHCSR: C_DEBUGEN Position */ +#define CoreDebug_DHCSR_C_DEBUGEN_Msk (1UL /*<< CoreDebug_DHCSR_C_DEBUGEN_Pos*/) /*!< CoreDebug DHCSR: C_DEBUGEN Mask */ + +/* Debug Core Register Selector Register Definitions */ +#define CoreDebug_DCRSR_REGWnR_Pos 
16U /*!< CoreDebug DCRSR: REGWnR Position */ +#define CoreDebug_DCRSR_REGWnR_Msk (1UL << CoreDebug_DCRSR_REGWnR_Pos) /*!< CoreDebug DCRSR: REGWnR Mask */ + +#define CoreDebug_DCRSR_REGSEL_Pos 0U /*!< CoreDebug DCRSR: REGSEL Position */ +#define CoreDebug_DCRSR_REGSEL_Msk (0x1FUL /*<< CoreDebug_DCRSR_REGSEL_Pos*/) /*!< CoreDebug DCRSR: REGSEL Mask */ + +/* Debug Exception and Monitor Control Register */ +#define CoreDebug_DEMCR_DWTENA_Pos 24U /*!< CoreDebug DEMCR: DWTENA Position */ +#define CoreDebug_DEMCR_DWTENA_Msk (1UL << CoreDebug_DEMCR_DWTENA_Pos) /*!< CoreDebug DEMCR: DWTENA Mask */ + +#define CoreDebug_DEMCR_VC_HARDERR_Pos 10U /*!< CoreDebug DEMCR: VC_HARDERR Position */ +#define CoreDebug_DEMCR_VC_HARDERR_Msk (1UL << CoreDebug_DEMCR_VC_HARDERR_Pos) /*!< CoreDebug DEMCR: VC_HARDERR Mask */ + +#define CoreDebug_DEMCR_VC_CORERESET_Pos 0U /*!< CoreDebug DEMCR: VC_CORERESET Position */ +#define CoreDebug_DEMCR_VC_CORERESET_Msk (1UL /*<< CoreDebug_DEMCR_VC_CORERESET_Pos*/) /*!< CoreDebug DEMCR: VC_CORERESET Mask */ + +/* Debug Authentication Control Register Definitions */ +#define CoreDebug_DAUTHCTRL_INTSPNIDEN_Pos 3U /*!< CoreDebug DAUTHCTRL: INTSPNIDEN, Position */ +#define CoreDebug_DAUTHCTRL_INTSPNIDEN_Msk (1UL << CoreDebug_DAUTHCTRL_INTSPNIDEN_Pos) /*!< CoreDebug DAUTHCTRL: INTSPNIDEN, Mask */ + +#define CoreDebug_DAUTHCTRL_SPNIDENSEL_Pos 2U /*!< CoreDebug DAUTHCTRL: SPNIDENSEL Position */ +#define CoreDebug_DAUTHCTRL_SPNIDENSEL_Msk (1UL << CoreDebug_DAUTHCTRL_SPNIDENSEL_Pos) /*!< CoreDebug DAUTHCTRL: SPNIDENSEL Mask */ + +#define CoreDebug_DAUTHCTRL_INTSPIDEN_Pos 1U /*!< CoreDebug DAUTHCTRL: INTSPIDEN Position */ +#define CoreDebug_DAUTHCTRL_INTSPIDEN_Msk (1UL << CoreDebug_DAUTHCTRL_INTSPIDEN_Pos) /*!< CoreDebug DAUTHCTRL: INTSPIDEN Mask */ + +#define CoreDebug_DAUTHCTRL_SPIDENSEL_Pos 0U /*!< CoreDebug DAUTHCTRL: SPIDENSEL Position */ +#define CoreDebug_DAUTHCTRL_SPIDENSEL_Msk (1UL /*<< CoreDebug_DAUTHCTRL_SPIDENSEL_Pos*/) /*!< CoreDebug DAUTHCTRL: SPIDENSEL Mask */ + +/* Debug Security Control and Status Register Definitions */ +#define CoreDebug_DSCSR_CDS_Pos 16U /*!< CoreDebug DSCSR: CDS Position */ +#define CoreDebug_DSCSR_CDS_Msk (1UL << CoreDebug_DSCSR_CDS_Pos) /*!< CoreDebug DSCSR: CDS Mask */ + +#define CoreDebug_DSCSR_SBRSEL_Pos 1U /*!< CoreDebug DSCSR: SBRSEL Position */ +#define CoreDebug_DSCSR_SBRSEL_Msk (1UL << CoreDebug_DSCSR_SBRSEL_Pos) /*!< CoreDebug DSCSR: SBRSEL Mask */ + +#define CoreDebug_DSCSR_SBRSELEN_Pos 0U /*!< CoreDebug DSCSR: SBRSELEN Position */ +#define CoreDebug_DSCSR_SBRSELEN_Msk (1UL /*<< CoreDebug_DSCSR_SBRSELEN_Pos*/) /*!< CoreDebug DSCSR: SBRSELEN Mask */ + +/*@} end of group CMSIS_CoreDebug */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_core_bitfield Core register bit field macros + \brief Macros for use with bit field definitions (xxx_Pos, xxx_Msk). + @{ + */ + +/** + \brief Mask and shift a bit field value for use in a register bit range. + \param[in] field Name of the register bit field. + \param[in] value Value of the bit field. This parameter is interpreted as an uint32_t type. + \return Masked and shifted value. +*/ +#define _VAL2FLD(field, value) (((uint32_t)(value) << field ## _Pos) & field ## _Msk) + +/** + \brief Mask and shift a register value to extract a bit filed value. + \param[in] field Name of the register bit field. + \param[in] value Value of register. This parameter is interpreted as an uint32_t type. + \return Masked and shifted bit field value. 
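+
+           Example (an illustrative sketch; it reuses the SCB_AIRCR field definitions from this header,
+           and the 0x05FAUL key value is only a sample input):
+           \code
+           uint32_t aircr = _VAL2FLD(SCB_AIRCR_VECTKEY, 0x05FAUL) | SCB_AIRCR_SYSRESETREQ_Msk;
+           uint32_t key   = _FLD2VAL(SCB_AIRCR_VECTKEY, aircr);   /* masks and shifts back to 0x05FA */
+           \endcode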
+*/ +#define _FLD2VAL(field, value) (((uint32_t)(value) & field ## _Msk) >> field ## _Pos) + +/*@} end of group CMSIS_core_bitfield */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_core_base Core Definitions + \brief Definitions for base addresses, unions, and structures. + @{ + */ + +/* Memory mapping of Core Hardware */ + #define SCS_BASE (0xE000E000UL) /*!< System Control Space Base Address */ + #define DWT_BASE (0xE0001000UL) /*!< DWT Base Address */ + #define TPI_BASE (0xE0040000UL) /*!< TPI Base Address */ + #define CoreDebug_BASE (0xE000EDF0UL) /*!< Core Debug Base Address */ + #define SysTick_BASE (SCS_BASE + 0x0010UL) /*!< SysTick Base Address */ + #define NVIC_BASE (SCS_BASE + 0x0100UL) /*!< NVIC Base Address */ + #define SCB_BASE (SCS_BASE + 0x0D00UL) /*!< System Control Block Base Address */ + + + #define SCB ((SCB_Type *) SCB_BASE ) /*!< SCB configuration struct */ + #define SysTick ((SysTick_Type *) SysTick_BASE ) /*!< SysTick configuration struct */ + #define NVIC ((NVIC_Type *) NVIC_BASE ) /*!< NVIC configuration struct */ + #define DWT ((DWT_Type *) DWT_BASE ) /*!< DWT configuration struct */ + #define TPI ((TPI_Type *) TPI_BASE ) /*!< TPI configuration struct */ + #define CoreDebug ((CoreDebug_Type *) CoreDebug_BASE ) /*!< Core Debug configuration struct */ + + #if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) + #define MPU_BASE (SCS_BASE + 0x0D90UL) /*!< Memory Protection Unit */ + #define MPU ((MPU_Type *) MPU_BASE ) /*!< Memory Protection Unit */ + #endif + + #if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) + #define SAU_BASE (SCS_BASE + 0x0DD0UL) /*!< Security Attribution Unit */ + #define SAU ((SAU_Type *) SAU_BASE ) /*!< Security Attribution Unit */ + #endif + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) + #define SCS_BASE_NS (0xE002E000UL) /*!< System Control Space Base Address (non-secure address space) */ + #define CoreDebug_BASE_NS (0xE002EDF0UL) /*!< Core Debug Base Address (non-secure address space) */ + #define SysTick_BASE_NS (SCS_BASE_NS + 0x0010UL) /*!< SysTick Base Address (non-secure address space) */ + #define NVIC_BASE_NS (SCS_BASE_NS + 0x0100UL) /*!< NVIC Base Address (non-secure address space) */ + #define SCB_BASE_NS (SCS_BASE_NS + 0x0D00UL) /*!< System Control Block Base Address (non-secure address space) */ + + #define SCB_NS ((SCB_Type *) SCB_BASE_NS ) /*!< SCB configuration struct (non-secure address space) */ + #define SysTick_NS ((SysTick_Type *) SysTick_BASE_NS ) /*!< SysTick configuration struct (non-secure address space) */ + #define NVIC_NS ((NVIC_Type *) NVIC_BASE_NS ) /*!< NVIC configuration struct (non-secure address space) */ + #define CoreDebug_NS ((CoreDebug_Type *) CoreDebug_BASE_NS) /*!< Core Debug configuration struct (non-secure address space) */ + + #if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) + #define MPU_BASE_NS (SCS_BASE_NS + 0x0D90UL) /*!< Memory Protection Unit (non-secure address space) */ + #define MPU_NS ((MPU_Type *) MPU_BASE_NS ) /*!< Memory Protection Unit (non-secure address space) */ + #endif + +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ +/*@} */ + + + +/******************************************************************************* + * Hardware Abstraction Layer + Core Function Interface contains: + - Core NVIC Functions + - Core SysTick Functions + - Core Register Access Functions + ******************************************************************************/ +/** + \defgroup CMSIS_Core_FunctionInterface Functions and 
Instructions Reference +*/ + + + +/* ########################## NVIC functions #################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_NVICFunctions NVIC Functions + \brief Functions that manage interrupts and exceptions via the NVIC. + @{ + */ + +#ifdef CMSIS_NVIC_VIRTUAL + #ifndef CMSIS_NVIC_VIRTUAL_HEADER_FILE + #define CMSIS_NVIC_VIRTUAL_HEADER_FILE "cmsis_nvic_virtual.h" + #endif + #include CMSIS_NVIC_VIRTUAL_HEADER_FILE +#else +/*#define NVIC_SetPriorityGrouping __NVIC_SetPriorityGrouping not available for Cortex-M23 */ +/*#define NVIC_GetPriorityGrouping __NVIC_GetPriorityGrouping not available for Cortex-M23 */ + #define NVIC_EnableIRQ __NVIC_EnableIRQ + #define NVIC_GetEnableIRQ __NVIC_GetEnableIRQ + #define NVIC_DisableIRQ __NVIC_DisableIRQ + #define NVIC_GetPendingIRQ __NVIC_GetPendingIRQ + #define NVIC_SetPendingIRQ __NVIC_SetPendingIRQ + #define NVIC_ClearPendingIRQ __NVIC_ClearPendingIRQ + #define NVIC_GetActive __NVIC_GetActive + #define NVIC_SetPriority __NVIC_SetPriority + #define NVIC_GetPriority __NVIC_GetPriority + #define NVIC_SystemReset __NVIC_SystemReset +#endif /* CMSIS_NVIC_VIRTUAL */ + +#ifdef CMSIS_VECTAB_VIRTUAL + #ifndef CMSIS_VECTAB_VIRTUAL_HEADER_FILE + #define CMSIS_VECTAB_VIRTUAL_HEADER_FILE "cmsis_vectab_virtual.h" + #endif + #include CMSIS_VECTAB_VIRTUAL_HEADER_FILE +#else + #define NVIC_SetVector __NVIC_SetVector + #define NVIC_GetVector __NVIC_GetVector +#endif /* (CMSIS_VECTAB_VIRTUAL) */ + +#define NVIC_USER_IRQ_OFFSET 16 + + +/* Special LR values for Secure/Non-Secure call handling and exception handling */ + +/* Function Return Payload (from ARMv8-M Architecture Reference Manual) LR value on entry from Secure BLXNS */ +#define FNC_RETURN (0xFEFFFFFFUL) /* bit [0] ignored when processing a branch */ + +/* The following EXC_RETURN mask values are used to evaluate the LR on exception entry */ +#define EXC_RETURN_PREFIX (0xFF000000UL) /* bits [31:24] set to indicate an EXC_RETURN value */ +#define EXC_RETURN_S (0x00000040UL) /* bit [6] stack used to push registers: 0=Non-secure 1=Secure */ +#define EXC_RETURN_DCRS (0x00000020UL) /* bit [5] stacking rules for called registers: 0=skipped 1=saved */ +#define EXC_RETURN_FTYPE (0x00000010UL) /* bit [4] allocate stack for floating-point context: 0=done 1=skipped */ +#define EXC_RETURN_MODE (0x00000008UL) /* bit [3] processor mode for return: 0=Handler mode 1=Thread mode */ +#define EXC_RETURN_SPSEL (0x00000002UL) /* bit [1] stack pointer used to restore context: 0=MSP 1=PSP */ +#define EXC_RETURN_ES (0x00000001UL) /* bit [0] security state exception was taken to: 0=Non-secure 1=Secure */ + +/* Integrity Signature (from ARMv8-M Architecture Reference Manual) for exception context stacking */ +#if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) /* Value for processors with floating-point extension: */ +#define EXC_INTEGRITY_SIGNATURE (0xFEFA125AUL) /* bit [0] SFTC must match LR bit[4] EXC_RETURN_FTYPE */ +#else +#define EXC_INTEGRITY_SIGNATURE (0xFEFA125BUL) /* Value for processors without floating-point extension */ +#endif + + +/* Interrupt Priorities are WORD accessible only under Armv6-M */ +/* The following MACROS handle generation of the register offset and byte masks */ +#define _BIT_SHIFT(IRQn) ( ((((uint32_t)(int32_t)(IRQn)) ) & 0x03UL) * 8UL) +#define _SHP_IDX(IRQn) ( (((((uint32_t)(int32_t)(IRQn)) & 0x0FUL)-8UL) >> 2UL) ) +#define _IP_IDX(IRQn) ( (((uint32_t)(int32_t)(IRQn)) >> 2UL) ) + +#define __NVIC_SetPriorityGrouping(X) (void)(X) +#define 
__NVIC_GetPriorityGrouping() (0U) + +/** + \brief Enable Interrupt + \details Enables a device specific interrupt in the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_EnableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ISER[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Get Interrupt Enable status + \details Returns a device specific interrupt enable status from the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt is not enabled. + \return 1 Interrupt is enabled. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t __NVIC_GetEnableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->ISER[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Disable Interrupt + \details Disables a device specific interrupt in the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_DisableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ICER[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + __DSB(); + __ISB(); + } +} + + +/** + \brief Get Pending Interrupt + \details Reads the NVIC pending register and returns the pending bit for the specified device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt status is not pending. + \return 1 Interrupt status is pending. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t __NVIC_GetPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->ISPR[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Set Pending Interrupt + \details Sets the pending bit of a device specific interrupt in the NVIC pending register. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_SetPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ISPR[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Clear Pending Interrupt + \details Clears the pending bit of a device specific interrupt in the NVIC pending register. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_ClearPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ICPR[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Get Active Interrupt + \details Reads the active register in the NVIC and returns the active bit for the device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt status is not active. + \return 1 Interrupt status is active. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t __NVIC_GetActive(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->IABR[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 
1UL : 0UL)); + } + else + { + return(0U); + } +} + + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \brief Get Interrupt Target State + \details Reads the interrupt target field in the NVIC and returns the interrupt target bit for the device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 if interrupt is assigned to Secure + \return 1 if interrupt is assigned to Non Secure + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t NVIC_GetTargetState(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->ITNS[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Set Interrupt Target State + \details Sets the interrupt target field in the NVIC and returns the interrupt target bit for the device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 if interrupt is assigned to Secure + 1 if interrupt is assigned to Non Secure + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t NVIC_SetTargetState(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ITNS[(((uint32_t)IRQn) >> 5UL)] |= ((uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL))); + return((uint32_t)(((NVIC->ITNS[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Clear Interrupt Target State + \details Clears the interrupt target field in the NVIC and returns the interrupt target bit for the device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 if interrupt is assigned to Secure + 1 if interrupt is assigned to Non Secure + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t NVIC_ClearTargetState(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ITNS[(((uint32_t)IRQn) >> 5UL)] &= ~((uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL))); + return((uint32_t)(((NVIC->ITNS[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + + +/** + \brief Set Interrupt Priority + \details Sets the priority of a device specific interrupt or a processor exception. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \param [in] priority Priority to set. + \note The priority cannot be set for every processor exception. + */ +__STATIC_INLINE void __NVIC_SetPriority(IRQn_Type IRQn, uint32_t priority) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->IPR[_IP_IDX(IRQn)] = ((uint32_t)(NVIC->IPR[_IP_IDX(IRQn)] & ~(0xFFUL << _BIT_SHIFT(IRQn))) | + (((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL) << _BIT_SHIFT(IRQn))); + } + else + { + SCB->SHPR[_SHP_IDX(IRQn)] = ((uint32_t)(SCB->SHPR[_SHP_IDX(IRQn)] & ~(0xFFUL << _BIT_SHIFT(IRQn))) | + (((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL) << _BIT_SHIFT(IRQn))); + } +} + + +/** + \brief Get Interrupt Priority + \details Reads the priority of a device specific interrupt or a processor exception. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \return Interrupt Priority. + Value is aligned automatically to the implemented priority bits of the microcontroller. 
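+
+           Example (an illustrative sketch; TIMER0_IRQn is a placeholder device interrupt number,
+           not defined by this header):
+           \code
+           NVIC_SetPriority(TIMER0_IRQn, 1UL);               /* encode and write the priority        */
+           uint32_t prio = NVIC_GetPriority(TIMER0_IRQn);    /* reads the value back, aligned to the
+                                                                implemented priority bits            */
+           \endcode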
+ */ +__STATIC_INLINE uint32_t __NVIC_GetPriority(IRQn_Type IRQn) +{ + + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->IPR[ _IP_IDX(IRQn)] >> _BIT_SHIFT(IRQn) ) & (uint32_t)0xFFUL) >> (8U - __NVIC_PRIO_BITS))); + } + else + { + return((uint32_t)(((SCB->SHPR[_SHP_IDX(IRQn)] >> _BIT_SHIFT(IRQn) ) & (uint32_t)0xFFUL) >> (8U - __NVIC_PRIO_BITS))); + } +} + + +/** + \brief Encode Priority + \details Encodes the priority for an interrupt with the given priority group, + preemptive priority value, and subpriority value. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS), the smallest possible priority group is set. + \param [in] PriorityGroup Used priority group. + \param [in] PreemptPriority Preemptive priority value (starting from 0). + \param [in] SubPriority Subpriority value (starting from 0). + \return Encoded priority. Value can be used in the function \ref NVIC_SetPriority(). + */ +__STATIC_INLINE uint32_t NVIC_EncodePriority (uint32_t PriorityGroup, uint32_t PreemptPriority, uint32_t SubPriority) +{ + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + uint32_t PreemptPriorityBits; + uint32_t SubPriorityBits; + + PreemptPriorityBits = ((7UL - PriorityGroupTmp) > (uint32_t)(__NVIC_PRIO_BITS)) ? (uint32_t)(__NVIC_PRIO_BITS) : (uint32_t)(7UL - PriorityGroupTmp); + SubPriorityBits = ((PriorityGroupTmp + (uint32_t)(__NVIC_PRIO_BITS)) < (uint32_t)7UL) ? (uint32_t)0UL : (uint32_t)((PriorityGroupTmp - 7UL) + (uint32_t)(__NVIC_PRIO_BITS)); + + return ( + ((PreemptPriority & (uint32_t)((1UL << (PreemptPriorityBits)) - 1UL)) << SubPriorityBits) | + ((SubPriority & (uint32_t)((1UL << (SubPriorityBits )) - 1UL))) + ); +} + + +/** + \brief Decode Priority + \details Decodes an interrupt priority value with a given priority group to + preemptive priority value and subpriority value. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS) the smallest possible priority group is set. + \param [in] Priority Priority value, which can be retrieved with the function \ref NVIC_GetPriority(). + \param [in] PriorityGroup Used priority group. + \param [out] pPreemptPriority Preemptive priority value (starting from 0). + \param [out] pSubPriority Subpriority value (starting from 0). + */ +__STATIC_INLINE void NVIC_DecodePriority (uint32_t Priority, uint32_t PriorityGroup, uint32_t* const pPreemptPriority, uint32_t* const pSubPriority) +{ + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + uint32_t PreemptPriorityBits; + uint32_t SubPriorityBits; + + PreemptPriorityBits = ((7UL - PriorityGroupTmp) > (uint32_t)(__NVIC_PRIO_BITS)) ? (uint32_t)(__NVIC_PRIO_BITS) : (uint32_t)(7UL - PriorityGroupTmp); + SubPriorityBits = ((PriorityGroupTmp + (uint32_t)(__NVIC_PRIO_BITS)) < (uint32_t)7UL) ? (uint32_t)0UL : (uint32_t)((PriorityGroupTmp - 7UL) + (uint32_t)(__NVIC_PRIO_BITS)); + + *pPreemptPriority = (Priority >> SubPriorityBits) & (uint32_t)((1UL << (PreemptPriorityBits)) - 1UL); + *pSubPriority = (Priority ) & (uint32_t)((1UL << (SubPriorityBits )) - 1UL); +} + + +/** + \brief Set Interrupt Vector + \details Sets an interrupt vector in SRAM based interrupt vector table. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + VTOR must been relocated to SRAM before. + If VTOR is not present address 0 must be mapped to SRAM. 
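+
+           Example (an illustrative sketch; TIMER0_IRQn and Timer0_Handler are placeholders, and the
+           vector table is assumed to already reside in SRAM):
+           \code
+           NVIC_SetVector(TIMER0_IRQn, (uint32_t)&Timer0_Handler);   /* install the handler address */
+           \endcode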
+ \param [in] IRQn Interrupt number + \param [in] vector Address of interrupt handler function + */ +__STATIC_INLINE void __NVIC_SetVector(IRQn_Type IRQn, uint32_t vector) +{ +#if defined (__VTOR_PRESENT) && (__VTOR_PRESENT == 1U) + uint32_t *vectors = (uint32_t *)SCB->VTOR; +#else + uint32_t *vectors = (uint32_t *)0x0U; +#endif + vectors[(int32_t)IRQn + NVIC_USER_IRQ_OFFSET] = vector; +} + + +/** + \brief Get Interrupt Vector + \details Reads an interrupt vector from interrupt vector table. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \return Address of interrupt handler function + */ +__STATIC_INLINE uint32_t __NVIC_GetVector(IRQn_Type IRQn) +{ +#if defined (__VTOR_PRESENT) && (__VTOR_PRESENT == 1U) + uint32_t *vectors = (uint32_t *)SCB->VTOR; +#else + uint32_t *vectors = (uint32_t *)0x0U; +#endif + return vectors[(int32_t)IRQn + NVIC_USER_IRQ_OFFSET]; +} + + +/** + \brief System Reset + \details Initiates a system reset request to reset the MCU. + */ +__NO_RETURN __STATIC_INLINE void __NVIC_SystemReset(void) +{ + __DSB(); /* Ensure all outstanding memory accesses included + buffered write are completed before reset */ + SCB->AIRCR = ((0x5FAUL << SCB_AIRCR_VECTKEY_Pos) | + SCB_AIRCR_SYSRESETREQ_Msk); + __DSB(); /* Ensure completion of memory access */ + + for(;;) /* wait until reset */ + { + __NOP(); + } +} + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \brief Enable Interrupt (non-secure) + \details Enables a device specific interrupt in the non-secure NVIC interrupt controller when in secure state. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void TZ_NVIC_EnableIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC_NS->ISER[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Get Interrupt Enable status (non-secure) + \details Returns a device specific interrupt enable status from the non-secure NVIC interrupt controller when in secure state. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt is not enabled. + \return 1 Interrupt is enabled. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t TZ_NVIC_GetEnableIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC_NS->ISER[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Disable Interrupt (non-secure) + \details Disables a device specific interrupt in the non-secure NVIC interrupt controller when in secure state. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void TZ_NVIC_DisableIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC_NS->ICER[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Get Pending Interrupt (non-secure) + \details Reads the NVIC pending register in the non-secure NVIC when in secure state and returns the pending bit for the specified device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt status is not pending. + \return 1 Interrupt status is pending. + \note IRQn must not be negative. 
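+
+           Example (an illustrative sketch, callable from the Secure state only; UART1_IRQn is a
+           placeholder device interrupt number):
+           \code
+           TZ_NVIC_SetPendingIRQ_NS(UART1_IRQn);                    /* pend it in the Non-secure NVIC */
+           uint32_t pend = TZ_NVIC_GetPendingIRQ_NS(UART1_IRQn);    /* reads back the pending bit     */
+           \endcode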
+ */ +__STATIC_INLINE uint32_t TZ_NVIC_GetPendingIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC_NS->ISPR[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Set Pending Interrupt (non-secure) + \details Sets the pending bit of a device specific interrupt in the non-secure NVIC pending register when in secure state. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void TZ_NVIC_SetPendingIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC_NS->ISPR[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Clear Pending Interrupt (non-secure) + \details Clears the pending bit of a device specific interrupt in the non-secure NVIC pending register when in secure state. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void TZ_NVIC_ClearPendingIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC_NS->ICPR[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Get Active Interrupt (non-secure) + \details Reads the active register in non-secure NVIC when in secure state and returns the active bit for the device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt status is not active. + \return 1 Interrupt status is active. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t TZ_NVIC_GetActive_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC_NS->IABR[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Set Interrupt Priority (non-secure) + \details Sets the priority of a non-secure device specific interrupt or a non-secure processor exception when in secure state. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \param [in] priority Priority to set. + \note The priority cannot be set for every non-secure processor exception. + */ +__STATIC_INLINE void TZ_NVIC_SetPriority_NS(IRQn_Type IRQn, uint32_t priority) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC_NS->IPR[_IP_IDX(IRQn)] = ((uint32_t)(NVIC_NS->IPR[_IP_IDX(IRQn)] & ~(0xFFUL << _BIT_SHIFT(IRQn))) | + (((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL) << _BIT_SHIFT(IRQn))); + } + else + { + SCB_NS->SHPR[_SHP_IDX(IRQn)] = ((uint32_t)(SCB_NS->SHPR[_SHP_IDX(IRQn)] & ~(0xFFUL << _BIT_SHIFT(IRQn))) | + (((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL) << _BIT_SHIFT(IRQn))); + } +} + + +/** + \brief Get Interrupt Priority (non-secure) + \details Reads the priority of a non-secure device specific interrupt or a non-secure processor exception when in secure state. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \return Interrupt Priority. Value is aligned automatically to the implemented priority bits of the microcontroller. 
+ */ +__STATIC_INLINE uint32_t TZ_NVIC_GetPriority_NS(IRQn_Type IRQn) +{ + + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC_NS->IPR[ _IP_IDX(IRQn)] >> _BIT_SHIFT(IRQn) ) & (uint32_t)0xFFUL) >> (8U - __NVIC_PRIO_BITS))); + } + else + { + return((uint32_t)(((SCB_NS->SHPR[_SHP_IDX(IRQn)] >> _BIT_SHIFT(IRQn) ) & (uint32_t)0xFFUL) >> (8U - __NVIC_PRIO_BITS))); + } +} +#endif /* defined (__ARM_FEATURE_CMSE) &&(__ARM_FEATURE_CMSE == 3U) */ + +/*@} end of CMSIS_Core_NVICFunctions */ + +/* ########################## MPU functions #################################### */ + +#if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) + +#include "mpu_armv8.h" + +#endif + +/* ########################## FPU functions #################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_FpuFunctions FPU Functions + \brief Function that provides FPU type. + @{ + */ + +/** + \brief get FPU type + \details returns the FPU type + \returns + - \b 0: No FPU + - \b 1: Single precision FPU + - \b 2: Double + Single precision FPU + */ +__STATIC_INLINE uint32_t SCB_GetFPUType(void) +{ + return 0U; /* No FPU */ +} + + +/*@} end of CMSIS_Core_FpuFunctions */ + + + +/* ########################## SAU functions #################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_SAUFunctions SAU Functions + \brief Functions that configure the SAU. + @{ + */ + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) + +/** + \brief Enable SAU + \details Enables the Security Attribution Unit (SAU). + */ +__STATIC_INLINE void TZ_SAU_Enable(void) +{ + SAU->CTRL |= (SAU_CTRL_ENABLE_Msk); +} + + + +/** + \brief Disable SAU + \details Disables the Security Attribution Unit (SAU). + */ +__STATIC_INLINE void TZ_SAU_Disable(void) +{ + SAU->CTRL &= ~(SAU_CTRL_ENABLE_Msk); +} + +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + +/*@} end of CMSIS_Core_SAUFunctions */ + + + + +/* ################################## SysTick function ############################################ */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_SysTickFunctions SysTick Functions + \brief Functions that configure the System. + @{ + */ + +#if defined (__Vendor_SysTickConfig) && (__Vendor_SysTickConfig == 0U) + +/** + \brief System Tick Configuration + \details Initializes the System Timer and its interrupt, and starts the System Tick Timer. + Counter is in free running mode to generate periodic interrupts. + \param [in] ticks Number of ticks between two interrupts. + \return 0 Function succeeded. + \return 1 Function failed. + \note When the variable __Vendor_SysTickConfig is set to 1, then the + function SysTick_Config is not included. In this case, the file device.h + must contain a vendor-specific implementation of this function. 
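+
+           Example (an illustrative sketch; SystemCoreClock is assumed to be provided by the device's
+           system file and to hold the core clock frequency in Hz):
+           \code
+           if (SysTick_Config(SystemCoreClock / 1000U) != 0U)       /* request a 1 ms tick */
+           {
+             /* reload value out of range - handle the error */
+           }
+           \endcode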
+ */ +__STATIC_INLINE uint32_t SysTick_Config(uint32_t ticks) +{ + if ((ticks - 1UL) > SysTick_LOAD_RELOAD_Msk) + { + return (1UL); /* Reload value impossible */ + } + + SysTick->LOAD = (uint32_t)(ticks - 1UL); /* set reload register */ + NVIC_SetPriority (SysTick_IRQn, (1UL << __NVIC_PRIO_BITS) - 1UL); /* set Priority for Systick Interrupt */ + SysTick->VAL = 0UL; /* Load the SysTick Counter Value */ + SysTick->CTRL = SysTick_CTRL_CLKSOURCE_Msk | + SysTick_CTRL_TICKINT_Msk | + SysTick_CTRL_ENABLE_Msk; /* Enable SysTick IRQ and SysTick Timer */ + return (0UL); /* Function successful */ +} + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \brief System Tick Configuration (non-secure) + \details Initializes the non-secure System Timer and its interrupt when in secure state, and starts the System Tick Timer. + Counter is in free running mode to generate periodic interrupts. + \param [in] ticks Number of ticks between two interrupts. + \return 0 Function succeeded. + \return 1 Function failed. + \note When the variable __Vendor_SysTickConfig is set to 1, then the + function TZ_SysTick_Config_NS is not included. In this case, the file device.h + must contain a vendor-specific implementation of this function. + + */ +__STATIC_INLINE uint32_t TZ_SysTick_Config_NS(uint32_t ticks) +{ + if ((ticks - 1UL) > SysTick_LOAD_RELOAD_Msk) + { + return (1UL); /* Reload value impossible */ + } + + SysTick_NS->LOAD = (uint32_t)(ticks - 1UL); /* set reload register */ + TZ_NVIC_SetPriority_NS (SysTick_IRQn, (1UL << __NVIC_PRIO_BITS) - 1UL); /* set Priority for Systick Interrupt */ + SysTick_NS->VAL = 0UL; /* Load the SysTick Counter Value */ + SysTick_NS->CTRL = SysTick_CTRL_CLKSOURCE_Msk | + SysTick_CTRL_TICKINT_Msk | + SysTick_CTRL_ENABLE_Msk; /* Enable SysTick IRQ and SysTick Timer */ + return (0UL); /* Function successful */ +} +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + +#endif + +/*@} end of CMSIS_Core_SysTickFunctions */ + + + + +#ifdef __cplusplus +} +#endif + +#endif /* __CORE_CM23_H_DEPENDANT */ + +#endif /* __CMSIS_GENERIC */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Drivers/CMSIS/Include/core_cm3.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Drivers/CMSIS/Include/core_cm3.h new file mode 100644 index 0000000..b0dfbd3 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Drivers/CMSIS/Include/core_cm3.h @@ -0,0 +1,1941 @@ +/**************************************************************************//** + * @file core_cm3.h + * @brief CMSIS Cortex-M3 Core Peripheral Access Layer Header File + * @version V5.0.8 + * @date 04. June 2018 + ******************************************************************************/ +/* + * Copyright (c) 2009-2018 Arm Limited. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#if defined ( __ICCARM__ ) + #pragma system_include /* treat file as system include file for MISRA check */ +#elif defined (__clang__) + #pragma clang system_header /* treat file as system include file */ +#endif + +#ifndef __CORE_CM3_H_GENERIC +#define __CORE_CM3_H_GENERIC + +#include + +#ifdef __cplusplus + extern "C" { +#endif + +/** + \page CMSIS_MISRA_Exceptions MISRA-C:2004 Compliance Exceptions + CMSIS violates the following MISRA-C:2004 rules: + + \li Required Rule 8.5, object/function definition in header file.
+ Function definitions in header files are used to allow 'inlining'. + + \li Required Rule 18.4, declaration of union type or object of union type: '{...}'.
+ Unions are used for effective representation of core registers. + + \li Advisory Rule 19.7, Function-like macro defined.
+ Function-like macros are used to allow more efficient code. + */ + + +/******************************************************************************* + * CMSIS definitions + ******************************************************************************/ +/** + \ingroup Cortex_M3 + @{ + */ + +#include "cmsis_version.h" + +/* CMSIS CM3 definitions */ +#define __CM3_CMSIS_VERSION_MAIN (__CM_CMSIS_VERSION_MAIN) /*!< \deprecated [31:16] CMSIS HAL main version */ +#define __CM3_CMSIS_VERSION_SUB (__CM_CMSIS_VERSION_SUB) /*!< \deprecated [15:0] CMSIS HAL sub version */ +#define __CM3_CMSIS_VERSION ((__CM3_CMSIS_VERSION_MAIN << 16U) | \ + __CM3_CMSIS_VERSION_SUB ) /*!< \deprecated CMSIS HAL version number */ + +#define __CORTEX_M (3U) /*!< Cortex-M Core */ + +/** __FPU_USED indicates whether an FPU is used or not. + This core does not support an FPU at all +*/ +#define __FPU_USED 0U + +#if defined ( __CC_ARM ) + #if defined __TARGET_FPU_VFP + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined (__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050) + #if defined __ARM_PCS_VFP + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined ( __GNUC__ ) + #if defined (__VFP_FP__) && !defined(__SOFTFP__) + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined ( __ICCARM__ ) + #if defined __ARMVFP__ + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined ( __TI_ARM__ ) + #if defined __TI_VFP_SUPPORT__ + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined ( __TASKING__ ) + #if defined __FPU_VFP__ + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined ( __CSMC__ ) + #if ( __CSMC__ & 0x400U) + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#endif + +#include "cmsis_compiler.h" /* CMSIS compiler specific defines */ + + +#ifdef __cplusplus +} +#endif + +#endif /* __CORE_CM3_H_GENERIC */ + +#ifndef __CMSIS_GENERIC + +#ifndef __CORE_CM3_H_DEPENDANT +#define __CORE_CM3_H_DEPENDANT + +#ifdef __cplusplus + extern "C" { +#endif + +/* check device defines and use defaults */ +#if defined __CHECK_DEVICE_DEFINES + #ifndef __CM3_REV + #define __CM3_REV 0x0200U + #warning "__CM3_REV not defined in device header file; using default!" + #endif + + #ifndef __MPU_PRESENT + #define __MPU_PRESENT 0U + #warning "__MPU_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __NVIC_PRIO_BITS + #define __NVIC_PRIO_BITS 3U + #warning "__NVIC_PRIO_BITS not defined in device header file; using default!" + #endif + + #ifndef __Vendor_SysTickConfig + #define __Vendor_SysTickConfig 0U + #warning "__Vendor_SysTickConfig not defined in device header file; using default!" + #endif +#endif + +/* IO definitions (access restrictions to peripheral registers) */ +/** + \defgroup CMSIS_glob_defs CMSIS Global Defines + + IO Type Qualifiers are used + \li to specify the access to peripheral variables. + \li for automatic generation of peripheral register debug information. 
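+
+   A minimal sketch of how these qualifiers are typically applied (Example_Periph_Type is a
+   hypothetical peripheral register block, not part of this header):
+   \code
+   typedef struct
+   {
+     __IM  uint32_t SR;    /*!< read-only status register   */
+     __OM  uint32_t KEY;   /*!< write-only key register     */
+     __IOM uint32_t CR;    /*!< read/write control register */
+   } Example_Periph_Type;
+   \endcode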
+*/ +#ifdef __cplusplus + #define __I volatile /*!< Defines 'read only' permissions */ +#else + #define __I volatile const /*!< Defines 'read only' permissions */ +#endif +#define __O volatile /*!< Defines 'write only' permissions */ +#define __IO volatile /*!< Defines 'read / write' permissions */ + +/* following defines should be used for structure members */ +#define __IM volatile const /*! Defines 'read only' structure member permissions */ +#define __OM volatile /*! Defines 'write only' structure member permissions */ +#define __IOM volatile /*! Defines 'read / write' structure member permissions */ + +/*@} end of group Cortex_M3 */ + + + +/******************************************************************************* + * Register Abstraction + Core Register contain: + - Core Register + - Core NVIC Register + - Core SCB Register + - Core SysTick Register + - Core Debug Register + - Core MPU Register + ******************************************************************************/ +/** + \defgroup CMSIS_core_register Defines and Type Definitions + \brief Type definitions and defines for Cortex-M processor based devices. +*/ + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_CORE Status and Control Registers + \brief Core Register type definitions. + @{ + */ + +/** + \brief Union type to access the Application Program Status Register (APSR). + */ +typedef union +{ + struct + { + uint32_t _reserved0:27; /*!< bit: 0..26 Reserved */ + uint32_t Q:1; /*!< bit: 27 Saturation condition flag */ + uint32_t V:1; /*!< bit: 28 Overflow condition code flag */ + uint32_t C:1; /*!< bit: 29 Carry condition code flag */ + uint32_t Z:1; /*!< bit: 30 Zero condition code flag */ + uint32_t N:1; /*!< bit: 31 Negative condition code flag */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} APSR_Type; + +/* APSR Register Definitions */ +#define APSR_N_Pos 31U /*!< APSR: N Position */ +#define APSR_N_Msk (1UL << APSR_N_Pos) /*!< APSR: N Mask */ + +#define APSR_Z_Pos 30U /*!< APSR: Z Position */ +#define APSR_Z_Msk (1UL << APSR_Z_Pos) /*!< APSR: Z Mask */ + +#define APSR_C_Pos 29U /*!< APSR: C Position */ +#define APSR_C_Msk (1UL << APSR_C_Pos) /*!< APSR: C Mask */ + +#define APSR_V_Pos 28U /*!< APSR: V Position */ +#define APSR_V_Msk (1UL << APSR_V_Pos) /*!< APSR: V Mask */ + +#define APSR_Q_Pos 27U /*!< APSR: Q Position */ +#define APSR_Q_Msk (1UL << APSR_Q_Pos) /*!< APSR: Q Mask */ + + +/** + \brief Union type to access the Interrupt Program Status Register (IPSR). + */ +typedef union +{ + struct + { + uint32_t ISR:9; /*!< bit: 0.. 8 Exception number */ + uint32_t _reserved0:23; /*!< bit: 9..31 Reserved */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} IPSR_Type; + +/* IPSR Register Definitions */ +#define IPSR_ISR_Pos 0U /*!< IPSR: ISR Position */ +#define IPSR_ISR_Msk (0x1FFUL /*<< IPSR_ISR_Pos*/) /*!< IPSR: ISR Mask */ + + +/** + \brief Union type to access the Special-Purpose Program Status Registers (xPSR). + */ +typedef union +{ + struct + { + uint32_t ISR:9; /*!< bit: 0.. 
8 Exception number */ + uint32_t _reserved0:1; /*!< bit: 9 Reserved */ + uint32_t ICI_IT_1:6; /*!< bit: 10..15 ICI/IT part 1 */ + uint32_t _reserved1:8; /*!< bit: 16..23 Reserved */ + uint32_t T:1; /*!< bit: 24 Thumb bit */ + uint32_t ICI_IT_2:2; /*!< bit: 25..26 ICI/IT part 2 */ + uint32_t Q:1; /*!< bit: 27 Saturation condition flag */ + uint32_t V:1; /*!< bit: 28 Overflow condition code flag */ + uint32_t C:1; /*!< bit: 29 Carry condition code flag */ + uint32_t Z:1; /*!< bit: 30 Zero condition code flag */ + uint32_t N:1; /*!< bit: 31 Negative condition code flag */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} xPSR_Type; + +/* xPSR Register Definitions */ +#define xPSR_N_Pos 31U /*!< xPSR: N Position */ +#define xPSR_N_Msk (1UL << xPSR_N_Pos) /*!< xPSR: N Mask */ + +#define xPSR_Z_Pos 30U /*!< xPSR: Z Position */ +#define xPSR_Z_Msk (1UL << xPSR_Z_Pos) /*!< xPSR: Z Mask */ + +#define xPSR_C_Pos 29U /*!< xPSR: C Position */ +#define xPSR_C_Msk (1UL << xPSR_C_Pos) /*!< xPSR: C Mask */ + +#define xPSR_V_Pos 28U /*!< xPSR: V Position */ +#define xPSR_V_Msk (1UL << xPSR_V_Pos) /*!< xPSR: V Mask */ + +#define xPSR_Q_Pos 27U /*!< xPSR: Q Position */ +#define xPSR_Q_Msk (1UL << xPSR_Q_Pos) /*!< xPSR: Q Mask */ + +#define xPSR_ICI_IT_2_Pos 25U /*!< xPSR: ICI/IT part 2 Position */ +#define xPSR_ICI_IT_2_Msk (3UL << xPSR_ICI_IT_2_Pos) /*!< xPSR: ICI/IT part 2 Mask */ + +#define xPSR_T_Pos 24U /*!< xPSR: T Position */ +#define xPSR_T_Msk (1UL << xPSR_T_Pos) /*!< xPSR: T Mask */ + +#define xPSR_ICI_IT_1_Pos 10U /*!< xPSR: ICI/IT part 1 Position */ +#define xPSR_ICI_IT_1_Msk (0x3FUL << xPSR_ICI_IT_1_Pos) /*!< xPSR: ICI/IT part 1 Mask */ + +#define xPSR_ISR_Pos 0U /*!< xPSR: ISR Position */ +#define xPSR_ISR_Msk (0x1FFUL /*<< xPSR_ISR_Pos*/) /*!< xPSR: ISR Mask */ + + +/** + \brief Union type to access the Control Registers (CONTROL). + */ +typedef union +{ + struct + { + uint32_t nPRIV:1; /*!< bit: 0 Execution privilege in Thread mode */ + uint32_t SPSEL:1; /*!< bit: 1 Stack to be used */ + uint32_t _reserved1:30; /*!< bit: 2..31 Reserved */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} CONTROL_Type; + +/* CONTROL Register Definitions */ +#define CONTROL_SPSEL_Pos 1U /*!< CONTROL: SPSEL Position */ +#define CONTROL_SPSEL_Msk (1UL << CONTROL_SPSEL_Pos) /*!< CONTROL: SPSEL Mask */ + +#define CONTROL_nPRIV_Pos 0U /*!< CONTROL: nPRIV Position */ +#define CONTROL_nPRIV_Msk (1UL /*<< CONTROL_nPRIV_Pos*/) /*!< CONTROL: nPRIV Mask */ + +/*@} end of group CMSIS_CORE */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_NVIC Nested Vectored Interrupt Controller (NVIC) + \brief Type definitions for the NVIC Registers + @{ + */ + +/** + \brief Structure type to access the Nested Vectored Interrupt Controller (NVIC). 
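+
+           Example (an illustrative sketch of direct register access; the NVIC_* access functions are
+           normally preferred, and interrupt number 5 is an arbitrary sample):
+           \code
+           NVIC->STIR = 5UL & NVIC_STIR_INTID_Msk;   /* software-pend external interrupt 5 */
+           \endcode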
+ */ +typedef struct +{ + __IOM uint32_t ISER[8U]; /*!< Offset: 0x000 (R/W) Interrupt Set Enable Register */ + uint32_t RESERVED0[24U]; + __IOM uint32_t ICER[8U]; /*!< Offset: 0x080 (R/W) Interrupt Clear Enable Register */ + uint32_t RSERVED1[24U]; + __IOM uint32_t ISPR[8U]; /*!< Offset: 0x100 (R/W) Interrupt Set Pending Register */ + uint32_t RESERVED2[24U]; + __IOM uint32_t ICPR[8U]; /*!< Offset: 0x180 (R/W) Interrupt Clear Pending Register */ + uint32_t RESERVED3[24U]; + __IOM uint32_t IABR[8U]; /*!< Offset: 0x200 (R/W) Interrupt Active bit Register */ + uint32_t RESERVED4[56U]; + __IOM uint8_t IP[240U]; /*!< Offset: 0x300 (R/W) Interrupt Priority Register (8Bit wide) */ + uint32_t RESERVED5[644U]; + __OM uint32_t STIR; /*!< Offset: 0xE00 ( /W) Software Trigger Interrupt Register */ +} NVIC_Type; + +/* Software Triggered Interrupt Register Definitions */ +#define NVIC_STIR_INTID_Pos 0U /*!< STIR: INTLINESNUM Position */ +#define NVIC_STIR_INTID_Msk (0x1FFUL /*<< NVIC_STIR_INTID_Pos*/) /*!< STIR: INTLINESNUM Mask */ + +/*@} end of group CMSIS_NVIC */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SCB System Control Block (SCB) + \brief Type definitions for the System Control Block Registers + @{ + */ + +/** + \brief Structure type to access the System Control Block (SCB). + */ +typedef struct +{ + __IM uint32_t CPUID; /*!< Offset: 0x000 (R/ ) CPUID Base Register */ + __IOM uint32_t ICSR; /*!< Offset: 0x004 (R/W) Interrupt Control and State Register */ + __IOM uint32_t VTOR; /*!< Offset: 0x008 (R/W) Vector Table Offset Register */ + __IOM uint32_t AIRCR; /*!< Offset: 0x00C (R/W) Application Interrupt and Reset Control Register */ + __IOM uint32_t SCR; /*!< Offset: 0x010 (R/W) System Control Register */ + __IOM uint32_t CCR; /*!< Offset: 0x014 (R/W) Configuration Control Register */ + __IOM uint8_t SHP[12U]; /*!< Offset: 0x018 (R/W) System Handlers Priority Registers (4-7, 8-11, 12-15) */ + __IOM uint32_t SHCSR; /*!< Offset: 0x024 (R/W) System Handler Control and State Register */ + __IOM uint32_t CFSR; /*!< Offset: 0x028 (R/W) Configurable Fault Status Register */ + __IOM uint32_t HFSR; /*!< Offset: 0x02C (R/W) HardFault Status Register */ + __IOM uint32_t DFSR; /*!< Offset: 0x030 (R/W) Debug Fault Status Register */ + __IOM uint32_t MMFAR; /*!< Offset: 0x034 (R/W) MemManage Fault Address Register */ + __IOM uint32_t BFAR; /*!< Offset: 0x038 (R/W) BusFault Address Register */ + __IOM uint32_t AFSR; /*!< Offset: 0x03C (R/W) Auxiliary Fault Status Register */ + __IM uint32_t PFR[2U]; /*!< Offset: 0x040 (R/ ) Processor Feature Register */ + __IM uint32_t DFR; /*!< Offset: 0x048 (R/ ) Debug Feature Register */ + __IM uint32_t ADR; /*!< Offset: 0x04C (R/ ) Auxiliary Feature Register */ + __IM uint32_t MMFR[4U]; /*!< Offset: 0x050 (R/ ) Memory Model Feature Register */ + __IM uint32_t ISAR[5U]; /*!< Offset: 0x060 (R/ ) Instruction Set Attributes Register */ + uint32_t RESERVED0[5U]; + __IOM uint32_t CPACR; /*!< Offset: 0x088 (R/W) Coprocessor Access Control Register */ +} SCB_Type; + +/* SCB CPUID Register Definitions */ +#define SCB_CPUID_IMPLEMENTER_Pos 24U /*!< SCB CPUID: IMPLEMENTER Position */ +#define SCB_CPUID_IMPLEMENTER_Msk (0xFFUL << SCB_CPUID_IMPLEMENTER_Pos) /*!< SCB CPUID: IMPLEMENTER Mask */ + +#define SCB_CPUID_VARIANT_Pos 20U /*!< SCB CPUID: VARIANT Position */ +#define SCB_CPUID_VARIANT_Msk (0xFUL << SCB_CPUID_VARIANT_Pos) /*!< SCB CPUID: VARIANT Mask */ + +#define SCB_CPUID_ARCHITECTURE_Pos 16U /*!< SCB CPUID: ARCHITECTURE Position */ +#define 
SCB_CPUID_ARCHITECTURE_Msk (0xFUL << SCB_CPUID_ARCHITECTURE_Pos) /*!< SCB CPUID: ARCHITECTURE Mask */ + +#define SCB_CPUID_PARTNO_Pos 4U /*!< SCB CPUID: PARTNO Position */ +#define SCB_CPUID_PARTNO_Msk (0xFFFUL << SCB_CPUID_PARTNO_Pos) /*!< SCB CPUID: PARTNO Mask */ + +#define SCB_CPUID_REVISION_Pos 0U /*!< SCB CPUID: REVISION Position */ +#define SCB_CPUID_REVISION_Msk (0xFUL /*<< SCB_CPUID_REVISION_Pos*/) /*!< SCB CPUID: REVISION Mask */ + +/* SCB Interrupt Control State Register Definitions */ +#define SCB_ICSR_NMIPENDSET_Pos 31U /*!< SCB ICSR: NMIPENDSET Position */ +#define SCB_ICSR_NMIPENDSET_Msk (1UL << SCB_ICSR_NMIPENDSET_Pos) /*!< SCB ICSR: NMIPENDSET Mask */ + +#define SCB_ICSR_PENDSVSET_Pos 28U /*!< SCB ICSR: PENDSVSET Position */ +#define SCB_ICSR_PENDSVSET_Msk (1UL << SCB_ICSR_PENDSVSET_Pos) /*!< SCB ICSR: PENDSVSET Mask */ + +#define SCB_ICSR_PENDSVCLR_Pos 27U /*!< SCB ICSR: PENDSVCLR Position */ +#define SCB_ICSR_PENDSVCLR_Msk (1UL << SCB_ICSR_PENDSVCLR_Pos) /*!< SCB ICSR: PENDSVCLR Mask */ + +#define SCB_ICSR_PENDSTSET_Pos 26U /*!< SCB ICSR: PENDSTSET Position */ +#define SCB_ICSR_PENDSTSET_Msk (1UL << SCB_ICSR_PENDSTSET_Pos) /*!< SCB ICSR: PENDSTSET Mask */ + +#define SCB_ICSR_PENDSTCLR_Pos 25U /*!< SCB ICSR: PENDSTCLR Position */ +#define SCB_ICSR_PENDSTCLR_Msk (1UL << SCB_ICSR_PENDSTCLR_Pos) /*!< SCB ICSR: PENDSTCLR Mask */ + +#define SCB_ICSR_ISRPREEMPT_Pos 23U /*!< SCB ICSR: ISRPREEMPT Position */ +#define SCB_ICSR_ISRPREEMPT_Msk (1UL << SCB_ICSR_ISRPREEMPT_Pos) /*!< SCB ICSR: ISRPREEMPT Mask */ + +#define SCB_ICSR_ISRPENDING_Pos 22U /*!< SCB ICSR: ISRPENDING Position */ +#define SCB_ICSR_ISRPENDING_Msk (1UL << SCB_ICSR_ISRPENDING_Pos) /*!< SCB ICSR: ISRPENDING Mask */ + +#define SCB_ICSR_VECTPENDING_Pos 12U /*!< SCB ICSR: VECTPENDING Position */ +#define SCB_ICSR_VECTPENDING_Msk (0x1FFUL << SCB_ICSR_VECTPENDING_Pos) /*!< SCB ICSR: VECTPENDING Mask */ + +#define SCB_ICSR_RETTOBASE_Pos 11U /*!< SCB ICSR: RETTOBASE Position */ +#define SCB_ICSR_RETTOBASE_Msk (1UL << SCB_ICSR_RETTOBASE_Pos) /*!< SCB ICSR: RETTOBASE Mask */ + +#define SCB_ICSR_VECTACTIVE_Pos 0U /*!< SCB ICSR: VECTACTIVE Position */ +#define SCB_ICSR_VECTACTIVE_Msk (0x1FFUL /*<< SCB_ICSR_VECTACTIVE_Pos*/) /*!< SCB ICSR: VECTACTIVE Mask */ + +/* SCB Vector Table Offset Register Definitions */ +#if defined (__CM3_REV) && (__CM3_REV < 0x0201U) /* core r2p1 */ +#define SCB_VTOR_TBLBASE_Pos 29U /*!< SCB VTOR: TBLBASE Position */ +#define SCB_VTOR_TBLBASE_Msk (1UL << SCB_VTOR_TBLBASE_Pos) /*!< SCB VTOR: TBLBASE Mask */ + +#define SCB_VTOR_TBLOFF_Pos 7U /*!< SCB VTOR: TBLOFF Position */ +#define SCB_VTOR_TBLOFF_Msk (0x3FFFFFUL << SCB_VTOR_TBLOFF_Pos) /*!< SCB VTOR: TBLOFF Mask */ +#else +#define SCB_VTOR_TBLOFF_Pos 7U /*!< SCB VTOR: TBLOFF Position */ +#define SCB_VTOR_TBLOFF_Msk (0x1FFFFFFUL << SCB_VTOR_TBLOFF_Pos) /*!< SCB VTOR: TBLOFF Mask */ +#endif + +/* SCB Application Interrupt and Reset Control Register Definitions */ +#define SCB_AIRCR_VECTKEY_Pos 16U /*!< SCB AIRCR: VECTKEY Position */ +#define SCB_AIRCR_VECTKEY_Msk (0xFFFFUL << SCB_AIRCR_VECTKEY_Pos) /*!< SCB AIRCR: VECTKEY Mask */ + +#define SCB_AIRCR_VECTKEYSTAT_Pos 16U /*!< SCB AIRCR: VECTKEYSTAT Position */ +#define SCB_AIRCR_VECTKEYSTAT_Msk (0xFFFFUL << SCB_AIRCR_VECTKEYSTAT_Pos) /*!< SCB AIRCR: VECTKEYSTAT Mask */ + +#define SCB_AIRCR_ENDIANESS_Pos 15U /*!< SCB AIRCR: ENDIANESS Position */ +#define SCB_AIRCR_ENDIANESS_Msk (1UL << SCB_AIRCR_ENDIANESS_Pos) /*!< SCB AIRCR: ENDIANESS Mask */ + +#define SCB_AIRCR_PRIGROUP_Pos 8U /*!< SCB AIRCR: 
PRIGROUP Position */ +#define SCB_AIRCR_PRIGROUP_Msk (7UL << SCB_AIRCR_PRIGROUP_Pos) /*!< SCB AIRCR: PRIGROUP Mask */ + +#define SCB_AIRCR_SYSRESETREQ_Pos 2U /*!< SCB AIRCR: SYSRESETREQ Position */ +#define SCB_AIRCR_SYSRESETREQ_Msk (1UL << SCB_AIRCR_SYSRESETREQ_Pos) /*!< SCB AIRCR: SYSRESETREQ Mask */ + +#define SCB_AIRCR_VECTCLRACTIVE_Pos 1U /*!< SCB AIRCR: VECTCLRACTIVE Position */ +#define SCB_AIRCR_VECTCLRACTIVE_Msk (1UL << SCB_AIRCR_VECTCLRACTIVE_Pos) /*!< SCB AIRCR: VECTCLRACTIVE Mask */ + +#define SCB_AIRCR_VECTRESET_Pos 0U /*!< SCB AIRCR: VECTRESET Position */ +#define SCB_AIRCR_VECTRESET_Msk (1UL /*<< SCB_AIRCR_VECTRESET_Pos*/) /*!< SCB AIRCR: VECTRESET Mask */ + +/* SCB System Control Register Definitions */ +#define SCB_SCR_SEVONPEND_Pos 4U /*!< SCB SCR: SEVONPEND Position */ +#define SCB_SCR_SEVONPEND_Msk (1UL << SCB_SCR_SEVONPEND_Pos) /*!< SCB SCR: SEVONPEND Mask */ + +#define SCB_SCR_SLEEPDEEP_Pos 2U /*!< SCB SCR: SLEEPDEEP Position */ +#define SCB_SCR_SLEEPDEEP_Msk (1UL << SCB_SCR_SLEEPDEEP_Pos) /*!< SCB SCR: SLEEPDEEP Mask */ + +#define SCB_SCR_SLEEPONEXIT_Pos 1U /*!< SCB SCR: SLEEPONEXIT Position */ +#define SCB_SCR_SLEEPONEXIT_Msk (1UL << SCB_SCR_SLEEPONEXIT_Pos) /*!< SCB SCR: SLEEPONEXIT Mask */ + +/* SCB Configuration Control Register Definitions */ +#define SCB_CCR_STKALIGN_Pos 9U /*!< SCB CCR: STKALIGN Position */ +#define SCB_CCR_STKALIGN_Msk (1UL << SCB_CCR_STKALIGN_Pos) /*!< SCB CCR: STKALIGN Mask */ + +#define SCB_CCR_BFHFNMIGN_Pos 8U /*!< SCB CCR: BFHFNMIGN Position */ +#define SCB_CCR_BFHFNMIGN_Msk (1UL << SCB_CCR_BFHFNMIGN_Pos) /*!< SCB CCR: BFHFNMIGN Mask */ + +#define SCB_CCR_DIV_0_TRP_Pos 4U /*!< SCB CCR: DIV_0_TRP Position */ +#define SCB_CCR_DIV_0_TRP_Msk (1UL << SCB_CCR_DIV_0_TRP_Pos) /*!< SCB CCR: DIV_0_TRP Mask */ + +#define SCB_CCR_UNALIGN_TRP_Pos 3U /*!< SCB CCR: UNALIGN_TRP Position */ +#define SCB_CCR_UNALIGN_TRP_Msk (1UL << SCB_CCR_UNALIGN_TRP_Pos) /*!< SCB CCR: UNALIGN_TRP Mask */ + +#define SCB_CCR_USERSETMPEND_Pos 1U /*!< SCB CCR: USERSETMPEND Position */ +#define SCB_CCR_USERSETMPEND_Msk (1UL << SCB_CCR_USERSETMPEND_Pos) /*!< SCB CCR: USERSETMPEND Mask */ + +#define SCB_CCR_NONBASETHRDENA_Pos 0U /*!< SCB CCR: NONBASETHRDENA Position */ +#define SCB_CCR_NONBASETHRDENA_Msk (1UL /*<< SCB_CCR_NONBASETHRDENA_Pos*/) /*!< SCB CCR: NONBASETHRDENA Mask */ + +/* SCB System Handler Control and State Register Definitions */ +#define SCB_SHCSR_USGFAULTENA_Pos 18U /*!< SCB SHCSR: USGFAULTENA Position */ +#define SCB_SHCSR_USGFAULTENA_Msk (1UL << SCB_SHCSR_USGFAULTENA_Pos) /*!< SCB SHCSR: USGFAULTENA Mask */ + +#define SCB_SHCSR_BUSFAULTENA_Pos 17U /*!< SCB SHCSR: BUSFAULTENA Position */ +#define SCB_SHCSR_BUSFAULTENA_Msk (1UL << SCB_SHCSR_BUSFAULTENA_Pos) /*!< SCB SHCSR: BUSFAULTENA Mask */ + +#define SCB_SHCSR_MEMFAULTENA_Pos 16U /*!< SCB SHCSR: MEMFAULTENA Position */ +#define SCB_SHCSR_MEMFAULTENA_Msk (1UL << SCB_SHCSR_MEMFAULTENA_Pos) /*!< SCB SHCSR: MEMFAULTENA Mask */ + +#define SCB_SHCSR_SVCALLPENDED_Pos 15U /*!< SCB SHCSR: SVCALLPENDED Position */ +#define SCB_SHCSR_SVCALLPENDED_Msk (1UL << SCB_SHCSR_SVCALLPENDED_Pos) /*!< SCB SHCSR: SVCALLPENDED Mask */ + +#define SCB_SHCSR_BUSFAULTPENDED_Pos 14U /*!< SCB SHCSR: BUSFAULTPENDED Position */ +#define SCB_SHCSR_BUSFAULTPENDED_Msk (1UL << SCB_SHCSR_BUSFAULTPENDED_Pos) /*!< SCB SHCSR: BUSFAULTPENDED Mask */ + +#define SCB_SHCSR_MEMFAULTPENDED_Pos 13U /*!< SCB SHCSR: MEMFAULTPENDED Position */ +#define SCB_SHCSR_MEMFAULTPENDED_Msk (1UL << SCB_SHCSR_MEMFAULTPENDED_Pos) /*!< SCB SHCSR: MEMFAULTPENDED Mask 
*/ + +#define SCB_SHCSR_USGFAULTPENDED_Pos 12U /*!< SCB SHCSR: USGFAULTPENDED Position */ +#define SCB_SHCSR_USGFAULTPENDED_Msk (1UL << SCB_SHCSR_USGFAULTPENDED_Pos) /*!< SCB SHCSR: USGFAULTPENDED Mask */ + +#define SCB_SHCSR_SYSTICKACT_Pos 11U /*!< SCB SHCSR: SYSTICKACT Position */ +#define SCB_SHCSR_SYSTICKACT_Msk (1UL << SCB_SHCSR_SYSTICKACT_Pos) /*!< SCB SHCSR: SYSTICKACT Mask */ + +#define SCB_SHCSR_PENDSVACT_Pos 10U /*!< SCB SHCSR: PENDSVACT Position */ +#define SCB_SHCSR_PENDSVACT_Msk (1UL << SCB_SHCSR_PENDSVACT_Pos) /*!< SCB SHCSR: PENDSVACT Mask */ + +#define SCB_SHCSR_MONITORACT_Pos 8U /*!< SCB SHCSR: MONITORACT Position */ +#define SCB_SHCSR_MONITORACT_Msk (1UL << SCB_SHCSR_MONITORACT_Pos) /*!< SCB SHCSR: MONITORACT Mask */ + +#define SCB_SHCSR_SVCALLACT_Pos 7U /*!< SCB SHCSR: SVCALLACT Position */ +#define SCB_SHCSR_SVCALLACT_Msk (1UL << SCB_SHCSR_SVCALLACT_Pos) /*!< SCB SHCSR: SVCALLACT Mask */ + +#define SCB_SHCSR_USGFAULTACT_Pos 3U /*!< SCB SHCSR: USGFAULTACT Position */ +#define SCB_SHCSR_USGFAULTACT_Msk (1UL << SCB_SHCSR_USGFAULTACT_Pos) /*!< SCB SHCSR: USGFAULTACT Mask */ + +#define SCB_SHCSR_BUSFAULTACT_Pos 1U /*!< SCB SHCSR: BUSFAULTACT Position */ +#define SCB_SHCSR_BUSFAULTACT_Msk (1UL << SCB_SHCSR_BUSFAULTACT_Pos) /*!< SCB SHCSR: BUSFAULTACT Mask */ + +#define SCB_SHCSR_MEMFAULTACT_Pos 0U /*!< SCB SHCSR: MEMFAULTACT Position */ +#define SCB_SHCSR_MEMFAULTACT_Msk (1UL /*<< SCB_SHCSR_MEMFAULTACT_Pos*/) /*!< SCB SHCSR: MEMFAULTACT Mask */ + +/* SCB Configurable Fault Status Register Definitions */ +#define SCB_CFSR_USGFAULTSR_Pos 16U /*!< SCB CFSR: Usage Fault Status Register Position */ +#define SCB_CFSR_USGFAULTSR_Msk (0xFFFFUL << SCB_CFSR_USGFAULTSR_Pos) /*!< SCB CFSR: Usage Fault Status Register Mask */ + +#define SCB_CFSR_BUSFAULTSR_Pos 8U /*!< SCB CFSR: Bus Fault Status Register Position */ +#define SCB_CFSR_BUSFAULTSR_Msk (0xFFUL << SCB_CFSR_BUSFAULTSR_Pos) /*!< SCB CFSR: Bus Fault Status Register Mask */ + +#define SCB_CFSR_MEMFAULTSR_Pos 0U /*!< SCB CFSR: Memory Manage Fault Status Register Position */ +#define SCB_CFSR_MEMFAULTSR_Msk (0xFFUL /*<< SCB_CFSR_MEMFAULTSR_Pos*/) /*!< SCB CFSR: Memory Manage Fault Status Register Mask */ + +/* MemManage Fault Status Register (part of SCB Configurable Fault Status Register) */ +#define SCB_CFSR_MMARVALID_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 7U) /*!< SCB CFSR (MMFSR): MMARVALID Position */ +#define SCB_CFSR_MMARVALID_Msk (1UL << SCB_CFSR_MMARVALID_Pos) /*!< SCB CFSR (MMFSR): MMARVALID Mask */ + +#define SCB_CFSR_MSTKERR_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 4U) /*!< SCB CFSR (MMFSR): MSTKERR Position */ +#define SCB_CFSR_MSTKERR_Msk (1UL << SCB_CFSR_MSTKERR_Pos) /*!< SCB CFSR (MMFSR): MSTKERR Mask */ + +#define SCB_CFSR_MUNSTKERR_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 3U) /*!< SCB CFSR (MMFSR): MUNSTKERR Position */ +#define SCB_CFSR_MUNSTKERR_Msk (1UL << SCB_CFSR_MUNSTKERR_Pos) /*!< SCB CFSR (MMFSR): MUNSTKERR Mask */ + +#define SCB_CFSR_DACCVIOL_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 1U) /*!< SCB CFSR (MMFSR): DACCVIOL Position */ +#define SCB_CFSR_DACCVIOL_Msk (1UL << SCB_CFSR_DACCVIOL_Pos) /*!< SCB CFSR (MMFSR): DACCVIOL Mask */ + +#define SCB_CFSR_IACCVIOL_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 0U) /*!< SCB CFSR (MMFSR): IACCVIOL Position */ +#define SCB_CFSR_IACCVIOL_Msk (1UL /*<< SCB_CFSR_IACCVIOL_Pos*/) /*!< SCB CFSR (MMFSR): IACCVIOL Mask */ + +/* BusFault Status Register (part of SCB Configurable Fault Status Register) */ +#define SCB_CFSR_BFARVALID_Pos (SCB_CFSR_BUSFAULTSR_Pos + 7U) /*!< SCB CFSR (BFSR): BFARVALID Position */ +#define 
SCB_CFSR_BFARVALID_Msk (1UL << SCB_CFSR_BFARVALID_Pos) /*!< SCB CFSR (BFSR): BFARVALID Mask */ + +#define SCB_CFSR_STKERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 4U) /*!< SCB CFSR (BFSR): STKERR Position */ +#define SCB_CFSR_STKERR_Msk (1UL << SCB_CFSR_STKERR_Pos) /*!< SCB CFSR (BFSR): STKERR Mask */ + +#define SCB_CFSR_UNSTKERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 3U) /*!< SCB CFSR (BFSR): UNSTKERR Position */ +#define SCB_CFSR_UNSTKERR_Msk (1UL << SCB_CFSR_UNSTKERR_Pos) /*!< SCB CFSR (BFSR): UNSTKERR Mask */ + +#define SCB_CFSR_IMPRECISERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 2U) /*!< SCB CFSR (BFSR): IMPRECISERR Position */ +#define SCB_CFSR_IMPRECISERR_Msk (1UL << SCB_CFSR_IMPRECISERR_Pos) /*!< SCB CFSR (BFSR): IMPRECISERR Mask */ + +#define SCB_CFSR_PRECISERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 1U) /*!< SCB CFSR (BFSR): PRECISERR Position */ +#define SCB_CFSR_PRECISERR_Msk (1UL << SCB_CFSR_PRECISERR_Pos) /*!< SCB CFSR (BFSR): PRECISERR Mask */ + +#define SCB_CFSR_IBUSERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 0U) /*!< SCB CFSR (BFSR): IBUSERR Position */ +#define SCB_CFSR_IBUSERR_Msk (1UL << SCB_CFSR_IBUSERR_Pos) /*!< SCB CFSR (BFSR): IBUSERR Mask */ + +/* UsageFault Status Register (part of SCB Configurable Fault Status Register) */ +#define SCB_CFSR_DIVBYZERO_Pos (SCB_CFSR_USGFAULTSR_Pos + 9U) /*!< SCB CFSR (UFSR): DIVBYZERO Position */ +#define SCB_CFSR_DIVBYZERO_Msk (1UL << SCB_CFSR_DIVBYZERO_Pos) /*!< SCB CFSR (UFSR): DIVBYZERO Mask */ + +#define SCB_CFSR_UNALIGNED_Pos (SCB_CFSR_USGFAULTSR_Pos + 8U) /*!< SCB CFSR (UFSR): UNALIGNED Position */ +#define SCB_CFSR_UNALIGNED_Msk (1UL << SCB_CFSR_UNALIGNED_Pos) /*!< SCB CFSR (UFSR): UNALIGNED Mask */ + +#define SCB_CFSR_NOCP_Pos (SCB_CFSR_USGFAULTSR_Pos + 3U) /*!< SCB CFSR (UFSR): NOCP Position */ +#define SCB_CFSR_NOCP_Msk (1UL << SCB_CFSR_NOCP_Pos) /*!< SCB CFSR (UFSR): NOCP Mask */ + +#define SCB_CFSR_INVPC_Pos (SCB_CFSR_USGFAULTSR_Pos + 2U) /*!< SCB CFSR (UFSR): INVPC Position */ +#define SCB_CFSR_INVPC_Msk (1UL << SCB_CFSR_INVPC_Pos) /*!< SCB CFSR (UFSR): INVPC Mask */ + +#define SCB_CFSR_INVSTATE_Pos (SCB_CFSR_USGFAULTSR_Pos + 1U) /*!< SCB CFSR (UFSR): INVSTATE Position */ +#define SCB_CFSR_INVSTATE_Msk (1UL << SCB_CFSR_INVSTATE_Pos) /*!< SCB CFSR (UFSR): INVSTATE Mask */ + +#define SCB_CFSR_UNDEFINSTR_Pos (SCB_CFSR_USGFAULTSR_Pos + 0U) /*!< SCB CFSR (UFSR): UNDEFINSTR Position */ +#define SCB_CFSR_UNDEFINSTR_Msk (1UL << SCB_CFSR_UNDEFINSTR_Pos) /*!< SCB CFSR (UFSR): UNDEFINSTR Mask */ + +/* SCB Hard Fault Status Register Definitions */ +#define SCB_HFSR_DEBUGEVT_Pos 31U /*!< SCB HFSR: DEBUGEVT Position */ +#define SCB_HFSR_DEBUGEVT_Msk (1UL << SCB_HFSR_DEBUGEVT_Pos) /*!< SCB HFSR: DEBUGEVT Mask */ + +#define SCB_HFSR_FORCED_Pos 30U /*!< SCB HFSR: FORCED Position */ +#define SCB_HFSR_FORCED_Msk (1UL << SCB_HFSR_FORCED_Pos) /*!< SCB HFSR: FORCED Mask */ + +#define SCB_HFSR_VECTTBL_Pos 1U /*!< SCB HFSR: VECTTBL Position */ +#define SCB_HFSR_VECTTBL_Msk (1UL << SCB_HFSR_VECTTBL_Pos) /*!< SCB HFSR: VECTTBL Mask */ + +/* SCB Debug Fault Status Register Definitions */ +#define SCB_DFSR_EXTERNAL_Pos 4U /*!< SCB DFSR: EXTERNAL Position */ +#define SCB_DFSR_EXTERNAL_Msk (1UL << SCB_DFSR_EXTERNAL_Pos) /*!< SCB DFSR: EXTERNAL Mask */ + +#define SCB_DFSR_VCATCH_Pos 3U /*!< SCB DFSR: VCATCH Position */ +#define SCB_DFSR_VCATCH_Msk (1UL << SCB_DFSR_VCATCH_Pos) /*!< SCB DFSR: VCATCH Mask */ + +#define SCB_DFSR_DWTTRAP_Pos 2U /*!< SCB DFSR: DWTTRAP Position */ +#define SCB_DFSR_DWTTRAP_Msk (1UL << SCB_DFSR_DWTTRAP_Pos) /*!< SCB DFSR: DWTTRAP Mask */ + +#define SCB_DFSR_BKPT_Pos 
1U /*!< SCB DFSR: BKPT Position */ +#define SCB_DFSR_BKPT_Msk (1UL << SCB_DFSR_BKPT_Pos) /*!< SCB DFSR: BKPT Mask */ + +#define SCB_DFSR_HALTED_Pos 0U /*!< SCB DFSR: HALTED Position */ +#define SCB_DFSR_HALTED_Msk (1UL /*<< SCB_DFSR_HALTED_Pos*/) /*!< SCB DFSR: HALTED Mask */ + +/*@} end of group CMSIS_SCB */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SCnSCB System Controls not in SCB (SCnSCB) + \brief Type definitions for the System Control and ID Register not in the SCB + @{ + */ + +/** + \brief Structure type to access the System Control and ID Register not in the SCB. + */ +typedef struct +{ + uint32_t RESERVED0[1U]; + __IM uint32_t ICTR; /*!< Offset: 0x004 (R/ ) Interrupt Controller Type Register */ +#if defined (__CM3_REV) && (__CM3_REV >= 0x200U) + __IOM uint32_t ACTLR; /*!< Offset: 0x008 (R/W) Auxiliary Control Register */ +#else + uint32_t RESERVED1[1U]; +#endif +} SCnSCB_Type; + +/* Interrupt Controller Type Register Definitions */ +#define SCnSCB_ICTR_INTLINESNUM_Pos 0U /*!< ICTR: INTLINESNUM Position */ +#define SCnSCB_ICTR_INTLINESNUM_Msk (0xFUL /*<< SCnSCB_ICTR_INTLINESNUM_Pos*/) /*!< ICTR: INTLINESNUM Mask */ + +/* Auxiliary Control Register Definitions */ + +#define SCnSCB_ACTLR_DISFOLD_Pos 2U /*!< ACTLR: DISFOLD Position */ +#define SCnSCB_ACTLR_DISFOLD_Msk (1UL << SCnSCB_ACTLR_DISFOLD_Pos) /*!< ACTLR: DISFOLD Mask */ + +#define SCnSCB_ACTLR_DISDEFWBUF_Pos 1U /*!< ACTLR: DISDEFWBUF Position */ +#define SCnSCB_ACTLR_DISDEFWBUF_Msk (1UL << SCnSCB_ACTLR_DISDEFWBUF_Pos) /*!< ACTLR: DISDEFWBUF Mask */ + +#define SCnSCB_ACTLR_DISMCYCINT_Pos 0U /*!< ACTLR: DISMCYCINT Position */ +#define SCnSCB_ACTLR_DISMCYCINT_Msk (1UL /*<< SCnSCB_ACTLR_DISMCYCINT_Pos*/) /*!< ACTLR: DISMCYCINT Mask */ + +/*@} end of group CMSIS_SCnotSCB */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SysTick System Tick Timer (SysTick) + \brief Type definitions for the System Timer Registers. + @{ + */ + +/** + \brief Structure type to access the System Timer (SysTick). 
+ */ +typedef struct +{ + __IOM uint32_t CTRL; /*!< Offset: 0x000 (R/W) SysTick Control and Status Register */ + __IOM uint32_t LOAD; /*!< Offset: 0x004 (R/W) SysTick Reload Value Register */ + __IOM uint32_t VAL; /*!< Offset: 0x008 (R/W) SysTick Current Value Register */ + __IM uint32_t CALIB; /*!< Offset: 0x00C (R/ ) SysTick Calibration Register */ +} SysTick_Type; + +/* SysTick Control / Status Register Definitions */ +#define SysTick_CTRL_COUNTFLAG_Pos 16U /*!< SysTick CTRL: COUNTFLAG Position */ +#define SysTick_CTRL_COUNTFLAG_Msk (1UL << SysTick_CTRL_COUNTFLAG_Pos) /*!< SysTick CTRL: COUNTFLAG Mask */ + +#define SysTick_CTRL_CLKSOURCE_Pos 2U /*!< SysTick CTRL: CLKSOURCE Position */ +#define SysTick_CTRL_CLKSOURCE_Msk (1UL << SysTick_CTRL_CLKSOURCE_Pos) /*!< SysTick CTRL: CLKSOURCE Mask */ + +#define SysTick_CTRL_TICKINT_Pos 1U /*!< SysTick CTRL: TICKINT Position */ +#define SysTick_CTRL_TICKINT_Msk (1UL << SysTick_CTRL_TICKINT_Pos) /*!< SysTick CTRL: TICKINT Mask */ + +#define SysTick_CTRL_ENABLE_Pos 0U /*!< SysTick CTRL: ENABLE Position */ +#define SysTick_CTRL_ENABLE_Msk (1UL /*<< SysTick_CTRL_ENABLE_Pos*/) /*!< SysTick CTRL: ENABLE Mask */ + +/* SysTick Reload Register Definitions */ +#define SysTick_LOAD_RELOAD_Pos 0U /*!< SysTick LOAD: RELOAD Position */ +#define SysTick_LOAD_RELOAD_Msk (0xFFFFFFUL /*<< SysTick_LOAD_RELOAD_Pos*/) /*!< SysTick LOAD: RELOAD Mask */ + +/* SysTick Current Register Definitions */ +#define SysTick_VAL_CURRENT_Pos 0U /*!< SysTick VAL: CURRENT Position */ +#define SysTick_VAL_CURRENT_Msk (0xFFFFFFUL /*<< SysTick_VAL_CURRENT_Pos*/) /*!< SysTick VAL: CURRENT Mask */ + +/* SysTick Calibration Register Definitions */ +#define SysTick_CALIB_NOREF_Pos 31U /*!< SysTick CALIB: NOREF Position */ +#define SysTick_CALIB_NOREF_Msk (1UL << SysTick_CALIB_NOREF_Pos) /*!< SysTick CALIB: NOREF Mask */ + +#define SysTick_CALIB_SKEW_Pos 30U /*!< SysTick CALIB: SKEW Position */ +#define SysTick_CALIB_SKEW_Msk (1UL << SysTick_CALIB_SKEW_Pos) /*!< SysTick CALIB: SKEW Mask */ + +#define SysTick_CALIB_TENMS_Pos 0U /*!< SysTick CALIB: TENMS Position */ +#define SysTick_CALIB_TENMS_Msk (0xFFFFFFUL /*<< SysTick_CALIB_TENMS_Pos*/) /*!< SysTick CALIB: TENMS Mask */ + +/*@} end of group CMSIS_SysTick */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_ITM Instrumentation Trace Macrocell (ITM) + \brief Type definitions for the Instrumentation Trace Macrocell (ITM) + @{ + */ + +/** + \brief Structure type to access the Instrumentation Trace Macrocell Register (ITM). 
+ */ +typedef struct +{ + __OM union + { + __OM uint8_t u8; /*!< Offset: 0x000 ( /W) ITM Stimulus Port 8-bit */ + __OM uint16_t u16; /*!< Offset: 0x000 ( /W) ITM Stimulus Port 16-bit */ + __OM uint32_t u32; /*!< Offset: 0x000 ( /W) ITM Stimulus Port 32-bit */ + } PORT [32U]; /*!< Offset: 0x000 ( /W) ITM Stimulus Port Registers */ + uint32_t RESERVED0[864U]; + __IOM uint32_t TER; /*!< Offset: 0xE00 (R/W) ITM Trace Enable Register */ + uint32_t RESERVED1[15U]; + __IOM uint32_t TPR; /*!< Offset: 0xE40 (R/W) ITM Trace Privilege Register */ + uint32_t RESERVED2[15U]; + __IOM uint32_t TCR; /*!< Offset: 0xE80 (R/W) ITM Trace Control Register */ + uint32_t RESERVED3[29U]; + __OM uint32_t IWR; /*!< Offset: 0xEF8 ( /W) ITM Integration Write Register */ + __IM uint32_t IRR; /*!< Offset: 0xEFC (R/ ) ITM Integration Read Register */ + __IOM uint32_t IMCR; /*!< Offset: 0xF00 (R/W) ITM Integration Mode Control Register */ + uint32_t RESERVED4[43U]; + __OM uint32_t LAR; /*!< Offset: 0xFB0 ( /W) ITM Lock Access Register */ + __IM uint32_t LSR; /*!< Offset: 0xFB4 (R/ ) ITM Lock Status Register */ + uint32_t RESERVED5[6U]; + __IM uint32_t PID4; /*!< Offset: 0xFD0 (R/ ) ITM Peripheral Identification Register #4 */ + __IM uint32_t PID5; /*!< Offset: 0xFD4 (R/ ) ITM Peripheral Identification Register #5 */ + __IM uint32_t PID6; /*!< Offset: 0xFD8 (R/ ) ITM Peripheral Identification Register #6 */ + __IM uint32_t PID7; /*!< Offset: 0xFDC (R/ ) ITM Peripheral Identification Register #7 */ + __IM uint32_t PID0; /*!< Offset: 0xFE0 (R/ ) ITM Peripheral Identification Register #0 */ + __IM uint32_t PID1; /*!< Offset: 0xFE4 (R/ ) ITM Peripheral Identification Register #1 */ + __IM uint32_t PID2; /*!< Offset: 0xFE8 (R/ ) ITM Peripheral Identification Register #2 */ + __IM uint32_t PID3; /*!< Offset: 0xFEC (R/ ) ITM Peripheral Identification Register #3 */ + __IM uint32_t CID0; /*!< Offset: 0xFF0 (R/ ) ITM Component Identification Register #0 */ + __IM uint32_t CID1; /*!< Offset: 0xFF4 (R/ ) ITM Component Identification Register #1 */ + __IM uint32_t CID2; /*!< Offset: 0xFF8 (R/ ) ITM Component Identification Register #2 */ + __IM uint32_t CID3; /*!< Offset: 0xFFC (R/ ) ITM Component Identification Register #3 */ +} ITM_Type; + +/* ITM Trace Privilege Register Definitions */ +#define ITM_TPR_PRIVMASK_Pos 0U /*!< ITM TPR: PRIVMASK Position */ +#define ITM_TPR_PRIVMASK_Msk (0xFFFFFFFFUL /*<< ITM_TPR_PRIVMASK_Pos*/) /*!< ITM TPR: PRIVMASK Mask */ + +/* ITM Trace Control Register Definitions */ +#define ITM_TCR_BUSY_Pos 23U /*!< ITM TCR: BUSY Position */ +#define ITM_TCR_BUSY_Msk (1UL << ITM_TCR_BUSY_Pos) /*!< ITM TCR: BUSY Mask */ + +#define ITM_TCR_TraceBusID_Pos 16U /*!< ITM TCR: ATBID Position */ +#define ITM_TCR_TraceBusID_Msk (0x7FUL << ITM_TCR_TraceBusID_Pos) /*!< ITM TCR: ATBID Mask */ + +#define ITM_TCR_GTSFREQ_Pos 10U /*!< ITM TCR: Global timestamp frequency Position */ +#define ITM_TCR_GTSFREQ_Msk (3UL << ITM_TCR_GTSFREQ_Pos) /*!< ITM TCR: Global timestamp frequency Mask */ + +#define ITM_TCR_TSPrescale_Pos 8U /*!< ITM TCR: TSPrescale Position */ +#define ITM_TCR_TSPrescale_Msk (3UL << ITM_TCR_TSPrescale_Pos) /*!< ITM TCR: TSPrescale Mask */ + +#define ITM_TCR_SWOENA_Pos 4U /*!< ITM TCR: SWOENA Position */ +#define ITM_TCR_SWOENA_Msk (1UL << ITM_TCR_SWOENA_Pos) /*!< ITM TCR: SWOENA Mask */ + +#define ITM_TCR_DWTENA_Pos 3U /*!< ITM TCR: DWTENA Position */ +#define ITM_TCR_DWTENA_Msk (1UL << ITM_TCR_DWTENA_Pos) /*!< ITM TCR: DWTENA Mask */ + +#define ITM_TCR_SYNCENA_Pos 2U /*!< ITM TCR: SYNCENA Position */ +#define 
ITM_TCR_SYNCENA_Msk (1UL << ITM_TCR_SYNCENA_Pos) /*!< ITM TCR: SYNCENA Mask */ + +#define ITM_TCR_TSENA_Pos 1U /*!< ITM TCR: TSENA Position */ +#define ITM_TCR_TSENA_Msk (1UL << ITM_TCR_TSENA_Pos) /*!< ITM TCR: TSENA Mask */ + +#define ITM_TCR_ITMENA_Pos 0U /*!< ITM TCR: ITM Enable bit Position */ +#define ITM_TCR_ITMENA_Msk (1UL /*<< ITM_TCR_ITMENA_Pos*/) /*!< ITM TCR: ITM Enable bit Mask */ + +/* ITM Integration Write Register Definitions */ +#define ITM_IWR_ATVALIDM_Pos 0U /*!< ITM IWR: ATVALIDM Position */ +#define ITM_IWR_ATVALIDM_Msk (1UL /*<< ITM_IWR_ATVALIDM_Pos*/) /*!< ITM IWR: ATVALIDM Mask */ + +/* ITM Integration Read Register Definitions */ +#define ITM_IRR_ATREADYM_Pos 0U /*!< ITM IRR: ATREADYM Position */ +#define ITM_IRR_ATREADYM_Msk (1UL /*<< ITM_IRR_ATREADYM_Pos*/) /*!< ITM IRR: ATREADYM Mask */ + +/* ITM Integration Mode Control Register Definitions */ +#define ITM_IMCR_INTEGRATION_Pos 0U /*!< ITM IMCR: INTEGRATION Position */ +#define ITM_IMCR_INTEGRATION_Msk (1UL /*<< ITM_IMCR_INTEGRATION_Pos*/) /*!< ITM IMCR: INTEGRATION Mask */ + +/* ITM Lock Status Register Definitions */ +#define ITM_LSR_ByteAcc_Pos 2U /*!< ITM LSR: ByteAcc Position */ +#define ITM_LSR_ByteAcc_Msk (1UL << ITM_LSR_ByteAcc_Pos) /*!< ITM LSR: ByteAcc Mask */ + +#define ITM_LSR_Access_Pos 1U /*!< ITM LSR: Access Position */ +#define ITM_LSR_Access_Msk (1UL << ITM_LSR_Access_Pos) /*!< ITM LSR: Access Mask */ + +#define ITM_LSR_Present_Pos 0U /*!< ITM LSR: Present Position */ +#define ITM_LSR_Present_Msk (1UL /*<< ITM_LSR_Present_Pos*/) /*!< ITM LSR: Present Mask */ + +/*@}*/ /* end of group CMSIS_ITM */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_DWT Data Watchpoint and Trace (DWT) + \brief Type definitions for the Data Watchpoint and Trace (DWT) + @{ + */ + +/** + \brief Structure type to access the Data Watchpoint and Trace Register (DWT). 
+ */ +typedef struct +{ + __IOM uint32_t CTRL; /*!< Offset: 0x000 (R/W) Control Register */ + __IOM uint32_t CYCCNT; /*!< Offset: 0x004 (R/W) Cycle Count Register */ + __IOM uint32_t CPICNT; /*!< Offset: 0x008 (R/W) CPI Count Register */ + __IOM uint32_t EXCCNT; /*!< Offset: 0x00C (R/W) Exception Overhead Count Register */ + __IOM uint32_t SLEEPCNT; /*!< Offset: 0x010 (R/W) Sleep Count Register */ + __IOM uint32_t LSUCNT; /*!< Offset: 0x014 (R/W) LSU Count Register */ + __IOM uint32_t FOLDCNT; /*!< Offset: 0x018 (R/W) Folded-instruction Count Register */ + __IM uint32_t PCSR; /*!< Offset: 0x01C (R/ ) Program Counter Sample Register */ + __IOM uint32_t COMP0; /*!< Offset: 0x020 (R/W) Comparator Register 0 */ + __IOM uint32_t MASK0; /*!< Offset: 0x024 (R/W) Mask Register 0 */ + __IOM uint32_t FUNCTION0; /*!< Offset: 0x028 (R/W) Function Register 0 */ + uint32_t RESERVED0[1U]; + __IOM uint32_t COMP1; /*!< Offset: 0x030 (R/W) Comparator Register 1 */ + __IOM uint32_t MASK1; /*!< Offset: 0x034 (R/W) Mask Register 1 */ + __IOM uint32_t FUNCTION1; /*!< Offset: 0x038 (R/W) Function Register 1 */ + uint32_t RESERVED1[1U]; + __IOM uint32_t COMP2; /*!< Offset: 0x040 (R/W) Comparator Register 2 */ + __IOM uint32_t MASK2; /*!< Offset: 0x044 (R/W) Mask Register 2 */ + __IOM uint32_t FUNCTION2; /*!< Offset: 0x048 (R/W) Function Register 2 */ + uint32_t RESERVED2[1U]; + __IOM uint32_t COMP3; /*!< Offset: 0x050 (R/W) Comparator Register 3 */ + __IOM uint32_t MASK3; /*!< Offset: 0x054 (R/W) Mask Register 3 */ + __IOM uint32_t FUNCTION3; /*!< Offset: 0x058 (R/W) Function Register 3 */ +} DWT_Type; + +/* DWT Control Register Definitions */ +#define DWT_CTRL_NUMCOMP_Pos 28U /*!< DWT CTRL: NUMCOMP Position */ +#define DWT_CTRL_NUMCOMP_Msk (0xFUL << DWT_CTRL_NUMCOMP_Pos) /*!< DWT CTRL: NUMCOMP Mask */ + +#define DWT_CTRL_NOTRCPKT_Pos 27U /*!< DWT CTRL: NOTRCPKT Position */ +#define DWT_CTRL_NOTRCPKT_Msk (0x1UL << DWT_CTRL_NOTRCPKT_Pos) /*!< DWT CTRL: NOTRCPKT Mask */ + +#define DWT_CTRL_NOEXTTRIG_Pos 26U /*!< DWT CTRL: NOEXTTRIG Position */ +#define DWT_CTRL_NOEXTTRIG_Msk (0x1UL << DWT_CTRL_NOEXTTRIG_Pos) /*!< DWT CTRL: NOEXTTRIG Mask */ + +#define DWT_CTRL_NOCYCCNT_Pos 25U /*!< DWT CTRL: NOCYCCNT Position */ +#define DWT_CTRL_NOCYCCNT_Msk (0x1UL << DWT_CTRL_NOCYCCNT_Pos) /*!< DWT CTRL: NOCYCCNT Mask */ + +#define DWT_CTRL_NOPRFCNT_Pos 24U /*!< DWT CTRL: NOPRFCNT Position */ +#define DWT_CTRL_NOPRFCNT_Msk (0x1UL << DWT_CTRL_NOPRFCNT_Pos) /*!< DWT CTRL: NOPRFCNT Mask */ + +#define DWT_CTRL_CYCEVTENA_Pos 22U /*!< DWT CTRL: CYCEVTENA Position */ +#define DWT_CTRL_CYCEVTENA_Msk (0x1UL << DWT_CTRL_CYCEVTENA_Pos) /*!< DWT CTRL: CYCEVTENA Mask */ + +#define DWT_CTRL_FOLDEVTENA_Pos 21U /*!< DWT CTRL: FOLDEVTENA Position */ +#define DWT_CTRL_FOLDEVTENA_Msk (0x1UL << DWT_CTRL_FOLDEVTENA_Pos) /*!< DWT CTRL: FOLDEVTENA Mask */ + +#define DWT_CTRL_LSUEVTENA_Pos 20U /*!< DWT CTRL: LSUEVTENA Position */ +#define DWT_CTRL_LSUEVTENA_Msk (0x1UL << DWT_CTRL_LSUEVTENA_Pos) /*!< DWT CTRL: LSUEVTENA Mask */ + +#define DWT_CTRL_SLEEPEVTENA_Pos 19U /*!< DWT CTRL: SLEEPEVTENA Position */ +#define DWT_CTRL_SLEEPEVTENA_Msk (0x1UL << DWT_CTRL_SLEEPEVTENA_Pos) /*!< DWT CTRL: SLEEPEVTENA Mask */ + +#define DWT_CTRL_EXCEVTENA_Pos 18U /*!< DWT CTRL: EXCEVTENA Position */ +#define DWT_CTRL_EXCEVTENA_Msk (0x1UL << DWT_CTRL_EXCEVTENA_Pos) /*!< DWT CTRL: EXCEVTENA Mask */ + +#define DWT_CTRL_CPIEVTENA_Pos 17U /*!< DWT CTRL: CPIEVTENA Position */ +#define DWT_CTRL_CPIEVTENA_Msk (0x1UL << DWT_CTRL_CPIEVTENA_Pos) /*!< DWT CTRL: CPIEVTENA Mask */ + 
+#define DWT_CTRL_EXCTRCENA_Pos 16U /*!< DWT CTRL: EXCTRCENA Position */ +#define DWT_CTRL_EXCTRCENA_Msk (0x1UL << DWT_CTRL_EXCTRCENA_Pos) /*!< DWT CTRL: EXCTRCENA Mask */ + +#define DWT_CTRL_PCSAMPLENA_Pos 12U /*!< DWT CTRL: PCSAMPLENA Position */ +#define DWT_CTRL_PCSAMPLENA_Msk (0x1UL << DWT_CTRL_PCSAMPLENA_Pos) /*!< DWT CTRL: PCSAMPLENA Mask */ + +#define DWT_CTRL_SYNCTAP_Pos 10U /*!< DWT CTRL: SYNCTAP Position */ +#define DWT_CTRL_SYNCTAP_Msk (0x3UL << DWT_CTRL_SYNCTAP_Pos) /*!< DWT CTRL: SYNCTAP Mask */ + +#define DWT_CTRL_CYCTAP_Pos 9U /*!< DWT CTRL: CYCTAP Position */ +#define DWT_CTRL_CYCTAP_Msk (0x1UL << DWT_CTRL_CYCTAP_Pos) /*!< DWT CTRL: CYCTAP Mask */ + +#define DWT_CTRL_POSTINIT_Pos 5U /*!< DWT CTRL: POSTINIT Position */ +#define DWT_CTRL_POSTINIT_Msk (0xFUL << DWT_CTRL_POSTINIT_Pos) /*!< DWT CTRL: POSTINIT Mask */ + +#define DWT_CTRL_POSTPRESET_Pos 1U /*!< DWT CTRL: POSTPRESET Position */ +#define DWT_CTRL_POSTPRESET_Msk (0xFUL << DWT_CTRL_POSTPRESET_Pos) /*!< DWT CTRL: POSTPRESET Mask */ + +#define DWT_CTRL_CYCCNTENA_Pos 0U /*!< DWT CTRL: CYCCNTENA Position */ +#define DWT_CTRL_CYCCNTENA_Msk (0x1UL /*<< DWT_CTRL_CYCCNTENA_Pos*/) /*!< DWT CTRL: CYCCNTENA Mask */ + +/* DWT CPI Count Register Definitions */ +#define DWT_CPICNT_CPICNT_Pos 0U /*!< DWT CPICNT: CPICNT Position */ +#define DWT_CPICNT_CPICNT_Msk (0xFFUL /*<< DWT_CPICNT_CPICNT_Pos*/) /*!< DWT CPICNT: CPICNT Mask */ + +/* DWT Exception Overhead Count Register Definitions */ +#define DWT_EXCCNT_EXCCNT_Pos 0U /*!< DWT EXCCNT: EXCCNT Position */ +#define DWT_EXCCNT_EXCCNT_Msk (0xFFUL /*<< DWT_EXCCNT_EXCCNT_Pos*/) /*!< DWT EXCCNT: EXCCNT Mask */ + +/* DWT Sleep Count Register Definitions */ +#define DWT_SLEEPCNT_SLEEPCNT_Pos 0U /*!< DWT SLEEPCNT: SLEEPCNT Position */ +#define DWT_SLEEPCNT_SLEEPCNT_Msk (0xFFUL /*<< DWT_SLEEPCNT_SLEEPCNT_Pos*/) /*!< DWT SLEEPCNT: SLEEPCNT Mask */ + +/* DWT LSU Count Register Definitions */ +#define DWT_LSUCNT_LSUCNT_Pos 0U /*!< DWT LSUCNT: LSUCNT Position */ +#define DWT_LSUCNT_LSUCNT_Msk (0xFFUL /*<< DWT_LSUCNT_LSUCNT_Pos*/) /*!< DWT LSUCNT: LSUCNT Mask */ + +/* DWT Folded-instruction Count Register Definitions */ +#define DWT_FOLDCNT_FOLDCNT_Pos 0U /*!< DWT FOLDCNT: FOLDCNT Position */ +#define DWT_FOLDCNT_FOLDCNT_Msk (0xFFUL /*<< DWT_FOLDCNT_FOLDCNT_Pos*/) /*!< DWT FOLDCNT: FOLDCNT Mask */ + +/* DWT Comparator Mask Register Definitions */ +#define DWT_MASK_MASK_Pos 0U /*!< DWT MASK: MASK Position */ +#define DWT_MASK_MASK_Msk (0x1FUL /*<< DWT_MASK_MASK_Pos*/) /*!< DWT MASK: MASK Mask */ + +/* DWT Comparator Function Register Definitions */ +#define DWT_FUNCTION_MATCHED_Pos 24U /*!< DWT FUNCTION: MATCHED Position */ +#define DWT_FUNCTION_MATCHED_Msk (0x1UL << DWT_FUNCTION_MATCHED_Pos) /*!< DWT FUNCTION: MATCHED Mask */ + +#define DWT_FUNCTION_DATAVADDR1_Pos 16U /*!< DWT FUNCTION: DATAVADDR1 Position */ +#define DWT_FUNCTION_DATAVADDR1_Msk (0xFUL << DWT_FUNCTION_DATAVADDR1_Pos) /*!< DWT FUNCTION: DATAVADDR1 Mask */ + +#define DWT_FUNCTION_DATAVADDR0_Pos 12U /*!< DWT FUNCTION: DATAVADDR0 Position */ +#define DWT_FUNCTION_DATAVADDR0_Msk (0xFUL << DWT_FUNCTION_DATAVADDR0_Pos) /*!< DWT FUNCTION: DATAVADDR0 Mask */ + +#define DWT_FUNCTION_DATAVSIZE_Pos 10U /*!< DWT FUNCTION: DATAVSIZE Position */ +#define DWT_FUNCTION_DATAVSIZE_Msk (0x3UL << DWT_FUNCTION_DATAVSIZE_Pos) /*!< DWT FUNCTION: DATAVSIZE Mask */ + +#define DWT_FUNCTION_LNK1ENA_Pos 9U /*!< DWT FUNCTION: LNK1ENA Position */ +#define DWT_FUNCTION_LNK1ENA_Msk (0x1UL << DWT_FUNCTION_LNK1ENA_Pos) /*!< DWT FUNCTION: LNK1ENA Mask */ + 
+#define DWT_FUNCTION_DATAVMATCH_Pos 8U /*!< DWT FUNCTION: DATAVMATCH Position */ +#define DWT_FUNCTION_DATAVMATCH_Msk (0x1UL << DWT_FUNCTION_DATAVMATCH_Pos) /*!< DWT FUNCTION: DATAVMATCH Mask */ + +#define DWT_FUNCTION_CYCMATCH_Pos 7U /*!< DWT FUNCTION: CYCMATCH Position */ +#define DWT_FUNCTION_CYCMATCH_Msk (0x1UL << DWT_FUNCTION_CYCMATCH_Pos) /*!< DWT FUNCTION: CYCMATCH Mask */ + +#define DWT_FUNCTION_EMITRANGE_Pos 5U /*!< DWT FUNCTION: EMITRANGE Position */ +#define DWT_FUNCTION_EMITRANGE_Msk (0x1UL << DWT_FUNCTION_EMITRANGE_Pos) /*!< DWT FUNCTION: EMITRANGE Mask */ + +#define DWT_FUNCTION_FUNCTION_Pos 0U /*!< DWT FUNCTION: FUNCTION Position */ +#define DWT_FUNCTION_FUNCTION_Msk (0xFUL /*<< DWT_FUNCTION_FUNCTION_Pos*/) /*!< DWT FUNCTION: FUNCTION Mask */ + +/*@}*/ /* end of group CMSIS_DWT */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_TPI Trace Port Interface (TPI) + \brief Type definitions for the Trace Port Interface (TPI) + @{ + */ + +/** + \brief Structure type to access the Trace Port Interface Register (TPI). + */ +typedef struct +{ + __IM uint32_t SSPSR; /*!< Offset: 0x000 (R/ ) Supported Parallel Port Size Register */ + __IOM uint32_t CSPSR; /*!< Offset: 0x004 (R/W) Current Parallel Port Size Register */ + uint32_t RESERVED0[2U]; + __IOM uint32_t ACPR; /*!< Offset: 0x010 (R/W) Asynchronous Clock Prescaler Register */ + uint32_t RESERVED1[55U]; + __IOM uint32_t SPPR; /*!< Offset: 0x0F0 (R/W) Selected Pin Protocol Register */ + uint32_t RESERVED2[131U]; + __IM uint32_t FFSR; /*!< Offset: 0x300 (R/ ) Formatter and Flush Status Register */ + __IOM uint32_t FFCR; /*!< Offset: 0x304 (R/W) Formatter and Flush Control Register */ + __IM uint32_t FSCR; /*!< Offset: 0x308 (R/ ) Formatter Synchronization Counter Register */ + uint32_t RESERVED3[759U]; + __IM uint32_t TRIGGER; /*!< Offset: 0xEE8 (R/ ) TRIGGER Register */ + __IM uint32_t FIFO0; /*!< Offset: 0xEEC (R/ ) Integration ETM Data */ + __IM uint32_t ITATBCTR2; /*!< Offset: 0xEF0 (R/ ) ITATBCTR2 */ + uint32_t RESERVED4[1U]; + __IM uint32_t ITATBCTR0; /*!< Offset: 0xEF8 (R/ ) ITATBCTR0 */ + __IM uint32_t FIFO1; /*!< Offset: 0xEFC (R/ ) Integration ITM Data */ + __IOM uint32_t ITCTRL; /*!< Offset: 0xF00 (R/W) Integration Mode Control */ + uint32_t RESERVED5[39U]; + __IOM uint32_t CLAIMSET; /*!< Offset: 0xFA0 (R/W) Claim tag set */ + __IOM uint32_t CLAIMCLR; /*!< Offset: 0xFA4 (R/W) Claim tag clear */ + uint32_t RESERVED7[8U]; + __IM uint32_t DEVID; /*!< Offset: 0xFC8 (R/ ) TPIU_DEVID */ + __IM uint32_t DEVTYPE; /*!< Offset: 0xFCC (R/ ) TPIU_DEVTYPE */ +} TPI_Type; + +/* TPI Asynchronous Clock Prescaler Register Definitions */ +#define TPI_ACPR_PRESCALER_Pos 0U /*!< TPI ACPR: PRESCALER Position */ +#define TPI_ACPR_PRESCALER_Msk (0x1FFFUL /*<< TPI_ACPR_PRESCALER_Pos*/) /*!< TPI ACPR: PRESCALER Mask */ + +/* TPI Selected Pin Protocol Register Definitions */ +#define TPI_SPPR_TXMODE_Pos 0U /*!< TPI SPPR: TXMODE Position */ +#define TPI_SPPR_TXMODE_Msk (0x3UL /*<< TPI_SPPR_TXMODE_Pos*/) /*!< TPI SPPR: TXMODE Mask */ + +/* TPI Formatter and Flush Status Register Definitions */ +#define TPI_FFSR_FtNonStop_Pos 3U /*!< TPI FFSR: FtNonStop Position */ +#define TPI_FFSR_FtNonStop_Msk (0x1UL << TPI_FFSR_FtNonStop_Pos) /*!< TPI FFSR: FtNonStop Mask */ + +#define TPI_FFSR_TCPresent_Pos 2U /*!< TPI FFSR: TCPresent Position */ +#define TPI_FFSR_TCPresent_Msk (0x1UL << TPI_FFSR_TCPresent_Pos) /*!< TPI FFSR: TCPresent Mask */ + +#define TPI_FFSR_FtStopped_Pos 1U /*!< TPI FFSR: FtStopped Position */ +#define TPI_FFSR_FtStopped_Msk (0x1UL 
<< TPI_FFSR_FtStopped_Pos) /*!< TPI FFSR: FtStopped Mask */ + +#define TPI_FFSR_FlInProg_Pos 0U /*!< TPI FFSR: FlInProg Position */ +#define TPI_FFSR_FlInProg_Msk (0x1UL /*<< TPI_FFSR_FlInProg_Pos*/) /*!< TPI FFSR: FlInProg Mask */ + +/* TPI Formatter and Flush Control Register Definitions */ +#define TPI_FFCR_TrigIn_Pos 8U /*!< TPI FFCR: TrigIn Position */ +#define TPI_FFCR_TrigIn_Msk (0x1UL << TPI_FFCR_TrigIn_Pos) /*!< TPI FFCR: TrigIn Mask */ + +#define TPI_FFCR_EnFCont_Pos 1U /*!< TPI FFCR: EnFCont Position */ +#define TPI_FFCR_EnFCont_Msk (0x1UL << TPI_FFCR_EnFCont_Pos) /*!< TPI FFCR: EnFCont Mask */ + +/* TPI TRIGGER Register Definitions */ +#define TPI_TRIGGER_TRIGGER_Pos 0U /*!< TPI TRIGGER: TRIGGER Position */ +#define TPI_TRIGGER_TRIGGER_Msk (0x1UL /*<< TPI_TRIGGER_TRIGGER_Pos*/) /*!< TPI TRIGGER: TRIGGER Mask */ + +/* TPI Integration ETM Data Register Definitions (FIFO0) */ +#define TPI_FIFO0_ITM_ATVALID_Pos 29U /*!< TPI FIFO0: ITM_ATVALID Position */ +#define TPI_FIFO0_ITM_ATVALID_Msk (0x3UL << TPI_FIFO0_ITM_ATVALID_Pos) /*!< TPI FIFO0: ITM_ATVALID Mask */ + +#define TPI_FIFO0_ITM_bytecount_Pos 27U /*!< TPI FIFO0: ITM_bytecount Position */ +#define TPI_FIFO0_ITM_bytecount_Msk (0x3UL << TPI_FIFO0_ITM_bytecount_Pos) /*!< TPI FIFO0: ITM_bytecount Mask */ + +#define TPI_FIFO0_ETM_ATVALID_Pos 26U /*!< TPI FIFO0: ETM_ATVALID Position */ +#define TPI_FIFO0_ETM_ATVALID_Msk (0x3UL << TPI_FIFO0_ETM_ATVALID_Pos) /*!< TPI FIFO0: ETM_ATVALID Mask */ + +#define TPI_FIFO0_ETM_bytecount_Pos 24U /*!< TPI FIFO0: ETM_bytecount Position */ +#define TPI_FIFO0_ETM_bytecount_Msk (0x3UL << TPI_FIFO0_ETM_bytecount_Pos) /*!< TPI FIFO0: ETM_bytecount Mask */ + +#define TPI_FIFO0_ETM2_Pos 16U /*!< TPI FIFO0: ETM2 Position */ +#define TPI_FIFO0_ETM2_Msk (0xFFUL << TPI_FIFO0_ETM2_Pos) /*!< TPI FIFO0: ETM2 Mask */ + +#define TPI_FIFO0_ETM1_Pos 8U /*!< TPI FIFO0: ETM1 Position */ +#define TPI_FIFO0_ETM1_Msk (0xFFUL << TPI_FIFO0_ETM1_Pos) /*!< TPI FIFO0: ETM1 Mask */ + +#define TPI_FIFO0_ETM0_Pos 0U /*!< TPI FIFO0: ETM0 Position */ +#define TPI_FIFO0_ETM0_Msk (0xFFUL /*<< TPI_FIFO0_ETM0_Pos*/) /*!< TPI FIFO0: ETM0 Mask */ + +/* TPI ITATBCTR2 Register Definitions */ +#define TPI_ITATBCTR2_ATREADY2_Pos 0U /*!< TPI ITATBCTR2: ATREADY2 Position */ +#define TPI_ITATBCTR2_ATREADY2_Msk (0x1UL /*<< TPI_ITATBCTR2_ATREADY2_Pos*/) /*!< TPI ITATBCTR2: ATREADY2 Mask */ + +#define TPI_ITATBCTR2_ATREADY1_Pos 0U /*!< TPI ITATBCTR2: ATREADY1 Position */ +#define TPI_ITATBCTR2_ATREADY1_Msk (0x1UL /*<< TPI_ITATBCTR2_ATREADY1_Pos*/) /*!< TPI ITATBCTR2: ATREADY1 Mask */ + +/* TPI Integration ITM Data Register Definitions (FIFO1) */ +#define TPI_FIFO1_ITM_ATVALID_Pos 29U /*!< TPI FIFO1: ITM_ATVALID Position */ +#define TPI_FIFO1_ITM_ATVALID_Msk (0x3UL << TPI_FIFO1_ITM_ATVALID_Pos) /*!< TPI FIFO1: ITM_ATVALID Mask */ + +#define TPI_FIFO1_ITM_bytecount_Pos 27U /*!< TPI FIFO1: ITM_bytecount Position */ +#define TPI_FIFO1_ITM_bytecount_Msk (0x3UL << TPI_FIFO1_ITM_bytecount_Pos) /*!< TPI FIFO1: ITM_bytecount Mask */ + +#define TPI_FIFO1_ETM_ATVALID_Pos 26U /*!< TPI FIFO1: ETM_ATVALID Position */ +#define TPI_FIFO1_ETM_ATVALID_Msk (0x3UL << TPI_FIFO1_ETM_ATVALID_Pos) /*!< TPI FIFO1: ETM_ATVALID Mask */ + +#define TPI_FIFO1_ETM_bytecount_Pos 24U /*!< TPI FIFO1: ETM_bytecount Position */ +#define TPI_FIFO1_ETM_bytecount_Msk (0x3UL << TPI_FIFO1_ETM_bytecount_Pos) /*!< TPI FIFO1: ETM_bytecount Mask */ + +#define TPI_FIFO1_ITM2_Pos 16U /*!< TPI FIFO1: ITM2 Position */ +#define TPI_FIFO1_ITM2_Msk (0xFFUL << TPI_FIFO1_ITM2_Pos) /*!< TPI FIFO1: 
ITM2 Mask */ + +#define TPI_FIFO1_ITM1_Pos 8U /*!< TPI FIFO1: ITM1 Position */ +#define TPI_FIFO1_ITM1_Msk (0xFFUL << TPI_FIFO1_ITM1_Pos) /*!< TPI FIFO1: ITM1 Mask */ + +#define TPI_FIFO1_ITM0_Pos 0U /*!< TPI FIFO1: ITM0 Position */ +#define TPI_FIFO1_ITM0_Msk (0xFFUL /*<< TPI_FIFO1_ITM0_Pos*/) /*!< TPI FIFO1: ITM0 Mask */ + +/* TPI ITATBCTR0 Register Definitions */ +#define TPI_ITATBCTR0_ATREADY2_Pos 0U /*!< TPI ITATBCTR0: ATREADY2 Position */ +#define TPI_ITATBCTR0_ATREADY2_Msk (0x1UL /*<< TPI_ITATBCTR0_ATREADY2_Pos*/) /*!< TPI ITATBCTR0: ATREADY2 Mask */ + +#define TPI_ITATBCTR0_ATREADY1_Pos 0U /*!< TPI ITATBCTR0: ATREADY1 Position */ +#define TPI_ITATBCTR0_ATREADY1_Msk (0x1UL /*<< TPI_ITATBCTR0_ATREADY1_Pos*/) /*!< TPI ITATBCTR0: ATREADY1 Mask */ + +/* TPI Integration Mode Control Register Definitions */ +#define TPI_ITCTRL_Mode_Pos 0U /*!< TPI ITCTRL: Mode Position */ +#define TPI_ITCTRL_Mode_Msk (0x3UL /*<< TPI_ITCTRL_Mode_Pos*/) /*!< TPI ITCTRL: Mode Mask */ + +/* TPI DEVID Register Definitions */ +#define TPI_DEVID_NRZVALID_Pos 11U /*!< TPI DEVID: NRZVALID Position */ +#define TPI_DEVID_NRZVALID_Msk (0x1UL << TPI_DEVID_NRZVALID_Pos) /*!< TPI DEVID: NRZVALID Mask */ + +#define TPI_DEVID_MANCVALID_Pos 10U /*!< TPI DEVID: MANCVALID Position */ +#define TPI_DEVID_MANCVALID_Msk (0x1UL << TPI_DEVID_MANCVALID_Pos) /*!< TPI DEVID: MANCVALID Mask */ + +#define TPI_DEVID_PTINVALID_Pos 9U /*!< TPI DEVID: PTINVALID Position */ +#define TPI_DEVID_PTINVALID_Msk (0x1UL << TPI_DEVID_PTINVALID_Pos) /*!< TPI DEVID: PTINVALID Mask */ + +#define TPI_DEVID_MinBufSz_Pos 6U /*!< TPI DEVID: MinBufSz Position */ +#define TPI_DEVID_MinBufSz_Msk (0x7UL << TPI_DEVID_MinBufSz_Pos) /*!< TPI DEVID: MinBufSz Mask */ + +#define TPI_DEVID_AsynClkIn_Pos 5U /*!< TPI DEVID: AsynClkIn Position */ +#define TPI_DEVID_AsynClkIn_Msk (0x1UL << TPI_DEVID_AsynClkIn_Pos) /*!< TPI DEVID: AsynClkIn Mask */ + +#define TPI_DEVID_NrTraceInput_Pos 0U /*!< TPI DEVID: NrTraceInput Position */ +#define TPI_DEVID_NrTraceInput_Msk (0x1FUL /*<< TPI_DEVID_NrTraceInput_Pos*/) /*!< TPI DEVID: NrTraceInput Mask */ + +/* TPI DEVTYPE Register Definitions */ +#define TPI_DEVTYPE_SubType_Pos 4U /*!< TPI DEVTYPE: SubType Position */ +#define TPI_DEVTYPE_SubType_Msk (0xFUL /*<< TPI_DEVTYPE_SubType_Pos*/) /*!< TPI DEVTYPE: SubType Mask */ + +#define TPI_DEVTYPE_MajorType_Pos 0U /*!< TPI DEVTYPE: MajorType Position */ +#define TPI_DEVTYPE_MajorType_Msk (0xFUL << TPI_DEVTYPE_MajorType_Pos) /*!< TPI DEVTYPE: MajorType Mask */ + +/*@}*/ /* end of group CMSIS_TPI */ + + +#if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_MPU Memory Protection Unit (MPU) + \brief Type definitions for the Memory Protection Unit (MPU) + @{ + */ + +/** + \brief Structure type to access the Memory Protection Unit (MPU). 
+ */
+typedef struct
+{
+  __IM  uint32_t TYPE;                   /*!< Offset: 0x000 (R/ )  MPU Type Register */
+  __IOM uint32_t CTRL;                   /*!< Offset: 0x004 (R/W)  MPU Control Register */
+  __IOM uint32_t RNR;                    /*!< Offset: 0x008 (R/W)  MPU Region Number Register */
+  __IOM uint32_t RBAR;                   /*!< Offset: 0x00C (R/W)  MPU Region Base Address Register */
+  __IOM uint32_t RASR;                   /*!< Offset: 0x010 (R/W)  MPU Region Attribute and Size Register */
+  __IOM uint32_t RBAR_A1;                /*!< Offset: 0x014 (R/W)  MPU Alias 1 Region Base Address Register */
+  __IOM uint32_t RASR_A1;                /*!< Offset: 0x018 (R/W)  MPU Alias 1 Region Attribute and Size Register */
+  __IOM uint32_t RBAR_A2;                /*!< Offset: 0x01C (R/W)  MPU Alias 2 Region Base Address Register */
+  __IOM uint32_t RASR_A2;                /*!< Offset: 0x020 (R/W)  MPU Alias 2 Region Attribute and Size Register */
+  __IOM uint32_t RBAR_A3;                /*!< Offset: 0x024 (R/W)  MPU Alias 3 Region Base Address Register */
+  __IOM uint32_t RASR_A3;                /*!< Offset: 0x028 (R/W)  MPU Alias 3 Region Attribute and Size Register */
+} MPU_Type;
+
+#define MPU_TYPE_RALIASES                 4U
+
+/* MPU Type Register Definitions */
+#define MPU_TYPE_IREGION_Pos             16U                                            /*!< MPU TYPE: IREGION Position */
+#define MPU_TYPE_IREGION_Msk             (0xFFUL << MPU_TYPE_IREGION_Pos)               /*!< MPU TYPE: IREGION Mask */
+
+#define MPU_TYPE_DREGION_Pos              8U                                            /*!< MPU TYPE: DREGION Position */
+#define MPU_TYPE_DREGION_Msk             (0xFFUL << MPU_TYPE_DREGION_Pos)               /*!< MPU TYPE: DREGION Mask */
+
+#define MPU_TYPE_SEPARATE_Pos             0U                                            /*!< MPU TYPE: SEPARATE Position */
+#define MPU_TYPE_SEPARATE_Msk            (1UL /*<< MPU_TYPE_SEPARATE_Pos*/)             /*!< MPU TYPE: SEPARATE Mask */
+
+/* MPU Control Register Definitions */
+#define MPU_CTRL_PRIVDEFENA_Pos           2U                                            /*!< MPU CTRL: PRIVDEFENA Position */
+#define MPU_CTRL_PRIVDEFENA_Msk          (1UL << MPU_CTRL_PRIVDEFENA_Pos)               /*!< MPU CTRL: PRIVDEFENA Mask */
+
+#define MPU_CTRL_HFNMIENA_Pos             1U                                            /*!< MPU CTRL: HFNMIENA Position */
+#define MPU_CTRL_HFNMIENA_Msk            (1UL << MPU_CTRL_HFNMIENA_Pos)                 /*!< MPU CTRL: HFNMIENA Mask */
+
+#define MPU_CTRL_ENABLE_Pos               0U                                            /*!< MPU CTRL: ENABLE Position */
+#define MPU_CTRL_ENABLE_Msk              (1UL /*<< MPU_CTRL_ENABLE_Pos*/)               /*!< MPU CTRL: ENABLE Mask */
+
+/* MPU Region Number Register Definitions */
+#define MPU_RNR_REGION_Pos                0U                                            /*!< MPU RNR: REGION Position */
+#define MPU_RNR_REGION_Msk               (0xFFUL /*<< MPU_RNR_REGION_Pos*/)             /*!< MPU RNR: REGION Mask */
+
+/* MPU Region Base Address Register Definitions */
+#define MPU_RBAR_ADDR_Pos                 5U                                            /*!< MPU RBAR: ADDR Position */
+#define MPU_RBAR_ADDR_Msk                (0x7FFFFFFUL << MPU_RBAR_ADDR_Pos)             /*!< MPU RBAR: ADDR Mask */
+
+#define MPU_RBAR_VALID_Pos                4U                                            /*!< MPU RBAR: VALID Position */
+#define MPU_RBAR_VALID_Msk               (1UL << MPU_RBAR_VALID_Pos)                    /*!< MPU RBAR: VALID Mask */
+
+#define MPU_RBAR_REGION_Pos               0U                                            /*!< MPU RBAR: REGION Position */
+#define MPU_RBAR_REGION_Msk              (0xFUL /*<< MPU_RBAR_REGION_Pos*/)             /*!< MPU RBAR: REGION Mask */
+
+/* MPU Region Attribute and Size Register Definitions */
+#define MPU_RASR_ATTRS_Pos               16U                                            /*!< MPU RASR: MPU Region Attribute field Position */
+#define MPU_RASR_ATTRS_Msk               (0xFFFFUL << MPU_RASR_ATTRS_Pos)               /*!< MPU RASR: MPU Region Attribute field Mask */
+
+#define MPU_RASR_XN_Pos                  28U                                            /*!< MPU RASR: ATTRS.XN Position */
+#define MPU_RASR_XN_Msk                  (1UL << MPU_RASR_XN_Pos)                       /*!< MPU RASR: ATTRS.XN Mask */
+
+#define MPU_RASR_AP_Pos                  24U                                            /*!< MPU RASR: ATTRS.AP Position */
+#define MPU_RASR_AP_Msk                  (0x7UL << MPU_RASR_AP_Pos)                     /*!< MPU RASR: ATTRS.AP Mask */
+
+#define MPU_RASR_TEX_Pos                 19U                                            /*!< MPU RASR: ATTRS.TEX Position */
+#define MPU_RASR_TEX_Msk                 (0x7UL << MPU_RASR_TEX_Pos)                    /*!< MPU RASR: ATTRS.TEX
Mask */ + +#define MPU_RASR_S_Pos 18U /*!< MPU RASR: ATTRS.S Position */ +#define MPU_RASR_S_Msk (1UL << MPU_RASR_S_Pos) /*!< MPU RASR: ATTRS.S Mask */ + +#define MPU_RASR_C_Pos 17U /*!< MPU RASR: ATTRS.C Position */ +#define MPU_RASR_C_Msk (1UL << MPU_RASR_C_Pos) /*!< MPU RASR: ATTRS.C Mask */ + +#define MPU_RASR_B_Pos 16U /*!< MPU RASR: ATTRS.B Position */ +#define MPU_RASR_B_Msk (1UL << MPU_RASR_B_Pos) /*!< MPU RASR: ATTRS.B Mask */ + +#define MPU_RASR_SRD_Pos 8U /*!< MPU RASR: Sub-Region Disable Position */ +#define MPU_RASR_SRD_Msk (0xFFUL << MPU_RASR_SRD_Pos) /*!< MPU RASR: Sub-Region Disable Mask */ + +#define MPU_RASR_SIZE_Pos 1U /*!< MPU RASR: Region Size Field Position */ +#define MPU_RASR_SIZE_Msk (0x1FUL << MPU_RASR_SIZE_Pos) /*!< MPU RASR: Region Size Field Mask */ + +#define MPU_RASR_ENABLE_Pos 0U /*!< MPU RASR: Region enable bit Position */ +#define MPU_RASR_ENABLE_Msk (1UL /*<< MPU_RASR_ENABLE_Pos*/) /*!< MPU RASR: Region enable bit Disable Mask */ + +/*@} end of group CMSIS_MPU */ +#endif + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_CoreDebug Core Debug Registers (CoreDebug) + \brief Type definitions for the Core Debug Registers + @{ + */ + +/** + \brief Structure type to access the Core Debug Register (CoreDebug). + */ +typedef struct +{ + __IOM uint32_t DHCSR; /*!< Offset: 0x000 (R/W) Debug Halting Control and Status Register */ + __OM uint32_t DCRSR; /*!< Offset: 0x004 ( /W) Debug Core Register Selector Register */ + __IOM uint32_t DCRDR; /*!< Offset: 0x008 (R/W) Debug Core Register Data Register */ + __IOM uint32_t DEMCR; /*!< Offset: 0x00C (R/W) Debug Exception and Monitor Control Register */ +} CoreDebug_Type; + +/* Debug Halting Control and Status Register Definitions */ +#define CoreDebug_DHCSR_DBGKEY_Pos 16U /*!< CoreDebug DHCSR: DBGKEY Position */ +#define CoreDebug_DHCSR_DBGKEY_Msk (0xFFFFUL << CoreDebug_DHCSR_DBGKEY_Pos) /*!< CoreDebug DHCSR: DBGKEY Mask */ + +#define CoreDebug_DHCSR_S_RESET_ST_Pos 25U /*!< CoreDebug DHCSR: S_RESET_ST Position */ +#define CoreDebug_DHCSR_S_RESET_ST_Msk (1UL << CoreDebug_DHCSR_S_RESET_ST_Pos) /*!< CoreDebug DHCSR: S_RESET_ST Mask */ + +#define CoreDebug_DHCSR_S_RETIRE_ST_Pos 24U /*!< CoreDebug DHCSR: S_RETIRE_ST Position */ +#define CoreDebug_DHCSR_S_RETIRE_ST_Msk (1UL << CoreDebug_DHCSR_S_RETIRE_ST_Pos) /*!< CoreDebug DHCSR: S_RETIRE_ST Mask */ + +#define CoreDebug_DHCSR_S_LOCKUP_Pos 19U /*!< CoreDebug DHCSR: S_LOCKUP Position */ +#define CoreDebug_DHCSR_S_LOCKUP_Msk (1UL << CoreDebug_DHCSR_S_LOCKUP_Pos) /*!< CoreDebug DHCSR: S_LOCKUP Mask */ + +#define CoreDebug_DHCSR_S_SLEEP_Pos 18U /*!< CoreDebug DHCSR: S_SLEEP Position */ +#define CoreDebug_DHCSR_S_SLEEP_Msk (1UL << CoreDebug_DHCSR_S_SLEEP_Pos) /*!< CoreDebug DHCSR: S_SLEEP Mask */ + +#define CoreDebug_DHCSR_S_HALT_Pos 17U /*!< CoreDebug DHCSR: S_HALT Position */ +#define CoreDebug_DHCSR_S_HALT_Msk (1UL << CoreDebug_DHCSR_S_HALT_Pos) /*!< CoreDebug DHCSR: S_HALT Mask */ + +#define CoreDebug_DHCSR_S_REGRDY_Pos 16U /*!< CoreDebug DHCSR: S_REGRDY Position */ +#define CoreDebug_DHCSR_S_REGRDY_Msk (1UL << CoreDebug_DHCSR_S_REGRDY_Pos) /*!< CoreDebug DHCSR: S_REGRDY Mask */ + +#define CoreDebug_DHCSR_C_SNAPSTALL_Pos 5U /*!< CoreDebug DHCSR: C_SNAPSTALL Position */ +#define CoreDebug_DHCSR_C_SNAPSTALL_Msk (1UL << CoreDebug_DHCSR_C_SNAPSTALL_Pos) /*!< CoreDebug DHCSR: C_SNAPSTALL Mask */ + +#define CoreDebug_DHCSR_C_MASKINTS_Pos 3U /*!< CoreDebug DHCSR: C_MASKINTS Position */ +#define CoreDebug_DHCSR_C_MASKINTS_Msk (1UL << CoreDebug_DHCSR_C_MASKINTS_Pos) /*!< CoreDebug 
DHCSR: C_MASKINTS Mask */ + +#define CoreDebug_DHCSR_C_STEP_Pos 2U /*!< CoreDebug DHCSR: C_STEP Position */ +#define CoreDebug_DHCSR_C_STEP_Msk (1UL << CoreDebug_DHCSR_C_STEP_Pos) /*!< CoreDebug DHCSR: C_STEP Mask */ + +#define CoreDebug_DHCSR_C_HALT_Pos 1U /*!< CoreDebug DHCSR: C_HALT Position */ +#define CoreDebug_DHCSR_C_HALT_Msk (1UL << CoreDebug_DHCSR_C_HALT_Pos) /*!< CoreDebug DHCSR: C_HALT Mask */ + +#define CoreDebug_DHCSR_C_DEBUGEN_Pos 0U /*!< CoreDebug DHCSR: C_DEBUGEN Position */ +#define CoreDebug_DHCSR_C_DEBUGEN_Msk (1UL /*<< CoreDebug_DHCSR_C_DEBUGEN_Pos*/) /*!< CoreDebug DHCSR: C_DEBUGEN Mask */ + +/* Debug Core Register Selector Register Definitions */ +#define CoreDebug_DCRSR_REGWnR_Pos 16U /*!< CoreDebug DCRSR: REGWnR Position */ +#define CoreDebug_DCRSR_REGWnR_Msk (1UL << CoreDebug_DCRSR_REGWnR_Pos) /*!< CoreDebug DCRSR: REGWnR Mask */ + +#define CoreDebug_DCRSR_REGSEL_Pos 0U /*!< CoreDebug DCRSR: REGSEL Position */ +#define CoreDebug_DCRSR_REGSEL_Msk (0x1FUL /*<< CoreDebug_DCRSR_REGSEL_Pos*/) /*!< CoreDebug DCRSR: REGSEL Mask */ + +/* Debug Exception and Monitor Control Register Definitions */ +#define CoreDebug_DEMCR_TRCENA_Pos 24U /*!< CoreDebug DEMCR: TRCENA Position */ +#define CoreDebug_DEMCR_TRCENA_Msk (1UL << CoreDebug_DEMCR_TRCENA_Pos) /*!< CoreDebug DEMCR: TRCENA Mask */ + +#define CoreDebug_DEMCR_MON_REQ_Pos 19U /*!< CoreDebug DEMCR: MON_REQ Position */ +#define CoreDebug_DEMCR_MON_REQ_Msk (1UL << CoreDebug_DEMCR_MON_REQ_Pos) /*!< CoreDebug DEMCR: MON_REQ Mask */ + +#define CoreDebug_DEMCR_MON_STEP_Pos 18U /*!< CoreDebug DEMCR: MON_STEP Position */ +#define CoreDebug_DEMCR_MON_STEP_Msk (1UL << CoreDebug_DEMCR_MON_STEP_Pos) /*!< CoreDebug DEMCR: MON_STEP Mask */ + +#define CoreDebug_DEMCR_MON_PEND_Pos 17U /*!< CoreDebug DEMCR: MON_PEND Position */ +#define CoreDebug_DEMCR_MON_PEND_Msk (1UL << CoreDebug_DEMCR_MON_PEND_Pos) /*!< CoreDebug DEMCR: MON_PEND Mask */ + +#define CoreDebug_DEMCR_MON_EN_Pos 16U /*!< CoreDebug DEMCR: MON_EN Position */ +#define CoreDebug_DEMCR_MON_EN_Msk (1UL << CoreDebug_DEMCR_MON_EN_Pos) /*!< CoreDebug DEMCR: MON_EN Mask */ + +#define CoreDebug_DEMCR_VC_HARDERR_Pos 10U /*!< CoreDebug DEMCR: VC_HARDERR Position */ +#define CoreDebug_DEMCR_VC_HARDERR_Msk (1UL << CoreDebug_DEMCR_VC_HARDERR_Pos) /*!< CoreDebug DEMCR: VC_HARDERR Mask */ + +#define CoreDebug_DEMCR_VC_INTERR_Pos 9U /*!< CoreDebug DEMCR: VC_INTERR Position */ +#define CoreDebug_DEMCR_VC_INTERR_Msk (1UL << CoreDebug_DEMCR_VC_INTERR_Pos) /*!< CoreDebug DEMCR: VC_INTERR Mask */ + +#define CoreDebug_DEMCR_VC_BUSERR_Pos 8U /*!< CoreDebug DEMCR: VC_BUSERR Position */ +#define CoreDebug_DEMCR_VC_BUSERR_Msk (1UL << CoreDebug_DEMCR_VC_BUSERR_Pos) /*!< CoreDebug DEMCR: VC_BUSERR Mask */ + +#define CoreDebug_DEMCR_VC_STATERR_Pos 7U /*!< CoreDebug DEMCR: VC_STATERR Position */ +#define CoreDebug_DEMCR_VC_STATERR_Msk (1UL << CoreDebug_DEMCR_VC_STATERR_Pos) /*!< CoreDebug DEMCR: VC_STATERR Mask */ + +#define CoreDebug_DEMCR_VC_CHKERR_Pos 6U /*!< CoreDebug DEMCR: VC_CHKERR Position */ +#define CoreDebug_DEMCR_VC_CHKERR_Msk (1UL << CoreDebug_DEMCR_VC_CHKERR_Pos) /*!< CoreDebug DEMCR: VC_CHKERR Mask */ + +#define CoreDebug_DEMCR_VC_NOCPERR_Pos 5U /*!< CoreDebug DEMCR: VC_NOCPERR Position */ +#define CoreDebug_DEMCR_VC_NOCPERR_Msk (1UL << CoreDebug_DEMCR_VC_NOCPERR_Pos) /*!< CoreDebug DEMCR: VC_NOCPERR Mask */ + +#define CoreDebug_DEMCR_VC_MMERR_Pos 4U /*!< CoreDebug DEMCR: VC_MMERR Position */ +#define CoreDebug_DEMCR_VC_MMERR_Msk (1UL << CoreDebug_DEMCR_VC_MMERR_Pos) /*!< CoreDebug DEMCR: 
VC_MMERR Mask */ + +#define CoreDebug_DEMCR_VC_CORERESET_Pos 0U /*!< CoreDebug DEMCR: VC_CORERESET Position */ +#define CoreDebug_DEMCR_VC_CORERESET_Msk (1UL /*<< CoreDebug_DEMCR_VC_CORERESET_Pos*/) /*!< CoreDebug DEMCR: VC_CORERESET Mask */ + +/*@} end of group CMSIS_CoreDebug */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_core_bitfield Core register bit field macros + \brief Macros for use with bit field definitions (xxx_Pos, xxx_Msk). + @{ + */ + +/** + \brief Mask and shift a bit field value for use in a register bit range. + \param[in] field Name of the register bit field. + \param[in] value Value of the bit field. This parameter is interpreted as an uint32_t type. + \return Masked and shifted value. +*/ +#define _VAL2FLD(field, value) (((uint32_t)(value) << field ## _Pos) & field ## _Msk) + +/** + \brief Mask and shift a register value to extract a bit filed value. + \param[in] field Name of the register bit field. + \param[in] value Value of register. This parameter is interpreted as an uint32_t type. + \return Masked and shifted bit field value. +*/ +#define _FLD2VAL(field, value) (((uint32_t)(value) & field ## _Msk) >> field ## _Pos) + +/*@} end of group CMSIS_core_bitfield */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_core_base Core Definitions + \brief Definitions for base addresses, unions, and structures. + @{ + */ + +/* Memory mapping of Core Hardware */ +#define SCS_BASE (0xE000E000UL) /*!< System Control Space Base Address */ +#define ITM_BASE (0xE0000000UL) /*!< ITM Base Address */ +#define DWT_BASE (0xE0001000UL) /*!< DWT Base Address */ +#define TPI_BASE (0xE0040000UL) /*!< TPI Base Address */ +#define CoreDebug_BASE (0xE000EDF0UL) /*!< Core Debug Base Address */ +#define SysTick_BASE (SCS_BASE + 0x0010UL) /*!< SysTick Base Address */ +#define NVIC_BASE (SCS_BASE + 0x0100UL) /*!< NVIC Base Address */ +#define SCB_BASE (SCS_BASE + 0x0D00UL) /*!< System Control Block Base Address */ + +#define SCnSCB ((SCnSCB_Type *) SCS_BASE ) /*!< System control Register not in SCB */ +#define SCB ((SCB_Type *) SCB_BASE ) /*!< SCB configuration struct */ +#define SysTick ((SysTick_Type *) SysTick_BASE ) /*!< SysTick configuration struct */ +#define NVIC ((NVIC_Type *) NVIC_BASE ) /*!< NVIC configuration struct */ +#define ITM ((ITM_Type *) ITM_BASE ) /*!< ITM configuration struct */ +#define DWT ((DWT_Type *) DWT_BASE ) /*!< DWT configuration struct */ +#define TPI ((TPI_Type *) TPI_BASE ) /*!< TPI configuration struct */ +#define CoreDebug ((CoreDebug_Type *) CoreDebug_BASE) /*!< Core Debug configuration struct */ + +#if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) + #define MPU_BASE (SCS_BASE + 0x0D90UL) /*!< Memory Protection Unit */ + #define MPU ((MPU_Type *) MPU_BASE ) /*!< Memory Protection Unit */ +#endif + +/*@} */ + + + +/******************************************************************************* + * Hardware Abstraction Layer + Core Function Interface contains: + - Core NVIC Functions + - Core SysTick Functions + - Core Debug Functions + - Core Register Access Functions + ******************************************************************************/ +/** + \defgroup CMSIS_Core_FunctionInterface Functions and Instructions Reference +*/ + + + +/* ########################## NVIC functions #################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_NVICFunctions NVIC Functions + \brief Functions that manage interrupts and exceptions via the NVIC. 
+ @{ + */ + +#ifdef CMSIS_NVIC_VIRTUAL + #ifndef CMSIS_NVIC_VIRTUAL_HEADER_FILE + #define CMSIS_NVIC_VIRTUAL_HEADER_FILE "cmsis_nvic_virtual.h" + #endif + #include CMSIS_NVIC_VIRTUAL_HEADER_FILE +#else + #define NVIC_SetPriorityGrouping __NVIC_SetPriorityGrouping + #define NVIC_GetPriorityGrouping __NVIC_GetPriorityGrouping + #define NVIC_EnableIRQ __NVIC_EnableIRQ + #define NVIC_GetEnableIRQ __NVIC_GetEnableIRQ + #define NVIC_DisableIRQ __NVIC_DisableIRQ + #define NVIC_GetPendingIRQ __NVIC_GetPendingIRQ + #define NVIC_SetPendingIRQ __NVIC_SetPendingIRQ + #define NVIC_ClearPendingIRQ __NVIC_ClearPendingIRQ + #define NVIC_GetActive __NVIC_GetActive + #define NVIC_SetPriority __NVIC_SetPriority + #define NVIC_GetPriority __NVIC_GetPriority + #define NVIC_SystemReset __NVIC_SystemReset +#endif /* CMSIS_NVIC_VIRTUAL */ + +#ifdef CMSIS_VECTAB_VIRTUAL + #ifndef CMSIS_VECTAB_VIRTUAL_HEADER_FILE + #define CMSIS_VECTAB_VIRTUAL_HEADER_FILE "cmsis_vectab_virtual.h" + #endif + #include CMSIS_VECTAB_VIRTUAL_HEADER_FILE +#else + #define NVIC_SetVector __NVIC_SetVector + #define NVIC_GetVector __NVIC_GetVector +#endif /* (CMSIS_VECTAB_VIRTUAL) */ + +#define NVIC_USER_IRQ_OFFSET 16 + + +/* The following EXC_RETURN values are saved the LR on exception entry */ +#define EXC_RETURN_HANDLER (0xFFFFFFF1UL) /* return to Handler mode, uses MSP after return */ +#define EXC_RETURN_THREAD_MSP (0xFFFFFFF9UL) /* return to Thread mode, uses MSP after return */ +#define EXC_RETURN_THREAD_PSP (0xFFFFFFFDUL) /* return to Thread mode, uses PSP after return */ + + +/** + \brief Set Priority Grouping + \details Sets the priority grouping field using the required unlock sequence. + The parameter PriorityGroup is assigned to the field SCB->AIRCR [10:8] PRIGROUP field. + Only values from 0..7 are used. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS), the smallest possible priority group is set. + \param [in] PriorityGroup Priority grouping field. + */ +__STATIC_INLINE void __NVIC_SetPriorityGrouping(uint32_t PriorityGroup) +{ + uint32_t reg_value; + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + + reg_value = SCB->AIRCR; /* read old register configuration */ + reg_value &= ~((uint32_t)(SCB_AIRCR_VECTKEY_Msk | SCB_AIRCR_PRIGROUP_Msk)); /* clear bits to change */ + reg_value = (reg_value | + ((uint32_t)0x5FAUL << SCB_AIRCR_VECTKEY_Pos) | + (PriorityGroupTmp << SCB_AIRCR_PRIGROUP_Pos) ); /* Insert write key and priority group */ + SCB->AIRCR = reg_value; +} + + +/** + \brief Get Priority Grouping + \details Reads the priority grouping field from the NVIC Interrupt Controller. + \return Priority grouping field (SCB->AIRCR [10:8] PRIGROUP field). + */ +__STATIC_INLINE uint32_t __NVIC_GetPriorityGrouping(void) +{ + return ((uint32_t)((SCB->AIRCR & SCB_AIRCR_PRIGROUP_Msk) >> SCB_AIRCR_PRIGROUP_Pos)); +} + + +/** + \brief Enable Interrupt + \details Enables a device specific interrupt in the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_EnableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ISER[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Get Interrupt Enable status + \details Returns a device specific interrupt enable status from the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt is not enabled. 
+ \return 1 Interrupt is enabled. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t __NVIC_GetEnableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->ISER[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Disable Interrupt + \details Disables a device specific interrupt in the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_DisableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ICER[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + __DSB(); + __ISB(); + } +} + + +/** + \brief Get Pending Interrupt + \details Reads the NVIC pending register and returns the pending bit for the specified device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt status is not pending. + \return 1 Interrupt status is pending. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t __NVIC_GetPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->ISPR[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Set Pending Interrupt + \details Sets the pending bit of a device specific interrupt in the NVIC pending register. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_SetPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ISPR[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Clear Pending Interrupt + \details Clears the pending bit of a device specific interrupt in the NVIC pending register. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_ClearPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ICPR[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Get Active Interrupt + \details Reads the active register in the NVIC and returns the active bit for the device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt status is not active. + \return 1 Interrupt status is active. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t __NVIC_GetActive(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->IABR[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Set Interrupt Priority + \details Sets the priority of a device specific interrupt or a processor exception. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \param [in] priority Priority to set. + \note The priority cannot be set for every processor exception. 
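+ Illustrative sketch (editorial addition): a negative IRQn value addresses a processor exception,
+ a non-negative value a device interrupt.
+ \code
+ NVIC_SetPriority(SysTick_IRQn, (1UL << __NVIC_PRIO_BITS) - 1UL);  /* lowest priority for the SysTick exception */
+ \endcode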
+ */ +__STATIC_INLINE void __NVIC_SetPriority(IRQn_Type IRQn, uint32_t priority) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->IP[((uint32_t)IRQn)] = (uint8_t)((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL); + } + else + { + SCB->SHP[(((uint32_t)IRQn) & 0xFUL)-4UL] = (uint8_t)((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL); + } +} + + +/** + \brief Get Interrupt Priority + \details Reads the priority of a device specific interrupt or a processor exception. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \return Interrupt Priority. + Value is aligned automatically to the implemented priority bits of the microcontroller. + */ +__STATIC_INLINE uint32_t __NVIC_GetPriority(IRQn_Type IRQn) +{ + + if ((int32_t)(IRQn) >= 0) + { + return(((uint32_t)NVIC->IP[((uint32_t)IRQn)] >> (8U - __NVIC_PRIO_BITS))); + } + else + { + return(((uint32_t)SCB->SHP[(((uint32_t)IRQn) & 0xFUL)-4UL] >> (8U - __NVIC_PRIO_BITS))); + } +} + + +/** + \brief Encode Priority + \details Encodes the priority for an interrupt with the given priority group, + preemptive priority value, and subpriority value. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS), the smallest possible priority group is set. + \param [in] PriorityGroup Used priority group. + \param [in] PreemptPriority Preemptive priority value (starting from 0). + \param [in] SubPriority Subpriority value (starting from 0). + \return Encoded priority. Value can be used in the function \ref NVIC_SetPriority(). + */ +__STATIC_INLINE uint32_t NVIC_EncodePriority (uint32_t PriorityGroup, uint32_t PreemptPriority, uint32_t SubPriority) +{ + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + uint32_t PreemptPriorityBits; + uint32_t SubPriorityBits; + + PreemptPriorityBits = ((7UL - PriorityGroupTmp) > (uint32_t)(__NVIC_PRIO_BITS)) ? (uint32_t)(__NVIC_PRIO_BITS) : (uint32_t)(7UL - PriorityGroupTmp); + SubPriorityBits = ((PriorityGroupTmp + (uint32_t)(__NVIC_PRIO_BITS)) < (uint32_t)7UL) ? (uint32_t)0UL : (uint32_t)((PriorityGroupTmp - 7UL) + (uint32_t)(__NVIC_PRIO_BITS)); + + return ( + ((PreemptPriority & (uint32_t)((1UL << (PreemptPriorityBits)) - 1UL)) << SubPriorityBits) | + ((SubPriority & (uint32_t)((1UL << (SubPriorityBits )) - 1UL))) + ); +} + + +/** + \brief Decode Priority + \details Decodes an interrupt priority value with a given priority group to + preemptive priority value and subpriority value. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS) the smallest possible priority group is set. + \param [in] Priority Priority value, which can be retrieved with the function \ref NVIC_GetPriority(). + \param [in] PriorityGroup Used priority group. + \param [out] pPreemptPriority Preemptive priority value (starting from 0). + \param [out] pSubPriority Subpriority value (starting from 0). + */ +__STATIC_INLINE void NVIC_DecodePriority (uint32_t Priority, uint32_t PriorityGroup, uint32_t* const pPreemptPriority, uint32_t* const pSubPriority) +{ + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + uint32_t PreemptPriorityBits; + uint32_t SubPriorityBits; + + PreemptPriorityBits = ((7UL - PriorityGroupTmp) > (uint32_t)(__NVIC_PRIO_BITS)) ? 
(uint32_t)(__NVIC_PRIO_BITS) : (uint32_t)(7UL - PriorityGroupTmp); + SubPriorityBits = ((PriorityGroupTmp + (uint32_t)(__NVIC_PRIO_BITS)) < (uint32_t)7UL) ? (uint32_t)0UL : (uint32_t)((PriorityGroupTmp - 7UL) + (uint32_t)(__NVIC_PRIO_BITS)); + + *pPreemptPriority = (Priority >> SubPriorityBits) & (uint32_t)((1UL << (PreemptPriorityBits)) - 1UL); + *pSubPriority = (Priority ) & (uint32_t)((1UL << (SubPriorityBits )) - 1UL); +} + + +/** + \brief Set Interrupt Vector + \details Sets an interrupt vector in SRAM based interrupt vector table. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + VTOR must been relocated to SRAM before. + \param [in] IRQn Interrupt number + \param [in] vector Address of interrupt handler function + */ +__STATIC_INLINE void __NVIC_SetVector(IRQn_Type IRQn, uint32_t vector) +{ + uint32_t *vectors = (uint32_t *)SCB->VTOR; + vectors[(int32_t)IRQn + NVIC_USER_IRQ_OFFSET] = vector; +} + + +/** + \brief Get Interrupt Vector + \details Reads an interrupt vector from interrupt vector table. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \return Address of interrupt handler function + */ +__STATIC_INLINE uint32_t __NVIC_GetVector(IRQn_Type IRQn) +{ + uint32_t *vectors = (uint32_t *)SCB->VTOR; + return vectors[(int32_t)IRQn + NVIC_USER_IRQ_OFFSET]; +} + + +/** + \brief System Reset + \details Initiates a system reset request to reset the MCU. + */ +__NO_RETURN __STATIC_INLINE void __NVIC_SystemReset(void) +{ + __DSB(); /* Ensure all outstanding memory accesses included + buffered write are completed before reset */ + SCB->AIRCR = (uint32_t)((0x5FAUL << SCB_AIRCR_VECTKEY_Pos) | + (SCB->AIRCR & SCB_AIRCR_PRIGROUP_Msk) | + SCB_AIRCR_SYSRESETREQ_Msk ); /* Keep priority group unchanged */ + __DSB(); /* Ensure completion of memory access */ + + for(;;) /* wait until reset */ + { + __NOP(); + } +} + +/*@} end of CMSIS_Core_NVICFunctions */ + +/* ########################## MPU functions #################################### */ + +#if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) + +#include "mpu_armv7.h" + +#endif + +/* ########################## FPU functions #################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_FpuFunctions FPU Functions + \brief Function that provides FPU type. + @{ + */ + +/** + \brief get FPU type + \details returns the FPU type + \returns + - \b 0: No FPU + - \b 1: Single precision FPU + - \b 2: Double + Single precision FPU + */ +__STATIC_INLINE uint32_t SCB_GetFPUType(void) +{ + return 0U; /* No FPU */ +} + + +/*@} end of CMSIS_Core_FpuFunctions */ + + + +/* ################################## SysTick function ############################################ */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_SysTickFunctions SysTick Functions + \brief Functions that configure the System. + @{ + */ + +#if defined (__Vendor_SysTickConfig) && (__Vendor_SysTickConfig == 0U) + +/** + \brief System Tick Configuration + \details Initializes the System Timer and its interrupt, and starts the System Tick Timer. + Counter is in free running mode to generate periodic interrupts. + \param [in] ticks Number of ticks between two interrupts. + \return 0 Function succeeded. + \return 1 Function failed. 
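+ Usage sketch (editorial addition): request a 1 ms tick. SystemCoreClock is the usual CMSIS system
+ clock variable and is assumed to be up to date here.
+ \code
+ if (SysTick_Config(SystemCoreClock / 1000U) != 0UL)   /* 1 ms tick derived from the core clock */
+ {
+   /* reload value does not fit in the 24-bit SysTick counter: handle the error */
+ }
+ \endcode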
+ \note When the variable __Vendor_SysTickConfig is set to 1, then the + function SysTick_Config is not included. In this case, the file device.h + must contain a vendor-specific implementation of this function. + */ +__STATIC_INLINE uint32_t SysTick_Config(uint32_t ticks) +{ + if ((ticks - 1UL) > SysTick_LOAD_RELOAD_Msk) + { + return (1UL); /* Reload value impossible */ + } + + SysTick->LOAD = (uint32_t)(ticks - 1UL); /* set reload register */ + NVIC_SetPriority (SysTick_IRQn, (1UL << __NVIC_PRIO_BITS) - 1UL); /* set Priority for Systick Interrupt */ + SysTick->VAL = 0UL; /* Load the SysTick Counter Value */ + SysTick->CTRL = SysTick_CTRL_CLKSOURCE_Msk | + SysTick_CTRL_TICKINT_Msk | + SysTick_CTRL_ENABLE_Msk; /* Enable SysTick IRQ and SysTick Timer */ + return (0UL); /* Function successful */ +} + +#endif + +/*@} end of CMSIS_Core_SysTickFunctions */ + + + +/* ##################################### Debug In/Output function ########################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_core_DebugFunctions ITM Functions + \brief Functions that access the ITM debug interface. + @{ + */ + +extern volatile int32_t ITM_RxBuffer; /*!< External variable to receive characters. */ +#define ITM_RXBUFFER_EMPTY ((int32_t)0x5AA55AA5U) /*!< Value identifying \ref ITM_RxBuffer is ready for next character. */ + + +/** + \brief ITM Send Character + \details Transmits a character via the ITM channel 0, and + \li Just returns when no debugger is connected that has booked the output. + \li Is blocking when a debugger is connected, but the previous character sent has not been transmitted. + \param [in] ch Character to transmit. + \returns Character to transmit. + */ +__STATIC_INLINE uint32_t ITM_SendChar (uint32_t ch) +{ + if (((ITM->TCR & ITM_TCR_ITMENA_Msk) != 0UL) && /* ITM enabled */ + ((ITM->TER & 1UL ) != 0UL) ) /* ITM Port #0 enabled */ + { + while (ITM->PORT[0U].u32 == 0UL) + { + __NOP(); + } + ITM->PORT[0U].u8 = (uint8_t)ch; + } + return (ch); +} + + +/** + \brief ITM Receive Character + \details Inputs a character via the external variable \ref ITM_RxBuffer. + \return Received character. + \return -1 No character pending. + */ +__STATIC_INLINE int32_t ITM_ReceiveChar (void) +{ + int32_t ch = -1; /* no character available */ + + if (ITM_RxBuffer != ITM_RXBUFFER_EMPTY) + { + ch = ITM_RxBuffer; + ITM_RxBuffer = ITM_RXBUFFER_EMPTY; /* ready for next character */ + } + + return (ch); +} + + +/** + \brief ITM Check Character + \details Checks whether a character is pending for reading in the variable \ref ITM_RxBuffer. + \return 0 No character available. + \return 1 Character available. 
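+ Usage sketch (editorial addition): poll the debugger channel and echo any pending character.
+ \code
+ if (ITM_CheckChar() != 0)             /* character pending in ITM_RxBuffer?      */
+ {
+   int32_t c = ITM_ReceiveChar();      /* read it and mark the buffer ready again */
+   (void)ITM_SendChar((uint32_t)c);    /* echo it back on ITM stimulus port 0     */
+ }
+ \endcode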
+ */ +__STATIC_INLINE int32_t ITM_CheckChar (void) +{ + + if (ITM_RxBuffer == ITM_RXBUFFER_EMPTY) + { + return (0); /* no character available */ + } + else + { + return (1); /* character available */ + } +} + +/*@} end of CMSIS_core_DebugFunctions */ + + + + +#ifdef __cplusplus +} +#endif + +#endif /* __CORE_CM3_H_DEPENDANT */ + +#endif /* __CMSIS_GENERIC */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Drivers/CMSIS/Include/core_cm33.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Drivers/CMSIS/Include/core_cm33.h new file mode 100644 index 0000000..02f82e2 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Drivers/CMSIS/Include/core_cm33.h @@ -0,0 +1,3002 @@ +/**************************************************************************//** + * @file core_cm33.h + * @brief CMSIS Cortex-M33 Core Peripheral Access Layer Header File + * @version V5.0.9 + * @date 06. July 2018 + ******************************************************************************/ +/* + * Copyright (c) 2009-2018 Arm Limited. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#if defined ( __ICCARM__ ) + #pragma system_include /* treat file as system include file for MISRA check */ +#elif defined (__clang__) + #pragma clang system_header /* treat file as system include file */ +#endif + +#ifndef __CORE_CM33_H_GENERIC +#define __CORE_CM33_H_GENERIC + +#include + +#ifdef __cplusplus + extern "C" { +#endif + +/** + \page CMSIS_MISRA_Exceptions MISRA-C:2004 Compliance Exceptions + CMSIS violates the following MISRA-C:2004 rules: + + \li Required Rule 8.5, object/function definition in header file.
+ Function definitions in header files are used to allow 'inlining'. + + \li Required Rule 18.4, declaration of union type or object of union type: '{...}'.
+ Unions are used for effective representation of core registers. + + \li Advisory Rule 19.7, Function-like macro defined.
+ Function-like macros are used to allow more efficient code. + */ + + +/******************************************************************************* + * CMSIS definitions + ******************************************************************************/ +/** + \ingroup Cortex_M33 + @{ + */ + +#include "cmsis_version.h" + +/* CMSIS CM33 definitions */ +#define __CM33_CMSIS_VERSION_MAIN (__CM_CMSIS_VERSION_MAIN) /*!< \deprecated [31:16] CMSIS HAL main version */ +#define __CM33_CMSIS_VERSION_SUB (__CM_CMSIS_VERSION_SUB) /*!< \deprecated [15:0] CMSIS HAL sub version */ +#define __CM33_CMSIS_VERSION ((__CM33_CMSIS_VERSION_MAIN << 16U) | \ + __CM33_CMSIS_VERSION_SUB ) /*!< \deprecated CMSIS HAL version number */ + +#define __CORTEX_M (33U) /*!< Cortex-M Core */ + +/** __FPU_USED indicates whether an FPU is used or not. + For this, __FPU_PRESENT has to be checked prior to making use of FPU specific registers and functions. +*/ +#if defined ( __CC_ARM ) + #if defined (__TARGET_FPU_VFP) + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + + #if defined (__ARM_FEATURE_DSP) && (__ARM_FEATURE_DSP == 1U) + #if defined (__DSP_PRESENT) && (__DSP_PRESENT == 1U) + #define __DSP_USED 1U + #else + #error "Compiler generates DSP (SIMD) instructions for a devices without DSP extensions (check __DSP_PRESENT)" + #define __DSP_USED 0U + #endif + #else + #define __DSP_USED 0U + #endif + +#elif defined (__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050) + #if defined (__ARM_PCS_VFP) + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #warning "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + + #if defined (__ARM_FEATURE_DSP) && (__ARM_FEATURE_DSP == 1U) + #if defined (__DSP_PRESENT) && (__DSP_PRESENT == 1U) + #define __DSP_USED 1U + #else + #error "Compiler generates DSP (SIMD) instructions for a devices without DSP extensions (check __DSP_PRESENT)" + #define __DSP_USED 0U + #endif + #else + #define __DSP_USED 0U + #endif + +#elif defined ( __GNUC__ ) + #if defined (__VFP_FP__) && !defined(__SOFTFP__) + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + + #if defined (__ARM_FEATURE_DSP) && (__ARM_FEATURE_DSP == 1U) + #if defined (__DSP_PRESENT) && (__DSP_PRESENT == 1U) + #define __DSP_USED 1U + #else + #error "Compiler generates DSP (SIMD) instructions for a devices without DSP extensions (check __DSP_PRESENT)" + #define __DSP_USED 0U + #endif + #else + #define __DSP_USED 0U + #endif + +#elif defined ( __ICCARM__ ) + #if defined (__ARMVFP__) + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + + #if defined (__ARM_FEATURE_DSP) && (__ARM_FEATURE_DSP == 1U) + #if defined (__DSP_PRESENT) && (__DSP_PRESENT == 1U) + #define __DSP_USED 1U + #else + #error "Compiler generates DSP (SIMD) instructions for a devices without DSP extensions (check __DSP_PRESENT)" + 
#define __DSP_USED 0U + #endif + #else + #define __DSP_USED 0U + #endif + +#elif defined ( __TI_ARM__ ) + #if defined (__TI_VFP_SUPPORT__) + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + +#elif defined ( __TASKING__ ) + #if defined (__FPU_VFP__) + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + +#elif defined ( __CSMC__ ) + #if ( __CSMC__ & 0x400U) + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + +#endif + +#include "cmsis_compiler.h" /* CMSIS compiler specific defines */ + + +#ifdef __cplusplus +} +#endif + +#endif /* __CORE_CM33_H_GENERIC */ + +#ifndef __CMSIS_GENERIC + +#ifndef __CORE_CM33_H_DEPENDANT +#define __CORE_CM33_H_DEPENDANT + +#ifdef __cplusplus + extern "C" { +#endif + +/* check device defines and use defaults */ +#if defined __CHECK_DEVICE_DEFINES + #ifndef __CM33_REV + #define __CM33_REV 0x0000U + #warning "__CM33_REV not defined in device header file; using default!" + #endif + + #ifndef __FPU_PRESENT + #define __FPU_PRESENT 0U + #warning "__FPU_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __MPU_PRESENT + #define __MPU_PRESENT 0U + #warning "__MPU_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __SAUREGION_PRESENT + #define __SAUREGION_PRESENT 0U + #warning "__SAUREGION_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __DSP_PRESENT + #define __DSP_PRESENT 0U + #warning "__DSP_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __NVIC_PRIO_BITS + #define __NVIC_PRIO_BITS 3U + #warning "__NVIC_PRIO_BITS not defined in device header file; using default!" + #endif + + #ifndef __Vendor_SysTickConfig + #define __Vendor_SysTickConfig 0U + #warning "__Vendor_SysTickConfig not defined in device header file; using default!" + #endif +#endif + +/* IO definitions (access restrictions to peripheral registers) */ +/** + \defgroup CMSIS_glob_defs CMSIS Global Defines + + IO Type Qualifiers are used + \li to specify the access to peripheral variables. + \li for automatic generation of peripheral register debug information. +*/ +#ifdef __cplusplus + #define __I volatile /*!< Defines 'read only' permissions */ +#else + #define __I volatile const /*!< Defines 'read only' permissions */ +#endif +#define __O volatile /*!< Defines 'write only' permissions */ +#define __IO volatile /*!< Defines 'read / write' permissions */ + +/* following defines should be used for structure members */ +#define __IM volatile const /*! Defines 'read only' structure member permissions */ +#define __OM volatile /*! Defines 'write only' structure member permissions */ +#define __IOM volatile /*! 
Defines 'read / write' structure member permissions */ + +/*@} end of group Cortex_M33 */ + + + +/******************************************************************************* + * Register Abstraction + Core Register contain: + - Core Register + - Core NVIC Register + - Core SCB Register + - Core SysTick Register + - Core Debug Register + - Core MPU Register + - Core SAU Register + - Core FPU Register + ******************************************************************************/ +/** + \defgroup CMSIS_core_register Defines and Type Definitions + \brief Type definitions and defines for Cortex-M processor based devices. +*/ + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_CORE Status and Control Registers + \brief Core Register type definitions. + @{ + */ + +/** + \brief Union type to access the Application Program Status Register (APSR). + */ +typedef union +{ + struct + { + uint32_t _reserved0:16; /*!< bit: 0..15 Reserved */ + uint32_t GE:4; /*!< bit: 16..19 Greater than or Equal flags */ + uint32_t _reserved1:7; /*!< bit: 20..26 Reserved */ + uint32_t Q:1; /*!< bit: 27 Saturation condition flag */ + uint32_t V:1; /*!< bit: 28 Overflow condition code flag */ + uint32_t C:1; /*!< bit: 29 Carry condition code flag */ + uint32_t Z:1; /*!< bit: 30 Zero condition code flag */ + uint32_t N:1; /*!< bit: 31 Negative condition code flag */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} APSR_Type; + +/* APSR Register Definitions */ +#define APSR_N_Pos 31U /*!< APSR: N Position */ +#define APSR_N_Msk (1UL << APSR_N_Pos) /*!< APSR: N Mask */ + +#define APSR_Z_Pos 30U /*!< APSR: Z Position */ +#define APSR_Z_Msk (1UL << APSR_Z_Pos) /*!< APSR: Z Mask */ + +#define APSR_C_Pos 29U /*!< APSR: C Position */ +#define APSR_C_Msk (1UL << APSR_C_Pos) /*!< APSR: C Mask */ + +#define APSR_V_Pos 28U /*!< APSR: V Position */ +#define APSR_V_Msk (1UL << APSR_V_Pos) /*!< APSR: V Mask */ + +#define APSR_Q_Pos 27U /*!< APSR: Q Position */ +#define APSR_Q_Msk (1UL << APSR_Q_Pos) /*!< APSR: Q Mask */ + +#define APSR_GE_Pos 16U /*!< APSR: GE Position */ +#define APSR_GE_Msk (0xFUL << APSR_GE_Pos) /*!< APSR: GE Mask */ + + +/** + \brief Union type to access the Interrupt Program Status Register (IPSR). + */ +typedef union +{ + struct + { + uint32_t ISR:9; /*!< bit: 0.. 8 Exception number */ + uint32_t _reserved0:23; /*!< bit: 9..31 Reserved */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} IPSR_Type; + +/* IPSR Register Definitions */ +#define IPSR_ISR_Pos 0U /*!< IPSR: ISR Position */ +#define IPSR_ISR_Msk (0x1FFUL /*<< IPSR_ISR_Pos*/) /*!< IPSR: ISR Mask */ + + +/** + \brief Union type to access the Special-Purpose Program Status Registers (xPSR). + */ +typedef union +{ + struct + { + uint32_t ISR:9; /*!< bit: 0.. 
8 Exception number */ + uint32_t _reserved0:7; /*!< bit: 9..15 Reserved */ + uint32_t GE:4; /*!< bit: 16..19 Greater than or Equal flags */ + uint32_t _reserved1:4; /*!< bit: 20..23 Reserved */ + uint32_t T:1; /*!< bit: 24 Thumb bit (read 0) */ + uint32_t IT:2; /*!< bit: 25..26 saved IT state (read 0) */ + uint32_t Q:1; /*!< bit: 27 Saturation condition flag */ + uint32_t V:1; /*!< bit: 28 Overflow condition code flag */ + uint32_t C:1; /*!< bit: 29 Carry condition code flag */ + uint32_t Z:1; /*!< bit: 30 Zero condition code flag */ + uint32_t N:1; /*!< bit: 31 Negative condition code flag */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} xPSR_Type; + +/* xPSR Register Definitions */ +#define xPSR_N_Pos 31U /*!< xPSR: N Position */ +#define xPSR_N_Msk (1UL << xPSR_N_Pos) /*!< xPSR: N Mask */ + +#define xPSR_Z_Pos 30U /*!< xPSR: Z Position */ +#define xPSR_Z_Msk (1UL << xPSR_Z_Pos) /*!< xPSR: Z Mask */ + +#define xPSR_C_Pos 29U /*!< xPSR: C Position */ +#define xPSR_C_Msk (1UL << xPSR_C_Pos) /*!< xPSR: C Mask */ + +#define xPSR_V_Pos 28U /*!< xPSR: V Position */ +#define xPSR_V_Msk (1UL << xPSR_V_Pos) /*!< xPSR: V Mask */ + +#define xPSR_Q_Pos 27U /*!< xPSR: Q Position */ +#define xPSR_Q_Msk (1UL << xPSR_Q_Pos) /*!< xPSR: Q Mask */ + +#define xPSR_IT_Pos 25U /*!< xPSR: IT Position */ +#define xPSR_IT_Msk (3UL << xPSR_IT_Pos) /*!< xPSR: IT Mask */ + +#define xPSR_T_Pos 24U /*!< xPSR: T Position */ +#define xPSR_T_Msk (1UL << xPSR_T_Pos) /*!< xPSR: T Mask */ + +#define xPSR_GE_Pos 16U /*!< xPSR: GE Position */ +#define xPSR_GE_Msk (0xFUL << xPSR_GE_Pos) /*!< xPSR: GE Mask */ + +#define xPSR_ISR_Pos 0U /*!< xPSR: ISR Position */ +#define xPSR_ISR_Msk (0x1FFUL /*<< xPSR_ISR_Pos*/) /*!< xPSR: ISR Mask */ + + +/** + \brief Union type to access the Control Registers (CONTROL). + */ +typedef union +{ + struct + { + uint32_t nPRIV:1; /*!< bit: 0 Execution privilege in Thread mode */ + uint32_t SPSEL:1; /*!< bit: 1 Stack-pointer select */ + uint32_t FPCA:1; /*!< bit: 2 Floating-point context active */ + uint32_t SFPA:1; /*!< bit: 3 Secure floating-point active */ + uint32_t _reserved1:28; /*!< bit: 4..31 Reserved */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} CONTROL_Type; + +/* CONTROL Register Definitions */ +#define CONTROL_SFPA_Pos 3U /*!< CONTROL: SFPA Position */ +#define CONTROL_SFPA_Msk (1UL << CONTROL_SFPA_Pos) /*!< CONTROL: SFPA Mask */ + +#define CONTROL_FPCA_Pos 2U /*!< CONTROL: FPCA Position */ +#define CONTROL_FPCA_Msk (1UL << CONTROL_FPCA_Pos) /*!< CONTROL: FPCA Mask */ + +#define CONTROL_SPSEL_Pos 1U /*!< CONTROL: SPSEL Position */ +#define CONTROL_SPSEL_Msk (1UL << CONTROL_SPSEL_Pos) /*!< CONTROL: SPSEL Mask */ + +#define CONTROL_nPRIV_Pos 0U /*!< CONTROL: nPRIV Position */ +#define CONTROL_nPRIV_Msk (1UL /*<< CONTROL_nPRIV_Pos*/) /*!< CONTROL: nPRIV Mask */ + +/*@} end of group CMSIS_CORE */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_NVIC Nested Vectored Interrupt Controller (NVIC) + \brief Type definitions for the NVIC Registers + @{ + */ + +/** + \brief Structure type to access the Nested Vectored Interrupt Controller (NVIC). 
+ */ +typedef struct +{ + __IOM uint32_t ISER[16U]; /*!< Offset: 0x000 (R/W) Interrupt Set Enable Register */ + uint32_t RESERVED0[16U]; + __IOM uint32_t ICER[16U]; /*!< Offset: 0x080 (R/W) Interrupt Clear Enable Register */ + uint32_t RSERVED1[16U]; + __IOM uint32_t ISPR[16U]; /*!< Offset: 0x100 (R/W) Interrupt Set Pending Register */ + uint32_t RESERVED2[16U]; + __IOM uint32_t ICPR[16U]; /*!< Offset: 0x180 (R/W) Interrupt Clear Pending Register */ + uint32_t RESERVED3[16U]; + __IOM uint32_t IABR[16U]; /*!< Offset: 0x200 (R/W) Interrupt Active bit Register */ + uint32_t RESERVED4[16U]; + __IOM uint32_t ITNS[16U]; /*!< Offset: 0x280 (R/W) Interrupt Non-Secure State Register */ + uint32_t RESERVED5[16U]; + __IOM uint8_t IPR[496U]; /*!< Offset: 0x300 (R/W) Interrupt Priority Register (8Bit wide) */ + uint32_t RESERVED6[580U]; + __OM uint32_t STIR; /*!< Offset: 0xE00 ( /W) Software Trigger Interrupt Register */ +} NVIC_Type; + +/* Software Triggered Interrupt Register Definitions */ +#define NVIC_STIR_INTID_Pos 0U /*!< STIR: INTLINESNUM Position */ +#define NVIC_STIR_INTID_Msk (0x1FFUL /*<< NVIC_STIR_INTID_Pos*/) /*!< STIR: INTLINESNUM Mask */ + +/*@} end of group CMSIS_NVIC */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SCB System Control Block (SCB) + \brief Type definitions for the System Control Block Registers + @{ + */ + +/** + \brief Structure type to access the System Control Block (SCB). + */ +typedef struct +{ + __IM uint32_t CPUID; /*!< Offset: 0x000 (R/ ) CPUID Base Register */ + __IOM uint32_t ICSR; /*!< Offset: 0x004 (R/W) Interrupt Control and State Register */ + __IOM uint32_t VTOR; /*!< Offset: 0x008 (R/W) Vector Table Offset Register */ + __IOM uint32_t AIRCR; /*!< Offset: 0x00C (R/W) Application Interrupt and Reset Control Register */ + __IOM uint32_t SCR; /*!< Offset: 0x010 (R/W) System Control Register */ + __IOM uint32_t CCR; /*!< Offset: 0x014 (R/W) Configuration Control Register */ + __IOM uint8_t SHPR[12U]; /*!< Offset: 0x018 (R/W) System Handlers Priority Registers (4-7, 8-11, 12-15) */ + __IOM uint32_t SHCSR; /*!< Offset: 0x024 (R/W) System Handler Control and State Register */ + __IOM uint32_t CFSR; /*!< Offset: 0x028 (R/W) Configurable Fault Status Register */ + __IOM uint32_t HFSR; /*!< Offset: 0x02C (R/W) HardFault Status Register */ + __IOM uint32_t DFSR; /*!< Offset: 0x030 (R/W) Debug Fault Status Register */ + __IOM uint32_t MMFAR; /*!< Offset: 0x034 (R/W) MemManage Fault Address Register */ + __IOM uint32_t BFAR; /*!< Offset: 0x038 (R/W) BusFault Address Register */ + __IOM uint32_t AFSR; /*!< Offset: 0x03C (R/W) Auxiliary Fault Status Register */ + __IM uint32_t ID_PFR[2U]; /*!< Offset: 0x040 (R/ ) Processor Feature Register */ + __IM uint32_t ID_DFR; /*!< Offset: 0x048 (R/ ) Debug Feature Register */ + __IM uint32_t ID_ADR; /*!< Offset: 0x04C (R/ ) Auxiliary Feature Register */ + __IM uint32_t ID_MMFR[4U]; /*!< Offset: 0x050 (R/ ) Memory Model Feature Register */ + __IM uint32_t ID_ISAR[6U]; /*!< Offset: 0x060 (R/ ) Instruction Set Attributes Register */ + __IM uint32_t CLIDR; /*!< Offset: 0x078 (R/ ) Cache Level ID register */ + __IM uint32_t CTR; /*!< Offset: 0x07C (R/ ) Cache Type register */ + __IM uint32_t CCSIDR; /*!< Offset: 0x080 (R/ ) Cache Size ID Register */ + __IOM uint32_t CSSELR; /*!< Offset: 0x084 (R/W) Cache Size Selection Register */ + __IOM uint32_t CPACR; /*!< Offset: 0x088 (R/W) Coprocessor Access Control Register */ + __IOM uint32_t NSACR; /*!< Offset: 0x08C (R/W) Non-Secure Access Control Register */ + uint32_t 
RESERVED3[92U]; + __OM uint32_t STIR; /*!< Offset: 0x200 ( /W) Software Triggered Interrupt Register */ + uint32_t RESERVED4[15U]; + __IM uint32_t MVFR0; /*!< Offset: 0x240 (R/ ) Media and VFP Feature Register 0 */ + __IM uint32_t MVFR1; /*!< Offset: 0x244 (R/ ) Media and VFP Feature Register 1 */ + __IM uint32_t MVFR2; /*!< Offset: 0x248 (R/ ) Media and VFP Feature Register 2 */ + uint32_t RESERVED5[1U]; + __OM uint32_t ICIALLU; /*!< Offset: 0x250 ( /W) I-Cache Invalidate All to PoU */ + uint32_t RESERVED6[1U]; + __OM uint32_t ICIMVAU; /*!< Offset: 0x258 ( /W) I-Cache Invalidate by MVA to PoU */ + __OM uint32_t DCIMVAC; /*!< Offset: 0x25C ( /W) D-Cache Invalidate by MVA to PoC */ + __OM uint32_t DCISW; /*!< Offset: 0x260 ( /W) D-Cache Invalidate by Set-way */ + __OM uint32_t DCCMVAU; /*!< Offset: 0x264 ( /W) D-Cache Clean by MVA to PoU */ + __OM uint32_t DCCMVAC; /*!< Offset: 0x268 ( /W) D-Cache Clean by MVA to PoC */ + __OM uint32_t DCCSW; /*!< Offset: 0x26C ( /W) D-Cache Clean by Set-way */ + __OM uint32_t DCCIMVAC; /*!< Offset: 0x270 ( /W) D-Cache Clean and Invalidate by MVA to PoC */ + __OM uint32_t DCCISW; /*!< Offset: 0x274 ( /W) D-Cache Clean and Invalidate by Set-way */ + uint32_t RESERVED7[6U]; + __IOM uint32_t ITCMCR; /*!< Offset: 0x290 (R/W) Instruction Tightly-Coupled Memory Control Register */ + __IOM uint32_t DTCMCR; /*!< Offset: 0x294 (R/W) Data Tightly-Coupled Memory Control Registers */ + __IOM uint32_t AHBPCR; /*!< Offset: 0x298 (R/W) AHBP Control Register */ + __IOM uint32_t CACR; /*!< Offset: 0x29C (R/W) L1 Cache Control Register */ + __IOM uint32_t AHBSCR; /*!< Offset: 0x2A0 (R/W) AHB Slave Control Register */ + uint32_t RESERVED8[1U]; + __IOM uint32_t ABFSR; /*!< Offset: 0x2A8 (R/W) Auxiliary Bus Fault Status Register */ +} SCB_Type; + +/* SCB CPUID Register Definitions */ +#define SCB_CPUID_IMPLEMENTER_Pos 24U /*!< SCB CPUID: IMPLEMENTER Position */ +#define SCB_CPUID_IMPLEMENTER_Msk (0xFFUL << SCB_CPUID_IMPLEMENTER_Pos) /*!< SCB CPUID: IMPLEMENTER Mask */ + +#define SCB_CPUID_VARIANT_Pos 20U /*!< SCB CPUID: VARIANT Position */ +#define SCB_CPUID_VARIANT_Msk (0xFUL << SCB_CPUID_VARIANT_Pos) /*!< SCB CPUID: VARIANT Mask */ + +#define SCB_CPUID_ARCHITECTURE_Pos 16U /*!< SCB CPUID: ARCHITECTURE Position */ +#define SCB_CPUID_ARCHITECTURE_Msk (0xFUL << SCB_CPUID_ARCHITECTURE_Pos) /*!< SCB CPUID: ARCHITECTURE Mask */ + +#define SCB_CPUID_PARTNO_Pos 4U /*!< SCB CPUID: PARTNO Position */ +#define SCB_CPUID_PARTNO_Msk (0xFFFUL << SCB_CPUID_PARTNO_Pos) /*!< SCB CPUID: PARTNO Mask */ + +#define SCB_CPUID_REVISION_Pos 0U /*!< SCB CPUID: REVISION Position */ +#define SCB_CPUID_REVISION_Msk (0xFUL /*<< SCB_CPUID_REVISION_Pos*/) /*!< SCB CPUID: REVISION Mask */ + +/* SCB Interrupt Control State Register Definitions */ +#define SCB_ICSR_PENDNMISET_Pos 31U /*!< SCB ICSR: PENDNMISET Position */ +#define SCB_ICSR_PENDNMISET_Msk (1UL << SCB_ICSR_PENDNMISET_Pos) /*!< SCB ICSR: PENDNMISET Mask */ + +#define SCB_ICSR_NMIPENDSET_Pos SCB_ICSR_PENDNMISET_Pos /*!< SCB ICSR: NMIPENDSET Position, backward compatibility */ +#define SCB_ICSR_NMIPENDSET_Msk SCB_ICSR_PENDNMISET_Msk /*!< SCB ICSR: NMIPENDSET Mask, backward compatibility */ + +#define SCB_ICSR_PENDNMICLR_Pos 30U /*!< SCB ICSR: PENDNMICLR Position */ +#define SCB_ICSR_PENDNMICLR_Msk (1UL << SCB_ICSR_PENDNMICLR_Pos) /*!< SCB ICSR: PENDNMICLR Mask */ + +#define SCB_ICSR_PENDSVSET_Pos 28U /*!< SCB ICSR: PENDSVSET Position */ +#define SCB_ICSR_PENDSVSET_Msk (1UL << SCB_ICSR_PENDSVSET_Pos) /*!< SCB ICSR: PENDSVSET Mask */ + +#define 
SCB_ICSR_PENDSVCLR_Pos 27U /*!< SCB ICSR: PENDSVCLR Position */ +#define SCB_ICSR_PENDSVCLR_Msk (1UL << SCB_ICSR_PENDSVCLR_Pos) /*!< SCB ICSR: PENDSVCLR Mask */ + +#define SCB_ICSR_PENDSTSET_Pos 26U /*!< SCB ICSR: PENDSTSET Position */ +#define SCB_ICSR_PENDSTSET_Msk (1UL << SCB_ICSR_PENDSTSET_Pos) /*!< SCB ICSR: PENDSTSET Mask */ + +#define SCB_ICSR_PENDSTCLR_Pos 25U /*!< SCB ICSR: PENDSTCLR Position */ +#define SCB_ICSR_PENDSTCLR_Msk (1UL << SCB_ICSR_PENDSTCLR_Pos) /*!< SCB ICSR: PENDSTCLR Mask */ + +#define SCB_ICSR_STTNS_Pos 24U /*!< SCB ICSR: STTNS Position (Security Extension) */ +#define SCB_ICSR_STTNS_Msk (1UL << SCB_ICSR_STTNS_Pos) /*!< SCB ICSR: STTNS Mask (Security Extension) */ + +#define SCB_ICSR_ISRPREEMPT_Pos 23U /*!< SCB ICSR: ISRPREEMPT Position */ +#define SCB_ICSR_ISRPREEMPT_Msk (1UL << SCB_ICSR_ISRPREEMPT_Pos) /*!< SCB ICSR: ISRPREEMPT Mask */ + +#define SCB_ICSR_ISRPENDING_Pos 22U /*!< SCB ICSR: ISRPENDING Position */ +#define SCB_ICSR_ISRPENDING_Msk (1UL << SCB_ICSR_ISRPENDING_Pos) /*!< SCB ICSR: ISRPENDING Mask */ + +#define SCB_ICSR_VECTPENDING_Pos 12U /*!< SCB ICSR: VECTPENDING Position */ +#define SCB_ICSR_VECTPENDING_Msk (0x1FFUL << SCB_ICSR_VECTPENDING_Pos) /*!< SCB ICSR: VECTPENDING Mask */ + +#define SCB_ICSR_RETTOBASE_Pos 11U /*!< SCB ICSR: RETTOBASE Position */ +#define SCB_ICSR_RETTOBASE_Msk (1UL << SCB_ICSR_RETTOBASE_Pos) /*!< SCB ICSR: RETTOBASE Mask */ + +#define SCB_ICSR_VECTACTIVE_Pos 0U /*!< SCB ICSR: VECTACTIVE Position */ +#define SCB_ICSR_VECTACTIVE_Msk (0x1FFUL /*<< SCB_ICSR_VECTACTIVE_Pos*/) /*!< SCB ICSR: VECTACTIVE Mask */ + +/* SCB Vector Table Offset Register Definitions */ +#define SCB_VTOR_TBLOFF_Pos 7U /*!< SCB VTOR: TBLOFF Position */ +#define SCB_VTOR_TBLOFF_Msk (0x1FFFFFFUL << SCB_VTOR_TBLOFF_Pos) /*!< SCB VTOR: TBLOFF Mask */ + +/* SCB Application Interrupt and Reset Control Register Definitions */ +#define SCB_AIRCR_VECTKEY_Pos 16U /*!< SCB AIRCR: VECTKEY Position */ +#define SCB_AIRCR_VECTKEY_Msk (0xFFFFUL << SCB_AIRCR_VECTKEY_Pos) /*!< SCB AIRCR: VECTKEY Mask */ + +#define SCB_AIRCR_VECTKEYSTAT_Pos 16U /*!< SCB AIRCR: VECTKEYSTAT Position */ +#define SCB_AIRCR_VECTKEYSTAT_Msk (0xFFFFUL << SCB_AIRCR_VECTKEYSTAT_Pos) /*!< SCB AIRCR: VECTKEYSTAT Mask */ + +#define SCB_AIRCR_ENDIANESS_Pos 15U /*!< SCB AIRCR: ENDIANESS Position */ +#define SCB_AIRCR_ENDIANESS_Msk (1UL << SCB_AIRCR_ENDIANESS_Pos) /*!< SCB AIRCR: ENDIANESS Mask */ + +#define SCB_AIRCR_PRIS_Pos 14U /*!< SCB AIRCR: PRIS Position */ +#define SCB_AIRCR_PRIS_Msk (1UL << SCB_AIRCR_PRIS_Pos) /*!< SCB AIRCR: PRIS Mask */ + +#define SCB_AIRCR_BFHFNMINS_Pos 13U /*!< SCB AIRCR: BFHFNMINS Position */ +#define SCB_AIRCR_BFHFNMINS_Msk (1UL << SCB_AIRCR_BFHFNMINS_Pos) /*!< SCB AIRCR: BFHFNMINS Mask */ + +#define SCB_AIRCR_PRIGROUP_Pos 8U /*!< SCB AIRCR: PRIGROUP Position */ +#define SCB_AIRCR_PRIGROUP_Msk (7UL << SCB_AIRCR_PRIGROUP_Pos) /*!< SCB AIRCR: PRIGROUP Mask */ + +#define SCB_AIRCR_SYSRESETREQS_Pos 3U /*!< SCB AIRCR: SYSRESETREQS Position */ +#define SCB_AIRCR_SYSRESETREQS_Msk (1UL << SCB_AIRCR_SYSRESETREQS_Pos) /*!< SCB AIRCR: SYSRESETREQS Mask */ + +#define SCB_AIRCR_SYSRESETREQ_Pos 2U /*!< SCB AIRCR: SYSRESETREQ Position */ +#define SCB_AIRCR_SYSRESETREQ_Msk (1UL << SCB_AIRCR_SYSRESETREQ_Pos) /*!< SCB AIRCR: SYSRESETREQ Mask */ + +#define SCB_AIRCR_VECTCLRACTIVE_Pos 1U /*!< SCB AIRCR: VECTCLRACTIVE Position */ +#define SCB_AIRCR_VECTCLRACTIVE_Msk (1UL << SCB_AIRCR_VECTCLRACTIVE_Pos) /*!< SCB AIRCR: VECTCLRACTIVE Mask */ + +/* SCB System Control Register Definitions */ +#define 
SCB_SCR_SEVONPEND_Pos 4U /*!< SCB SCR: SEVONPEND Position */ +#define SCB_SCR_SEVONPEND_Msk (1UL << SCB_SCR_SEVONPEND_Pos) /*!< SCB SCR: SEVONPEND Mask */ + +#define SCB_SCR_SLEEPDEEPS_Pos 3U /*!< SCB SCR: SLEEPDEEPS Position */ +#define SCB_SCR_SLEEPDEEPS_Msk (1UL << SCB_SCR_SLEEPDEEPS_Pos) /*!< SCB SCR: SLEEPDEEPS Mask */ + +#define SCB_SCR_SLEEPDEEP_Pos 2U /*!< SCB SCR: SLEEPDEEP Position */ +#define SCB_SCR_SLEEPDEEP_Msk (1UL << SCB_SCR_SLEEPDEEP_Pos) /*!< SCB SCR: SLEEPDEEP Mask */ + +#define SCB_SCR_SLEEPONEXIT_Pos 1U /*!< SCB SCR: SLEEPONEXIT Position */ +#define SCB_SCR_SLEEPONEXIT_Msk (1UL << SCB_SCR_SLEEPONEXIT_Pos) /*!< SCB SCR: SLEEPONEXIT Mask */ + +/* SCB Configuration Control Register Definitions */ +#define SCB_CCR_BP_Pos 18U /*!< SCB CCR: BP Position */ +#define SCB_CCR_BP_Msk (1UL << SCB_CCR_BP_Pos) /*!< SCB CCR: BP Mask */ + +#define SCB_CCR_IC_Pos 17U /*!< SCB CCR: IC Position */ +#define SCB_CCR_IC_Msk (1UL << SCB_CCR_IC_Pos) /*!< SCB CCR: IC Mask */ + +#define SCB_CCR_DC_Pos 16U /*!< SCB CCR: DC Position */ +#define SCB_CCR_DC_Msk (1UL << SCB_CCR_DC_Pos) /*!< SCB CCR: DC Mask */ + +#define SCB_CCR_STKOFHFNMIGN_Pos 10U /*!< SCB CCR: STKOFHFNMIGN Position */ +#define SCB_CCR_STKOFHFNMIGN_Msk (1UL << SCB_CCR_STKOFHFNMIGN_Pos) /*!< SCB CCR: STKOFHFNMIGN Mask */ + +#define SCB_CCR_BFHFNMIGN_Pos 8U /*!< SCB CCR: BFHFNMIGN Position */ +#define SCB_CCR_BFHFNMIGN_Msk (1UL << SCB_CCR_BFHFNMIGN_Pos) /*!< SCB CCR: BFHFNMIGN Mask */ + +#define SCB_CCR_DIV_0_TRP_Pos 4U /*!< SCB CCR: DIV_0_TRP Position */ +#define SCB_CCR_DIV_0_TRP_Msk (1UL << SCB_CCR_DIV_0_TRP_Pos) /*!< SCB CCR: DIV_0_TRP Mask */ + +#define SCB_CCR_UNALIGN_TRP_Pos 3U /*!< SCB CCR: UNALIGN_TRP Position */ +#define SCB_CCR_UNALIGN_TRP_Msk (1UL << SCB_CCR_UNALIGN_TRP_Pos) /*!< SCB CCR: UNALIGN_TRP Mask */ + +#define SCB_CCR_USERSETMPEND_Pos 1U /*!< SCB CCR: USERSETMPEND Position */ +#define SCB_CCR_USERSETMPEND_Msk (1UL << SCB_CCR_USERSETMPEND_Pos) /*!< SCB CCR: USERSETMPEND Mask */ + +/* SCB System Handler Control and State Register Definitions */ +#define SCB_SHCSR_HARDFAULTPENDED_Pos 21U /*!< SCB SHCSR: HARDFAULTPENDED Position */ +#define SCB_SHCSR_HARDFAULTPENDED_Msk (1UL << SCB_SHCSR_HARDFAULTPENDED_Pos) /*!< SCB SHCSR: HARDFAULTPENDED Mask */ + +#define SCB_SHCSR_SECUREFAULTPENDED_Pos 20U /*!< SCB SHCSR: SECUREFAULTPENDED Position */ +#define SCB_SHCSR_SECUREFAULTPENDED_Msk (1UL << SCB_SHCSR_SECUREFAULTPENDED_Pos) /*!< SCB SHCSR: SECUREFAULTPENDED Mask */ + +#define SCB_SHCSR_SECUREFAULTENA_Pos 19U /*!< SCB SHCSR: SECUREFAULTENA Position */ +#define SCB_SHCSR_SECUREFAULTENA_Msk (1UL << SCB_SHCSR_SECUREFAULTENA_Pos) /*!< SCB SHCSR: SECUREFAULTENA Mask */ + +#define SCB_SHCSR_USGFAULTENA_Pos 18U /*!< SCB SHCSR: USGFAULTENA Position */ +#define SCB_SHCSR_USGFAULTENA_Msk (1UL << SCB_SHCSR_USGFAULTENA_Pos) /*!< SCB SHCSR: USGFAULTENA Mask */ + +#define SCB_SHCSR_BUSFAULTENA_Pos 17U /*!< SCB SHCSR: BUSFAULTENA Position */ +#define SCB_SHCSR_BUSFAULTENA_Msk (1UL << SCB_SHCSR_BUSFAULTENA_Pos) /*!< SCB SHCSR: BUSFAULTENA Mask */ + +#define SCB_SHCSR_MEMFAULTENA_Pos 16U /*!< SCB SHCSR: MEMFAULTENA Position */ +#define SCB_SHCSR_MEMFAULTENA_Msk (1UL << SCB_SHCSR_MEMFAULTENA_Pos) /*!< SCB SHCSR: MEMFAULTENA Mask */ + +#define SCB_SHCSR_SVCALLPENDED_Pos 15U /*!< SCB SHCSR: SVCALLPENDED Position */ +#define SCB_SHCSR_SVCALLPENDED_Msk (1UL << SCB_SHCSR_SVCALLPENDED_Pos) /*!< SCB SHCSR: SVCALLPENDED Mask */ + +#define SCB_SHCSR_BUSFAULTPENDED_Pos 14U /*!< SCB SHCSR: BUSFAULTPENDED Position */ +#define 
SCB_SHCSR_BUSFAULTPENDED_Msk (1UL << SCB_SHCSR_BUSFAULTPENDED_Pos) /*!< SCB SHCSR: BUSFAULTPENDED Mask */ + +#define SCB_SHCSR_MEMFAULTPENDED_Pos 13U /*!< SCB SHCSR: MEMFAULTPENDED Position */ +#define SCB_SHCSR_MEMFAULTPENDED_Msk (1UL << SCB_SHCSR_MEMFAULTPENDED_Pos) /*!< SCB SHCSR: MEMFAULTPENDED Mask */ + +#define SCB_SHCSR_USGFAULTPENDED_Pos 12U /*!< SCB SHCSR: USGFAULTPENDED Position */ +#define SCB_SHCSR_USGFAULTPENDED_Msk (1UL << SCB_SHCSR_USGFAULTPENDED_Pos) /*!< SCB SHCSR: USGFAULTPENDED Mask */ + +#define SCB_SHCSR_SYSTICKACT_Pos 11U /*!< SCB SHCSR: SYSTICKACT Position */ +#define SCB_SHCSR_SYSTICKACT_Msk (1UL << SCB_SHCSR_SYSTICKACT_Pos) /*!< SCB SHCSR: SYSTICKACT Mask */ + +#define SCB_SHCSR_PENDSVACT_Pos 10U /*!< SCB SHCSR: PENDSVACT Position */ +#define SCB_SHCSR_PENDSVACT_Msk (1UL << SCB_SHCSR_PENDSVACT_Pos) /*!< SCB SHCSR: PENDSVACT Mask */ + +#define SCB_SHCSR_MONITORACT_Pos 8U /*!< SCB SHCSR: MONITORACT Position */ +#define SCB_SHCSR_MONITORACT_Msk (1UL << SCB_SHCSR_MONITORACT_Pos) /*!< SCB SHCSR: MONITORACT Mask */ + +#define SCB_SHCSR_SVCALLACT_Pos 7U /*!< SCB SHCSR: SVCALLACT Position */ +#define SCB_SHCSR_SVCALLACT_Msk (1UL << SCB_SHCSR_SVCALLACT_Pos) /*!< SCB SHCSR: SVCALLACT Mask */ + +#define SCB_SHCSR_NMIACT_Pos 5U /*!< SCB SHCSR: NMIACT Position */ +#define SCB_SHCSR_NMIACT_Msk (1UL << SCB_SHCSR_NMIACT_Pos) /*!< SCB SHCSR: NMIACT Mask */ + +#define SCB_SHCSR_SECUREFAULTACT_Pos 4U /*!< SCB SHCSR: SECUREFAULTACT Position */ +#define SCB_SHCSR_SECUREFAULTACT_Msk (1UL << SCB_SHCSR_SECUREFAULTACT_Pos) /*!< SCB SHCSR: SECUREFAULTACT Mask */ + +#define SCB_SHCSR_USGFAULTACT_Pos 3U /*!< SCB SHCSR: USGFAULTACT Position */ +#define SCB_SHCSR_USGFAULTACT_Msk (1UL << SCB_SHCSR_USGFAULTACT_Pos) /*!< SCB SHCSR: USGFAULTACT Mask */ + +#define SCB_SHCSR_HARDFAULTACT_Pos 2U /*!< SCB SHCSR: HARDFAULTACT Position */ +#define SCB_SHCSR_HARDFAULTACT_Msk (1UL << SCB_SHCSR_HARDFAULTACT_Pos) /*!< SCB SHCSR: HARDFAULTACT Mask */ + +#define SCB_SHCSR_BUSFAULTACT_Pos 1U /*!< SCB SHCSR: BUSFAULTACT Position */ +#define SCB_SHCSR_BUSFAULTACT_Msk (1UL << SCB_SHCSR_BUSFAULTACT_Pos) /*!< SCB SHCSR: BUSFAULTACT Mask */ + +#define SCB_SHCSR_MEMFAULTACT_Pos 0U /*!< SCB SHCSR: MEMFAULTACT Position */ +#define SCB_SHCSR_MEMFAULTACT_Msk (1UL /*<< SCB_SHCSR_MEMFAULTACT_Pos*/) /*!< SCB SHCSR: MEMFAULTACT Mask */ + +/* SCB Configurable Fault Status Register Definitions */ +#define SCB_CFSR_USGFAULTSR_Pos 16U /*!< SCB CFSR: Usage Fault Status Register Position */ +#define SCB_CFSR_USGFAULTSR_Msk (0xFFFFUL << SCB_CFSR_USGFAULTSR_Pos) /*!< SCB CFSR: Usage Fault Status Register Mask */ + +#define SCB_CFSR_BUSFAULTSR_Pos 8U /*!< SCB CFSR: Bus Fault Status Register Position */ +#define SCB_CFSR_BUSFAULTSR_Msk (0xFFUL << SCB_CFSR_BUSFAULTSR_Pos) /*!< SCB CFSR: Bus Fault Status Register Mask */ + +#define SCB_CFSR_MEMFAULTSR_Pos 0U /*!< SCB CFSR: Memory Manage Fault Status Register Position */ +#define SCB_CFSR_MEMFAULTSR_Msk (0xFFUL /*<< SCB_CFSR_MEMFAULTSR_Pos*/) /*!< SCB CFSR: Memory Manage Fault Status Register Mask */ + +/* MemManage Fault Status Register (part of SCB Configurable Fault Status Register) */ +#define SCB_CFSR_MMARVALID_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 7U) /*!< SCB CFSR (MMFSR): MMARVALID Position */ +#define SCB_CFSR_MMARVALID_Msk (1UL << SCB_CFSR_MMARVALID_Pos) /*!< SCB CFSR (MMFSR): MMARVALID Mask */ + +#define SCB_CFSR_MLSPERR_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 5U) /*!< SCB CFSR (MMFSR): MLSPERR Position */ +#define SCB_CFSR_MLSPERR_Msk (1UL << SCB_CFSR_MLSPERR_Pos) /*!< SCB CFSR (MMFSR): 
MLSPERR Mask */ + +#define SCB_CFSR_MSTKERR_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 4U) /*!< SCB CFSR (MMFSR): MSTKERR Position */ +#define SCB_CFSR_MSTKERR_Msk (1UL << SCB_CFSR_MSTKERR_Pos) /*!< SCB CFSR (MMFSR): MSTKERR Mask */ + +#define SCB_CFSR_MUNSTKERR_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 3U) /*!< SCB CFSR (MMFSR): MUNSTKERR Position */ +#define SCB_CFSR_MUNSTKERR_Msk (1UL << SCB_CFSR_MUNSTKERR_Pos) /*!< SCB CFSR (MMFSR): MUNSTKERR Mask */ + +#define SCB_CFSR_DACCVIOL_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 1U) /*!< SCB CFSR (MMFSR): DACCVIOL Position */ +#define SCB_CFSR_DACCVIOL_Msk (1UL << SCB_CFSR_DACCVIOL_Pos) /*!< SCB CFSR (MMFSR): DACCVIOL Mask */ + +#define SCB_CFSR_IACCVIOL_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 0U) /*!< SCB CFSR (MMFSR): IACCVIOL Position */ +#define SCB_CFSR_IACCVIOL_Msk (1UL /*<< SCB_CFSR_IACCVIOL_Pos*/) /*!< SCB CFSR (MMFSR): IACCVIOL Mask */ + +/* BusFault Status Register (part of SCB Configurable Fault Status Register) */ +#define SCB_CFSR_BFARVALID_Pos (SCB_CFSR_BUSFAULTSR_Pos + 7U) /*!< SCB CFSR (BFSR): BFARVALID Position */ +#define SCB_CFSR_BFARVALID_Msk (1UL << SCB_CFSR_BFARVALID_Pos) /*!< SCB CFSR (BFSR): BFARVALID Mask */ + +#define SCB_CFSR_LSPERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 5U) /*!< SCB CFSR (BFSR): LSPERR Position */ +#define SCB_CFSR_LSPERR_Msk (1UL << SCB_CFSR_LSPERR_Pos) /*!< SCB CFSR (BFSR): LSPERR Mask */ + +#define SCB_CFSR_STKERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 4U) /*!< SCB CFSR (BFSR): STKERR Position */ +#define SCB_CFSR_STKERR_Msk (1UL << SCB_CFSR_STKERR_Pos) /*!< SCB CFSR (BFSR): STKERR Mask */ + +#define SCB_CFSR_UNSTKERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 3U) /*!< SCB CFSR (BFSR): UNSTKERR Position */ +#define SCB_CFSR_UNSTKERR_Msk (1UL << SCB_CFSR_UNSTKERR_Pos) /*!< SCB CFSR (BFSR): UNSTKERR Mask */ + +#define SCB_CFSR_IMPRECISERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 2U) /*!< SCB CFSR (BFSR): IMPRECISERR Position */ +#define SCB_CFSR_IMPRECISERR_Msk (1UL << SCB_CFSR_IMPRECISERR_Pos) /*!< SCB CFSR (BFSR): IMPRECISERR Mask */ + +#define SCB_CFSR_PRECISERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 1U) /*!< SCB CFSR (BFSR): PRECISERR Position */ +#define SCB_CFSR_PRECISERR_Msk (1UL << SCB_CFSR_PRECISERR_Pos) /*!< SCB CFSR (BFSR): PRECISERR Mask */ + +#define SCB_CFSR_IBUSERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 0U) /*!< SCB CFSR (BFSR): IBUSERR Position */ +#define SCB_CFSR_IBUSERR_Msk (1UL << SCB_CFSR_IBUSERR_Pos) /*!< SCB CFSR (BFSR): IBUSERR Mask */ + +/* UsageFault Status Register (part of SCB Configurable Fault Status Register) */ +#define SCB_CFSR_DIVBYZERO_Pos (SCB_CFSR_USGFAULTSR_Pos + 9U) /*!< SCB CFSR (UFSR): DIVBYZERO Position */ +#define SCB_CFSR_DIVBYZERO_Msk (1UL << SCB_CFSR_DIVBYZERO_Pos) /*!< SCB CFSR (UFSR): DIVBYZERO Mask */ + +#define SCB_CFSR_UNALIGNED_Pos (SCB_CFSR_USGFAULTSR_Pos + 8U) /*!< SCB CFSR (UFSR): UNALIGNED Position */ +#define SCB_CFSR_UNALIGNED_Msk (1UL << SCB_CFSR_UNALIGNED_Pos) /*!< SCB CFSR (UFSR): UNALIGNED Mask */ + +#define SCB_CFSR_STKOF_Pos (SCB_CFSR_USGFAULTSR_Pos + 4U) /*!< SCB CFSR (UFSR): STKOF Position */ +#define SCB_CFSR_STKOF_Msk (1UL << SCB_CFSR_STKOF_Pos) /*!< SCB CFSR (UFSR): STKOF Mask */ + +#define SCB_CFSR_NOCP_Pos (SCB_CFSR_USGFAULTSR_Pos + 3U) /*!< SCB CFSR (UFSR): NOCP Position */ +#define SCB_CFSR_NOCP_Msk (1UL << SCB_CFSR_NOCP_Pos) /*!< SCB CFSR (UFSR): NOCP Mask */ + +#define SCB_CFSR_INVPC_Pos (SCB_CFSR_USGFAULTSR_Pos + 2U) /*!< SCB CFSR (UFSR): INVPC Position */ +#define SCB_CFSR_INVPC_Msk (1UL << SCB_CFSR_INVPC_Pos) /*!< SCB CFSR (UFSR): INVPC Mask */ + +#define SCB_CFSR_INVSTATE_Pos (SCB_CFSR_USGFAULTSR_Pos + 1U) 
/*!< SCB CFSR (UFSR): INVSTATE Position */ +#define SCB_CFSR_INVSTATE_Msk (1UL << SCB_CFSR_INVSTATE_Pos) /*!< SCB CFSR (UFSR): INVSTATE Mask */ + +#define SCB_CFSR_UNDEFINSTR_Pos (SCB_CFSR_USGFAULTSR_Pos + 0U) /*!< SCB CFSR (UFSR): UNDEFINSTR Position */ +#define SCB_CFSR_UNDEFINSTR_Msk (1UL << SCB_CFSR_UNDEFINSTR_Pos) /*!< SCB CFSR (UFSR): UNDEFINSTR Mask */ + +/* SCB Hard Fault Status Register Definitions */ +#define SCB_HFSR_DEBUGEVT_Pos 31U /*!< SCB HFSR: DEBUGEVT Position */ +#define SCB_HFSR_DEBUGEVT_Msk (1UL << SCB_HFSR_DEBUGEVT_Pos) /*!< SCB HFSR: DEBUGEVT Mask */ + +#define SCB_HFSR_FORCED_Pos 30U /*!< SCB HFSR: FORCED Position */ +#define SCB_HFSR_FORCED_Msk (1UL << SCB_HFSR_FORCED_Pos) /*!< SCB HFSR: FORCED Mask */ + +#define SCB_HFSR_VECTTBL_Pos 1U /*!< SCB HFSR: VECTTBL Position */ +#define SCB_HFSR_VECTTBL_Msk (1UL << SCB_HFSR_VECTTBL_Pos) /*!< SCB HFSR: VECTTBL Mask */ + +/* SCB Debug Fault Status Register Definitions */ +#define SCB_DFSR_EXTERNAL_Pos 4U /*!< SCB DFSR: EXTERNAL Position */ +#define SCB_DFSR_EXTERNAL_Msk (1UL << SCB_DFSR_EXTERNAL_Pos) /*!< SCB DFSR: EXTERNAL Mask */ + +#define SCB_DFSR_VCATCH_Pos 3U /*!< SCB DFSR: VCATCH Position */ +#define SCB_DFSR_VCATCH_Msk (1UL << SCB_DFSR_VCATCH_Pos) /*!< SCB DFSR: VCATCH Mask */ + +#define SCB_DFSR_DWTTRAP_Pos 2U /*!< SCB DFSR: DWTTRAP Position */ +#define SCB_DFSR_DWTTRAP_Msk (1UL << SCB_DFSR_DWTTRAP_Pos) /*!< SCB DFSR: DWTTRAP Mask */ + +#define SCB_DFSR_BKPT_Pos 1U /*!< SCB DFSR: BKPT Position */ +#define SCB_DFSR_BKPT_Msk (1UL << SCB_DFSR_BKPT_Pos) /*!< SCB DFSR: BKPT Mask */ + +#define SCB_DFSR_HALTED_Pos 0U /*!< SCB DFSR: HALTED Position */ +#define SCB_DFSR_HALTED_Msk (1UL /*<< SCB_DFSR_HALTED_Pos*/) /*!< SCB DFSR: HALTED Mask */ + +/* SCB Non-Secure Access Control Register Definitions */ +#define SCB_NSACR_CP11_Pos 11U /*!< SCB NSACR: CP11 Position */ +#define SCB_NSACR_CP11_Msk (1UL << SCB_NSACR_CP11_Pos) /*!< SCB NSACR: CP11 Mask */ + +#define SCB_NSACR_CP10_Pos 10U /*!< SCB NSACR: CP10 Position */ +#define SCB_NSACR_CP10_Msk (1UL << SCB_NSACR_CP10_Pos) /*!< SCB NSACR: CP10 Mask */ + +#define SCB_NSACR_CPn_Pos 0U /*!< SCB NSACR: CPn Position */ +#define SCB_NSACR_CPn_Msk (1UL /*<< SCB_NSACR_CPn_Pos*/) /*!< SCB NSACR: CPn Mask */ + +/* SCB Cache Level ID Register Definitions */ +#define SCB_CLIDR_LOUU_Pos 27U /*!< SCB CLIDR: LoUU Position */ +#define SCB_CLIDR_LOUU_Msk (7UL << SCB_CLIDR_LOUU_Pos) /*!< SCB CLIDR: LoUU Mask */ + +#define SCB_CLIDR_LOC_Pos 24U /*!< SCB CLIDR: LoC Position */ +#define SCB_CLIDR_LOC_Msk (7UL << SCB_CLIDR_LOC_Pos) /*!< SCB CLIDR: LoC Mask */ + +/* SCB Cache Type Register Definitions */ +#define SCB_CTR_FORMAT_Pos 29U /*!< SCB CTR: Format Position */ +#define SCB_CTR_FORMAT_Msk (7UL << SCB_CTR_FORMAT_Pos) /*!< SCB CTR: Format Mask */ + +#define SCB_CTR_CWG_Pos 24U /*!< SCB CTR: CWG Position */ +#define SCB_CTR_CWG_Msk (0xFUL << SCB_CTR_CWG_Pos) /*!< SCB CTR: CWG Mask */ + +#define SCB_CTR_ERG_Pos 20U /*!< SCB CTR: ERG Position */ +#define SCB_CTR_ERG_Msk (0xFUL << SCB_CTR_ERG_Pos) /*!< SCB CTR: ERG Mask */ + +#define SCB_CTR_DMINLINE_Pos 16U /*!< SCB CTR: DminLine Position */ +#define SCB_CTR_DMINLINE_Msk (0xFUL << SCB_CTR_DMINLINE_Pos) /*!< SCB CTR: DminLine Mask */ + +#define SCB_CTR_IMINLINE_Pos 0U /*!< SCB CTR: ImInLine Position */ +#define SCB_CTR_IMINLINE_Msk (0xFUL /*<< SCB_CTR_IMINLINE_Pos*/) /*!< SCB CTR: ImInLine Mask */ + +/* SCB Cache Size ID Register Definitions */ +#define SCB_CCSIDR_WT_Pos 31U /*!< SCB CCSIDR: WT Position */ +#define SCB_CCSIDR_WT_Msk (1UL << 
SCB_CCSIDR_WT_Pos) /*!< SCB CCSIDR: WT Mask */ + +#define SCB_CCSIDR_WB_Pos 30U /*!< SCB CCSIDR: WB Position */ +#define SCB_CCSIDR_WB_Msk (1UL << SCB_CCSIDR_WB_Pos) /*!< SCB CCSIDR: WB Mask */ + +#define SCB_CCSIDR_RA_Pos 29U /*!< SCB CCSIDR: RA Position */ +#define SCB_CCSIDR_RA_Msk (1UL << SCB_CCSIDR_RA_Pos) /*!< SCB CCSIDR: RA Mask */ + +#define SCB_CCSIDR_WA_Pos 28U /*!< SCB CCSIDR: WA Position */ +#define SCB_CCSIDR_WA_Msk (1UL << SCB_CCSIDR_WA_Pos) /*!< SCB CCSIDR: WA Mask */ + +#define SCB_CCSIDR_NUMSETS_Pos 13U /*!< SCB CCSIDR: NumSets Position */ +#define SCB_CCSIDR_NUMSETS_Msk (0x7FFFUL << SCB_CCSIDR_NUMSETS_Pos) /*!< SCB CCSIDR: NumSets Mask */ + +#define SCB_CCSIDR_ASSOCIATIVITY_Pos 3U /*!< SCB CCSIDR: Associativity Position */ +#define SCB_CCSIDR_ASSOCIATIVITY_Msk (0x3FFUL << SCB_CCSIDR_ASSOCIATIVITY_Pos) /*!< SCB CCSIDR: Associativity Mask */ + +#define SCB_CCSIDR_LINESIZE_Pos 0U /*!< SCB CCSIDR: LineSize Position */ +#define SCB_CCSIDR_LINESIZE_Msk (7UL /*<< SCB_CCSIDR_LINESIZE_Pos*/) /*!< SCB CCSIDR: LineSize Mask */ + +/* SCB Cache Size Selection Register Definitions */ +#define SCB_CSSELR_LEVEL_Pos 1U /*!< SCB CSSELR: Level Position */ +#define SCB_CSSELR_LEVEL_Msk (7UL << SCB_CSSELR_LEVEL_Pos) /*!< SCB CSSELR: Level Mask */ + +#define SCB_CSSELR_IND_Pos 0U /*!< SCB CSSELR: InD Position */ +#define SCB_CSSELR_IND_Msk (1UL /*<< SCB_CSSELR_IND_Pos*/) /*!< SCB CSSELR: InD Mask */ + +/* SCB Software Triggered Interrupt Register Definitions */ +#define SCB_STIR_INTID_Pos 0U /*!< SCB STIR: INTID Position */ +#define SCB_STIR_INTID_Msk (0x1FFUL /*<< SCB_STIR_INTID_Pos*/) /*!< SCB STIR: INTID Mask */ + +/* SCB D-Cache Invalidate by Set-way Register Definitions */ +#define SCB_DCISW_WAY_Pos 30U /*!< SCB DCISW: Way Position */ +#define SCB_DCISW_WAY_Msk (3UL << SCB_DCISW_WAY_Pos) /*!< SCB DCISW: Way Mask */ + +#define SCB_DCISW_SET_Pos 5U /*!< SCB DCISW: Set Position */ +#define SCB_DCISW_SET_Msk (0x1FFUL << SCB_DCISW_SET_Pos) /*!< SCB DCISW: Set Mask */ + +/* SCB D-Cache Clean by Set-way Register Definitions */ +#define SCB_DCCSW_WAY_Pos 30U /*!< SCB DCCSW: Way Position */ +#define SCB_DCCSW_WAY_Msk (3UL << SCB_DCCSW_WAY_Pos) /*!< SCB DCCSW: Way Mask */ + +#define SCB_DCCSW_SET_Pos 5U /*!< SCB DCCSW: Set Position */ +#define SCB_DCCSW_SET_Msk (0x1FFUL << SCB_DCCSW_SET_Pos) /*!< SCB DCCSW: Set Mask */ + +/* SCB D-Cache Clean and Invalidate by Set-way Register Definitions */ +#define SCB_DCCISW_WAY_Pos 30U /*!< SCB DCCISW: Way Position */ +#define SCB_DCCISW_WAY_Msk (3UL << SCB_DCCISW_WAY_Pos) /*!< SCB DCCISW: Way Mask */ + +#define SCB_DCCISW_SET_Pos 5U /*!< SCB DCCISW: Set Position */ +#define SCB_DCCISW_SET_Msk (0x1FFUL << SCB_DCCISW_SET_Pos) /*!< SCB DCCISW: Set Mask */ + +/* Instruction Tightly-Coupled Memory Control Register Definitions */ +#define SCB_ITCMCR_SZ_Pos 3U /*!< SCB ITCMCR: SZ Position */ +#define SCB_ITCMCR_SZ_Msk (0xFUL << SCB_ITCMCR_SZ_Pos) /*!< SCB ITCMCR: SZ Mask */ + +#define SCB_ITCMCR_RETEN_Pos 2U /*!< SCB ITCMCR: RETEN Position */ +#define SCB_ITCMCR_RETEN_Msk (1UL << SCB_ITCMCR_RETEN_Pos) /*!< SCB ITCMCR: RETEN Mask */ + +#define SCB_ITCMCR_RMW_Pos 1U /*!< SCB ITCMCR: RMW Position */ +#define SCB_ITCMCR_RMW_Msk (1UL << SCB_ITCMCR_RMW_Pos) /*!< SCB ITCMCR: RMW Mask */ + +#define SCB_ITCMCR_EN_Pos 0U /*!< SCB ITCMCR: EN Position */ +#define SCB_ITCMCR_EN_Msk (1UL /*<< SCB_ITCMCR_EN_Pos*/) /*!< SCB ITCMCR: EN Mask */ + +/* Data Tightly-Coupled Memory Control Register Definitions */ +#define SCB_DTCMCR_SZ_Pos 3U /*!< SCB DTCMCR: SZ Position */ +#define 
SCB_DTCMCR_SZ_Msk                  (0xFUL << SCB_DTCMCR_SZ_Pos)                 /*!< SCB DTCMCR: SZ Mask */
+
+#define SCB_DTCMCR_RETEN_Pos                2U                                            /*!< SCB DTCMCR: RETEN Position */
+#define SCB_DTCMCR_RETEN_Msk               (1UL << SCB_DTCMCR_RETEN_Pos)                  /*!< SCB DTCMCR: RETEN Mask */
+
+#define SCB_DTCMCR_RMW_Pos                  1U                                            /*!< SCB DTCMCR: RMW Position */
+#define SCB_DTCMCR_RMW_Msk                 (1UL << SCB_DTCMCR_RMW_Pos)                    /*!< SCB DTCMCR: RMW Mask */
+
+#define SCB_DTCMCR_EN_Pos                   0U                                            /*!< SCB DTCMCR: EN Position */
+#define SCB_DTCMCR_EN_Msk                  (1UL /*<< SCB_DTCMCR_EN_Pos*/)                 /*!< SCB DTCMCR: EN Mask */
+
+/* AHBP Control Register Definitions */
+#define SCB_AHBPCR_SZ_Pos                   1U                                            /*!< SCB AHBPCR: SZ Position */
+#define SCB_AHBPCR_SZ_Msk                  (7UL << SCB_AHBPCR_SZ_Pos)                     /*!< SCB AHBPCR: SZ Mask */
+
+#define SCB_AHBPCR_EN_Pos                   0U                                            /*!< SCB AHBPCR: EN Position */
+#define SCB_AHBPCR_EN_Msk                  (1UL /*<< SCB_AHBPCR_EN_Pos*/)                 /*!< SCB AHBPCR: EN Mask */
+
+/* L1 Cache Control Register Definitions */
+#define SCB_CACR_FORCEWT_Pos                2U                                            /*!< SCB CACR: FORCEWT Position */
+#define SCB_CACR_FORCEWT_Msk               (1UL << SCB_CACR_FORCEWT_Pos)                  /*!< SCB CACR: FORCEWT Mask */
+
+#define SCB_CACR_ECCEN_Pos                  1U                                            /*!< SCB CACR: ECCEN Position */
+#define SCB_CACR_ECCEN_Msk                 (1UL << SCB_CACR_ECCEN_Pos)                    /*!< SCB CACR: ECCEN Mask */
+
+#define SCB_CACR_SIWT_Pos                   0U                                            /*!< SCB CACR: SIWT Position */
+#define SCB_CACR_SIWT_Msk                  (1UL /*<< SCB_CACR_SIWT_Pos*/)                 /*!< SCB CACR: SIWT Mask */
+
+/* AHBS Control Register Definitions */
+#define SCB_AHBSCR_INITCOUNT_Pos           11U                                            /*!< SCB AHBSCR: INITCOUNT Position */
+#define SCB_AHBSCR_INITCOUNT_Msk           (0x1FUL << SCB_AHBSCR_INITCOUNT_Pos)           /*!< SCB AHBSCR: INITCOUNT Mask */
+
+#define SCB_AHBSCR_TPRI_Pos                 2U                                            /*!< SCB AHBSCR: TPRI Position */
+#define SCB_AHBSCR_TPRI_Msk                (0x1FFUL << SCB_AHBSCR_TPRI_Pos)               /*!< SCB AHBSCR: TPRI Mask */
+
+#define SCB_AHBSCR_CTL_Pos                  0U                                            /*!< SCB AHBSCR: CTL Position*/
+#define SCB_AHBSCR_CTL_Msk                 (3UL /*<< SCB_AHBSCR_CTL_Pos*/)                /*!< SCB AHBSCR: CTL Mask */
+
+/* Auxiliary Bus Fault Status Register Definitions */
+#define SCB_ABFSR_AXIMTYPE_Pos              8U                                            /*!< SCB ABFSR: AXIMTYPE Position*/
+#define SCB_ABFSR_AXIMTYPE_Msk             (3UL << SCB_ABFSR_AXIMTYPE_Pos)                /*!< SCB ABFSR: AXIMTYPE Mask */
+
+#define SCB_ABFSR_EPPB_Pos                  4U                                            /*!< SCB ABFSR: EPPB Position*/
+#define SCB_ABFSR_EPPB_Msk                 (1UL << SCB_ABFSR_EPPB_Pos)                    /*!< SCB ABFSR: EPPB Mask */
+
+#define SCB_ABFSR_AXIM_Pos                  3U                                            /*!< SCB ABFSR: AXIM Position*/
+#define SCB_ABFSR_AXIM_Msk                 (1UL << SCB_ABFSR_AXIM_Pos)                    /*!< SCB ABFSR: AXIM Mask */
+
+#define SCB_ABFSR_AHBP_Pos                  2U                                            /*!< SCB ABFSR: AHBP Position*/
+#define SCB_ABFSR_AHBP_Msk                 (1UL << SCB_ABFSR_AHBP_Pos)                    /*!< SCB ABFSR: AHBP Mask */
+
+#define SCB_ABFSR_DTCM_Pos                  1U                                            /*!< SCB ABFSR: DTCM Position*/
+#define SCB_ABFSR_DTCM_Msk                 (1UL << SCB_ABFSR_DTCM_Pos)                    /*!< SCB ABFSR: DTCM Mask */
+
+#define SCB_ABFSR_ITCM_Pos                  0U                                            /*!< SCB ABFSR: ITCM Position*/
+#define SCB_ABFSR_ITCM_Msk                 (1UL /*<< SCB_ABFSR_ITCM_Pos*/)                /*!< SCB ABFSR: ITCM Mask */
+
+/*@} end of group CMSIS_SCB */
+
+
+/**
+  \ingroup  CMSIS_core_register
+  \defgroup CMSIS_SCnSCB System Controls not in SCB (SCnSCB)
+  \brief    Type definitions for the System Control and ID Register not in the SCB
+  @{
+ */
+
+/**
+  \brief  Structure type to access the System Control and ID Register not in the SCB.
+ */ +typedef struct +{ + uint32_t RESERVED0[1U]; + __IM uint32_t ICTR; /*!< Offset: 0x004 (R/ ) Interrupt Controller Type Register */ + __IOM uint32_t ACTLR; /*!< Offset: 0x008 (R/W) Auxiliary Control Register */ + __IOM uint32_t CPPWR; /*!< Offset: 0x00C (R/W) Coprocessor Power Control Register */ +} SCnSCB_Type; + +/* Interrupt Controller Type Register Definitions */ +#define SCnSCB_ICTR_INTLINESNUM_Pos 0U /*!< ICTR: INTLINESNUM Position */ +#define SCnSCB_ICTR_INTLINESNUM_Msk (0xFUL /*<< SCnSCB_ICTR_INTLINESNUM_Pos*/) /*!< ICTR: INTLINESNUM Mask */ + +/*@} end of group CMSIS_SCnotSCB */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SysTick System Tick Timer (SysTick) + \brief Type definitions for the System Timer Registers. + @{ + */ + +/** + \brief Structure type to access the System Timer (SysTick). + */ +typedef struct +{ + __IOM uint32_t CTRL; /*!< Offset: 0x000 (R/W) SysTick Control and Status Register */ + __IOM uint32_t LOAD; /*!< Offset: 0x004 (R/W) SysTick Reload Value Register */ + __IOM uint32_t VAL; /*!< Offset: 0x008 (R/W) SysTick Current Value Register */ + __IM uint32_t CALIB; /*!< Offset: 0x00C (R/ ) SysTick Calibration Register */ +} SysTick_Type; + +/* SysTick Control / Status Register Definitions */ +#define SysTick_CTRL_COUNTFLAG_Pos 16U /*!< SysTick CTRL: COUNTFLAG Position */ +#define SysTick_CTRL_COUNTFLAG_Msk (1UL << SysTick_CTRL_COUNTFLAG_Pos) /*!< SysTick CTRL: COUNTFLAG Mask */ + +#define SysTick_CTRL_CLKSOURCE_Pos 2U /*!< SysTick CTRL: CLKSOURCE Position */ +#define SysTick_CTRL_CLKSOURCE_Msk (1UL << SysTick_CTRL_CLKSOURCE_Pos) /*!< SysTick CTRL: CLKSOURCE Mask */ + +#define SysTick_CTRL_TICKINT_Pos 1U /*!< SysTick CTRL: TICKINT Position */ +#define SysTick_CTRL_TICKINT_Msk (1UL << SysTick_CTRL_TICKINT_Pos) /*!< SysTick CTRL: TICKINT Mask */ + +#define SysTick_CTRL_ENABLE_Pos 0U /*!< SysTick CTRL: ENABLE Position */ +#define SysTick_CTRL_ENABLE_Msk (1UL /*<< SysTick_CTRL_ENABLE_Pos*/) /*!< SysTick CTRL: ENABLE Mask */ + +/* SysTick Reload Register Definitions */ +#define SysTick_LOAD_RELOAD_Pos 0U /*!< SysTick LOAD: RELOAD Position */ +#define SysTick_LOAD_RELOAD_Msk (0xFFFFFFUL /*<< SysTick_LOAD_RELOAD_Pos*/) /*!< SysTick LOAD: RELOAD Mask */ + +/* SysTick Current Register Definitions */ +#define SysTick_VAL_CURRENT_Pos 0U /*!< SysTick VAL: CURRENT Position */ +#define SysTick_VAL_CURRENT_Msk (0xFFFFFFUL /*<< SysTick_VAL_CURRENT_Pos*/) /*!< SysTick VAL: CURRENT Mask */ + +/* SysTick Calibration Register Definitions */ +#define SysTick_CALIB_NOREF_Pos 31U /*!< SysTick CALIB: NOREF Position */ +#define SysTick_CALIB_NOREF_Msk (1UL << SysTick_CALIB_NOREF_Pos) /*!< SysTick CALIB: NOREF Mask */ + +#define SysTick_CALIB_SKEW_Pos 30U /*!< SysTick CALIB: SKEW Position */ +#define SysTick_CALIB_SKEW_Msk (1UL << SysTick_CALIB_SKEW_Pos) /*!< SysTick CALIB: SKEW Mask */ + +#define SysTick_CALIB_TENMS_Pos 0U /*!< SysTick CALIB: TENMS Position */ +#define SysTick_CALIB_TENMS_Msk (0xFFFFFFUL /*<< SysTick_CALIB_TENMS_Pos*/) /*!< SysTick CALIB: TENMS Mask */ + +/*@} end of group CMSIS_SysTick */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_ITM Instrumentation Trace Macrocell (ITM) + \brief Type definitions for the Instrumentation Trace Macrocell (ITM) + @{ + */ + +/** + \brief Structure type to access the Instrumentation Trace Macrocell Register (ITM). 
+ */ +typedef struct +{ + __OM union + { + __OM uint8_t u8; /*!< Offset: 0x000 ( /W) ITM Stimulus Port 8-bit */ + __OM uint16_t u16; /*!< Offset: 0x000 ( /W) ITM Stimulus Port 16-bit */ + __OM uint32_t u32; /*!< Offset: 0x000 ( /W) ITM Stimulus Port 32-bit */ + } PORT [32U]; /*!< Offset: 0x000 ( /W) ITM Stimulus Port Registers */ + uint32_t RESERVED0[864U]; + __IOM uint32_t TER; /*!< Offset: 0xE00 (R/W) ITM Trace Enable Register */ + uint32_t RESERVED1[15U]; + __IOM uint32_t TPR; /*!< Offset: 0xE40 (R/W) ITM Trace Privilege Register */ + uint32_t RESERVED2[15U]; + __IOM uint32_t TCR; /*!< Offset: 0xE80 (R/W) ITM Trace Control Register */ + uint32_t RESERVED3[29U]; + __OM uint32_t IWR; /*!< Offset: 0xEF8 ( /W) ITM Integration Write Register */ + __IM uint32_t IRR; /*!< Offset: 0xEFC (R/ ) ITM Integration Read Register */ + __IOM uint32_t IMCR; /*!< Offset: 0xF00 (R/W) ITM Integration Mode Control Register */ + uint32_t RESERVED4[43U]; + __OM uint32_t LAR; /*!< Offset: 0xFB0 ( /W) ITM Lock Access Register */ + __IM uint32_t LSR; /*!< Offset: 0xFB4 (R/ ) ITM Lock Status Register */ + uint32_t RESERVED5[1U]; + __IM uint32_t DEVARCH; /*!< Offset: 0xFBC (R/ ) ITM Device Architecture Register */ + uint32_t RESERVED6[4U]; + __IM uint32_t PID4; /*!< Offset: 0xFD0 (R/ ) ITM Peripheral Identification Register #4 */ + __IM uint32_t PID5; /*!< Offset: 0xFD4 (R/ ) ITM Peripheral Identification Register #5 */ + __IM uint32_t PID6; /*!< Offset: 0xFD8 (R/ ) ITM Peripheral Identification Register #6 */ + __IM uint32_t PID7; /*!< Offset: 0xFDC (R/ ) ITM Peripheral Identification Register #7 */ + __IM uint32_t PID0; /*!< Offset: 0xFE0 (R/ ) ITM Peripheral Identification Register #0 */ + __IM uint32_t PID1; /*!< Offset: 0xFE4 (R/ ) ITM Peripheral Identification Register #1 */ + __IM uint32_t PID2; /*!< Offset: 0xFE8 (R/ ) ITM Peripheral Identification Register #2 */ + __IM uint32_t PID3; /*!< Offset: 0xFEC (R/ ) ITM Peripheral Identification Register #3 */ + __IM uint32_t CID0; /*!< Offset: 0xFF0 (R/ ) ITM Component Identification Register #0 */ + __IM uint32_t CID1; /*!< Offset: 0xFF4 (R/ ) ITM Component Identification Register #1 */ + __IM uint32_t CID2; /*!< Offset: 0xFF8 (R/ ) ITM Component Identification Register #2 */ + __IM uint32_t CID3; /*!< Offset: 0xFFC (R/ ) ITM Component Identification Register #3 */ +} ITM_Type; + +/* ITM Stimulus Port Register Definitions */ +#define ITM_STIM_DISABLED_Pos 1U /*!< ITM STIM: DISABLED Position */ +#define ITM_STIM_DISABLED_Msk (0x1UL << ITM_STIM_DISABLED_Pos) /*!< ITM STIM: DISABLED Mask */ + +#define ITM_STIM_FIFOREADY_Pos 0U /*!< ITM STIM: FIFOREADY Position */ +#define ITM_STIM_FIFOREADY_Msk (0x1UL /*<< ITM_STIM_FIFOREADY_Pos*/) /*!< ITM STIM: FIFOREADY Mask */ + +/* ITM Trace Privilege Register Definitions */ +#define ITM_TPR_PRIVMASK_Pos 0U /*!< ITM TPR: PRIVMASK Position */ +#define ITM_TPR_PRIVMASK_Msk (0xFFFFFFFFUL /*<< ITM_TPR_PRIVMASK_Pos*/) /*!< ITM TPR: PRIVMASK Mask */ + +/* ITM Trace Control Register Definitions */ +#define ITM_TCR_BUSY_Pos 23U /*!< ITM TCR: BUSY Position */ +#define ITM_TCR_BUSY_Msk (1UL << ITM_TCR_BUSY_Pos) /*!< ITM TCR: BUSY Mask */ + +#define ITM_TCR_TRACEBUSID_Pos 16U /*!< ITM TCR: ATBID Position */ +#define ITM_TCR_TRACEBUSID_Msk (0x7FUL << ITM_TCR_TRACEBUSID_Pos) /*!< ITM TCR: ATBID Mask */ + +#define ITM_TCR_GTSFREQ_Pos 10U /*!< ITM TCR: Global timestamp frequency Position */ +#define ITM_TCR_GTSFREQ_Msk (3UL << ITM_TCR_GTSFREQ_Pos) /*!< ITM TCR: Global timestamp frequency Mask */ + +#define ITM_TCR_TSPRESCALE_Pos 8U /*!< ITM 
TCR: TSPRESCALE Position */ +#define ITM_TCR_TSPRESCALE_Msk (3UL << ITM_TCR_TSPRESCALE_Pos) /*!< ITM TCR: TSPRESCALE Mask */ + +#define ITM_TCR_STALLENA_Pos 5U /*!< ITM TCR: STALLENA Position */ +#define ITM_TCR_STALLENA_Msk (1UL << ITM_TCR_STALLENA_Pos) /*!< ITM TCR: STALLENA Mask */ + +#define ITM_TCR_SWOENA_Pos 4U /*!< ITM TCR: SWOENA Position */ +#define ITM_TCR_SWOENA_Msk (1UL << ITM_TCR_SWOENA_Pos) /*!< ITM TCR: SWOENA Mask */ + +#define ITM_TCR_DWTENA_Pos 3U /*!< ITM TCR: DWTENA Position */ +#define ITM_TCR_DWTENA_Msk (1UL << ITM_TCR_DWTENA_Pos) /*!< ITM TCR: DWTENA Mask */ + +#define ITM_TCR_SYNCENA_Pos 2U /*!< ITM TCR: SYNCENA Position */ +#define ITM_TCR_SYNCENA_Msk (1UL << ITM_TCR_SYNCENA_Pos) /*!< ITM TCR: SYNCENA Mask */ + +#define ITM_TCR_TSENA_Pos 1U /*!< ITM TCR: TSENA Position */ +#define ITM_TCR_TSENA_Msk (1UL << ITM_TCR_TSENA_Pos) /*!< ITM TCR: TSENA Mask */ + +#define ITM_TCR_ITMENA_Pos 0U /*!< ITM TCR: ITM Enable bit Position */ +#define ITM_TCR_ITMENA_Msk (1UL /*<< ITM_TCR_ITMENA_Pos*/) /*!< ITM TCR: ITM Enable bit Mask */ + +/* ITM Integration Write Register Definitions */ +#define ITM_IWR_ATVALIDM_Pos 0U /*!< ITM IWR: ATVALIDM Position */ +#define ITM_IWR_ATVALIDM_Msk (1UL /*<< ITM_IWR_ATVALIDM_Pos*/) /*!< ITM IWR: ATVALIDM Mask */ + +/* ITM Integration Read Register Definitions */ +#define ITM_IRR_ATREADYM_Pos 0U /*!< ITM IRR: ATREADYM Position */ +#define ITM_IRR_ATREADYM_Msk (1UL /*<< ITM_IRR_ATREADYM_Pos*/) /*!< ITM IRR: ATREADYM Mask */ + +/* ITM Integration Mode Control Register Definitions */ +#define ITM_IMCR_INTEGRATION_Pos 0U /*!< ITM IMCR: INTEGRATION Position */ +#define ITM_IMCR_INTEGRATION_Msk (1UL /*<< ITM_IMCR_INTEGRATION_Pos*/) /*!< ITM IMCR: INTEGRATION Mask */ + +/* ITM Lock Status Register Definitions */ +#define ITM_LSR_ByteAcc_Pos 2U /*!< ITM LSR: ByteAcc Position */ +#define ITM_LSR_ByteAcc_Msk (1UL << ITM_LSR_ByteAcc_Pos) /*!< ITM LSR: ByteAcc Mask */ + +#define ITM_LSR_Access_Pos 1U /*!< ITM LSR: Access Position */ +#define ITM_LSR_Access_Msk (1UL << ITM_LSR_Access_Pos) /*!< ITM LSR: Access Mask */ + +#define ITM_LSR_Present_Pos 0U /*!< ITM LSR: Present Position */ +#define ITM_LSR_Present_Msk (1UL /*<< ITM_LSR_Present_Pos*/) /*!< ITM LSR: Present Mask */ + +/*@}*/ /* end of group CMSIS_ITM */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_DWT Data Watchpoint and Trace (DWT) + \brief Type definitions for the Data Watchpoint and Trace (DWT) + @{ + */ + +/** + \brief Structure type to access the Data Watchpoint and Trace Register (DWT). 
+ */ +typedef struct +{ + __IOM uint32_t CTRL; /*!< Offset: 0x000 (R/W) Control Register */ + __IOM uint32_t CYCCNT; /*!< Offset: 0x004 (R/W) Cycle Count Register */ + __IOM uint32_t CPICNT; /*!< Offset: 0x008 (R/W) CPI Count Register */ + __IOM uint32_t EXCCNT; /*!< Offset: 0x00C (R/W) Exception Overhead Count Register */ + __IOM uint32_t SLEEPCNT; /*!< Offset: 0x010 (R/W) Sleep Count Register */ + __IOM uint32_t LSUCNT; /*!< Offset: 0x014 (R/W) LSU Count Register */ + __IOM uint32_t FOLDCNT; /*!< Offset: 0x018 (R/W) Folded-instruction Count Register */ + __IM uint32_t PCSR; /*!< Offset: 0x01C (R/ ) Program Counter Sample Register */ + __IOM uint32_t COMP0; /*!< Offset: 0x020 (R/W) Comparator Register 0 */ + uint32_t RESERVED1[1U]; + __IOM uint32_t FUNCTION0; /*!< Offset: 0x028 (R/W) Function Register 0 */ + uint32_t RESERVED2[1U]; + __IOM uint32_t COMP1; /*!< Offset: 0x030 (R/W) Comparator Register 1 */ + uint32_t RESERVED3[1U]; + __IOM uint32_t FUNCTION1; /*!< Offset: 0x038 (R/W) Function Register 1 */ + uint32_t RESERVED4[1U]; + __IOM uint32_t COMP2; /*!< Offset: 0x040 (R/W) Comparator Register 2 */ + uint32_t RESERVED5[1U]; + __IOM uint32_t FUNCTION2; /*!< Offset: 0x048 (R/W) Function Register 2 */ + uint32_t RESERVED6[1U]; + __IOM uint32_t COMP3; /*!< Offset: 0x050 (R/W) Comparator Register 3 */ + uint32_t RESERVED7[1U]; + __IOM uint32_t FUNCTION3; /*!< Offset: 0x058 (R/W) Function Register 3 */ + uint32_t RESERVED8[1U]; + __IOM uint32_t COMP4; /*!< Offset: 0x060 (R/W) Comparator Register 4 */ + uint32_t RESERVED9[1U]; + __IOM uint32_t FUNCTION4; /*!< Offset: 0x068 (R/W) Function Register 4 */ + uint32_t RESERVED10[1U]; + __IOM uint32_t COMP5; /*!< Offset: 0x070 (R/W) Comparator Register 5 */ + uint32_t RESERVED11[1U]; + __IOM uint32_t FUNCTION5; /*!< Offset: 0x078 (R/W) Function Register 5 */ + uint32_t RESERVED12[1U]; + __IOM uint32_t COMP6; /*!< Offset: 0x080 (R/W) Comparator Register 6 */ + uint32_t RESERVED13[1U]; + __IOM uint32_t FUNCTION6; /*!< Offset: 0x088 (R/W) Function Register 6 */ + uint32_t RESERVED14[1U]; + __IOM uint32_t COMP7; /*!< Offset: 0x090 (R/W) Comparator Register 7 */ + uint32_t RESERVED15[1U]; + __IOM uint32_t FUNCTION7; /*!< Offset: 0x098 (R/W) Function Register 7 */ + uint32_t RESERVED16[1U]; + __IOM uint32_t COMP8; /*!< Offset: 0x0A0 (R/W) Comparator Register 8 */ + uint32_t RESERVED17[1U]; + __IOM uint32_t FUNCTION8; /*!< Offset: 0x0A8 (R/W) Function Register 8 */ + uint32_t RESERVED18[1U]; + __IOM uint32_t COMP9; /*!< Offset: 0x0B0 (R/W) Comparator Register 9 */ + uint32_t RESERVED19[1U]; + __IOM uint32_t FUNCTION9; /*!< Offset: 0x0B8 (R/W) Function Register 9 */ + uint32_t RESERVED20[1U]; + __IOM uint32_t COMP10; /*!< Offset: 0x0C0 (R/W) Comparator Register 10 */ + uint32_t RESERVED21[1U]; + __IOM uint32_t FUNCTION10; /*!< Offset: 0x0C8 (R/W) Function Register 10 */ + uint32_t RESERVED22[1U]; + __IOM uint32_t COMP11; /*!< Offset: 0x0D0 (R/W) Comparator Register 11 */ + uint32_t RESERVED23[1U]; + __IOM uint32_t FUNCTION11; /*!< Offset: 0x0D8 (R/W) Function Register 11 */ + uint32_t RESERVED24[1U]; + __IOM uint32_t COMP12; /*!< Offset: 0x0E0 (R/W) Comparator Register 12 */ + uint32_t RESERVED25[1U]; + __IOM uint32_t FUNCTION12; /*!< Offset: 0x0E8 (R/W) Function Register 12 */ + uint32_t RESERVED26[1U]; + __IOM uint32_t COMP13; /*!< Offset: 0x0F0 (R/W) Comparator Register 13 */ + uint32_t RESERVED27[1U]; + __IOM uint32_t FUNCTION13; /*!< Offset: 0x0F8 (R/W) Function Register 13 */ + uint32_t RESERVED28[1U]; + __IOM uint32_t COMP14; /*!< Offset: 0x100 
(R/W) Comparator Register 14 */ + uint32_t RESERVED29[1U]; + __IOM uint32_t FUNCTION14; /*!< Offset: 0x108 (R/W) Function Register 14 */ + uint32_t RESERVED30[1U]; + __IOM uint32_t COMP15; /*!< Offset: 0x110 (R/W) Comparator Register 15 */ + uint32_t RESERVED31[1U]; + __IOM uint32_t FUNCTION15; /*!< Offset: 0x118 (R/W) Function Register 15 */ + uint32_t RESERVED32[934U]; + __IM uint32_t LSR; /*!< Offset: 0xFB4 (R ) Lock Status Register */ + uint32_t RESERVED33[1U]; + __IM uint32_t DEVARCH; /*!< Offset: 0xFBC (R/ ) Device Architecture Register */ +} DWT_Type; + +/* DWT Control Register Definitions */ +#define DWT_CTRL_NUMCOMP_Pos 28U /*!< DWT CTRL: NUMCOMP Position */ +#define DWT_CTRL_NUMCOMP_Msk (0xFUL << DWT_CTRL_NUMCOMP_Pos) /*!< DWT CTRL: NUMCOMP Mask */ + +#define DWT_CTRL_NOTRCPKT_Pos 27U /*!< DWT CTRL: NOTRCPKT Position */ +#define DWT_CTRL_NOTRCPKT_Msk (0x1UL << DWT_CTRL_NOTRCPKT_Pos) /*!< DWT CTRL: NOTRCPKT Mask */ + +#define DWT_CTRL_NOEXTTRIG_Pos 26U /*!< DWT CTRL: NOEXTTRIG Position */ +#define DWT_CTRL_NOEXTTRIG_Msk (0x1UL << DWT_CTRL_NOEXTTRIG_Pos) /*!< DWT CTRL: NOEXTTRIG Mask */ + +#define DWT_CTRL_NOCYCCNT_Pos 25U /*!< DWT CTRL: NOCYCCNT Position */ +#define DWT_CTRL_NOCYCCNT_Msk (0x1UL << DWT_CTRL_NOCYCCNT_Pos) /*!< DWT CTRL: NOCYCCNT Mask */ + +#define DWT_CTRL_NOPRFCNT_Pos 24U /*!< DWT CTRL: NOPRFCNT Position */ +#define DWT_CTRL_NOPRFCNT_Msk (0x1UL << DWT_CTRL_NOPRFCNT_Pos) /*!< DWT CTRL: NOPRFCNT Mask */ + +#define DWT_CTRL_CYCDISS_Pos 23U /*!< DWT CTRL: CYCDISS Position */ +#define DWT_CTRL_CYCDISS_Msk (0x1UL << DWT_CTRL_CYCDISS_Pos) /*!< DWT CTRL: CYCDISS Mask */ + +#define DWT_CTRL_CYCEVTENA_Pos 22U /*!< DWT CTRL: CYCEVTENA Position */ +#define DWT_CTRL_CYCEVTENA_Msk (0x1UL << DWT_CTRL_CYCEVTENA_Pos) /*!< DWT CTRL: CYCEVTENA Mask */ + +#define DWT_CTRL_FOLDEVTENA_Pos 21U /*!< DWT CTRL: FOLDEVTENA Position */ +#define DWT_CTRL_FOLDEVTENA_Msk (0x1UL << DWT_CTRL_FOLDEVTENA_Pos) /*!< DWT CTRL: FOLDEVTENA Mask */ + +#define DWT_CTRL_LSUEVTENA_Pos 20U /*!< DWT CTRL: LSUEVTENA Position */ +#define DWT_CTRL_LSUEVTENA_Msk (0x1UL << DWT_CTRL_LSUEVTENA_Pos) /*!< DWT CTRL: LSUEVTENA Mask */ + +#define DWT_CTRL_SLEEPEVTENA_Pos 19U /*!< DWT CTRL: SLEEPEVTENA Position */ +#define DWT_CTRL_SLEEPEVTENA_Msk (0x1UL << DWT_CTRL_SLEEPEVTENA_Pos) /*!< DWT CTRL: SLEEPEVTENA Mask */ + +#define DWT_CTRL_EXCEVTENA_Pos 18U /*!< DWT CTRL: EXCEVTENA Position */ +#define DWT_CTRL_EXCEVTENA_Msk (0x1UL << DWT_CTRL_EXCEVTENA_Pos) /*!< DWT CTRL: EXCEVTENA Mask */ + +#define DWT_CTRL_CPIEVTENA_Pos 17U /*!< DWT CTRL: CPIEVTENA Position */ +#define DWT_CTRL_CPIEVTENA_Msk (0x1UL << DWT_CTRL_CPIEVTENA_Pos) /*!< DWT CTRL: CPIEVTENA Mask */ + +#define DWT_CTRL_EXCTRCENA_Pos 16U /*!< DWT CTRL: EXCTRCENA Position */ +#define DWT_CTRL_EXCTRCENA_Msk (0x1UL << DWT_CTRL_EXCTRCENA_Pos) /*!< DWT CTRL: EXCTRCENA Mask */ + +#define DWT_CTRL_PCSAMPLENA_Pos 12U /*!< DWT CTRL: PCSAMPLENA Position */ +#define DWT_CTRL_PCSAMPLENA_Msk (0x1UL << DWT_CTRL_PCSAMPLENA_Pos) /*!< DWT CTRL: PCSAMPLENA Mask */ + +#define DWT_CTRL_SYNCTAP_Pos 10U /*!< DWT CTRL: SYNCTAP Position */ +#define DWT_CTRL_SYNCTAP_Msk (0x3UL << DWT_CTRL_SYNCTAP_Pos) /*!< DWT CTRL: SYNCTAP Mask */ + +#define DWT_CTRL_CYCTAP_Pos 9U /*!< DWT CTRL: CYCTAP Position */ +#define DWT_CTRL_CYCTAP_Msk (0x1UL << DWT_CTRL_CYCTAP_Pos) /*!< DWT CTRL: CYCTAP Mask */ + +#define DWT_CTRL_POSTINIT_Pos 5U /*!< DWT CTRL: POSTINIT Position */ +#define DWT_CTRL_POSTINIT_Msk (0xFUL << DWT_CTRL_POSTINIT_Pos) /*!< DWT CTRL: POSTINIT Mask */ + +#define DWT_CTRL_POSTPRESET_Pos 1U 
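/*
 * [Editor's illustrative note - not part of the original patch]
 * The DWT_CTRL_* bits defined here, together with CoreDebug_DEMCR_TRCENA
 * (defined further below), are what a typical cycle-counting helper is built
 * from, e.g. for coarse profiling of the mros2/micro-ROS tasks. A minimal
 * sketch, assuming the DWT and CoreDebug instance macros declared later in
 * this header; it is kept inside a comment so it does not alter the header:
 *
 *   static inline void dwt_cyccnt_start(void)
 *   {
 *     CoreDebug->DEMCR |= CoreDebug_DEMCR_TRCENA_Msk;  // enable the DWT/ITM trace blocks
 *     DWT->CYCCNT = 0U;                                // clear the cycle counter
 *     DWT->CTRL  |= DWT_CTRL_CYCCNTENA_Msk;            // start counting core clock cycles
 *   }
 *
 *   static inline uint32_t dwt_cyccnt_read(void)
 *   {
 *     return DWT->CYCCNT;                              // elapsed cycles, wraps after 2^32
 *   }
 */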
/*!< DWT CTRL: POSTPRESET Position */ +#define DWT_CTRL_POSTPRESET_Msk (0xFUL << DWT_CTRL_POSTPRESET_Pos) /*!< DWT CTRL: POSTPRESET Mask */ + +#define DWT_CTRL_CYCCNTENA_Pos 0U /*!< DWT CTRL: CYCCNTENA Position */ +#define DWT_CTRL_CYCCNTENA_Msk (0x1UL /*<< DWT_CTRL_CYCCNTENA_Pos*/) /*!< DWT CTRL: CYCCNTENA Mask */ + +/* DWT CPI Count Register Definitions */ +#define DWT_CPICNT_CPICNT_Pos 0U /*!< DWT CPICNT: CPICNT Position */ +#define DWT_CPICNT_CPICNT_Msk (0xFFUL /*<< DWT_CPICNT_CPICNT_Pos*/) /*!< DWT CPICNT: CPICNT Mask */ + +/* DWT Exception Overhead Count Register Definitions */ +#define DWT_EXCCNT_EXCCNT_Pos 0U /*!< DWT EXCCNT: EXCCNT Position */ +#define DWT_EXCCNT_EXCCNT_Msk (0xFFUL /*<< DWT_EXCCNT_EXCCNT_Pos*/) /*!< DWT EXCCNT: EXCCNT Mask */ + +/* DWT Sleep Count Register Definitions */ +#define DWT_SLEEPCNT_SLEEPCNT_Pos 0U /*!< DWT SLEEPCNT: SLEEPCNT Position */ +#define DWT_SLEEPCNT_SLEEPCNT_Msk (0xFFUL /*<< DWT_SLEEPCNT_SLEEPCNT_Pos*/) /*!< DWT SLEEPCNT: SLEEPCNT Mask */ + +/* DWT LSU Count Register Definitions */ +#define DWT_LSUCNT_LSUCNT_Pos 0U /*!< DWT LSUCNT: LSUCNT Position */ +#define DWT_LSUCNT_LSUCNT_Msk (0xFFUL /*<< DWT_LSUCNT_LSUCNT_Pos*/) /*!< DWT LSUCNT: LSUCNT Mask */ + +/* DWT Folded-instruction Count Register Definitions */ +#define DWT_FOLDCNT_FOLDCNT_Pos 0U /*!< DWT FOLDCNT: FOLDCNT Position */ +#define DWT_FOLDCNT_FOLDCNT_Msk (0xFFUL /*<< DWT_FOLDCNT_FOLDCNT_Pos*/) /*!< DWT FOLDCNT: FOLDCNT Mask */ + +/* DWT Comparator Function Register Definitions */ +#define DWT_FUNCTION_ID_Pos 27U /*!< DWT FUNCTION: ID Position */ +#define DWT_FUNCTION_ID_Msk (0x1FUL << DWT_FUNCTION_ID_Pos) /*!< DWT FUNCTION: ID Mask */ + +#define DWT_FUNCTION_MATCHED_Pos 24U /*!< DWT FUNCTION: MATCHED Position */ +#define DWT_FUNCTION_MATCHED_Msk (0x1UL << DWT_FUNCTION_MATCHED_Pos) /*!< DWT FUNCTION: MATCHED Mask */ + +#define DWT_FUNCTION_DATAVSIZE_Pos 10U /*!< DWT FUNCTION: DATAVSIZE Position */ +#define DWT_FUNCTION_DATAVSIZE_Msk (0x3UL << DWT_FUNCTION_DATAVSIZE_Pos) /*!< DWT FUNCTION: DATAVSIZE Mask */ + +#define DWT_FUNCTION_ACTION_Pos 4U /*!< DWT FUNCTION: ACTION Position */ +#define DWT_FUNCTION_ACTION_Msk (0x1UL << DWT_FUNCTION_ACTION_Pos) /*!< DWT FUNCTION: ACTION Mask */ + +#define DWT_FUNCTION_MATCH_Pos 0U /*!< DWT FUNCTION: MATCH Position */ +#define DWT_FUNCTION_MATCH_Msk (0xFUL /*<< DWT_FUNCTION_MATCH_Pos*/) /*!< DWT FUNCTION: MATCH Mask */ + +/*@}*/ /* end of group CMSIS_DWT */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_TPI Trace Port Interface (TPI) + \brief Type definitions for the Trace Port Interface (TPI) + @{ + */ + +/** + \brief Structure type to access the Trace Port Interface Register (TPI). 
+ */ +typedef struct +{ + __IM uint32_t SSPSR; /*!< Offset: 0x000 (R/ ) Supported Parallel Port Size Register */ + __IOM uint32_t CSPSR; /*!< Offset: 0x004 (R/W) Current Parallel Port Size Register */ + uint32_t RESERVED0[2U]; + __IOM uint32_t ACPR; /*!< Offset: 0x010 (R/W) Asynchronous Clock Prescaler Register */ + uint32_t RESERVED1[55U]; + __IOM uint32_t SPPR; /*!< Offset: 0x0F0 (R/W) Selected Pin Protocol Register */ + uint32_t RESERVED2[131U]; + __IM uint32_t FFSR; /*!< Offset: 0x300 (R/ ) Formatter and Flush Status Register */ + __IOM uint32_t FFCR; /*!< Offset: 0x304 (R/W) Formatter and Flush Control Register */ + __IOM uint32_t PSCR; /*!< Offset: 0x308 (R/W) Periodic Synchronization Control Register */ + uint32_t RESERVED3[759U]; + __IM uint32_t TRIGGER; /*!< Offset: 0xEE8 (R/ ) TRIGGER Register */ + __IM uint32_t ITFTTD0; /*!< Offset: 0xEEC (R/ ) Integration Test FIFO Test Data 0 Register */ + __IOM uint32_t ITATBCTR2; /*!< Offset: 0xEF0 (R/W) Integration Test ATB Control Register 2 */ + uint32_t RESERVED4[1U]; + __IM uint32_t ITATBCTR0; /*!< Offset: 0xEF8 (R/ ) Integration Test ATB Control Register 0 */ + __IM uint32_t ITFTTD1; /*!< Offset: 0xEFC (R/ ) Integration Test FIFO Test Data 1 Register */ + __IOM uint32_t ITCTRL; /*!< Offset: 0xF00 (R/W) Integration Mode Control */ + uint32_t RESERVED5[39U]; + __IOM uint32_t CLAIMSET; /*!< Offset: 0xFA0 (R/W) Claim tag set */ + __IOM uint32_t CLAIMCLR; /*!< Offset: 0xFA4 (R/W) Claim tag clear */ + uint32_t RESERVED7[8U]; + __IM uint32_t DEVID; /*!< Offset: 0xFC8 (R/ ) Device Configuration Register */ + __IM uint32_t DEVTYPE; /*!< Offset: 0xFCC (R/ ) Device Type Identifier Register */ +} TPI_Type; + +/* TPI Asynchronous Clock Prescaler Register Definitions */ +#define TPI_ACPR_PRESCALER_Pos 0U /*!< TPI ACPR: PRESCALER Position */ +#define TPI_ACPR_PRESCALER_Msk (0x1FFFUL /*<< TPI_ACPR_PRESCALER_Pos*/) /*!< TPI ACPR: PRESCALER Mask */ + +/* TPI Selected Pin Protocol Register Definitions */ +#define TPI_SPPR_TXMODE_Pos 0U /*!< TPI SPPR: TXMODE Position */ +#define TPI_SPPR_TXMODE_Msk (0x3UL /*<< TPI_SPPR_TXMODE_Pos*/) /*!< TPI SPPR: TXMODE Mask */ + +/* TPI Formatter and Flush Status Register Definitions */ +#define TPI_FFSR_FtNonStop_Pos 3U /*!< TPI FFSR: FtNonStop Position */ +#define TPI_FFSR_FtNonStop_Msk (0x1UL << TPI_FFSR_FtNonStop_Pos) /*!< TPI FFSR: FtNonStop Mask */ + +#define TPI_FFSR_TCPresent_Pos 2U /*!< TPI FFSR: TCPresent Position */ +#define TPI_FFSR_TCPresent_Msk (0x1UL << TPI_FFSR_TCPresent_Pos) /*!< TPI FFSR: TCPresent Mask */ + +#define TPI_FFSR_FtStopped_Pos 1U /*!< TPI FFSR: FtStopped Position */ +#define TPI_FFSR_FtStopped_Msk (0x1UL << TPI_FFSR_FtStopped_Pos) /*!< TPI FFSR: FtStopped Mask */ + +#define TPI_FFSR_FlInProg_Pos 0U /*!< TPI FFSR: FlInProg Position */ +#define TPI_FFSR_FlInProg_Msk (0x1UL /*<< TPI_FFSR_FlInProg_Pos*/) /*!< TPI FFSR: FlInProg Mask */ + +/* TPI Formatter and Flush Control Register Definitions */ +#define TPI_FFCR_TrigIn_Pos 8U /*!< TPI FFCR: TrigIn Position */ +#define TPI_FFCR_TrigIn_Msk (0x1UL << TPI_FFCR_TrigIn_Pos) /*!< TPI FFCR: TrigIn Mask */ + +#define TPI_FFCR_FOnMan_Pos 6U /*!< TPI FFCR: FOnMan Position */ +#define TPI_FFCR_FOnMan_Msk (0x1UL << TPI_FFCR_FOnMan_Pos) /*!< TPI FFCR: FOnMan Mask */ + +#define TPI_FFCR_EnFCont_Pos 1U /*!< TPI FFCR: EnFCont Position */ +#define TPI_FFCR_EnFCont_Msk (0x1UL << TPI_FFCR_EnFCont_Pos) /*!< TPI FFCR: EnFCont Mask */ + +/* TPI TRIGGER Register Definitions */ +#define TPI_TRIGGER_TRIGGER_Pos 0U /*!< TPI TRIGGER: TRIGGER Position */ +#define 
TPI_TRIGGER_TRIGGER_Msk            (0x1UL /*<< TPI_TRIGGER_TRIGGER_Pos*/)          /*!< TPI TRIGGER: TRIGGER Mask */
+
+/* TPI Integration Test FIFO Test Data 0 Register Definitions */
+#define TPI_ITFTTD0_ATB_IF2_ATVALID_Pos    29U                                          /*!< TPI ITFTTD0: ATB Interface 2 ATVALID Position */
+#define TPI_ITFTTD0_ATB_IF2_ATVALID_Msk    (0x3UL << TPI_ITFTTD0_ATB_IF2_ATVALID_Pos)   /*!< TPI ITFTTD0: ATB Interface 2 ATVALID Mask */
+
+#define TPI_ITFTTD0_ATB_IF2_bytecount_Pos  27U                                          /*!< TPI ITFTTD0: ATB Interface 2 byte count Position */
+#define TPI_ITFTTD0_ATB_IF2_bytecount_Msk  (0x3UL << TPI_ITFTTD0_ATB_IF2_bytecount_Pos) /*!< TPI ITFTTD0: ATB Interface 2 byte count Mask */
+
+#define TPI_ITFTTD0_ATB_IF1_ATVALID_Pos    26U                                          /*!< TPI ITFTTD0: ATB Interface 1 ATVALID Position */
+#define TPI_ITFTTD0_ATB_IF1_ATVALID_Msk    (0x3UL << TPI_ITFTTD0_ATB_IF1_ATVALID_Pos)   /*!< TPI ITFTTD0: ATB Interface 1 ATVALID Mask */
+
+#define TPI_ITFTTD0_ATB_IF1_bytecount_Pos  24U                                          /*!< TPI ITFTTD0: ATB Interface 1 byte count Position */
+#define TPI_ITFTTD0_ATB_IF1_bytecount_Msk  (0x3UL << TPI_ITFTTD0_ATB_IF1_bytecount_Pos) /*!< TPI ITFTTD0: ATB Interface 1 byte count Mask */
+
+#define TPI_ITFTTD0_ATB_IF1_data2_Pos      16U                                          /*!< TPI ITFTTD0: ATB Interface 1 data2 Position */
+#define TPI_ITFTTD0_ATB_IF1_data2_Msk      (0xFFUL << TPI_ITFTTD0_ATB_IF1_data2_Pos)    /*!< TPI ITFTTD0: ATB Interface 1 data2 Mask */
+
+#define TPI_ITFTTD0_ATB_IF1_data1_Pos      8U                                           /*!< TPI ITFTTD0: ATB Interface 1 data1 Position */
+#define TPI_ITFTTD0_ATB_IF1_data1_Msk      (0xFFUL << TPI_ITFTTD0_ATB_IF1_data1_Pos)    /*!< TPI ITFTTD0: ATB Interface 1 data1 Mask */
+
+#define TPI_ITFTTD0_ATB_IF1_data0_Pos      0U                                           /*!< TPI ITFTTD0: ATB Interface 1 data0 Position */
+#define TPI_ITFTTD0_ATB_IF1_data0_Msk      (0xFFUL /*<< TPI_ITFTTD0_ATB_IF1_data0_Pos*/) /*!< TPI ITFTTD0: ATB Interface 1 data0 Mask */
+
+/* TPI Integration Test ATB Control Register 2 Register Definitions */
+#define TPI_ITATBCTR2_AFVALID2S_Pos        1U                                           /*!< TPI ITATBCTR2: AFVALID2S Position */
+#define TPI_ITATBCTR2_AFVALID2S_Msk        (0x1UL << TPI_ITATBCTR2_AFVALID2S_Pos)       /*!< TPI ITATBCTR2: AFVALID2S Mask */
+
+#define TPI_ITATBCTR2_AFVALID1S_Pos        1U                                           /*!< TPI ITATBCTR2: AFVALID1S Position */
+#define TPI_ITATBCTR2_AFVALID1S_Msk        (0x1UL << TPI_ITATBCTR2_AFVALID1S_Pos)       /*!< TPI ITATBCTR2: AFVALID1S Mask */
+
+#define TPI_ITATBCTR2_ATREADY2S_Pos        0U                                           /*!< TPI ITATBCTR2: ATREADY2S Position */
+#define TPI_ITATBCTR2_ATREADY2S_Msk        (0x1UL /*<< TPI_ITATBCTR2_ATREADY2S_Pos*/)   /*!< TPI ITATBCTR2: ATREADY2S Mask */
+
+#define TPI_ITATBCTR2_ATREADY1S_Pos        0U                                           /*!< TPI ITATBCTR2: ATREADY1S Position */
+#define TPI_ITATBCTR2_ATREADY1S_Msk        (0x1UL /*<< TPI_ITATBCTR2_ATREADY1S_Pos*/)   /*!< TPI ITATBCTR2: ATREADY1S Mask */
+
+/* TPI Integration Test FIFO Test Data 1 Register Definitions */
+#define TPI_ITFTTD1_ATB_IF2_ATVALID_Pos    29U                                          /*!< TPI ITFTTD1: ATB Interface 2 ATVALID Position */
+#define TPI_ITFTTD1_ATB_IF2_ATVALID_Msk    (0x3UL << TPI_ITFTTD1_ATB_IF2_ATVALID_Pos)   /*!< TPI ITFTTD1: ATB Interface 2 ATVALID Mask */
+
+#define TPI_ITFTTD1_ATB_IF2_bytecount_Pos  27U                                          /*!< TPI ITFTTD1: ATB Interface 2 byte count Position */
+#define TPI_ITFTTD1_ATB_IF2_bytecount_Msk  (0x3UL << TPI_ITFTTD1_ATB_IF2_bytecount_Pos) /*!< TPI ITFTTD1: ATB Interface 2 byte count Mask */
+
+#define TPI_ITFTTD1_ATB_IF1_ATVALID_Pos    26U                                          /*!< TPI ITFTTD1: ATB Interface 1 ATVALID Position */
+#define TPI_ITFTTD1_ATB_IF1_ATVALID_Msk    (0x3UL << TPI_ITFTTD1_ATB_IF1_ATVALID_Pos)   /*!< TPI ITFTTD1: ATB Interface 1 ATVALID Mask */
+
+#define TPI_ITFTTD1_ATB_IF1_bytecount_Pos  24U                                          /*!< TPI ITFTTD1: ATB Interface 1 byte count Position */
+#define TPI_ITFTTD1_ATB_IF1_bytecount_Msk  (0x3UL << TPI_ITFTTD1_ATB_IF1_bytecount_Pos) /*!< TPI ITFTTD1: ATB Interface 1 byte count Mask */
+
+#define TPI_ITFTTD1_ATB_IF2_data2_Pos      16U                                          /*!< TPI ITFTTD1: ATB Interface 2 data2 Position */
+#define TPI_ITFTTD1_ATB_IF2_data2_Msk      (0xFFUL << TPI_ITFTTD1_ATB_IF2_data2_Pos)    /*!< TPI ITFTTD1: ATB Interface 2 data2 Mask */
+
+#define TPI_ITFTTD1_ATB_IF2_data1_Pos      8U                                           /*!< TPI ITFTTD1: ATB Interface 2 data1 Position */
+#define TPI_ITFTTD1_ATB_IF2_data1_Msk      (0xFFUL << TPI_ITFTTD1_ATB_IF2_data1_Pos)    /*!< TPI ITFTTD1: ATB Interface 2 data1 Mask */
+
+#define TPI_ITFTTD1_ATB_IF2_data0_Pos      0U                                           /*!< TPI ITFTTD1: ATB Interface 2 data0 Position */
+#define TPI_ITFTTD1_ATB_IF2_data0_Msk      (0xFFUL /*<< TPI_ITFTTD1_ATB_IF2_data0_Pos*/) /*!< TPI ITFTTD1: ATB Interface 2 data0 Mask */
+
+/* TPI Integration Test ATB Control Register 0 Definitions */
+#define TPI_ITATBCTR0_AFVALID2S_Pos        1U                                           /*!< TPI ITATBCTR0: AFVALID2S Position */
+#define TPI_ITATBCTR0_AFVALID2S_Msk        (0x1UL << TPI_ITATBCTR0_AFVALID2S_Pos)       /*!< TPI ITATBCTR0: AFVALID2S Mask */
+
+#define TPI_ITATBCTR0_AFVALID1S_Pos        1U                                           /*!< TPI ITATBCTR0: AFVALID1S Position */
+#define TPI_ITATBCTR0_AFVALID1S_Msk        (0x1UL << TPI_ITATBCTR0_AFVALID1S_Pos)       /*!< TPI ITATBCTR0: AFVALID1S Mask */
+
+#define TPI_ITATBCTR0_ATREADY2S_Pos        0U                                           /*!< TPI ITATBCTR0: ATREADY2S Position */
+#define TPI_ITATBCTR0_ATREADY2S_Msk        (0x1UL /*<< TPI_ITATBCTR0_ATREADY2S_Pos*/)   /*!< TPI ITATBCTR0: ATREADY2S Mask */
+
+#define TPI_ITATBCTR0_ATREADY1S_Pos        0U                                           /*!< TPI ITATBCTR0: ATREADY1S Position */
+#define TPI_ITATBCTR0_ATREADY1S_Msk        (0x1UL /*<< TPI_ITATBCTR0_ATREADY1S_Pos*/)   /*!< TPI ITATBCTR0: ATREADY1S Mask */
+
+/* TPI Integration Mode Control Register Definitions */
+#define TPI_ITCTRL_Mode_Pos                0U                                           /*!< TPI ITCTRL: Mode Position */
+#define TPI_ITCTRL_Mode_Msk                (0x3UL /*<< TPI_ITCTRL_Mode_Pos*/)           /*!< TPI ITCTRL: Mode Mask */
+
+/* TPI DEVID Register Definitions */
+#define TPI_DEVID_NRZVALID_Pos             11U                                          /*!< TPI DEVID: NRZVALID Position */
+#define TPI_DEVID_NRZVALID_Msk             (0x1UL << TPI_DEVID_NRZVALID_Pos)            /*!< TPI DEVID: NRZVALID Mask */
+
+#define TPI_DEVID_MANCVALID_Pos            10U                                          /*!< TPI DEVID: MANCVALID Position */
+#define TPI_DEVID_MANCVALID_Msk            (0x1UL << TPI_DEVID_MANCVALID_Pos)           /*!< TPI DEVID: MANCVALID Mask */
+
+#define TPI_DEVID_PTINVALID_Pos            9U                                           /*!< TPI DEVID: PTINVALID Position */
+#define TPI_DEVID_PTINVALID_Msk            (0x1UL << TPI_DEVID_PTINVALID_Pos)           /*!< TPI DEVID: PTINVALID Mask */
+
+#define TPI_DEVID_FIFOSZ_Pos               6U                                           /*!< TPI DEVID: FIFOSZ Position */
+#define TPI_DEVID_FIFOSZ_Msk               (0x7UL << TPI_DEVID_FIFOSZ_Pos)              /*!< TPI DEVID: FIFOSZ Mask */
+
+#define TPI_DEVID_NrTraceInput_Pos         0U                                           /*!< TPI DEVID: NrTraceInput Position */
+#define TPI_DEVID_NrTraceInput_Msk         (0x3FUL /*<< TPI_DEVID_NrTraceInput_Pos*/)   /*!< TPI DEVID: NrTraceInput Mask */
+
+/* TPI DEVTYPE Register Definitions */
+#define TPI_DEVTYPE_SubType_Pos            4U                                           /*!< TPI DEVTYPE: SubType Position */
+#define TPI_DEVTYPE_SubType_Msk            (0xFUL << TPI_DEVTYPE_SubType_Pos)           /*!< TPI DEVTYPE: SubType Mask */
+
+#define TPI_DEVTYPE_MajorType_Pos          0U                                           /*!< TPI DEVTYPE: MajorType Position */
+#define TPI_DEVTYPE_MajorType_Msk          (0xFUL /*<< TPI_DEVTYPE_MajorType_Pos*/)     /*!< TPI DEVTYPE: MajorType Mask */
+
+/*@}*/ /* end of group CMSIS_TPI */
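/*
 * [Editor's illustrative note - not part of the original patch]
 * The TPI_ACPR, TPI_SPPR and TPI_FFCR definitions above are the fields usually
 * touched when routing ITM trace out of the SWO pin. A minimal sketch, assuming
 * the TPI instance macro declared later in this header and the CMSIS
 * SystemCoreClock variable (and that TRCENA has already been set in
 * CoreDebug->DEMCR); kept inside a comment so it does not alter the header:
 *
 *   static inline void swo_init(uint32_t swo_baud)
 *   {
 *     // prescale the trace clock down to the desired SWO bit rate
 *     TPI->ACPR = ((SystemCoreClock / swo_baud) - 1U) & TPI_ACPR_PRESCALER_Msk;
 *     // select NRZ (UART-like) encoding for the serial wire output
 *     TPI->SPPR = 2U & TPI_SPPR_TXMODE_Msk;
 *     // disable continuous formatting so raw ITM data appears on SWO
 *     TPI->FFCR &= ~TPI_FFCR_EnFCont_Msk;
 *   }
 */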
+
+
+#if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U)
+/**
+  \ingroup  CMSIS_core_register
+  \defgroup CMSIS_MPU     Memory Protection Unit (MPU)
+  \brief    Type definitions for the Memory Protection Unit (MPU)
+  @{
+ */
+
+/**
+  \brief  Structure type to 
access the Memory Protection Unit (MPU). + */ +typedef struct +{ + __IM uint32_t TYPE; /*!< Offset: 0x000 (R/ ) MPU Type Register */ + __IOM uint32_t CTRL; /*!< Offset: 0x004 (R/W) MPU Control Register */ + __IOM uint32_t RNR; /*!< Offset: 0x008 (R/W) MPU Region Number Register */ + __IOM uint32_t RBAR; /*!< Offset: 0x00C (R/W) MPU Region Base Address Register */ + __IOM uint32_t RLAR; /*!< Offset: 0x010 (R/W) MPU Region Limit Address Register */ + __IOM uint32_t RBAR_A1; /*!< Offset: 0x014 (R/W) MPU Region Base Address Register Alias 1 */ + __IOM uint32_t RLAR_A1; /*!< Offset: 0x018 (R/W) MPU Region Limit Address Register Alias 1 */ + __IOM uint32_t RBAR_A2; /*!< Offset: 0x01C (R/W) MPU Region Base Address Register Alias 2 */ + __IOM uint32_t RLAR_A2; /*!< Offset: 0x020 (R/W) MPU Region Limit Address Register Alias 2 */ + __IOM uint32_t RBAR_A3; /*!< Offset: 0x024 (R/W) MPU Region Base Address Register Alias 3 */ + __IOM uint32_t RLAR_A3; /*!< Offset: 0x028 (R/W) MPU Region Limit Address Register Alias 3 */ + uint32_t RESERVED0[1]; + union { + __IOM uint32_t MAIR[2]; + struct { + __IOM uint32_t MAIR0; /*!< Offset: 0x030 (R/W) MPU Memory Attribute Indirection Register 0 */ + __IOM uint32_t MAIR1; /*!< Offset: 0x034 (R/W) MPU Memory Attribute Indirection Register 1 */ + }; + }; +} MPU_Type; + +#define MPU_TYPE_RALIASES 4U + +/* MPU Type Register Definitions */ +#define MPU_TYPE_IREGION_Pos 16U /*!< MPU TYPE: IREGION Position */ +#define MPU_TYPE_IREGION_Msk (0xFFUL << MPU_TYPE_IREGION_Pos) /*!< MPU TYPE: IREGION Mask */ + +#define MPU_TYPE_DREGION_Pos 8U /*!< MPU TYPE: DREGION Position */ +#define MPU_TYPE_DREGION_Msk (0xFFUL << MPU_TYPE_DREGION_Pos) /*!< MPU TYPE: DREGION Mask */ + +#define MPU_TYPE_SEPARATE_Pos 0U /*!< MPU TYPE: SEPARATE Position */ +#define MPU_TYPE_SEPARATE_Msk (1UL /*<< MPU_TYPE_SEPARATE_Pos*/) /*!< MPU TYPE: SEPARATE Mask */ + +/* MPU Control Register Definitions */ +#define MPU_CTRL_PRIVDEFENA_Pos 2U /*!< MPU CTRL: PRIVDEFENA Position */ +#define MPU_CTRL_PRIVDEFENA_Msk (1UL << MPU_CTRL_PRIVDEFENA_Pos) /*!< MPU CTRL: PRIVDEFENA Mask */ + +#define MPU_CTRL_HFNMIENA_Pos 1U /*!< MPU CTRL: HFNMIENA Position */ +#define MPU_CTRL_HFNMIENA_Msk (1UL << MPU_CTRL_HFNMIENA_Pos) /*!< MPU CTRL: HFNMIENA Mask */ + +#define MPU_CTRL_ENABLE_Pos 0U /*!< MPU CTRL: ENABLE Position */ +#define MPU_CTRL_ENABLE_Msk (1UL /*<< MPU_CTRL_ENABLE_Pos*/) /*!< MPU CTRL: ENABLE Mask */ + +/* MPU Region Number Register Definitions */ +#define MPU_RNR_REGION_Pos 0U /*!< MPU RNR: REGION Position */ +#define MPU_RNR_REGION_Msk (0xFFUL /*<< MPU_RNR_REGION_Pos*/) /*!< MPU RNR: REGION Mask */ + +/* MPU Region Base Address Register Definitions */ +#define MPU_RBAR_BASE_Pos 5U /*!< MPU RBAR: BASE Position */ +#define MPU_RBAR_BASE_Msk (0x7FFFFFFUL << MPU_RBAR_BASE_Pos) /*!< MPU RBAR: BASE Mask */ + +#define MPU_RBAR_SH_Pos 3U /*!< MPU RBAR: SH Position */ +#define MPU_RBAR_SH_Msk (0x3UL << MPU_RBAR_SH_Pos) /*!< MPU RBAR: SH Mask */ + +#define MPU_RBAR_AP_Pos 1U /*!< MPU RBAR: AP Position */ +#define MPU_RBAR_AP_Msk (0x3UL << MPU_RBAR_AP_Pos) /*!< MPU RBAR: AP Mask */ + +#define MPU_RBAR_XN_Pos 0U /*!< MPU RBAR: XN Position */ +#define MPU_RBAR_XN_Msk (01UL /*<< MPU_RBAR_XN_Pos*/) /*!< MPU RBAR: XN Mask */ + +/* MPU Region Limit Address Register Definitions */ +#define MPU_RLAR_LIMIT_Pos 5U /*!< MPU RLAR: LIMIT Position */ +#define MPU_RLAR_LIMIT_Msk (0x7FFFFFFUL << MPU_RLAR_LIMIT_Pos) /*!< MPU RLAR: LIMIT Mask */ + +#define MPU_RLAR_AttrIndx_Pos 1U /*!< MPU RLAR: AttrIndx Position */ +#define 
MPU_RLAR_AttrIndx_Msk (0x7UL << MPU_RLAR_AttrIndx_Pos) /*!< MPU RLAR: AttrIndx Mask */ + +#define MPU_RLAR_EN_Pos 0U /*!< MPU RLAR: Region enable bit Position */ +#define MPU_RLAR_EN_Msk (1UL /*<< MPU_RLAR_EN_Pos*/) /*!< MPU RLAR: Region enable bit Disable Mask */ + +/* MPU Memory Attribute Indirection Register 0 Definitions */ +#define MPU_MAIR0_Attr3_Pos 24U /*!< MPU MAIR0: Attr3 Position */ +#define MPU_MAIR0_Attr3_Msk (0xFFUL << MPU_MAIR0_Attr3_Pos) /*!< MPU MAIR0: Attr3 Mask */ + +#define MPU_MAIR0_Attr2_Pos 16U /*!< MPU MAIR0: Attr2 Position */ +#define MPU_MAIR0_Attr2_Msk (0xFFUL << MPU_MAIR0_Attr2_Pos) /*!< MPU MAIR0: Attr2 Mask */ + +#define MPU_MAIR0_Attr1_Pos 8U /*!< MPU MAIR0: Attr1 Position */ +#define MPU_MAIR0_Attr1_Msk (0xFFUL << MPU_MAIR0_Attr1_Pos) /*!< MPU MAIR0: Attr1 Mask */ + +#define MPU_MAIR0_Attr0_Pos 0U /*!< MPU MAIR0: Attr0 Position */ +#define MPU_MAIR0_Attr0_Msk (0xFFUL /*<< MPU_MAIR0_Attr0_Pos*/) /*!< MPU MAIR0: Attr0 Mask */ + +/* MPU Memory Attribute Indirection Register 1 Definitions */ +#define MPU_MAIR1_Attr7_Pos 24U /*!< MPU MAIR1: Attr7 Position */ +#define MPU_MAIR1_Attr7_Msk (0xFFUL << MPU_MAIR1_Attr7_Pos) /*!< MPU MAIR1: Attr7 Mask */ + +#define MPU_MAIR1_Attr6_Pos 16U /*!< MPU MAIR1: Attr6 Position */ +#define MPU_MAIR1_Attr6_Msk (0xFFUL << MPU_MAIR1_Attr6_Pos) /*!< MPU MAIR1: Attr6 Mask */ + +#define MPU_MAIR1_Attr5_Pos 8U /*!< MPU MAIR1: Attr5 Position */ +#define MPU_MAIR1_Attr5_Msk (0xFFUL << MPU_MAIR1_Attr5_Pos) /*!< MPU MAIR1: Attr5 Mask */ + +#define MPU_MAIR1_Attr4_Pos 0U /*!< MPU MAIR1: Attr4 Position */ +#define MPU_MAIR1_Attr4_Msk (0xFFUL /*<< MPU_MAIR1_Attr4_Pos*/) /*!< MPU MAIR1: Attr4 Mask */ + +/*@} end of group CMSIS_MPU */ +#endif + + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SAU Security Attribution Unit (SAU) + \brief Type definitions for the Security Attribution Unit (SAU) + @{ + */ + +/** + \brief Structure type to access the Security Attribution Unit (SAU). 
+ */ +typedef struct +{ + __IOM uint32_t CTRL; /*!< Offset: 0x000 (R/W) SAU Control Register */ + __IM uint32_t TYPE; /*!< Offset: 0x004 (R/ ) SAU Type Register */ +#if defined (__SAUREGION_PRESENT) && (__SAUREGION_PRESENT == 1U) + __IOM uint32_t RNR; /*!< Offset: 0x008 (R/W) SAU Region Number Register */ + __IOM uint32_t RBAR; /*!< Offset: 0x00C (R/W) SAU Region Base Address Register */ + __IOM uint32_t RLAR; /*!< Offset: 0x010 (R/W) SAU Region Limit Address Register */ +#else + uint32_t RESERVED0[3]; +#endif + __IOM uint32_t SFSR; /*!< Offset: 0x014 (R/W) Secure Fault Status Register */ + __IOM uint32_t SFAR; /*!< Offset: 0x018 (R/W) Secure Fault Address Register */ +} SAU_Type; + +/* SAU Control Register Definitions */ +#define SAU_CTRL_ALLNS_Pos 1U /*!< SAU CTRL: ALLNS Position */ +#define SAU_CTRL_ALLNS_Msk (1UL << SAU_CTRL_ALLNS_Pos) /*!< SAU CTRL: ALLNS Mask */ + +#define SAU_CTRL_ENABLE_Pos 0U /*!< SAU CTRL: ENABLE Position */ +#define SAU_CTRL_ENABLE_Msk (1UL /*<< SAU_CTRL_ENABLE_Pos*/) /*!< SAU CTRL: ENABLE Mask */ + +/* SAU Type Register Definitions */ +#define SAU_TYPE_SREGION_Pos 0U /*!< SAU TYPE: SREGION Position */ +#define SAU_TYPE_SREGION_Msk (0xFFUL /*<< SAU_TYPE_SREGION_Pos*/) /*!< SAU TYPE: SREGION Mask */ + +#if defined (__SAUREGION_PRESENT) && (__SAUREGION_PRESENT == 1U) +/* SAU Region Number Register Definitions */ +#define SAU_RNR_REGION_Pos 0U /*!< SAU RNR: REGION Position */ +#define SAU_RNR_REGION_Msk (0xFFUL /*<< SAU_RNR_REGION_Pos*/) /*!< SAU RNR: REGION Mask */ + +/* SAU Region Base Address Register Definitions */ +#define SAU_RBAR_BADDR_Pos 5U /*!< SAU RBAR: BADDR Position */ +#define SAU_RBAR_BADDR_Msk (0x7FFFFFFUL << SAU_RBAR_BADDR_Pos) /*!< SAU RBAR: BADDR Mask */ + +/* SAU Region Limit Address Register Definitions */ +#define SAU_RLAR_LADDR_Pos 5U /*!< SAU RLAR: LADDR Position */ +#define SAU_RLAR_LADDR_Msk (0x7FFFFFFUL << SAU_RLAR_LADDR_Pos) /*!< SAU RLAR: LADDR Mask */ + +#define SAU_RLAR_NSC_Pos 1U /*!< SAU RLAR: NSC Position */ +#define SAU_RLAR_NSC_Msk (1UL << SAU_RLAR_NSC_Pos) /*!< SAU RLAR: NSC Mask */ + +#define SAU_RLAR_ENABLE_Pos 0U /*!< SAU RLAR: ENABLE Position */ +#define SAU_RLAR_ENABLE_Msk (1UL /*<< SAU_RLAR_ENABLE_Pos*/) /*!< SAU RLAR: ENABLE Mask */ + +#endif /* defined (__SAUREGION_PRESENT) && (__SAUREGION_PRESENT == 1U) */ + +/* Secure Fault Status Register Definitions */ +#define SAU_SFSR_LSERR_Pos 7U /*!< SAU SFSR: LSERR Position */ +#define SAU_SFSR_LSERR_Msk (1UL << SAU_SFSR_LSERR_Pos) /*!< SAU SFSR: LSERR Mask */ + +#define SAU_SFSR_SFARVALID_Pos 6U /*!< SAU SFSR: SFARVALID Position */ +#define SAU_SFSR_SFARVALID_Msk (1UL << SAU_SFSR_SFARVALID_Pos) /*!< SAU SFSR: SFARVALID Mask */ + +#define SAU_SFSR_LSPERR_Pos 5U /*!< SAU SFSR: LSPERR Position */ +#define SAU_SFSR_LSPERR_Msk (1UL << SAU_SFSR_LSPERR_Pos) /*!< SAU SFSR: LSPERR Mask */ + +#define SAU_SFSR_INVTRAN_Pos 4U /*!< SAU SFSR: INVTRAN Position */ +#define SAU_SFSR_INVTRAN_Msk (1UL << SAU_SFSR_INVTRAN_Pos) /*!< SAU SFSR: INVTRAN Mask */ + +#define SAU_SFSR_AUVIOL_Pos 3U /*!< SAU SFSR: AUVIOL Position */ +#define SAU_SFSR_AUVIOL_Msk (1UL << SAU_SFSR_AUVIOL_Pos) /*!< SAU SFSR: AUVIOL Mask */ + +#define SAU_SFSR_INVER_Pos 2U /*!< SAU SFSR: INVER Position */ +#define SAU_SFSR_INVER_Msk (1UL << SAU_SFSR_INVER_Pos) /*!< SAU SFSR: INVER Mask */ + +#define SAU_SFSR_INVIS_Pos 1U /*!< SAU SFSR: INVIS Position */ +#define SAU_SFSR_INVIS_Msk (1UL << SAU_SFSR_INVIS_Pos) /*!< SAU SFSR: INVIS Mask */ + +#define SAU_SFSR_INVEP_Pos 0U /*!< SAU SFSR: INVEP Position */ +#define 
SAU_SFSR_INVEP_Msk (1UL /*<< SAU_SFSR_INVEP_Pos*/) /*!< SAU SFSR: INVEP Mask */ + +/*@} end of group CMSIS_SAU */ +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_FPU Floating Point Unit (FPU) + \brief Type definitions for the Floating Point Unit (FPU) + @{ + */ + +/** + \brief Structure type to access the Floating Point Unit (FPU). + */ +typedef struct +{ + uint32_t RESERVED0[1U]; + __IOM uint32_t FPCCR; /*!< Offset: 0x004 (R/W) Floating-Point Context Control Register */ + __IOM uint32_t FPCAR; /*!< Offset: 0x008 (R/W) Floating-Point Context Address Register */ + __IOM uint32_t FPDSCR; /*!< Offset: 0x00C (R/W) Floating-Point Default Status Control Register */ + __IM uint32_t MVFR0; /*!< Offset: 0x010 (R/ ) Media and FP Feature Register 0 */ + __IM uint32_t MVFR1; /*!< Offset: 0x014 (R/ ) Media and FP Feature Register 1 */ +} FPU_Type; + +/* Floating-Point Context Control Register Definitions */ +#define FPU_FPCCR_ASPEN_Pos 31U /*!< FPCCR: ASPEN bit Position */ +#define FPU_FPCCR_ASPEN_Msk (1UL << FPU_FPCCR_ASPEN_Pos) /*!< FPCCR: ASPEN bit Mask */ + +#define FPU_FPCCR_LSPEN_Pos 30U /*!< FPCCR: LSPEN Position */ +#define FPU_FPCCR_LSPEN_Msk (1UL << FPU_FPCCR_LSPEN_Pos) /*!< FPCCR: LSPEN bit Mask */ + +#define FPU_FPCCR_LSPENS_Pos 29U /*!< FPCCR: LSPENS Position */ +#define FPU_FPCCR_LSPENS_Msk (1UL << FPU_FPCCR_LSPENS_Pos) /*!< FPCCR: LSPENS bit Mask */ + +#define FPU_FPCCR_CLRONRET_Pos 28U /*!< FPCCR: CLRONRET Position */ +#define FPU_FPCCR_CLRONRET_Msk (1UL << FPU_FPCCR_CLRONRET_Pos) /*!< FPCCR: CLRONRET bit Mask */ + +#define FPU_FPCCR_CLRONRETS_Pos 27U /*!< FPCCR: CLRONRETS Position */ +#define FPU_FPCCR_CLRONRETS_Msk (1UL << FPU_FPCCR_CLRONRETS_Pos) /*!< FPCCR: CLRONRETS bit Mask */ + +#define FPU_FPCCR_TS_Pos 26U /*!< FPCCR: TS Position */ +#define FPU_FPCCR_TS_Msk (1UL << FPU_FPCCR_TS_Pos) /*!< FPCCR: TS bit Mask */ + +#define FPU_FPCCR_UFRDY_Pos 10U /*!< FPCCR: UFRDY Position */ +#define FPU_FPCCR_UFRDY_Msk (1UL << FPU_FPCCR_UFRDY_Pos) /*!< FPCCR: UFRDY bit Mask */ + +#define FPU_FPCCR_SPLIMVIOL_Pos 9U /*!< FPCCR: SPLIMVIOL Position */ +#define FPU_FPCCR_SPLIMVIOL_Msk (1UL << FPU_FPCCR_SPLIMVIOL_Pos) /*!< FPCCR: SPLIMVIOL bit Mask */ + +#define FPU_FPCCR_MONRDY_Pos 8U /*!< FPCCR: MONRDY Position */ +#define FPU_FPCCR_MONRDY_Msk (1UL << FPU_FPCCR_MONRDY_Pos) /*!< FPCCR: MONRDY bit Mask */ + +#define FPU_FPCCR_SFRDY_Pos 7U /*!< FPCCR: SFRDY Position */ +#define FPU_FPCCR_SFRDY_Msk (1UL << FPU_FPCCR_SFRDY_Pos) /*!< FPCCR: SFRDY bit Mask */ + +#define FPU_FPCCR_BFRDY_Pos 6U /*!< FPCCR: BFRDY Position */ +#define FPU_FPCCR_BFRDY_Msk (1UL << FPU_FPCCR_BFRDY_Pos) /*!< FPCCR: BFRDY bit Mask */ + +#define FPU_FPCCR_MMRDY_Pos 5U /*!< FPCCR: MMRDY Position */ +#define FPU_FPCCR_MMRDY_Msk (1UL << FPU_FPCCR_MMRDY_Pos) /*!< FPCCR: MMRDY bit Mask */ + +#define FPU_FPCCR_HFRDY_Pos 4U /*!< FPCCR: HFRDY Position */ +#define FPU_FPCCR_HFRDY_Msk (1UL << FPU_FPCCR_HFRDY_Pos) /*!< FPCCR: HFRDY bit Mask */ + +#define FPU_FPCCR_THREAD_Pos 3U /*!< FPCCR: processor mode bit Position */ +#define FPU_FPCCR_THREAD_Msk (1UL << FPU_FPCCR_THREAD_Pos) /*!< FPCCR: processor mode active bit Mask */ + +#define FPU_FPCCR_S_Pos 2U /*!< FPCCR: Security status of the FP context bit Position */ +#define FPU_FPCCR_S_Msk (1UL << FPU_FPCCR_S_Pos) /*!< FPCCR: Security status of the FP context bit Mask */ + +#define FPU_FPCCR_USER_Pos 1U /*!< FPCCR: privilege level bit Position */ +#define FPU_FPCCR_USER_Msk (1UL << FPU_FPCCR_USER_Pos) /*!< FPCCR: 
privilege level bit Mask */ + +#define FPU_FPCCR_LSPACT_Pos 0U /*!< FPCCR: Lazy state preservation active bit Position */ +#define FPU_FPCCR_LSPACT_Msk (1UL /*<< FPU_FPCCR_LSPACT_Pos*/) /*!< FPCCR: Lazy state preservation active bit Mask */ + +/* Floating-Point Context Address Register Definitions */ +#define FPU_FPCAR_ADDRESS_Pos 3U /*!< FPCAR: ADDRESS bit Position */ +#define FPU_FPCAR_ADDRESS_Msk (0x1FFFFFFFUL << FPU_FPCAR_ADDRESS_Pos) /*!< FPCAR: ADDRESS bit Mask */ + +/* Floating-Point Default Status Control Register Definitions */ +#define FPU_FPDSCR_AHP_Pos 26U /*!< FPDSCR: AHP bit Position */ +#define FPU_FPDSCR_AHP_Msk (1UL << FPU_FPDSCR_AHP_Pos) /*!< FPDSCR: AHP bit Mask */ + +#define FPU_FPDSCR_DN_Pos 25U /*!< FPDSCR: DN bit Position */ +#define FPU_FPDSCR_DN_Msk (1UL << FPU_FPDSCR_DN_Pos) /*!< FPDSCR: DN bit Mask */ + +#define FPU_FPDSCR_FZ_Pos 24U /*!< FPDSCR: FZ bit Position */ +#define FPU_FPDSCR_FZ_Msk (1UL << FPU_FPDSCR_FZ_Pos) /*!< FPDSCR: FZ bit Mask */ + +#define FPU_FPDSCR_RMode_Pos 22U /*!< FPDSCR: RMode bit Position */ +#define FPU_FPDSCR_RMode_Msk (3UL << FPU_FPDSCR_RMode_Pos) /*!< FPDSCR: RMode bit Mask */ + +/* Media and FP Feature Register 0 Definitions */ +#define FPU_MVFR0_FP_rounding_modes_Pos 28U /*!< MVFR0: FP rounding modes bits Position */ +#define FPU_MVFR0_FP_rounding_modes_Msk (0xFUL << FPU_MVFR0_FP_rounding_modes_Pos) /*!< MVFR0: FP rounding modes bits Mask */ + +#define FPU_MVFR0_Short_vectors_Pos 24U /*!< MVFR0: Short vectors bits Position */ +#define FPU_MVFR0_Short_vectors_Msk (0xFUL << FPU_MVFR0_Short_vectors_Pos) /*!< MVFR0: Short vectors bits Mask */ + +#define FPU_MVFR0_Square_root_Pos 20U /*!< MVFR0: Square root bits Position */ +#define FPU_MVFR0_Square_root_Msk (0xFUL << FPU_MVFR0_Square_root_Pos) /*!< MVFR0: Square root bits Mask */ + +#define FPU_MVFR0_Divide_Pos 16U /*!< MVFR0: Divide bits Position */ +#define FPU_MVFR0_Divide_Msk (0xFUL << FPU_MVFR0_Divide_Pos) /*!< MVFR0: Divide bits Mask */ + +#define FPU_MVFR0_FP_excep_trapping_Pos 12U /*!< MVFR0: FP exception trapping bits Position */ +#define FPU_MVFR0_FP_excep_trapping_Msk (0xFUL << FPU_MVFR0_FP_excep_trapping_Pos) /*!< MVFR0: FP exception trapping bits Mask */ + +#define FPU_MVFR0_Double_precision_Pos 8U /*!< MVFR0: Double-precision bits Position */ +#define FPU_MVFR0_Double_precision_Msk (0xFUL << FPU_MVFR0_Double_precision_Pos) /*!< MVFR0: Double-precision bits Mask */ + +#define FPU_MVFR0_Single_precision_Pos 4U /*!< MVFR0: Single-precision bits Position */ +#define FPU_MVFR0_Single_precision_Msk (0xFUL << FPU_MVFR0_Single_precision_Pos) /*!< MVFR0: Single-precision bits Mask */ + +#define FPU_MVFR0_A_SIMD_registers_Pos 0U /*!< MVFR0: A_SIMD registers bits Position */ +#define FPU_MVFR0_A_SIMD_registers_Msk (0xFUL /*<< FPU_MVFR0_A_SIMD_registers_Pos*/) /*!< MVFR0: A_SIMD registers bits Mask */ + +/* Media and FP Feature Register 1 Definitions */ +#define FPU_MVFR1_FP_fused_MAC_Pos 28U /*!< MVFR1: FP fused MAC bits Position */ +#define FPU_MVFR1_FP_fused_MAC_Msk (0xFUL << FPU_MVFR1_FP_fused_MAC_Pos) /*!< MVFR1: FP fused MAC bits Mask */ + +#define FPU_MVFR1_FP_HPFP_Pos 24U /*!< MVFR1: FP HPFP bits Position */ +#define FPU_MVFR1_FP_HPFP_Msk (0xFUL << FPU_MVFR1_FP_HPFP_Pos) /*!< MVFR1: FP HPFP bits Mask */ + +#define FPU_MVFR1_D_NaN_mode_Pos 4U /*!< MVFR1: D_NaN mode bits Position */ +#define FPU_MVFR1_D_NaN_mode_Msk (0xFUL << FPU_MVFR1_D_NaN_mode_Pos) /*!< MVFR1: D_NaN mode bits Mask */ + +#define FPU_MVFR1_FtZ_mode_Pos 0U /*!< MVFR1: FtZ mode bits Position */ +#define 
FPU_MVFR1_FtZ_mode_Msk (0xFUL /*<< FPU_MVFR1_FtZ_mode_Pos*/) /*!< MVFR1: FtZ mode bits Mask */ + +/*@} end of group CMSIS_FPU */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_CoreDebug Core Debug Registers (CoreDebug) + \brief Type definitions for the Core Debug Registers + @{ + */ + +/** + \brief Structure type to access the Core Debug Register (CoreDebug). + */ +typedef struct +{ + __IOM uint32_t DHCSR; /*!< Offset: 0x000 (R/W) Debug Halting Control and Status Register */ + __OM uint32_t DCRSR; /*!< Offset: 0x004 ( /W) Debug Core Register Selector Register */ + __IOM uint32_t DCRDR; /*!< Offset: 0x008 (R/W) Debug Core Register Data Register */ + __IOM uint32_t DEMCR; /*!< Offset: 0x00C (R/W) Debug Exception and Monitor Control Register */ + uint32_t RESERVED4[1U]; + __IOM uint32_t DAUTHCTRL; /*!< Offset: 0x014 (R/W) Debug Authentication Control Register */ + __IOM uint32_t DSCSR; /*!< Offset: 0x018 (R/W) Debug Security Control and Status Register */ +} CoreDebug_Type; + +/* Debug Halting Control and Status Register Definitions */ +#define CoreDebug_DHCSR_DBGKEY_Pos 16U /*!< CoreDebug DHCSR: DBGKEY Position */ +#define CoreDebug_DHCSR_DBGKEY_Msk (0xFFFFUL << CoreDebug_DHCSR_DBGKEY_Pos) /*!< CoreDebug DHCSR: DBGKEY Mask */ + +#define CoreDebug_DHCSR_S_RESTART_ST_Pos 26U /*!< CoreDebug DHCSR: S_RESTART_ST Position */ +#define CoreDebug_DHCSR_S_RESTART_ST_Msk (1UL << CoreDebug_DHCSR_S_RESTART_ST_Pos) /*!< CoreDebug DHCSR: S_RESTART_ST Mask */ + +#define CoreDebug_DHCSR_S_RESET_ST_Pos 25U /*!< CoreDebug DHCSR: S_RESET_ST Position */ +#define CoreDebug_DHCSR_S_RESET_ST_Msk (1UL << CoreDebug_DHCSR_S_RESET_ST_Pos) /*!< CoreDebug DHCSR: S_RESET_ST Mask */ + +#define CoreDebug_DHCSR_S_RETIRE_ST_Pos 24U /*!< CoreDebug DHCSR: S_RETIRE_ST Position */ +#define CoreDebug_DHCSR_S_RETIRE_ST_Msk (1UL << CoreDebug_DHCSR_S_RETIRE_ST_Pos) /*!< CoreDebug DHCSR: S_RETIRE_ST Mask */ + +#define CoreDebug_DHCSR_S_LOCKUP_Pos 19U /*!< CoreDebug DHCSR: S_LOCKUP Position */ +#define CoreDebug_DHCSR_S_LOCKUP_Msk (1UL << CoreDebug_DHCSR_S_LOCKUP_Pos) /*!< CoreDebug DHCSR: S_LOCKUP Mask */ + +#define CoreDebug_DHCSR_S_SLEEP_Pos 18U /*!< CoreDebug DHCSR: S_SLEEP Position */ +#define CoreDebug_DHCSR_S_SLEEP_Msk (1UL << CoreDebug_DHCSR_S_SLEEP_Pos) /*!< CoreDebug DHCSR: S_SLEEP Mask */ + +#define CoreDebug_DHCSR_S_HALT_Pos 17U /*!< CoreDebug DHCSR: S_HALT Position */ +#define CoreDebug_DHCSR_S_HALT_Msk (1UL << CoreDebug_DHCSR_S_HALT_Pos) /*!< CoreDebug DHCSR: S_HALT Mask */ + +#define CoreDebug_DHCSR_S_REGRDY_Pos 16U /*!< CoreDebug DHCSR: S_REGRDY Position */ +#define CoreDebug_DHCSR_S_REGRDY_Msk (1UL << CoreDebug_DHCSR_S_REGRDY_Pos) /*!< CoreDebug DHCSR: S_REGRDY Mask */ + +#define CoreDebug_DHCSR_C_SNAPSTALL_Pos 5U /*!< CoreDebug DHCSR: C_SNAPSTALL Position */ +#define CoreDebug_DHCSR_C_SNAPSTALL_Msk (1UL << CoreDebug_DHCSR_C_SNAPSTALL_Pos) /*!< CoreDebug DHCSR: C_SNAPSTALL Mask */ + +#define CoreDebug_DHCSR_C_MASKINTS_Pos 3U /*!< CoreDebug DHCSR: C_MASKINTS Position */ +#define CoreDebug_DHCSR_C_MASKINTS_Msk (1UL << CoreDebug_DHCSR_C_MASKINTS_Pos) /*!< CoreDebug DHCSR: C_MASKINTS Mask */ + +#define CoreDebug_DHCSR_C_STEP_Pos 2U /*!< CoreDebug DHCSR: C_STEP Position */ +#define CoreDebug_DHCSR_C_STEP_Msk (1UL << CoreDebug_DHCSR_C_STEP_Pos) /*!< CoreDebug DHCSR: C_STEP Mask */ + +#define CoreDebug_DHCSR_C_HALT_Pos 1U /*!< CoreDebug DHCSR: C_HALT Position */ +#define CoreDebug_DHCSR_C_HALT_Msk (1UL << CoreDebug_DHCSR_C_HALT_Pos) /*!< CoreDebug DHCSR: C_HALT Mask */ + +#define CoreDebug_DHCSR_C_DEBUGEN_Pos 0U 
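/*
 * [Editor's illustrative note - not part of the original patch]
 * C_DEBUGEN (defined just below) can only be set over the debug port, so
 * reading it back is a common way for firmware to decide whether a debugger is
 * attached, e.g. before executing a breakpoint instruction. A minimal sketch,
 * assuming the CoreDebug instance macro declared later in this header; kept
 * inside a comment so it does not alter the header:
 *
 *   static inline int debugger_is_attached(void)
 *   {
 *     // bit 0 of DHCSR reflects whether halting debug is enabled by a host
 *     return (CoreDebug->DHCSR & CoreDebug_DHCSR_C_DEBUGEN_Msk) != 0U;
 *   }
 */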
/*!< CoreDebug DHCSR: C_DEBUGEN Position */ +#define CoreDebug_DHCSR_C_DEBUGEN_Msk (1UL /*<< CoreDebug_DHCSR_C_DEBUGEN_Pos*/) /*!< CoreDebug DHCSR: C_DEBUGEN Mask */ + +/* Debug Core Register Selector Register Definitions */ +#define CoreDebug_DCRSR_REGWnR_Pos 16U /*!< CoreDebug DCRSR: REGWnR Position */ +#define CoreDebug_DCRSR_REGWnR_Msk (1UL << CoreDebug_DCRSR_REGWnR_Pos) /*!< CoreDebug DCRSR: REGWnR Mask */ + +#define CoreDebug_DCRSR_REGSEL_Pos 0U /*!< CoreDebug DCRSR: REGSEL Position */ +#define CoreDebug_DCRSR_REGSEL_Msk (0x1FUL /*<< CoreDebug_DCRSR_REGSEL_Pos*/) /*!< CoreDebug DCRSR: REGSEL Mask */ + +/* Debug Exception and Monitor Control Register Definitions */ +#define CoreDebug_DEMCR_TRCENA_Pos 24U /*!< CoreDebug DEMCR: TRCENA Position */ +#define CoreDebug_DEMCR_TRCENA_Msk (1UL << CoreDebug_DEMCR_TRCENA_Pos) /*!< CoreDebug DEMCR: TRCENA Mask */ + +#define CoreDebug_DEMCR_MON_REQ_Pos 19U /*!< CoreDebug DEMCR: MON_REQ Position */ +#define CoreDebug_DEMCR_MON_REQ_Msk (1UL << CoreDebug_DEMCR_MON_REQ_Pos) /*!< CoreDebug DEMCR: MON_REQ Mask */ + +#define CoreDebug_DEMCR_MON_STEP_Pos 18U /*!< CoreDebug DEMCR: MON_STEP Position */ +#define CoreDebug_DEMCR_MON_STEP_Msk (1UL << CoreDebug_DEMCR_MON_STEP_Pos) /*!< CoreDebug DEMCR: MON_STEP Mask */ + +#define CoreDebug_DEMCR_MON_PEND_Pos 17U /*!< CoreDebug DEMCR: MON_PEND Position */ +#define CoreDebug_DEMCR_MON_PEND_Msk (1UL << CoreDebug_DEMCR_MON_PEND_Pos) /*!< CoreDebug DEMCR: MON_PEND Mask */ + +#define CoreDebug_DEMCR_MON_EN_Pos 16U /*!< CoreDebug DEMCR: MON_EN Position */ +#define CoreDebug_DEMCR_MON_EN_Msk (1UL << CoreDebug_DEMCR_MON_EN_Pos) /*!< CoreDebug DEMCR: MON_EN Mask */ + +#define CoreDebug_DEMCR_VC_HARDERR_Pos 10U /*!< CoreDebug DEMCR: VC_HARDERR Position */ +#define CoreDebug_DEMCR_VC_HARDERR_Msk (1UL << CoreDebug_DEMCR_VC_HARDERR_Pos) /*!< CoreDebug DEMCR: VC_HARDERR Mask */ + +#define CoreDebug_DEMCR_VC_INTERR_Pos 9U /*!< CoreDebug DEMCR: VC_INTERR Position */ +#define CoreDebug_DEMCR_VC_INTERR_Msk (1UL << CoreDebug_DEMCR_VC_INTERR_Pos) /*!< CoreDebug DEMCR: VC_INTERR Mask */ + +#define CoreDebug_DEMCR_VC_BUSERR_Pos 8U /*!< CoreDebug DEMCR: VC_BUSERR Position */ +#define CoreDebug_DEMCR_VC_BUSERR_Msk (1UL << CoreDebug_DEMCR_VC_BUSERR_Pos) /*!< CoreDebug DEMCR: VC_BUSERR Mask */ + +#define CoreDebug_DEMCR_VC_STATERR_Pos 7U /*!< CoreDebug DEMCR: VC_STATERR Position */ +#define CoreDebug_DEMCR_VC_STATERR_Msk (1UL << CoreDebug_DEMCR_VC_STATERR_Pos) /*!< CoreDebug DEMCR: VC_STATERR Mask */ + +#define CoreDebug_DEMCR_VC_CHKERR_Pos 6U /*!< CoreDebug DEMCR: VC_CHKERR Position */ +#define CoreDebug_DEMCR_VC_CHKERR_Msk (1UL << CoreDebug_DEMCR_VC_CHKERR_Pos) /*!< CoreDebug DEMCR: VC_CHKERR Mask */ + +#define CoreDebug_DEMCR_VC_NOCPERR_Pos 5U /*!< CoreDebug DEMCR: VC_NOCPERR Position */ +#define CoreDebug_DEMCR_VC_NOCPERR_Msk (1UL << CoreDebug_DEMCR_VC_NOCPERR_Pos) /*!< CoreDebug DEMCR: VC_NOCPERR Mask */ + +#define CoreDebug_DEMCR_VC_MMERR_Pos 4U /*!< CoreDebug DEMCR: VC_MMERR Position */ +#define CoreDebug_DEMCR_VC_MMERR_Msk (1UL << CoreDebug_DEMCR_VC_MMERR_Pos) /*!< CoreDebug DEMCR: VC_MMERR Mask */ + +#define CoreDebug_DEMCR_VC_CORERESET_Pos 0U /*!< CoreDebug DEMCR: VC_CORERESET Position */ +#define CoreDebug_DEMCR_VC_CORERESET_Msk (1UL /*<< CoreDebug_DEMCR_VC_CORERESET_Pos*/) /*!< CoreDebug DEMCR: VC_CORERESET Mask */ + +/* Debug Authentication Control Register Definitions */ +#define CoreDebug_DAUTHCTRL_INTSPNIDEN_Pos 3U /*!< CoreDebug DAUTHCTRL: INTSPNIDEN, Position */ +#define CoreDebug_DAUTHCTRL_INTSPNIDEN_Msk (1UL << 
CoreDebug_DAUTHCTRL_INTSPNIDEN_Pos)  /*!< CoreDebug DAUTHCTRL: INTSPNIDEN, Mask */
+
+#define CoreDebug_DAUTHCTRL_SPNIDENSEL_Pos  2U                                            /*!< CoreDebug DAUTHCTRL: SPNIDENSEL Position */
+#define CoreDebug_DAUTHCTRL_SPNIDENSEL_Msk (1UL << CoreDebug_DAUTHCTRL_SPNIDENSEL_Pos)    /*!< CoreDebug DAUTHCTRL: SPNIDENSEL Mask */
+
+#define CoreDebug_DAUTHCTRL_INTSPIDEN_Pos   1U                                            /*!< CoreDebug DAUTHCTRL: INTSPIDEN Position */
+#define CoreDebug_DAUTHCTRL_INTSPIDEN_Msk  (1UL << CoreDebug_DAUTHCTRL_INTSPIDEN_Pos)     /*!< CoreDebug DAUTHCTRL: INTSPIDEN Mask */
+
+#define CoreDebug_DAUTHCTRL_SPIDENSEL_Pos   0U                                            /*!< CoreDebug DAUTHCTRL: SPIDENSEL Position */
+#define CoreDebug_DAUTHCTRL_SPIDENSEL_Msk  (1UL /*<< CoreDebug_DAUTHCTRL_SPIDENSEL_Pos*/) /*!< CoreDebug DAUTHCTRL: SPIDENSEL Mask */
+
+/* Debug Security Control and Status Register Definitions */
+#define CoreDebug_DSCSR_CDS_Pos            16U                                            /*!< CoreDebug DSCSR: CDS Position */
+#define CoreDebug_DSCSR_CDS_Msk            (1UL << CoreDebug_DSCSR_CDS_Pos)               /*!< CoreDebug DSCSR: CDS Mask */
+
+#define CoreDebug_DSCSR_SBRSEL_Pos          1U                                            /*!< CoreDebug DSCSR: SBRSEL Position */
+#define CoreDebug_DSCSR_SBRSEL_Msk         (1UL << CoreDebug_DSCSR_SBRSEL_Pos)            /*!< CoreDebug DSCSR: SBRSEL Mask */
+
+#define CoreDebug_DSCSR_SBRSELEN_Pos        0U                                            /*!< CoreDebug DSCSR: SBRSELEN Position */
+#define CoreDebug_DSCSR_SBRSELEN_Msk       (1UL /*<< CoreDebug_DSCSR_SBRSELEN_Pos*/)      /*!< CoreDebug DSCSR: SBRSELEN Mask */
+
+/*@} end of group CMSIS_CoreDebug */
+
+
+/**
+  \ingroup    CMSIS_core_register
+  \defgroup   CMSIS_core_bitfield     Core register bit field macros
+  \brief      Macros for use with bit field definitions (xxx_Pos, xxx_Msk).
+  @{
+ */
+
+/**
+  \brief   Mask and shift a bit field value for use in a register bit range.
+  \param[in] field  Name of the register bit field.
+  \param[in] value  Value of the bit field. This parameter is interpreted as a uint32_t type.
+  \return           Masked and shifted value.
+*/
+#define _VAL2FLD(field, value)    (((uint32_t)(value) << field ## _Pos) & field ## _Msk)
+
+/**
+  \brief     Mask and shift a register value to extract a bit field value.
+  \param[in] field  Name of the register bit field.
+  \param[in] value  Value of register. This parameter is interpreted as a uint32_t type.
+  \return           Masked and shifted bit field value.
+*/
+#define _FLD2VAL(field, value)    (((uint32_t)(value) & field ## _Msk) >> field ## _Pos)
+
+/*@} end of group CMSIS_core_bitfield */
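/*
 * [Editor's illustrative note - not part of the original patch]
 * _VAL2FLD/_FLD2VAL pair any xxx_Pos/xxx_Msk definitions from this header with
 * a value, so field encoding and decoding does not have to repeat the
 * shift-and-mask by hand. A minimal sketch using the SysTick CTRL fields
 * defined earlier in this header; kept inside a comment so it does not alter
 * the header:
 *
 *   // read back the clock-source field of the SysTick control register
 *   uint32_t clksrc = _FLD2VAL(SysTick_CTRL_CLKSOURCE, SysTick->CTRL);
 *
 *   // build a control value with the enable and interrupt bits set
 *   uint32_t ctrl = _VAL2FLD(SysTick_CTRL_ENABLE, 1U)
 *                 | _VAL2FLD(SysTick_CTRL_TICKINT, 1U);
 */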
+ @{ + */ + +/* Memory mapping of Core Hardware */ + #define SCS_BASE (0xE000E000UL) /*!< System Control Space Base Address */ + #define ITM_BASE (0xE0000000UL) /*!< ITM Base Address */ + #define DWT_BASE (0xE0001000UL) /*!< DWT Base Address */ + #define TPI_BASE (0xE0040000UL) /*!< TPI Base Address */ + #define CoreDebug_BASE (0xE000EDF0UL) /*!< Core Debug Base Address */ + #define SysTick_BASE (SCS_BASE + 0x0010UL) /*!< SysTick Base Address */ + #define NVIC_BASE (SCS_BASE + 0x0100UL) /*!< NVIC Base Address */ + #define SCB_BASE (SCS_BASE + 0x0D00UL) /*!< System Control Block Base Address */ + + #define SCnSCB ((SCnSCB_Type *) SCS_BASE ) /*!< System control Register not in SCB */ + #define SCB ((SCB_Type *) SCB_BASE ) /*!< SCB configuration struct */ + #define SysTick ((SysTick_Type *) SysTick_BASE ) /*!< SysTick configuration struct */ + #define NVIC ((NVIC_Type *) NVIC_BASE ) /*!< NVIC configuration struct */ + #define ITM ((ITM_Type *) ITM_BASE ) /*!< ITM configuration struct */ + #define DWT ((DWT_Type *) DWT_BASE ) /*!< DWT configuration struct */ + #define TPI ((TPI_Type *) TPI_BASE ) /*!< TPI configuration struct */ + #define CoreDebug ((CoreDebug_Type *) CoreDebug_BASE ) /*!< Core Debug configuration struct */ + + #if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) + #define MPU_BASE (SCS_BASE + 0x0D90UL) /*!< Memory Protection Unit */ + #define MPU ((MPU_Type *) MPU_BASE ) /*!< Memory Protection Unit */ + #endif + + #if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) + #define SAU_BASE (SCS_BASE + 0x0DD0UL) /*!< Security Attribution Unit */ + #define SAU ((SAU_Type *) SAU_BASE ) /*!< Security Attribution Unit */ + #endif + + #define FPU_BASE (SCS_BASE + 0x0F30UL) /*!< Floating Point Unit */ + #define FPU ((FPU_Type *) FPU_BASE ) /*!< Floating Point Unit */ + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) + #define SCS_BASE_NS (0xE002E000UL) /*!< System Control Space Base Address (non-secure address space) */ + #define CoreDebug_BASE_NS (0xE002EDF0UL) /*!< Core Debug Base Address (non-secure address space) */ + #define SysTick_BASE_NS (SCS_BASE_NS + 0x0010UL) /*!< SysTick Base Address (non-secure address space) */ + #define NVIC_BASE_NS (SCS_BASE_NS + 0x0100UL) /*!< NVIC Base Address (non-secure address space) */ + #define SCB_BASE_NS (SCS_BASE_NS + 0x0D00UL) /*!< System Control Block Base Address (non-secure address space) */ + + #define SCnSCB_NS ((SCnSCB_Type *) SCS_BASE_NS ) /*!< System control Register not in SCB(non-secure address space) */ + #define SCB_NS ((SCB_Type *) SCB_BASE_NS ) /*!< SCB configuration struct (non-secure address space) */ + #define SysTick_NS ((SysTick_Type *) SysTick_BASE_NS ) /*!< SysTick configuration struct (non-secure address space) */ + #define NVIC_NS ((NVIC_Type *) NVIC_BASE_NS ) /*!< NVIC configuration struct (non-secure address space) */ + #define CoreDebug_NS ((CoreDebug_Type *) CoreDebug_BASE_NS) /*!< Core Debug configuration struct (non-secure address space) */ + + #if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) + #define MPU_BASE_NS (SCS_BASE_NS + 0x0D90UL) /*!< Memory Protection Unit (non-secure address space) */ + #define MPU_NS ((MPU_Type *) MPU_BASE_NS ) /*!< Memory Protection Unit (non-secure address space) */ + #endif + + #define FPU_BASE_NS (SCS_BASE_NS + 0x0F30UL) /*!< Floating Point Unit (non-secure address space) */ + #define FPU_NS ((FPU_Type *) FPU_BASE_NS ) /*!< Floating Point Unit (non-secure address space) */ + +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ +/*@} 
*/ + + + +/******************************************************************************* + * Hardware Abstraction Layer + Core Function Interface contains: + - Core NVIC Functions + - Core SysTick Functions + - Core Debug Functions + - Core Register Access Functions + ******************************************************************************/ +/** + \defgroup CMSIS_Core_FunctionInterface Functions and Instructions Reference +*/ + + + +/* ########################## NVIC functions #################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_NVICFunctions NVIC Functions + \brief Functions that manage interrupts and exceptions via the NVIC. + @{ + */ + +#ifdef CMSIS_NVIC_VIRTUAL + #ifndef CMSIS_NVIC_VIRTUAL_HEADER_FILE + #define CMSIS_NVIC_VIRTUAL_HEADER_FILE "cmsis_nvic_virtual.h" + #endif + #include CMSIS_NVIC_VIRTUAL_HEADER_FILE +#else + #define NVIC_SetPriorityGrouping __NVIC_SetPriorityGrouping + #define NVIC_GetPriorityGrouping __NVIC_GetPriorityGrouping + #define NVIC_EnableIRQ __NVIC_EnableIRQ + #define NVIC_GetEnableIRQ __NVIC_GetEnableIRQ + #define NVIC_DisableIRQ __NVIC_DisableIRQ + #define NVIC_GetPendingIRQ __NVIC_GetPendingIRQ + #define NVIC_SetPendingIRQ __NVIC_SetPendingIRQ + #define NVIC_ClearPendingIRQ __NVIC_ClearPendingIRQ + #define NVIC_GetActive __NVIC_GetActive + #define NVIC_SetPriority __NVIC_SetPriority + #define NVIC_GetPriority __NVIC_GetPriority + #define NVIC_SystemReset __NVIC_SystemReset +#endif /* CMSIS_NVIC_VIRTUAL */ + +#ifdef CMSIS_VECTAB_VIRTUAL + #ifndef CMSIS_VECTAB_VIRTUAL_HEADER_FILE + #define CMSIS_VECTAB_VIRTUAL_HEADER_FILE "cmsis_vectab_virtual.h" + #endif + #include CMSIS_VECTAB_VIRTUAL_HEADER_FILE +#else + #define NVIC_SetVector __NVIC_SetVector + #define NVIC_GetVector __NVIC_GetVector +#endif /* (CMSIS_VECTAB_VIRTUAL) */ + +#define NVIC_USER_IRQ_OFFSET 16 + + +/* Special LR values for Secure/Non-Secure call handling and exception handling */ + +/* Function Return Payload (from ARMv8-M Architecture Reference Manual) LR value on entry from Secure BLXNS */ +#define FNC_RETURN (0xFEFFFFFFUL) /* bit [0] ignored when processing a branch */ + +/* The following EXC_RETURN mask values are used to evaluate the LR on exception entry */ +#define EXC_RETURN_PREFIX (0xFF000000UL) /* bits [31:24] set to indicate an EXC_RETURN value */ +#define EXC_RETURN_S (0x00000040UL) /* bit [6] stack used to push registers: 0=Non-secure 1=Secure */ +#define EXC_RETURN_DCRS (0x00000020UL) /* bit [5] stacking rules for called registers: 0=skipped 1=saved */ +#define EXC_RETURN_FTYPE (0x00000010UL) /* bit [4] allocate stack for floating-point context: 0=done 1=skipped */ +#define EXC_RETURN_MODE (0x00000008UL) /* bit [3] processor mode for return: 0=Handler mode 1=Thread mode */ +#define EXC_RETURN_SPSEL (0x00000002UL) /* bit [1] stack pointer used to restore context: 0=MSP 1=PSP */ +#define EXC_RETURN_ES (0x00000001UL) /* bit [0] security state exception was taken to: 0=Non-secure 1=Secure */ + +/* Integrity Signature (from ARMv8-M Architecture Reference Manual) for exception context stacking */ +#if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) /* Value for processors with floating-point extension: */ +#define EXC_INTEGRITY_SIGNATURE (0xFEFA125AUL) /* bit [0] SFTC must match LR bit[4] EXC_RETURN_FTYPE */ +#else +#define EXC_INTEGRITY_SIGNATURE (0xFEFA125BUL) /* Value for processors without floating-point extension */ +#endif + + +/** + \brief Set Priority Grouping + \details Sets the priority grouping field using the 
required unlock sequence. + The parameter PriorityGroup is assigned to the field SCB->AIRCR [10:8] PRIGROUP field. + Only values from 0..7 are used. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS), the smallest possible priority group is set. + \param [in] PriorityGroup Priority grouping field. + */ +__STATIC_INLINE void __NVIC_SetPriorityGrouping(uint32_t PriorityGroup) +{ + uint32_t reg_value; + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + + reg_value = SCB->AIRCR; /* read old register configuration */ + reg_value &= ~((uint32_t)(SCB_AIRCR_VECTKEY_Msk | SCB_AIRCR_PRIGROUP_Msk)); /* clear bits to change */ + reg_value = (reg_value | + ((uint32_t)0x5FAUL << SCB_AIRCR_VECTKEY_Pos) | + (PriorityGroupTmp << 8U) ); /* Insert write key and priority group */ + SCB->AIRCR = reg_value; +} + + +/** + \brief Get Priority Grouping + \details Reads the priority grouping field from the NVIC Interrupt Controller. + \return Priority grouping field (SCB->AIRCR [10:8] PRIGROUP field). + */ +__STATIC_INLINE uint32_t __NVIC_GetPriorityGrouping(void) +{ + return ((uint32_t)((SCB->AIRCR & SCB_AIRCR_PRIGROUP_Msk) >> SCB_AIRCR_PRIGROUP_Pos)); +} + + +/** + \brief Enable Interrupt + \details Enables a device specific interrupt in the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_EnableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ISER[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Get Interrupt Enable status + \details Returns a device specific interrupt enable status from the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt is not enabled. + \return 1 Interrupt is enabled. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t __NVIC_GetEnableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->ISER[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Disable Interrupt + \details Disables a device specific interrupt in the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_DisableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ICER[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + __DSB(); + __ISB(); + } +} + + +/** + \brief Get Pending Interrupt + \details Reads the NVIC pending register and returns the pending bit for the specified device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt status is not pending. + \return 1 Interrupt status is pending. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t __NVIC_GetPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->ISPR[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Set Pending Interrupt + \details Sets the pending bit of a device specific interrupt in the NVIC pending register. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. 
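+
+   \par Example
+   A minimal usage sketch (not part of the original ARM header); TIMx_IRQn is a
+   placeholder for any non-negative, device-specific interrupt number from the
+   device header:
+   \code
+   NVIC_SetPendingIRQ(TIMx_IRQn);              // pend the interrupt from software
+   if (NVIC_GetPendingIRQ(TIMx_IRQn) != 0UL)   // pending bit is now set
+   {
+     NVIC_ClearPendingIRQ(TIMx_IRQn);          // and can be cleared again
+   }
+   \endcode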
+ */ +__STATIC_INLINE void __NVIC_SetPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ISPR[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Clear Pending Interrupt + \details Clears the pending bit of a device specific interrupt in the NVIC pending register. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_ClearPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ICPR[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Get Active Interrupt + \details Reads the active register in the NVIC and returns the active bit for the device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt status is not active. + \return 1 Interrupt status is active. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t __NVIC_GetActive(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->IABR[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \brief Get Interrupt Target State + \details Reads the interrupt target field in the NVIC and returns the interrupt target bit for the device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 if interrupt is assigned to Secure + \return 1 if interrupt is assigned to Non Secure + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t NVIC_GetTargetState(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->ITNS[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Set Interrupt Target State + \details Sets the interrupt target field in the NVIC and returns the interrupt target bit for the device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 if interrupt is assigned to Secure + 1 if interrupt is assigned to Non Secure + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t NVIC_SetTargetState(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ITNS[(((uint32_t)IRQn) >> 5UL)] |= ((uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL))); + return((uint32_t)(((NVIC->ITNS[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Clear Interrupt Target State + \details Clears the interrupt target field in the NVIC and returns the interrupt target bit for the device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 if interrupt is assigned to Secure + 1 if interrupt is assigned to Non Secure + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t NVIC_ClearTargetState(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ITNS[(((uint32_t)IRQn) >> 5UL)] &= ~((uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL))); + return((uint32_t)(((NVIC->ITNS[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + + +/** + \brief Set Interrupt Priority + \details Sets the priority of a device specific interrupt or a processor exception. 
+ The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \param [in] priority Priority to set. + \note The priority cannot be set for every processor exception. + */ +__STATIC_INLINE void __NVIC_SetPriority(IRQn_Type IRQn, uint32_t priority) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->IPR[((uint32_t)IRQn)] = (uint8_t)((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL); + } + else + { + SCB->SHPR[(((uint32_t)IRQn) & 0xFUL)-4UL] = (uint8_t)((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL); + } +} + + +/** + \brief Get Interrupt Priority + \details Reads the priority of a device specific interrupt or a processor exception. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \return Interrupt Priority. + Value is aligned automatically to the implemented priority bits of the microcontroller. + */ +__STATIC_INLINE uint32_t __NVIC_GetPriority(IRQn_Type IRQn) +{ + + if ((int32_t)(IRQn) >= 0) + { + return(((uint32_t)NVIC->IPR[((uint32_t)IRQn)] >> (8U - __NVIC_PRIO_BITS))); + } + else + { + return(((uint32_t)SCB->SHPR[(((uint32_t)IRQn) & 0xFUL)-4UL] >> (8U - __NVIC_PRIO_BITS))); + } +} + + +/** + \brief Encode Priority + \details Encodes the priority for an interrupt with the given priority group, + preemptive priority value, and subpriority value. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS), the smallest possible priority group is set. + \param [in] PriorityGroup Used priority group. + \param [in] PreemptPriority Preemptive priority value (starting from 0). + \param [in] SubPriority Subpriority value (starting from 0). + \return Encoded priority. Value can be used in the function \ref NVIC_SetPriority(). + */ +__STATIC_INLINE uint32_t NVIC_EncodePriority (uint32_t PriorityGroup, uint32_t PreemptPriority, uint32_t SubPriority) +{ + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + uint32_t PreemptPriorityBits; + uint32_t SubPriorityBits; + + PreemptPriorityBits = ((7UL - PriorityGroupTmp) > (uint32_t)(__NVIC_PRIO_BITS)) ? (uint32_t)(__NVIC_PRIO_BITS) : (uint32_t)(7UL - PriorityGroupTmp); + SubPriorityBits = ((PriorityGroupTmp + (uint32_t)(__NVIC_PRIO_BITS)) < (uint32_t)7UL) ? (uint32_t)0UL : (uint32_t)((PriorityGroupTmp - 7UL) + (uint32_t)(__NVIC_PRIO_BITS)); + + return ( + ((PreemptPriority & (uint32_t)((1UL << (PreemptPriorityBits)) - 1UL)) << SubPriorityBits) | + ((SubPriority & (uint32_t)((1UL << (SubPriorityBits )) - 1UL))) + ); +} + + +/** + \brief Decode Priority + \details Decodes an interrupt priority value with a given priority group to + preemptive priority value and subpriority value. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS) the smallest possible priority group is set. + \param [in] Priority Priority value, which can be retrieved with the function \ref NVIC_GetPriority(). + \param [in] PriorityGroup Used priority group. + \param [out] pPreemptPriority Preemptive priority value (starting from 0). + \param [out] pSubPriority Subpriority value (starting from 0). 
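+
+   \par Example
+   A minimal sketch (not part of the original ARM header) of the encode/decode round
+   trip; TIMx_IRQn is a placeholder for a device-specific interrupt number:
+   \code
+   uint32_t grouping = NVIC_GetPriorityGrouping();
+   NVIC_SetPriority(TIMx_IRQn, NVIC_EncodePriority(grouping, 1UL, 0UL));
+
+   uint32_t preempt, sub;
+   NVIC_DecodePriority(NVIC_GetPriority(TIMx_IRQn), grouping, &preempt, &sub);
+   \endcode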
+ */ +__STATIC_INLINE void NVIC_DecodePriority (uint32_t Priority, uint32_t PriorityGroup, uint32_t* const pPreemptPriority, uint32_t* const pSubPriority) +{ + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + uint32_t PreemptPriorityBits; + uint32_t SubPriorityBits; + + PreemptPriorityBits = ((7UL - PriorityGroupTmp) > (uint32_t)(__NVIC_PRIO_BITS)) ? (uint32_t)(__NVIC_PRIO_BITS) : (uint32_t)(7UL - PriorityGroupTmp); + SubPriorityBits = ((PriorityGroupTmp + (uint32_t)(__NVIC_PRIO_BITS)) < (uint32_t)7UL) ? (uint32_t)0UL : (uint32_t)((PriorityGroupTmp - 7UL) + (uint32_t)(__NVIC_PRIO_BITS)); + + *pPreemptPriority = (Priority >> SubPriorityBits) & (uint32_t)((1UL << (PreemptPriorityBits)) - 1UL); + *pSubPriority = (Priority ) & (uint32_t)((1UL << (SubPriorityBits )) - 1UL); +} + + +/** + \brief Set Interrupt Vector + \details Sets an interrupt vector in SRAM based interrupt vector table. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + VTOR must been relocated to SRAM before. + \param [in] IRQn Interrupt number + \param [in] vector Address of interrupt handler function + */ +__STATIC_INLINE void __NVIC_SetVector(IRQn_Type IRQn, uint32_t vector) +{ + uint32_t *vectors = (uint32_t *)SCB->VTOR; + vectors[(int32_t)IRQn + NVIC_USER_IRQ_OFFSET] = vector; +} + + +/** + \brief Get Interrupt Vector + \details Reads an interrupt vector from interrupt vector table. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \return Address of interrupt handler function + */ +__STATIC_INLINE uint32_t __NVIC_GetVector(IRQn_Type IRQn) +{ + uint32_t *vectors = (uint32_t *)SCB->VTOR; + return vectors[(int32_t)IRQn + NVIC_USER_IRQ_OFFSET]; +} + + +/** + \brief System Reset + \details Initiates a system reset request to reset the MCU. + */ +__NO_RETURN __STATIC_INLINE void __NVIC_SystemReset(void) +{ + __DSB(); /* Ensure all outstanding memory accesses included + buffered write are completed before reset */ + SCB->AIRCR = (uint32_t)((0x5FAUL << SCB_AIRCR_VECTKEY_Pos) | + (SCB->AIRCR & SCB_AIRCR_PRIGROUP_Msk) | + SCB_AIRCR_SYSRESETREQ_Msk ); /* Keep priority group unchanged */ + __DSB(); /* Ensure completion of memory access */ + + for(;;) /* wait until reset */ + { + __NOP(); + } +} + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \brief Set Priority Grouping (non-secure) + \details Sets the non-secure priority grouping field when in secure state using the required unlock sequence. + The parameter PriorityGroup is assigned to the field SCB->AIRCR [10:8] PRIGROUP field. + Only values from 0..7 are used. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS), the smallest possible priority group is set. + \param [in] PriorityGroup Priority grouping field. 
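+
+   \par Example
+   A minimal sketch (not part of the original ARM header), callable from the secure
+   state only; the effective preempt/sub split depends on __NVIC_PRIO_BITS:
+   \code
+   TZ_NVIC_SetPriorityGrouping_NS(3UL);                      // PRIGROUP value 0..7
+   uint32_t ns_group = TZ_NVIC_GetPriorityGrouping_NS();     // reads back 3
+   \endcode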
+ */ +__STATIC_INLINE void TZ_NVIC_SetPriorityGrouping_NS(uint32_t PriorityGroup) +{ + uint32_t reg_value; + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + + reg_value = SCB_NS->AIRCR; /* read old register configuration */ + reg_value &= ~((uint32_t)(SCB_AIRCR_VECTKEY_Msk | SCB_AIRCR_PRIGROUP_Msk)); /* clear bits to change */ + reg_value = (reg_value | + ((uint32_t)0x5FAUL << SCB_AIRCR_VECTKEY_Pos) | + (PriorityGroupTmp << SCB_AIRCR_PRIGROUP_Pos) ); /* Insert write key and priority group */ + SCB_NS->AIRCR = reg_value; +} + + +/** + \brief Get Priority Grouping (non-secure) + \details Reads the priority grouping field from the non-secure NVIC when in secure state. + \return Priority grouping field (SCB->AIRCR [10:8] PRIGROUP field). + */ +__STATIC_INLINE uint32_t TZ_NVIC_GetPriorityGrouping_NS(void) +{ + return ((uint32_t)((SCB_NS->AIRCR & SCB_AIRCR_PRIGROUP_Msk) >> SCB_AIRCR_PRIGROUP_Pos)); +} + + +/** + \brief Enable Interrupt (non-secure) + \details Enables a device specific interrupt in the non-secure NVIC interrupt controller when in secure state. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void TZ_NVIC_EnableIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC_NS->ISER[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Get Interrupt Enable status (non-secure) + \details Returns a device specific interrupt enable status from the non-secure NVIC interrupt controller when in secure state. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt is not enabled. + \return 1 Interrupt is enabled. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t TZ_NVIC_GetEnableIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC_NS->ISER[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Disable Interrupt (non-secure) + \details Disables a device specific interrupt in the non-secure NVIC interrupt controller when in secure state. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void TZ_NVIC_DisableIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC_NS->ICER[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Get Pending Interrupt (non-secure) + \details Reads the NVIC pending register in the non-secure NVIC when in secure state and returns the pending bit for the specified device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt status is not pending. + \return 1 Interrupt status is pending. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t TZ_NVIC_GetPendingIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC_NS->ISPR[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Set Pending Interrupt (non-secure) + \details Sets the pending bit of a device specific interrupt in the non-secure NVIC pending register when in secure state. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. 
+ */ +__STATIC_INLINE void TZ_NVIC_SetPendingIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC_NS->ISPR[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Clear Pending Interrupt (non-secure) + \details Clears the pending bit of a device specific interrupt in the non-secure NVIC pending register when in secure state. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void TZ_NVIC_ClearPendingIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC_NS->ICPR[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Get Active Interrupt (non-secure) + \details Reads the active register in non-secure NVIC when in secure state and returns the active bit for the device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt status is not active. + \return 1 Interrupt status is active. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t TZ_NVIC_GetActive_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC_NS->IABR[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Set Interrupt Priority (non-secure) + \details Sets the priority of a non-secure device specific interrupt or a non-secure processor exception when in secure state. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \param [in] priority Priority to set. + \note The priority cannot be set for every non-secure processor exception. + */ +__STATIC_INLINE void TZ_NVIC_SetPriority_NS(IRQn_Type IRQn, uint32_t priority) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC_NS->IPR[((uint32_t)IRQn)] = (uint8_t)((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL); + } + else + { + SCB_NS->SHPR[(((uint32_t)IRQn) & 0xFUL)-4UL] = (uint8_t)((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL); + } +} + + +/** + \brief Get Interrupt Priority (non-secure) + \details Reads the priority of a non-secure device specific interrupt or a non-secure processor exception when in secure state. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \return Interrupt Priority. Value is aligned automatically to the implemented priority bits of the microcontroller. + */ +__STATIC_INLINE uint32_t TZ_NVIC_GetPriority_NS(IRQn_Type IRQn) +{ + + if ((int32_t)(IRQn) >= 0) + { + return(((uint32_t)NVIC_NS->IPR[((uint32_t)IRQn)] >> (8U - __NVIC_PRIO_BITS))); + } + else + { + return(((uint32_t)SCB_NS->SHPR[(((uint32_t)IRQn) & 0xFUL)-4UL] >> (8U - __NVIC_PRIO_BITS))); + } +} +#endif /* defined (__ARM_FEATURE_CMSE) &&(__ARM_FEATURE_CMSE == 3U) */ + +/*@} end of CMSIS_Core_NVICFunctions */ + +/* ########################## MPU functions #################################### */ + +#if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) + +#include "mpu_armv8.h" + +#endif + +/* ########################## FPU functions #################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_FpuFunctions FPU Functions + \brief Function that provides FPU type. 
+ @{ + */ + +/** + \brief get FPU type + \details returns the FPU type + \returns + - \b 0: No FPU + - \b 1: Single precision FPU + - \b 2: Double + Single precision FPU + */ +__STATIC_INLINE uint32_t SCB_GetFPUType(void) +{ + uint32_t mvfr0; + + mvfr0 = FPU->MVFR0; + if ((mvfr0 & (FPU_MVFR0_Single_precision_Msk | FPU_MVFR0_Double_precision_Msk)) == 0x220U) + { + return 2U; /* Double + Single precision FPU */ + } + else if ((mvfr0 & (FPU_MVFR0_Single_precision_Msk | FPU_MVFR0_Double_precision_Msk)) == 0x020U) + { + return 1U; /* Single precision FPU */ + } + else + { + return 0U; /* No FPU */ + } +} + + +/*@} end of CMSIS_Core_FpuFunctions */ + + + +/* ########################## SAU functions #################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_SAUFunctions SAU Functions + \brief Functions that configure the SAU. + @{ + */ + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) + +/** + \brief Enable SAU + \details Enables the Security Attribution Unit (SAU). + */ +__STATIC_INLINE void TZ_SAU_Enable(void) +{ + SAU->CTRL |= (SAU_CTRL_ENABLE_Msk); +} + + + +/** + \brief Disable SAU + \details Disables the Security Attribution Unit (SAU). + */ +__STATIC_INLINE void TZ_SAU_Disable(void) +{ + SAU->CTRL &= ~(SAU_CTRL_ENABLE_Msk); +} + +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + +/*@} end of CMSIS_Core_SAUFunctions */ + + + + +/* ################################## SysTick function ############################################ */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_SysTickFunctions SysTick Functions + \brief Functions that configure the System. + @{ + */ + +#if defined (__Vendor_SysTickConfig) && (__Vendor_SysTickConfig == 0U) + +/** + \brief System Tick Configuration + \details Initializes the System Timer and its interrupt, and starts the System Tick Timer. + Counter is in free running mode to generate periodic interrupts. + \param [in] ticks Number of ticks between two interrupts. + \return 0 Function succeeded. + \return 1 Function failed. + \note When the variable __Vendor_SysTickConfig is set to 1, then the + function SysTick_Config is not included. In this case, the file device.h + must contain a vendor-specific implementation of this function. + */ +__STATIC_INLINE uint32_t SysTick_Config(uint32_t ticks) +{ + if ((ticks - 1UL) > SysTick_LOAD_RELOAD_Msk) + { + return (1UL); /* Reload value impossible */ + } + + SysTick->LOAD = (uint32_t)(ticks - 1UL); /* set reload register */ + NVIC_SetPriority (SysTick_IRQn, (1UL << __NVIC_PRIO_BITS) - 1UL); /* set Priority for Systick Interrupt */ + SysTick->VAL = 0UL; /* Load the SysTick Counter Value */ + SysTick->CTRL = SysTick_CTRL_CLKSOURCE_Msk | + SysTick_CTRL_TICKINT_Msk | + SysTick_CTRL_ENABLE_Msk; /* Enable SysTick IRQ and SysTick Timer */ + return (0UL); /* Function successful */ +} + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \brief System Tick Configuration (non-secure) + \details Initializes the non-secure System Timer and its interrupt when in secure state, and starts the System Tick Timer. + Counter is in free running mode to generate periodic interrupts. + \param [in] ticks Number of ticks between two interrupts. + \return 0 Function succeeded. + \return 1 Function failed. + \note When the variable __Vendor_SysTickConfig is set to 1, then the + function TZ_SysTick_Config_NS is not included. In this case, the file device.h + must contain a vendor-specific implementation of this function. 
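+
+   \par Example
+   A minimal sketch (not part of the original ARM header), run from the secure state;
+   it assumes SystemCoreClock describes the non-secure SysTick input clock and that a
+   1 ms tick is wanted:
+   \code
+   if (TZ_SysTick_Config_NS(SystemCoreClock / 1000U) != 0UL)
+   {
+     // reload value does not fit into SysTick_LOAD_RELOAD_Msk
+   }
+   \endcode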
+ + */ +__STATIC_INLINE uint32_t TZ_SysTick_Config_NS(uint32_t ticks) +{ + if ((ticks - 1UL) > SysTick_LOAD_RELOAD_Msk) + { + return (1UL); /* Reload value impossible */ + } + + SysTick_NS->LOAD = (uint32_t)(ticks - 1UL); /* set reload register */ + TZ_NVIC_SetPriority_NS (SysTick_IRQn, (1UL << __NVIC_PRIO_BITS) - 1UL); /* set Priority for Systick Interrupt */ + SysTick_NS->VAL = 0UL; /* Load the SysTick Counter Value */ + SysTick_NS->CTRL = SysTick_CTRL_CLKSOURCE_Msk | + SysTick_CTRL_TICKINT_Msk | + SysTick_CTRL_ENABLE_Msk; /* Enable SysTick IRQ and SysTick Timer */ + return (0UL); /* Function successful */ +} +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + +#endif + +/*@} end of CMSIS_Core_SysTickFunctions */ + + + +/* ##################################### Debug In/Output function ########################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_core_DebugFunctions ITM Functions + \brief Functions that access the ITM debug interface. + @{ + */ + +extern volatile int32_t ITM_RxBuffer; /*!< External variable to receive characters. */ +#define ITM_RXBUFFER_EMPTY ((int32_t)0x5AA55AA5U) /*!< Value identifying \ref ITM_RxBuffer is ready for next character. */ + + +/** + \brief ITM Send Character + \details Transmits a character via the ITM channel 0, and + \li Just returns when no debugger is connected that has booked the output. + \li Is blocking when a debugger is connected, but the previous character sent has not been transmitted. + \param [in] ch Character to transmit. + \returns Character to transmit. + */ +__STATIC_INLINE uint32_t ITM_SendChar (uint32_t ch) +{ + if (((ITM->TCR & ITM_TCR_ITMENA_Msk) != 0UL) && /* ITM enabled */ + ((ITM->TER & 1UL ) != 0UL) ) /* ITM Port #0 enabled */ + { + while (ITM->PORT[0U].u32 == 0UL) + { + __NOP(); + } + ITM->PORT[0U].u8 = (uint8_t)ch; + } + return (ch); +} + + +/** + \brief ITM Receive Character + \details Inputs a character via the external variable \ref ITM_RxBuffer. + \return Received character. + \return -1 No character pending. + */ +__STATIC_INLINE int32_t ITM_ReceiveChar (void) +{ + int32_t ch = -1; /* no character available */ + + if (ITM_RxBuffer != ITM_RXBUFFER_EMPTY) + { + ch = ITM_RxBuffer; + ITM_RxBuffer = ITM_RXBUFFER_EMPTY; /* ready for next character */ + } + + return (ch); +} + + +/** + \brief ITM Check Character + \details Checks whether a character is pending for reading in the variable \ref ITM_RxBuffer. + \return 0 No character available. + \return 1 Character available. + */ +__STATIC_INLINE int32_t ITM_CheckChar (void) +{ + + if (ITM_RxBuffer == ITM_RXBUFFER_EMPTY) + { + return (0); /* no character available */ + } + else + { + return (1); /* character available */ + } +} + +/*@} end of CMSIS_core_DebugFunctions */ + + + + +#ifdef __cplusplus +} +#endif + +#endif /* __CORE_CM33_H_DEPENDANT */ + +#endif /* __CMSIS_GENERIC */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Drivers/CMSIS/Include/core_cm4.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Drivers/CMSIS/Include/core_cm4.h new file mode 100644 index 0000000..308b868 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Drivers/CMSIS/Include/core_cm4.h @@ -0,0 +1,2129 @@ +/**************************************************************************//** + * @file core_cm4.h + * @brief CMSIS Cortex-M4 Core Peripheral Access Layer Header File + * @version V5.0.8 + * @date 04. 
June 2018 + ******************************************************************************/ +/* + * Copyright (c) 2009-2018 Arm Limited. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#if defined ( __ICCARM__ ) + #pragma system_include /* treat file as system include file for MISRA check */ +#elif defined (__clang__) + #pragma clang system_header /* treat file as system include file */ +#endif + +#ifndef __CORE_CM4_H_GENERIC +#define __CORE_CM4_H_GENERIC + +#include + +#ifdef __cplusplus + extern "C" { +#endif + +/** + \page CMSIS_MISRA_Exceptions MISRA-C:2004 Compliance Exceptions + CMSIS violates the following MISRA-C:2004 rules: + + \li Required Rule 8.5, object/function definition in header file.
+ Function definitions in header files are used to allow 'inlining'. + + \li Required Rule 18.4, declaration of union type or object of union type: '{...}'.
+ Unions are used for effective representation of core registers. + + \li Advisory Rule 19.7, Function-like macro defined.
+ Function-like macros are used to allow more efficient code. + */ + + +/******************************************************************************* + * CMSIS definitions + ******************************************************************************/ +/** + \ingroup Cortex_M4 + @{ + */ + +#include "cmsis_version.h" + +/* CMSIS CM4 definitions */ +#define __CM4_CMSIS_VERSION_MAIN (__CM_CMSIS_VERSION_MAIN) /*!< \deprecated [31:16] CMSIS HAL main version */ +#define __CM4_CMSIS_VERSION_SUB (__CM_CMSIS_VERSION_SUB) /*!< \deprecated [15:0] CMSIS HAL sub version */ +#define __CM4_CMSIS_VERSION ((__CM4_CMSIS_VERSION_MAIN << 16U) | \ + __CM4_CMSIS_VERSION_SUB ) /*!< \deprecated CMSIS HAL version number */ + +#define __CORTEX_M (4U) /*!< Cortex-M Core */ + +/** __FPU_USED indicates whether an FPU is used or not. + For this, __FPU_PRESENT has to be checked prior to making use of FPU specific registers and functions. +*/ +#if defined ( __CC_ARM ) + #if defined __TARGET_FPU_VFP + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + +#elif defined (__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050) + #if defined __ARM_PCS_VFP + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #warning "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + +#elif defined ( __GNUC__ ) + #if defined (__VFP_FP__) && !defined(__SOFTFP__) + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + +#elif defined ( __ICCARM__ ) + #if defined __ARMVFP__ + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + +#elif defined ( __TI_ARM__ ) + #if defined __TI_VFP_SUPPORT__ + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + +#elif defined ( __TASKING__ ) + #if defined __FPU_VFP__ + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + +#elif defined ( __CSMC__ ) + #if ( __CSMC__ & 0x400U) + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + +#endif + +#include "cmsis_compiler.h" /* CMSIS compiler specific defines */ + + +#ifdef __cplusplus +} +#endif + +#endif /* __CORE_CM4_H_GENERIC */ + +#ifndef __CMSIS_GENERIC + +#ifndef __CORE_CM4_H_DEPENDANT +#define __CORE_CM4_H_DEPENDANT + +#ifdef __cplusplus + extern "C" { +#endif + +/* check device defines and use defaults */ +#if 
defined __CHECK_DEVICE_DEFINES + #ifndef __CM4_REV + #define __CM4_REV 0x0000U + #warning "__CM4_REV not defined in device header file; using default!" + #endif + + #ifndef __FPU_PRESENT + #define __FPU_PRESENT 0U + #warning "__FPU_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __MPU_PRESENT + #define __MPU_PRESENT 0U + #warning "__MPU_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __NVIC_PRIO_BITS + #define __NVIC_PRIO_BITS 3U + #warning "__NVIC_PRIO_BITS not defined in device header file; using default!" + #endif + + #ifndef __Vendor_SysTickConfig + #define __Vendor_SysTickConfig 0U + #warning "__Vendor_SysTickConfig not defined in device header file; using default!" + #endif +#endif + +/* IO definitions (access restrictions to peripheral registers) */ +/** + \defgroup CMSIS_glob_defs CMSIS Global Defines + + IO Type Qualifiers are used + \li to specify the access to peripheral variables. + \li for automatic generation of peripheral register debug information. +*/ +#ifdef __cplusplus + #define __I volatile /*!< Defines 'read only' permissions */ +#else + #define __I volatile const /*!< Defines 'read only' permissions */ +#endif +#define __O volatile /*!< Defines 'write only' permissions */ +#define __IO volatile /*!< Defines 'read / write' permissions */ + +/* following defines should be used for structure members */ +#define __IM volatile const /*! Defines 'read only' structure member permissions */ +#define __OM volatile /*! Defines 'write only' structure member permissions */ +#define __IOM volatile /*! Defines 'read / write' structure member permissions */ + +/*@} end of group Cortex_M4 */ + + + +/******************************************************************************* + * Register Abstraction + Core Register contain: + - Core Register + - Core NVIC Register + - Core SCB Register + - Core SysTick Register + - Core Debug Register + - Core MPU Register + - Core FPU Register + ******************************************************************************/ +/** + \defgroup CMSIS_core_register Defines and Type Definitions + \brief Type definitions and defines for Cortex-M processor based devices. +*/ + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_CORE Status and Control Registers + \brief Core Register type definitions. + @{ + */ + +/** + \brief Union type to access the Application Program Status Register (APSR). 
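+
+   \par Example
+   A minimal sketch (not part of the original ARM header); __get_APSR() is assumed to
+   be provided by the CMSIS compiler abstraction (cmsis_compiler.h):
+   \code
+   APSR_Type apsr;
+   apsr.w = __get_APSR();        // word access
+   if (apsr.b.Z != 0U)           // bit access: Zero condition code flag
+   {
+     // last flag-setting instruction produced a zero result
+   }
+   \endcode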
+ */ +typedef union +{ + struct + { + uint32_t _reserved0:16; /*!< bit: 0..15 Reserved */ + uint32_t GE:4; /*!< bit: 16..19 Greater than or Equal flags */ + uint32_t _reserved1:7; /*!< bit: 20..26 Reserved */ + uint32_t Q:1; /*!< bit: 27 Saturation condition flag */ + uint32_t V:1; /*!< bit: 28 Overflow condition code flag */ + uint32_t C:1; /*!< bit: 29 Carry condition code flag */ + uint32_t Z:1; /*!< bit: 30 Zero condition code flag */ + uint32_t N:1; /*!< bit: 31 Negative condition code flag */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} APSR_Type; + +/* APSR Register Definitions */ +#define APSR_N_Pos 31U /*!< APSR: N Position */ +#define APSR_N_Msk (1UL << APSR_N_Pos) /*!< APSR: N Mask */ + +#define APSR_Z_Pos 30U /*!< APSR: Z Position */ +#define APSR_Z_Msk (1UL << APSR_Z_Pos) /*!< APSR: Z Mask */ + +#define APSR_C_Pos 29U /*!< APSR: C Position */ +#define APSR_C_Msk (1UL << APSR_C_Pos) /*!< APSR: C Mask */ + +#define APSR_V_Pos 28U /*!< APSR: V Position */ +#define APSR_V_Msk (1UL << APSR_V_Pos) /*!< APSR: V Mask */ + +#define APSR_Q_Pos 27U /*!< APSR: Q Position */ +#define APSR_Q_Msk (1UL << APSR_Q_Pos) /*!< APSR: Q Mask */ + +#define APSR_GE_Pos 16U /*!< APSR: GE Position */ +#define APSR_GE_Msk (0xFUL << APSR_GE_Pos) /*!< APSR: GE Mask */ + + +/** + \brief Union type to access the Interrupt Program Status Register (IPSR). + */ +typedef union +{ + struct + { + uint32_t ISR:9; /*!< bit: 0.. 8 Exception number */ + uint32_t _reserved0:23; /*!< bit: 9..31 Reserved */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} IPSR_Type; + +/* IPSR Register Definitions */ +#define IPSR_ISR_Pos 0U /*!< IPSR: ISR Position */ +#define IPSR_ISR_Msk (0x1FFUL /*<< IPSR_ISR_Pos*/) /*!< IPSR: ISR Mask */ + + +/** + \brief Union type to access the Special-Purpose Program Status Registers (xPSR). + */ +typedef union +{ + struct + { + uint32_t ISR:9; /*!< bit: 0.. 
8 Exception number */ + uint32_t _reserved0:1; /*!< bit: 9 Reserved */ + uint32_t ICI_IT_1:6; /*!< bit: 10..15 ICI/IT part 1 */ + uint32_t GE:4; /*!< bit: 16..19 Greater than or Equal flags */ + uint32_t _reserved1:4; /*!< bit: 20..23 Reserved */ + uint32_t T:1; /*!< bit: 24 Thumb bit */ + uint32_t ICI_IT_2:2; /*!< bit: 25..26 ICI/IT part 2 */ + uint32_t Q:1; /*!< bit: 27 Saturation condition flag */ + uint32_t V:1; /*!< bit: 28 Overflow condition code flag */ + uint32_t C:1; /*!< bit: 29 Carry condition code flag */ + uint32_t Z:1; /*!< bit: 30 Zero condition code flag */ + uint32_t N:1; /*!< bit: 31 Negative condition code flag */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} xPSR_Type; + +/* xPSR Register Definitions */ +#define xPSR_N_Pos 31U /*!< xPSR: N Position */ +#define xPSR_N_Msk (1UL << xPSR_N_Pos) /*!< xPSR: N Mask */ + +#define xPSR_Z_Pos 30U /*!< xPSR: Z Position */ +#define xPSR_Z_Msk (1UL << xPSR_Z_Pos) /*!< xPSR: Z Mask */ + +#define xPSR_C_Pos 29U /*!< xPSR: C Position */ +#define xPSR_C_Msk (1UL << xPSR_C_Pos) /*!< xPSR: C Mask */ + +#define xPSR_V_Pos 28U /*!< xPSR: V Position */ +#define xPSR_V_Msk (1UL << xPSR_V_Pos) /*!< xPSR: V Mask */ + +#define xPSR_Q_Pos 27U /*!< xPSR: Q Position */ +#define xPSR_Q_Msk (1UL << xPSR_Q_Pos) /*!< xPSR: Q Mask */ + +#define xPSR_ICI_IT_2_Pos 25U /*!< xPSR: ICI/IT part 2 Position */ +#define xPSR_ICI_IT_2_Msk (3UL << xPSR_ICI_IT_2_Pos) /*!< xPSR: ICI/IT part 2 Mask */ + +#define xPSR_T_Pos 24U /*!< xPSR: T Position */ +#define xPSR_T_Msk (1UL << xPSR_T_Pos) /*!< xPSR: T Mask */ + +#define xPSR_GE_Pos 16U /*!< xPSR: GE Position */ +#define xPSR_GE_Msk (0xFUL << xPSR_GE_Pos) /*!< xPSR: GE Mask */ + +#define xPSR_ICI_IT_1_Pos 10U /*!< xPSR: ICI/IT part 1 Position */ +#define xPSR_ICI_IT_1_Msk (0x3FUL << xPSR_ICI_IT_1_Pos) /*!< xPSR: ICI/IT part 1 Mask */ + +#define xPSR_ISR_Pos 0U /*!< xPSR: ISR Position */ +#define xPSR_ISR_Msk (0x1FFUL /*<< xPSR_ISR_Pos*/) /*!< xPSR: ISR Mask */ + + +/** + \brief Union type to access the Control Registers (CONTROL). + */ +typedef union +{ + struct + { + uint32_t nPRIV:1; /*!< bit: 0 Execution privilege in Thread mode */ + uint32_t SPSEL:1; /*!< bit: 1 Stack to be used */ + uint32_t FPCA:1; /*!< bit: 2 FP extension active flag */ + uint32_t _reserved0:29; /*!< bit: 3..31 Reserved */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} CONTROL_Type; + +/* CONTROL Register Definitions */ +#define CONTROL_FPCA_Pos 2U /*!< CONTROL: FPCA Position */ +#define CONTROL_FPCA_Msk (1UL << CONTROL_FPCA_Pos) /*!< CONTROL: FPCA Mask */ + +#define CONTROL_SPSEL_Pos 1U /*!< CONTROL: SPSEL Position */ +#define CONTROL_SPSEL_Msk (1UL << CONTROL_SPSEL_Pos) /*!< CONTROL: SPSEL Mask */ + +#define CONTROL_nPRIV_Pos 0U /*!< CONTROL: nPRIV Position */ +#define CONTROL_nPRIV_Msk (1UL /*<< CONTROL_nPRIV_Pos*/) /*!< CONTROL: nPRIV Mask */ + +/*@} end of group CMSIS_CORE */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_NVIC Nested Vectored Interrupt Controller (NVIC) + \brief Type definitions for the NVIC Registers + @{ + */ + +/** + \brief Structure type to access the Nested Vectored Interrupt Controller (NVIC). 
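+
+   \par Example
+   A minimal sketch (not part of the original ARM header) of direct register access,
+   equivalent to what __NVIC_EnableIRQ() does for a non-negative interrupt number;
+   TIMx_IRQn is a device-specific placeholder:
+   \code
+   uint32_t irq = (uint32_t)TIMx_IRQn;
+   NVIC->ISER[irq >> 5UL] = (uint32_t)(1UL << (irq & 0x1FUL));   // enable the interrupt
+   \endcode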
+ */ +typedef struct +{ + __IOM uint32_t ISER[8U]; /*!< Offset: 0x000 (R/W) Interrupt Set Enable Register */ + uint32_t RESERVED0[24U]; + __IOM uint32_t ICER[8U]; /*!< Offset: 0x080 (R/W) Interrupt Clear Enable Register */ + uint32_t RSERVED1[24U]; + __IOM uint32_t ISPR[8U]; /*!< Offset: 0x100 (R/W) Interrupt Set Pending Register */ + uint32_t RESERVED2[24U]; + __IOM uint32_t ICPR[8U]; /*!< Offset: 0x180 (R/W) Interrupt Clear Pending Register */ + uint32_t RESERVED3[24U]; + __IOM uint32_t IABR[8U]; /*!< Offset: 0x200 (R/W) Interrupt Active bit Register */ + uint32_t RESERVED4[56U]; + __IOM uint8_t IP[240U]; /*!< Offset: 0x300 (R/W) Interrupt Priority Register (8Bit wide) */ + uint32_t RESERVED5[644U]; + __OM uint32_t STIR; /*!< Offset: 0xE00 ( /W) Software Trigger Interrupt Register */ +} NVIC_Type; + +/* Software Triggered Interrupt Register Definitions */ +#define NVIC_STIR_INTID_Pos 0U /*!< STIR: INTLINESNUM Position */ +#define NVIC_STIR_INTID_Msk (0x1FFUL /*<< NVIC_STIR_INTID_Pos*/) /*!< STIR: INTLINESNUM Mask */ + +/*@} end of group CMSIS_NVIC */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SCB System Control Block (SCB) + \brief Type definitions for the System Control Block Registers + @{ + */ + +/** + \brief Structure type to access the System Control Block (SCB). + */ +typedef struct +{ + __IM uint32_t CPUID; /*!< Offset: 0x000 (R/ ) CPUID Base Register */ + __IOM uint32_t ICSR; /*!< Offset: 0x004 (R/W) Interrupt Control and State Register */ + __IOM uint32_t VTOR; /*!< Offset: 0x008 (R/W) Vector Table Offset Register */ + __IOM uint32_t AIRCR; /*!< Offset: 0x00C (R/W) Application Interrupt and Reset Control Register */ + __IOM uint32_t SCR; /*!< Offset: 0x010 (R/W) System Control Register */ + __IOM uint32_t CCR; /*!< Offset: 0x014 (R/W) Configuration Control Register */ + __IOM uint8_t SHP[12U]; /*!< Offset: 0x018 (R/W) System Handlers Priority Registers (4-7, 8-11, 12-15) */ + __IOM uint32_t SHCSR; /*!< Offset: 0x024 (R/W) System Handler Control and State Register */ + __IOM uint32_t CFSR; /*!< Offset: 0x028 (R/W) Configurable Fault Status Register */ + __IOM uint32_t HFSR; /*!< Offset: 0x02C (R/W) HardFault Status Register */ + __IOM uint32_t DFSR; /*!< Offset: 0x030 (R/W) Debug Fault Status Register */ + __IOM uint32_t MMFAR; /*!< Offset: 0x034 (R/W) MemManage Fault Address Register */ + __IOM uint32_t BFAR; /*!< Offset: 0x038 (R/W) BusFault Address Register */ + __IOM uint32_t AFSR; /*!< Offset: 0x03C (R/W) Auxiliary Fault Status Register */ + __IM uint32_t PFR[2U]; /*!< Offset: 0x040 (R/ ) Processor Feature Register */ + __IM uint32_t DFR; /*!< Offset: 0x048 (R/ ) Debug Feature Register */ + __IM uint32_t ADR; /*!< Offset: 0x04C (R/ ) Auxiliary Feature Register */ + __IM uint32_t MMFR[4U]; /*!< Offset: 0x050 (R/ ) Memory Model Feature Register */ + __IM uint32_t ISAR[5U]; /*!< Offset: 0x060 (R/ ) Instruction Set Attributes Register */ + uint32_t RESERVED0[5U]; + __IOM uint32_t CPACR; /*!< Offset: 0x088 (R/W) Coprocessor Access Control Register */ +} SCB_Type; + +/* SCB CPUID Register Definitions */ +#define SCB_CPUID_IMPLEMENTER_Pos 24U /*!< SCB CPUID: IMPLEMENTER Position */ +#define SCB_CPUID_IMPLEMENTER_Msk (0xFFUL << SCB_CPUID_IMPLEMENTER_Pos) /*!< SCB CPUID: IMPLEMENTER Mask */ + +#define SCB_CPUID_VARIANT_Pos 20U /*!< SCB CPUID: VARIANT Position */ +#define SCB_CPUID_VARIANT_Msk (0xFUL << SCB_CPUID_VARIANT_Pos) /*!< SCB CPUID: VARIANT Mask */ + +#define SCB_CPUID_ARCHITECTURE_Pos 16U /*!< SCB CPUID: ARCHITECTURE Position */ +#define 
SCB_CPUID_ARCHITECTURE_Msk (0xFUL << SCB_CPUID_ARCHITECTURE_Pos) /*!< SCB CPUID: ARCHITECTURE Mask */ + +#define SCB_CPUID_PARTNO_Pos 4U /*!< SCB CPUID: PARTNO Position */ +#define SCB_CPUID_PARTNO_Msk (0xFFFUL << SCB_CPUID_PARTNO_Pos) /*!< SCB CPUID: PARTNO Mask */ + +#define SCB_CPUID_REVISION_Pos 0U /*!< SCB CPUID: REVISION Position */ +#define SCB_CPUID_REVISION_Msk (0xFUL /*<< SCB_CPUID_REVISION_Pos*/) /*!< SCB CPUID: REVISION Mask */ + +/* SCB Interrupt Control State Register Definitions */ +#define SCB_ICSR_NMIPENDSET_Pos 31U /*!< SCB ICSR: NMIPENDSET Position */ +#define SCB_ICSR_NMIPENDSET_Msk (1UL << SCB_ICSR_NMIPENDSET_Pos) /*!< SCB ICSR: NMIPENDSET Mask */ + +#define SCB_ICSR_PENDSVSET_Pos 28U /*!< SCB ICSR: PENDSVSET Position */ +#define SCB_ICSR_PENDSVSET_Msk (1UL << SCB_ICSR_PENDSVSET_Pos) /*!< SCB ICSR: PENDSVSET Mask */ + +#define SCB_ICSR_PENDSVCLR_Pos 27U /*!< SCB ICSR: PENDSVCLR Position */ +#define SCB_ICSR_PENDSVCLR_Msk (1UL << SCB_ICSR_PENDSVCLR_Pos) /*!< SCB ICSR: PENDSVCLR Mask */ + +#define SCB_ICSR_PENDSTSET_Pos 26U /*!< SCB ICSR: PENDSTSET Position */ +#define SCB_ICSR_PENDSTSET_Msk (1UL << SCB_ICSR_PENDSTSET_Pos) /*!< SCB ICSR: PENDSTSET Mask */ + +#define SCB_ICSR_PENDSTCLR_Pos 25U /*!< SCB ICSR: PENDSTCLR Position */ +#define SCB_ICSR_PENDSTCLR_Msk (1UL << SCB_ICSR_PENDSTCLR_Pos) /*!< SCB ICSR: PENDSTCLR Mask */ + +#define SCB_ICSR_ISRPREEMPT_Pos 23U /*!< SCB ICSR: ISRPREEMPT Position */ +#define SCB_ICSR_ISRPREEMPT_Msk (1UL << SCB_ICSR_ISRPREEMPT_Pos) /*!< SCB ICSR: ISRPREEMPT Mask */ + +#define SCB_ICSR_ISRPENDING_Pos 22U /*!< SCB ICSR: ISRPENDING Position */ +#define SCB_ICSR_ISRPENDING_Msk (1UL << SCB_ICSR_ISRPENDING_Pos) /*!< SCB ICSR: ISRPENDING Mask */ + +#define SCB_ICSR_VECTPENDING_Pos 12U /*!< SCB ICSR: VECTPENDING Position */ +#define SCB_ICSR_VECTPENDING_Msk (0x1FFUL << SCB_ICSR_VECTPENDING_Pos) /*!< SCB ICSR: VECTPENDING Mask */ + +#define SCB_ICSR_RETTOBASE_Pos 11U /*!< SCB ICSR: RETTOBASE Position */ +#define SCB_ICSR_RETTOBASE_Msk (1UL << SCB_ICSR_RETTOBASE_Pos) /*!< SCB ICSR: RETTOBASE Mask */ + +#define SCB_ICSR_VECTACTIVE_Pos 0U /*!< SCB ICSR: VECTACTIVE Position */ +#define SCB_ICSR_VECTACTIVE_Msk (0x1FFUL /*<< SCB_ICSR_VECTACTIVE_Pos*/) /*!< SCB ICSR: VECTACTIVE Mask */ + +/* SCB Vector Table Offset Register Definitions */ +#define SCB_VTOR_TBLOFF_Pos 7U /*!< SCB VTOR: TBLOFF Position */ +#define SCB_VTOR_TBLOFF_Msk (0x1FFFFFFUL << SCB_VTOR_TBLOFF_Pos) /*!< SCB VTOR: TBLOFF Mask */ + +/* SCB Application Interrupt and Reset Control Register Definitions */ +#define SCB_AIRCR_VECTKEY_Pos 16U /*!< SCB AIRCR: VECTKEY Position */ +#define SCB_AIRCR_VECTKEY_Msk (0xFFFFUL << SCB_AIRCR_VECTKEY_Pos) /*!< SCB AIRCR: VECTKEY Mask */ + +#define SCB_AIRCR_VECTKEYSTAT_Pos 16U /*!< SCB AIRCR: VECTKEYSTAT Position */ +#define SCB_AIRCR_VECTKEYSTAT_Msk (0xFFFFUL << SCB_AIRCR_VECTKEYSTAT_Pos) /*!< SCB AIRCR: VECTKEYSTAT Mask */ + +#define SCB_AIRCR_ENDIANESS_Pos 15U /*!< SCB AIRCR: ENDIANESS Position */ +#define SCB_AIRCR_ENDIANESS_Msk (1UL << SCB_AIRCR_ENDIANESS_Pos) /*!< SCB AIRCR: ENDIANESS Mask */ + +#define SCB_AIRCR_PRIGROUP_Pos 8U /*!< SCB AIRCR: PRIGROUP Position */ +#define SCB_AIRCR_PRIGROUP_Msk (7UL << SCB_AIRCR_PRIGROUP_Pos) /*!< SCB AIRCR: PRIGROUP Mask */ + +#define SCB_AIRCR_SYSRESETREQ_Pos 2U /*!< SCB AIRCR: SYSRESETREQ Position */ +#define SCB_AIRCR_SYSRESETREQ_Msk (1UL << SCB_AIRCR_SYSRESETREQ_Pos) /*!< SCB AIRCR: SYSRESETREQ Mask */ + +#define SCB_AIRCR_VECTCLRACTIVE_Pos 1U /*!< SCB AIRCR: VECTCLRACTIVE Position */ +#define 
SCB_AIRCR_VECTCLRACTIVE_Msk (1UL << SCB_AIRCR_VECTCLRACTIVE_Pos) /*!< SCB AIRCR: VECTCLRACTIVE Mask */ + +#define SCB_AIRCR_VECTRESET_Pos 0U /*!< SCB AIRCR: VECTRESET Position */ +#define SCB_AIRCR_VECTRESET_Msk (1UL /*<< SCB_AIRCR_VECTRESET_Pos*/) /*!< SCB AIRCR: VECTRESET Mask */ + +/* SCB System Control Register Definitions */ +#define SCB_SCR_SEVONPEND_Pos 4U /*!< SCB SCR: SEVONPEND Position */ +#define SCB_SCR_SEVONPEND_Msk (1UL << SCB_SCR_SEVONPEND_Pos) /*!< SCB SCR: SEVONPEND Mask */ + +#define SCB_SCR_SLEEPDEEP_Pos 2U /*!< SCB SCR: SLEEPDEEP Position */ +#define SCB_SCR_SLEEPDEEP_Msk (1UL << SCB_SCR_SLEEPDEEP_Pos) /*!< SCB SCR: SLEEPDEEP Mask */ + +#define SCB_SCR_SLEEPONEXIT_Pos 1U /*!< SCB SCR: SLEEPONEXIT Position */ +#define SCB_SCR_SLEEPONEXIT_Msk (1UL << SCB_SCR_SLEEPONEXIT_Pos) /*!< SCB SCR: SLEEPONEXIT Mask */ + +/* SCB Configuration Control Register Definitions */ +#define SCB_CCR_STKALIGN_Pos 9U /*!< SCB CCR: STKALIGN Position */ +#define SCB_CCR_STKALIGN_Msk (1UL << SCB_CCR_STKALIGN_Pos) /*!< SCB CCR: STKALIGN Mask */ + +#define SCB_CCR_BFHFNMIGN_Pos 8U /*!< SCB CCR: BFHFNMIGN Position */ +#define SCB_CCR_BFHFNMIGN_Msk (1UL << SCB_CCR_BFHFNMIGN_Pos) /*!< SCB CCR: BFHFNMIGN Mask */ + +#define SCB_CCR_DIV_0_TRP_Pos 4U /*!< SCB CCR: DIV_0_TRP Position */ +#define SCB_CCR_DIV_0_TRP_Msk (1UL << SCB_CCR_DIV_0_TRP_Pos) /*!< SCB CCR: DIV_0_TRP Mask */ + +#define SCB_CCR_UNALIGN_TRP_Pos 3U /*!< SCB CCR: UNALIGN_TRP Position */ +#define SCB_CCR_UNALIGN_TRP_Msk (1UL << SCB_CCR_UNALIGN_TRP_Pos) /*!< SCB CCR: UNALIGN_TRP Mask */ + +#define SCB_CCR_USERSETMPEND_Pos 1U /*!< SCB CCR: USERSETMPEND Position */ +#define SCB_CCR_USERSETMPEND_Msk (1UL << SCB_CCR_USERSETMPEND_Pos) /*!< SCB CCR: USERSETMPEND Mask */ + +#define SCB_CCR_NONBASETHRDENA_Pos 0U /*!< SCB CCR: NONBASETHRDENA Position */ +#define SCB_CCR_NONBASETHRDENA_Msk (1UL /*<< SCB_CCR_NONBASETHRDENA_Pos*/) /*!< SCB CCR: NONBASETHRDENA Mask */ + +/* SCB System Handler Control and State Register Definitions */ +#define SCB_SHCSR_USGFAULTENA_Pos 18U /*!< SCB SHCSR: USGFAULTENA Position */ +#define SCB_SHCSR_USGFAULTENA_Msk (1UL << SCB_SHCSR_USGFAULTENA_Pos) /*!< SCB SHCSR: USGFAULTENA Mask */ + +#define SCB_SHCSR_BUSFAULTENA_Pos 17U /*!< SCB SHCSR: BUSFAULTENA Position */ +#define SCB_SHCSR_BUSFAULTENA_Msk (1UL << SCB_SHCSR_BUSFAULTENA_Pos) /*!< SCB SHCSR: BUSFAULTENA Mask */ + +#define SCB_SHCSR_MEMFAULTENA_Pos 16U /*!< SCB SHCSR: MEMFAULTENA Position */ +#define SCB_SHCSR_MEMFAULTENA_Msk (1UL << SCB_SHCSR_MEMFAULTENA_Pos) /*!< SCB SHCSR: MEMFAULTENA Mask */ + +#define SCB_SHCSR_SVCALLPENDED_Pos 15U /*!< SCB SHCSR: SVCALLPENDED Position */ +#define SCB_SHCSR_SVCALLPENDED_Msk (1UL << SCB_SHCSR_SVCALLPENDED_Pos) /*!< SCB SHCSR: SVCALLPENDED Mask */ + +#define SCB_SHCSR_BUSFAULTPENDED_Pos 14U /*!< SCB SHCSR: BUSFAULTPENDED Position */ +#define SCB_SHCSR_BUSFAULTPENDED_Msk (1UL << SCB_SHCSR_BUSFAULTPENDED_Pos) /*!< SCB SHCSR: BUSFAULTPENDED Mask */ + +#define SCB_SHCSR_MEMFAULTPENDED_Pos 13U /*!< SCB SHCSR: MEMFAULTPENDED Position */ +#define SCB_SHCSR_MEMFAULTPENDED_Msk (1UL << SCB_SHCSR_MEMFAULTPENDED_Pos) /*!< SCB SHCSR: MEMFAULTPENDED Mask */ + +#define SCB_SHCSR_USGFAULTPENDED_Pos 12U /*!< SCB SHCSR: USGFAULTPENDED Position */ +#define SCB_SHCSR_USGFAULTPENDED_Msk (1UL << SCB_SHCSR_USGFAULTPENDED_Pos) /*!< SCB SHCSR: USGFAULTPENDED Mask */ + +#define SCB_SHCSR_SYSTICKACT_Pos 11U /*!< SCB SHCSR: SYSTICKACT Position */ +#define SCB_SHCSR_SYSTICKACT_Msk (1UL << SCB_SHCSR_SYSTICKACT_Pos) /*!< SCB SHCSR: SYSTICKACT Mask */ + +#define 
SCB_SHCSR_PENDSVACT_Pos 10U /*!< SCB SHCSR: PENDSVACT Position */ +#define SCB_SHCSR_PENDSVACT_Msk (1UL << SCB_SHCSR_PENDSVACT_Pos) /*!< SCB SHCSR: PENDSVACT Mask */ + +#define SCB_SHCSR_MONITORACT_Pos 8U /*!< SCB SHCSR: MONITORACT Position */ +#define SCB_SHCSR_MONITORACT_Msk (1UL << SCB_SHCSR_MONITORACT_Pos) /*!< SCB SHCSR: MONITORACT Mask */ + +#define SCB_SHCSR_SVCALLACT_Pos 7U /*!< SCB SHCSR: SVCALLACT Position */ +#define SCB_SHCSR_SVCALLACT_Msk (1UL << SCB_SHCSR_SVCALLACT_Pos) /*!< SCB SHCSR: SVCALLACT Mask */ + +#define SCB_SHCSR_USGFAULTACT_Pos 3U /*!< SCB SHCSR: USGFAULTACT Position */ +#define SCB_SHCSR_USGFAULTACT_Msk (1UL << SCB_SHCSR_USGFAULTACT_Pos) /*!< SCB SHCSR: USGFAULTACT Mask */ + +#define SCB_SHCSR_BUSFAULTACT_Pos 1U /*!< SCB SHCSR: BUSFAULTACT Position */ +#define SCB_SHCSR_BUSFAULTACT_Msk (1UL << SCB_SHCSR_BUSFAULTACT_Pos) /*!< SCB SHCSR: BUSFAULTACT Mask */ + +#define SCB_SHCSR_MEMFAULTACT_Pos 0U /*!< SCB SHCSR: MEMFAULTACT Position */ +#define SCB_SHCSR_MEMFAULTACT_Msk (1UL /*<< SCB_SHCSR_MEMFAULTACT_Pos*/) /*!< SCB SHCSR: MEMFAULTACT Mask */ + +/* SCB Configurable Fault Status Register Definitions */ +#define SCB_CFSR_USGFAULTSR_Pos 16U /*!< SCB CFSR: Usage Fault Status Register Position */ +#define SCB_CFSR_USGFAULTSR_Msk (0xFFFFUL << SCB_CFSR_USGFAULTSR_Pos) /*!< SCB CFSR: Usage Fault Status Register Mask */ + +#define SCB_CFSR_BUSFAULTSR_Pos 8U /*!< SCB CFSR: Bus Fault Status Register Position */ +#define SCB_CFSR_BUSFAULTSR_Msk (0xFFUL << SCB_CFSR_BUSFAULTSR_Pos) /*!< SCB CFSR: Bus Fault Status Register Mask */ + +#define SCB_CFSR_MEMFAULTSR_Pos 0U /*!< SCB CFSR: Memory Manage Fault Status Register Position */ +#define SCB_CFSR_MEMFAULTSR_Msk (0xFFUL /*<< SCB_CFSR_MEMFAULTSR_Pos*/) /*!< SCB CFSR: Memory Manage Fault Status Register Mask */ + +/* MemManage Fault Status Register (part of SCB Configurable Fault Status Register) */ +#define SCB_CFSR_MMARVALID_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 7U) /*!< SCB CFSR (MMFSR): MMARVALID Position */ +#define SCB_CFSR_MMARVALID_Msk (1UL << SCB_CFSR_MMARVALID_Pos) /*!< SCB CFSR (MMFSR): MMARVALID Mask */ + +#define SCB_CFSR_MLSPERR_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 5U) /*!< SCB CFSR (MMFSR): MLSPERR Position */ +#define SCB_CFSR_MLSPERR_Msk (1UL << SCB_CFSR_MLSPERR_Pos) /*!< SCB CFSR (MMFSR): MLSPERR Mask */ + +#define SCB_CFSR_MSTKERR_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 4U) /*!< SCB CFSR (MMFSR): MSTKERR Position */ +#define SCB_CFSR_MSTKERR_Msk (1UL << SCB_CFSR_MSTKERR_Pos) /*!< SCB CFSR (MMFSR): MSTKERR Mask */ + +#define SCB_CFSR_MUNSTKERR_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 3U) /*!< SCB CFSR (MMFSR): MUNSTKERR Position */ +#define SCB_CFSR_MUNSTKERR_Msk (1UL << SCB_CFSR_MUNSTKERR_Pos) /*!< SCB CFSR (MMFSR): MUNSTKERR Mask */ + +#define SCB_CFSR_DACCVIOL_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 1U) /*!< SCB CFSR (MMFSR): DACCVIOL Position */ +#define SCB_CFSR_DACCVIOL_Msk (1UL << SCB_CFSR_DACCVIOL_Pos) /*!< SCB CFSR (MMFSR): DACCVIOL Mask */ + +#define SCB_CFSR_IACCVIOL_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 0U) /*!< SCB CFSR (MMFSR): IACCVIOL Position */ +#define SCB_CFSR_IACCVIOL_Msk (1UL /*<< SCB_CFSR_IACCVIOL_Pos*/) /*!< SCB CFSR (MMFSR): IACCVIOL Mask */ + +/* BusFault Status Register (part of SCB Configurable Fault Status Register) */ +#define SCB_CFSR_BFARVALID_Pos (SCB_CFSR_BUSFAULTSR_Pos + 7U) /*!< SCB CFSR (BFSR): BFARVALID Position */ +#define SCB_CFSR_BFARVALID_Msk (1UL << SCB_CFSR_BFARVALID_Pos) /*!< SCB CFSR (BFSR): BFARVALID Mask */ + +#define SCB_CFSR_LSPERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 5U) /*!< SCB CFSR (BFSR): LSPERR 
Position */ +#define SCB_CFSR_LSPERR_Msk (1UL << SCB_CFSR_LSPERR_Pos) /*!< SCB CFSR (BFSR): LSPERR Mask */ + +#define SCB_CFSR_STKERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 4U) /*!< SCB CFSR (BFSR): STKERR Position */ +#define SCB_CFSR_STKERR_Msk (1UL << SCB_CFSR_STKERR_Pos) /*!< SCB CFSR (BFSR): STKERR Mask */ + +#define SCB_CFSR_UNSTKERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 3U) /*!< SCB CFSR (BFSR): UNSTKERR Position */ +#define SCB_CFSR_UNSTKERR_Msk (1UL << SCB_CFSR_UNSTKERR_Pos) /*!< SCB CFSR (BFSR): UNSTKERR Mask */ + +#define SCB_CFSR_IMPRECISERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 2U) /*!< SCB CFSR (BFSR): IMPRECISERR Position */ +#define SCB_CFSR_IMPRECISERR_Msk (1UL << SCB_CFSR_IMPRECISERR_Pos) /*!< SCB CFSR (BFSR): IMPRECISERR Mask */ + +#define SCB_CFSR_PRECISERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 1U) /*!< SCB CFSR (BFSR): PRECISERR Position */ +#define SCB_CFSR_PRECISERR_Msk (1UL << SCB_CFSR_PRECISERR_Pos) /*!< SCB CFSR (BFSR): PRECISERR Mask */ + +#define SCB_CFSR_IBUSERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 0U) /*!< SCB CFSR (BFSR): IBUSERR Position */ +#define SCB_CFSR_IBUSERR_Msk (1UL << SCB_CFSR_IBUSERR_Pos) /*!< SCB CFSR (BFSR): IBUSERR Mask */ + +/* UsageFault Status Register (part of SCB Configurable Fault Status Register) */ +#define SCB_CFSR_DIVBYZERO_Pos (SCB_CFSR_USGFAULTSR_Pos + 9U) /*!< SCB CFSR (UFSR): DIVBYZERO Position */ +#define SCB_CFSR_DIVBYZERO_Msk (1UL << SCB_CFSR_DIVBYZERO_Pos) /*!< SCB CFSR (UFSR): DIVBYZERO Mask */ + +#define SCB_CFSR_UNALIGNED_Pos (SCB_CFSR_USGFAULTSR_Pos + 8U) /*!< SCB CFSR (UFSR): UNALIGNED Position */ +#define SCB_CFSR_UNALIGNED_Msk (1UL << SCB_CFSR_UNALIGNED_Pos) /*!< SCB CFSR (UFSR): UNALIGNED Mask */ + +#define SCB_CFSR_NOCP_Pos (SCB_CFSR_USGFAULTSR_Pos + 3U) /*!< SCB CFSR (UFSR): NOCP Position */ +#define SCB_CFSR_NOCP_Msk (1UL << SCB_CFSR_NOCP_Pos) /*!< SCB CFSR (UFSR): NOCP Mask */ + +#define SCB_CFSR_INVPC_Pos (SCB_CFSR_USGFAULTSR_Pos + 2U) /*!< SCB CFSR (UFSR): INVPC Position */ +#define SCB_CFSR_INVPC_Msk (1UL << SCB_CFSR_INVPC_Pos) /*!< SCB CFSR (UFSR): INVPC Mask */ + +#define SCB_CFSR_INVSTATE_Pos (SCB_CFSR_USGFAULTSR_Pos + 1U) /*!< SCB CFSR (UFSR): INVSTATE Position */ +#define SCB_CFSR_INVSTATE_Msk (1UL << SCB_CFSR_INVSTATE_Pos) /*!< SCB CFSR (UFSR): INVSTATE Mask */ + +#define SCB_CFSR_UNDEFINSTR_Pos (SCB_CFSR_USGFAULTSR_Pos + 0U) /*!< SCB CFSR (UFSR): UNDEFINSTR Position */ +#define SCB_CFSR_UNDEFINSTR_Msk (1UL << SCB_CFSR_UNDEFINSTR_Pos) /*!< SCB CFSR (UFSR): UNDEFINSTR Mask */ + +/* SCB Hard Fault Status Register Definitions */ +#define SCB_HFSR_DEBUGEVT_Pos 31U /*!< SCB HFSR: DEBUGEVT Position */ +#define SCB_HFSR_DEBUGEVT_Msk (1UL << SCB_HFSR_DEBUGEVT_Pos) /*!< SCB HFSR: DEBUGEVT Mask */ + +#define SCB_HFSR_FORCED_Pos 30U /*!< SCB HFSR: FORCED Position */ +#define SCB_HFSR_FORCED_Msk (1UL << SCB_HFSR_FORCED_Pos) /*!< SCB HFSR: FORCED Mask */ + +#define SCB_HFSR_VECTTBL_Pos 1U /*!< SCB HFSR: VECTTBL Position */ +#define SCB_HFSR_VECTTBL_Msk (1UL << SCB_HFSR_VECTTBL_Pos) /*!< SCB HFSR: VECTTBL Mask */ + +/* SCB Debug Fault Status Register Definitions */ +#define SCB_DFSR_EXTERNAL_Pos 4U /*!< SCB DFSR: EXTERNAL Position */ +#define SCB_DFSR_EXTERNAL_Msk (1UL << SCB_DFSR_EXTERNAL_Pos) /*!< SCB DFSR: EXTERNAL Mask */ + +#define SCB_DFSR_VCATCH_Pos 3U /*!< SCB DFSR: VCATCH Position */ +#define SCB_DFSR_VCATCH_Msk (1UL << SCB_DFSR_VCATCH_Pos) /*!< SCB DFSR: VCATCH Mask */ + +#define SCB_DFSR_DWTTRAP_Pos 2U /*!< SCB DFSR: DWTTRAP Position */ +#define SCB_DFSR_DWTTRAP_Msk (1UL << SCB_DFSR_DWTTRAP_Pos) /*!< SCB DFSR: DWTTRAP Mask */ + +#define 
SCB_DFSR_BKPT_Pos 1U /*!< SCB DFSR: BKPT Position */ +#define SCB_DFSR_BKPT_Msk (1UL << SCB_DFSR_BKPT_Pos) /*!< SCB DFSR: BKPT Mask */ + +#define SCB_DFSR_HALTED_Pos 0U /*!< SCB DFSR: HALTED Position */ +#define SCB_DFSR_HALTED_Msk (1UL /*<< SCB_DFSR_HALTED_Pos*/) /*!< SCB DFSR: HALTED Mask */ + +/*@} end of group CMSIS_SCB */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SCnSCB System Controls not in SCB (SCnSCB) + \brief Type definitions for the System Control and ID Register not in the SCB + @{ + */ + +/** + \brief Structure type to access the System Control and ID Register not in the SCB. + */ +typedef struct +{ + uint32_t RESERVED0[1U]; + __IM uint32_t ICTR; /*!< Offset: 0x004 (R/ ) Interrupt Controller Type Register */ + __IOM uint32_t ACTLR; /*!< Offset: 0x008 (R/W) Auxiliary Control Register */ +} SCnSCB_Type; + +/* Interrupt Controller Type Register Definitions */ +#define SCnSCB_ICTR_INTLINESNUM_Pos 0U /*!< ICTR: INTLINESNUM Position */ +#define SCnSCB_ICTR_INTLINESNUM_Msk (0xFUL /*<< SCnSCB_ICTR_INTLINESNUM_Pos*/) /*!< ICTR: INTLINESNUM Mask */ + +/* Auxiliary Control Register Definitions */ +#define SCnSCB_ACTLR_DISOOFP_Pos 9U /*!< ACTLR: DISOOFP Position */ +#define SCnSCB_ACTLR_DISOOFP_Msk (1UL << SCnSCB_ACTLR_DISOOFP_Pos) /*!< ACTLR: DISOOFP Mask */ + +#define SCnSCB_ACTLR_DISFPCA_Pos 8U /*!< ACTLR: DISFPCA Position */ +#define SCnSCB_ACTLR_DISFPCA_Msk (1UL << SCnSCB_ACTLR_DISFPCA_Pos) /*!< ACTLR: DISFPCA Mask */ + +#define SCnSCB_ACTLR_DISFOLD_Pos 2U /*!< ACTLR: DISFOLD Position */ +#define SCnSCB_ACTLR_DISFOLD_Msk (1UL << SCnSCB_ACTLR_DISFOLD_Pos) /*!< ACTLR: DISFOLD Mask */ + +#define SCnSCB_ACTLR_DISDEFWBUF_Pos 1U /*!< ACTLR: DISDEFWBUF Position */ +#define SCnSCB_ACTLR_DISDEFWBUF_Msk (1UL << SCnSCB_ACTLR_DISDEFWBUF_Pos) /*!< ACTLR: DISDEFWBUF Mask */ + +#define SCnSCB_ACTLR_DISMCYCINT_Pos 0U /*!< ACTLR: DISMCYCINT Position */ +#define SCnSCB_ACTLR_DISMCYCINT_Msk (1UL /*<< SCnSCB_ACTLR_DISMCYCINT_Pos*/) /*!< ACTLR: DISMCYCINT Mask */ + +/*@} end of group CMSIS_SCnotSCB */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SysTick System Tick Timer (SysTick) + \brief Type definitions for the System Timer Registers. + @{ + */ + +/** + \brief Structure type to access the System Timer (SysTick). 
+ */ +typedef struct +{ + __IOM uint32_t CTRL; /*!< Offset: 0x000 (R/W) SysTick Control and Status Register */ + __IOM uint32_t LOAD; /*!< Offset: 0x004 (R/W) SysTick Reload Value Register */ + __IOM uint32_t VAL; /*!< Offset: 0x008 (R/W) SysTick Current Value Register */ + __IM uint32_t CALIB; /*!< Offset: 0x00C (R/ ) SysTick Calibration Register */ +} SysTick_Type; + +/* SysTick Control / Status Register Definitions */ +#define SysTick_CTRL_COUNTFLAG_Pos 16U /*!< SysTick CTRL: COUNTFLAG Position */ +#define SysTick_CTRL_COUNTFLAG_Msk (1UL << SysTick_CTRL_COUNTFLAG_Pos) /*!< SysTick CTRL: COUNTFLAG Mask */ + +#define SysTick_CTRL_CLKSOURCE_Pos 2U /*!< SysTick CTRL: CLKSOURCE Position */ +#define SysTick_CTRL_CLKSOURCE_Msk (1UL << SysTick_CTRL_CLKSOURCE_Pos) /*!< SysTick CTRL: CLKSOURCE Mask */ + +#define SysTick_CTRL_TICKINT_Pos 1U /*!< SysTick CTRL: TICKINT Position */ +#define SysTick_CTRL_TICKINT_Msk (1UL << SysTick_CTRL_TICKINT_Pos) /*!< SysTick CTRL: TICKINT Mask */ + +#define SysTick_CTRL_ENABLE_Pos 0U /*!< SysTick CTRL: ENABLE Position */ +#define SysTick_CTRL_ENABLE_Msk (1UL /*<< SysTick_CTRL_ENABLE_Pos*/) /*!< SysTick CTRL: ENABLE Mask */ + +/* SysTick Reload Register Definitions */ +#define SysTick_LOAD_RELOAD_Pos 0U /*!< SysTick LOAD: RELOAD Position */ +#define SysTick_LOAD_RELOAD_Msk (0xFFFFFFUL /*<< SysTick_LOAD_RELOAD_Pos*/) /*!< SysTick LOAD: RELOAD Mask */ + +/* SysTick Current Register Definitions */ +#define SysTick_VAL_CURRENT_Pos 0U /*!< SysTick VAL: CURRENT Position */ +#define SysTick_VAL_CURRENT_Msk (0xFFFFFFUL /*<< SysTick_VAL_CURRENT_Pos*/) /*!< SysTick VAL: CURRENT Mask */ + +/* SysTick Calibration Register Definitions */ +#define SysTick_CALIB_NOREF_Pos 31U /*!< SysTick CALIB: NOREF Position */ +#define SysTick_CALIB_NOREF_Msk (1UL << SysTick_CALIB_NOREF_Pos) /*!< SysTick CALIB: NOREF Mask */ + +#define SysTick_CALIB_SKEW_Pos 30U /*!< SysTick CALIB: SKEW Position */ +#define SysTick_CALIB_SKEW_Msk (1UL << SysTick_CALIB_SKEW_Pos) /*!< SysTick CALIB: SKEW Mask */ + +#define SysTick_CALIB_TENMS_Pos 0U /*!< SysTick CALIB: TENMS Position */ +#define SysTick_CALIB_TENMS_Msk (0xFFFFFFUL /*<< SysTick_CALIB_TENMS_Pos*/) /*!< SysTick CALIB: TENMS Mask */ + +/*@} end of group CMSIS_SysTick */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_ITM Instrumentation Trace Macrocell (ITM) + \brief Type definitions for the Instrumentation Trace Macrocell (ITM) + @{ + */ + +/** + \brief Structure type to access the Instrumentation Trace Macrocell Register (ITM). 
+ */ +typedef struct +{ + __OM union + { + __OM uint8_t u8; /*!< Offset: 0x000 ( /W) ITM Stimulus Port 8-bit */ + __OM uint16_t u16; /*!< Offset: 0x000 ( /W) ITM Stimulus Port 16-bit */ + __OM uint32_t u32; /*!< Offset: 0x000 ( /W) ITM Stimulus Port 32-bit */ + } PORT [32U]; /*!< Offset: 0x000 ( /W) ITM Stimulus Port Registers */ + uint32_t RESERVED0[864U]; + __IOM uint32_t TER; /*!< Offset: 0xE00 (R/W) ITM Trace Enable Register */ + uint32_t RESERVED1[15U]; + __IOM uint32_t TPR; /*!< Offset: 0xE40 (R/W) ITM Trace Privilege Register */ + uint32_t RESERVED2[15U]; + __IOM uint32_t TCR; /*!< Offset: 0xE80 (R/W) ITM Trace Control Register */ + uint32_t RESERVED3[29U]; + __OM uint32_t IWR; /*!< Offset: 0xEF8 ( /W) ITM Integration Write Register */ + __IM uint32_t IRR; /*!< Offset: 0xEFC (R/ ) ITM Integration Read Register */ + __IOM uint32_t IMCR; /*!< Offset: 0xF00 (R/W) ITM Integration Mode Control Register */ + uint32_t RESERVED4[43U]; + __OM uint32_t LAR; /*!< Offset: 0xFB0 ( /W) ITM Lock Access Register */ + __IM uint32_t LSR; /*!< Offset: 0xFB4 (R/ ) ITM Lock Status Register */ + uint32_t RESERVED5[6U]; + __IM uint32_t PID4; /*!< Offset: 0xFD0 (R/ ) ITM Peripheral Identification Register #4 */ + __IM uint32_t PID5; /*!< Offset: 0xFD4 (R/ ) ITM Peripheral Identification Register #5 */ + __IM uint32_t PID6; /*!< Offset: 0xFD8 (R/ ) ITM Peripheral Identification Register #6 */ + __IM uint32_t PID7; /*!< Offset: 0xFDC (R/ ) ITM Peripheral Identification Register #7 */ + __IM uint32_t PID0; /*!< Offset: 0xFE0 (R/ ) ITM Peripheral Identification Register #0 */ + __IM uint32_t PID1; /*!< Offset: 0xFE4 (R/ ) ITM Peripheral Identification Register #1 */ + __IM uint32_t PID2; /*!< Offset: 0xFE8 (R/ ) ITM Peripheral Identification Register #2 */ + __IM uint32_t PID3; /*!< Offset: 0xFEC (R/ ) ITM Peripheral Identification Register #3 */ + __IM uint32_t CID0; /*!< Offset: 0xFF0 (R/ ) ITM Component Identification Register #0 */ + __IM uint32_t CID1; /*!< Offset: 0xFF4 (R/ ) ITM Component Identification Register #1 */ + __IM uint32_t CID2; /*!< Offset: 0xFF8 (R/ ) ITM Component Identification Register #2 */ + __IM uint32_t CID3; /*!< Offset: 0xFFC (R/ ) ITM Component Identification Register #3 */ +} ITM_Type; + +/* ITM Trace Privilege Register Definitions */ +#define ITM_TPR_PRIVMASK_Pos 0U /*!< ITM TPR: PRIVMASK Position */ +#define ITM_TPR_PRIVMASK_Msk (0xFFFFFFFFUL /*<< ITM_TPR_PRIVMASK_Pos*/) /*!< ITM TPR: PRIVMASK Mask */ + +/* ITM Trace Control Register Definitions */ +#define ITM_TCR_BUSY_Pos 23U /*!< ITM TCR: BUSY Position */ +#define ITM_TCR_BUSY_Msk (1UL << ITM_TCR_BUSY_Pos) /*!< ITM TCR: BUSY Mask */ + +#define ITM_TCR_TraceBusID_Pos 16U /*!< ITM TCR: ATBID Position */ +#define ITM_TCR_TraceBusID_Msk (0x7FUL << ITM_TCR_TraceBusID_Pos) /*!< ITM TCR: ATBID Mask */ + +#define ITM_TCR_GTSFREQ_Pos 10U /*!< ITM TCR: Global timestamp frequency Position */ +#define ITM_TCR_GTSFREQ_Msk (3UL << ITM_TCR_GTSFREQ_Pos) /*!< ITM TCR: Global timestamp frequency Mask */ + +#define ITM_TCR_TSPrescale_Pos 8U /*!< ITM TCR: TSPrescale Position */ +#define ITM_TCR_TSPrescale_Msk (3UL << ITM_TCR_TSPrescale_Pos) /*!< ITM TCR: TSPrescale Mask */ + +#define ITM_TCR_SWOENA_Pos 4U /*!< ITM TCR: SWOENA Position */ +#define ITM_TCR_SWOENA_Msk (1UL << ITM_TCR_SWOENA_Pos) /*!< ITM TCR: SWOENA Mask */ + +#define ITM_TCR_DWTENA_Pos 3U /*!< ITM TCR: DWTENA Position */ +#define ITM_TCR_DWTENA_Msk (1UL << ITM_TCR_DWTENA_Pos) /*!< ITM TCR: DWTENA Mask */ + +#define ITM_TCR_SYNCENA_Pos 2U /*!< ITM TCR: SYNCENA Position */ +#define 
ITM_TCR_SYNCENA_Msk (1UL << ITM_TCR_SYNCENA_Pos) /*!< ITM TCR: SYNCENA Mask */ + +#define ITM_TCR_TSENA_Pos 1U /*!< ITM TCR: TSENA Position */ +#define ITM_TCR_TSENA_Msk (1UL << ITM_TCR_TSENA_Pos) /*!< ITM TCR: TSENA Mask */ + +#define ITM_TCR_ITMENA_Pos 0U /*!< ITM TCR: ITM Enable bit Position */ +#define ITM_TCR_ITMENA_Msk (1UL /*<< ITM_TCR_ITMENA_Pos*/) /*!< ITM TCR: ITM Enable bit Mask */ + +/* ITM Integration Write Register Definitions */ +#define ITM_IWR_ATVALIDM_Pos 0U /*!< ITM IWR: ATVALIDM Position */ +#define ITM_IWR_ATVALIDM_Msk (1UL /*<< ITM_IWR_ATVALIDM_Pos*/) /*!< ITM IWR: ATVALIDM Mask */ + +/* ITM Integration Read Register Definitions */ +#define ITM_IRR_ATREADYM_Pos 0U /*!< ITM IRR: ATREADYM Position */ +#define ITM_IRR_ATREADYM_Msk (1UL /*<< ITM_IRR_ATREADYM_Pos*/) /*!< ITM IRR: ATREADYM Mask */ + +/* ITM Integration Mode Control Register Definitions */ +#define ITM_IMCR_INTEGRATION_Pos 0U /*!< ITM IMCR: INTEGRATION Position */ +#define ITM_IMCR_INTEGRATION_Msk (1UL /*<< ITM_IMCR_INTEGRATION_Pos*/) /*!< ITM IMCR: INTEGRATION Mask */ + +/* ITM Lock Status Register Definitions */ +#define ITM_LSR_ByteAcc_Pos 2U /*!< ITM LSR: ByteAcc Position */ +#define ITM_LSR_ByteAcc_Msk (1UL << ITM_LSR_ByteAcc_Pos) /*!< ITM LSR: ByteAcc Mask */ + +#define ITM_LSR_Access_Pos 1U /*!< ITM LSR: Access Position */ +#define ITM_LSR_Access_Msk (1UL << ITM_LSR_Access_Pos) /*!< ITM LSR: Access Mask */ + +#define ITM_LSR_Present_Pos 0U /*!< ITM LSR: Present Position */ +#define ITM_LSR_Present_Msk (1UL /*<< ITM_LSR_Present_Pos*/) /*!< ITM LSR: Present Mask */ + +/*@}*/ /* end of group CMSIS_ITM */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_DWT Data Watchpoint and Trace (DWT) + \brief Type definitions for the Data Watchpoint and Trace (DWT) + @{ + */ + +/** + \brief Structure type to access the Data Watchpoint and Trace Register (DWT). 
+ */ +typedef struct +{ + __IOM uint32_t CTRL; /*!< Offset: 0x000 (R/W) Control Register */ + __IOM uint32_t CYCCNT; /*!< Offset: 0x004 (R/W) Cycle Count Register */ + __IOM uint32_t CPICNT; /*!< Offset: 0x008 (R/W) CPI Count Register */ + __IOM uint32_t EXCCNT; /*!< Offset: 0x00C (R/W) Exception Overhead Count Register */ + __IOM uint32_t SLEEPCNT; /*!< Offset: 0x010 (R/W) Sleep Count Register */ + __IOM uint32_t LSUCNT; /*!< Offset: 0x014 (R/W) LSU Count Register */ + __IOM uint32_t FOLDCNT; /*!< Offset: 0x018 (R/W) Folded-instruction Count Register */ + __IM uint32_t PCSR; /*!< Offset: 0x01C (R/ ) Program Counter Sample Register */ + __IOM uint32_t COMP0; /*!< Offset: 0x020 (R/W) Comparator Register 0 */ + __IOM uint32_t MASK0; /*!< Offset: 0x024 (R/W) Mask Register 0 */ + __IOM uint32_t FUNCTION0; /*!< Offset: 0x028 (R/W) Function Register 0 */ + uint32_t RESERVED0[1U]; + __IOM uint32_t COMP1; /*!< Offset: 0x030 (R/W) Comparator Register 1 */ + __IOM uint32_t MASK1; /*!< Offset: 0x034 (R/W) Mask Register 1 */ + __IOM uint32_t FUNCTION1; /*!< Offset: 0x038 (R/W) Function Register 1 */ + uint32_t RESERVED1[1U]; + __IOM uint32_t COMP2; /*!< Offset: 0x040 (R/W) Comparator Register 2 */ + __IOM uint32_t MASK2; /*!< Offset: 0x044 (R/W) Mask Register 2 */ + __IOM uint32_t FUNCTION2; /*!< Offset: 0x048 (R/W) Function Register 2 */ + uint32_t RESERVED2[1U]; + __IOM uint32_t COMP3; /*!< Offset: 0x050 (R/W) Comparator Register 3 */ + __IOM uint32_t MASK3; /*!< Offset: 0x054 (R/W) Mask Register 3 */ + __IOM uint32_t FUNCTION3; /*!< Offset: 0x058 (R/W) Function Register 3 */ +} DWT_Type; + +/* DWT Control Register Definitions */ +#define DWT_CTRL_NUMCOMP_Pos 28U /*!< DWT CTRL: NUMCOMP Position */ +#define DWT_CTRL_NUMCOMP_Msk (0xFUL << DWT_CTRL_NUMCOMP_Pos) /*!< DWT CTRL: NUMCOMP Mask */ + +#define DWT_CTRL_NOTRCPKT_Pos 27U /*!< DWT CTRL: NOTRCPKT Position */ +#define DWT_CTRL_NOTRCPKT_Msk (0x1UL << DWT_CTRL_NOTRCPKT_Pos) /*!< DWT CTRL: NOTRCPKT Mask */ + +#define DWT_CTRL_NOEXTTRIG_Pos 26U /*!< DWT CTRL: NOEXTTRIG Position */ +#define DWT_CTRL_NOEXTTRIG_Msk (0x1UL << DWT_CTRL_NOEXTTRIG_Pos) /*!< DWT CTRL: NOEXTTRIG Mask */ + +#define DWT_CTRL_NOCYCCNT_Pos 25U /*!< DWT CTRL: NOCYCCNT Position */ +#define DWT_CTRL_NOCYCCNT_Msk (0x1UL << DWT_CTRL_NOCYCCNT_Pos) /*!< DWT CTRL: NOCYCCNT Mask */ + +#define DWT_CTRL_NOPRFCNT_Pos 24U /*!< DWT CTRL: NOPRFCNT Position */ +#define DWT_CTRL_NOPRFCNT_Msk (0x1UL << DWT_CTRL_NOPRFCNT_Pos) /*!< DWT CTRL: NOPRFCNT Mask */ + +#define DWT_CTRL_CYCEVTENA_Pos 22U /*!< DWT CTRL: CYCEVTENA Position */ +#define DWT_CTRL_CYCEVTENA_Msk (0x1UL << DWT_CTRL_CYCEVTENA_Pos) /*!< DWT CTRL: CYCEVTENA Mask */ + +#define DWT_CTRL_FOLDEVTENA_Pos 21U /*!< DWT CTRL: FOLDEVTENA Position */ +#define DWT_CTRL_FOLDEVTENA_Msk (0x1UL << DWT_CTRL_FOLDEVTENA_Pos) /*!< DWT CTRL: FOLDEVTENA Mask */ + +#define DWT_CTRL_LSUEVTENA_Pos 20U /*!< DWT CTRL: LSUEVTENA Position */ +#define DWT_CTRL_LSUEVTENA_Msk (0x1UL << DWT_CTRL_LSUEVTENA_Pos) /*!< DWT CTRL: LSUEVTENA Mask */ + +#define DWT_CTRL_SLEEPEVTENA_Pos 19U /*!< DWT CTRL: SLEEPEVTENA Position */ +#define DWT_CTRL_SLEEPEVTENA_Msk (0x1UL << DWT_CTRL_SLEEPEVTENA_Pos) /*!< DWT CTRL: SLEEPEVTENA Mask */ + +#define DWT_CTRL_EXCEVTENA_Pos 18U /*!< DWT CTRL: EXCEVTENA Position */ +#define DWT_CTRL_EXCEVTENA_Msk (0x1UL << DWT_CTRL_EXCEVTENA_Pos) /*!< DWT CTRL: EXCEVTENA Mask */ + +#define DWT_CTRL_CPIEVTENA_Pos 17U /*!< DWT CTRL: CPIEVTENA Position */ +#define DWT_CTRL_CPIEVTENA_Msk (0x1UL << DWT_CTRL_CPIEVTENA_Pos) /*!< DWT CTRL: CPIEVTENA Mask */ + 
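+/* Usage sketch (illustrative only, not part of the upstream CMSIS header): the
+   _Pos/_Msk pairs above are intended to be combined with the DWT register struct
+   (the DWT pointer macro is defined further down in this file), e.g. to query which
+   DWT features an implementation provides before relying on them:
+
+     uint32_t num_comparators = (DWT->CTRL & DWT_CTRL_NUMCOMP_Msk) >> DWT_CTRL_NUMCOMP_Pos;
+     int has_cycle_counter    = ((DWT->CTRL & DWT_CTRL_NOCYCCNT_Msk) == 0U);
+
+   Note that NOCYCCNT reads as 1 when no cycle counter is implemented, hence the
+   inverted test. */
+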
+#define DWT_CTRL_EXCTRCENA_Pos 16U /*!< DWT CTRL: EXCTRCENA Position */ +#define DWT_CTRL_EXCTRCENA_Msk (0x1UL << DWT_CTRL_EXCTRCENA_Pos) /*!< DWT CTRL: EXCTRCENA Mask */ + +#define DWT_CTRL_PCSAMPLENA_Pos 12U /*!< DWT CTRL: PCSAMPLENA Position */ +#define DWT_CTRL_PCSAMPLENA_Msk (0x1UL << DWT_CTRL_PCSAMPLENA_Pos) /*!< DWT CTRL: PCSAMPLENA Mask */ + +#define DWT_CTRL_SYNCTAP_Pos 10U /*!< DWT CTRL: SYNCTAP Position */ +#define DWT_CTRL_SYNCTAP_Msk (0x3UL << DWT_CTRL_SYNCTAP_Pos) /*!< DWT CTRL: SYNCTAP Mask */ + +#define DWT_CTRL_CYCTAP_Pos 9U /*!< DWT CTRL: CYCTAP Position */ +#define DWT_CTRL_CYCTAP_Msk (0x1UL << DWT_CTRL_CYCTAP_Pos) /*!< DWT CTRL: CYCTAP Mask */ + +#define DWT_CTRL_POSTINIT_Pos 5U /*!< DWT CTRL: POSTINIT Position */ +#define DWT_CTRL_POSTINIT_Msk (0xFUL << DWT_CTRL_POSTINIT_Pos) /*!< DWT CTRL: POSTINIT Mask */ + +#define DWT_CTRL_POSTPRESET_Pos 1U /*!< DWT CTRL: POSTPRESET Position */ +#define DWT_CTRL_POSTPRESET_Msk (0xFUL << DWT_CTRL_POSTPRESET_Pos) /*!< DWT CTRL: POSTPRESET Mask */ + +#define DWT_CTRL_CYCCNTENA_Pos 0U /*!< DWT CTRL: CYCCNTENA Position */ +#define DWT_CTRL_CYCCNTENA_Msk (0x1UL /*<< DWT_CTRL_CYCCNTENA_Pos*/) /*!< DWT CTRL: CYCCNTENA Mask */ + +/* DWT CPI Count Register Definitions */ +#define DWT_CPICNT_CPICNT_Pos 0U /*!< DWT CPICNT: CPICNT Position */ +#define DWT_CPICNT_CPICNT_Msk (0xFFUL /*<< DWT_CPICNT_CPICNT_Pos*/) /*!< DWT CPICNT: CPICNT Mask */ + +/* DWT Exception Overhead Count Register Definitions */ +#define DWT_EXCCNT_EXCCNT_Pos 0U /*!< DWT EXCCNT: EXCCNT Position */ +#define DWT_EXCCNT_EXCCNT_Msk (0xFFUL /*<< DWT_EXCCNT_EXCCNT_Pos*/) /*!< DWT EXCCNT: EXCCNT Mask */ + +/* DWT Sleep Count Register Definitions */ +#define DWT_SLEEPCNT_SLEEPCNT_Pos 0U /*!< DWT SLEEPCNT: SLEEPCNT Position */ +#define DWT_SLEEPCNT_SLEEPCNT_Msk (0xFFUL /*<< DWT_SLEEPCNT_SLEEPCNT_Pos*/) /*!< DWT SLEEPCNT: SLEEPCNT Mask */ + +/* DWT LSU Count Register Definitions */ +#define DWT_LSUCNT_LSUCNT_Pos 0U /*!< DWT LSUCNT: LSUCNT Position */ +#define DWT_LSUCNT_LSUCNT_Msk (0xFFUL /*<< DWT_LSUCNT_LSUCNT_Pos*/) /*!< DWT LSUCNT: LSUCNT Mask */ + +/* DWT Folded-instruction Count Register Definitions */ +#define DWT_FOLDCNT_FOLDCNT_Pos 0U /*!< DWT FOLDCNT: FOLDCNT Position */ +#define DWT_FOLDCNT_FOLDCNT_Msk (0xFFUL /*<< DWT_FOLDCNT_FOLDCNT_Pos*/) /*!< DWT FOLDCNT: FOLDCNT Mask */ + +/* DWT Comparator Mask Register Definitions */ +#define DWT_MASK_MASK_Pos 0U /*!< DWT MASK: MASK Position */ +#define DWT_MASK_MASK_Msk (0x1FUL /*<< DWT_MASK_MASK_Pos*/) /*!< DWT MASK: MASK Mask */ + +/* DWT Comparator Function Register Definitions */ +#define DWT_FUNCTION_MATCHED_Pos 24U /*!< DWT FUNCTION: MATCHED Position */ +#define DWT_FUNCTION_MATCHED_Msk (0x1UL << DWT_FUNCTION_MATCHED_Pos) /*!< DWT FUNCTION: MATCHED Mask */ + +#define DWT_FUNCTION_DATAVADDR1_Pos 16U /*!< DWT FUNCTION: DATAVADDR1 Position */ +#define DWT_FUNCTION_DATAVADDR1_Msk (0xFUL << DWT_FUNCTION_DATAVADDR1_Pos) /*!< DWT FUNCTION: DATAVADDR1 Mask */ + +#define DWT_FUNCTION_DATAVADDR0_Pos 12U /*!< DWT FUNCTION: DATAVADDR0 Position */ +#define DWT_FUNCTION_DATAVADDR0_Msk (0xFUL << DWT_FUNCTION_DATAVADDR0_Pos) /*!< DWT FUNCTION: DATAVADDR0 Mask */ + +#define DWT_FUNCTION_DATAVSIZE_Pos 10U /*!< DWT FUNCTION: DATAVSIZE Position */ +#define DWT_FUNCTION_DATAVSIZE_Msk (0x3UL << DWT_FUNCTION_DATAVSIZE_Pos) /*!< DWT FUNCTION: DATAVSIZE Mask */ + +#define DWT_FUNCTION_LNK1ENA_Pos 9U /*!< DWT FUNCTION: LNK1ENA Position */ +#define DWT_FUNCTION_LNK1ENA_Msk (0x1UL << DWT_FUNCTION_LNK1ENA_Pos) /*!< DWT FUNCTION: LNK1ENA Mask */ + 
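+/* Usage sketch (illustrative only, not part of the upstream CMSIS header): a common
+   use of the counter definitions above is simple cycle profiling. The trace subsystem
+   must be enabled first via CoreDebug_DEMCR_TRCENA_Msk (defined later in this file):
+
+     CoreDebug->DEMCR |= CoreDebug_DEMCR_TRCENA_Msk;   // enable DWT/ITM
+     DWT->CYCCNT = 0U;                                 // reset the cycle counter
+     DWT->CTRL  |= DWT_CTRL_CYCCNTENA_Msk;             // start counting CPU cycles
+     // ... code under measurement ...
+     uint32_t cycles = DWT->CYCCNT;                    // elapsed core clock cycles
+*/
+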
+#define DWT_FUNCTION_DATAVMATCH_Pos 8U /*!< DWT FUNCTION: DATAVMATCH Position */ +#define DWT_FUNCTION_DATAVMATCH_Msk (0x1UL << DWT_FUNCTION_DATAVMATCH_Pos) /*!< DWT FUNCTION: DATAVMATCH Mask */ + +#define DWT_FUNCTION_CYCMATCH_Pos 7U /*!< DWT FUNCTION: CYCMATCH Position */ +#define DWT_FUNCTION_CYCMATCH_Msk (0x1UL << DWT_FUNCTION_CYCMATCH_Pos) /*!< DWT FUNCTION: CYCMATCH Mask */ + +#define DWT_FUNCTION_EMITRANGE_Pos 5U /*!< DWT FUNCTION: EMITRANGE Position */ +#define DWT_FUNCTION_EMITRANGE_Msk (0x1UL << DWT_FUNCTION_EMITRANGE_Pos) /*!< DWT FUNCTION: EMITRANGE Mask */ + +#define DWT_FUNCTION_FUNCTION_Pos 0U /*!< DWT FUNCTION: FUNCTION Position */ +#define DWT_FUNCTION_FUNCTION_Msk (0xFUL /*<< DWT_FUNCTION_FUNCTION_Pos*/) /*!< DWT FUNCTION: FUNCTION Mask */ + +/*@}*/ /* end of group CMSIS_DWT */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_TPI Trace Port Interface (TPI) + \brief Type definitions for the Trace Port Interface (TPI) + @{ + */ + +/** + \brief Structure type to access the Trace Port Interface Register (TPI). + */ +typedef struct +{ + __IM uint32_t SSPSR; /*!< Offset: 0x000 (R/ ) Supported Parallel Port Size Register */ + __IOM uint32_t CSPSR; /*!< Offset: 0x004 (R/W) Current Parallel Port Size Register */ + uint32_t RESERVED0[2U]; + __IOM uint32_t ACPR; /*!< Offset: 0x010 (R/W) Asynchronous Clock Prescaler Register */ + uint32_t RESERVED1[55U]; + __IOM uint32_t SPPR; /*!< Offset: 0x0F0 (R/W) Selected Pin Protocol Register */ + uint32_t RESERVED2[131U]; + __IM uint32_t FFSR; /*!< Offset: 0x300 (R/ ) Formatter and Flush Status Register */ + __IOM uint32_t FFCR; /*!< Offset: 0x304 (R/W) Formatter and Flush Control Register */ + __IM uint32_t FSCR; /*!< Offset: 0x308 (R/ ) Formatter Synchronization Counter Register */ + uint32_t RESERVED3[759U]; + __IM uint32_t TRIGGER; /*!< Offset: 0xEE8 (R/ ) TRIGGER Register */ + __IM uint32_t FIFO0; /*!< Offset: 0xEEC (R/ ) Integration ETM Data */ + __IM uint32_t ITATBCTR2; /*!< Offset: 0xEF0 (R/ ) ITATBCTR2 */ + uint32_t RESERVED4[1U]; + __IM uint32_t ITATBCTR0; /*!< Offset: 0xEF8 (R/ ) ITATBCTR0 */ + __IM uint32_t FIFO1; /*!< Offset: 0xEFC (R/ ) Integration ITM Data */ + __IOM uint32_t ITCTRL; /*!< Offset: 0xF00 (R/W) Integration Mode Control */ + uint32_t RESERVED5[39U]; + __IOM uint32_t CLAIMSET; /*!< Offset: 0xFA0 (R/W) Claim tag set */ + __IOM uint32_t CLAIMCLR; /*!< Offset: 0xFA4 (R/W) Claim tag clear */ + uint32_t RESERVED7[8U]; + __IM uint32_t DEVID; /*!< Offset: 0xFC8 (R/ ) TPIU_DEVID */ + __IM uint32_t DEVTYPE; /*!< Offset: 0xFCC (R/ ) TPIU_DEVTYPE */ +} TPI_Type; + +/* TPI Asynchronous Clock Prescaler Register Definitions */ +#define TPI_ACPR_PRESCALER_Pos 0U /*!< TPI ACPR: PRESCALER Position */ +#define TPI_ACPR_PRESCALER_Msk (0x1FFFUL /*<< TPI_ACPR_PRESCALER_Pos*/) /*!< TPI ACPR: PRESCALER Mask */ + +/* TPI Selected Pin Protocol Register Definitions */ +#define TPI_SPPR_TXMODE_Pos 0U /*!< TPI SPPR: TXMODE Position */ +#define TPI_SPPR_TXMODE_Msk (0x3UL /*<< TPI_SPPR_TXMODE_Pos*/) /*!< TPI SPPR: TXMODE Mask */ + +/* TPI Formatter and Flush Status Register Definitions */ +#define TPI_FFSR_FtNonStop_Pos 3U /*!< TPI FFSR: FtNonStop Position */ +#define TPI_FFSR_FtNonStop_Msk (0x1UL << TPI_FFSR_FtNonStop_Pos) /*!< TPI FFSR: FtNonStop Mask */ + +#define TPI_FFSR_TCPresent_Pos 2U /*!< TPI FFSR: TCPresent Position */ +#define TPI_FFSR_TCPresent_Msk (0x1UL << TPI_FFSR_TCPresent_Pos) /*!< TPI FFSR: TCPresent Mask */ + +#define TPI_FFSR_FtStopped_Pos 1U /*!< TPI FFSR: FtStopped Position */ +#define TPI_FFSR_FtStopped_Msk (0x1UL 
<< TPI_FFSR_FtStopped_Pos) /*!< TPI FFSR: FtStopped Mask */ + +#define TPI_FFSR_FlInProg_Pos 0U /*!< TPI FFSR: FlInProg Position */ +#define TPI_FFSR_FlInProg_Msk (0x1UL /*<< TPI_FFSR_FlInProg_Pos*/) /*!< TPI FFSR: FlInProg Mask */ + +/* TPI Formatter and Flush Control Register Definitions */ +#define TPI_FFCR_TrigIn_Pos 8U /*!< TPI FFCR: TrigIn Position */ +#define TPI_FFCR_TrigIn_Msk (0x1UL << TPI_FFCR_TrigIn_Pos) /*!< TPI FFCR: TrigIn Mask */ + +#define TPI_FFCR_EnFCont_Pos 1U /*!< TPI FFCR: EnFCont Position */ +#define TPI_FFCR_EnFCont_Msk (0x1UL << TPI_FFCR_EnFCont_Pos) /*!< TPI FFCR: EnFCont Mask */ + +/* TPI TRIGGER Register Definitions */ +#define TPI_TRIGGER_TRIGGER_Pos 0U /*!< TPI TRIGGER: TRIGGER Position */ +#define TPI_TRIGGER_TRIGGER_Msk (0x1UL /*<< TPI_TRIGGER_TRIGGER_Pos*/) /*!< TPI TRIGGER: TRIGGER Mask */ + +/* TPI Integration ETM Data Register Definitions (FIFO0) */ +#define TPI_FIFO0_ITM_ATVALID_Pos 29U /*!< TPI FIFO0: ITM_ATVALID Position */ +#define TPI_FIFO0_ITM_ATVALID_Msk (0x3UL << TPI_FIFO0_ITM_ATVALID_Pos) /*!< TPI FIFO0: ITM_ATVALID Mask */ + +#define TPI_FIFO0_ITM_bytecount_Pos 27U /*!< TPI FIFO0: ITM_bytecount Position */ +#define TPI_FIFO0_ITM_bytecount_Msk (0x3UL << TPI_FIFO0_ITM_bytecount_Pos) /*!< TPI FIFO0: ITM_bytecount Mask */ + +#define TPI_FIFO0_ETM_ATVALID_Pos 26U /*!< TPI FIFO0: ETM_ATVALID Position */ +#define TPI_FIFO0_ETM_ATVALID_Msk (0x3UL << TPI_FIFO0_ETM_ATVALID_Pos) /*!< TPI FIFO0: ETM_ATVALID Mask */ + +#define TPI_FIFO0_ETM_bytecount_Pos 24U /*!< TPI FIFO0: ETM_bytecount Position */ +#define TPI_FIFO0_ETM_bytecount_Msk (0x3UL << TPI_FIFO0_ETM_bytecount_Pos) /*!< TPI FIFO0: ETM_bytecount Mask */ + +#define TPI_FIFO0_ETM2_Pos 16U /*!< TPI FIFO0: ETM2 Position */ +#define TPI_FIFO0_ETM2_Msk (0xFFUL << TPI_FIFO0_ETM2_Pos) /*!< TPI FIFO0: ETM2 Mask */ + +#define TPI_FIFO0_ETM1_Pos 8U /*!< TPI FIFO0: ETM1 Position */ +#define TPI_FIFO0_ETM1_Msk (0xFFUL << TPI_FIFO0_ETM1_Pos) /*!< TPI FIFO0: ETM1 Mask */ + +#define TPI_FIFO0_ETM0_Pos 0U /*!< TPI FIFO0: ETM0 Position */ +#define TPI_FIFO0_ETM0_Msk (0xFFUL /*<< TPI_FIFO0_ETM0_Pos*/) /*!< TPI FIFO0: ETM0 Mask */ + +/* TPI ITATBCTR2 Register Definitions */ +#define TPI_ITATBCTR2_ATREADY2_Pos 0U /*!< TPI ITATBCTR2: ATREADY2 Position */ +#define TPI_ITATBCTR2_ATREADY2_Msk (0x1UL /*<< TPI_ITATBCTR2_ATREADY2_Pos*/) /*!< TPI ITATBCTR2: ATREADY2 Mask */ + +#define TPI_ITATBCTR2_ATREADY1_Pos 0U /*!< TPI ITATBCTR2: ATREADY1 Position */ +#define TPI_ITATBCTR2_ATREADY1_Msk (0x1UL /*<< TPI_ITATBCTR2_ATREADY1_Pos*/) /*!< TPI ITATBCTR2: ATREADY1 Mask */ + +/* TPI Integration ITM Data Register Definitions (FIFO1) */ +#define TPI_FIFO1_ITM_ATVALID_Pos 29U /*!< TPI FIFO1: ITM_ATVALID Position */ +#define TPI_FIFO1_ITM_ATVALID_Msk (0x3UL << TPI_FIFO1_ITM_ATVALID_Pos) /*!< TPI FIFO1: ITM_ATVALID Mask */ + +#define TPI_FIFO1_ITM_bytecount_Pos 27U /*!< TPI FIFO1: ITM_bytecount Position */ +#define TPI_FIFO1_ITM_bytecount_Msk (0x3UL << TPI_FIFO1_ITM_bytecount_Pos) /*!< TPI FIFO1: ITM_bytecount Mask */ + +#define TPI_FIFO1_ETM_ATVALID_Pos 26U /*!< TPI FIFO1: ETM_ATVALID Position */ +#define TPI_FIFO1_ETM_ATVALID_Msk (0x3UL << TPI_FIFO1_ETM_ATVALID_Pos) /*!< TPI FIFO1: ETM_ATVALID Mask */ + +#define TPI_FIFO1_ETM_bytecount_Pos 24U /*!< TPI FIFO1: ETM_bytecount Position */ +#define TPI_FIFO1_ETM_bytecount_Msk (0x3UL << TPI_FIFO1_ETM_bytecount_Pos) /*!< TPI FIFO1: ETM_bytecount Mask */ + +#define TPI_FIFO1_ITM2_Pos 16U /*!< TPI FIFO1: ITM2 Position */ +#define TPI_FIFO1_ITM2_Msk (0xFFUL << TPI_FIFO1_ITM2_Pos) /*!< TPI FIFO1: 
ITM2 Mask */ + +#define TPI_FIFO1_ITM1_Pos 8U /*!< TPI FIFO1: ITM1 Position */ +#define TPI_FIFO1_ITM1_Msk (0xFFUL << TPI_FIFO1_ITM1_Pos) /*!< TPI FIFO1: ITM1 Mask */ + +#define TPI_FIFO1_ITM0_Pos 0U /*!< TPI FIFO1: ITM0 Position */ +#define TPI_FIFO1_ITM0_Msk (0xFFUL /*<< TPI_FIFO1_ITM0_Pos*/) /*!< TPI FIFO1: ITM0 Mask */ + +/* TPI ITATBCTR0 Register Definitions */ +#define TPI_ITATBCTR0_ATREADY2_Pos 0U /*!< TPI ITATBCTR0: ATREADY2 Position */ +#define TPI_ITATBCTR0_ATREADY2_Msk (0x1UL /*<< TPI_ITATBCTR0_ATREADY2_Pos*/) /*!< TPI ITATBCTR0: ATREADY2 Mask */ + +#define TPI_ITATBCTR0_ATREADY1_Pos 0U /*!< TPI ITATBCTR0: ATREADY1 Position */ +#define TPI_ITATBCTR0_ATREADY1_Msk (0x1UL /*<< TPI_ITATBCTR0_ATREADY1_Pos*/) /*!< TPI ITATBCTR0: ATREADY1 Mask */ + +/* TPI Integration Mode Control Register Definitions */ +#define TPI_ITCTRL_Mode_Pos 0U /*!< TPI ITCTRL: Mode Position */ +#define TPI_ITCTRL_Mode_Msk (0x3UL /*<< TPI_ITCTRL_Mode_Pos*/) /*!< TPI ITCTRL: Mode Mask */ + +/* TPI DEVID Register Definitions */ +#define TPI_DEVID_NRZVALID_Pos 11U /*!< TPI DEVID: NRZVALID Position */ +#define TPI_DEVID_NRZVALID_Msk (0x1UL << TPI_DEVID_NRZVALID_Pos) /*!< TPI DEVID: NRZVALID Mask */ + +#define TPI_DEVID_MANCVALID_Pos 10U /*!< TPI DEVID: MANCVALID Position */ +#define TPI_DEVID_MANCVALID_Msk (0x1UL << TPI_DEVID_MANCVALID_Pos) /*!< TPI DEVID: MANCVALID Mask */ + +#define TPI_DEVID_PTINVALID_Pos 9U /*!< TPI DEVID: PTINVALID Position */ +#define TPI_DEVID_PTINVALID_Msk (0x1UL << TPI_DEVID_PTINVALID_Pos) /*!< TPI DEVID: PTINVALID Mask */ + +#define TPI_DEVID_MinBufSz_Pos 6U /*!< TPI DEVID: MinBufSz Position */ +#define TPI_DEVID_MinBufSz_Msk (0x7UL << TPI_DEVID_MinBufSz_Pos) /*!< TPI DEVID: MinBufSz Mask */ + +#define TPI_DEVID_AsynClkIn_Pos 5U /*!< TPI DEVID: AsynClkIn Position */ +#define TPI_DEVID_AsynClkIn_Msk (0x1UL << TPI_DEVID_AsynClkIn_Pos) /*!< TPI DEVID: AsynClkIn Mask */ + +#define TPI_DEVID_NrTraceInput_Pos 0U /*!< TPI DEVID: NrTraceInput Position */ +#define TPI_DEVID_NrTraceInput_Msk (0x1FUL /*<< TPI_DEVID_NrTraceInput_Pos*/) /*!< TPI DEVID: NrTraceInput Mask */ + +/* TPI DEVTYPE Register Definitions */ +#define TPI_DEVTYPE_SubType_Pos 4U /*!< TPI DEVTYPE: SubType Position */ +#define TPI_DEVTYPE_SubType_Msk (0xFUL /*<< TPI_DEVTYPE_SubType_Pos*/) /*!< TPI DEVTYPE: SubType Mask */ + +#define TPI_DEVTYPE_MajorType_Pos 0U /*!< TPI DEVTYPE: MajorType Position */ +#define TPI_DEVTYPE_MajorType_Msk (0xFUL << TPI_DEVTYPE_MajorType_Pos) /*!< TPI DEVTYPE: MajorType Mask */ + +/*@}*/ /* end of group CMSIS_TPI */ + + +#if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_MPU Memory Protection Unit (MPU) + \brief Type definitions for the Memory Protection Unit (MPU) + @{ + */ + +/** + \brief Structure type to access the Memory Protection Unit (MPU). 
+ */ +typedef struct +{ + __IM uint32_t TYPE; /*!< Offset: 0x000 (R/ ) MPU Type Register */ + __IOM uint32_t CTRL; /*!< Offset: 0x004 (R/W) MPU Control Register */ + __IOM uint32_t RNR; /*!< Offset: 0x008 (R/W) MPU Region RNRber Register */ + __IOM uint32_t RBAR; /*!< Offset: 0x00C (R/W) MPU Region Base Address Register */ + __IOM uint32_t RASR; /*!< Offset: 0x010 (R/W) MPU Region Attribute and Size Register */ + __IOM uint32_t RBAR_A1; /*!< Offset: 0x014 (R/W) MPU Alias 1 Region Base Address Register */ + __IOM uint32_t RASR_A1; /*!< Offset: 0x018 (R/W) MPU Alias 1 Region Attribute and Size Register */ + __IOM uint32_t RBAR_A2; /*!< Offset: 0x01C (R/W) MPU Alias 2 Region Base Address Register */ + __IOM uint32_t RASR_A2; /*!< Offset: 0x020 (R/W) MPU Alias 2 Region Attribute and Size Register */ + __IOM uint32_t RBAR_A3; /*!< Offset: 0x024 (R/W) MPU Alias 3 Region Base Address Register */ + __IOM uint32_t RASR_A3; /*!< Offset: 0x028 (R/W) MPU Alias 3 Region Attribute and Size Register */ +} MPU_Type; + +#define MPU_TYPE_RALIASES 4U + +/* MPU Type Register Definitions */ +#define MPU_TYPE_IREGION_Pos 16U /*!< MPU TYPE: IREGION Position */ +#define MPU_TYPE_IREGION_Msk (0xFFUL << MPU_TYPE_IREGION_Pos) /*!< MPU TYPE: IREGION Mask */ + +#define MPU_TYPE_DREGION_Pos 8U /*!< MPU TYPE: DREGION Position */ +#define MPU_TYPE_DREGION_Msk (0xFFUL << MPU_TYPE_DREGION_Pos) /*!< MPU TYPE: DREGION Mask */ + +#define MPU_TYPE_SEPARATE_Pos 0U /*!< MPU TYPE: SEPARATE Position */ +#define MPU_TYPE_SEPARATE_Msk (1UL /*<< MPU_TYPE_SEPARATE_Pos*/) /*!< MPU TYPE: SEPARATE Mask */ + +/* MPU Control Register Definitions */ +#define MPU_CTRL_PRIVDEFENA_Pos 2U /*!< MPU CTRL: PRIVDEFENA Position */ +#define MPU_CTRL_PRIVDEFENA_Msk (1UL << MPU_CTRL_PRIVDEFENA_Pos) /*!< MPU CTRL: PRIVDEFENA Mask */ + +#define MPU_CTRL_HFNMIENA_Pos 1U /*!< MPU CTRL: HFNMIENA Position */ +#define MPU_CTRL_HFNMIENA_Msk (1UL << MPU_CTRL_HFNMIENA_Pos) /*!< MPU CTRL: HFNMIENA Mask */ + +#define MPU_CTRL_ENABLE_Pos 0U /*!< MPU CTRL: ENABLE Position */ +#define MPU_CTRL_ENABLE_Msk (1UL /*<< MPU_CTRL_ENABLE_Pos*/) /*!< MPU CTRL: ENABLE Mask */ + +/* MPU Region Number Register Definitions */ +#define MPU_RNR_REGION_Pos 0U /*!< MPU RNR: REGION Position */ +#define MPU_RNR_REGION_Msk (0xFFUL /*<< MPU_RNR_REGION_Pos*/) /*!< MPU RNR: REGION Mask */ + +/* MPU Region Base Address Register Definitions */ +#define MPU_RBAR_ADDR_Pos 5U /*!< MPU RBAR: ADDR Position */ +#define MPU_RBAR_ADDR_Msk (0x7FFFFFFUL << MPU_RBAR_ADDR_Pos) /*!< MPU RBAR: ADDR Mask */ + +#define MPU_RBAR_VALID_Pos 4U /*!< MPU RBAR: VALID Position */ +#define MPU_RBAR_VALID_Msk (1UL << MPU_RBAR_VALID_Pos) /*!< MPU RBAR: VALID Mask */ + +#define MPU_RBAR_REGION_Pos 0U /*!< MPU RBAR: REGION Position */ +#define MPU_RBAR_REGION_Msk (0xFUL /*<< MPU_RBAR_REGION_Pos*/) /*!< MPU RBAR: REGION Mask */ + +/* MPU Region Attribute and Size Register Definitions */ +#define MPU_RASR_ATTRS_Pos 16U /*!< MPU RASR: MPU Region Attribute field Position */ +#define MPU_RASR_ATTRS_Msk (0xFFFFUL << MPU_RASR_ATTRS_Pos) /*!< MPU RASR: MPU Region Attribute field Mask */ + +#define MPU_RASR_XN_Pos 28U /*!< MPU RASR: ATTRS.XN Position */ +#define MPU_RASR_XN_Msk (1UL << MPU_RASR_XN_Pos) /*!< MPU RASR: ATTRS.XN Mask */ + +#define MPU_RASR_AP_Pos 24U /*!< MPU RASR: ATTRS.AP Position */ +#define MPU_RASR_AP_Msk (0x7UL << MPU_RASR_AP_Pos) /*!< MPU RASR: ATTRS.AP Mask */ + +#define MPU_RASR_TEX_Pos 19U /*!< MPU RASR: ATTRS.TEX Position */ +#define MPU_RASR_TEX_Msk (0x7UL << MPU_RASR_TEX_Pos) /*!< MPU RASR: ATTRS.TEX 
Mask */ + +#define MPU_RASR_S_Pos 18U /*!< MPU RASR: ATTRS.S Position */ +#define MPU_RASR_S_Msk (1UL << MPU_RASR_S_Pos) /*!< MPU RASR: ATTRS.S Mask */ + +#define MPU_RASR_C_Pos 17U /*!< MPU RASR: ATTRS.C Position */ +#define MPU_RASR_C_Msk (1UL << MPU_RASR_C_Pos) /*!< MPU RASR: ATTRS.C Mask */ + +#define MPU_RASR_B_Pos 16U /*!< MPU RASR: ATTRS.B Position */ +#define MPU_RASR_B_Msk (1UL << MPU_RASR_B_Pos) /*!< MPU RASR: ATTRS.B Mask */ + +#define MPU_RASR_SRD_Pos 8U /*!< MPU RASR: Sub-Region Disable Position */ +#define MPU_RASR_SRD_Msk (0xFFUL << MPU_RASR_SRD_Pos) /*!< MPU RASR: Sub-Region Disable Mask */ + +#define MPU_RASR_SIZE_Pos 1U /*!< MPU RASR: Region Size Field Position */ +#define MPU_RASR_SIZE_Msk (0x1FUL << MPU_RASR_SIZE_Pos) /*!< MPU RASR: Region Size Field Mask */ + +#define MPU_RASR_ENABLE_Pos 0U /*!< MPU RASR: Region enable bit Position */ +#define MPU_RASR_ENABLE_Msk (1UL /*<< MPU_RASR_ENABLE_Pos*/) /*!< MPU RASR: Region enable bit Disable Mask */ + +/*@} end of group CMSIS_MPU */ +#endif /* defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_FPU Floating Point Unit (FPU) + \brief Type definitions for the Floating Point Unit (FPU) + @{ + */ + +/** + \brief Structure type to access the Floating Point Unit (FPU). + */ +typedef struct +{ + uint32_t RESERVED0[1U]; + __IOM uint32_t FPCCR; /*!< Offset: 0x004 (R/W) Floating-Point Context Control Register */ + __IOM uint32_t FPCAR; /*!< Offset: 0x008 (R/W) Floating-Point Context Address Register */ + __IOM uint32_t FPDSCR; /*!< Offset: 0x00C (R/W) Floating-Point Default Status Control Register */ + __IM uint32_t MVFR0; /*!< Offset: 0x010 (R/ ) Media and FP Feature Register 0 */ + __IM uint32_t MVFR1; /*!< Offset: 0x014 (R/ ) Media and FP Feature Register 1 */ +} FPU_Type; + +/* Floating-Point Context Control Register Definitions */ +#define FPU_FPCCR_ASPEN_Pos 31U /*!< FPCCR: ASPEN bit Position */ +#define FPU_FPCCR_ASPEN_Msk (1UL << FPU_FPCCR_ASPEN_Pos) /*!< FPCCR: ASPEN bit Mask */ + +#define FPU_FPCCR_LSPEN_Pos 30U /*!< FPCCR: LSPEN Position */ +#define FPU_FPCCR_LSPEN_Msk (1UL << FPU_FPCCR_LSPEN_Pos) /*!< FPCCR: LSPEN bit Mask */ + +#define FPU_FPCCR_MONRDY_Pos 8U /*!< FPCCR: MONRDY Position */ +#define FPU_FPCCR_MONRDY_Msk (1UL << FPU_FPCCR_MONRDY_Pos) /*!< FPCCR: MONRDY bit Mask */ + +#define FPU_FPCCR_BFRDY_Pos 6U /*!< FPCCR: BFRDY Position */ +#define FPU_FPCCR_BFRDY_Msk (1UL << FPU_FPCCR_BFRDY_Pos) /*!< FPCCR: BFRDY bit Mask */ + +#define FPU_FPCCR_MMRDY_Pos 5U /*!< FPCCR: MMRDY Position */ +#define FPU_FPCCR_MMRDY_Msk (1UL << FPU_FPCCR_MMRDY_Pos) /*!< FPCCR: MMRDY bit Mask */ + +#define FPU_FPCCR_HFRDY_Pos 4U /*!< FPCCR: HFRDY Position */ +#define FPU_FPCCR_HFRDY_Msk (1UL << FPU_FPCCR_HFRDY_Pos) /*!< FPCCR: HFRDY bit Mask */ + +#define FPU_FPCCR_THREAD_Pos 3U /*!< FPCCR: processor mode bit Position */ +#define FPU_FPCCR_THREAD_Msk (1UL << FPU_FPCCR_THREAD_Pos) /*!< FPCCR: processor mode active bit Mask */ + +#define FPU_FPCCR_USER_Pos 1U /*!< FPCCR: privilege level bit Position */ +#define FPU_FPCCR_USER_Msk (1UL << FPU_FPCCR_USER_Pos) /*!< FPCCR: privilege level bit Mask */ + +#define FPU_FPCCR_LSPACT_Pos 0U /*!< FPCCR: Lazy state preservation active bit Position */ +#define FPU_FPCCR_LSPACT_Msk (1UL /*<< FPU_FPCCR_LSPACT_Pos*/) /*!< FPCCR: Lazy state preservation active bit Mask */ + +/* Floating-Point Context Address Register Definitions */ +#define FPU_FPCAR_ADDRESS_Pos 3U /*!< FPCAR: ADDRESS bit Position */ +#define FPU_FPCAR_ADDRESS_Msk (0x1FFFFFFFUL << 
FPU_FPCAR_ADDRESS_Pos) /*!< FPCAR: ADDRESS bit Mask */ + +/* Floating-Point Default Status Control Register Definitions */ +#define FPU_FPDSCR_AHP_Pos 26U /*!< FPDSCR: AHP bit Position */ +#define FPU_FPDSCR_AHP_Msk (1UL << FPU_FPDSCR_AHP_Pos) /*!< FPDSCR: AHP bit Mask */ + +#define FPU_FPDSCR_DN_Pos 25U /*!< FPDSCR: DN bit Position */ +#define FPU_FPDSCR_DN_Msk (1UL << FPU_FPDSCR_DN_Pos) /*!< FPDSCR: DN bit Mask */ + +#define FPU_FPDSCR_FZ_Pos 24U /*!< FPDSCR: FZ bit Position */ +#define FPU_FPDSCR_FZ_Msk (1UL << FPU_FPDSCR_FZ_Pos) /*!< FPDSCR: FZ bit Mask */ + +#define FPU_FPDSCR_RMode_Pos 22U /*!< FPDSCR: RMode bit Position */ +#define FPU_FPDSCR_RMode_Msk (3UL << FPU_FPDSCR_RMode_Pos) /*!< FPDSCR: RMode bit Mask */ + +/* Media and FP Feature Register 0 Definitions */ +#define FPU_MVFR0_FP_rounding_modes_Pos 28U /*!< MVFR0: FP rounding modes bits Position */ +#define FPU_MVFR0_FP_rounding_modes_Msk (0xFUL << FPU_MVFR0_FP_rounding_modes_Pos) /*!< MVFR0: FP rounding modes bits Mask */ + +#define FPU_MVFR0_Short_vectors_Pos 24U /*!< MVFR0: Short vectors bits Position */ +#define FPU_MVFR0_Short_vectors_Msk (0xFUL << FPU_MVFR0_Short_vectors_Pos) /*!< MVFR0: Short vectors bits Mask */ + +#define FPU_MVFR0_Square_root_Pos 20U /*!< MVFR0: Square root bits Position */ +#define FPU_MVFR0_Square_root_Msk (0xFUL << FPU_MVFR0_Square_root_Pos) /*!< MVFR0: Square root bits Mask */ + +#define FPU_MVFR0_Divide_Pos 16U /*!< MVFR0: Divide bits Position */ +#define FPU_MVFR0_Divide_Msk (0xFUL << FPU_MVFR0_Divide_Pos) /*!< MVFR0: Divide bits Mask */ + +#define FPU_MVFR0_FP_excep_trapping_Pos 12U /*!< MVFR0: FP exception trapping bits Position */ +#define FPU_MVFR0_FP_excep_trapping_Msk (0xFUL << FPU_MVFR0_FP_excep_trapping_Pos) /*!< MVFR0: FP exception trapping bits Mask */ + +#define FPU_MVFR0_Double_precision_Pos 8U /*!< MVFR0: Double-precision bits Position */ +#define FPU_MVFR0_Double_precision_Msk (0xFUL << FPU_MVFR0_Double_precision_Pos) /*!< MVFR0: Double-precision bits Mask */ + +#define FPU_MVFR0_Single_precision_Pos 4U /*!< MVFR0: Single-precision bits Position */ +#define FPU_MVFR0_Single_precision_Msk (0xFUL << FPU_MVFR0_Single_precision_Pos) /*!< MVFR0: Single-precision bits Mask */ + +#define FPU_MVFR0_A_SIMD_registers_Pos 0U /*!< MVFR0: A_SIMD registers bits Position */ +#define FPU_MVFR0_A_SIMD_registers_Msk (0xFUL /*<< FPU_MVFR0_A_SIMD_registers_Pos*/) /*!< MVFR0: A_SIMD registers bits Mask */ + +/* Media and FP Feature Register 1 Definitions */ +#define FPU_MVFR1_FP_fused_MAC_Pos 28U /*!< MVFR1: FP fused MAC bits Position */ +#define FPU_MVFR1_FP_fused_MAC_Msk (0xFUL << FPU_MVFR1_FP_fused_MAC_Pos) /*!< MVFR1: FP fused MAC bits Mask */ + +#define FPU_MVFR1_FP_HPFP_Pos 24U /*!< MVFR1: FP HPFP bits Position */ +#define FPU_MVFR1_FP_HPFP_Msk (0xFUL << FPU_MVFR1_FP_HPFP_Pos) /*!< MVFR1: FP HPFP bits Mask */ + +#define FPU_MVFR1_D_NaN_mode_Pos 4U /*!< MVFR1: D_NaN mode bits Position */ +#define FPU_MVFR1_D_NaN_mode_Msk (0xFUL << FPU_MVFR1_D_NaN_mode_Pos) /*!< MVFR1: D_NaN mode bits Mask */ + +#define FPU_MVFR1_FtZ_mode_Pos 0U /*!< MVFR1: FtZ mode bits Position */ +#define FPU_MVFR1_FtZ_mode_Msk (0xFUL /*<< FPU_MVFR1_FtZ_mode_Pos*/) /*!< MVFR1: FtZ mode bits Mask */ + +/*@} end of group CMSIS_FPU */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_CoreDebug Core Debug Registers (CoreDebug) + \brief Type definitions for the Core Debug Registers + @{ + */ + +/** + \brief Structure type to access the Core Debug Register (CoreDebug). 
+ */ +typedef struct +{ + __IOM uint32_t DHCSR; /*!< Offset: 0x000 (R/W) Debug Halting Control and Status Register */ + __OM uint32_t DCRSR; /*!< Offset: 0x004 ( /W) Debug Core Register Selector Register */ + __IOM uint32_t DCRDR; /*!< Offset: 0x008 (R/W) Debug Core Register Data Register */ + __IOM uint32_t DEMCR; /*!< Offset: 0x00C (R/W) Debug Exception and Monitor Control Register */ +} CoreDebug_Type; + +/* Debug Halting Control and Status Register Definitions */ +#define CoreDebug_DHCSR_DBGKEY_Pos 16U /*!< CoreDebug DHCSR: DBGKEY Position */ +#define CoreDebug_DHCSR_DBGKEY_Msk (0xFFFFUL << CoreDebug_DHCSR_DBGKEY_Pos) /*!< CoreDebug DHCSR: DBGKEY Mask */ + +#define CoreDebug_DHCSR_S_RESET_ST_Pos 25U /*!< CoreDebug DHCSR: S_RESET_ST Position */ +#define CoreDebug_DHCSR_S_RESET_ST_Msk (1UL << CoreDebug_DHCSR_S_RESET_ST_Pos) /*!< CoreDebug DHCSR: S_RESET_ST Mask */ + +#define CoreDebug_DHCSR_S_RETIRE_ST_Pos 24U /*!< CoreDebug DHCSR: S_RETIRE_ST Position */ +#define CoreDebug_DHCSR_S_RETIRE_ST_Msk (1UL << CoreDebug_DHCSR_S_RETIRE_ST_Pos) /*!< CoreDebug DHCSR: S_RETIRE_ST Mask */ + +#define CoreDebug_DHCSR_S_LOCKUP_Pos 19U /*!< CoreDebug DHCSR: S_LOCKUP Position */ +#define CoreDebug_DHCSR_S_LOCKUP_Msk (1UL << CoreDebug_DHCSR_S_LOCKUP_Pos) /*!< CoreDebug DHCSR: S_LOCKUP Mask */ + +#define CoreDebug_DHCSR_S_SLEEP_Pos 18U /*!< CoreDebug DHCSR: S_SLEEP Position */ +#define CoreDebug_DHCSR_S_SLEEP_Msk (1UL << CoreDebug_DHCSR_S_SLEEP_Pos) /*!< CoreDebug DHCSR: S_SLEEP Mask */ + +#define CoreDebug_DHCSR_S_HALT_Pos 17U /*!< CoreDebug DHCSR: S_HALT Position */ +#define CoreDebug_DHCSR_S_HALT_Msk (1UL << CoreDebug_DHCSR_S_HALT_Pos) /*!< CoreDebug DHCSR: S_HALT Mask */ + +#define CoreDebug_DHCSR_S_REGRDY_Pos 16U /*!< CoreDebug DHCSR: S_REGRDY Position */ +#define CoreDebug_DHCSR_S_REGRDY_Msk (1UL << CoreDebug_DHCSR_S_REGRDY_Pos) /*!< CoreDebug DHCSR: S_REGRDY Mask */ + +#define CoreDebug_DHCSR_C_SNAPSTALL_Pos 5U /*!< CoreDebug DHCSR: C_SNAPSTALL Position */ +#define CoreDebug_DHCSR_C_SNAPSTALL_Msk (1UL << CoreDebug_DHCSR_C_SNAPSTALL_Pos) /*!< CoreDebug DHCSR: C_SNAPSTALL Mask */ + +#define CoreDebug_DHCSR_C_MASKINTS_Pos 3U /*!< CoreDebug DHCSR: C_MASKINTS Position */ +#define CoreDebug_DHCSR_C_MASKINTS_Msk (1UL << CoreDebug_DHCSR_C_MASKINTS_Pos) /*!< CoreDebug DHCSR: C_MASKINTS Mask */ + +#define CoreDebug_DHCSR_C_STEP_Pos 2U /*!< CoreDebug DHCSR: C_STEP Position */ +#define CoreDebug_DHCSR_C_STEP_Msk (1UL << CoreDebug_DHCSR_C_STEP_Pos) /*!< CoreDebug DHCSR: C_STEP Mask */ + +#define CoreDebug_DHCSR_C_HALT_Pos 1U /*!< CoreDebug DHCSR: C_HALT Position */ +#define CoreDebug_DHCSR_C_HALT_Msk (1UL << CoreDebug_DHCSR_C_HALT_Pos) /*!< CoreDebug DHCSR: C_HALT Mask */ + +#define CoreDebug_DHCSR_C_DEBUGEN_Pos 0U /*!< CoreDebug DHCSR: C_DEBUGEN Position */ +#define CoreDebug_DHCSR_C_DEBUGEN_Msk (1UL /*<< CoreDebug_DHCSR_C_DEBUGEN_Pos*/) /*!< CoreDebug DHCSR: C_DEBUGEN Mask */ + +/* Debug Core Register Selector Register Definitions */ +#define CoreDebug_DCRSR_REGWnR_Pos 16U /*!< CoreDebug DCRSR: REGWnR Position */ +#define CoreDebug_DCRSR_REGWnR_Msk (1UL << CoreDebug_DCRSR_REGWnR_Pos) /*!< CoreDebug DCRSR: REGWnR Mask */ + +#define CoreDebug_DCRSR_REGSEL_Pos 0U /*!< CoreDebug DCRSR: REGSEL Position */ +#define CoreDebug_DCRSR_REGSEL_Msk (0x1FUL /*<< CoreDebug_DCRSR_REGSEL_Pos*/) /*!< CoreDebug DCRSR: REGSEL Mask */ + +/* Debug Exception and Monitor Control Register Definitions */ +#define CoreDebug_DEMCR_TRCENA_Pos 24U /*!< CoreDebug DEMCR: TRCENA Position */ +#define CoreDebug_DEMCR_TRCENA_Msk (1UL << 
CoreDebug_DEMCR_TRCENA_Pos) /*!< CoreDebug DEMCR: TRCENA Mask */ + +#define CoreDebug_DEMCR_MON_REQ_Pos 19U /*!< CoreDebug DEMCR: MON_REQ Position */ +#define CoreDebug_DEMCR_MON_REQ_Msk (1UL << CoreDebug_DEMCR_MON_REQ_Pos) /*!< CoreDebug DEMCR: MON_REQ Mask */ + +#define CoreDebug_DEMCR_MON_STEP_Pos 18U /*!< CoreDebug DEMCR: MON_STEP Position */ +#define CoreDebug_DEMCR_MON_STEP_Msk (1UL << CoreDebug_DEMCR_MON_STEP_Pos) /*!< CoreDebug DEMCR: MON_STEP Mask */ + +#define CoreDebug_DEMCR_MON_PEND_Pos 17U /*!< CoreDebug DEMCR: MON_PEND Position */ +#define CoreDebug_DEMCR_MON_PEND_Msk (1UL << CoreDebug_DEMCR_MON_PEND_Pos) /*!< CoreDebug DEMCR: MON_PEND Mask */ + +#define CoreDebug_DEMCR_MON_EN_Pos 16U /*!< CoreDebug DEMCR: MON_EN Position */ +#define CoreDebug_DEMCR_MON_EN_Msk (1UL << CoreDebug_DEMCR_MON_EN_Pos) /*!< CoreDebug DEMCR: MON_EN Mask */ + +#define CoreDebug_DEMCR_VC_HARDERR_Pos 10U /*!< CoreDebug DEMCR: VC_HARDERR Position */ +#define CoreDebug_DEMCR_VC_HARDERR_Msk (1UL << CoreDebug_DEMCR_VC_HARDERR_Pos) /*!< CoreDebug DEMCR: VC_HARDERR Mask */ + +#define CoreDebug_DEMCR_VC_INTERR_Pos 9U /*!< CoreDebug DEMCR: VC_INTERR Position */ +#define CoreDebug_DEMCR_VC_INTERR_Msk (1UL << CoreDebug_DEMCR_VC_INTERR_Pos) /*!< CoreDebug DEMCR: VC_INTERR Mask */ + +#define CoreDebug_DEMCR_VC_BUSERR_Pos 8U /*!< CoreDebug DEMCR: VC_BUSERR Position */ +#define CoreDebug_DEMCR_VC_BUSERR_Msk (1UL << CoreDebug_DEMCR_VC_BUSERR_Pos) /*!< CoreDebug DEMCR: VC_BUSERR Mask */ + +#define CoreDebug_DEMCR_VC_STATERR_Pos 7U /*!< CoreDebug DEMCR: VC_STATERR Position */ +#define CoreDebug_DEMCR_VC_STATERR_Msk (1UL << CoreDebug_DEMCR_VC_STATERR_Pos) /*!< CoreDebug DEMCR: VC_STATERR Mask */ + +#define CoreDebug_DEMCR_VC_CHKERR_Pos 6U /*!< CoreDebug DEMCR: VC_CHKERR Position */ +#define CoreDebug_DEMCR_VC_CHKERR_Msk (1UL << CoreDebug_DEMCR_VC_CHKERR_Pos) /*!< CoreDebug DEMCR: VC_CHKERR Mask */ + +#define CoreDebug_DEMCR_VC_NOCPERR_Pos 5U /*!< CoreDebug DEMCR: VC_NOCPERR Position */ +#define CoreDebug_DEMCR_VC_NOCPERR_Msk (1UL << CoreDebug_DEMCR_VC_NOCPERR_Pos) /*!< CoreDebug DEMCR: VC_NOCPERR Mask */ + +#define CoreDebug_DEMCR_VC_MMERR_Pos 4U /*!< CoreDebug DEMCR: VC_MMERR Position */ +#define CoreDebug_DEMCR_VC_MMERR_Msk (1UL << CoreDebug_DEMCR_VC_MMERR_Pos) /*!< CoreDebug DEMCR: VC_MMERR Mask */ + +#define CoreDebug_DEMCR_VC_CORERESET_Pos 0U /*!< CoreDebug DEMCR: VC_CORERESET Position */ +#define CoreDebug_DEMCR_VC_CORERESET_Msk (1UL /*<< CoreDebug_DEMCR_VC_CORERESET_Pos*/) /*!< CoreDebug DEMCR: VC_CORERESET Mask */ + +/*@} end of group CMSIS_CoreDebug */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_core_bitfield Core register bit field macros + \brief Macros for use with bit field definitions (xxx_Pos, xxx_Msk). + @{ + */ + +/** + \brief Mask and shift a bit field value for use in a register bit range. + \param[in] field Name of the register bit field. + \param[in] value Value of the bit field. This parameter is interpreted as an uint32_t type. + \return Masked and shifted value. +*/ +#define _VAL2FLD(field, value) (((uint32_t)(value) << field ## _Pos) & field ## _Msk) + +/** + \brief Mask and shift a register value to extract a bit filed value. + \param[in] field Name of the register bit field. + \param[in] value Value of register. This parameter is interpreted as an uint32_t type. + \return Masked and shifted bit field value. 
+*/ +#define _FLD2VAL(field, value) (((uint32_t)(value) & field ## _Msk) >> field ## _Pos) + +/*@} end of group CMSIS_core_bitfield */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_core_base Core Definitions + \brief Definitions for base addresses, unions, and structures. + @{ + */ + +/* Memory mapping of Core Hardware */ +#define SCS_BASE (0xE000E000UL) /*!< System Control Space Base Address */ +#define ITM_BASE (0xE0000000UL) /*!< ITM Base Address */ +#define DWT_BASE (0xE0001000UL) /*!< DWT Base Address */ +#define TPI_BASE (0xE0040000UL) /*!< TPI Base Address */ +#define CoreDebug_BASE (0xE000EDF0UL) /*!< Core Debug Base Address */ +#define SysTick_BASE (SCS_BASE + 0x0010UL) /*!< SysTick Base Address */ +#define NVIC_BASE (SCS_BASE + 0x0100UL) /*!< NVIC Base Address */ +#define SCB_BASE (SCS_BASE + 0x0D00UL) /*!< System Control Block Base Address */ + +#define SCnSCB ((SCnSCB_Type *) SCS_BASE ) /*!< System control Register not in SCB */ +#define SCB ((SCB_Type *) SCB_BASE ) /*!< SCB configuration struct */ +#define SysTick ((SysTick_Type *) SysTick_BASE ) /*!< SysTick configuration struct */ +#define NVIC ((NVIC_Type *) NVIC_BASE ) /*!< NVIC configuration struct */ +#define ITM ((ITM_Type *) ITM_BASE ) /*!< ITM configuration struct */ +#define DWT ((DWT_Type *) DWT_BASE ) /*!< DWT configuration struct */ +#define TPI ((TPI_Type *) TPI_BASE ) /*!< TPI configuration struct */ +#define CoreDebug ((CoreDebug_Type *) CoreDebug_BASE) /*!< Core Debug configuration struct */ + +#if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) + #define MPU_BASE (SCS_BASE + 0x0D90UL) /*!< Memory Protection Unit */ + #define MPU ((MPU_Type *) MPU_BASE ) /*!< Memory Protection Unit */ +#endif + +#define FPU_BASE (SCS_BASE + 0x0F30UL) /*!< Floating Point Unit */ +#define FPU ((FPU_Type *) FPU_BASE ) /*!< Floating Point Unit */ + +/*@} */ + + + +/******************************************************************************* + * Hardware Abstraction Layer + Core Function Interface contains: + - Core NVIC Functions + - Core SysTick Functions + - Core Debug Functions + - Core Register Access Functions + ******************************************************************************/ +/** + \defgroup CMSIS_Core_FunctionInterface Functions and Instructions Reference +*/ + + + +/* ########################## NVIC functions #################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_NVICFunctions NVIC Functions + \brief Functions that manage interrupts and exceptions via the NVIC. 
+ @{ + */ + +#ifdef CMSIS_NVIC_VIRTUAL + #ifndef CMSIS_NVIC_VIRTUAL_HEADER_FILE + #define CMSIS_NVIC_VIRTUAL_HEADER_FILE "cmsis_nvic_virtual.h" + #endif + #include CMSIS_NVIC_VIRTUAL_HEADER_FILE +#else + #define NVIC_SetPriorityGrouping __NVIC_SetPriorityGrouping + #define NVIC_GetPriorityGrouping __NVIC_GetPriorityGrouping + #define NVIC_EnableIRQ __NVIC_EnableIRQ + #define NVIC_GetEnableIRQ __NVIC_GetEnableIRQ + #define NVIC_DisableIRQ __NVIC_DisableIRQ + #define NVIC_GetPendingIRQ __NVIC_GetPendingIRQ + #define NVIC_SetPendingIRQ __NVIC_SetPendingIRQ + #define NVIC_ClearPendingIRQ __NVIC_ClearPendingIRQ + #define NVIC_GetActive __NVIC_GetActive + #define NVIC_SetPriority __NVIC_SetPriority + #define NVIC_GetPriority __NVIC_GetPriority + #define NVIC_SystemReset __NVIC_SystemReset +#endif /* CMSIS_NVIC_VIRTUAL */ + +#ifdef CMSIS_VECTAB_VIRTUAL + #ifndef CMSIS_VECTAB_VIRTUAL_HEADER_FILE + #define CMSIS_VECTAB_VIRTUAL_HEADER_FILE "cmsis_vectab_virtual.h" + #endif + #include CMSIS_VECTAB_VIRTUAL_HEADER_FILE +#else + #define NVIC_SetVector __NVIC_SetVector + #define NVIC_GetVector __NVIC_GetVector +#endif /* (CMSIS_VECTAB_VIRTUAL) */ + +#define NVIC_USER_IRQ_OFFSET 16 + + +/* The following EXC_RETURN values are saved the LR on exception entry */ +#define EXC_RETURN_HANDLER (0xFFFFFFF1UL) /* return to Handler mode, uses MSP after return */ +#define EXC_RETURN_THREAD_MSP (0xFFFFFFF9UL) /* return to Thread mode, uses MSP after return */ +#define EXC_RETURN_THREAD_PSP (0xFFFFFFFDUL) /* return to Thread mode, uses PSP after return */ +#define EXC_RETURN_HANDLER_FPU (0xFFFFFFE1UL) /* return to Handler mode, uses MSP after return, restore floating-point state */ +#define EXC_RETURN_THREAD_MSP_FPU (0xFFFFFFE9UL) /* return to Thread mode, uses MSP after return, restore floating-point state */ +#define EXC_RETURN_THREAD_PSP_FPU (0xFFFFFFEDUL) /* return to Thread mode, uses PSP after return, restore floating-point state */ + + +/** + \brief Set Priority Grouping + \details Sets the priority grouping field using the required unlock sequence. + The parameter PriorityGroup is assigned to the field SCB->AIRCR [10:8] PRIGROUP field. + Only values from 0..7 are used. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS), the smallest possible priority group is set. + \param [in] PriorityGroup Priority grouping field. + */ +__STATIC_INLINE void __NVIC_SetPriorityGrouping(uint32_t PriorityGroup) +{ + uint32_t reg_value; + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + + reg_value = SCB->AIRCR; /* read old register configuration */ + reg_value &= ~((uint32_t)(SCB_AIRCR_VECTKEY_Msk | SCB_AIRCR_PRIGROUP_Msk)); /* clear bits to change */ + reg_value = (reg_value | + ((uint32_t)0x5FAUL << SCB_AIRCR_VECTKEY_Pos) | + (PriorityGroupTmp << SCB_AIRCR_PRIGROUP_Pos) ); /* Insert write key and priority group */ + SCB->AIRCR = reg_value; +} + + +/** + \brief Get Priority Grouping + \details Reads the priority grouping field from the NVIC Interrupt Controller. + \return Priority grouping field (SCB->AIRCR [10:8] PRIGROUP field). + */ +__STATIC_INLINE uint32_t __NVIC_GetPriorityGrouping(void) +{ + return ((uint32_t)((SCB->AIRCR & SCB_AIRCR_PRIGROUP_Msk) >> SCB_AIRCR_PRIGROUP_Pos)); +} + + +/** + \brief Enable Interrupt + \details Enables a device specific interrupt in the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. 
+ */ +__STATIC_INLINE void __NVIC_EnableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ISER[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Get Interrupt Enable status + \details Returns a device specific interrupt enable status from the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt is not enabled. + \return 1 Interrupt is enabled. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t __NVIC_GetEnableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->ISER[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Disable Interrupt + \details Disables a device specific interrupt in the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_DisableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ICER[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + __DSB(); + __ISB(); + } +} + + +/** + \brief Get Pending Interrupt + \details Reads the NVIC pending register and returns the pending bit for the specified device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt status is not pending. + \return 1 Interrupt status is pending. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t __NVIC_GetPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->ISPR[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Set Pending Interrupt + \details Sets the pending bit of a device specific interrupt in the NVIC pending register. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_SetPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ISPR[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Clear Pending Interrupt + \details Clears the pending bit of a device specific interrupt in the NVIC pending register. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_ClearPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ICPR[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Get Active Interrupt + \details Reads the active register in the NVIC and returns the active bit for the device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt status is not active. + \return 1 Interrupt status is active. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t __NVIC_GetActive(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->IABR[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Set Interrupt Priority + \details Sets the priority of a device specific interrupt or a processor exception. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \param [in] priority Priority to set. 
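+
+           As a hedged illustration (not from the original header): a negative value
+           addresses a processor exception, a non-negative one a device interrupt;
+           SVCall_IRQn and USART3_IRQn are assumed to come from the device header.
+           \code
+           NVIC_SetPriority(SVCall_IRQn, 0U);    // processor exception (negative IRQn)
+           NVIC_SetPriority(USART3_IRQn, 5U);    // device-specific interrupt (assumed name)
+           \endcode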
+ \note The priority cannot be set for every processor exception. + */ +__STATIC_INLINE void __NVIC_SetPriority(IRQn_Type IRQn, uint32_t priority) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->IP[((uint32_t)IRQn)] = (uint8_t)((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL); + } + else + { + SCB->SHP[(((uint32_t)IRQn) & 0xFUL)-4UL] = (uint8_t)((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL); + } +} + + +/** + \brief Get Interrupt Priority + \details Reads the priority of a device specific interrupt or a processor exception. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \return Interrupt Priority. + Value is aligned automatically to the implemented priority bits of the microcontroller. + */ +__STATIC_INLINE uint32_t __NVIC_GetPriority(IRQn_Type IRQn) +{ + + if ((int32_t)(IRQn) >= 0) + { + return(((uint32_t)NVIC->IP[((uint32_t)IRQn)] >> (8U - __NVIC_PRIO_BITS))); + } + else + { + return(((uint32_t)SCB->SHP[(((uint32_t)IRQn) & 0xFUL)-4UL] >> (8U - __NVIC_PRIO_BITS))); + } +} + + +/** + \brief Encode Priority + \details Encodes the priority for an interrupt with the given priority group, + preemptive priority value, and subpriority value. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS), the smallest possible priority group is set. + \param [in] PriorityGroup Used priority group. + \param [in] PreemptPriority Preemptive priority value (starting from 0). + \param [in] SubPriority Subpriority value (starting from 0). + \return Encoded priority. Value can be used in the function \ref NVIC_SetPriority(). + */ +__STATIC_INLINE uint32_t NVIC_EncodePriority (uint32_t PriorityGroup, uint32_t PreemptPriority, uint32_t SubPriority) +{ + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + uint32_t PreemptPriorityBits; + uint32_t SubPriorityBits; + + PreemptPriorityBits = ((7UL - PriorityGroupTmp) > (uint32_t)(__NVIC_PRIO_BITS)) ? (uint32_t)(__NVIC_PRIO_BITS) : (uint32_t)(7UL - PriorityGroupTmp); + SubPriorityBits = ((PriorityGroupTmp + (uint32_t)(__NVIC_PRIO_BITS)) < (uint32_t)7UL) ? (uint32_t)0UL : (uint32_t)((PriorityGroupTmp - 7UL) + (uint32_t)(__NVIC_PRIO_BITS)); + + return ( + ((PreemptPriority & (uint32_t)((1UL << (PreemptPriorityBits)) - 1UL)) << SubPriorityBits) | + ((SubPriority & (uint32_t)((1UL << (SubPriorityBits )) - 1UL))) + ); +} + + +/** + \brief Decode Priority + \details Decodes an interrupt priority value with a given priority group to + preemptive priority value and subpriority value. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS) the smallest possible priority group is set. + \param [in] Priority Priority value, which can be retrieved with the function \ref NVIC_GetPriority(). + \param [in] PriorityGroup Used priority group. + \param [out] pPreemptPriority Preemptive priority value (starting from 0). + \param [out] pSubPriority Subpriority value (starting from 0). + */ +__STATIC_INLINE void NVIC_DecodePriority (uint32_t Priority, uint32_t PriorityGroup, uint32_t* const pPreemptPriority, uint32_t* const pSubPriority) +{ + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + uint32_t PreemptPriorityBits; + uint32_t SubPriorityBits; + + PreemptPriorityBits = ((7UL - PriorityGroupTmp) > (uint32_t)(__NVIC_PRIO_BITS)) ? 
(uint32_t)(__NVIC_PRIO_BITS) : (uint32_t)(7UL - PriorityGroupTmp); + SubPriorityBits = ((PriorityGroupTmp + (uint32_t)(__NVIC_PRIO_BITS)) < (uint32_t)7UL) ? (uint32_t)0UL : (uint32_t)((PriorityGroupTmp - 7UL) + (uint32_t)(__NVIC_PRIO_BITS)); + + *pPreemptPriority = (Priority >> SubPriorityBits) & (uint32_t)((1UL << (PreemptPriorityBits)) - 1UL); + *pSubPriority = (Priority ) & (uint32_t)((1UL << (SubPriorityBits )) - 1UL); +} + + +/** + \brief Set Interrupt Vector + \details Sets an interrupt vector in SRAM based interrupt vector table. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + VTOR must been relocated to SRAM before. + \param [in] IRQn Interrupt number + \param [in] vector Address of interrupt handler function + */ +__STATIC_INLINE void __NVIC_SetVector(IRQn_Type IRQn, uint32_t vector) +{ + uint32_t *vectors = (uint32_t *)SCB->VTOR; + vectors[(int32_t)IRQn + NVIC_USER_IRQ_OFFSET] = vector; +} + + +/** + \brief Get Interrupt Vector + \details Reads an interrupt vector from interrupt vector table. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \return Address of interrupt handler function + */ +__STATIC_INLINE uint32_t __NVIC_GetVector(IRQn_Type IRQn) +{ + uint32_t *vectors = (uint32_t *)SCB->VTOR; + return vectors[(int32_t)IRQn + NVIC_USER_IRQ_OFFSET]; +} + + +/** + \brief System Reset + \details Initiates a system reset request to reset the MCU. + */ +__NO_RETURN __STATIC_INLINE void __NVIC_SystemReset(void) +{ + __DSB(); /* Ensure all outstanding memory accesses included + buffered write are completed before reset */ + SCB->AIRCR = (uint32_t)((0x5FAUL << SCB_AIRCR_VECTKEY_Pos) | + (SCB->AIRCR & SCB_AIRCR_PRIGROUP_Msk) | + SCB_AIRCR_SYSRESETREQ_Msk ); /* Keep priority group unchanged */ + __DSB(); /* Ensure completion of memory access */ + + for(;;) /* wait until reset */ + { + __NOP(); + } +} + +/*@} end of CMSIS_Core_NVICFunctions */ + +/* ########################## MPU functions #################################### */ + +#if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) + +#include "mpu_armv7.h" + +#endif + + +/* ########################## FPU functions #################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_FpuFunctions FPU Functions + \brief Function that provides FPU type. + @{ + */ + +/** + \brief get FPU type + \details returns the FPU type + \returns + - \b 0: No FPU + - \b 1: Single precision FPU + - \b 2: Double + Single precision FPU + */ +__STATIC_INLINE uint32_t SCB_GetFPUType(void) +{ + uint32_t mvfr0; + + mvfr0 = FPU->MVFR0; + if ((mvfr0 & (FPU_MVFR0_Single_precision_Msk | FPU_MVFR0_Double_precision_Msk)) == 0x020U) + { + return 1U; /* Single precision FPU */ + } + else + { + return 0U; /* No FPU */ + } +} + + +/*@} end of CMSIS_Core_FpuFunctions */ + + + +/* ################################## SysTick function ############################################ */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_SysTickFunctions SysTick Functions + \brief Functions that configure the System. + @{ + */ + +#if defined (__Vendor_SysTickConfig) && (__Vendor_SysTickConfig == 0U) + +/** + \brief System Tick Configuration + \details Initializes the System Timer and its interrupt, and starts the System Tick Timer. + Counter is in free running mode to generate periodic interrupts. 
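+           A minimal usage sketch (illustrative only, assuming the CMSIS variable
+           SystemCoreClock holds the core clock frequency in Hz), for a 1 ms tick:
+           \code
+           if (SysTick_Config(SystemCoreClock / 1000U) != 0U)
+           {
+             // reload value does not fit into the 24-bit LOAD register
+           }
+           \endcode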
+ \param [in] ticks Number of ticks between two interrupts. + \return 0 Function succeeded. + \return 1 Function failed. + \note When the variable __Vendor_SysTickConfig is set to 1, then the + function SysTick_Config is not included. In this case, the file device.h + must contain a vendor-specific implementation of this function. + */ +__STATIC_INLINE uint32_t SysTick_Config(uint32_t ticks) +{ + if ((ticks - 1UL) > SysTick_LOAD_RELOAD_Msk) + { + return (1UL); /* Reload value impossible */ + } + + SysTick->LOAD = (uint32_t)(ticks - 1UL); /* set reload register */ + NVIC_SetPriority (SysTick_IRQn, (1UL << __NVIC_PRIO_BITS) - 1UL); /* set Priority for Systick Interrupt */ + SysTick->VAL = 0UL; /* Load the SysTick Counter Value */ + SysTick->CTRL = SysTick_CTRL_CLKSOURCE_Msk | + SysTick_CTRL_TICKINT_Msk | + SysTick_CTRL_ENABLE_Msk; /* Enable SysTick IRQ and SysTick Timer */ + return (0UL); /* Function successful */ +} + +#endif + +/*@} end of CMSIS_Core_SysTickFunctions */ + + + +/* ##################################### Debug In/Output function ########################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_core_DebugFunctions ITM Functions + \brief Functions that access the ITM debug interface. + @{ + */ + +extern volatile int32_t ITM_RxBuffer; /*!< External variable to receive characters. */ +#define ITM_RXBUFFER_EMPTY ((int32_t)0x5AA55AA5U) /*!< Value identifying \ref ITM_RxBuffer is ready for next character. */ + + +/** + \brief ITM Send Character + \details Transmits a character via the ITM channel 0, and + \li Just returns when no debugger is connected that has booked the output. + \li Is blocking when a debugger is connected, but the previous character sent has not been transmitted. + \param [in] ch Character to transmit. + \returns Character to transmit. + */ +__STATIC_INLINE uint32_t ITM_SendChar (uint32_t ch) +{ + if (((ITM->TCR & ITM_TCR_ITMENA_Msk) != 0UL) && /* ITM enabled */ + ((ITM->TER & 1UL ) != 0UL) ) /* ITM Port #0 enabled */ + { + while (ITM->PORT[0U].u32 == 0UL) + { + __NOP(); + } + ITM->PORT[0U].u8 = (uint8_t)ch; + } + return (ch); +} + + +/** + \brief ITM Receive Character + \details Inputs a character via the external variable \ref ITM_RxBuffer. + \return Received character. + \return -1 No character pending. + */ +__STATIC_INLINE int32_t ITM_ReceiveChar (void) +{ + int32_t ch = -1; /* no character available */ + + if (ITM_RxBuffer != ITM_RXBUFFER_EMPTY) + { + ch = ITM_RxBuffer; + ITM_RxBuffer = ITM_RXBUFFER_EMPTY; /* ready for next character */ + } + + return (ch); +} + + +/** + \brief ITM Check Character + \details Checks whether a character is pending for reading in the variable \ref ITM_RxBuffer. + \return 0 No character available. + \return 1 Character available. 
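+
+           Illustrative polling sketch (not part of the original header), built only
+           from the functions defined in this group:
+           \code
+           if (ITM_CheckChar() != 0)
+           {
+             int32_t c = ITM_ReceiveChar();   // consume the pending character
+             (void)c;                         // ... process it here
+           }
+           \endcode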
+ */ +__STATIC_INLINE int32_t ITM_CheckChar (void) +{ + + if (ITM_RxBuffer == ITM_RXBUFFER_EMPTY) + { + return (0); /* no character available */ + } + else + { + return (1); /* character available */ + } +} + +/*@} end of CMSIS_core_DebugFunctions */ + + + + +#ifdef __cplusplus +} +#endif + +#endif /* __CORE_CM4_H_DEPENDANT */ + +#endif /* __CMSIS_GENERIC */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Drivers/CMSIS/Include/core_cm7.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Drivers/CMSIS/Include/core_cm7.h new file mode 100644 index 0000000..ada6c2a --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Drivers/CMSIS/Include/core_cm7.h @@ -0,0 +1,2671 @@ +/**************************************************************************//** + * @file core_cm7.h + * @brief CMSIS Cortex-M7 Core Peripheral Access Layer Header File + * @version V5.0.8 + * @date 04. June 2018 + ******************************************************************************/ +/* + * Copyright (c) 2009-2018 Arm Limited. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#if defined ( __ICCARM__ ) + #pragma system_include /* treat file as system include file for MISRA check */ +#elif defined (__clang__) + #pragma clang system_header /* treat file as system include file */ +#endif + +#ifndef __CORE_CM7_H_GENERIC +#define __CORE_CM7_H_GENERIC + +#include + +#ifdef __cplusplus + extern "C" { +#endif + +/** + \page CMSIS_MISRA_Exceptions MISRA-C:2004 Compliance Exceptions + CMSIS violates the following MISRA-C:2004 rules: + + \li Required Rule 8.5, object/function definition in header file.
+ Function definitions in header files are used to allow 'inlining'. + + \li Required Rule 18.4, declaration of union type or object of union type: '{...}'.
+ Unions are used for effective representation of core registers. + + \li Advisory Rule 19.7, Function-like macro defined.
+ Function-like macros are used to allow more efficient code. + */ + + +/******************************************************************************* + * CMSIS definitions + ******************************************************************************/ +/** + \ingroup Cortex_M7 + @{ + */ + +#include "cmsis_version.h" + +/* CMSIS CM7 definitions */ +#define __CM7_CMSIS_VERSION_MAIN (__CM_CMSIS_VERSION_MAIN) /*!< \deprecated [31:16] CMSIS HAL main version */ +#define __CM7_CMSIS_VERSION_SUB ( __CM_CMSIS_VERSION_SUB) /*!< \deprecated [15:0] CMSIS HAL sub version */ +#define __CM7_CMSIS_VERSION ((__CM7_CMSIS_VERSION_MAIN << 16U) | \ + __CM7_CMSIS_VERSION_SUB ) /*!< \deprecated CMSIS HAL version number */ + +#define __CORTEX_M (7U) /*!< Cortex-M Core */ + +/** __FPU_USED indicates whether an FPU is used or not. + For this, __FPU_PRESENT has to be checked prior to making use of FPU specific registers and functions. +*/ +#if defined ( __CC_ARM ) + #if defined __TARGET_FPU_VFP + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + +#elif defined (__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050) + #if defined __ARM_PCS_VFP + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #warning "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + +#elif defined ( __GNUC__ ) + #if defined (__VFP_FP__) && !defined(__SOFTFP__) + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + +#elif defined ( __ICCARM__ ) + #if defined __ARMVFP__ + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + +#elif defined ( __TI_ARM__ ) + #if defined __TI_VFP_SUPPORT__ + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + +#elif defined ( __TASKING__ ) + #if defined __FPU_VFP__ + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + +#elif defined ( __CSMC__ ) + #if ( __CSMC__ & 0x400U) + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + +#endif + +#include "cmsis_compiler.h" /* CMSIS compiler specific defines */ + + +#ifdef __cplusplus +} +#endif + +#endif /* __CORE_CM7_H_GENERIC */ + +#ifndef __CMSIS_GENERIC + +#ifndef __CORE_CM7_H_DEPENDANT +#define __CORE_CM7_H_DEPENDANT + +#ifdef __cplusplus + extern "C" { +#endif + +/* check device defines and use defaults */ +#if 
defined __CHECK_DEVICE_DEFINES + #ifndef __CM7_REV + #define __CM7_REV 0x0000U + #warning "__CM7_REV not defined in device header file; using default!" + #endif + + #ifndef __FPU_PRESENT + #define __FPU_PRESENT 0U + #warning "__FPU_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __MPU_PRESENT + #define __MPU_PRESENT 0U + #warning "__MPU_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __ICACHE_PRESENT + #define __ICACHE_PRESENT 0U + #warning "__ICACHE_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __DCACHE_PRESENT + #define __DCACHE_PRESENT 0U + #warning "__DCACHE_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __DTCM_PRESENT + #define __DTCM_PRESENT 0U + #warning "__DTCM_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __NVIC_PRIO_BITS + #define __NVIC_PRIO_BITS 3U + #warning "__NVIC_PRIO_BITS not defined in device header file; using default!" + #endif + + #ifndef __Vendor_SysTickConfig + #define __Vendor_SysTickConfig 0U + #warning "__Vendor_SysTickConfig not defined in device header file; using default!" + #endif +#endif + +/* IO definitions (access restrictions to peripheral registers) */ +/** + \defgroup CMSIS_glob_defs CMSIS Global Defines + + IO Type Qualifiers are used + \li to specify the access to peripheral variables. + \li for automatic generation of peripheral register debug information. +*/ +#ifdef __cplusplus + #define __I volatile /*!< Defines 'read only' permissions */ +#else + #define __I volatile const /*!< Defines 'read only' permissions */ +#endif +#define __O volatile /*!< Defines 'write only' permissions */ +#define __IO volatile /*!< Defines 'read / write' permissions */ + +/* following defines should be used for structure members */ +#define __IM volatile const /*! Defines 'read only' structure member permissions */ +#define __OM volatile /*! Defines 'write only' structure member permissions */ +#define __IOM volatile /*! Defines 'read / write' structure member permissions */ + +/*@} end of group Cortex_M7 */ + + + +/******************************************************************************* + * Register Abstraction + Core Register contain: + - Core Register + - Core NVIC Register + - Core SCB Register + - Core SysTick Register + - Core Debug Register + - Core MPU Register + - Core FPU Register + ******************************************************************************/ +/** + \defgroup CMSIS_core_register Defines and Type Definitions + \brief Type definitions and defines for Cortex-M processor based devices. +*/ + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_CORE Status and Control Registers + \brief Core Register type definitions. + @{ + */ + +/** + \brief Union type to access the Application Program Status Register (APSR). 
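+          Illustrative sketch only: the union allows either word or bit-field access,
+          e.g. together with the CMSIS __get_APSR() intrinsic (assumed to be provided
+          by cmsis_compiler.h):
+          \code
+          APSR_Type apsr;
+          apsr.w = __get_APSR();     // word access
+          if (apsr.b.N != 0U) { }    // bit access: negative flag set?
+          \endcode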
+ */ +typedef union +{ + struct + { + uint32_t _reserved0:16; /*!< bit: 0..15 Reserved */ + uint32_t GE:4; /*!< bit: 16..19 Greater than or Equal flags */ + uint32_t _reserved1:7; /*!< bit: 20..26 Reserved */ + uint32_t Q:1; /*!< bit: 27 Saturation condition flag */ + uint32_t V:1; /*!< bit: 28 Overflow condition code flag */ + uint32_t C:1; /*!< bit: 29 Carry condition code flag */ + uint32_t Z:1; /*!< bit: 30 Zero condition code flag */ + uint32_t N:1; /*!< bit: 31 Negative condition code flag */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} APSR_Type; + +/* APSR Register Definitions */ +#define APSR_N_Pos 31U /*!< APSR: N Position */ +#define APSR_N_Msk (1UL << APSR_N_Pos) /*!< APSR: N Mask */ + +#define APSR_Z_Pos 30U /*!< APSR: Z Position */ +#define APSR_Z_Msk (1UL << APSR_Z_Pos) /*!< APSR: Z Mask */ + +#define APSR_C_Pos 29U /*!< APSR: C Position */ +#define APSR_C_Msk (1UL << APSR_C_Pos) /*!< APSR: C Mask */ + +#define APSR_V_Pos 28U /*!< APSR: V Position */ +#define APSR_V_Msk (1UL << APSR_V_Pos) /*!< APSR: V Mask */ + +#define APSR_Q_Pos 27U /*!< APSR: Q Position */ +#define APSR_Q_Msk (1UL << APSR_Q_Pos) /*!< APSR: Q Mask */ + +#define APSR_GE_Pos 16U /*!< APSR: GE Position */ +#define APSR_GE_Msk (0xFUL << APSR_GE_Pos) /*!< APSR: GE Mask */ + + +/** + \brief Union type to access the Interrupt Program Status Register (IPSR). + */ +typedef union +{ + struct + { + uint32_t ISR:9; /*!< bit: 0.. 8 Exception number */ + uint32_t _reserved0:23; /*!< bit: 9..31 Reserved */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} IPSR_Type; + +/* IPSR Register Definitions */ +#define IPSR_ISR_Pos 0U /*!< IPSR: ISR Position */ +#define IPSR_ISR_Msk (0x1FFUL /*<< IPSR_ISR_Pos*/) /*!< IPSR: ISR Mask */ + + +/** + \brief Union type to access the Special-Purpose Program Status Registers (xPSR). + */ +typedef union +{ + struct + { + uint32_t ISR:9; /*!< bit: 0.. 
8 Exception number */ + uint32_t _reserved0:1; /*!< bit: 9 Reserved */ + uint32_t ICI_IT_1:6; /*!< bit: 10..15 ICI/IT part 1 */ + uint32_t GE:4; /*!< bit: 16..19 Greater than or Equal flags */ + uint32_t _reserved1:4; /*!< bit: 20..23 Reserved */ + uint32_t T:1; /*!< bit: 24 Thumb bit */ + uint32_t ICI_IT_2:2; /*!< bit: 25..26 ICI/IT part 2 */ + uint32_t Q:1; /*!< bit: 27 Saturation condition flag */ + uint32_t V:1; /*!< bit: 28 Overflow condition code flag */ + uint32_t C:1; /*!< bit: 29 Carry condition code flag */ + uint32_t Z:1; /*!< bit: 30 Zero condition code flag */ + uint32_t N:1; /*!< bit: 31 Negative condition code flag */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} xPSR_Type; + +/* xPSR Register Definitions */ +#define xPSR_N_Pos 31U /*!< xPSR: N Position */ +#define xPSR_N_Msk (1UL << xPSR_N_Pos) /*!< xPSR: N Mask */ + +#define xPSR_Z_Pos 30U /*!< xPSR: Z Position */ +#define xPSR_Z_Msk (1UL << xPSR_Z_Pos) /*!< xPSR: Z Mask */ + +#define xPSR_C_Pos 29U /*!< xPSR: C Position */ +#define xPSR_C_Msk (1UL << xPSR_C_Pos) /*!< xPSR: C Mask */ + +#define xPSR_V_Pos 28U /*!< xPSR: V Position */ +#define xPSR_V_Msk (1UL << xPSR_V_Pos) /*!< xPSR: V Mask */ + +#define xPSR_Q_Pos 27U /*!< xPSR: Q Position */ +#define xPSR_Q_Msk (1UL << xPSR_Q_Pos) /*!< xPSR: Q Mask */ + +#define xPSR_ICI_IT_2_Pos 25U /*!< xPSR: ICI/IT part 2 Position */ +#define xPSR_ICI_IT_2_Msk (3UL << xPSR_ICI_IT_2_Pos) /*!< xPSR: ICI/IT part 2 Mask */ + +#define xPSR_T_Pos 24U /*!< xPSR: T Position */ +#define xPSR_T_Msk (1UL << xPSR_T_Pos) /*!< xPSR: T Mask */ + +#define xPSR_GE_Pos 16U /*!< xPSR: GE Position */ +#define xPSR_GE_Msk (0xFUL << xPSR_GE_Pos) /*!< xPSR: GE Mask */ + +#define xPSR_ICI_IT_1_Pos 10U /*!< xPSR: ICI/IT part 1 Position */ +#define xPSR_ICI_IT_1_Msk (0x3FUL << xPSR_ICI_IT_1_Pos) /*!< xPSR: ICI/IT part 1 Mask */ + +#define xPSR_ISR_Pos 0U /*!< xPSR: ISR Position */ +#define xPSR_ISR_Msk (0x1FFUL /*<< xPSR_ISR_Pos*/) /*!< xPSR: ISR Mask */ + + +/** + \brief Union type to access the Control Registers (CONTROL). + */ +typedef union +{ + struct + { + uint32_t nPRIV:1; /*!< bit: 0 Execution privilege in Thread mode */ + uint32_t SPSEL:1; /*!< bit: 1 Stack to be used */ + uint32_t FPCA:1; /*!< bit: 2 FP extension active flag */ + uint32_t _reserved0:29; /*!< bit: 3..31 Reserved */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} CONTROL_Type; + +/* CONTROL Register Definitions */ +#define CONTROL_FPCA_Pos 2U /*!< CONTROL: FPCA Position */ +#define CONTROL_FPCA_Msk (1UL << CONTROL_FPCA_Pos) /*!< CONTROL: FPCA Mask */ + +#define CONTROL_SPSEL_Pos 1U /*!< CONTROL: SPSEL Position */ +#define CONTROL_SPSEL_Msk (1UL << CONTROL_SPSEL_Pos) /*!< CONTROL: SPSEL Mask */ + +#define CONTROL_nPRIV_Pos 0U /*!< CONTROL: nPRIV Position */ +#define CONTROL_nPRIV_Msk (1UL /*<< CONTROL_nPRIV_Pos*/) /*!< CONTROL: nPRIV Mask */ + +/*@} end of group CMSIS_CORE */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_NVIC Nested Vectored Interrupt Controller (NVIC) + \brief Type definitions for the NVIC Registers + @{ + */ + +/** + \brief Structure type to access the Nested Vectored Interrupt Controller (NVIC). 
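+          Illustrative sketch only: the registers are reached through the NVIC pointer
+          defined further below, e.g. pending an interrupt by software (TIM2_IRQn is an
+          assumed device-header interrupt number):
+          \code
+          NVIC->STIR = (uint32_t)TIM2_IRQn;   // software-trigger the interrupt
+          \endcode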
+ */ +typedef struct +{ + __IOM uint32_t ISER[8U]; /*!< Offset: 0x000 (R/W) Interrupt Set Enable Register */ + uint32_t RESERVED0[24U]; + __IOM uint32_t ICER[8U]; /*!< Offset: 0x080 (R/W) Interrupt Clear Enable Register */ + uint32_t RSERVED1[24U]; + __IOM uint32_t ISPR[8U]; /*!< Offset: 0x100 (R/W) Interrupt Set Pending Register */ + uint32_t RESERVED2[24U]; + __IOM uint32_t ICPR[8U]; /*!< Offset: 0x180 (R/W) Interrupt Clear Pending Register */ + uint32_t RESERVED3[24U]; + __IOM uint32_t IABR[8U]; /*!< Offset: 0x200 (R/W) Interrupt Active bit Register */ + uint32_t RESERVED4[56U]; + __IOM uint8_t IP[240U]; /*!< Offset: 0x300 (R/W) Interrupt Priority Register (8Bit wide) */ + uint32_t RESERVED5[644U]; + __OM uint32_t STIR; /*!< Offset: 0xE00 ( /W) Software Trigger Interrupt Register */ +} NVIC_Type; + +/* Software Triggered Interrupt Register Definitions */ +#define NVIC_STIR_INTID_Pos 0U /*!< STIR: INTLINESNUM Position */ +#define NVIC_STIR_INTID_Msk (0x1FFUL /*<< NVIC_STIR_INTID_Pos*/) /*!< STIR: INTLINESNUM Mask */ + +/*@} end of group CMSIS_NVIC */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SCB System Control Block (SCB) + \brief Type definitions for the System Control Block Registers + @{ + */ + +/** + \brief Structure type to access the System Control Block (SCB). + */ +typedef struct +{ + __IM uint32_t CPUID; /*!< Offset: 0x000 (R/ ) CPUID Base Register */ + __IOM uint32_t ICSR; /*!< Offset: 0x004 (R/W) Interrupt Control and State Register */ + __IOM uint32_t VTOR; /*!< Offset: 0x008 (R/W) Vector Table Offset Register */ + __IOM uint32_t AIRCR; /*!< Offset: 0x00C (R/W) Application Interrupt and Reset Control Register */ + __IOM uint32_t SCR; /*!< Offset: 0x010 (R/W) System Control Register */ + __IOM uint32_t CCR; /*!< Offset: 0x014 (R/W) Configuration Control Register */ + __IOM uint8_t SHPR[12U]; /*!< Offset: 0x018 (R/W) System Handlers Priority Registers (4-7, 8-11, 12-15) */ + __IOM uint32_t SHCSR; /*!< Offset: 0x024 (R/W) System Handler Control and State Register */ + __IOM uint32_t CFSR; /*!< Offset: 0x028 (R/W) Configurable Fault Status Register */ + __IOM uint32_t HFSR; /*!< Offset: 0x02C (R/W) HardFault Status Register */ + __IOM uint32_t DFSR; /*!< Offset: 0x030 (R/W) Debug Fault Status Register */ + __IOM uint32_t MMFAR; /*!< Offset: 0x034 (R/W) MemManage Fault Address Register */ + __IOM uint32_t BFAR; /*!< Offset: 0x038 (R/W) BusFault Address Register */ + __IOM uint32_t AFSR; /*!< Offset: 0x03C (R/W) Auxiliary Fault Status Register */ + __IM uint32_t ID_PFR[2U]; /*!< Offset: 0x040 (R/ ) Processor Feature Register */ + __IM uint32_t ID_DFR; /*!< Offset: 0x048 (R/ ) Debug Feature Register */ + __IM uint32_t ID_AFR; /*!< Offset: 0x04C (R/ ) Auxiliary Feature Register */ + __IM uint32_t ID_MFR[4U]; /*!< Offset: 0x050 (R/ ) Memory Model Feature Register */ + __IM uint32_t ID_ISAR[5U]; /*!< Offset: 0x060 (R/ ) Instruction Set Attributes Register */ + uint32_t RESERVED0[1U]; + __IM uint32_t CLIDR; /*!< Offset: 0x078 (R/ ) Cache Level ID register */ + __IM uint32_t CTR; /*!< Offset: 0x07C (R/ ) Cache Type register */ + __IM uint32_t CCSIDR; /*!< Offset: 0x080 (R/ ) Cache Size ID Register */ + __IOM uint32_t CSSELR; /*!< Offset: 0x084 (R/W) Cache Size Selection Register */ + __IOM uint32_t CPACR; /*!< Offset: 0x088 (R/W) Coprocessor Access Control Register */ + uint32_t RESERVED3[93U]; + __OM uint32_t STIR; /*!< Offset: 0x200 ( /W) Software Triggered Interrupt Register */ + uint32_t RESERVED4[15U]; + __IM uint32_t MVFR0; /*!< Offset: 0x240 (R/ ) Media and VFP 
Feature Register 0 */ + __IM uint32_t MVFR1; /*!< Offset: 0x244 (R/ ) Media and VFP Feature Register 1 */ + __IM uint32_t MVFR2; /*!< Offset: 0x248 (R/ ) Media and VFP Feature Register 2 */ + uint32_t RESERVED5[1U]; + __OM uint32_t ICIALLU; /*!< Offset: 0x250 ( /W) I-Cache Invalidate All to PoU */ + uint32_t RESERVED6[1U]; + __OM uint32_t ICIMVAU; /*!< Offset: 0x258 ( /W) I-Cache Invalidate by MVA to PoU */ + __OM uint32_t DCIMVAC; /*!< Offset: 0x25C ( /W) D-Cache Invalidate by MVA to PoC */ + __OM uint32_t DCISW; /*!< Offset: 0x260 ( /W) D-Cache Invalidate by Set-way */ + __OM uint32_t DCCMVAU; /*!< Offset: 0x264 ( /W) D-Cache Clean by MVA to PoU */ + __OM uint32_t DCCMVAC; /*!< Offset: 0x268 ( /W) D-Cache Clean by MVA to PoC */ + __OM uint32_t DCCSW; /*!< Offset: 0x26C ( /W) D-Cache Clean by Set-way */ + __OM uint32_t DCCIMVAC; /*!< Offset: 0x270 ( /W) D-Cache Clean and Invalidate by MVA to PoC */ + __OM uint32_t DCCISW; /*!< Offset: 0x274 ( /W) D-Cache Clean and Invalidate by Set-way */ + uint32_t RESERVED7[6U]; + __IOM uint32_t ITCMCR; /*!< Offset: 0x290 (R/W) Instruction Tightly-Coupled Memory Control Register */ + __IOM uint32_t DTCMCR; /*!< Offset: 0x294 (R/W) Data Tightly-Coupled Memory Control Registers */ + __IOM uint32_t AHBPCR; /*!< Offset: 0x298 (R/W) AHBP Control Register */ + __IOM uint32_t CACR; /*!< Offset: 0x29C (R/W) L1 Cache Control Register */ + __IOM uint32_t AHBSCR; /*!< Offset: 0x2A0 (R/W) AHB Slave Control Register */ + uint32_t RESERVED8[1U]; + __IOM uint32_t ABFSR; /*!< Offset: 0x2A8 (R/W) Auxiliary Bus Fault Status Register */ +} SCB_Type; + +/* SCB CPUID Register Definitions */ +#define SCB_CPUID_IMPLEMENTER_Pos 24U /*!< SCB CPUID: IMPLEMENTER Position */ +#define SCB_CPUID_IMPLEMENTER_Msk (0xFFUL << SCB_CPUID_IMPLEMENTER_Pos) /*!< SCB CPUID: IMPLEMENTER Mask */ + +#define SCB_CPUID_VARIANT_Pos 20U /*!< SCB CPUID: VARIANT Position */ +#define SCB_CPUID_VARIANT_Msk (0xFUL << SCB_CPUID_VARIANT_Pos) /*!< SCB CPUID: VARIANT Mask */ + +#define SCB_CPUID_ARCHITECTURE_Pos 16U /*!< SCB CPUID: ARCHITECTURE Position */ +#define SCB_CPUID_ARCHITECTURE_Msk (0xFUL << SCB_CPUID_ARCHITECTURE_Pos) /*!< SCB CPUID: ARCHITECTURE Mask */ + +#define SCB_CPUID_PARTNO_Pos 4U /*!< SCB CPUID: PARTNO Position */ +#define SCB_CPUID_PARTNO_Msk (0xFFFUL << SCB_CPUID_PARTNO_Pos) /*!< SCB CPUID: PARTNO Mask */ + +#define SCB_CPUID_REVISION_Pos 0U /*!< SCB CPUID: REVISION Position */ +#define SCB_CPUID_REVISION_Msk (0xFUL /*<< SCB_CPUID_REVISION_Pos*/) /*!< SCB CPUID: REVISION Mask */ + +/* SCB Interrupt Control State Register Definitions */ +#define SCB_ICSR_NMIPENDSET_Pos 31U /*!< SCB ICSR: NMIPENDSET Position */ +#define SCB_ICSR_NMIPENDSET_Msk (1UL << SCB_ICSR_NMIPENDSET_Pos) /*!< SCB ICSR: NMIPENDSET Mask */ + +#define SCB_ICSR_PENDSVSET_Pos 28U /*!< SCB ICSR: PENDSVSET Position */ +#define SCB_ICSR_PENDSVSET_Msk (1UL << SCB_ICSR_PENDSVSET_Pos) /*!< SCB ICSR: PENDSVSET Mask */ + +#define SCB_ICSR_PENDSVCLR_Pos 27U /*!< SCB ICSR: PENDSVCLR Position */ +#define SCB_ICSR_PENDSVCLR_Msk (1UL << SCB_ICSR_PENDSVCLR_Pos) /*!< SCB ICSR: PENDSVCLR Mask */ + +#define SCB_ICSR_PENDSTSET_Pos 26U /*!< SCB ICSR: PENDSTSET Position */ +#define SCB_ICSR_PENDSTSET_Msk (1UL << SCB_ICSR_PENDSTSET_Pos) /*!< SCB ICSR: PENDSTSET Mask */ + +#define SCB_ICSR_PENDSTCLR_Pos 25U /*!< SCB ICSR: PENDSTCLR Position */ +#define SCB_ICSR_PENDSTCLR_Msk (1UL << SCB_ICSR_PENDSTCLR_Pos) /*!< SCB ICSR: PENDSTCLR Mask */ + +#define SCB_ICSR_ISRPREEMPT_Pos 23U /*!< SCB ICSR: ISRPREEMPT Position */ +#define 
SCB_ICSR_ISRPREEMPT_Msk (1UL << SCB_ICSR_ISRPREEMPT_Pos) /*!< SCB ICSR: ISRPREEMPT Mask */ + +#define SCB_ICSR_ISRPENDING_Pos 22U /*!< SCB ICSR: ISRPENDING Position */ +#define SCB_ICSR_ISRPENDING_Msk (1UL << SCB_ICSR_ISRPENDING_Pos) /*!< SCB ICSR: ISRPENDING Mask */ + +#define SCB_ICSR_VECTPENDING_Pos 12U /*!< SCB ICSR: VECTPENDING Position */ +#define SCB_ICSR_VECTPENDING_Msk (0x1FFUL << SCB_ICSR_VECTPENDING_Pos) /*!< SCB ICSR: VECTPENDING Mask */ + +#define SCB_ICSR_RETTOBASE_Pos 11U /*!< SCB ICSR: RETTOBASE Position */ +#define SCB_ICSR_RETTOBASE_Msk (1UL << SCB_ICSR_RETTOBASE_Pos) /*!< SCB ICSR: RETTOBASE Mask */ + +#define SCB_ICSR_VECTACTIVE_Pos 0U /*!< SCB ICSR: VECTACTIVE Position */ +#define SCB_ICSR_VECTACTIVE_Msk (0x1FFUL /*<< SCB_ICSR_VECTACTIVE_Pos*/) /*!< SCB ICSR: VECTACTIVE Mask */ + +/* SCB Vector Table Offset Register Definitions */ +#define SCB_VTOR_TBLOFF_Pos 7U /*!< SCB VTOR: TBLOFF Position */ +#define SCB_VTOR_TBLOFF_Msk (0x1FFFFFFUL << SCB_VTOR_TBLOFF_Pos) /*!< SCB VTOR: TBLOFF Mask */ + +/* SCB Application Interrupt and Reset Control Register Definitions */ +#define SCB_AIRCR_VECTKEY_Pos 16U /*!< SCB AIRCR: VECTKEY Position */ +#define SCB_AIRCR_VECTKEY_Msk (0xFFFFUL << SCB_AIRCR_VECTKEY_Pos) /*!< SCB AIRCR: VECTKEY Mask */ + +#define SCB_AIRCR_VECTKEYSTAT_Pos 16U /*!< SCB AIRCR: VECTKEYSTAT Position */ +#define SCB_AIRCR_VECTKEYSTAT_Msk (0xFFFFUL << SCB_AIRCR_VECTKEYSTAT_Pos) /*!< SCB AIRCR: VECTKEYSTAT Mask */ + +#define SCB_AIRCR_ENDIANESS_Pos 15U /*!< SCB AIRCR: ENDIANESS Position */ +#define SCB_AIRCR_ENDIANESS_Msk (1UL << SCB_AIRCR_ENDIANESS_Pos) /*!< SCB AIRCR: ENDIANESS Mask */ + +#define SCB_AIRCR_PRIGROUP_Pos 8U /*!< SCB AIRCR: PRIGROUP Position */ +#define SCB_AIRCR_PRIGROUP_Msk (7UL << SCB_AIRCR_PRIGROUP_Pos) /*!< SCB AIRCR: PRIGROUP Mask */ + +#define SCB_AIRCR_SYSRESETREQ_Pos 2U /*!< SCB AIRCR: SYSRESETREQ Position */ +#define SCB_AIRCR_SYSRESETREQ_Msk (1UL << SCB_AIRCR_SYSRESETREQ_Pos) /*!< SCB AIRCR: SYSRESETREQ Mask */ + +#define SCB_AIRCR_VECTCLRACTIVE_Pos 1U /*!< SCB AIRCR: VECTCLRACTIVE Position */ +#define SCB_AIRCR_VECTCLRACTIVE_Msk (1UL << SCB_AIRCR_VECTCLRACTIVE_Pos) /*!< SCB AIRCR: VECTCLRACTIVE Mask */ + +#define SCB_AIRCR_VECTRESET_Pos 0U /*!< SCB AIRCR: VECTRESET Position */ +#define SCB_AIRCR_VECTRESET_Msk (1UL /*<< SCB_AIRCR_VECTRESET_Pos*/) /*!< SCB AIRCR: VECTRESET Mask */ + +/* SCB System Control Register Definitions */ +#define SCB_SCR_SEVONPEND_Pos 4U /*!< SCB SCR: SEVONPEND Position */ +#define SCB_SCR_SEVONPEND_Msk (1UL << SCB_SCR_SEVONPEND_Pos) /*!< SCB SCR: SEVONPEND Mask */ + +#define SCB_SCR_SLEEPDEEP_Pos 2U /*!< SCB SCR: SLEEPDEEP Position */ +#define SCB_SCR_SLEEPDEEP_Msk (1UL << SCB_SCR_SLEEPDEEP_Pos) /*!< SCB SCR: SLEEPDEEP Mask */ + +#define SCB_SCR_SLEEPONEXIT_Pos 1U /*!< SCB SCR: SLEEPONEXIT Position */ +#define SCB_SCR_SLEEPONEXIT_Msk (1UL << SCB_SCR_SLEEPONEXIT_Pos) /*!< SCB SCR: SLEEPONEXIT Mask */ + +/* SCB Configuration Control Register Definitions */ +#define SCB_CCR_BP_Pos 18U /*!< SCB CCR: Branch prediction enable bit Position */ +#define SCB_CCR_BP_Msk (1UL << SCB_CCR_BP_Pos) /*!< SCB CCR: Branch prediction enable bit Mask */ + +#define SCB_CCR_IC_Pos 17U /*!< SCB CCR: Instruction cache enable bit Position */ +#define SCB_CCR_IC_Msk (1UL << SCB_CCR_IC_Pos) /*!< SCB CCR: Instruction cache enable bit Mask */ + +#define SCB_CCR_DC_Pos 16U /*!< SCB CCR: Cache enable bit Position */ +#define SCB_CCR_DC_Msk (1UL << SCB_CCR_DC_Pos) /*!< SCB CCR: Cache enable bit Mask */ + +#define SCB_CCR_STKALIGN_Pos 9U /*!< SCB 
CCR: STKALIGN Position */ +#define SCB_CCR_STKALIGN_Msk (1UL << SCB_CCR_STKALIGN_Pos) /*!< SCB CCR: STKALIGN Mask */ + +#define SCB_CCR_BFHFNMIGN_Pos 8U /*!< SCB CCR: BFHFNMIGN Position */ +#define SCB_CCR_BFHFNMIGN_Msk (1UL << SCB_CCR_BFHFNMIGN_Pos) /*!< SCB CCR: BFHFNMIGN Mask */ + +#define SCB_CCR_DIV_0_TRP_Pos 4U /*!< SCB CCR: DIV_0_TRP Position */ +#define SCB_CCR_DIV_0_TRP_Msk (1UL << SCB_CCR_DIV_0_TRP_Pos) /*!< SCB CCR: DIV_0_TRP Mask */ + +#define SCB_CCR_UNALIGN_TRP_Pos 3U /*!< SCB CCR: UNALIGN_TRP Position */ +#define SCB_CCR_UNALIGN_TRP_Msk (1UL << SCB_CCR_UNALIGN_TRP_Pos) /*!< SCB CCR: UNALIGN_TRP Mask */ + +#define SCB_CCR_USERSETMPEND_Pos 1U /*!< SCB CCR: USERSETMPEND Position */ +#define SCB_CCR_USERSETMPEND_Msk (1UL << SCB_CCR_USERSETMPEND_Pos) /*!< SCB CCR: USERSETMPEND Mask */ + +#define SCB_CCR_NONBASETHRDENA_Pos 0U /*!< SCB CCR: NONBASETHRDENA Position */ +#define SCB_CCR_NONBASETHRDENA_Msk (1UL /*<< SCB_CCR_NONBASETHRDENA_Pos*/) /*!< SCB CCR: NONBASETHRDENA Mask */ + +/* SCB System Handler Control and State Register Definitions */ +#define SCB_SHCSR_USGFAULTENA_Pos 18U /*!< SCB SHCSR: USGFAULTENA Position */ +#define SCB_SHCSR_USGFAULTENA_Msk (1UL << SCB_SHCSR_USGFAULTENA_Pos) /*!< SCB SHCSR: USGFAULTENA Mask */ + +#define SCB_SHCSR_BUSFAULTENA_Pos 17U /*!< SCB SHCSR: BUSFAULTENA Position */ +#define SCB_SHCSR_BUSFAULTENA_Msk (1UL << SCB_SHCSR_BUSFAULTENA_Pos) /*!< SCB SHCSR: BUSFAULTENA Mask */ + +#define SCB_SHCSR_MEMFAULTENA_Pos 16U /*!< SCB SHCSR: MEMFAULTENA Position */ +#define SCB_SHCSR_MEMFAULTENA_Msk (1UL << SCB_SHCSR_MEMFAULTENA_Pos) /*!< SCB SHCSR: MEMFAULTENA Mask */ + +#define SCB_SHCSR_SVCALLPENDED_Pos 15U /*!< SCB SHCSR: SVCALLPENDED Position */ +#define SCB_SHCSR_SVCALLPENDED_Msk (1UL << SCB_SHCSR_SVCALLPENDED_Pos) /*!< SCB SHCSR: SVCALLPENDED Mask */ + +#define SCB_SHCSR_BUSFAULTPENDED_Pos 14U /*!< SCB SHCSR: BUSFAULTPENDED Position */ +#define SCB_SHCSR_BUSFAULTPENDED_Msk (1UL << SCB_SHCSR_BUSFAULTPENDED_Pos) /*!< SCB SHCSR: BUSFAULTPENDED Mask */ + +#define SCB_SHCSR_MEMFAULTPENDED_Pos 13U /*!< SCB SHCSR: MEMFAULTPENDED Position */ +#define SCB_SHCSR_MEMFAULTPENDED_Msk (1UL << SCB_SHCSR_MEMFAULTPENDED_Pos) /*!< SCB SHCSR: MEMFAULTPENDED Mask */ + +#define SCB_SHCSR_USGFAULTPENDED_Pos 12U /*!< SCB SHCSR: USGFAULTPENDED Position */ +#define SCB_SHCSR_USGFAULTPENDED_Msk (1UL << SCB_SHCSR_USGFAULTPENDED_Pos) /*!< SCB SHCSR: USGFAULTPENDED Mask */ + +#define SCB_SHCSR_SYSTICKACT_Pos 11U /*!< SCB SHCSR: SYSTICKACT Position */ +#define SCB_SHCSR_SYSTICKACT_Msk (1UL << SCB_SHCSR_SYSTICKACT_Pos) /*!< SCB SHCSR: SYSTICKACT Mask */ + +#define SCB_SHCSR_PENDSVACT_Pos 10U /*!< SCB SHCSR: PENDSVACT Position */ +#define SCB_SHCSR_PENDSVACT_Msk (1UL << SCB_SHCSR_PENDSVACT_Pos) /*!< SCB SHCSR: PENDSVACT Mask */ + +#define SCB_SHCSR_MONITORACT_Pos 8U /*!< SCB SHCSR: MONITORACT Position */ +#define SCB_SHCSR_MONITORACT_Msk (1UL << SCB_SHCSR_MONITORACT_Pos) /*!< SCB SHCSR: MONITORACT Mask */ + +#define SCB_SHCSR_SVCALLACT_Pos 7U /*!< SCB SHCSR: SVCALLACT Position */ +#define SCB_SHCSR_SVCALLACT_Msk (1UL << SCB_SHCSR_SVCALLACT_Pos) /*!< SCB SHCSR: SVCALLACT Mask */ + +#define SCB_SHCSR_USGFAULTACT_Pos 3U /*!< SCB SHCSR: USGFAULTACT Position */ +#define SCB_SHCSR_USGFAULTACT_Msk (1UL << SCB_SHCSR_USGFAULTACT_Pos) /*!< SCB SHCSR: USGFAULTACT Mask */ + +#define SCB_SHCSR_BUSFAULTACT_Pos 1U /*!< SCB SHCSR: BUSFAULTACT Position */ +#define SCB_SHCSR_BUSFAULTACT_Msk (1UL << SCB_SHCSR_BUSFAULTACT_Pos) /*!< SCB SHCSR: BUSFAULTACT Mask */ + +#define SCB_SHCSR_MEMFAULTACT_Pos 0U 
/*!< SCB SHCSR: MEMFAULTACT Position */ +#define SCB_SHCSR_MEMFAULTACT_Msk (1UL /*<< SCB_SHCSR_MEMFAULTACT_Pos*/) /*!< SCB SHCSR: MEMFAULTACT Mask */ + +/* SCB Configurable Fault Status Register Definitions */ +#define SCB_CFSR_USGFAULTSR_Pos 16U /*!< SCB CFSR: Usage Fault Status Register Position */ +#define SCB_CFSR_USGFAULTSR_Msk (0xFFFFUL << SCB_CFSR_USGFAULTSR_Pos) /*!< SCB CFSR: Usage Fault Status Register Mask */ + +#define SCB_CFSR_BUSFAULTSR_Pos 8U /*!< SCB CFSR: Bus Fault Status Register Position */ +#define SCB_CFSR_BUSFAULTSR_Msk (0xFFUL << SCB_CFSR_BUSFAULTSR_Pos) /*!< SCB CFSR: Bus Fault Status Register Mask */ + +#define SCB_CFSR_MEMFAULTSR_Pos 0U /*!< SCB CFSR: Memory Manage Fault Status Register Position */ +#define SCB_CFSR_MEMFAULTSR_Msk (0xFFUL /*<< SCB_CFSR_MEMFAULTSR_Pos*/) /*!< SCB CFSR: Memory Manage Fault Status Register Mask */ + +/* MemManage Fault Status Register (part of SCB Configurable Fault Status Register) */ +#define SCB_CFSR_MMARVALID_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 7U) /*!< SCB CFSR (MMFSR): MMARVALID Position */ +#define SCB_CFSR_MMARVALID_Msk (1UL << SCB_CFSR_MMARVALID_Pos) /*!< SCB CFSR (MMFSR): MMARVALID Mask */ + +#define SCB_CFSR_MLSPERR_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 5U) /*!< SCB CFSR (MMFSR): MLSPERR Position */ +#define SCB_CFSR_MLSPERR_Msk (1UL << SCB_CFSR_MLSPERR_Pos) /*!< SCB CFSR (MMFSR): MLSPERR Mask */ + +#define SCB_CFSR_MSTKERR_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 4U) /*!< SCB CFSR (MMFSR): MSTKERR Position */ +#define SCB_CFSR_MSTKERR_Msk (1UL << SCB_CFSR_MSTKERR_Pos) /*!< SCB CFSR (MMFSR): MSTKERR Mask */ + +#define SCB_CFSR_MUNSTKERR_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 3U) /*!< SCB CFSR (MMFSR): MUNSTKERR Position */ +#define SCB_CFSR_MUNSTKERR_Msk (1UL << SCB_CFSR_MUNSTKERR_Pos) /*!< SCB CFSR (MMFSR): MUNSTKERR Mask */ + +#define SCB_CFSR_DACCVIOL_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 1U) /*!< SCB CFSR (MMFSR): DACCVIOL Position */ +#define SCB_CFSR_DACCVIOL_Msk (1UL << SCB_CFSR_DACCVIOL_Pos) /*!< SCB CFSR (MMFSR): DACCVIOL Mask */ + +#define SCB_CFSR_IACCVIOL_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 0U) /*!< SCB CFSR (MMFSR): IACCVIOL Position */ +#define SCB_CFSR_IACCVIOL_Msk (1UL /*<< SCB_CFSR_IACCVIOL_Pos*/) /*!< SCB CFSR (MMFSR): IACCVIOL Mask */ + +/* BusFault Status Register (part of SCB Configurable Fault Status Register) */ +#define SCB_CFSR_BFARVALID_Pos (SCB_CFSR_BUSFAULTSR_Pos + 7U) /*!< SCB CFSR (BFSR): BFARVALID Position */ +#define SCB_CFSR_BFARVALID_Msk (1UL << SCB_CFSR_BFARVALID_Pos) /*!< SCB CFSR (BFSR): BFARVALID Mask */ + +#define SCB_CFSR_LSPERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 5U) /*!< SCB CFSR (BFSR): LSPERR Position */ +#define SCB_CFSR_LSPERR_Msk (1UL << SCB_CFSR_LSPERR_Pos) /*!< SCB CFSR (BFSR): LSPERR Mask */ + +#define SCB_CFSR_STKERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 4U) /*!< SCB CFSR (BFSR): STKERR Position */ +#define SCB_CFSR_STKERR_Msk (1UL << SCB_CFSR_STKERR_Pos) /*!< SCB CFSR (BFSR): STKERR Mask */ + +#define SCB_CFSR_UNSTKERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 3U) /*!< SCB CFSR (BFSR): UNSTKERR Position */ +#define SCB_CFSR_UNSTKERR_Msk (1UL << SCB_CFSR_UNSTKERR_Pos) /*!< SCB CFSR (BFSR): UNSTKERR Mask */ + +#define SCB_CFSR_IMPRECISERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 2U) /*!< SCB CFSR (BFSR): IMPRECISERR Position */ +#define SCB_CFSR_IMPRECISERR_Msk (1UL << SCB_CFSR_IMPRECISERR_Pos) /*!< SCB CFSR (BFSR): IMPRECISERR Mask */ + +#define SCB_CFSR_PRECISERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 1U) /*!< SCB CFSR (BFSR): PRECISERR Position */ +#define SCB_CFSR_PRECISERR_Msk (1UL << SCB_CFSR_PRECISERR_Pos) /*!< SCB CFSR (BFSR): 
PRECISERR Mask */ + +#define SCB_CFSR_IBUSERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 0U) /*!< SCB CFSR (BFSR): IBUSERR Position */ +#define SCB_CFSR_IBUSERR_Msk (1UL << SCB_CFSR_IBUSERR_Pos) /*!< SCB CFSR (BFSR): IBUSERR Mask */ + +/* UsageFault Status Register (part of SCB Configurable Fault Status Register) */ +#define SCB_CFSR_DIVBYZERO_Pos (SCB_CFSR_USGFAULTSR_Pos + 9U) /*!< SCB CFSR (UFSR): DIVBYZERO Position */ +#define SCB_CFSR_DIVBYZERO_Msk (1UL << SCB_CFSR_DIVBYZERO_Pos) /*!< SCB CFSR (UFSR): DIVBYZERO Mask */ + +#define SCB_CFSR_UNALIGNED_Pos (SCB_CFSR_USGFAULTSR_Pos + 8U) /*!< SCB CFSR (UFSR): UNALIGNED Position */ +#define SCB_CFSR_UNALIGNED_Msk (1UL << SCB_CFSR_UNALIGNED_Pos) /*!< SCB CFSR (UFSR): UNALIGNED Mask */ + +#define SCB_CFSR_NOCP_Pos (SCB_CFSR_USGFAULTSR_Pos + 3U) /*!< SCB CFSR (UFSR): NOCP Position */ +#define SCB_CFSR_NOCP_Msk (1UL << SCB_CFSR_NOCP_Pos) /*!< SCB CFSR (UFSR): NOCP Mask */ + +#define SCB_CFSR_INVPC_Pos (SCB_CFSR_USGFAULTSR_Pos + 2U) /*!< SCB CFSR (UFSR): INVPC Position */ +#define SCB_CFSR_INVPC_Msk (1UL << SCB_CFSR_INVPC_Pos) /*!< SCB CFSR (UFSR): INVPC Mask */ + +#define SCB_CFSR_INVSTATE_Pos (SCB_CFSR_USGFAULTSR_Pos + 1U) /*!< SCB CFSR (UFSR): INVSTATE Position */ +#define SCB_CFSR_INVSTATE_Msk (1UL << SCB_CFSR_INVSTATE_Pos) /*!< SCB CFSR (UFSR): INVSTATE Mask */ + +#define SCB_CFSR_UNDEFINSTR_Pos (SCB_CFSR_USGFAULTSR_Pos + 0U) /*!< SCB CFSR (UFSR): UNDEFINSTR Position */ +#define SCB_CFSR_UNDEFINSTR_Msk (1UL << SCB_CFSR_UNDEFINSTR_Pos) /*!< SCB CFSR (UFSR): UNDEFINSTR Mask */ + +/* SCB Hard Fault Status Register Definitions */ +#define SCB_HFSR_DEBUGEVT_Pos 31U /*!< SCB HFSR: DEBUGEVT Position */ +#define SCB_HFSR_DEBUGEVT_Msk (1UL << SCB_HFSR_DEBUGEVT_Pos) /*!< SCB HFSR: DEBUGEVT Mask */ + +#define SCB_HFSR_FORCED_Pos 30U /*!< SCB HFSR: FORCED Position */ +#define SCB_HFSR_FORCED_Msk (1UL << SCB_HFSR_FORCED_Pos) /*!< SCB HFSR: FORCED Mask */ + +#define SCB_HFSR_VECTTBL_Pos 1U /*!< SCB HFSR: VECTTBL Position */ +#define SCB_HFSR_VECTTBL_Msk (1UL << SCB_HFSR_VECTTBL_Pos) /*!< SCB HFSR: VECTTBL Mask */ + +/* SCB Debug Fault Status Register Definitions */ +#define SCB_DFSR_EXTERNAL_Pos 4U /*!< SCB DFSR: EXTERNAL Position */ +#define SCB_DFSR_EXTERNAL_Msk (1UL << SCB_DFSR_EXTERNAL_Pos) /*!< SCB DFSR: EXTERNAL Mask */ + +#define SCB_DFSR_VCATCH_Pos 3U /*!< SCB DFSR: VCATCH Position */ +#define SCB_DFSR_VCATCH_Msk (1UL << SCB_DFSR_VCATCH_Pos) /*!< SCB DFSR: VCATCH Mask */ + +#define SCB_DFSR_DWTTRAP_Pos 2U /*!< SCB DFSR: DWTTRAP Position */ +#define SCB_DFSR_DWTTRAP_Msk (1UL << SCB_DFSR_DWTTRAP_Pos) /*!< SCB DFSR: DWTTRAP Mask */ + +#define SCB_DFSR_BKPT_Pos 1U /*!< SCB DFSR: BKPT Position */ +#define SCB_DFSR_BKPT_Msk (1UL << SCB_DFSR_BKPT_Pos) /*!< SCB DFSR: BKPT Mask */ + +#define SCB_DFSR_HALTED_Pos 0U /*!< SCB DFSR: HALTED Position */ +#define SCB_DFSR_HALTED_Msk (1UL /*<< SCB_DFSR_HALTED_Pos*/) /*!< SCB DFSR: HALTED Mask */ + +/* SCB Cache Level ID Register Definitions */ +#define SCB_CLIDR_LOUU_Pos 27U /*!< SCB CLIDR: LoUU Position */ +#define SCB_CLIDR_LOUU_Msk (7UL << SCB_CLIDR_LOUU_Pos) /*!< SCB CLIDR: LoUU Mask */ + +#define SCB_CLIDR_LOC_Pos 24U /*!< SCB CLIDR: LoC Position */ +#define SCB_CLIDR_LOC_Msk (7UL << SCB_CLIDR_LOC_Pos) /*!< SCB CLIDR: LoC Mask */ + +/* SCB Cache Type Register Definitions */ +#define SCB_CTR_FORMAT_Pos 29U /*!< SCB CTR: Format Position */ +#define SCB_CTR_FORMAT_Msk (7UL << SCB_CTR_FORMAT_Pos) /*!< SCB CTR: Format Mask */ + +#define SCB_CTR_CWG_Pos 24U /*!< SCB CTR: CWG Position */ +#define SCB_CTR_CWG_Msk (0xFUL << 
SCB_CTR_CWG_Pos) /*!< SCB CTR: CWG Mask */ + +#define SCB_CTR_ERG_Pos 20U /*!< SCB CTR: ERG Position */ +#define SCB_CTR_ERG_Msk (0xFUL << SCB_CTR_ERG_Pos) /*!< SCB CTR: ERG Mask */ + +#define SCB_CTR_DMINLINE_Pos 16U /*!< SCB CTR: DminLine Position */ +#define SCB_CTR_DMINLINE_Msk (0xFUL << SCB_CTR_DMINLINE_Pos) /*!< SCB CTR: DminLine Mask */ + +#define SCB_CTR_IMINLINE_Pos 0U /*!< SCB CTR: ImInLine Position */ +#define SCB_CTR_IMINLINE_Msk (0xFUL /*<< SCB_CTR_IMINLINE_Pos*/) /*!< SCB CTR: ImInLine Mask */ + +/* SCB Cache Size ID Register Definitions */ +#define SCB_CCSIDR_WT_Pos 31U /*!< SCB CCSIDR: WT Position */ +#define SCB_CCSIDR_WT_Msk (1UL << SCB_CCSIDR_WT_Pos) /*!< SCB CCSIDR: WT Mask */ + +#define SCB_CCSIDR_WB_Pos 30U /*!< SCB CCSIDR: WB Position */ +#define SCB_CCSIDR_WB_Msk (1UL << SCB_CCSIDR_WB_Pos) /*!< SCB CCSIDR: WB Mask */ + +#define SCB_CCSIDR_RA_Pos 29U /*!< SCB CCSIDR: RA Position */ +#define SCB_CCSIDR_RA_Msk (1UL << SCB_CCSIDR_RA_Pos) /*!< SCB CCSIDR: RA Mask */ + +#define SCB_CCSIDR_WA_Pos 28U /*!< SCB CCSIDR: WA Position */ +#define SCB_CCSIDR_WA_Msk (1UL << SCB_CCSIDR_WA_Pos) /*!< SCB CCSIDR: WA Mask */ + +#define SCB_CCSIDR_NUMSETS_Pos 13U /*!< SCB CCSIDR: NumSets Position */ +#define SCB_CCSIDR_NUMSETS_Msk (0x7FFFUL << SCB_CCSIDR_NUMSETS_Pos) /*!< SCB CCSIDR: NumSets Mask */ + +#define SCB_CCSIDR_ASSOCIATIVITY_Pos 3U /*!< SCB CCSIDR: Associativity Position */ +#define SCB_CCSIDR_ASSOCIATIVITY_Msk (0x3FFUL << SCB_CCSIDR_ASSOCIATIVITY_Pos) /*!< SCB CCSIDR: Associativity Mask */ + +#define SCB_CCSIDR_LINESIZE_Pos 0U /*!< SCB CCSIDR: LineSize Position */ +#define SCB_CCSIDR_LINESIZE_Msk (7UL /*<< SCB_CCSIDR_LINESIZE_Pos*/) /*!< SCB CCSIDR: LineSize Mask */ + +/* SCB Cache Size Selection Register Definitions */ +#define SCB_CSSELR_LEVEL_Pos 1U /*!< SCB CSSELR: Level Position */ +#define SCB_CSSELR_LEVEL_Msk (7UL << SCB_CSSELR_LEVEL_Pos) /*!< SCB CSSELR: Level Mask */ + +#define SCB_CSSELR_IND_Pos 0U /*!< SCB CSSELR: InD Position */ +#define SCB_CSSELR_IND_Msk (1UL /*<< SCB_CSSELR_IND_Pos*/) /*!< SCB CSSELR: InD Mask */ + +/* SCB Software Triggered Interrupt Register Definitions */ +#define SCB_STIR_INTID_Pos 0U /*!< SCB STIR: INTID Position */ +#define SCB_STIR_INTID_Msk (0x1FFUL /*<< SCB_STIR_INTID_Pos*/) /*!< SCB STIR: INTID Mask */ + +/* SCB D-Cache Invalidate by Set-way Register Definitions */ +#define SCB_DCISW_WAY_Pos 30U /*!< SCB DCISW: Way Position */ +#define SCB_DCISW_WAY_Msk (3UL << SCB_DCISW_WAY_Pos) /*!< SCB DCISW: Way Mask */ + +#define SCB_DCISW_SET_Pos 5U /*!< SCB DCISW: Set Position */ +#define SCB_DCISW_SET_Msk (0x1FFUL << SCB_DCISW_SET_Pos) /*!< SCB DCISW: Set Mask */ + +/* SCB D-Cache Clean by Set-way Register Definitions */ +#define SCB_DCCSW_WAY_Pos 30U /*!< SCB DCCSW: Way Position */ +#define SCB_DCCSW_WAY_Msk (3UL << SCB_DCCSW_WAY_Pos) /*!< SCB DCCSW: Way Mask */ + +#define SCB_DCCSW_SET_Pos 5U /*!< SCB DCCSW: Set Position */ +#define SCB_DCCSW_SET_Msk (0x1FFUL << SCB_DCCSW_SET_Pos) /*!< SCB DCCSW: Set Mask */ + +/* SCB D-Cache Clean and Invalidate by Set-way Register Definitions */ +#define SCB_DCCISW_WAY_Pos 30U /*!< SCB DCCISW: Way Position */ +#define SCB_DCCISW_WAY_Msk (3UL << SCB_DCCISW_WAY_Pos) /*!< SCB DCCISW: Way Mask */ + +#define SCB_DCCISW_SET_Pos 5U /*!< SCB DCCISW: Set Position */ +#define SCB_DCCISW_SET_Msk (0x1FFUL << SCB_DCCISW_SET_Pos) /*!< SCB DCCISW: Set Mask */ + +/* Instruction Tightly-Coupled Memory Control Register Definitions */ +#define SCB_ITCMCR_SZ_Pos 3U /*!< SCB ITCMCR: SZ Position */ +#define SCB_ITCMCR_SZ_Msk 
(0xFUL << SCB_ITCMCR_SZ_Pos) /*!< SCB ITCMCR: SZ Mask */ + +#define SCB_ITCMCR_RETEN_Pos 2U /*!< SCB ITCMCR: RETEN Position */ +#define SCB_ITCMCR_RETEN_Msk (1UL << SCB_ITCMCR_RETEN_Pos) /*!< SCB ITCMCR: RETEN Mask */ + +#define SCB_ITCMCR_RMW_Pos 1U /*!< SCB ITCMCR: RMW Position */ +#define SCB_ITCMCR_RMW_Msk (1UL << SCB_ITCMCR_RMW_Pos) /*!< SCB ITCMCR: RMW Mask */ + +#define SCB_ITCMCR_EN_Pos 0U /*!< SCB ITCMCR: EN Position */ +#define SCB_ITCMCR_EN_Msk (1UL /*<< SCB_ITCMCR_EN_Pos*/) /*!< SCB ITCMCR: EN Mask */ + +/* Data Tightly-Coupled Memory Control Register Definitions */ +#define SCB_DTCMCR_SZ_Pos 3U /*!< SCB DTCMCR: SZ Position */ +#define SCB_DTCMCR_SZ_Msk (0xFUL << SCB_DTCMCR_SZ_Pos) /*!< SCB DTCMCR: SZ Mask */ + +#define SCB_DTCMCR_RETEN_Pos 2U /*!< SCB DTCMCR: RETEN Position */ +#define SCB_DTCMCR_RETEN_Msk (1UL << SCB_DTCMCR_RETEN_Pos) /*!< SCB DTCMCR: RETEN Mask */ + +#define SCB_DTCMCR_RMW_Pos 1U /*!< SCB DTCMCR: RMW Position */ +#define SCB_DTCMCR_RMW_Msk (1UL << SCB_DTCMCR_RMW_Pos) /*!< SCB DTCMCR: RMW Mask */ + +#define SCB_DTCMCR_EN_Pos 0U /*!< SCB DTCMCR: EN Position */ +#define SCB_DTCMCR_EN_Msk (1UL /*<< SCB_DTCMCR_EN_Pos*/) /*!< SCB DTCMCR: EN Mask */ + +/* AHBP Control Register Definitions */ +#define SCB_AHBPCR_SZ_Pos 1U /*!< SCB AHBPCR: SZ Position */ +#define SCB_AHBPCR_SZ_Msk (7UL << SCB_AHBPCR_SZ_Pos) /*!< SCB AHBPCR: SZ Mask */ + +#define SCB_AHBPCR_EN_Pos 0U /*!< SCB AHBPCR: EN Position */ +#define SCB_AHBPCR_EN_Msk (1UL /*<< SCB_AHBPCR_EN_Pos*/) /*!< SCB AHBPCR: EN Mask */ + +/* L1 Cache Control Register Definitions */ +#define SCB_CACR_FORCEWT_Pos 2U /*!< SCB CACR: FORCEWT Position */ +#define SCB_CACR_FORCEWT_Msk (1UL << SCB_CACR_FORCEWT_Pos) /*!< SCB CACR: FORCEWT Mask */ + +#define SCB_CACR_ECCEN_Pos 1U /*!< SCB CACR: ECCEN Position */ +#define SCB_CACR_ECCEN_Msk (1UL << SCB_CACR_ECCEN_Pos) /*!< SCB CACR: ECCEN Mask */ + +#define SCB_CACR_SIWT_Pos 0U /*!< SCB CACR: SIWT Position */ +#define SCB_CACR_SIWT_Msk (1UL /*<< SCB_CACR_SIWT_Pos*/) /*!< SCB CACR: SIWT Mask */ + +/* AHBS Control Register Definitions */ +#define SCB_AHBSCR_INITCOUNT_Pos 11U /*!< SCB AHBSCR: INITCOUNT Position */ +#define SCB_AHBSCR_INITCOUNT_Msk (0x1FUL << SCB_AHBPCR_INITCOUNT_Pos) /*!< SCB AHBSCR: INITCOUNT Mask */ + +#define SCB_AHBSCR_TPRI_Pos 2U /*!< SCB AHBSCR: TPRI Position */ +#define SCB_AHBSCR_TPRI_Msk (0x1FFUL << SCB_AHBPCR_TPRI_Pos) /*!< SCB AHBSCR: TPRI Mask */ + +#define SCB_AHBSCR_CTL_Pos 0U /*!< SCB AHBSCR: CTL Position*/ +#define SCB_AHBSCR_CTL_Msk (3UL /*<< SCB_AHBPCR_CTL_Pos*/) /*!< SCB AHBSCR: CTL Mask */ + +/* Auxiliary Bus Fault Status Register Definitions */ +#define SCB_ABFSR_AXIMTYPE_Pos 8U /*!< SCB ABFSR: AXIMTYPE Position*/ +#define SCB_ABFSR_AXIMTYPE_Msk (3UL << SCB_ABFSR_AXIMTYPE_Pos) /*!< SCB ABFSR: AXIMTYPE Mask */ + +#define SCB_ABFSR_EPPB_Pos 4U /*!< SCB ABFSR: EPPB Position*/ +#define SCB_ABFSR_EPPB_Msk (1UL << SCB_ABFSR_EPPB_Pos) /*!< SCB ABFSR: EPPB Mask */ + +#define SCB_ABFSR_AXIM_Pos 3U /*!< SCB ABFSR: AXIM Position*/ +#define SCB_ABFSR_AXIM_Msk (1UL << SCB_ABFSR_AXIM_Pos) /*!< SCB ABFSR: AXIM Mask */ + +#define SCB_ABFSR_AHBP_Pos 2U /*!< SCB ABFSR: AHBP Position*/ +#define SCB_ABFSR_AHBP_Msk (1UL << SCB_ABFSR_AHBP_Pos) /*!< SCB ABFSR: AHBP Mask */ + +#define SCB_ABFSR_DTCM_Pos 1U /*!< SCB ABFSR: DTCM Position*/ +#define SCB_ABFSR_DTCM_Msk (1UL << SCB_ABFSR_DTCM_Pos) /*!< SCB ABFSR: DTCM Mask */ + +#define SCB_ABFSR_ITCM_Pos 0U /*!< SCB ABFSR: ITCM Position*/ +#define SCB_ABFSR_ITCM_Msk (1UL /*<< SCB_ABFSR_ITCM_Pos*/) /*!< SCB ABFSR: ITCM Mask 
*/ + +/*@} end of group CMSIS_SCB */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SCnSCB System Controls not in SCB (SCnSCB) + \brief Type definitions for the System Control and ID Register not in the SCB + @{ + */ + +/** + \brief Structure type to access the System Control and ID Register not in the SCB. + */ +typedef struct +{ + uint32_t RESERVED0[1U]; + __IM uint32_t ICTR; /*!< Offset: 0x004 (R/ ) Interrupt Controller Type Register */ + __IOM uint32_t ACTLR; /*!< Offset: 0x008 (R/W) Auxiliary Control Register */ +} SCnSCB_Type; + +/* Interrupt Controller Type Register Definitions */ +#define SCnSCB_ICTR_INTLINESNUM_Pos 0U /*!< ICTR: INTLINESNUM Position */ +#define SCnSCB_ICTR_INTLINESNUM_Msk (0xFUL /*<< SCnSCB_ICTR_INTLINESNUM_Pos*/) /*!< ICTR: INTLINESNUM Mask */ + +/* Auxiliary Control Register Definitions */ +#define SCnSCB_ACTLR_DISITMATBFLUSH_Pos 12U /*!< ACTLR: DISITMATBFLUSH Position */ +#define SCnSCB_ACTLR_DISITMATBFLUSH_Msk (1UL << SCnSCB_ACTLR_DISITMATBFLUSH_Pos) /*!< ACTLR: DISITMATBFLUSH Mask */ + +#define SCnSCB_ACTLR_DISRAMODE_Pos 11U /*!< ACTLR: DISRAMODE Position */ +#define SCnSCB_ACTLR_DISRAMODE_Msk (1UL << SCnSCB_ACTLR_DISRAMODE_Pos) /*!< ACTLR: DISRAMODE Mask */ + +#define SCnSCB_ACTLR_FPEXCODIS_Pos 10U /*!< ACTLR: FPEXCODIS Position */ +#define SCnSCB_ACTLR_FPEXCODIS_Msk (1UL << SCnSCB_ACTLR_FPEXCODIS_Pos) /*!< ACTLR: FPEXCODIS Mask */ + +#define SCnSCB_ACTLR_DISFOLD_Pos 2U /*!< ACTLR: DISFOLD Position */ +#define SCnSCB_ACTLR_DISFOLD_Msk (1UL << SCnSCB_ACTLR_DISFOLD_Pos) /*!< ACTLR: DISFOLD Mask */ + +#define SCnSCB_ACTLR_DISMCYCINT_Pos 0U /*!< ACTLR: DISMCYCINT Position */ +#define SCnSCB_ACTLR_DISMCYCINT_Msk (1UL /*<< SCnSCB_ACTLR_DISMCYCINT_Pos*/) /*!< ACTLR: DISMCYCINT Mask */ + +/*@} end of group CMSIS_SCnotSCB */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SysTick System Tick Timer (SysTick) + \brief Type definitions for the System Timer Registers. + @{ + */ + +/** + \brief Structure type to access the System Timer (SysTick). 
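+          Illustrative sketch only: the down-counter can be read directly through the
+          SysTick pointer defined further below, e.g.:
+          \code
+          uint32_t remaining = SysTick->VAL;         // current down-counter value
+          uint32_t period    = SysTick->LOAD + 1U;   // ticks per reload period
+          \endcode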
+ */ +typedef struct +{ + __IOM uint32_t CTRL; /*!< Offset: 0x000 (R/W) SysTick Control and Status Register */ + __IOM uint32_t LOAD; /*!< Offset: 0x004 (R/W) SysTick Reload Value Register */ + __IOM uint32_t VAL; /*!< Offset: 0x008 (R/W) SysTick Current Value Register */ + __IM uint32_t CALIB; /*!< Offset: 0x00C (R/ ) SysTick Calibration Register */ +} SysTick_Type; + +/* SysTick Control / Status Register Definitions */ +#define SysTick_CTRL_COUNTFLAG_Pos 16U /*!< SysTick CTRL: COUNTFLAG Position */ +#define SysTick_CTRL_COUNTFLAG_Msk (1UL << SysTick_CTRL_COUNTFLAG_Pos) /*!< SysTick CTRL: COUNTFLAG Mask */ + +#define SysTick_CTRL_CLKSOURCE_Pos 2U /*!< SysTick CTRL: CLKSOURCE Position */ +#define SysTick_CTRL_CLKSOURCE_Msk (1UL << SysTick_CTRL_CLKSOURCE_Pos) /*!< SysTick CTRL: CLKSOURCE Mask */ + +#define SysTick_CTRL_TICKINT_Pos 1U /*!< SysTick CTRL: TICKINT Position */ +#define SysTick_CTRL_TICKINT_Msk (1UL << SysTick_CTRL_TICKINT_Pos) /*!< SysTick CTRL: TICKINT Mask */ + +#define SysTick_CTRL_ENABLE_Pos 0U /*!< SysTick CTRL: ENABLE Position */ +#define SysTick_CTRL_ENABLE_Msk (1UL /*<< SysTick_CTRL_ENABLE_Pos*/) /*!< SysTick CTRL: ENABLE Mask */ + +/* SysTick Reload Register Definitions */ +#define SysTick_LOAD_RELOAD_Pos 0U /*!< SysTick LOAD: RELOAD Position */ +#define SysTick_LOAD_RELOAD_Msk (0xFFFFFFUL /*<< SysTick_LOAD_RELOAD_Pos*/) /*!< SysTick LOAD: RELOAD Mask */ + +/* SysTick Current Register Definitions */ +#define SysTick_VAL_CURRENT_Pos 0U /*!< SysTick VAL: CURRENT Position */ +#define SysTick_VAL_CURRENT_Msk (0xFFFFFFUL /*<< SysTick_VAL_CURRENT_Pos*/) /*!< SysTick VAL: CURRENT Mask */ + +/* SysTick Calibration Register Definitions */ +#define SysTick_CALIB_NOREF_Pos 31U /*!< SysTick CALIB: NOREF Position */ +#define SysTick_CALIB_NOREF_Msk (1UL << SysTick_CALIB_NOREF_Pos) /*!< SysTick CALIB: NOREF Mask */ + +#define SysTick_CALIB_SKEW_Pos 30U /*!< SysTick CALIB: SKEW Position */ +#define SysTick_CALIB_SKEW_Msk (1UL << SysTick_CALIB_SKEW_Pos) /*!< SysTick CALIB: SKEW Mask */ + +#define SysTick_CALIB_TENMS_Pos 0U /*!< SysTick CALIB: TENMS Position */ +#define SysTick_CALIB_TENMS_Msk (0xFFFFFFUL /*<< SysTick_CALIB_TENMS_Pos*/) /*!< SysTick CALIB: TENMS Mask */ + +/*@} end of group CMSIS_SysTick */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_ITM Instrumentation Trace Macrocell (ITM) + \brief Type definitions for the Instrumentation Trace Macrocell (ITM) + @{ + */ + +/** + \brief Structure type to access the Instrumentation Trace Macrocell Register (ITM). 
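+ 
+ \note Usage sketch (illustrative only): blocking write of a single byte to stimulus port 0,
+       assuming the debug tool has already enabled the ITM and the trace clock.
+ \code
+   if (((ITM->TCR & ITM_TCR_ITMENA_Msk) != 0UL) &&   // ITM enabled
+       ((ITM->TER & 1UL)                != 0UL))     // stimulus port 0 enabled
+   {
+     while (ITM->PORT[0U].u32 == 0UL)                // wait until the port can accept data
+     {
+       __NOP();
+     }
+     ITM->PORT[0U].u8 = (uint8_t)'x';                // emit one byte
+   }
+ \endcode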
+ */ +typedef struct +{ + __OM union + { + __OM uint8_t u8; /*!< Offset: 0x000 ( /W) ITM Stimulus Port 8-bit */ + __OM uint16_t u16; /*!< Offset: 0x000 ( /W) ITM Stimulus Port 16-bit */ + __OM uint32_t u32; /*!< Offset: 0x000 ( /W) ITM Stimulus Port 32-bit */ + } PORT [32U]; /*!< Offset: 0x000 ( /W) ITM Stimulus Port Registers */ + uint32_t RESERVED0[864U]; + __IOM uint32_t TER; /*!< Offset: 0xE00 (R/W) ITM Trace Enable Register */ + uint32_t RESERVED1[15U]; + __IOM uint32_t TPR; /*!< Offset: 0xE40 (R/W) ITM Trace Privilege Register */ + uint32_t RESERVED2[15U]; + __IOM uint32_t TCR; /*!< Offset: 0xE80 (R/W) ITM Trace Control Register */ + uint32_t RESERVED3[29U]; + __OM uint32_t IWR; /*!< Offset: 0xEF8 ( /W) ITM Integration Write Register */ + __IM uint32_t IRR; /*!< Offset: 0xEFC (R/ ) ITM Integration Read Register */ + __IOM uint32_t IMCR; /*!< Offset: 0xF00 (R/W) ITM Integration Mode Control Register */ + uint32_t RESERVED4[43U]; + __OM uint32_t LAR; /*!< Offset: 0xFB0 ( /W) ITM Lock Access Register */ + __IM uint32_t LSR; /*!< Offset: 0xFB4 (R/ ) ITM Lock Status Register */ + uint32_t RESERVED5[6U]; + __IM uint32_t PID4; /*!< Offset: 0xFD0 (R/ ) ITM Peripheral Identification Register #4 */ + __IM uint32_t PID5; /*!< Offset: 0xFD4 (R/ ) ITM Peripheral Identification Register #5 */ + __IM uint32_t PID6; /*!< Offset: 0xFD8 (R/ ) ITM Peripheral Identification Register #6 */ + __IM uint32_t PID7; /*!< Offset: 0xFDC (R/ ) ITM Peripheral Identification Register #7 */ + __IM uint32_t PID0; /*!< Offset: 0xFE0 (R/ ) ITM Peripheral Identification Register #0 */ + __IM uint32_t PID1; /*!< Offset: 0xFE4 (R/ ) ITM Peripheral Identification Register #1 */ + __IM uint32_t PID2; /*!< Offset: 0xFE8 (R/ ) ITM Peripheral Identification Register #2 */ + __IM uint32_t PID3; /*!< Offset: 0xFEC (R/ ) ITM Peripheral Identification Register #3 */ + __IM uint32_t CID0; /*!< Offset: 0xFF0 (R/ ) ITM Component Identification Register #0 */ + __IM uint32_t CID1; /*!< Offset: 0xFF4 (R/ ) ITM Component Identification Register #1 */ + __IM uint32_t CID2; /*!< Offset: 0xFF8 (R/ ) ITM Component Identification Register #2 */ + __IM uint32_t CID3; /*!< Offset: 0xFFC (R/ ) ITM Component Identification Register #3 */ +} ITM_Type; + +/* ITM Trace Privilege Register Definitions */ +#define ITM_TPR_PRIVMASK_Pos 0U /*!< ITM TPR: PRIVMASK Position */ +#define ITM_TPR_PRIVMASK_Msk (0xFFFFFFFFUL /*<< ITM_TPR_PRIVMASK_Pos*/) /*!< ITM TPR: PRIVMASK Mask */ + +/* ITM Trace Control Register Definitions */ +#define ITM_TCR_BUSY_Pos 23U /*!< ITM TCR: BUSY Position */ +#define ITM_TCR_BUSY_Msk (1UL << ITM_TCR_BUSY_Pos) /*!< ITM TCR: BUSY Mask */ + +#define ITM_TCR_TraceBusID_Pos 16U /*!< ITM TCR: ATBID Position */ +#define ITM_TCR_TraceBusID_Msk (0x7FUL << ITM_TCR_TraceBusID_Pos) /*!< ITM TCR: ATBID Mask */ + +#define ITM_TCR_GTSFREQ_Pos 10U /*!< ITM TCR: Global timestamp frequency Position */ +#define ITM_TCR_GTSFREQ_Msk (3UL << ITM_TCR_GTSFREQ_Pos) /*!< ITM TCR: Global timestamp frequency Mask */ + +#define ITM_TCR_TSPrescale_Pos 8U /*!< ITM TCR: TSPrescale Position */ +#define ITM_TCR_TSPrescale_Msk (3UL << ITM_TCR_TSPrescale_Pos) /*!< ITM TCR: TSPrescale Mask */ + +#define ITM_TCR_SWOENA_Pos 4U /*!< ITM TCR: SWOENA Position */ +#define ITM_TCR_SWOENA_Msk (1UL << ITM_TCR_SWOENA_Pos) /*!< ITM TCR: SWOENA Mask */ + +#define ITM_TCR_DWTENA_Pos 3U /*!< ITM TCR: DWTENA Position */ +#define ITM_TCR_DWTENA_Msk (1UL << ITM_TCR_DWTENA_Pos) /*!< ITM TCR: DWTENA Mask */ + +#define ITM_TCR_SYNCENA_Pos 2U /*!< ITM TCR: SYNCENA Position */ +#define 
ITM_TCR_SYNCENA_Msk (1UL << ITM_TCR_SYNCENA_Pos) /*!< ITM TCR: SYNCENA Mask */ + +#define ITM_TCR_TSENA_Pos 1U /*!< ITM TCR: TSENA Position */ +#define ITM_TCR_TSENA_Msk (1UL << ITM_TCR_TSENA_Pos) /*!< ITM TCR: TSENA Mask */ + +#define ITM_TCR_ITMENA_Pos 0U /*!< ITM TCR: ITM Enable bit Position */ +#define ITM_TCR_ITMENA_Msk (1UL /*<< ITM_TCR_ITMENA_Pos*/) /*!< ITM TCR: ITM Enable bit Mask */ + +/* ITM Integration Write Register Definitions */ +#define ITM_IWR_ATVALIDM_Pos 0U /*!< ITM IWR: ATVALIDM Position */ +#define ITM_IWR_ATVALIDM_Msk (1UL /*<< ITM_IWR_ATVALIDM_Pos*/) /*!< ITM IWR: ATVALIDM Mask */ + +/* ITM Integration Read Register Definitions */ +#define ITM_IRR_ATREADYM_Pos 0U /*!< ITM IRR: ATREADYM Position */ +#define ITM_IRR_ATREADYM_Msk (1UL /*<< ITM_IRR_ATREADYM_Pos*/) /*!< ITM IRR: ATREADYM Mask */ + +/* ITM Integration Mode Control Register Definitions */ +#define ITM_IMCR_INTEGRATION_Pos 0U /*!< ITM IMCR: INTEGRATION Position */ +#define ITM_IMCR_INTEGRATION_Msk (1UL /*<< ITM_IMCR_INTEGRATION_Pos*/) /*!< ITM IMCR: INTEGRATION Mask */ + +/* ITM Lock Status Register Definitions */ +#define ITM_LSR_ByteAcc_Pos 2U /*!< ITM LSR: ByteAcc Position */ +#define ITM_LSR_ByteAcc_Msk (1UL << ITM_LSR_ByteAcc_Pos) /*!< ITM LSR: ByteAcc Mask */ + +#define ITM_LSR_Access_Pos 1U /*!< ITM LSR: Access Position */ +#define ITM_LSR_Access_Msk (1UL << ITM_LSR_Access_Pos) /*!< ITM LSR: Access Mask */ + +#define ITM_LSR_Present_Pos 0U /*!< ITM LSR: Present Position */ +#define ITM_LSR_Present_Msk (1UL /*<< ITM_LSR_Present_Pos*/) /*!< ITM LSR: Present Mask */ + +/*@}*/ /* end of group CMSIS_ITM */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_DWT Data Watchpoint and Trace (DWT) + \brief Type definitions for the Data Watchpoint and Trace (DWT) + @{ + */ + +/** + \brief Structure type to access the Data Watchpoint and Trace Register (DWT). 
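+ 
+ \note Usage sketch (illustrative only): measuring elapsed core cycles with CYCCNT. Trace must
+       be enabled via CoreDebug->DEMCR first; both symbols are defined later in this header.
+ \code
+   CoreDebug->DEMCR |= CoreDebug_DEMCR_TRCENA_Msk;   // enable the DWT and ITM blocks
+   DWT->CYCCNT       = 0U;                           // reset the cycle counter
+   DWT->CTRL        |= DWT_CTRL_CYCCNTENA_Msk;       // start counting
+   // ... code under measurement ...
+   uint32_t cycles = DWT->CYCCNT;                    // elapsed core cycles
+ \endcode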
+ */ +typedef struct +{ + __IOM uint32_t CTRL; /*!< Offset: 0x000 (R/W) Control Register */ + __IOM uint32_t CYCCNT; /*!< Offset: 0x004 (R/W) Cycle Count Register */ + __IOM uint32_t CPICNT; /*!< Offset: 0x008 (R/W) CPI Count Register */ + __IOM uint32_t EXCCNT; /*!< Offset: 0x00C (R/W) Exception Overhead Count Register */ + __IOM uint32_t SLEEPCNT; /*!< Offset: 0x010 (R/W) Sleep Count Register */ + __IOM uint32_t LSUCNT; /*!< Offset: 0x014 (R/W) LSU Count Register */ + __IOM uint32_t FOLDCNT; /*!< Offset: 0x018 (R/W) Folded-instruction Count Register */ + __IM uint32_t PCSR; /*!< Offset: 0x01C (R/ ) Program Counter Sample Register */ + __IOM uint32_t COMP0; /*!< Offset: 0x020 (R/W) Comparator Register 0 */ + __IOM uint32_t MASK0; /*!< Offset: 0x024 (R/W) Mask Register 0 */ + __IOM uint32_t FUNCTION0; /*!< Offset: 0x028 (R/W) Function Register 0 */ + uint32_t RESERVED0[1U]; + __IOM uint32_t COMP1; /*!< Offset: 0x030 (R/W) Comparator Register 1 */ + __IOM uint32_t MASK1; /*!< Offset: 0x034 (R/W) Mask Register 1 */ + __IOM uint32_t FUNCTION1; /*!< Offset: 0x038 (R/W) Function Register 1 */ + uint32_t RESERVED1[1U]; + __IOM uint32_t COMP2; /*!< Offset: 0x040 (R/W) Comparator Register 2 */ + __IOM uint32_t MASK2; /*!< Offset: 0x044 (R/W) Mask Register 2 */ + __IOM uint32_t FUNCTION2; /*!< Offset: 0x048 (R/W) Function Register 2 */ + uint32_t RESERVED2[1U]; + __IOM uint32_t COMP3; /*!< Offset: 0x050 (R/W) Comparator Register 3 */ + __IOM uint32_t MASK3; /*!< Offset: 0x054 (R/W) Mask Register 3 */ + __IOM uint32_t FUNCTION3; /*!< Offset: 0x058 (R/W) Function Register 3 */ + uint32_t RESERVED3[981U]; + __OM uint32_t LAR; /*!< Offset: 0xFB0 ( W) Lock Access Register */ + __IM uint32_t LSR; /*!< Offset: 0xFB4 (R ) Lock Status Register */ +} DWT_Type; + +/* DWT Control Register Definitions */ +#define DWT_CTRL_NUMCOMP_Pos 28U /*!< DWT CTRL: NUMCOMP Position */ +#define DWT_CTRL_NUMCOMP_Msk (0xFUL << DWT_CTRL_NUMCOMP_Pos) /*!< DWT CTRL: NUMCOMP Mask */ + +#define DWT_CTRL_NOTRCPKT_Pos 27U /*!< DWT CTRL: NOTRCPKT Position */ +#define DWT_CTRL_NOTRCPKT_Msk (0x1UL << DWT_CTRL_NOTRCPKT_Pos) /*!< DWT CTRL: NOTRCPKT Mask */ + +#define DWT_CTRL_NOEXTTRIG_Pos 26U /*!< DWT CTRL: NOEXTTRIG Position */ +#define DWT_CTRL_NOEXTTRIG_Msk (0x1UL << DWT_CTRL_NOEXTTRIG_Pos) /*!< DWT CTRL: NOEXTTRIG Mask */ + +#define DWT_CTRL_NOCYCCNT_Pos 25U /*!< DWT CTRL: NOCYCCNT Position */ +#define DWT_CTRL_NOCYCCNT_Msk (0x1UL << DWT_CTRL_NOCYCCNT_Pos) /*!< DWT CTRL: NOCYCCNT Mask */ + +#define DWT_CTRL_NOPRFCNT_Pos 24U /*!< DWT CTRL: NOPRFCNT Position */ +#define DWT_CTRL_NOPRFCNT_Msk (0x1UL << DWT_CTRL_NOPRFCNT_Pos) /*!< DWT CTRL: NOPRFCNT Mask */ + +#define DWT_CTRL_CYCEVTENA_Pos 22U /*!< DWT CTRL: CYCEVTENA Position */ +#define DWT_CTRL_CYCEVTENA_Msk (0x1UL << DWT_CTRL_CYCEVTENA_Pos) /*!< DWT CTRL: CYCEVTENA Mask */ + +#define DWT_CTRL_FOLDEVTENA_Pos 21U /*!< DWT CTRL: FOLDEVTENA Position */ +#define DWT_CTRL_FOLDEVTENA_Msk (0x1UL << DWT_CTRL_FOLDEVTENA_Pos) /*!< DWT CTRL: FOLDEVTENA Mask */ + +#define DWT_CTRL_LSUEVTENA_Pos 20U /*!< DWT CTRL: LSUEVTENA Position */ +#define DWT_CTRL_LSUEVTENA_Msk (0x1UL << DWT_CTRL_LSUEVTENA_Pos) /*!< DWT CTRL: LSUEVTENA Mask */ + +#define DWT_CTRL_SLEEPEVTENA_Pos 19U /*!< DWT CTRL: SLEEPEVTENA Position */ +#define DWT_CTRL_SLEEPEVTENA_Msk (0x1UL << DWT_CTRL_SLEEPEVTENA_Pos) /*!< DWT CTRL: SLEEPEVTENA Mask */ + +#define DWT_CTRL_EXCEVTENA_Pos 18U /*!< DWT CTRL: EXCEVTENA Position */ +#define DWT_CTRL_EXCEVTENA_Msk (0x1UL << DWT_CTRL_EXCEVTENA_Pos) /*!< DWT CTRL: EXCEVTENA Mask */ + +#define 
DWT_CTRL_CPIEVTENA_Pos 17U /*!< DWT CTRL: CPIEVTENA Position */ +#define DWT_CTRL_CPIEVTENA_Msk (0x1UL << DWT_CTRL_CPIEVTENA_Pos) /*!< DWT CTRL: CPIEVTENA Mask */ + +#define DWT_CTRL_EXCTRCENA_Pos 16U /*!< DWT CTRL: EXCTRCENA Position */ +#define DWT_CTRL_EXCTRCENA_Msk (0x1UL << DWT_CTRL_EXCTRCENA_Pos) /*!< DWT CTRL: EXCTRCENA Mask */ + +#define DWT_CTRL_PCSAMPLENA_Pos 12U /*!< DWT CTRL: PCSAMPLENA Position */ +#define DWT_CTRL_PCSAMPLENA_Msk (0x1UL << DWT_CTRL_PCSAMPLENA_Pos) /*!< DWT CTRL: PCSAMPLENA Mask */ + +#define DWT_CTRL_SYNCTAP_Pos 10U /*!< DWT CTRL: SYNCTAP Position */ +#define DWT_CTRL_SYNCTAP_Msk (0x3UL << DWT_CTRL_SYNCTAP_Pos) /*!< DWT CTRL: SYNCTAP Mask */ + +#define DWT_CTRL_CYCTAP_Pos 9U /*!< DWT CTRL: CYCTAP Position */ +#define DWT_CTRL_CYCTAP_Msk (0x1UL << DWT_CTRL_CYCTAP_Pos) /*!< DWT CTRL: CYCTAP Mask */ + +#define DWT_CTRL_POSTINIT_Pos 5U /*!< DWT CTRL: POSTINIT Position */ +#define DWT_CTRL_POSTINIT_Msk (0xFUL << DWT_CTRL_POSTINIT_Pos) /*!< DWT CTRL: POSTINIT Mask */ + +#define DWT_CTRL_POSTPRESET_Pos 1U /*!< DWT CTRL: POSTPRESET Position */ +#define DWT_CTRL_POSTPRESET_Msk (0xFUL << DWT_CTRL_POSTPRESET_Pos) /*!< DWT CTRL: POSTPRESET Mask */ + +#define DWT_CTRL_CYCCNTENA_Pos 0U /*!< DWT CTRL: CYCCNTENA Position */ +#define DWT_CTRL_CYCCNTENA_Msk (0x1UL /*<< DWT_CTRL_CYCCNTENA_Pos*/) /*!< DWT CTRL: CYCCNTENA Mask */ + +/* DWT CPI Count Register Definitions */ +#define DWT_CPICNT_CPICNT_Pos 0U /*!< DWT CPICNT: CPICNT Position */ +#define DWT_CPICNT_CPICNT_Msk (0xFFUL /*<< DWT_CPICNT_CPICNT_Pos*/) /*!< DWT CPICNT: CPICNT Mask */ + +/* DWT Exception Overhead Count Register Definitions */ +#define DWT_EXCCNT_EXCCNT_Pos 0U /*!< DWT EXCCNT: EXCCNT Position */ +#define DWT_EXCCNT_EXCCNT_Msk (0xFFUL /*<< DWT_EXCCNT_EXCCNT_Pos*/) /*!< DWT EXCCNT: EXCCNT Mask */ + +/* DWT Sleep Count Register Definitions */ +#define DWT_SLEEPCNT_SLEEPCNT_Pos 0U /*!< DWT SLEEPCNT: SLEEPCNT Position */ +#define DWT_SLEEPCNT_SLEEPCNT_Msk (0xFFUL /*<< DWT_SLEEPCNT_SLEEPCNT_Pos*/) /*!< DWT SLEEPCNT: SLEEPCNT Mask */ + +/* DWT LSU Count Register Definitions */ +#define DWT_LSUCNT_LSUCNT_Pos 0U /*!< DWT LSUCNT: LSUCNT Position */ +#define DWT_LSUCNT_LSUCNT_Msk (0xFFUL /*<< DWT_LSUCNT_LSUCNT_Pos*/) /*!< DWT LSUCNT: LSUCNT Mask */ + +/* DWT Folded-instruction Count Register Definitions */ +#define DWT_FOLDCNT_FOLDCNT_Pos 0U /*!< DWT FOLDCNT: FOLDCNT Position */ +#define DWT_FOLDCNT_FOLDCNT_Msk (0xFFUL /*<< DWT_FOLDCNT_FOLDCNT_Pos*/) /*!< DWT FOLDCNT: FOLDCNT Mask */ + +/* DWT Comparator Mask Register Definitions */ +#define DWT_MASK_MASK_Pos 0U /*!< DWT MASK: MASK Position */ +#define DWT_MASK_MASK_Msk (0x1FUL /*<< DWT_MASK_MASK_Pos*/) /*!< DWT MASK: MASK Mask */ + +/* DWT Comparator Function Register Definitions */ +#define DWT_FUNCTION_MATCHED_Pos 24U /*!< DWT FUNCTION: MATCHED Position */ +#define DWT_FUNCTION_MATCHED_Msk (0x1UL << DWT_FUNCTION_MATCHED_Pos) /*!< DWT FUNCTION: MATCHED Mask */ + +#define DWT_FUNCTION_DATAVADDR1_Pos 16U /*!< DWT FUNCTION: DATAVADDR1 Position */ +#define DWT_FUNCTION_DATAVADDR1_Msk (0xFUL << DWT_FUNCTION_DATAVADDR1_Pos) /*!< DWT FUNCTION: DATAVADDR1 Mask */ + +#define DWT_FUNCTION_DATAVADDR0_Pos 12U /*!< DWT FUNCTION: DATAVADDR0 Position */ +#define DWT_FUNCTION_DATAVADDR0_Msk (0xFUL << DWT_FUNCTION_DATAVADDR0_Pos) /*!< DWT FUNCTION: DATAVADDR0 Mask */ + +#define DWT_FUNCTION_DATAVSIZE_Pos 10U /*!< DWT FUNCTION: DATAVSIZE Position */ +#define DWT_FUNCTION_DATAVSIZE_Msk (0x3UL << DWT_FUNCTION_DATAVSIZE_Pos) /*!< DWT FUNCTION: DATAVSIZE Mask */ + +#define 
DWT_FUNCTION_LNK1ENA_Pos 9U /*!< DWT FUNCTION: LNK1ENA Position */ +#define DWT_FUNCTION_LNK1ENA_Msk (0x1UL << DWT_FUNCTION_LNK1ENA_Pos) /*!< DWT FUNCTION: LNK1ENA Mask */ + +#define DWT_FUNCTION_DATAVMATCH_Pos 8U /*!< DWT FUNCTION: DATAVMATCH Position */ +#define DWT_FUNCTION_DATAVMATCH_Msk (0x1UL << DWT_FUNCTION_DATAVMATCH_Pos) /*!< DWT FUNCTION: DATAVMATCH Mask */ + +#define DWT_FUNCTION_CYCMATCH_Pos 7U /*!< DWT FUNCTION: CYCMATCH Position */ +#define DWT_FUNCTION_CYCMATCH_Msk (0x1UL << DWT_FUNCTION_CYCMATCH_Pos) /*!< DWT FUNCTION: CYCMATCH Mask */ + +#define DWT_FUNCTION_EMITRANGE_Pos 5U /*!< DWT FUNCTION: EMITRANGE Position */ +#define DWT_FUNCTION_EMITRANGE_Msk (0x1UL << DWT_FUNCTION_EMITRANGE_Pos) /*!< DWT FUNCTION: EMITRANGE Mask */ + +#define DWT_FUNCTION_FUNCTION_Pos 0U /*!< DWT FUNCTION: FUNCTION Position */ +#define DWT_FUNCTION_FUNCTION_Msk (0xFUL /*<< DWT_FUNCTION_FUNCTION_Pos*/) /*!< DWT FUNCTION: FUNCTION Mask */ + +/*@}*/ /* end of group CMSIS_DWT */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_TPI Trace Port Interface (TPI) + \brief Type definitions for the Trace Port Interface (TPI) + @{ + */ + +/** + \brief Structure type to access the Trace Port Interface Register (TPI). + */ +typedef struct +{ + __IM uint32_t SSPSR; /*!< Offset: 0x000 (R/ ) Supported Parallel Port Size Register */ + __IOM uint32_t CSPSR; /*!< Offset: 0x004 (R/W) Current Parallel Port Size Register */ + uint32_t RESERVED0[2U]; + __IOM uint32_t ACPR; /*!< Offset: 0x010 (R/W) Asynchronous Clock Prescaler Register */ + uint32_t RESERVED1[55U]; + __IOM uint32_t SPPR; /*!< Offset: 0x0F0 (R/W) Selected Pin Protocol Register */ + uint32_t RESERVED2[131U]; + __IM uint32_t FFSR; /*!< Offset: 0x300 (R/ ) Formatter and Flush Status Register */ + __IOM uint32_t FFCR; /*!< Offset: 0x304 (R/W) Formatter and Flush Control Register */ + __IM uint32_t FSCR; /*!< Offset: 0x308 (R/ ) Formatter Synchronization Counter Register */ + uint32_t RESERVED3[759U]; + __IM uint32_t TRIGGER; /*!< Offset: 0xEE8 (R/ ) TRIGGER Register */ + __IM uint32_t FIFO0; /*!< Offset: 0xEEC (R/ ) Integration ETM Data */ + __IM uint32_t ITATBCTR2; /*!< Offset: 0xEF0 (R/ ) ITATBCTR2 */ + uint32_t RESERVED4[1U]; + __IM uint32_t ITATBCTR0; /*!< Offset: 0xEF8 (R/ ) ITATBCTR0 */ + __IM uint32_t FIFO1; /*!< Offset: 0xEFC (R/ ) Integration ITM Data */ + __IOM uint32_t ITCTRL; /*!< Offset: 0xF00 (R/W) Integration Mode Control */ + uint32_t RESERVED5[39U]; + __IOM uint32_t CLAIMSET; /*!< Offset: 0xFA0 (R/W) Claim tag set */ + __IOM uint32_t CLAIMCLR; /*!< Offset: 0xFA4 (R/W) Claim tag clear */ + uint32_t RESERVED7[8U]; + __IM uint32_t DEVID; /*!< Offset: 0xFC8 (R/ ) TPIU_DEVID */ + __IM uint32_t DEVTYPE; /*!< Offset: 0xFCC (R/ ) TPIU_DEVTYPE */ +} TPI_Type; + +/* TPI Asynchronous Clock Prescaler Register Definitions */ +#define TPI_ACPR_PRESCALER_Pos 0U /*!< TPI ACPR: PRESCALER Position */ +#define TPI_ACPR_PRESCALER_Msk (0x1FFFUL /*<< TPI_ACPR_PRESCALER_Pos*/) /*!< TPI ACPR: PRESCALER Mask */ + +/* TPI Selected Pin Protocol Register Definitions */ +#define TPI_SPPR_TXMODE_Pos 0U /*!< TPI SPPR: TXMODE Position */ +#define TPI_SPPR_TXMODE_Msk (0x3UL /*<< TPI_SPPR_TXMODE_Pos*/) /*!< TPI SPPR: TXMODE Mask */ + +/* TPI Formatter and Flush Status Register Definitions */ +#define TPI_FFSR_FtNonStop_Pos 3U /*!< TPI FFSR: FtNonStop Position */ +#define TPI_FFSR_FtNonStop_Msk (0x1UL << TPI_FFSR_FtNonStop_Pos) /*!< TPI FFSR: FtNonStop Mask */ + +#define TPI_FFSR_TCPresent_Pos 2U /*!< TPI FFSR: TCPresent Position */ +#define TPI_FFSR_TCPresent_Msk 
(0x1UL << TPI_FFSR_TCPresent_Pos) /*!< TPI FFSR: TCPresent Mask */ + +#define TPI_FFSR_FtStopped_Pos 1U /*!< TPI FFSR: FtStopped Position */ +#define TPI_FFSR_FtStopped_Msk (0x1UL << TPI_FFSR_FtStopped_Pos) /*!< TPI FFSR: FtStopped Mask */ + +#define TPI_FFSR_FlInProg_Pos 0U /*!< TPI FFSR: FlInProg Position */ +#define TPI_FFSR_FlInProg_Msk (0x1UL /*<< TPI_FFSR_FlInProg_Pos*/) /*!< TPI FFSR: FlInProg Mask */ + +/* TPI Formatter and Flush Control Register Definitions */ +#define TPI_FFCR_TrigIn_Pos 8U /*!< TPI FFCR: TrigIn Position */ +#define TPI_FFCR_TrigIn_Msk (0x1UL << TPI_FFCR_TrigIn_Pos) /*!< TPI FFCR: TrigIn Mask */ + +#define TPI_FFCR_EnFCont_Pos 1U /*!< TPI FFCR: EnFCont Position */ +#define TPI_FFCR_EnFCont_Msk (0x1UL << TPI_FFCR_EnFCont_Pos) /*!< TPI FFCR: EnFCont Mask */ + +/* TPI TRIGGER Register Definitions */ +#define TPI_TRIGGER_TRIGGER_Pos 0U /*!< TPI TRIGGER: TRIGGER Position */ +#define TPI_TRIGGER_TRIGGER_Msk (0x1UL /*<< TPI_TRIGGER_TRIGGER_Pos*/) /*!< TPI TRIGGER: TRIGGER Mask */ + +/* TPI Integration ETM Data Register Definitions (FIFO0) */ +#define TPI_FIFO0_ITM_ATVALID_Pos 29U /*!< TPI FIFO0: ITM_ATVALID Position */ +#define TPI_FIFO0_ITM_ATVALID_Msk (0x3UL << TPI_FIFO0_ITM_ATVALID_Pos) /*!< TPI FIFO0: ITM_ATVALID Mask */ + +#define TPI_FIFO0_ITM_bytecount_Pos 27U /*!< TPI FIFO0: ITM_bytecount Position */ +#define TPI_FIFO0_ITM_bytecount_Msk (0x3UL << TPI_FIFO0_ITM_bytecount_Pos) /*!< TPI FIFO0: ITM_bytecount Mask */ + +#define TPI_FIFO0_ETM_ATVALID_Pos 26U /*!< TPI FIFO0: ETM_ATVALID Position */ +#define TPI_FIFO0_ETM_ATVALID_Msk (0x3UL << TPI_FIFO0_ETM_ATVALID_Pos) /*!< TPI FIFO0: ETM_ATVALID Mask */ + +#define TPI_FIFO0_ETM_bytecount_Pos 24U /*!< TPI FIFO0: ETM_bytecount Position */ +#define TPI_FIFO0_ETM_bytecount_Msk (0x3UL << TPI_FIFO0_ETM_bytecount_Pos) /*!< TPI FIFO0: ETM_bytecount Mask */ + +#define TPI_FIFO0_ETM2_Pos 16U /*!< TPI FIFO0: ETM2 Position */ +#define TPI_FIFO0_ETM2_Msk (0xFFUL << TPI_FIFO0_ETM2_Pos) /*!< TPI FIFO0: ETM2 Mask */ + +#define TPI_FIFO0_ETM1_Pos 8U /*!< TPI FIFO0: ETM1 Position */ +#define TPI_FIFO0_ETM1_Msk (0xFFUL << TPI_FIFO0_ETM1_Pos) /*!< TPI FIFO0: ETM1 Mask */ + +#define TPI_FIFO0_ETM0_Pos 0U /*!< TPI FIFO0: ETM0 Position */ +#define TPI_FIFO0_ETM0_Msk (0xFFUL /*<< TPI_FIFO0_ETM0_Pos*/) /*!< TPI FIFO0: ETM0 Mask */ + +/* TPI ITATBCTR2 Register Definitions */ +#define TPI_ITATBCTR2_ATREADY2_Pos 0U /*!< TPI ITATBCTR2: ATREADY2 Position */ +#define TPI_ITATBCTR2_ATREADY2_Msk (0x1UL /*<< TPI_ITATBCTR2_ATREADY2_Pos*/) /*!< TPI ITATBCTR2: ATREADY2 Mask */ + +#define TPI_ITATBCTR2_ATREADY1_Pos 0U /*!< TPI ITATBCTR2: ATREADY1 Position */ +#define TPI_ITATBCTR2_ATREADY1_Msk (0x1UL /*<< TPI_ITATBCTR2_ATREADY1_Pos*/) /*!< TPI ITATBCTR2: ATREADY1 Mask */ + +/* TPI Integration ITM Data Register Definitions (FIFO1) */ +#define TPI_FIFO1_ITM_ATVALID_Pos 29U /*!< TPI FIFO1: ITM_ATVALID Position */ +#define TPI_FIFO1_ITM_ATVALID_Msk (0x3UL << TPI_FIFO1_ITM_ATVALID_Pos) /*!< TPI FIFO1: ITM_ATVALID Mask */ + +#define TPI_FIFO1_ITM_bytecount_Pos 27U /*!< TPI FIFO1: ITM_bytecount Position */ +#define TPI_FIFO1_ITM_bytecount_Msk (0x3UL << TPI_FIFO1_ITM_bytecount_Pos) /*!< TPI FIFO1: ITM_bytecount Mask */ + +#define TPI_FIFO1_ETM_ATVALID_Pos 26U /*!< TPI FIFO1: ETM_ATVALID Position */ +#define TPI_FIFO1_ETM_ATVALID_Msk (0x3UL << TPI_FIFO1_ETM_ATVALID_Pos) /*!< TPI FIFO1: ETM_ATVALID Mask */ + +#define TPI_FIFO1_ETM_bytecount_Pos 24U /*!< TPI FIFO1: ETM_bytecount Position */ +#define TPI_FIFO1_ETM_bytecount_Msk (0x3UL << TPI_FIFO1_ETM_bytecount_Pos) 
/*!< TPI FIFO1: ETM_bytecount Mask */ + +#define TPI_FIFO1_ITM2_Pos 16U /*!< TPI FIFO1: ITM2 Position */ +#define TPI_FIFO1_ITM2_Msk (0xFFUL << TPI_FIFO1_ITM2_Pos) /*!< TPI FIFO1: ITM2 Mask */ + +#define TPI_FIFO1_ITM1_Pos 8U /*!< TPI FIFO1: ITM1 Position */ +#define TPI_FIFO1_ITM1_Msk (0xFFUL << TPI_FIFO1_ITM1_Pos) /*!< TPI FIFO1: ITM1 Mask */ + +#define TPI_FIFO1_ITM0_Pos 0U /*!< TPI FIFO1: ITM0 Position */ +#define TPI_FIFO1_ITM0_Msk (0xFFUL /*<< TPI_FIFO1_ITM0_Pos*/) /*!< TPI FIFO1: ITM0 Mask */ + +/* TPI ITATBCTR0 Register Definitions */ +#define TPI_ITATBCTR0_ATREADY2_Pos 0U /*!< TPI ITATBCTR0: ATREADY2 Position */ +#define TPI_ITATBCTR0_ATREADY2_Msk (0x1UL /*<< TPI_ITATBCTR0_ATREADY2_Pos*/) /*!< TPI ITATBCTR0: ATREADY2 Mask */ + +#define TPI_ITATBCTR0_ATREADY1_Pos 0U /*!< TPI ITATBCTR0: ATREADY1 Position */ +#define TPI_ITATBCTR0_ATREADY1_Msk (0x1UL /*<< TPI_ITATBCTR0_ATREADY1_Pos*/) /*!< TPI ITATBCTR0: ATREADY1 Mask */ + +/* TPI Integration Mode Control Register Definitions */ +#define TPI_ITCTRL_Mode_Pos 0U /*!< TPI ITCTRL: Mode Position */ +#define TPI_ITCTRL_Mode_Msk (0x3UL /*<< TPI_ITCTRL_Mode_Pos*/) /*!< TPI ITCTRL: Mode Mask */ + +/* TPI DEVID Register Definitions */ +#define TPI_DEVID_NRZVALID_Pos 11U /*!< TPI DEVID: NRZVALID Position */ +#define TPI_DEVID_NRZVALID_Msk (0x1UL << TPI_DEVID_NRZVALID_Pos) /*!< TPI DEVID: NRZVALID Mask */ + +#define TPI_DEVID_MANCVALID_Pos 10U /*!< TPI DEVID: MANCVALID Position */ +#define TPI_DEVID_MANCVALID_Msk (0x1UL << TPI_DEVID_MANCVALID_Pos) /*!< TPI DEVID: MANCVALID Mask */ + +#define TPI_DEVID_PTINVALID_Pos 9U /*!< TPI DEVID: PTINVALID Position */ +#define TPI_DEVID_PTINVALID_Msk (0x1UL << TPI_DEVID_PTINVALID_Pos) /*!< TPI DEVID: PTINVALID Mask */ + +#define TPI_DEVID_MinBufSz_Pos 6U /*!< TPI DEVID: MinBufSz Position */ +#define TPI_DEVID_MinBufSz_Msk (0x7UL << TPI_DEVID_MinBufSz_Pos) /*!< TPI DEVID: MinBufSz Mask */ + +#define TPI_DEVID_AsynClkIn_Pos 5U /*!< TPI DEVID: AsynClkIn Position */ +#define TPI_DEVID_AsynClkIn_Msk (0x1UL << TPI_DEVID_AsynClkIn_Pos) /*!< TPI DEVID: AsynClkIn Mask */ + +#define TPI_DEVID_NrTraceInput_Pos 0U /*!< TPI DEVID: NrTraceInput Position */ +#define TPI_DEVID_NrTraceInput_Msk (0x1FUL /*<< TPI_DEVID_NrTraceInput_Pos*/) /*!< TPI DEVID: NrTraceInput Mask */ + +/* TPI DEVTYPE Register Definitions */ +#define TPI_DEVTYPE_SubType_Pos 4U /*!< TPI DEVTYPE: SubType Position */ +#define TPI_DEVTYPE_SubType_Msk (0xFUL /*<< TPI_DEVTYPE_SubType_Pos*/) /*!< TPI DEVTYPE: SubType Mask */ + +#define TPI_DEVTYPE_MajorType_Pos 0U /*!< TPI DEVTYPE: MajorType Position */ +#define TPI_DEVTYPE_MajorType_Msk (0xFUL << TPI_DEVTYPE_MajorType_Pos) /*!< TPI DEVTYPE: MajorType Mask */ + +/*@}*/ /* end of group CMSIS_TPI */ + + +#if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_MPU Memory Protection Unit (MPU) + \brief Type definitions for the Memory Protection Unit (MPU) + @{ + */ + +/** + \brief Structure type to access the Memory Protection Unit (MPU). 
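+ 
+ \note Usage sketch (illustrative only): programming a single region through the raw registers
+       and the bit fields defined below. The base address, size and access attributes are
+       placeholders; projects normally use the helper functions from mpu_armv7.h instead.
+ \code
+   __DMB();
+   MPU->CTRL = 0U;                                   // disable the MPU while reconfiguring
+   MPU->RNR  = 0U;                                   // select region 0
+   MPU->RBAR = 0x20000000UL;                         // region base address (placeholder)
+   MPU->RASR = (17UL << MPU_RASR_SIZE_Pos) |         // size field 17 -> 2^(17+1) = 256 KB
+               (3UL  << MPU_RASR_AP_Pos)   |         // full access (placeholder attributes)
+               MPU_RASR_ENABLE_Msk;                  // enable the region
+   MPU->CTRL = MPU_CTRL_ENABLE_Msk |                 // enable the MPU
+               MPU_CTRL_PRIVDEFENA_Msk;              // keep the default map for privileged code
+   __DSB();
+   __ISB();
+ \endcode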
+ */ +typedef struct +{ + __IM uint32_t TYPE; /*!< Offset: 0x000 (R/ ) MPU Type Register */ + __IOM uint32_t CTRL; /*!< Offset: 0x004 (R/W) MPU Control Register */ + __IOM uint32_t RNR; /*!< Offset: 0x008 (R/W) MPU Region RNRber Register */ + __IOM uint32_t RBAR; /*!< Offset: 0x00C (R/W) MPU Region Base Address Register */ + __IOM uint32_t RASR; /*!< Offset: 0x010 (R/W) MPU Region Attribute and Size Register */ + __IOM uint32_t RBAR_A1; /*!< Offset: 0x014 (R/W) MPU Alias 1 Region Base Address Register */ + __IOM uint32_t RASR_A1; /*!< Offset: 0x018 (R/W) MPU Alias 1 Region Attribute and Size Register */ + __IOM uint32_t RBAR_A2; /*!< Offset: 0x01C (R/W) MPU Alias 2 Region Base Address Register */ + __IOM uint32_t RASR_A2; /*!< Offset: 0x020 (R/W) MPU Alias 2 Region Attribute and Size Register */ + __IOM uint32_t RBAR_A3; /*!< Offset: 0x024 (R/W) MPU Alias 3 Region Base Address Register */ + __IOM uint32_t RASR_A3; /*!< Offset: 0x028 (R/W) MPU Alias 3 Region Attribute and Size Register */ +} MPU_Type; + +#define MPU_TYPE_RALIASES 4U + +/* MPU Type Register Definitions */ +#define MPU_TYPE_IREGION_Pos 16U /*!< MPU TYPE: IREGION Position */ +#define MPU_TYPE_IREGION_Msk (0xFFUL << MPU_TYPE_IREGION_Pos) /*!< MPU TYPE: IREGION Mask */ + +#define MPU_TYPE_DREGION_Pos 8U /*!< MPU TYPE: DREGION Position */ +#define MPU_TYPE_DREGION_Msk (0xFFUL << MPU_TYPE_DREGION_Pos) /*!< MPU TYPE: DREGION Mask */ + +#define MPU_TYPE_SEPARATE_Pos 0U /*!< MPU TYPE: SEPARATE Position */ +#define MPU_TYPE_SEPARATE_Msk (1UL /*<< MPU_TYPE_SEPARATE_Pos*/) /*!< MPU TYPE: SEPARATE Mask */ + +/* MPU Control Register Definitions */ +#define MPU_CTRL_PRIVDEFENA_Pos 2U /*!< MPU CTRL: PRIVDEFENA Position */ +#define MPU_CTRL_PRIVDEFENA_Msk (1UL << MPU_CTRL_PRIVDEFENA_Pos) /*!< MPU CTRL: PRIVDEFENA Mask */ + +#define MPU_CTRL_HFNMIENA_Pos 1U /*!< MPU CTRL: HFNMIENA Position */ +#define MPU_CTRL_HFNMIENA_Msk (1UL << MPU_CTRL_HFNMIENA_Pos) /*!< MPU CTRL: HFNMIENA Mask */ + +#define MPU_CTRL_ENABLE_Pos 0U /*!< MPU CTRL: ENABLE Position */ +#define MPU_CTRL_ENABLE_Msk (1UL /*<< MPU_CTRL_ENABLE_Pos*/) /*!< MPU CTRL: ENABLE Mask */ + +/* MPU Region Number Register Definitions */ +#define MPU_RNR_REGION_Pos 0U /*!< MPU RNR: REGION Position */ +#define MPU_RNR_REGION_Msk (0xFFUL /*<< MPU_RNR_REGION_Pos*/) /*!< MPU RNR: REGION Mask */ + +/* MPU Region Base Address Register Definitions */ +#define MPU_RBAR_ADDR_Pos 5U /*!< MPU RBAR: ADDR Position */ +#define MPU_RBAR_ADDR_Msk (0x7FFFFFFUL << MPU_RBAR_ADDR_Pos) /*!< MPU RBAR: ADDR Mask */ + +#define MPU_RBAR_VALID_Pos 4U /*!< MPU RBAR: VALID Position */ +#define MPU_RBAR_VALID_Msk (1UL << MPU_RBAR_VALID_Pos) /*!< MPU RBAR: VALID Mask */ + +#define MPU_RBAR_REGION_Pos 0U /*!< MPU RBAR: REGION Position */ +#define MPU_RBAR_REGION_Msk (0xFUL /*<< MPU_RBAR_REGION_Pos*/) /*!< MPU RBAR: REGION Mask */ + +/* MPU Region Attribute and Size Register Definitions */ +#define MPU_RASR_ATTRS_Pos 16U /*!< MPU RASR: MPU Region Attribute field Position */ +#define MPU_RASR_ATTRS_Msk (0xFFFFUL << MPU_RASR_ATTRS_Pos) /*!< MPU RASR: MPU Region Attribute field Mask */ + +#define MPU_RASR_XN_Pos 28U /*!< MPU RASR: ATTRS.XN Position */ +#define MPU_RASR_XN_Msk (1UL << MPU_RASR_XN_Pos) /*!< MPU RASR: ATTRS.XN Mask */ + +#define MPU_RASR_AP_Pos 24U /*!< MPU RASR: ATTRS.AP Position */ +#define MPU_RASR_AP_Msk (0x7UL << MPU_RASR_AP_Pos) /*!< MPU RASR: ATTRS.AP Mask */ + +#define MPU_RASR_TEX_Pos 19U /*!< MPU RASR: ATTRS.TEX Position */ +#define MPU_RASR_TEX_Msk (0x7UL << MPU_RASR_TEX_Pos) /*!< MPU RASR: ATTRS.TEX 
Mask */ + +#define MPU_RASR_S_Pos 18U /*!< MPU RASR: ATTRS.S Position */ +#define MPU_RASR_S_Msk (1UL << MPU_RASR_S_Pos) /*!< MPU RASR: ATTRS.S Mask */ + +#define MPU_RASR_C_Pos 17U /*!< MPU RASR: ATTRS.C Position */ +#define MPU_RASR_C_Msk (1UL << MPU_RASR_C_Pos) /*!< MPU RASR: ATTRS.C Mask */ + +#define MPU_RASR_B_Pos 16U /*!< MPU RASR: ATTRS.B Position */ +#define MPU_RASR_B_Msk (1UL << MPU_RASR_B_Pos) /*!< MPU RASR: ATTRS.B Mask */ + +#define MPU_RASR_SRD_Pos 8U /*!< MPU RASR: Sub-Region Disable Position */ +#define MPU_RASR_SRD_Msk (0xFFUL << MPU_RASR_SRD_Pos) /*!< MPU RASR: Sub-Region Disable Mask */ + +#define MPU_RASR_SIZE_Pos 1U /*!< MPU RASR: Region Size Field Position */ +#define MPU_RASR_SIZE_Msk (0x1FUL << MPU_RASR_SIZE_Pos) /*!< MPU RASR: Region Size Field Mask */ + +#define MPU_RASR_ENABLE_Pos 0U /*!< MPU RASR: Region enable bit Position */ +#define MPU_RASR_ENABLE_Msk (1UL /*<< MPU_RASR_ENABLE_Pos*/) /*!< MPU RASR: Region enable bit Disable Mask */ + +/*@} end of group CMSIS_MPU */ +#endif /* defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_FPU Floating Point Unit (FPU) + \brief Type definitions for the Floating Point Unit (FPU) + @{ + */ + +/** + \brief Structure type to access the Floating Point Unit (FPU). + */ +typedef struct +{ + uint32_t RESERVED0[1U]; + __IOM uint32_t FPCCR; /*!< Offset: 0x004 (R/W) Floating-Point Context Control Register */ + __IOM uint32_t FPCAR; /*!< Offset: 0x008 (R/W) Floating-Point Context Address Register */ + __IOM uint32_t FPDSCR; /*!< Offset: 0x00C (R/W) Floating-Point Default Status Control Register */ + __IM uint32_t MVFR0; /*!< Offset: 0x010 (R/ ) Media and FP Feature Register 0 */ + __IM uint32_t MVFR1; /*!< Offset: 0x014 (R/ ) Media and FP Feature Register 1 */ + __IM uint32_t MVFR2; /*!< Offset: 0x018 (R/ ) Media and FP Feature Register 2 */ +} FPU_Type; + +/* Floating-Point Context Control Register Definitions */ +#define FPU_FPCCR_ASPEN_Pos 31U /*!< FPCCR: ASPEN bit Position */ +#define FPU_FPCCR_ASPEN_Msk (1UL << FPU_FPCCR_ASPEN_Pos) /*!< FPCCR: ASPEN bit Mask */ + +#define FPU_FPCCR_LSPEN_Pos 30U /*!< FPCCR: LSPEN Position */ +#define FPU_FPCCR_LSPEN_Msk (1UL << FPU_FPCCR_LSPEN_Pos) /*!< FPCCR: LSPEN bit Mask */ + +#define FPU_FPCCR_MONRDY_Pos 8U /*!< FPCCR: MONRDY Position */ +#define FPU_FPCCR_MONRDY_Msk (1UL << FPU_FPCCR_MONRDY_Pos) /*!< FPCCR: MONRDY bit Mask */ + +#define FPU_FPCCR_BFRDY_Pos 6U /*!< FPCCR: BFRDY Position */ +#define FPU_FPCCR_BFRDY_Msk (1UL << FPU_FPCCR_BFRDY_Pos) /*!< FPCCR: BFRDY bit Mask */ + +#define FPU_FPCCR_MMRDY_Pos 5U /*!< FPCCR: MMRDY Position */ +#define FPU_FPCCR_MMRDY_Msk (1UL << FPU_FPCCR_MMRDY_Pos) /*!< FPCCR: MMRDY bit Mask */ + +#define FPU_FPCCR_HFRDY_Pos 4U /*!< FPCCR: HFRDY Position */ +#define FPU_FPCCR_HFRDY_Msk (1UL << FPU_FPCCR_HFRDY_Pos) /*!< FPCCR: HFRDY bit Mask */ + +#define FPU_FPCCR_THREAD_Pos 3U /*!< FPCCR: processor mode bit Position */ +#define FPU_FPCCR_THREAD_Msk (1UL << FPU_FPCCR_THREAD_Pos) /*!< FPCCR: processor mode active bit Mask */ + +#define FPU_FPCCR_USER_Pos 1U /*!< FPCCR: privilege level bit Position */ +#define FPU_FPCCR_USER_Msk (1UL << FPU_FPCCR_USER_Pos) /*!< FPCCR: privilege level bit Mask */ + +#define FPU_FPCCR_LSPACT_Pos 0U /*!< FPCCR: Lazy state preservation active bit Position */ +#define FPU_FPCCR_LSPACT_Msk (1UL /*<< FPU_FPCCR_LSPACT_Pos*/) /*!< FPCCR: Lazy state preservation active bit Mask */ + +/* Floating-Point Context Address Register Definitions */ +#define FPU_FPCAR_ADDRESS_Pos 3U /*!< 
FPCAR: ADDRESS bit Position */ +#define FPU_FPCAR_ADDRESS_Msk (0x1FFFFFFFUL << FPU_FPCAR_ADDRESS_Pos) /*!< FPCAR: ADDRESS bit Mask */ + +/* Floating-Point Default Status Control Register Definitions */ +#define FPU_FPDSCR_AHP_Pos 26U /*!< FPDSCR: AHP bit Position */ +#define FPU_FPDSCR_AHP_Msk (1UL << FPU_FPDSCR_AHP_Pos) /*!< FPDSCR: AHP bit Mask */ + +#define FPU_FPDSCR_DN_Pos 25U /*!< FPDSCR: DN bit Position */ +#define FPU_FPDSCR_DN_Msk (1UL << FPU_FPDSCR_DN_Pos) /*!< FPDSCR: DN bit Mask */ + +#define FPU_FPDSCR_FZ_Pos 24U /*!< FPDSCR: FZ bit Position */ +#define FPU_FPDSCR_FZ_Msk (1UL << FPU_FPDSCR_FZ_Pos) /*!< FPDSCR: FZ bit Mask */ + +#define FPU_FPDSCR_RMode_Pos 22U /*!< FPDSCR: RMode bit Position */ +#define FPU_FPDSCR_RMode_Msk (3UL << FPU_FPDSCR_RMode_Pos) /*!< FPDSCR: RMode bit Mask */ + +/* Media and FP Feature Register 0 Definitions */ +#define FPU_MVFR0_FP_rounding_modes_Pos 28U /*!< MVFR0: FP rounding modes bits Position */ +#define FPU_MVFR0_FP_rounding_modes_Msk (0xFUL << FPU_MVFR0_FP_rounding_modes_Pos) /*!< MVFR0: FP rounding modes bits Mask */ + +#define FPU_MVFR0_Short_vectors_Pos 24U /*!< MVFR0: Short vectors bits Position */ +#define FPU_MVFR0_Short_vectors_Msk (0xFUL << FPU_MVFR0_Short_vectors_Pos) /*!< MVFR0: Short vectors bits Mask */ + +#define FPU_MVFR0_Square_root_Pos 20U /*!< MVFR0: Square root bits Position */ +#define FPU_MVFR0_Square_root_Msk (0xFUL << FPU_MVFR0_Square_root_Pos) /*!< MVFR0: Square root bits Mask */ + +#define FPU_MVFR0_Divide_Pos 16U /*!< MVFR0: Divide bits Position */ +#define FPU_MVFR0_Divide_Msk (0xFUL << FPU_MVFR0_Divide_Pos) /*!< MVFR0: Divide bits Mask */ + +#define FPU_MVFR0_FP_excep_trapping_Pos 12U /*!< MVFR0: FP exception trapping bits Position */ +#define FPU_MVFR0_FP_excep_trapping_Msk (0xFUL << FPU_MVFR0_FP_excep_trapping_Pos) /*!< MVFR0: FP exception trapping bits Mask */ + +#define FPU_MVFR0_Double_precision_Pos 8U /*!< MVFR0: Double-precision bits Position */ +#define FPU_MVFR0_Double_precision_Msk (0xFUL << FPU_MVFR0_Double_precision_Pos) /*!< MVFR0: Double-precision bits Mask */ + +#define FPU_MVFR0_Single_precision_Pos 4U /*!< MVFR0: Single-precision bits Position */ +#define FPU_MVFR0_Single_precision_Msk (0xFUL << FPU_MVFR0_Single_precision_Pos) /*!< MVFR0: Single-precision bits Mask */ + +#define FPU_MVFR0_A_SIMD_registers_Pos 0U /*!< MVFR0: A_SIMD registers bits Position */ +#define FPU_MVFR0_A_SIMD_registers_Msk (0xFUL /*<< FPU_MVFR0_A_SIMD_registers_Pos*/) /*!< MVFR0: A_SIMD registers bits Mask */ + +/* Media and FP Feature Register 1 Definitions */ +#define FPU_MVFR1_FP_fused_MAC_Pos 28U /*!< MVFR1: FP fused MAC bits Position */ +#define FPU_MVFR1_FP_fused_MAC_Msk (0xFUL << FPU_MVFR1_FP_fused_MAC_Pos) /*!< MVFR1: FP fused MAC bits Mask */ + +#define FPU_MVFR1_FP_HPFP_Pos 24U /*!< MVFR1: FP HPFP bits Position */ +#define FPU_MVFR1_FP_HPFP_Msk (0xFUL << FPU_MVFR1_FP_HPFP_Pos) /*!< MVFR1: FP HPFP bits Mask */ + +#define FPU_MVFR1_D_NaN_mode_Pos 4U /*!< MVFR1: D_NaN mode bits Position */ +#define FPU_MVFR1_D_NaN_mode_Msk (0xFUL << FPU_MVFR1_D_NaN_mode_Pos) /*!< MVFR1: D_NaN mode bits Mask */ + +#define FPU_MVFR1_FtZ_mode_Pos 0U /*!< MVFR1: FtZ mode bits Position */ +#define FPU_MVFR1_FtZ_mode_Msk (0xFUL /*<< FPU_MVFR1_FtZ_mode_Pos*/) /*!< MVFR1: FtZ mode bits Mask */ + +/* Media and FP Feature Register 2 Definitions */ + +/*@} end of group CMSIS_FPU */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_CoreDebug Core Debug Registers (CoreDebug) + \brief Type definitions for the Core Debug Registers + @{ + */ + 
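+/* Usage sketch (illustrative only): a common use of these registers is to check whether a
+   halting-debug session is active before blocking on trace or semihosting output, e.g.
+
+     if ((CoreDebug->DHCSR & CoreDebug_DHCSR_C_DEBUGEN_Msk) != 0UL)
+     {
+       // a debugger is connected; ITM/DWT output will be collected
+     }
+
+   The CoreDebug symbol and the bit masks used above are defined below. */
+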
+/** + \brief Structure type to access the Core Debug Register (CoreDebug). + */ +typedef struct +{ + __IOM uint32_t DHCSR; /*!< Offset: 0x000 (R/W) Debug Halting Control and Status Register */ + __OM uint32_t DCRSR; /*!< Offset: 0x004 ( /W) Debug Core Register Selector Register */ + __IOM uint32_t DCRDR; /*!< Offset: 0x008 (R/W) Debug Core Register Data Register */ + __IOM uint32_t DEMCR; /*!< Offset: 0x00C (R/W) Debug Exception and Monitor Control Register */ +} CoreDebug_Type; + +/* Debug Halting Control and Status Register Definitions */ +#define CoreDebug_DHCSR_DBGKEY_Pos 16U /*!< CoreDebug DHCSR: DBGKEY Position */ +#define CoreDebug_DHCSR_DBGKEY_Msk (0xFFFFUL << CoreDebug_DHCSR_DBGKEY_Pos) /*!< CoreDebug DHCSR: DBGKEY Mask */ + +#define CoreDebug_DHCSR_S_RESET_ST_Pos 25U /*!< CoreDebug DHCSR: S_RESET_ST Position */ +#define CoreDebug_DHCSR_S_RESET_ST_Msk (1UL << CoreDebug_DHCSR_S_RESET_ST_Pos) /*!< CoreDebug DHCSR: S_RESET_ST Mask */ + +#define CoreDebug_DHCSR_S_RETIRE_ST_Pos 24U /*!< CoreDebug DHCSR: S_RETIRE_ST Position */ +#define CoreDebug_DHCSR_S_RETIRE_ST_Msk (1UL << CoreDebug_DHCSR_S_RETIRE_ST_Pos) /*!< CoreDebug DHCSR: S_RETIRE_ST Mask */ + +#define CoreDebug_DHCSR_S_LOCKUP_Pos 19U /*!< CoreDebug DHCSR: S_LOCKUP Position */ +#define CoreDebug_DHCSR_S_LOCKUP_Msk (1UL << CoreDebug_DHCSR_S_LOCKUP_Pos) /*!< CoreDebug DHCSR: S_LOCKUP Mask */ + +#define CoreDebug_DHCSR_S_SLEEP_Pos 18U /*!< CoreDebug DHCSR: S_SLEEP Position */ +#define CoreDebug_DHCSR_S_SLEEP_Msk (1UL << CoreDebug_DHCSR_S_SLEEP_Pos) /*!< CoreDebug DHCSR: S_SLEEP Mask */ + +#define CoreDebug_DHCSR_S_HALT_Pos 17U /*!< CoreDebug DHCSR: S_HALT Position */ +#define CoreDebug_DHCSR_S_HALT_Msk (1UL << CoreDebug_DHCSR_S_HALT_Pos) /*!< CoreDebug DHCSR: S_HALT Mask */ + +#define CoreDebug_DHCSR_S_REGRDY_Pos 16U /*!< CoreDebug DHCSR: S_REGRDY Position */ +#define CoreDebug_DHCSR_S_REGRDY_Msk (1UL << CoreDebug_DHCSR_S_REGRDY_Pos) /*!< CoreDebug DHCSR: S_REGRDY Mask */ + +#define CoreDebug_DHCSR_C_SNAPSTALL_Pos 5U /*!< CoreDebug DHCSR: C_SNAPSTALL Position */ +#define CoreDebug_DHCSR_C_SNAPSTALL_Msk (1UL << CoreDebug_DHCSR_C_SNAPSTALL_Pos) /*!< CoreDebug DHCSR: C_SNAPSTALL Mask */ + +#define CoreDebug_DHCSR_C_MASKINTS_Pos 3U /*!< CoreDebug DHCSR: C_MASKINTS Position */ +#define CoreDebug_DHCSR_C_MASKINTS_Msk (1UL << CoreDebug_DHCSR_C_MASKINTS_Pos) /*!< CoreDebug DHCSR: C_MASKINTS Mask */ + +#define CoreDebug_DHCSR_C_STEP_Pos 2U /*!< CoreDebug DHCSR: C_STEP Position */ +#define CoreDebug_DHCSR_C_STEP_Msk (1UL << CoreDebug_DHCSR_C_STEP_Pos) /*!< CoreDebug DHCSR: C_STEP Mask */ + +#define CoreDebug_DHCSR_C_HALT_Pos 1U /*!< CoreDebug DHCSR: C_HALT Position */ +#define CoreDebug_DHCSR_C_HALT_Msk (1UL << CoreDebug_DHCSR_C_HALT_Pos) /*!< CoreDebug DHCSR: C_HALT Mask */ + +#define CoreDebug_DHCSR_C_DEBUGEN_Pos 0U /*!< CoreDebug DHCSR: C_DEBUGEN Position */ +#define CoreDebug_DHCSR_C_DEBUGEN_Msk (1UL /*<< CoreDebug_DHCSR_C_DEBUGEN_Pos*/) /*!< CoreDebug DHCSR: C_DEBUGEN Mask */ + +/* Debug Core Register Selector Register Definitions */ +#define CoreDebug_DCRSR_REGWnR_Pos 16U /*!< CoreDebug DCRSR: REGWnR Position */ +#define CoreDebug_DCRSR_REGWnR_Msk (1UL << CoreDebug_DCRSR_REGWnR_Pos) /*!< CoreDebug DCRSR: REGWnR Mask */ + +#define CoreDebug_DCRSR_REGSEL_Pos 0U /*!< CoreDebug DCRSR: REGSEL Position */ +#define CoreDebug_DCRSR_REGSEL_Msk (0x1FUL /*<< CoreDebug_DCRSR_REGSEL_Pos*/) /*!< CoreDebug DCRSR: REGSEL Mask */ + +/* Debug Exception and Monitor Control Register Definitions */ +#define CoreDebug_DEMCR_TRCENA_Pos 24U /*!< CoreDebug 
DEMCR: TRCENA Position */ +#define CoreDebug_DEMCR_TRCENA_Msk (1UL << CoreDebug_DEMCR_TRCENA_Pos) /*!< CoreDebug DEMCR: TRCENA Mask */ + +#define CoreDebug_DEMCR_MON_REQ_Pos 19U /*!< CoreDebug DEMCR: MON_REQ Position */ +#define CoreDebug_DEMCR_MON_REQ_Msk (1UL << CoreDebug_DEMCR_MON_REQ_Pos) /*!< CoreDebug DEMCR: MON_REQ Mask */ + +#define CoreDebug_DEMCR_MON_STEP_Pos 18U /*!< CoreDebug DEMCR: MON_STEP Position */ +#define CoreDebug_DEMCR_MON_STEP_Msk (1UL << CoreDebug_DEMCR_MON_STEP_Pos) /*!< CoreDebug DEMCR: MON_STEP Mask */ + +#define CoreDebug_DEMCR_MON_PEND_Pos 17U /*!< CoreDebug DEMCR: MON_PEND Position */ +#define CoreDebug_DEMCR_MON_PEND_Msk (1UL << CoreDebug_DEMCR_MON_PEND_Pos) /*!< CoreDebug DEMCR: MON_PEND Mask */ + +#define CoreDebug_DEMCR_MON_EN_Pos 16U /*!< CoreDebug DEMCR: MON_EN Position */ +#define CoreDebug_DEMCR_MON_EN_Msk (1UL << CoreDebug_DEMCR_MON_EN_Pos) /*!< CoreDebug DEMCR: MON_EN Mask */ + +#define CoreDebug_DEMCR_VC_HARDERR_Pos 10U /*!< CoreDebug DEMCR: VC_HARDERR Position */ +#define CoreDebug_DEMCR_VC_HARDERR_Msk (1UL << CoreDebug_DEMCR_VC_HARDERR_Pos) /*!< CoreDebug DEMCR: VC_HARDERR Mask */ + +#define CoreDebug_DEMCR_VC_INTERR_Pos 9U /*!< CoreDebug DEMCR: VC_INTERR Position */ +#define CoreDebug_DEMCR_VC_INTERR_Msk (1UL << CoreDebug_DEMCR_VC_INTERR_Pos) /*!< CoreDebug DEMCR: VC_INTERR Mask */ + +#define CoreDebug_DEMCR_VC_BUSERR_Pos 8U /*!< CoreDebug DEMCR: VC_BUSERR Position */ +#define CoreDebug_DEMCR_VC_BUSERR_Msk (1UL << CoreDebug_DEMCR_VC_BUSERR_Pos) /*!< CoreDebug DEMCR: VC_BUSERR Mask */ + +#define CoreDebug_DEMCR_VC_STATERR_Pos 7U /*!< CoreDebug DEMCR: VC_STATERR Position */ +#define CoreDebug_DEMCR_VC_STATERR_Msk (1UL << CoreDebug_DEMCR_VC_STATERR_Pos) /*!< CoreDebug DEMCR: VC_STATERR Mask */ + +#define CoreDebug_DEMCR_VC_CHKERR_Pos 6U /*!< CoreDebug DEMCR: VC_CHKERR Position */ +#define CoreDebug_DEMCR_VC_CHKERR_Msk (1UL << CoreDebug_DEMCR_VC_CHKERR_Pos) /*!< CoreDebug DEMCR: VC_CHKERR Mask */ + +#define CoreDebug_DEMCR_VC_NOCPERR_Pos 5U /*!< CoreDebug DEMCR: VC_NOCPERR Position */ +#define CoreDebug_DEMCR_VC_NOCPERR_Msk (1UL << CoreDebug_DEMCR_VC_NOCPERR_Pos) /*!< CoreDebug DEMCR: VC_NOCPERR Mask */ + +#define CoreDebug_DEMCR_VC_MMERR_Pos 4U /*!< CoreDebug DEMCR: VC_MMERR Position */ +#define CoreDebug_DEMCR_VC_MMERR_Msk (1UL << CoreDebug_DEMCR_VC_MMERR_Pos) /*!< CoreDebug DEMCR: VC_MMERR Mask */ + +#define CoreDebug_DEMCR_VC_CORERESET_Pos 0U /*!< CoreDebug DEMCR: VC_CORERESET Position */ +#define CoreDebug_DEMCR_VC_CORERESET_Msk (1UL /*<< CoreDebug_DEMCR_VC_CORERESET_Pos*/) /*!< CoreDebug DEMCR: VC_CORERESET Mask */ + +/*@} end of group CMSIS_CoreDebug */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_core_bitfield Core register bit field macros + \brief Macros for use with bit field definitions (xxx_Pos, xxx_Msk). + @{ + */ + +/** + \brief Mask and shift a bit field value for use in a register bit range. + \param[in] field Name of the register bit field. + \param[in] value Value of the bit field. This parameter is interpreted as an uint32_t type. + \return Masked and shifted value. +*/ +#define _VAL2FLD(field, value) (((uint32_t)(value) << field ## _Pos) & field ## _Msk) + +/** + \brief Mask and shift a register value to extract a bit filed value. + \param[in] field Name of the register bit field. + \param[in] value Value of register. This parameter is interpreted as an uint32_t type. + \return Masked and shifted bit field value. 
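+ 
+ \note Example (illustrative only), using the AIRCR PRIGROUP field defined earlier in this
+       header:
+ \code
+   uint32_t grouping = _FLD2VAL(SCB_AIRCR_PRIGROUP, SCB->AIRCR);   // extract the field value
+   uint32_t bits     = _VAL2FLD(SCB_AIRCR_PRIGROUP, grouping);     // shift/mask it back into place
+ \endcode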
+*/ +#define _FLD2VAL(field, value) (((uint32_t)(value) & field ## _Msk) >> field ## _Pos) + +/*@} end of group CMSIS_core_bitfield */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_core_base Core Definitions + \brief Definitions for base addresses, unions, and structures. + @{ + */ + +/* Memory mapping of Core Hardware */ +#define SCS_BASE (0xE000E000UL) /*!< System Control Space Base Address */ +#define ITM_BASE (0xE0000000UL) /*!< ITM Base Address */ +#define DWT_BASE (0xE0001000UL) /*!< DWT Base Address */ +#define TPI_BASE (0xE0040000UL) /*!< TPI Base Address */ +#define CoreDebug_BASE (0xE000EDF0UL) /*!< Core Debug Base Address */ +#define SysTick_BASE (SCS_BASE + 0x0010UL) /*!< SysTick Base Address */ +#define NVIC_BASE (SCS_BASE + 0x0100UL) /*!< NVIC Base Address */ +#define SCB_BASE (SCS_BASE + 0x0D00UL) /*!< System Control Block Base Address */ + +#define SCnSCB ((SCnSCB_Type *) SCS_BASE ) /*!< System control Register not in SCB */ +#define SCB ((SCB_Type *) SCB_BASE ) /*!< SCB configuration struct */ +#define SysTick ((SysTick_Type *) SysTick_BASE ) /*!< SysTick configuration struct */ +#define NVIC ((NVIC_Type *) NVIC_BASE ) /*!< NVIC configuration struct */ +#define ITM ((ITM_Type *) ITM_BASE ) /*!< ITM configuration struct */ +#define DWT ((DWT_Type *) DWT_BASE ) /*!< DWT configuration struct */ +#define TPI ((TPI_Type *) TPI_BASE ) /*!< TPI configuration struct */ +#define CoreDebug ((CoreDebug_Type *) CoreDebug_BASE) /*!< Core Debug configuration struct */ + +#if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) + #define MPU_BASE (SCS_BASE + 0x0D90UL) /*!< Memory Protection Unit */ + #define MPU ((MPU_Type *) MPU_BASE ) /*!< Memory Protection Unit */ +#endif + +#define FPU_BASE (SCS_BASE + 0x0F30UL) /*!< Floating Point Unit */ +#define FPU ((FPU_Type *) FPU_BASE ) /*!< Floating Point Unit */ + +/*@} */ + + + +/******************************************************************************* + * Hardware Abstraction Layer + Core Function Interface contains: + - Core NVIC Functions + - Core SysTick Functions + - Core Debug Functions + - Core Register Access Functions + ******************************************************************************/ +/** + \defgroup CMSIS_Core_FunctionInterface Functions and Instructions Reference +*/ + + + +/* ########################## NVIC functions #################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_NVICFunctions NVIC Functions + \brief Functions that manage interrupts and exceptions via the NVIC. 
+ @{ + */ + +#ifdef CMSIS_NVIC_VIRTUAL + #ifndef CMSIS_NVIC_VIRTUAL_HEADER_FILE + #define CMSIS_NVIC_VIRTUAL_HEADER_FILE "cmsis_nvic_virtual.h" + #endif + #include CMSIS_NVIC_VIRTUAL_HEADER_FILE +#else + #define NVIC_SetPriorityGrouping __NVIC_SetPriorityGrouping + #define NVIC_GetPriorityGrouping __NVIC_GetPriorityGrouping + #define NVIC_EnableIRQ __NVIC_EnableIRQ + #define NVIC_GetEnableIRQ __NVIC_GetEnableIRQ + #define NVIC_DisableIRQ __NVIC_DisableIRQ + #define NVIC_GetPendingIRQ __NVIC_GetPendingIRQ + #define NVIC_SetPendingIRQ __NVIC_SetPendingIRQ + #define NVIC_ClearPendingIRQ __NVIC_ClearPendingIRQ + #define NVIC_GetActive __NVIC_GetActive + #define NVIC_SetPriority __NVIC_SetPriority + #define NVIC_GetPriority __NVIC_GetPriority + #define NVIC_SystemReset __NVIC_SystemReset +#endif /* CMSIS_NVIC_VIRTUAL */ + +#ifdef CMSIS_VECTAB_VIRTUAL + #ifndef CMSIS_VECTAB_VIRTUAL_HEADER_FILE + #define CMSIS_VECTAB_VIRTUAL_HEADER_FILE "cmsis_vectab_virtual.h" + #endif + #include CMSIS_VECTAB_VIRTUAL_HEADER_FILE +#else + #define NVIC_SetVector __NVIC_SetVector + #define NVIC_GetVector __NVIC_GetVector +#endif /* (CMSIS_VECTAB_VIRTUAL) */ + +#define NVIC_USER_IRQ_OFFSET 16 + + +/* The following EXC_RETURN values are saved the LR on exception entry */ +#define EXC_RETURN_HANDLER (0xFFFFFFF1UL) /* return to Handler mode, uses MSP after return */ +#define EXC_RETURN_THREAD_MSP (0xFFFFFFF9UL) /* return to Thread mode, uses MSP after return */ +#define EXC_RETURN_THREAD_PSP (0xFFFFFFFDUL) /* return to Thread mode, uses PSP after return */ +#define EXC_RETURN_HANDLER_FPU (0xFFFFFFE1UL) /* return to Handler mode, uses MSP after return, restore floating-point state */ +#define EXC_RETURN_THREAD_MSP_FPU (0xFFFFFFE9UL) /* return to Thread mode, uses MSP after return, restore floating-point state */ +#define EXC_RETURN_THREAD_PSP_FPU (0xFFFFFFEDUL) /* return to Thread mode, uses PSP after return, restore floating-point state */ + + +/** + \brief Set Priority Grouping + \details Sets the priority grouping field using the required unlock sequence. + The parameter PriorityGroup is assigned to the field SCB->AIRCR [10:8] PRIGROUP field. + Only values from 0..7 are used. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS), the smallest possible priority group is set. + \param [in] PriorityGroup Priority grouping field. + */ +__STATIC_INLINE void __NVIC_SetPriorityGrouping(uint32_t PriorityGroup) +{ + uint32_t reg_value; + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + + reg_value = SCB->AIRCR; /* read old register configuration */ + reg_value &= ~((uint32_t)(SCB_AIRCR_VECTKEY_Msk | SCB_AIRCR_PRIGROUP_Msk)); /* clear bits to change */ + reg_value = (reg_value | + ((uint32_t)0x5FAUL << SCB_AIRCR_VECTKEY_Pos) | + (PriorityGroupTmp << SCB_AIRCR_PRIGROUP_Pos) ); /* Insert write key and priority group */ + SCB->AIRCR = reg_value; +} + + +/** + \brief Get Priority Grouping + \details Reads the priority grouping field from the NVIC Interrupt Controller. + \return Priority grouping field (SCB->AIRCR [10:8] PRIGROUP field). + */ +__STATIC_INLINE uint32_t __NVIC_GetPriorityGrouping(void) +{ + return ((uint32_t)((SCB->AIRCR & SCB_AIRCR_PRIGROUP_Msk) >> SCB_AIRCR_PRIGROUP_Pos)); +} + + +/** + \brief Enable Interrupt + \details Enables a device specific interrupt in the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. 
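+ \note Example (illustrative only); TIMx_IRQn stands for any device-specific interrupt number
+       taken from the device header.
+ \code
+   NVIC_SetPriority(TIMx_IRQn, 5U);   // configure the priority before enabling
+   NVIC_EnableIRQ(TIMx_IRQn);         // enable the interrupt in the NVIC
+ \endcode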
+ */ +__STATIC_INLINE void __NVIC_EnableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ISER[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Get Interrupt Enable status + \details Returns a device specific interrupt enable status from the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt is not enabled. + \return 1 Interrupt is enabled. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t __NVIC_GetEnableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->ISER[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Disable Interrupt + \details Disables a device specific interrupt in the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_DisableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ICER[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + __DSB(); + __ISB(); + } +} + + +/** + \brief Get Pending Interrupt + \details Reads the NVIC pending register and returns the pending bit for the specified device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt status is not pending. + \return 1 Interrupt status is pending. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t __NVIC_GetPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->ISPR[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Set Pending Interrupt + \details Sets the pending bit of a device specific interrupt in the NVIC pending register. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_SetPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ISPR[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Clear Pending Interrupt + \details Clears the pending bit of a device specific interrupt in the NVIC pending register. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_ClearPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ICPR[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Get Active Interrupt + \details Reads the active register in the NVIC and returns the active bit for the device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt status is not active. + \return 1 Interrupt status is active. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t __NVIC_GetActive(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->IABR[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Set Interrupt Priority + \details Sets the priority of a device specific interrupt or a processor exception. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \param [in] priority Priority to set. 
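+ \note Example (illustrative only): deriving the value from a preempt/sub priority pair under
+       the current grouping; TIMx_IRQn is a placeholder for a device-specific interrupt number.
+ \code
+   uint32_t grouping = NVIC_GetPriorityGrouping();
+   NVIC_SetPriority(TIMx_IRQn, NVIC_EncodePriority(grouping, 1U, 0U));
+ \endcode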
+ \note The priority cannot be set for every processor exception. + */ +__STATIC_INLINE void __NVIC_SetPriority(IRQn_Type IRQn, uint32_t priority) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->IP[((uint32_t)IRQn)] = (uint8_t)((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL); + } + else + { + SCB->SHPR[(((uint32_t)IRQn) & 0xFUL)-4UL] = (uint8_t)((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL); + } +} + + +/** + \brief Get Interrupt Priority + \details Reads the priority of a device specific interrupt or a processor exception. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \return Interrupt Priority. + Value is aligned automatically to the implemented priority bits of the microcontroller. + */ +__STATIC_INLINE uint32_t __NVIC_GetPriority(IRQn_Type IRQn) +{ + + if ((int32_t)(IRQn) >= 0) + { + return(((uint32_t)NVIC->IP[((uint32_t)IRQn)] >> (8U - __NVIC_PRIO_BITS))); + } + else + { + return(((uint32_t)SCB->SHPR[(((uint32_t)IRQn) & 0xFUL)-4UL] >> (8U - __NVIC_PRIO_BITS))); + } +} + + +/** + \brief Encode Priority + \details Encodes the priority for an interrupt with the given priority group, + preemptive priority value, and subpriority value. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS), the smallest possible priority group is set. + \param [in] PriorityGroup Used priority group. + \param [in] PreemptPriority Preemptive priority value (starting from 0). + \param [in] SubPriority Subpriority value (starting from 0). + \return Encoded priority. Value can be used in the function \ref NVIC_SetPriority(). + */ +__STATIC_INLINE uint32_t NVIC_EncodePriority (uint32_t PriorityGroup, uint32_t PreemptPriority, uint32_t SubPriority) +{ + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + uint32_t PreemptPriorityBits; + uint32_t SubPriorityBits; + + PreemptPriorityBits = ((7UL - PriorityGroupTmp) > (uint32_t)(__NVIC_PRIO_BITS)) ? (uint32_t)(__NVIC_PRIO_BITS) : (uint32_t)(7UL - PriorityGroupTmp); + SubPriorityBits = ((PriorityGroupTmp + (uint32_t)(__NVIC_PRIO_BITS)) < (uint32_t)7UL) ? (uint32_t)0UL : (uint32_t)((PriorityGroupTmp - 7UL) + (uint32_t)(__NVIC_PRIO_BITS)); + + return ( + ((PreemptPriority & (uint32_t)((1UL << (PreemptPriorityBits)) - 1UL)) << SubPriorityBits) | + ((SubPriority & (uint32_t)((1UL << (SubPriorityBits )) - 1UL))) + ); +} + + +/** + \brief Decode Priority + \details Decodes an interrupt priority value with a given priority group to + preemptive priority value and subpriority value. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS) the smallest possible priority group is set. + \param [in] Priority Priority value, which can be retrieved with the function \ref NVIC_GetPriority(). + \param [in] PriorityGroup Used priority group. + \param [out] pPreemptPriority Preemptive priority value (starting from 0). + \param [out] pSubPriority Subpriority value (starting from 0). + */ +__STATIC_INLINE void NVIC_DecodePriority (uint32_t Priority, uint32_t PriorityGroup, uint32_t* const pPreemptPriority, uint32_t* const pSubPriority) +{ + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + uint32_t PreemptPriorityBits; + uint32_t SubPriorityBits; + + PreemptPriorityBits = ((7UL - PriorityGroupTmp) > (uint32_t)(__NVIC_PRIO_BITS)) ? 
(uint32_t)(__NVIC_PRIO_BITS) : (uint32_t)(7UL - PriorityGroupTmp); + SubPriorityBits = ((PriorityGroupTmp + (uint32_t)(__NVIC_PRIO_BITS)) < (uint32_t)7UL) ? (uint32_t)0UL : (uint32_t)((PriorityGroupTmp - 7UL) + (uint32_t)(__NVIC_PRIO_BITS)); + + *pPreemptPriority = (Priority >> SubPriorityBits) & (uint32_t)((1UL << (PreemptPriorityBits)) - 1UL); + *pSubPriority = (Priority ) & (uint32_t)((1UL << (SubPriorityBits )) - 1UL); +} + + +/** + \brief Set Interrupt Vector + \details Sets an interrupt vector in SRAM based interrupt vector table. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + VTOR must been relocated to SRAM before. + \param [in] IRQn Interrupt number + \param [in] vector Address of interrupt handler function + */ +__STATIC_INLINE void __NVIC_SetVector(IRQn_Type IRQn, uint32_t vector) +{ + uint32_t *vectors = (uint32_t *)SCB->VTOR; + vectors[(int32_t)IRQn + NVIC_USER_IRQ_OFFSET] = vector; +} + + +/** + \brief Get Interrupt Vector + \details Reads an interrupt vector from interrupt vector table. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \return Address of interrupt handler function + */ +__STATIC_INLINE uint32_t __NVIC_GetVector(IRQn_Type IRQn) +{ + uint32_t *vectors = (uint32_t *)SCB->VTOR; + return vectors[(int32_t)IRQn + NVIC_USER_IRQ_OFFSET]; +} + + +/** + \brief System Reset + \details Initiates a system reset request to reset the MCU. + */ +__NO_RETURN __STATIC_INLINE void __NVIC_SystemReset(void) +{ + __DSB(); /* Ensure all outstanding memory accesses included + buffered write are completed before reset */ + SCB->AIRCR = (uint32_t)((0x5FAUL << SCB_AIRCR_VECTKEY_Pos) | + (SCB->AIRCR & SCB_AIRCR_PRIGROUP_Msk) | + SCB_AIRCR_SYSRESETREQ_Msk ); /* Keep priority group unchanged */ + __DSB(); /* Ensure completion of memory access */ + + for(;;) /* wait until reset */ + { + __NOP(); + } +} + +/*@} end of CMSIS_Core_NVICFunctions */ + +/* ########################## MPU functions #################################### */ + +#if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) + +#include "mpu_armv7.h" + +#endif + +/* ########################## FPU functions #################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_FpuFunctions FPU Functions + \brief Function that provides FPU type. + @{ + */ + +/** + \brief get FPU type + \details returns the FPU type + \returns + - \b 0: No FPU + - \b 1: Single precision FPU + - \b 2: Double + Single precision FPU + */ +__STATIC_INLINE uint32_t SCB_GetFPUType(void) +{ + uint32_t mvfr0; + + mvfr0 = SCB->MVFR0; + if ((mvfr0 & (FPU_MVFR0_Single_precision_Msk | FPU_MVFR0_Double_precision_Msk)) == 0x220U) + { + return 2U; /* Double + Single precision FPU */ + } + else if ((mvfr0 & (FPU_MVFR0_Single_precision_Msk | FPU_MVFR0_Double_precision_Msk)) == 0x020U) + { + return 1U; /* Single precision FPU */ + } + else + { + return 0U; /* No FPU */ + } +} + + +/*@} end of CMSIS_Core_FpuFunctions */ + + + +/* ########################## Cache functions #################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_CacheFunctions Cache Functions + \brief Functions that configure Instruction and Data cache. 
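+ 
+ \note Usage sketch (illustrative only): a typical Cortex-M7 startup sequence, plus a
+       clean-by-address before handing a buffer to a DMA peripheral. The buffer name tx_buf is
+       a placeholder; addresses and sizes should be 32-byte aligned.
+ \code
+   SCB_EnableICache();
+   SCB_EnableDCache();
+   // ... later, before starting a DMA transfer from tx_buf ...
+   SCB_CleanDCache_by_Addr((uint32_t *)tx_buf, (int32_t)sizeof(tx_buf));
+ \endcode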
+ @{ + */ + +/* Cache Size ID Register Macros */ +#define CCSIDR_WAYS(x) (((x) & SCB_CCSIDR_ASSOCIATIVITY_Msk) >> SCB_CCSIDR_ASSOCIATIVITY_Pos) +#define CCSIDR_SETS(x) (((x) & SCB_CCSIDR_NUMSETS_Msk ) >> SCB_CCSIDR_NUMSETS_Pos ) + + +/** + \brief Enable I-Cache + \details Turns on I-Cache + */ +__STATIC_INLINE void SCB_EnableICache (void) +{ + #if defined (__ICACHE_PRESENT) && (__ICACHE_PRESENT == 1U) + __DSB(); + __ISB(); + SCB->ICIALLU = 0UL; /* invalidate I-Cache */ + __DSB(); + __ISB(); + SCB->CCR |= (uint32_t)SCB_CCR_IC_Msk; /* enable I-Cache */ + __DSB(); + __ISB(); + #endif +} + + +/** + \brief Disable I-Cache + \details Turns off I-Cache + */ +__STATIC_INLINE void SCB_DisableICache (void) +{ + #if defined (__ICACHE_PRESENT) && (__ICACHE_PRESENT == 1U) + __DSB(); + __ISB(); + SCB->CCR &= ~(uint32_t)SCB_CCR_IC_Msk; /* disable I-Cache */ + SCB->ICIALLU = 0UL; /* invalidate I-Cache */ + __DSB(); + __ISB(); + #endif +} + + +/** + \brief Invalidate I-Cache + \details Invalidates I-Cache + */ +__STATIC_INLINE void SCB_InvalidateICache (void) +{ + #if defined (__ICACHE_PRESENT) && (__ICACHE_PRESENT == 1U) + __DSB(); + __ISB(); + SCB->ICIALLU = 0UL; + __DSB(); + __ISB(); + #endif +} + + +/** + \brief Enable D-Cache + \details Turns on D-Cache + */ +__STATIC_INLINE void SCB_EnableDCache (void) +{ + #if defined (__DCACHE_PRESENT) && (__DCACHE_PRESENT == 1U) + uint32_t ccsidr; + uint32_t sets; + uint32_t ways; + + SCB->CSSELR = 0U; /*(0U << 1U) | 0U;*/ /* Level 1 data cache */ + __DSB(); + + ccsidr = SCB->CCSIDR; + + /* invalidate D-Cache */ + sets = (uint32_t)(CCSIDR_SETS(ccsidr)); + do { + ways = (uint32_t)(CCSIDR_WAYS(ccsidr)); + do { + SCB->DCISW = (((sets << SCB_DCISW_SET_Pos) & SCB_DCISW_SET_Msk) | + ((ways << SCB_DCISW_WAY_Pos) & SCB_DCISW_WAY_Msk) ); + #if defined ( __CC_ARM ) + __schedule_barrier(); + #endif + } while (ways-- != 0U); + } while(sets-- != 0U); + __DSB(); + + SCB->CCR |= (uint32_t)SCB_CCR_DC_Msk; /* enable D-Cache */ + + __DSB(); + __ISB(); + #endif +} + + +/** + \brief Disable D-Cache + \details Turns off D-Cache + */ +__STATIC_INLINE void SCB_DisableDCache (void) +{ + #if defined (__DCACHE_PRESENT) && (__DCACHE_PRESENT == 1U) + uint32_t ccsidr; + uint32_t sets; + uint32_t ways; + + SCB->CSSELR = 0U; /*(0U << 1U) | 0U;*/ /* Level 1 data cache */ + __DSB(); + + SCB->CCR &= ~(uint32_t)SCB_CCR_DC_Msk; /* disable D-Cache */ + __DSB(); + + ccsidr = SCB->CCSIDR; + + /* clean & invalidate D-Cache */ + sets = (uint32_t)(CCSIDR_SETS(ccsidr)); + do { + ways = (uint32_t)(CCSIDR_WAYS(ccsidr)); + do { + SCB->DCCISW = (((sets << SCB_DCCISW_SET_Pos) & SCB_DCCISW_SET_Msk) | + ((ways << SCB_DCCISW_WAY_Pos) & SCB_DCCISW_WAY_Msk) ); + #if defined ( __CC_ARM ) + __schedule_barrier(); + #endif + } while (ways-- != 0U); + } while(sets-- != 0U); + + __DSB(); + __ISB(); + #endif +} + + +/** + \brief Invalidate D-Cache + \details Invalidates D-Cache + */ +__STATIC_INLINE void SCB_InvalidateDCache (void) +{ + #if defined (__DCACHE_PRESENT) && (__DCACHE_PRESENT == 1U) + uint32_t ccsidr; + uint32_t sets; + uint32_t ways; + + SCB->CSSELR = 0U; /*(0U << 1U) | 0U;*/ /* Level 1 data cache */ + __DSB(); + + ccsidr = SCB->CCSIDR; + + /* invalidate D-Cache */ + sets = (uint32_t)(CCSIDR_SETS(ccsidr)); + do { + ways = (uint32_t)(CCSIDR_WAYS(ccsidr)); + do { + SCB->DCISW = (((sets << SCB_DCISW_SET_Pos) & SCB_DCISW_SET_Msk) | + ((ways << SCB_DCISW_WAY_Pos) & SCB_DCISW_WAY_Msk) ); + #if defined ( __CC_ARM ) + __schedule_barrier(); + #endif + } while (ways-- != 0U); + } while(sets-- != 0U); + + __DSB(); + 
__ISB(); + #endif +} + + +/** + \brief Clean D-Cache + \details Cleans D-Cache + */ +__STATIC_INLINE void SCB_CleanDCache (void) +{ + #if defined (__DCACHE_PRESENT) && (__DCACHE_PRESENT == 1U) + uint32_t ccsidr; + uint32_t sets; + uint32_t ways; + + SCB->CSSELR = 0U; /*(0U << 1U) | 0U;*/ /* Level 1 data cache */ + __DSB(); + + ccsidr = SCB->CCSIDR; + + /* clean D-Cache */ + sets = (uint32_t)(CCSIDR_SETS(ccsidr)); + do { + ways = (uint32_t)(CCSIDR_WAYS(ccsidr)); + do { + SCB->DCCSW = (((sets << SCB_DCCSW_SET_Pos) & SCB_DCCSW_SET_Msk) | + ((ways << SCB_DCCSW_WAY_Pos) & SCB_DCCSW_WAY_Msk) ); + #if defined ( __CC_ARM ) + __schedule_barrier(); + #endif + } while (ways-- != 0U); + } while(sets-- != 0U); + + __DSB(); + __ISB(); + #endif +} + + +/** + \brief Clean & Invalidate D-Cache + \details Cleans and Invalidates D-Cache + */ +__STATIC_INLINE void SCB_CleanInvalidateDCache (void) +{ + #if defined (__DCACHE_PRESENT) && (__DCACHE_PRESENT == 1U) + uint32_t ccsidr; + uint32_t sets; + uint32_t ways; + + SCB->CSSELR = 0U; /*(0U << 1U) | 0U;*/ /* Level 1 data cache */ + __DSB(); + + ccsidr = SCB->CCSIDR; + + /* clean & invalidate D-Cache */ + sets = (uint32_t)(CCSIDR_SETS(ccsidr)); + do { + ways = (uint32_t)(CCSIDR_WAYS(ccsidr)); + do { + SCB->DCCISW = (((sets << SCB_DCCISW_SET_Pos) & SCB_DCCISW_SET_Msk) | + ((ways << SCB_DCCISW_WAY_Pos) & SCB_DCCISW_WAY_Msk) ); + #if defined ( __CC_ARM ) + __schedule_barrier(); + #endif + } while (ways-- != 0U); + } while(sets-- != 0U); + + __DSB(); + __ISB(); + #endif +} + + +/** + \brief D-Cache Invalidate by address + \details Invalidates D-Cache for the given address + \param[in] addr address (aligned to 32-byte boundary) + \param[in] dsize size of memory block (in number of bytes) +*/ +__STATIC_INLINE void SCB_InvalidateDCache_by_Addr (uint32_t *addr, int32_t dsize) +{ + #if defined (__DCACHE_PRESENT) && (__DCACHE_PRESENT == 1U) + int32_t op_size = dsize; + uint32_t op_addr = (uint32_t)addr; + int32_t linesize = 32; /* in Cortex-M7 size of cache line is fixed to 8 words (32 bytes) */ + + __DSB(); + + while (op_size > 0) { + SCB->DCIMVAC = op_addr; + op_addr += (uint32_t)linesize; + op_size -= linesize; + } + + __DSB(); + __ISB(); + #endif +} + + +/** + \brief D-Cache Clean by address + \details Cleans D-Cache for the given address + \param[in] addr address (aligned to 32-byte boundary) + \param[in] dsize size of memory block (in number of bytes) +*/ +__STATIC_INLINE void SCB_CleanDCache_by_Addr (uint32_t *addr, int32_t dsize) +{ + #if defined (__DCACHE_PRESENT) && (__DCACHE_PRESENT == 1U) + int32_t op_size = dsize; + uint32_t op_addr = (uint32_t) addr; + int32_t linesize = 32; /* in Cortex-M7 size of cache line is fixed to 8 words (32 bytes) */ + + __DSB(); + + while (op_size > 0) { + SCB->DCCMVAC = op_addr; + op_addr += (uint32_t)linesize; + op_size -= linesize; + } + + __DSB(); + __ISB(); + #endif +} + + +/** + \brief D-Cache Clean and Invalidate by address + \details Cleans and invalidates D_Cache for the given address + \param[in] addr address (aligned to 32-byte boundary) + \param[in] dsize size of memory block (in number of bytes) +*/ +__STATIC_INLINE void SCB_CleanInvalidateDCache_by_Addr (uint32_t *addr, int32_t dsize) +{ + #if defined (__DCACHE_PRESENT) && (__DCACHE_PRESENT == 1U) + int32_t op_size = dsize; + uint32_t op_addr = (uint32_t) addr; + int32_t linesize = 32; /* in Cortex-M7 size of cache line is fixed to 8 words (32 bytes) */ + + __DSB(); + + while (op_size > 0) { + SCB->DCCIMVAC = op_addr; + op_addr += (uint32_t)linesize; + op_size -= 
linesize; + } + + __DSB(); + __ISB(); + #endif +} + + +/*@} end of CMSIS_Core_CacheFunctions */ + + + +/* ################################## SysTick function ############################################ */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_SysTickFunctions SysTick Functions + \brief Functions that configure the System. + @{ + */ + +#if defined (__Vendor_SysTickConfig) && (__Vendor_SysTickConfig == 0U) + +/** + \brief System Tick Configuration + \details Initializes the System Timer and its interrupt, and starts the System Tick Timer. + Counter is in free running mode to generate periodic interrupts. + \param [in] ticks Number of ticks between two interrupts. + \return 0 Function succeeded. + \return 1 Function failed. + \note When the variable __Vendor_SysTickConfig is set to 1, then the + function SysTick_Config is not included. In this case, the file device.h + must contain a vendor-specific implementation of this function. + */ +__STATIC_INLINE uint32_t SysTick_Config(uint32_t ticks) +{ + if ((ticks - 1UL) > SysTick_LOAD_RELOAD_Msk) + { + return (1UL); /* Reload value impossible */ + } + + SysTick->LOAD = (uint32_t)(ticks - 1UL); /* set reload register */ + NVIC_SetPriority (SysTick_IRQn, (1UL << __NVIC_PRIO_BITS) - 1UL); /* set Priority for Systick Interrupt */ + SysTick->VAL = 0UL; /* Load the SysTick Counter Value */ + SysTick->CTRL = SysTick_CTRL_CLKSOURCE_Msk | + SysTick_CTRL_TICKINT_Msk | + SysTick_CTRL_ENABLE_Msk; /* Enable SysTick IRQ and SysTick Timer */ + return (0UL); /* Function successful */ +} + +#endif + +/*@} end of CMSIS_Core_SysTickFunctions */ + + + +/* ##################################### Debug In/Output function ########################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_core_DebugFunctions ITM Functions + \brief Functions that access the ITM debug interface. + @{ + */ + +extern volatile int32_t ITM_RxBuffer; /*!< External variable to receive characters. */ +#define ITM_RXBUFFER_EMPTY ((int32_t)0x5AA55AA5U) /*!< Value identifying \ref ITM_RxBuffer is ready for next character. */ + + +/** + \brief ITM Send Character + \details Transmits a character via the ITM channel 0, and + \li Just returns when no debugger is connected that has booked the output. + \li Is blocking when a debugger is connected, but the previous character sent has not been transmitted. + \param [in] ch Character to transmit. + \returns Character to transmit. + */ +__STATIC_INLINE uint32_t ITM_SendChar (uint32_t ch) +{ + if (((ITM->TCR & ITM_TCR_ITMENA_Msk) != 0UL) && /* ITM enabled */ + ((ITM->TER & 1UL ) != 0UL) ) /* ITM Port #0 enabled */ + { + while (ITM->PORT[0U].u32 == 0UL) + { + __NOP(); + } + ITM->PORT[0U].u8 = (uint8_t)ch; + } + return (ch); +} + + +/** + \brief ITM Receive Character + \details Inputs a character via the external variable \ref ITM_RxBuffer. + \return Received character. + \return -1 No character pending. + */ +__STATIC_INLINE int32_t ITM_ReceiveChar (void) +{ + int32_t ch = -1; /* no character available */ + + if (ITM_RxBuffer != ITM_RXBUFFER_EMPTY) + { + ch = ITM_RxBuffer; + ITM_RxBuffer = ITM_RXBUFFER_EMPTY; /* ready for next character */ + } + + return (ch); +} + + +/** + \brief ITM Check Character + \details Checks whether a character is pending for reading in the variable \ref ITM_RxBuffer. + \return 0 No character available. + \return 1 Character available. 
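+             A small polling sketch combining the three ITM helpers in this file; it assumes
+             the debugger has enabled stimulus port 0 and services \ref ITM_RxBuffer. The
+             echo loop is illustrative only.
+             \code
+             for (;;)
+             {
+               if (ITM_CheckChar() != 0)          // character pending from the debugger?
+               {
+                 int32_t c = ITM_ReceiveChar();   // read it; marks ITM_RxBuffer empty again
+                 ITM_SendChar((uint32_t)c);       // echo it back via stimulus port 0
+               }
+             }
+             \endcode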
+ */ +__STATIC_INLINE int32_t ITM_CheckChar (void) +{ + + if (ITM_RxBuffer == ITM_RXBUFFER_EMPTY) + { + return (0); /* no character available */ + } + else + { + return (1); /* character available */ + } +} + +/*@} end of CMSIS_core_DebugFunctions */ + + + + +#ifdef __cplusplus +} +#endif + +#endif /* __CORE_CM7_H_DEPENDANT */ + +#endif /* __CMSIS_GENERIC */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Drivers/CMSIS/Include/core_sc000.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Drivers/CMSIS/Include/core_sc000.h new file mode 100644 index 0000000..9086c64 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Drivers/CMSIS/Include/core_sc000.h @@ -0,0 +1,1022 @@ +/**************************************************************************//** + * @file core_sc000.h + * @brief CMSIS SC000 Core Peripheral Access Layer Header File + * @version V5.0.5 + * @date 28. May 2018 + ******************************************************************************/ +/* + * Copyright (c) 2009-2018 Arm Limited. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#if defined ( __ICCARM__ ) + #pragma system_include /* treat file as system include file for MISRA check */ +#elif defined (__clang__) + #pragma clang system_header /* treat file as system include file */ +#endif + +#ifndef __CORE_SC000_H_GENERIC +#define __CORE_SC000_H_GENERIC + +#include + +#ifdef __cplusplus + extern "C" { +#endif + +/** + \page CMSIS_MISRA_Exceptions MISRA-C:2004 Compliance Exceptions + CMSIS violates the following MISRA-C:2004 rules: + + \li Required Rule 8.5, object/function definition in header file.
+ Function definitions in header files are used to allow 'inlining'. + + \li Required Rule 18.4, declaration of union type or object of union type: '{...}'.<br>
+ Unions are used for effective representation of core registers. + + \li Advisory Rule 19.7, Function-like macro defined.<br>
+ Function-like macros are used to allow more efficient code. + */ + + +/******************************************************************************* + * CMSIS definitions + ******************************************************************************/ +/** + \ingroup SC000 + @{ + */ + +#include "cmsis_version.h" + +/* CMSIS SC000 definitions */ +#define __SC000_CMSIS_VERSION_MAIN (__CM_CMSIS_VERSION_MAIN) /*!< \deprecated [31:16] CMSIS HAL main version */ +#define __SC000_CMSIS_VERSION_SUB (__CM_CMSIS_VERSION_SUB) /*!< \deprecated [15:0] CMSIS HAL sub version */ +#define __SC000_CMSIS_VERSION ((__SC000_CMSIS_VERSION_MAIN << 16U) | \ + __SC000_CMSIS_VERSION_SUB ) /*!< \deprecated CMSIS HAL version number */ + +#define __CORTEX_SC (000U) /*!< Cortex secure core */ + +/** __FPU_USED indicates whether an FPU is used or not. + This core does not support an FPU at all +*/ +#define __FPU_USED 0U + +#if defined ( __CC_ARM ) + #if defined __TARGET_FPU_VFP + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined (__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050) + #if defined __ARM_PCS_VFP + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined ( __GNUC__ ) + #if defined (__VFP_FP__) && !defined(__SOFTFP__) + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined ( __ICCARM__ ) + #if defined __ARMVFP__ + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined ( __TI_ARM__ ) + #if defined __TI_VFP_SUPPORT__ + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined ( __TASKING__ ) + #if defined __FPU_VFP__ + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined ( __CSMC__ ) + #if ( __CSMC__ & 0x400U) + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#endif + +#include "cmsis_compiler.h" /* CMSIS compiler specific defines */ + + +#ifdef __cplusplus +} +#endif + +#endif /* __CORE_SC000_H_GENERIC */ + +#ifndef __CMSIS_GENERIC + +#ifndef __CORE_SC000_H_DEPENDANT +#define __CORE_SC000_H_DEPENDANT + +#ifdef __cplusplus + extern "C" { +#endif + +/* check device defines and use defaults */ +#if defined __CHECK_DEVICE_DEFINES + #ifndef __SC000_REV + #define __SC000_REV 0x0000U + #warning "__SC000_REV not defined in device header file; using default!" + #endif + + #ifndef __MPU_PRESENT + #define __MPU_PRESENT 0U + #warning "__MPU_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __NVIC_PRIO_BITS + #define __NVIC_PRIO_BITS 2U + #warning "__NVIC_PRIO_BITS not defined in device header file; using default!" + #endif + + #ifndef __Vendor_SysTickConfig + #define __Vendor_SysTickConfig 0U + #warning "__Vendor_SysTickConfig not defined in device header file; using default!" + #endif +#endif + +/* IO definitions (access restrictions to peripheral registers) */ +/** + \defgroup CMSIS_glob_defs CMSIS Global Defines + + IO Type Qualifiers are used + \li to specify the access to peripheral variables. + \li for automatic generation of peripheral register debug information. 
+*/ +#ifdef __cplusplus + #define __I volatile /*!< Defines 'read only' permissions */ +#else + #define __I volatile const /*!< Defines 'read only' permissions */ +#endif +#define __O volatile /*!< Defines 'write only' permissions */ +#define __IO volatile /*!< Defines 'read / write' permissions */ + +/* following defines should be used for structure members */ +#define __IM volatile const /*! Defines 'read only' structure member permissions */ +#define __OM volatile /*! Defines 'write only' structure member permissions */ +#define __IOM volatile /*! Defines 'read / write' structure member permissions */ + +/*@} end of group SC000 */ + + + +/******************************************************************************* + * Register Abstraction + Core Register contain: + - Core Register + - Core NVIC Register + - Core SCB Register + - Core SysTick Register + - Core MPU Register + ******************************************************************************/ +/** + \defgroup CMSIS_core_register Defines and Type Definitions + \brief Type definitions and defines for Cortex-M processor based devices. +*/ + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_CORE Status and Control Registers + \brief Core Register type definitions. + @{ + */ + +/** + \brief Union type to access the Application Program Status Register (APSR). + */ +typedef union +{ + struct + { + uint32_t _reserved0:28; /*!< bit: 0..27 Reserved */ + uint32_t V:1; /*!< bit: 28 Overflow condition code flag */ + uint32_t C:1; /*!< bit: 29 Carry condition code flag */ + uint32_t Z:1; /*!< bit: 30 Zero condition code flag */ + uint32_t N:1; /*!< bit: 31 Negative condition code flag */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} APSR_Type; + +/* APSR Register Definitions */ +#define APSR_N_Pos 31U /*!< APSR: N Position */ +#define APSR_N_Msk (1UL << APSR_N_Pos) /*!< APSR: N Mask */ + +#define APSR_Z_Pos 30U /*!< APSR: Z Position */ +#define APSR_Z_Msk (1UL << APSR_Z_Pos) /*!< APSR: Z Mask */ + +#define APSR_C_Pos 29U /*!< APSR: C Position */ +#define APSR_C_Msk (1UL << APSR_C_Pos) /*!< APSR: C Mask */ + +#define APSR_V_Pos 28U /*!< APSR: V Position */ +#define APSR_V_Msk (1UL << APSR_V_Pos) /*!< APSR: V Mask */ + + +/** + \brief Union type to access the Interrupt Program Status Register (IPSR). + */ +typedef union +{ + struct + { + uint32_t ISR:9; /*!< bit: 0.. 8 Exception number */ + uint32_t _reserved0:23; /*!< bit: 9..31 Reserved */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} IPSR_Type; + +/* IPSR Register Definitions */ +#define IPSR_ISR_Pos 0U /*!< IPSR: ISR Position */ +#define IPSR_ISR_Msk (0x1FFUL /*<< IPSR_ISR_Pos*/) /*!< IPSR: ISR Mask */ + + +/** + \brief Union type to access the Special-Purpose Program Status Registers (xPSR). + */ +typedef union +{ + struct + { + uint32_t ISR:9; /*!< bit: 0.. 
8 Exception number */ + uint32_t _reserved0:15; /*!< bit: 9..23 Reserved */ + uint32_t T:1; /*!< bit: 24 Thumb bit (read 0) */ + uint32_t _reserved1:3; /*!< bit: 25..27 Reserved */ + uint32_t V:1; /*!< bit: 28 Overflow condition code flag */ + uint32_t C:1; /*!< bit: 29 Carry condition code flag */ + uint32_t Z:1; /*!< bit: 30 Zero condition code flag */ + uint32_t N:1; /*!< bit: 31 Negative condition code flag */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} xPSR_Type; + +/* xPSR Register Definitions */ +#define xPSR_N_Pos 31U /*!< xPSR: N Position */ +#define xPSR_N_Msk (1UL << xPSR_N_Pos) /*!< xPSR: N Mask */ + +#define xPSR_Z_Pos 30U /*!< xPSR: Z Position */ +#define xPSR_Z_Msk (1UL << xPSR_Z_Pos) /*!< xPSR: Z Mask */ + +#define xPSR_C_Pos 29U /*!< xPSR: C Position */ +#define xPSR_C_Msk (1UL << xPSR_C_Pos) /*!< xPSR: C Mask */ + +#define xPSR_V_Pos 28U /*!< xPSR: V Position */ +#define xPSR_V_Msk (1UL << xPSR_V_Pos) /*!< xPSR: V Mask */ + +#define xPSR_T_Pos 24U /*!< xPSR: T Position */ +#define xPSR_T_Msk (1UL << xPSR_T_Pos) /*!< xPSR: T Mask */ + +#define xPSR_ISR_Pos 0U /*!< xPSR: ISR Position */ +#define xPSR_ISR_Msk (0x1FFUL /*<< xPSR_ISR_Pos*/) /*!< xPSR: ISR Mask */ + + +/** + \brief Union type to access the Control Registers (CONTROL). + */ +typedef union +{ + struct + { + uint32_t _reserved0:1; /*!< bit: 0 Reserved */ + uint32_t SPSEL:1; /*!< bit: 1 Stack to be used */ + uint32_t _reserved1:30; /*!< bit: 2..31 Reserved */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} CONTROL_Type; + +/* CONTROL Register Definitions */ +#define CONTROL_SPSEL_Pos 1U /*!< CONTROL: SPSEL Position */ +#define CONTROL_SPSEL_Msk (1UL << CONTROL_SPSEL_Pos) /*!< CONTROL: SPSEL Mask */ + +/*@} end of group CMSIS_CORE */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_NVIC Nested Vectored Interrupt Controller (NVIC) + \brief Type definitions for the NVIC Registers + @{ + */ + +/** + \brief Structure type to access the Nested Vectored Interrupt Controller (NVIC). + */ +typedef struct +{ + __IOM uint32_t ISER[1U]; /*!< Offset: 0x000 (R/W) Interrupt Set Enable Register */ + uint32_t RESERVED0[31U]; + __IOM uint32_t ICER[1U]; /*!< Offset: 0x080 (R/W) Interrupt Clear Enable Register */ + uint32_t RSERVED1[31U]; + __IOM uint32_t ISPR[1U]; /*!< Offset: 0x100 (R/W) Interrupt Set Pending Register */ + uint32_t RESERVED2[31U]; + __IOM uint32_t ICPR[1U]; /*!< Offset: 0x180 (R/W) Interrupt Clear Pending Register */ + uint32_t RESERVED3[31U]; + uint32_t RESERVED4[64U]; + __IOM uint32_t IP[8U]; /*!< Offset: 0x300 (R/W) Interrupt Priority Register */ +} NVIC_Type; + +/*@} end of group CMSIS_NVIC */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SCB System Control Block (SCB) + \brief Type definitions for the System Control Block Registers + @{ + */ + +/** + \brief Structure type to access the System Control Block (SCB). 
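+          Identification sketch (illustrative): the CPUID bit-field macros defined below can
+          be combined with the generic \ref _FLD2VAL helper from this header; variable names
+          are placeholders.
+          \code
+          uint32_t part = _FLD2VAL(SCB_CPUID_PARTNO,   SCB->CPUID);  // part number field
+          uint32_t rev  = _FLD2VAL(SCB_CPUID_REVISION, SCB->CPUID);  // revision (patch) field
+          \endcode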
+ */ +typedef struct +{ + __IM uint32_t CPUID; /*!< Offset: 0x000 (R/ ) CPUID Base Register */ + __IOM uint32_t ICSR; /*!< Offset: 0x004 (R/W) Interrupt Control and State Register */ + __IOM uint32_t VTOR; /*!< Offset: 0x008 (R/W) Vector Table Offset Register */ + __IOM uint32_t AIRCR; /*!< Offset: 0x00C (R/W) Application Interrupt and Reset Control Register */ + __IOM uint32_t SCR; /*!< Offset: 0x010 (R/W) System Control Register */ + __IOM uint32_t CCR; /*!< Offset: 0x014 (R/W) Configuration Control Register */ + uint32_t RESERVED0[1U]; + __IOM uint32_t SHP[2U]; /*!< Offset: 0x01C (R/W) System Handlers Priority Registers. [0] is RESERVED */ + __IOM uint32_t SHCSR; /*!< Offset: 0x024 (R/W) System Handler Control and State Register */ + uint32_t RESERVED1[154U]; + __IOM uint32_t SFCR; /*!< Offset: 0x290 (R/W) Security Features Control Register */ +} SCB_Type; + +/* SCB CPUID Register Definitions */ +#define SCB_CPUID_IMPLEMENTER_Pos 24U /*!< SCB CPUID: IMPLEMENTER Position */ +#define SCB_CPUID_IMPLEMENTER_Msk (0xFFUL << SCB_CPUID_IMPLEMENTER_Pos) /*!< SCB CPUID: IMPLEMENTER Mask */ + +#define SCB_CPUID_VARIANT_Pos 20U /*!< SCB CPUID: VARIANT Position */ +#define SCB_CPUID_VARIANT_Msk (0xFUL << SCB_CPUID_VARIANT_Pos) /*!< SCB CPUID: VARIANT Mask */ + +#define SCB_CPUID_ARCHITECTURE_Pos 16U /*!< SCB CPUID: ARCHITECTURE Position */ +#define SCB_CPUID_ARCHITECTURE_Msk (0xFUL << SCB_CPUID_ARCHITECTURE_Pos) /*!< SCB CPUID: ARCHITECTURE Mask */ + +#define SCB_CPUID_PARTNO_Pos 4U /*!< SCB CPUID: PARTNO Position */ +#define SCB_CPUID_PARTNO_Msk (0xFFFUL << SCB_CPUID_PARTNO_Pos) /*!< SCB CPUID: PARTNO Mask */ + +#define SCB_CPUID_REVISION_Pos 0U /*!< SCB CPUID: REVISION Position */ +#define SCB_CPUID_REVISION_Msk (0xFUL /*<< SCB_CPUID_REVISION_Pos*/) /*!< SCB CPUID: REVISION Mask */ + +/* SCB Interrupt Control State Register Definitions */ +#define SCB_ICSR_NMIPENDSET_Pos 31U /*!< SCB ICSR: NMIPENDSET Position */ +#define SCB_ICSR_NMIPENDSET_Msk (1UL << SCB_ICSR_NMIPENDSET_Pos) /*!< SCB ICSR: NMIPENDSET Mask */ + +#define SCB_ICSR_PENDSVSET_Pos 28U /*!< SCB ICSR: PENDSVSET Position */ +#define SCB_ICSR_PENDSVSET_Msk (1UL << SCB_ICSR_PENDSVSET_Pos) /*!< SCB ICSR: PENDSVSET Mask */ + +#define SCB_ICSR_PENDSVCLR_Pos 27U /*!< SCB ICSR: PENDSVCLR Position */ +#define SCB_ICSR_PENDSVCLR_Msk (1UL << SCB_ICSR_PENDSVCLR_Pos) /*!< SCB ICSR: PENDSVCLR Mask */ + +#define SCB_ICSR_PENDSTSET_Pos 26U /*!< SCB ICSR: PENDSTSET Position */ +#define SCB_ICSR_PENDSTSET_Msk (1UL << SCB_ICSR_PENDSTSET_Pos) /*!< SCB ICSR: PENDSTSET Mask */ + +#define SCB_ICSR_PENDSTCLR_Pos 25U /*!< SCB ICSR: PENDSTCLR Position */ +#define SCB_ICSR_PENDSTCLR_Msk (1UL << SCB_ICSR_PENDSTCLR_Pos) /*!< SCB ICSR: PENDSTCLR Mask */ + +#define SCB_ICSR_ISRPREEMPT_Pos 23U /*!< SCB ICSR: ISRPREEMPT Position */ +#define SCB_ICSR_ISRPREEMPT_Msk (1UL << SCB_ICSR_ISRPREEMPT_Pos) /*!< SCB ICSR: ISRPREEMPT Mask */ + +#define SCB_ICSR_ISRPENDING_Pos 22U /*!< SCB ICSR: ISRPENDING Position */ +#define SCB_ICSR_ISRPENDING_Msk (1UL << SCB_ICSR_ISRPENDING_Pos) /*!< SCB ICSR: ISRPENDING Mask */ + +#define SCB_ICSR_VECTPENDING_Pos 12U /*!< SCB ICSR: VECTPENDING Position */ +#define SCB_ICSR_VECTPENDING_Msk (0x1FFUL << SCB_ICSR_VECTPENDING_Pos) /*!< SCB ICSR: VECTPENDING Mask */ + +#define SCB_ICSR_VECTACTIVE_Pos 0U /*!< SCB ICSR: VECTACTIVE Position */ +#define SCB_ICSR_VECTACTIVE_Msk (0x1FFUL /*<< SCB_ICSR_VECTACTIVE_Pos*/) /*!< SCB ICSR: VECTACTIVE Mask */ + +/* SCB Interrupt Control State Register Definitions */ +#define SCB_VTOR_TBLOFF_Pos 7U /*!< SCB VTOR: 
TBLOFF Position */ +#define SCB_VTOR_TBLOFF_Msk (0x1FFFFFFUL << SCB_VTOR_TBLOFF_Pos) /*!< SCB VTOR: TBLOFF Mask */ + +/* SCB Application Interrupt and Reset Control Register Definitions */ +#define SCB_AIRCR_VECTKEY_Pos 16U /*!< SCB AIRCR: VECTKEY Position */ +#define SCB_AIRCR_VECTKEY_Msk (0xFFFFUL << SCB_AIRCR_VECTKEY_Pos) /*!< SCB AIRCR: VECTKEY Mask */ + +#define SCB_AIRCR_VECTKEYSTAT_Pos 16U /*!< SCB AIRCR: VECTKEYSTAT Position */ +#define SCB_AIRCR_VECTKEYSTAT_Msk (0xFFFFUL << SCB_AIRCR_VECTKEYSTAT_Pos) /*!< SCB AIRCR: VECTKEYSTAT Mask */ + +#define SCB_AIRCR_ENDIANESS_Pos 15U /*!< SCB AIRCR: ENDIANESS Position */ +#define SCB_AIRCR_ENDIANESS_Msk (1UL << SCB_AIRCR_ENDIANESS_Pos) /*!< SCB AIRCR: ENDIANESS Mask */ + +#define SCB_AIRCR_SYSRESETREQ_Pos 2U /*!< SCB AIRCR: SYSRESETREQ Position */ +#define SCB_AIRCR_SYSRESETREQ_Msk (1UL << SCB_AIRCR_SYSRESETREQ_Pos) /*!< SCB AIRCR: SYSRESETREQ Mask */ + +#define SCB_AIRCR_VECTCLRACTIVE_Pos 1U /*!< SCB AIRCR: VECTCLRACTIVE Position */ +#define SCB_AIRCR_VECTCLRACTIVE_Msk (1UL << SCB_AIRCR_VECTCLRACTIVE_Pos) /*!< SCB AIRCR: VECTCLRACTIVE Mask */ + +/* SCB System Control Register Definitions */ +#define SCB_SCR_SEVONPEND_Pos 4U /*!< SCB SCR: SEVONPEND Position */ +#define SCB_SCR_SEVONPEND_Msk (1UL << SCB_SCR_SEVONPEND_Pos) /*!< SCB SCR: SEVONPEND Mask */ + +#define SCB_SCR_SLEEPDEEP_Pos 2U /*!< SCB SCR: SLEEPDEEP Position */ +#define SCB_SCR_SLEEPDEEP_Msk (1UL << SCB_SCR_SLEEPDEEP_Pos) /*!< SCB SCR: SLEEPDEEP Mask */ + +#define SCB_SCR_SLEEPONEXIT_Pos 1U /*!< SCB SCR: SLEEPONEXIT Position */ +#define SCB_SCR_SLEEPONEXIT_Msk (1UL << SCB_SCR_SLEEPONEXIT_Pos) /*!< SCB SCR: SLEEPONEXIT Mask */ + +/* SCB Configuration Control Register Definitions */ +#define SCB_CCR_STKALIGN_Pos 9U /*!< SCB CCR: STKALIGN Position */ +#define SCB_CCR_STKALIGN_Msk (1UL << SCB_CCR_STKALIGN_Pos) /*!< SCB CCR: STKALIGN Mask */ + +#define SCB_CCR_UNALIGN_TRP_Pos 3U /*!< SCB CCR: UNALIGN_TRP Position */ +#define SCB_CCR_UNALIGN_TRP_Msk (1UL << SCB_CCR_UNALIGN_TRP_Pos) /*!< SCB CCR: UNALIGN_TRP Mask */ + +/* SCB System Handler Control and State Register Definitions */ +#define SCB_SHCSR_SVCALLPENDED_Pos 15U /*!< SCB SHCSR: SVCALLPENDED Position */ +#define SCB_SHCSR_SVCALLPENDED_Msk (1UL << SCB_SHCSR_SVCALLPENDED_Pos) /*!< SCB SHCSR: SVCALLPENDED Mask */ + +/*@} end of group CMSIS_SCB */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SCnSCB System Controls not in SCB (SCnSCB) + \brief Type definitions for the System Control and ID Register not in the SCB + @{ + */ + +/** + \brief Structure type to access the System Control and ID Register not in the SCB. + */ +typedef struct +{ + uint32_t RESERVED0[2U]; + __IOM uint32_t ACTLR; /*!< Offset: 0x008 (R/W) Auxiliary Control Register */ +} SCnSCB_Type; + +/* Auxiliary Control Register Definitions */ +#define SCnSCB_ACTLR_DISMCYCINT_Pos 0U /*!< ACTLR: DISMCYCINT Position */ +#define SCnSCB_ACTLR_DISMCYCINT_Msk (1UL /*<< SCnSCB_ACTLR_DISMCYCINT_Pos*/) /*!< ACTLR: DISMCYCINT Mask */ + +/*@} end of group CMSIS_SCnotSCB */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SysTick System Tick Timer (SysTick) + \brief Type definitions for the System Timer Registers. + @{ + */ + +/** + \brief Structure type to access the System Timer (SysTick). 
+ */ +typedef struct +{ + __IOM uint32_t CTRL; /*!< Offset: 0x000 (R/W) SysTick Control and Status Register */ + __IOM uint32_t LOAD; /*!< Offset: 0x004 (R/W) SysTick Reload Value Register */ + __IOM uint32_t VAL; /*!< Offset: 0x008 (R/W) SysTick Current Value Register */ + __IM uint32_t CALIB; /*!< Offset: 0x00C (R/ ) SysTick Calibration Register */ +} SysTick_Type; + +/* SysTick Control / Status Register Definitions */ +#define SysTick_CTRL_COUNTFLAG_Pos 16U /*!< SysTick CTRL: COUNTFLAG Position */ +#define SysTick_CTRL_COUNTFLAG_Msk (1UL << SysTick_CTRL_COUNTFLAG_Pos) /*!< SysTick CTRL: COUNTFLAG Mask */ + +#define SysTick_CTRL_CLKSOURCE_Pos 2U /*!< SysTick CTRL: CLKSOURCE Position */ +#define SysTick_CTRL_CLKSOURCE_Msk (1UL << SysTick_CTRL_CLKSOURCE_Pos) /*!< SysTick CTRL: CLKSOURCE Mask */ + +#define SysTick_CTRL_TICKINT_Pos 1U /*!< SysTick CTRL: TICKINT Position */ +#define SysTick_CTRL_TICKINT_Msk (1UL << SysTick_CTRL_TICKINT_Pos) /*!< SysTick CTRL: TICKINT Mask */ + +#define SysTick_CTRL_ENABLE_Pos 0U /*!< SysTick CTRL: ENABLE Position */ +#define SysTick_CTRL_ENABLE_Msk (1UL /*<< SysTick_CTRL_ENABLE_Pos*/) /*!< SysTick CTRL: ENABLE Mask */ + +/* SysTick Reload Register Definitions */ +#define SysTick_LOAD_RELOAD_Pos 0U /*!< SysTick LOAD: RELOAD Position */ +#define SysTick_LOAD_RELOAD_Msk (0xFFFFFFUL /*<< SysTick_LOAD_RELOAD_Pos*/) /*!< SysTick LOAD: RELOAD Mask */ + +/* SysTick Current Register Definitions */ +#define SysTick_VAL_CURRENT_Pos 0U /*!< SysTick VAL: CURRENT Position */ +#define SysTick_VAL_CURRENT_Msk (0xFFFFFFUL /*<< SysTick_VAL_CURRENT_Pos*/) /*!< SysTick VAL: CURRENT Mask */ + +/* SysTick Calibration Register Definitions */ +#define SysTick_CALIB_NOREF_Pos 31U /*!< SysTick CALIB: NOREF Position */ +#define SysTick_CALIB_NOREF_Msk (1UL << SysTick_CALIB_NOREF_Pos) /*!< SysTick CALIB: NOREF Mask */ + +#define SysTick_CALIB_SKEW_Pos 30U /*!< SysTick CALIB: SKEW Position */ +#define SysTick_CALIB_SKEW_Msk (1UL << SysTick_CALIB_SKEW_Pos) /*!< SysTick CALIB: SKEW Mask */ + +#define SysTick_CALIB_TENMS_Pos 0U /*!< SysTick CALIB: TENMS Position */ +#define SysTick_CALIB_TENMS_Msk (0xFFFFFFUL /*<< SysTick_CALIB_TENMS_Pos*/) /*!< SysTick CALIB: TENMS Mask */ + +/*@} end of group CMSIS_SysTick */ + +#if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_MPU Memory Protection Unit (MPU) + \brief Type definitions for the Memory Protection Unit (MPU) + @{ + */ + +/** + \brief Structure type to access the Memory Protection Unit (MPU). 
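+          A minimal region set-up sketch using the RBAR/RASR field macros defined below.
+          Region number, base address, size and memory attributes are placeholders and must
+          be adapted to the target memory map (the AP/TEX/S/C/B encoding is system specific).
+          \code
+          MPU->RNR  = 0U;                                             // select region 0
+          MPU->RBAR = 0x20000000UL & MPU_RBAR_ADDR_Msk;               // region base (aligned to its size)
+          MPU->RASR = (0x3UL << MPU_RASR_AP_Pos)   |                  // full access
+                      (14UL  << MPU_RASR_SIZE_Pos) |                  // 2^(14+1) bytes = 32 KB
+                      MPU_RASR_C_Msk | MPU_RASR_S_Msk |               // cacheable, shareable
+                      MPU_RASR_ENABLE_Msk;                            // enable the region
+          MPU->CTRL = MPU_CTRL_PRIVDEFENA_Msk | MPU_CTRL_ENABLE_Msk;  // keep default map for privileged code
+          __DSB();                                                    // make the new settings visible
+          __ISB();
+          \endcode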
+ */ +typedef struct +{ + __IM uint32_t TYPE; /*!< Offset: 0x000 (R/ ) MPU Type Register */ + __IOM uint32_t CTRL; /*!< Offset: 0x004 (R/W) MPU Control Register */ + __IOM uint32_t RNR; /*!< Offset: 0x008 (R/W) MPU Region RNRber Register */ + __IOM uint32_t RBAR; /*!< Offset: 0x00C (R/W) MPU Region Base Address Register */ + __IOM uint32_t RASR; /*!< Offset: 0x010 (R/W) MPU Region Attribute and Size Register */ +} MPU_Type; + +/* MPU Type Register Definitions */ +#define MPU_TYPE_IREGION_Pos 16U /*!< MPU TYPE: IREGION Position */ +#define MPU_TYPE_IREGION_Msk (0xFFUL << MPU_TYPE_IREGION_Pos) /*!< MPU TYPE: IREGION Mask */ + +#define MPU_TYPE_DREGION_Pos 8U /*!< MPU TYPE: DREGION Position */ +#define MPU_TYPE_DREGION_Msk (0xFFUL << MPU_TYPE_DREGION_Pos) /*!< MPU TYPE: DREGION Mask */ + +#define MPU_TYPE_SEPARATE_Pos 0U /*!< MPU TYPE: SEPARATE Position */ +#define MPU_TYPE_SEPARATE_Msk (1UL /*<< MPU_TYPE_SEPARATE_Pos*/) /*!< MPU TYPE: SEPARATE Mask */ + +/* MPU Control Register Definitions */ +#define MPU_CTRL_PRIVDEFENA_Pos 2U /*!< MPU CTRL: PRIVDEFENA Position */ +#define MPU_CTRL_PRIVDEFENA_Msk (1UL << MPU_CTRL_PRIVDEFENA_Pos) /*!< MPU CTRL: PRIVDEFENA Mask */ + +#define MPU_CTRL_HFNMIENA_Pos 1U /*!< MPU CTRL: HFNMIENA Position */ +#define MPU_CTRL_HFNMIENA_Msk (1UL << MPU_CTRL_HFNMIENA_Pos) /*!< MPU CTRL: HFNMIENA Mask */ + +#define MPU_CTRL_ENABLE_Pos 0U /*!< MPU CTRL: ENABLE Position */ +#define MPU_CTRL_ENABLE_Msk (1UL /*<< MPU_CTRL_ENABLE_Pos*/) /*!< MPU CTRL: ENABLE Mask */ + +/* MPU Region Number Register Definitions */ +#define MPU_RNR_REGION_Pos 0U /*!< MPU RNR: REGION Position */ +#define MPU_RNR_REGION_Msk (0xFFUL /*<< MPU_RNR_REGION_Pos*/) /*!< MPU RNR: REGION Mask */ + +/* MPU Region Base Address Register Definitions */ +#define MPU_RBAR_ADDR_Pos 8U /*!< MPU RBAR: ADDR Position */ +#define MPU_RBAR_ADDR_Msk (0xFFFFFFUL << MPU_RBAR_ADDR_Pos) /*!< MPU RBAR: ADDR Mask */ + +#define MPU_RBAR_VALID_Pos 4U /*!< MPU RBAR: VALID Position */ +#define MPU_RBAR_VALID_Msk (1UL << MPU_RBAR_VALID_Pos) /*!< MPU RBAR: VALID Mask */ + +#define MPU_RBAR_REGION_Pos 0U /*!< MPU RBAR: REGION Position */ +#define MPU_RBAR_REGION_Msk (0xFUL /*<< MPU_RBAR_REGION_Pos*/) /*!< MPU RBAR: REGION Mask */ + +/* MPU Region Attribute and Size Register Definitions */ +#define MPU_RASR_ATTRS_Pos 16U /*!< MPU RASR: MPU Region Attribute field Position */ +#define MPU_RASR_ATTRS_Msk (0xFFFFUL << MPU_RASR_ATTRS_Pos) /*!< MPU RASR: MPU Region Attribute field Mask */ + +#define MPU_RASR_XN_Pos 28U /*!< MPU RASR: ATTRS.XN Position */ +#define MPU_RASR_XN_Msk (1UL << MPU_RASR_XN_Pos) /*!< MPU RASR: ATTRS.XN Mask */ + +#define MPU_RASR_AP_Pos 24U /*!< MPU RASR: ATTRS.AP Position */ +#define MPU_RASR_AP_Msk (0x7UL << MPU_RASR_AP_Pos) /*!< MPU RASR: ATTRS.AP Mask */ + +#define MPU_RASR_TEX_Pos 19U /*!< MPU RASR: ATTRS.TEX Position */ +#define MPU_RASR_TEX_Msk (0x7UL << MPU_RASR_TEX_Pos) /*!< MPU RASR: ATTRS.TEX Mask */ + +#define MPU_RASR_S_Pos 18U /*!< MPU RASR: ATTRS.S Position */ +#define MPU_RASR_S_Msk (1UL << MPU_RASR_S_Pos) /*!< MPU RASR: ATTRS.S Mask */ + +#define MPU_RASR_C_Pos 17U /*!< MPU RASR: ATTRS.C Position */ +#define MPU_RASR_C_Msk (1UL << MPU_RASR_C_Pos) /*!< MPU RASR: ATTRS.C Mask */ + +#define MPU_RASR_B_Pos 16U /*!< MPU RASR: ATTRS.B Position */ +#define MPU_RASR_B_Msk (1UL << MPU_RASR_B_Pos) /*!< MPU RASR: ATTRS.B Mask */ + +#define MPU_RASR_SRD_Pos 8U /*!< MPU RASR: Sub-Region Disable Position */ +#define MPU_RASR_SRD_Msk (0xFFUL << MPU_RASR_SRD_Pos) /*!< MPU RASR: Sub-Region Disable Mask */ + 
+#define MPU_RASR_SIZE_Pos 1U /*!< MPU RASR: Region Size Field Position */ +#define MPU_RASR_SIZE_Msk (0x1FUL << MPU_RASR_SIZE_Pos) /*!< MPU RASR: Region Size Field Mask */ + +#define MPU_RASR_ENABLE_Pos 0U /*!< MPU RASR: Region enable bit Position */ +#define MPU_RASR_ENABLE_Msk (1UL /*<< MPU_RASR_ENABLE_Pos*/) /*!< MPU RASR: Region enable bit Disable Mask */ + +/*@} end of group CMSIS_MPU */ +#endif + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_CoreDebug Core Debug Registers (CoreDebug) + \brief SC000 Core Debug Registers (DCB registers, SHCSR, and DFSR) are only accessible over DAP and not via processor. + Therefore they are not covered by the SC000 header file. + @{ + */ +/*@} end of group CMSIS_CoreDebug */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_core_bitfield Core register bit field macros + \brief Macros for use with bit field definitions (xxx_Pos, xxx_Msk). + @{ + */ + +/** + \brief Mask and shift a bit field value for use in a register bit range. + \param[in] field Name of the register bit field. + \param[in] value Value of the bit field. This parameter is interpreted as an uint32_t type. + \return Masked and shifted value. +*/ +#define _VAL2FLD(field, value) (((uint32_t)(value) << field ## _Pos) & field ## _Msk) + +/** + \brief Mask and shift a register value to extract a bit filed value. + \param[in] field Name of the register bit field. + \param[in] value Value of register. This parameter is interpreted as an uint32_t type. + \return Masked and shifted bit field value. +*/ +#define _FLD2VAL(field, value) (((uint32_t)(value) & field ## _Msk) >> field ## _Pos) + +/*@} end of group CMSIS_core_bitfield */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_core_base Core Definitions + \brief Definitions for base addresses, unions, and structures. + @{ + */ + +/* Memory mapping of Core Hardware */ +#define SCS_BASE (0xE000E000UL) /*!< System Control Space Base Address */ +#define SysTick_BASE (SCS_BASE + 0x0010UL) /*!< SysTick Base Address */ +#define NVIC_BASE (SCS_BASE + 0x0100UL) /*!< NVIC Base Address */ +#define SCB_BASE (SCS_BASE + 0x0D00UL) /*!< System Control Block Base Address */ + +#define SCnSCB ((SCnSCB_Type *) SCS_BASE ) /*!< System control Register not in SCB */ +#define SCB ((SCB_Type *) SCB_BASE ) /*!< SCB configuration struct */ +#define SysTick ((SysTick_Type *) SysTick_BASE ) /*!< SysTick configuration struct */ +#define NVIC ((NVIC_Type *) NVIC_BASE ) /*!< NVIC configuration struct */ + +#if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) + #define MPU_BASE (SCS_BASE + 0x0D90UL) /*!< Memory Protection Unit */ + #define MPU ((MPU_Type *) MPU_BASE ) /*!< Memory Protection Unit */ +#endif + +/*@} */ + + + +/******************************************************************************* + * Hardware Abstraction Layer + Core Function Interface contains: + - Core NVIC Functions + - Core SysTick Functions + - Core Register Access Functions + ******************************************************************************/ +/** + \defgroup CMSIS_Core_FunctionInterface Functions and Instructions Reference +*/ + + + +/* ########################## NVIC functions #################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_NVICFunctions NVIC Functions + \brief Functions that manage interrupts and exceptions via the NVIC. 
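+           A typical enable sequence using the wrappers defined in this group; WDG_IRQn stands
+           in for a device specific interrupt number from the device header. With the default
+           __NVIC_PRIO_BITS of 2, only priority values 0..3 are effective.
+           \code
+           NVIC_SetPriority(WDG_IRQn, 1U);   // one step below the most urgent level
+           NVIC_ClearPendingIRQ(WDG_IRQn);   // discard anything latched while disabled
+           NVIC_EnableIRQ(WDG_IRQn);         // the interrupt is now taken when it fires
+           \endcode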
+ @{ + */ + +#ifdef CMSIS_NVIC_VIRTUAL + #ifndef CMSIS_NVIC_VIRTUAL_HEADER_FILE + #define CMSIS_NVIC_VIRTUAL_HEADER_FILE "cmsis_nvic_virtual.h" + #endif + #include CMSIS_NVIC_VIRTUAL_HEADER_FILE +#else +/*#define NVIC_SetPriorityGrouping __NVIC_SetPriorityGrouping not available for SC000 */ +/*#define NVIC_GetPriorityGrouping __NVIC_GetPriorityGrouping not available for SC000 */ + #define NVIC_EnableIRQ __NVIC_EnableIRQ + #define NVIC_GetEnableIRQ __NVIC_GetEnableIRQ + #define NVIC_DisableIRQ __NVIC_DisableIRQ + #define NVIC_GetPendingIRQ __NVIC_GetPendingIRQ + #define NVIC_SetPendingIRQ __NVIC_SetPendingIRQ + #define NVIC_ClearPendingIRQ __NVIC_ClearPendingIRQ +/*#define NVIC_GetActive __NVIC_GetActive not available for SC000 */ + #define NVIC_SetPriority __NVIC_SetPriority + #define NVIC_GetPriority __NVIC_GetPriority + #define NVIC_SystemReset __NVIC_SystemReset +#endif /* CMSIS_NVIC_VIRTUAL */ + +#ifdef CMSIS_VECTAB_VIRTUAL + #ifndef CMSIS_VECTAB_VIRTUAL_HEADER_FILE + #define CMSIS_VECTAB_VIRTUAL_HEADER_FILE "cmsis_vectab_virtual.h" + #endif + #include CMSIS_VECTAB_VIRTUAL_HEADER_FILE +#else + #define NVIC_SetVector __NVIC_SetVector + #define NVIC_GetVector __NVIC_GetVector +#endif /* (CMSIS_VECTAB_VIRTUAL) */ + +#define NVIC_USER_IRQ_OFFSET 16 + + +/* The following EXC_RETURN values are saved the LR on exception entry */ +#define EXC_RETURN_HANDLER (0xFFFFFFF1UL) /* return to Handler mode, uses MSP after return */ +#define EXC_RETURN_THREAD_MSP (0xFFFFFFF9UL) /* return to Thread mode, uses MSP after return */ +#define EXC_RETURN_THREAD_PSP (0xFFFFFFFDUL) /* return to Thread mode, uses PSP after return */ + + +/* Interrupt Priorities are WORD accessible only under Armv6-M */ +/* The following MACROS handle generation of the register offset and byte masks */ +#define _BIT_SHIFT(IRQn) ( ((((uint32_t)(int32_t)(IRQn)) ) & 0x03UL) * 8UL) +#define _SHP_IDX(IRQn) ( (((((uint32_t)(int32_t)(IRQn)) & 0x0FUL)-8UL) >> 2UL) ) +#define _IP_IDX(IRQn) ( (((uint32_t)(int32_t)(IRQn)) >> 2UL) ) + + +/** + \brief Enable Interrupt + \details Enables a device specific interrupt in the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_EnableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ISER[0U] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Get Interrupt Enable status + \details Returns a device specific interrupt enable status from the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt is not enabled. + \return 1 Interrupt is enabled. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t __NVIC_GetEnableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->ISER[0U] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Disable Interrupt + \details Disables a device specific interrupt in the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_DisableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ICER[0U] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + __DSB(); + __ISB(); + } +} + + +/** + \brief Get Pending Interrupt + \details Reads the NVIC pending register and returns the pending bit for the specified device specific interrupt. + \param [in] IRQn Device specific interrupt number. 
+ \return 0 Interrupt status is not pending. + \return 1 Interrupt status is pending. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t __NVIC_GetPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->ISPR[0U] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Set Pending Interrupt + \details Sets the pending bit of a device specific interrupt in the NVIC pending register. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_SetPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ISPR[0U] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Clear Pending Interrupt + \details Clears the pending bit of a device specific interrupt in the NVIC pending register. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_ClearPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ICPR[0U] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Set Interrupt Priority + \details Sets the priority of a device specific interrupt or a processor exception. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \param [in] priority Priority to set. + \note The priority cannot be set for every processor exception. + */ +__STATIC_INLINE void __NVIC_SetPriority(IRQn_Type IRQn, uint32_t priority) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->IP[_IP_IDX(IRQn)] = ((uint32_t)(NVIC->IP[_IP_IDX(IRQn)] & ~(0xFFUL << _BIT_SHIFT(IRQn))) | + (((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL) << _BIT_SHIFT(IRQn))); + } + else + { + SCB->SHP[_SHP_IDX(IRQn)] = ((uint32_t)(SCB->SHP[_SHP_IDX(IRQn)] & ~(0xFFUL << _BIT_SHIFT(IRQn))) | + (((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL) << _BIT_SHIFT(IRQn))); + } +} + + +/** + \brief Get Interrupt Priority + \details Reads the priority of a device specific interrupt or a processor exception. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \return Interrupt Priority. + Value is aligned automatically to the implemented priority bits of the microcontroller. + */ +__STATIC_INLINE uint32_t __NVIC_GetPriority(IRQn_Type IRQn) +{ + + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->IP[ _IP_IDX(IRQn)] >> _BIT_SHIFT(IRQn) ) & (uint32_t)0xFFUL) >> (8U - __NVIC_PRIO_BITS))); + } + else + { + return((uint32_t)(((SCB->SHP[_SHP_IDX(IRQn)] >> _BIT_SHIFT(IRQn) ) & (uint32_t)0xFFUL) >> (8U - __NVIC_PRIO_BITS))); + } +} + + +/** + \brief Set Interrupt Vector + \details Sets an interrupt vector in SRAM based interrupt vector table. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + VTOR must been relocated to SRAM before. + \param [in] IRQn Interrupt number + \param [in] vector Address of interrupt handler function + */ +__STATIC_INLINE void __NVIC_SetVector(IRQn_Type IRQn, uint32_t vector) +{ + uint32_t *vectors = (uint32_t *)SCB->VTOR; + vectors[(int32_t)IRQn + NVIC_USER_IRQ_OFFSET] = vector; +} + + +/** + \brief Get Interrupt Vector + \details Reads an interrupt vector from interrupt vector table. 
+ The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \return Address of interrupt handler function + */ +__STATIC_INLINE uint32_t __NVIC_GetVector(IRQn_Type IRQn) +{ + uint32_t *vectors = (uint32_t *)SCB->VTOR; + return vectors[(int32_t)IRQn + NVIC_USER_IRQ_OFFSET]; +} + + +/** + \brief System Reset + \details Initiates a system reset request to reset the MCU. + */ +__NO_RETURN __STATIC_INLINE void __NVIC_SystemReset(void) +{ + __DSB(); /* Ensure all outstanding memory accesses included + buffered write are completed before reset */ + SCB->AIRCR = ((0x5FAUL << SCB_AIRCR_VECTKEY_Pos) | + SCB_AIRCR_SYSRESETREQ_Msk); + __DSB(); /* Ensure completion of memory access */ + + for(;;) /* wait until reset */ + { + __NOP(); + } +} + +/*@} end of CMSIS_Core_NVICFunctions */ + + +/* ########################## FPU functions #################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_FpuFunctions FPU Functions + \brief Function that provides FPU type. + @{ + */ + +/** + \brief get FPU type + \details returns the FPU type + \returns + - \b 0: No FPU + - \b 1: Single precision FPU + - \b 2: Double + Single precision FPU + */ +__STATIC_INLINE uint32_t SCB_GetFPUType(void) +{ + return 0U; /* No FPU */ +} + + +/*@} end of CMSIS_Core_FpuFunctions */ + + + +/* ################################## SysTick function ############################################ */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_SysTickFunctions SysTick Functions + \brief Functions that configure the System. + @{ + */ + +#if defined (__Vendor_SysTickConfig) && (__Vendor_SysTickConfig == 0U) + +/** + \brief System Tick Configuration + \details Initializes the System Timer and its interrupt, and starts the System Tick Timer. + Counter is in free running mode to generate periodic interrupts. + \param [in] ticks Number of ticks between two interrupts. + \return 0 Function succeeded. + \return 1 Function failed. + \note When the variable __Vendor_SysTickConfig is set to 1, then the + function SysTick_Config is not included. In this case, the file device.h + must contain a vendor-specific implementation of this function. 
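+             A common call site (illustrative): a 1 ms tick derived from the CMSIS
+             SystemCoreClock variable maintained by the device's system_<device>.c.
+             \code
+             if (SysTick_Config(SystemCoreClock / 1000U) != 0U)   // 1 ms period
+             {
+               // the reload value does not fit the 24-bit LOAD register; handle the error
+             }
+             \endcode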
+ */ +__STATIC_INLINE uint32_t SysTick_Config(uint32_t ticks) +{ + if ((ticks - 1UL) > SysTick_LOAD_RELOAD_Msk) + { + return (1UL); /* Reload value impossible */ + } + + SysTick->LOAD = (uint32_t)(ticks - 1UL); /* set reload register */ + NVIC_SetPriority (SysTick_IRQn, (1UL << __NVIC_PRIO_BITS) - 1UL); /* set Priority for Systick Interrupt */ + SysTick->VAL = 0UL; /* Load the SysTick Counter Value */ + SysTick->CTRL = SysTick_CTRL_CLKSOURCE_Msk | + SysTick_CTRL_TICKINT_Msk | + SysTick_CTRL_ENABLE_Msk; /* Enable SysTick IRQ and SysTick Timer */ + return (0UL); /* Function successful */ +} + +#endif + +/*@} end of CMSIS_Core_SysTickFunctions */ + + + + +#ifdef __cplusplus +} +#endif + +#endif /* __CORE_SC000_H_DEPENDANT */ + +#endif /* __CMSIS_GENERIC */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Drivers/CMSIS/Include/core_sc300.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Drivers/CMSIS/Include/core_sc300.h new file mode 100644 index 0000000..665822d --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Drivers/CMSIS/Include/core_sc300.h @@ -0,0 +1,1915 @@ +/**************************************************************************//** + * @file core_sc300.h + * @brief CMSIS SC300 Core Peripheral Access Layer Header File + * @version V5.0.6 + * @date 04. June 2018 + ******************************************************************************/ +/* + * Copyright (c) 2009-2018 Arm Limited. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#if defined ( __ICCARM__ ) + #pragma system_include /* treat file as system include file for MISRA check */ +#elif defined (__clang__) + #pragma clang system_header /* treat file as system include file */ +#endif + +#ifndef __CORE_SC300_H_GENERIC +#define __CORE_SC300_H_GENERIC + +#include + +#ifdef __cplusplus + extern "C" { +#endif + +/** + \page CMSIS_MISRA_Exceptions MISRA-C:2004 Compliance Exceptions + CMSIS violates the following MISRA-C:2004 rules: + + \li Required Rule 8.5, object/function definition in header file.
+ Function definitions in header files are used to allow 'inlining'. + + \li Required Rule 18.4, declaration of union type or object of union type: '{...}'.<br>
+ Unions are used for effective representation of core registers. + + \li Advisory Rule 19.7, Function-like macro defined.<br>
+ Function-like macros are used to allow more efficient code. + */ + + +/******************************************************************************* + * CMSIS definitions + ******************************************************************************/ +/** + \ingroup SC3000 + @{ + */ + +#include "cmsis_version.h" + +/* CMSIS SC300 definitions */ +#define __SC300_CMSIS_VERSION_MAIN (__CM_CMSIS_VERSION_MAIN) /*!< \deprecated [31:16] CMSIS HAL main version */ +#define __SC300_CMSIS_VERSION_SUB (__CM_CMSIS_VERSION_SUB) /*!< \deprecated [15:0] CMSIS HAL sub version */ +#define __SC300_CMSIS_VERSION ((__SC300_CMSIS_VERSION_MAIN << 16U) | \ + __SC300_CMSIS_VERSION_SUB ) /*!< \deprecated CMSIS HAL version number */ + +#define __CORTEX_SC (300U) /*!< Cortex secure core */ + +/** __FPU_USED indicates whether an FPU is used or not. + This core does not support an FPU at all +*/ +#define __FPU_USED 0U + +#if defined ( __CC_ARM ) + #if defined __TARGET_FPU_VFP + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined (__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050) + #if defined __ARM_PCS_VFP + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined ( __GNUC__ ) + #if defined (__VFP_FP__) && !defined(__SOFTFP__) + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined ( __ICCARM__ ) + #if defined __ARMVFP__ + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined ( __TI_ARM__ ) + #if defined __TI_VFP_SUPPORT__ + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined ( __TASKING__ ) + #if defined __FPU_VFP__ + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined ( __CSMC__ ) + #if ( __CSMC__ & 0x400U) + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#endif + +#include "cmsis_compiler.h" /* CMSIS compiler specific defines */ + + +#ifdef __cplusplus +} +#endif + +#endif /* __CORE_SC300_H_GENERIC */ + +#ifndef __CMSIS_GENERIC + +#ifndef __CORE_SC300_H_DEPENDANT +#define __CORE_SC300_H_DEPENDANT + +#ifdef __cplusplus + extern "C" { +#endif + +/* check device defines and use defaults */ +#if defined __CHECK_DEVICE_DEFINES + #ifndef __SC300_REV + #define __SC300_REV 0x0000U + #warning "__SC300_REV not defined in device header file; using default!" + #endif + + #ifndef __MPU_PRESENT + #define __MPU_PRESENT 0U + #warning "__MPU_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __NVIC_PRIO_BITS + #define __NVIC_PRIO_BITS 3U + #warning "__NVIC_PRIO_BITS not defined in device header file; using default!" + #endif + + #ifndef __Vendor_SysTickConfig + #define __Vendor_SysTickConfig 0U + #warning "__Vendor_SysTickConfig not defined in device header file; using default!" + #endif +#endif + +/* IO definitions (access restrictions to peripheral registers) */ +/** + \defgroup CMSIS_glob_defs CMSIS Global Defines + + IO Type Qualifiers are used + \li to specify the access to peripheral variables. + \li for automatic generation of peripheral register debug information. 
+*/ +#ifdef __cplusplus + #define __I volatile /*!< Defines 'read only' permissions */ +#else + #define __I volatile const /*!< Defines 'read only' permissions */ +#endif +#define __O volatile /*!< Defines 'write only' permissions */ +#define __IO volatile /*!< Defines 'read / write' permissions */ + +/* following defines should be used for structure members */ +#define __IM volatile const /*! Defines 'read only' structure member permissions */ +#define __OM volatile /*! Defines 'write only' structure member permissions */ +#define __IOM volatile /*! Defines 'read / write' structure member permissions */ + +/*@} end of group SC300 */ + + + +/******************************************************************************* + * Register Abstraction + Core Register contain: + - Core Register + - Core NVIC Register + - Core SCB Register + - Core SysTick Register + - Core Debug Register + - Core MPU Register + ******************************************************************************/ +/** + \defgroup CMSIS_core_register Defines and Type Definitions + \brief Type definitions and defines for Cortex-M processor based devices. +*/ + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_CORE Status and Control Registers + \brief Core Register type definitions. + @{ + */ + +/** + \brief Union type to access the Application Program Status Register (APSR). + */ +typedef union +{ + struct + { + uint32_t _reserved0:27; /*!< bit: 0..26 Reserved */ + uint32_t Q:1; /*!< bit: 27 Saturation condition flag */ + uint32_t V:1; /*!< bit: 28 Overflow condition code flag */ + uint32_t C:1; /*!< bit: 29 Carry condition code flag */ + uint32_t Z:1; /*!< bit: 30 Zero condition code flag */ + uint32_t N:1; /*!< bit: 31 Negative condition code flag */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} APSR_Type; + +/* APSR Register Definitions */ +#define APSR_N_Pos 31U /*!< APSR: N Position */ +#define APSR_N_Msk (1UL << APSR_N_Pos) /*!< APSR: N Mask */ + +#define APSR_Z_Pos 30U /*!< APSR: Z Position */ +#define APSR_Z_Msk (1UL << APSR_Z_Pos) /*!< APSR: Z Mask */ + +#define APSR_C_Pos 29U /*!< APSR: C Position */ +#define APSR_C_Msk (1UL << APSR_C_Pos) /*!< APSR: C Mask */ + +#define APSR_V_Pos 28U /*!< APSR: V Position */ +#define APSR_V_Msk (1UL << APSR_V_Pos) /*!< APSR: V Mask */ + +#define APSR_Q_Pos 27U /*!< APSR: Q Position */ +#define APSR_Q_Msk (1UL << APSR_Q_Pos) /*!< APSR: Q Mask */ + + +/** + \brief Union type to access the Interrupt Program Status Register (IPSR). + */ +typedef union +{ + struct + { + uint32_t ISR:9; /*!< bit: 0.. 8 Exception number */ + uint32_t _reserved0:23; /*!< bit: 9..31 Reserved */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} IPSR_Type; + +/* IPSR Register Definitions */ +#define IPSR_ISR_Pos 0U /*!< IPSR: ISR Position */ +#define IPSR_ISR_Msk (0x1FFUL /*<< IPSR_ISR_Pos*/) /*!< IPSR: ISR Mask */ + + +/** + \brief Union type to access the Special-Purpose Program Status Registers (xPSR). + */ +typedef union +{ + struct + { + uint32_t ISR:9; /*!< bit: 0.. 
8 Exception number */ + uint32_t _reserved0:1; /*!< bit: 9 Reserved */ + uint32_t ICI_IT_1:6; /*!< bit: 10..15 ICI/IT part 1 */ + uint32_t _reserved1:8; /*!< bit: 16..23 Reserved */ + uint32_t T:1; /*!< bit: 24 Thumb bit */ + uint32_t ICI_IT_2:2; /*!< bit: 25..26 ICI/IT part 2 */ + uint32_t Q:1; /*!< bit: 27 Saturation condition flag */ + uint32_t V:1; /*!< bit: 28 Overflow condition code flag */ + uint32_t C:1; /*!< bit: 29 Carry condition code flag */ + uint32_t Z:1; /*!< bit: 30 Zero condition code flag */ + uint32_t N:1; /*!< bit: 31 Negative condition code flag */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} xPSR_Type; + +/* xPSR Register Definitions */ +#define xPSR_N_Pos 31U /*!< xPSR: N Position */ +#define xPSR_N_Msk (1UL << xPSR_N_Pos) /*!< xPSR: N Mask */ + +#define xPSR_Z_Pos 30U /*!< xPSR: Z Position */ +#define xPSR_Z_Msk (1UL << xPSR_Z_Pos) /*!< xPSR: Z Mask */ + +#define xPSR_C_Pos 29U /*!< xPSR: C Position */ +#define xPSR_C_Msk (1UL << xPSR_C_Pos) /*!< xPSR: C Mask */ + +#define xPSR_V_Pos 28U /*!< xPSR: V Position */ +#define xPSR_V_Msk (1UL << xPSR_V_Pos) /*!< xPSR: V Mask */ + +#define xPSR_Q_Pos 27U /*!< xPSR: Q Position */ +#define xPSR_Q_Msk (1UL << xPSR_Q_Pos) /*!< xPSR: Q Mask */ + +#define xPSR_ICI_IT_2_Pos 25U /*!< xPSR: ICI/IT part 2 Position */ +#define xPSR_ICI_IT_2_Msk (3UL << xPSR_ICI_IT_2_Pos) /*!< xPSR: ICI/IT part 2 Mask */ + +#define xPSR_T_Pos 24U /*!< xPSR: T Position */ +#define xPSR_T_Msk (1UL << xPSR_T_Pos) /*!< xPSR: T Mask */ + +#define xPSR_ICI_IT_1_Pos 10U /*!< xPSR: ICI/IT part 1 Position */ +#define xPSR_ICI_IT_1_Msk (0x3FUL << xPSR_ICI_IT_1_Pos) /*!< xPSR: ICI/IT part 1 Mask */ + +#define xPSR_ISR_Pos 0U /*!< xPSR: ISR Position */ +#define xPSR_ISR_Msk (0x1FFUL /*<< xPSR_ISR_Pos*/) /*!< xPSR: ISR Mask */ + + +/** + \brief Union type to access the Control Registers (CONTROL). + */ +typedef union +{ + struct + { + uint32_t nPRIV:1; /*!< bit: 0 Execution privilege in Thread mode */ + uint32_t SPSEL:1; /*!< bit: 1 Stack to be used */ + uint32_t _reserved1:30; /*!< bit: 2..31 Reserved */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} CONTROL_Type; + +/* CONTROL Register Definitions */ +#define CONTROL_SPSEL_Pos 1U /*!< CONTROL: SPSEL Position */ +#define CONTROL_SPSEL_Msk (1UL << CONTROL_SPSEL_Pos) /*!< CONTROL: SPSEL Mask */ + +#define CONTROL_nPRIV_Pos 0U /*!< CONTROL: nPRIV Position */ +#define CONTROL_nPRIV_Msk (1UL /*<< CONTROL_nPRIV_Pos*/) /*!< CONTROL: nPRIV Mask */ + +/*@} end of group CMSIS_CORE */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_NVIC Nested Vectored Interrupt Controller (NVIC) + \brief Type definitions for the NVIC Registers + @{ + */ + +/** + \brief Structure type to access the Nested Vectored Interrupt Controller (NVIC). 
+ */ +typedef struct +{ + __IOM uint32_t ISER[8U]; /*!< Offset: 0x000 (R/W) Interrupt Set Enable Register */ + uint32_t RESERVED0[24U]; + __IOM uint32_t ICER[8U]; /*!< Offset: 0x080 (R/W) Interrupt Clear Enable Register */ + uint32_t RSERVED1[24U]; + __IOM uint32_t ISPR[8U]; /*!< Offset: 0x100 (R/W) Interrupt Set Pending Register */ + uint32_t RESERVED2[24U]; + __IOM uint32_t ICPR[8U]; /*!< Offset: 0x180 (R/W) Interrupt Clear Pending Register */ + uint32_t RESERVED3[24U]; + __IOM uint32_t IABR[8U]; /*!< Offset: 0x200 (R/W) Interrupt Active bit Register */ + uint32_t RESERVED4[56U]; + __IOM uint8_t IP[240U]; /*!< Offset: 0x300 (R/W) Interrupt Priority Register (8Bit wide) */ + uint32_t RESERVED5[644U]; + __OM uint32_t STIR; /*!< Offset: 0xE00 ( /W) Software Trigger Interrupt Register */ +} NVIC_Type; + +/* Software Triggered Interrupt Register Definitions */ +#define NVIC_STIR_INTID_Pos 0U /*!< STIR: INTLINESNUM Position */ +#define NVIC_STIR_INTID_Msk (0x1FFUL /*<< NVIC_STIR_INTID_Pos*/) /*!< STIR: INTLINESNUM Mask */ + +/*@} end of group CMSIS_NVIC */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SCB System Control Block (SCB) + \brief Type definitions for the System Control Block Registers + @{ + */ + +/** + \brief Structure type to access the System Control Block (SCB). + */ +typedef struct +{ + __IM uint32_t CPUID; /*!< Offset: 0x000 (R/ ) CPUID Base Register */ + __IOM uint32_t ICSR; /*!< Offset: 0x004 (R/W) Interrupt Control and State Register */ + __IOM uint32_t VTOR; /*!< Offset: 0x008 (R/W) Vector Table Offset Register */ + __IOM uint32_t AIRCR; /*!< Offset: 0x00C (R/W) Application Interrupt and Reset Control Register */ + __IOM uint32_t SCR; /*!< Offset: 0x010 (R/W) System Control Register */ + __IOM uint32_t CCR; /*!< Offset: 0x014 (R/W) Configuration Control Register */ + __IOM uint8_t SHP[12U]; /*!< Offset: 0x018 (R/W) System Handlers Priority Registers (4-7, 8-11, 12-15) */ + __IOM uint32_t SHCSR; /*!< Offset: 0x024 (R/W) System Handler Control and State Register */ + __IOM uint32_t CFSR; /*!< Offset: 0x028 (R/W) Configurable Fault Status Register */ + __IOM uint32_t HFSR; /*!< Offset: 0x02C (R/W) HardFault Status Register */ + __IOM uint32_t DFSR; /*!< Offset: 0x030 (R/W) Debug Fault Status Register */ + __IOM uint32_t MMFAR; /*!< Offset: 0x034 (R/W) MemManage Fault Address Register */ + __IOM uint32_t BFAR; /*!< Offset: 0x038 (R/W) BusFault Address Register */ + __IOM uint32_t AFSR; /*!< Offset: 0x03C (R/W) Auxiliary Fault Status Register */ + __IM uint32_t PFR[2U]; /*!< Offset: 0x040 (R/ ) Processor Feature Register */ + __IM uint32_t DFR; /*!< Offset: 0x048 (R/ ) Debug Feature Register */ + __IM uint32_t ADR; /*!< Offset: 0x04C (R/ ) Auxiliary Feature Register */ + __IM uint32_t MMFR[4U]; /*!< Offset: 0x050 (R/ ) Memory Model Feature Register */ + __IM uint32_t ISAR[5U]; /*!< Offset: 0x060 (R/ ) Instruction Set Attributes Register */ + uint32_t RESERVED0[5U]; + __IOM uint32_t CPACR; /*!< Offset: 0x088 (R/W) Coprocessor Access Control Register */ + uint32_t RESERVED1[129U]; + __IOM uint32_t SFCR; /*!< Offset: 0x290 (R/W) Security Features Control Register */ +} SCB_Type; + +/* SCB CPUID Register Definitions */ +#define SCB_CPUID_IMPLEMENTER_Pos 24U /*!< SCB CPUID: IMPLEMENTER Position */ +#define SCB_CPUID_IMPLEMENTER_Msk (0xFFUL << SCB_CPUID_IMPLEMENTER_Pos) /*!< SCB CPUID: IMPLEMENTER Mask */ + +#define SCB_CPUID_VARIANT_Pos 20U /*!< SCB CPUID: VARIANT Position */ +#define SCB_CPUID_VARIANT_Msk (0xFUL << SCB_CPUID_VARIANT_Pos) /*!< SCB CPUID: VARIANT Mask */ 
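/* --- Editor's illustrative sketch (not part of the patch) ------------------
   The *_Pos / *_Msk pairs defined throughout this header are meant to be
   used together: AND with the mask, then shift by the position. A minimal
   example decoding two CPUID fields; the SCB peripheral pointer used here
   is defined further down in this same header, and the helper names are
   hypothetical. */
static inline uint32_t cpuid_implementer(void)
{
  /* Isolate bits [31:24] and shift them down to get the implementer code. */
  return (SCB->CPUID & SCB_CPUID_IMPLEMENTER_Msk) >> SCB_CPUID_IMPLEMENTER_Pos;
}

static inline uint32_t cpuid_variant(void)
{
  /* Same idiom for the VARIANT field in bits [23:20]. */
  return (SCB->CPUID & SCB_CPUID_VARIANT_Msk) >> SCB_CPUID_VARIANT_Pos;
}
/* --------------------------------------------------------------------------- */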
+ +#define SCB_CPUID_ARCHITECTURE_Pos 16U /*!< SCB CPUID: ARCHITECTURE Position */ +#define SCB_CPUID_ARCHITECTURE_Msk (0xFUL << SCB_CPUID_ARCHITECTURE_Pos) /*!< SCB CPUID: ARCHITECTURE Mask */ + +#define SCB_CPUID_PARTNO_Pos 4U /*!< SCB CPUID: PARTNO Position */ +#define SCB_CPUID_PARTNO_Msk (0xFFFUL << SCB_CPUID_PARTNO_Pos) /*!< SCB CPUID: PARTNO Mask */ + +#define SCB_CPUID_REVISION_Pos 0U /*!< SCB CPUID: REVISION Position */ +#define SCB_CPUID_REVISION_Msk (0xFUL /*<< SCB_CPUID_REVISION_Pos*/) /*!< SCB CPUID: REVISION Mask */ + +/* SCB Interrupt Control State Register Definitions */ +#define SCB_ICSR_NMIPENDSET_Pos 31U /*!< SCB ICSR: NMIPENDSET Position */ +#define SCB_ICSR_NMIPENDSET_Msk (1UL << SCB_ICSR_NMIPENDSET_Pos) /*!< SCB ICSR: NMIPENDSET Mask */ + +#define SCB_ICSR_PENDSVSET_Pos 28U /*!< SCB ICSR: PENDSVSET Position */ +#define SCB_ICSR_PENDSVSET_Msk (1UL << SCB_ICSR_PENDSVSET_Pos) /*!< SCB ICSR: PENDSVSET Mask */ + +#define SCB_ICSR_PENDSVCLR_Pos 27U /*!< SCB ICSR: PENDSVCLR Position */ +#define SCB_ICSR_PENDSVCLR_Msk (1UL << SCB_ICSR_PENDSVCLR_Pos) /*!< SCB ICSR: PENDSVCLR Mask */ + +#define SCB_ICSR_PENDSTSET_Pos 26U /*!< SCB ICSR: PENDSTSET Position */ +#define SCB_ICSR_PENDSTSET_Msk (1UL << SCB_ICSR_PENDSTSET_Pos) /*!< SCB ICSR: PENDSTSET Mask */ + +#define SCB_ICSR_PENDSTCLR_Pos 25U /*!< SCB ICSR: PENDSTCLR Position */ +#define SCB_ICSR_PENDSTCLR_Msk (1UL << SCB_ICSR_PENDSTCLR_Pos) /*!< SCB ICSR: PENDSTCLR Mask */ + +#define SCB_ICSR_ISRPREEMPT_Pos 23U /*!< SCB ICSR: ISRPREEMPT Position */ +#define SCB_ICSR_ISRPREEMPT_Msk (1UL << SCB_ICSR_ISRPREEMPT_Pos) /*!< SCB ICSR: ISRPREEMPT Mask */ + +#define SCB_ICSR_ISRPENDING_Pos 22U /*!< SCB ICSR: ISRPENDING Position */ +#define SCB_ICSR_ISRPENDING_Msk (1UL << SCB_ICSR_ISRPENDING_Pos) /*!< SCB ICSR: ISRPENDING Mask */ + +#define SCB_ICSR_VECTPENDING_Pos 12U /*!< SCB ICSR: VECTPENDING Position */ +#define SCB_ICSR_VECTPENDING_Msk (0x1FFUL << SCB_ICSR_VECTPENDING_Pos) /*!< SCB ICSR: VECTPENDING Mask */ + +#define SCB_ICSR_RETTOBASE_Pos 11U /*!< SCB ICSR: RETTOBASE Position */ +#define SCB_ICSR_RETTOBASE_Msk (1UL << SCB_ICSR_RETTOBASE_Pos) /*!< SCB ICSR: RETTOBASE Mask */ + +#define SCB_ICSR_VECTACTIVE_Pos 0U /*!< SCB ICSR: VECTACTIVE Position */ +#define SCB_ICSR_VECTACTIVE_Msk (0x1FFUL /*<< SCB_ICSR_VECTACTIVE_Pos*/) /*!< SCB ICSR: VECTACTIVE Mask */ + +/* SCB Vector Table Offset Register Definitions */ +#define SCB_VTOR_TBLBASE_Pos 29U /*!< SCB VTOR: TBLBASE Position */ +#define SCB_VTOR_TBLBASE_Msk (1UL << SCB_VTOR_TBLBASE_Pos) /*!< SCB VTOR: TBLBASE Mask */ + +#define SCB_VTOR_TBLOFF_Pos 7U /*!< SCB VTOR: TBLOFF Position */ +#define SCB_VTOR_TBLOFF_Msk (0x3FFFFFUL << SCB_VTOR_TBLOFF_Pos) /*!< SCB VTOR: TBLOFF Mask */ + +/* SCB Application Interrupt and Reset Control Register Definitions */ +#define SCB_AIRCR_VECTKEY_Pos 16U /*!< SCB AIRCR: VECTKEY Position */ +#define SCB_AIRCR_VECTKEY_Msk (0xFFFFUL << SCB_AIRCR_VECTKEY_Pos) /*!< SCB AIRCR: VECTKEY Mask */ + +#define SCB_AIRCR_VECTKEYSTAT_Pos 16U /*!< SCB AIRCR: VECTKEYSTAT Position */ +#define SCB_AIRCR_VECTKEYSTAT_Msk (0xFFFFUL << SCB_AIRCR_VECTKEYSTAT_Pos) /*!< SCB AIRCR: VECTKEYSTAT Mask */ + +#define SCB_AIRCR_ENDIANESS_Pos 15U /*!< SCB AIRCR: ENDIANESS Position */ +#define SCB_AIRCR_ENDIANESS_Msk (1UL << SCB_AIRCR_ENDIANESS_Pos) /*!< SCB AIRCR: ENDIANESS Mask */ + +#define SCB_AIRCR_PRIGROUP_Pos 8U /*!< SCB AIRCR: PRIGROUP Position */ +#define SCB_AIRCR_PRIGROUP_Msk (7UL << SCB_AIRCR_PRIGROUP_Pos) /*!< SCB AIRCR: PRIGROUP Mask */ + +#define SCB_AIRCR_SYSRESETREQ_Pos 2U 
/*!< SCB AIRCR: SYSRESETREQ Position */ +#define SCB_AIRCR_SYSRESETREQ_Msk (1UL << SCB_AIRCR_SYSRESETREQ_Pos) /*!< SCB AIRCR: SYSRESETREQ Mask */ + +#define SCB_AIRCR_VECTCLRACTIVE_Pos 1U /*!< SCB AIRCR: VECTCLRACTIVE Position */ +#define SCB_AIRCR_VECTCLRACTIVE_Msk (1UL << SCB_AIRCR_VECTCLRACTIVE_Pos) /*!< SCB AIRCR: VECTCLRACTIVE Mask */ + +#define SCB_AIRCR_VECTRESET_Pos 0U /*!< SCB AIRCR: VECTRESET Position */ +#define SCB_AIRCR_VECTRESET_Msk (1UL /*<< SCB_AIRCR_VECTRESET_Pos*/) /*!< SCB AIRCR: VECTRESET Mask */ + +/* SCB System Control Register Definitions */ +#define SCB_SCR_SEVONPEND_Pos 4U /*!< SCB SCR: SEVONPEND Position */ +#define SCB_SCR_SEVONPEND_Msk (1UL << SCB_SCR_SEVONPEND_Pos) /*!< SCB SCR: SEVONPEND Mask */ + +#define SCB_SCR_SLEEPDEEP_Pos 2U /*!< SCB SCR: SLEEPDEEP Position */ +#define SCB_SCR_SLEEPDEEP_Msk (1UL << SCB_SCR_SLEEPDEEP_Pos) /*!< SCB SCR: SLEEPDEEP Mask */ + +#define SCB_SCR_SLEEPONEXIT_Pos 1U /*!< SCB SCR: SLEEPONEXIT Position */ +#define SCB_SCR_SLEEPONEXIT_Msk (1UL << SCB_SCR_SLEEPONEXIT_Pos) /*!< SCB SCR: SLEEPONEXIT Mask */ + +/* SCB Configuration Control Register Definitions */ +#define SCB_CCR_STKALIGN_Pos 9U /*!< SCB CCR: STKALIGN Position */ +#define SCB_CCR_STKALIGN_Msk (1UL << SCB_CCR_STKALIGN_Pos) /*!< SCB CCR: STKALIGN Mask */ + +#define SCB_CCR_BFHFNMIGN_Pos 8U /*!< SCB CCR: BFHFNMIGN Position */ +#define SCB_CCR_BFHFNMIGN_Msk (1UL << SCB_CCR_BFHFNMIGN_Pos) /*!< SCB CCR: BFHFNMIGN Mask */ + +#define SCB_CCR_DIV_0_TRP_Pos 4U /*!< SCB CCR: DIV_0_TRP Position */ +#define SCB_CCR_DIV_0_TRP_Msk (1UL << SCB_CCR_DIV_0_TRP_Pos) /*!< SCB CCR: DIV_0_TRP Mask */ + +#define SCB_CCR_UNALIGN_TRP_Pos 3U /*!< SCB CCR: UNALIGN_TRP Position */ +#define SCB_CCR_UNALIGN_TRP_Msk (1UL << SCB_CCR_UNALIGN_TRP_Pos) /*!< SCB CCR: UNALIGN_TRP Mask */ + +#define SCB_CCR_USERSETMPEND_Pos 1U /*!< SCB CCR: USERSETMPEND Position */ +#define SCB_CCR_USERSETMPEND_Msk (1UL << SCB_CCR_USERSETMPEND_Pos) /*!< SCB CCR: USERSETMPEND Mask */ + +#define SCB_CCR_NONBASETHRDENA_Pos 0U /*!< SCB CCR: NONBASETHRDENA Position */ +#define SCB_CCR_NONBASETHRDENA_Msk (1UL /*<< SCB_CCR_NONBASETHRDENA_Pos*/) /*!< SCB CCR: NONBASETHRDENA Mask */ + +/* SCB System Handler Control and State Register Definitions */ +#define SCB_SHCSR_USGFAULTENA_Pos 18U /*!< SCB SHCSR: USGFAULTENA Position */ +#define SCB_SHCSR_USGFAULTENA_Msk (1UL << SCB_SHCSR_USGFAULTENA_Pos) /*!< SCB SHCSR: USGFAULTENA Mask */ + +#define SCB_SHCSR_BUSFAULTENA_Pos 17U /*!< SCB SHCSR: BUSFAULTENA Position */ +#define SCB_SHCSR_BUSFAULTENA_Msk (1UL << SCB_SHCSR_BUSFAULTENA_Pos) /*!< SCB SHCSR: BUSFAULTENA Mask */ + +#define SCB_SHCSR_MEMFAULTENA_Pos 16U /*!< SCB SHCSR: MEMFAULTENA Position */ +#define SCB_SHCSR_MEMFAULTENA_Msk (1UL << SCB_SHCSR_MEMFAULTENA_Pos) /*!< SCB SHCSR: MEMFAULTENA Mask */ + +#define SCB_SHCSR_SVCALLPENDED_Pos 15U /*!< SCB SHCSR: SVCALLPENDED Position */ +#define SCB_SHCSR_SVCALLPENDED_Msk (1UL << SCB_SHCSR_SVCALLPENDED_Pos) /*!< SCB SHCSR: SVCALLPENDED Mask */ + +#define SCB_SHCSR_BUSFAULTPENDED_Pos 14U /*!< SCB SHCSR: BUSFAULTPENDED Position */ +#define SCB_SHCSR_BUSFAULTPENDED_Msk (1UL << SCB_SHCSR_BUSFAULTPENDED_Pos) /*!< SCB SHCSR: BUSFAULTPENDED Mask */ + +#define SCB_SHCSR_MEMFAULTPENDED_Pos 13U /*!< SCB SHCSR: MEMFAULTPENDED Position */ +#define SCB_SHCSR_MEMFAULTPENDED_Msk (1UL << SCB_SHCSR_MEMFAULTPENDED_Pos) /*!< SCB SHCSR: MEMFAULTPENDED Mask */ + +#define SCB_SHCSR_USGFAULTPENDED_Pos 12U /*!< SCB SHCSR: USGFAULTPENDED Position */ +#define SCB_SHCSR_USGFAULTPENDED_Msk (1UL << 
SCB_SHCSR_USGFAULTPENDED_Pos) /*!< SCB SHCSR: USGFAULTPENDED Mask */ + +#define SCB_SHCSR_SYSTICKACT_Pos 11U /*!< SCB SHCSR: SYSTICKACT Position */ +#define SCB_SHCSR_SYSTICKACT_Msk (1UL << SCB_SHCSR_SYSTICKACT_Pos) /*!< SCB SHCSR: SYSTICKACT Mask */ + +#define SCB_SHCSR_PENDSVACT_Pos 10U /*!< SCB SHCSR: PENDSVACT Position */ +#define SCB_SHCSR_PENDSVACT_Msk (1UL << SCB_SHCSR_PENDSVACT_Pos) /*!< SCB SHCSR: PENDSVACT Mask */ + +#define SCB_SHCSR_MONITORACT_Pos 8U /*!< SCB SHCSR: MONITORACT Position */ +#define SCB_SHCSR_MONITORACT_Msk (1UL << SCB_SHCSR_MONITORACT_Pos) /*!< SCB SHCSR: MONITORACT Mask */ + +#define SCB_SHCSR_SVCALLACT_Pos 7U /*!< SCB SHCSR: SVCALLACT Position */ +#define SCB_SHCSR_SVCALLACT_Msk (1UL << SCB_SHCSR_SVCALLACT_Pos) /*!< SCB SHCSR: SVCALLACT Mask */ + +#define SCB_SHCSR_USGFAULTACT_Pos 3U /*!< SCB SHCSR: USGFAULTACT Position */ +#define SCB_SHCSR_USGFAULTACT_Msk (1UL << SCB_SHCSR_USGFAULTACT_Pos) /*!< SCB SHCSR: USGFAULTACT Mask */ + +#define SCB_SHCSR_BUSFAULTACT_Pos 1U /*!< SCB SHCSR: BUSFAULTACT Position */ +#define SCB_SHCSR_BUSFAULTACT_Msk (1UL << SCB_SHCSR_BUSFAULTACT_Pos) /*!< SCB SHCSR: BUSFAULTACT Mask */ + +#define SCB_SHCSR_MEMFAULTACT_Pos 0U /*!< SCB SHCSR: MEMFAULTACT Position */ +#define SCB_SHCSR_MEMFAULTACT_Msk (1UL /*<< SCB_SHCSR_MEMFAULTACT_Pos*/) /*!< SCB SHCSR: MEMFAULTACT Mask */ + +/* SCB Configurable Fault Status Register Definitions */ +#define SCB_CFSR_USGFAULTSR_Pos 16U /*!< SCB CFSR: Usage Fault Status Register Position */ +#define SCB_CFSR_USGFAULTSR_Msk (0xFFFFUL << SCB_CFSR_USGFAULTSR_Pos) /*!< SCB CFSR: Usage Fault Status Register Mask */ + +#define SCB_CFSR_BUSFAULTSR_Pos 8U /*!< SCB CFSR: Bus Fault Status Register Position */ +#define SCB_CFSR_BUSFAULTSR_Msk (0xFFUL << SCB_CFSR_BUSFAULTSR_Pos) /*!< SCB CFSR: Bus Fault Status Register Mask */ + +#define SCB_CFSR_MEMFAULTSR_Pos 0U /*!< SCB CFSR: Memory Manage Fault Status Register Position */ +#define SCB_CFSR_MEMFAULTSR_Msk (0xFFUL /*<< SCB_CFSR_MEMFAULTSR_Pos*/) /*!< SCB CFSR: Memory Manage Fault Status Register Mask */ + +/* MemManage Fault Status Register (part of SCB Configurable Fault Status Register) */ +#define SCB_CFSR_MMARVALID_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 7U) /*!< SCB CFSR (MMFSR): MMARVALID Position */ +#define SCB_CFSR_MMARVALID_Msk (1UL << SCB_CFSR_MMARVALID_Pos) /*!< SCB CFSR (MMFSR): MMARVALID Mask */ + +#define SCB_CFSR_MSTKERR_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 4U) /*!< SCB CFSR (MMFSR): MSTKERR Position */ +#define SCB_CFSR_MSTKERR_Msk (1UL << SCB_CFSR_MSTKERR_Pos) /*!< SCB CFSR (MMFSR): MSTKERR Mask */ + +#define SCB_CFSR_MUNSTKERR_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 3U) /*!< SCB CFSR (MMFSR): MUNSTKERR Position */ +#define SCB_CFSR_MUNSTKERR_Msk (1UL << SCB_CFSR_MUNSTKERR_Pos) /*!< SCB CFSR (MMFSR): MUNSTKERR Mask */ + +#define SCB_CFSR_DACCVIOL_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 1U) /*!< SCB CFSR (MMFSR): DACCVIOL Position */ +#define SCB_CFSR_DACCVIOL_Msk (1UL << SCB_CFSR_DACCVIOL_Pos) /*!< SCB CFSR (MMFSR): DACCVIOL Mask */ + +#define SCB_CFSR_IACCVIOL_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 0U) /*!< SCB CFSR (MMFSR): IACCVIOL Position */ +#define SCB_CFSR_IACCVIOL_Msk (1UL /*<< SCB_CFSR_IACCVIOL_Pos*/) /*!< SCB CFSR (MMFSR): IACCVIOL Mask */ + +/* BusFault Status Register (part of SCB Configurable Fault Status Register) */ +#define SCB_CFSR_BFARVALID_Pos (SCB_CFSR_BUSFAULTSR_Pos + 7U) /*!< SCB CFSR (BFSR): BFARVALID Position */ +#define SCB_CFSR_BFARVALID_Msk (1UL << SCB_CFSR_BFARVALID_Pos) /*!< SCB CFSR (BFSR): BFARVALID Mask */ + +#define SCB_CFSR_STKERR_Pos 
(SCB_CFSR_BUSFAULTSR_Pos + 4U) /*!< SCB CFSR (BFSR): STKERR Position */ +#define SCB_CFSR_STKERR_Msk (1UL << SCB_CFSR_STKERR_Pos) /*!< SCB CFSR (BFSR): STKERR Mask */ + +#define SCB_CFSR_UNSTKERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 3U) /*!< SCB CFSR (BFSR): UNSTKERR Position */ +#define SCB_CFSR_UNSTKERR_Msk (1UL << SCB_CFSR_UNSTKERR_Pos) /*!< SCB CFSR (BFSR): UNSTKERR Mask */ + +#define SCB_CFSR_IMPRECISERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 2U) /*!< SCB CFSR (BFSR): IMPRECISERR Position */ +#define SCB_CFSR_IMPRECISERR_Msk (1UL << SCB_CFSR_IMPRECISERR_Pos) /*!< SCB CFSR (BFSR): IMPRECISERR Mask */ + +#define SCB_CFSR_PRECISERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 1U) /*!< SCB CFSR (BFSR): PRECISERR Position */ +#define SCB_CFSR_PRECISERR_Msk (1UL << SCB_CFSR_PRECISERR_Pos) /*!< SCB CFSR (BFSR): PRECISERR Mask */ + +#define SCB_CFSR_IBUSERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 0U) /*!< SCB CFSR (BFSR): IBUSERR Position */ +#define SCB_CFSR_IBUSERR_Msk (1UL << SCB_CFSR_IBUSERR_Pos) /*!< SCB CFSR (BFSR): IBUSERR Mask */ + +/* UsageFault Status Register (part of SCB Configurable Fault Status Register) */ +#define SCB_CFSR_DIVBYZERO_Pos (SCB_CFSR_USGFAULTSR_Pos + 9U) /*!< SCB CFSR (UFSR): DIVBYZERO Position */ +#define SCB_CFSR_DIVBYZERO_Msk (1UL << SCB_CFSR_DIVBYZERO_Pos) /*!< SCB CFSR (UFSR): DIVBYZERO Mask */ + +#define SCB_CFSR_UNALIGNED_Pos (SCB_CFSR_USGFAULTSR_Pos + 8U) /*!< SCB CFSR (UFSR): UNALIGNED Position */ +#define SCB_CFSR_UNALIGNED_Msk (1UL << SCB_CFSR_UNALIGNED_Pos) /*!< SCB CFSR (UFSR): UNALIGNED Mask */ + +#define SCB_CFSR_NOCP_Pos (SCB_CFSR_USGFAULTSR_Pos + 3U) /*!< SCB CFSR (UFSR): NOCP Position */ +#define SCB_CFSR_NOCP_Msk (1UL << SCB_CFSR_NOCP_Pos) /*!< SCB CFSR (UFSR): NOCP Mask */ + +#define SCB_CFSR_INVPC_Pos (SCB_CFSR_USGFAULTSR_Pos + 2U) /*!< SCB CFSR (UFSR): INVPC Position */ +#define SCB_CFSR_INVPC_Msk (1UL << SCB_CFSR_INVPC_Pos) /*!< SCB CFSR (UFSR): INVPC Mask */ + +#define SCB_CFSR_INVSTATE_Pos (SCB_CFSR_USGFAULTSR_Pos + 1U) /*!< SCB CFSR (UFSR): INVSTATE Position */ +#define SCB_CFSR_INVSTATE_Msk (1UL << SCB_CFSR_INVSTATE_Pos) /*!< SCB CFSR (UFSR): INVSTATE Mask */ + +#define SCB_CFSR_UNDEFINSTR_Pos (SCB_CFSR_USGFAULTSR_Pos + 0U) /*!< SCB CFSR (UFSR): UNDEFINSTR Position */ +#define SCB_CFSR_UNDEFINSTR_Msk (1UL << SCB_CFSR_UNDEFINSTR_Pos) /*!< SCB CFSR (UFSR): UNDEFINSTR Mask */ + +/* SCB Hard Fault Status Register Definitions */ +#define SCB_HFSR_DEBUGEVT_Pos 31U /*!< SCB HFSR: DEBUGEVT Position */ +#define SCB_HFSR_DEBUGEVT_Msk (1UL << SCB_HFSR_DEBUGEVT_Pos) /*!< SCB HFSR: DEBUGEVT Mask */ + +#define SCB_HFSR_FORCED_Pos 30U /*!< SCB HFSR: FORCED Position */ +#define SCB_HFSR_FORCED_Msk (1UL << SCB_HFSR_FORCED_Pos) /*!< SCB HFSR: FORCED Mask */ + +#define SCB_HFSR_VECTTBL_Pos 1U /*!< SCB HFSR: VECTTBL Position */ +#define SCB_HFSR_VECTTBL_Msk (1UL << SCB_HFSR_VECTTBL_Pos) /*!< SCB HFSR: VECTTBL Mask */ + +/* SCB Debug Fault Status Register Definitions */ +#define SCB_DFSR_EXTERNAL_Pos 4U /*!< SCB DFSR: EXTERNAL Position */ +#define SCB_DFSR_EXTERNAL_Msk (1UL << SCB_DFSR_EXTERNAL_Pos) /*!< SCB DFSR: EXTERNAL Mask */ + +#define SCB_DFSR_VCATCH_Pos 3U /*!< SCB DFSR: VCATCH Position */ +#define SCB_DFSR_VCATCH_Msk (1UL << SCB_DFSR_VCATCH_Pos) /*!< SCB DFSR: VCATCH Mask */ + +#define SCB_DFSR_DWTTRAP_Pos 2U /*!< SCB DFSR: DWTTRAP Position */ +#define SCB_DFSR_DWTTRAP_Msk (1UL << SCB_DFSR_DWTTRAP_Pos) /*!< SCB DFSR: DWTTRAP Mask */ + +#define SCB_DFSR_BKPT_Pos 1U /*!< SCB DFSR: BKPT Position */ +#define SCB_DFSR_BKPT_Msk (1UL << SCB_DFSR_BKPT_Pos) /*!< SCB DFSR: BKPT Mask */ + 
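/* --- Editor's illustrative sketch (not part of the patch) ------------------
   How the fault-status definitions above are typically combined: when a
   HardFault is escalated (HFSR.FORCED), the original cause is still visible
   in the CFSR sub-registers, and BFAR holds the faulting address while
   BFARVALID is set. The function name is hypothetical; a real handler would
   log or act on these values. */
static inline void hardfault_classify(void)
{
  uint32_t cfsr = SCB->CFSR;                      /* combined MMFSR/BFSR/UFSR snapshot */

  if ((SCB->HFSR & SCB_HFSR_FORCED_Msk) != 0U)
  {
    /* Escalated fault: the original cause is still encoded in cfsr,
       e.g. SCB_CFSR_PRECISERR_Msk or SCB_CFSR_UNDEFINSTR_Msk. */
  }

  if ((cfsr & SCB_CFSR_BFARVALID_Msk) != 0U)
  {
    uint32_t fault_address = SCB->BFAR;           /* address that caused the BusFault */
    (void)fault_address;
  }
}
/* --------------------------------------------------------------------------- */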
+#define SCB_DFSR_HALTED_Pos 0U /*!< SCB DFSR: HALTED Position */ +#define SCB_DFSR_HALTED_Msk (1UL /*<< SCB_DFSR_HALTED_Pos*/) /*!< SCB DFSR: HALTED Mask */ + +/*@} end of group CMSIS_SCB */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SCnSCB System Controls not in SCB (SCnSCB) + \brief Type definitions for the System Control and ID Register not in the SCB + @{ + */ + +/** + \brief Structure type to access the System Control and ID Register not in the SCB. + */ +typedef struct +{ + uint32_t RESERVED0[1U]; + __IM uint32_t ICTR; /*!< Offset: 0x004 (R/ ) Interrupt Controller Type Register */ + uint32_t RESERVED1[1U]; +} SCnSCB_Type; + +/* Interrupt Controller Type Register Definitions */ +#define SCnSCB_ICTR_INTLINESNUM_Pos 0U /*!< ICTR: INTLINESNUM Position */ +#define SCnSCB_ICTR_INTLINESNUM_Msk (0xFUL /*<< SCnSCB_ICTR_INTLINESNUM_Pos*/) /*!< ICTR: INTLINESNUM Mask */ + +/*@} end of group CMSIS_SCnotSCB */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SysTick System Tick Timer (SysTick) + \brief Type definitions for the System Timer Registers. + @{ + */ + +/** + \brief Structure type to access the System Timer (SysTick). + */ +typedef struct +{ + __IOM uint32_t CTRL; /*!< Offset: 0x000 (R/W) SysTick Control and Status Register */ + __IOM uint32_t LOAD; /*!< Offset: 0x004 (R/W) SysTick Reload Value Register */ + __IOM uint32_t VAL; /*!< Offset: 0x008 (R/W) SysTick Current Value Register */ + __IM uint32_t CALIB; /*!< Offset: 0x00C (R/ ) SysTick Calibration Register */ +} SysTick_Type; + +/* SysTick Control / Status Register Definitions */ +#define SysTick_CTRL_COUNTFLAG_Pos 16U /*!< SysTick CTRL: COUNTFLAG Position */ +#define SysTick_CTRL_COUNTFLAG_Msk (1UL << SysTick_CTRL_COUNTFLAG_Pos) /*!< SysTick CTRL: COUNTFLAG Mask */ + +#define SysTick_CTRL_CLKSOURCE_Pos 2U /*!< SysTick CTRL: CLKSOURCE Position */ +#define SysTick_CTRL_CLKSOURCE_Msk (1UL << SysTick_CTRL_CLKSOURCE_Pos) /*!< SysTick CTRL: CLKSOURCE Mask */ + +#define SysTick_CTRL_TICKINT_Pos 1U /*!< SysTick CTRL: TICKINT Position */ +#define SysTick_CTRL_TICKINT_Msk (1UL << SysTick_CTRL_TICKINT_Pos) /*!< SysTick CTRL: TICKINT Mask */ + +#define SysTick_CTRL_ENABLE_Pos 0U /*!< SysTick CTRL: ENABLE Position */ +#define SysTick_CTRL_ENABLE_Msk (1UL /*<< SysTick_CTRL_ENABLE_Pos*/) /*!< SysTick CTRL: ENABLE Mask */ + +/* SysTick Reload Register Definitions */ +#define SysTick_LOAD_RELOAD_Pos 0U /*!< SysTick LOAD: RELOAD Position */ +#define SysTick_LOAD_RELOAD_Msk (0xFFFFFFUL /*<< SysTick_LOAD_RELOAD_Pos*/) /*!< SysTick LOAD: RELOAD Mask */ + +/* SysTick Current Register Definitions */ +#define SysTick_VAL_CURRENT_Pos 0U /*!< SysTick VAL: CURRENT Position */ +#define SysTick_VAL_CURRENT_Msk (0xFFFFFFUL /*<< SysTick_VAL_CURRENT_Pos*/) /*!< SysTick VAL: CURRENT Mask */ + +/* SysTick Calibration Register Definitions */ +#define SysTick_CALIB_NOREF_Pos 31U /*!< SysTick CALIB: NOREF Position */ +#define SysTick_CALIB_NOREF_Msk (1UL << SysTick_CALIB_NOREF_Pos) /*!< SysTick CALIB: NOREF Mask */ + +#define SysTick_CALIB_SKEW_Pos 30U /*!< SysTick CALIB: SKEW Position */ +#define SysTick_CALIB_SKEW_Msk (1UL << SysTick_CALIB_SKEW_Pos) /*!< SysTick CALIB: SKEW Mask */ + +#define SysTick_CALIB_TENMS_Pos 0U /*!< SysTick CALIB: TENMS Position */ +#define SysTick_CALIB_TENMS_Msk (0xFFFFFFUL /*<< SysTick_CALIB_TENMS_Pos*/) /*!< SysTick CALIB: TENMS Mask */ + +/*@} end of group CMSIS_SysTick */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_ITM Instrumentation Trace Macrocell (ITM) + \brief Type definitions for the 
Instrumentation Trace Macrocell (ITM) + @{ + */ + +/** + \brief Structure type to access the Instrumentation Trace Macrocell Register (ITM). + */ +typedef struct +{ + __OM union + { + __OM uint8_t u8; /*!< Offset: 0x000 ( /W) ITM Stimulus Port 8-bit */ + __OM uint16_t u16; /*!< Offset: 0x000 ( /W) ITM Stimulus Port 16-bit */ + __OM uint32_t u32; /*!< Offset: 0x000 ( /W) ITM Stimulus Port 32-bit */ + } PORT [32U]; /*!< Offset: 0x000 ( /W) ITM Stimulus Port Registers */ + uint32_t RESERVED0[864U]; + __IOM uint32_t TER; /*!< Offset: 0xE00 (R/W) ITM Trace Enable Register */ + uint32_t RESERVED1[15U]; + __IOM uint32_t TPR; /*!< Offset: 0xE40 (R/W) ITM Trace Privilege Register */ + uint32_t RESERVED2[15U]; + __IOM uint32_t TCR; /*!< Offset: 0xE80 (R/W) ITM Trace Control Register */ + uint32_t RESERVED3[29U]; + __OM uint32_t IWR; /*!< Offset: 0xEF8 ( /W) ITM Integration Write Register */ + __IM uint32_t IRR; /*!< Offset: 0xEFC (R/ ) ITM Integration Read Register */ + __IOM uint32_t IMCR; /*!< Offset: 0xF00 (R/W) ITM Integration Mode Control Register */ + uint32_t RESERVED4[43U]; + __OM uint32_t LAR; /*!< Offset: 0xFB0 ( /W) ITM Lock Access Register */ + __IM uint32_t LSR; /*!< Offset: 0xFB4 (R/ ) ITM Lock Status Register */ + uint32_t RESERVED5[6U]; + __IM uint32_t PID4; /*!< Offset: 0xFD0 (R/ ) ITM Peripheral Identification Register #4 */ + __IM uint32_t PID5; /*!< Offset: 0xFD4 (R/ ) ITM Peripheral Identification Register #5 */ + __IM uint32_t PID6; /*!< Offset: 0xFD8 (R/ ) ITM Peripheral Identification Register #6 */ + __IM uint32_t PID7; /*!< Offset: 0xFDC (R/ ) ITM Peripheral Identification Register #7 */ + __IM uint32_t PID0; /*!< Offset: 0xFE0 (R/ ) ITM Peripheral Identification Register #0 */ + __IM uint32_t PID1; /*!< Offset: 0xFE4 (R/ ) ITM Peripheral Identification Register #1 */ + __IM uint32_t PID2; /*!< Offset: 0xFE8 (R/ ) ITM Peripheral Identification Register #2 */ + __IM uint32_t PID3; /*!< Offset: 0xFEC (R/ ) ITM Peripheral Identification Register #3 */ + __IM uint32_t CID0; /*!< Offset: 0xFF0 (R/ ) ITM Component Identification Register #0 */ + __IM uint32_t CID1; /*!< Offset: 0xFF4 (R/ ) ITM Component Identification Register #1 */ + __IM uint32_t CID2; /*!< Offset: 0xFF8 (R/ ) ITM Component Identification Register #2 */ + __IM uint32_t CID3; /*!< Offset: 0xFFC (R/ ) ITM Component Identification Register #3 */ +} ITM_Type; + +/* ITM Trace Privilege Register Definitions */ +#define ITM_TPR_PRIVMASK_Pos 0U /*!< ITM TPR: PRIVMASK Position */ +#define ITM_TPR_PRIVMASK_Msk (0xFUL /*<< ITM_TPR_PRIVMASK_Pos*/) /*!< ITM TPR: PRIVMASK Mask */ + +/* ITM Trace Control Register Definitions */ +#define ITM_TCR_BUSY_Pos 23U /*!< ITM TCR: BUSY Position */ +#define ITM_TCR_BUSY_Msk (1UL << ITM_TCR_BUSY_Pos) /*!< ITM TCR: BUSY Mask */ + +#define ITM_TCR_TraceBusID_Pos 16U /*!< ITM TCR: ATBID Position */ +#define ITM_TCR_TraceBusID_Msk (0x7FUL << ITM_TCR_TraceBusID_Pos) /*!< ITM TCR: ATBID Mask */ + +#define ITM_TCR_GTSFREQ_Pos 10U /*!< ITM TCR: Global timestamp frequency Position */ +#define ITM_TCR_GTSFREQ_Msk (3UL << ITM_TCR_GTSFREQ_Pos) /*!< ITM TCR: Global timestamp frequency Mask */ + +#define ITM_TCR_TSPrescale_Pos 8U /*!< ITM TCR: TSPrescale Position */ +#define ITM_TCR_TSPrescale_Msk (3UL << ITM_TCR_TSPrescale_Pos) /*!< ITM TCR: TSPrescale Mask */ + +#define ITM_TCR_SWOENA_Pos 4U /*!< ITM TCR: SWOENA Position */ +#define ITM_TCR_SWOENA_Msk (1UL << ITM_TCR_SWOENA_Pos) /*!< ITM TCR: SWOENA Mask */ + +#define ITM_TCR_DWTENA_Pos 3U /*!< ITM TCR: DWTENA Position */ +#define ITM_TCR_DWTENA_Msk 
(1UL << ITM_TCR_DWTENA_Pos) /*!< ITM TCR: DWTENA Mask */ + +#define ITM_TCR_SYNCENA_Pos 2U /*!< ITM TCR: SYNCENA Position */ +#define ITM_TCR_SYNCENA_Msk (1UL << ITM_TCR_SYNCENA_Pos) /*!< ITM TCR: SYNCENA Mask */ + +#define ITM_TCR_TSENA_Pos 1U /*!< ITM TCR: TSENA Position */ +#define ITM_TCR_TSENA_Msk (1UL << ITM_TCR_TSENA_Pos) /*!< ITM TCR: TSENA Mask */ + +#define ITM_TCR_ITMENA_Pos 0U /*!< ITM TCR: ITM Enable bit Position */ +#define ITM_TCR_ITMENA_Msk (1UL /*<< ITM_TCR_ITMENA_Pos*/) /*!< ITM TCR: ITM Enable bit Mask */ + +/* ITM Integration Write Register Definitions */ +#define ITM_IWR_ATVALIDM_Pos 0U /*!< ITM IWR: ATVALIDM Position */ +#define ITM_IWR_ATVALIDM_Msk (1UL /*<< ITM_IWR_ATVALIDM_Pos*/) /*!< ITM IWR: ATVALIDM Mask */ + +/* ITM Integration Read Register Definitions */ +#define ITM_IRR_ATREADYM_Pos 0U /*!< ITM IRR: ATREADYM Position */ +#define ITM_IRR_ATREADYM_Msk (1UL /*<< ITM_IRR_ATREADYM_Pos*/) /*!< ITM IRR: ATREADYM Mask */ + +/* ITM Integration Mode Control Register Definitions */ +#define ITM_IMCR_INTEGRATION_Pos 0U /*!< ITM IMCR: INTEGRATION Position */ +#define ITM_IMCR_INTEGRATION_Msk (1UL /*<< ITM_IMCR_INTEGRATION_Pos*/) /*!< ITM IMCR: INTEGRATION Mask */ + +/* ITM Lock Status Register Definitions */ +#define ITM_LSR_ByteAcc_Pos 2U /*!< ITM LSR: ByteAcc Position */ +#define ITM_LSR_ByteAcc_Msk (1UL << ITM_LSR_ByteAcc_Pos) /*!< ITM LSR: ByteAcc Mask */ + +#define ITM_LSR_Access_Pos 1U /*!< ITM LSR: Access Position */ +#define ITM_LSR_Access_Msk (1UL << ITM_LSR_Access_Pos) /*!< ITM LSR: Access Mask */ + +#define ITM_LSR_Present_Pos 0U /*!< ITM LSR: Present Position */ +#define ITM_LSR_Present_Msk (1UL /*<< ITM_LSR_Present_Pos*/) /*!< ITM LSR: Present Mask */ + +/*@}*/ /* end of group CMSIS_ITM */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_DWT Data Watchpoint and Trace (DWT) + \brief Type definitions for the Data Watchpoint and Trace (DWT) + @{ + */ + +/** + \brief Structure type to access the Data Watchpoint and Trace Register (DWT). 
+ */ +typedef struct +{ + __IOM uint32_t CTRL; /*!< Offset: 0x000 (R/W) Control Register */ + __IOM uint32_t CYCCNT; /*!< Offset: 0x004 (R/W) Cycle Count Register */ + __IOM uint32_t CPICNT; /*!< Offset: 0x008 (R/W) CPI Count Register */ + __IOM uint32_t EXCCNT; /*!< Offset: 0x00C (R/W) Exception Overhead Count Register */ + __IOM uint32_t SLEEPCNT; /*!< Offset: 0x010 (R/W) Sleep Count Register */ + __IOM uint32_t LSUCNT; /*!< Offset: 0x014 (R/W) LSU Count Register */ + __IOM uint32_t FOLDCNT; /*!< Offset: 0x018 (R/W) Folded-instruction Count Register */ + __IM uint32_t PCSR; /*!< Offset: 0x01C (R/ ) Program Counter Sample Register */ + __IOM uint32_t COMP0; /*!< Offset: 0x020 (R/W) Comparator Register 0 */ + __IOM uint32_t MASK0; /*!< Offset: 0x024 (R/W) Mask Register 0 */ + __IOM uint32_t FUNCTION0; /*!< Offset: 0x028 (R/W) Function Register 0 */ + uint32_t RESERVED0[1U]; + __IOM uint32_t COMP1; /*!< Offset: 0x030 (R/W) Comparator Register 1 */ + __IOM uint32_t MASK1; /*!< Offset: 0x034 (R/W) Mask Register 1 */ + __IOM uint32_t FUNCTION1; /*!< Offset: 0x038 (R/W) Function Register 1 */ + uint32_t RESERVED1[1U]; + __IOM uint32_t COMP2; /*!< Offset: 0x040 (R/W) Comparator Register 2 */ + __IOM uint32_t MASK2; /*!< Offset: 0x044 (R/W) Mask Register 2 */ + __IOM uint32_t FUNCTION2; /*!< Offset: 0x048 (R/W) Function Register 2 */ + uint32_t RESERVED2[1U]; + __IOM uint32_t COMP3; /*!< Offset: 0x050 (R/W) Comparator Register 3 */ + __IOM uint32_t MASK3; /*!< Offset: 0x054 (R/W) Mask Register 3 */ + __IOM uint32_t FUNCTION3; /*!< Offset: 0x058 (R/W) Function Register 3 */ +} DWT_Type; + +/* DWT Control Register Definitions */ +#define DWT_CTRL_NUMCOMP_Pos 28U /*!< DWT CTRL: NUMCOMP Position */ +#define DWT_CTRL_NUMCOMP_Msk (0xFUL << DWT_CTRL_NUMCOMP_Pos) /*!< DWT CTRL: NUMCOMP Mask */ + +#define DWT_CTRL_NOTRCPKT_Pos 27U /*!< DWT CTRL: NOTRCPKT Position */ +#define DWT_CTRL_NOTRCPKT_Msk (0x1UL << DWT_CTRL_NOTRCPKT_Pos) /*!< DWT CTRL: NOTRCPKT Mask */ + +#define DWT_CTRL_NOEXTTRIG_Pos 26U /*!< DWT CTRL: NOEXTTRIG Position */ +#define DWT_CTRL_NOEXTTRIG_Msk (0x1UL << DWT_CTRL_NOEXTTRIG_Pos) /*!< DWT CTRL: NOEXTTRIG Mask */ + +#define DWT_CTRL_NOCYCCNT_Pos 25U /*!< DWT CTRL: NOCYCCNT Position */ +#define DWT_CTRL_NOCYCCNT_Msk (0x1UL << DWT_CTRL_NOCYCCNT_Pos) /*!< DWT CTRL: NOCYCCNT Mask */ + +#define DWT_CTRL_NOPRFCNT_Pos 24U /*!< DWT CTRL: NOPRFCNT Position */ +#define DWT_CTRL_NOPRFCNT_Msk (0x1UL << DWT_CTRL_NOPRFCNT_Pos) /*!< DWT CTRL: NOPRFCNT Mask */ + +#define DWT_CTRL_CYCEVTENA_Pos 22U /*!< DWT CTRL: CYCEVTENA Position */ +#define DWT_CTRL_CYCEVTENA_Msk (0x1UL << DWT_CTRL_CYCEVTENA_Pos) /*!< DWT CTRL: CYCEVTENA Mask */ + +#define DWT_CTRL_FOLDEVTENA_Pos 21U /*!< DWT CTRL: FOLDEVTENA Position */ +#define DWT_CTRL_FOLDEVTENA_Msk (0x1UL << DWT_CTRL_FOLDEVTENA_Pos) /*!< DWT CTRL: FOLDEVTENA Mask */ + +#define DWT_CTRL_LSUEVTENA_Pos 20U /*!< DWT CTRL: LSUEVTENA Position */ +#define DWT_CTRL_LSUEVTENA_Msk (0x1UL << DWT_CTRL_LSUEVTENA_Pos) /*!< DWT CTRL: LSUEVTENA Mask */ + +#define DWT_CTRL_SLEEPEVTENA_Pos 19U /*!< DWT CTRL: SLEEPEVTENA Position */ +#define DWT_CTRL_SLEEPEVTENA_Msk (0x1UL << DWT_CTRL_SLEEPEVTENA_Pos) /*!< DWT CTRL: SLEEPEVTENA Mask */ + +#define DWT_CTRL_EXCEVTENA_Pos 18U /*!< DWT CTRL: EXCEVTENA Position */ +#define DWT_CTRL_EXCEVTENA_Msk (0x1UL << DWT_CTRL_EXCEVTENA_Pos) /*!< DWT CTRL: EXCEVTENA Mask */ + +#define DWT_CTRL_CPIEVTENA_Pos 17U /*!< DWT CTRL: CPIEVTENA Position */ +#define DWT_CTRL_CPIEVTENA_Msk (0x1UL << DWT_CTRL_CPIEVTENA_Pos) /*!< DWT CTRL: CPIEVTENA Mask */ + 
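/* --- Editor's illustrative sketch (not part of the patch) ------------------
   A common use of the DWT block declared above is simple cycle counting.
   The enable bits referenced here (CYCCNTENA in DWT->CTRL and TRCENA in
   CoreDebug->DEMCR) are defined further down in this header; trace must be
   enabled before the counter runs. Helper names are hypothetical. */
static inline void cycle_counter_start(void)
{
  CoreDebug->DEMCR |= CoreDebug_DEMCR_TRCENA_Msk;  /* enable the DWT/ITM blocks */
  DWT->CYCCNT       = 0U;                          /* reset the counter         */
  DWT->CTRL        |= DWT_CTRL_CYCCNTENA_Msk;      /* start counting cycles     */
}

static inline uint32_t cycle_counter_read(void)
{
  return DWT->CYCCNT;   /* wraps at 2^32; callers should difference two reads */
}
/* --------------------------------------------------------------------------- */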
+#define DWT_CTRL_EXCTRCENA_Pos 16U /*!< DWT CTRL: EXCTRCENA Position */ +#define DWT_CTRL_EXCTRCENA_Msk (0x1UL << DWT_CTRL_EXCTRCENA_Pos) /*!< DWT CTRL: EXCTRCENA Mask */ + +#define DWT_CTRL_PCSAMPLENA_Pos 12U /*!< DWT CTRL: PCSAMPLENA Position */ +#define DWT_CTRL_PCSAMPLENA_Msk (0x1UL << DWT_CTRL_PCSAMPLENA_Pos) /*!< DWT CTRL: PCSAMPLENA Mask */ + +#define DWT_CTRL_SYNCTAP_Pos 10U /*!< DWT CTRL: SYNCTAP Position */ +#define DWT_CTRL_SYNCTAP_Msk (0x3UL << DWT_CTRL_SYNCTAP_Pos) /*!< DWT CTRL: SYNCTAP Mask */ + +#define DWT_CTRL_CYCTAP_Pos 9U /*!< DWT CTRL: CYCTAP Position */ +#define DWT_CTRL_CYCTAP_Msk (0x1UL << DWT_CTRL_CYCTAP_Pos) /*!< DWT CTRL: CYCTAP Mask */ + +#define DWT_CTRL_POSTINIT_Pos 5U /*!< DWT CTRL: POSTINIT Position */ +#define DWT_CTRL_POSTINIT_Msk (0xFUL << DWT_CTRL_POSTINIT_Pos) /*!< DWT CTRL: POSTINIT Mask */ + +#define DWT_CTRL_POSTPRESET_Pos 1U /*!< DWT CTRL: POSTPRESET Position */ +#define DWT_CTRL_POSTPRESET_Msk (0xFUL << DWT_CTRL_POSTPRESET_Pos) /*!< DWT CTRL: POSTPRESET Mask */ + +#define DWT_CTRL_CYCCNTENA_Pos 0U /*!< DWT CTRL: CYCCNTENA Position */ +#define DWT_CTRL_CYCCNTENA_Msk (0x1UL /*<< DWT_CTRL_CYCCNTENA_Pos*/) /*!< DWT CTRL: CYCCNTENA Mask */ + +/* DWT CPI Count Register Definitions */ +#define DWT_CPICNT_CPICNT_Pos 0U /*!< DWT CPICNT: CPICNT Position */ +#define DWT_CPICNT_CPICNT_Msk (0xFFUL /*<< DWT_CPICNT_CPICNT_Pos*/) /*!< DWT CPICNT: CPICNT Mask */ + +/* DWT Exception Overhead Count Register Definitions */ +#define DWT_EXCCNT_EXCCNT_Pos 0U /*!< DWT EXCCNT: EXCCNT Position */ +#define DWT_EXCCNT_EXCCNT_Msk (0xFFUL /*<< DWT_EXCCNT_EXCCNT_Pos*/) /*!< DWT EXCCNT: EXCCNT Mask */ + +/* DWT Sleep Count Register Definitions */ +#define DWT_SLEEPCNT_SLEEPCNT_Pos 0U /*!< DWT SLEEPCNT: SLEEPCNT Position */ +#define DWT_SLEEPCNT_SLEEPCNT_Msk (0xFFUL /*<< DWT_SLEEPCNT_SLEEPCNT_Pos*/) /*!< DWT SLEEPCNT: SLEEPCNT Mask */ + +/* DWT LSU Count Register Definitions */ +#define DWT_LSUCNT_LSUCNT_Pos 0U /*!< DWT LSUCNT: LSUCNT Position */ +#define DWT_LSUCNT_LSUCNT_Msk (0xFFUL /*<< DWT_LSUCNT_LSUCNT_Pos*/) /*!< DWT LSUCNT: LSUCNT Mask */ + +/* DWT Folded-instruction Count Register Definitions */ +#define DWT_FOLDCNT_FOLDCNT_Pos 0U /*!< DWT FOLDCNT: FOLDCNT Position */ +#define DWT_FOLDCNT_FOLDCNT_Msk (0xFFUL /*<< DWT_FOLDCNT_FOLDCNT_Pos*/) /*!< DWT FOLDCNT: FOLDCNT Mask */ + +/* DWT Comparator Mask Register Definitions */ +#define DWT_MASK_MASK_Pos 0U /*!< DWT MASK: MASK Position */ +#define DWT_MASK_MASK_Msk (0x1FUL /*<< DWT_MASK_MASK_Pos*/) /*!< DWT MASK: MASK Mask */ + +/* DWT Comparator Function Register Definitions */ +#define DWT_FUNCTION_MATCHED_Pos 24U /*!< DWT FUNCTION: MATCHED Position */ +#define DWT_FUNCTION_MATCHED_Msk (0x1UL << DWT_FUNCTION_MATCHED_Pos) /*!< DWT FUNCTION: MATCHED Mask */ + +#define DWT_FUNCTION_DATAVADDR1_Pos 16U /*!< DWT FUNCTION: DATAVADDR1 Position */ +#define DWT_FUNCTION_DATAVADDR1_Msk (0xFUL << DWT_FUNCTION_DATAVADDR1_Pos) /*!< DWT FUNCTION: DATAVADDR1 Mask */ + +#define DWT_FUNCTION_DATAVADDR0_Pos 12U /*!< DWT FUNCTION: DATAVADDR0 Position */ +#define DWT_FUNCTION_DATAVADDR0_Msk (0xFUL << DWT_FUNCTION_DATAVADDR0_Pos) /*!< DWT FUNCTION: DATAVADDR0 Mask */ + +#define DWT_FUNCTION_DATAVSIZE_Pos 10U /*!< DWT FUNCTION: DATAVSIZE Position */ +#define DWT_FUNCTION_DATAVSIZE_Msk (0x3UL << DWT_FUNCTION_DATAVSIZE_Pos) /*!< DWT FUNCTION: DATAVSIZE Mask */ + +#define DWT_FUNCTION_LNK1ENA_Pos 9U /*!< DWT FUNCTION: LNK1ENA Position */ +#define DWT_FUNCTION_LNK1ENA_Msk (0x1UL << DWT_FUNCTION_LNK1ENA_Pos) /*!< DWT FUNCTION: LNK1ENA Mask */ + 
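/* --- Editor's illustrative sketch (not part of the patch) ------------------
   The profiling counters above (CPI, exception overhead, sleep, LSU, folded
   instructions) are only 8 bits wide and wrap quickly, so they are normally
   read and cleared periodically while the matching *EVTENA bits in DWT->CTRL
   (and TRCENA in CoreDebug->DEMCR) are enabled. The masks keep only the
   architected bits; the helper name is hypothetical. */
static inline uint32_t dwt_exception_overhead_snapshot(void)
{
  uint32_t count = DWT->EXCCNT & DWT_EXCCNT_EXCCNT_Msk;  /* 8-bit counter value */
  DWT->EXCCNT = 0U;                                      /* restart the count   */
  return count;
}
/* --------------------------------------------------------------------------- */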
+#define DWT_FUNCTION_DATAVMATCH_Pos 8U /*!< DWT FUNCTION: DATAVMATCH Position */ +#define DWT_FUNCTION_DATAVMATCH_Msk (0x1UL << DWT_FUNCTION_DATAVMATCH_Pos) /*!< DWT FUNCTION: DATAVMATCH Mask */ + +#define DWT_FUNCTION_CYCMATCH_Pos 7U /*!< DWT FUNCTION: CYCMATCH Position */ +#define DWT_FUNCTION_CYCMATCH_Msk (0x1UL << DWT_FUNCTION_CYCMATCH_Pos) /*!< DWT FUNCTION: CYCMATCH Mask */ + +#define DWT_FUNCTION_EMITRANGE_Pos 5U /*!< DWT FUNCTION: EMITRANGE Position */ +#define DWT_FUNCTION_EMITRANGE_Msk (0x1UL << DWT_FUNCTION_EMITRANGE_Pos) /*!< DWT FUNCTION: EMITRANGE Mask */ + +#define DWT_FUNCTION_FUNCTION_Pos 0U /*!< DWT FUNCTION: FUNCTION Position */ +#define DWT_FUNCTION_FUNCTION_Msk (0xFUL /*<< DWT_FUNCTION_FUNCTION_Pos*/) /*!< DWT FUNCTION: FUNCTION Mask */ + +/*@}*/ /* end of group CMSIS_DWT */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_TPI Trace Port Interface (TPI) + \brief Type definitions for the Trace Port Interface (TPI) + @{ + */ + +/** + \brief Structure type to access the Trace Port Interface Register (TPI). + */ +typedef struct +{ + __IM uint32_t SSPSR; /*!< Offset: 0x000 (R/ ) Supported Parallel Port Size Register */ + __IOM uint32_t CSPSR; /*!< Offset: 0x004 (R/W) Current Parallel Port Size Register */ + uint32_t RESERVED0[2U]; + __IOM uint32_t ACPR; /*!< Offset: 0x010 (R/W) Asynchronous Clock Prescaler Register */ + uint32_t RESERVED1[55U]; + __IOM uint32_t SPPR; /*!< Offset: 0x0F0 (R/W) Selected Pin Protocol Register */ + uint32_t RESERVED2[131U]; + __IM uint32_t FFSR; /*!< Offset: 0x300 (R/ ) Formatter and Flush Status Register */ + __IOM uint32_t FFCR; /*!< Offset: 0x304 (R/W) Formatter and Flush Control Register */ + __IM uint32_t FSCR; /*!< Offset: 0x308 (R/ ) Formatter Synchronization Counter Register */ + uint32_t RESERVED3[759U]; + __IM uint32_t TRIGGER; /*!< Offset: 0xEE8 (R/ ) TRIGGER Register */ + __IM uint32_t FIFO0; /*!< Offset: 0xEEC (R/ ) Integration ETM Data */ + __IM uint32_t ITATBCTR2; /*!< Offset: 0xEF0 (R/ ) ITATBCTR2 */ + uint32_t RESERVED4[1U]; + __IM uint32_t ITATBCTR0; /*!< Offset: 0xEF8 (R/ ) ITATBCTR0 */ + __IM uint32_t FIFO1; /*!< Offset: 0xEFC (R/ ) Integration ITM Data */ + __IOM uint32_t ITCTRL; /*!< Offset: 0xF00 (R/W) Integration Mode Control */ + uint32_t RESERVED5[39U]; + __IOM uint32_t CLAIMSET; /*!< Offset: 0xFA0 (R/W) Claim tag set */ + __IOM uint32_t CLAIMCLR; /*!< Offset: 0xFA4 (R/W) Claim tag clear */ + uint32_t RESERVED7[8U]; + __IM uint32_t DEVID; /*!< Offset: 0xFC8 (R/ ) TPIU_DEVID */ + __IM uint32_t DEVTYPE; /*!< Offset: 0xFCC (R/ ) TPIU_DEVTYPE */ +} TPI_Type; + +/* TPI Asynchronous Clock Prescaler Register Definitions */ +#define TPI_ACPR_PRESCALER_Pos 0U /*!< TPI ACPR: PRESCALER Position */ +#define TPI_ACPR_PRESCALER_Msk (0x1FFFUL /*<< TPI_ACPR_PRESCALER_Pos*/) /*!< TPI ACPR: PRESCALER Mask */ + +/* TPI Selected Pin Protocol Register Definitions */ +#define TPI_SPPR_TXMODE_Pos 0U /*!< TPI SPPR: TXMODE Position */ +#define TPI_SPPR_TXMODE_Msk (0x3UL /*<< TPI_SPPR_TXMODE_Pos*/) /*!< TPI SPPR: TXMODE Mask */ + +/* TPI Formatter and Flush Status Register Definitions */ +#define TPI_FFSR_FtNonStop_Pos 3U /*!< TPI FFSR: FtNonStop Position */ +#define TPI_FFSR_FtNonStop_Msk (0x1UL << TPI_FFSR_FtNonStop_Pos) /*!< TPI FFSR: FtNonStop Mask */ + +#define TPI_FFSR_TCPresent_Pos 2U /*!< TPI FFSR: TCPresent Position */ +#define TPI_FFSR_TCPresent_Msk (0x1UL << TPI_FFSR_TCPresent_Pos) /*!< TPI FFSR: TCPresent Mask */ + +#define TPI_FFSR_FtStopped_Pos 1U /*!< TPI FFSR: FtStopped Position */ +#define TPI_FFSR_FtStopped_Msk (0x1UL 
<< TPI_FFSR_FtStopped_Pos) /*!< TPI FFSR: FtStopped Mask */ + +#define TPI_FFSR_FlInProg_Pos 0U /*!< TPI FFSR: FlInProg Position */ +#define TPI_FFSR_FlInProg_Msk (0x1UL /*<< TPI_FFSR_FlInProg_Pos*/) /*!< TPI FFSR: FlInProg Mask */ + +/* TPI Formatter and Flush Control Register Definitions */ +#define TPI_FFCR_TrigIn_Pos 8U /*!< TPI FFCR: TrigIn Position */ +#define TPI_FFCR_TrigIn_Msk (0x1UL << TPI_FFCR_TrigIn_Pos) /*!< TPI FFCR: TrigIn Mask */ + +#define TPI_FFCR_EnFCont_Pos 1U /*!< TPI FFCR: EnFCont Position */ +#define TPI_FFCR_EnFCont_Msk (0x1UL << TPI_FFCR_EnFCont_Pos) /*!< TPI FFCR: EnFCont Mask */ + +/* TPI TRIGGER Register Definitions */ +#define TPI_TRIGGER_TRIGGER_Pos 0U /*!< TPI TRIGGER: TRIGGER Position */ +#define TPI_TRIGGER_TRIGGER_Msk (0x1UL /*<< TPI_TRIGGER_TRIGGER_Pos*/) /*!< TPI TRIGGER: TRIGGER Mask */ + +/* TPI Integration ETM Data Register Definitions (FIFO0) */ +#define TPI_FIFO0_ITM_ATVALID_Pos 29U /*!< TPI FIFO0: ITM_ATVALID Position */ +#define TPI_FIFO0_ITM_ATVALID_Msk (0x3UL << TPI_FIFO0_ITM_ATVALID_Pos) /*!< TPI FIFO0: ITM_ATVALID Mask */ + +#define TPI_FIFO0_ITM_bytecount_Pos 27U /*!< TPI FIFO0: ITM_bytecount Position */ +#define TPI_FIFO0_ITM_bytecount_Msk (0x3UL << TPI_FIFO0_ITM_bytecount_Pos) /*!< TPI FIFO0: ITM_bytecount Mask */ + +#define TPI_FIFO0_ETM_ATVALID_Pos 26U /*!< TPI FIFO0: ETM_ATVALID Position */ +#define TPI_FIFO0_ETM_ATVALID_Msk (0x3UL << TPI_FIFO0_ETM_ATVALID_Pos) /*!< TPI FIFO0: ETM_ATVALID Mask */ + +#define TPI_FIFO0_ETM_bytecount_Pos 24U /*!< TPI FIFO0: ETM_bytecount Position */ +#define TPI_FIFO0_ETM_bytecount_Msk (0x3UL << TPI_FIFO0_ETM_bytecount_Pos) /*!< TPI FIFO0: ETM_bytecount Mask */ + +#define TPI_FIFO0_ETM2_Pos 16U /*!< TPI FIFO0: ETM2 Position */ +#define TPI_FIFO0_ETM2_Msk (0xFFUL << TPI_FIFO0_ETM2_Pos) /*!< TPI FIFO0: ETM2 Mask */ + +#define TPI_FIFO0_ETM1_Pos 8U /*!< TPI FIFO0: ETM1 Position */ +#define TPI_FIFO0_ETM1_Msk (0xFFUL << TPI_FIFO0_ETM1_Pos) /*!< TPI FIFO0: ETM1 Mask */ + +#define TPI_FIFO0_ETM0_Pos 0U /*!< TPI FIFO0: ETM0 Position */ +#define TPI_FIFO0_ETM0_Msk (0xFFUL /*<< TPI_FIFO0_ETM0_Pos*/) /*!< TPI FIFO0: ETM0 Mask */ + +/* TPI ITATBCTR2 Register Definitions */ +#define TPI_ITATBCTR2_ATREADY2_Pos 0U /*!< TPI ITATBCTR2: ATREADY2 Position */ +#define TPI_ITATBCTR2_ATREADY2_Msk (0x1UL /*<< TPI_ITATBCTR2_ATREADY2_Pos*/) /*!< TPI ITATBCTR2: ATREADY2 Mask */ + +#define TPI_ITATBCTR2_ATREADY1_Pos 0U /*!< TPI ITATBCTR2: ATREADY1 Position */ +#define TPI_ITATBCTR2_ATREADY1_Msk (0x1UL /*<< TPI_ITATBCTR2_ATREADY1_Pos*/) /*!< TPI ITATBCTR2: ATREADY1 Mask */ + +/* TPI Integration ITM Data Register Definitions (FIFO1) */ +#define TPI_FIFO1_ITM_ATVALID_Pos 29U /*!< TPI FIFO1: ITM_ATVALID Position */ +#define TPI_FIFO1_ITM_ATVALID_Msk (0x3UL << TPI_FIFO1_ITM_ATVALID_Pos) /*!< TPI FIFO1: ITM_ATVALID Mask */ + +#define TPI_FIFO1_ITM_bytecount_Pos 27U /*!< TPI FIFO1: ITM_bytecount Position */ +#define TPI_FIFO1_ITM_bytecount_Msk (0x3UL << TPI_FIFO1_ITM_bytecount_Pos) /*!< TPI FIFO1: ITM_bytecount Mask */ + +#define TPI_FIFO1_ETM_ATVALID_Pos 26U /*!< TPI FIFO1: ETM_ATVALID Position */ +#define TPI_FIFO1_ETM_ATVALID_Msk (0x3UL << TPI_FIFO1_ETM_ATVALID_Pos) /*!< TPI FIFO1: ETM_ATVALID Mask */ + +#define TPI_FIFO1_ETM_bytecount_Pos 24U /*!< TPI FIFO1: ETM_bytecount Position */ +#define TPI_FIFO1_ETM_bytecount_Msk (0x3UL << TPI_FIFO1_ETM_bytecount_Pos) /*!< TPI FIFO1: ETM_bytecount Mask */ + +#define TPI_FIFO1_ITM2_Pos 16U /*!< TPI FIFO1: ITM2 Position */ +#define TPI_FIFO1_ITM2_Msk (0xFFUL << TPI_FIFO1_ITM2_Pos) /*!< TPI FIFO1: 
ITM2 Mask */ + +#define TPI_FIFO1_ITM1_Pos 8U /*!< TPI FIFO1: ITM1 Position */ +#define TPI_FIFO1_ITM1_Msk (0xFFUL << TPI_FIFO1_ITM1_Pos) /*!< TPI FIFO1: ITM1 Mask */ + +#define TPI_FIFO1_ITM0_Pos 0U /*!< TPI FIFO1: ITM0 Position */ +#define TPI_FIFO1_ITM0_Msk (0xFFUL /*<< TPI_FIFO1_ITM0_Pos*/) /*!< TPI FIFO1: ITM0 Mask */ + +/* TPI ITATBCTR0 Register Definitions */ +#define TPI_ITATBCTR0_ATREADY2_Pos 0U /*!< TPI ITATBCTR0: ATREADY2 Position */ +#define TPI_ITATBCTR0_ATREADY2_Msk (0x1UL /*<< TPI_ITATBCTR0_ATREADY2_Pos*/) /*!< TPI ITATBCTR0: ATREADY2 Mask */ + +#define TPI_ITATBCTR0_ATREADY1_Pos 0U /*!< TPI ITATBCTR0: ATREADY1 Position */ +#define TPI_ITATBCTR0_ATREADY1_Msk (0x1UL /*<< TPI_ITATBCTR0_ATREADY1_Pos*/) /*!< TPI ITATBCTR0: ATREADY1 Mask */ + +/* TPI Integration Mode Control Register Definitions */ +#define TPI_ITCTRL_Mode_Pos 0U /*!< TPI ITCTRL: Mode Position */ +#define TPI_ITCTRL_Mode_Msk (0x3UL /*<< TPI_ITCTRL_Mode_Pos*/) /*!< TPI ITCTRL: Mode Mask */ + +/* TPI DEVID Register Definitions */ +#define TPI_DEVID_NRZVALID_Pos 11U /*!< TPI DEVID: NRZVALID Position */ +#define TPI_DEVID_NRZVALID_Msk (0x1UL << TPI_DEVID_NRZVALID_Pos) /*!< TPI DEVID: NRZVALID Mask */ + +#define TPI_DEVID_MANCVALID_Pos 10U /*!< TPI DEVID: MANCVALID Position */ +#define TPI_DEVID_MANCVALID_Msk (0x1UL << TPI_DEVID_MANCVALID_Pos) /*!< TPI DEVID: MANCVALID Mask */ + +#define TPI_DEVID_PTINVALID_Pos 9U /*!< TPI DEVID: PTINVALID Position */ +#define TPI_DEVID_PTINVALID_Msk (0x1UL << TPI_DEVID_PTINVALID_Pos) /*!< TPI DEVID: PTINVALID Mask */ + +#define TPI_DEVID_MinBufSz_Pos 6U /*!< TPI DEVID: MinBufSz Position */ +#define TPI_DEVID_MinBufSz_Msk (0x7UL << TPI_DEVID_MinBufSz_Pos) /*!< TPI DEVID: MinBufSz Mask */ + +#define TPI_DEVID_AsynClkIn_Pos 5U /*!< TPI DEVID: AsynClkIn Position */ +#define TPI_DEVID_AsynClkIn_Msk (0x1UL << TPI_DEVID_AsynClkIn_Pos) /*!< TPI DEVID: AsynClkIn Mask */ + +#define TPI_DEVID_NrTraceInput_Pos 0U /*!< TPI DEVID: NrTraceInput Position */ +#define TPI_DEVID_NrTraceInput_Msk (0x1FUL /*<< TPI_DEVID_NrTraceInput_Pos*/) /*!< TPI DEVID: NrTraceInput Mask */ + +/* TPI DEVTYPE Register Definitions */ +#define TPI_DEVTYPE_SubType_Pos 4U /*!< TPI DEVTYPE: SubType Position */ +#define TPI_DEVTYPE_SubType_Msk (0xFUL /*<< TPI_DEVTYPE_SubType_Pos*/) /*!< TPI DEVTYPE: SubType Mask */ + +#define TPI_DEVTYPE_MajorType_Pos 0U /*!< TPI DEVTYPE: MajorType Position */ +#define TPI_DEVTYPE_MajorType_Msk (0xFUL << TPI_DEVTYPE_MajorType_Pos) /*!< TPI DEVTYPE: MajorType Mask */ + +/*@}*/ /* end of group CMSIS_TPI */ + + +#if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_MPU Memory Protection Unit (MPU) + \brief Type definitions for the Memory Protection Unit (MPU) + @{ + */ + +/** + \brief Structure type to access the Memory Protection Unit (MPU). 
+ */ +typedef struct +{ + __IM uint32_t TYPE; /*!< Offset: 0x000 (R/ ) MPU Type Register */ + __IOM uint32_t CTRL; /*!< Offset: 0x004 (R/W) MPU Control Register */ + __IOM uint32_t RNR; /*!< Offset: 0x008 (R/W) MPU Region RNRber Register */ + __IOM uint32_t RBAR; /*!< Offset: 0x00C (R/W) MPU Region Base Address Register */ + __IOM uint32_t RASR; /*!< Offset: 0x010 (R/W) MPU Region Attribute and Size Register */ + __IOM uint32_t RBAR_A1; /*!< Offset: 0x014 (R/W) MPU Alias 1 Region Base Address Register */ + __IOM uint32_t RASR_A1; /*!< Offset: 0x018 (R/W) MPU Alias 1 Region Attribute and Size Register */ + __IOM uint32_t RBAR_A2; /*!< Offset: 0x01C (R/W) MPU Alias 2 Region Base Address Register */ + __IOM uint32_t RASR_A2; /*!< Offset: 0x020 (R/W) MPU Alias 2 Region Attribute and Size Register */ + __IOM uint32_t RBAR_A3; /*!< Offset: 0x024 (R/W) MPU Alias 3 Region Base Address Register */ + __IOM uint32_t RASR_A3; /*!< Offset: 0x028 (R/W) MPU Alias 3 Region Attribute and Size Register */ +} MPU_Type; + +/* MPU Type Register Definitions */ +#define MPU_TYPE_IREGION_Pos 16U /*!< MPU TYPE: IREGION Position */ +#define MPU_TYPE_IREGION_Msk (0xFFUL << MPU_TYPE_IREGION_Pos) /*!< MPU TYPE: IREGION Mask */ + +#define MPU_TYPE_DREGION_Pos 8U /*!< MPU TYPE: DREGION Position */ +#define MPU_TYPE_DREGION_Msk (0xFFUL << MPU_TYPE_DREGION_Pos) /*!< MPU TYPE: DREGION Mask */ + +#define MPU_TYPE_SEPARATE_Pos 0U /*!< MPU TYPE: SEPARATE Position */ +#define MPU_TYPE_SEPARATE_Msk (1UL /*<< MPU_TYPE_SEPARATE_Pos*/) /*!< MPU TYPE: SEPARATE Mask */ + +/* MPU Control Register Definitions */ +#define MPU_CTRL_PRIVDEFENA_Pos 2U /*!< MPU CTRL: PRIVDEFENA Position */ +#define MPU_CTRL_PRIVDEFENA_Msk (1UL << MPU_CTRL_PRIVDEFENA_Pos) /*!< MPU CTRL: PRIVDEFENA Mask */ + +#define MPU_CTRL_HFNMIENA_Pos 1U /*!< MPU CTRL: HFNMIENA Position */ +#define MPU_CTRL_HFNMIENA_Msk (1UL << MPU_CTRL_HFNMIENA_Pos) /*!< MPU CTRL: HFNMIENA Mask */ + +#define MPU_CTRL_ENABLE_Pos 0U /*!< MPU CTRL: ENABLE Position */ +#define MPU_CTRL_ENABLE_Msk (1UL /*<< MPU_CTRL_ENABLE_Pos*/) /*!< MPU CTRL: ENABLE Mask */ + +/* MPU Region Number Register Definitions */ +#define MPU_RNR_REGION_Pos 0U /*!< MPU RNR: REGION Position */ +#define MPU_RNR_REGION_Msk (0xFFUL /*<< MPU_RNR_REGION_Pos*/) /*!< MPU RNR: REGION Mask */ + +/* MPU Region Base Address Register Definitions */ +#define MPU_RBAR_ADDR_Pos 5U /*!< MPU RBAR: ADDR Position */ +#define MPU_RBAR_ADDR_Msk (0x7FFFFFFUL << MPU_RBAR_ADDR_Pos) /*!< MPU RBAR: ADDR Mask */ + +#define MPU_RBAR_VALID_Pos 4U /*!< MPU RBAR: VALID Position */ +#define MPU_RBAR_VALID_Msk (1UL << MPU_RBAR_VALID_Pos) /*!< MPU RBAR: VALID Mask */ + +#define MPU_RBAR_REGION_Pos 0U /*!< MPU RBAR: REGION Position */ +#define MPU_RBAR_REGION_Msk (0xFUL /*<< MPU_RBAR_REGION_Pos*/) /*!< MPU RBAR: REGION Mask */ + +/* MPU Region Attribute and Size Register Definitions */ +#define MPU_RASR_ATTRS_Pos 16U /*!< MPU RASR: MPU Region Attribute field Position */ +#define MPU_RASR_ATTRS_Msk (0xFFFFUL << MPU_RASR_ATTRS_Pos) /*!< MPU RASR: MPU Region Attribute field Mask */ + +#define MPU_RASR_XN_Pos 28U /*!< MPU RASR: ATTRS.XN Position */ +#define MPU_RASR_XN_Msk (1UL << MPU_RASR_XN_Pos) /*!< MPU RASR: ATTRS.XN Mask */ + +#define MPU_RASR_AP_Pos 24U /*!< MPU RASR: ATTRS.AP Position */ +#define MPU_RASR_AP_Msk (0x7UL << MPU_RASR_AP_Pos) /*!< MPU RASR: ATTRS.AP Mask */ + +#define MPU_RASR_TEX_Pos 19U /*!< MPU RASR: ATTRS.TEX Position */ +#define MPU_RASR_TEX_Msk (0x7UL << MPU_RASR_TEX_Pos) /*!< MPU RASR: ATTRS.TEX Mask */ + +#define 
MPU_RASR_S_Pos 18U /*!< MPU RASR: ATTRS.S Position */ +#define MPU_RASR_S_Msk (1UL << MPU_RASR_S_Pos) /*!< MPU RASR: ATTRS.S Mask */ + +#define MPU_RASR_C_Pos 17U /*!< MPU RASR: ATTRS.C Position */ +#define MPU_RASR_C_Msk (1UL << MPU_RASR_C_Pos) /*!< MPU RASR: ATTRS.C Mask */ + +#define MPU_RASR_B_Pos 16U /*!< MPU RASR: ATTRS.B Position */ +#define MPU_RASR_B_Msk (1UL << MPU_RASR_B_Pos) /*!< MPU RASR: ATTRS.B Mask */ + +#define MPU_RASR_SRD_Pos 8U /*!< MPU RASR: Sub-Region Disable Position */ +#define MPU_RASR_SRD_Msk (0xFFUL << MPU_RASR_SRD_Pos) /*!< MPU RASR: Sub-Region Disable Mask */ + +#define MPU_RASR_SIZE_Pos 1U /*!< MPU RASR: Region Size Field Position */ +#define MPU_RASR_SIZE_Msk (0x1FUL << MPU_RASR_SIZE_Pos) /*!< MPU RASR: Region Size Field Mask */ + +#define MPU_RASR_ENABLE_Pos 0U /*!< MPU RASR: Region enable bit Position */ +#define MPU_RASR_ENABLE_Msk (1UL /*<< MPU_RASR_ENABLE_Pos*/) /*!< MPU RASR: Region enable bit Disable Mask */ + +/*@} end of group CMSIS_MPU */ +#endif + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_CoreDebug Core Debug Registers (CoreDebug) + \brief Type definitions for the Core Debug Registers + @{ + */ + +/** + \brief Structure type to access the Core Debug Register (CoreDebug). + */ +typedef struct +{ + __IOM uint32_t DHCSR; /*!< Offset: 0x000 (R/W) Debug Halting Control and Status Register */ + __OM uint32_t DCRSR; /*!< Offset: 0x004 ( /W) Debug Core Register Selector Register */ + __IOM uint32_t DCRDR; /*!< Offset: 0x008 (R/W) Debug Core Register Data Register */ + __IOM uint32_t DEMCR; /*!< Offset: 0x00C (R/W) Debug Exception and Monitor Control Register */ +} CoreDebug_Type; + +/* Debug Halting Control and Status Register Definitions */ +#define CoreDebug_DHCSR_DBGKEY_Pos 16U /*!< CoreDebug DHCSR: DBGKEY Position */ +#define CoreDebug_DHCSR_DBGKEY_Msk (0xFFFFUL << CoreDebug_DHCSR_DBGKEY_Pos) /*!< CoreDebug DHCSR: DBGKEY Mask */ + +#define CoreDebug_DHCSR_S_RESET_ST_Pos 25U /*!< CoreDebug DHCSR: S_RESET_ST Position */ +#define CoreDebug_DHCSR_S_RESET_ST_Msk (1UL << CoreDebug_DHCSR_S_RESET_ST_Pos) /*!< CoreDebug DHCSR: S_RESET_ST Mask */ + +#define CoreDebug_DHCSR_S_RETIRE_ST_Pos 24U /*!< CoreDebug DHCSR: S_RETIRE_ST Position */ +#define CoreDebug_DHCSR_S_RETIRE_ST_Msk (1UL << CoreDebug_DHCSR_S_RETIRE_ST_Pos) /*!< CoreDebug DHCSR: S_RETIRE_ST Mask */ + +#define CoreDebug_DHCSR_S_LOCKUP_Pos 19U /*!< CoreDebug DHCSR: S_LOCKUP Position */ +#define CoreDebug_DHCSR_S_LOCKUP_Msk (1UL << CoreDebug_DHCSR_S_LOCKUP_Pos) /*!< CoreDebug DHCSR: S_LOCKUP Mask */ + +#define CoreDebug_DHCSR_S_SLEEP_Pos 18U /*!< CoreDebug DHCSR: S_SLEEP Position */ +#define CoreDebug_DHCSR_S_SLEEP_Msk (1UL << CoreDebug_DHCSR_S_SLEEP_Pos) /*!< CoreDebug DHCSR: S_SLEEP Mask */ + +#define CoreDebug_DHCSR_S_HALT_Pos 17U /*!< CoreDebug DHCSR: S_HALT Position */ +#define CoreDebug_DHCSR_S_HALT_Msk (1UL << CoreDebug_DHCSR_S_HALT_Pos) /*!< CoreDebug DHCSR: S_HALT Mask */ + +#define CoreDebug_DHCSR_S_REGRDY_Pos 16U /*!< CoreDebug DHCSR: S_REGRDY Position */ +#define CoreDebug_DHCSR_S_REGRDY_Msk (1UL << CoreDebug_DHCSR_S_REGRDY_Pos) /*!< CoreDebug DHCSR: S_REGRDY Mask */ + +#define CoreDebug_DHCSR_C_SNAPSTALL_Pos 5U /*!< CoreDebug DHCSR: C_SNAPSTALL Position */ +#define CoreDebug_DHCSR_C_SNAPSTALL_Msk (1UL << CoreDebug_DHCSR_C_SNAPSTALL_Pos) /*!< CoreDebug DHCSR: C_SNAPSTALL Mask */ + +#define CoreDebug_DHCSR_C_MASKINTS_Pos 3U /*!< CoreDebug DHCSR: C_MASKINTS Position */ +#define CoreDebug_DHCSR_C_MASKINTS_Msk (1UL << CoreDebug_DHCSR_C_MASKINTS_Pos) /*!< CoreDebug DHCSR: C_MASKINTS 
Mask */ + +#define CoreDebug_DHCSR_C_STEP_Pos 2U /*!< CoreDebug DHCSR: C_STEP Position */ +#define CoreDebug_DHCSR_C_STEP_Msk (1UL << CoreDebug_DHCSR_C_STEP_Pos) /*!< CoreDebug DHCSR: C_STEP Mask */ + +#define CoreDebug_DHCSR_C_HALT_Pos 1U /*!< CoreDebug DHCSR: C_HALT Position */ +#define CoreDebug_DHCSR_C_HALT_Msk (1UL << CoreDebug_DHCSR_C_HALT_Pos) /*!< CoreDebug DHCSR: C_HALT Mask */ + +#define CoreDebug_DHCSR_C_DEBUGEN_Pos 0U /*!< CoreDebug DHCSR: C_DEBUGEN Position */ +#define CoreDebug_DHCSR_C_DEBUGEN_Msk (1UL /*<< CoreDebug_DHCSR_C_DEBUGEN_Pos*/) /*!< CoreDebug DHCSR: C_DEBUGEN Mask */ + +/* Debug Core Register Selector Register Definitions */ +#define CoreDebug_DCRSR_REGWnR_Pos 16U /*!< CoreDebug DCRSR: REGWnR Position */ +#define CoreDebug_DCRSR_REGWnR_Msk (1UL << CoreDebug_DCRSR_REGWnR_Pos) /*!< CoreDebug DCRSR: REGWnR Mask */ + +#define CoreDebug_DCRSR_REGSEL_Pos 0U /*!< CoreDebug DCRSR: REGSEL Position */ +#define CoreDebug_DCRSR_REGSEL_Msk (0x1FUL /*<< CoreDebug_DCRSR_REGSEL_Pos*/) /*!< CoreDebug DCRSR: REGSEL Mask */ + +/* Debug Exception and Monitor Control Register Definitions */ +#define CoreDebug_DEMCR_TRCENA_Pos 24U /*!< CoreDebug DEMCR: TRCENA Position */ +#define CoreDebug_DEMCR_TRCENA_Msk (1UL << CoreDebug_DEMCR_TRCENA_Pos) /*!< CoreDebug DEMCR: TRCENA Mask */ + +#define CoreDebug_DEMCR_MON_REQ_Pos 19U /*!< CoreDebug DEMCR: MON_REQ Position */ +#define CoreDebug_DEMCR_MON_REQ_Msk (1UL << CoreDebug_DEMCR_MON_REQ_Pos) /*!< CoreDebug DEMCR: MON_REQ Mask */ + +#define CoreDebug_DEMCR_MON_STEP_Pos 18U /*!< CoreDebug DEMCR: MON_STEP Position */ +#define CoreDebug_DEMCR_MON_STEP_Msk (1UL << CoreDebug_DEMCR_MON_STEP_Pos) /*!< CoreDebug DEMCR: MON_STEP Mask */ + +#define CoreDebug_DEMCR_MON_PEND_Pos 17U /*!< CoreDebug DEMCR: MON_PEND Position */ +#define CoreDebug_DEMCR_MON_PEND_Msk (1UL << CoreDebug_DEMCR_MON_PEND_Pos) /*!< CoreDebug DEMCR: MON_PEND Mask */ + +#define CoreDebug_DEMCR_MON_EN_Pos 16U /*!< CoreDebug DEMCR: MON_EN Position */ +#define CoreDebug_DEMCR_MON_EN_Msk (1UL << CoreDebug_DEMCR_MON_EN_Pos) /*!< CoreDebug DEMCR: MON_EN Mask */ + +#define CoreDebug_DEMCR_VC_HARDERR_Pos 10U /*!< CoreDebug DEMCR: VC_HARDERR Position */ +#define CoreDebug_DEMCR_VC_HARDERR_Msk (1UL << CoreDebug_DEMCR_VC_HARDERR_Pos) /*!< CoreDebug DEMCR: VC_HARDERR Mask */ + +#define CoreDebug_DEMCR_VC_INTERR_Pos 9U /*!< CoreDebug DEMCR: VC_INTERR Position */ +#define CoreDebug_DEMCR_VC_INTERR_Msk (1UL << CoreDebug_DEMCR_VC_INTERR_Pos) /*!< CoreDebug DEMCR: VC_INTERR Mask */ + +#define CoreDebug_DEMCR_VC_BUSERR_Pos 8U /*!< CoreDebug DEMCR: VC_BUSERR Position */ +#define CoreDebug_DEMCR_VC_BUSERR_Msk (1UL << CoreDebug_DEMCR_VC_BUSERR_Pos) /*!< CoreDebug DEMCR: VC_BUSERR Mask */ + +#define CoreDebug_DEMCR_VC_STATERR_Pos 7U /*!< CoreDebug DEMCR: VC_STATERR Position */ +#define CoreDebug_DEMCR_VC_STATERR_Msk (1UL << CoreDebug_DEMCR_VC_STATERR_Pos) /*!< CoreDebug DEMCR: VC_STATERR Mask */ + +#define CoreDebug_DEMCR_VC_CHKERR_Pos 6U /*!< CoreDebug DEMCR: VC_CHKERR Position */ +#define CoreDebug_DEMCR_VC_CHKERR_Msk (1UL << CoreDebug_DEMCR_VC_CHKERR_Pos) /*!< CoreDebug DEMCR: VC_CHKERR Mask */ + +#define CoreDebug_DEMCR_VC_NOCPERR_Pos 5U /*!< CoreDebug DEMCR: VC_NOCPERR Position */ +#define CoreDebug_DEMCR_VC_NOCPERR_Msk (1UL << CoreDebug_DEMCR_VC_NOCPERR_Pos) /*!< CoreDebug DEMCR: VC_NOCPERR Mask */ + +#define CoreDebug_DEMCR_VC_MMERR_Pos 4U /*!< CoreDebug DEMCR: VC_MMERR Position */ +#define CoreDebug_DEMCR_VC_MMERR_Msk (1UL << CoreDebug_DEMCR_VC_MMERR_Pos) /*!< CoreDebug DEMCR: VC_MMERR Mask */ + 
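/* --- Editor's illustrative sketch (not part of the patch) ------------------
   A frequent use of the CoreDebug definitions above is testing whether a
   debugger is attached: C_DEBUGEN can only be set through the debug port,
   never by the application, so reading it back is a reliable indicator
   (useful e.g. to decide between a breakpoint and a reset in error paths).
   The CoreDebug peripheral pointer is defined a little further down in this
   header; the helper name is hypothetical. */
static inline int debugger_attached(void)
{
  return (CoreDebug->DHCSR & CoreDebug_DHCSR_C_DEBUGEN_Msk) != 0U;
}
/* --------------------------------------------------------------------------- */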
+#define CoreDebug_DEMCR_VC_CORERESET_Pos 0U /*!< CoreDebug DEMCR: VC_CORERESET Position */ +#define CoreDebug_DEMCR_VC_CORERESET_Msk (1UL /*<< CoreDebug_DEMCR_VC_CORERESET_Pos*/) /*!< CoreDebug DEMCR: VC_CORERESET Mask */ + +/*@} end of group CMSIS_CoreDebug */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_core_bitfield Core register bit field macros + \brief Macros for use with bit field definitions (xxx_Pos, xxx_Msk). + @{ + */ + +/** + \brief Mask and shift a bit field value for use in a register bit range. + \param[in] field Name of the register bit field. + \param[in] value Value of the bit field. This parameter is interpreted as an uint32_t type. + \return Masked and shifted value. +*/ +#define _VAL2FLD(field, value) (((uint32_t)(value) << field ## _Pos) & field ## _Msk) + +/** + \brief Mask and shift a register value to extract a bit filed value. + \param[in] field Name of the register bit field. + \param[in] value Value of register. This parameter is interpreted as an uint32_t type. + \return Masked and shifted bit field value. +*/ +#define _FLD2VAL(field, value) (((uint32_t)(value) & field ## _Msk) >> field ## _Pos) + +/*@} end of group CMSIS_core_bitfield */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_core_base Core Definitions + \brief Definitions for base addresses, unions, and structures. + @{ + */ + +/* Memory mapping of Core Hardware */ +#define SCS_BASE (0xE000E000UL) /*!< System Control Space Base Address */ +#define ITM_BASE (0xE0000000UL) /*!< ITM Base Address */ +#define DWT_BASE (0xE0001000UL) /*!< DWT Base Address */ +#define TPI_BASE (0xE0040000UL) /*!< TPI Base Address */ +#define CoreDebug_BASE (0xE000EDF0UL) /*!< Core Debug Base Address */ +#define SysTick_BASE (SCS_BASE + 0x0010UL) /*!< SysTick Base Address */ +#define NVIC_BASE (SCS_BASE + 0x0100UL) /*!< NVIC Base Address */ +#define SCB_BASE (SCS_BASE + 0x0D00UL) /*!< System Control Block Base Address */ + +#define SCnSCB ((SCnSCB_Type *) SCS_BASE ) /*!< System control Register not in SCB */ +#define SCB ((SCB_Type *) SCB_BASE ) /*!< SCB configuration struct */ +#define SysTick ((SysTick_Type *) SysTick_BASE ) /*!< SysTick configuration struct */ +#define NVIC ((NVIC_Type *) NVIC_BASE ) /*!< NVIC configuration struct */ +#define ITM ((ITM_Type *) ITM_BASE ) /*!< ITM configuration struct */ +#define DWT ((DWT_Type *) DWT_BASE ) /*!< DWT configuration struct */ +#define TPI ((TPI_Type *) TPI_BASE ) /*!< TPI configuration struct */ +#define CoreDebug ((CoreDebug_Type *) CoreDebug_BASE) /*!< Core Debug configuration struct */ + +#if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) + #define MPU_BASE (SCS_BASE + 0x0D90UL) /*!< Memory Protection Unit */ + #define MPU ((MPU_Type *) MPU_BASE ) /*!< Memory Protection Unit */ +#endif + +/*@} */ + + + +/******************************************************************************* + * Hardware Abstraction Layer + Core Function Interface contains: + - Core NVIC Functions + - Core SysTick Functions + - Core Debug Functions + - Core Register Access Functions + ******************************************************************************/ +/** + \defgroup CMSIS_Core_FunctionInterface Functions and Instructions Reference +*/ + + + +/* ########################## NVIC functions #################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_NVICFunctions NVIC Functions + \brief Functions that manage interrupts and exceptions via the NVIC. 
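A short sketch of the _VAL2FLD/_FLD2VAL helpers defined above, paired with the xxx_Pos/xxx_Msk pairs from earlier in this header (SCB_CPUID_PARTNO and SCB_AIRCR_PRIGROUP are the standard CMSIS field names and are assumed to be in scope):

/* Sketch: extract a field with _FLD2VAL and position a value with _VAL2FLD. */
static inline uint32_t cpu_part_number(void)
{
  return _FLD2VAL(SCB_CPUID_PARTNO, SCB->CPUID);          /* mask with _Msk, shift down by _Pos */
}

static inline uint32_t prigroup_field(uint32_t group)
{
  return _VAL2FLD(SCB_AIRCR_PRIGROUP, group);             /* value shifted up and masked for a register write */
}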
+ @{ + */ + +#ifdef CMSIS_NVIC_VIRTUAL + #ifndef CMSIS_NVIC_VIRTUAL_HEADER_FILE + #define CMSIS_NVIC_VIRTUAL_HEADER_FILE "cmsis_nvic_virtual.h" + #endif + #include CMSIS_NVIC_VIRTUAL_HEADER_FILE +#else + #define NVIC_SetPriorityGrouping __NVIC_SetPriorityGrouping + #define NVIC_GetPriorityGrouping __NVIC_GetPriorityGrouping + #define NVIC_EnableIRQ __NVIC_EnableIRQ + #define NVIC_GetEnableIRQ __NVIC_GetEnableIRQ + #define NVIC_DisableIRQ __NVIC_DisableIRQ + #define NVIC_GetPendingIRQ __NVIC_GetPendingIRQ + #define NVIC_SetPendingIRQ __NVIC_SetPendingIRQ + #define NVIC_ClearPendingIRQ __NVIC_ClearPendingIRQ + #define NVIC_GetActive __NVIC_GetActive + #define NVIC_SetPriority __NVIC_SetPriority + #define NVIC_GetPriority __NVIC_GetPriority + #define NVIC_SystemReset __NVIC_SystemReset +#endif /* CMSIS_NVIC_VIRTUAL */ + +#ifdef CMSIS_VECTAB_VIRTUAL + #ifndef CMSIS_VECTAB_VIRTUAL_HEADER_FILE + #define CMSIS_VECTAB_VIRTUAL_HEADER_FILE "cmsis_vectab_virtual.h" + #endif + #include CMSIS_VECTAB_VIRTUAL_HEADER_FILE +#else + #define NVIC_SetVector __NVIC_SetVector + #define NVIC_GetVector __NVIC_GetVector +#endif /* (CMSIS_VECTAB_VIRTUAL) */ + +#define NVIC_USER_IRQ_OFFSET 16 + + +/* The following EXC_RETURN values are saved the LR on exception entry */ +#define EXC_RETURN_HANDLER (0xFFFFFFF1UL) /* return to Handler mode, uses MSP after return */ +#define EXC_RETURN_THREAD_MSP (0xFFFFFFF9UL) /* return to Thread mode, uses MSP after return */ +#define EXC_RETURN_THREAD_PSP (0xFFFFFFFDUL) /* return to Thread mode, uses PSP after return */ + + + +/** + \brief Set Priority Grouping + \details Sets the priority grouping field using the required unlock sequence. + The parameter PriorityGroup is assigned to the field SCB->AIRCR [10:8] PRIGROUP field. + Only values from 0..7 are used. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS), the smallest possible priority group is set. + \param [in] PriorityGroup Priority grouping field. + */ +__STATIC_INLINE void __NVIC_SetPriorityGrouping(uint32_t PriorityGroup) +{ + uint32_t reg_value; + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + + reg_value = SCB->AIRCR; /* read old register configuration */ + reg_value &= ~((uint32_t)(SCB_AIRCR_VECTKEY_Msk | SCB_AIRCR_PRIGROUP_Msk)); /* clear bits to change */ + reg_value = (reg_value | + ((uint32_t)0x5FAUL << SCB_AIRCR_VECTKEY_Pos) | + (PriorityGroupTmp << 8U) ); /* Insert write key and priorty group */ + SCB->AIRCR = reg_value; +} + + +/** + \brief Get Priority Grouping + \details Reads the priority grouping field from the NVIC Interrupt Controller. + \return Priority grouping field (SCB->AIRCR [10:8] PRIGROUP field). + */ +__STATIC_INLINE uint32_t __NVIC_GetPriorityGrouping(void) +{ + return ((uint32_t)((SCB->AIRCR & SCB_AIRCR_PRIGROUP_Msk) >> SCB_AIRCR_PRIGROUP_Pos)); +} + + +/** + \brief Enable Interrupt + \details Enables a device specific interrupt in the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_EnableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ISER[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Get Interrupt Enable status + \details Returns a device specific interrupt enable status from the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt is not enabled. 
+ \return 1 Interrupt is enabled. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t __NVIC_GetEnableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->ISER[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Disable Interrupt + \details Disables a device specific interrupt in the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_DisableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ICER[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + __DSB(); + __ISB(); + } +} + + +/** + \brief Get Pending Interrupt + \details Reads the NVIC pending register and returns the pending bit for the specified device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt status is not pending. + \return 1 Interrupt status is pending. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t __NVIC_GetPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->ISPR[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Set Pending Interrupt + \details Sets the pending bit of a device specific interrupt in the NVIC pending register. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_SetPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ISPR[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Clear Pending Interrupt + \details Clears the pending bit of a device specific interrupt in the NVIC pending register. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_ClearPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ICPR[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Get Active Interrupt + \details Reads the active register in the NVIC and returns the active bit for the device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt status is not active. + \return 1 Interrupt status is active. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t __NVIC_GetActive(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->IABR[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Set Interrupt Priority + \details Sets the priority of a device specific interrupt or a processor exception. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \param [in] priority Priority to set. + \note The priority cannot be set for every processor exception. 
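A minimal bring-up sketch combining the enable/priority functions above with NVIC_EncodePriority (defined just below); the IRQ number and the "all bits preemption" grouping value are assumptions taken from the STM32 device header and the usual STM32 convention.

/* Sketch: configure and enable one peripheral interrupt (USART3_IRQn assumed from the device header). */
static inline void uart_irq_bringup(void)
{
  NVIC_SetPriorityGrouping(3U);                 /* STM32 convention: all implemented bits as preemption priority */
  NVIC_SetPriority(USART3_IRQn,
                   NVIC_EncodePriority(NVIC_GetPriorityGrouping(), 1U, 0U));
  NVIC_EnableIRQ(USART3_IRQn);
}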
+ */ +__STATIC_INLINE void __NVIC_SetPriority(IRQn_Type IRQn, uint32_t priority) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->IP[((uint32_t)IRQn)] = (uint8_t)((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL); + } + else + { + SCB->SHP[(((uint32_t)IRQn) & 0xFUL)-4UL] = (uint8_t)((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL); + } +} + + +/** + \brief Get Interrupt Priority + \details Reads the priority of a device specific interrupt or a processor exception. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \return Interrupt Priority. + Value is aligned automatically to the implemented priority bits of the microcontroller. + */ +__STATIC_INLINE uint32_t __NVIC_GetPriority(IRQn_Type IRQn) +{ + + if ((int32_t)(IRQn) >= 0) + { + return(((uint32_t)NVIC->IP[((uint32_t)IRQn)] >> (8U - __NVIC_PRIO_BITS))); + } + else + { + return(((uint32_t)SCB->SHP[(((uint32_t)IRQn) & 0xFUL)-4UL] >> (8U - __NVIC_PRIO_BITS))); + } +} + + +/** + \brief Encode Priority + \details Encodes the priority for an interrupt with the given priority group, + preemptive priority value, and subpriority value. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS), the smallest possible priority group is set. + \param [in] PriorityGroup Used priority group. + \param [in] PreemptPriority Preemptive priority value (starting from 0). + \param [in] SubPriority Subpriority value (starting from 0). + \return Encoded priority. Value can be used in the function \ref NVIC_SetPriority(). + */ +__STATIC_INLINE uint32_t NVIC_EncodePriority (uint32_t PriorityGroup, uint32_t PreemptPriority, uint32_t SubPriority) +{ + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + uint32_t PreemptPriorityBits; + uint32_t SubPriorityBits; + + PreemptPriorityBits = ((7UL - PriorityGroupTmp) > (uint32_t)(__NVIC_PRIO_BITS)) ? (uint32_t)(__NVIC_PRIO_BITS) : (uint32_t)(7UL - PriorityGroupTmp); + SubPriorityBits = ((PriorityGroupTmp + (uint32_t)(__NVIC_PRIO_BITS)) < (uint32_t)7UL) ? (uint32_t)0UL : (uint32_t)((PriorityGroupTmp - 7UL) + (uint32_t)(__NVIC_PRIO_BITS)); + + return ( + ((PreemptPriority & (uint32_t)((1UL << (PreemptPriorityBits)) - 1UL)) << SubPriorityBits) | + ((SubPriority & (uint32_t)((1UL << (SubPriorityBits )) - 1UL))) + ); +} + + +/** + \brief Decode Priority + \details Decodes an interrupt priority value with a given priority group to + preemptive priority value and subpriority value. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS) the smallest possible priority group is set. + \param [in] Priority Priority value, which can be retrieved with the function \ref NVIC_GetPriority(). + \param [in] PriorityGroup Used priority group. + \param [out] pPreemptPriority Preemptive priority value (starting from 0). + \param [out] pSubPriority Subpriority value (starting from 0). + */ +__STATIC_INLINE void NVIC_DecodePriority (uint32_t Priority, uint32_t PriorityGroup, uint32_t* const pPreemptPriority, uint32_t* const pSubPriority) +{ + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + uint32_t PreemptPriorityBits; + uint32_t SubPriorityBits; + + PreemptPriorityBits = ((7UL - PriorityGroupTmp) > (uint32_t)(__NVIC_PRIO_BITS)) ? 
(uint32_t)(__NVIC_PRIO_BITS) : (uint32_t)(7UL - PriorityGroupTmp); + SubPriorityBits = ((PriorityGroupTmp + (uint32_t)(__NVIC_PRIO_BITS)) < (uint32_t)7UL) ? (uint32_t)0UL : (uint32_t)((PriorityGroupTmp - 7UL) + (uint32_t)(__NVIC_PRIO_BITS)); + + *pPreemptPriority = (Priority >> SubPriorityBits) & (uint32_t)((1UL << (PreemptPriorityBits)) - 1UL); + *pSubPriority = (Priority ) & (uint32_t)((1UL << (SubPriorityBits )) - 1UL); +} + + +/** + \brief Set Interrupt Vector + \details Sets an interrupt vector in SRAM based interrupt vector table. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + VTOR must been relocated to SRAM before. + \param [in] IRQn Interrupt number + \param [in] vector Address of interrupt handler function + */ +__STATIC_INLINE void __NVIC_SetVector(IRQn_Type IRQn, uint32_t vector) +{ + uint32_t *vectors = (uint32_t *)SCB->VTOR; + vectors[(int32_t)IRQn + NVIC_USER_IRQ_OFFSET] = vector; +} + + +/** + \brief Get Interrupt Vector + \details Reads an interrupt vector from interrupt vector table. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \return Address of interrupt handler function + */ +__STATIC_INLINE uint32_t __NVIC_GetVector(IRQn_Type IRQn) +{ + uint32_t *vectors = (uint32_t *)SCB->VTOR; + return vectors[(int32_t)IRQn + NVIC_USER_IRQ_OFFSET]; +} + + +/** + \brief System Reset + \details Initiates a system reset request to reset the MCU. + */ +__NO_RETURN __STATIC_INLINE void __NVIC_SystemReset(void) +{ + __DSB(); /* Ensure all outstanding memory accesses included + buffered write are completed before reset */ + SCB->AIRCR = (uint32_t)((0x5FAUL << SCB_AIRCR_VECTKEY_Pos) | + (SCB->AIRCR & SCB_AIRCR_PRIGROUP_Msk) | + SCB_AIRCR_SYSRESETREQ_Msk ); /* Keep priority group unchanged */ + __DSB(); /* Ensure completion of memory access */ + + for(;;) /* wait until reset */ + { + __NOP(); + } +} + +/*@} end of CMSIS_Core_NVICFunctions */ + + +/* ########################## FPU functions #################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_FpuFunctions FPU Functions + \brief Function that provides FPU type. + @{ + */ + +/** + \brief get FPU type + \details returns the FPU type + \returns + - \b 0: No FPU + - \b 1: Single precision FPU + - \b 2: Double + Single precision FPU + */ +__STATIC_INLINE uint32_t SCB_GetFPUType(void) +{ + return 0U; /* No FPU */ +} + + +/*@} end of CMSIS_Core_FpuFunctions */ + + + +/* ################################## SysTick function ############################################ */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_SysTickFunctions SysTick Functions + \brief Functions that configure the System. + @{ + */ + +#if defined (__Vendor_SysTickConfig) && (__Vendor_SysTickConfig == 0U) + +/** + \brief System Tick Configuration + \details Initializes the System Timer and its interrupt, and starts the System Tick Timer. + Counter is in free running mode to generate periodic interrupts. + \param [in] ticks Number of ticks between two interrupts. + \return 0 Function succeeded. + \return 1 Function failed. + \note When the variable __Vendor_SysTickConfig is set to 1, then the + function SysTick_Config is not included. In this case, the file device.h + must contain a vendor-specific implementation of this function. 
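A minimal sketch of the SysTick_Config() call documented here, assuming SystemCoreClock is kept up to date by the device's system/clock setup code:

/* Sketch: request a 1 ms tick; SysTick_Config() returns 1 if the reload value
   does not fit in the 24-bit reload register. */
static inline void tick_1ms_init(void)
{
  if (SysTick_Config(SystemCoreClock / 1000U) != 0UL)
  {
    for (;;) { }   /* reload value out of range: stop here */
  }
}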
+ */ +__STATIC_INLINE uint32_t SysTick_Config(uint32_t ticks) +{ + if ((ticks - 1UL) > SysTick_LOAD_RELOAD_Msk) + { + return (1UL); /* Reload value impossible */ + } + + SysTick->LOAD = (uint32_t)(ticks - 1UL); /* set reload register */ + NVIC_SetPriority (SysTick_IRQn, (1UL << __NVIC_PRIO_BITS) - 1UL); /* set Priority for Systick Interrupt */ + SysTick->VAL = 0UL; /* Load the SysTick Counter Value */ + SysTick->CTRL = SysTick_CTRL_CLKSOURCE_Msk | + SysTick_CTRL_TICKINT_Msk | + SysTick_CTRL_ENABLE_Msk; /* Enable SysTick IRQ and SysTick Timer */ + return (0UL); /* Function successful */ +} + +#endif + +/*@} end of CMSIS_Core_SysTickFunctions */ + + + +/* ##################################### Debug In/Output function ########################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_core_DebugFunctions ITM Functions + \brief Functions that access the ITM debug interface. + @{ + */ + +extern volatile int32_t ITM_RxBuffer; /*!< External variable to receive characters. */ +#define ITM_RXBUFFER_EMPTY ((int32_t)0x5AA55AA5U) /*!< Value identifying \ref ITM_RxBuffer is ready for next character. */ + + +/** + \brief ITM Send Character + \details Transmits a character via the ITM channel 0, and + \li Just returns when no debugger is connected that has booked the output. + \li Is blocking when a debugger is connected, but the previous character sent has not been transmitted. + \param [in] ch Character to transmit. + \returns Character to transmit. + */ +__STATIC_INLINE uint32_t ITM_SendChar (uint32_t ch) +{ + if (((ITM->TCR & ITM_TCR_ITMENA_Msk) != 0UL) && /* ITM enabled */ + ((ITM->TER & 1UL ) != 0UL) ) /* ITM Port #0 enabled */ + { + while (ITM->PORT[0U].u32 == 0UL) + { + __NOP(); + } + ITM->PORT[0U].u8 = (uint8_t)ch; + } + return (ch); +} + + +/** + \brief ITM Receive Character + \details Inputs a character via the external variable \ref ITM_RxBuffer. + \return Received character. + \return -1 No character pending. + */ +__STATIC_INLINE int32_t ITM_ReceiveChar (void) +{ + int32_t ch = -1; /* no character available */ + + if (ITM_RxBuffer != ITM_RXBUFFER_EMPTY) + { + ch = ITM_RxBuffer; + ITM_RxBuffer = ITM_RXBUFFER_EMPTY; /* ready for next character */ + } + + return (ch); +} + + +/** + \brief ITM Check Character + \details Checks whether a character is pending for reading in the variable \ref ITM_RxBuffer. + \return 0 No character available. + \return 1 Character available. + */ +__STATIC_INLINE int32_t ITM_CheckChar (void) +{ + + if (ITM_RxBuffer == ITM_RXBUFFER_EMPTY) + { + return (0); /* no character available */ + } + else + { + return (1); /* character available */ + } +} + +/*@} end of CMSIS_core_DebugFunctions */ + + + + +#ifdef __cplusplus +} +#endif + +#endif /* __CORE_SC300_H_DEPENDANT */ + +#endif /* __CMSIS_GENERIC */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Drivers/CMSIS/Include/mpu_armv7.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Drivers/CMSIS/Include/mpu_armv7.h new file mode 100644 index 0000000..7d4b600 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Drivers/CMSIS/Include/mpu_armv7.h @@ -0,0 +1,270 @@ +/****************************************************************************** + * @file mpu_armv7.h + * @brief CMSIS MPU API for Armv7-M MPU + * @version V5.0.4 + * @date 10. January 2018 + ******************************************************************************/ +/* + * Copyright (c) 2017-2018 Arm Limited. All rights reserved. 
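The ITM helpers above are commonly used to retarget character output to stimulus port 0; a sketch, assuming the __io_putchar hook name used by STM32Cube's syscalls.c (the exact hook is toolchain-dependent):

/* Sketch: route character output through ITM port 0 (hook name is an assumption). */
int __io_putchar(int ch)
{
  return (int)ITM_SendChar((uint32_t)ch);   /* blocks only while a connected debugger has port 0 busy */
}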
+ * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#if defined ( __ICCARM__ ) + #pragma system_include /* treat file as system include file for MISRA check */ +#elif defined (__clang__) + #pragma clang system_header /* treat file as system include file */ +#endif + +#ifndef ARM_MPU_ARMV7_H +#define ARM_MPU_ARMV7_H + +#define ARM_MPU_REGION_SIZE_32B ((uint8_t)0x04U) ///!< MPU Region Size 32 Bytes +#define ARM_MPU_REGION_SIZE_64B ((uint8_t)0x05U) ///!< MPU Region Size 64 Bytes +#define ARM_MPU_REGION_SIZE_128B ((uint8_t)0x06U) ///!< MPU Region Size 128 Bytes +#define ARM_MPU_REGION_SIZE_256B ((uint8_t)0x07U) ///!< MPU Region Size 256 Bytes +#define ARM_MPU_REGION_SIZE_512B ((uint8_t)0x08U) ///!< MPU Region Size 512 Bytes +#define ARM_MPU_REGION_SIZE_1KB ((uint8_t)0x09U) ///!< MPU Region Size 1 KByte +#define ARM_MPU_REGION_SIZE_2KB ((uint8_t)0x0AU) ///!< MPU Region Size 2 KBytes +#define ARM_MPU_REGION_SIZE_4KB ((uint8_t)0x0BU) ///!< MPU Region Size 4 KBytes +#define ARM_MPU_REGION_SIZE_8KB ((uint8_t)0x0CU) ///!< MPU Region Size 8 KBytes +#define ARM_MPU_REGION_SIZE_16KB ((uint8_t)0x0DU) ///!< MPU Region Size 16 KBytes +#define ARM_MPU_REGION_SIZE_32KB ((uint8_t)0x0EU) ///!< MPU Region Size 32 KBytes +#define ARM_MPU_REGION_SIZE_64KB ((uint8_t)0x0FU) ///!< MPU Region Size 64 KBytes +#define ARM_MPU_REGION_SIZE_128KB ((uint8_t)0x10U) ///!< MPU Region Size 128 KBytes +#define ARM_MPU_REGION_SIZE_256KB ((uint8_t)0x11U) ///!< MPU Region Size 256 KBytes +#define ARM_MPU_REGION_SIZE_512KB ((uint8_t)0x12U) ///!< MPU Region Size 512 KBytes +#define ARM_MPU_REGION_SIZE_1MB ((uint8_t)0x13U) ///!< MPU Region Size 1 MByte +#define ARM_MPU_REGION_SIZE_2MB ((uint8_t)0x14U) ///!< MPU Region Size 2 MBytes +#define ARM_MPU_REGION_SIZE_4MB ((uint8_t)0x15U) ///!< MPU Region Size 4 MBytes +#define ARM_MPU_REGION_SIZE_8MB ((uint8_t)0x16U) ///!< MPU Region Size 8 MBytes +#define ARM_MPU_REGION_SIZE_16MB ((uint8_t)0x17U) ///!< MPU Region Size 16 MBytes +#define ARM_MPU_REGION_SIZE_32MB ((uint8_t)0x18U) ///!< MPU Region Size 32 MBytes +#define ARM_MPU_REGION_SIZE_64MB ((uint8_t)0x19U) ///!< MPU Region Size 64 MBytes +#define ARM_MPU_REGION_SIZE_128MB ((uint8_t)0x1AU) ///!< MPU Region Size 128 MBytes +#define ARM_MPU_REGION_SIZE_256MB ((uint8_t)0x1BU) ///!< MPU Region Size 256 MBytes +#define ARM_MPU_REGION_SIZE_512MB ((uint8_t)0x1CU) ///!< MPU Region Size 512 MBytes +#define ARM_MPU_REGION_SIZE_1GB ((uint8_t)0x1DU) ///!< MPU Region Size 1 GByte +#define ARM_MPU_REGION_SIZE_2GB ((uint8_t)0x1EU) ///!< MPU Region Size 2 GBytes +#define ARM_MPU_REGION_SIZE_4GB ((uint8_t)0x1FU) ///!< MPU Region Size 4 GBytes + +#define ARM_MPU_AP_NONE 0U ///!< MPU Access Permission no access +#define ARM_MPU_AP_PRIV 1U ///!< MPU Access Permission privileged access only +#define ARM_MPU_AP_URO 2U ///!< MPU Access Permission unprivileged access read-only +#define ARM_MPU_AP_FULL 3U ///!< MPU Access Permission full access +#define ARM_MPU_AP_PRO 5U ///!< MPU Access Permission privileged access 
read-only
+#define ARM_MPU_AP_RO    6U ///!< MPU Access Permission read-only access
+
+/** MPU Region Base Address Register Value
+*
+* \param Region The region to be configured, number 0 to 15.
+* \param BaseAddress The base address for the region.
+*/
+#define ARM_MPU_RBAR(Region, BaseAddress) \
+  (((BaseAddress) & MPU_RBAR_ADDR_Msk) |  \
+   ((Region) & MPU_RBAR_REGION_Msk)    |  \
+   (MPU_RBAR_VALID_Msk))
+
+/**
+* MPU Memory Access Attributes
+*
+* \param TypeExtField Type extension field, allows you to configure memory access type, for example strongly ordered, peripheral.
+* \param IsShareable Region is shareable between multiple bus masters.
+* \param IsCacheable Region is cacheable, i.e. its value may be kept in cache.
+* \param IsBufferable Region is bufferable, i.e. using write-back caching. Cacheable but non-bufferable regions use write-through policy.
+*/
+#define ARM_MPU_ACCESS_(TypeExtField, IsShareable, IsCacheable, IsBufferable) \
+  ((((TypeExtField) << MPU_RASR_TEX_Pos) & MPU_RASR_TEX_Msk) | \
+   (((IsShareable)  << MPU_RASR_S_Pos)   & MPU_RASR_S_Msk)   | \
+   (((IsCacheable)  << MPU_RASR_C_Pos)   & MPU_RASR_C_Msk)   | \
+   (((IsBufferable) << MPU_RASR_B_Pos)   & MPU_RASR_B_Msk))
+
+/**
+* MPU Region Attribute and Size Register Value
+*
+* \param DisableExec Instruction access disable bit, 1= disable instruction fetches.
+* \param AccessPermission Data access permissions, allows you to configure read/write access for User and Privileged mode.
+* \param AccessAttributes Memory access attribution, see \ref ARM_MPU_ACCESS_.
+* \param SubRegionDisable Sub-region disable field.
+* \param Size Region size of the region to be configured, for example 4K, 8K.
+*/
+#define ARM_MPU_RASR_EX(DisableExec, AccessPermission, AccessAttributes, SubRegionDisable, Size) \
+  ((((DisableExec)      << MPU_RASR_XN_Pos)   & MPU_RASR_XN_Msk)   | \
+   (((AccessPermission) << MPU_RASR_AP_Pos)   & MPU_RASR_AP_Msk)   | \
+   (((AccessAttributes)) & (MPU_RASR_TEX_Msk | MPU_RASR_S_Msk | MPU_RASR_C_Msk | MPU_RASR_B_Msk)) | \
+   (((SubRegionDisable) << MPU_RASR_SRD_Pos)  & MPU_RASR_SRD_Msk)  | \
+   (((Size)             << MPU_RASR_SIZE_Pos) & MPU_RASR_SIZE_Msk) | \
+   (((MPU_RASR_ENABLE_Msk))))
+
+/**
+* MPU Region Attribute and Size Register Value
+*
+* \param DisableExec Instruction access disable bit, 1= disable instruction fetches.
+* \param AccessPermission Data access permissions, allows you to configure read/write access for User and Privileged mode.
+* \param TypeExtField Type extension field, allows you to configure memory access type, for example strongly ordered, peripheral.
+* \param IsShareable Region is shareable between multiple bus masters.
+* \param IsCacheable Region is cacheable, i.e. its value may be kept in cache.
+* \param IsBufferable Region is bufferable, i.e. using write-back caching. Cacheable but non-bufferable regions use write-through policy.
+* \param SubRegionDisable Sub-region disable field.
+* \param Size Region size of the region to be configured, for example 4K, 8K.
+*/
+#define ARM_MPU_RASR(DisableExec, AccessPermission, TypeExtField, IsShareable, IsCacheable, IsBufferable, SubRegionDisable, Size) \
+  ARM_MPU_RASR_EX(DisableExec, AccessPermission, ARM_MPU_ACCESS_(TypeExtField, IsShareable, IsCacheable, IsBufferable), SubRegionDisable, Size)
+
+/**
+* MPU Memory Access Attribute for strongly ordered memory.
+* - TEX: 000b
+* - Shareable
+* - Non-cacheable
+* - Non-bufferable
+*/
+#define ARM_MPU_ACCESS_ORDERED ARM_MPU_ACCESS_(0U, 1U, 0U, 0U)
+
+/**
+* MPU Memory Access Attribute for device memory.
+* - TEX: 000b (if non-shareable) or 010b (if shareable) +* - Shareable or non-shareable +* - Non-cacheable +* - Bufferable (if shareable) or non-bufferable (if non-shareable) +* +* \param IsShareable Configures the device memory as shareable or non-shareable. +*/ +#define ARM_MPU_ACCESS_DEVICE(IsShareable) ((IsShareable) ? ARM_MPU_ACCESS_(0U, 1U, 0U, 1U) : ARM_MPU_ACCESS_(2U, 0U, 0U, 0U)) + +/** +* MPU Memory Access Attribute for normal memory. +* - TEX: 1BBb (reflecting outer cacheability rules) +* - Shareable or non-shareable +* - Cacheable or non-cacheable (reflecting inner cacheability rules) +* - Bufferable or non-bufferable (reflecting inner cacheability rules) +* +* \param OuterCp Configures the outer cache policy. +* \param InnerCp Configures the inner cache policy. +* \param IsShareable Configures the memory as shareable or non-shareable. +*/ +#define ARM_MPU_ACCESS_NORMAL(OuterCp, InnerCp, IsShareable) ARM_MPU_ACCESS_((4U | (OuterCp)), IsShareable, ((InnerCp) & 2U), ((InnerCp) & 1U)) + +/** +* MPU Memory Access Attribute non-cacheable policy. +*/ +#define ARM_MPU_CACHEP_NOCACHE 0U + +/** +* MPU Memory Access Attribute write-back, write and read allocate policy. +*/ +#define ARM_MPU_CACHEP_WB_WRA 1U + +/** +* MPU Memory Access Attribute write-through, no write allocate policy. +*/ +#define ARM_MPU_CACHEP_WT_NWA 2U + +/** +* MPU Memory Access Attribute write-back, no write allocate policy. +*/ +#define ARM_MPU_CACHEP_WB_NWA 3U + + +/** +* Struct for a single MPU Region +*/ +typedef struct { + uint32_t RBAR; //!< The region base address register value (RBAR) + uint32_t RASR; //!< The region attribute and size register value (RASR) \ref MPU_RASR +} ARM_MPU_Region_t; + +/** Enable the MPU. +* \param MPU_Control Default access permissions for unconfigured regions. +*/ +__STATIC_INLINE void ARM_MPU_Enable(uint32_t MPU_Control) +{ + __DSB(); + __ISB(); + MPU->CTRL = MPU_Control | MPU_CTRL_ENABLE_Msk; +#ifdef SCB_SHCSR_MEMFAULTENA_Msk + SCB->SHCSR |= SCB_SHCSR_MEMFAULTENA_Msk; +#endif +} + +/** Disable the MPU. +*/ +__STATIC_INLINE void ARM_MPU_Disable(void) +{ + __DSB(); + __ISB(); +#ifdef SCB_SHCSR_MEMFAULTENA_Msk + SCB->SHCSR &= ~SCB_SHCSR_MEMFAULTENA_Msk; +#endif + MPU->CTRL &= ~MPU_CTRL_ENABLE_Msk; +} + +/** Clear and disable the given MPU region. +* \param rnr Region number to be cleared. +*/ +__STATIC_INLINE void ARM_MPU_ClrRegion(uint32_t rnr) +{ + MPU->RNR = rnr; + MPU->RASR = 0U; +} + +/** Configure an MPU region. +* \param rbar Value for RBAR register. +* \param rsar Value for RSAR register. +*/ +__STATIC_INLINE void ARM_MPU_SetRegion(uint32_t rbar, uint32_t rasr) +{ + MPU->RBAR = rbar; + MPU->RASR = rasr; +} + +/** Configure the given MPU region. +* \param rnr Region number to be configured. +* \param rbar Value for RBAR register. +* \param rsar Value for RSAR register. +*/ +__STATIC_INLINE void ARM_MPU_SetRegionEx(uint32_t rnr, uint32_t rbar, uint32_t rasr) +{ + MPU->RNR = rnr; + MPU->RBAR = rbar; + MPU->RASR = rasr; +} + +/** Memcopy with strictly ordered memory access, e.g. for register targets. +* \param dst Destination data is copied to. +* \param src Source data is copied from. +* \param len Amount of data words to be copied. +*/ +__STATIC_INLINE void orderedCpy(volatile uint32_t* dst, const uint32_t* __RESTRICT src, uint32_t len) +{ + uint32_t i; + for (i = 0U; i < len; ++i) + { + dst[i] = src[i]; + } +} + +/** Load the given number of MPU regions from a table. +* \param table Pointer to the MPU configuration table. +* \param cnt Amount of regions to be configured. 
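Putting the Armv7-M macros and helpers above together, a minimal configuration sketch (region number, SRAM base address and region size are illustrative assumptions; MPU_CTRL_PRIVDEFENA_Msk is the standard CMSIS control bit from the core header):

/* Sketch: one full-access, normal-memory region over an assumed 512 KB SRAM window,
   keeping the default memory map for privileged code. */
static inline void mpu_v7_setup_sram(void)
{
  ARM_MPU_Disable();
  ARM_MPU_SetRegionEx(0U,                                          /* region number (assumed free) */
      ARM_MPU_RBAR(0U, 0x20000000U),                               /* region 0 at an assumed SRAM base */
      ARM_MPU_RASR(0U,                                             /* instruction fetches allowed */
                   ARM_MPU_AP_FULL,                                /* read/write, privileged and unprivileged */
                   0U, 0U, 1U, 1U,                                 /* TEX=0, non-shareable, cacheable, bufferable */
                   0x00U,                                          /* no sub-regions disabled */
                   ARM_MPU_REGION_SIZE_512KB));
  ARM_MPU_Enable(MPU_CTRL_PRIVDEFENA_Msk);
}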
+*/ +__STATIC_INLINE void ARM_MPU_Load(ARM_MPU_Region_t const* table, uint32_t cnt) +{ + const uint32_t rowWordSize = sizeof(ARM_MPU_Region_t)/4U; + while (cnt > MPU_TYPE_RALIASES) { + orderedCpy(&(MPU->RBAR), &(table->RBAR), MPU_TYPE_RALIASES*rowWordSize); + table += MPU_TYPE_RALIASES; + cnt -= MPU_TYPE_RALIASES; + } + orderedCpy(&(MPU->RBAR), &(table->RBAR), cnt*rowWordSize); +} + +#endif diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Drivers/CMSIS/Include/mpu_armv8.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Drivers/CMSIS/Include/mpu_armv8.h new file mode 100644 index 0000000..99ee9f9 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Drivers/CMSIS/Include/mpu_armv8.h @@ -0,0 +1,333 @@ +/****************************************************************************** + * @file mpu_armv8.h + * @brief CMSIS MPU API for Armv8-M MPU + * @version V5.0.4 + * @date 10. January 2018 + ******************************************************************************/ +/* + * Copyright (c) 2017-2018 Arm Limited. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#if defined ( __ICCARM__ ) + #pragma system_include /* treat file as system include file for MISRA check */ +#elif defined (__clang__) + #pragma clang system_header /* treat file as system include file */ +#endif + +#ifndef ARM_MPU_ARMV8_H +#define ARM_MPU_ARMV8_H + +/** \brief Attribute for device memory (outer only) */ +#define ARM_MPU_ATTR_DEVICE ( 0U ) + +/** \brief Attribute for non-cacheable, normal memory */ +#define ARM_MPU_ATTR_NON_CACHEABLE ( 4U ) + +/** \brief Attribute for normal memory (outer and inner) +* \param NT Non-Transient: Set to 1 for non-transient data. +* \param WB Write-Back: Set to 1 to use write-back update policy. +* \param RA Read Allocation: Set to 1 to use cache allocation on read miss. +* \param WA Write Allocation: Set to 1 to use cache allocation on write miss. +*/ +#define ARM_MPU_ATTR_MEMORY_(NT, WB, RA, WA) \ + (((NT & 1U) << 3U) | ((WB & 1U) << 2U) | ((RA & 1U) << 1U) | (WA & 1U)) + +/** \brief Device memory type non Gathering, non Re-ordering, non Early Write Acknowledgement */ +#define ARM_MPU_ATTR_DEVICE_nGnRnE (0U) + +/** \brief Device memory type non Gathering, non Re-ordering, Early Write Acknowledgement */ +#define ARM_MPU_ATTR_DEVICE_nGnRE (1U) + +/** \brief Device memory type non Gathering, Re-ordering, Early Write Acknowledgement */ +#define ARM_MPU_ATTR_DEVICE_nGRE (2U) + +/** \brief Device memory type Gathering, Re-ordering, Early Write Acknowledgement */ +#define ARM_MPU_ATTR_DEVICE_GRE (3U) + +/** \brief Memory Attribute +* \param O Outer memory attributes +* \param I O == ARM_MPU_ATTR_DEVICE: Device memory attributes, else: Inner memory attributes +*/ +#define ARM_MPU_ATTR(O, I) (((O & 0xFU) << 4U) | (((O & 0xFU) != 0U) ? 
(I & 0xFU) : ((I & 0x3U) << 2U))) + +/** \brief Normal memory non-shareable */ +#define ARM_MPU_SH_NON (0U) + +/** \brief Normal memory outer shareable */ +#define ARM_MPU_SH_OUTER (2U) + +/** \brief Normal memory inner shareable */ +#define ARM_MPU_SH_INNER (3U) + +/** \brief Memory access permissions +* \param RO Read-Only: Set to 1 for read-only memory. +* \param NP Non-Privileged: Set to 1 for non-privileged memory. +*/ +#define ARM_MPU_AP_(RO, NP) (((RO & 1U) << 1U) | (NP & 1U)) + +/** \brief Region Base Address Register value +* \param BASE The base address bits [31:5] of a memory region. The value is zero extended. Effective address gets 32 byte aligned. +* \param SH Defines the Shareability domain for this memory region. +* \param RO Read-Only: Set to 1 for a read-only memory region. +* \param NP Non-Privileged: Set to 1 for a non-privileged memory region. +* \oaram XN eXecute Never: Set to 1 for a non-executable memory region. +*/ +#define ARM_MPU_RBAR(BASE, SH, RO, NP, XN) \ + ((BASE & MPU_RBAR_BASE_Msk) | \ + ((SH << MPU_RBAR_SH_Pos) & MPU_RBAR_SH_Msk) | \ + ((ARM_MPU_AP_(RO, NP) << MPU_RBAR_AP_Pos) & MPU_RBAR_AP_Msk) | \ + ((XN << MPU_RBAR_XN_Pos) & MPU_RBAR_XN_Msk)) + +/** \brief Region Limit Address Register value +* \param LIMIT The limit address bits [31:5] for this memory region. The value is one extended. +* \param IDX The attribute index to be associated with this memory region. +*/ +#define ARM_MPU_RLAR(LIMIT, IDX) \ + ((LIMIT & MPU_RLAR_LIMIT_Msk) | \ + ((IDX << MPU_RLAR_AttrIndx_Pos) & MPU_RLAR_AttrIndx_Msk) | \ + (MPU_RLAR_EN_Msk)) + +/** +* Struct for a single MPU Region +*/ +typedef struct { + uint32_t RBAR; /*!< Region Base Address Register value */ + uint32_t RLAR; /*!< Region Limit Address Register value */ +} ARM_MPU_Region_t; + +/** Enable the MPU. +* \param MPU_Control Default access permissions for unconfigured regions. +*/ +__STATIC_INLINE void ARM_MPU_Enable(uint32_t MPU_Control) +{ + __DSB(); + __ISB(); + MPU->CTRL = MPU_Control | MPU_CTRL_ENABLE_Msk; +#ifdef SCB_SHCSR_MEMFAULTENA_Msk + SCB->SHCSR |= SCB_SHCSR_MEMFAULTENA_Msk; +#endif +} + +/** Disable the MPU. +*/ +__STATIC_INLINE void ARM_MPU_Disable(void) +{ + __DSB(); + __ISB(); +#ifdef SCB_SHCSR_MEMFAULTENA_Msk + SCB->SHCSR &= ~SCB_SHCSR_MEMFAULTENA_Msk; +#endif + MPU->CTRL &= ~MPU_CTRL_ENABLE_Msk; +} + +#ifdef MPU_NS +/** Enable the Non-secure MPU. +* \param MPU_Control Default access permissions for unconfigured regions. +*/ +__STATIC_INLINE void ARM_MPU_Enable_NS(uint32_t MPU_Control) +{ + __DSB(); + __ISB(); + MPU_NS->CTRL = MPU_Control | MPU_CTRL_ENABLE_Msk; +#ifdef SCB_SHCSR_MEMFAULTENA_Msk + SCB_NS->SHCSR |= SCB_SHCSR_MEMFAULTENA_Msk; +#endif +} + +/** Disable the Non-secure MPU. +*/ +__STATIC_INLINE void ARM_MPU_Disable_NS(void) +{ + __DSB(); + __ISB(); +#ifdef SCB_SHCSR_MEMFAULTENA_Msk + SCB_NS->SHCSR &= ~SCB_SHCSR_MEMFAULTENA_Msk; +#endif + MPU_NS->CTRL &= ~MPU_CTRL_ENABLE_Msk; +} +#endif + +/** Set the memory attribute encoding to the given MPU. +* \param mpu Pointer to the MPU to be configured. +* \param idx The attribute index to be set [0-7] +* \param attr The attribute value to be set. 
+*/ +__STATIC_INLINE void ARM_MPU_SetMemAttrEx(MPU_Type* mpu, uint8_t idx, uint8_t attr) +{ + const uint8_t reg = idx / 4U; + const uint32_t pos = ((idx % 4U) * 8U); + const uint32_t mask = 0xFFU << pos; + + if (reg >= (sizeof(mpu->MAIR) / sizeof(mpu->MAIR[0]))) { + return; // invalid index + } + + mpu->MAIR[reg] = ((mpu->MAIR[reg] & ~mask) | ((attr << pos) & mask)); +} + +/** Set the memory attribute encoding. +* \param idx The attribute index to be set [0-7] +* \param attr The attribute value to be set. +*/ +__STATIC_INLINE void ARM_MPU_SetMemAttr(uint8_t idx, uint8_t attr) +{ + ARM_MPU_SetMemAttrEx(MPU, idx, attr); +} + +#ifdef MPU_NS +/** Set the memory attribute encoding to the Non-secure MPU. +* \param idx The attribute index to be set [0-7] +* \param attr The attribute value to be set. +*/ +__STATIC_INLINE void ARM_MPU_SetMemAttr_NS(uint8_t idx, uint8_t attr) +{ + ARM_MPU_SetMemAttrEx(MPU_NS, idx, attr); +} +#endif + +/** Clear and disable the given MPU region of the given MPU. +* \param mpu Pointer to MPU to be used. +* \param rnr Region number to be cleared. +*/ +__STATIC_INLINE void ARM_MPU_ClrRegionEx(MPU_Type* mpu, uint32_t rnr) +{ + mpu->RNR = rnr; + mpu->RLAR = 0U; +} + +/** Clear and disable the given MPU region. +* \param rnr Region number to be cleared. +*/ +__STATIC_INLINE void ARM_MPU_ClrRegion(uint32_t rnr) +{ + ARM_MPU_ClrRegionEx(MPU, rnr); +} + +#ifdef MPU_NS +/** Clear and disable the given Non-secure MPU region. +* \param rnr Region number to be cleared. +*/ +__STATIC_INLINE void ARM_MPU_ClrRegion_NS(uint32_t rnr) +{ + ARM_MPU_ClrRegionEx(MPU_NS, rnr); +} +#endif + +/** Configure the given MPU region of the given MPU. +* \param mpu Pointer to MPU to be used. +* \param rnr Region number to be configured. +* \param rbar Value for RBAR register. +* \param rlar Value for RLAR register. +*/ +__STATIC_INLINE void ARM_MPU_SetRegionEx(MPU_Type* mpu, uint32_t rnr, uint32_t rbar, uint32_t rlar) +{ + mpu->RNR = rnr; + mpu->RBAR = rbar; + mpu->RLAR = rlar; +} + +/** Configure the given MPU region. +* \param rnr Region number to be configured. +* \param rbar Value for RBAR register. +* \param rlar Value for RLAR register. +*/ +__STATIC_INLINE void ARM_MPU_SetRegion(uint32_t rnr, uint32_t rbar, uint32_t rlar) +{ + ARM_MPU_SetRegionEx(MPU, rnr, rbar, rlar); +} + +#ifdef MPU_NS +/** Configure the given Non-secure MPU region. +* \param rnr Region number to be configured. +* \param rbar Value for RBAR register. +* \param rlar Value for RLAR register. +*/ +__STATIC_INLINE void ARM_MPU_SetRegion_NS(uint32_t rnr, uint32_t rbar, uint32_t rlar) +{ + ARM_MPU_SetRegionEx(MPU_NS, rnr, rbar, rlar); +} +#endif + +/** Memcopy with strictly ordered memory access, e.g. for register targets. +* \param dst Destination data is copied to. +* \param src Source data is copied from. +* \param len Amount of data words to be copied. +*/ +__STATIC_INLINE void orderedCpy(volatile uint32_t* dst, const uint32_t* __RESTRICT src, uint32_t len) +{ + uint32_t i; + for (i = 0U; i < len; ++i) + { + dst[i] = src[i]; + } +} + +/** Load the given number of MPU regions from a table to the given MPU. +* \param mpu Pointer to the MPU registers to be used. +* \param rnr First region number to be configured. +* \param table Pointer to the MPU configuration table. +* \param cnt Amount of regions to be configured. 
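For Armv8-M targets that use this header, a minimal sketch combining the attribute and region macros above (addresses, region number and attribute index are illustrative assumptions; MPU_CTRL_PRIVDEFENA_Msk comes from the core header):

/* Sketch: write-back normal memory in attribute slot 0, one read/write executable
   region over an assumed SRAM window, then enable the MPU. */
static inline void mpu_v8_setup_sram(void)
{
  ARM_MPU_Disable();
  ARM_MPU_SetMemAttr(0U, ARM_MPU_ATTR(ARM_MPU_ATTR_MEMORY_(1U, 1U, 1U, 1U),   /* outer: WB, RA, WA */
                                      ARM_MPU_ATTR_MEMORY_(1U, 1U, 1U, 1U))); /* inner: WB, RA, WA */
  ARM_MPU_SetRegion(0U,
      ARM_MPU_RBAR(0x20000000U, ARM_MPU_SH_NON, 0U, 1U, 0U),  /* base, non-shareable, RW, non-privileged ok, executable */
      ARM_MPU_RLAR(0x2007FFE0U, 0U));                         /* limit address, attribute index 0 */
  ARM_MPU_Enable(MPU_CTRL_PRIVDEFENA_Msk);
}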
+*/ +__STATIC_INLINE void ARM_MPU_LoadEx(MPU_Type* mpu, uint32_t rnr, ARM_MPU_Region_t const* table, uint32_t cnt) +{ + const uint32_t rowWordSize = sizeof(ARM_MPU_Region_t)/4U; + if (cnt == 1U) { + mpu->RNR = rnr; + orderedCpy(&(mpu->RBAR), &(table->RBAR), rowWordSize); + } else { + uint32_t rnrBase = rnr & ~(MPU_TYPE_RALIASES-1U); + uint32_t rnrOffset = rnr % MPU_TYPE_RALIASES; + + mpu->RNR = rnrBase; + while ((rnrOffset + cnt) > MPU_TYPE_RALIASES) { + uint32_t c = MPU_TYPE_RALIASES - rnrOffset; + orderedCpy(&(mpu->RBAR)+(rnrOffset*2U), &(table->RBAR), c*rowWordSize); + table += c; + cnt -= c; + rnrOffset = 0U; + rnrBase += MPU_TYPE_RALIASES; + mpu->RNR = rnrBase; + } + + orderedCpy(&(mpu->RBAR)+(rnrOffset*2U), &(table->RBAR), cnt*rowWordSize); + } +} + +/** Load the given number of MPU regions from a table. +* \param rnr First region number to be configured. +* \param table Pointer to the MPU configuration table. +* \param cnt Amount of regions to be configured. +*/ +__STATIC_INLINE void ARM_MPU_Load(uint32_t rnr, ARM_MPU_Region_t const* table, uint32_t cnt) +{ + ARM_MPU_LoadEx(MPU, rnr, table, cnt); +} + +#ifdef MPU_NS +/** Load the given number of MPU regions from a table to the Non-secure MPU. +* \param rnr First region number to be configured. +* \param table Pointer to the MPU configuration table. +* \param cnt Amount of regions to be configured. +*/ +__STATIC_INLINE void ARM_MPU_Load_NS(uint32_t rnr, ARM_MPU_Region_t const* table, uint32_t cnt) +{ + ARM_MPU_LoadEx(MPU_NS, rnr, table, cnt); +} +#endif + +#endif + diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Drivers/CMSIS/Include/tz_context.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Drivers/CMSIS/Include/tz_context.h new file mode 100644 index 0000000..d4c1474 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Drivers/CMSIS/Include/tz_context.h @@ -0,0 +1,70 @@ +/****************************************************************************** + * @file tz_context.h + * @brief Context Management for Armv8-M TrustZone + * @version V1.0.1 + * @date 10. January 2018 + ******************************************************************************/ +/* + * Copyright (c) 2017-2018 Arm Limited. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#if defined ( __ICCARM__ ) + #pragma system_include /* treat file as system include file for MISRA check */ +#elif defined (__clang__) + #pragma clang system_header /* treat file as system include file */ +#endif + +#ifndef TZ_CONTEXT_H +#define TZ_CONTEXT_H + +#include + +#ifndef TZ_MODULEID_T +#define TZ_MODULEID_T +/// \details Data type that identifies secure software modules called by a process. +typedef uint32_t TZ_ModuleId_t; +#endif + +/// \details TZ Memory ID identifies an allocated memory slot. 
+typedef uint32_t TZ_MemoryId_t; + +/// Initialize secure context memory system +/// \return execution status (1: success, 0: error) +uint32_t TZ_InitContextSystem_S (void); + +/// Allocate context memory for calling secure software modules in TrustZone +/// \param[in] module identifies software modules called from non-secure mode +/// \return value != 0 id TrustZone memory slot identifier +/// \return value 0 no memory available or internal error +TZ_MemoryId_t TZ_AllocModuleContext_S (TZ_ModuleId_t module); + +/// Free context memory that was previously allocated with \ref TZ_AllocModuleContext_S +/// \param[in] id TrustZone memory slot identifier +/// \return execution status (1: success, 0: error) +uint32_t TZ_FreeModuleContext_S (TZ_MemoryId_t id); + +/// Load secure context (called on RTOS thread context switch) +/// \param[in] id TrustZone memory slot identifier +/// \return execution status (1: success, 0: error) +uint32_t TZ_LoadContext_S (TZ_MemoryId_t id); + +/// Store secure context (called on RTOS thread context switch) +/// \param[in] id TrustZone memory slot identifier +/// \return execution status (1: success, 0: error) +uint32_t TZ_StoreContext_S (TZ_MemoryId_t id); + +#endif // TZ_CONTEXT_H diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Drivers/STM32F7xx_HAL_Driver/Inc/Legacy/stm32_hal_legacy.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Drivers/STM32F7xx_HAL_Driver/Inc/Legacy/stm32_hal_legacy.h new file mode 100644 index 0000000..2d39975 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Drivers/STM32F7xx_HAL_Driver/Inc/Legacy/stm32_hal_legacy.h @@ -0,0 +1,3895 @@ +/** + ****************************************************************************** + * @file stm32_hal_legacy.h + * @author MCD Application Team + * @brief This file contains aliases definition for the STM32Cube HAL constants + * macros and functions maintained for legacy purpose. + ****************************************************************************** + * @attention + * + * Copyright (c) 2021 STMicroelectronics. + * All rights reserved. + * + * This software is licensed under terms that can be found in the LICENSE file + * in the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. 
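A sketch of the call sequence an RTOS kernel is expected to make around the tz_context API above; the hook names and the error handling are illustrative assumptions, and the secure side defines the actual module identifiers.

/* Sketch: secure-context handling hooks built on the tz_context API. */
static void kernel_start_hook(void)
{
  (void)TZ_InitContextSystem_S();                /* once, before any allocation */
}

static TZ_MemoryId_t thread_create_hook(TZ_ModuleId_t module)
{
  return TZ_AllocModuleContext_S(module);        /* 0 means no slot available or internal error */
}

static void context_switch_hook(TZ_MemoryId_t outgoing, TZ_MemoryId_t incoming)
{
  (void)TZ_StoreContext_S(outgoing);             /* save the outgoing thread's secure context */
  (void)TZ_LoadContext_S(incoming);              /* restore the incoming thread's secure context */
}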
+ * + ****************************************************************************** + */ + +/* Define to prevent recursive inclusion -------------------------------------*/ +#ifndef STM32_HAL_LEGACY +#define STM32_HAL_LEGACY + +#ifdef __cplusplus +extern "C" { +#endif + +/* Includes ------------------------------------------------------------------*/ +/* Exported types ------------------------------------------------------------*/ +/* Exported constants --------------------------------------------------------*/ + +/** @defgroup HAL_AES_Aliased_Defines HAL CRYP Aliased Defines maintained for legacy purpose + * @{ + */ +#define AES_FLAG_RDERR CRYP_FLAG_RDERR +#define AES_FLAG_WRERR CRYP_FLAG_WRERR +#define AES_CLEARFLAG_CCF CRYP_CLEARFLAG_CCF +#define AES_CLEARFLAG_RDERR CRYP_CLEARFLAG_RDERR +#define AES_CLEARFLAG_WRERR CRYP_CLEARFLAG_WRERR +#if defined(STM32U5) +#define CRYP_DATATYPE_32B CRYP_NO_SWAP +#define CRYP_DATATYPE_16B CRYP_HALFWORD_SWAP +#define CRYP_DATATYPE_8B CRYP_BYTE_SWAP +#define CRYP_DATATYPE_1B CRYP_BIT_SWAP +#define CRYP_CCF_CLEAR CRYP_CLEAR_CCF +#define CRYP_ERR_CLEAR CRYP_CLEAR_RWEIF +#endif /* STM32U5 */ +/** + * @} + */ + +/** @defgroup HAL_ADC_Aliased_Defines HAL ADC Aliased Defines maintained for legacy purpose + * @{ + */ +#define ADC_RESOLUTION12b ADC_RESOLUTION_12B +#define ADC_RESOLUTION10b ADC_RESOLUTION_10B +#define ADC_RESOLUTION8b ADC_RESOLUTION_8B +#define ADC_RESOLUTION6b ADC_RESOLUTION_6B +#define OVR_DATA_OVERWRITTEN ADC_OVR_DATA_OVERWRITTEN +#define OVR_DATA_PRESERVED ADC_OVR_DATA_PRESERVED +#define EOC_SINGLE_CONV ADC_EOC_SINGLE_CONV +#define EOC_SEQ_CONV ADC_EOC_SEQ_CONV +#define EOC_SINGLE_SEQ_CONV ADC_EOC_SINGLE_SEQ_CONV +#define REGULAR_GROUP ADC_REGULAR_GROUP +#define INJECTED_GROUP ADC_INJECTED_GROUP +#define REGULAR_INJECTED_GROUP ADC_REGULAR_INJECTED_GROUP +#define AWD_EVENT ADC_AWD_EVENT +#define AWD1_EVENT ADC_AWD1_EVENT +#define AWD2_EVENT ADC_AWD2_EVENT +#define AWD3_EVENT ADC_AWD3_EVENT +#define OVR_EVENT ADC_OVR_EVENT +#define JQOVF_EVENT ADC_JQOVF_EVENT +#define ALL_CHANNELS ADC_ALL_CHANNELS +#define REGULAR_CHANNELS ADC_REGULAR_CHANNELS +#define INJECTED_CHANNELS ADC_INJECTED_CHANNELS +#define SYSCFG_FLAG_SENSOR_ADC ADC_FLAG_SENSOR +#define SYSCFG_FLAG_VREF_ADC ADC_FLAG_VREFINT +#define ADC_CLOCKPRESCALER_PCLK_DIV1 ADC_CLOCK_SYNC_PCLK_DIV1 +#define ADC_CLOCKPRESCALER_PCLK_DIV2 ADC_CLOCK_SYNC_PCLK_DIV2 +#define ADC_CLOCKPRESCALER_PCLK_DIV4 ADC_CLOCK_SYNC_PCLK_DIV4 +#define ADC_CLOCKPRESCALER_PCLK_DIV6 ADC_CLOCK_SYNC_PCLK_DIV6 +#define ADC_CLOCKPRESCALER_PCLK_DIV8 ADC_CLOCK_SYNC_PCLK_DIV8 +#define ADC_EXTERNALTRIG0_T6_TRGO ADC_EXTERNALTRIGCONV_T6_TRGO +#define ADC_EXTERNALTRIG1_T21_CC2 ADC_EXTERNALTRIGCONV_T21_CC2 +#define ADC_EXTERNALTRIG2_T2_TRGO ADC_EXTERNALTRIGCONV_T2_TRGO +#define ADC_EXTERNALTRIG3_T2_CC4 ADC_EXTERNALTRIGCONV_T2_CC4 +#define ADC_EXTERNALTRIG4_T22_TRGO ADC_EXTERNALTRIGCONV_T22_TRGO +#define ADC_EXTERNALTRIG7_EXT_IT11 ADC_EXTERNALTRIGCONV_EXT_IT11 +#define ADC_CLOCK_ASYNC ADC_CLOCK_ASYNC_DIV1 +#define ADC_EXTERNALTRIG_EDGE_NONE ADC_EXTERNALTRIGCONVEDGE_NONE +#define ADC_EXTERNALTRIG_EDGE_RISING ADC_EXTERNALTRIGCONVEDGE_RISING +#define ADC_EXTERNALTRIG_EDGE_FALLING ADC_EXTERNALTRIGCONVEDGE_FALLING +#define ADC_EXTERNALTRIG_EDGE_RISINGFALLING ADC_EXTERNALTRIGCONVEDGE_RISINGFALLING +#define ADC_SAMPLETIME_2CYCLE_5 ADC_SAMPLETIME_2CYCLES_5 + +#define HAL_ADC_STATE_BUSY_REG HAL_ADC_STATE_REG_BUSY +#define HAL_ADC_STATE_BUSY_INJ HAL_ADC_STATE_INJ_BUSY +#define HAL_ADC_STATE_EOC_REG HAL_ADC_STATE_REG_EOC +#define 
HAL_ADC_STATE_EOC_INJ HAL_ADC_STATE_INJ_EOC +#define HAL_ADC_STATE_ERROR HAL_ADC_STATE_ERROR_INTERNAL +#define HAL_ADC_STATE_BUSY HAL_ADC_STATE_BUSY_INTERNAL +#define HAL_ADC_STATE_AWD HAL_ADC_STATE_AWD1 + +#if defined(STM32H7) +#define ADC_CHANNEL_VBAT_DIV4 ADC_CHANNEL_VBAT +#endif /* STM32H7 */ +/** + * @} + */ + +/** @defgroup HAL_CEC_Aliased_Defines HAL CEC Aliased Defines maintained for legacy purpose + * @{ + */ + +#define __HAL_CEC_GET_IT __HAL_CEC_GET_FLAG + +/** + * @} + */ + +/** @defgroup HAL_COMP_Aliased_Defines HAL COMP Aliased Defines maintained for legacy purpose + * @{ + */ +#define COMP_WINDOWMODE_DISABLED COMP_WINDOWMODE_DISABLE +#define COMP_WINDOWMODE_ENABLED COMP_WINDOWMODE_ENABLE +#define COMP_EXTI_LINE_COMP1_EVENT COMP_EXTI_LINE_COMP1 +#define COMP_EXTI_LINE_COMP2_EVENT COMP_EXTI_LINE_COMP2 +#define COMP_EXTI_LINE_COMP3_EVENT COMP_EXTI_LINE_COMP3 +#define COMP_EXTI_LINE_COMP4_EVENT COMP_EXTI_LINE_COMP4 +#define COMP_EXTI_LINE_COMP5_EVENT COMP_EXTI_LINE_COMP5 +#define COMP_EXTI_LINE_COMP6_EVENT COMP_EXTI_LINE_COMP6 +#define COMP_EXTI_LINE_COMP7_EVENT COMP_EXTI_LINE_COMP7 +#if defined(STM32L0) +#define COMP_LPTIMCONNECTION_ENABLED ((uint32_t)0x00000003U) /*!< COMPX output generic naming: connected to LPTIM input 1 for COMP1, LPTIM input 2 for COMP2 */ +#endif +#define COMP_OUTPUT_COMP6TIM2OCREFCLR COMP_OUTPUT_COMP6_TIM2OCREFCLR +#if defined(STM32F373xC) || defined(STM32F378xx) +#define COMP_OUTPUT_TIM3IC1 COMP_OUTPUT_COMP1_TIM3IC1 +#define COMP_OUTPUT_TIM3OCREFCLR COMP_OUTPUT_COMP1_TIM3OCREFCLR +#endif /* STM32F373xC || STM32F378xx */ + +#if defined(STM32L0) || defined(STM32L4) +#define COMP_WINDOWMODE_ENABLE COMP_WINDOWMODE_COMP1_INPUT_PLUS_COMMON + +#define COMP_NONINVERTINGINPUT_IO1 COMP_INPUT_PLUS_IO1 +#define COMP_NONINVERTINGINPUT_IO2 COMP_INPUT_PLUS_IO2 +#define COMP_NONINVERTINGINPUT_IO3 COMP_INPUT_PLUS_IO3 +#define COMP_NONINVERTINGINPUT_IO4 COMP_INPUT_PLUS_IO4 +#define COMP_NONINVERTINGINPUT_IO5 COMP_INPUT_PLUS_IO5 +#define COMP_NONINVERTINGINPUT_IO6 COMP_INPUT_PLUS_IO6 + +#define COMP_INVERTINGINPUT_1_4VREFINT COMP_INPUT_MINUS_1_4VREFINT +#define COMP_INVERTINGINPUT_1_2VREFINT COMP_INPUT_MINUS_1_2VREFINT +#define COMP_INVERTINGINPUT_3_4VREFINT COMP_INPUT_MINUS_3_4VREFINT +#define COMP_INVERTINGINPUT_VREFINT COMP_INPUT_MINUS_VREFINT +#define COMP_INVERTINGINPUT_DAC1_CH1 COMP_INPUT_MINUS_DAC1_CH1 +#define COMP_INVERTINGINPUT_DAC1_CH2 COMP_INPUT_MINUS_DAC1_CH2 +#define COMP_INVERTINGINPUT_DAC1 COMP_INPUT_MINUS_DAC1_CH1 +#define COMP_INVERTINGINPUT_DAC2 COMP_INPUT_MINUS_DAC1_CH2 +#define COMP_INVERTINGINPUT_IO1 COMP_INPUT_MINUS_IO1 +#if defined(STM32L0) +/* Issue fixed on STM32L0 COMP driver: only 2 dedicated IO (IO1 and IO2), */ +/* IO2 was wrongly assigned to IO shared with DAC and IO3 was corresponding */ +/* to the second dedicated IO (only for COMP2). */ +#define COMP_INVERTINGINPUT_IO2 COMP_INPUT_MINUS_DAC1_CH2 +#define COMP_INVERTINGINPUT_IO3 COMP_INPUT_MINUS_IO2 +#else +#define COMP_INVERTINGINPUT_IO2 COMP_INPUT_MINUS_IO2 +#define COMP_INVERTINGINPUT_IO3 COMP_INPUT_MINUS_IO3 +#endif +#define COMP_INVERTINGINPUT_IO4 COMP_INPUT_MINUS_IO4 +#define COMP_INVERTINGINPUT_IO5 COMP_INPUT_MINUS_IO5 + +#define COMP_OUTPUTLEVEL_LOW COMP_OUTPUT_LEVEL_LOW +#define COMP_OUTPUTLEVEL_HIGH COMP_OUTPUT_LEVEL_HIGH + +/* Note: Literal "COMP_FLAG_LOCK" kept for legacy purpose. */ +/* To check COMP lock state, use macro "__HAL_COMP_IS_LOCKED()". 
*/ +#if defined(COMP_CSR_LOCK) +#define COMP_FLAG_LOCK COMP_CSR_LOCK +#elif defined(COMP_CSR_COMP1LOCK) +#define COMP_FLAG_LOCK COMP_CSR_COMP1LOCK +#elif defined(COMP_CSR_COMPxLOCK) +#define COMP_FLAG_LOCK COMP_CSR_COMPxLOCK +#endif + +#if defined(STM32L4) +#define COMP_BLANKINGSRCE_TIM1OC5 COMP_BLANKINGSRC_TIM1_OC5_COMP1 +#define COMP_BLANKINGSRCE_TIM2OC3 COMP_BLANKINGSRC_TIM2_OC3_COMP1 +#define COMP_BLANKINGSRCE_TIM3OC3 COMP_BLANKINGSRC_TIM3_OC3_COMP1 +#define COMP_BLANKINGSRCE_TIM3OC4 COMP_BLANKINGSRC_TIM3_OC4_COMP2 +#define COMP_BLANKINGSRCE_TIM8OC5 COMP_BLANKINGSRC_TIM8_OC5_COMP2 +#define COMP_BLANKINGSRCE_TIM15OC1 COMP_BLANKINGSRC_TIM15_OC1_COMP2 +#define COMP_BLANKINGSRCE_NONE COMP_BLANKINGSRC_NONE +#endif + +#if defined(STM32L0) +#define COMP_MODE_HIGHSPEED COMP_POWERMODE_MEDIUMSPEED +#define COMP_MODE_LOWSPEED COMP_POWERMODE_ULTRALOWPOWER +#else +#define COMP_MODE_HIGHSPEED COMP_POWERMODE_HIGHSPEED +#define COMP_MODE_MEDIUMSPEED COMP_POWERMODE_MEDIUMSPEED +#define COMP_MODE_LOWPOWER COMP_POWERMODE_LOWPOWER +#define COMP_MODE_ULTRALOWPOWER COMP_POWERMODE_ULTRALOWPOWER +#endif + +#endif +/** + * @} + */ + +/** @defgroup HAL_CORTEX_Aliased_Defines HAL CORTEX Aliased Defines maintained for legacy purpose + * @{ + */ +#define __HAL_CORTEX_SYSTICKCLK_CONFIG HAL_SYSTICK_CLKSourceConfig +#if defined(STM32U5) +#define MPU_DEVICE_nGnRnE MPU_DEVICE_NGNRNE +#define MPU_DEVICE_nGnRE MPU_DEVICE_NGNRE +#define MPU_DEVICE_nGRE MPU_DEVICE_NGRE +#endif /* STM32U5 */ +/** + * @} + */ + +/** @defgroup CRC_Aliases CRC API aliases + * @{ + */ +#define HAL_CRC_Input_Data_Reverse HAL_CRCEx_Input_Data_Reverse /*!< Aliased to HAL_CRCEx_Input_Data_Reverse for inter STM32 series compatibility */ +#define HAL_CRC_Output_Data_Reverse HAL_CRCEx_Output_Data_Reverse /*!< Aliased to HAL_CRCEx_Output_Data_Reverse for inter STM32 series compatibility */ +/** + * @} + */ + +/** @defgroup HAL_CRC_Aliased_Defines HAL CRC Aliased Defines maintained for legacy purpose + * @{ + */ + +#define CRC_OUTPUTDATA_INVERSION_DISABLED CRC_OUTPUTDATA_INVERSION_DISABLE +#define CRC_OUTPUTDATA_INVERSION_ENABLED CRC_OUTPUTDATA_INVERSION_ENABLE + +/** + * @} + */ + +/** @defgroup HAL_DAC_Aliased_Defines HAL DAC Aliased Defines maintained for legacy purpose + * @{ + */ + +#define DAC1_CHANNEL_1 DAC_CHANNEL_1 +#define DAC1_CHANNEL_2 DAC_CHANNEL_2 +#define DAC2_CHANNEL_1 DAC_CHANNEL_1 +#define DAC_WAVE_NONE 0x00000000U +#define DAC_WAVE_NOISE DAC_CR_WAVE1_0 +#define DAC_WAVE_TRIANGLE DAC_CR_WAVE1_1 +#define DAC_WAVEGENERATION_NONE DAC_WAVE_NONE +#define DAC_WAVEGENERATION_NOISE DAC_WAVE_NOISE +#define DAC_WAVEGENERATION_TRIANGLE DAC_WAVE_TRIANGLE + +#if defined(STM32G4) || defined(STM32H7) || defined (STM32U5) +#define DAC_CHIPCONNECT_DISABLE DAC_CHIPCONNECT_EXTERNAL +#define DAC_CHIPCONNECT_ENABLE DAC_CHIPCONNECT_INTERNAL +#endif + +#if defined(STM32U5) +#define DAC_TRIGGER_STOP_LPTIM1_OUT DAC_TRIGGER_STOP_LPTIM1_CH1 +#define DAC_TRIGGER_STOP_LPTIM3_OUT DAC_TRIGGER_STOP_LPTIM3_CH1 +#define DAC_TRIGGER_LPTIM1_OUT DAC_TRIGGER_LPTIM1_CH1 +#define DAC_TRIGGER_LPTIM3_OUT DAC_TRIGGER_LPTIM3_CH1 +#endif + +#if defined(STM32L1) || defined(STM32L4) || defined(STM32G0) || defined(STM32L5) || defined(STM32H7) || defined(STM32F4) || defined(STM32G4) +#define HAL_DAC_MSP_INIT_CB_ID HAL_DAC_MSPINIT_CB_ID +#define HAL_DAC_MSP_DEINIT_CB_ID HAL_DAC_MSPDEINIT_CB_ID +#endif + +/** + * @} + */ + +/** @defgroup HAL_DMA_Aliased_Defines HAL DMA Aliased Defines maintained for legacy purpose + * @{ + */ +#define HAL_REMAPDMA_ADC_DMA_CH2 DMA_REMAP_ADC_DMA_CH2 
+#define HAL_REMAPDMA_USART1_TX_DMA_CH4 DMA_REMAP_USART1_TX_DMA_CH4 +#define HAL_REMAPDMA_USART1_RX_DMA_CH5 DMA_REMAP_USART1_RX_DMA_CH5 +#define HAL_REMAPDMA_TIM16_DMA_CH4 DMA_REMAP_TIM16_DMA_CH4 +#define HAL_REMAPDMA_TIM17_DMA_CH2 DMA_REMAP_TIM17_DMA_CH2 +#define HAL_REMAPDMA_USART3_DMA_CH32 DMA_REMAP_USART3_DMA_CH32 +#define HAL_REMAPDMA_TIM16_DMA_CH6 DMA_REMAP_TIM16_DMA_CH6 +#define HAL_REMAPDMA_TIM17_DMA_CH7 DMA_REMAP_TIM17_DMA_CH7 +#define HAL_REMAPDMA_SPI2_DMA_CH67 DMA_REMAP_SPI2_DMA_CH67 +#define HAL_REMAPDMA_USART2_DMA_CH67 DMA_REMAP_USART2_DMA_CH67 +#define HAL_REMAPDMA_I2C1_DMA_CH76 DMA_REMAP_I2C1_DMA_CH76 +#define HAL_REMAPDMA_TIM1_DMA_CH6 DMA_REMAP_TIM1_DMA_CH6 +#define HAL_REMAPDMA_TIM2_DMA_CH7 DMA_REMAP_TIM2_DMA_CH7 +#define HAL_REMAPDMA_TIM3_DMA_CH6 DMA_REMAP_TIM3_DMA_CH6 + +#define IS_HAL_REMAPDMA IS_DMA_REMAP +#define __HAL_REMAPDMA_CHANNEL_ENABLE __HAL_DMA_REMAP_CHANNEL_ENABLE +#define __HAL_REMAPDMA_CHANNEL_DISABLE __HAL_DMA_REMAP_CHANNEL_DISABLE + +#if defined(STM32L4) + +#define HAL_DMAMUX1_REQUEST_GEN_EXTI0 HAL_DMAMUX1_REQ_GEN_EXTI0 +#define HAL_DMAMUX1_REQUEST_GEN_EXTI1 HAL_DMAMUX1_REQ_GEN_EXTI1 +#define HAL_DMAMUX1_REQUEST_GEN_EXTI2 HAL_DMAMUX1_REQ_GEN_EXTI2 +#define HAL_DMAMUX1_REQUEST_GEN_EXTI3 HAL_DMAMUX1_REQ_GEN_EXTI3 +#define HAL_DMAMUX1_REQUEST_GEN_EXTI4 HAL_DMAMUX1_REQ_GEN_EXTI4 +#define HAL_DMAMUX1_REQUEST_GEN_EXTI5 HAL_DMAMUX1_REQ_GEN_EXTI5 +#define HAL_DMAMUX1_REQUEST_GEN_EXTI6 HAL_DMAMUX1_REQ_GEN_EXTI6 +#define HAL_DMAMUX1_REQUEST_GEN_EXTI7 HAL_DMAMUX1_REQ_GEN_EXTI7 +#define HAL_DMAMUX1_REQUEST_GEN_EXTI8 HAL_DMAMUX1_REQ_GEN_EXTI8 +#define HAL_DMAMUX1_REQUEST_GEN_EXTI9 HAL_DMAMUX1_REQ_GEN_EXTI9 +#define HAL_DMAMUX1_REQUEST_GEN_EXTI10 HAL_DMAMUX1_REQ_GEN_EXTI10 +#define HAL_DMAMUX1_REQUEST_GEN_EXTI11 HAL_DMAMUX1_REQ_GEN_EXTI11 +#define HAL_DMAMUX1_REQUEST_GEN_EXTI12 HAL_DMAMUX1_REQ_GEN_EXTI12 +#define HAL_DMAMUX1_REQUEST_GEN_EXTI13 HAL_DMAMUX1_REQ_GEN_EXTI13 +#define HAL_DMAMUX1_REQUEST_GEN_EXTI14 HAL_DMAMUX1_REQ_GEN_EXTI14 +#define HAL_DMAMUX1_REQUEST_GEN_EXTI15 HAL_DMAMUX1_REQ_GEN_EXTI15 +#define HAL_DMAMUX1_REQUEST_GEN_DMAMUX1_CH0_EVT HAL_DMAMUX1_REQ_GEN_DMAMUX1_CH0_EVT +#define HAL_DMAMUX1_REQUEST_GEN_DMAMUX1_CH1_EVT HAL_DMAMUX1_REQ_GEN_DMAMUX1_CH1_EVT +#define HAL_DMAMUX1_REQUEST_GEN_DMAMUX1_CH2_EVT HAL_DMAMUX1_REQ_GEN_DMAMUX1_CH2_EVT +#define HAL_DMAMUX1_REQUEST_GEN_DMAMUX1_CH3_EVT HAL_DMAMUX1_REQ_GEN_DMAMUX1_CH3_EVT +#define HAL_DMAMUX1_REQUEST_GEN_LPTIM1_OUT HAL_DMAMUX1_REQ_GEN_LPTIM1_OUT +#define HAL_DMAMUX1_REQUEST_GEN_LPTIM2_OUT HAL_DMAMUX1_REQ_GEN_LPTIM2_OUT +#define HAL_DMAMUX1_REQUEST_GEN_DSI_TE HAL_DMAMUX1_REQ_GEN_DSI_TE +#define HAL_DMAMUX1_REQUEST_GEN_DSI_EOT HAL_DMAMUX1_REQ_GEN_DSI_EOT +#define HAL_DMAMUX1_REQUEST_GEN_DMA2D_EOT HAL_DMAMUX1_REQ_GEN_DMA2D_EOT +#define HAL_DMAMUX1_REQUEST_GEN_LTDC_IT HAL_DMAMUX1_REQ_GEN_LTDC_IT + +#define HAL_DMAMUX_REQUEST_GEN_NO_EVENT HAL_DMAMUX_REQ_GEN_NO_EVENT +#define HAL_DMAMUX_REQUEST_GEN_RISING HAL_DMAMUX_REQ_GEN_RISING +#define HAL_DMAMUX_REQUEST_GEN_FALLING HAL_DMAMUX_REQ_GEN_FALLING +#define HAL_DMAMUX_REQUEST_GEN_RISING_FALLING HAL_DMAMUX_REQ_GEN_RISING_FALLING + +#if defined(STM32L4R5xx) || defined(STM32L4R9xx) || defined(STM32L4R9xx) || defined(STM32L4S5xx) || defined(STM32L4S7xx) || defined(STM32L4S9xx) +#define DMA_REQUEST_DCMI_PSSI DMA_REQUEST_DCMI +#endif + +#endif /* STM32L4 */ + +#if defined(STM32G0) +#define DMA_REQUEST_DAC1_CHANNEL1 DMA_REQUEST_DAC1_CH1 +#define DMA_REQUEST_DAC1_CHANNEL2 DMA_REQUEST_DAC1_CH2 +#define DMA_REQUEST_TIM16_TRIG_COM DMA_REQUEST_TIM16_COM +#define 
DMA_REQUEST_TIM17_TRIG_COM DMA_REQUEST_TIM17_COM + +#define LL_DMAMUX_REQ_TIM16_TRIG_COM LL_DMAMUX_REQ_TIM16_COM +#define LL_DMAMUX_REQ_TIM17_TRIG_COM LL_DMAMUX_REQ_TIM17_COM +#endif + +#if defined(STM32H7) + +#define DMA_REQUEST_DAC1 DMA_REQUEST_DAC1_CH1 +#define DMA_REQUEST_DAC2 DMA_REQUEST_DAC1_CH2 + +#define BDMA_REQUEST_LP_UART1_RX BDMA_REQUEST_LPUART1_RX +#define BDMA_REQUEST_LP_UART1_TX BDMA_REQUEST_LPUART1_TX + +#define HAL_DMAMUX1_REQUEST_GEN_DMAMUX1_CH0_EVT HAL_DMAMUX1_REQ_GEN_DMAMUX1_CH0_EVT +#define HAL_DMAMUX1_REQUEST_GEN_DMAMUX1_CH1_EVT HAL_DMAMUX1_REQ_GEN_DMAMUX1_CH1_EVT +#define HAL_DMAMUX1_REQUEST_GEN_DMAMUX1_CH2_EVT HAL_DMAMUX1_REQ_GEN_DMAMUX1_CH2_EVT +#define HAL_DMAMUX1_REQUEST_GEN_LPTIM1_OUT HAL_DMAMUX1_REQ_GEN_LPTIM1_OUT +#define HAL_DMAMUX1_REQUEST_GEN_LPTIM2_OUT HAL_DMAMUX1_REQ_GEN_LPTIM2_OUT +#define HAL_DMAMUX1_REQUEST_GEN_LPTIM3_OUT HAL_DMAMUX1_REQ_GEN_LPTIM3_OUT +#define HAL_DMAMUX1_REQUEST_GEN_EXTI0 HAL_DMAMUX1_REQ_GEN_EXTI0 +#define HAL_DMAMUX1_REQUEST_GEN_TIM12_TRGO HAL_DMAMUX1_REQ_GEN_TIM12_TRGO + +#define HAL_DMAMUX2_REQUEST_GEN_DMAMUX2_CH0_EVT HAL_DMAMUX2_REQ_GEN_DMAMUX2_CH0_EVT +#define HAL_DMAMUX2_REQUEST_GEN_DMAMUX2_CH1_EVT HAL_DMAMUX2_REQ_GEN_DMAMUX2_CH1_EVT +#define HAL_DMAMUX2_REQUEST_GEN_DMAMUX2_CH2_EVT HAL_DMAMUX2_REQ_GEN_DMAMUX2_CH2_EVT +#define HAL_DMAMUX2_REQUEST_GEN_DMAMUX2_CH3_EVT HAL_DMAMUX2_REQ_GEN_DMAMUX2_CH3_EVT +#define HAL_DMAMUX2_REQUEST_GEN_DMAMUX2_CH4_EVT HAL_DMAMUX2_REQ_GEN_DMAMUX2_CH4_EVT +#define HAL_DMAMUX2_REQUEST_GEN_DMAMUX2_CH5_EVT HAL_DMAMUX2_REQ_GEN_DMAMUX2_CH5_EVT +#define HAL_DMAMUX2_REQUEST_GEN_DMAMUX2_CH6_EVT HAL_DMAMUX2_REQ_GEN_DMAMUX2_CH6_EVT +#define HAL_DMAMUX2_REQUEST_GEN_LPUART1_RX_WKUP HAL_DMAMUX2_REQ_GEN_LPUART1_RX_WKUP +#define HAL_DMAMUX2_REQUEST_GEN_LPUART1_TX_WKUP HAL_DMAMUX2_REQ_GEN_LPUART1_TX_WKUP +#define HAL_DMAMUX2_REQUEST_GEN_LPTIM2_WKUP HAL_DMAMUX2_REQ_GEN_LPTIM2_WKUP +#define HAL_DMAMUX2_REQUEST_GEN_LPTIM2_OUT HAL_DMAMUX2_REQ_GEN_LPTIM2_OUT +#define HAL_DMAMUX2_REQUEST_GEN_LPTIM3_WKUP HAL_DMAMUX2_REQ_GEN_LPTIM3_WKUP +#define HAL_DMAMUX2_REQUEST_GEN_LPTIM3_OUT HAL_DMAMUX2_REQ_GEN_LPTIM3_OUT +#define HAL_DMAMUX2_REQUEST_GEN_LPTIM4_WKUP HAL_DMAMUX2_REQ_GEN_LPTIM4_WKUP +#define HAL_DMAMUX2_REQUEST_GEN_LPTIM5_WKUP HAL_DMAMUX2_REQ_GEN_LPTIM5_WKUP +#define HAL_DMAMUX2_REQUEST_GEN_I2C4_WKUP HAL_DMAMUX2_REQ_GEN_I2C4_WKUP +#define HAL_DMAMUX2_REQUEST_GEN_SPI6_WKUP HAL_DMAMUX2_REQ_GEN_SPI6_WKUP +#define HAL_DMAMUX2_REQUEST_GEN_COMP1_OUT HAL_DMAMUX2_REQ_GEN_COMP1_OUT +#define HAL_DMAMUX2_REQUEST_GEN_COMP2_OUT HAL_DMAMUX2_REQ_GEN_COMP2_OUT +#define HAL_DMAMUX2_REQUEST_GEN_RTC_WKUP HAL_DMAMUX2_REQ_GEN_RTC_WKUP +#define HAL_DMAMUX2_REQUEST_GEN_EXTI0 HAL_DMAMUX2_REQ_GEN_EXTI0 +#define HAL_DMAMUX2_REQUEST_GEN_EXTI2 HAL_DMAMUX2_REQ_GEN_EXTI2 +#define HAL_DMAMUX2_REQUEST_GEN_I2C4_IT_EVT HAL_DMAMUX2_REQ_GEN_I2C4_IT_EVT +#define HAL_DMAMUX2_REQUEST_GEN_SPI6_IT HAL_DMAMUX2_REQ_GEN_SPI6_IT +#define HAL_DMAMUX2_REQUEST_GEN_LPUART1_TX_IT HAL_DMAMUX2_REQ_GEN_LPUART1_TX_IT +#define HAL_DMAMUX2_REQUEST_GEN_LPUART1_RX_IT HAL_DMAMUX2_REQ_GEN_LPUART1_RX_IT +#define HAL_DMAMUX2_REQUEST_GEN_ADC3_IT HAL_DMAMUX2_REQ_GEN_ADC3_IT +#define HAL_DMAMUX2_REQUEST_GEN_ADC3_AWD1_OUT HAL_DMAMUX2_REQ_GEN_ADC3_AWD1_OUT +#define HAL_DMAMUX2_REQUEST_GEN_BDMA_CH0_IT HAL_DMAMUX2_REQ_GEN_BDMA_CH0_IT +#define HAL_DMAMUX2_REQUEST_GEN_BDMA_CH1_IT HAL_DMAMUX2_REQ_GEN_BDMA_CH1_IT + +#define HAL_DMAMUX_REQUEST_GEN_NO_EVENT HAL_DMAMUX_REQ_GEN_NO_EVENT +#define HAL_DMAMUX_REQUEST_GEN_RISING HAL_DMAMUX_REQ_GEN_RISING +#define HAL_DMAMUX_REQUEST_GEN_FALLING 
HAL_DMAMUX_REQ_GEN_FALLING +#define HAL_DMAMUX_REQUEST_GEN_RISING_FALLING HAL_DMAMUX_REQ_GEN_RISING_FALLING + +#define DFSDM_FILTER_EXT_TRIG_LPTIM1 DFSDM_FILTER_EXT_TRIG_LPTIM1_OUT +#define DFSDM_FILTER_EXT_TRIG_LPTIM2 DFSDM_FILTER_EXT_TRIG_LPTIM2_OUT +#define DFSDM_FILTER_EXT_TRIG_LPTIM3 DFSDM_FILTER_EXT_TRIG_LPTIM3_OUT + +#define DAC_TRIGGER_LP1_OUT DAC_TRIGGER_LPTIM1_OUT +#define DAC_TRIGGER_LP2_OUT DAC_TRIGGER_LPTIM2_OUT + +#endif /* STM32H7 */ +/** + * @} + */ + +/** @defgroup HAL_FLASH_Aliased_Defines HAL FLASH Aliased Defines maintained for legacy purpose + * @{ + */ + +#define TYPEPROGRAM_BYTE FLASH_TYPEPROGRAM_BYTE +#define TYPEPROGRAM_HALFWORD FLASH_TYPEPROGRAM_HALFWORD +#define TYPEPROGRAM_WORD FLASH_TYPEPROGRAM_WORD +#define TYPEPROGRAM_DOUBLEWORD FLASH_TYPEPROGRAM_DOUBLEWORD +#define TYPEERASE_SECTORS FLASH_TYPEERASE_SECTORS +#define TYPEERASE_PAGES FLASH_TYPEERASE_PAGES +#define TYPEERASE_PAGEERASE FLASH_TYPEERASE_PAGES +#define TYPEERASE_MASSERASE FLASH_TYPEERASE_MASSERASE +#define WRPSTATE_DISABLE OB_WRPSTATE_DISABLE +#define WRPSTATE_ENABLE OB_WRPSTATE_ENABLE +#define HAL_FLASH_TIMEOUT_VALUE FLASH_TIMEOUT_VALUE +#define OBEX_PCROP OPTIONBYTE_PCROP +#define OBEX_BOOTCONFIG OPTIONBYTE_BOOTCONFIG +#define PCROPSTATE_DISABLE OB_PCROP_STATE_DISABLE +#define PCROPSTATE_ENABLE OB_PCROP_STATE_ENABLE +#define TYPEERASEDATA_BYTE FLASH_TYPEERASEDATA_BYTE +#define TYPEERASEDATA_HALFWORD FLASH_TYPEERASEDATA_HALFWORD +#define TYPEERASEDATA_WORD FLASH_TYPEERASEDATA_WORD +#define TYPEPROGRAMDATA_BYTE FLASH_TYPEPROGRAMDATA_BYTE +#define TYPEPROGRAMDATA_HALFWORD FLASH_TYPEPROGRAMDATA_HALFWORD +#define TYPEPROGRAMDATA_WORD FLASH_TYPEPROGRAMDATA_WORD +#define TYPEPROGRAMDATA_FASTBYTE FLASH_TYPEPROGRAMDATA_FASTBYTE +#define TYPEPROGRAMDATA_FASTHALFWORD FLASH_TYPEPROGRAMDATA_FASTHALFWORD +#define TYPEPROGRAMDATA_FASTWORD FLASH_TYPEPROGRAMDATA_FASTWORD +#define PAGESIZE FLASH_PAGE_SIZE +#define TYPEPROGRAM_FASTBYTE FLASH_TYPEPROGRAM_BYTE +#define TYPEPROGRAM_FASTHALFWORD FLASH_TYPEPROGRAM_HALFWORD +#define TYPEPROGRAM_FASTWORD FLASH_TYPEPROGRAM_WORD +#define VOLTAGE_RANGE_1 FLASH_VOLTAGE_RANGE_1 +#define VOLTAGE_RANGE_2 FLASH_VOLTAGE_RANGE_2 +#define VOLTAGE_RANGE_3 FLASH_VOLTAGE_RANGE_3 +#define VOLTAGE_RANGE_4 FLASH_VOLTAGE_RANGE_4 +#define TYPEPROGRAM_FAST FLASH_TYPEPROGRAM_FAST +#define TYPEPROGRAM_FAST_AND_LAST FLASH_TYPEPROGRAM_FAST_AND_LAST +#define WRPAREA_BANK1_AREAA OB_WRPAREA_BANK1_AREAA +#define WRPAREA_BANK1_AREAB OB_WRPAREA_BANK1_AREAB +#define WRPAREA_BANK2_AREAA OB_WRPAREA_BANK2_AREAA +#define WRPAREA_BANK2_AREAB OB_WRPAREA_BANK2_AREAB +#define IWDG_STDBY_FREEZE OB_IWDG_STDBY_FREEZE +#define IWDG_STDBY_ACTIVE OB_IWDG_STDBY_RUN +#define IWDG_STOP_FREEZE OB_IWDG_STOP_FREEZE +#define IWDG_STOP_ACTIVE OB_IWDG_STOP_RUN +#define FLASH_ERROR_NONE HAL_FLASH_ERROR_NONE +#define FLASH_ERROR_RD HAL_FLASH_ERROR_RD +#define FLASH_ERROR_PG HAL_FLASH_ERROR_PROG +#define FLASH_ERROR_PGP HAL_FLASH_ERROR_PGS +#define FLASH_ERROR_WRP HAL_FLASH_ERROR_WRP +#define FLASH_ERROR_OPTV HAL_FLASH_ERROR_OPTV +#define FLASH_ERROR_OPTVUSR HAL_FLASH_ERROR_OPTVUSR +#define FLASH_ERROR_PROG HAL_FLASH_ERROR_PROG +#define FLASH_ERROR_OP HAL_FLASH_ERROR_OPERATION +#define FLASH_ERROR_PGA HAL_FLASH_ERROR_PGA +#define FLASH_ERROR_SIZE HAL_FLASH_ERROR_SIZE +#define FLASH_ERROR_SIZ HAL_FLASH_ERROR_SIZE +#define FLASH_ERROR_PGS HAL_FLASH_ERROR_PGS +#define FLASH_ERROR_MIS HAL_FLASH_ERROR_MIS +#define FLASH_ERROR_FAST HAL_FLASH_ERROR_FAST +#define FLASH_ERROR_FWWERR HAL_FLASH_ERROR_FWWERR +#define FLASH_ERROR_NOTZERO 
HAL_FLASH_ERROR_NOTZERO +#define FLASH_ERROR_OPERATION HAL_FLASH_ERROR_OPERATION +#define FLASH_ERROR_ERS HAL_FLASH_ERROR_ERS +#define OB_WDG_SW OB_IWDG_SW +#define OB_WDG_HW OB_IWDG_HW +#define OB_SDADC12_VDD_MONITOR_SET OB_SDACD_VDD_MONITOR_SET +#define OB_SDADC12_VDD_MONITOR_RESET OB_SDACD_VDD_MONITOR_RESET +#define OB_RAM_PARITY_CHECK_SET OB_SRAM_PARITY_SET +#define OB_RAM_PARITY_CHECK_RESET OB_SRAM_PARITY_RESET +#define IS_OB_SDADC12_VDD_MONITOR IS_OB_SDACD_VDD_MONITOR +#define OB_RDP_LEVEL0 OB_RDP_LEVEL_0 +#define OB_RDP_LEVEL1 OB_RDP_LEVEL_1 +#define OB_RDP_LEVEL2 OB_RDP_LEVEL_2 +#if defined(STM32G0) +#define OB_BOOT_LOCK_DISABLE OB_BOOT_ENTRY_FORCED_NONE +#define OB_BOOT_LOCK_ENABLE OB_BOOT_ENTRY_FORCED_FLASH +#else +#define OB_BOOT_ENTRY_FORCED_NONE OB_BOOT_LOCK_DISABLE +#define OB_BOOT_ENTRY_FORCED_FLASH OB_BOOT_LOCK_ENABLE +#endif +#if defined(STM32H7) +#define FLASH_FLAG_SNECCE_BANK1RR FLASH_FLAG_SNECCERR_BANK1 +#define FLASH_FLAG_DBECCE_BANK1RR FLASH_FLAG_DBECCERR_BANK1 +#define FLASH_FLAG_STRBER_BANK1R FLASH_FLAG_STRBERR_BANK1 +#define FLASH_FLAG_SNECCE_BANK2RR FLASH_FLAG_SNECCERR_BANK2 +#define FLASH_FLAG_DBECCE_BANK2RR FLASH_FLAG_DBECCERR_BANK2 +#define FLASH_FLAG_STRBER_BANK2R FLASH_FLAG_STRBERR_BANK2 +#define FLASH_FLAG_WDW FLASH_FLAG_WBNE +#define OB_WRP_SECTOR_All OB_WRP_SECTOR_ALL +#endif /* STM32H7 */ +#if defined(STM32U5) +#define OB_USER_nRST_STOP OB_USER_NRST_STOP +#define OB_USER_nRST_STDBY OB_USER_NRST_STDBY +#define OB_USER_nRST_SHDW OB_USER_NRST_SHDW +#define OB_USER_nSWBOOT0 OB_USER_NSWBOOT0 +#define OB_USER_nBOOT0 OB_USER_NBOOT0 +#define OB_nBOOT0_RESET OB_NBOOT0_RESET +#define OB_nBOOT0_SET OB_NBOOT0_SET +#endif /* STM32U5 */ + +/** + * @} + */ + +/** @defgroup HAL_JPEG_Aliased_Macros HAL JPEG Aliased Macros maintained for legacy purpose + * @{ + */ + +#if defined(STM32H7) +#define __HAL_RCC_JPEG_CLK_ENABLE __HAL_RCC_JPGDECEN_CLK_ENABLE +#define __HAL_RCC_JPEG_CLK_DISABLE __HAL_RCC_JPGDECEN_CLK_DISABLE +#define __HAL_RCC_JPEG_FORCE_RESET __HAL_RCC_JPGDECRST_FORCE_RESET +#define __HAL_RCC_JPEG_RELEASE_RESET __HAL_RCC_JPGDECRST_RELEASE_RESET +#define __HAL_RCC_JPEG_CLK_SLEEP_ENABLE __HAL_RCC_JPGDEC_CLK_SLEEP_ENABLE +#define __HAL_RCC_JPEG_CLK_SLEEP_DISABLE __HAL_RCC_JPGDEC_CLK_SLEEP_DISABLE +#endif /* STM32H7 */ + +/** + * @} + */ + +/** @defgroup HAL_SYSCFG_Aliased_Defines HAL SYSCFG Aliased Defines maintained for legacy purpose + * @{ + */ + +#define HAL_SYSCFG_FASTMODEPLUS_I2C_PA9 I2C_FASTMODEPLUS_PA9 +#define HAL_SYSCFG_FASTMODEPLUS_I2C_PA10 I2C_FASTMODEPLUS_PA10 +#define HAL_SYSCFG_FASTMODEPLUS_I2C_PB6 I2C_FASTMODEPLUS_PB6 +#define HAL_SYSCFG_FASTMODEPLUS_I2C_PB7 I2C_FASTMODEPLUS_PB7 +#define HAL_SYSCFG_FASTMODEPLUS_I2C_PB8 I2C_FASTMODEPLUS_PB8 +#define HAL_SYSCFG_FASTMODEPLUS_I2C_PB9 I2C_FASTMODEPLUS_PB9 +#define HAL_SYSCFG_FASTMODEPLUS_I2C1 I2C_FASTMODEPLUS_I2C1 +#define HAL_SYSCFG_FASTMODEPLUS_I2C2 I2C_FASTMODEPLUS_I2C2 +#define HAL_SYSCFG_FASTMODEPLUS_I2C3 I2C_FASTMODEPLUS_I2C3 +#if defined(STM32G4) + +#define HAL_SYSCFG_EnableIOAnalogSwitchBooster HAL_SYSCFG_EnableIOSwitchBooster +#define HAL_SYSCFG_DisableIOAnalogSwitchBooster HAL_SYSCFG_DisableIOSwitchBooster +#define HAL_SYSCFG_EnableIOAnalogSwitchVDD HAL_SYSCFG_EnableIOSwitchVDD +#define HAL_SYSCFG_DisableIOAnalogSwitchVDD HAL_SYSCFG_DisableIOSwitchVDD +#endif /* STM32G4 */ + +/** + * @} + */ + + +/** @defgroup LL_FMC_Aliased_Defines LL FMC Aliased Defines maintained for compatibility purpose + * @{ + */ +#if defined(STM32L4) || defined(STM32F7) || defined(STM32H7) || defined(STM32G4) +#define 
FMC_NAND_PCC_WAIT_FEATURE_DISABLE FMC_NAND_WAIT_FEATURE_DISABLE +#define FMC_NAND_PCC_WAIT_FEATURE_ENABLE FMC_NAND_WAIT_FEATURE_ENABLE +#define FMC_NAND_PCC_MEM_BUS_WIDTH_8 FMC_NAND_MEM_BUS_WIDTH_8 +#define FMC_NAND_PCC_MEM_BUS_WIDTH_16 FMC_NAND_MEM_BUS_WIDTH_16 +#elif defined(STM32F1) || defined(STM32F2) || defined(STM32F3) || defined(STM32F4) +#define FMC_NAND_WAIT_FEATURE_DISABLE FMC_NAND_PCC_WAIT_FEATURE_DISABLE +#define FMC_NAND_WAIT_FEATURE_ENABLE FMC_NAND_PCC_WAIT_FEATURE_ENABLE +#define FMC_NAND_MEM_BUS_WIDTH_8 FMC_NAND_PCC_MEM_BUS_WIDTH_8 +#define FMC_NAND_MEM_BUS_WIDTH_16 FMC_NAND_PCC_MEM_BUS_WIDTH_16 +#endif +/** + * @} + */ + +/** @defgroup LL_FSMC_Aliased_Defines LL FSMC Aliased Defines maintained for legacy purpose + * @{ + */ + +#define FSMC_NORSRAM_TYPEDEF FSMC_NORSRAM_TypeDef +#define FSMC_NORSRAM_EXTENDED_TYPEDEF FSMC_NORSRAM_EXTENDED_TypeDef +/** + * @} + */ + +/** @defgroup HAL_GPIO_Aliased_Macros HAL GPIO Aliased Macros maintained for legacy purpose + * @{ + */ +#define GET_GPIO_SOURCE GPIO_GET_INDEX +#define GET_GPIO_INDEX GPIO_GET_INDEX + +#if defined(STM32F4) +#define GPIO_AF12_SDMMC GPIO_AF12_SDIO +#define GPIO_AF12_SDMMC1 GPIO_AF12_SDIO +#endif + +#if defined(STM32F7) +#define GPIO_AF12_SDIO GPIO_AF12_SDMMC1 +#define GPIO_AF12_SDMMC GPIO_AF12_SDMMC1 +#endif + +#if defined(STM32L4) +#define GPIO_AF12_SDIO GPIO_AF12_SDMMC1 +#define GPIO_AF12_SDMMC GPIO_AF12_SDMMC1 +#endif + +#if defined(STM32H7) +#define GPIO_AF7_SDIO1 GPIO_AF7_SDMMC1 +#define GPIO_AF8_SDIO1 GPIO_AF8_SDMMC1 +#define GPIO_AF12_SDIO1 GPIO_AF12_SDMMC1 +#define GPIO_AF9_SDIO2 GPIO_AF9_SDMMC2 +#define GPIO_AF10_SDIO2 GPIO_AF10_SDMMC2 +#define GPIO_AF11_SDIO2 GPIO_AF11_SDMMC2 + +#if defined (STM32H743xx) || defined (STM32H753xx) || defined (STM32H750xx) || defined (STM32H742xx) || \ + defined (STM32H745xx) || defined (STM32H755xx) || defined (STM32H747xx) || defined (STM32H757xx) +#define GPIO_AF10_OTG2_HS GPIO_AF10_OTG2_FS +#define GPIO_AF10_OTG1_FS GPIO_AF10_OTG1_HS +#define GPIO_AF12_OTG2_FS GPIO_AF12_OTG1_FS +#endif /*STM32H743xx || STM32H753xx || STM32H750xx || STM32H742xx || STM32H745xx || STM32H755xx || STM32H747xx || STM32H757xx */ +#endif /* STM32H7 */ + +#define GPIO_AF0_LPTIM GPIO_AF0_LPTIM1 +#define GPIO_AF1_LPTIM GPIO_AF1_LPTIM1 +#define GPIO_AF2_LPTIM GPIO_AF2_LPTIM1 + +#if defined(STM32L0) || defined(STM32L4) || defined(STM32F4) || defined(STM32F2) || defined(STM32F7) || defined(STM32G4) || defined(STM32H7) || defined(STM32WB) || defined(STM32U5) +#define GPIO_SPEED_LOW GPIO_SPEED_FREQ_LOW +#define GPIO_SPEED_MEDIUM GPIO_SPEED_FREQ_MEDIUM +#define GPIO_SPEED_FAST GPIO_SPEED_FREQ_HIGH +#define GPIO_SPEED_HIGH GPIO_SPEED_FREQ_VERY_HIGH +#endif /* STM32L0 || STM32L4 || STM32F4 || STM32F2 || STM32F7 || STM32G4 || STM32H7 || STM32WB || STM32U5*/ + +#if defined(STM32L1) +#define GPIO_SPEED_VERY_LOW GPIO_SPEED_FREQ_LOW +#define GPIO_SPEED_LOW GPIO_SPEED_FREQ_MEDIUM +#define GPIO_SPEED_MEDIUM GPIO_SPEED_FREQ_HIGH +#define GPIO_SPEED_HIGH GPIO_SPEED_FREQ_VERY_HIGH +#endif /* STM32L1 */ + +#if defined(STM32F0) || defined(STM32F3) || defined(STM32F1) +#define GPIO_SPEED_LOW GPIO_SPEED_FREQ_LOW +#define GPIO_SPEED_MEDIUM GPIO_SPEED_FREQ_MEDIUM +#define GPIO_SPEED_HIGH GPIO_SPEED_FREQ_HIGH +#endif /* STM32F0 || STM32F3 || STM32F1 */ + +#define GPIO_AF6_DFSDM GPIO_AF6_DFSDM1 + +#if defined(STM32U5) +#define GPIO_AF0_RTC_50Hz GPIO_AF0_RTC_50HZ +#endif /* STM32U5 */ +/** + * @} + */ + +/** @defgroup HAL_GTZC_Aliased_Defines HAL GTZC Aliased Defines maintained for legacy purpose + * @{ + */ +#if 
defined(STM32U5) +#define GTZC_PERIPH_DCMI GTZC_PERIPH_DCMI_PSSI +#endif /* STM32U5 */ +/** + * @} + */ + +/** @defgroup HAL_HRTIM_Aliased_Macros HAL HRTIM Aliased Macros maintained for legacy purpose + * @{ + */ +#define HRTIM_TIMDELAYEDPROTECTION_DISABLED HRTIM_TIMER_A_B_C_DELAYEDPROTECTION_DISABLED +#define HRTIM_TIMDELAYEDPROTECTION_DELAYEDOUT1_EEV68 HRTIM_TIMER_A_B_C_DELAYEDPROTECTION_DELAYEDOUT1_EEV6 +#define HRTIM_TIMDELAYEDPROTECTION_DELAYEDOUT2_EEV68 HRTIM_TIMER_A_B_C_DELAYEDPROTECTION_DELAYEDOUT2_EEV6 +#define HRTIM_TIMDELAYEDPROTECTION_DELAYEDBOTH_EEV68 HRTIM_TIMER_A_B_C_DELAYEDPROTECTION_DELAYEDBOTH_EEV6 +#define HRTIM_TIMDELAYEDPROTECTION_BALANCED_EEV68 HRTIM_TIMER_A_B_C_DELAYEDPROTECTION_BALANCED_EEV6 +#define HRTIM_TIMDELAYEDPROTECTION_DELAYEDOUT1_DEEV79 HRTIM_TIMER_A_B_C_DELAYEDPROTECTION_DELAYEDOUT1_DEEV7 +#define HRTIM_TIMDELAYEDPROTECTION_DELAYEDOUT2_DEEV79 HRTIM_TIMER_A_B_C_DELAYEDPROTECTION_DELAYEDOUT2_DEEV7 +#define HRTIM_TIMDELAYEDPROTECTION_DELAYEDBOTH_EEV79 HRTIM_TIMER_A_B_C_DELAYEDPROTECTION_DELAYEDBOTH_EEV7 +#define HRTIM_TIMDELAYEDPROTECTION_BALANCED_EEV79 HRTIM_TIMER_A_B_C_DELAYEDPROTECTION_BALANCED_EEV7 + +#define __HAL_HRTIM_SetCounter __HAL_HRTIM_SETCOUNTER +#define __HAL_HRTIM_GetCounter __HAL_HRTIM_GETCOUNTER +#define __HAL_HRTIM_SetPeriod __HAL_HRTIM_SETPERIOD +#define __HAL_HRTIM_GetPeriod __HAL_HRTIM_GETPERIOD +#define __HAL_HRTIM_SetClockPrescaler __HAL_HRTIM_SETCLOCKPRESCALER +#define __HAL_HRTIM_GetClockPrescaler __HAL_HRTIM_GETCLOCKPRESCALER +#define __HAL_HRTIM_SetCompare __HAL_HRTIM_SETCOMPARE +#define __HAL_HRTIM_GetCompare __HAL_HRTIM_GETCOMPARE + +#if defined(STM32G4) +#define HAL_HRTIM_ExternalEventCounterConfig HAL_HRTIM_ExtEventCounterConfig +#define HAL_HRTIM_ExternalEventCounterEnable HAL_HRTIM_ExtEventCounterEnable +#define HAL_HRTIM_ExternalEventCounterDisable HAL_HRTIM_ExtEventCounterDisable +#define HAL_HRTIM_ExternalEventCounterReset HAL_HRTIM_ExtEventCounterReset +#define HRTIM_TIMEEVENT_A HRTIM_EVENTCOUNTER_A +#define HRTIM_TIMEEVENT_B HRTIM_EVENTCOUNTER_B +#define HRTIM_TIMEEVENTRESETMODE_UNCONDITIONAL HRTIM_EVENTCOUNTER_RSTMODE_UNCONDITIONAL +#define HRTIM_TIMEEVENTRESETMODE_CONDITIONAL HRTIM_EVENTCOUNTER_RSTMODE_CONDITIONAL +#endif /* STM32G4 */ + +#if defined(STM32H7) +#define HRTIM_OUTPUTSET_TIMAEV1_TIMBCMP1 HRTIM_OUTPUTSET_TIMEV_1 +#define HRTIM_OUTPUTSET_TIMAEV2_TIMBCMP2 HRTIM_OUTPUTSET_TIMEV_2 +#define HRTIM_OUTPUTSET_TIMAEV3_TIMCCMP2 HRTIM_OUTPUTSET_TIMEV_3 +#define HRTIM_OUTPUTSET_TIMAEV4_TIMCCMP3 HRTIM_OUTPUTSET_TIMEV_4 +#define HRTIM_OUTPUTSET_TIMAEV5_TIMDCMP1 HRTIM_OUTPUTSET_TIMEV_5 +#define HRTIM_OUTPUTSET_TIMAEV6_TIMDCMP2 HRTIM_OUTPUTSET_TIMEV_6 +#define HRTIM_OUTPUTSET_TIMAEV7_TIMECMP3 HRTIM_OUTPUTSET_TIMEV_7 +#define HRTIM_OUTPUTSET_TIMAEV8_TIMECMP4 HRTIM_OUTPUTSET_TIMEV_8 +#define HRTIM_OUTPUTSET_TIMAEV9_TIMFCMP4 HRTIM_OUTPUTSET_TIMEV_9 +#define HRTIM_OUTPUTSET_TIMBEV1_TIMACMP1 HRTIM_OUTPUTSET_TIMEV_1 +#define HRTIM_OUTPUTSET_TIMBEV2_TIMACMP2 HRTIM_OUTPUTSET_TIMEV_2 +#define HRTIM_OUTPUTSET_TIMBEV3_TIMCCMP3 HRTIM_OUTPUTSET_TIMEV_3 +#define HRTIM_OUTPUTSET_TIMBEV4_TIMCCMP4 HRTIM_OUTPUTSET_TIMEV_4 +#define HRTIM_OUTPUTSET_TIMBEV5_TIMDCMP3 HRTIM_OUTPUTSET_TIMEV_5 +#define HRTIM_OUTPUTSET_TIMBEV6_TIMDCMP4 HRTIM_OUTPUTSET_TIMEV_6 +#define HRTIM_OUTPUTSET_TIMBEV7_TIMECMP1 HRTIM_OUTPUTSET_TIMEV_7 +#define HRTIM_OUTPUTSET_TIMBEV8_TIMECMP2 HRTIM_OUTPUTSET_TIMEV_8 +#define HRTIM_OUTPUTSET_TIMBEV9_TIMFCMP3 HRTIM_OUTPUTSET_TIMEV_9 +#define HRTIM_OUTPUTSET_TIMCEV1_TIMACMP1 HRTIM_OUTPUTSET_TIMEV_1 +#define 
HRTIM_OUTPUTSET_TIMCEV2_TIMACMP2 HRTIM_OUTPUTSET_TIMEV_2 +#define HRTIM_OUTPUTSET_TIMCEV3_TIMBCMP2 HRTIM_OUTPUTSET_TIMEV_3 +#define HRTIM_OUTPUTSET_TIMCEV4_TIMBCMP3 HRTIM_OUTPUTSET_TIMEV_4 +#define HRTIM_OUTPUTSET_TIMCEV5_TIMDCMP2 HRTIM_OUTPUTSET_TIMEV_5 +#define HRTIM_OUTPUTSET_TIMCEV6_TIMDCMP4 HRTIM_OUTPUTSET_TIMEV_6 +#define HRTIM_OUTPUTSET_TIMCEV7_TIMECMP3 HRTIM_OUTPUTSET_TIMEV_7 +#define HRTIM_OUTPUTSET_TIMCEV8_TIMECMP4 HRTIM_OUTPUTSET_TIMEV_8 +#define HRTIM_OUTPUTSET_TIMCEV9_TIMFCMP2 HRTIM_OUTPUTSET_TIMEV_9 +#define HRTIM_OUTPUTSET_TIMDEV1_TIMACMP1 HRTIM_OUTPUTSET_TIMEV_1 +#define HRTIM_OUTPUTSET_TIMDEV2_TIMACMP4 HRTIM_OUTPUTSET_TIMEV_2 +#define HRTIM_OUTPUTSET_TIMDEV3_TIMBCMP2 HRTIM_OUTPUTSET_TIMEV_3 +#define HRTIM_OUTPUTSET_TIMDEV4_TIMBCMP4 HRTIM_OUTPUTSET_TIMEV_4 +#define HRTIM_OUTPUTSET_TIMDEV5_TIMCCMP4 HRTIM_OUTPUTSET_TIMEV_5 +#define HRTIM_OUTPUTSET_TIMDEV6_TIMECMP1 HRTIM_OUTPUTSET_TIMEV_6 +#define HRTIM_OUTPUTSET_TIMDEV7_TIMECMP4 HRTIM_OUTPUTSET_TIMEV_7 +#define HRTIM_OUTPUTSET_TIMDEV8_TIMFCMP1 HRTIM_OUTPUTSET_TIMEV_8 +#define HRTIM_OUTPUTSET_TIMDEV9_TIMFCMP3 HRTIM_OUTPUTSET_TIMEV_9 +#define HRTIM_OUTPUTSET_TIMEEV1_TIMACMP4 HRTIM_OUTPUTSET_TIMEV_1 +#define HRTIM_OUTPUTSET_TIMEEV2_TIMBCMP3 HRTIM_OUTPUTSET_TIMEV_2 +#define HRTIM_OUTPUTSET_TIMEEV3_TIMBCMP4 HRTIM_OUTPUTSET_TIMEV_3 +#define HRTIM_OUTPUTSET_TIMEEV4_TIMCCMP1 HRTIM_OUTPUTSET_TIMEV_4 +#define HRTIM_OUTPUTSET_TIMEEV5_TIMDCMP2 HRTIM_OUTPUTSET_TIMEV_5 +#define HRTIM_OUTPUTSET_TIMEEV6_TIMDCMP1 HRTIM_OUTPUTSET_TIMEV_6 +#define HRTIM_OUTPUTSET_TIMEEV7_TIMDCMP2 HRTIM_OUTPUTSET_TIMEV_7 +#define HRTIM_OUTPUTSET_TIMEEV8_TIMFCMP3 HRTIM_OUTPUTSET_TIMEV_8 +#define HRTIM_OUTPUTSET_TIMEEV9_TIMFCMP4 HRTIM_OUTPUTSET_TIMEV_9 +#define HRTIM_OUTPUTSET_TIMFEV1_TIMACMP3 HRTIM_OUTPUTSET_TIMEV_1 +#define HRTIM_OUTPUTSET_TIMFEV2_TIMBCMP1 HRTIM_OUTPUTSET_TIMEV_2 +#define HRTIM_OUTPUTSET_TIMFEV3_TIMBCMP4 HRTIM_OUTPUTSET_TIMEV_3 +#define HRTIM_OUTPUTSET_TIMFEV4_TIMCCMP1 HRTIM_OUTPUTSET_TIMEV_4 +#define HRTIM_OUTPUTSET_TIMFEV5_TIMCCMP4 HRTIM_OUTPUTSET_TIMEV_5 +#define HRTIM_OUTPUTSET_TIMFEV6_TIMDCMP3 HRTIM_OUTPUTSET_TIMEV_6 +#define HRTIM_OUTPUTSET_TIMFEV7_TIMDCMP4 HRTIM_OUTPUTSET_TIMEV_7 +#define HRTIM_OUTPUTSET_TIMFEV8_TIMECMP2 HRTIM_OUTPUTSET_TIMEV_8 +#define HRTIM_OUTPUTSET_TIMFEV9_TIMECMP3 HRTIM_OUTPUTSET_TIMEV_9 + +#define HRTIM_OUTPUTRESET_TIMAEV1_TIMBCMP1 HRTIM_OUTPUTSET_TIMEV_1 +#define HRTIM_OUTPUTRESET_TIMAEV2_TIMBCMP2 HRTIM_OUTPUTSET_TIMEV_2 +#define HRTIM_OUTPUTRESET_TIMAEV3_TIMCCMP2 HRTIM_OUTPUTSET_TIMEV_3 +#define HRTIM_OUTPUTRESET_TIMAEV4_TIMCCMP3 HRTIM_OUTPUTSET_TIMEV_4 +#define HRTIM_OUTPUTRESET_TIMAEV5_TIMDCMP1 HRTIM_OUTPUTSET_TIMEV_5 +#define HRTIM_OUTPUTRESET_TIMAEV6_TIMDCMP2 HRTIM_OUTPUTSET_TIMEV_6 +#define HRTIM_OUTPUTRESET_TIMAEV7_TIMECMP3 HRTIM_OUTPUTSET_TIMEV_7 +#define HRTIM_OUTPUTRESET_TIMAEV8_TIMECMP4 HRTIM_OUTPUTSET_TIMEV_8 +#define HRTIM_OUTPUTRESET_TIMAEV9_TIMFCMP4 HRTIM_OUTPUTSET_TIMEV_9 +#define HRTIM_OUTPUTRESET_TIMBEV1_TIMACMP1 HRTIM_OUTPUTSET_TIMEV_1 +#define HRTIM_OUTPUTRESET_TIMBEV2_TIMACMP2 HRTIM_OUTPUTSET_TIMEV_2 +#define HRTIM_OUTPUTRESET_TIMBEV3_TIMCCMP3 HRTIM_OUTPUTSET_TIMEV_3 +#define HRTIM_OUTPUTRESET_TIMBEV4_TIMCCMP4 HRTIM_OUTPUTSET_TIMEV_4 +#define HRTIM_OUTPUTRESET_TIMBEV5_TIMDCMP3 HRTIM_OUTPUTSET_TIMEV_5 +#define HRTIM_OUTPUTRESET_TIMBEV6_TIMDCMP4 HRTIM_OUTPUTSET_TIMEV_6 +#define HRTIM_OUTPUTRESET_TIMBEV7_TIMECMP1 HRTIM_OUTPUTSET_TIMEV_7 +#define HRTIM_OUTPUTRESET_TIMBEV8_TIMECMP2 HRTIM_OUTPUTSET_TIMEV_8 +#define HRTIM_OUTPUTRESET_TIMBEV9_TIMFCMP3 HRTIM_OUTPUTSET_TIMEV_9 +#define 
HRTIM_OUTPUTRESET_TIMCEV1_TIMACMP1 HRTIM_OUTPUTSET_TIMEV_1 +#define HRTIM_OUTPUTRESET_TIMCEV2_TIMACMP2 HRTIM_OUTPUTSET_TIMEV_2 +#define HRTIM_OUTPUTRESET_TIMCEV3_TIMBCMP2 HRTIM_OUTPUTSET_TIMEV_3 +#define HRTIM_OUTPUTRESET_TIMCEV4_TIMBCMP3 HRTIM_OUTPUTSET_TIMEV_4 +#define HRTIM_OUTPUTRESET_TIMCEV5_TIMDCMP2 HRTIM_OUTPUTSET_TIMEV_5 +#define HRTIM_OUTPUTRESET_TIMCEV6_TIMDCMP4 HRTIM_OUTPUTSET_TIMEV_6 +#define HRTIM_OUTPUTRESET_TIMCEV7_TIMECMP3 HRTIM_OUTPUTSET_TIMEV_7 +#define HRTIM_OUTPUTRESET_TIMCEV8_TIMECMP4 HRTIM_OUTPUTSET_TIMEV_8 +#define HRTIM_OUTPUTRESET_TIMCEV9_TIMFCMP2 HRTIM_OUTPUTSET_TIMEV_9 +#define HRTIM_OUTPUTRESET_TIMDEV1_TIMACMP1 HRTIM_OUTPUTSET_TIMEV_1 +#define HRTIM_OUTPUTRESET_TIMDEV2_TIMACMP4 HRTIM_OUTPUTSET_TIMEV_2 +#define HRTIM_OUTPUTRESET_TIMDEV3_TIMBCMP2 HRTIM_OUTPUTSET_TIMEV_3 +#define HRTIM_OUTPUTRESET_TIMDEV4_TIMBCMP4 HRTIM_OUTPUTSET_TIMEV_4 +#define HRTIM_OUTPUTRESET_TIMDEV5_TIMCCMP4 HRTIM_OUTPUTSET_TIMEV_5 +#define HRTIM_OUTPUTRESET_TIMDEV6_TIMECMP1 HRTIM_OUTPUTSET_TIMEV_6 +#define HRTIM_OUTPUTRESET_TIMDEV7_TIMECMP4 HRTIM_OUTPUTSET_TIMEV_7 +#define HRTIM_OUTPUTRESET_TIMDEV8_TIMFCMP1 HRTIM_OUTPUTSET_TIMEV_8 +#define HRTIM_OUTPUTRESET_TIMDEV9_TIMFCMP3 HRTIM_OUTPUTSET_TIMEV_9 +#define HRTIM_OUTPUTRESET_TIMEEV1_TIMACMP4 HRTIM_OUTPUTSET_TIMEV_1 +#define HRTIM_OUTPUTRESET_TIMEEV2_TIMBCMP3 HRTIM_OUTPUTSET_TIMEV_2 +#define HRTIM_OUTPUTRESET_TIMEEV3_TIMBCMP4 HRTIM_OUTPUTSET_TIMEV_3 +#define HRTIM_OUTPUTRESET_TIMEEV4_TIMCCMP1 HRTIM_OUTPUTSET_TIMEV_4 +#define HRTIM_OUTPUTRESET_TIMEEV5_TIMDCMP2 HRTIM_OUTPUTSET_TIMEV_5 +#define HRTIM_OUTPUTRESET_TIMEEV6_TIMDCMP1 HRTIM_OUTPUTSET_TIMEV_6 +#define HRTIM_OUTPUTRESET_TIMEEV7_TIMDCMP2 HRTIM_OUTPUTSET_TIMEV_7 +#define HRTIM_OUTPUTRESET_TIMEEV8_TIMFCMP3 HRTIM_OUTPUTSET_TIMEV_8 +#define HRTIM_OUTPUTRESET_TIMEEV9_TIMFCMP4 HRTIM_OUTPUTSET_TIMEV_9 +#define HRTIM_OUTPUTRESET_TIMFEV1_TIMACMP3 HRTIM_OUTPUTSET_TIMEV_1 +#define HRTIM_OUTPUTRESET_TIMFEV2_TIMBCMP1 HRTIM_OUTPUTSET_TIMEV_2 +#define HRTIM_OUTPUTRESET_TIMFEV3_TIMBCMP4 HRTIM_OUTPUTSET_TIMEV_3 +#define HRTIM_OUTPUTRESET_TIMFEV4_TIMCCMP1 HRTIM_OUTPUTSET_TIMEV_4 +#define HRTIM_OUTPUTRESET_TIMFEV5_TIMCCMP4 HRTIM_OUTPUTSET_TIMEV_5 +#define HRTIM_OUTPUTRESET_TIMFEV6_TIMDCMP3 HRTIM_OUTPUTSET_TIMEV_6 +#define HRTIM_OUTPUTRESET_TIMFEV7_TIMDCMP4 HRTIM_OUTPUTSET_TIMEV_7 +#define HRTIM_OUTPUTRESET_TIMFEV8_TIMECMP2 HRTIM_OUTPUTSET_TIMEV_8 +#define HRTIM_OUTPUTRESET_TIMFEV9_TIMECMP3 HRTIM_OUTPUTSET_TIMEV_9 +#endif /* STM32H7 */ + +#if defined(STM32F3) +/** @brief Constants defining available sources associated to external events. 
+ */ +#define HRTIM_EVENTSRC_1 (0x00000000U) +#define HRTIM_EVENTSRC_2 (HRTIM_EECR1_EE1SRC_0) +#define HRTIM_EVENTSRC_3 (HRTIM_EECR1_EE1SRC_1) +#define HRTIM_EVENTSRC_4 (HRTIM_EECR1_EE1SRC_1 | HRTIM_EECR1_EE1SRC_0) + +/** @brief Constants defining the DLL calibration periods (in micro seconds) + */ +#define HRTIM_CALIBRATIONRATE_7300 0x00000000U +#define HRTIM_CALIBRATIONRATE_910 (HRTIM_DLLCR_CALRTE_0) +#define HRTIM_CALIBRATIONRATE_114 (HRTIM_DLLCR_CALRTE_1) +#define HRTIM_CALIBRATIONRATE_14 (HRTIM_DLLCR_CALRTE_1 | HRTIM_DLLCR_CALRTE_0) + +#endif /* STM32F3 */ +/** + * @} + */ + +/** @defgroup HAL_I2C_Aliased_Defines HAL I2C Aliased Defines maintained for legacy purpose + * @{ + */ +#define I2C_DUALADDRESS_DISABLED I2C_DUALADDRESS_DISABLE +#define I2C_DUALADDRESS_ENABLED I2C_DUALADDRESS_ENABLE +#define I2C_GENERALCALL_DISABLED I2C_GENERALCALL_DISABLE +#define I2C_GENERALCALL_ENABLED I2C_GENERALCALL_ENABLE +#define I2C_NOSTRETCH_DISABLED I2C_NOSTRETCH_DISABLE +#define I2C_NOSTRETCH_ENABLED I2C_NOSTRETCH_ENABLE +#define I2C_ANALOGFILTER_ENABLED I2C_ANALOGFILTER_ENABLE +#define I2C_ANALOGFILTER_DISABLED I2C_ANALOGFILTER_DISABLE +#if defined(STM32F0) || defined(STM32F1) || defined(STM32F3) || defined(STM32G0) || defined(STM32L4) || defined(STM32L1) || defined(STM32F7) +#define HAL_I2C_STATE_MEM_BUSY_TX HAL_I2C_STATE_BUSY_TX +#define HAL_I2C_STATE_MEM_BUSY_RX HAL_I2C_STATE_BUSY_RX +#define HAL_I2C_STATE_MASTER_BUSY_TX HAL_I2C_STATE_BUSY_TX +#define HAL_I2C_STATE_MASTER_BUSY_RX HAL_I2C_STATE_BUSY_RX +#define HAL_I2C_STATE_SLAVE_BUSY_TX HAL_I2C_STATE_BUSY_TX +#define HAL_I2C_STATE_SLAVE_BUSY_RX HAL_I2C_STATE_BUSY_RX +#endif +/** + * @} + */ + +/** @defgroup HAL_IRDA_Aliased_Defines HAL IRDA Aliased Defines maintained for legacy purpose + * @{ + */ +#define IRDA_ONE_BIT_SAMPLE_DISABLED IRDA_ONE_BIT_SAMPLE_DISABLE +#define IRDA_ONE_BIT_SAMPLE_ENABLED IRDA_ONE_BIT_SAMPLE_ENABLE + +/** + * @} + */ + +/** @defgroup HAL_IWDG_Aliased_Defines HAL IWDG Aliased Defines maintained for legacy purpose + * @{ + */ +#define KR_KEY_RELOAD IWDG_KEY_RELOAD +#define KR_KEY_ENABLE IWDG_KEY_ENABLE +#define KR_KEY_EWA IWDG_KEY_WRITE_ACCESS_ENABLE +#define KR_KEY_DWA IWDG_KEY_WRITE_ACCESS_DISABLE +/** + * @} + */ + +/** @defgroup HAL_LPTIM_Aliased_Defines HAL LPTIM Aliased Defines maintained for legacy purpose + * @{ + */ + +#define LPTIM_CLOCKSAMPLETIME_DIRECTTRANSISTION LPTIM_CLOCKSAMPLETIME_DIRECTTRANSITION +#define LPTIM_CLOCKSAMPLETIME_2TRANSISTIONS LPTIM_CLOCKSAMPLETIME_2TRANSITIONS +#define LPTIM_CLOCKSAMPLETIME_4TRANSISTIONS LPTIM_CLOCKSAMPLETIME_4TRANSITIONS +#define LPTIM_CLOCKSAMPLETIME_8TRANSISTIONS LPTIM_CLOCKSAMPLETIME_8TRANSITIONS + +#define LPTIM_CLOCKPOLARITY_RISINGEDGE LPTIM_CLOCKPOLARITY_RISING +#define LPTIM_CLOCKPOLARITY_FALLINGEDGE LPTIM_CLOCKPOLARITY_FALLING +#define LPTIM_CLOCKPOLARITY_BOTHEDGES LPTIM_CLOCKPOLARITY_RISING_FALLING + +#define LPTIM_TRIGSAMPLETIME_DIRECTTRANSISTION LPTIM_TRIGSAMPLETIME_DIRECTTRANSITION +#define LPTIM_TRIGSAMPLETIME_2TRANSISTIONS LPTIM_TRIGSAMPLETIME_2TRANSITIONS +#define LPTIM_TRIGSAMPLETIME_4TRANSISTIONS LPTIM_TRIGSAMPLETIME_4TRANSITIONS +#define LPTIM_TRIGSAMPLETIME_8TRANSISTIONS LPTIM_TRIGSAMPLETIME_8TRANSITIONS + +/* The following 3 definition have also been present in a temporary version of lptim.h */ +/* They need to be renamed also to the right name, just in case */ +#define LPTIM_TRIGSAMPLETIME_2TRANSITION LPTIM_TRIGSAMPLETIME_2TRANSITIONS +#define LPTIM_TRIGSAMPLETIME_4TRANSITION LPTIM_TRIGSAMPLETIME_4TRANSITIONS +#define LPTIM_TRIGSAMPLETIME_8TRANSITION 
LPTIM_TRIGSAMPLETIME_8TRANSITIONS + + +/** @defgroup HAL_LPTIM_Aliased_Defines HAL LPTIM Aliased Defines maintained for legacy purpose + * @{ + */ +#define HAL_LPTIM_ReadCompare HAL_LPTIM_ReadCapturedValue +/** + * @} + */ + +#if defined(STM32U5) +#define LPTIM_ISR_CC1 LPTIM_ISR_CC1IF +#define LPTIM_ISR_CC2 LPTIM_ISR_CC2IF +#define LPTIM_CHANNEL_ALL 0x00000000U +#endif /* STM32U5 */ +/** + * @} + */ + +/** @defgroup HAL_NAND_Aliased_Defines HAL NAND Aliased Defines maintained for legacy purpose + * @{ + */ +#define HAL_NAND_Read_Page HAL_NAND_Read_Page_8b +#define HAL_NAND_Write_Page HAL_NAND_Write_Page_8b +#define HAL_NAND_Read_SpareArea HAL_NAND_Read_SpareArea_8b +#define HAL_NAND_Write_SpareArea HAL_NAND_Write_SpareArea_8b + +#define NAND_AddressTypedef NAND_AddressTypeDef + +#define __ARRAY_ADDRESS ARRAY_ADDRESS +#define __ADDR_1st_CYCLE ADDR_1ST_CYCLE +#define __ADDR_2nd_CYCLE ADDR_2ND_CYCLE +#define __ADDR_3rd_CYCLE ADDR_3RD_CYCLE +#define __ADDR_4th_CYCLE ADDR_4TH_CYCLE +/** + * @} + */ + +/** @defgroup HAL_NOR_Aliased_Defines HAL NOR Aliased Defines maintained for legacy purpose + * @{ + */ +#define NOR_StatusTypedef HAL_NOR_StatusTypeDef +#define NOR_SUCCESS HAL_NOR_STATUS_SUCCESS +#define NOR_ONGOING HAL_NOR_STATUS_ONGOING +#define NOR_ERROR HAL_NOR_STATUS_ERROR +#define NOR_TIMEOUT HAL_NOR_STATUS_TIMEOUT + +#define __NOR_WRITE NOR_WRITE +#define __NOR_ADDR_SHIFT NOR_ADDR_SHIFT +/** + * @} + */ + +/** @defgroup HAL_OPAMP_Aliased_Defines HAL OPAMP Aliased Defines maintained for legacy purpose + * @{ + */ + +#define OPAMP_NONINVERTINGINPUT_VP0 OPAMP_NONINVERTINGINPUT_IO0 +#define OPAMP_NONINVERTINGINPUT_VP1 OPAMP_NONINVERTINGINPUT_IO1 +#define OPAMP_NONINVERTINGINPUT_VP2 OPAMP_NONINVERTINGINPUT_IO2 +#define OPAMP_NONINVERTINGINPUT_VP3 OPAMP_NONINVERTINGINPUT_IO3 + +#define OPAMP_SEC_NONINVERTINGINPUT_VP0 OPAMP_SEC_NONINVERTINGINPUT_IO0 +#define OPAMP_SEC_NONINVERTINGINPUT_VP1 OPAMP_SEC_NONINVERTINGINPUT_IO1 +#define OPAMP_SEC_NONINVERTINGINPUT_VP2 OPAMP_SEC_NONINVERTINGINPUT_IO2 +#define OPAMP_SEC_NONINVERTINGINPUT_VP3 OPAMP_SEC_NONINVERTINGINPUT_IO3 + +#define OPAMP_INVERTINGINPUT_VM0 OPAMP_INVERTINGINPUT_IO0 +#define OPAMP_INVERTINGINPUT_VM1 OPAMP_INVERTINGINPUT_IO1 + +#define IOPAMP_INVERTINGINPUT_VM0 OPAMP_INVERTINGINPUT_IO0 +#define IOPAMP_INVERTINGINPUT_VM1 OPAMP_INVERTINGINPUT_IO1 + +#define OPAMP_SEC_INVERTINGINPUT_VM0 OPAMP_SEC_INVERTINGINPUT_IO0 +#define OPAMP_SEC_INVERTINGINPUT_VM1 OPAMP_SEC_INVERTINGINPUT_IO1 + +#define OPAMP_INVERTINGINPUT_VINM OPAMP_SEC_INVERTINGINPUT_IO1 + +#define OPAMP_PGACONNECT_NO OPAMP_PGA_CONNECT_INVERTINGINPUT_NO +#define OPAMP_PGACONNECT_VM0 OPAMP_PGA_CONNECT_INVERTINGINPUT_IO0 +#define OPAMP_PGACONNECT_VM1 OPAMP_PGA_CONNECT_INVERTINGINPUT_IO1 + +#if defined(STM32L1) || defined(STM32L4) || defined(STM32L5) || defined(STM32H7) || defined(STM32G4) +#define HAL_OPAMP_MSP_INIT_CB_ID HAL_OPAMP_MSPINIT_CB_ID +#define HAL_OPAMP_MSP_DEINIT_CB_ID HAL_OPAMP_MSPDEINIT_CB_ID +#endif + +#if defined(STM32L4) || defined(STM32L5) +#define OPAMP_POWERMODE_NORMAL OPAMP_POWERMODE_NORMALPOWER +#elif defined(STM32G4) +#define OPAMP_POWERMODE_NORMAL OPAMP_POWERMODE_NORMALSPEED +#endif + +/** + * @} + */ + +/** @defgroup HAL_I2S_Aliased_Defines HAL I2S Aliased Defines maintained for legacy purpose + * @{ + */ +#define I2S_STANDARD_PHILLIPS I2S_STANDARD_PHILIPS + +#if defined(STM32H7) +#define I2S_IT_TXE I2S_IT_TXP +#define I2S_IT_RXNE I2S_IT_RXP + +#define I2S_FLAG_TXE I2S_FLAG_TXP +#define I2S_FLAG_RXNE I2S_FLAG_RXP +#endif + +#if defined(STM32F7) +#define 
I2S_CLOCK_SYSCLK I2S_CLOCK_PLL +#endif +/** + * @} + */ + +/** @defgroup HAL_PCCARD_Aliased_Defines HAL PCCARD Aliased Defines maintained for legacy purpose + * @{ + */ + +/* Compact Flash-ATA registers description */ +#define CF_DATA ATA_DATA +#define CF_SECTOR_COUNT ATA_SECTOR_COUNT +#define CF_SECTOR_NUMBER ATA_SECTOR_NUMBER +#define CF_CYLINDER_LOW ATA_CYLINDER_LOW +#define CF_CYLINDER_HIGH ATA_CYLINDER_HIGH +#define CF_CARD_HEAD ATA_CARD_HEAD +#define CF_STATUS_CMD ATA_STATUS_CMD +#define CF_STATUS_CMD_ALTERNATE ATA_STATUS_CMD_ALTERNATE +#define CF_COMMON_DATA_AREA ATA_COMMON_DATA_AREA + +/* Compact Flash-ATA commands */ +#define CF_READ_SECTOR_CMD ATA_READ_SECTOR_CMD +#define CF_WRITE_SECTOR_CMD ATA_WRITE_SECTOR_CMD +#define CF_ERASE_SECTOR_CMD ATA_ERASE_SECTOR_CMD +#define CF_IDENTIFY_CMD ATA_IDENTIFY_CMD + +#define PCCARD_StatusTypedef HAL_PCCARD_StatusTypeDef +#define PCCARD_SUCCESS HAL_PCCARD_STATUS_SUCCESS +#define PCCARD_ONGOING HAL_PCCARD_STATUS_ONGOING +#define PCCARD_ERROR HAL_PCCARD_STATUS_ERROR +#define PCCARD_TIMEOUT HAL_PCCARD_STATUS_TIMEOUT +/** + * @} + */ + +/** @defgroup HAL_RTC_Aliased_Defines HAL RTC Aliased Defines maintained for legacy purpose + * @{ + */ + +#define FORMAT_BIN RTC_FORMAT_BIN +#define FORMAT_BCD RTC_FORMAT_BCD + +#define RTC_ALARMSUBSECONDMASK_None RTC_ALARMSUBSECONDMASK_NONE +#define RTC_TAMPERERASEBACKUP_DISABLED RTC_TAMPER_ERASE_BACKUP_DISABLE +#define RTC_TAMPERMASK_FLAG_DISABLED RTC_TAMPERMASK_FLAG_DISABLE +#define RTC_TAMPERMASK_FLAG_ENABLED RTC_TAMPERMASK_FLAG_ENABLE + +#define RTC_MASKTAMPERFLAG_DISABLED RTC_TAMPERMASK_FLAG_DISABLE +#define RTC_MASKTAMPERFLAG_ENABLED RTC_TAMPERMASK_FLAG_ENABLE +#define RTC_TAMPERERASEBACKUP_ENABLED RTC_TAMPER_ERASE_BACKUP_ENABLE +#define RTC_TAMPER1_2_INTERRUPT RTC_ALL_TAMPER_INTERRUPT +#define RTC_TAMPER1_2_3_INTERRUPT RTC_ALL_TAMPER_INTERRUPT + +#define RTC_TIMESTAMPPIN_PC13 RTC_TIMESTAMPPIN_DEFAULT +#define RTC_TIMESTAMPPIN_PA0 RTC_TIMESTAMPPIN_POS1 +#define RTC_TIMESTAMPPIN_PI8 RTC_TIMESTAMPPIN_POS1 +#define RTC_TIMESTAMPPIN_PC1 RTC_TIMESTAMPPIN_POS2 + +#define RTC_OUTPUT_REMAP_PC13 RTC_OUTPUT_REMAP_NONE +#define RTC_OUTPUT_REMAP_PB14 RTC_OUTPUT_REMAP_POS1 +#define RTC_OUTPUT_REMAP_PB2 RTC_OUTPUT_REMAP_POS1 + +#define RTC_TAMPERPIN_PC13 RTC_TAMPERPIN_DEFAULT +#define RTC_TAMPERPIN_PA0 RTC_TAMPERPIN_POS1 +#define RTC_TAMPERPIN_PI8 RTC_TAMPERPIN_POS1 + +#if defined(STM32H7) +#define RTC_TAMPCR_TAMPXE RTC_TAMPER_X +#define RTC_TAMPCR_TAMPXIE RTC_TAMPER_X_INTERRUPT + +#define RTC_TAMPER1_INTERRUPT RTC_IT_TAMP1 +#define RTC_TAMPER2_INTERRUPT RTC_IT_TAMP2 +#define RTC_TAMPER3_INTERRUPT RTC_IT_TAMP3 +#define RTC_ALL_TAMPER_INTERRUPT RTC_IT_TAMPALL +#endif /* STM32H7 */ + +/** + * @} + */ + + +/** @defgroup HAL_SMARTCARD_Aliased_Defines HAL SMARTCARD Aliased Defines maintained for legacy purpose + * @{ + */ +#define SMARTCARD_NACK_ENABLED SMARTCARD_NACK_ENABLE +#define SMARTCARD_NACK_DISABLED SMARTCARD_NACK_DISABLE + +#define SMARTCARD_ONEBIT_SAMPLING_DISABLED SMARTCARD_ONE_BIT_SAMPLE_DISABLE +#define SMARTCARD_ONEBIT_SAMPLING_ENABLED SMARTCARD_ONE_BIT_SAMPLE_ENABLE +#define SMARTCARD_ONEBIT_SAMPLING_DISABLE SMARTCARD_ONE_BIT_SAMPLE_DISABLE +#define SMARTCARD_ONEBIT_SAMPLING_ENABLE SMARTCARD_ONE_BIT_SAMPLE_ENABLE + +#define SMARTCARD_TIMEOUT_DISABLED SMARTCARD_TIMEOUT_DISABLE +#define SMARTCARD_TIMEOUT_ENABLED SMARTCARD_TIMEOUT_ENABLE + +#define SMARTCARD_LASTBIT_DISABLED SMARTCARD_LASTBIT_DISABLE +#define SMARTCARD_LASTBIT_ENABLED SMARTCARD_LASTBIT_ENABLE +/** + * @} + */ + + +/** @defgroup 
HAL_SMBUS_Aliased_Defines HAL SMBUS Aliased Defines maintained for legacy purpose + * @{ + */ +#define SMBUS_DUALADDRESS_DISABLED SMBUS_DUALADDRESS_DISABLE +#define SMBUS_DUALADDRESS_ENABLED SMBUS_DUALADDRESS_ENABLE +#define SMBUS_GENERALCALL_DISABLED SMBUS_GENERALCALL_DISABLE +#define SMBUS_GENERALCALL_ENABLED SMBUS_GENERALCALL_ENABLE +#define SMBUS_NOSTRETCH_DISABLED SMBUS_NOSTRETCH_DISABLE +#define SMBUS_NOSTRETCH_ENABLED SMBUS_NOSTRETCH_ENABLE +#define SMBUS_ANALOGFILTER_ENABLED SMBUS_ANALOGFILTER_ENABLE +#define SMBUS_ANALOGFILTER_DISABLED SMBUS_ANALOGFILTER_DISABLE +#define SMBUS_PEC_DISABLED SMBUS_PEC_DISABLE +#define SMBUS_PEC_ENABLED SMBUS_PEC_ENABLE +#define HAL_SMBUS_STATE_SLAVE_LISTEN HAL_SMBUS_STATE_LISTEN +/** + * @} + */ + +/** @defgroup HAL_SPI_Aliased_Defines HAL SPI Aliased Defines maintained for legacy purpose + * @{ + */ +#define SPI_TIMODE_DISABLED SPI_TIMODE_DISABLE +#define SPI_TIMODE_ENABLED SPI_TIMODE_ENABLE + +#define SPI_CRCCALCULATION_DISABLED SPI_CRCCALCULATION_DISABLE +#define SPI_CRCCALCULATION_ENABLED SPI_CRCCALCULATION_ENABLE + +#define SPI_NSS_PULSE_DISABLED SPI_NSS_PULSE_DISABLE +#define SPI_NSS_PULSE_ENABLED SPI_NSS_PULSE_ENABLE + +#if defined(STM32H7) + +#define SPI_FLAG_TXE SPI_FLAG_TXP +#define SPI_FLAG_RXNE SPI_FLAG_RXP + +#define SPI_IT_TXE SPI_IT_TXP +#define SPI_IT_RXNE SPI_IT_RXP + +#define SPI_FRLVL_EMPTY SPI_RX_FIFO_0PACKET +#define SPI_FRLVL_QUARTER_FULL SPI_RX_FIFO_1PACKET +#define SPI_FRLVL_HALF_FULL SPI_RX_FIFO_2PACKET +#define SPI_FRLVL_FULL SPI_RX_FIFO_3PACKET + +#endif /* STM32H7 */ + +/** + * @} + */ + +/** @defgroup HAL_TIM_Aliased_Defines HAL TIM Aliased Defines maintained for legacy purpose + * @{ + */ +#define CCER_CCxE_MASK TIM_CCER_CCxE_MASK +#define CCER_CCxNE_MASK TIM_CCER_CCxNE_MASK + +#define TIM_DMABase_CR1 TIM_DMABASE_CR1 +#define TIM_DMABase_CR2 TIM_DMABASE_CR2 +#define TIM_DMABase_SMCR TIM_DMABASE_SMCR +#define TIM_DMABase_DIER TIM_DMABASE_DIER +#define TIM_DMABase_SR TIM_DMABASE_SR +#define TIM_DMABase_EGR TIM_DMABASE_EGR +#define TIM_DMABase_CCMR1 TIM_DMABASE_CCMR1 +#define TIM_DMABase_CCMR2 TIM_DMABASE_CCMR2 +#define TIM_DMABase_CCER TIM_DMABASE_CCER +#define TIM_DMABase_CNT TIM_DMABASE_CNT +#define TIM_DMABase_PSC TIM_DMABASE_PSC +#define TIM_DMABase_ARR TIM_DMABASE_ARR +#define TIM_DMABase_RCR TIM_DMABASE_RCR +#define TIM_DMABase_CCR1 TIM_DMABASE_CCR1 +#define TIM_DMABase_CCR2 TIM_DMABASE_CCR2 +#define TIM_DMABase_CCR3 TIM_DMABASE_CCR3 +#define TIM_DMABase_CCR4 TIM_DMABASE_CCR4 +#define TIM_DMABase_BDTR TIM_DMABASE_BDTR +#define TIM_DMABase_DCR TIM_DMABASE_DCR +#define TIM_DMABase_DMAR TIM_DMABASE_DMAR +#define TIM_DMABase_OR1 TIM_DMABASE_OR1 +#define TIM_DMABase_CCMR3 TIM_DMABASE_CCMR3 +#define TIM_DMABase_CCR5 TIM_DMABASE_CCR5 +#define TIM_DMABase_CCR6 TIM_DMABASE_CCR6 +#define TIM_DMABase_OR2 TIM_DMABASE_OR2 +#define TIM_DMABase_OR3 TIM_DMABASE_OR3 +#define TIM_DMABase_OR TIM_DMABASE_OR + +#define TIM_EventSource_Update TIM_EVENTSOURCE_UPDATE +#define TIM_EventSource_CC1 TIM_EVENTSOURCE_CC1 +#define TIM_EventSource_CC2 TIM_EVENTSOURCE_CC2 +#define TIM_EventSource_CC3 TIM_EVENTSOURCE_CC3 +#define TIM_EventSource_CC4 TIM_EVENTSOURCE_CC4 +#define TIM_EventSource_COM TIM_EVENTSOURCE_COM +#define TIM_EventSource_Trigger TIM_EVENTSOURCE_TRIGGER +#define TIM_EventSource_Break TIM_EVENTSOURCE_BREAK +#define TIM_EventSource_Break2 TIM_EVENTSOURCE_BREAK2 + +#define TIM_DMABurstLength_1Transfer TIM_DMABURSTLENGTH_1TRANSFER +#define TIM_DMABurstLength_2Transfers TIM_DMABURSTLENGTH_2TRANSFERS +#define 
TIM_DMABurstLength_3Transfers TIM_DMABURSTLENGTH_3TRANSFERS +#define TIM_DMABurstLength_4Transfers TIM_DMABURSTLENGTH_4TRANSFERS +#define TIM_DMABurstLength_5Transfers TIM_DMABURSTLENGTH_5TRANSFERS +#define TIM_DMABurstLength_6Transfers TIM_DMABURSTLENGTH_6TRANSFERS +#define TIM_DMABurstLength_7Transfers TIM_DMABURSTLENGTH_7TRANSFERS +#define TIM_DMABurstLength_8Transfers TIM_DMABURSTLENGTH_8TRANSFERS +#define TIM_DMABurstLength_9Transfers TIM_DMABURSTLENGTH_9TRANSFERS +#define TIM_DMABurstLength_10Transfers TIM_DMABURSTLENGTH_10TRANSFERS +#define TIM_DMABurstLength_11Transfers TIM_DMABURSTLENGTH_11TRANSFERS +#define TIM_DMABurstLength_12Transfers TIM_DMABURSTLENGTH_12TRANSFERS +#define TIM_DMABurstLength_13Transfers TIM_DMABURSTLENGTH_13TRANSFERS +#define TIM_DMABurstLength_14Transfers TIM_DMABURSTLENGTH_14TRANSFERS +#define TIM_DMABurstLength_15Transfers TIM_DMABURSTLENGTH_15TRANSFERS +#define TIM_DMABurstLength_16Transfers TIM_DMABURSTLENGTH_16TRANSFERS +#define TIM_DMABurstLength_17Transfers TIM_DMABURSTLENGTH_17TRANSFERS +#define TIM_DMABurstLength_18Transfers TIM_DMABURSTLENGTH_18TRANSFERS + +#if defined(STM32L0) +#define TIM22_TI1_GPIO1 TIM22_TI1_GPIO +#define TIM22_TI1_GPIO2 TIM22_TI1_GPIO +#endif + +#if defined(STM32F3) +#define IS_TIM_HALL_INTERFACE_INSTANCE IS_TIM_HALL_SENSOR_INTERFACE_INSTANCE +#endif + +#if defined(STM32H7) +#define TIM_TIM1_ETR_COMP1_OUT TIM_TIM1_ETR_COMP1 +#define TIM_TIM1_ETR_COMP2_OUT TIM_TIM1_ETR_COMP2 +#define TIM_TIM8_ETR_COMP1_OUT TIM_TIM8_ETR_COMP1 +#define TIM_TIM8_ETR_COMP2_OUT TIM_TIM8_ETR_COMP2 +#define TIM_TIM2_ETR_COMP1_OUT TIM_TIM2_ETR_COMP1 +#define TIM_TIM2_ETR_COMP2_OUT TIM_TIM2_ETR_COMP2 +#define TIM_TIM3_ETR_COMP1_OUT TIM_TIM3_ETR_COMP1 +#define TIM_TIM1_TI1_COMP1_OUT TIM_TIM1_TI1_COMP1 +#define TIM_TIM8_TI1_COMP2_OUT TIM_TIM8_TI1_COMP2 +#define TIM_TIM2_TI4_COMP1_OUT TIM_TIM2_TI4_COMP1 +#define TIM_TIM2_TI4_COMP2_OUT TIM_TIM2_TI4_COMP2 +#define TIM_TIM2_TI4_COMP1COMP2_OUT TIM_TIM2_TI4_COMP1_COMP2 +#define TIM_TIM3_TI1_COMP1_OUT TIM_TIM3_TI1_COMP1 +#define TIM_TIM3_TI1_COMP2_OUT TIM_TIM3_TI1_COMP2 +#define TIM_TIM3_TI1_COMP1COMP2_OUT TIM_TIM3_TI1_COMP1_COMP2 +#endif + +#if defined(STM32U5) || defined(STM32MP2) +#define OCREF_CLEAR_SELECT_Pos OCREF_CLEAR_SELECT_POS +#define OCREF_CLEAR_SELECT_Msk OCREF_CLEAR_SELECT_MSK +#endif +/** + * @} + */ + +/** @defgroup HAL_TSC_Aliased_Defines HAL TSC Aliased Defines maintained for legacy purpose + * @{ + */ +#define TSC_SYNC_POL_FALL TSC_SYNC_POLARITY_FALLING +#define TSC_SYNC_POL_RISE_HIGH TSC_SYNC_POLARITY_RISING +/** + * @} + */ + +/** @defgroup HAL_UART_Aliased_Defines HAL UART Aliased Defines maintained for legacy purpose + * @{ + */ +#define UART_ONEBIT_SAMPLING_DISABLED UART_ONE_BIT_SAMPLE_DISABLE +#define UART_ONEBIT_SAMPLING_ENABLED UART_ONE_BIT_SAMPLE_ENABLE +#define UART_ONE_BIT_SAMPLE_DISABLED UART_ONE_BIT_SAMPLE_DISABLE +#define UART_ONE_BIT_SAMPLE_ENABLED UART_ONE_BIT_SAMPLE_ENABLE + +#define __HAL_UART_ONEBIT_ENABLE __HAL_UART_ONE_BIT_SAMPLE_ENABLE +#define __HAL_UART_ONEBIT_DISABLE __HAL_UART_ONE_BIT_SAMPLE_DISABLE + +#define __DIV_SAMPLING16 UART_DIV_SAMPLING16 +#define __DIVMANT_SAMPLING16 UART_DIVMANT_SAMPLING16 +#define __DIVFRAQ_SAMPLING16 UART_DIVFRAQ_SAMPLING16 +#define __UART_BRR_SAMPLING16 UART_BRR_SAMPLING16 + +#define __DIV_SAMPLING8 UART_DIV_SAMPLING8 +#define __DIVMANT_SAMPLING8 UART_DIVMANT_SAMPLING8 +#define __DIVFRAQ_SAMPLING8 UART_DIVFRAQ_SAMPLING8 +#define __UART_BRR_SAMPLING8 UART_BRR_SAMPLING8 + +#define __DIV_LPUART UART_DIV_LPUART + +#define 
UART_WAKEUPMETHODE_IDLELINE UART_WAKEUPMETHOD_IDLELINE +#define UART_WAKEUPMETHODE_ADDRESSMARK UART_WAKEUPMETHOD_ADDRESSMARK + +/** + * @} + */ + + +/** @defgroup HAL_USART_Aliased_Defines HAL USART Aliased Defines maintained for legacy purpose + * @{ + */ + +#define USART_CLOCK_DISABLED USART_CLOCK_DISABLE +#define USART_CLOCK_ENABLED USART_CLOCK_ENABLE + +#define USARTNACK_ENABLED USART_NACK_ENABLE +#define USARTNACK_DISABLED USART_NACK_DISABLE +/** + * @} + */ + +/** @defgroup HAL_WWDG_Aliased_Defines HAL WWDG Aliased Defines maintained for legacy purpose + * @{ + */ +#define CFR_BASE WWDG_CFR_BASE + +/** + * @} + */ + +/** @defgroup HAL_CAN_Aliased_Defines HAL CAN Aliased Defines maintained for legacy purpose + * @{ + */ +#define CAN_FilterFIFO0 CAN_FILTER_FIFO0 +#define CAN_FilterFIFO1 CAN_FILTER_FIFO1 +#define CAN_IT_RQCP0 CAN_IT_TME +#define CAN_IT_RQCP1 CAN_IT_TME +#define CAN_IT_RQCP2 CAN_IT_TME +#define INAK_TIMEOUT CAN_TIMEOUT_VALUE +#define SLAK_TIMEOUT CAN_TIMEOUT_VALUE +#define CAN_TXSTATUS_FAILED ((uint8_t)0x00U) +#define CAN_TXSTATUS_OK ((uint8_t)0x01U) +#define CAN_TXSTATUS_PENDING ((uint8_t)0x02U) + +/** + * @} + */ + +/** @defgroup HAL_ETH_Aliased_Defines HAL ETH Aliased Defines maintained for legacy purpose + * @{ + */ + +#define VLAN_TAG ETH_VLAN_TAG +#define MIN_ETH_PAYLOAD ETH_MIN_ETH_PAYLOAD +#define MAX_ETH_PAYLOAD ETH_MAX_ETH_PAYLOAD +#define JUMBO_FRAME_PAYLOAD ETH_JUMBO_FRAME_PAYLOAD +#define MACMIIAR_CR_MASK ETH_MACMIIAR_CR_MASK +#define MACCR_CLEAR_MASK ETH_MACCR_CLEAR_MASK +#define MACFCR_CLEAR_MASK ETH_MACFCR_CLEAR_MASK +#define DMAOMR_CLEAR_MASK ETH_DMAOMR_CLEAR_MASK + +#define ETH_MMCCR 0x00000100U +#define ETH_MMCRIR 0x00000104U +#define ETH_MMCTIR 0x00000108U +#define ETH_MMCRIMR 0x0000010CU +#define ETH_MMCTIMR 0x00000110U +#define ETH_MMCTGFSCCR 0x0000014CU +#define ETH_MMCTGFMSCCR 0x00000150U +#define ETH_MMCTGFCR 0x00000168U +#define ETH_MMCRFCECR 0x00000194U +#define ETH_MMCRFAECR 0x00000198U +#define ETH_MMCRGUFCR 0x000001C4U + +#define ETH_MAC_TXFIFO_FULL 0x02000000U /* Tx FIFO full */ +#define ETH_MAC_TXFIFONOT_EMPTY 0x01000000U /* Tx FIFO not empty */ +#define ETH_MAC_TXFIFO_WRITE_ACTIVE 0x00400000U /* Tx FIFO write active */ +#define ETH_MAC_TXFIFO_IDLE 0x00000000U /* Tx FIFO read status: Idle */ +#define ETH_MAC_TXFIFO_READ 0x00100000U /* Tx FIFO read status: Read (transferring data to the MAC transmitter) */ +#define ETH_MAC_TXFIFO_WAITING 0x00200000U /* Tx FIFO read status: Waiting for TxStatus from MAC transmitter */ +#define ETH_MAC_TXFIFO_WRITING 0x00300000U /* Tx FIFO read status: Writing the received TxStatus or flushing the TxFIFO */ +#define ETH_MAC_TRANSMISSION_PAUSE 0x00080000U /* MAC transmitter in pause */ +#define ETH_MAC_TRANSMITFRAMECONTROLLER_IDLE 0x00000000U /* MAC transmit frame controller: Idle */ +#define ETH_MAC_TRANSMITFRAMECONTROLLER_WAITING 0x00020000U /* MAC transmit frame controller: Waiting for Status of previous frame or IFG/backoff period to be over */ +#define ETH_MAC_TRANSMITFRAMECONTROLLER_GENRATING_PCF 0x00040000U /* MAC transmit frame controller: Generating and transmitting a Pause control frame (in full duplex mode) */ +#define ETH_MAC_TRANSMITFRAMECONTROLLER_TRANSFERRING 0x00060000U /* MAC transmit frame controller: Transferring input frame for transmission */ +#define ETH_MAC_MII_TRANSMIT_ACTIVE 0x00010000U /* MAC MII transmit engine active */ +#define ETH_MAC_RXFIFO_EMPTY 0x00000000U /* Rx FIFO fill level: empty */ +#define ETH_MAC_RXFIFO_BELOW_THRESHOLD 0x00000100U /* Rx FIFO fill level: fill-level below 
flow-control de-activate threshold */ +#define ETH_MAC_RXFIFO_ABOVE_THRESHOLD 0x00000200U /* Rx FIFO fill level: fill-level above flow-control activate threshold */ +#define ETH_MAC_RXFIFO_FULL 0x00000300U /* Rx FIFO fill level: full */ +#if defined(STM32F1) +#else +#define ETH_MAC_READCONTROLLER_IDLE 0x00000000U /* Rx FIFO read controller IDLE state */ +#define ETH_MAC_READCONTROLLER_READING_DATA 0x00000020U /* Rx FIFO read controller Reading frame data */ +#define ETH_MAC_READCONTROLLER_READING_STATUS 0x00000040U /* Rx FIFO read controller Reading frame status (or time-stamp) */ +#endif +#define ETH_MAC_READCONTROLLER_FLUSHING 0x00000060U /* Rx FIFO read controller Flushing the frame data and status */ +#define ETH_MAC_RXFIFO_WRITE_ACTIVE 0x00000010U /* Rx FIFO write controller active */ +#define ETH_MAC_SMALL_FIFO_NOTACTIVE 0x00000000U /* MAC small FIFO read / write controllers not active */ +#define ETH_MAC_SMALL_FIFO_READ_ACTIVE 0x00000002U /* MAC small FIFO read controller active */ +#define ETH_MAC_SMALL_FIFO_WRITE_ACTIVE 0x00000004U /* MAC small FIFO write controller active */ +#define ETH_MAC_SMALL_FIFO_RW_ACTIVE 0x00000006U /* MAC small FIFO read / write controllers active */ +#define ETH_MAC_MII_RECEIVE_PROTOCOL_ACTIVE 0x00000001U /* MAC MII receive protocol engine active */ + +/** + * @} + */ + +/** @defgroup HAL_DCMI_Aliased_Defines HAL DCMI Aliased Defines maintained for legacy purpose + * @{ + */ +#define HAL_DCMI_ERROR_OVF HAL_DCMI_ERROR_OVR +#define DCMI_IT_OVF DCMI_IT_OVR +#define DCMI_FLAG_OVFRI DCMI_FLAG_OVRRI +#define DCMI_FLAG_OVFMI DCMI_FLAG_OVRMI + +#define HAL_DCMI_ConfigCROP HAL_DCMI_ConfigCrop +#define HAL_DCMI_EnableCROP HAL_DCMI_EnableCrop +#define HAL_DCMI_DisableCROP HAL_DCMI_DisableCrop + +/** + * @} + */ + +#if defined(STM32L4) || defined(STM32F7) || defined(STM32F427xx) || defined(STM32F437xx) \ + || defined(STM32F429xx) || defined(STM32F439xx) || defined(STM32F469xx) || defined(STM32F479xx) \ + || defined(STM32H7) +/** @defgroup HAL_DMA2D_Aliased_Defines HAL DMA2D Aliased Defines maintained for legacy purpose + * @{ + */ +#define DMA2D_ARGB8888 DMA2D_OUTPUT_ARGB8888 +#define DMA2D_RGB888 DMA2D_OUTPUT_RGB888 +#define DMA2D_RGB565 DMA2D_OUTPUT_RGB565 +#define DMA2D_ARGB1555 DMA2D_OUTPUT_ARGB1555 +#define DMA2D_ARGB4444 DMA2D_OUTPUT_ARGB4444 + +#define CM_ARGB8888 DMA2D_INPUT_ARGB8888 +#define CM_RGB888 DMA2D_INPUT_RGB888 +#define CM_RGB565 DMA2D_INPUT_RGB565 +#define CM_ARGB1555 DMA2D_INPUT_ARGB1555 +#define CM_ARGB4444 DMA2D_INPUT_ARGB4444 +#define CM_L8 DMA2D_INPUT_L8 +#define CM_AL44 DMA2D_INPUT_AL44 +#define CM_AL88 DMA2D_INPUT_AL88 +#define CM_L4 DMA2D_INPUT_L4 +#define CM_A8 DMA2D_INPUT_A8 +#define CM_A4 DMA2D_INPUT_A4 +/** + * @} + */ +#endif /* STM32L4 || STM32F7 || STM32F4 || STM32H7 */ + +#if defined(STM32L4) || defined(STM32F7) || defined(STM32F427xx) || defined(STM32F437xx) \ + || defined(STM32F429xx) || defined(STM32F439xx) || defined(STM32F469xx) || defined(STM32F479xx) \ + || defined(STM32H7) || defined(STM32U5) +/** @defgroup DMA2D_Aliases DMA2D API Aliases + * @{ + */ +#define HAL_DMA2D_DisableCLUT HAL_DMA2D_CLUTLoading_Abort /*!< Aliased to HAL_DMA2D_CLUTLoading_Abort + for compatibility with legacy code */ +/** + * @} + */ + +#endif /* STM32L4 || STM32F7 || STM32F4 || STM32H7 || STM32U5 */ + +/** @defgroup HAL_PPP_Aliased_Defines HAL PPP Aliased Defines maintained for legacy purpose + * @{ + */ + +/** + * @} + */ + +/* Exported functions --------------------------------------------------------*/ + +/** @defgroup HAL_CRYP_Aliased_Functions 
HAL CRYP Aliased Functions maintained for legacy purpose + * @{ + */ +#define HAL_CRYP_ComputationCpltCallback HAL_CRYPEx_ComputationCpltCallback +/** + * @} + */ + +/** @defgroup HAL_DCACHE_Aliased_Functions HAL DCACHE Aliased Functions maintained for legacy purpose + * @{ + */ + +#if defined(STM32U5) +#define HAL_DCACHE_CleanInvalidateByAddr HAL_DCACHE_CleanInvalidByAddr +#define HAL_DCACHE_CleanInvalidateByAddr_IT HAL_DCACHE_CleanInvalidByAddr_IT +#endif /* STM32U5 */ + +/** + * @} + */ + +#if !defined(STM32F2) +/** @defgroup HASH_alias HASH API alias + * @{ + */ +#define HAL_HASHEx_IRQHandler HAL_HASH_IRQHandler /*!< Redirection for compatibility with legacy code */ +/** + * + * @} + */ +#endif /* STM32F2 */ +/** @defgroup HAL_HASH_Aliased_Functions HAL HASH Aliased Functions maintained for legacy purpose + * @{ + */ +#define HAL_HASH_STATETypeDef HAL_HASH_StateTypeDef +#define HAL_HASHPhaseTypeDef HAL_HASH_PhaseTypeDef +#define HAL_HMAC_MD5_Finish HAL_HASH_MD5_Finish +#define HAL_HMAC_SHA1_Finish HAL_HASH_SHA1_Finish +#define HAL_HMAC_SHA224_Finish HAL_HASH_SHA224_Finish +#define HAL_HMAC_SHA256_Finish HAL_HASH_SHA256_Finish + +/*HASH Algorithm Selection*/ + +#define HASH_AlgoSelection_SHA1 HASH_ALGOSELECTION_SHA1 +#define HASH_AlgoSelection_SHA224 HASH_ALGOSELECTION_SHA224 +#define HASH_AlgoSelection_SHA256 HASH_ALGOSELECTION_SHA256 +#define HASH_AlgoSelection_MD5 HASH_ALGOSELECTION_MD5 + +#define HASH_AlgoMode_HASH HASH_ALGOMODE_HASH +#define HASH_AlgoMode_HMAC HASH_ALGOMODE_HMAC + +#define HASH_HMACKeyType_ShortKey HASH_HMAC_KEYTYPE_SHORTKEY +#define HASH_HMACKeyType_LongKey HASH_HMAC_KEYTYPE_LONGKEY + +#if defined(STM32L4) || defined(STM32L5) || defined(STM32F2) || defined(STM32F4) || defined(STM32F7) || defined(STM32H7) + +#define HAL_HASH_MD5_Accumulate HAL_HASH_MD5_Accmlt +#define HAL_HASH_MD5_Accumulate_End HAL_HASH_MD5_Accmlt_End +#define HAL_HASH_MD5_Accumulate_IT HAL_HASH_MD5_Accmlt_IT +#define HAL_HASH_MD5_Accumulate_End_IT HAL_HASH_MD5_Accmlt_End_IT + +#define HAL_HASH_SHA1_Accumulate HAL_HASH_SHA1_Accmlt +#define HAL_HASH_SHA1_Accumulate_End HAL_HASH_SHA1_Accmlt_End +#define HAL_HASH_SHA1_Accumulate_IT HAL_HASH_SHA1_Accmlt_IT +#define HAL_HASH_SHA1_Accumulate_End_IT HAL_HASH_SHA1_Accmlt_End_IT + +#define HAL_HASHEx_SHA224_Accumulate HAL_HASHEx_SHA224_Accmlt +#define HAL_HASHEx_SHA224_Accumulate_End HAL_HASHEx_SHA224_Accmlt_End +#define HAL_HASHEx_SHA224_Accumulate_IT HAL_HASHEx_SHA224_Accmlt_IT +#define HAL_HASHEx_SHA224_Accumulate_End_IT HAL_HASHEx_SHA224_Accmlt_End_IT + +#define HAL_HASHEx_SHA256_Accumulate HAL_HASHEx_SHA256_Accmlt +#define HAL_HASHEx_SHA256_Accumulate_End HAL_HASHEx_SHA256_Accmlt_End +#define HAL_HASHEx_SHA256_Accumulate_IT HAL_HASHEx_SHA256_Accmlt_IT +#define HAL_HASHEx_SHA256_Accumulate_End_IT HAL_HASHEx_SHA256_Accmlt_End_IT + +#endif /* STM32L4 || STM32L5 || STM32F2 || STM32F4 || STM32F7 || STM32H7 */ +/** + * @} + */ + +/** @defgroup HAL_Aliased_Functions HAL Generic Aliased Functions maintained for legacy purpose + * @{ + */ +#define HAL_EnableDBGSleepMode HAL_DBGMCU_EnableDBGSleepMode +#define HAL_DisableDBGSleepMode HAL_DBGMCU_DisableDBGSleepMode +#define HAL_EnableDBGStopMode HAL_DBGMCU_EnableDBGStopMode +#define HAL_DisableDBGStopMode HAL_DBGMCU_DisableDBGStopMode +#define HAL_EnableDBGStandbyMode HAL_DBGMCU_EnableDBGStandbyMode +#define HAL_DisableDBGStandbyMode HAL_DBGMCU_DisableDBGStandbyMode +#define HAL_DBG_LowPowerConfig(Periph, cmd) (((cmd\ + )==ENABLE)? 
HAL_DBGMCU_DBG_EnableLowPowerConfig(Periph) : HAL_DBGMCU_DBG_DisableLowPowerConfig(Periph)) +#define HAL_VREFINT_OutputSelect HAL_SYSCFG_VREFINT_OutputSelect +#define HAL_Lock_Cmd(cmd) (((cmd)==ENABLE) ? HAL_SYSCFG_Enable_Lock_VREFINT() : HAL_SYSCFG_Disable_Lock_VREFINT()) +#if defined(STM32L0) +#else +#define HAL_VREFINT_Cmd(cmd) (((cmd)==ENABLE)? HAL_SYSCFG_EnableVREFINT() : HAL_SYSCFG_DisableVREFINT()) +#endif +#define HAL_ADC_EnableBuffer_Cmd(cmd) (((cmd)==ENABLE) ? HAL_ADCEx_EnableVREFINT() : HAL_ADCEx_DisableVREFINT()) +#define HAL_ADC_EnableBufferSensor_Cmd(cmd) (((cmd\ + )==ENABLE) ? HAL_ADCEx_EnableVREFINTTempSensor() : HAL_ADCEx_DisableVREFINTTempSensor()) +#if defined(STM32H7A3xx) || defined(STM32H7B3xx) || defined(STM32H7B0xx) || defined(STM32H7A3xxQ) || defined(STM32H7B3xxQ) || defined(STM32H7B0xxQ) +#define HAL_EnableSRDomainDBGStopMode HAL_EnableDomain3DBGStopMode +#define HAL_DisableSRDomainDBGStopMode HAL_DisableDomain3DBGStopMode +#define HAL_EnableSRDomainDBGStandbyMode HAL_EnableDomain3DBGStandbyMode +#define HAL_DisableSRDomainDBGStandbyMode HAL_DisableDomain3DBGStandbyMode +#endif /* STM32H7A3xx || STM32H7B3xx || STM32H7B0xx || STM32H7A3xxQ || STM32H7B3xxQ || STM32H7B0xxQ */ + +/** + * @} + */ + +/** @defgroup HAL_FLASH_Aliased_Functions HAL FLASH Aliased Functions maintained for legacy purpose + * @{ + */ +#define FLASH_HalfPageProgram HAL_FLASHEx_HalfPageProgram +#define FLASH_EnableRunPowerDown HAL_FLASHEx_EnableRunPowerDown +#define FLASH_DisableRunPowerDown HAL_FLASHEx_DisableRunPowerDown +#define HAL_DATA_EEPROMEx_Unlock HAL_FLASHEx_DATAEEPROM_Unlock +#define HAL_DATA_EEPROMEx_Lock HAL_FLASHEx_DATAEEPROM_Lock +#define HAL_DATA_EEPROMEx_Erase HAL_FLASHEx_DATAEEPROM_Erase +#define HAL_DATA_EEPROMEx_Program HAL_FLASHEx_DATAEEPROM_Program + +/** + * @} + */ + +/** @defgroup HAL_I2C_Aliased_Functions HAL I2C Aliased Functions maintained for legacy purpose + * @{ + */ +#define HAL_I2CEx_AnalogFilter_Config HAL_I2CEx_ConfigAnalogFilter +#define HAL_I2CEx_DigitalFilter_Config HAL_I2CEx_ConfigDigitalFilter +#define HAL_FMPI2CEx_AnalogFilter_Config HAL_FMPI2CEx_ConfigAnalogFilter +#define HAL_FMPI2CEx_DigitalFilter_Config HAL_FMPI2CEx_ConfigDigitalFilter + +#define HAL_I2CFastModePlusConfig(SYSCFG_I2CFastModePlus, cmd) (((cmd\ + )==ENABLE)? 
HAL_I2CEx_EnableFastModePlus(SYSCFG_I2CFastModePlus): HAL_I2CEx_DisableFastModePlus(SYSCFG_I2CFastModePlus)) + +#if defined(STM32H7) || defined(STM32WB) || defined(STM32G0) || defined(STM32F0) || defined(STM32F1) || defined(STM32F2) || defined(STM32F3) || defined(STM32F4) || defined(STM32F7) || defined(STM32L0) || defined(STM32L4) || defined(STM32L5) || defined(STM32G4) || defined(STM32L1) +#define HAL_I2C_Master_Sequential_Transmit_IT HAL_I2C_Master_Seq_Transmit_IT +#define HAL_I2C_Master_Sequential_Receive_IT HAL_I2C_Master_Seq_Receive_IT +#define HAL_I2C_Slave_Sequential_Transmit_IT HAL_I2C_Slave_Seq_Transmit_IT +#define HAL_I2C_Slave_Sequential_Receive_IT HAL_I2C_Slave_Seq_Receive_IT +#endif /* STM32H7 || STM32WB || STM32G0 || STM32F0 || STM32F1 || STM32F2 || STM32F3 || STM32F4 || STM32F7 || STM32L0 || STM32L4 || STM32L5 || STM32G4 || STM32L1 */ +#if defined(STM32H7) || defined(STM32WB) || defined(STM32G0) || defined(STM32F4) || defined(STM32F7) || defined(STM32L0) || defined(STM32L4) || defined(STM32L5) || defined(STM32G4)|| defined(STM32L1) +#define HAL_I2C_Master_Sequential_Transmit_DMA HAL_I2C_Master_Seq_Transmit_DMA +#define HAL_I2C_Master_Sequential_Receive_DMA HAL_I2C_Master_Seq_Receive_DMA +#define HAL_I2C_Slave_Sequential_Transmit_DMA HAL_I2C_Slave_Seq_Transmit_DMA +#define HAL_I2C_Slave_Sequential_Receive_DMA HAL_I2C_Slave_Seq_Receive_DMA +#endif /* STM32H7 || STM32WB || STM32G0 || STM32F4 || STM32F7 || STM32L0 || STM32L4 || STM32L5 || STM32G4 || STM32L1 */ + +#if defined(STM32F4) +#define HAL_FMPI2C_Master_Sequential_Transmit_IT HAL_FMPI2C_Master_Seq_Transmit_IT +#define HAL_FMPI2C_Master_Sequential_Receive_IT HAL_FMPI2C_Master_Seq_Receive_IT +#define HAL_FMPI2C_Slave_Sequential_Transmit_IT HAL_FMPI2C_Slave_Seq_Transmit_IT +#define HAL_FMPI2C_Slave_Sequential_Receive_IT HAL_FMPI2C_Slave_Seq_Receive_IT +#define HAL_FMPI2C_Master_Sequential_Transmit_DMA HAL_FMPI2C_Master_Seq_Transmit_DMA +#define HAL_FMPI2C_Master_Sequential_Receive_DMA HAL_FMPI2C_Master_Seq_Receive_DMA +#define HAL_FMPI2C_Slave_Sequential_Transmit_DMA HAL_FMPI2C_Slave_Seq_Transmit_DMA +#define HAL_FMPI2C_Slave_Sequential_Receive_DMA HAL_FMPI2C_Slave_Seq_Receive_DMA +#endif /* STM32F4 */ +/** + * @} + */ + +/** @defgroup HAL_PWR_Aliased HAL PWR Aliased maintained for legacy purpose + * @{ + */ + +#if defined(STM32G0) +#define HAL_PWR_ConfigPVD HAL_PWREx_ConfigPVD +#define HAL_PWR_EnablePVD HAL_PWREx_EnablePVD +#define HAL_PWR_DisablePVD HAL_PWREx_DisablePVD +#define HAL_PWR_PVD_IRQHandler HAL_PWREx_PVD_IRQHandler +#endif +#define HAL_PWR_PVDConfig HAL_PWR_ConfigPVD +#define HAL_PWR_DisableBkUpReg HAL_PWREx_DisableBkUpReg +#define HAL_PWR_DisableFlashPowerDown HAL_PWREx_DisableFlashPowerDown +#define HAL_PWR_DisableVddio2Monitor HAL_PWREx_DisableVddio2Monitor +#define HAL_PWR_EnableBkUpReg HAL_PWREx_EnableBkUpReg +#define HAL_PWR_EnableFlashPowerDown HAL_PWREx_EnableFlashPowerDown +#define HAL_PWR_EnableVddio2Monitor HAL_PWREx_EnableVddio2Monitor +#define HAL_PWR_PVD_PVM_IRQHandler HAL_PWREx_PVD_PVM_IRQHandler +#define HAL_PWR_PVDLevelConfig HAL_PWR_ConfigPVD +#define HAL_PWR_Vddio2Monitor_IRQHandler HAL_PWREx_Vddio2Monitor_IRQHandler +#define HAL_PWR_Vddio2MonitorCallback HAL_PWREx_Vddio2MonitorCallback +#define HAL_PWREx_ActivateOverDrive HAL_PWREx_EnableOverDrive +#define HAL_PWREx_DeactivateOverDrive HAL_PWREx_DisableOverDrive +#define HAL_PWREx_DisableSDADCAnalog HAL_PWREx_DisableSDADC +#define HAL_PWREx_EnableSDADCAnalog HAL_PWREx_EnableSDADC +#define HAL_PWREx_PVMConfig HAL_PWREx_ConfigPVM + +#define 
PWR_MODE_NORMAL PWR_PVD_MODE_NORMAL +#define PWR_MODE_IT_RISING PWR_PVD_MODE_IT_RISING +#define PWR_MODE_IT_FALLING PWR_PVD_MODE_IT_FALLING +#define PWR_MODE_IT_RISING_FALLING PWR_PVD_MODE_IT_RISING_FALLING +#define PWR_MODE_EVENT_RISING PWR_PVD_MODE_EVENT_RISING +#define PWR_MODE_EVENT_FALLING PWR_PVD_MODE_EVENT_FALLING +#define PWR_MODE_EVENT_RISING_FALLING PWR_PVD_MODE_EVENT_RISING_FALLING + +#define CR_OFFSET_BB PWR_CR_OFFSET_BB +#define CSR_OFFSET_BB PWR_CSR_OFFSET_BB +#define PMODE_BIT_NUMBER VOS_BIT_NUMBER +#define CR_PMODE_BB CR_VOS_BB + +#define DBP_BitNumber DBP_BIT_NUMBER +#define PVDE_BitNumber PVDE_BIT_NUMBER +#define PMODE_BitNumber PMODE_BIT_NUMBER +#define EWUP_BitNumber EWUP_BIT_NUMBER +#define FPDS_BitNumber FPDS_BIT_NUMBER +#define ODEN_BitNumber ODEN_BIT_NUMBER +#define ODSWEN_BitNumber ODSWEN_BIT_NUMBER +#define MRLVDS_BitNumber MRLVDS_BIT_NUMBER +#define LPLVDS_BitNumber LPLVDS_BIT_NUMBER +#define BRE_BitNumber BRE_BIT_NUMBER + +#define PWR_MODE_EVT PWR_PVD_MODE_NORMAL + +/** + * @} + */ + +/** @defgroup HAL_SMBUS_Aliased_Functions HAL SMBUS Aliased Functions maintained for legacy purpose + * @{ + */ +#define HAL_SMBUS_Slave_Listen_IT HAL_SMBUS_EnableListen_IT +#define HAL_SMBUS_SlaveAddrCallback HAL_SMBUS_AddrCallback +#define HAL_SMBUS_SlaveListenCpltCallback HAL_SMBUS_ListenCpltCallback +/** + * @} + */ + +/** @defgroup HAL_SPI_Aliased_Functions HAL SPI Aliased Functions maintained for legacy purpose + * @{ + */ +#define HAL_SPI_FlushRxFifo HAL_SPIEx_FlushRxFifo +/** + * @} + */ + +/** @defgroup HAL_TIM_Aliased_Functions HAL TIM Aliased Functions maintained for legacy purpose + * @{ + */ +#define HAL_TIM_DMADelayPulseCplt TIM_DMADelayPulseCplt +#define HAL_TIM_DMAError TIM_DMAError +#define HAL_TIM_DMACaptureCplt TIM_DMACaptureCplt +#define HAL_TIMEx_DMACommutationCplt TIMEx_DMACommutationCplt +#if defined(STM32H7) || defined(STM32G0) || defined(STM32F0) || defined(STM32F1) || defined(STM32F2) || defined(STM32F3) || defined(STM32F4) || defined(STM32F7) || defined(STM32L0) || defined(STM32L4) +#define HAL_TIM_SlaveConfigSynchronization HAL_TIM_SlaveConfigSynchro +#define HAL_TIM_SlaveConfigSynchronization_IT HAL_TIM_SlaveConfigSynchro_IT +#define HAL_TIMEx_CommutationCallback HAL_TIMEx_CommutCallback +#define HAL_TIMEx_ConfigCommutationEvent HAL_TIMEx_ConfigCommutEvent +#define HAL_TIMEx_ConfigCommutationEvent_IT HAL_TIMEx_ConfigCommutEvent_IT +#define HAL_TIMEx_ConfigCommutationEvent_DMA HAL_TIMEx_ConfigCommutEvent_DMA +#endif /* STM32H7 || STM32G0 || STM32F0 || STM32F1 || STM32F2 || STM32F3 || STM32F4 || STM32F7 || STM32L0 */ +/** + * @} + */ + +/** @defgroup HAL_UART_Aliased_Functions HAL UART Aliased Functions maintained for legacy purpose + * @{ + */ +#define HAL_UART_WakeupCallback HAL_UARTEx_WakeupCallback +/** + * @} + */ + +/** @defgroup HAL_LTDC_Aliased_Functions HAL LTDC Aliased Functions maintained for legacy purpose + * @{ + */ +#define HAL_LTDC_LineEvenCallback HAL_LTDC_LineEventCallback +#define HAL_LTDC_Relaod HAL_LTDC_Reload +#define HAL_LTDC_StructInitFromVideoConfig HAL_LTDCEx_StructInitFromVideoConfig +#define HAL_LTDC_StructInitFromAdaptedCommandConfig HAL_LTDCEx_StructInitFromAdaptedCommandConfig +/** + * @} + */ + + +/** @defgroup HAL_PPP_Aliased_Functions HAL PPP Aliased Functions maintained for legacy purpose + * @{ + */ + +/** + * @} + */ + +/* Exported macros ------------------------------------------------------------*/ + +/** @defgroup HAL_AES_Aliased_Macros HAL CRYP Aliased Macros maintained for legacy purpose + * @{ + */ +#define 
AES_IT_CC CRYP_IT_CC +#define AES_IT_ERR CRYP_IT_ERR +#define AES_FLAG_CCF CRYP_FLAG_CCF +/** + * @} + */ + +/** @defgroup HAL_Aliased_Macros HAL Generic Aliased Macros maintained for legacy purpose + * @{ + */ +#define __HAL_GET_BOOT_MODE __HAL_SYSCFG_GET_BOOT_MODE +#define __HAL_REMAPMEMORY_FLASH __HAL_SYSCFG_REMAPMEMORY_FLASH +#define __HAL_REMAPMEMORY_SYSTEMFLASH __HAL_SYSCFG_REMAPMEMORY_SYSTEMFLASH +#define __HAL_REMAPMEMORY_SRAM __HAL_SYSCFG_REMAPMEMORY_SRAM +#define __HAL_REMAPMEMORY_FMC __HAL_SYSCFG_REMAPMEMORY_FMC +#define __HAL_REMAPMEMORY_FMC_SDRAM __HAL_SYSCFG_REMAPMEMORY_FMC_SDRAM +#define __HAL_REMAPMEMORY_FSMC __HAL_SYSCFG_REMAPMEMORY_FSMC +#define __HAL_REMAPMEMORY_QUADSPI __HAL_SYSCFG_REMAPMEMORY_QUADSPI +#define __HAL_FMC_BANK __HAL_SYSCFG_FMC_BANK +#define __HAL_GET_FLAG __HAL_SYSCFG_GET_FLAG +#define __HAL_CLEAR_FLAG __HAL_SYSCFG_CLEAR_FLAG +#define __HAL_VREFINT_OUT_ENABLE __HAL_SYSCFG_VREFINT_OUT_ENABLE +#define __HAL_VREFINT_OUT_DISABLE __HAL_SYSCFG_VREFINT_OUT_DISABLE +#define __HAL_SYSCFG_SRAM2_WRP_ENABLE __HAL_SYSCFG_SRAM2_WRP_0_31_ENABLE + +#define SYSCFG_FLAG_VREF_READY SYSCFG_FLAG_VREFINT_READY +#define SYSCFG_FLAG_RC48 RCC_FLAG_HSI48 +#define IS_SYSCFG_FASTMODEPLUS_CONFIG IS_I2C_FASTMODEPLUS +#define UFB_MODE_BitNumber UFB_MODE_BIT_NUMBER +#define CMP_PD_BitNumber CMP_PD_BIT_NUMBER + +/** + * @} + */ + + +/** @defgroup HAL_ADC_Aliased_Macros HAL ADC Aliased Macros maintained for legacy purpose + * @{ + */ +#define __ADC_ENABLE __HAL_ADC_ENABLE +#define __ADC_DISABLE __HAL_ADC_DISABLE +#define __HAL_ADC_ENABLING_CONDITIONS ADC_ENABLING_CONDITIONS +#define __HAL_ADC_DISABLING_CONDITIONS ADC_DISABLING_CONDITIONS +#define __HAL_ADC_IS_ENABLED ADC_IS_ENABLE +#define __ADC_IS_ENABLED ADC_IS_ENABLE +#define __HAL_ADC_IS_SOFTWARE_START_REGULAR ADC_IS_SOFTWARE_START_REGULAR +#define __HAL_ADC_IS_SOFTWARE_START_INJECTED ADC_IS_SOFTWARE_START_INJECTED +#define __HAL_ADC_IS_CONVERSION_ONGOING_REGULAR_INJECTED ADC_IS_CONVERSION_ONGOING_REGULAR_INJECTED +#define __HAL_ADC_IS_CONVERSION_ONGOING_REGULAR ADC_IS_CONVERSION_ONGOING_REGULAR +#define __HAL_ADC_IS_CONVERSION_ONGOING_INJECTED ADC_IS_CONVERSION_ONGOING_INJECTED +#define __HAL_ADC_IS_CONVERSION_ONGOING ADC_IS_CONVERSION_ONGOING +#define __HAL_ADC_CLEAR_ERRORCODE ADC_CLEAR_ERRORCODE + +#define __HAL_ADC_GET_RESOLUTION ADC_GET_RESOLUTION +#define __HAL_ADC_JSQR_RK ADC_JSQR_RK +#define __HAL_ADC_CFGR_AWD1CH ADC_CFGR_AWD1CH_SHIFT +#define __HAL_ADC_CFGR_AWD23CR ADC_CFGR_AWD23CR +#define __HAL_ADC_CFGR_INJECT_AUTO_CONVERSION ADC_CFGR_INJECT_AUTO_CONVERSION +#define __HAL_ADC_CFGR_INJECT_CONTEXT_QUEUE ADC_CFGR_INJECT_CONTEXT_QUEUE +#define __HAL_ADC_CFGR_INJECT_DISCCONTINUOUS ADC_CFGR_INJECT_DISCCONTINUOUS +#define __HAL_ADC_CFGR_REG_DISCCONTINUOUS ADC_CFGR_REG_DISCCONTINUOUS +#define __HAL_ADC_CFGR_DISCONTINUOUS_NUM ADC_CFGR_DISCONTINUOUS_NUM +#define __HAL_ADC_CFGR_AUTOWAIT ADC_CFGR_AUTOWAIT +#define __HAL_ADC_CFGR_CONTINUOUS ADC_CFGR_CONTINUOUS +#define __HAL_ADC_CFGR_OVERRUN ADC_CFGR_OVERRUN +#define __HAL_ADC_CFGR_DMACONTREQ ADC_CFGR_DMACONTREQ +#define __HAL_ADC_CFGR_EXTSEL ADC_CFGR_EXTSEL_SET +#define __HAL_ADC_JSQR_JEXTSEL ADC_JSQR_JEXTSEL_SET +#define __HAL_ADC_OFR_CHANNEL ADC_OFR_CHANNEL +#define __HAL_ADC_DIFSEL_CHANNEL ADC_DIFSEL_CHANNEL +#define __HAL_ADC_CALFACT_DIFF_SET ADC_CALFACT_DIFF_SET +#define __HAL_ADC_CALFACT_DIFF_GET ADC_CALFACT_DIFF_GET +#define __HAL_ADC_TRX_HIGHTHRESHOLD ADC_TRX_HIGHTHRESHOLD + +#define __HAL_ADC_OFFSET_SHIFT_RESOLUTION ADC_OFFSET_SHIFT_RESOLUTION +#define 
__HAL_ADC_AWD1THRESHOLD_SHIFT_RESOLUTION ADC_AWD1THRESHOLD_SHIFT_RESOLUTION +#define __HAL_ADC_AWD23THRESHOLD_SHIFT_RESOLUTION ADC_AWD23THRESHOLD_SHIFT_RESOLUTION +#define __HAL_ADC_COMMON_REGISTER ADC_COMMON_REGISTER +#define __HAL_ADC_COMMON_CCR_MULTI ADC_COMMON_CCR_MULTI +#define __HAL_ADC_MULTIMODE_IS_ENABLED ADC_MULTIMODE_IS_ENABLE +#define __ADC_MULTIMODE_IS_ENABLED ADC_MULTIMODE_IS_ENABLE +#define __HAL_ADC_NONMULTIMODE_OR_MULTIMODEMASTER ADC_NONMULTIMODE_OR_MULTIMODEMASTER +#define __HAL_ADC_COMMON_ADC_OTHER ADC_COMMON_ADC_OTHER +#define __HAL_ADC_MULTI_SLAVE ADC_MULTI_SLAVE + +#define __HAL_ADC_SQR1_L ADC_SQR1_L_SHIFT +#define __HAL_ADC_JSQR_JL ADC_JSQR_JL_SHIFT +#define __HAL_ADC_JSQR_RK_JL ADC_JSQR_RK_JL +#define __HAL_ADC_CR1_DISCONTINUOUS_NUM ADC_CR1_DISCONTINUOUS_NUM +#define __HAL_ADC_CR1_SCAN ADC_CR1_SCAN_SET +#define __HAL_ADC_CONVCYCLES_MAX_RANGE ADC_CONVCYCLES_MAX_RANGE +#define __HAL_ADC_CLOCK_PRESCALER_RANGE ADC_CLOCK_PRESCALER_RANGE +#define __HAL_ADC_GET_CLOCK_PRESCALER ADC_GET_CLOCK_PRESCALER + +#define __HAL_ADC_SQR1 ADC_SQR1 +#define __HAL_ADC_SMPR1 ADC_SMPR1 +#define __HAL_ADC_SMPR2 ADC_SMPR2 +#define __HAL_ADC_SQR3_RK ADC_SQR3_RK +#define __HAL_ADC_SQR2_RK ADC_SQR2_RK +#define __HAL_ADC_SQR1_RK ADC_SQR1_RK +#define __HAL_ADC_CR2_CONTINUOUS ADC_CR2_CONTINUOUS +#define __HAL_ADC_CR1_DISCONTINUOUS ADC_CR1_DISCONTINUOUS +#define __HAL_ADC_CR1_SCANCONV ADC_CR1_SCANCONV +#define __HAL_ADC_CR2_EOCSelection ADC_CR2_EOCSelection +#define __HAL_ADC_CR2_DMAContReq ADC_CR2_DMAContReq +#define __HAL_ADC_JSQR ADC_JSQR + +#define __HAL_ADC_CHSELR_CHANNEL ADC_CHSELR_CHANNEL +#define __HAL_ADC_CFGR1_REG_DISCCONTINUOUS ADC_CFGR1_REG_DISCCONTINUOUS +#define __HAL_ADC_CFGR1_AUTOOFF ADC_CFGR1_AUTOOFF +#define __HAL_ADC_CFGR1_AUTOWAIT ADC_CFGR1_AUTOWAIT +#define __HAL_ADC_CFGR1_CONTINUOUS ADC_CFGR1_CONTINUOUS +#define __HAL_ADC_CFGR1_OVERRUN ADC_CFGR1_OVERRUN +#define __HAL_ADC_CFGR1_SCANDIR ADC_CFGR1_SCANDIR +#define __HAL_ADC_CFGR1_DMACONTREQ ADC_CFGR1_DMACONTREQ + +/** + * @} + */ + +/** @defgroup HAL_DAC_Aliased_Macros HAL DAC Aliased Macros maintained for legacy purpose + * @{ + */ +#define __HAL_DHR12R1_ALIGNEMENT DAC_DHR12R1_ALIGNMENT +#define __HAL_DHR12R2_ALIGNEMENT DAC_DHR12R2_ALIGNMENT +#define __HAL_DHR12RD_ALIGNEMENT DAC_DHR12RD_ALIGNMENT +#define IS_DAC_GENERATE_WAVE IS_DAC_WAVE + +/** + * @} + */ + +/** @defgroup HAL_DBGMCU_Aliased_Macros HAL DBGMCU Aliased Macros maintained for legacy purpose + * @{ + */ +#define __HAL_FREEZE_TIM1_DBGMCU __HAL_DBGMCU_FREEZE_TIM1 +#define __HAL_UNFREEZE_TIM1_DBGMCU __HAL_DBGMCU_UNFREEZE_TIM1 +#define __HAL_FREEZE_TIM2_DBGMCU __HAL_DBGMCU_FREEZE_TIM2 +#define __HAL_UNFREEZE_TIM2_DBGMCU __HAL_DBGMCU_UNFREEZE_TIM2 +#define __HAL_FREEZE_TIM3_DBGMCU __HAL_DBGMCU_FREEZE_TIM3 +#define __HAL_UNFREEZE_TIM3_DBGMCU __HAL_DBGMCU_UNFREEZE_TIM3 +#define __HAL_FREEZE_TIM4_DBGMCU __HAL_DBGMCU_FREEZE_TIM4 +#define __HAL_UNFREEZE_TIM4_DBGMCU __HAL_DBGMCU_UNFREEZE_TIM4 +#define __HAL_FREEZE_TIM5_DBGMCU __HAL_DBGMCU_FREEZE_TIM5 +#define __HAL_UNFREEZE_TIM5_DBGMCU __HAL_DBGMCU_UNFREEZE_TIM5 +#define __HAL_FREEZE_TIM6_DBGMCU __HAL_DBGMCU_FREEZE_TIM6 +#define __HAL_UNFREEZE_TIM6_DBGMCU __HAL_DBGMCU_UNFREEZE_TIM6 +#define __HAL_FREEZE_TIM7_DBGMCU __HAL_DBGMCU_FREEZE_TIM7 +#define __HAL_UNFREEZE_TIM7_DBGMCU __HAL_DBGMCU_UNFREEZE_TIM7 +#define __HAL_FREEZE_TIM8_DBGMCU __HAL_DBGMCU_FREEZE_TIM8 +#define __HAL_UNFREEZE_TIM8_DBGMCU __HAL_DBGMCU_UNFREEZE_TIM8 + +#define __HAL_FREEZE_TIM9_DBGMCU __HAL_DBGMCU_FREEZE_TIM9 +#define __HAL_UNFREEZE_TIM9_DBGMCU 
__HAL_DBGMCU_UNFREEZE_TIM9 +#define __HAL_FREEZE_TIM10_DBGMCU __HAL_DBGMCU_FREEZE_TIM10 +#define __HAL_UNFREEZE_TIM10_DBGMCU __HAL_DBGMCU_UNFREEZE_TIM10 +#define __HAL_FREEZE_TIM11_DBGMCU __HAL_DBGMCU_FREEZE_TIM11 +#define __HAL_UNFREEZE_TIM11_DBGMCU __HAL_DBGMCU_UNFREEZE_TIM11 +#define __HAL_FREEZE_TIM12_DBGMCU __HAL_DBGMCU_FREEZE_TIM12 +#define __HAL_UNFREEZE_TIM12_DBGMCU __HAL_DBGMCU_UNFREEZE_TIM12 +#define __HAL_FREEZE_TIM13_DBGMCU __HAL_DBGMCU_FREEZE_TIM13 +#define __HAL_UNFREEZE_TIM13_DBGMCU __HAL_DBGMCU_UNFREEZE_TIM13 +#define __HAL_FREEZE_TIM14_DBGMCU __HAL_DBGMCU_FREEZE_TIM14 +#define __HAL_UNFREEZE_TIM14_DBGMCU __HAL_DBGMCU_UNFREEZE_TIM14 +#define __HAL_FREEZE_CAN2_DBGMCU __HAL_DBGMCU_FREEZE_CAN2 +#define __HAL_UNFREEZE_CAN2_DBGMCU __HAL_DBGMCU_UNFREEZE_CAN2 + + +#define __HAL_FREEZE_TIM15_DBGMCU __HAL_DBGMCU_FREEZE_TIM15 +#define __HAL_UNFREEZE_TIM15_DBGMCU __HAL_DBGMCU_UNFREEZE_TIM15 +#define __HAL_FREEZE_TIM16_DBGMCU __HAL_DBGMCU_FREEZE_TIM16 +#define __HAL_UNFREEZE_TIM16_DBGMCU __HAL_DBGMCU_UNFREEZE_TIM16 +#define __HAL_FREEZE_TIM17_DBGMCU __HAL_DBGMCU_FREEZE_TIM17 +#define __HAL_UNFREEZE_TIM17_DBGMCU __HAL_DBGMCU_UNFREEZE_TIM17 +#define __HAL_FREEZE_RTC_DBGMCU __HAL_DBGMCU_FREEZE_RTC +#define __HAL_UNFREEZE_RTC_DBGMCU __HAL_DBGMCU_UNFREEZE_RTC +#if defined(STM32H7) +#define __HAL_FREEZE_WWDG_DBGMCU __HAL_DBGMCU_FREEZE_WWDG1 +#define __HAL_UNFREEZE_WWDG_DBGMCU __HAL_DBGMCU_UnFreeze_WWDG1 +#define __HAL_FREEZE_IWDG_DBGMCU __HAL_DBGMCU_FREEZE_IWDG1 +#define __HAL_UNFREEZE_IWDG_DBGMCU __HAL_DBGMCU_UnFreeze_IWDG1 +#else +#define __HAL_FREEZE_WWDG_DBGMCU __HAL_DBGMCU_FREEZE_WWDG +#define __HAL_UNFREEZE_WWDG_DBGMCU __HAL_DBGMCU_UNFREEZE_WWDG +#define __HAL_FREEZE_IWDG_DBGMCU __HAL_DBGMCU_FREEZE_IWDG +#define __HAL_UNFREEZE_IWDG_DBGMCU __HAL_DBGMCU_UNFREEZE_IWDG +#endif /* STM32H7 */ +#define __HAL_FREEZE_I2C1_TIMEOUT_DBGMCU __HAL_DBGMCU_FREEZE_I2C1_TIMEOUT +#define __HAL_UNFREEZE_I2C1_TIMEOUT_DBGMCU __HAL_DBGMCU_UNFREEZE_I2C1_TIMEOUT +#define __HAL_FREEZE_I2C2_TIMEOUT_DBGMCU __HAL_DBGMCU_FREEZE_I2C2_TIMEOUT +#define __HAL_UNFREEZE_I2C2_TIMEOUT_DBGMCU __HAL_DBGMCU_UNFREEZE_I2C2_TIMEOUT +#define __HAL_FREEZE_I2C3_TIMEOUT_DBGMCU __HAL_DBGMCU_FREEZE_I2C3_TIMEOUT +#define __HAL_UNFREEZE_I2C3_TIMEOUT_DBGMCU __HAL_DBGMCU_UNFREEZE_I2C3_TIMEOUT +#define __HAL_FREEZE_CAN1_DBGMCU __HAL_DBGMCU_FREEZE_CAN1 +#define __HAL_UNFREEZE_CAN1_DBGMCU __HAL_DBGMCU_UNFREEZE_CAN1 +#define __HAL_FREEZE_LPTIM1_DBGMCU __HAL_DBGMCU_FREEZE_LPTIM1 +#define __HAL_UNFREEZE_LPTIM1_DBGMCU __HAL_DBGMCU_UNFREEZE_LPTIM1 +#define __HAL_FREEZE_LPTIM2_DBGMCU __HAL_DBGMCU_FREEZE_LPTIM2 +#define __HAL_UNFREEZE_LPTIM2_DBGMCU __HAL_DBGMCU_UNFREEZE_LPTIM2 + +/** + * @} + */ + +/** @defgroup HAL_COMP_Aliased_Macros HAL COMP Aliased Macros maintained for legacy purpose + * @{ + */ +#if defined(STM32F3) +#define COMP_START __HAL_COMP_ENABLE +#define COMP_STOP __HAL_COMP_DISABLE +#define COMP_LOCK __HAL_COMP_LOCK + +#if defined(STM32F301x8) || defined(STM32F302x8) || defined(STM32F318xx) || defined(STM32F303x8) || defined(STM32F334x8) || defined(STM32F328xx) +#define __HAL_COMP_EXTI_RISING_IT_ENABLE(__EXTILINE__) (((__EXTILINE__) == COMP_EXTI_LINE_COMP2) ? __HAL_COMP_COMP2_EXTI_ENABLE_RISING_EDGE() : \ + ((__EXTILINE__) == COMP_EXTI_LINE_COMP4) ? __HAL_COMP_COMP4_EXTI_ENABLE_RISING_EDGE() : \ + __HAL_COMP_COMP6_EXTI_ENABLE_RISING_EDGE()) +#define __HAL_COMP_EXTI_RISING_IT_DISABLE(__EXTILINE__) (((__EXTILINE__) == COMP_EXTI_LINE_COMP2) ? __HAL_COMP_COMP2_EXTI_DISABLE_RISING_EDGE() : \ + ((__EXTILINE__) == COMP_EXTI_LINE_COMP4) ? 
__HAL_COMP_COMP4_EXTI_DISABLE_RISING_EDGE() : \ + __HAL_COMP_COMP6_EXTI_DISABLE_RISING_EDGE()) +#define __HAL_COMP_EXTI_FALLING_IT_ENABLE(__EXTILINE__) (((__EXTILINE__) == COMP_EXTI_LINE_COMP2) ? __HAL_COMP_COMP2_EXTI_ENABLE_FALLING_EDGE() : \ + ((__EXTILINE__) == COMP_EXTI_LINE_COMP4) ? __HAL_COMP_COMP4_EXTI_ENABLE_FALLING_EDGE() : \ + __HAL_COMP_COMP6_EXTI_ENABLE_FALLING_EDGE()) +#define __HAL_COMP_EXTI_FALLING_IT_DISABLE(__EXTILINE__) (((__EXTILINE__) == COMP_EXTI_LINE_COMP2) ? __HAL_COMP_COMP2_EXTI_DISABLE_FALLING_EDGE() : \ + ((__EXTILINE__) == COMP_EXTI_LINE_COMP4) ? __HAL_COMP_COMP4_EXTI_DISABLE_FALLING_EDGE() : \ + __HAL_COMP_COMP6_EXTI_DISABLE_FALLING_EDGE()) +#define __HAL_COMP_EXTI_ENABLE_IT(__EXTILINE__) (((__EXTILINE__) == COMP_EXTI_LINE_COMP2) ? __HAL_COMP_COMP2_EXTI_ENABLE_IT() : \ + ((__EXTILINE__) == COMP_EXTI_LINE_COMP4) ? __HAL_COMP_COMP4_EXTI_ENABLE_IT() : \ + __HAL_COMP_COMP6_EXTI_ENABLE_IT()) +#define __HAL_COMP_EXTI_DISABLE_IT(__EXTILINE__) (((__EXTILINE__) == COMP_EXTI_LINE_COMP2) ? __HAL_COMP_COMP2_EXTI_DISABLE_IT() : \ + ((__EXTILINE__) == COMP_EXTI_LINE_COMP4) ? __HAL_COMP_COMP4_EXTI_DISABLE_IT() : \ + __HAL_COMP_COMP6_EXTI_DISABLE_IT()) +#define __HAL_COMP_EXTI_GET_FLAG(__FLAG__) (((__FLAG__) == COMP_EXTI_LINE_COMP2) ? __HAL_COMP_COMP2_EXTI_GET_FLAG() : \ + ((__FLAG__) == COMP_EXTI_LINE_COMP4) ? __HAL_COMP_COMP4_EXTI_GET_FLAG() : \ + __HAL_COMP_COMP6_EXTI_GET_FLAG()) +#define __HAL_COMP_EXTI_CLEAR_FLAG(__FLAG__) (((__FLAG__) == COMP_EXTI_LINE_COMP2) ? __HAL_COMP_COMP2_EXTI_CLEAR_FLAG() : \ + ((__FLAG__) == COMP_EXTI_LINE_COMP4) ? __HAL_COMP_COMP4_EXTI_CLEAR_FLAG() : \ + __HAL_COMP_COMP6_EXTI_CLEAR_FLAG()) +# endif +# if defined(STM32F302xE) || defined(STM32F302xC) +#define __HAL_COMP_EXTI_RISING_IT_ENABLE(__EXTILINE__) (((__EXTILINE__) == COMP_EXTI_LINE_COMP1) ? __HAL_COMP_COMP1_EXTI_ENABLE_RISING_EDGE() : \ + ((__EXTILINE__) == COMP_EXTI_LINE_COMP2) ? __HAL_COMP_COMP2_EXTI_ENABLE_RISING_EDGE() : \ + ((__EXTILINE__) == COMP_EXTI_LINE_COMP4) ? __HAL_COMP_COMP4_EXTI_ENABLE_RISING_EDGE() : \ + __HAL_COMP_COMP6_EXTI_ENABLE_RISING_EDGE()) +#define __HAL_COMP_EXTI_RISING_IT_DISABLE(__EXTILINE__) (((__EXTILINE__) == COMP_EXTI_LINE_COMP1) ? __HAL_COMP_COMP1_EXTI_DISABLE_RISING_EDGE() : \ + ((__EXTILINE__) == COMP_EXTI_LINE_COMP2) ? __HAL_COMP_COMP2_EXTI_DISABLE_RISING_EDGE() : \ + ((__EXTILINE__) == COMP_EXTI_LINE_COMP4) ? __HAL_COMP_COMP4_EXTI_DISABLE_RISING_EDGE() : \ + __HAL_COMP_COMP6_EXTI_DISABLE_RISING_EDGE()) +#define __HAL_COMP_EXTI_FALLING_IT_ENABLE(__EXTILINE__) (((__EXTILINE__) == COMP_EXTI_LINE_COMP1) ? __HAL_COMP_COMP1_EXTI_ENABLE_FALLING_EDGE() : \ + ((__EXTILINE__) == COMP_EXTI_LINE_COMP2) ? __HAL_COMP_COMP2_EXTI_ENABLE_FALLING_EDGE() : \ + ((__EXTILINE__) == COMP_EXTI_LINE_COMP4) ? __HAL_COMP_COMP4_EXTI_ENABLE_FALLING_EDGE() : \ + __HAL_COMP_COMP6_EXTI_ENABLE_FALLING_EDGE()) +#define __HAL_COMP_EXTI_FALLING_IT_DISABLE(__EXTILINE__) (((__EXTILINE__) == COMP_EXTI_LINE_COMP1) ? __HAL_COMP_COMP1_EXTI_DISABLE_FALLING_EDGE() : \ + ((__EXTILINE__) == COMP_EXTI_LINE_COMP2) ? __HAL_COMP_COMP2_EXTI_DISABLE_FALLING_EDGE() : \ + ((__EXTILINE__) == COMP_EXTI_LINE_COMP4) ? __HAL_COMP_COMP4_EXTI_DISABLE_FALLING_EDGE() : \ + __HAL_COMP_COMP6_EXTI_DISABLE_FALLING_EDGE()) +#define __HAL_COMP_EXTI_ENABLE_IT(__EXTILINE__) (((__EXTILINE__) == COMP_EXTI_LINE_COMP1) ? __HAL_COMP_COMP1_EXTI_ENABLE_IT() : \ + ((__EXTILINE__) == COMP_EXTI_LINE_COMP2) ? __HAL_COMP_COMP2_EXTI_ENABLE_IT() : \ + ((__EXTILINE__) == COMP_EXTI_LINE_COMP4) ? 
__HAL_COMP_COMP4_EXTI_ENABLE_IT() : \ + __HAL_COMP_COMP6_EXTI_ENABLE_IT()) +#define __HAL_COMP_EXTI_DISABLE_IT(__EXTILINE__) (((__EXTILINE__) == COMP_EXTI_LINE_COMP1) ? __HAL_COMP_COMP1_EXTI_DISABLE_IT() : \ + ((__EXTILINE__) == COMP_EXTI_LINE_COMP2) ? __HAL_COMP_COMP2_EXTI_DISABLE_IT() : \ + ((__EXTILINE__) == COMP_EXTI_LINE_COMP4) ? __HAL_COMP_COMP4_EXTI_DISABLE_IT() : \ + __HAL_COMP_COMP6_EXTI_DISABLE_IT()) +#define __HAL_COMP_EXTI_GET_FLAG(__FLAG__) (((__FLAG__) == COMP_EXTI_LINE_COMP1) ? __HAL_COMP_COMP1_EXTI_GET_FLAG() : \ + ((__FLAG__) == COMP_EXTI_LINE_COMP2) ? __HAL_COMP_COMP2_EXTI_GET_FLAG() : \ + ((__FLAG__) == COMP_EXTI_LINE_COMP4) ? __HAL_COMP_COMP4_EXTI_GET_FLAG() : \ + __HAL_COMP_COMP6_EXTI_GET_FLAG()) +#define __HAL_COMP_EXTI_CLEAR_FLAG(__FLAG__) (((__FLAG__) == COMP_EXTI_LINE_COMP1) ? __HAL_COMP_COMP1_EXTI_CLEAR_FLAG() : \ + ((__FLAG__) == COMP_EXTI_LINE_COMP2) ? __HAL_COMP_COMP2_EXTI_CLEAR_FLAG() : \ + ((__FLAG__) == COMP_EXTI_LINE_COMP4) ? __HAL_COMP_COMP4_EXTI_CLEAR_FLAG() : \ + __HAL_COMP_COMP6_EXTI_CLEAR_FLAG()) +# endif +# if defined(STM32F303xE) || defined(STM32F398xx) || defined(STM32F303xC) || defined(STM32F358xx) +#define __HAL_COMP_EXTI_RISING_IT_ENABLE(__EXTILINE__) (((__EXTILINE__) == COMP_EXTI_LINE_COMP1) ? __HAL_COMP_COMP1_EXTI_ENABLE_RISING_EDGE() : \ + ((__EXTILINE__) == COMP_EXTI_LINE_COMP2) ? __HAL_COMP_COMP2_EXTI_ENABLE_RISING_EDGE() : \ + ((__EXTILINE__) == COMP_EXTI_LINE_COMP3) ? __HAL_COMP_COMP3_EXTI_ENABLE_RISING_EDGE() : \ + ((__EXTILINE__) == COMP_EXTI_LINE_COMP4) ? __HAL_COMP_COMP4_EXTI_ENABLE_RISING_EDGE() : \ + ((__EXTILINE__) == COMP_EXTI_LINE_COMP5) ? __HAL_COMP_COMP5_EXTI_ENABLE_RISING_EDGE() : \ + ((__EXTILINE__) == COMP_EXTI_LINE_COMP6) ? __HAL_COMP_COMP6_EXTI_ENABLE_RISING_EDGE() : \ + __HAL_COMP_COMP7_EXTI_ENABLE_RISING_EDGE()) +#define __HAL_COMP_EXTI_RISING_IT_DISABLE(__EXTILINE__) (((__EXTILINE__) == COMP_EXTI_LINE_COMP1) ? __HAL_COMP_COMP1_EXTI_DISABLE_RISING_EDGE() : \ + ((__EXTILINE__) == COMP_EXTI_LINE_COMP2) ? __HAL_COMP_COMP2_EXTI_DISABLE_RISING_EDGE() : \ + ((__EXTILINE__) == COMP_EXTI_LINE_COMP3) ? __HAL_COMP_COMP3_EXTI_DISABLE_RISING_EDGE() : \ + ((__EXTILINE__) == COMP_EXTI_LINE_COMP4) ? __HAL_COMP_COMP4_EXTI_DISABLE_RISING_EDGE() : \ + ((__EXTILINE__) == COMP_EXTI_LINE_COMP5) ? __HAL_COMP_COMP5_EXTI_DISABLE_RISING_EDGE() : \ + ((__EXTILINE__) == COMP_EXTI_LINE_COMP6) ? __HAL_COMP_COMP6_EXTI_DISABLE_RISING_EDGE() : \ + __HAL_COMP_COMP7_EXTI_DISABLE_RISING_EDGE()) +#define __HAL_COMP_EXTI_FALLING_IT_ENABLE(__EXTILINE__) (((__EXTILINE__) == COMP_EXTI_LINE_COMP1) ? __HAL_COMP_COMP1_EXTI_ENABLE_FALLING_EDGE() : \ + ((__EXTILINE__) == COMP_EXTI_LINE_COMP2) ? __HAL_COMP_COMP2_EXTI_ENABLE_FALLING_EDGE() : \ + ((__EXTILINE__) == COMP_EXTI_LINE_COMP3) ? __HAL_COMP_COMP3_EXTI_ENABLE_FALLING_EDGE() : \ + ((__EXTILINE__) == COMP_EXTI_LINE_COMP4) ? __HAL_COMP_COMP4_EXTI_ENABLE_FALLING_EDGE() : \ + ((__EXTILINE__) == COMP_EXTI_LINE_COMP5) ? __HAL_COMP_COMP5_EXTI_ENABLE_FALLING_EDGE() : \ + ((__EXTILINE__) == COMP_EXTI_LINE_COMP6) ? __HAL_COMP_COMP6_EXTI_ENABLE_FALLING_EDGE() : \ + __HAL_COMP_COMP7_EXTI_ENABLE_FALLING_EDGE()) +#define __HAL_COMP_EXTI_FALLING_IT_DISABLE(__EXTILINE__) (((__EXTILINE__) == COMP_EXTI_LINE_COMP1) ? __HAL_COMP_COMP1_EXTI_DISABLE_FALLING_EDGE() : \ + ((__EXTILINE__) == COMP_EXTI_LINE_COMP2) ? __HAL_COMP_COMP2_EXTI_DISABLE_FALLING_EDGE() : \ + ((__EXTILINE__) == COMP_EXTI_LINE_COMP3) ? __HAL_COMP_COMP3_EXTI_DISABLE_FALLING_EDGE() : \ + ((__EXTILINE__) == COMP_EXTI_LINE_COMP4) ? 
__HAL_COMP_COMP4_EXTI_DISABLE_FALLING_EDGE() : \ + ((__EXTILINE__) == COMP_EXTI_LINE_COMP5) ? __HAL_COMP_COMP5_EXTI_DISABLE_FALLING_EDGE() : \ + ((__EXTILINE__) == COMP_EXTI_LINE_COMP6) ? __HAL_COMP_COMP6_EXTI_DISABLE_FALLING_EDGE() : \ + __HAL_COMP_COMP7_EXTI_DISABLE_FALLING_EDGE()) +#define __HAL_COMP_EXTI_ENABLE_IT(__EXTILINE__) (((__EXTILINE__) == COMP_EXTI_LINE_COMP1) ? __HAL_COMP_COMP1_EXTI_ENABLE_IT() : \ + ((__EXTILINE__) == COMP_EXTI_LINE_COMP2) ? __HAL_COMP_COMP2_EXTI_ENABLE_IT() : \ + ((__EXTILINE__) == COMP_EXTI_LINE_COMP3) ? __HAL_COMP_COMP3_EXTI_ENABLE_IT() : \ + ((__EXTILINE__) == COMP_EXTI_LINE_COMP4) ? __HAL_COMP_COMP4_EXTI_ENABLE_IT() : \ + ((__EXTILINE__) == COMP_EXTI_LINE_COMP5) ? __HAL_COMP_COMP5_EXTI_ENABLE_IT() : \ + ((__EXTILINE__) == COMP_EXTI_LINE_COMP6) ? __HAL_COMP_COMP6_EXTI_ENABLE_IT() : \ + __HAL_COMP_COMP7_EXTI_ENABLE_IT()) +#define __HAL_COMP_EXTI_DISABLE_IT(__EXTILINE__) (((__EXTILINE__) == COMP_EXTI_LINE_COMP1) ? __HAL_COMP_COMP1_EXTI_DISABLE_IT() : \ + ((__EXTILINE__) == COMP_EXTI_LINE_COMP2) ? __HAL_COMP_COMP2_EXTI_DISABLE_IT() : \ + ((__EXTILINE__) == COMP_EXTI_LINE_COMP3) ? __HAL_COMP_COMP3_EXTI_DISABLE_IT() : \ + ((__EXTILINE__) == COMP_EXTI_LINE_COMP4) ? __HAL_COMP_COMP4_EXTI_DISABLE_IT() : \ + ((__EXTILINE__) == COMP_EXTI_LINE_COMP5) ? __HAL_COMP_COMP5_EXTI_DISABLE_IT() : \ + ((__EXTILINE__) == COMP_EXTI_LINE_COMP6) ? __HAL_COMP_COMP6_EXTI_DISABLE_IT() : \ + __HAL_COMP_COMP7_EXTI_DISABLE_IT()) +#define __HAL_COMP_EXTI_GET_FLAG(__FLAG__) (((__FLAG__) == COMP_EXTI_LINE_COMP1) ? __HAL_COMP_COMP1_EXTI_GET_FLAG() : \ + ((__FLAG__) == COMP_EXTI_LINE_COMP2) ? __HAL_COMP_COMP2_EXTI_GET_FLAG() : \ + ((__FLAG__) == COMP_EXTI_LINE_COMP3) ? __HAL_COMP_COMP3_EXTI_GET_FLAG() : \ + ((__FLAG__) == COMP_EXTI_LINE_COMP4) ? __HAL_COMP_COMP4_EXTI_GET_FLAG() : \ + ((__FLAG__) == COMP_EXTI_LINE_COMP5) ? __HAL_COMP_COMP5_EXTI_GET_FLAG() : \ + ((__FLAG__) == COMP_EXTI_LINE_COMP6) ? __HAL_COMP_COMP6_EXTI_GET_FLAG() : \ + __HAL_COMP_COMP7_EXTI_GET_FLAG()) +#define __HAL_COMP_EXTI_CLEAR_FLAG(__FLAG__) (((__FLAG__) == COMP_EXTI_LINE_COMP1) ? __HAL_COMP_COMP1_EXTI_CLEAR_FLAG() : \ + ((__FLAG__) == COMP_EXTI_LINE_COMP2) ? __HAL_COMP_COMP2_EXTI_CLEAR_FLAG() : \ + ((__FLAG__) == COMP_EXTI_LINE_COMP3) ? __HAL_COMP_COMP3_EXTI_CLEAR_FLAG() : \ + ((__FLAG__) == COMP_EXTI_LINE_COMP4) ? __HAL_COMP_COMP4_EXTI_CLEAR_FLAG() : \ + ((__FLAG__) == COMP_EXTI_LINE_COMP5) ? __HAL_COMP_COMP5_EXTI_CLEAR_FLAG() : \ + ((__FLAG__) == COMP_EXTI_LINE_COMP6) ? __HAL_COMP_COMP6_EXTI_CLEAR_FLAG() : \ + __HAL_COMP_COMP7_EXTI_CLEAR_FLAG()) +# endif +# if defined(STM32F373xC) ||defined(STM32F378xx) +#define __HAL_COMP_EXTI_RISING_IT_ENABLE(__EXTILINE__) (((__EXTILINE__) == COMP_EXTI_LINE_COMP1) ? __HAL_COMP_COMP1_EXTI_ENABLE_RISING_EDGE() : \ + __HAL_COMP_COMP2_EXTI_ENABLE_RISING_EDGE()) +#define __HAL_COMP_EXTI_RISING_IT_DISABLE(__EXTILINE__) (((__EXTILINE__) == COMP_EXTI_LINE_COMP1) ? __HAL_COMP_COMP1_EXTI_DISABLE_RISING_EDGE() : \ + __HAL_COMP_COMP2_EXTI_DISABLE_RISING_EDGE()) +#define __HAL_COMP_EXTI_FALLING_IT_ENABLE(__EXTILINE__) (((__EXTILINE__) == COMP_EXTI_LINE_COMP1) ? __HAL_COMP_COMP1_EXTI_ENABLE_FALLING_EDGE() : \ + __HAL_COMP_COMP2_EXTI_ENABLE_FALLING_EDGE()) +#define __HAL_COMP_EXTI_FALLING_IT_DISABLE(__EXTILINE__) (((__EXTILINE__) == COMP_EXTI_LINE_COMP1) ? __HAL_COMP_COMP1_EXTI_DISABLE_FALLING_EDGE() : \ + __HAL_COMP_COMP2_EXTI_DISABLE_FALLING_EDGE()) +#define __HAL_COMP_EXTI_ENABLE_IT(__EXTILINE__) (((__EXTILINE__) == COMP_EXTI_LINE_COMP1) ? 
__HAL_COMP_COMP1_EXTI_ENABLE_IT() : \ + __HAL_COMP_COMP2_EXTI_ENABLE_IT()) +#define __HAL_COMP_EXTI_DISABLE_IT(__EXTILINE__) (((__EXTILINE__) == COMP_EXTI_LINE_COMP1) ? __HAL_COMP_COMP1_EXTI_DISABLE_IT() : \ + __HAL_COMP_COMP2_EXTI_DISABLE_IT()) +#define __HAL_COMP_EXTI_GET_FLAG(__FLAG__) (((__FLAG__) == COMP_EXTI_LINE_COMP1) ? __HAL_COMP_COMP1_EXTI_GET_FLAG() : \ + __HAL_COMP_COMP2_EXTI_GET_FLAG()) +#define __HAL_COMP_EXTI_CLEAR_FLAG(__FLAG__) (((__FLAG__) == COMP_EXTI_LINE_COMP1) ? __HAL_COMP_COMP1_EXTI_CLEAR_FLAG() : \ + __HAL_COMP_COMP2_EXTI_CLEAR_FLAG()) +# endif +#else +#define __HAL_COMP_EXTI_RISING_IT_ENABLE(__EXTILINE__) (((__EXTILINE__) == COMP_EXTI_LINE_COMP1) ? __HAL_COMP_COMP1_EXTI_ENABLE_RISING_EDGE() : \ + __HAL_COMP_COMP2_EXTI_ENABLE_RISING_EDGE()) +#define __HAL_COMP_EXTI_RISING_IT_DISABLE(__EXTILINE__) (((__EXTILINE__) == COMP_EXTI_LINE_COMP1) ? __HAL_COMP_COMP1_EXTI_DISABLE_RISING_EDGE() : \ + __HAL_COMP_COMP2_EXTI_DISABLE_RISING_EDGE()) +#define __HAL_COMP_EXTI_FALLING_IT_ENABLE(__EXTILINE__) (((__EXTILINE__) == COMP_EXTI_LINE_COMP1) ? __HAL_COMP_COMP1_EXTI_ENABLE_FALLING_EDGE() : \ + __HAL_COMP_COMP2_EXTI_ENABLE_FALLING_EDGE()) +#define __HAL_COMP_EXTI_FALLING_IT_DISABLE(__EXTILINE__) (((__EXTILINE__) == COMP_EXTI_LINE_COMP1) ? __HAL_COMP_COMP1_EXTI_DISABLE_FALLING_EDGE() : \ + __HAL_COMP_COMP2_EXTI_DISABLE_FALLING_EDGE()) +#define __HAL_COMP_EXTI_ENABLE_IT(__EXTILINE__) (((__EXTILINE__) == COMP_EXTI_LINE_COMP1) ? __HAL_COMP_COMP1_EXTI_ENABLE_IT() : \ + __HAL_COMP_COMP2_EXTI_ENABLE_IT()) +#define __HAL_COMP_EXTI_DISABLE_IT(__EXTILINE__) (((__EXTILINE__) == COMP_EXTI_LINE_COMP1) ? __HAL_COMP_COMP1_EXTI_DISABLE_IT() : \ + __HAL_COMP_COMP2_EXTI_DISABLE_IT()) +#define __HAL_COMP_EXTI_GET_FLAG(__FLAG__) (((__FLAG__) == COMP_EXTI_LINE_COMP1) ? __HAL_COMP_COMP1_EXTI_GET_FLAG() : \ + __HAL_COMP_COMP2_EXTI_GET_FLAG()) +#define __HAL_COMP_EXTI_CLEAR_FLAG(__FLAG__) (((__FLAG__) == COMP_EXTI_LINE_COMP1) ? __HAL_COMP_COMP1_EXTI_CLEAR_FLAG() : \ + __HAL_COMP_COMP2_EXTI_CLEAR_FLAG()) +#endif + +#define __HAL_COMP_GET_EXTI_LINE COMP_GET_EXTI_LINE + +#if defined(STM32L0) || defined(STM32L4) +/* Note: On these STM32 families, the only argument of this macro */ +/* is COMP_FLAG_LOCK. */ +/* This macro is replaced by __HAL_COMP_IS_LOCKED with only HAL handle */ +/* argument. 
*/ +#define __HAL_COMP_GET_FLAG(__HANDLE__, __FLAG__) (__HAL_COMP_IS_LOCKED(__HANDLE__)) +#endif +/** + * @} + */ + +#if defined(STM32L0) || defined(STM32L4) +/** @defgroup HAL_COMP_Aliased_Functions HAL COMP Aliased Functions maintained for legacy purpose + * @{ + */ +#define HAL_COMP_Start_IT HAL_COMP_Start /* Function considered as legacy as EXTI event or IT configuration is done into HAL_COMP_Init() */ +#define HAL_COMP_Stop_IT HAL_COMP_Stop /* Function considered as legacy as EXTI event or IT configuration is done into HAL_COMP_Init() */ +/** + * @} + */ +#endif + +/** @defgroup HAL_DAC_Aliased_Macros HAL DAC Aliased Macros maintained for legacy purpose + * @{ + */ + +#define IS_DAC_WAVE(WAVE) (((WAVE) == DAC_WAVE_NONE) || \ + ((WAVE) == DAC_WAVE_NOISE)|| \ + ((WAVE) == DAC_WAVE_TRIANGLE)) + +/** + * @} + */ + +/** @defgroup HAL_FLASH_Aliased_Macros HAL FLASH Aliased Macros maintained for legacy purpose + * @{ + */ + +#define IS_WRPAREA IS_OB_WRPAREA +#define IS_TYPEPROGRAM IS_FLASH_TYPEPROGRAM +#define IS_TYPEPROGRAMFLASH IS_FLASH_TYPEPROGRAM +#define IS_TYPEERASE IS_FLASH_TYPEERASE +#define IS_NBSECTORS IS_FLASH_NBSECTORS +#define IS_OB_WDG_SOURCE IS_OB_IWDG_SOURCE + +/** + * @} + */ + +/** @defgroup HAL_I2C_Aliased_Macros HAL I2C Aliased Macros maintained for legacy purpose + * @{ + */ + +#define __HAL_I2C_RESET_CR2 I2C_RESET_CR2 +#define __HAL_I2C_GENERATE_START I2C_GENERATE_START +#if defined(STM32F1) +#define __HAL_I2C_FREQ_RANGE I2C_FREQRANGE +#else +#define __HAL_I2C_FREQ_RANGE I2C_FREQ_RANGE +#endif /* STM32F1 */ +#define __HAL_I2C_RISE_TIME I2C_RISE_TIME +#define __HAL_I2C_SPEED_STANDARD I2C_SPEED_STANDARD +#define __HAL_I2C_SPEED_FAST I2C_SPEED_FAST +#define __HAL_I2C_SPEED I2C_SPEED +#define __HAL_I2C_7BIT_ADD_WRITE I2C_7BIT_ADD_WRITE +#define __HAL_I2C_7BIT_ADD_READ I2C_7BIT_ADD_READ +#define __HAL_I2C_10BIT_ADDRESS I2C_10BIT_ADDRESS +#define __HAL_I2C_10BIT_HEADER_WRITE I2C_10BIT_HEADER_WRITE +#define __HAL_I2C_10BIT_HEADER_READ I2C_10BIT_HEADER_READ +#define __HAL_I2C_MEM_ADD_MSB I2C_MEM_ADD_MSB +#define __HAL_I2C_MEM_ADD_LSB I2C_MEM_ADD_LSB +#define __HAL_I2C_FREQRANGE I2C_FREQRANGE +/** + * @} + */ + +/** @defgroup HAL_I2S_Aliased_Macros HAL I2S Aliased Macros maintained for legacy purpose + * @{ + */ + +#define IS_I2S_INSTANCE IS_I2S_ALL_INSTANCE +#define IS_I2S_INSTANCE_EXT IS_I2S_ALL_INSTANCE_EXT + +#if defined(STM32H7) +#define __HAL_I2S_CLEAR_FREFLAG __HAL_I2S_CLEAR_TIFREFLAG +#endif + +/** + * @} + */ + +/** @defgroup HAL_IRDA_Aliased_Macros HAL IRDA Aliased Macros maintained for legacy purpose + * @{ + */ + +#define __IRDA_DISABLE __HAL_IRDA_DISABLE +#define __IRDA_ENABLE __HAL_IRDA_ENABLE + +#define __HAL_IRDA_GETCLOCKSOURCE IRDA_GETCLOCKSOURCE +#define __HAL_IRDA_MASK_COMPUTATION IRDA_MASK_COMPUTATION +#define __IRDA_GETCLOCKSOURCE IRDA_GETCLOCKSOURCE +#define __IRDA_MASK_COMPUTATION IRDA_MASK_COMPUTATION + +#define IS_IRDA_ONEBIT_SAMPLE IS_IRDA_ONE_BIT_SAMPLE + + +/** + * @} + */ + + +/** @defgroup HAL_IWDG_Aliased_Macros HAL IWDG Aliased Macros maintained for legacy purpose + * @{ + */ +#define __HAL_IWDG_ENABLE_WRITE_ACCESS IWDG_ENABLE_WRITE_ACCESS +#define __HAL_IWDG_DISABLE_WRITE_ACCESS IWDG_DISABLE_WRITE_ACCESS +/** + * @} + */ + + +/** @defgroup HAL_LPTIM_Aliased_Macros HAL LPTIM Aliased Macros maintained for legacy purpose + * @{ + */ + +#define __HAL_LPTIM_ENABLE_INTERRUPT __HAL_LPTIM_ENABLE_IT +#define __HAL_LPTIM_DISABLE_INTERRUPT __HAL_LPTIM_DISABLE_IT +#define __HAL_LPTIM_GET_ITSTATUS __HAL_LPTIM_GET_IT_SOURCE + +/** + * @} + */ + + +/** 
@defgroup HAL_OPAMP_Aliased_Macros HAL OPAMP Aliased Macros maintained for legacy purpose + * @{ + */ +#define __OPAMP_CSR_OPAXPD OPAMP_CSR_OPAXPD +#define __OPAMP_CSR_S3SELX OPAMP_CSR_S3SELX +#define __OPAMP_CSR_S4SELX OPAMP_CSR_S4SELX +#define __OPAMP_CSR_S5SELX OPAMP_CSR_S5SELX +#define __OPAMP_CSR_S6SELX OPAMP_CSR_S6SELX +#define __OPAMP_CSR_OPAXCAL_L OPAMP_CSR_OPAXCAL_L +#define __OPAMP_CSR_OPAXCAL_H OPAMP_CSR_OPAXCAL_H +#define __OPAMP_CSR_OPAXLPM OPAMP_CSR_OPAXLPM +#define __OPAMP_CSR_ALL_SWITCHES OPAMP_CSR_ALL_SWITCHES +#define __OPAMP_CSR_ANAWSELX OPAMP_CSR_ANAWSELX +#define __OPAMP_CSR_OPAXCALOUT OPAMP_CSR_OPAXCALOUT +#define __OPAMP_OFFSET_TRIM_BITSPOSITION OPAMP_OFFSET_TRIM_BITSPOSITION +#define __OPAMP_OFFSET_TRIM_SET OPAMP_OFFSET_TRIM_SET + +/** + * @} + */ + + +/** @defgroup HAL_PWR_Aliased_Macros HAL PWR Aliased Macros maintained for legacy purpose + * @{ + */ +#define __HAL_PVD_EVENT_DISABLE __HAL_PWR_PVD_EXTI_DISABLE_EVENT +#define __HAL_PVD_EVENT_ENABLE __HAL_PWR_PVD_EXTI_ENABLE_EVENT +#define __HAL_PVD_EXTI_FALLINGTRIGGER_DISABLE __HAL_PWR_PVD_EXTI_DISABLE_FALLING_EDGE +#define __HAL_PVD_EXTI_FALLINGTRIGGER_ENABLE __HAL_PWR_PVD_EXTI_ENABLE_FALLING_EDGE +#define __HAL_PVD_EXTI_RISINGTRIGGER_DISABLE __HAL_PWR_PVD_EXTI_DISABLE_RISING_EDGE +#define __HAL_PVD_EXTI_RISINGTRIGGER_ENABLE __HAL_PWR_PVD_EXTI_ENABLE_RISING_EDGE +#define __HAL_PVM_EVENT_DISABLE __HAL_PWR_PVM_EVENT_DISABLE +#define __HAL_PVM_EVENT_ENABLE __HAL_PWR_PVM_EVENT_ENABLE +#define __HAL_PVM_EXTI_FALLINGTRIGGER_DISABLE __HAL_PWR_PVM_EXTI_FALLINGTRIGGER_DISABLE +#define __HAL_PVM_EXTI_FALLINGTRIGGER_ENABLE __HAL_PWR_PVM_EXTI_FALLINGTRIGGER_ENABLE +#define __HAL_PVM_EXTI_RISINGTRIGGER_DISABLE __HAL_PWR_PVM_EXTI_RISINGTRIGGER_DISABLE +#define __HAL_PVM_EXTI_RISINGTRIGGER_ENABLE __HAL_PWR_PVM_EXTI_RISINGTRIGGER_ENABLE +#define __HAL_PWR_INTERNALWAKEUP_DISABLE HAL_PWREx_DisableInternalWakeUpLine +#define __HAL_PWR_INTERNALWAKEUP_ENABLE HAL_PWREx_EnableInternalWakeUpLine +#define __HAL_PWR_PULL_UP_DOWN_CONFIG_DISABLE HAL_PWREx_DisablePullUpPullDownConfig +#define __HAL_PWR_PULL_UP_DOWN_CONFIG_ENABLE HAL_PWREx_EnablePullUpPullDownConfig +#define __HAL_PWR_PVD_EXTI_CLEAR_EGDE_TRIGGER() do { __HAL_PWR_PVD_EXTI_DISABLE_RISING_EDGE();__HAL_PWR_PVD_EXTI_DISABLE_FALLING_EDGE(); } while(0) +#define __HAL_PWR_PVD_EXTI_EVENT_DISABLE __HAL_PWR_PVD_EXTI_DISABLE_EVENT +#define __HAL_PWR_PVD_EXTI_EVENT_ENABLE __HAL_PWR_PVD_EXTI_ENABLE_EVENT +#define __HAL_PWR_PVD_EXTI_FALLINGTRIGGER_DISABLE __HAL_PWR_PVD_EXTI_DISABLE_FALLING_EDGE +#define __HAL_PWR_PVD_EXTI_FALLINGTRIGGER_ENABLE __HAL_PWR_PVD_EXTI_ENABLE_FALLING_EDGE +#define __HAL_PWR_PVD_EXTI_RISINGTRIGGER_DISABLE __HAL_PWR_PVD_EXTI_DISABLE_RISING_EDGE +#define __HAL_PWR_PVD_EXTI_RISINGTRIGGER_ENABLE __HAL_PWR_PVD_EXTI_ENABLE_RISING_EDGE +#define __HAL_PWR_PVD_EXTI_SET_FALLING_EGDE_TRIGGER __HAL_PWR_PVD_EXTI_ENABLE_FALLING_EDGE +#define __HAL_PWR_PVD_EXTI_SET_RISING_EDGE_TRIGGER __HAL_PWR_PVD_EXTI_ENABLE_RISING_EDGE +#define __HAL_PWR_PVM_DISABLE() do { HAL_PWREx_DisablePVM1();HAL_PWREx_DisablePVM2();HAL_PWREx_DisablePVM3();HAL_PWREx_DisablePVM4(); } while(0) +#define __HAL_PWR_PVM_ENABLE() do { HAL_PWREx_EnablePVM1();HAL_PWREx_EnablePVM2();HAL_PWREx_EnablePVM3();HAL_PWREx_EnablePVM4(); } while(0) +#define __HAL_PWR_SRAM2CONTENT_PRESERVE_DISABLE HAL_PWREx_DisableSRAM2ContentRetention +#define __HAL_PWR_SRAM2CONTENT_PRESERVE_ENABLE HAL_PWREx_EnableSRAM2ContentRetention +#define __HAL_PWR_VDDIO2_DISABLE HAL_PWREx_DisableVddIO2 +#define __HAL_PWR_VDDIO2_ENABLE 
HAL_PWREx_EnableVddIO2 +#define __HAL_PWR_VDDIO2_EXTI_CLEAR_EGDE_TRIGGER __HAL_PWR_VDDIO2_EXTI_DISABLE_FALLING_EDGE +#define __HAL_PWR_VDDIO2_EXTI_SET_FALLING_EGDE_TRIGGER __HAL_PWR_VDDIO2_EXTI_ENABLE_FALLING_EDGE +#define __HAL_PWR_VDDUSB_DISABLE HAL_PWREx_DisableVddUSB +#define __HAL_PWR_VDDUSB_ENABLE HAL_PWREx_EnableVddUSB + +#if defined (STM32F4) +#define __HAL_PVD_EXTI_ENABLE_IT(PWR_EXTI_LINE_PVD) __HAL_PWR_PVD_EXTI_ENABLE_IT() +#define __HAL_PVD_EXTI_DISABLE_IT(PWR_EXTI_LINE_PVD) __HAL_PWR_PVD_EXTI_DISABLE_IT() +#define __HAL_PVD_EXTI_GET_FLAG(PWR_EXTI_LINE_PVD) __HAL_PWR_PVD_EXTI_GET_FLAG() +#define __HAL_PVD_EXTI_CLEAR_FLAG(PWR_EXTI_LINE_PVD) __HAL_PWR_PVD_EXTI_CLEAR_FLAG() +#define __HAL_PVD_EXTI_GENERATE_SWIT(PWR_EXTI_LINE_PVD) __HAL_PWR_PVD_EXTI_GENERATE_SWIT() +#else +#define __HAL_PVD_EXTI_CLEAR_FLAG __HAL_PWR_PVD_EXTI_CLEAR_FLAG +#define __HAL_PVD_EXTI_DISABLE_IT __HAL_PWR_PVD_EXTI_DISABLE_IT +#define __HAL_PVD_EXTI_ENABLE_IT __HAL_PWR_PVD_EXTI_ENABLE_IT +#define __HAL_PVD_EXTI_GENERATE_SWIT __HAL_PWR_PVD_EXTI_GENERATE_SWIT +#define __HAL_PVD_EXTI_GET_FLAG __HAL_PWR_PVD_EXTI_GET_FLAG +#endif /* STM32F4 */ +/** + * @} + */ + + +/** @defgroup HAL_RCC_Aliased HAL RCC Aliased maintained for legacy purpose + * @{ + */ + +#define RCC_StopWakeUpClock_MSI RCC_STOP_WAKEUPCLOCK_MSI +#define RCC_StopWakeUpClock_HSI RCC_STOP_WAKEUPCLOCK_HSI + +#define HAL_RCC_CCSCallback HAL_RCC_CSSCallback +#define HAL_RC48_EnableBuffer_Cmd(cmd) (((cmd\ + )==ENABLE) ? HAL_RCCEx_EnableHSI48_VREFINT() : HAL_RCCEx_DisableHSI48_VREFINT()) + +#define __ADC_CLK_DISABLE __HAL_RCC_ADC_CLK_DISABLE +#define __ADC_CLK_ENABLE __HAL_RCC_ADC_CLK_ENABLE +#define __ADC_CLK_SLEEP_DISABLE __HAL_RCC_ADC_CLK_SLEEP_DISABLE +#define __ADC_CLK_SLEEP_ENABLE __HAL_RCC_ADC_CLK_SLEEP_ENABLE +#define __ADC_FORCE_RESET __HAL_RCC_ADC_FORCE_RESET +#define __ADC_RELEASE_RESET __HAL_RCC_ADC_RELEASE_RESET +#define __ADC1_CLK_DISABLE __HAL_RCC_ADC1_CLK_DISABLE +#define __ADC1_CLK_ENABLE __HAL_RCC_ADC1_CLK_ENABLE +#define __ADC1_FORCE_RESET __HAL_RCC_ADC1_FORCE_RESET +#define __ADC1_RELEASE_RESET __HAL_RCC_ADC1_RELEASE_RESET +#define __ADC1_CLK_SLEEP_ENABLE __HAL_RCC_ADC1_CLK_SLEEP_ENABLE +#define __ADC1_CLK_SLEEP_DISABLE __HAL_RCC_ADC1_CLK_SLEEP_DISABLE +#define __ADC2_CLK_DISABLE __HAL_RCC_ADC2_CLK_DISABLE +#define __ADC2_CLK_ENABLE __HAL_RCC_ADC2_CLK_ENABLE +#define __ADC2_FORCE_RESET __HAL_RCC_ADC2_FORCE_RESET +#define __ADC2_RELEASE_RESET __HAL_RCC_ADC2_RELEASE_RESET +#define __ADC3_CLK_DISABLE __HAL_RCC_ADC3_CLK_DISABLE +#define __ADC3_CLK_ENABLE __HAL_RCC_ADC3_CLK_ENABLE +#define __ADC3_FORCE_RESET __HAL_RCC_ADC3_FORCE_RESET +#define __ADC3_RELEASE_RESET __HAL_RCC_ADC3_RELEASE_RESET +#define __AES_CLK_DISABLE __HAL_RCC_AES_CLK_DISABLE +#define __AES_CLK_ENABLE __HAL_RCC_AES_CLK_ENABLE +#define __AES_CLK_SLEEP_DISABLE __HAL_RCC_AES_CLK_SLEEP_DISABLE +#define __AES_CLK_SLEEP_ENABLE __HAL_RCC_AES_CLK_SLEEP_ENABLE +#define __AES_FORCE_RESET __HAL_RCC_AES_FORCE_RESET +#define __AES_RELEASE_RESET __HAL_RCC_AES_RELEASE_RESET +#define __CRYP_CLK_SLEEP_ENABLE __HAL_RCC_CRYP_CLK_SLEEP_ENABLE +#define __CRYP_CLK_SLEEP_DISABLE __HAL_RCC_CRYP_CLK_SLEEP_DISABLE +#define __CRYP_CLK_ENABLE __HAL_RCC_CRYP_CLK_ENABLE +#define __CRYP_CLK_DISABLE __HAL_RCC_CRYP_CLK_DISABLE +#define __CRYP_FORCE_RESET __HAL_RCC_CRYP_FORCE_RESET +#define __CRYP_RELEASE_RESET __HAL_RCC_CRYP_RELEASE_RESET +#define __AFIO_CLK_DISABLE __HAL_RCC_AFIO_CLK_DISABLE +#define __AFIO_CLK_ENABLE __HAL_RCC_AFIO_CLK_ENABLE +#define __AFIO_FORCE_RESET __HAL_RCC_AFIO_FORCE_RESET 
+#define __AFIO_RELEASE_RESET __HAL_RCC_AFIO_RELEASE_RESET +#define __AHB_FORCE_RESET __HAL_RCC_AHB_FORCE_RESET +#define __AHB_RELEASE_RESET __HAL_RCC_AHB_RELEASE_RESET +#define __AHB1_FORCE_RESET __HAL_RCC_AHB1_FORCE_RESET +#define __AHB1_RELEASE_RESET __HAL_RCC_AHB1_RELEASE_RESET +#define __AHB2_FORCE_RESET __HAL_RCC_AHB2_FORCE_RESET +#define __AHB2_RELEASE_RESET __HAL_RCC_AHB2_RELEASE_RESET +#define __AHB3_FORCE_RESET __HAL_RCC_AHB3_FORCE_RESET +#define __AHB3_RELEASE_RESET __HAL_RCC_AHB3_RELEASE_RESET +#define __APB1_FORCE_RESET __HAL_RCC_APB1_FORCE_RESET +#define __APB1_RELEASE_RESET __HAL_RCC_APB1_RELEASE_RESET +#define __APB2_FORCE_RESET __HAL_RCC_APB2_FORCE_RESET +#define __APB2_RELEASE_RESET __HAL_RCC_APB2_RELEASE_RESET +#define __BKP_CLK_DISABLE __HAL_RCC_BKP_CLK_DISABLE +#define __BKP_CLK_ENABLE __HAL_RCC_BKP_CLK_ENABLE +#define __BKP_FORCE_RESET __HAL_RCC_BKP_FORCE_RESET +#define __BKP_RELEASE_RESET __HAL_RCC_BKP_RELEASE_RESET +#define __CAN1_CLK_DISABLE __HAL_RCC_CAN1_CLK_DISABLE +#define __CAN1_CLK_ENABLE __HAL_RCC_CAN1_CLK_ENABLE +#define __CAN1_CLK_SLEEP_DISABLE __HAL_RCC_CAN1_CLK_SLEEP_DISABLE +#define __CAN1_CLK_SLEEP_ENABLE __HAL_RCC_CAN1_CLK_SLEEP_ENABLE +#define __CAN1_FORCE_RESET __HAL_RCC_CAN1_FORCE_RESET +#define __CAN1_RELEASE_RESET __HAL_RCC_CAN1_RELEASE_RESET +#define __CAN_CLK_DISABLE __HAL_RCC_CAN1_CLK_DISABLE +#define __CAN_CLK_ENABLE __HAL_RCC_CAN1_CLK_ENABLE +#define __CAN_FORCE_RESET __HAL_RCC_CAN1_FORCE_RESET +#define __CAN_RELEASE_RESET __HAL_RCC_CAN1_RELEASE_RESET +#define __CAN2_CLK_DISABLE __HAL_RCC_CAN2_CLK_DISABLE +#define __CAN2_CLK_ENABLE __HAL_RCC_CAN2_CLK_ENABLE +#define __CAN2_FORCE_RESET __HAL_RCC_CAN2_FORCE_RESET +#define __CAN2_RELEASE_RESET __HAL_RCC_CAN2_RELEASE_RESET +#define __CEC_CLK_DISABLE __HAL_RCC_CEC_CLK_DISABLE +#define __CEC_CLK_ENABLE __HAL_RCC_CEC_CLK_ENABLE +#define __COMP_CLK_DISABLE __HAL_RCC_COMP_CLK_DISABLE +#define __COMP_CLK_ENABLE __HAL_RCC_COMP_CLK_ENABLE +#define __COMP_FORCE_RESET __HAL_RCC_COMP_FORCE_RESET +#define __COMP_RELEASE_RESET __HAL_RCC_COMP_RELEASE_RESET +#define __COMP_CLK_SLEEP_ENABLE __HAL_RCC_COMP_CLK_SLEEP_ENABLE +#define __COMP_CLK_SLEEP_DISABLE __HAL_RCC_COMP_CLK_SLEEP_DISABLE +#define __CEC_FORCE_RESET __HAL_RCC_CEC_FORCE_RESET +#define __CEC_RELEASE_RESET __HAL_RCC_CEC_RELEASE_RESET +#define __CRC_CLK_DISABLE __HAL_RCC_CRC_CLK_DISABLE +#define __CRC_CLK_ENABLE __HAL_RCC_CRC_CLK_ENABLE +#define __CRC_CLK_SLEEP_DISABLE __HAL_RCC_CRC_CLK_SLEEP_DISABLE +#define __CRC_CLK_SLEEP_ENABLE __HAL_RCC_CRC_CLK_SLEEP_ENABLE +#define __CRC_FORCE_RESET __HAL_RCC_CRC_FORCE_RESET +#define __CRC_RELEASE_RESET __HAL_RCC_CRC_RELEASE_RESET +#define __DAC_CLK_DISABLE __HAL_RCC_DAC_CLK_DISABLE +#define __DAC_CLK_ENABLE __HAL_RCC_DAC_CLK_ENABLE +#define __DAC_FORCE_RESET __HAL_RCC_DAC_FORCE_RESET +#define __DAC_RELEASE_RESET __HAL_RCC_DAC_RELEASE_RESET +#define __DAC1_CLK_DISABLE __HAL_RCC_DAC1_CLK_DISABLE +#define __DAC1_CLK_ENABLE __HAL_RCC_DAC1_CLK_ENABLE +#define __DAC1_CLK_SLEEP_DISABLE __HAL_RCC_DAC1_CLK_SLEEP_DISABLE +#define __DAC1_CLK_SLEEP_ENABLE __HAL_RCC_DAC1_CLK_SLEEP_ENABLE +#define __DAC1_FORCE_RESET __HAL_RCC_DAC1_FORCE_RESET +#define __DAC1_RELEASE_RESET __HAL_RCC_DAC1_RELEASE_RESET +#define __DBGMCU_CLK_ENABLE __HAL_RCC_DBGMCU_CLK_ENABLE +#define __DBGMCU_CLK_DISABLE __HAL_RCC_DBGMCU_CLK_DISABLE +#define __DBGMCU_FORCE_RESET __HAL_RCC_DBGMCU_FORCE_RESET +#define __DBGMCU_RELEASE_RESET __HAL_RCC_DBGMCU_RELEASE_RESET +#define __DFSDM_CLK_DISABLE __HAL_RCC_DFSDM_CLK_DISABLE +#define __DFSDM_CLK_ENABLE 
__HAL_RCC_DFSDM_CLK_ENABLE +#define __DFSDM_CLK_SLEEP_DISABLE __HAL_RCC_DFSDM_CLK_SLEEP_DISABLE +#define __DFSDM_CLK_SLEEP_ENABLE __HAL_RCC_DFSDM_CLK_SLEEP_ENABLE +#define __DFSDM_FORCE_RESET __HAL_RCC_DFSDM_FORCE_RESET +#define __DFSDM_RELEASE_RESET __HAL_RCC_DFSDM_RELEASE_RESET +#define __DMA1_CLK_DISABLE __HAL_RCC_DMA1_CLK_DISABLE +#define __DMA1_CLK_ENABLE __HAL_RCC_DMA1_CLK_ENABLE +#define __DMA1_CLK_SLEEP_DISABLE __HAL_RCC_DMA1_CLK_SLEEP_DISABLE +#define __DMA1_CLK_SLEEP_ENABLE __HAL_RCC_DMA1_CLK_SLEEP_ENABLE +#define __DMA1_FORCE_RESET __HAL_RCC_DMA1_FORCE_RESET +#define __DMA1_RELEASE_RESET __HAL_RCC_DMA1_RELEASE_RESET +#define __DMA2_CLK_DISABLE __HAL_RCC_DMA2_CLK_DISABLE +#define __DMA2_CLK_ENABLE __HAL_RCC_DMA2_CLK_ENABLE +#define __DMA2_CLK_SLEEP_DISABLE __HAL_RCC_DMA2_CLK_SLEEP_DISABLE +#define __DMA2_CLK_SLEEP_ENABLE __HAL_RCC_DMA2_CLK_SLEEP_ENABLE +#define __DMA2_FORCE_RESET __HAL_RCC_DMA2_FORCE_RESET +#define __DMA2_RELEASE_RESET __HAL_RCC_DMA2_RELEASE_RESET +#define __ETHMAC_CLK_DISABLE __HAL_RCC_ETHMAC_CLK_DISABLE +#define __ETHMAC_CLK_ENABLE __HAL_RCC_ETHMAC_CLK_ENABLE +#define __ETHMAC_FORCE_RESET __HAL_RCC_ETHMAC_FORCE_RESET +#define __ETHMAC_RELEASE_RESET __HAL_RCC_ETHMAC_RELEASE_RESET +#define __ETHMACRX_CLK_DISABLE __HAL_RCC_ETHMACRX_CLK_DISABLE +#define __ETHMACRX_CLK_ENABLE __HAL_RCC_ETHMACRX_CLK_ENABLE +#define __ETHMACTX_CLK_DISABLE __HAL_RCC_ETHMACTX_CLK_DISABLE +#define __ETHMACTX_CLK_ENABLE __HAL_RCC_ETHMACTX_CLK_ENABLE +#define __FIREWALL_CLK_DISABLE __HAL_RCC_FIREWALL_CLK_DISABLE +#define __FIREWALL_CLK_ENABLE __HAL_RCC_FIREWALL_CLK_ENABLE +#define __FLASH_CLK_DISABLE __HAL_RCC_FLASH_CLK_DISABLE +#define __FLASH_CLK_ENABLE __HAL_RCC_FLASH_CLK_ENABLE +#define __FLASH_CLK_SLEEP_DISABLE __HAL_RCC_FLASH_CLK_SLEEP_DISABLE +#define __FLASH_CLK_SLEEP_ENABLE __HAL_RCC_FLASH_CLK_SLEEP_ENABLE +#define __FLASH_FORCE_RESET __HAL_RCC_FLASH_FORCE_RESET +#define __FLASH_RELEASE_RESET __HAL_RCC_FLASH_RELEASE_RESET +#define __FLITF_CLK_DISABLE __HAL_RCC_FLITF_CLK_DISABLE +#define __FLITF_CLK_ENABLE __HAL_RCC_FLITF_CLK_ENABLE +#define __FLITF_FORCE_RESET __HAL_RCC_FLITF_FORCE_RESET +#define __FLITF_RELEASE_RESET __HAL_RCC_FLITF_RELEASE_RESET +#define __FLITF_CLK_SLEEP_ENABLE __HAL_RCC_FLITF_CLK_SLEEP_ENABLE +#define __FLITF_CLK_SLEEP_DISABLE __HAL_RCC_FLITF_CLK_SLEEP_DISABLE +#define __FMC_CLK_DISABLE __HAL_RCC_FMC_CLK_DISABLE +#define __FMC_CLK_ENABLE __HAL_RCC_FMC_CLK_ENABLE +#define __FMC_CLK_SLEEP_DISABLE __HAL_RCC_FMC_CLK_SLEEP_DISABLE +#define __FMC_CLK_SLEEP_ENABLE __HAL_RCC_FMC_CLK_SLEEP_ENABLE +#define __FMC_FORCE_RESET __HAL_RCC_FMC_FORCE_RESET +#define __FMC_RELEASE_RESET __HAL_RCC_FMC_RELEASE_RESET +#define __FSMC_CLK_DISABLE __HAL_RCC_FSMC_CLK_DISABLE +#define __FSMC_CLK_ENABLE __HAL_RCC_FSMC_CLK_ENABLE +#define __GPIOA_CLK_DISABLE __HAL_RCC_GPIOA_CLK_DISABLE +#define __GPIOA_CLK_ENABLE __HAL_RCC_GPIOA_CLK_ENABLE +#define __GPIOA_CLK_SLEEP_DISABLE __HAL_RCC_GPIOA_CLK_SLEEP_DISABLE +#define __GPIOA_CLK_SLEEP_ENABLE __HAL_RCC_GPIOA_CLK_SLEEP_ENABLE +#define __GPIOA_FORCE_RESET __HAL_RCC_GPIOA_FORCE_RESET +#define __GPIOA_RELEASE_RESET __HAL_RCC_GPIOA_RELEASE_RESET +#define __GPIOB_CLK_DISABLE __HAL_RCC_GPIOB_CLK_DISABLE +#define __GPIOB_CLK_ENABLE __HAL_RCC_GPIOB_CLK_ENABLE +#define __GPIOB_CLK_SLEEP_DISABLE __HAL_RCC_GPIOB_CLK_SLEEP_DISABLE +#define __GPIOB_CLK_SLEEP_ENABLE __HAL_RCC_GPIOB_CLK_SLEEP_ENABLE +#define __GPIOB_FORCE_RESET __HAL_RCC_GPIOB_FORCE_RESET +#define __GPIOB_RELEASE_RESET __HAL_RCC_GPIOB_RELEASE_RESET +#define __GPIOC_CLK_DISABLE 
__HAL_RCC_GPIOC_CLK_DISABLE +#define __GPIOC_CLK_ENABLE __HAL_RCC_GPIOC_CLK_ENABLE +#define __GPIOC_CLK_SLEEP_DISABLE __HAL_RCC_GPIOC_CLK_SLEEP_DISABLE +#define __GPIOC_CLK_SLEEP_ENABLE __HAL_RCC_GPIOC_CLK_SLEEP_ENABLE +#define __GPIOC_FORCE_RESET __HAL_RCC_GPIOC_FORCE_RESET +#define __GPIOC_RELEASE_RESET __HAL_RCC_GPIOC_RELEASE_RESET +#define __GPIOD_CLK_DISABLE __HAL_RCC_GPIOD_CLK_DISABLE +#define __GPIOD_CLK_ENABLE __HAL_RCC_GPIOD_CLK_ENABLE +#define __GPIOD_CLK_SLEEP_DISABLE __HAL_RCC_GPIOD_CLK_SLEEP_DISABLE +#define __GPIOD_CLK_SLEEP_ENABLE __HAL_RCC_GPIOD_CLK_SLEEP_ENABLE +#define __GPIOD_FORCE_RESET __HAL_RCC_GPIOD_FORCE_RESET +#define __GPIOD_RELEASE_RESET __HAL_RCC_GPIOD_RELEASE_RESET +#define __GPIOE_CLK_DISABLE __HAL_RCC_GPIOE_CLK_DISABLE +#define __GPIOE_CLK_ENABLE __HAL_RCC_GPIOE_CLK_ENABLE +#define __GPIOE_CLK_SLEEP_DISABLE __HAL_RCC_GPIOE_CLK_SLEEP_DISABLE +#define __GPIOE_CLK_SLEEP_ENABLE __HAL_RCC_GPIOE_CLK_SLEEP_ENABLE +#define __GPIOE_FORCE_RESET __HAL_RCC_GPIOE_FORCE_RESET +#define __GPIOE_RELEASE_RESET __HAL_RCC_GPIOE_RELEASE_RESET +#define __GPIOF_CLK_DISABLE __HAL_RCC_GPIOF_CLK_DISABLE +#define __GPIOF_CLK_ENABLE __HAL_RCC_GPIOF_CLK_ENABLE +#define __GPIOF_CLK_SLEEP_DISABLE __HAL_RCC_GPIOF_CLK_SLEEP_DISABLE +#define __GPIOF_CLK_SLEEP_ENABLE __HAL_RCC_GPIOF_CLK_SLEEP_ENABLE +#define __GPIOF_FORCE_RESET __HAL_RCC_GPIOF_FORCE_RESET +#define __GPIOF_RELEASE_RESET __HAL_RCC_GPIOF_RELEASE_RESET +#define __GPIOG_CLK_DISABLE __HAL_RCC_GPIOG_CLK_DISABLE +#define __GPIOG_CLK_ENABLE __HAL_RCC_GPIOG_CLK_ENABLE +#define __GPIOG_CLK_SLEEP_DISABLE __HAL_RCC_GPIOG_CLK_SLEEP_DISABLE +#define __GPIOG_CLK_SLEEP_ENABLE __HAL_RCC_GPIOG_CLK_SLEEP_ENABLE +#define __GPIOG_FORCE_RESET __HAL_RCC_GPIOG_FORCE_RESET +#define __GPIOG_RELEASE_RESET __HAL_RCC_GPIOG_RELEASE_RESET +#define __GPIOH_CLK_DISABLE __HAL_RCC_GPIOH_CLK_DISABLE +#define __GPIOH_CLK_ENABLE __HAL_RCC_GPIOH_CLK_ENABLE +#define __GPIOH_CLK_SLEEP_DISABLE __HAL_RCC_GPIOH_CLK_SLEEP_DISABLE +#define __GPIOH_CLK_SLEEP_ENABLE __HAL_RCC_GPIOH_CLK_SLEEP_ENABLE +#define __GPIOH_FORCE_RESET __HAL_RCC_GPIOH_FORCE_RESET +#define __GPIOH_RELEASE_RESET __HAL_RCC_GPIOH_RELEASE_RESET +#define __I2C1_CLK_DISABLE __HAL_RCC_I2C1_CLK_DISABLE +#define __I2C1_CLK_ENABLE __HAL_RCC_I2C1_CLK_ENABLE +#define __I2C1_CLK_SLEEP_DISABLE __HAL_RCC_I2C1_CLK_SLEEP_DISABLE +#define __I2C1_CLK_SLEEP_ENABLE __HAL_RCC_I2C1_CLK_SLEEP_ENABLE +#define __I2C1_FORCE_RESET __HAL_RCC_I2C1_FORCE_RESET +#define __I2C1_RELEASE_RESET __HAL_RCC_I2C1_RELEASE_RESET +#define __I2C2_CLK_DISABLE __HAL_RCC_I2C2_CLK_DISABLE +#define __I2C2_CLK_ENABLE __HAL_RCC_I2C2_CLK_ENABLE +#define __I2C2_CLK_SLEEP_DISABLE __HAL_RCC_I2C2_CLK_SLEEP_DISABLE +#define __I2C2_CLK_SLEEP_ENABLE __HAL_RCC_I2C2_CLK_SLEEP_ENABLE +#define __I2C2_FORCE_RESET __HAL_RCC_I2C2_FORCE_RESET +#define __I2C2_RELEASE_RESET __HAL_RCC_I2C2_RELEASE_RESET +#define __I2C3_CLK_DISABLE __HAL_RCC_I2C3_CLK_DISABLE +#define __I2C3_CLK_ENABLE __HAL_RCC_I2C3_CLK_ENABLE +#define __I2C3_CLK_SLEEP_DISABLE __HAL_RCC_I2C3_CLK_SLEEP_DISABLE +#define __I2C3_CLK_SLEEP_ENABLE __HAL_RCC_I2C3_CLK_SLEEP_ENABLE +#define __I2C3_FORCE_RESET __HAL_RCC_I2C3_FORCE_RESET +#define __I2C3_RELEASE_RESET __HAL_RCC_I2C3_RELEASE_RESET +#define __LCD_CLK_DISABLE __HAL_RCC_LCD_CLK_DISABLE +#define __LCD_CLK_ENABLE __HAL_RCC_LCD_CLK_ENABLE +#define __LCD_CLK_SLEEP_DISABLE __HAL_RCC_LCD_CLK_SLEEP_DISABLE +#define __LCD_CLK_SLEEP_ENABLE __HAL_RCC_LCD_CLK_SLEEP_ENABLE +#define __LCD_FORCE_RESET __HAL_RCC_LCD_FORCE_RESET +#define __LCD_RELEASE_RESET 
__HAL_RCC_LCD_RELEASE_RESET +#define __LPTIM1_CLK_DISABLE __HAL_RCC_LPTIM1_CLK_DISABLE +#define __LPTIM1_CLK_ENABLE __HAL_RCC_LPTIM1_CLK_ENABLE +#define __LPTIM1_CLK_SLEEP_DISABLE __HAL_RCC_LPTIM1_CLK_SLEEP_DISABLE +#define __LPTIM1_CLK_SLEEP_ENABLE __HAL_RCC_LPTIM1_CLK_SLEEP_ENABLE +#define __LPTIM1_FORCE_RESET __HAL_RCC_LPTIM1_FORCE_RESET +#define __LPTIM1_RELEASE_RESET __HAL_RCC_LPTIM1_RELEASE_RESET +#define __LPTIM2_CLK_DISABLE __HAL_RCC_LPTIM2_CLK_DISABLE +#define __LPTIM2_CLK_ENABLE __HAL_RCC_LPTIM2_CLK_ENABLE +#define __LPTIM2_CLK_SLEEP_DISABLE __HAL_RCC_LPTIM2_CLK_SLEEP_DISABLE +#define __LPTIM2_CLK_SLEEP_ENABLE __HAL_RCC_LPTIM2_CLK_SLEEP_ENABLE +#define __LPTIM2_FORCE_RESET __HAL_RCC_LPTIM2_FORCE_RESET +#define __LPTIM2_RELEASE_RESET __HAL_RCC_LPTIM2_RELEASE_RESET +#define __LPUART1_CLK_DISABLE __HAL_RCC_LPUART1_CLK_DISABLE +#define __LPUART1_CLK_ENABLE __HAL_RCC_LPUART1_CLK_ENABLE +#define __LPUART1_CLK_SLEEP_DISABLE __HAL_RCC_LPUART1_CLK_SLEEP_DISABLE +#define __LPUART1_CLK_SLEEP_ENABLE __HAL_RCC_LPUART1_CLK_SLEEP_ENABLE +#define __LPUART1_FORCE_RESET __HAL_RCC_LPUART1_FORCE_RESET +#define __LPUART1_RELEASE_RESET __HAL_RCC_LPUART1_RELEASE_RESET +#define __OPAMP_CLK_DISABLE __HAL_RCC_OPAMP_CLK_DISABLE +#define __OPAMP_CLK_ENABLE __HAL_RCC_OPAMP_CLK_ENABLE +#define __OPAMP_CLK_SLEEP_DISABLE __HAL_RCC_OPAMP_CLK_SLEEP_DISABLE +#define __OPAMP_CLK_SLEEP_ENABLE __HAL_RCC_OPAMP_CLK_SLEEP_ENABLE +#define __OPAMP_FORCE_RESET __HAL_RCC_OPAMP_FORCE_RESET +#define __OPAMP_RELEASE_RESET __HAL_RCC_OPAMP_RELEASE_RESET +#define __OTGFS_CLK_DISABLE __HAL_RCC_OTGFS_CLK_DISABLE +#define __OTGFS_CLK_ENABLE __HAL_RCC_OTGFS_CLK_ENABLE +#define __OTGFS_CLK_SLEEP_DISABLE __HAL_RCC_OTGFS_CLK_SLEEP_DISABLE +#define __OTGFS_CLK_SLEEP_ENABLE __HAL_RCC_OTGFS_CLK_SLEEP_ENABLE +#define __OTGFS_FORCE_RESET __HAL_RCC_OTGFS_FORCE_RESET +#define __OTGFS_RELEASE_RESET __HAL_RCC_OTGFS_RELEASE_RESET +#define __PWR_CLK_DISABLE __HAL_RCC_PWR_CLK_DISABLE +#define __PWR_CLK_ENABLE __HAL_RCC_PWR_CLK_ENABLE +#define __PWR_CLK_SLEEP_DISABLE __HAL_RCC_PWR_CLK_SLEEP_DISABLE +#define __PWR_CLK_SLEEP_ENABLE __HAL_RCC_PWR_CLK_SLEEP_ENABLE +#define __PWR_FORCE_RESET __HAL_RCC_PWR_FORCE_RESET +#define __PWR_RELEASE_RESET __HAL_RCC_PWR_RELEASE_RESET +#define __QSPI_CLK_DISABLE __HAL_RCC_QSPI_CLK_DISABLE +#define __QSPI_CLK_ENABLE __HAL_RCC_QSPI_CLK_ENABLE +#define __QSPI_CLK_SLEEP_DISABLE __HAL_RCC_QSPI_CLK_SLEEP_DISABLE +#define __QSPI_CLK_SLEEP_ENABLE __HAL_RCC_QSPI_CLK_SLEEP_ENABLE +#define __QSPI_FORCE_RESET __HAL_RCC_QSPI_FORCE_RESET +#define __QSPI_RELEASE_RESET __HAL_RCC_QSPI_RELEASE_RESET + +#if defined(STM32WB) +#define __HAL_RCC_QSPI_CLK_DISABLE __HAL_RCC_QUADSPI_CLK_DISABLE +#define __HAL_RCC_QSPI_CLK_ENABLE __HAL_RCC_QUADSPI_CLK_ENABLE +#define __HAL_RCC_QSPI_CLK_SLEEP_DISABLE __HAL_RCC_QUADSPI_CLK_SLEEP_DISABLE +#define __HAL_RCC_QSPI_CLK_SLEEP_ENABLE __HAL_RCC_QUADSPI_CLK_SLEEP_ENABLE +#define __HAL_RCC_QSPI_FORCE_RESET __HAL_RCC_QUADSPI_FORCE_RESET +#define __HAL_RCC_QSPI_RELEASE_RESET __HAL_RCC_QUADSPI_RELEASE_RESET +#define __HAL_RCC_QSPI_IS_CLK_ENABLED __HAL_RCC_QUADSPI_IS_CLK_ENABLED +#define __HAL_RCC_QSPI_IS_CLK_DISABLED __HAL_RCC_QUADSPI_IS_CLK_DISABLED +#define __HAL_RCC_QSPI_IS_CLK_SLEEP_ENABLED __HAL_RCC_QUADSPI_IS_CLK_SLEEP_ENABLED +#define __HAL_RCC_QSPI_IS_CLK_SLEEP_DISABLED __HAL_RCC_QUADSPI_IS_CLK_SLEEP_DISABLED +#define QSPI_IRQHandler QUADSPI_IRQHandler +#endif /* __HAL_RCC_QUADSPI_CLK_ENABLE */ + +#define __RNG_CLK_DISABLE __HAL_RCC_RNG_CLK_DISABLE +#define __RNG_CLK_ENABLE __HAL_RCC_RNG_CLK_ENABLE 
+#define __RNG_CLK_SLEEP_DISABLE __HAL_RCC_RNG_CLK_SLEEP_DISABLE +#define __RNG_CLK_SLEEP_ENABLE __HAL_RCC_RNG_CLK_SLEEP_ENABLE +#define __RNG_FORCE_RESET __HAL_RCC_RNG_FORCE_RESET +#define __RNG_RELEASE_RESET __HAL_RCC_RNG_RELEASE_RESET +#define __SAI1_CLK_DISABLE __HAL_RCC_SAI1_CLK_DISABLE +#define __SAI1_CLK_ENABLE __HAL_RCC_SAI1_CLK_ENABLE +#define __SAI1_CLK_SLEEP_DISABLE __HAL_RCC_SAI1_CLK_SLEEP_DISABLE +#define __SAI1_CLK_SLEEP_ENABLE __HAL_RCC_SAI1_CLK_SLEEP_ENABLE +#define __SAI1_FORCE_RESET __HAL_RCC_SAI1_FORCE_RESET +#define __SAI1_RELEASE_RESET __HAL_RCC_SAI1_RELEASE_RESET +#define __SAI2_CLK_DISABLE __HAL_RCC_SAI2_CLK_DISABLE +#define __SAI2_CLK_ENABLE __HAL_RCC_SAI2_CLK_ENABLE +#define __SAI2_CLK_SLEEP_DISABLE __HAL_RCC_SAI2_CLK_SLEEP_DISABLE +#define __SAI2_CLK_SLEEP_ENABLE __HAL_RCC_SAI2_CLK_SLEEP_ENABLE +#define __SAI2_FORCE_RESET __HAL_RCC_SAI2_FORCE_RESET +#define __SAI2_RELEASE_RESET __HAL_RCC_SAI2_RELEASE_RESET +#define __SDIO_CLK_DISABLE __HAL_RCC_SDIO_CLK_DISABLE +#define __SDIO_CLK_ENABLE __HAL_RCC_SDIO_CLK_ENABLE +#define __SDMMC_CLK_DISABLE __HAL_RCC_SDMMC_CLK_DISABLE +#define __SDMMC_CLK_ENABLE __HAL_RCC_SDMMC_CLK_ENABLE +#define __SDMMC_CLK_SLEEP_DISABLE __HAL_RCC_SDMMC_CLK_SLEEP_DISABLE +#define __SDMMC_CLK_SLEEP_ENABLE __HAL_RCC_SDMMC_CLK_SLEEP_ENABLE +#define __SDMMC_FORCE_RESET __HAL_RCC_SDMMC_FORCE_RESET +#define __SDMMC_RELEASE_RESET __HAL_RCC_SDMMC_RELEASE_RESET +#define __SPI1_CLK_DISABLE __HAL_RCC_SPI1_CLK_DISABLE +#define __SPI1_CLK_ENABLE __HAL_RCC_SPI1_CLK_ENABLE +#define __SPI1_CLK_SLEEP_DISABLE __HAL_RCC_SPI1_CLK_SLEEP_DISABLE +#define __SPI1_CLK_SLEEP_ENABLE __HAL_RCC_SPI1_CLK_SLEEP_ENABLE +#define __SPI1_FORCE_RESET __HAL_RCC_SPI1_FORCE_RESET +#define __SPI1_RELEASE_RESET __HAL_RCC_SPI1_RELEASE_RESET +#define __SPI2_CLK_DISABLE __HAL_RCC_SPI2_CLK_DISABLE +#define __SPI2_CLK_ENABLE __HAL_RCC_SPI2_CLK_ENABLE +#define __SPI2_CLK_SLEEP_DISABLE __HAL_RCC_SPI2_CLK_SLEEP_DISABLE +#define __SPI2_CLK_SLEEP_ENABLE __HAL_RCC_SPI2_CLK_SLEEP_ENABLE +#define __SPI2_FORCE_RESET __HAL_RCC_SPI2_FORCE_RESET +#define __SPI2_RELEASE_RESET __HAL_RCC_SPI2_RELEASE_RESET +#define __SPI3_CLK_DISABLE __HAL_RCC_SPI3_CLK_DISABLE +#define __SPI3_CLK_ENABLE __HAL_RCC_SPI3_CLK_ENABLE +#define __SPI3_CLK_SLEEP_DISABLE __HAL_RCC_SPI3_CLK_SLEEP_DISABLE +#define __SPI3_CLK_SLEEP_ENABLE __HAL_RCC_SPI3_CLK_SLEEP_ENABLE +#define __SPI3_FORCE_RESET __HAL_RCC_SPI3_FORCE_RESET +#define __SPI3_RELEASE_RESET __HAL_RCC_SPI3_RELEASE_RESET +#define __SRAM_CLK_DISABLE __HAL_RCC_SRAM_CLK_DISABLE +#define __SRAM_CLK_ENABLE __HAL_RCC_SRAM_CLK_ENABLE +#define __SRAM1_CLK_SLEEP_DISABLE __HAL_RCC_SRAM1_CLK_SLEEP_DISABLE +#define __SRAM1_CLK_SLEEP_ENABLE __HAL_RCC_SRAM1_CLK_SLEEP_ENABLE +#define __SRAM2_CLK_SLEEP_DISABLE __HAL_RCC_SRAM2_CLK_SLEEP_DISABLE +#define __SRAM2_CLK_SLEEP_ENABLE __HAL_RCC_SRAM2_CLK_SLEEP_ENABLE +#define __SWPMI1_CLK_DISABLE __HAL_RCC_SWPMI1_CLK_DISABLE +#define __SWPMI1_CLK_ENABLE __HAL_RCC_SWPMI1_CLK_ENABLE +#define __SWPMI1_CLK_SLEEP_DISABLE __HAL_RCC_SWPMI1_CLK_SLEEP_DISABLE +#define __SWPMI1_CLK_SLEEP_ENABLE __HAL_RCC_SWPMI1_CLK_SLEEP_ENABLE +#define __SWPMI1_FORCE_RESET __HAL_RCC_SWPMI1_FORCE_RESET +#define __SWPMI1_RELEASE_RESET __HAL_RCC_SWPMI1_RELEASE_RESET +#define __SYSCFG_CLK_DISABLE __HAL_RCC_SYSCFG_CLK_DISABLE +#define __SYSCFG_CLK_ENABLE __HAL_RCC_SYSCFG_CLK_ENABLE +#define __SYSCFG_CLK_SLEEP_DISABLE __HAL_RCC_SYSCFG_CLK_SLEEP_DISABLE +#define __SYSCFG_CLK_SLEEP_ENABLE __HAL_RCC_SYSCFG_CLK_SLEEP_ENABLE +#define __SYSCFG_FORCE_RESET __HAL_RCC_SYSCFG_FORCE_RESET 
+#define __SYSCFG_RELEASE_RESET __HAL_RCC_SYSCFG_RELEASE_RESET +#define __TIM1_CLK_DISABLE __HAL_RCC_TIM1_CLK_DISABLE +#define __TIM1_CLK_ENABLE __HAL_RCC_TIM1_CLK_ENABLE +#define __TIM1_CLK_SLEEP_DISABLE __HAL_RCC_TIM1_CLK_SLEEP_DISABLE +#define __TIM1_CLK_SLEEP_ENABLE __HAL_RCC_TIM1_CLK_SLEEP_ENABLE +#define __TIM1_FORCE_RESET __HAL_RCC_TIM1_FORCE_RESET +#define __TIM1_RELEASE_RESET __HAL_RCC_TIM1_RELEASE_RESET +#define __TIM10_CLK_DISABLE __HAL_RCC_TIM10_CLK_DISABLE +#define __TIM10_CLK_ENABLE __HAL_RCC_TIM10_CLK_ENABLE +#define __TIM10_FORCE_RESET __HAL_RCC_TIM10_FORCE_RESET +#define __TIM10_RELEASE_RESET __HAL_RCC_TIM10_RELEASE_RESET +#define __TIM11_CLK_DISABLE __HAL_RCC_TIM11_CLK_DISABLE +#define __TIM11_CLK_ENABLE __HAL_RCC_TIM11_CLK_ENABLE +#define __TIM11_FORCE_RESET __HAL_RCC_TIM11_FORCE_RESET +#define __TIM11_RELEASE_RESET __HAL_RCC_TIM11_RELEASE_RESET +#define __TIM12_CLK_DISABLE __HAL_RCC_TIM12_CLK_DISABLE +#define __TIM12_CLK_ENABLE __HAL_RCC_TIM12_CLK_ENABLE +#define __TIM12_FORCE_RESET __HAL_RCC_TIM12_FORCE_RESET +#define __TIM12_RELEASE_RESET __HAL_RCC_TIM12_RELEASE_RESET +#define __TIM13_CLK_DISABLE __HAL_RCC_TIM13_CLK_DISABLE +#define __TIM13_CLK_ENABLE __HAL_RCC_TIM13_CLK_ENABLE +#define __TIM13_FORCE_RESET __HAL_RCC_TIM13_FORCE_RESET +#define __TIM13_RELEASE_RESET __HAL_RCC_TIM13_RELEASE_RESET +#define __TIM14_CLK_DISABLE __HAL_RCC_TIM14_CLK_DISABLE +#define __TIM14_CLK_ENABLE __HAL_RCC_TIM14_CLK_ENABLE +#define __TIM14_FORCE_RESET __HAL_RCC_TIM14_FORCE_RESET +#define __TIM14_RELEASE_RESET __HAL_RCC_TIM14_RELEASE_RESET +#define __TIM15_CLK_DISABLE __HAL_RCC_TIM15_CLK_DISABLE +#define __TIM15_CLK_ENABLE __HAL_RCC_TIM15_CLK_ENABLE +#define __TIM15_CLK_SLEEP_DISABLE __HAL_RCC_TIM15_CLK_SLEEP_DISABLE +#define __TIM15_CLK_SLEEP_ENABLE __HAL_RCC_TIM15_CLK_SLEEP_ENABLE +#define __TIM15_FORCE_RESET __HAL_RCC_TIM15_FORCE_RESET +#define __TIM15_RELEASE_RESET __HAL_RCC_TIM15_RELEASE_RESET +#define __TIM16_CLK_DISABLE __HAL_RCC_TIM16_CLK_DISABLE +#define __TIM16_CLK_ENABLE __HAL_RCC_TIM16_CLK_ENABLE +#define __TIM16_CLK_SLEEP_DISABLE __HAL_RCC_TIM16_CLK_SLEEP_DISABLE +#define __TIM16_CLK_SLEEP_ENABLE __HAL_RCC_TIM16_CLK_SLEEP_ENABLE +#define __TIM16_FORCE_RESET __HAL_RCC_TIM16_FORCE_RESET +#define __TIM16_RELEASE_RESET __HAL_RCC_TIM16_RELEASE_RESET +#define __TIM17_CLK_DISABLE __HAL_RCC_TIM17_CLK_DISABLE +#define __TIM17_CLK_ENABLE __HAL_RCC_TIM17_CLK_ENABLE +#define __TIM17_CLK_SLEEP_DISABLE __HAL_RCC_TIM17_CLK_SLEEP_DISABLE +#define __TIM17_CLK_SLEEP_ENABLE __HAL_RCC_TIM17_CLK_SLEEP_ENABLE +#define __TIM17_FORCE_RESET __HAL_RCC_TIM17_FORCE_RESET +#define __TIM17_RELEASE_RESET __HAL_RCC_TIM17_RELEASE_RESET +#define __TIM2_CLK_DISABLE __HAL_RCC_TIM2_CLK_DISABLE +#define __TIM2_CLK_ENABLE __HAL_RCC_TIM2_CLK_ENABLE +#define __TIM2_CLK_SLEEP_DISABLE __HAL_RCC_TIM2_CLK_SLEEP_DISABLE +#define __TIM2_CLK_SLEEP_ENABLE __HAL_RCC_TIM2_CLK_SLEEP_ENABLE +#define __TIM2_FORCE_RESET __HAL_RCC_TIM2_FORCE_RESET +#define __TIM2_RELEASE_RESET __HAL_RCC_TIM2_RELEASE_RESET +#define __TIM3_CLK_DISABLE __HAL_RCC_TIM3_CLK_DISABLE +#define __TIM3_CLK_ENABLE __HAL_RCC_TIM3_CLK_ENABLE +#define __TIM3_CLK_SLEEP_DISABLE __HAL_RCC_TIM3_CLK_SLEEP_DISABLE +#define __TIM3_CLK_SLEEP_ENABLE __HAL_RCC_TIM3_CLK_SLEEP_ENABLE +#define __TIM3_FORCE_RESET __HAL_RCC_TIM3_FORCE_RESET +#define __TIM3_RELEASE_RESET __HAL_RCC_TIM3_RELEASE_RESET +#define __TIM4_CLK_DISABLE __HAL_RCC_TIM4_CLK_DISABLE +#define __TIM4_CLK_ENABLE __HAL_RCC_TIM4_CLK_ENABLE +#define __TIM4_CLK_SLEEP_DISABLE __HAL_RCC_TIM4_CLK_SLEEP_DISABLE 
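(Editor's aside, not part of the committed header: the aliases above are purely mechanical — each pre-v1 macro name such as __SPI1_CLK_ENABLE or __TIM2_FORCE_RESET is an object-like alias for the corresponding __HAL_RCC_* function-like macro, so application code written against the old names still compiles against the current RCC driver. A minimal sketch of such legacy-style code, assuming stm32_hal_legacy.h is reachable through the HAL umbrella include as it appears to be in this tree; the function name legacy_rcc_init is illustrative only.)

    #include "stm32f7xx_hal.h"   /* the HAL headers are assumed to pull in stm32_hal_legacy.h */

    /* Legacy-style peripheral clock/reset handling: the old names expand to the
     * current __HAL_RCC_* macros via the aliases above. */
    static void legacy_rcc_init(void)
    {
      __SPI1_CLK_ENABLE();      /* expands to __HAL_RCC_SPI1_CLK_ENABLE()   */
      __TIM2_CLK_ENABLE();      /* expands to __HAL_RCC_TIM2_CLK_ENABLE()   */
      __TIM2_FORCE_RESET();     /* expands to __HAL_RCC_TIM2_FORCE_RESET()  */
      __TIM2_RELEASE_RESET();   /* expands to __HAL_RCC_TIM2_RELEASE_RESET() */
    }
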
+#define __TIM4_CLK_SLEEP_ENABLE __HAL_RCC_TIM4_CLK_SLEEP_ENABLE +#define __TIM4_FORCE_RESET __HAL_RCC_TIM4_FORCE_RESET +#define __TIM4_RELEASE_RESET __HAL_RCC_TIM4_RELEASE_RESET +#define __TIM5_CLK_DISABLE __HAL_RCC_TIM5_CLK_DISABLE +#define __TIM5_CLK_ENABLE __HAL_RCC_TIM5_CLK_ENABLE +#define __TIM5_CLK_SLEEP_DISABLE __HAL_RCC_TIM5_CLK_SLEEP_DISABLE +#define __TIM5_CLK_SLEEP_ENABLE __HAL_RCC_TIM5_CLK_SLEEP_ENABLE +#define __TIM5_FORCE_RESET __HAL_RCC_TIM5_FORCE_RESET +#define __TIM5_RELEASE_RESET __HAL_RCC_TIM5_RELEASE_RESET +#define __TIM6_CLK_DISABLE __HAL_RCC_TIM6_CLK_DISABLE +#define __TIM6_CLK_ENABLE __HAL_RCC_TIM6_CLK_ENABLE +#define __TIM6_CLK_SLEEP_DISABLE __HAL_RCC_TIM6_CLK_SLEEP_DISABLE +#define __TIM6_CLK_SLEEP_ENABLE __HAL_RCC_TIM6_CLK_SLEEP_ENABLE +#define __TIM6_FORCE_RESET __HAL_RCC_TIM6_FORCE_RESET +#define __TIM6_RELEASE_RESET __HAL_RCC_TIM6_RELEASE_RESET +#define __TIM7_CLK_DISABLE __HAL_RCC_TIM7_CLK_DISABLE +#define __TIM7_CLK_ENABLE __HAL_RCC_TIM7_CLK_ENABLE +#define __TIM7_CLK_SLEEP_DISABLE __HAL_RCC_TIM7_CLK_SLEEP_DISABLE +#define __TIM7_CLK_SLEEP_ENABLE __HAL_RCC_TIM7_CLK_SLEEP_ENABLE +#define __TIM7_FORCE_RESET __HAL_RCC_TIM7_FORCE_RESET +#define __TIM7_RELEASE_RESET __HAL_RCC_TIM7_RELEASE_RESET +#define __TIM8_CLK_DISABLE __HAL_RCC_TIM8_CLK_DISABLE +#define __TIM8_CLK_ENABLE __HAL_RCC_TIM8_CLK_ENABLE +#define __TIM8_CLK_SLEEP_DISABLE __HAL_RCC_TIM8_CLK_SLEEP_DISABLE +#define __TIM8_CLK_SLEEP_ENABLE __HAL_RCC_TIM8_CLK_SLEEP_ENABLE +#define __TIM8_FORCE_RESET __HAL_RCC_TIM8_FORCE_RESET +#define __TIM8_RELEASE_RESET __HAL_RCC_TIM8_RELEASE_RESET +#define __TIM9_CLK_DISABLE __HAL_RCC_TIM9_CLK_DISABLE +#define __TIM9_CLK_ENABLE __HAL_RCC_TIM9_CLK_ENABLE +#define __TIM9_FORCE_RESET __HAL_RCC_TIM9_FORCE_RESET +#define __TIM9_RELEASE_RESET __HAL_RCC_TIM9_RELEASE_RESET +#define __TSC_CLK_DISABLE __HAL_RCC_TSC_CLK_DISABLE +#define __TSC_CLK_ENABLE __HAL_RCC_TSC_CLK_ENABLE +#define __TSC_CLK_SLEEP_DISABLE __HAL_RCC_TSC_CLK_SLEEP_DISABLE +#define __TSC_CLK_SLEEP_ENABLE __HAL_RCC_TSC_CLK_SLEEP_ENABLE +#define __TSC_FORCE_RESET __HAL_RCC_TSC_FORCE_RESET +#define __TSC_RELEASE_RESET __HAL_RCC_TSC_RELEASE_RESET +#define __UART4_CLK_DISABLE __HAL_RCC_UART4_CLK_DISABLE +#define __UART4_CLK_ENABLE __HAL_RCC_UART4_CLK_ENABLE +#define __UART4_CLK_SLEEP_DISABLE __HAL_RCC_UART4_CLK_SLEEP_DISABLE +#define __UART4_CLK_SLEEP_ENABLE __HAL_RCC_UART4_CLK_SLEEP_ENABLE +#define __UART4_FORCE_RESET __HAL_RCC_UART4_FORCE_RESET +#define __UART4_RELEASE_RESET __HAL_RCC_UART4_RELEASE_RESET +#define __UART5_CLK_DISABLE __HAL_RCC_UART5_CLK_DISABLE +#define __UART5_CLK_ENABLE __HAL_RCC_UART5_CLK_ENABLE +#define __UART5_CLK_SLEEP_DISABLE __HAL_RCC_UART5_CLK_SLEEP_DISABLE +#define __UART5_CLK_SLEEP_ENABLE __HAL_RCC_UART5_CLK_SLEEP_ENABLE +#define __UART5_FORCE_RESET __HAL_RCC_UART5_FORCE_RESET +#define __UART5_RELEASE_RESET __HAL_RCC_UART5_RELEASE_RESET +#define __USART1_CLK_DISABLE __HAL_RCC_USART1_CLK_DISABLE +#define __USART1_CLK_ENABLE __HAL_RCC_USART1_CLK_ENABLE +#define __USART1_CLK_SLEEP_DISABLE __HAL_RCC_USART1_CLK_SLEEP_DISABLE +#define __USART1_CLK_SLEEP_ENABLE __HAL_RCC_USART1_CLK_SLEEP_ENABLE +#define __USART1_FORCE_RESET __HAL_RCC_USART1_FORCE_RESET +#define __USART1_RELEASE_RESET __HAL_RCC_USART1_RELEASE_RESET +#define __USART2_CLK_DISABLE __HAL_RCC_USART2_CLK_DISABLE +#define __USART2_CLK_ENABLE __HAL_RCC_USART2_CLK_ENABLE +#define __USART2_CLK_SLEEP_DISABLE __HAL_RCC_USART2_CLK_SLEEP_DISABLE +#define __USART2_CLK_SLEEP_ENABLE __HAL_RCC_USART2_CLK_SLEEP_ENABLE +#define __USART2_FORCE_RESET 
__HAL_RCC_USART2_FORCE_RESET +#define __USART2_RELEASE_RESET __HAL_RCC_USART2_RELEASE_RESET +#define __USART3_CLK_DISABLE __HAL_RCC_USART3_CLK_DISABLE +#define __USART3_CLK_ENABLE __HAL_RCC_USART3_CLK_ENABLE +#define __USART3_CLK_SLEEP_DISABLE __HAL_RCC_USART3_CLK_SLEEP_DISABLE +#define __USART3_CLK_SLEEP_ENABLE __HAL_RCC_USART3_CLK_SLEEP_ENABLE +#define __USART3_FORCE_RESET __HAL_RCC_USART3_FORCE_RESET +#define __USART3_RELEASE_RESET __HAL_RCC_USART3_RELEASE_RESET +#define __USART4_CLK_DISABLE __HAL_RCC_UART4_CLK_DISABLE +#define __USART4_CLK_ENABLE __HAL_RCC_UART4_CLK_ENABLE +#define __USART4_CLK_SLEEP_ENABLE __HAL_RCC_UART4_CLK_SLEEP_ENABLE +#define __USART4_CLK_SLEEP_DISABLE __HAL_RCC_UART4_CLK_SLEEP_DISABLE +#define __USART4_FORCE_RESET __HAL_RCC_UART4_FORCE_RESET +#define __USART4_RELEASE_RESET __HAL_RCC_UART4_RELEASE_RESET +#define __USART5_CLK_DISABLE __HAL_RCC_UART5_CLK_DISABLE +#define __USART5_CLK_ENABLE __HAL_RCC_UART5_CLK_ENABLE +#define __USART5_CLK_SLEEP_ENABLE __HAL_RCC_UART5_CLK_SLEEP_ENABLE +#define __USART5_CLK_SLEEP_DISABLE __HAL_RCC_UART5_CLK_SLEEP_DISABLE +#define __USART5_FORCE_RESET __HAL_RCC_UART5_FORCE_RESET +#define __USART5_RELEASE_RESET __HAL_RCC_UART5_RELEASE_RESET +#define __USART7_CLK_DISABLE __HAL_RCC_UART7_CLK_DISABLE +#define __USART7_CLK_ENABLE __HAL_RCC_UART7_CLK_ENABLE +#define __USART7_FORCE_RESET __HAL_RCC_UART7_FORCE_RESET +#define __USART7_RELEASE_RESET __HAL_RCC_UART7_RELEASE_RESET +#define __USART8_CLK_DISABLE __HAL_RCC_UART8_CLK_DISABLE +#define __USART8_CLK_ENABLE __HAL_RCC_UART8_CLK_ENABLE +#define __USART8_FORCE_RESET __HAL_RCC_UART8_FORCE_RESET +#define __USART8_RELEASE_RESET __HAL_RCC_UART8_RELEASE_RESET +#define __USB_CLK_DISABLE __HAL_RCC_USB_CLK_DISABLE +#define __USB_CLK_ENABLE __HAL_RCC_USB_CLK_ENABLE +#define __USB_FORCE_RESET __HAL_RCC_USB_FORCE_RESET +#define __USB_CLK_SLEEP_ENABLE __HAL_RCC_USB_CLK_SLEEP_ENABLE +#define __USB_CLK_SLEEP_DISABLE __HAL_RCC_USB_CLK_SLEEP_DISABLE +#define __USB_OTG_FS_CLK_DISABLE __HAL_RCC_USB_OTG_FS_CLK_DISABLE +#define __USB_OTG_FS_CLK_ENABLE __HAL_RCC_USB_OTG_FS_CLK_ENABLE +#define __USB_RELEASE_RESET __HAL_RCC_USB_RELEASE_RESET + +#if defined(STM32H7) +#define __HAL_RCC_WWDG_CLK_DISABLE __HAL_RCC_WWDG1_CLK_DISABLE +#define __HAL_RCC_WWDG_CLK_ENABLE __HAL_RCC_WWDG1_CLK_ENABLE +#define __HAL_RCC_WWDG_CLK_SLEEP_DISABLE __HAL_RCC_WWDG1_CLK_SLEEP_DISABLE +#define __HAL_RCC_WWDG_CLK_SLEEP_ENABLE __HAL_RCC_WWDG1_CLK_SLEEP_ENABLE + +#define __HAL_RCC_WWDG_FORCE_RESET ((void)0U) /* Not available on the STM32H7*/ +#define __HAL_RCC_WWDG_RELEASE_RESET ((void)0U) /* Not available on the STM32H7*/ + + +#define __HAL_RCC_WWDG_IS_CLK_ENABLED __HAL_RCC_WWDG1_IS_CLK_ENABLED +#define __HAL_RCC_WWDG_IS_CLK_DISABLED __HAL_RCC_WWDG1_IS_CLK_DISABLED +#endif + +#define __WWDG_CLK_DISABLE __HAL_RCC_WWDG_CLK_DISABLE +#define __WWDG_CLK_ENABLE __HAL_RCC_WWDG_CLK_ENABLE +#define __WWDG_CLK_SLEEP_DISABLE __HAL_RCC_WWDG_CLK_SLEEP_DISABLE +#define __WWDG_CLK_SLEEP_ENABLE __HAL_RCC_WWDG_CLK_SLEEP_ENABLE +#define __WWDG_FORCE_RESET __HAL_RCC_WWDG_FORCE_RESET +#define __WWDG_RELEASE_RESET __HAL_RCC_WWDG_RELEASE_RESET + +#define __TIM21_CLK_ENABLE __HAL_RCC_TIM21_CLK_ENABLE +#define __TIM21_CLK_DISABLE __HAL_RCC_TIM21_CLK_DISABLE +#define __TIM21_FORCE_RESET __HAL_RCC_TIM21_FORCE_RESET +#define __TIM21_RELEASE_RESET __HAL_RCC_TIM21_RELEASE_RESET +#define __TIM21_CLK_SLEEP_ENABLE __HAL_RCC_TIM21_CLK_SLEEP_ENABLE +#define __TIM21_CLK_SLEEP_DISABLE __HAL_RCC_TIM21_CLK_SLEEP_DISABLE +#define __TIM22_CLK_ENABLE __HAL_RCC_TIM22_CLK_ENABLE 
+#define __TIM22_CLK_DISABLE __HAL_RCC_TIM22_CLK_DISABLE +#define __TIM22_FORCE_RESET __HAL_RCC_TIM22_FORCE_RESET +#define __TIM22_RELEASE_RESET __HAL_RCC_TIM22_RELEASE_RESET +#define __TIM22_CLK_SLEEP_ENABLE __HAL_RCC_TIM22_CLK_SLEEP_ENABLE +#define __TIM22_CLK_SLEEP_DISABLE __HAL_RCC_TIM22_CLK_SLEEP_DISABLE +#define __CRS_CLK_DISABLE __HAL_RCC_CRS_CLK_DISABLE +#define __CRS_CLK_ENABLE __HAL_RCC_CRS_CLK_ENABLE +#define __CRS_CLK_SLEEP_DISABLE __HAL_RCC_CRS_CLK_SLEEP_DISABLE +#define __CRS_CLK_SLEEP_ENABLE __HAL_RCC_CRS_CLK_SLEEP_ENABLE +#define __CRS_FORCE_RESET __HAL_RCC_CRS_FORCE_RESET +#define __CRS_RELEASE_RESET __HAL_RCC_CRS_RELEASE_RESET +#define __RCC_BACKUPRESET_FORCE __HAL_RCC_BACKUPRESET_FORCE +#define __RCC_BACKUPRESET_RELEASE __HAL_RCC_BACKUPRESET_RELEASE + +#define __USB_OTG_FS_FORCE_RESET __HAL_RCC_USB_OTG_FS_FORCE_RESET +#define __USB_OTG_FS_RELEASE_RESET __HAL_RCC_USB_OTG_FS_RELEASE_RESET +#define __USB_OTG_FS_CLK_SLEEP_ENABLE __HAL_RCC_USB_OTG_FS_CLK_SLEEP_ENABLE +#define __USB_OTG_FS_CLK_SLEEP_DISABLE __HAL_RCC_USB_OTG_FS_CLK_SLEEP_DISABLE +#define __USB_OTG_HS_CLK_DISABLE __HAL_RCC_USB_OTG_HS_CLK_DISABLE +#define __USB_OTG_HS_CLK_ENABLE __HAL_RCC_USB_OTG_HS_CLK_ENABLE +#define __USB_OTG_HS_ULPI_CLK_ENABLE __HAL_RCC_USB_OTG_HS_ULPI_CLK_ENABLE +#define __USB_OTG_HS_ULPI_CLK_DISABLE __HAL_RCC_USB_OTG_HS_ULPI_CLK_DISABLE +#define __TIM9_CLK_SLEEP_ENABLE __HAL_RCC_TIM9_CLK_SLEEP_ENABLE +#define __TIM9_CLK_SLEEP_DISABLE __HAL_RCC_TIM9_CLK_SLEEP_DISABLE +#define __TIM10_CLK_SLEEP_ENABLE __HAL_RCC_TIM10_CLK_SLEEP_ENABLE +#define __TIM10_CLK_SLEEP_DISABLE __HAL_RCC_TIM10_CLK_SLEEP_DISABLE +#define __TIM11_CLK_SLEEP_ENABLE __HAL_RCC_TIM11_CLK_SLEEP_ENABLE +#define __TIM11_CLK_SLEEP_DISABLE __HAL_RCC_TIM11_CLK_SLEEP_DISABLE +#define __ETHMACPTP_CLK_SLEEP_ENABLE __HAL_RCC_ETHMACPTP_CLK_SLEEP_ENABLE +#define __ETHMACPTP_CLK_SLEEP_DISABLE __HAL_RCC_ETHMACPTP_CLK_SLEEP_DISABLE +#define __ETHMACPTP_CLK_ENABLE __HAL_RCC_ETHMACPTP_CLK_ENABLE +#define __ETHMACPTP_CLK_DISABLE __HAL_RCC_ETHMACPTP_CLK_DISABLE +#define __HASH_CLK_ENABLE __HAL_RCC_HASH_CLK_ENABLE +#define __HASH_FORCE_RESET __HAL_RCC_HASH_FORCE_RESET +#define __HASH_RELEASE_RESET __HAL_RCC_HASH_RELEASE_RESET +#define __HASH_CLK_SLEEP_ENABLE __HAL_RCC_HASH_CLK_SLEEP_ENABLE +#define __HASH_CLK_SLEEP_DISABLE __HAL_RCC_HASH_CLK_SLEEP_DISABLE +#define __HASH_CLK_DISABLE __HAL_RCC_HASH_CLK_DISABLE +#define __SPI5_CLK_ENABLE __HAL_RCC_SPI5_CLK_ENABLE +#define __SPI5_CLK_DISABLE __HAL_RCC_SPI5_CLK_DISABLE +#define __SPI5_FORCE_RESET __HAL_RCC_SPI5_FORCE_RESET +#define __SPI5_RELEASE_RESET __HAL_RCC_SPI5_RELEASE_RESET +#define __SPI5_CLK_SLEEP_ENABLE __HAL_RCC_SPI5_CLK_SLEEP_ENABLE +#define __SPI5_CLK_SLEEP_DISABLE __HAL_RCC_SPI5_CLK_SLEEP_DISABLE +#define __SPI6_CLK_ENABLE __HAL_RCC_SPI6_CLK_ENABLE +#define __SPI6_CLK_DISABLE __HAL_RCC_SPI6_CLK_DISABLE +#define __SPI6_FORCE_RESET __HAL_RCC_SPI6_FORCE_RESET +#define __SPI6_RELEASE_RESET __HAL_RCC_SPI6_RELEASE_RESET +#define __SPI6_CLK_SLEEP_ENABLE __HAL_RCC_SPI6_CLK_SLEEP_ENABLE +#define __SPI6_CLK_SLEEP_DISABLE __HAL_RCC_SPI6_CLK_SLEEP_DISABLE +#define __LTDC_CLK_ENABLE __HAL_RCC_LTDC_CLK_ENABLE +#define __LTDC_CLK_DISABLE __HAL_RCC_LTDC_CLK_DISABLE +#define __LTDC_FORCE_RESET __HAL_RCC_LTDC_FORCE_RESET +#define __LTDC_RELEASE_RESET __HAL_RCC_LTDC_RELEASE_RESET +#define __LTDC_CLK_SLEEP_ENABLE __HAL_RCC_LTDC_CLK_SLEEP_ENABLE +#define __ETHMAC_CLK_SLEEP_ENABLE __HAL_RCC_ETHMAC_CLK_SLEEP_ENABLE +#define __ETHMAC_CLK_SLEEP_DISABLE __HAL_RCC_ETHMAC_CLK_SLEEP_DISABLE +#define 
__ETHMACTX_CLK_SLEEP_ENABLE __HAL_RCC_ETHMACTX_CLK_SLEEP_ENABLE +#define __ETHMACTX_CLK_SLEEP_DISABLE __HAL_RCC_ETHMACTX_CLK_SLEEP_DISABLE +#define __ETHMACRX_CLK_SLEEP_ENABLE __HAL_RCC_ETHMACRX_CLK_SLEEP_ENABLE +#define __ETHMACRX_CLK_SLEEP_DISABLE __HAL_RCC_ETHMACRX_CLK_SLEEP_DISABLE +#define __TIM12_CLK_SLEEP_ENABLE __HAL_RCC_TIM12_CLK_SLEEP_ENABLE +#define __TIM12_CLK_SLEEP_DISABLE __HAL_RCC_TIM12_CLK_SLEEP_DISABLE +#define __TIM13_CLK_SLEEP_ENABLE __HAL_RCC_TIM13_CLK_SLEEP_ENABLE +#define __TIM13_CLK_SLEEP_DISABLE __HAL_RCC_TIM13_CLK_SLEEP_DISABLE +#define __TIM14_CLK_SLEEP_ENABLE __HAL_RCC_TIM14_CLK_SLEEP_ENABLE +#define __TIM14_CLK_SLEEP_DISABLE __HAL_RCC_TIM14_CLK_SLEEP_DISABLE +#define __BKPSRAM_CLK_ENABLE __HAL_RCC_BKPSRAM_CLK_ENABLE +#define __BKPSRAM_CLK_DISABLE __HAL_RCC_BKPSRAM_CLK_DISABLE +#define __BKPSRAM_CLK_SLEEP_ENABLE __HAL_RCC_BKPSRAM_CLK_SLEEP_ENABLE +#define __BKPSRAM_CLK_SLEEP_DISABLE __HAL_RCC_BKPSRAM_CLK_SLEEP_DISABLE +#define __CCMDATARAMEN_CLK_ENABLE __HAL_RCC_CCMDATARAMEN_CLK_ENABLE +#define __CCMDATARAMEN_CLK_DISABLE __HAL_RCC_CCMDATARAMEN_CLK_DISABLE +#define __USART6_CLK_ENABLE __HAL_RCC_USART6_CLK_ENABLE +#define __USART6_CLK_DISABLE __HAL_RCC_USART6_CLK_DISABLE +#define __USART6_FORCE_RESET __HAL_RCC_USART6_FORCE_RESET +#define __USART6_RELEASE_RESET __HAL_RCC_USART6_RELEASE_RESET +#define __USART6_CLK_SLEEP_ENABLE __HAL_RCC_USART6_CLK_SLEEP_ENABLE +#define __USART6_CLK_SLEEP_DISABLE __HAL_RCC_USART6_CLK_SLEEP_DISABLE +#define __SPI4_CLK_ENABLE __HAL_RCC_SPI4_CLK_ENABLE +#define __SPI4_CLK_DISABLE __HAL_RCC_SPI4_CLK_DISABLE +#define __SPI4_FORCE_RESET __HAL_RCC_SPI4_FORCE_RESET +#define __SPI4_RELEASE_RESET __HAL_RCC_SPI4_RELEASE_RESET +#define __SPI4_CLK_SLEEP_ENABLE __HAL_RCC_SPI4_CLK_SLEEP_ENABLE +#define __SPI4_CLK_SLEEP_DISABLE __HAL_RCC_SPI4_CLK_SLEEP_DISABLE +#define __GPIOI_CLK_ENABLE __HAL_RCC_GPIOI_CLK_ENABLE +#define __GPIOI_CLK_DISABLE __HAL_RCC_GPIOI_CLK_DISABLE +#define __GPIOI_FORCE_RESET __HAL_RCC_GPIOI_FORCE_RESET +#define __GPIOI_RELEASE_RESET __HAL_RCC_GPIOI_RELEASE_RESET +#define __GPIOI_CLK_SLEEP_ENABLE __HAL_RCC_GPIOI_CLK_SLEEP_ENABLE +#define __GPIOI_CLK_SLEEP_DISABLE __HAL_RCC_GPIOI_CLK_SLEEP_DISABLE +#define __GPIOJ_CLK_ENABLE __HAL_RCC_GPIOJ_CLK_ENABLE +#define __GPIOJ_CLK_DISABLE __HAL_RCC_GPIOJ_CLK_DISABLE +#define __GPIOJ_FORCE_RESET __HAL_RCC_GPIOJ_FORCE_RESET +#define __GPIOJ_RELEASE_RESET __HAL_RCC_GPIOJ_RELEASE_RESET +#define __GPIOJ_CLK_SLEEP_ENABLE __HAL_RCC_GPIOJ_CLK_SLEEP_ENABLE +#define __GPIOJ_CLK_SLEEP_DISABLE __HAL_RCC_GPIOJ_CLK_SLEEP_DISABLE +#define __GPIOK_CLK_ENABLE __HAL_RCC_GPIOK_CLK_ENABLE +#define __GPIOK_CLK_DISABLE __HAL_RCC_GPIOK_CLK_DISABLE +#define __GPIOK_RELEASE_RESET __HAL_RCC_GPIOK_RELEASE_RESET +#define __GPIOK_CLK_SLEEP_ENABLE __HAL_RCC_GPIOK_CLK_SLEEP_ENABLE +#define __GPIOK_CLK_SLEEP_DISABLE __HAL_RCC_GPIOK_CLK_SLEEP_DISABLE +#define __ETH_CLK_ENABLE __HAL_RCC_ETH_CLK_ENABLE +#define __ETH_CLK_DISABLE __HAL_RCC_ETH_CLK_DISABLE +#define __DCMI_CLK_ENABLE __HAL_RCC_DCMI_CLK_ENABLE +#define __DCMI_CLK_DISABLE __HAL_RCC_DCMI_CLK_DISABLE +#define __DCMI_FORCE_RESET __HAL_RCC_DCMI_FORCE_RESET +#define __DCMI_RELEASE_RESET __HAL_RCC_DCMI_RELEASE_RESET +#define __DCMI_CLK_SLEEP_ENABLE __HAL_RCC_DCMI_CLK_SLEEP_ENABLE +#define __DCMI_CLK_SLEEP_DISABLE __HAL_RCC_DCMI_CLK_SLEEP_DISABLE +#define __UART7_CLK_ENABLE __HAL_RCC_UART7_CLK_ENABLE +#define __UART7_CLK_DISABLE __HAL_RCC_UART7_CLK_DISABLE +#define __UART7_RELEASE_RESET __HAL_RCC_UART7_RELEASE_RESET +#define __UART7_FORCE_RESET 
__HAL_RCC_UART7_FORCE_RESET +#define __UART7_CLK_SLEEP_ENABLE __HAL_RCC_UART7_CLK_SLEEP_ENABLE +#define __UART7_CLK_SLEEP_DISABLE __HAL_RCC_UART7_CLK_SLEEP_DISABLE +#define __UART8_CLK_ENABLE __HAL_RCC_UART8_CLK_ENABLE +#define __UART8_CLK_DISABLE __HAL_RCC_UART8_CLK_DISABLE +#define __UART8_FORCE_RESET __HAL_RCC_UART8_FORCE_RESET +#define __UART8_RELEASE_RESET __HAL_RCC_UART8_RELEASE_RESET +#define __UART8_CLK_SLEEP_ENABLE __HAL_RCC_UART8_CLK_SLEEP_ENABLE +#define __UART8_CLK_SLEEP_DISABLE __HAL_RCC_UART8_CLK_SLEEP_DISABLE +#define __OTGHS_CLK_SLEEP_ENABLE __HAL_RCC_USB_OTG_HS_CLK_SLEEP_ENABLE +#define __OTGHS_CLK_SLEEP_DISABLE __HAL_RCC_USB_OTG_HS_CLK_SLEEP_DISABLE +#define __OTGHS_FORCE_RESET __HAL_RCC_USB_OTG_HS_FORCE_RESET +#define __OTGHS_RELEASE_RESET __HAL_RCC_USB_OTG_HS_RELEASE_RESET +#define __OTGHSULPI_CLK_SLEEP_ENABLE __HAL_RCC_USB_OTG_HS_ULPI_CLK_SLEEP_ENABLE +#define __OTGHSULPI_CLK_SLEEP_DISABLE __HAL_RCC_USB_OTG_HS_ULPI_CLK_SLEEP_DISABLE +#define __HAL_RCC_OTGHS_CLK_SLEEP_ENABLE __HAL_RCC_USB_OTG_HS_CLK_SLEEP_ENABLE +#define __HAL_RCC_OTGHS_CLK_SLEEP_DISABLE __HAL_RCC_USB_OTG_HS_CLK_SLEEP_DISABLE +#define __HAL_RCC_OTGHS_IS_CLK_SLEEP_ENABLED __HAL_RCC_USB_OTG_HS_IS_CLK_SLEEP_ENABLED +#define __HAL_RCC_OTGHS_IS_CLK_SLEEP_DISABLED __HAL_RCC_USB_OTG_HS_IS_CLK_SLEEP_DISABLED +#define __HAL_RCC_OTGHS_FORCE_RESET __HAL_RCC_USB_OTG_HS_FORCE_RESET +#define __HAL_RCC_OTGHS_RELEASE_RESET __HAL_RCC_USB_OTG_HS_RELEASE_RESET +#define __HAL_RCC_OTGHSULPI_CLK_SLEEP_ENABLE __HAL_RCC_USB_OTG_HS_ULPI_CLK_SLEEP_ENABLE +#define __HAL_RCC_OTGHSULPI_CLK_SLEEP_DISABLE __HAL_RCC_USB_OTG_HS_ULPI_CLK_SLEEP_DISABLE +#define __HAL_RCC_OTGHSULPI_IS_CLK_SLEEP_ENABLED __HAL_RCC_USB_OTG_HS_ULPI_IS_CLK_SLEEP_ENABLED +#define __HAL_RCC_OTGHSULPI_IS_CLK_SLEEP_DISABLED __HAL_RCC_USB_OTG_HS_ULPI_IS_CLK_SLEEP_DISABLED +#define __SRAM3_CLK_SLEEP_ENABLE __HAL_RCC_SRAM3_CLK_SLEEP_ENABLE +#define __CAN2_CLK_SLEEP_ENABLE __HAL_RCC_CAN2_CLK_SLEEP_ENABLE +#define __CAN2_CLK_SLEEP_DISABLE __HAL_RCC_CAN2_CLK_SLEEP_DISABLE +#define __DAC_CLK_SLEEP_ENABLE __HAL_RCC_DAC_CLK_SLEEP_ENABLE +#define __DAC_CLK_SLEEP_DISABLE __HAL_RCC_DAC_CLK_SLEEP_DISABLE +#define __ADC2_CLK_SLEEP_ENABLE __HAL_RCC_ADC2_CLK_SLEEP_ENABLE +#define __ADC2_CLK_SLEEP_DISABLE __HAL_RCC_ADC2_CLK_SLEEP_DISABLE +#define __ADC3_CLK_SLEEP_ENABLE __HAL_RCC_ADC3_CLK_SLEEP_ENABLE +#define __ADC3_CLK_SLEEP_DISABLE __HAL_RCC_ADC3_CLK_SLEEP_DISABLE +#define __FSMC_FORCE_RESET __HAL_RCC_FSMC_FORCE_RESET +#define __FSMC_RELEASE_RESET __HAL_RCC_FSMC_RELEASE_RESET +#define __FSMC_CLK_SLEEP_ENABLE __HAL_RCC_FSMC_CLK_SLEEP_ENABLE +#define __FSMC_CLK_SLEEP_DISABLE __HAL_RCC_FSMC_CLK_SLEEP_DISABLE +#define __SDIO_FORCE_RESET __HAL_RCC_SDIO_FORCE_RESET +#define __SDIO_RELEASE_RESET __HAL_RCC_SDIO_RELEASE_RESET +#define __SDIO_CLK_SLEEP_DISABLE __HAL_RCC_SDIO_CLK_SLEEP_DISABLE +#define __SDIO_CLK_SLEEP_ENABLE __HAL_RCC_SDIO_CLK_SLEEP_ENABLE +#define __DMA2D_CLK_ENABLE __HAL_RCC_DMA2D_CLK_ENABLE +#define __DMA2D_CLK_DISABLE __HAL_RCC_DMA2D_CLK_DISABLE +#define __DMA2D_FORCE_RESET __HAL_RCC_DMA2D_FORCE_RESET +#define __DMA2D_RELEASE_RESET __HAL_RCC_DMA2D_RELEASE_RESET +#define __DMA2D_CLK_SLEEP_ENABLE __HAL_RCC_DMA2D_CLK_SLEEP_ENABLE +#define __DMA2D_CLK_SLEEP_DISABLE __HAL_RCC_DMA2D_CLK_SLEEP_DISABLE + +/* alias define maintained for legacy */ +#define __HAL_RCC_OTGFS_FORCE_RESET __HAL_RCC_USB_OTG_FS_FORCE_RESET +#define __HAL_RCC_OTGFS_RELEASE_RESET __HAL_RCC_USB_OTG_FS_RELEASE_RESET + +#define __ADC12_CLK_ENABLE __HAL_RCC_ADC12_CLK_ENABLE +#define __ADC12_CLK_DISABLE 
__HAL_RCC_ADC12_CLK_DISABLE +#define __ADC34_CLK_ENABLE __HAL_RCC_ADC34_CLK_ENABLE +#define __ADC34_CLK_DISABLE __HAL_RCC_ADC34_CLK_DISABLE +#define __DAC2_CLK_ENABLE __HAL_RCC_DAC2_CLK_ENABLE +#define __DAC2_CLK_DISABLE __HAL_RCC_DAC2_CLK_DISABLE +#define __TIM18_CLK_ENABLE __HAL_RCC_TIM18_CLK_ENABLE +#define __TIM18_CLK_DISABLE __HAL_RCC_TIM18_CLK_DISABLE +#define __TIM19_CLK_ENABLE __HAL_RCC_TIM19_CLK_ENABLE +#define __TIM19_CLK_DISABLE __HAL_RCC_TIM19_CLK_DISABLE +#define __TIM20_CLK_ENABLE __HAL_RCC_TIM20_CLK_ENABLE +#define __TIM20_CLK_DISABLE __HAL_RCC_TIM20_CLK_DISABLE +#define __HRTIM1_CLK_ENABLE __HAL_RCC_HRTIM1_CLK_ENABLE +#define __HRTIM1_CLK_DISABLE __HAL_RCC_HRTIM1_CLK_DISABLE +#define __SDADC1_CLK_ENABLE __HAL_RCC_SDADC1_CLK_ENABLE +#define __SDADC2_CLK_ENABLE __HAL_RCC_SDADC2_CLK_ENABLE +#define __SDADC3_CLK_ENABLE __HAL_RCC_SDADC3_CLK_ENABLE +#define __SDADC1_CLK_DISABLE __HAL_RCC_SDADC1_CLK_DISABLE +#define __SDADC2_CLK_DISABLE __HAL_RCC_SDADC2_CLK_DISABLE +#define __SDADC3_CLK_DISABLE __HAL_RCC_SDADC3_CLK_DISABLE + +#define __ADC12_FORCE_RESET __HAL_RCC_ADC12_FORCE_RESET +#define __ADC12_RELEASE_RESET __HAL_RCC_ADC12_RELEASE_RESET +#define __ADC34_FORCE_RESET __HAL_RCC_ADC34_FORCE_RESET +#define __ADC34_RELEASE_RESET __HAL_RCC_ADC34_RELEASE_RESET +#define __DAC2_FORCE_RESET __HAL_RCC_DAC2_FORCE_RESET +#define __DAC2_RELEASE_RESET __HAL_RCC_DAC2_RELEASE_RESET +#define __TIM18_FORCE_RESET __HAL_RCC_TIM18_FORCE_RESET +#define __TIM18_RELEASE_RESET __HAL_RCC_TIM18_RELEASE_RESET +#define __TIM19_FORCE_RESET __HAL_RCC_TIM19_FORCE_RESET +#define __TIM19_RELEASE_RESET __HAL_RCC_TIM19_RELEASE_RESET +#define __TIM20_FORCE_RESET __HAL_RCC_TIM20_FORCE_RESET +#define __TIM20_RELEASE_RESET __HAL_RCC_TIM20_RELEASE_RESET +#define __HRTIM1_FORCE_RESET __HAL_RCC_HRTIM1_FORCE_RESET +#define __HRTIM1_RELEASE_RESET __HAL_RCC_HRTIM1_RELEASE_RESET +#define __SDADC1_FORCE_RESET __HAL_RCC_SDADC1_FORCE_RESET +#define __SDADC2_FORCE_RESET __HAL_RCC_SDADC2_FORCE_RESET +#define __SDADC3_FORCE_RESET __HAL_RCC_SDADC3_FORCE_RESET +#define __SDADC1_RELEASE_RESET __HAL_RCC_SDADC1_RELEASE_RESET +#define __SDADC2_RELEASE_RESET __HAL_RCC_SDADC2_RELEASE_RESET +#define __SDADC3_RELEASE_RESET __HAL_RCC_SDADC3_RELEASE_RESET + +#define __ADC1_IS_CLK_ENABLED __HAL_RCC_ADC1_IS_CLK_ENABLED +#define __ADC1_IS_CLK_DISABLED __HAL_RCC_ADC1_IS_CLK_DISABLED +#define __ADC12_IS_CLK_ENABLED __HAL_RCC_ADC12_IS_CLK_ENABLED +#define __ADC12_IS_CLK_DISABLED __HAL_RCC_ADC12_IS_CLK_DISABLED +#define __ADC34_IS_CLK_ENABLED __HAL_RCC_ADC34_IS_CLK_ENABLED +#define __ADC34_IS_CLK_DISABLED __HAL_RCC_ADC34_IS_CLK_DISABLED +#define __CEC_IS_CLK_ENABLED __HAL_RCC_CEC_IS_CLK_ENABLED +#define __CEC_IS_CLK_DISABLED __HAL_RCC_CEC_IS_CLK_DISABLED +#define __CRC_IS_CLK_ENABLED __HAL_RCC_CRC_IS_CLK_ENABLED +#define __CRC_IS_CLK_DISABLED __HAL_RCC_CRC_IS_CLK_DISABLED +#define __DAC1_IS_CLK_ENABLED __HAL_RCC_DAC1_IS_CLK_ENABLED +#define __DAC1_IS_CLK_DISABLED __HAL_RCC_DAC1_IS_CLK_DISABLED +#define __DAC2_IS_CLK_ENABLED __HAL_RCC_DAC2_IS_CLK_ENABLED +#define __DAC2_IS_CLK_DISABLED __HAL_RCC_DAC2_IS_CLK_DISABLED +#define __DMA1_IS_CLK_ENABLED __HAL_RCC_DMA1_IS_CLK_ENABLED +#define __DMA1_IS_CLK_DISABLED __HAL_RCC_DMA1_IS_CLK_DISABLED +#define __DMA2_IS_CLK_ENABLED __HAL_RCC_DMA2_IS_CLK_ENABLED +#define __DMA2_IS_CLK_DISABLED __HAL_RCC_DMA2_IS_CLK_DISABLED +#define __FLITF_IS_CLK_ENABLED __HAL_RCC_FLITF_IS_CLK_ENABLED +#define __FLITF_IS_CLK_DISABLED __HAL_RCC_FLITF_IS_CLK_DISABLED +#define __FMC_IS_CLK_ENABLED __HAL_RCC_FMC_IS_CLK_ENABLED 
+#define __FMC_IS_CLK_DISABLED __HAL_RCC_FMC_IS_CLK_DISABLED +#define __GPIOA_IS_CLK_ENABLED __HAL_RCC_GPIOA_IS_CLK_ENABLED +#define __GPIOA_IS_CLK_DISABLED __HAL_RCC_GPIOA_IS_CLK_DISABLED +#define __GPIOB_IS_CLK_ENABLED __HAL_RCC_GPIOB_IS_CLK_ENABLED +#define __GPIOB_IS_CLK_DISABLED __HAL_RCC_GPIOB_IS_CLK_DISABLED +#define __GPIOC_IS_CLK_ENABLED __HAL_RCC_GPIOC_IS_CLK_ENABLED +#define __GPIOC_IS_CLK_DISABLED __HAL_RCC_GPIOC_IS_CLK_DISABLED +#define __GPIOD_IS_CLK_ENABLED __HAL_RCC_GPIOD_IS_CLK_ENABLED +#define __GPIOD_IS_CLK_DISABLED __HAL_RCC_GPIOD_IS_CLK_DISABLED +#define __GPIOE_IS_CLK_ENABLED __HAL_RCC_GPIOE_IS_CLK_ENABLED +#define __GPIOE_IS_CLK_DISABLED __HAL_RCC_GPIOE_IS_CLK_DISABLED +#define __GPIOF_IS_CLK_ENABLED __HAL_RCC_GPIOF_IS_CLK_ENABLED +#define __GPIOF_IS_CLK_DISABLED __HAL_RCC_GPIOF_IS_CLK_DISABLED +#define __GPIOG_IS_CLK_ENABLED __HAL_RCC_GPIOG_IS_CLK_ENABLED +#define __GPIOG_IS_CLK_DISABLED __HAL_RCC_GPIOG_IS_CLK_DISABLED +#define __GPIOH_IS_CLK_ENABLED __HAL_RCC_GPIOH_IS_CLK_ENABLED +#define __GPIOH_IS_CLK_DISABLED __HAL_RCC_GPIOH_IS_CLK_DISABLED +#define __HRTIM1_IS_CLK_ENABLED __HAL_RCC_HRTIM1_IS_CLK_ENABLED +#define __HRTIM1_IS_CLK_DISABLED __HAL_RCC_HRTIM1_IS_CLK_DISABLED +#define __I2C1_IS_CLK_ENABLED __HAL_RCC_I2C1_IS_CLK_ENABLED +#define __I2C1_IS_CLK_DISABLED __HAL_RCC_I2C1_IS_CLK_DISABLED +#define __I2C2_IS_CLK_ENABLED __HAL_RCC_I2C2_IS_CLK_ENABLED +#define __I2C2_IS_CLK_DISABLED __HAL_RCC_I2C2_IS_CLK_DISABLED +#define __I2C3_IS_CLK_ENABLED __HAL_RCC_I2C3_IS_CLK_ENABLED +#define __I2C3_IS_CLK_DISABLED __HAL_RCC_I2C3_IS_CLK_DISABLED +#define __PWR_IS_CLK_ENABLED __HAL_RCC_PWR_IS_CLK_ENABLED +#define __PWR_IS_CLK_DISABLED __HAL_RCC_PWR_IS_CLK_DISABLED +#define __SYSCFG_IS_CLK_ENABLED __HAL_RCC_SYSCFG_IS_CLK_ENABLED +#define __SYSCFG_IS_CLK_DISABLED __HAL_RCC_SYSCFG_IS_CLK_DISABLED +#define __SPI1_IS_CLK_ENABLED __HAL_RCC_SPI1_IS_CLK_ENABLED +#define __SPI1_IS_CLK_DISABLED __HAL_RCC_SPI1_IS_CLK_DISABLED +#define __SPI2_IS_CLK_ENABLED __HAL_RCC_SPI2_IS_CLK_ENABLED +#define __SPI2_IS_CLK_DISABLED __HAL_RCC_SPI2_IS_CLK_DISABLED +#define __SPI3_IS_CLK_ENABLED __HAL_RCC_SPI3_IS_CLK_ENABLED +#define __SPI3_IS_CLK_DISABLED __HAL_RCC_SPI3_IS_CLK_DISABLED +#define __SPI4_IS_CLK_ENABLED __HAL_RCC_SPI4_IS_CLK_ENABLED +#define __SPI4_IS_CLK_DISABLED __HAL_RCC_SPI4_IS_CLK_DISABLED +#define __SDADC1_IS_CLK_ENABLED __HAL_RCC_SDADC1_IS_CLK_ENABLED +#define __SDADC1_IS_CLK_DISABLED __HAL_RCC_SDADC1_IS_CLK_DISABLED +#define __SDADC2_IS_CLK_ENABLED __HAL_RCC_SDADC2_IS_CLK_ENABLED +#define __SDADC2_IS_CLK_DISABLED __HAL_RCC_SDADC2_IS_CLK_DISABLED +#define __SDADC3_IS_CLK_ENABLED __HAL_RCC_SDADC3_IS_CLK_ENABLED +#define __SDADC3_IS_CLK_DISABLED __HAL_RCC_SDADC3_IS_CLK_DISABLED +#define __SRAM_IS_CLK_ENABLED __HAL_RCC_SRAM_IS_CLK_ENABLED +#define __SRAM_IS_CLK_DISABLED __HAL_RCC_SRAM_IS_CLK_DISABLED +#define __TIM1_IS_CLK_ENABLED __HAL_RCC_TIM1_IS_CLK_ENABLED +#define __TIM1_IS_CLK_DISABLED __HAL_RCC_TIM1_IS_CLK_DISABLED +#define __TIM2_IS_CLK_ENABLED __HAL_RCC_TIM2_IS_CLK_ENABLED +#define __TIM2_IS_CLK_DISABLED __HAL_RCC_TIM2_IS_CLK_DISABLED +#define __TIM3_IS_CLK_ENABLED __HAL_RCC_TIM3_IS_CLK_ENABLED +#define __TIM3_IS_CLK_DISABLED __HAL_RCC_TIM3_IS_CLK_DISABLED +#define __TIM4_IS_CLK_ENABLED __HAL_RCC_TIM4_IS_CLK_ENABLED +#define __TIM4_IS_CLK_DISABLED __HAL_RCC_TIM4_IS_CLK_DISABLED +#define __TIM5_IS_CLK_ENABLED __HAL_RCC_TIM5_IS_CLK_ENABLED +#define __TIM5_IS_CLK_DISABLED __HAL_RCC_TIM5_IS_CLK_DISABLED +#define __TIM6_IS_CLK_ENABLED __HAL_RCC_TIM6_IS_CLK_ENABLED +#define 
__TIM6_IS_CLK_DISABLED __HAL_RCC_TIM6_IS_CLK_DISABLED +#define __TIM7_IS_CLK_ENABLED __HAL_RCC_TIM7_IS_CLK_ENABLED +#define __TIM7_IS_CLK_DISABLED __HAL_RCC_TIM7_IS_CLK_DISABLED +#define __TIM8_IS_CLK_ENABLED __HAL_RCC_TIM8_IS_CLK_ENABLED +#define __TIM8_IS_CLK_DISABLED __HAL_RCC_TIM8_IS_CLK_DISABLED +#define __TIM12_IS_CLK_ENABLED __HAL_RCC_TIM12_IS_CLK_ENABLED +#define __TIM12_IS_CLK_DISABLED __HAL_RCC_TIM12_IS_CLK_DISABLED +#define __TIM13_IS_CLK_ENABLED __HAL_RCC_TIM13_IS_CLK_ENABLED +#define __TIM13_IS_CLK_DISABLED __HAL_RCC_TIM13_IS_CLK_DISABLED +#define __TIM14_IS_CLK_ENABLED __HAL_RCC_TIM14_IS_CLK_ENABLED +#define __TIM14_IS_CLK_DISABLED __HAL_RCC_TIM14_IS_CLK_DISABLED +#define __TIM15_IS_CLK_ENABLED __HAL_RCC_TIM15_IS_CLK_ENABLED +#define __TIM15_IS_CLK_DISABLED __HAL_RCC_TIM15_IS_CLK_DISABLED +#define __TIM16_IS_CLK_ENABLED __HAL_RCC_TIM16_IS_CLK_ENABLED +#define __TIM16_IS_CLK_DISABLED __HAL_RCC_TIM16_IS_CLK_DISABLED +#define __TIM17_IS_CLK_ENABLED __HAL_RCC_TIM17_IS_CLK_ENABLED +#define __TIM17_IS_CLK_DISABLED __HAL_RCC_TIM17_IS_CLK_DISABLED +#define __TIM18_IS_CLK_ENABLED __HAL_RCC_TIM18_IS_CLK_ENABLED +#define __TIM18_IS_CLK_DISABLED __HAL_RCC_TIM18_IS_CLK_DISABLED +#define __TIM19_IS_CLK_ENABLED __HAL_RCC_TIM19_IS_CLK_ENABLED +#define __TIM19_IS_CLK_DISABLED __HAL_RCC_TIM19_IS_CLK_DISABLED +#define __TIM20_IS_CLK_ENABLED __HAL_RCC_TIM20_IS_CLK_ENABLED +#define __TIM20_IS_CLK_DISABLED __HAL_RCC_TIM20_IS_CLK_DISABLED +#define __TSC_IS_CLK_ENABLED __HAL_RCC_TSC_IS_CLK_ENABLED +#define __TSC_IS_CLK_DISABLED __HAL_RCC_TSC_IS_CLK_DISABLED +#define __UART4_IS_CLK_ENABLED __HAL_RCC_UART4_IS_CLK_ENABLED +#define __UART4_IS_CLK_DISABLED __HAL_RCC_UART4_IS_CLK_DISABLED +#define __UART5_IS_CLK_ENABLED __HAL_RCC_UART5_IS_CLK_ENABLED +#define __UART5_IS_CLK_DISABLED __HAL_RCC_UART5_IS_CLK_DISABLED +#define __USART1_IS_CLK_ENABLED __HAL_RCC_USART1_IS_CLK_ENABLED +#define __USART1_IS_CLK_DISABLED __HAL_RCC_USART1_IS_CLK_DISABLED +#define __USART2_IS_CLK_ENABLED __HAL_RCC_USART2_IS_CLK_ENABLED +#define __USART2_IS_CLK_DISABLED __HAL_RCC_USART2_IS_CLK_DISABLED +#define __USART3_IS_CLK_ENABLED __HAL_RCC_USART3_IS_CLK_ENABLED +#define __USART3_IS_CLK_DISABLED __HAL_RCC_USART3_IS_CLK_DISABLED +#define __USB_IS_CLK_ENABLED __HAL_RCC_USB_IS_CLK_ENABLED +#define __USB_IS_CLK_DISABLED __HAL_RCC_USB_IS_CLK_DISABLED +#define __WWDG_IS_CLK_ENABLED __HAL_RCC_WWDG_IS_CLK_ENABLED +#define __WWDG_IS_CLK_DISABLED __HAL_RCC_WWDG_IS_CLK_DISABLED + +#if defined(STM32L1) +#define __HAL_RCC_CRYP_CLK_DISABLE __HAL_RCC_AES_CLK_DISABLE +#define __HAL_RCC_CRYP_CLK_ENABLE __HAL_RCC_AES_CLK_ENABLE +#define __HAL_RCC_CRYP_CLK_SLEEP_DISABLE __HAL_RCC_AES_CLK_SLEEP_DISABLE +#define __HAL_RCC_CRYP_CLK_SLEEP_ENABLE __HAL_RCC_AES_CLK_SLEEP_ENABLE +#define __HAL_RCC_CRYP_FORCE_RESET __HAL_RCC_AES_FORCE_RESET +#define __HAL_RCC_CRYP_RELEASE_RESET __HAL_RCC_AES_RELEASE_RESET +#endif /* STM32L1 */ + +#if defined(STM32F4) +#define __HAL_RCC_SDMMC1_FORCE_RESET __HAL_RCC_SDIO_FORCE_RESET +#define __HAL_RCC_SDMMC1_RELEASE_RESET __HAL_RCC_SDIO_RELEASE_RESET +#define __HAL_RCC_SDMMC1_CLK_SLEEP_ENABLE __HAL_RCC_SDIO_CLK_SLEEP_ENABLE +#define __HAL_RCC_SDMMC1_CLK_SLEEP_DISABLE __HAL_RCC_SDIO_CLK_SLEEP_DISABLE +#define __HAL_RCC_SDMMC1_CLK_ENABLE __HAL_RCC_SDIO_CLK_ENABLE +#define __HAL_RCC_SDMMC1_CLK_DISABLE __HAL_RCC_SDIO_CLK_DISABLE +#define __HAL_RCC_SDMMC1_IS_CLK_ENABLED __HAL_RCC_SDIO_IS_CLK_ENABLED +#define __HAL_RCC_SDMMC1_IS_CLK_DISABLED __HAL_RCC_SDIO_IS_CLK_DISABLED +#define Sdmmc1ClockSelection SdioClockSelection +#define 
RCC_PERIPHCLK_SDMMC1 RCC_PERIPHCLK_SDIO +#define RCC_SDMMC1CLKSOURCE_CLK48 RCC_SDIOCLKSOURCE_CK48 +#define RCC_SDMMC1CLKSOURCE_SYSCLK RCC_SDIOCLKSOURCE_SYSCLK +#define __HAL_RCC_SDMMC1_CONFIG __HAL_RCC_SDIO_CONFIG +#define __HAL_RCC_GET_SDMMC1_SOURCE __HAL_RCC_GET_SDIO_SOURCE +#endif + +#if defined(STM32F7) || defined(STM32L4) +#define __HAL_RCC_SDIO_FORCE_RESET __HAL_RCC_SDMMC1_FORCE_RESET +#define __HAL_RCC_SDIO_RELEASE_RESET __HAL_RCC_SDMMC1_RELEASE_RESET +#define __HAL_RCC_SDIO_CLK_SLEEP_ENABLE __HAL_RCC_SDMMC1_CLK_SLEEP_ENABLE +#define __HAL_RCC_SDIO_CLK_SLEEP_DISABLE __HAL_RCC_SDMMC1_CLK_SLEEP_DISABLE +#define __HAL_RCC_SDIO_CLK_ENABLE __HAL_RCC_SDMMC1_CLK_ENABLE +#define __HAL_RCC_SDIO_CLK_DISABLE __HAL_RCC_SDMMC1_CLK_DISABLE +#define __HAL_RCC_SDIO_IS_CLK_ENABLED __HAL_RCC_SDMMC1_IS_CLK_ENABLED +#define __HAL_RCC_SDIO_IS_CLK_DISABLED __HAL_RCC_SDMMC1_IS_CLK_DISABLED +#define SdioClockSelection Sdmmc1ClockSelection +#define RCC_PERIPHCLK_SDIO RCC_PERIPHCLK_SDMMC1 +#define __HAL_RCC_SDIO_CONFIG __HAL_RCC_SDMMC1_CONFIG +#define __HAL_RCC_GET_SDIO_SOURCE __HAL_RCC_GET_SDMMC1_SOURCE +#endif + +#if defined(STM32F7) +#define RCC_SDIOCLKSOURCE_CLK48 RCC_SDMMC1CLKSOURCE_CLK48 +#define RCC_SDIOCLKSOURCE_SYSCLK RCC_SDMMC1CLKSOURCE_SYSCLK +#endif + +#if defined(STM32H7) +#define __HAL_RCC_USB_OTG_HS_CLK_ENABLE() __HAL_RCC_USB1_OTG_HS_CLK_ENABLE() +#define __HAL_RCC_USB_OTG_HS_ULPI_CLK_ENABLE() __HAL_RCC_USB1_OTG_HS_ULPI_CLK_ENABLE() +#define __HAL_RCC_USB_OTG_HS_CLK_DISABLE() __HAL_RCC_USB1_OTG_HS_CLK_DISABLE() +#define __HAL_RCC_USB_OTG_HS_ULPI_CLK_DISABLE() __HAL_RCC_USB1_OTG_HS_ULPI_CLK_DISABLE() +#define __HAL_RCC_USB_OTG_HS_FORCE_RESET() __HAL_RCC_USB1_OTG_HS_FORCE_RESET() +#define __HAL_RCC_USB_OTG_HS_RELEASE_RESET() __HAL_RCC_USB1_OTG_HS_RELEASE_RESET() +#define __HAL_RCC_USB_OTG_HS_CLK_SLEEP_ENABLE() __HAL_RCC_USB1_OTG_HS_CLK_SLEEP_ENABLE() +#define __HAL_RCC_USB_OTG_HS_ULPI_CLK_SLEEP_ENABLE() __HAL_RCC_USB1_OTG_HS_ULPI_CLK_SLEEP_ENABLE() +#define __HAL_RCC_USB_OTG_HS_CLK_SLEEP_DISABLE() __HAL_RCC_USB1_OTG_HS_CLK_SLEEP_DISABLE() +#define __HAL_RCC_USB_OTG_HS_ULPI_CLK_SLEEP_DISABLE() __HAL_RCC_USB1_OTG_HS_ULPI_CLK_SLEEP_DISABLE() + +#define __HAL_RCC_USB_OTG_FS_CLK_ENABLE() __HAL_RCC_USB2_OTG_FS_CLK_ENABLE() +#define __HAL_RCC_USB_OTG_FS_ULPI_CLK_ENABLE() __HAL_RCC_USB2_OTG_FS_ULPI_CLK_ENABLE() +#define __HAL_RCC_USB_OTG_FS_CLK_DISABLE() __HAL_RCC_USB2_OTG_FS_CLK_DISABLE() +#define __HAL_RCC_USB_OTG_FS_ULPI_CLK_DISABLE() __HAL_RCC_USB2_OTG_FS_ULPI_CLK_DISABLE() +#define __HAL_RCC_USB_OTG_FS_FORCE_RESET() __HAL_RCC_USB2_OTG_FS_FORCE_RESET() +#define __HAL_RCC_USB_OTG_FS_RELEASE_RESET() __HAL_RCC_USB2_OTG_FS_RELEASE_RESET() +#define __HAL_RCC_USB_OTG_FS_CLK_SLEEP_ENABLE() __HAL_RCC_USB2_OTG_FS_CLK_SLEEP_ENABLE() +#define __HAL_RCC_USB_OTG_FS_ULPI_CLK_SLEEP_ENABLE() __HAL_RCC_USB2_OTG_FS_ULPI_CLK_SLEEP_ENABLE() +#define __HAL_RCC_USB_OTG_FS_CLK_SLEEP_DISABLE() __HAL_RCC_USB2_OTG_FS_CLK_SLEEP_DISABLE() +#define __HAL_RCC_USB_OTG_FS_ULPI_CLK_SLEEP_DISABLE() __HAL_RCC_USB2_OTG_FS_ULPI_CLK_SLEEP_DISABLE() +#endif + +#define __HAL_RCC_I2SCLK __HAL_RCC_I2S_CONFIG +#define __HAL_RCC_I2SCLK_CONFIG __HAL_RCC_I2S_CONFIG + +#define __RCC_PLLSRC RCC_GET_PLL_OSCSOURCE + +#define IS_RCC_MSIRANGE IS_RCC_MSI_CLOCK_RANGE +#define IS_RCC_RTCCLK_SOURCE IS_RCC_RTCCLKSOURCE +#define IS_RCC_SYSCLK_DIV IS_RCC_HCLK +#define IS_RCC_HCLK_DIV IS_RCC_PCLK +#define IS_RCC_PERIPHCLK IS_RCC_PERIPHCLOCK + +#define RCC_IT_HSI14 RCC_IT_HSI14RDY + +#define RCC_IT_CSSLSE RCC_IT_LSECSS +#define RCC_IT_CSSHSE RCC_IT_CSS + 
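(Editor's aside, not part of the committed header: the SDIO/SDMMC blocks above alias in both directions — on STM32F4 the newer __HAL_RCC_SDMMC1_* names fall back to the native SDIO macros, while on STM32F7/L4 the older __HAL_RCC_SDIO_* names forward to SDMMC1 — so card-interface code can keep a single spelling across families. A small sketch under that assumption; sd_interface_clock_on is an illustrative name.)

    #include "stm32f7xx_hal.h"

    static void sd_interface_clock_on(void)
    {
      /* On this F7 target the SDIO name is an alias for __HAL_RCC_SDMMC1_CLK_ENABLE();
       * on an F4 target the same call would resolve to the native SDIO macro. */
      __HAL_RCC_SDIO_CLK_ENABLE();
    }
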
+#define RCC_PLLMUL_3 RCC_PLL_MUL3 +#define RCC_PLLMUL_4 RCC_PLL_MUL4 +#define RCC_PLLMUL_6 RCC_PLL_MUL6 +#define RCC_PLLMUL_8 RCC_PLL_MUL8 +#define RCC_PLLMUL_12 RCC_PLL_MUL12 +#define RCC_PLLMUL_16 RCC_PLL_MUL16 +#define RCC_PLLMUL_24 RCC_PLL_MUL24 +#define RCC_PLLMUL_32 RCC_PLL_MUL32 +#define RCC_PLLMUL_48 RCC_PLL_MUL48 + +#define RCC_PLLDIV_2 RCC_PLL_DIV2 +#define RCC_PLLDIV_3 RCC_PLL_DIV3 +#define RCC_PLLDIV_4 RCC_PLL_DIV4 + +#define IS_RCC_MCOSOURCE IS_RCC_MCO1SOURCE +#define __HAL_RCC_MCO_CONFIG __HAL_RCC_MCO1_CONFIG +#define RCC_MCO_NODIV RCC_MCODIV_1 +#define RCC_MCO_DIV1 RCC_MCODIV_1 +#define RCC_MCO_DIV2 RCC_MCODIV_2 +#define RCC_MCO_DIV4 RCC_MCODIV_4 +#define RCC_MCO_DIV8 RCC_MCODIV_8 +#define RCC_MCO_DIV16 RCC_MCODIV_16 +#define RCC_MCO_DIV32 RCC_MCODIV_32 +#define RCC_MCO_DIV64 RCC_MCODIV_64 +#define RCC_MCO_DIV128 RCC_MCODIV_128 +#define RCC_MCOSOURCE_NONE RCC_MCO1SOURCE_NOCLOCK +#define RCC_MCOSOURCE_LSI RCC_MCO1SOURCE_LSI +#define RCC_MCOSOURCE_LSE RCC_MCO1SOURCE_LSE +#define RCC_MCOSOURCE_SYSCLK RCC_MCO1SOURCE_SYSCLK +#define RCC_MCOSOURCE_HSI RCC_MCO1SOURCE_HSI +#define RCC_MCOSOURCE_HSI14 RCC_MCO1SOURCE_HSI14 +#define RCC_MCOSOURCE_HSI48 RCC_MCO1SOURCE_HSI48 +#define RCC_MCOSOURCE_HSE RCC_MCO1SOURCE_HSE +#define RCC_MCOSOURCE_PLLCLK_DIV1 RCC_MCO1SOURCE_PLLCLK +#define RCC_MCOSOURCE_PLLCLK_NODIV RCC_MCO1SOURCE_PLLCLK +#define RCC_MCOSOURCE_PLLCLK_DIV2 RCC_MCO1SOURCE_PLLCLK_DIV2 + +#if defined(STM32L4) || defined(STM32WB) || defined(STM32G0) || defined(STM32G4) || defined(STM32L5) || defined(STM32WL) +#define RCC_RTCCLKSOURCE_NO_CLK RCC_RTCCLKSOURCE_NONE +#else +#define RCC_RTCCLKSOURCE_NONE RCC_RTCCLKSOURCE_NO_CLK +#endif + +#define RCC_USBCLK_PLLSAI1 RCC_USBCLKSOURCE_PLLSAI1 +#define RCC_USBCLK_PLL RCC_USBCLKSOURCE_PLL +#define RCC_USBCLK_MSI RCC_USBCLKSOURCE_MSI +#define RCC_USBCLKSOURCE_PLLCLK RCC_USBCLKSOURCE_PLL +#define RCC_USBPLLCLK_DIV1 RCC_USBCLKSOURCE_PLL +#define RCC_USBPLLCLK_DIV1_5 RCC_USBCLKSOURCE_PLL_DIV1_5 +#define RCC_USBPLLCLK_DIV2 RCC_USBCLKSOURCE_PLL_DIV2 +#define RCC_USBPLLCLK_DIV3 RCC_USBCLKSOURCE_PLL_DIV3 + +#define HSION_BitNumber RCC_HSION_BIT_NUMBER +#define HSION_BITNUMBER RCC_HSION_BIT_NUMBER +#define HSEON_BitNumber RCC_HSEON_BIT_NUMBER +#define HSEON_BITNUMBER RCC_HSEON_BIT_NUMBER +#define MSION_BITNUMBER RCC_MSION_BIT_NUMBER +#define CSSON_BitNumber RCC_CSSON_BIT_NUMBER +#define CSSON_BITNUMBER RCC_CSSON_BIT_NUMBER +#define PLLON_BitNumber RCC_PLLON_BIT_NUMBER +#define PLLON_BITNUMBER RCC_PLLON_BIT_NUMBER +#define PLLI2SON_BitNumber RCC_PLLI2SON_BIT_NUMBER +#define I2SSRC_BitNumber RCC_I2SSRC_BIT_NUMBER +#define RTCEN_BitNumber RCC_RTCEN_BIT_NUMBER +#define RTCEN_BITNUMBER RCC_RTCEN_BIT_NUMBER +#define BDRST_BitNumber RCC_BDRST_BIT_NUMBER +#define BDRST_BITNUMBER RCC_BDRST_BIT_NUMBER +#define RTCRST_BITNUMBER RCC_RTCRST_BIT_NUMBER +#define LSION_BitNumber RCC_LSION_BIT_NUMBER +#define LSION_BITNUMBER RCC_LSION_BIT_NUMBER +#define LSEON_BitNumber RCC_LSEON_BIT_NUMBER +#define LSEON_BITNUMBER RCC_LSEON_BIT_NUMBER +#define LSEBYP_BITNUMBER RCC_LSEBYP_BIT_NUMBER +#define PLLSAION_BitNumber RCC_PLLSAION_BIT_NUMBER +#define TIMPRE_BitNumber RCC_TIMPRE_BIT_NUMBER +#define RMVF_BitNumber RCC_RMVF_BIT_NUMBER +#define RMVF_BITNUMBER RCC_RMVF_BIT_NUMBER +#define RCC_CR2_HSI14TRIM_BitNumber RCC_HSI14TRIM_BIT_NUMBER +#define CR_BYTE2_ADDRESS RCC_CR_BYTE2_ADDRESS +#define CIR_BYTE1_ADDRESS RCC_CIR_BYTE1_ADDRESS +#define CIR_BYTE2_ADDRESS RCC_CIR_BYTE2_ADDRESS +#define BDCR_BYTE0_ADDRESS RCC_BDCR_BYTE0_ADDRESS +#define DBP_TIMEOUT_VALUE 
RCC_DBP_TIMEOUT_VALUE +#define LSE_TIMEOUT_VALUE RCC_LSE_TIMEOUT_VALUE + +#define CR_HSION_BB RCC_CR_HSION_BB +#define CR_CSSON_BB RCC_CR_CSSON_BB +#define CR_PLLON_BB RCC_CR_PLLON_BB +#define CR_PLLI2SON_BB RCC_CR_PLLI2SON_BB +#define CR_MSION_BB RCC_CR_MSION_BB +#define CSR_LSION_BB RCC_CSR_LSION_BB +#define CSR_LSEON_BB RCC_CSR_LSEON_BB +#define CSR_LSEBYP_BB RCC_CSR_LSEBYP_BB +#define CSR_RTCEN_BB RCC_CSR_RTCEN_BB +#define CSR_RTCRST_BB RCC_CSR_RTCRST_BB +#define CFGR_I2SSRC_BB RCC_CFGR_I2SSRC_BB +#define BDCR_RTCEN_BB RCC_BDCR_RTCEN_BB +#define BDCR_BDRST_BB RCC_BDCR_BDRST_BB +#define CR_HSEON_BB RCC_CR_HSEON_BB +#define CSR_RMVF_BB RCC_CSR_RMVF_BB +#define CR_PLLSAION_BB RCC_CR_PLLSAION_BB +#define DCKCFGR_TIMPRE_BB RCC_DCKCFGR_TIMPRE_BB + +#define __HAL_RCC_CRS_ENABLE_FREQ_ERROR_COUNTER __HAL_RCC_CRS_FREQ_ERROR_COUNTER_ENABLE +#define __HAL_RCC_CRS_DISABLE_FREQ_ERROR_COUNTER __HAL_RCC_CRS_FREQ_ERROR_COUNTER_DISABLE +#define __HAL_RCC_CRS_ENABLE_AUTOMATIC_CALIB __HAL_RCC_CRS_AUTOMATIC_CALIB_ENABLE +#define __HAL_RCC_CRS_DISABLE_AUTOMATIC_CALIB __HAL_RCC_CRS_AUTOMATIC_CALIB_DISABLE +#define __HAL_RCC_CRS_CALCULATE_RELOADVALUE __HAL_RCC_CRS_RELOADVALUE_CALCULATE + +#define __HAL_RCC_GET_IT_SOURCE __HAL_RCC_GET_IT + +#define RCC_CRS_SYNCWARM RCC_CRS_SYNCWARN +#define RCC_CRS_TRIMOV RCC_CRS_TRIMOVF + +#define RCC_PERIPHCLK_CK48 RCC_PERIPHCLK_CLK48 +#define RCC_CK48CLKSOURCE_PLLQ RCC_CLK48CLKSOURCE_PLLQ +#define RCC_CK48CLKSOURCE_PLLSAIP RCC_CLK48CLKSOURCE_PLLSAIP +#define RCC_CK48CLKSOURCE_PLLI2SQ RCC_CLK48CLKSOURCE_PLLI2SQ +#define IS_RCC_CK48CLKSOURCE IS_RCC_CLK48CLKSOURCE +#define RCC_SDIOCLKSOURCE_CK48 RCC_SDIOCLKSOURCE_CLK48 + +#define __HAL_RCC_DFSDM_CLK_ENABLE __HAL_RCC_DFSDM1_CLK_ENABLE +#define __HAL_RCC_DFSDM_CLK_DISABLE __HAL_RCC_DFSDM1_CLK_DISABLE +#define __HAL_RCC_DFSDM_IS_CLK_ENABLED __HAL_RCC_DFSDM1_IS_CLK_ENABLED +#define __HAL_RCC_DFSDM_IS_CLK_DISABLED __HAL_RCC_DFSDM1_IS_CLK_DISABLED +#define __HAL_RCC_DFSDM_FORCE_RESET __HAL_RCC_DFSDM1_FORCE_RESET +#define __HAL_RCC_DFSDM_RELEASE_RESET __HAL_RCC_DFSDM1_RELEASE_RESET +#define __HAL_RCC_DFSDM_CLK_SLEEP_ENABLE __HAL_RCC_DFSDM1_CLK_SLEEP_ENABLE +#define __HAL_RCC_DFSDM_CLK_SLEEP_DISABLE __HAL_RCC_DFSDM1_CLK_SLEEP_DISABLE +#define __HAL_RCC_DFSDM_IS_CLK_SLEEP_ENABLED __HAL_RCC_DFSDM1_IS_CLK_SLEEP_ENABLED +#define __HAL_RCC_DFSDM_IS_CLK_SLEEP_DISABLED __HAL_RCC_DFSDM1_IS_CLK_SLEEP_DISABLED +#define DfsdmClockSelection Dfsdm1ClockSelection +#define RCC_PERIPHCLK_DFSDM RCC_PERIPHCLK_DFSDM1 +#define RCC_DFSDMCLKSOURCE_PCLK RCC_DFSDM1CLKSOURCE_PCLK2 +#define RCC_DFSDMCLKSOURCE_SYSCLK RCC_DFSDM1CLKSOURCE_SYSCLK +#define __HAL_RCC_DFSDM_CONFIG __HAL_RCC_DFSDM1_CONFIG +#define __HAL_RCC_GET_DFSDM_SOURCE __HAL_RCC_GET_DFSDM1_SOURCE +#define RCC_DFSDM1CLKSOURCE_PCLK RCC_DFSDM1CLKSOURCE_PCLK2 +#define RCC_SWPMI1CLKSOURCE_PCLK RCC_SWPMI1CLKSOURCE_PCLK1 +#define RCC_LPTIM1CLKSOURCE_PCLK RCC_LPTIM1CLKSOURCE_PCLK1 +#define RCC_LPTIM2CLKSOURCE_PCLK RCC_LPTIM2CLKSOURCE_PCLK1 + +#define RCC_DFSDM1AUDIOCLKSOURCE_I2SAPB1 RCC_DFSDM1AUDIOCLKSOURCE_I2S1 +#define RCC_DFSDM1AUDIOCLKSOURCE_I2SAPB2 RCC_DFSDM1AUDIOCLKSOURCE_I2S2 +#define RCC_DFSDM2AUDIOCLKSOURCE_I2SAPB1 RCC_DFSDM2AUDIOCLKSOURCE_I2S1 +#define RCC_DFSDM2AUDIOCLKSOURCE_I2SAPB2 RCC_DFSDM2AUDIOCLKSOURCE_I2S2 +#define RCC_DFSDM1CLKSOURCE_APB2 RCC_DFSDM1CLKSOURCE_PCLK2 +#define RCC_DFSDM2CLKSOURCE_APB2 RCC_DFSDM2CLKSOURCE_PCLK2 +#define RCC_FMPI2C1CLKSOURCE_APB RCC_FMPI2C1CLKSOURCE_PCLK1 +#if defined(STM32U5) +#define MSIKPLLModeSEL RCC_MSIKPLL_MODE_SEL +#define MSISPLLModeSEL 
RCC_MSISPLL_MODE_SEL +#define __HAL_RCC_AHB21_CLK_DISABLE __HAL_RCC_AHB2_1_CLK_DISABLE +#define __HAL_RCC_AHB22_CLK_DISABLE __HAL_RCC_AHB2_2_CLK_DISABLE +#define __HAL_RCC_AHB1_CLK_Disable_Clear __HAL_RCC_AHB1_CLK_ENABLE +#define __HAL_RCC_AHB21_CLK_Disable_Clear __HAL_RCC_AHB2_1_CLK_ENABLE +#define __HAL_RCC_AHB22_CLK_Disable_Clear __HAL_RCC_AHB2_2_CLK_ENABLE +#define __HAL_RCC_AHB3_CLK_Disable_Clear __HAL_RCC_AHB3_CLK_ENABLE +#define __HAL_RCC_APB1_CLK_Disable_Clear __HAL_RCC_APB1_CLK_ENABLE +#define __HAL_RCC_APB2_CLK_Disable_Clear __HAL_RCC_APB2_CLK_ENABLE +#define __HAL_RCC_APB3_CLK_Disable_Clear __HAL_RCC_APB3_CLK_ENABLE +#define IS_RCC_MSIPLLModeSelection IS_RCC_MSIPLLMODE_SELECT +#define RCC_PERIPHCLK_CLK48 RCC_PERIPHCLK_ICLK +#define RCC_CLK48CLKSOURCE_HSI48 RCC_ICLK_CLKSOURCE_HSI48 +#define RCC_CLK48CLKSOURCE_PLL2 RCC_ICLK_CLKSOURCE_PLL2 +#define RCC_CLK48CLKSOURCE_PLL1 RCC_ICLK_CLKSOURCE_PLL1 +#define RCC_CLK48CLKSOURCE_MSIK RCC_ICLK_CLKSOURCE_MSIK +#define __HAL_RCC_ADC1_CLK_ENABLE __HAL_RCC_ADC12_CLK_ENABLE +#define __HAL_RCC_ADC1_CLK_DISABLE __HAL_RCC_ADC12_CLK_DISABLE +#define __HAL_RCC_ADC1_IS_CLK_ENABLED __HAL_RCC_ADC12_IS_CLK_ENABLED +#define __HAL_RCC_ADC1_IS_CLK_DISABLED __HAL_RCC_ADC12_IS_CLK_DISABLED +#define __HAL_RCC_ADC1_FORCE_RESET __HAL_RCC_ADC12_FORCE_RESET +#define __HAL_RCC_ADC1_RELEASE_RESET __HAL_RCC_ADC12_RELEASE_RESET +#endif + +/** + * @} + */ + +/** @defgroup HAL_RNG_Aliased_Macros HAL RNG Aliased Macros maintained for legacy purpose + * @{ + */ +#define HAL_RNG_ReadyCallback(__HANDLE__) HAL_RNG_ReadyDataCallback((__HANDLE__), uint32_t random32bit) + +/** + * @} + */ + +/** @defgroup HAL_RTC_Aliased_Macros HAL RTC Aliased Macros maintained for legacy purpose + * @{ + */ +#if defined (STM32G0) || defined (STM32L5) || defined (STM32L412xx) || defined (STM32L422xx) || defined (STM32L4P5xx) || defined (STM32L4Q5xx) || defined (STM32G4) || defined (STM32WL) || defined (STM32U5) +#else +#define __HAL_RTC_CLEAR_FLAG __HAL_RTC_EXTI_CLEAR_FLAG +#endif +#define __HAL_RTC_DISABLE_IT __HAL_RTC_EXTI_DISABLE_IT +#define __HAL_RTC_ENABLE_IT __HAL_RTC_EXTI_ENABLE_IT + +#if defined (STM32F1) +#define __HAL_RTC_EXTI_CLEAR_FLAG(RTC_EXTI_LINE_ALARM_EVENT) __HAL_RTC_ALARM_EXTI_CLEAR_FLAG() + +#define __HAL_RTC_EXTI_ENABLE_IT(RTC_EXTI_LINE_ALARM_EVENT) __HAL_RTC_ALARM_EXTI_ENABLE_IT() + +#define __HAL_RTC_EXTI_DISABLE_IT(RTC_EXTI_LINE_ALARM_EVENT) __HAL_RTC_ALARM_EXTI_DISABLE_IT() + +#define __HAL_RTC_EXTI_GET_FLAG(RTC_EXTI_LINE_ALARM_EVENT) __HAL_RTC_ALARM_EXTI_GET_FLAG() + +#define __HAL_RTC_EXTI_GENERATE_SWIT(RTC_EXTI_LINE_ALARM_EVENT) __HAL_RTC_ALARM_EXTI_GENERATE_SWIT() +#else +#define __HAL_RTC_EXTI_CLEAR_FLAG(__EXTI_LINE__) (((__EXTI_LINE__) == RTC_EXTI_LINE_ALARM_EVENT) ? __HAL_RTC_ALARM_EXTI_CLEAR_FLAG() : \ + (((__EXTI_LINE__) == RTC_EXTI_LINE_WAKEUPTIMER_EVENT) ? __HAL_RTC_WAKEUPTIMER_EXTI_CLEAR_FLAG() : \ + __HAL_RTC_TAMPER_TIMESTAMP_EXTI_CLEAR_FLAG())) +#define __HAL_RTC_EXTI_ENABLE_IT(__EXTI_LINE__) (((__EXTI_LINE__) == RTC_EXTI_LINE_ALARM_EVENT) ? __HAL_RTC_ALARM_EXTI_ENABLE_IT() : \ + (((__EXTI_LINE__) == RTC_EXTI_LINE_WAKEUPTIMER_EVENT) ? __HAL_RTC_WAKEUPTIMER_EXTI_ENABLE_IT() : \ + __HAL_RTC_TAMPER_TIMESTAMP_EXTI_ENABLE_IT())) +#define __HAL_RTC_EXTI_DISABLE_IT(__EXTI_LINE__) (((__EXTI_LINE__) == RTC_EXTI_LINE_ALARM_EVENT) ? __HAL_RTC_ALARM_EXTI_DISABLE_IT() : \ + (((__EXTI_LINE__) == RTC_EXTI_LINE_WAKEUPTIMER_EVENT) ? 
__HAL_RTC_WAKEUPTIMER_EXTI_DISABLE_IT() : \ + __HAL_RTC_TAMPER_TIMESTAMP_EXTI_DISABLE_IT())) +#define __HAL_RTC_EXTI_GET_FLAG(__EXTI_LINE__) (((__EXTI_LINE__) == RTC_EXTI_LINE_ALARM_EVENT) ? __HAL_RTC_ALARM_EXTI_GET_FLAG() : \ + (((__EXTI_LINE__) == RTC_EXTI_LINE_WAKEUPTIMER_EVENT) ? __HAL_RTC_WAKEUPTIMER_EXTI_GET_FLAG() : \ + __HAL_RTC_TAMPER_TIMESTAMP_EXTI_GET_FLAG())) +#define __HAL_RTC_EXTI_GENERATE_SWIT(__EXTI_LINE__) (((__EXTI_LINE__) == RTC_EXTI_LINE_ALARM_EVENT) ? __HAL_RTC_ALARM_EXTI_GENERATE_SWIT() : \ + (((__EXTI_LINE__) == RTC_EXTI_LINE_WAKEUPTIMER_EVENT) ? __HAL_RTC_WAKEUPTIMER_EXTI_GENERATE_SWIT() : \ + __HAL_RTC_TAMPER_TIMESTAMP_EXTI_GENERATE_SWIT())) +#endif /* STM32F1 */ + +#define IS_ALARM IS_RTC_ALARM +#define IS_ALARM_MASK IS_RTC_ALARM_MASK +#define IS_TAMPER IS_RTC_TAMPER +#define IS_TAMPER_ERASE_MODE IS_RTC_TAMPER_ERASE_MODE +#define IS_TAMPER_FILTER IS_RTC_TAMPER_FILTER +#define IS_TAMPER_INTERRUPT IS_RTC_TAMPER_INTERRUPT +#define IS_TAMPER_MASKFLAG_STATE IS_RTC_TAMPER_MASKFLAG_STATE +#define IS_TAMPER_PRECHARGE_DURATION IS_RTC_TAMPER_PRECHARGE_DURATION +#define IS_TAMPER_PULLUP_STATE IS_RTC_TAMPER_PULLUP_STATE +#define IS_TAMPER_SAMPLING_FREQ IS_RTC_TAMPER_SAMPLING_FREQ +#define IS_TAMPER_TIMESTAMPONTAMPER_DETECTION IS_RTC_TAMPER_TIMESTAMPONTAMPER_DETECTION +#define IS_TAMPER_TRIGGER IS_RTC_TAMPER_TRIGGER +#define IS_WAKEUP_CLOCK IS_RTC_WAKEUP_CLOCK +#define IS_WAKEUP_COUNTER IS_RTC_WAKEUP_COUNTER + +#define __RTC_WRITEPROTECTION_ENABLE __HAL_RTC_WRITEPROTECTION_ENABLE +#define __RTC_WRITEPROTECTION_DISABLE __HAL_RTC_WRITEPROTECTION_DISABLE + +/** + * @} + */ + +/** @defgroup HAL_SD_Aliased_Macros HAL SD/MMC Aliased Macros maintained for legacy purpose + * @{ + */ + +#define SD_OCR_CID_CSD_OVERWRIETE SD_OCR_CID_CSD_OVERWRITE +#define SD_CMD_SD_APP_STAUS SD_CMD_SD_APP_STATUS + +#if !defined(STM32F1) && !defined(STM32F2) && !defined(STM32F4) && !defined(STM32L1) +#define eMMC_HIGH_VOLTAGE_RANGE EMMC_HIGH_VOLTAGE_RANGE +#define eMMC_DUAL_VOLTAGE_RANGE EMMC_DUAL_VOLTAGE_RANGE +#define eMMC_LOW_VOLTAGE_RANGE EMMC_LOW_VOLTAGE_RANGE + +#define SDMMC_NSpeed_CLK_DIV SDMMC_NSPEED_CLK_DIV +#define SDMMC_HSpeed_CLK_DIV SDMMC_HSPEED_CLK_DIV +#endif + +#if defined(STM32F4) || defined(STM32F2) +#define SD_SDMMC_DISABLED SD_SDIO_DISABLED +#define SD_SDMMC_FUNCTION_BUSY SD_SDIO_FUNCTION_BUSY +#define SD_SDMMC_FUNCTION_FAILED SD_SDIO_FUNCTION_FAILED +#define SD_SDMMC_UNKNOWN_FUNCTION SD_SDIO_UNKNOWN_FUNCTION +#define SD_CMD_SDMMC_SEN_OP_COND SD_CMD_SDIO_SEN_OP_COND +#define SD_CMD_SDMMC_RW_DIRECT SD_CMD_SDIO_RW_DIRECT +#define SD_CMD_SDMMC_RW_EXTENDED SD_CMD_SDIO_RW_EXTENDED +#define __HAL_SD_SDMMC_ENABLE __HAL_SD_SDIO_ENABLE +#define __HAL_SD_SDMMC_DISABLE __HAL_SD_SDIO_DISABLE +#define __HAL_SD_SDMMC_DMA_ENABLE __HAL_SD_SDIO_DMA_ENABLE +#define __HAL_SD_SDMMC_DMA_DISABLE __HAL_SD_SDIO_DMA_DISABL +#define __HAL_SD_SDMMC_ENABLE_IT __HAL_SD_SDIO_ENABLE_IT +#define __HAL_SD_SDMMC_DISABLE_IT __HAL_SD_SDIO_DISABLE_IT +#define __HAL_SD_SDMMC_GET_FLAG __HAL_SD_SDIO_GET_FLAG +#define __HAL_SD_SDMMC_CLEAR_FLAG __HAL_SD_SDIO_CLEAR_FLAG +#define __HAL_SD_SDMMC_GET_IT __HAL_SD_SDIO_GET_IT +#define __HAL_SD_SDMMC_CLEAR_IT __HAL_SD_SDIO_CLEAR_IT +#define SDMMC_STATIC_FLAGS SDIO_STATIC_FLAGS +#define SDMMC_CMD0TIMEOUT SDIO_CMD0TIMEOUT +#define SD_SDMMC_SEND_IF_COND SD_SDIO_SEND_IF_COND +/* alias CMSIS */ +#define SDMMC1_IRQn SDIO_IRQn +#define SDMMC1_IRQHandler SDIO_IRQHandler +#endif + +#if defined(STM32F7) || defined(STM32L4) +#define SD_SDIO_DISABLED SD_SDMMC_DISABLED +#define 
SD_SDIO_FUNCTION_BUSY SD_SDMMC_FUNCTION_BUSY +#define SD_SDIO_FUNCTION_FAILED SD_SDMMC_FUNCTION_FAILED +#define SD_SDIO_UNKNOWN_FUNCTION SD_SDMMC_UNKNOWN_FUNCTION +#define SD_CMD_SDIO_SEN_OP_COND SD_CMD_SDMMC_SEN_OP_COND +#define SD_CMD_SDIO_RW_DIRECT SD_CMD_SDMMC_RW_DIRECT +#define SD_CMD_SDIO_RW_EXTENDED SD_CMD_SDMMC_RW_EXTENDED +#define __HAL_SD_SDIO_ENABLE __HAL_SD_SDMMC_ENABLE +#define __HAL_SD_SDIO_DISABLE __HAL_SD_SDMMC_DISABLE +#define __HAL_SD_SDIO_DMA_ENABLE __HAL_SD_SDMMC_DMA_ENABLE +#define __HAL_SD_SDIO_DMA_DISABL __HAL_SD_SDMMC_DMA_DISABLE +#define __HAL_SD_SDIO_ENABLE_IT __HAL_SD_SDMMC_ENABLE_IT +#define __HAL_SD_SDIO_DISABLE_IT __HAL_SD_SDMMC_DISABLE_IT +#define __HAL_SD_SDIO_GET_FLAG __HAL_SD_SDMMC_GET_FLAG +#define __HAL_SD_SDIO_CLEAR_FLAG __HAL_SD_SDMMC_CLEAR_FLAG +#define __HAL_SD_SDIO_GET_IT __HAL_SD_SDMMC_GET_IT +#define __HAL_SD_SDIO_CLEAR_IT __HAL_SD_SDMMC_CLEAR_IT +#define SDIO_STATIC_FLAGS SDMMC_STATIC_FLAGS +#define SDIO_CMD0TIMEOUT SDMMC_CMD0TIMEOUT +#define SD_SDIO_SEND_IF_COND SD_SDMMC_SEND_IF_COND +/* alias CMSIS for compatibilities */ +#define SDIO_IRQn SDMMC1_IRQn +#define SDIO_IRQHandler SDMMC1_IRQHandler +#endif + +#if defined(STM32F7) || defined(STM32F4) || defined(STM32F2) || defined(STM32L4) || defined(STM32H7) +#define HAL_SD_CardCIDTypedef HAL_SD_CardCIDTypeDef +#define HAL_SD_CardCSDTypedef HAL_SD_CardCSDTypeDef +#define HAL_SD_CardStatusTypedef HAL_SD_CardStatusTypeDef +#define HAL_SD_CardStateTypedef HAL_SD_CardStateTypeDef +#endif + +#if defined(STM32H7) || defined(STM32L5) +#define HAL_MMCEx_Read_DMADoubleBuffer0CpltCallback HAL_MMCEx_Read_DMADoubleBuf0CpltCallback +#define HAL_MMCEx_Read_DMADoubleBuffer1CpltCallback HAL_MMCEx_Read_DMADoubleBuf1CpltCallback +#define HAL_MMCEx_Write_DMADoubleBuffer0CpltCallback HAL_MMCEx_Write_DMADoubleBuf0CpltCallback +#define HAL_MMCEx_Write_DMADoubleBuffer1CpltCallback HAL_MMCEx_Write_DMADoubleBuf1CpltCallback +#define HAL_SDEx_Read_DMADoubleBuffer0CpltCallback HAL_SDEx_Read_DMADoubleBuf0CpltCallback +#define HAL_SDEx_Read_DMADoubleBuffer1CpltCallback HAL_SDEx_Read_DMADoubleBuf1CpltCallback +#define HAL_SDEx_Write_DMADoubleBuffer0CpltCallback HAL_SDEx_Write_DMADoubleBuf0CpltCallback +#define HAL_SDEx_Write_DMADoubleBuffer1CpltCallback HAL_SDEx_Write_DMADoubleBuf1CpltCallback +#define HAL_SD_DriveTransciver_1_8V_Callback HAL_SD_DriveTransceiver_1_8V_Callback +#endif +/** + * @} + */ + +/** @defgroup HAL_SMARTCARD_Aliased_Macros HAL SMARTCARD Aliased Macros maintained for legacy purpose + * @{ + */ + +#define __SMARTCARD_ENABLE_IT __HAL_SMARTCARD_ENABLE_IT +#define __SMARTCARD_DISABLE_IT __HAL_SMARTCARD_DISABLE_IT +#define __SMARTCARD_ENABLE __HAL_SMARTCARD_ENABLE +#define __SMARTCARD_DISABLE __HAL_SMARTCARD_DISABLE +#define __SMARTCARD_DMA_REQUEST_ENABLE __HAL_SMARTCARD_DMA_REQUEST_ENABLE +#define __SMARTCARD_DMA_REQUEST_DISABLE __HAL_SMARTCARD_DMA_REQUEST_DISABLE + +#define __HAL_SMARTCARD_GETCLOCKSOURCE SMARTCARD_GETCLOCKSOURCE +#define __SMARTCARD_GETCLOCKSOURCE SMARTCARD_GETCLOCKSOURCE + +#define IS_SMARTCARD_ONEBIT_SAMPLING IS_SMARTCARD_ONE_BIT_SAMPLE + +/** + * @} + */ + +/** @defgroup HAL_SMBUS_Aliased_Macros HAL SMBUS Aliased Macros maintained for legacy purpose + * @{ + */ +#define __HAL_SMBUS_RESET_CR1 SMBUS_RESET_CR1 +#define __HAL_SMBUS_RESET_CR2 SMBUS_RESET_CR2 +#define __HAL_SMBUS_GENERATE_START SMBUS_GENERATE_START +#define __HAL_SMBUS_GET_ADDR_MATCH SMBUS_GET_ADDR_MATCH +#define __HAL_SMBUS_GET_DIR SMBUS_GET_DIR +#define __HAL_SMBUS_GET_STOP_MODE SMBUS_GET_STOP_MODE +#define 
__HAL_SMBUS_GET_PEC_MODE SMBUS_GET_PEC_MODE +#define __HAL_SMBUS_GET_ALERT_ENABLED SMBUS_GET_ALERT_ENABLED +/** + * @} + */ + +/** @defgroup HAL_SPI_Aliased_Macros HAL SPI Aliased Macros maintained for legacy purpose + * @{ + */ + +#define __HAL_SPI_1LINE_TX SPI_1LINE_TX +#define __HAL_SPI_1LINE_RX SPI_1LINE_RX +#define __HAL_SPI_RESET_CRC SPI_RESET_CRC + +/** + * @} + */ + +/** @defgroup HAL_UART_Aliased_Macros HAL UART Aliased Macros maintained for legacy purpose + * @{ + */ + +#define __HAL_UART_GETCLOCKSOURCE UART_GETCLOCKSOURCE +#define __HAL_UART_MASK_COMPUTATION UART_MASK_COMPUTATION +#define __UART_GETCLOCKSOURCE UART_GETCLOCKSOURCE +#define __UART_MASK_COMPUTATION UART_MASK_COMPUTATION + +#define IS_UART_WAKEUPMETHODE IS_UART_WAKEUPMETHOD + +#define IS_UART_ONEBIT_SAMPLE IS_UART_ONE_BIT_SAMPLE +#define IS_UART_ONEBIT_SAMPLING IS_UART_ONE_BIT_SAMPLE + +/** + * @} + */ + + +/** @defgroup HAL_USART_Aliased_Macros HAL USART Aliased Macros maintained for legacy purpose + * @{ + */ + +#define __USART_ENABLE_IT __HAL_USART_ENABLE_IT +#define __USART_DISABLE_IT __HAL_USART_DISABLE_IT +#define __USART_ENABLE __HAL_USART_ENABLE +#define __USART_DISABLE __HAL_USART_DISABLE + +#define __HAL_USART_GETCLOCKSOURCE USART_GETCLOCKSOURCE +#define __USART_GETCLOCKSOURCE USART_GETCLOCKSOURCE + +#if defined(STM32F0) || defined(STM32F3) || defined(STM32F7) +#define USART_OVERSAMPLING_16 0x00000000U +#define USART_OVERSAMPLING_8 USART_CR1_OVER8 + +#define IS_USART_OVERSAMPLING(__SAMPLING__) (((__SAMPLING__) == USART_OVERSAMPLING_16) || \ + ((__SAMPLING__) == USART_OVERSAMPLING_8)) +#endif /* STM32F0 || STM32F3 || STM32F7 */ +/** + * @} + */ + +/** @defgroup HAL_USB_Aliased_Macros HAL USB Aliased Macros maintained for legacy purpose + * @{ + */ +#define USB_EXTI_LINE_WAKEUP USB_WAKEUP_EXTI_LINE + +#define USB_FS_EXTI_TRIGGER_RISING_EDGE USB_OTG_FS_WAKEUP_EXTI_RISING_EDGE +#define USB_FS_EXTI_TRIGGER_FALLING_EDGE USB_OTG_FS_WAKEUP_EXTI_FALLING_EDGE +#define USB_FS_EXTI_TRIGGER_BOTH_EDGE USB_OTG_FS_WAKEUP_EXTI_RISING_FALLING_EDGE +#define USB_FS_EXTI_LINE_WAKEUP USB_OTG_FS_WAKEUP_EXTI_LINE + +#define USB_HS_EXTI_TRIGGER_RISING_EDGE USB_OTG_HS_WAKEUP_EXTI_RISING_EDGE +#define USB_HS_EXTI_TRIGGER_FALLING_EDGE USB_OTG_HS_WAKEUP_EXTI_FALLING_EDGE +#define USB_HS_EXTI_TRIGGER_BOTH_EDGE USB_OTG_HS_WAKEUP_EXTI_RISING_FALLING_EDGE +#define USB_HS_EXTI_LINE_WAKEUP USB_OTG_HS_WAKEUP_EXTI_LINE + +#define __HAL_USB_EXTI_ENABLE_IT __HAL_USB_WAKEUP_EXTI_ENABLE_IT +#define __HAL_USB_EXTI_DISABLE_IT __HAL_USB_WAKEUP_EXTI_DISABLE_IT +#define __HAL_USB_EXTI_GET_FLAG __HAL_USB_WAKEUP_EXTI_GET_FLAG +#define __HAL_USB_EXTI_CLEAR_FLAG __HAL_USB_WAKEUP_EXTI_CLEAR_FLAG +#define __HAL_USB_EXTI_SET_RISING_EDGE_TRIGGER __HAL_USB_WAKEUP_EXTI_ENABLE_RISING_EDGE +#define __HAL_USB_EXTI_SET_FALLING_EDGE_TRIGGER __HAL_USB_WAKEUP_EXTI_ENABLE_FALLING_EDGE +#define __HAL_USB_EXTI_SET_FALLINGRISING_TRIGGER __HAL_USB_WAKEUP_EXTI_ENABLE_RISING_FALLING_EDGE + +#define __HAL_USB_FS_EXTI_ENABLE_IT __HAL_USB_OTG_FS_WAKEUP_EXTI_ENABLE_IT +#define __HAL_USB_FS_EXTI_DISABLE_IT __HAL_USB_OTG_FS_WAKEUP_EXTI_DISABLE_IT +#define __HAL_USB_FS_EXTI_GET_FLAG __HAL_USB_OTG_FS_WAKEUP_EXTI_GET_FLAG +#define __HAL_USB_FS_EXTI_CLEAR_FLAG __HAL_USB_OTG_FS_WAKEUP_EXTI_CLEAR_FLAG +#define __HAL_USB_FS_EXTI_SET_RISING_EGDE_TRIGGER __HAL_USB_OTG_FS_WAKEUP_EXTI_ENABLE_RISING_EDGE +#define __HAL_USB_FS_EXTI_SET_FALLING_EGDE_TRIGGER __HAL_USB_OTG_FS_WAKEUP_EXTI_ENABLE_FALLING_EDGE +#define __HAL_USB_FS_EXTI_SET_FALLINGRISING_TRIGGER 
__HAL_USB_OTG_FS_WAKEUP_EXTI_ENABLE_RISING_FALLING_EDGE +#define __HAL_USB_FS_EXTI_GENERATE_SWIT __HAL_USB_OTG_FS_WAKEUP_EXTI_GENERATE_SWIT + +#define __HAL_USB_HS_EXTI_ENABLE_IT __HAL_USB_OTG_HS_WAKEUP_EXTI_ENABLE_IT +#define __HAL_USB_HS_EXTI_DISABLE_IT __HAL_USB_OTG_HS_WAKEUP_EXTI_DISABLE_IT +#define __HAL_USB_HS_EXTI_GET_FLAG __HAL_USB_OTG_HS_WAKEUP_EXTI_GET_FLAG +#define __HAL_USB_HS_EXTI_CLEAR_FLAG __HAL_USB_OTG_HS_WAKEUP_EXTI_CLEAR_FLAG +#define __HAL_USB_HS_EXTI_SET_RISING_EGDE_TRIGGER __HAL_USB_OTG_HS_WAKEUP_EXTI_ENABLE_RISING_EDGE +#define __HAL_USB_HS_EXTI_SET_FALLING_EGDE_TRIGGER __HAL_USB_OTG_HS_WAKEUP_EXTI_ENABLE_FALLING_EDGE +#define __HAL_USB_HS_EXTI_SET_FALLINGRISING_TRIGGER __HAL_USB_OTG_HS_WAKEUP_EXTI_ENABLE_RISING_FALLING_EDGE +#define __HAL_USB_HS_EXTI_GENERATE_SWIT __HAL_USB_OTG_HS_WAKEUP_EXTI_GENERATE_SWIT + +#define HAL_PCD_ActiveRemoteWakeup HAL_PCD_ActivateRemoteWakeup +#define HAL_PCD_DeActiveRemoteWakeup HAL_PCD_DeActivateRemoteWakeup + +#define HAL_PCD_SetTxFiFo HAL_PCDEx_SetTxFiFo +#define HAL_PCD_SetRxFiFo HAL_PCDEx_SetRxFiFo +/** + * @} + */ + +/** @defgroup HAL_TIM_Aliased_Macros HAL TIM Aliased Macros maintained for legacy purpose + * @{ + */ +#define __HAL_TIM_SetICPrescalerValue TIM_SET_ICPRESCALERVALUE +#define __HAL_TIM_ResetICPrescalerValue TIM_RESET_ICPRESCALERVALUE + +#define TIM_GET_ITSTATUS __HAL_TIM_GET_IT_SOURCE +#define TIM_GET_CLEAR_IT __HAL_TIM_CLEAR_IT + +#define __HAL_TIM_GET_ITSTATUS __HAL_TIM_GET_IT_SOURCE + +#define __HAL_TIM_DIRECTION_STATUS __HAL_TIM_IS_TIM_COUNTING_DOWN +#define __HAL_TIM_PRESCALER __HAL_TIM_SET_PRESCALER +#define __HAL_TIM_SetCounter __HAL_TIM_SET_COUNTER +#define __HAL_TIM_GetCounter __HAL_TIM_GET_COUNTER +#define __HAL_TIM_SetAutoreload __HAL_TIM_SET_AUTORELOAD +#define __HAL_TIM_GetAutoreload __HAL_TIM_GET_AUTORELOAD +#define __HAL_TIM_SetClockDivision __HAL_TIM_SET_CLOCKDIVISION +#define __HAL_TIM_GetClockDivision __HAL_TIM_GET_CLOCKDIVISION +#define __HAL_TIM_SetICPrescaler __HAL_TIM_SET_ICPRESCALER +#define __HAL_TIM_GetICPrescaler __HAL_TIM_GET_ICPRESCALER +#define __HAL_TIM_SetCompare __HAL_TIM_SET_COMPARE +#define __HAL_TIM_GetCompare __HAL_TIM_GET_COMPARE + +#define TIM_BREAKINPUTSOURCE_DFSDM TIM_BREAKINPUTSOURCE_DFSDM1 +/** + * @} + */ + +/** @defgroup HAL_ETH_Aliased_Macros HAL ETH Aliased Macros maintained for legacy purpose + * @{ + */ + +#define __HAL_ETH_EXTI_ENABLE_IT __HAL_ETH_WAKEUP_EXTI_ENABLE_IT +#define __HAL_ETH_EXTI_DISABLE_IT __HAL_ETH_WAKEUP_EXTI_DISABLE_IT +#define __HAL_ETH_EXTI_GET_FLAG __HAL_ETH_WAKEUP_EXTI_GET_FLAG +#define __HAL_ETH_EXTI_CLEAR_FLAG __HAL_ETH_WAKEUP_EXTI_CLEAR_FLAG +#define __HAL_ETH_EXTI_SET_RISING_EGDE_TRIGGER __HAL_ETH_WAKEUP_EXTI_ENABLE_RISING_EDGE_TRIGGER +#define __HAL_ETH_EXTI_SET_FALLING_EGDE_TRIGGER __HAL_ETH_WAKEUP_EXTI_ENABLE_FALLING_EDGE_TRIGGER +#define __HAL_ETH_EXTI_SET_FALLINGRISING_TRIGGER __HAL_ETH_WAKEUP_EXTI_ENABLE_FALLINGRISING_TRIGGER + +#define ETH_PROMISCIOUSMODE_ENABLE ETH_PROMISCUOUS_MODE_ENABLE +#define ETH_PROMISCIOUSMODE_DISABLE ETH_PROMISCUOUS_MODE_DISABLE +#define IS_ETH_PROMISCIOUS_MODE IS_ETH_PROMISCUOUS_MODE +/** + * @} + */ + +/** @defgroup HAL_LTDC_Aliased_Macros HAL LTDC Aliased Macros maintained for legacy purpose + * @{ + */ +#define __HAL_LTDC_LAYER LTDC_LAYER +#define __HAL_LTDC_RELOAD_CONFIG __HAL_LTDC_RELOAD_IMMEDIATE_CONFIG +/** + * @} + */ + +/** @defgroup HAL_SAI_Aliased_Macros HAL SAI Aliased Macros maintained for legacy purpose + * @{ + */ +#define SAI_OUTPUTDRIVE_DISABLED SAI_OUTPUTDRIVE_DISABLE +#define 
SAI_OUTPUTDRIVE_ENABLED SAI_OUTPUTDRIVE_ENABLE +#define SAI_MASTERDIVIDER_ENABLED SAI_MASTERDIVIDER_ENABLE +#define SAI_MASTERDIVIDER_DISABLED SAI_MASTERDIVIDER_DISABLE +#define SAI_STREOMODE SAI_STEREOMODE +#define SAI_FIFOStatus_Empty SAI_FIFOSTATUS_EMPTY +#define SAI_FIFOStatus_Less1QuarterFull SAI_FIFOSTATUS_LESS1QUARTERFULL +#define SAI_FIFOStatus_1QuarterFull SAI_FIFOSTATUS_1QUARTERFULL +#define SAI_FIFOStatus_HalfFull SAI_FIFOSTATUS_HALFFULL +#define SAI_FIFOStatus_3QuartersFull SAI_FIFOSTATUS_3QUARTERFULL +#define SAI_FIFOStatus_Full SAI_FIFOSTATUS_FULL +#define IS_SAI_BLOCK_MONO_STREO_MODE IS_SAI_BLOCK_MONO_STEREO_MODE +#define SAI_SYNCHRONOUS_EXT SAI_SYNCHRONOUS_EXT_SAI1 +#define SAI_SYNCEXT_IN_ENABLE SAI_SYNCEXT_OUTBLOCKA_ENABLE +/** + * @} + */ + +/** @defgroup HAL_SPDIFRX_Aliased_Macros HAL SPDIFRX Aliased Macros maintained for legacy purpose + * @{ + */ +#if defined(STM32H7) +#define HAL_SPDIFRX_ReceiveControlFlow HAL_SPDIFRX_ReceiveCtrlFlow +#define HAL_SPDIFRX_ReceiveControlFlow_IT HAL_SPDIFRX_ReceiveCtrlFlow_IT +#define HAL_SPDIFRX_ReceiveControlFlow_DMA HAL_SPDIFRX_ReceiveCtrlFlow_DMA +#endif +/** + * @} + */ + +/** @defgroup HAL_HRTIM_Aliased_Functions HAL HRTIM Aliased Functions maintained for legacy purpose + * @{ + */ +#if defined (STM32H7) || defined (STM32G4) || defined (STM32F3) +#define HAL_HRTIM_WaveformCounterStart_IT HAL_HRTIM_WaveformCountStart_IT +#define HAL_HRTIM_WaveformCounterStart_DMA HAL_HRTIM_WaveformCountStart_DMA +#define HAL_HRTIM_WaveformCounterStart HAL_HRTIM_WaveformCountStart +#define HAL_HRTIM_WaveformCounterStop_IT HAL_HRTIM_WaveformCountStop_IT +#define HAL_HRTIM_WaveformCounterStop_DMA HAL_HRTIM_WaveformCountStop_DMA +#define HAL_HRTIM_WaveformCounterStop HAL_HRTIM_WaveformCountStop +#endif +/** + * @} + */ + +/** @defgroup HAL_QSPI_Aliased_Macros HAL QSPI Aliased Macros maintained for legacy purpose + * @{ + */ +#if defined (STM32L4) || defined (STM32F4) || defined (STM32F7) || defined(STM32H7) +#define HAL_QPSI_TIMEOUT_DEFAULT_VALUE HAL_QSPI_TIMEOUT_DEFAULT_VALUE +#endif /* STM32L4 || STM32F4 || STM32F7 */ +/** + * @} + */ + +/** @defgroup HAL_PPP_Aliased_Macros HAL PPP Aliased Macros maintained for legacy purpose + * @{ + */ + +/** + * @} + */ + +#ifdef __cplusplus +} +#endif + +#endif /* STM32_HAL_LEGACY */ + + diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal.h new file mode 100644 index 0000000..9beac83 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal.h @@ -0,0 +1,275 @@ +/** + ****************************************************************************** + * @file stm32f7xx_hal.h + * @author MCD Application Team + * @brief This file contains all the functions prototypes for the HAL + * module driver. + ****************************************************************************** + * @attention + * + * Copyright (c) 2017 STMicroelectronics. + * All rights reserved. + * + * This software is licensed under terms that can be found in the LICENSE file + * in the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. 
+ * + ****************************************************************************** + */ + +/* Define to prevent recursive inclusion -------------------------------------*/ +#ifndef __STM32F7xx_HAL_H +#define __STM32F7xx_HAL_H + +#ifdef __cplusplus + extern "C" { +#endif + +/* Includes ------------------------------------------------------------------*/ +#include "stm32f7xx_hal_conf.h" + +/** @addtogroup STM32F7xx_HAL_Driver + * @{ + */ + +/** @addtogroup HAL + * @{ + */ + +/* Exported types ------------------------------------------------------------*/ +/* Exported constants --------------------------------------------------------*/ + +/** @defgroup HAL_Exported_Constants HAL Exported Constants + * @{ + */ + +/** @defgroup HAL_TICK_FREQ Tick Frequency + * @{ + */ +typedef enum +{ + HAL_TICK_FREQ_10HZ = 100U, + HAL_TICK_FREQ_100HZ = 10U, + HAL_TICK_FREQ_1KHZ = 1U, + HAL_TICK_FREQ_DEFAULT = HAL_TICK_FREQ_1KHZ +} HAL_TickFreqTypeDef; +/** + * @} + */ + +/** @defgroup SYSCFG_BootMode Boot Mode + * @{ + */ +#define SYSCFG_MEM_BOOT_ADD0 ((uint32_t)0x00000000U) +#define SYSCFG_MEM_BOOT_ADD1 SYSCFG_MEMRMP_MEM_BOOT +/** + * @} + */ + +/** + * @} + */ + +/* Exported macro ------------------------------------------------------------*/ +/** @defgroup HAL_Exported_Macros HAL Exported Macros + * @{ + */ + +/** @brief Freeze/Unfreeze Peripherals in Debug mode + */ +#define __HAL_DBGMCU_FREEZE_TIM2() (DBGMCU->APB1FZ |= (DBGMCU_APB1_FZ_DBG_TIM2_STOP)) +#define __HAL_DBGMCU_FREEZE_TIM3() (DBGMCU->APB1FZ |= (DBGMCU_APB1_FZ_DBG_TIM3_STOP)) +#define __HAL_DBGMCU_FREEZE_TIM4() (DBGMCU->APB1FZ |= (DBGMCU_APB1_FZ_DBG_TIM4_STOP)) +#define __HAL_DBGMCU_FREEZE_TIM5() (DBGMCU->APB1FZ |= (DBGMCU_APB1_FZ_DBG_TIM5_STOP)) +#define __HAL_DBGMCU_FREEZE_TIM6() (DBGMCU->APB1FZ |= (DBGMCU_APB1_FZ_DBG_TIM6_STOP)) +#define __HAL_DBGMCU_FREEZE_TIM7() (DBGMCU->APB1FZ |= (DBGMCU_APB1_FZ_DBG_TIM7_STOP)) +#define __HAL_DBGMCU_FREEZE_TIM12() (DBGMCU->APB1FZ |= (DBGMCU_APB1_FZ_DBG_TIM12_STOP)) +#define __HAL_DBGMCU_FREEZE_TIM13() (DBGMCU->APB1FZ |= (DBGMCU_APB1_FZ_DBG_TIM13_STOP)) +#define __HAL_DBGMCU_FREEZE_TIM14() (DBGMCU->APB1FZ |= (DBGMCU_APB1_FZ_DBG_TIM14_STOP)) +#define __HAL_DBGMCU_FREEZE_LPTIM1() (DBGMCU->APB1FZ |= (DBGMCU_APB1_FZ_DBG_LPTIM1_STOP)) +#define __HAL_DBGMCU_FREEZE_RTC() (DBGMCU->APB1FZ |= (DBGMCU_APB1_FZ_DBG_RTC_STOP)) +#define __HAL_DBGMCU_FREEZE_WWDG() (DBGMCU->APB1FZ |= (DBGMCU_APB1_FZ_DBG_WWDG_STOP)) +#define __HAL_DBGMCU_FREEZE_IWDG() (DBGMCU->APB1FZ |= (DBGMCU_APB1_FZ_DBG_IWDG_STOP)) +#define __HAL_DBGMCU_FREEZE_I2C1_TIMEOUT() (DBGMCU->APB1FZ |= (DBGMCU_APB1_FZ_DBG_I2C1_SMBUS_TIMEOUT)) +#define __HAL_DBGMCU_FREEZE_I2C2_TIMEOUT() (DBGMCU->APB1FZ |= (DBGMCU_APB1_FZ_DBG_I2C2_SMBUS_TIMEOUT)) +#define __HAL_DBGMCU_FREEZE_I2C3_TIMEOUT() (DBGMCU->APB1FZ |= (DBGMCU_APB1_FZ_DBG_I2C3_SMBUS_TIMEOUT)) +#define __HAL_DBGMCU_FREEZE_I2C4_TIMEOUT() (DBGMCU->APB1FZ |= (DBGMCU_APB1_FZ_DBG_I2C4_SMBUS_TIMEOUT)) +#define __HAL_DBGMCU_FREEZE_CAN1() (DBGMCU->APB1FZ |= (DBGMCU_APB1_FZ_DBG_CAN1_STOP)) +#define __HAL_DBGMCU_FREEZE_CAN2() (DBGMCU->APB1FZ |= (DBGMCU_APB1_FZ_DBG_CAN2_STOP)) +#define __HAL_DBGMCU_FREEZE_TIM1() (DBGMCU->APB2FZ |= (DBGMCU_APB2_FZ_DBG_TIM1_STOP)) +#define __HAL_DBGMCU_FREEZE_TIM8() (DBGMCU->APB2FZ |= (DBGMCU_APB2_FZ_DBG_TIM8_STOP)) +#define __HAL_DBGMCU_FREEZE_TIM9() (DBGMCU->APB2FZ |= (DBGMCU_APB2_FZ_DBG_TIM9_STOP)) +#define __HAL_DBGMCU_FREEZE_TIM10() (DBGMCU->APB2FZ |= (DBGMCU_APB2_FZ_DBG_TIM10_STOP)) +#define __HAL_DBGMCU_FREEZE_TIM11() (DBGMCU->APB2FZ |= (DBGMCU_APB2_FZ_DBG_TIM11_STOP)) + +#define 
__HAL_DBGMCU_UNFREEZE_TIM2() (DBGMCU->APB1FZ &= ~(DBGMCU_APB1_FZ_DBG_TIM2_STOP)) +#define __HAL_DBGMCU_UNFREEZE_TIM3() (DBGMCU->APB1FZ &= ~(DBGMCU_APB1_FZ_DBG_TIM3_STOP)) +#define __HAL_DBGMCU_UNFREEZE_TIM4() (DBGMCU->APB1FZ &= ~(DBGMCU_APB1_FZ_DBG_TIM4_STOP)) +#define __HAL_DBGMCU_UNFREEZE_TIM5() (DBGMCU->APB1FZ &= ~(DBGMCU_APB1_FZ_DBG_TIM5_STOP)) +#define __HAL_DBGMCU_UNFREEZE_TIM6() (DBGMCU->APB1FZ &= ~(DBGMCU_APB1_FZ_DBG_TIM6_STOP)) +#define __HAL_DBGMCU_UNFREEZE_TIM7() (DBGMCU->APB1FZ &= ~(DBGMCU_APB1_FZ_DBG_TIM7_STOP)) +#define __HAL_DBGMCU_UNFREEZE_TIM12() (DBGMCU->APB1FZ &= ~(DBGMCU_APB1_FZ_DBG_TIM12_STOP)) +#define __HAL_DBGMCU_UNFREEZE_TIM13() (DBGMCU->APB1FZ &= ~(DBGMCU_APB1_FZ_DBG_TIM13_STOP)) +#define __HAL_DBGMCU_UNFREEZE_TIM14() (DBGMCU->APB1FZ &= ~(DBGMCU_APB1_FZ_DBG_TIM14_STOP)) +#define __HAL_DBGMCU_UNFREEZE_LPTIM1() (DBGMCU->APB1FZ &= ~(DBGMCU_APB1_FZ_DBG_LPTIM1_STOP)) +#define __HAL_DBGMCU_UNFREEZE_RTC() (DBGMCU->APB1FZ &= ~(DBGMCU_APB1_FZ_DBG_RTC_STOP)) +#define __HAL_DBGMCU_UNFREEZE_WWDG() (DBGMCU->APB1FZ &= ~(DBGMCU_APB1_FZ_DBG_WWDG_STOP)) +#define __HAL_DBGMCU_UNFREEZE_IWDG() (DBGMCU->APB1FZ &= ~(DBGMCU_APB1_FZ_DBG_IWDG_STOP)) +#define __HAL_DBGMCU_UNFREEZE_I2C1_TIMEOUT() (DBGMCU->APB1FZ &= ~(DBGMCU_APB1_FZ_DBG_I2C1_SMBUS_TIMEOUT)) +#define __HAL_DBGMCU_UNFREEZE_I2C2_TIMEOUT() (DBGMCU->APB1FZ &= ~(DBGMCU_APB1_FZ_DBG_I2C2_SMBUS_TIMEOUT)) +#define __HAL_DBGMCU_UNFREEZE_I2C3_TIMEOUT() (DBGMCU->APB1FZ &= ~(DBGMCU_APB1_FZ_DBG_I2C3_SMBUS_TIMEOUT)) +#define __HAL_DBGMCU_UNFREEZE_I2C4_TIMEOUT() (DBGMCU->APB1FZ &= ~(DBGMCU_APB1_FZ_DBG_I2C4_SMBUS_TIMEOUT)) +#define __HAL_DBGMCU_UNFREEZE_CAN1() (DBGMCU->APB1FZ &= ~(DBGMCU_APB1_FZ_DBG_CAN1_STOP)) +#define __HAL_DBGMCU_UNFREEZE_CAN2() (DBGMCU->APB1FZ &= ~(DBGMCU_APB1_FZ_DBG_CAN2_STOP)) +#define __HAL_DBGMCU_UNFREEZE_TIM1() (DBGMCU->APB2FZ &= ~(DBGMCU_APB2_FZ_DBG_TIM1_STOP)) +#define __HAL_DBGMCU_UNFREEZE_TIM8() (DBGMCU->APB2FZ &= ~(DBGMCU_APB2_FZ_DBG_TIM8_STOP)) +#define __HAL_DBGMCU_UNFREEZE_TIM9() (DBGMCU->APB2FZ &= ~(DBGMCU_APB2_FZ_DBG_TIM9_STOP)) +#define __HAL_DBGMCU_UNFREEZE_TIM10() (DBGMCU->APB2FZ &= ~(DBGMCU_APB2_FZ_DBG_TIM10_STOP)) +#define __HAL_DBGMCU_UNFREEZE_TIM11() (DBGMCU->APB2FZ &= ~(DBGMCU_APB2_FZ_DBG_TIM11_STOP)) + + +/** @brief FMC (NOR/RAM) mapped at 0x60000000 and SDRAM mapped at 0xC0000000 + */ +#define __HAL_SYSCFG_REMAPMEMORY_FMC() (SYSCFG->MEMRMP &= ~(SYSCFG_MEMRMP_SWP_FMC)) + + +/** @brief FMC/SDRAM mapped at 0x60000000 (NOR/RAM) mapped at 0xC0000000 + */ +#define __HAL_SYSCFG_REMAPMEMORY_FMC_SDRAM() do {SYSCFG->MEMRMP &= ~(SYSCFG_MEMRMP_SWP_FMC);\ + SYSCFG->MEMRMP |= (SYSCFG_MEMRMP_SWP_FMC_0);\ + }while(0); +/** + * @brief Return the memory boot mapping as configured by user. + * @retval The boot mode as configured by user. The returned value can be one + * of the following values: + * @arg @ref SYSCFG_MEM_BOOT_ADD0 + * @arg @ref SYSCFG_MEM_BOOT_ADD1 + */ +#define __HAL_SYSCFG_GET_BOOT_MODE() READ_BIT(SYSCFG->MEMRMP, SYSCFG_MEMRMP_MEM_BOOT) + +#if defined (STM32F765xx) || defined (STM32F767xx) || defined (STM32F769xx) || defined (STM32F777xx) || defined (STM32F779xx) +/** @brief SYSCFG Break Cortex-M7 Lockup lock. + * Enable and lock the connection of Cortex-M7 LOCKUP (Hardfault) output to TIM1/8 Break input. + * @note The selected configuration is locked and can be unlocked only by system reset. + */ +#define __HAL_SYSCFG_BREAK_LOCKUP_LOCK() SET_BIT(SYSCFG->CBR, SYSCFG_CBR_CLL) + +/** @brief SYSCFG Break PVD lock. 
+ * Enable and lock the PVD connection to Timer1/8 Break input, as well as the PVDE and PLS[2:0] in the PWR_CR1 register. + * @note The selected configuration is locked and can be unlocked only by system reset. + */ +#define __HAL_SYSCFG_BREAK_PVD_LOCK() SET_BIT(SYSCFG->CBR, SYSCFG_CBR_PVDL) +#endif /* STM32F765xx || STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx */ + +/** + * @} + */ + +/** @defgroup HAL_Private_Macros HAL Private Macros + * @{ + */ +#define IS_TICKFREQ(FREQ) (((FREQ) == HAL_TICK_FREQ_10HZ) || \ + ((FREQ) == HAL_TICK_FREQ_100HZ) || \ + ((FREQ) == HAL_TICK_FREQ_1KHZ)) +/** + * @} + */ +/* Exported functions --------------------------------------------------------*/ +/** @addtogroup HAL_Exported_Functions + * @{ + */ +/** @addtogroup HAL_Exported_Functions_Group1 + * @{ + */ +/* Initialization and Configuration functions ******************************/ +HAL_StatusTypeDef HAL_Init(void); +HAL_StatusTypeDef HAL_DeInit(void); +void HAL_MspInit(void); +void HAL_MspDeInit(void); +HAL_StatusTypeDef HAL_InitTick(uint32_t TickPriority); +/** + * @} + */ + + /* Exported variables ---------------------------------------------------------*/ +/** @addtogroup HAL_Exported_Variables + * @{ + */ +extern __IO uint32_t uwTick; +extern uint32_t uwTickPrio; +extern HAL_TickFreqTypeDef uwTickFreq; +/** + * @} + */ + +/** @addtogroup HAL_Exported_Functions_Group2 + * @{ + */ +/* Peripheral Control functions ************************************************/ +void HAL_IncTick(void); +void HAL_Delay(uint32_t Delay); +uint32_t HAL_GetTick(void); +uint32_t HAL_GetTickPrio(void); +HAL_StatusTypeDef HAL_SetTickFreq(HAL_TickFreqTypeDef Freq); +HAL_TickFreqTypeDef HAL_GetTickFreq(void); +void HAL_SuspendTick(void); +void HAL_ResumeTick(void); +uint32_t HAL_GetHalVersion(void); +uint32_t HAL_GetREVID(void); +uint32_t HAL_GetDEVID(void); +uint32_t HAL_GetUIDw0(void); +uint32_t HAL_GetUIDw1(void); +uint32_t HAL_GetUIDw2(void); +void HAL_DBGMCU_EnableDBGSleepMode(void); +void HAL_DBGMCU_DisableDBGSleepMode(void); +void HAL_DBGMCU_EnableDBGStopMode(void); +void HAL_DBGMCU_DisableDBGStopMode(void); +void HAL_DBGMCU_EnableDBGStandbyMode(void); +void HAL_DBGMCU_DisableDBGStandbyMode(void); +void HAL_EnableCompensationCell(void); +void HAL_DisableCompensationCell(void); +void HAL_EnableFMCMemorySwapping(void); +void HAL_DisableFMCMemorySwapping(void); +#if defined (STM32F765xx) || defined (STM32F767xx) || defined (STM32F769xx) || defined (STM32F777xx) || defined (STM32F779xx) +void HAL_EnableMemorySwappingBank(void); +void HAL_DisableMemorySwappingBank(void); +#endif /* STM32F765xx || STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx */ +/** + * @} + */ + +/** + * @} + */ +/* Private types -------------------------------------------------------------*/ +/* Private variables ---------------------------------------------------------*/ +/** @defgroup HAL_Private_Variables HAL Private Variables + * @{ + */ +/** + * @} + */ +/* Private constants ---------------------------------------------------------*/ +/** @defgroup HAL_Private_Constants HAL Private Constants + * @{ + */ +/** + * @} + */ +/* Private macros ------------------------------------------------------------*/ +/* Private functions ---------------------------------------------------------*/ +/** + * @} + */ + +/** + * @} + */ + +#ifdef __cplusplus +} +#endif + +#endif /* __STM32F7xx_HAL_H */ + + diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_cortex.h 
b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_cortex.h new file mode 100644 index 0000000..9287cdf --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_cortex.h @@ -0,0 +1,404 @@ +/** + ****************************************************************************** + * @file stm32f7xx_hal_cortex.h + * @author MCD Application Team + * @brief Header file of CORTEX HAL module. + ****************************************************************************** + * @attention + * + * Copyright (c) 2017 STMicroelectronics. + * All rights reserved. + * + * This software is licensed under terms that can be found in the LICENSE file in + * the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. + * + ****************************************************************************** + */ + +/* Define to prevent recursive inclusion -------------------------------------*/ +#ifndef __STM32F7xx_HAL_CORTEX_H +#define __STM32F7xx_HAL_CORTEX_H + +#ifdef __cplusplus + extern "C" { +#endif + +/* Includes ------------------------------------------------------------------*/ +#include "stm32f7xx_hal_def.h" + +/** @addtogroup STM32F7xx_HAL_Driver + * @{ + */ + +/** @addtogroup CORTEX + * @{ + */ +/* Exported types ------------------------------------------------------------*/ +/** @defgroup CORTEX_Exported_Types Cortex Exported Types + * @{ + */ + +#if (__MPU_PRESENT == 1) +/** @defgroup CORTEX_MPU_Region_Initialization_Structure_definition MPU Region Initialization Structure Definition + * @brief MPU Region initialization structure + * @{ + */ +typedef struct +{ + uint8_t Enable; /*!< Specifies the status of the region. + This parameter can be a value of @ref CORTEX_MPU_Region_Enable */ + uint8_t Number; /*!< Specifies the number of the region to protect. + This parameter can be a value of @ref CORTEX_MPU_Region_Number */ + uint32_t BaseAddress; /*!< Specifies the base address of the region to protect. */ + uint8_t Size; /*!< Specifies the size of the region to protect. + This parameter can be a value of @ref CORTEX_MPU_Region_Size */ + uint8_t SubRegionDisable; /*!< Specifies the number of the subregion protection to disable. + This parameter must be a number between Min_Data = 0x00 and Max_Data = 0xFF */ + uint8_t TypeExtField; /*!< Specifies the TEX field level. + This parameter can be a value of @ref CORTEX_MPU_TEX_Levels */ + uint8_t AccessPermission; /*!< Specifies the region access permission type. + This parameter can be a value of @ref CORTEX_MPU_Region_Permission_Attributes */ + uint8_t DisableExec; /*!< Specifies the instruction access status. + This parameter can be a value of @ref CORTEX_MPU_Instruction_Access */ + uint8_t IsShareable; /*!< Specifies the shareability status of the protected region. + This parameter can be a value of @ref CORTEX_MPU_Access_Shareable */ + uint8_t IsCacheable; /*!< Specifies the cacheable status of the region protected. + This parameter can be a value of @ref CORTEX_MPU_Access_Cacheable */ + uint8_t IsBufferable; /*!< Specifies the bufferable status of the protected region. 
+ This parameter can be a value of @ref CORTEX_MPU_Access_Bufferable */ +}MPU_Region_InitTypeDef; +/** + * @} + */ +#endif /* __MPU_PRESENT */ + +/** + * @} + */ + +/* Exported constants --------------------------------------------------------*/ + +/** @defgroup CORTEX_Exported_Constants CORTEX Exported Constants + * @{ + */ + +/** @defgroup CORTEX_Preemption_Priority_Group CORTEX Preemption Priority Group + * @{ + */ +#define NVIC_PRIORITYGROUP_0 ((uint32_t)0x00000007U) /*!< 0 bits for pre-emption priority + 4 bits for subpriority */ +#define NVIC_PRIORITYGROUP_1 ((uint32_t)0x00000006U) /*!< 1 bits for pre-emption priority + 3 bits for subpriority */ +#define NVIC_PRIORITYGROUP_2 ((uint32_t)0x00000005U) /*!< 2 bits for pre-emption priority + 2 bits for subpriority */ +#define NVIC_PRIORITYGROUP_3 ((uint32_t)0x00000004U) /*!< 3 bits for pre-emption priority + 1 bits for subpriority */ +#define NVIC_PRIORITYGROUP_4 ((uint32_t)0x00000003U) /*!< 4 bits for pre-emption priority + 0 bits for subpriority */ +/** + * @} + */ + +/** @defgroup CORTEX_SysTick_clock_source CORTEX _SysTick clock source + * @{ + */ +#define SYSTICK_CLKSOURCE_HCLK_DIV8 ((uint32_t)0x00000000U) +#define SYSTICK_CLKSOURCE_HCLK ((uint32_t)0x00000004U) + +/** + * @} + */ + +#if (__MPU_PRESENT == 1) +/** @defgroup CORTEX_MPU_HFNMI_PRIVDEF_Control MPU HFNMI and PRIVILEGED Access control + * @{ + */ +#define MPU_HFNMI_PRIVDEF_NONE ((uint32_t)0x00000000U) +#define MPU_HARDFAULT_NMI ((uint32_t)0x00000002U) +#define MPU_PRIVILEGED_DEFAULT ((uint32_t)0x00000004U) +#define MPU_HFNMI_PRIVDEF ((uint32_t)0x00000006U) +/** + * @} + */ + +/** @defgroup CORTEX_MPU_Region_Enable CORTEX MPU Region Enable + * @{ + */ +#define MPU_REGION_ENABLE ((uint8_t)0x01U) +#define MPU_REGION_DISABLE ((uint8_t)0x00U) +/** + * @} + */ + +/** @defgroup CORTEX_MPU_Instruction_Access CORTEX MPU Instruction Access + * @{ + */ +#define MPU_INSTRUCTION_ACCESS_ENABLE ((uint8_t)0x00U) +#define MPU_INSTRUCTION_ACCESS_DISABLE ((uint8_t)0x01U) +/** + * @} + */ + +/** @defgroup CORTEX_MPU_Access_Shareable CORTEX MPU Instruction Access Shareable + * @{ + */ +#define MPU_ACCESS_SHAREABLE ((uint8_t)0x01U) +#define MPU_ACCESS_NOT_SHAREABLE ((uint8_t)0x00U) +/** + * @} + */ + +/** @defgroup CORTEX_MPU_Access_Cacheable CORTEX MPU Instruction Access Cacheable + * @{ + */ +#define MPU_ACCESS_CACHEABLE ((uint8_t)0x01U) +#define MPU_ACCESS_NOT_CACHEABLE ((uint8_t)0x00U) +/** + * @} + */ + +/** @defgroup CORTEX_MPU_Access_Bufferable CORTEX MPU Instruction Access Bufferable + * @{ + */ +#define MPU_ACCESS_BUFFERABLE ((uint8_t)0x01U) +#define MPU_ACCESS_NOT_BUFFERABLE ((uint8_t)0x00U) +/** + * @} + */ + +/** @defgroup CORTEX_MPU_TEX_Levels MPU TEX Levels + * @{ + */ +#define MPU_TEX_LEVEL0 ((uint8_t)0x00U) +#define MPU_TEX_LEVEL1 ((uint8_t)0x01U) +#define MPU_TEX_LEVEL2 ((uint8_t)0x02U) +/** + * @} + */ + +/** @defgroup CORTEX_MPU_Region_Size CORTEX MPU Region Size + * @{ + */ +#define MPU_REGION_SIZE_32B ((uint8_t)0x04U) +#define MPU_REGION_SIZE_64B ((uint8_t)0x05U) +#define MPU_REGION_SIZE_128B ((uint8_t)0x06U) +#define MPU_REGION_SIZE_256B ((uint8_t)0x07U) +#define MPU_REGION_SIZE_512B ((uint8_t)0x08U) +#define MPU_REGION_SIZE_1KB ((uint8_t)0x09U) +#define MPU_REGION_SIZE_2KB ((uint8_t)0x0AU) +#define MPU_REGION_SIZE_4KB ((uint8_t)0x0BU) +#define MPU_REGION_SIZE_8KB ((uint8_t)0x0CU) +#define MPU_REGION_SIZE_16KB ((uint8_t)0x0DU) +#define MPU_REGION_SIZE_32KB ((uint8_t)0x0EU) +#define MPU_REGION_SIZE_64KB ((uint8_t)0x0FU) +#define MPU_REGION_SIZE_128KB ((uint8_t)0x10U) +#define 
MPU_REGION_SIZE_256KB ((uint8_t)0x11U) +#define MPU_REGION_SIZE_512KB ((uint8_t)0x12U) +#define MPU_REGION_SIZE_1MB ((uint8_t)0x13U) +#define MPU_REGION_SIZE_2MB ((uint8_t)0x14U) +#define MPU_REGION_SIZE_4MB ((uint8_t)0x15U) +#define MPU_REGION_SIZE_8MB ((uint8_t)0x16U) +#define MPU_REGION_SIZE_16MB ((uint8_t)0x17U) +#define MPU_REGION_SIZE_32MB ((uint8_t)0x18U) +#define MPU_REGION_SIZE_64MB ((uint8_t)0x19U) +#define MPU_REGION_SIZE_128MB ((uint8_t)0x1AU) +#define MPU_REGION_SIZE_256MB ((uint8_t)0x1BU) +#define MPU_REGION_SIZE_512MB ((uint8_t)0x1CU) +#define MPU_REGION_SIZE_1GB ((uint8_t)0x1DU) +#define MPU_REGION_SIZE_2GB ((uint8_t)0x1EU) +#define MPU_REGION_SIZE_4GB ((uint8_t)0x1FU) +/** + * @} + */ + +/** @defgroup CORTEX_MPU_Region_Permission_Attributes CORTEX MPU Region Permission Attributes + * @{ + */ +#define MPU_REGION_NO_ACCESS ((uint8_t)0x00U) +#define MPU_REGION_PRIV_RW ((uint8_t)0x01U) +#define MPU_REGION_PRIV_RW_URO ((uint8_t)0x02U) +#define MPU_REGION_FULL_ACCESS ((uint8_t)0x03U) +#define MPU_REGION_PRIV_RO ((uint8_t)0x05U) +#define MPU_REGION_PRIV_RO_URO ((uint8_t)0x06U) +/** + * @} + */ + +/** @defgroup CORTEX_MPU_Region_Number CORTEX MPU Region Number + * @{ + */ +#define MPU_REGION_NUMBER0 ((uint8_t)0x00U) +#define MPU_REGION_NUMBER1 ((uint8_t)0x01U) +#define MPU_REGION_NUMBER2 ((uint8_t)0x02U) +#define MPU_REGION_NUMBER3 ((uint8_t)0x03U) +#define MPU_REGION_NUMBER4 ((uint8_t)0x04U) +#define MPU_REGION_NUMBER5 ((uint8_t)0x05U) +#define MPU_REGION_NUMBER6 ((uint8_t)0x06U) +#define MPU_REGION_NUMBER7 ((uint8_t)0x07U) +/** + * @} + */ +#endif /* __MPU_PRESENT */ + +/** + * @} + */ + + +/* Exported Macros -----------------------------------------------------------*/ + +/* Exported functions --------------------------------------------------------*/ +/** @addtogroup CORTEX_Exported_Functions + * @{ + */ + +/** @addtogroup CORTEX_Exported_Functions_Group1 + * @{ + */ +/* Initialization and de-initialization functions *****************************/ +void HAL_NVIC_SetPriorityGrouping(uint32_t PriorityGroup); +void HAL_NVIC_SetPriority(IRQn_Type IRQn, uint32_t PreemptPriority, uint32_t SubPriority); +void HAL_NVIC_EnableIRQ(IRQn_Type IRQn); +void HAL_NVIC_DisableIRQ(IRQn_Type IRQn); +void HAL_NVIC_SystemReset(void); +uint32_t HAL_SYSTICK_Config(uint32_t TicksNumb); +/** + * @} + */ + +/** @addtogroup CORTEX_Exported_Functions_Group2 + * @{ + */ +/* Peripheral Control functions ***********************************************/ +#if (__MPU_PRESENT == 1) +void HAL_MPU_Enable(uint32_t MPU_Control); +void HAL_MPU_Disable(void); +void HAL_MPU_ConfigRegion(MPU_Region_InitTypeDef *MPU_Init); +#endif /* __MPU_PRESENT */ +uint32_t HAL_NVIC_GetPriorityGrouping(void); +void HAL_NVIC_GetPriority(IRQn_Type IRQn, uint32_t PriorityGroup, uint32_t* pPreemptPriority, uint32_t* pSubPriority); +uint32_t HAL_NVIC_GetPendingIRQ(IRQn_Type IRQn); +void HAL_NVIC_SetPendingIRQ(IRQn_Type IRQn); +void HAL_NVIC_ClearPendingIRQ(IRQn_Type IRQn); +uint32_t HAL_NVIC_GetActive(IRQn_Type IRQn); +void HAL_SYSTICK_CLKSourceConfig(uint32_t CLKSource); +void HAL_SYSTICK_IRQHandler(void); +void HAL_SYSTICK_Callback(void); +/** + * @} + */ + +/** + * @} + */ + +/* Private types -------------------------------------------------------------*/ +/* Private variables ---------------------------------------------------------*/ +/* Private constants ---------------------------------------------------------*/ +/* Private macros ------------------------------------------------------------*/ +/** @defgroup CORTEX_Private_Macros 
CORTEX Private Macros + * @{ + */ +#define IS_NVIC_PRIORITY_GROUP(GROUP) (((GROUP) == NVIC_PRIORITYGROUP_0) || \ + ((GROUP) == NVIC_PRIORITYGROUP_1) || \ + ((GROUP) == NVIC_PRIORITYGROUP_2) || \ + ((GROUP) == NVIC_PRIORITYGROUP_3) || \ + ((GROUP) == NVIC_PRIORITYGROUP_4)) + +#define IS_NVIC_PREEMPTION_PRIORITY(PRIORITY) ((PRIORITY) < 0x10U) + +#define IS_NVIC_SUB_PRIORITY(PRIORITY) ((PRIORITY) < 0x10U) + +#define IS_NVIC_DEVICE_IRQ(IRQ) ((IRQ) >= 0x00) + +#define IS_SYSTICK_CLK_SOURCE(SOURCE) (((SOURCE) == SYSTICK_CLKSOURCE_HCLK) || \ + ((SOURCE) == SYSTICK_CLKSOURCE_HCLK_DIV8)) + +#if (__MPU_PRESENT == 1) +#define IS_MPU_REGION_ENABLE(STATE) (((STATE) == MPU_REGION_ENABLE) || \ + ((STATE) == MPU_REGION_DISABLE)) + +#define IS_MPU_INSTRUCTION_ACCESS(STATE) (((STATE) == MPU_INSTRUCTION_ACCESS_ENABLE) || \ + ((STATE) == MPU_INSTRUCTION_ACCESS_DISABLE)) + +#define IS_MPU_ACCESS_SHAREABLE(STATE) (((STATE) == MPU_ACCESS_SHAREABLE) || \ + ((STATE) == MPU_ACCESS_NOT_SHAREABLE)) + +#define IS_MPU_ACCESS_CACHEABLE(STATE) (((STATE) == MPU_ACCESS_CACHEABLE) || \ + ((STATE) == MPU_ACCESS_NOT_CACHEABLE)) + +#define IS_MPU_ACCESS_BUFFERABLE(STATE) (((STATE) == MPU_ACCESS_BUFFERABLE) || \ + ((STATE) == MPU_ACCESS_NOT_BUFFERABLE)) + +#define IS_MPU_TEX_LEVEL(TYPE) (((TYPE) == MPU_TEX_LEVEL0) || \ + ((TYPE) == MPU_TEX_LEVEL1) || \ + ((TYPE) == MPU_TEX_LEVEL2)) + +#define IS_MPU_REGION_PERMISSION_ATTRIBUTE(TYPE) (((TYPE) == MPU_REGION_NO_ACCESS) || \ + ((TYPE) == MPU_REGION_PRIV_RW) || \ + ((TYPE) == MPU_REGION_PRIV_RW_URO) || \ + ((TYPE) == MPU_REGION_FULL_ACCESS) || \ + ((TYPE) == MPU_REGION_PRIV_RO) || \ + ((TYPE) == MPU_REGION_PRIV_RO_URO)) + +#define IS_MPU_REGION_NUMBER(NUMBER) (((NUMBER) == MPU_REGION_NUMBER0) || \ + ((NUMBER) == MPU_REGION_NUMBER1) || \ + ((NUMBER) == MPU_REGION_NUMBER2) || \ + ((NUMBER) == MPU_REGION_NUMBER3) || \ + ((NUMBER) == MPU_REGION_NUMBER4) || \ + ((NUMBER) == MPU_REGION_NUMBER5) || \ + ((NUMBER) == MPU_REGION_NUMBER6) || \ + ((NUMBER) == MPU_REGION_NUMBER7)) + +#define IS_MPU_REGION_SIZE(SIZE) (((SIZE) == MPU_REGION_SIZE_32B) || \ + ((SIZE) == MPU_REGION_SIZE_64B) || \ + ((SIZE) == MPU_REGION_SIZE_128B) || \ + ((SIZE) == MPU_REGION_SIZE_256B) || \ + ((SIZE) == MPU_REGION_SIZE_512B) || \ + ((SIZE) == MPU_REGION_SIZE_1KB) || \ + ((SIZE) == MPU_REGION_SIZE_2KB) || \ + ((SIZE) == MPU_REGION_SIZE_4KB) || \ + ((SIZE) == MPU_REGION_SIZE_8KB) || \ + ((SIZE) == MPU_REGION_SIZE_16KB) || \ + ((SIZE) == MPU_REGION_SIZE_32KB) || \ + ((SIZE) == MPU_REGION_SIZE_64KB) || \ + ((SIZE) == MPU_REGION_SIZE_128KB) || \ + ((SIZE) == MPU_REGION_SIZE_256KB) || \ + ((SIZE) == MPU_REGION_SIZE_512KB) || \ + ((SIZE) == MPU_REGION_SIZE_1MB) || \ + ((SIZE) == MPU_REGION_SIZE_2MB) || \ + ((SIZE) == MPU_REGION_SIZE_4MB) || \ + ((SIZE) == MPU_REGION_SIZE_8MB) || \ + ((SIZE) == MPU_REGION_SIZE_16MB) || \ + ((SIZE) == MPU_REGION_SIZE_32MB) || \ + ((SIZE) == MPU_REGION_SIZE_64MB) || \ + ((SIZE) == MPU_REGION_SIZE_128MB) || \ + ((SIZE) == MPU_REGION_SIZE_256MB) || \ + ((SIZE) == MPU_REGION_SIZE_512MB) || \ + ((SIZE) == MPU_REGION_SIZE_1GB) || \ + ((SIZE) == MPU_REGION_SIZE_2GB) || \ + ((SIZE) == MPU_REGION_SIZE_4GB)) + +#define IS_MPU_SUB_REGION_DISABLE(SUBREGION) ((SUBREGION) < (uint16_t)0x00FFU) +#endif /* __MPU_PRESENT */ + +/** + * @} + */ + +/** + * @} + */ + +/** + * @} + */ + +#ifdef __cplusplus +} +#endif + +#endif /* __STM32F7xx_HAL_CORTEX_H */ + + diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_def.h 
b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_def.h new file mode 100644 index 0000000..301ea86 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_def.h @@ -0,0 +1,218 @@ +/** + ****************************************************************************** + * @file stm32f7xx_hal_def.h + * @author MCD Application Team + * @brief This file contains HAL common defines, enumeration, macros and + * structures definitions. + ****************************************************************************** + * @attention + * + * Copyright (c) 2017 STMicroelectronics. + * All rights reserved. + * + * This software is licensed under terms that can be found in the LICENSE file + * in the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. + * + ****************************************************************************** + */ + +/* Define to prevent recursive inclusion -------------------------------------*/ +#ifndef __STM32F7xx_HAL_DEF +#define __STM32F7xx_HAL_DEF + +#ifdef __cplusplus + extern "C" { +#endif + +/* Includes ------------------------------------------------------------------*/ +#include "stm32f7xx.h" +#include "Legacy/stm32_hal_legacy.h" +#include + +/* Exported types ------------------------------------------------------------*/ + +/** + * @brief HAL Status structures definition + */ +typedef enum +{ + HAL_OK = 0x00U, + HAL_ERROR = 0x01U, + HAL_BUSY = 0x02U, + HAL_TIMEOUT = 0x03U +} HAL_StatusTypeDef; + +/** + * @brief HAL Lock structures definition + */ +typedef enum +{ + HAL_UNLOCKED = 0x00U, + HAL_LOCKED = 0x01U +} HAL_LockTypeDef; + +/* Exported macro ------------------------------------------------------------*/ + +#define UNUSED(X) (void)X /* To avoid gcc/g++ warnings */ + +#define HAL_MAX_DELAY 0xFFFFFFFFU + +#define HAL_IS_BIT_SET(REG, BIT) (((REG) & (BIT)) == (BIT)) +#define HAL_IS_BIT_CLR(REG, BIT) (((REG) & (BIT)) == 0U) + +#define __HAL_LINKDMA(__HANDLE__, __PPP_DMA_FIELD__, __DMA_HANDLE__) \ + do{ \ + (__HANDLE__)->__PPP_DMA_FIELD__ = &(__DMA_HANDLE__); \ + (__DMA_HANDLE__).Parent = (__HANDLE__); \ + } while(0) + +/** @brief Reset the Handle's State field. + * @param __HANDLE__ specifies the Peripheral Handle. + * @note This macro can be used for the following purpose: + * - When the Handle is declared as local variable; before passing it as parameter + * to HAL_PPP_Init() for the first time, it is mandatory to use this macro + * to set to 0 the Handle's "State" field. + * Otherwise, "State" field may have any random value and the first time the function + * HAL_PPP_Init() is called, the low level hardware initialization will be missed + * (i.e. HAL_PPP_MspInit() will not be executed). + * - When there is a need to reconfigure the low level hardware: instead of calling + * HAL_PPP_DeInit() then HAL_PPP_Init(), user can make a call to this macro then HAL_PPP_Init(). + * In this later function, when the Handle's "State" field is set to 0, it will execute the function + * HAL_PPP_MspInit() which will reconfigure the low level hardware. 
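+ *
+ *          Minimal illustrative sketch of the first use case, added for this review and
+ *          not part of the original ST header (SPI is only an example; any PPP handle
+ *          whose structure exposes a "State" field works the same way):
+ *          @code
+ *          SPI_HandleTypeDef hspi;            // local handle: "State" starts with a random value
+ *          __HAL_RESET_HANDLE_STATE(&hspi);   // force State to 0 so HAL_SPI_MspInit() is executed
+ *          hspi.Instance = SPI1;              // instance and Init fields here are placeholders
+ *          // ... fill hspi.Init ...
+ *          HAL_SPI_Init(&hspi);               // low level hardware initialization is not skipped
+ *          @endcode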
+ * @retval None + */ +#define __HAL_RESET_HANDLE_STATE(__HANDLE__) ((__HANDLE__)->State = 0U) + +#if (USE_RTOS == 1U) + /* Reserved for future use */ + #error "USE_RTOS should be 0 in the current HAL release" +#else + #define __HAL_LOCK(__HANDLE__) \ + do{ \ + if((__HANDLE__)->Lock == HAL_LOCKED) \ + { \ + return HAL_BUSY; \ + } \ + else \ + { \ + (__HANDLE__)->Lock = HAL_LOCKED; \ + } \ + }while (0U) + + #define __HAL_UNLOCK(__HANDLE__) \ + do{ \ + (__HANDLE__)->Lock = HAL_UNLOCKED; \ + }while (0U) +#endif /* USE_RTOS */ + +#if defined (__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050) /* ARM Compiler V6 */ + #ifndef __weak + #define __weak __attribute__((weak)) + #endif + #ifndef __packed + #define __packed __attribute__((packed)) + #endif +#elif defined ( __GNUC__ ) && !defined (__CC_ARM) /* GNU Compiler */ + #ifndef __weak + #define __weak __attribute__((weak)) + #endif /* __weak */ + #ifndef __packed + #define __packed __attribute__((__packed__)) + #endif /* __packed */ +#endif /* __GNUC__ */ + + +/* Macro to get variable aligned on 4-bytes, for __ICCARM__ the directive "#pragma data_alignment=4" must be used instead */ +#if defined (__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050) /* ARM Compiler V6 */ + #ifndef __ALIGN_BEGIN + #define __ALIGN_BEGIN + #endif + #ifndef __ALIGN_END + #define __ALIGN_END __attribute__ ((aligned (4))) + #endif +#elif defined ( __GNUC__ ) && !defined (__CC_ARM) /* GNU Compiler */ + #ifndef __ALIGN_END + #define __ALIGN_END __attribute__ ((aligned (4))) + #endif /* __ALIGN_END */ + #ifndef __ALIGN_BEGIN + #define __ALIGN_BEGIN + #endif /* __ALIGN_BEGIN */ +#else + #ifndef __ALIGN_END + #define __ALIGN_END + #endif /* __ALIGN_END */ + #ifndef __ALIGN_BEGIN + #if defined (__CC_ARM) /* ARM Compiler V5*/ + #define __ALIGN_BEGIN __align(4) + #elif defined (__ICCARM__) /* IAR Compiler */ + #define __ALIGN_BEGIN + #endif /* __CC_ARM */ + #endif /* __ALIGN_BEGIN */ +#endif /* __GNUC__ */ + +/* Macro to get variable aligned on 32-bytes,needed for cache maintenance purpose */ +#if defined (__GNUC__) /* GNU Compiler */ + #define ALIGN_32BYTES(buf) buf __attribute__ ((aligned (32))) +#elif defined (__ICCARM__) /* IAR Compiler */ + #define ALIGN_32BYTES(buf) _Pragma("data_alignment=32") buf +#elif defined (__CC_ARM) /* ARM Compiler */ + #define ALIGN_32BYTES(buf) __align(32) buf +#endif + +/** + * @brief __RAM_FUNC definition + */ +#if defined ( __CC_ARM ) || (defined (__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050)) +/* ARM Compiler V4/V5 and V6 + -------------------------- + RAM functions are defined using the toolchain options. + Functions that are executed in RAM should reside in a separate source module. + Using the 'Options for File' dialog you can simply change the 'Code / Const' + area of a module to a memory space in physical RAM. + Available memory areas are declared in the 'Target' tab of the 'Options for Target' + dialog. +*/ +#define __RAM_FUNC + +#elif defined ( __ICCARM__ ) +/* ICCARM Compiler + --------------- + RAM functions are defined using a specific toolchain keyword "__ramfunc". +*/ +#define __RAM_FUNC __ramfunc + +#elif defined ( __GNUC__ ) +/* GNU Compiler + ------------ + RAM functions are defined using a specific toolchain attribute + "__attribute__((section(".RamFunc")))". 
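+
+   Illustrative usage, added for this review and not part of the original ST header
+   (the function name is a made-up placeholder). The macro only tags the function;
+   the linker script must place the .RamFunc input section in RAM, which the
+   CubeMX/CubeIDE-generated scripts typically do as part of the initialized .data copy:
+
+      __RAM_FUNC void MyCriticalRoutine(void)   // body is executed from RAM
+      {
+        // time-critical code that must not stall on flash wait states
+      }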
+*/ +#define __RAM_FUNC __attribute__((section(".RamFunc"))) + +#endif + +/** + * @brief __NOINLINE definition + */ +#if defined ( __CC_ARM ) || (defined (__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050)) || defined ( __GNUC__ ) +/* ARM V4/V5 and V6 & GNU Compiler + ------------------------------- +*/ +#define __NOINLINE __attribute__ ( (noinline) ) + +#elif defined ( __ICCARM__ ) +/* ICCARM Compiler + --------------- +*/ +#define __NOINLINE _Pragma("optimize = no_inline") + +#endif + +#ifdef __cplusplus +} +#endif + +#endif /* ___STM32F7xx_HAL_DEF */ + + diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_dma.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_dma.h new file mode 100644 index 0000000..cca4509 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_dma.h @@ -0,0 +1,747 @@ +/** + ****************************************************************************** + * @file stm32f7xx_hal_dma.h + * @author MCD Application Team + * @brief Header file of DMA HAL module. + ****************************************************************************** + * @attention + * + * Copyright (c) 2017 STMicroelectronics. + * All rights reserved. + * + * This software is licensed under terms that can be found in the LICENSE file in + * the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. + * + ****************************************************************************** + */ + +/* Define to prevent recursive inclusion -------------------------------------*/ +#ifndef __STM32F7xx_HAL_DMA_H +#define __STM32F7xx_HAL_DMA_H + +#ifdef __cplusplus + extern "C" { +#endif + +/* Includes ------------------------------------------------------------------*/ +#include "stm32f7xx_hal_def.h" + +/** @addtogroup STM32F7xx_HAL_Driver + * @{ + */ + +/** @addtogroup DMA + * @{ + */ + +/* Exported types ------------------------------------------------------------*/ + +/** @defgroup DMA_Exported_Types DMA Exported Types + * @brief DMA Exported Types + * @{ + */ + +/** + * @brief DMA Configuration Structure definition + */ +typedef struct +{ + uint32_t Channel; /*!< Specifies the channel used for the specified stream. + This parameter can be a value of @ref DMAEx_Channel_selection */ + + uint32_t Direction; /*!< Specifies if the data will be transferred from memory to peripheral, + from memory to memory or from peripheral to memory. + This parameter can be a value of @ref DMA_Data_transfer_direction */ + + uint32_t PeriphInc; /*!< Specifies whether the Peripheral address register should be incremented or not. + This parameter can be a value of @ref DMA_Peripheral_incremented_mode */ + + uint32_t MemInc; /*!< Specifies whether the memory address register should be incremented or not. + This parameter can be a value of @ref DMA_Memory_incremented_mode */ + + uint32_t PeriphDataAlignment; /*!< Specifies the Peripheral data width. + This parameter can be a value of @ref DMA_Peripheral_data_size */ + + uint32_t MemDataAlignment; /*!< Specifies the Memory data width. + This parameter can be a value of @ref DMA_Memory_data_size */ + + uint32_t Mode; /*!< Specifies the operation mode of the DMAy Streamx. 
+ This parameter can be a value of @ref DMA_mode + @note The circular buffer mode cannot be used if the memory-to-memory + data transfer is configured on the selected Stream */ + + uint32_t Priority; /*!< Specifies the software priority for the DMAy Streamx. + This parameter can be a value of @ref DMA_Priority_level */ + + uint32_t FIFOMode; /*!< Specifies if the FIFO mode or Direct mode will be used for the specified stream. + This parameter can be a value of @ref DMA_FIFO_direct_mode + @note The Direct mode (FIFO mode disabled) cannot be used if the + memory-to-memory data transfer is configured on the selected stream */ + + uint32_t FIFOThreshold; /*!< Specifies the FIFO threshold level. + This parameter can be a value of @ref DMA_FIFO_threshold_level */ + + uint32_t MemBurst; /*!< Specifies the Burst transfer configuration for the memory transfers. + It specifies the amount of data to be transferred in a single non interruptible + transaction. + This parameter can be a value of @ref DMA_Memory_burst + @note The burst mode is possible only if the address Increment mode is enabled. */ + + uint32_t PeriphBurst; /*!< Specifies the Burst transfer configuration for the peripheral transfers. + It specifies the amount of data to be transferred in a single non interruptible + transaction. + This parameter can be a value of @ref DMA_Peripheral_burst + @note The burst mode is possible only if the address Increment mode is enabled. */ +}DMA_InitTypeDef; + +/** + * @brief HAL DMA State structures definition + */ +typedef enum +{ + HAL_DMA_STATE_RESET = 0x00U, /*!< DMA not yet initialized or disabled */ + HAL_DMA_STATE_READY = 0x01U, /*!< DMA initialized and ready for use */ + HAL_DMA_STATE_BUSY = 0x02U, /*!< DMA process is ongoing */ + HAL_DMA_STATE_TIMEOUT = 0x03U, /*!< DMA timeout state */ + HAL_DMA_STATE_ERROR = 0x04U, /*!< DMA error state */ + HAL_DMA_STATE_ABORT = 0x05U, /*!< DMA Abort state */ +}HAL_DMA_StateTypeDef; + +/** + * @brief HAL DMA Error Code structure definition + */ +typedef enum +{ + HAL_DMA_FULL_TRANSFER = 0x00U, /*!< Full transfer */ + HAL_DMA_HALF_TRANSFER = 0x01U, /*!< Half Transfer */ +}HAL_DMA_LevelCompleteTypeDef; + +/** + * @brief HAL DMA Error Code structure definition + */ +typedef enum +{ + HAL_DMA_XFER_CPLT_CB_ID = 0x00U, /*!< Full transfer */ + HAL_DMA_XFER_HALFCPLT_CB_ID = 0x01U, /*!< Half Transfer */ + HAL_DMA_XFER_M1CPLT_CB_ID = 0x02U, /*!< M1 Full Transfer */ + HAL_DMA_XFER_M1HALFCPLT_CB_ID = 0x03U, /*!< M1 Half Transfer */ + HAL_DMA_XFER_ERROR_CB_ID = 0x04U, /*!< Error */ + HAL_DMA_XFER_ABORT_CB_ID = 0x05U, /*!< Abort */ + HAL_DMA_XFER_ALL_CB_ID = 0x06U /*!< All */ +}HAL_DMA_CallbackIDTypeDef; + +/** + * @brief DMA handle Structure definition + */ +typedef struct __DMA_HandleTypeDef +{ + DMA_Stream_TypeDef *Instance; /*!< Register base address */ + + DMA_InitTypeDef Init; /*!< DMA communication parameters */ + + HAL_LockTypeDef Lock; /*!< DMA locking object */ + + __IO HAL_DMA_StateTypeDef State; /*!< DMA transfer state */ + + void *Parent; /*!< Parent object state */ + + void (* XferCpltCallback)( struct __DMA_HandleTypeDef * hdma); /*!< DMA transfer complete callback */ + + void (* XferHalfCpltCallback)( struct __DMA_HandleTypeDef * hdma); /*!< DMA Half transfer complete callback */ + + void (* XferM1CpltCallback)( struct __DMA_HandleTypeDef * hdma); /*!< DMA transfer complete Memory1 callback */ + + void (* XferM1HalfCpltCallback)( struct __DMA_HandleTypeDef * hdma); /*!< DMA transfer Half complete Memory1 callback */ + + void (* XferErrorCallback)( struct 
__DMA_HandleTypeDef * hdma); /*!< DMA transfer error callback */ + + void (* XferAbortCallback)( struct __DMA_HandleTypeDef * hdma); /*!< DMA transfer Abort callback */ + + __IO uint32_t ErrorCode; /*!< DMA Error code */ + + uint32_t StreamBaseAddress; /*!< DMA Stream Base Address */ + + uint32_t StreamIndex; /*!< DMA Stream Index */ + +}DMA_HandleTypeDef; + +/** + * @} + */ + + +/* Exported constants --------------------------------------------------------*/ + +/** @defgroup DMA_Exported_Constants DMA Exported Constants + * @brief DMA Exported constants + * @{ + */ + +/** @defgroup DMA_Error_Code DMA Error Code + * @brief DMA Error Code + * @{ + */ +#define HAL_DMA_ERROR_NONE 0x00000000U /*!< No error */ +#define HAL_DMA_ERROR_TE 0x00000001U /*!< Transfer error */ +#define HAL_DMA_ERROR_FE 0x00000002U /*!< FIFO error */ +#define HAL_DMA_ERROR_DME 0x00000004U /*!< Direct Mode error */ +#define HAL_DMA_ERROR_TIMEOUT 0x00000020U /*!< Timeout error */ +#define HAL_DMA_ERROR_PARAM 0x00000040U /*!< Parameter error */ +#define HAL_DMA_ERROR_NO_XFER 0x00000080U /*!< Abort requested with no Xfer ongoing */ +#define HAL_DMA_ERROR_NOT_SUPPORTED 0x00000100U /*!< Not supported mode */ +/** + * @} + */ + +/** @defgroup DMA_Data_transfer_direction DMA Data transfer direction + * @brief DMA data transfer direction + * @{ + */ +#define DMA_PERIPH_TO_MEMORY 0x00000000U /*!< Peripheral to memory direction */ +#define DMA_MEMORY_TO_PERIPH DMA_SxCR_DIR_0 /*!< Memory to peripheral direction */ +#define DMA_MEMORY_TO_MEMORY DMA_SxCR_DIR_1 /*!< Memory to memory direction */ +/** + * @} + */ + +/** @defgroup DMA_Peripheral_incremented_mode DMA Peripheral incremented mode + * @brief DMA peripheral incremented mode + * @{ + */ +#define DMA_PINC_ENABLE DMA_SxCR_PINC /*!< Peripheral increment mode enable */ +#define DMA_PINC_DISABLE 0x00000000U /*!< Peripheral increment mode disable */ +/** + * @} + */ + +/** @defgroup DMA_Memory_incremented_mode DMA Memory incremented mode + * @brief DMA memory incremented mode + * @{ + */ +#define DMA_MINC_ENABLE DMA_SxCR_MINC /*!< Memory increment mode enable */ +#define DMA_MINC_DISABLE 0x00000000U /*!< Memory increment mode disable */ +/** + * @} + */ + +/** @defgroup DMA_Peripheral_data_size DMA Peripheral data size + * @brief DMA peripheral data size + * @{ + */ +#define DMA_PDATAALIGN_BYTE 0x00000000U /*!< Peripheral data alignment: Byte */ +#define DMA_PDATAALIGN_HALFWORD DMA_SxCR_PSIZE_0 /*!< Peripheral data alignment: HalfWord */ +#define DMA_PDATAALIGN_WORD DMA_SxCR_PSIZE_1 /*!< Peripheral data alignment: Word */ +/** + * @} + */ + +/** @defgroup DMA_Memory_data_size DMA Memory data size + * @brief DMA memory data size + * @{ + */ +#define DMA_MDATAALIGN_BYTE 0x00000000U /*!< Memory data alignment: Byte */ +#define DMA_MDATAALIGN_HALFWORD DMA_SxCR_MSIZE_0 /*!< Memory data alignment: HalfWord */ +#define DMA_MDATAALIGN_WORD DMA_SxCR_MSIZE_1 /*!< Memory data alignment: Word */ +/** + * @} + */ + +/** @defgroup DMA_mode DMA mode + * @brief DMA mode + * @{ + */ +#define DMA_NORMAL 0x00000000U /*!< Normal mode */ +#define DMA_CIRCULAR DMA_SxCR_CIRC /*!< Circular mode */ +#define DMA_PFCTRL DMA_SxCR_PFCTRL /*!< Peripheral flow control mode */ +/** + * @} + */ + +/** @defgroup DMA_Priority_level DMA Priority level + * @brief DMA priority levels + * @{ + */ +#define DMA_PRIORITY_LOW 0x00000000U /*!< Priority level: Low */ +#define DMA_PRIORITY_MEDIUM DMA_SxCR_PL_0 /*!< Priority level: Medium */ +#define DMA_PRIORITY_HIGH DMA_SxCR_PL_1 /*!< Priority level: High */ +#define 
DMA_PRIORITY_VERY_HIGH DMA_SxCR_PL /*!< Priority level: Very High */ +/** + * @} + */ + +/** @defgroup DMA_FIFO_direct_mode DMA FIFO direct mode + * @brief DMA FIFO direct mode + * @{ + */ +#define DMA_FIFOMODE_DISABLE 0x00000000U /*!< FIFO mode disable */ +#define DMA_FIFOMODE_ENABLE DMA_SxFCR_DMDIS /*!< FIFO mode enable */ +/** + * @} + */ + +/** @defgroup DMA_FIFO_threshold_level DMA FIFO threshold level + * @brief DMA FIFO level + * @{ + */ +#define DMA_FIFO_THRESHOLD_1QUARTERFULL 0x00000000U /*!< FIFO threshold 1 quart full configuration */ +#define DMA_FIFO_THRESHOLD_HALFFULL DMA_SxFCR_FTH_0 /*!< FIFO threshold half full configuration */ +#define DMA_FIFO_THRESHOLD_3QUARTERSFULL DMA_SxFCR_FTH_1 /*!< FIFO threshold 3 quarts full configuration */ +#define DMA_FIFO_THRESHOLD_FULL DMA_SxFCR_FTH /*!< FIFO threshold full configuration */ +/** + * @} + */ + +/** @defgroup DMA_Memory_burst DMA Memory burst + * @brief DMA memory burst + * @{ + */ +#define DMA_MBURST_SINGLE 0x00000000U +#define DMA_MBURST_INC4 DMA_SxCR_MBURST_0 +#define DMA_MBURST_INC8 DMA_SxCR_MBURST_1 +#define DMA_MBURST_INC16 DMA_SxCR_MBURST +/** + * @} + */ + +/** @defgroup DMA_Peripheral_burst DMA Peripheral burst + * @brief DMA peripheral burst + * @{ + */ +#define DMA_PBURST_SINGLE 0x00000000U +#define DMA_PBURST_INC4 DMA_SxCR_PBURST_0 +#define DMA_PBURST_INC8 DMA_SxCR_PBURST_1 +#define DMA_PBURST_INC16 DMA_SxCR_PBURST +/** + * @} + */ + +/** @defgroup DMA_interrupt_enable_definitions DMA interrupt enable definitions + * @brief DMA interrupts definition + * @{ + */ +#define DMA_IT_TC DMA_SxCR_TCIE +#define DMA_IT_HT DMA_SxCR_HTIE +#define DMA_IT_TE DMA_SxCR_TEIE +#define DMA_IT_DME DMA_SxCR_DMEIE +#define DMA_IT_FE 0x00000080U +/** + * @} + */ + +/** @defgroup DMA_flag_definitions DMA flag definitions + * @brief DMA flag definitions + * @{ + */ +#define DMA_FLAG_FEIF0_4 0x00000001U +#define DMA_FLAG_DMEIF0_4 0x00000004U +#define DMA_FLAG_TEIF0_4 0x00000008U +#define DMA_FLAG_HTIF0_4 0x00000010U +#define DMA_FLAG_TCIF0_4 0x00000020U +#define DMA_FLAG_FEIF1_5 0x00000040U +#define DMA_FLAG_DMEIF1_5 0x00000100U +#define DMA_FLAG_TEIF1_5 0x00000200U +#define DMA_FLAG_HTIF1_5 0x00000400U +#define DMA_FLAG_TCIF1_5 0x00000800U +#define DMA_FLAG_FEIF2_6 0x00010000U +#define DMA_FLAG_DMEIF2_6 0x00040000U +#define DMA_FLAG_TEIF2_6 0x00080000U +#define DMA_FLAG_HTIF2_6 0x00100000U +#define DMA_FLAG_TCIF2_6 0x00200000U +#define DMA_FLAG_FEIF3_7 0x00400000U +#define DMA_FLAG_DMEIF3_7 0x01000000U +#define DMA_FLAG_TEIF3_7 0x02000000U +#define DMA_FLAG_HTIF3_7 0x04000000U +#define DMA_FLAG_TCIF3_7 0x08000000U +/** + * @} + */ + +/** + * @} + */ + +/* Exported macro ------------------------------------------------------------*/ + +/** @brief Reset DMA handle state + * @param __HANDLE__ specifies the DMA handle. + * @retval None + */ +#define __HAL_DMA_RESET_HANDLE_STATE(__HANDLE__) ((__HANDLE__)->State = HAL_DMA_STATE_RESET) + +/** + * @brief Return the current DMA Stream FIFO filled level. + * @param __HANDLE__ DMA handle + * @retval The FIFO filling state. + * - DMA_FIFOStatus_Less1QuarterFull: when FIFO is less than 1 quarter-full + * and not empty. + * - DMA_FIFOStatus_1QuarterFull: if more than 1 quarter-full. + * - DMA_FIFOStatus_HalfFull: if more than 1 half-full. + * - DMA_FIFOStatus_3QuartersFull: if more than 3 quarters-full. 
+ * - DMA_FIFOStatus_Empty: when FIFO is empty + * - DMA_FIFOStatus_Full: when FIFO is full + */ +#define __HAL_DMA_GET_FS(__HANDLE__) (((__HANDLE__)->Instance->FCR & (DMA_SxFCR_FS))) + +/** + * @brief Enable the specified DMA Stream. + * @param __HANDLE__ DMA handle + * @retval None + */ +#define __HAL_DMA_ENABLE(__HANDLE__) ((__HANDLE__)->Instance->CR |= DMA_SxCR_EN) + +/** + * @brief Disable the specified DMA Stream. + * @param __HANDLE__ DMA handle + * @retval None + */ +#define __HAL_DMA_DISABLE(__HANDLE__) ((__HANDLE__)->Instance->CR &= ~DMA_SxCR_EN) + +/* Interrupt & Flag management */ + +/** + * @brief Return the current DMA Stream transfer complete flag. + * @param __HANDLE__ DMA handle + * @retval The specified transfer complete flag index. + */ +#define __HAL_DMA_GET_TC_FLAG_INDEX(__HANDLE__) \ +(((uint32_t)((__HANDLE__)->Instance) == ((uint32_t)DMA1_Stream0))? DMA_FLAG_TCIF0_4 :\ + ((uint32_t)((__HANDLE__)->Instance) == ((uint32_t)DMA2_Stream0))? DMA_FLAG_TCIF0_4 :\ + ((uint32_t)((__HANDLE__)->Instance) == ((uint32_t)DMA1_Stream4))? DMA_FLAG_TCIF0_4 :\ + ((uint32_t)((__HANDLE__)->Instance) == ((uint32_t)DMA2_Stream4))? DMA_FLAG_TCIF0_4 :\ + ((uint32_t)((__HANDLE__)->Instance) == ((uint32_t)DMA1_Stream1))? DMA_FLAG_TCIF1_5 :\ + ((uint32_t)((__HANDLE__)->Instance) == ((uint32_t)DMA2_Stream1))? DMA_FLAG_TCIF1_5 :\ + ((uint32_t)((__HANDLE__)->Instance) == ((uint32_t)DMA1_Stream5))? DMA_FLAG_TCIF1_5 :\ + ((uint32_t)((__HANDLE__)->Instance) == ((uint32_t)DMA2_Stream5))? DMA_FLAG_TCIF1_5 :\ + ((uint32_t)((__HANDLE__)->Instance) == ((uint32_t)DMA1_Stream2))? DMA_FLAG_TCIF2_6 :\ + ((uint32_t)((__HANDLE__)->Instance) == ((uint32_t)DMA2_Stream2))? DMA_FLAG_TCIF2_6 :\ + ((uint32_t)((__HANDLE__)->Instance) == ((uint32_t)DMA1_Stream6))? DMA_FLAG_TCIF2_6 :\ + ((uint32_t)((__HANDLE__)->Instance) == ((uint32_t)DMA2_Stream6))? DMA_FLAG_TCIF2_6 :\ + DMA_FLAG_TCIF3_7) + +/** + * @brief Return the current DMA Stream half transfer complete flag. + * @param __HANDLE__ DMA handle + * @retval The specified half transfer complete flag index. + */ +#define __HAL_DMA_GET_HT_FLAG_INDEX(__HANDLE__)\ +(((uint32_t)((__HANDLE__)->Instance) == ((uint32_t)DMA1_Stream0))? DMA_FLAG_HTIF0_4 :\ + ((uint32_t)((__HANDLE__)->Instance) == ((uint32_t)DMA2_Stream0))? DMA_FLAG_HTIF0_4 :\ + ((uint32_t)((__HANDLE__)->Instance) == ((uint32_t)DMA1_Stream4))? DMA_FLAG_HTIF0_4 :\ + ((uint32_t)((__HANDLE__)->Instance) == ((uint32_t)DMA2_Stream4))? DMA_FLAG_HTIF0_4 :\ + ((uint32_t)((__HANDLE__)->Instance) == ((uint32_t)DMA1_Stream1))? DMA_FLAG_HTIF1_5 :\ + ((uint32_t)((__HANDLE__)->Instance) == ((uint32_t)DMA2_Stream1))? DMA_FLAG_HTIF1_5 :\ + ((uint32_t)((__HANDLE__)->Instance) == ((uint32_t)DMA1_Stream5))? DMA_FLAG_HTIF1_5 :\ + ((uint32_t)((__HANDLE__)->Instance) == ((uint32_t)DMA2_Stream5))? DMA_FLAG_HTIF1_5 :\ + ((uint32_t)((__HANDLE__)->Instance) == ((uint32_t)DMA1_Stream2))? DMA_FLAG_HTIF2_6 :\ + ((uint32_t)((__HANDLE__)->Instance) == ((uint32_t)DMA2_Stream2))? DMA_FLAG_HTIF2_6 :\ + ((uint32_t)((__HANDLE__)->Instance) == ((uint32_t)DMA1_Stream6))? DMA_FLAG_HTIF2_6 :\ + ((uint32_t)((__HANDLE__)->Instance) == ((uint32_t)DMA2_Stream6))? DMA_FLAG_HTIF2_6 :\ + DMA_FLAG_HTIF3_7) + +/** + * @brief Return the current DMA Stream transfer error flag. + * @param __HANDLE__ DMA handle + * @retval The specified transfer error flag index. + */ +#define __HAL_DMA_GET_TE_FLAG_INDEX(__HANDLE__)\ +(((uint32_t)((__HANDLE__)->Instance) == ((uint32_t)DMA1_Stream0))? 
DMA_FLAG_TEIF0_4 :\ + ((uint32_t)((__HANDLE__)->Instance) == ((uint32_t)DMA2_Stream0))? DMA_FLAG_TEIF0_4 :\ + ((uint32_t)((__HANDLE__)->Instance) == ((uint32_t)DMA1_Stream4))? DMA_FLAG_TEIF0_4 :\ + ((uint32_t)((__HANDLE__)->Instance) == ((uint32_t)DMA2_Stream4))? DMA_FLAG_TEIF0_4 :\ + ((uint32_t)((__HANDLE__)->Instance) == ((uint32_t)DMA1_Stream1))? DMA_FLAG_TEIF1_5 :\ + ((uint32_t)((__HANDLE__)->Instance) == ((uint32_t)DMA2_Stream1))? DMA_FLAG_TEIF1_5 :\ + ((uint32_t)((__HANDLE__)->Instance) == ((uint32_t)DMA1_Stream5))? DMA_FLAG_TEIF1_5 :\ + ((uint32_t)((__HANDLE__)->Instance) == ((uint32_t)DMA2_Stream5))? DMA_FLAG_TEIF1_5 :\ + ((uint32_t)((__HANDLE__)->Instance) == ((uint32_t)DMA1_Stream2))? DMA_FLAG_TEIF2_6 :\ + ((uint32_t)((__HANDLE__)->Instance) == ((uint32_t)DMA2_Stream2))? DMA_FLAG_TEIF2_6 :\ + ((uint32_t)((__HANDLE__)->Instance) == ((uint32_t)DMA1_Stream6))? DMA_FLAG_TEIF2_6 :\ + ((uint32_t)((__HANDLE__)->Instance) == ((uint32_t)DMA2_Stream6))? DMA_FLAG_TEIF2_6 :\ + DMA_FLAG_TEIF3_7) + +/** + * @brief Return the current DMA Stream FIFO error flag. + * @param __HANDLE__ DMA handle + * @retval The specified FIFO error flag index. + */ +#define __HAL_DMA_GET_FE_FLAG_INDEX(__HANDLE__)\ +(((uint32_t)((__HANDLE__)->Instance) == ((uint32_t)DMA1_Stream0))? DMA_FLAG_FEIF0_4 :\ + ((uint32_t)((__HANDLE__)->Instance) == ((uint32_t)DMA2_Stream0))? DMA_FLAG_FEIF0_4 :\ + ((uint32_t)((__HANDLE__)->Instance) == ((uint32_t)DMA1_Stream4))? DMA_FLAG_FEIF0_4 :\ + ((uint32_t)((__HANDLE__)->Instance) == ((uint32_t)DMA2_Stream4))? DMA_FLAG_FEIF0_4 :\ + ((uint32_t)((__HANDLE__)->Instance) == ((uint32_t)DMA1_Stream1))? DMA_FLAG_FEIF1_5 :\ + ((uint32_t)((__HANDLE__)->Instance) == ((uint32_t)DMA2_Stream1))? DMA_FLAG_FEIF1_5 :\ + ((uint32_t)((__HANDLE__)->Instance) == ((uint32_t)DMA1_Stream5))? DMA_FLAG_FEIF1_5 :\ + ((uint32_t)((__HANDLE__)->Instance) == ((uint32_t)DMA2_Stream5))? DMA_FLAG_FEIF1_5 :\ + ((uint32_t)((__HANDLE__)->Instance) == ((uint32_t)DMA1_Stream2))? DMA_FLAG_FEIF2_6 :\ + ((uint32_t)((__HANDLE__)->Instance) == ((uint32_t)DMA2_Stream2))? DMA_FLAG_FEIF2_6 :\ + ((uint32_t)((__HANDLE__)->Instance) == ((uint32_t)DMA1_Stream6))? DMA_FLAG_FEIF2_6 :\ + ((uint32_t)((__HANDLE__)->Instance) == ((uint32_t)DMA2_Stream6))? DMA_FLAG_FEIF2_6 :\ + DMA_FLAG_FEIF3_7) + +/** + * @brief Return the current DMA Stream direct mode error flag. + * @param __HANDLE__ DMA handle + * @retval The specified direct mode error flag index. + */ +#define __HAL_DMA_GET_DME_FLAG_INDEX(__HANDLE__)\ +(((uint32_t)((__HANDLE__)->Instance) == ((uint32_t)DMA1_Stream0))? DMA_FLAG_DMEIF0_4 :\ + ((uint32_t)((__HANDLE__)->Instance) == ((uint32_t)DMA2_Stream0))? DMA_FLAG_DMEIF0_4 :\ + ((uint32_t)((__HANDLE__)->Instance) == ((uint32_t)DMA1_Stream4))? DMA_FLAG_DMEIF0_4 :\ + ((uint32_t)((__HANDLE__)->Instance) == ((uint32_t)DMA2_Stream4))? DMA_FLAG_DMEIF0_4 :\ + ((uint32_t)((__HANDLE__)->Instance) == ((uint32_t)DMA1_Stream1))? DMA_FLAG_DMEIF1_5 :\ + ((uint32_t)((__HANDLE__)->Instance) == ((uint32_t)DMA2_Stream1))? DMA_FLAG_DMEIF1_5 :\ + ((uint32_t)((__HANDLE__)->Instance) == ((uint32_t)DMA1_Stream5))? DMA_FLAG_DMEIF1_5 :\ + ((uint32_t)((__HANDLE__)->Instance) == ((uint32_t)DMA2_Stream5))? DMA_FLAG_DMEIF1_5 :\ + ((uint32_t)((__HANDLE__)->Instance) == ((uint32_t)DMA1_Stream2))? DMA_FLAG_DMEIF2_6 :\ + ((uint32_t)((__HANDLE__)->Instance) == ((uint32_t)DMA2_Stream2))? DMA_FLAG_DMEIF2_6 :\ + ((uint32_t)((__HANDLE__)->Instance) == ((uint32_t)DMA1_Stream6))? DMA_FLAG_DMEIF2_6 :\ + ((uint32_t)((__HANDLE__)->Instance) == ((uint32_t)DMA2_Stream6))? 
DMA_FLAG_DMEIF2_6 :\ + DMA_FLAG_DMEIF3_7) + +/** + * @brief Get the DMA Stream pending flags. + * @param __HANDLE__ DMA handle + * @param __FLAG__ Get the specified flag. + * This parameter can be any combination of the following values: + * @arg DMA_FLAG_TCIFx: Transfer complete flag. + * @arg DMA_FLAG_HTIFx: Half transfer complete flag. + * @arg DMA_FLAG_TEIFx: Transfer error flag. + * @arg DMA_FLAG_DMEIFx: Direct mode error flag. + * @arg DMA_FLAG_FEIFx: FIFO error flag. + * Where x can be 0_4, 1_5, 2_6 or 3_7 to select the DMA Stream flag. + * @retval The state of FLAG (SET or RESET). + */ +#define __HAL_DMA_GET_FLAG(__HANDLE__, __FLAG__)\ +(((uint32_t)((__HANDLE__)->Instance) > (uint32_t)DMA2_Stream3)? (DMA2->HISR & (__FLAG__)) :\ + ((uint32_t)((__HANDLE__)->Instance) > (uint32_t)DMA1_Stream7)? (DMA2->LISR & (__FLAG__)) :\ + ((uint32_t)((__HANDLE__)->Instance) > (uint32_t)DMA1_Stream3)? (DMA1->HISR & (__FLAG__)) : (DMA1->LISR & (__FLAG__))) + +/** + * @brief Clear the DMA Stream pending flags. + * @param __HANDLE__ DMA handle + * @param __FLAG__ specifies the flag to clear. + * This parameter can be any combination of the following values: + * @arg DMA_FLAG_TCIFx: Transfer complete flag. + * @arg DMA_FLAG_HTIFx: Half transfer complete flag. + * @arg DMA_FLAG_TEIFx: Transfer error flag. + * @arg DMA_FLAG_DMEIFx: Direct mode error flag. + * @arg DMA_FLAG_FEIFx: FIFO error flag. + * Where x can be 0_4, 1_5, 2_6 or 3_7 to select the DMA Stream flag. + * @retval None + */ +#define __HAL_DMA_CLEAR_FLAG(__HANDLE__, __FLAG__) \ +(((uint32_t)((__HANDLE__)->Instance) > (uint32_t)DMA2_Stream3)? (DMA2->HIFCR = (__FLAG__)) :\ + ((uint32_t)((__HANDLE__)->Instance) > (uint32_t)DMA1_Stream7)? (DMA2->LIFCR = (__FLAG__)) :\ + ((uint32_t)((__HANDLE__)->Instance) > (uint32_t)DMA1_Stream3)? (DMA1->HIFCR = (__FLAG__)) : (DMA1->LIFCR = (__FLAG__))) + +/** + * @brief Enable the specified DMA Stream interrupts. + * @param __HANDLE__ DMA handle + * @param __INTERRUPT__ specifies the DMA interrupt sources to be enabled or disabled. + * This parameter can be one of the following values: + * @arg DMA_IT_TC: Transfer complete interrupt mask. + * @arg DMA_IT_HT: Half transfer complete interrupt mask. + * @arg DMA_IT_TE: Transfer error interrupt mask. + * @arg DMA_IT_FE: FIFO error interrupt mask. + * @arg DMA_IT_DME: Direct mode error interrupt. + * @retval None + */ +#define __HAL_DMA_ENABLE_IT(__HANDLE__, __INTERRUPT__) (((__INTERRUPT__) != DMA_IT_FE)? \ +((__HANDLE__)->Instance->CR |= (__INTERRUPT__)) : ((__HANDLE__)->Instance->FCR |= (__INTERRUPT__))) + +/** + * @brief Disable the specified DMA Stream interrupts. + * @param __HANDLE__ DMA handle + * @param __INTERRUPT__ specifies the DMA interrupt sources to be enabled or disabled. + * This parameter can be one of the following values: + * @arg DMA_IT_TC: Transfer complete interrupt mask. + * @arg DMA_IT_HT: Half transfer complete interrupt mask. + * @arg DMA_IT_TE: Transfer error interrupt mask. + * @arg DMA_IT_FE: FIFO error interrupt mask. + * @arg DMA_IT_DME: Direct mode error interrupt. + * @retval None + */ +#define __HAL_DMA_DISABLE_IT(__HANDLE__, __INTERRUPT__) (((__INTERRUPT__) != DMA_IT_FE)? \ +((__HANDLE__)->Instance->CR &= ~(__INTERRUPT__)) : ((__HANDLE__)->Instance->FCR &= ~(__INTERRUPT__))) + +/** + * @brief Check whether the specified DMA Stream interrupt is enabled or not. + * @param __HANDLE__ DMA handle + * @param __INTERRUPT__ specifies the DMA interrupt source to check. 
+ * This parameter can be one of the following values: + * @arg DMA_IT_TC: Transfer complete interrupt mask. + * @arg DMA_IT_HT: Half transfer complete interrupt mask. + * @arg DMA_IT_TE: Transfer error interrupt mask. + * @arg DMA_IT_FE: FIFO error interrupt mask. + * @arg DMA_IT_DME: Direct mode error interrupt. + * @retval The state of DMA_IT. + */ +#define __HAL_DMA_GET_IT_SOURCE(__HANDLE__, __INTERRUPT__) (((__INTERRUPT__) != DMA_IT_FE)? \ + ((__HANDLE__)->Instance->CR & (__INTERRUPT__)) : \ + ((__HANDLE__)->Instance->FCR & (__INTERRUPT__))) + +/** + * @brief Writes the number of data units to be transferred on the DMA Stream. + * @param __HANDLE__ DMA handle + * @param __COUNTER__ Number of data units to be transferred (from 0 to 65535) + * Number of data items depends only on the Peripheral data format. + * + * @note If Peripheral data format is Bytes: number of data units is equal + * to total number of bytes to be transferred. + * + * @note If Peripheral data format is Half-Word: number of data units is + * equal to total number of bytes to be transferred / 2. + * + * @note If Peripheral data format is Word: number of data units is equal + * to total number of bytes to be transferred / 4. + * + * @retval The number of remaining data units in the current DMAy Streamx transfer. + */ +#define __HAL_DMA_SET_COUNTER(__HANDLE__, __COUNTER__) ((__HANDLE__)->Instance->NDTR = (uint16_t)(__COUNTER__)) + +/** + * @brief Returns the number of remaining data units in the current DMAy Streamx transfer. + * @param __HANDLE__ DMA handle + * + * @retval The number of remaining data units in the current DMA Stream transfer. + */ +#define __HAL_DMA_GET_COUNTER(__HANDLE__) ((__HANDLE__)->Instance->NDTR) + + +/* Include DMA HAL Extension module */ +#include "stm32f7xx_hal_dma_ex.h" + +/* Exported functions --------------------------------------------------------*/ + +/** @defgroup DMA_Exported_Functions DMA Exported Functions + * @brief DMA Exported functions + * @{ + */ + +/** @defgroup DMA_Exported_Functions_Group1 Initialization and de-initialization functions + * @brief Initialization and de-initialization functions + * @{ + */ +HAL_StatusTypeDef HAL_DMA_Init(DMA_HandleTypeDef *hdma); +HAL_StatusTypeDef HAL_DMA_DeInit(DMA_HandleTypeDef *hdma); +/** + * @} + */ + +/** @defgroup DMA_Exported_Functions_Group2 I/O operation functions + * @brief I/O operation functions + * @{ + */ +HAL_StatusTypeDef HAL_DMA_Start (DMA_HandleTypeDef *hdma, uint32_t SrcAddress, uint32_t DstAddress, uint32_t DataLength); +HAL_StatusTypeDef HAL_DMA_Start_IT(DMA_HandleTypeDef *hdma, uint32_t SrcAddress, uint32_t DstAddress, uint32_t DataLength); +HAL_StatusTypeDef HAL_DMA_Abort(DMA_HandleTypeDef *hdma); +HAL_StatusTypeDef HAL_DMA_Abort_IT(DMA_HandleTypeDef *hdma); +HAL_StatusTypeDef HAL_DMA_PollForTransfer(DMA_HandleTypeDef *hdma, HAL_DMA_LevelCompleteTypeDef CompleteLevel, uint32_t Timeout); +void HAL_DMA_IRQHandler(DMA_HandleTypeDef *hdma); +HAL_StatusTypeDef HAL_DMA_RegisterCallback(DMA_HandleTypeDef *hdma, HAL_DMA_CallbackIDTypeDef CallbackID, void (* pCallback)(DMA_HandleTypeDef *_hdma)); +HAL_StatusTypeDef HAL_DMA_UnRegisterCallback(DMA_HandleTypeDef *hdma, HAL_DMA_CallbackIDTypeDef CallbackID); + +/** + * @} + */ + +/** @defgroup DMA_Exported_Functions_Group3 Peripheral State functions + * @brief Peripheral State functions + * @{ + */ +HAL_DMA_StateTypeDef HAL_DMA_GetState(DMA_HandleTypeDef *hdma); +uint32_t HAL_DMA_GetError(DMA_HandleTypeDef *hdma); +/** + * @} + */ +/** + * @} + */ +/* Private Constants 
-------------------------------------------------------------*/ +/** @defgroup DMA_Private_Constants DMA Private Constants + * @brief DMA private defines and constants + * @{ + */ +/** + * @} + */ + +/* Private macros ------------------------------------------------------------*/ +/** @defgroup DMA_Private_Macros DMA Private Macros + * @brief DMA private macros + * @{ + */ +#define IS_DMA_DIRECTION(DIRECTION) (((DIRECTION) == DMA_PERIPH_TO_MEMORY ) || \ + ((DIRECTION) == DMA_MEMORY_TO_PERIPH) || \ + ((DIRECTION) == DMA_MEMORY_TO_MEMORY)) + +#define IS_DMA_BUFFER_SIZE(SIZE) (((SIZE) >= 0x01U) && ((SIZE) < 0x10000U)) + +#define IS_DMA_PERIPHERAL_INC_STATE(STATE) (((STATE) == DMA_PINC_ENABLE) || \ + ((STATE) == DMA_PINC_DISABLE)) + +#define IS_DMA_MEMORY_INC_STATE(STATE) (((STATE) == DMA_MINC_ENABLE) || \ + ((STATE) == DMA_MINC_DISABLE)) + +#define IS_DMA_PERIPHERAL_DATA_SIZE(SIZE) (((SIZE) == DMA_PDATAALIGN_BYTE) || \ + ((SIZE) == DMA_PDATAALIGN_HALFWORD) || \ + ((SIZE) == DMA_PDATAALIGN_WORD)) + +#define IS_DMA_MEMORY_DATA_SIZE(SIZE) (((SIZE) == DMA_MDATAALIGN_BYTE) || \ + ((SIZE) == DMA_MDATAALIGN_HALFWORD) || \ + ((SIZE) == DMA_MDATAALIGN_WORD )) + +#define IS_DMA_MODE(MODE) (((MODE) == DMA_NORMAL ) || \ + ((MODE) == DMA_CIRCULAR) || \ + ((MODE) == DMA_PFCTRL)) + +#define IS_DMA_PRIORITY(PRIORITY) (((PRIORITY) == DMA_PRIORITY_LOW ) || \ + ((PRIORITY) == DMA_PRIORITY_MEDIUM) || \ + ((PRIORITY) == DMA_PRIORITY_HIGH) || \ + ((PRIORITY) == DMA_PRIORITY_VERY_HIGH)) + +#define IS_DMA_FIFO_MODE_STATE(STATE) (((STATE) == DMA_FIFOMODE_DISABLE ) || \ + ((STATE) == DMA_FIFOMODE_ENABLE)) + +#define IS_DMA_FIFO_THRESHOLD(THRESHOLD) (((THRESHOLD) == DMA_FIFO_THRESHOLD_1QUARTERFULL ) || \ + ((THRESHOLD) == DMA_FIFO_THRESHOLD_HALFFULL) || \ + ((THRESHOLD) == DMA_FIFO_THRESHOLD_3QUARTERSFULL) || \ + ((THRESHOLD) == DMA_FIFO_THRESHOLD_FULL)) + +#define IS_DMA_MEMORY_BURST(BURST) (((BURST) == DMA_MBURST_SINGLE) || \ + ((BURST) == DMA_MBURST_INC4) || \ + ((BURST) == DMA_MBURST_INC8) || \ + ((BURST) == DMA_MBURST_INC16)) + +#define IS_DMA_PERIPHERAL_BURST(BURST) (((BURST) == DMA_PBURST_SINGLE) || \ + ((BURST) == DMA_PBURST_INC4) || \ + ((BURST) == DMA_PBURST_INC8) || \ + ((BURST) == DMA_PBURST_INC16)) +/** + * @} + */ + +/* Private functions ---------------------------------------------------------*/ +/** @defgroup DMA_Private_Functions DMA Private Functions + * @brief DMA private functions + * @{ + */ +/** + * @} + */ + +/** + * @} + */ + +/** + * @} + */ + +#ifdef __cplusplus +} +#endif + +#endif /* __STM32F7xx_HAL_DMA_H */ + diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_dma_ex.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_dma_ex.h new file mode 100644 index 0000000..286fb30 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_dma_ex.h @@ -0,0 +1,183 @@ +/** + ****************************************************************************** + * @file stm32f7xx_hal_dma_ex.h + * @author MCD Application Team + * @brief Header file of DMA HAL extension module. + ****************************************************************************** + * @attention + * + * Copyright (c) 2017 STMicroelectronics. + * All rights reserved. + * + * This software is licensed under terms that can be found in the LICENSE file in + * the root directory of this software component. 
+ * If no LICENSE file comes with this software, it is provided AS-IS. + * + ****************************************************************************** + */ + +/* Define to prevent recursive inclusion -------------------------------------*/ +#ifndef __STM32F7xx_HAL_DMA_EX_H +#define __STM32F7xx_HAL_DMA_EX_H + +#ifdef __cplusplus + extern "C" { +#endif + +/* Includes ------------------------------------------------------------------*/ +#include "stm32f7xx_hal_def.h" + +/** @addtogroup STM32F7xx_HAL_Driver + * @{ + */ + +/** @addtogroup DMAEx + * @{ + */ + +/* Exported types ------------------------------------------------------------*/ +/** @defgroup DMAEx_Exported_Types DMAEx Exported Types + * @brief DMAEx Exported types + * @{ + */ + +/** + * @brief HAL DMA Memory definition + */ +typedef enum +{ + MEMORY0 = 0x00U, /*!< Memory 0 */ + MEMORY1 = 0x01U, /*!< Memory 1 */ + +}HAL_DMA_MemoryTypeDef; + +/** + * @} + */ + +/* Exported constants --------------------------------------------------------*/ + +/** @defgroup DMA_Exported_Constants DMA Exported Constants + * @brief DMA Exported constants + * @{ + */ + +/** @defgroup DMAEx_Channel_selection DMA Channel selection + * @brief DMAEx channel selection + * @{ + */ +#define DMA_CHANNEL_0 0x00000000U /*!< DMA Channel 0 */ +#define DMA_CHANNEL_1 0x02000000U /*!< DMA Channel 1 */ +#define DMA_CHANNEL_2 0x04000000U /*!< DMA Channel 2 */ +#define DMA_CHANNEL_3 0x06000000U /*!< DMA Channel 3 */ +#define DMA_CHANNEL_4 0x08000000U /*!< DMA Channel 4 */ +#define DMA_CHANNEL_5 0x0A000000U /*!< DMA Channel 5 */ +#define DMA_CHANNEL_6 0x0C000000U /*!< DMA Channel 6 */ +#define DMA_CHANNEL_7 0x0E000000U /*!< DMA Channel 7 */ +#if defined (STM32F722xx) || defined (STM32F723xx) || defined (STM32F732xx) || defined (STM32F733xx) ||\ + defined (STM32F765xx) || defined (STM32F767xx) || defined (STM32F769xx) || defined (STM32F777xx) ||\ + defined (STM32F779xx) || defined (STM32F730xx) +#define DMA_CHANNEL_8 0x10000000U /*!< DMA Channel 8 */ +#define DMA_CHANNEL_9 0x12000000U /*!< DMA Channel 9 */ +#define DMA_CHANNEL_10 0x14000000U /*!< DMA Channel 10*/ +#define DMA_CHANNEL_11 0x16000000U /*!< DMA Channel 11*/ +#define DMA_CHANNEL_12 0x18000000U /*!< DMA Channel 12*/ +#define DMA_CHANNEL_13 0x1A000000U /*!< DMA Channel 13*/ +#define DMA_CHANNEL_14 0x1C000000U /*!< DMA Channel 14*/ +#define DMA_CHANNEL_15 0x1E000000U /*!< DMA Channel 15*/ +#endif /* STM32F722xx || STM32F723xx || STM32F732xx || STM32F733xx || STM32F765xx || STM32F767xx || + STM32F769xx || STM32F777xx || STM32F779xx || STM32F730xx */ + +/** + * @} + */ + +/** + * @} + */ + +/* Exported functions --------------------------------------------------------*/ +/** @defgroup DMAEx_Exported_Functions DMAEx Exported Functions + * @brief DMAEx Exported functions + * @{ + */ + +/** @defgroup DMAEx_Exported_Functions_Group1 Extended features functions + * @brief Extended features functions + * @{ + */ + +/* IO operation functions *******************************************************/ +HAL_StatusTypeDef HAL_DMAEx_MultiBufferStart(DMA_HandleTypeDef *hdma, uint32_t SrcAddress, uint32_t DstAddress, uint32_t SecondMemAddress, uint32_t DataLength); +HAL_StatusTypeDef HAL_DMAEx_MultiBufferStart_IT(DMA_HandleTypeDef *hdma, uint32_t SrcAddress, uint32_t DstAddress, uint32_t SecondMemAddress, uint32_t DataLength); +HAL_StatusTypeDef HAL_DMAEx_ChangeMemory(DMA_HandleTypeDef *hdma, uint32_t Address, HAL_DMA_MemoryTypeDef memory); + +/** + * @} + */ +/** + * @} + */ + +/* Private macros 
------------------------------------------------------------*/ +/** @defgroup DMAEx_Private_Macros DMA Private Macros + * @brief DMAEx private macros + * @{ + */ +#if defined (STM32F722xx) || defined (STM32F723xx) || defined (STM32F732xx) || defined (STM32F733xx) ||\ + defined (STM32F765xx) || defined (STM32F767xx) || defined (STM32F769xx) || defined (STM32F777xx) ||\ + defined (STM32F779xx) || defined (STM32F730xx) +#define IS_DMA_CHANNEL(CHANNEL) (((CHANNEL) == DMA_CHANNEL_0) || \ + ((CHANNEL) == DMA_CHANNEL_1) || \ + ((CHANNEL) == DMA_CHANNEL_2) || \ + ((CHANNEL) == DMA_CHANNEL_3) || \ + ((CHANNEL) == DMA_CHANNEL_4) || \ + ((CHANNEL) == DMA_CHANNEL_5) || \ + ((CHANNEL) == DMA_CHANNEL_6) || \ + ((CHANNEL) == DMA_CHANNEL_7) || \ + ((CHANNEL) == DMA_CHANNEL_8) || \ + ((CHANNEL) == DMA_CHANNEL_9) || \ + ((CHANNEL) == DMA_CHANNEL_10) || \ + ((CHANNEL) == DMA_CHANNEL_11) || \ + ((CHANNEL) == DMA_CHANNEL_12) || \ + ((CHANNEL) == DMA_CHANNEL_13) || \ + ((CHANNEL) == DMA_CHANNEL_14) || \ + ((CHANNEL) == DMA_CHANNEL_15)) +#else +#define IS_DMA_CHANNEL(CHANNEL) (((CHANNEL) == DMA_CHANNEL_0) || \ + ((CHANNEL) == DMA_CHANNEL_1) || \ + ((CHANNEL) == DMA_CHANNEL_2) || \ + ((CHANNEL) == DMA_CHANNEL_3) || \ + ((CHANNEL) == DMA_CHANNEL_4) || \ + ((CHANNEL) == DMA_CHANNEL_5) || \ + ((CHANNEL) == DMA_CHANNEL_6) || \ + ((CHANNEL) == DMA_CHANNEL_7)) +#endif /* STM32F722xx || STM32F723xx || STM32F732xx || STM32F733xx || STM32F765xx || STM32F767xx || + STM32F769xx || STM32F777xx || STM32F779xx || STM32F730xx*/ +/** + * @} + */ + +/* Private functions ---------------------------------------------------------*/ +/** @defgroup DMAEx_Private_Functions DMAEx Private Functions + * @brief DMAEx Private functions + * @{ + */ +/** + * @} + */ + +/** + * @} + */ + +/** + * @} + */ + +#ifdef __cplusplus +} +#endif + +#endif /* __STM32F7xx_HAL_DMA_H */ + diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_eth.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_eth.h new file mode 100644 index 0000000..cec8c73 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_eth.h @@ -0,0 +1,2215 @@ +/** + ****************************************************************************** + * @file stm32f7xx_hal_eth.h + * @author MCD Application Team + * @brief Header file of ETH HAL module. + ****************************************************************************** + * @attention + * + * Copyright (c) 2017 STMicroelectronics. + * All rights reserved. + * + * This software is licensed under terms that can be found in the LICENSE file + * in the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. 
+ * + ****************************************************************************** + */ + +/* Define to prevent recursive inclusion -------------------------------------*/ +#ifndef __STM32F7xx_HAL_ETH_H +#define __STM32F7xx_HAL_ETH_H + +#ifdef __cplusplus + extern "C" { +#endif + +/* Includes ------------------------------------------------------------------*/ +#include "stm32f7xx_hal_def.h" + +#if defined (ETH) + +/** @addtogroup STM32F7xx_HAL_Driver + * @{ + */ + +/** @addtogroup ETH + * @{ + */ + +/** @addtogroup ETH_Private_Macros + * @{ + */ +#define IS_ETH_PHY_ADDRESS(ADDRESS) ((ADDRESS) <= 0x20) +#define IS_ETH_AUTONEGOTIATION(CMD) (((CMD) == ETH_AUTONEGOTIATION_ENABLE) || \ + ((CMD) == ETH_AUTONEGOTIATION_DISABLE)) +#define IS_ETH_SPEED(SPEED) (((SPEED) == ETH_SPEED_10M) || \ + ((SPEED) == ETH_SPEED_100M)) +#define IS_ETH_DUPLEX_MODE(MODE) (((MODE) == ETH_MODE_FULLDUPLEX) || \ + ((MODE) == ETH_MODE_HALFDUPLEX)) +#define IS_ETH_RX_MODE(MODE) (((MODE) == ETH_RXPOLLING_MODE) || \ + ((MODE) == ETH_RXINTERRUPT_MODE)) +#define IS_ETH_CHECKSUM_MODE(MODE) (((MODE) == ETH_CHECKSUM_BY_HARDWARE) || \ + ((MODE) == ETH_CHECKSUM_BY_SOFTWARE)) +#define IS_ETH_MEDIA_INTERFACE(MODE) (((MODE) == ETH_MEDIA_INTERFACE_MII) || \ + ((MODE) == ETH_MEDIA_INTERFACE_RMII)) +#define IS_ETH_WATCHDOG(CMD) (((CMD) == ETH_WATCHDOG_ENABLE) || \ + ((CMD) == ETH_WATCHDOG_DISABLE)) +#define IS_ETH_JABBER(CMD) (((CMD) == ETH_JABBER_ENABLE) || \ + ((CMD) == ETH_JABBER_DISABLE)) +#define IS_ETH_INTER_FRAME_GAP(GAP) (((GAP) == ETH_INTERFRAMEGAP_96BIT) || \ + ((GAP) == ETH_INTERFRAMEGAP_88BIT) || \ + ((GAP) == ETH_INTERFRAMEGAP_80BIT) || \ + ((GAP) == ETH_INTERFRAMEGAP_72BIT) || \ + ((GAP) == ETH_INTERFRAMEGAP_64BIT) || \ + ((GAP) == ETH_INTERFRAMEGAP_56BIT) || \ + ((GAP) == ETH_INTERFRAMEGAP_48BIT) || \ + ((GAP) == ETH_INTERFRAMEGAP_40BIT)) +#define IS_ETH_CARRIER_SENSE(CMD) (((CMD) == ETH_CARRIERSENCE_ENABLE) || \ + ((CMD) == ETH_CARRIERSENCE_DISABLE)) +#define IS_ETH_RECEIVE_OWN(CMD) (((CMD) == ETH_RECEIVEOWN_ENABLE) || \ + ((CMD) == ETH_RECEIVEOWN_DISABLE)) +#define IS_ETH_LOOPBACK_MODE(CMD) (((CMD) == ETH_LOOPBACKMODE_ENABLE) || \ + ((CMD) == ETH_LOOPBACKMODE_DISABLE)) +#define IS_ETH_CHECKSUM_OFFLOAD(CMD) (((CMD) == ETH_CHECKSUMOFFLAOD_ENABLE) || \ + ((CMD) == ETH_CHECKSUMOFFLAOD_DISABLE)) +#define IS_ETH_RETRY_TRANSMISSION(CMD) (((CMD) == ETH_RETRYTRANSMISSION_ENABLE) || \ + ((CMD) == ETH_RETRYTRANSMISSION_DISABLE)) +#define IS_ETH_AUTOMATIC_PADCRC_STRIP(CMD) (((CMD) == ETH_AUTOMATICPADCRCSTRIP_ENABLE) || \ + ((CMD) == ETH_AUTOMATICPADCRCSTRIP_DISABLE)) +#define IS_ETH_BACKOFF_LIMIT(LIMIT) (((LIMIT) == ETH_BACKOFFLIMIT_10) || \ + ((LIMIT) == ETH_BACKOFFLIMIT_8) || \ + ((LIMIT) == ETH_BACKOFFLIMIT_4) || \ + ((LIMIT) == ETH_BACKOFFLIMIT_1)) +#define IS_ETH_DEFERRAL_CHECK(CMD) (((CMD) == ETH_DEFFERRALCHECK_ENABLE) || \ + ((CMD) == ETH_DEFFERRALCHECK_DISABLE)) +#define IS_ETH_RECEIVE_ALL(CMD) (((CMD) == ETH_RECEIVEALL_ENABLE) || \ + ((CMD) == ETH_RECEIVEAll_DISABLE)) +#define IS_ETH_SOURCE_ADDR_FILTER(CMD) (((CMD) == ETH_SOURCEADDRFILTER_NORMAL_ENABLE) || \ + ((CMD) == ETH_SOURCEADDRFILTER_INVERSE_ENABLE) || \ + ((CMD) == ETH_SOURCEADDRFILTER_DISABLE)) +#define IS_ETH_CONTROL_FRAMES(PASS) (((PASS) == ETH_PASSCONTROLFRAMES_BLOCKALL) || \ + ((PASS) == ETH_PASSCONTROLFRAMES_FORWARDALL) || \ + ((PASS) == ETH_PASSCONTROLFRAMES_FORWARDPASSEDADDRFILTER)) +#define IS_ETH_BROADCAST_FRAMES_RECEPTION(CMD) (((CMD) == ETH_BROADCASTFRAMESRECEPTION_ENABLE) || \ + ((CMD) == ETH_BROADCASTFRAMESRECEPTION_DISABLE)) +#define 
IS_ETH_DESTINATION_ADDR_FILTER(FILTER) (((FILTER) == ETH_DESTINATIONADDRFILTER_NORMAL) || \ + ((FILTER) == ETH_DESTINATIONADDRFILTER_INVERSE)) +#define IS_ETH_PROMISCUOUS_MODE(CMD) (((CMD) == ETH_PROMISCUOUS_MODE_ENABLE) || \ + ((CMD) == ETH_PROMISCUOUS_MODE_DISABLE)) +#define IS_ETH_MULTICAST_FRAMES_FILTER(FILTER) (((FILTER) == ETH_MULTICASTFRAMESFILTER_PERFECTHASHTABLE) || \ + ((FILTER) == ETH_MULTICASTFRAMESFILTER_HASHTABLE) || \ + ((FILTER) == ETH_MULTICASTFRAMESFILTER_PERFECT) || \ + ((FILTER) == ETH_MULTICASTFRAMESFILTER_NONE)) +#define IS_ETH_UNICAST_FRAMES_FILTER(FILTER) (((FILTER) == ETH_UNICASTFRAMESFILTER_PERFECTHASHTABLE) || \ + ((FILTER) == ETH_UNICASTFRAMESFILTER_HASHTABLE) || \ + ((FILTER) == ETH_UNICASTFRAMESFILTER_PERFECT)) +#define IS_ETH_PAUSE_TIME(TIME) ((TIME) <= 0xFFFF) +#define IS_ETH_ZEROQUANTA_PAUSE(CMD) (((CMD) == ETH_ZEROQUANTAPAUSE_ENABLE) || \ + ((CMD) == ETH_ZEROQUANTAPAUSE_DISABLE)) +#define IS_ETH_PAUSE_LOW_THRESHOLD(THRESHOLD) (((THRESHOLD) == ETH_PAUSELOWTHRESHOLD_MINUS4) || \ + ((THRESHOLD) == ETH_PAUSELOWTHRESHOLD_MINUS28) || \ + ((THRESHOLD) == ETH_PAUSELOWTHRESHOLD_MINUS144) || \ + ((THRESHOLD) == ETH_PAUSELOWTHRESHOLD_MINUS256)) +#define IS_ETH_UNICAST_PAUSE_FRAME_DETECT(CMD) (((CMD) == ETH_UNICASTPAUSEFRAMEDETECT_ENABLE) || \ + ((CMD) == ETH_UNICASTPAUSEFRAMEDETECT_DISABLE)) +#define IS_ETH_RECEIVE_FLOWCONTROL(CMD) (((CMD) == ETH_RECEIVEFLOWCONTROL_ENABLE) || \ + ((CMD) == ETH_RECEIVEFLOWCONTROL_DISABLE)) +#define IS_ETH_TRANSMIT_FLOWCONTROL(CMD) (((CMD) == ETH_TRANSMITFLOWCONTROL_ENABLE) || \ + ((CMD) == ETH_TRANSMITFLOWCONTROL_DISABLE)) +#define IS_ETH_VLAN_TAG_COMPARISON(COMPARISON) (((COMPARISON) == ETH_VLANTAGCOMPARISON_12BIT) || \ + ((COMPARISON) == ETH_VLANTAGCOMPARISON_16BIT)) +#define IS_ETH_VLAN_TAG_IDENTIFIER(IDENTIFIER) ((IDENTIFIER) <= 0xFFFF) +#define IS_ETH_MAC_ADDRESS0123(ADDRESS) (((ADDRESS) == ETH_MAC_ADDRESS0) || \ + ((ADDRESS) == ETH_MAC_ADDRESS1) || \ + ((ADDRESS) == ETH_MAC_ADDRESS2) || \ + ((ADDRESS) == ETH_MAC_ADDRESS3)) +#define IS_ETH_MAC_ADDRESS123(ADDRESS) (((ADDRESS) == ETH_MAC_ADDRESS1) || \ + ((ADDRESS) == ETH_MAC_ADDRESS2) || \ + ((ADDRESS) == ETH_MAC_ADDRESS3)) +#define IS_ETH_MAC_ADDRESS_FILTER(FILTER) (((FILTER) == ETH_MAC_ADDRESSFILTER_SA) || \ + ((FILTER) == ETH_MAC_ADDRESSFILTER_DA)) +#define IS_ETH_MAC_ADDRESS_MASK(MASK) (((MASK) == ETH_MAC_ADDRESSMASK_BYTE6) || \ + ((MASK) == ETH_MAC_ADDRESSMASK_BYTE5) || \ + ((MASK) == ETH_MAC_ADDRESSMASK_BYTE4) || \ + ((MASK) == ETH_MAC_ADDRESSMASK_BYTE3) || \ + ((MASK) == ETH_MAC_ADDRESSMASK_BYTE2) || \ + ((MASK) == ETH_MAC_ADDRESSMASK_BYTE1)) +#define IS_ETH_DROP_TCPIP_CHECKSUM_FRAME(CMD) (((CMD) == ETH_DROPTCPIPCHECKSUMERRORFRAME_ENABLE) || \ + ((CMD) == ETH_DROPTCPIPCHECKSUMERRORFRAME_DISABLE)) +#define IS_ETH_RECEIVE_STORE_FORWARD(CMD) (((CMD) == ETH_RECEIVESTOREFORWARD_ENABLE) || \ + ((CMD) == ETH_RECEIVESTOREFORWARD_DISABLE)) +#define IS_ETH_FLUSH_RECEIVE_FRAME(CMD) (((CMD) == ETH_FLUSHRECEIVEDFRAME_ENABLE) || \ + ((CMD) == ETH_FLUSHRECEIVEDFRAME_DISABLE)) +#define IS_ETH_TRANSMIT_STORE_FORWARD(CMD) (((CMD) == ETH_TRANSMITSTOREFORWARD_ENABLE) || \ + ((CMD) == ETH_TRANSMITSTOREFORWARD_DISABLE)) +#define IS_ETH_TRANSMIT_THRESHOLD_CONTROL(THRESHOLD) (((THRESHOLD) == ETH_TRANSMITTHRESHOLDCONTROL_64BYTES) || \ + ((THRESHOLD) == ETH_TRANSMITTHRESHOLDCONTROL_128BYTES) || \ + ((THRESHOLD) == ETH_TRANSMITTHRESHOLDCONTROL_192BYTES) || \ + ((THRESHOLD) == ETH_TRANSMITTHRESHOLDCONTROL_256BYTES) || \ + ((THRESHOLD) == ETH_TRANSMITTHRESHOLDCONTROL_40BYTES) || \ + ((THRESHOLD) == 
ETH_TRANSMITTHRESHOLDCONTROL_32BYTES) || \ + ((THRESHOLD) == ETH_TRANSMITTHRESHOLDCONTROL_24BYTES) || \ + ((THRESHOLD) == ETH_TRANSMITTHRESHOLDCONTROL_16BYTES)) +#define IS_ETH_FORWARD_ERROR_FRAMES(CMD) (((CMD) == ETH_FORWARDERRORFRAMES_ENABLE) || \ + ((CMD) == ETH_FORWARDERRORFRAMES_DISABLE)) +#define IS_ETH_FORWARD_UNDERSIZED_GOOD_FRAMES(CMD) (((CMD) == ETH_FORWARDUNDERSIZEDGOODFRAMES_ENABLE) || \ + ((CMD) == ETH_FORWARDUNDERSIZEDGOODFRAMES_DISABLE)) +#define IS_ETH_RECEIVE_THRESHOLD_CONTROL(THRESHOLD) (((THRESHOLD) == ETH_RECEIVEDTHRESHOLDCONTROL_64BYTES) || \ + ((THRESHOLD) == ETH_RECEIVEDTHRESHOLDCONTROL_32BYTES) || \ + ((THRESHOLD) == ETH_RECEIVEDTHRESHOLDCONTROL_96BYTES) || \ + ((THRESHOLD) == ETH_RECEIVEDTHRESHOLDCONTROL_128BYTES)) +#define IS_ETH_SECOND_FRAME_OPERATE(CMD) (((CMD) == ETH_SECONDFRAMEOPERARTE_ENABLE) || \ + ((CMD) == ETH_SECONDFRAMEOPERARTE_DISABLE)) +#define IS_ETH_ADDRESS_ALIGNED_BEATS(CMD) (((CMD) == ETH_ADDRESSALIGNEDBEATS_ENABLE) || \ + ((CMD) == ETH_ADDRESSALIGNEDBEATS_DISABLE)) +#define IS_ETH_FIXED_BURST(CMD) (((CMD) == ETH_FIXEDBURST_ENABLE) || \ + ((CMD) == ETH_FIXEDBURST_DISABLE)) +#define IS_ETH_RXDMA_BURST_LENGTH(LENGTH) (((LENGTH) == ETH_RXDMABURSTLENGTH_1BEAT) || \ + ((LENGTH) == ETH_RXDMABURSTLENGTH_2BEAT) || \ + ((LENGTH) == ETH_RXDMABURSTLENGTH_4BEAT) || \ + ((LENGTH) == ETH_RXDMABURSTLENGTH_8BEAT) || \ + ((LENGTH) == ETH_RXDMABURSTLENGTH_16BEAT) || \ + ((LENGTH) == ETH_RXDMABURSTLENGTH_32BEAT) || \ + ((LENGTH) == ETH_RXDMABURSTLENGTH_4XPBL_4BEAT) || \ + ((LENGTH) == ETH_RXDMABURSTLENGTH_4XPBL_8BEAT) || \ + ((LENGTH) == ETH_RXDMABURSTLENGTH_4XPBL_16BEAT) || \ + ((LENGTH) == ETH_RXDMABURSTLENGTH_4XPBL_32BEAT) || \ + ((LENGTH) == ETH_RXDMABURSTLENGTH_4XPBL_64BEAT) || \ + ((LENGTH) == ETH_RXDMABURSTLENGTH_4XPBL_128BEAT)) +#define IS_ETH_TXDMA_BURST_LENGTH(LENGTH) (((LENGTH) == ETH_TXDMABURSTLENGTH_1BEAT) || \ + ((LENGTH) == ETH_TXDMABURSTLENGTH_2BEAT) || \ + ((LENGTH) == ETH_TXDMABURSTLENGTH_4BEAT) || \ + ((LENGTH) == ETH_TXDMABURSTLENGTH_8BEAT) || \ + ((LENGTH) == ETH_TXDMABURSTLENGTH_16BEAT) || \ + ((LENGTH) == ETH_TXDMABURSTLENGTH_32BEAT) || \ + ((LENGTH) == ETH_TXDMABURSTLENGTH_4XPBL_4BEAT) || \ + ((LENGTH) == ETH_TXDMABURSTLENGTH_4XPBL_8BEAT) || \ + ((LENGTH) == ETH_TXDMABURSTLENGTH_4XPBL_16BEAT) || \ + ((LENGTH) == ETH_TXDMABURSTLENGTH_4XPBL_32BEAT) || \ + ((LENGTH) == ETH_TXDMABURSTLENGTH_4XPBL_64BEAT) || \ + ((LENGTH) == ETH_TXDMABURSTLENGTH_4XPBL_128BEAT)) +#define IS_ETH_DMA_DESC_SKIP_LENGTH(LENGTH) ((LENGTH) <= 0x1F) +#define IS_ETH_DMA_ARBITRATION_ROUNDROBIN_RXTX(RATIO) (((RATIO) == ETH_DMAARBITRATION_ROUNDROBIN_RXTX_1_1) || \ + ((RATIO) == ETH_DMAARBITRATION_ROUNDROBIN_RXTX_2_1) || \ + ((RATIO) == ETH_DMAARBITRATION_ROUNDROBIN_RXTX_3_1) || \ + ((RATIO) == ETH_DMAARBITRATION_ROUNDROBIN_RXTX_4_1) || \ + ((RATIO) == ETH_DMAARBITRATION_RXPRIORTX)) +#define IS_ETH_DMATXDESC_GET_FLAG(FLAG) (((FLAG) == ETH_DMATXDESC_OWN) || \ + ((FLAG) == ETH_DMATXDESC_IC) || \ + ((FLAG) == ETH_DMATXDESC_LS) || \ + ((FLAG) == ETH_DMATXDESC_FS) || \ + ((FLAG) == ETH_DMATXDESC_DC) || \ + ((FLAG) == ETH_DMATXDESC_DP) || \ + ((FLAG) == ETH_DMATXDESC_TTSE) || \ + ((FLAG) == ETH_DMATXDESC_TER) || \ + ((FLAG) == ETH_DMATXDESC_TCH) || \ + ((FLAG) == ETH_DMATXDESC_TTSS) || \ + ((FLAG) == ETH_DMATXDESC_IHE) || \ + ((FLAG) == ETH_DMATXDESC_ES) || \ + ((FLAG) == ETH_DMATXDESC_JT) || \ + ((FLAG) == ETH_DMATXDESC_FF) || \ + ((FLAG) == ETH_DMATXDESC_PCE) || \ + ((FLAG) == ETH_DMATXDESC_LCA) || \ + ((FLAG) == ETH_DMATXDESC_NC) || \ + ((FLAG) == ETH_DMATXDESC_LCO) || \ + ((FLAG) 
== ETH_DMATXDESC_EC) || \ + ((FLAG) == ETH_DMATXDESC_VF) || \ + ((FLAG) == ETH_DMATXDESC_CC) || \ + ((FLAG) == ETH_DMATXDESC_ED) || \ + ((FLAG) == ETH_DMATXDESC_UF) || \ + ((FLAG) == ETH_DMATXDESC_DB)) +#define IS_ETH_DMA_TXDESC_SEGMENT(SEGMENT) (((SEGMENT) == ETH_DMATXDESC_LASTSEGMENTS) || \ + ((SEGMENT) == ETH_DMATXDESC_FIRSTSEGMENT)) +#define IS_ETH_DMA_TXDESC_CHECKSUM(CHECKSUM) (((CHECKSUM) == ETH_DMATXDESC_CHECKSUMBYPASS) || \ + ((CHECKSUM) == ETH_DMATXDESC_CHECKSUMIPV4HEADER) || \ + ((CHECKSUM) == ETH_DMATXDESC_CHECKSUMTCPUDPICMPSEGMENT) || \ + ((CHECKSUM) == ETH_DMATXDESC_CHECKSUMTCPUDPICMPFULL)) +#define IS_ETH_DMATXDESC_BUFFER_SIZE(SIZE) ((SIZE) <= 0x1FFF) +#define IS_ETH_DMARXDESC_GET_FLAG(FLAG) (((FLAG) == ETH_DMARXDESC_OWN) || \ + ((FLAG) == ETH_DMARXDESC_AFM) || \ + ((FLAG) == ETH_DMARXDESC_ES) || \ + ((FLAG) == ETH_DMARXDESC_DE) || \ + ((FLAG) == ETH_DMARXDESC_SAF) || \ + ((FLAG) == ETH_DMARXDESC_LE) || \ + ((FLAG) == ETH_DMARXDESC_OE) || \ + ((FLAG) == ETH_DMARXDESC_VLAN) || \ + ((FLAG) == ETH_DMARXDESC_FS) || \ + ((FLAG) == ETH_DMARXDESC_LS) || \ + ((FLAG) == ETH_DMARXDESC_IPV4HCE) || \ + ((FLAG) == ETH_DMARXDESC_LC) || \ + ((FLAG) == ETH_DMARXDESC_FT) || \ + ((FLAG) == ETH_DMARXDESC_RWT) || \ + ((FLAG) == ETH_DMARXDESC_RE) || \ + ((FLAG) == ETH_DMARXDESC_DBE) || \ + ((FLAG) == ETH_DMARXDESC_CE) || \ + ((FLAG) == ETH_DMARXDESC_MAMPCE)) +#define IS_ETH_DMA_RXDESC_BUFFER(BUFFER) (((BUFFER) == ETH_DMARXDESC_BUFFER1) || \ + ((BUFFER) == ETH_DMARXDESC_BUFFER2)) +#define IS_ETH_PMT_GET_FLAG(FLAG) (((FLAG) == ETH_PMT_FLAG_WUFR) || \ + ((FLAG) == ETH_PMT_FLAG_MPR)) +#define IS_ETH_DMA_FLAG(FLAG) ((((FLAG) & (uint32_t)0xC7FE1800) == 0x00) && ((FLAG) != 0x00)) +#define IS_ETH_DMA_GET_FLAG(FLAG) (((FLAG) == ETH_DMA_FLAG_TST) || ((FLAG) == ETH_DMA_FLAG_PMT) || \ + ((FLAG) == ETH_DMA_FLAG_MMC) || ((FLAG) == ETH_DMA_FLAG_DATATRANSFERERROR) || \ + ((FLAG) == ETH_DMA_FLAG_READWRITEERROR) || ((FLAG) == ETH_DMA_FLAG_ACCESSERROR) || \ + ((FLAG) == ETH_DMA_FLAG_NIS) || ((FLAG) == ETH_DMA_FLAG_AIS) || \ + ((FLAG) == ETH_DMA_FLAG_ER) || ((FLAG) == ETH_DMA_FLAG_FBE) || \ + ((FLAG) == ETH_DMA_FLAG_ET) || ((FLAG) == ETH_DMA_FLAG_RWT) || \ + ((FLAG) == ETH_DMA_FLAG_RPS) || ((FLAG) == ETH_DMA_FLAG_RBU) || \ + ((FLAG) == ETH_DMA_FLAG_R) || ((FLAG) == ETH_DMA_FLAG_TU) || \ + ((FLAG) == ETH_DMA_FLAG_RO) || ((FLAG) == ETH_DMA_FLAG_TJT) || \ + ((FLAG) == ETH_DMA_FLAG_TBU) || ((FLAG) == ETH_DMA_FLAG_TPS) || \ + ((FLAG) == ETH_DMA_FLAG_T)) +#define IS_ETH_MAC_IT(IT) ((((IT) & (uint32_t)0xFFFFFDF1) == 0x00) && ((IT) != 0x00)) +#define IS_ETH_MAC_GET_IT(IT) (((IT) == ETH_MAC_IT_TST) || ((IT) == ETH_MAC_IT_MMCT) || \ + ((IT) == ETH_MAC_IT_MMCR) || ((IT) == ETH_MAC_IT_MMC) || \ + ((IT) == ETH_MAC_IT_PMT)) +#define IS_ETH_MAC_GET_FLAG(FLAG) (((FLAG) == ETH_MAC_FLAG_TST) || ((FLAG) == ETH_MAC_FLAG_MMCT) || \ + ((FLAG) == ETH_MAC_FLAG_MMCR) || ((FLAG) == ETH_MAC_FLAG_MMC) || \ + ((FLAG) == ETH_MAC_FLAG_PMT)) +#define IS_ETH_DMA_IT(IT) ((((IT) & (uint32_t)0xC7FE1800) == 0x00) && ((IT) != 0x00)) +#define IS_ETH_DMA_GET_IT(IT) (((IT) == ETH_DMA_IT_TST) || ((IT) == ETH_DMA_IT_PMT) || \ + ((IT) == ETH_DMA_IT_MMC) || ((IT) == ETH_DMA_IT_NIS) || \ + ((IT) == ETH_DMA_IT_AIS) || ((IT) == ETH_DMA_IT_ER) || \ + ((IT) == ETH_DMA_IT_FBE) || ((IT) == ETH_DMA_IT_ET) || \ + ((IT) == ETH_DMA_IT_RWT) || ((IT) == ETH_DMA_IT_RPS) || \ + ((IT) == ETH_DMA_IT_RBU) || ((IT) == ETH_DMA_IT_R) || \ + ((IT) == ETH_DMA_IT_TU) || ((IT) == ETH_DMA_IT_RO) || \ + ((IT) == ETH_DMA_IT_TJT) || ((IT) == ETH_DMA_IT_TBU) || \ + ((IT) == ETH_DMA_IT_TPS) 
|| ((IT) == ETH_DMA_IT_T)) +#define IS_ETH_DMA_GET_OVERFLOW(OVERFLOW) (((OVERFLOW) == ETH_DMA_OVERFLOW_RXFIFOCOUNTER) || \ + ((OVERFLOW) == ETH_DMA_OVERFLOW_MISSEDFRAMECOUNTER)) +#define IS_ETH_MMC_IT(IT) (((((IT) & (uint32_t)0xFFDF3FFF) == 0x00) || (((IT) & (uint32_t)0xEFFDFF9F) == 0x00)) && \ + ((IT) != 0x00)) +#define IS_ETH_MMC_GET_IT(IT) (((IT) == ETH_MMC_IT_TGF) || ((IT) == ETH_MMC_IT_TGFMSC) || \ + ((IT) == ETH_MMC_IT_TGFSC) || ((IT) == ETH_MMC_IT_RGUF) || \ + ((IT) == ETH_MMC_IT_RFAE) || ((IT) == ETH_MMC_IT_RFCE)) +#define IS_ETH_ENHANCED_DESCRIPTOR_FORMAT(CMD) (((CMD) == ETH_DMAENHANCEDDESCRIPTOR_ENABLE) || \ + ((CMD) == ETH_DMAENHANCEDDESCRIPTOR_DISABLE)) + + +/** + * @} + */ + +/** @addtogroup ETH_Private_Defines + * @{ + */ +/* Delay to wait when writing to some Ethernet registers */ +#define ETH_REG_WRITE_DELAY ((uint32_t)0x00000001U) + +/* Ethernet Errors */ +#define ETH_SUCCESS ((uint32_t)0U) +#define ETH_ERROR ((uint32_t)1U) + +/* Ethernet DMA Tx descriptors Collision Count Shift */ +#define ETH_DMATXDESC_COLLISION_COUNTSHIFT ((uint32_t)3U) + +/* Ethernet DMA Tx descriptors Buffer2 Size Shift */ +#define ETH_DMATXDESC_BUFFER2_SIZESHIFT ((uint32_t)16U) + +/* Ethernet DMA Rx descriptors Frame Length Shift */ +#define ETH_DMARXDESC_FRAME_LENGTHSHIFT ((uint32_t)16U) + +/* Ethernet DMA Rx descriptors Buffer2 Size Shift */ +#define ETH_DMARXDESC_BUFFER2_SIZESHIFT ((uint32_t)16U) + +/* Ethernet DMA Rx descriptors Frame length Shift */ +#define ETH_DMARXDESC_FRAMELENGTHSHIFT ((uint32_t)16) + +/* Ethernet MAC address offsets */ +#define ETH_MAC_ADDR_HBASE (uint32_t)(ETH_MAC_BASE + (uint32_t)0x40U) /* Ethernet MAC address high offset */ +#define ETH_MAC_ADDR_LBASE (uint32_t)(ETH_MAC_BASE + (uint32_t)0x44U) /* Ethernet MAC address low offset */ + +/* Ethernet MACMIIAR register Mask */ +#define ETH_MACMIIAR_CR_MASK ((uint32_t)0xFFFFFFE3U) + +/* Ethernet MACCR register Mask */ +#define ETH_MACCR_CLEAR_MASK ((uint32_t)0xFF20810FU) + +/* Ethernet MACFCR register Mask */ +#define ETH_MACFCR_CLEAR_MASK ((uint32_t)0x0000FF41U) + +/* Ethernet DMAOMR register Mask */ +#define ETH_DMAOMR_CLEAR_MASK ((uint32_t)0xF8DE3F23U) + +/* Ethernet Remote Wake-up frame register length */ +#define ETH_WAKEUP_REGISTER_LENGTH 8U + +/* Ethernet Missed frames counter Shift */ +#define ETH_DMA_RX_OVERFLOW_MISSEDFRAMES_COUNTERSHIFT 17U + /** + * @} + */ + +/* Exported types ------------------------------------------------------------*/ +/** @defgroup ETH_Exported_Types ETH Exported Types + * @{ + */ + +/** + * @brief HAL State structures definition + */ +typedef enum +{ + HAL_ETH_STATE_RESET = 0x00U, /*!< Peripheral not yet Initialized or disabled */ + HAL_ETH_STATE_READY = 0x01U, /*!< Peripheral Initialized and ready for use */ + HAL_ETH_STATE_BUSY = 0x02U, /*!< an internal process is ongoing */ + HAL_ETH_STATE_BUSY_TX = 0x12U, /*!< Data Transmission process is ongoing */ + HAL_ETH_STATE_BUSY_RX = 0x22U, /*!< Data Reception process is ongoing */ + HAL_ETH_STATE_BUSY_TX_RX = 0x32U, /*!< Data Transmission and Reception process is ongoing */ + HAL_ETH_STATE_BUSY_WR = 0x42U, /*!< Write process is ongoing */ + HAL_ETH_STATE_BUSY_RD = 0x82U, /*!< Read process is ongoing */ + HAL_ETH_STATE_TIMEOUT = 0x03U, /*!< Timeout state */ + HAL_ETH_STATE_ERROR = 0x04U /*!< Reception process is ongoing */ +}HAL_ETH_StateTypeDef; + +/** + * @brief ETH Init Structure definition + */ + +typedef struct +{ + uint32_t AutoNegotiation; /*!< Selects or not the AutoNegotiation mode for the external PHY + The AutoNegotiation allows an 
automatic setting of the Speed (10/100Mbps) + and the mode (half/full-duplex). + This parameter can be a value of @ref ETH_AutoNegotiation */ + + uint32_t Speed; /*!< Sets the Ethernet speed: 10/100 Mbps. + This parameter can be a value of @ref ETH_Speed */ + + uint32_t DuplexMode; /*!< Selects the MAC duplex mode: Half-Duplex or Full-Duplex mode + This parameter can be a value of @ref ETH_Duplex_Mode */ + + uint16_t PhyAddress; /*!< Ethernet PHY address. + This parameter must be a number between Min_Data = 0 and Max_Data = 32 */ + + uint8_t *MACAddr; /*!< MAC Address of used Hardware: must be a pointer to an array of 6 bytes */ + + uint32_t RxMode; /*!< Selects the Ethernet Rx mode: Polling mode, Interrupt mode. + This parameter can be a value of @ref ETH_Rx_Mode */ + + uint32_t ChecksumMode; /*!< Selects whether the checksum is checked by hardware or by software. + This parameter can be a value of @ref ETH_Checksum_Mode */ + + uint32_t MediaInterface; /*!< Selects the media-independent interface or the reduced media-independent interface. + This parameter can be a value of @ref ETH_Media_Interface */ + +} ETH_InitTypeDef; + + + /** + * @brief ETH MAC Configuration Structure definition + */ + +typedef struct +{ + uint32_t Watchdog; /*!< Selects or not the Watchdog timer + When enabled, the MAC allows no more than 2048 bytes to be received. + When disabled, the MAC can receive up to 16384 bytes. + This parameter can be a value of @ref ETH_Watchdog */ + + uint32_t Jabber; /*!< Selects or not the Jabber timer + When enabled, the MAC allows no more than 2048 bytes to be sent. + When disabled, the MAC can send up to 16384 bytes. + This parameter can be a value of @ref ETH_Jabber */ + + uint32_t InterFrameGap; /*!< Selects the minimum IFG between frames during transmission. + This parameter can be a value of @ref ETH_Inter_Frame_Gap */ + + uint32_t CarrierSense; /*!< Selects or not the Carrier Sense. + This parameter can be a value of @ref ETH_Carrier_Sense */ + + uint32_t ReceiveOwn; /*!< Selects or not the ReceiveOwn. + ReceiveOwn allows the reception of frames when the TX_EN signal is asserted + in Half-Duplex mode. + This parameter can be a value of @ref ETH_Receive_Own */ + + uint32_t LoopbackMode; /*!< Selects or not the internal MAC MII Loopback mode. + This parameter can be a value of @ref ETH_Loop_Back_Mode */ + + uint32_t ChecksumOffload; /*!< Selects or not the IPv4 checksum checking for received frame payloads' TCP/UDP/ICMP headers. + This parameter can be a value of @ref ETH_Checksum_Offload */ + + uint32_t RetryTransmission; /*!< Selects or not the MAC retransmission attempts, based on the setting of BL, + when a collision occurs (Half-Duplex mode). + This parameter can be a value of @ref ETH_Retry_Transmission */ + + uint32_t AutomaticPadCRCStrip; /*!< Selects or not the Automatic MAC Pad/CRC Stripping. + This parameter can be a value of @ref ETH_Automatic_Pad_CRC_Strip */ + + uint32_t BackOffLimit; /*!< Selects the BackOff limit value. + This parameter can be a value of @ref ETH_Back_Off_Limit */ + + uint32_t DeferralCheck; /*!< Selects or not the deferral check function (Half-Duplex mode). + This parameter can be a value of @ref ETH_Deferral_Check */ + + uint32_t ReceiveAll; /*!< Selects or not the reception of all frames by the MAC (no filtering). + This parameter can be a value of @ref ETH_Receive_All */ + + uint32_t SourceAddrFilter; /*!< Selects the Source Address Filter mode.
+ This parameter can be a value of @ref ETH_Source_Addr_Filter */ + + uint32_t PassControlFrames; /*!< Sets the forwarding mode of the control frames (including unicast and multicast PAUSE frames) + This parameter can be a value of @ref ETH_Pass_Control_Frames */ + + uint32_t BroadcastFramesReception; /*!< Selects or not the reception of Broadcast Frames. + This parameter can be a value of @ref ETH_Broadcast_Frames_Reception */ + + uint32_t DestinationAddrFilter; /*!< Sets the destination filter mode for both unicast and multicast frames. + This parameter can be a value of @ref ETH_Destination_Addr_Filter */ + + uint32_t PromiscuousMode; /*!< Selects or not the Promiscuous Mode + This parameter can be a value of @ref ETH_Promiscuous_Mode */ + + uint32_t MulticastFramesFilter; /*!< Selects the Multicast Frames filter mode: None/HashTableFilter/PerfectFilter/PerfectHashTableFilter. + This parameter can be a value of @ref ETH_Multicast_Frames_Filter */ + + uint32_t UnicastFramesFilter; /*!< Selects the Unicast Frames filter mode: HashTableFilter/PerfectFilter/PerfectHashTableFilter. + This parameter can be a value of @ref ETH_Unicast_Frames_Filter */ + + uint32_t HashTableHigh; /*!< This field holds the higher 32 bits of Hash table. + This parameter must be a number between Min_Data = 0x0 and Max_Data = 0xFFFFFFFF */ + + uint32_t HashTableLow; /*!< This field holds the lower 32 bits of Hash table. + This parameter must be a number between Min_Data = 0x0 and Max_Data = 0xFFFFFFFF */ + + uint32_t PauseTime; /*!< This field holds the value to be used in the Pause Time field in the transmit control frame. + This parameter must be a number between Min_Data = 0x0 and Max_Data = 0xFFFF */ + + uint32_t ZeroQuantaPause; /*!< Selects or not the automatic generation of Zero-Quanta Pause Control frames. + This parameter can be a value of @ref ETH_Zero_Quanta_Pause */ + + uint32_t PauseLowThreshold; /*!< This field configures the threshold of the PAUSE to be checked for + automatic retransmission of PAUSE Frame. + This parameter can be a value of @ref ETH_Pause_Low_Threshold */ + + uint32_t UnicastPauseFrameDetect; /*!< Selects or not the MAC detection of the Pause frames (with MAC Address0 + unicast address and unique multicast address). + This parameter can be a value of @ref ETH_Unicast_Pause_Frame_Detect */ + + uint32_t ReceiveFlowControl; /*!< Enables or disables the MAC to decode the received Pause frame and + disable its transmitter for a specified time (Pause Time) + This parameter can be a value of @ref ETH_Receive_Flow_Control */ + + uint32_t TransmitFlowControl; /*!< Enables or disables the MAC to transmit Pause frames (Full-Duplex mode) + or the MAC back-pressure operation (Half-Duplex mode) + This parameter can be a value of @ref ETH_Transmit_Flow_Control */ + + uint32_t VLANTagComparison; /*!< Selects the 12-bit VLAN identifier or the complete 16-bit VLAN tag for + comparison and filtering. + This parameter can be a value of @ref ETH_VLAN_Tag_Comparison */ + + uint32_t VLANTagIdentifier; /*!< Holds the VLAN tag identifier for receive frames */ + +} ETH_MACInitTypeDef; + + +/** + * @brief ETH DMA Configuration Structure definition + */ + +typedef struct +{ + uint32_t DropTCPIPChecksumErrorFrame; /*!< Selects or not the Dropping of TCP/IP Checksum Error Frames. + This parameter can be a value of @ref ETH_Drop_TCP_IP_Checksum_Error_Frame */ + + uint32_t ReceiveStoreForward; /*!< Enables or disables the Receive store and forward mode. 
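+ (When enabled, a received frame is forwarded from the Rx FIFO only after it has been completely written to the FIFO; when disabled, forwarding can start once the configured receive threshold level is reached, see ReceiveThresholdControl below.)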
+ This parameter can be a value of @ref ETH_Receive_Store_Forward */ + + uint32_t FlushReceivedFrame; /*!< Enables or disables the flushing of received frames. + This parameter can be a value of @ref ETH_Flush_Received_Frame */ + + uint32_t TransmitStoreForward; /*!< Enables or disables Transmit store and forward mode. + This parameter can be a value of @ref ETH_Transmit_Store_Forward */ + + uint32_t TransmitThresholdControl; /*!< Selects or not the Transmit Threshold Control. + This parameter can be a value of @ref ETH_Transmit_Threshold_Control */ + + uint32_t ForwardErrorFrames; /*!< Selects or not the forward to the DMA of erroneous frames. + This parameter can be a value of @ref ETH_Forward_Error_Frames */ + + uint32_t ForwardUndersizedGoodFrames; /*!< Enables or disables the Rx FIFO to forward Undersized frames (frames with no Error + and length less than 64 bytes) including pad-bytes and CRC) + This parameter can be a value of @ref ETH_Forward_Undersized_Good_Frames */ + + uint32_t ReceiveThresholdControl; /*!< Selects the threshold level of the Receive FIFO. + This parameter can be a value of @ref ETH_Receive_Threshold_Control */ + + uint32_t SecondFrameOperate; /*!< Selects or not the Operate on second frame mode, which allows the DMA to process a second + frame of Transmit data even before obtaining the status for the first frame. + This parameter can be a value of @ref ETH_Second_Frame_Operate */ + + uint32_t AddressAlignedBeats; /*!< Enables or disables the Address Aligned Beats. + This parameter can be a value of @ref ETH_Address_Aligned_Beats */ + + uint32_t FixedBurst; /*!< Enables or disables the AHB Master interface fixed burst transfers. + This parameter can be a value of @ref ETH_Fixed_Burst */ + + uint32_t RxDMABurstLength; /*!< Indicates the maximum number of beats to be transferred in one Rx DMA transaction. + This parameter can be a value of @ref ETH_Rx_DMA_Burst_Length */ + + uint32_t TxDMABurstLength; /*!< Indicates the maximum number of beats to be transferred in one Tx DMA transaction. + This parameter can be a value of @ref ETH_Tx_DMA_Burst_Length */ + + uint32_t EnhancedDescriptorFormat; /*!< Enables the enhanced descriptor format. + This parameter can be a value of @ref ETH_DMA_Enhanced_descriptor_format */ + + uint32_t DescriptorSkipLength; /*!< Specifies the number of word to skip between two unchained descriptors (Ring mode) + This parameter must be a number between Min_Data = 0 and Max_Data = 32 */ + + uint32_t DMAArbitration; /*!< Selects the DMA Tx/Rx arbitration. 
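+ (Either a weighted round-robin scheme with an Rx:Tx ratio of 1:1, 2:1, 3:1 or 4:1, or strict Rx-over-Tx priority; see the ETH_DMAARBITRATION_* constants.)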
+ This parameter can be a value of @ref ETH_DMA_Arbitration */ +} ETH_DMAInitTypeDef; + + +/** + * @brief ETH DMA Descriptors data structure definition + */ + +typedef struct +{ + __IO uint32_t Status; /*!< Status */ + + uint32_t ControlBufferSize; /*!< Control and Buffer1, Buffer2 lengths */ + + uint32_t Buffer1Addr; /*!< Buffer1 address pointer */ + + uint32_t Buffer2NextDescAddr; /*!< Buffer2 or next descriptor address pointer */ + + /*!< Enhanced Ethernet DMA PTP Descriptors */ + uint32_t ExtendedStatus; /*!< Extended status for PTP receive descriptor */ + + uint32_t Reserved1; /*!< Reserved */ + + uint32_t TimeStampLow; /*!< Time Stamp Low value for transmit and receive */ + + uint32_t TimeStampHigh; /*!< Time Stamp High value for transmit and receive */ + +} ETH_DMADescTypeDef; + + +/** + * @brief Received Frame Information structure definition + */ +typedef struct +{ + ETH_DMADescTypeDef *FSRxDesc; /*!< First Segment Rx Desc */ + + ETH_DMADescTypeDef *LSRxDesc; /*!< Last Segment Rx Desc */ + + uint32_t SegCount; /*!< Segment count */ + + uint32_t length; /*!< Frame length */ + + uint32_t buffer; /*!< Frame buffer */ + +} ETH_DMARxFrameInfos; + + +/** + * @brief ETH Handle Structure definition + */ + +#if (USE_HAL_ETH_REGISTER_CALLBACKS == 1) +typedef struct __ETH_HandleTypeDef +#else +typedef struct +#endif +{ + ETH_TypeDef *Instance; /*!< Register base address */ + + ETH_InitTypeDef Init; /*!< Ethernet Init Configuration */ + + uint32_t LinkStatus; /*!< Ethernet link status */ + + ETH_DMADescTypeDef *RxDesc; /*!< Rx descriptor to Get */ + + ETH_DMADescTypeDef *TxDesc; /*!< Tx descriptor to Set */ + + ETH_DMARxFrameInfos RxFrameInfos; /*!< last Rx frame infos */ + + __IO HAL_ETH_StateTypeDef State; /*!< ETH communication state */ + + HAL_LockTypeDef Lock; /*!< ETH Lock */ + + #if (USE_HAL_ETH_REGISTER_CALLBACKS == 1) + + void (* TxCpltCallback) ( struct __ETH_HandleTypeDef * heth); /*!< ETH Tx Complete Callback */ + void (* RxCpltCallback) ( struct __ETH_HandleTypeDef * heth); /*!< ETH Rx Complete Callback */ + void (* DMAErrorCallback) ( struct __ETH_HandleTypeDef * heth); /*!< DMA Error Callback */ + void (* MspInitCallback) ( struct __ETH_HandleTypeDef * heth); /*!< ETH Msp Init callback */ + void (* MspDeInitCallback) ( struct __ETH_HandleTypeDef * heth); /*!< ETH Msp DeInit callback */ + +#endif /* USE_HAL_ETH_REGISTER_CALLBACKS */ + +} ETH_HandleTypeDef; + + +#if (USE_HAL_ETH_REGISTER_CALLBACKS == 1) +/** + * @brief HAL ETH Callback ID enumeration definition + */ +typedef enum +{ + HAL_ETH_MSPINIT_CB_ID = 0x00U, /*!< ETH MspInit callback ID */ + HAL_ETH_MSPDEINIT_CB_ID = 0x01U, /*!< ETH MspDeInit callback ID */ + HAL_ETH_TX_COMPLETE_CB_ID = 0x02U, /*!< ETH Tx Complete Callback ID */ + HAL_ETH_RX_COMPLETE_CB_ID = 0x03U, /*!< ETH Rx Complete Callback ID */ + HAL_ETH_DMA_ERROR_CB_ID = 0x04U, /*!< ETH DMA Error Callback ID */ + +}HAL_ETH_CallbackIDTypeDef; + +/** + * @brief HAL ETH Callback pointer definition + */ +typedef void (*pETH_CallbackTypeDef)(ETH_HandleTypeDef * heth); /*!< pointer to an ETH callback function */ + +#endif /* USE_HAL_ETH_REGISTER_CALLBACKS */ + + /** + * @} + */ + +/* Exported constants --------------------------------------------------------*/ +/** @defgroup ETH_Exported_Constants ETH Exported Constants + * @{ + */ + +/** @defgroup ETH_Buffers_setting ETH Buffers setting + * @{ + */ +#define ETH_MAX_PACKET_SIZE ((uint32_t)1524U) /*!< ETH_HEADER + ETH_EXTRA + ETH_VLAN_TAG + ETH_MAX_ETH_PAYLOAD + ETH_CRC */ +#define ETH_HEADER ((uint32_t)14U) /*!< 6 byte 
Dest addr, 6 byte Src addr, 2 byte length/type */ +#define ETH_CRC ((uint32_t)4U) /*!< Ethernet CRC */ +#define ETH_EXTRA ((uint32_t)2U) /*!< Extra bytes in some cases */ +#define ETH_VLAN_TAG ((uint32_t)4U) /*!< optional 802.1q VLAN Tag */ +#define ETH_MIN_ETH_PAYLOAD ((uint32_t)46U) /*!< Minimum Ethernet payload size */ +#define ETH_MAX_ETH_PAYLOAD ((uint32_t)1500U) /*!< Maximum Ethernet payload size */ +#define ETH_JUMBO_FRAME_PAYLOAD ((uint32_t)9000U) /*!< Jumbo frame payload size */ + + /* Ethernet driver receive buffers are organized in a chained linked-list. When + an Ethernet packet is received, the Rx-DMA will transfer the packet from the RxFIFO + to the driver receive buffers memory. + + Depending on the size of the received Ethernet packet and the size of + each Ethernet driver receive buffer, the received packet can occupy one or more + Ethernet driver receive buffers. + + The size of one Ethernet driver receive buffer, ETH_RX_BUF_SIZE, and the total + number of driver receive buffers, ETH_RXBUFNB, are defined below. + + The configured value for ETH_RX_BUF_SIZE and ETH_RXBUFNB are only provided as + example, they can be reconfigured in the application layer to fit the application + needs */ + +/* Here we configure each Ethernet driver receive buffer to fit the Max size Ethernet + packet */ +#ifndef ETH_RX_BUF_SIZE + #define ETH_RX_BUF_SIZE ETH_MAX_PACKET_SIZE +#endif + +/* 5 Ethernet driver receive buffers are used (in a chained linked list)*/ +#ifndef ETH_RXBUFNB + #define ETH_RXBUFNB ((uint32_t)5U) /* 5 Rx buffers of size ETH_RX_BUF_SIZE */ +#endif + + + /* Ethernet driver transmit buffers are organized in a chained linked-list. When + an Ethernet packet is transmitted, the Tx-DMA will transfer the packet from the + driver transmit buffers memory to the TxFIFO. + + Depending on the size of the Ethernet packet to be transmitted and the size of + each Ethernet driver transmit buffer, the packet to be transmitted can occupy + one or more Ethernet driver transmit buffers. + + The size of one Ethernet driver transmit buffer, ETH_TX_BUF_SIZE, and the total + number of driver transmit buffers, ETH_TXBUFNB, are defined below.
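+ + With the defaults used here (ETH_TX_BUF_SIZE = ETH_MAX_PACKET_SIZE = 1524 bytes and ETH_TXBUFNB = 5), the transmit buffer pool occupies 5 * 1524 = 7620 bytes of RAM; the receive buffer pool defined above is sized identically.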
+ + The configured value for ETH_TX_BUF_SIZE and ETH_TXBUFNB are only provided as + example, they can be reconfigured in the application layer to fit the application + needs */ + +/* Here we configure each Ethernet driver transmit buffer to fit the Max size Ethernet + packet */ +#ifndef ETH_TX_BUF_SIZE + #define ETH_TX_BUF_SIZE ETH_MAX_PACKET_SIZE +#endif + +/* 5 Ethernet driver transmit buffers are used (in a chained linked list)*/ +#ifndef ETH_TXBUFNB + #define ETH_TXBUFNB ((uint32_t)5U) /* 5 Tx buffers of size ETH_TX_BUF_SIZE */ +#endif + + /** + * @} + */ + +/** @defgroup ETH_DMA_TX_Descriptor ETH DMA TX Descriptor + * @{ + */ + +/* + DMA Tx Descriptor + ----------------------------------------------------------------------------------------------- + TDES0 | OWN(31) | CTRL[30:26] | Reserved[25:24] | CTRL[23:20] | Reserved[19:17] | Status[16:0] | + ----------------------------------------------------------------------------------------------- + TDES1 | Reserved[31:29] | Buffer2 ByteCount[28:16] | Reserved[15:13] | Buffer1 ByteCount[12:0] | + ----------------------------------------------------------------------------------------------- + TDES2 | Buffer1 Address [31:0] | + ----------------------------------------------------------------------------------------------- + TDES3 | Buffer2 Address [31:0] / Next Descriptor Address [31:0] | + ----------------------------------------------------------------------------------------------- +*/ + +/** + * @brief Bit definition of TDES0 register: DMA Tx descriptor status register + */ +#define ETH_DMATXDESC_OWN ((uint32_t)0x80000000U) /*!< OWN bit: descriptor is owned by DMA engine */ +#define ETH_DMATXDESC_IC ((uint32_t)0x40000000U) /*!< Interrupt on Completion */ +#define ETH_DMATXDESC_LS ((uint32_t)0x20000000U) /*!< Last Segment */ +#define ETH_DMATXDESC_FS ((uint32_t)0x10000000U) /*!< First Segment */ +#define ETH_DMATXDESC_DC ((uint32_t)0x08000000U) /*!< Disable CRC */ +#define ETH_DMATXDESC_DP ((uint32_t)0x04000000U) /*!< Disable Padding */ +#define ETH_DMATXDESC_TTSE ((uint32_t)0x02000000U) /*!< Transmit Time Stamp Enable */ +#define ETH_DMATXDESC_CIC ((uint32_t)0x00C00000U) /*!< Checksum Insertion Control: 4 cases */ +#define ETH_DMATXDESC_CIC_BYPASS ((uint32_t)0x00000000U) /*!< Do Nothing: Checksum Engine is bypassed */ +#define ETH_DMATXDESC_CIC_IPV4HEADER ((uint32_t)0x00400000U) /*!< IPV4 header Checksum Insertion */ +#define ETH_DMATXDESC_CIC_TCPUDPICMP_SEGMENT ((uint32_t)0x00800000U) /*!< TCP/UDP/ICMP Checksum Insertion calculated over segment only */ +#define ETH_DMATXDESC_CIC_TCPUDPICMP_FULL ((uint32_t)0x00C00000U) /*!< TCP/UDP/ICMP Checksum Insertion fully calculated */ +#define ETH_DMATXDESC_TER ((uint32_t)0x00200000U) /*!< Transmit End of Ring */ +#define ETH_DMATXDESC_TCH ((uint32_t)0x00100000U) /*!< Second Address Chained */ +#define ETH_DMATXDESC_TTSS ((uint32_t)0x00020000U) /*!< Tx Time Stamp Status */ +#define ETH_DMATXDESC_IHE ((uint32_t)0x00010000U) /*!< IP Header Error */ +#define ETH_DMATXDESC_ES ((uint32_t)0x00008000U) /*!< Error summary: OR of the following bits: UE || ED || EC || LCO || NC || LCA || FF || JT */ +#define ETH_DMATXDESC_JT ((uint32_t)0x00004000U) /*!< Jabber Timeout */ +#define ETH_DMATXDESC_FF ((uint32_t)0x00002000U) /*!< Frame Flushed: DMA/MTL flushed the frame due to SW flush */ +#define ETH_DMATXDESC_PCE ((uint32_t)0x00001000U) /*!< Payload Checksum Error */ +#define ETH_DMATXDESC_LCA ((uint32_t)0x00000800U) /*!< Loss of Carrier: carrier lost during transmission */ +#define ETH_DMATXDESC_NC 
((uint32_t)0x00000400U) /*!< No Carrier: no carrier signal from the transceiver */ +#define ETH_DMATXDESC_LCO ((uint32_t)0x00000200U) /*!< Late Collision: transmission aborted due to collision */ +#define ETH_DMATXDESC_EC ((uint32_t)0x00000100U) /*!< Excessive Collision: transmission aborted after 16 collisions */ +#define ETH_DMATXDESC_VF ((uint32_t)0x00000080U) /*!< VLAN Frame */ +#define ETH_DMATXDESC_CC ((uint32_t)0x00000078U) /*!< Collision Count */ +#define ETH_DMATXDESC_ED ((uint32_t)0x00000004U) /*!< Excessive Deferral */ +#define ETH_DMATXDESC_UF ((uint32_t)0x00000002U) /*!< Underflow Error: late data arrival from the memory */ +#define ETH_DMATXDESC_DB ((uint32_t)0x00000001U) /*!< Deferred Bit */ + +/** + * @brief Bit definition of TDES1 register + */ +#define ETH_DMATXDESC_TBS2 ((uint32_t)0x1FFF0000U) /*!< Transmit Buffer2 Size */ +#define ETH_DMATXDESC_TBS1 ((uint32_t)0x00001FFFU) /*!< Transmit Buffer1 Size */ + +/** + * @brief Bit definition of TDES2 register + */ +#define ETH_DMATXDESC_B1AP ((uint32_t)0xFFFFFFFFU) /*!< Buffer1 Address Pointer */ + +/** + * @brief Bit definition of TDES3 register + */ +#define ETH_DMATXDESC_B2AP ((uint32_t)0xFFFFFFFFU) /*!< Buffer2 Address Pointer */ + + /*--------------------------------------------------------------------------------------------- + TDES6 | Transmit Time Stamp Low [31:0] | + ----------------------------------------------------------------------------------------------- + TDES7 | Transmit Time Stamp High [31:0] | + ----------------------------------------------------------------------------------------------*/ + +/* Bit definition of TDES6 register */ + #define ETH_DMAPTPTXDESC_TTSL ((uint32_t)0xFFFFFFFFU) /* Transmit Time Stamp Low */ + +/* Bit definition of TDES7 register */ + #define ETH_DMAPTPTXDESC_TTSH ((uint32_t)0xFFFFFFFFU) /* Transmit Time Stamp High */ + +/** + * @} + */ +/** @defgroup ETH_DMA_RX_Descriptor ETH DMA RX Descriptor + * @{ + */ + +/* + DMA Rx Descriptor + -------------------------------------------------------------------------------------------------------------------- + RDES0 | OWN(31) | Status [30:0] | + --------------------------------------------------------------------------------------------------------------------- + RDES1 | CTRL(31) | Reserved[30:29] | Buffer2 ByteCount[28:16] | CTRL[15:14] | Reserved(13) | Buffer1 ByteCount[12:0] | + --------------------------------------------------------------------------------------------------------------------- + RDES2 | Buffer1 Address [31:0] | + --------------------------------------------------------------------------------------------------------------------- + RDES3 | Buffer2 Address [31:0] / Next Descriptor Address [31:0] | + --------------------------------------------------------------------------------------------------------------------- +*/ + +/** + * @brief Bit definition of RDES0 register: DMA Rx descriptor status register + */ +#define ETH_DMARXDESC_OWN ((uint32_t)0x80000000U) /*!< OWN bit: descriptor is owned by DMA engine */ +#define ETH_DMARXDESC_AFM ((uint32_t)0x40000000U) /*!< DA Filter Fail for the rx frame */ +#define ETH_DMARXDESC_FL ((uint32_t)0x3FFF0000U) /*!< Receive descriptor frame length */ +#define ETH_DMARXDESC_ES ((uint32_t)0x00008000U) /*!< Error summary: OR of the following bits: DE || OE || IPC || LC || RWT || RE || CE */ +#define ETH_DMARXDESC_DE ((uint32_t)0x00004000U) /*!< Descriptor error: no more descriptors for receive frame */ +#define ETH_DMARXDESC_SAF ((uint32_t)0x00002000U) /*!< SA Filter Fail for the received 
frame */ +#define ETH_DMARXDESC_LE ((uint32_t)0x00001000U) /*!< Frame size not matching with length field */ +#define ETH_DMARXDESC_OE ((uint32_t)0x00000800U) /*!< Overflow Error: Frame was damaged due to buffer overflow */ +#define ETH_DMARXDESC_VLAN ((uint32_t)0x00000400U) /*!< VLAN Tag: received frame is a VLAN frame */ +#define ETH_DMARXDESC_FS ((uint32_t)0x00000200U) /*!< First descriptor of the frame */ +#define ETH_DMARXDESC_LS ((uint32_t)0x00000100U) /*!< Last descriptor of the frame */ +#define ETH_DMARXDESC_IPV4HCE ((uint32_t)0x00000080U) /*!< IPC Checksum Error: Rx Ipv4 header checksum error */ +#define ETH_DMARXDESC_LC ((uint32_t)0x00000040U) /*!< Late collision occurred during reception */ +#define ETH_DMARXDESC_FT ((uint32_t)0x00000020U) /*!< Frame type - Ethernet, otherwise 802.3 */ +#define ETH_DMARXDESC_RWT ((uint32_t)0x00000010U) /*!< Receive Watchdog Timeout: watchdog timer expired during reception */ +#define ETH_DMARXDESC_RE ((uint32_t)0x00000008U) /*!< Receive error: error reported by MII interface */ +#define ETH_DMARXDESC_DBE ((uint32_t)0x00000004U) /*!< Dribble bit error: frame contains non int multiple of 8 bits */ +#define ETH_DMARXDESC_CE ((uint32_t)0x00000002U) /*!< CRC error */ +#define ETH_DMARXDESC_MAMPCE ((uint32_t)0x00000001U) /*!< Rx MAC Address/Payload Checksum Error: Rx MAC address matched/ Rx Payload Checksum Error */ + +/** + * @brief Bit definition of RDES1 register + */ +#define ETH_DMARXDESC_DIC ((uint32_t)0x80000000U) /*!< Disable Interrupt on Completion */ +#define ETH_DMARXDESC_RBS2 ((uint32_t)0x1FFF0000U) /*!< Receive Buffer2 Size */ +#define ETH_DMARXDESC_RER ((uint32_t)0x00008000U) /*!< Receive End of Ring */ +#define ETH_DMARXDESC_RCH ((uint32_t)0x00004000U) /*!< Second Address Chained */ +#define ETH_DMARXDESC_RBS1 ((uint32_t)0x00001FFFU) /*!< Receive Buffer1 Size */ + +/** + * @brief Bit definition of RDES2 register + */ +#define ETH_DMARXDESC_B1AP ((uint32_t)0xFFFFFFFFU) /*!< Buffer1 Address Pointer */ + +/** + * @brief Bit definition of RDES3 register + */ +#define ETH_DMARXDESC_B2AP ((uint32_t)0xFFFFFFFFU) /*!< Buffer2 Address Pointer */ + +/*--------------------------------------------------------------------------------------------------------------------- + RDES4 | Reserved[31:15] | Extended Status [14:0] | + --------------------------------------------------------------------------------------------------------------------- + RDES5 | Reserved[31:0] | + --------------------------------------------------------------------------------------------------------------------- + RDES6 | Receive Time Stamp Low [31:0] | + --------------------------------------------------------------------------------------------------------------------- + RDES7 | Receive Time Stamp High [31:0] | + --------------------------------------------------------------------------------------------------------------------*/ + +/* Bit definition of RDES4 register */ +#define ETH_DMAPTPRXDESC_PTPV ((uint32_t)0x00002000U) /* PTP Version */ +#define ETH_DMAPTPRXDESC_PTPFT ((uint32_t)0x00001000U) /* PTP Frame Type */ +#define ETH_DMAPTPRXDESC_PTPMT ((uint32_t)0x00000F00U) /* PTP Message Type */ +#define ETH_DMAPTPRXDESC_PTPMT_SYNC ((uint32_t)0x00000100U) /* SYNC message (all clock types) */ +#define ETH_DMAPTPRXDESC_PTPMT_FOLLOWUP ((uint32_t)0x00000200U) /* FollowUp message (all clock types) */ +#define ETH_DMAPTPRXDESC_PTPMT_DELAYREQ ((uint32_t)0x00000300U) /* DelayReq message (all clock types) */ +#define ETH_DMAPTPRXDESC_PTPMT_DELAYRESP ((uint32_t)0x00000400U) /* 
DelayResp message (all clock types) */ +#define ETH_DMAPTPRXDESC_PTPMT_PDELAYREQ_ANNOUNCE ((uint32_t)0x00000500U) /* PdelayReq message (peer-to-peer transparent clock) or Announce message (Ordinary or Boundary clock) */ +#define ETH_DMAPTPRXDESC_PTPMT_PDELAYRESP_MANAG ((uint32_t)0x00000600U) /* PdelayResp message (peer-to-peer transparent clock) or Management message (Ordinary or Boundary clock) */ +#define ETH_DMAPTPRXDESC_PTPMT_PDELAYRESPFOLLOWUP_SIGNAL ((uint32_t)0x00000700U) /* PdelayRespFollowUp message (peer-to-peer transparent clock) or Signaling message (Ordinary or Boundary clock) */ +#define ETH_DMAPTPRXDESC_IPV6PR ((uint32_t)0x00000080U) /* IPv6 Packet Received */ +#define ETH_DMAPTPRXDESC_IPV4PR ((uint32_t)0x00000040U) /* IPv4 Packet Received */ +#define ETH_DMAPTPRXDESC_IPCB ((uint32_t)0x00000020U) /* IP Checksum Bypassed */ +#define ETH_DMAPTPRXDESC_IPPE ((uint32_t)0x00000010U) /* IP Payload Error */ +#define ETH_DMAPTPRXDESC_IPHE ((uint32_t)0x00000008U) /* IP Header Error */ +#define ETH_DMAPTPRXDESC_IPPT ((uint32_t)0x00000007U) /* IP Payload Type */ +#define ETH_DMAPTPRXDESC_IPPT_UDP ((uint32_t)0x00000001U) /* UDP payload encapsulated in the IP datagram */ +#define ETH_DMAPTPRXDESC_IPPT_TCP ((uint32_t)0x00000002U) /* TCP payload encapsulated in the IP datagram */ +#define ETH_DMAPTPRXDESC_IPPT_ICMP ((uint32_t)0x00000003U) /* ICMP payload encapsulated in the IP datagram */ + +/* Bit definition of RDES6 register */ +#define ETH_DMAPTPRXDESC_RTSL ((uint32_t)0xFFFFFFFFU) /* Receive Time Stamp Low */ + +/* Bit definition of RDES7 register */ +#define ETH_DMAPTPRXDESC_RTSH ((uint32_t)0xFFFFFFFFU) /* Receive Time Stamp High */ +/** + * @} + */ + /** @defgroup ETH_AutoNegotiation ETH AutoNegotiation + * @{ + */ +#define ETH_AUTONEGOTIATION_ENABLE ((uint32_t)0x00000001U) +#define ETH_AUTONEGOTIATION_DISABLE ((uint32_t)0x00000000U) + +/** + * @} + */ +/** @defgroup ETH_Speed ETH Speed + * @{ + */ +#define ETH_SPEED_10M ((uint32_t)0x00000000U) +#define ETH_SPEED_100M ((uint32_t)0x00004000U) + +/** + * @} + */ +/** @defgroup ETH_Duplex_Mode ETH Duplex Mode + * @{ + */ +#define ETH_MODE_FULLDUPLEX ((uint32_t)0x00000800U) +#define ETH_MODE_HALFDUPLEX ((uint32_t)0x00000000U) +/** + * @} + */ +/** @defgroup ETH_Rx_Mode ETH Rx Mode + * @{ + */ +#define ETH_RXPOLLING_MODE ((uint32_t)0x00000000U) +#define ETH_RXINTERRUPT_MODE ((uint32_t)0x00000001U) +/** + * @} + */ + +/** @defgroup ETH_Checksum_Mode ETH Checksum Mode + * @{ + */ +#define ETH_CHECKSUM_BY_HARDWARE ((uint32_t)0x00000000U) +#define ETH_CHECKSUM_BY_SOFTWARE ((uint32_t)0x00000001U) +/** + * @} + */ + +/** @defgroup ETH_Media_Interface ETH Media Interface + * @{ + */ +#define ETH_MEDIA_INTERFACE_MII ((uint32_t)0x00000000U) +#define ETH_MEDIA_INTERFACE_RMII ((uint32_t)SYSCFG_PMC_MII_RMII_SEL) +/** + * @} + */ + +/** @defgroup ETH_Watchdog ETH Watchdog + * @{ + */ +#define ETH_WATCHDOG_ENABLE ((uint32_t)0x00000000U) +#define ETH_WATCHDOG_DISABLE ((uint32_t)0x00800000U) +/** + * @} + */ + +/** @defgroup ETH_Jabber ETH Jabber + * @{ + */ +#define ETH_JABBER_ENABLE ((uint32_t)0x00000000U) +#define ETH_JABBER_DISABLE ((uint32_t)0x00400000U) +/** + * @} + */ + +/** @defgroup ETH_Inter_Frame_Gap ETH Inter Frame Gap + * @{ + */ +#define ETH_INTERFRAMEGAP_96BIT ((uint32_t)0x00000000U) /*!< minimum IFG between frames during transmission is 96Bit */ +#define ETH_INTERFRAMEGAP_88BIT ((uint32_t)0x00020000U) /*!< minimum IFG between frames during transmission is 88Bit */ +#define ETH_INTERFRAMEGAP_80BIT ((uint32_t)0x00040000U) /*!< minimum IFG 
between frames during transmission is 80Bit */ +#define ETH_INTERFRAMEGAP_72BIT ((uint32_t)0x00060000U) /*!< minimum IFG between frames during transmission is 72Bit */ +#define ETH_INTERFRAMEGAP_64BIT ((uint32_t)0x00080000U) /*!< minimum IFG between frames during transmission is 64Bit */ +#define ETH_INTERFRAMEGAP_56BIT ((uint32_t)0x000A0000U) /*!< minimum IFG between frames during transmission is 56Bit */ +#define ETH_INTERFRAMEGAP_48BIT ((uint32_t)0x000C0000U) /*!< minimum IFG between frames during transmission is 48Bit */ +#define ETH_INTERFRAMEGAP_40BIT ((uint32_t)0x000E0000U) /*!< minimum IFG between frames during transmission is 40Bit */ +/** + * @} + */ + +/** @defgroup ETH_Carrier_Sense ETH Carrier Sense + * @{ + */ +#define ETH_CARRIERSENCE_ENABLE ((uint32_t)0x00000000U) +#define ETH_CARRIERSENCE_DISABLE ((uint32_t)0x00010000U) +/** + * @} + */ + +/** @defgroup ETH_Receive_Own ETH Receive Own + * @{ + */ +#define ETH_RECEIVEOWN_ENABLE ((uint32_t)0x00000000U) +#define ETH_RECEIVEOWN_DISABLE ((uint32_t)0x00002000U) +/** + * @} + */ + +/** @defgroup ETH_Loop_Back_Mode ETH Loop Back Mode + * @{ + */ +#define ETH_LOOPBACKMODE_ENABLE ((uint32_t)0x00001000U) +#define ETH_LOOPBACKMODE_DISABLE ((uint32_t)0x00000000U) +/** + * @} + */ + +/** @defgroup ETH_Checksum_Offload ETH Checksum Offload + * @{ + */ +#define ETH_CHECKSUMOFFLAOD_ENABLE ((uint32_t)0x00000400U) +#define ETH_CHECKSUMOFFLAOD_DISABLE ((uint32_t)0x00000000U) +/** + * @} + */ + +/** @defgroup ETH_Retry_Transmission ETH Retry Transmission + * @{ + */ +#define ETH_RETRYTRANSMISSION_ENABLE ((uint32_t)0x00000000U) +#define ETH_RETRYTRANSMISSION_DISABLE ((uint32_t)0x00000200U) +/** + * @} + */ + +/** @defgroup ETH_Automatic_Pad_CRC_Strip ETH Automatic Pad CRC Strip + * @{ + */ +#define ETH_AUTOMATICPADCRCSTRIP_ENABLE ((uint32_t)0x00000080U) +#define ETH_AUTOMATICPADCRCSTRIP_DISABLE ((uint32_t)0x00000000U) +/** + * @} + */ + +/** @defgroup ETH_Back_Off_Limit ETH Back Off Limit + * @{ + */ +#define ETH_BACKOFFLIMIT_10 ((uint32_t)0x00000000U) +#define ETH_BACKOFFLIMIT_8 ((uint32_t)0x00000020U) +#define ETH_BACKOFFLIMIT_4 ((uint32_t)0x00000040U) +#define ETH_BACKOFFLIMIT_1 ((uint32_t)0x00000060U) +/** + * @} + */ + +/** @defgroup ETH_Deferral_Check ETH Deferral Check + * @{ + */ +#define ETH_DEFFERRALCHECK_ENABLE ((uint32_t)0x00000010U) +#define ETH_DEFFERRALCHECK_DISABLE ((uint32_t)0x00000000U) +/** + * @} + */ + +/** @defgroup ETH_Receive_All ETH Receive All + * @{ + */ +#define ETH_RECEIVEALL_ENABLE ((uint32_t)0x80000000U) +#define ETH_RECEIVEAll_DISABLE ((uint32_t)0x00000000U) +/** + * @} + */ + +/** @defgroup ETH_Source_Addr_Filter ETH Source Addr Filter + * @{ + */ +#define ETH_SOURCEADDRFILTER_NORMAL_ENABLE ((uint32_t)0x00000200U) +#define ETH_SOURCEADDRFILTER_INVERSE_ENABLE ((uint32_t)0x00000300U) +#define ETH_SOURCEADDRFILTER_DISABLE ((uint32_t)0x00000000U) +/** + * @} + */ + +/** @defgroup ETH_Pass_Control_Frames ETH Pass Control Frames + * @{ + */ +#define ETH_PASSCONTROLFRAMES_BLOCKALL ((uint32_t)0x00000040U) /*!< MAC filters all control frames from reaching the application */ +#define ETH_PASSCONTROLFRAMES_FORWARDALL ((uint32_t)0x00000080U) /*!< MAC forwards all control frames to application even if they fail the Address Filter */ +#define ETH_PASSCONTROLFRAMES_FORWARDPASSEDADDRFILTER ((uint32_t)0x000000C0U) /*!< MAC forwards control frames that pass the Address Filter. 
*/ +/** + * @} + */ + +/** @defgroup ETH_Broadcast_Frames_Reception ETH Broadcast Frames Reception + * @{ + */ +#define ETH_BROADCASTFRAMESRECEPTION_ENABLE ((uint32_t)0x00000000U) +#define ETH_BROADCASTFRAMESRECEPTION_DISABLE ((uint32_t)0x00000020U) +/** + * @} + */ + +/** @defgroup ETH_Destination_Addr_Filter ETH Destination Addr Filter + * @{ + */ +#define ETH_DESTINATIONADDRFILTER_NORMAL ((uint32_t)0x00000000U) +#define ETH_DESTINATIONADDRFILTER_INVERSE ((uint32_t)0x00000008U) +/** + * @} + */ + +/** @defgroup ETH_Promiscuous_Mode ETH Promiscuous Mode + * @{ + */ +#define ETH_PROMISCUOUS_MODE_ENABLE ((uint32_t)0x00000001U) +#define ETH_PROMISCUOUS_MODE_DISABLE ((uint32_t)0x00000000U) +/** + * @} + */ + +/** @defgroup ETH_Multicast_Frames_Filter ETH Multicast Frames Filter + * @{ + */ +#define ETH_MULTICASTFRAMESFILTER_PERFECTHASHTABLE ((uint32_t)0x00000404U) +#define ETH_MULTICASTFRAMESFILTER_HASHTABLE ((uint32_t)0x00000004U) +#define ETH_MULTICASTFRAMESFILTER_PERFECT ((uint32_t)0x00000000U) +#define ETH_MULTICASTFRAMESFILTER_NONE ((uint32_t)0x00000010U) +/** + * @} + */ + +/** @defgroup ETH_Unicast_Frames_Filter ETH Unicast Frames Filter + * @{ + */ +#define ETH_UNICASTFRAMESFILTER_PERFECTHASHTABLE ((uint32_t)0x00000402U) +#define ETH_UNICASTFRAMESFILTER_HASHTABLE ((uint32_t)0x00000002U) +#define ETH_UNICASTFRAMESFILTER_PERFECT ((uint32_t)0x00000000U) +/** + * @} + */ + +/** @defgroup ETH_Zero_Quanta_Pause ETH Zero Quanta Pause + * @{ + */ +#define ETH_ZEROQUANTAPAUSE_ENABLE ((uint32_t)0x00000000U) +#define ETH_ZEROQUANTAPAUSE_DISABLE ((uint32_t)0x00000080U) +/** + * @} + */ + +/** @defgroup ETH_Pause_Low_Threshold ETH Pause Low Threshold + * @{ + */ +#define ETH_PAUSELOWTHRESHOLD_MINUS4 ((uint32_t)0x00000000U) /*!< Pause time minus 4 slot times */ +#define ETH_PAUSELOWTHRESHOLD_MINUS28 ((uint32_t)0x00000010U) /*!< Pause time minus 28 slot times */ +#define ETH_PAUSELOWTHRESHOLD_MINUS144 ((uint32_t)0x00000020U) /*!< Pause time minus 144 slot times */ +#define ETH_PAUSELOWTHRESHOLD_MINUS256 ((uint32_t)0x00000030U) /*!< Pause time minus 256 slot times */ +/** + * @} + */ + +/** @defgroup ETH_Unicast_Pause_Frame_Detect ETH Unicast Pause Frame Detect + * @{ + */ +#define ETH_UNICASTPAUSEFRAMEDETECT_ENABLE ((uint32_t)0x00000008U) +#define ETH_UNICASTPAUSEFRAMEDETECT_DISABLE ((uint32_t)0x00000000U) +/** + * @} + */ + +/** @defgroup ETH_Receive_Flow_Control ETH Receive Flow Control + * @{ + */ +#define ETH_RECEIVEFLOWCONTROL_ENABLE ((uint32_t)0x00000004U) +#define ETH_RECEIVEFLOWCONTROL_DISABLE ((uint32_t)0x00000000U) +/** + * @} + */ + +/** @defgroup ETH_Transmit_Flow_Control ETH Transmit Flow Control + * @{ + */ +#define ETH_TRANSMITFLOWCONTROL_ENABLE ((uint32_t)0x00000002U) +#define ETH_TRANSMITFLOWCONTROL_DISABLE ((uint32_t)0x00000000U) +/** + * @} + */ + +/** @defgroup ETH_VLAN_Tag_Comparison ETH VLAN Tag Comparison + * @{ + */ +#define ETH_VLANTAGCOMPARISON_12BIT ((uint32_t)0x00010000U) +#define ETH_VLANTAGCOMPARISON_16BIT ((uint32_t)0x00000000U) +/** + * @} + */ + +/** @defgroup ETH_MAC_addresses ETH MAC addresses + * @{ + */ +#define ETH_MAC_ADDRESS0 ((uint32_t)0x00000000U) +#define ETH_MAC_ADDRESS1 ((uint32_t)0x00000008U) +#define ETH_MAC_ADDRESS2 ((uint32_t)0x00000010U) +#define ETH_MAC_ADDRESS3 ((uint32_t)0x00000018U) +/** + * @} + */ + +/** @defgroup ETH_MAC_addresses_filter_SA_DA ETH MAC addresses filter SA DA + * @{ + */ +#define ETH_MAC_ADDRESSFILTER_SA ((uint32_t)0x00000000U) +#define ETH_MAC_ADDRESSFILTER_DA ((uint32_t)0x00000008U) +/** + * @} + */ + +/** @defgroup 
ETH_MAC_addresses_filter_Mask_bytes ETH MAC addresses filter Mask bytes + * @{ + */ +#define ETH_MAC_ADDRESSMASK_BYTE6 ((uint32_t)0x20000000U) /*!< Mask MAC Address high reg bits [15:8] */ +#define ETH_MAC_ADDRESSMASK_BYTE5 ((uint32_t)0x10000000U) /*!< Mask MAC Address high reg bits [7:0] */ +#define ETH_MAC_ADDRESSMASK_BYTE4 ((uint32_t)0x08000000U) /*!< Mask MAC Address low reg bits [31:24] */ +#define ETH_MAC_ADDRESSMASK_BYTE3 ((uint32_t)0x04000000U) /*!< Mask MAC Address low reg bits [23:16] */ +#define ETH_MAC_ADDRESSMASK_BYTE2 ((uint32_t)0x02000000U) /*!< Mask MAC Address low reg bits [15:8] */ +#define ETH_MAC_ADDRESSMASK_BYTE1 ((uint32_t)0x01000000U) /*!< Mask MAC Address low reg bits [70] */ +/** + * @} + */ + +/** @defgroup ETH_Drop_TCP_IP_Checksum_Error_Frame ETH Drop TCP IP Checksum Error Frame + * @{ + */ +#define ETH_DROPTCPIPCHECKSUMERRORFRAME_ENABLE ((uint32_t)0x00000000U) +#define ETH_DROPTCPIPCHECKSUMERRORFRAME_DISABLE ((uint32_t)0x04000000U) +/** + * @} + */ + +/** @defgroup ETH_Receive_Store_Forward ETH Receive Store Forward + * @{ + */ +#define ETH_RECEIVESTOREFORWARD_ENABLE ((uint32_t)0x02000000U) +#define ETH_RECEIVESTOREFORWARD_DISABLE ((uint32_t)0x00000000U) +/** + * @} + */ + +/** @defgroup ETH_Flush_Received_Frame ETH Flush Received Frame + * @{ + */ +#define ETH_FLUSHRECEIVEDFRAME_ENABLE ((uint32_t)0x00000000U) +#define ETH_FLUSHRECEIVEDFRAME_DISABLE ((uint32_t)0x01000000U) +/** + * @} + */ + +/** @defgroup ETH_Transmit_Store_Forward ETH Transmit Store Forward + * @{ + */ +#define ETH_TRANSMITSTOREFORWARD_ENABLE ((uint32_t)0x00200000U) +#define ETH_TRANSMITSTOREFORWARD_DISABLE ((uint32_t)0x00000000U) +/** + * @} + */ + +/** @defgroup ETH_Transmit_Threshold_Control ETH Transmit Threshold Control + * @{ + */ +#define ETH_TRANSMITTHRESHOLDCONTROL_64BYTES ((uint32_t)0x00000000U) /*!< threshold level of the MTL Transmit FIFO is 64 Bytes */ +#define ETH_TRANSMITTHRESHOLDCONTROL_128BYTES ((uint32_t)0x00004000U) /*!< threshold level of the MTL Transmit FIFO is 128 Bytes */ +#define ETH_TRANSMITTHRESHOLDCONTROL_192BYTES ((uint32_t)0x00008000U) /*!< threshold level of the MTL Transmit FIFO is 192 Bytes */ +#define ETH_TRANSMITTHRESHOLDCONTROL_256BYTES ((uint32_t)0x0000C000U) /*!< threshold level of the MTL Transmit FIFO is 256 Bytes */ +#define ETH_TRANSMITTHRESHOLDCONTROL_40BYTES ((uint32_t)0x00010000U) /*!< threshold level of the MTL Transmit FIFO is 40 Bytes */ +#define ETH_TRANSMITTHRESHOLDCONTROL_32BYTES ((uint32_t)0x00014000U) /*!< threshold level of the MTL Transmit FIFO is 32 Bytes */ +#define ETH_TRANSMITTHRESHOLDCONTROL_24BYTES ((uint32_t)0x00018000U) /*!< threshold level of the MTL Transmit FIFO is 24 Bytes */ +#define ETH_TRANSMITTHRESHOLDCONTROL_16BYTES ((uint32_t)0x0001C000U) /*!< threshold level of the MTL Transmit FIFO is 16 Bytes */ +/** + * @} + */ + +/** @defgroup ETH_Forward_Error_Frames ETH Forward Error Frames + * @{ + */ +#define ETH_FORWARDERRORFRAMES_ENABLE ((uint32_t)0x00000080U) +#define ETH_FORWARDERRORFRAMES_DISABLE ((uint32_t)0x00000000U) +/** + * @} + */ + +/** @defgroup ETH_Forward_Undersized_Good_Frames ETH Forward Undersized Good Frames + * @{ + */ +#define ETH_FORWARDUNDERSIZEDGOODFRAMES_ENABLE ((uint32_t)0x00000040U) +#define ETH_FORWARDUNDERSIZEDGOODFRAMES_DISABLE ((uint32_t)0x00000000U) +/** + * @} + */ + +/** @defgroup ETH_Receive_Threshold_Control ETH Receive Threshold Control + * @{ + */ +#define ETH_RECEIVEDTHRESHOLDCONTROL_64BYTES ((uint32_t)0x00000000U) /*!< threshold level of the MTL Receive FIFO is 64 Bytes */ +#define 
ETH_RECEIVEDTHRESHOLDCONTROL_32BYTES ((uint32_t)0x00000008U) /*!< threshold level of the MTL Receive FIFO is 32 Bytes */ +#define ETH_RECEIVEDTHRESHOLDCONTROL_96BYTES ((uint32_t)0x00000010U) /*!< threshold level of the MTL Receive FIFO is 96 Bytes */ +#define ETH_RECEIVEDTHRESHOLDCONTROL_128BYTES ((uint32_t)0x00000018U) /*!< threshold level of the MTL Receive FIFO is 128 Bytes */ +/** + * @} + */ + +/** @defgroup ETH_Second_Frame_Operate ETH Second Frame Operate + * @{ + */ +#define ETH_SECONDFRAMEOPERARTE_ENABLE ((uint32_t)0x00000004U) +#define ETH_SECONDFRAMEOPERARTE_DISABLE ((uint32_t)0x00000000U) +/** + * @} + */ + +/** @defgroup ETH_Address_Aligned_Beats ETH Address Aligned Beats + * @{ + */ +#define ETH_ADDRESSALIGNEDBEATS_ENABLE ((uint32_t)0x02000000U) +#define ETH_ADDRESSALIGNEDBEATS_DISABLE ((uint32_t)0x00000000U) +/** + * @} + */ + +/** @defgroup ETH_Fixed_Burst ETH Fixed Burst + * @{ + */ +#define ETH_FIXEDBURST_ENABLE ((uint32_t)0x00010000U) +#define ETH_FIXEDBURST_DISABLE ((uint32_t)0x00000000U) +/** + * @} + */ + +/** @defgroup ETH_Rx_DMA_Burst_Length ETH Rx DMA Burst Length + * @{ + */ +#define ETH_RXDMABURSTLENGTH_1BEAT ((uint32_t)0x00020000U) /*!< maximum number of beats to be transferred in one RxDMA transaction is 1 */ +#define ETH_RXDMABURSTLENGTH_2BEAT ((uint32_t)0x00040000U) /*!< maximum number of beats to be transferred in one RxDMA transaction is 2 */ +#define ETH_RXDMABURSTLENGTH_4BEAT ((uint32_t)0x00080000U) /*!< maximum number of beats to be transferred in one RxDMA transaction is 4 */ +#define ETH_RXDMABURSTLENGTH_8BEAT ((uint32_t)0x00100000U) /*!< maximum number of beats to be transferred in one RxDMA transaction is 8 */ +#define ETH_RXDMABURSTLENGTH_16BEAT ((uint32_t)0x00200000U) /*!< maximum number of beats to be transferred in one RxDMA transaction is 16 */ +#define ETH_RXDMABURSTLENGTH_32BEAT ((uint32_t)0x00400000U) /*!< maximum number of beats to be transferred in one RxDMA transaction is 32 */ +#define ETH_RXDMABURSTLENGTH_4XPBL_4BEAT ((uint32_t)0x01020000U) /*!< maximum number of beats to be transferred in one RxDMA transaction is 4 */ +#define ETH_RXDMABURSTLENGTH_4XPBL_8BEAT ((uint32_t)0x01040000U) /*!< maximum number of beats to be transferred in one RxDMA transaction is 8 */ +#define ETH_RXDMABURSTLENGTH_4XPBL_16BEAT ((uint32_t)0x01080000U) /*!< maximum number of beats to be transferred in one RxDMA transaction is 16 */ +#define ETH_RXDMABURSTLENGTH_4XPBL_32BEAT ((uint32_t)0x01100000U) /*!< maximum number of beats to be transferred in one RxDMA transaction is 32 */ +#define ETH_RXDMABURSTLENGTH_4XPBL_64BEAT ((uint32_t)0x01200000U) /*!< maximum number of beats to be transferred in one RxDMA transaction is 64 */ +#define ETH_RXDMABURSTLENGTH_4XPBL_128BEAT ((uint32_t)0x01400000U) /*!< maximum number of beats to be transferred in one RxDMA transaction is 128 */ +/** + * @} + */ + +/** @defgroup ETH_Tx_DMA_Burst_Length ETH Tx DMA Burst Length + * @{ + */ +#define ETH_TXDMABURSTLENGTH_1BEAT ((uint32_t)0x00000100U) /*!< maximum number of beats to be transferred in one TxDMA (or both) transaction is 1 */ +#define ETH_TXDMABURSTLENGTH_2BEAT ((uint32_t)0x00000200U) /*!< maximum number of beats to be transferred in one TxDMA (or both) transaction is 2 */ +#define ETH_TXDMABURSTLENGTH_4BEAT ((uint32_t)0x00000400U) /*!< maximum number of beats to be transferred in one TxDMA (or both) transaction is 4 */ +#define ETH_TXDMABURSTLENGTH_8BEAT ((uint32_t)0x00000800U) /*!< maximum number of beats to be transferred in one TxDMA (or both) transaction is 8 */ +#define 
ETH_TXDMABURSTLENGTH_16BEAT ((uint32_t)0x00001000U) /*!< maximum number of beats to be transferred in one TxDMA (or both) transaction is 16 */ +#define ETH_TXDMABURSTLENGTH_32BEAT ((uint32_t)0x00002000U) /*!< maximum number of beats to be transferred in one TxDMA (or both) transaction is 32 */ +#define ETH_TXDMABURSTLENGTH_4XPBL_4BEAT ((uint32_t)0x01000100U) /*!< maximum number of beats to be transferred in one TxDMA (or both) transaction is 4 */ +#define ETH_TXDMABURSTLENGTH_4XPBL_8BEAT ((uint32_t)0x01000200U) /*!< maximum number of beats to be transferred in one TxDMA (or both) transaction is 8 */ +#define ETH_TXDMABURSTLENGTH_4XPBL_16BEAT ((uint32_t)0x01000400U) /*!< maximum number of beats to be transferred in one TxDMA (or both) transaction is 16 */ +#define ETH_TXDMABURSTLENGTH_4XPBL_32BEAT ((uint32_t)0x01000800U) /*!< maximum number of beats to be transferred in one TxDMA (or both) transaction is 32 */ +#define ETH_TXDMABURSTLENGTH_4XPBL_64BEAT ((uint32_t)0x01001000U) /*!< maximum number of beats to be transferred in one TxDMA (or both) transaction is 64 */ +#define ETH_TXDMABURSTLENGTH_4XPBL_128BEAT ((uint32_t)0x01002000U) /*!< maximum number of beats to be transferred in one TxDMA (or both) transaction is 128 */ +/** + * @} + */ + +/** @defgroup ETH_DMA_Enhanced_descriptor_format ETH DMA Enhanced descriptor format + * @{ + */ +#define ETH_DMAENHANCEDDESCRIPTOR_ENABLE ((uint32_t)0x00000080U) +#define ETH_DMAENHANCEDDESCRIPTOR_DISABLE ((uint32_t)0x00000000U) +/** + * @} + */ + +/** @defgroup ETH_DMA_Arbitration ETH DMA Arbitration + * @{ + */ +#define ETH_DMAARBITRATION_ROUNDROBIN_RXTX_1_1 ((uint32_t)0x00000000U) +#define ETH_DMAARBITRATION_ROUNDROBIN_RXTX_2_1 ((uint32_t)0x00004000U) +#define ETH_DMAARBITRATION_ROUNDROBIN_RXTX_3_1 ((uint32_t)0x00008000U) +#define ETH_DMAARBITRATION_ROUNDROBIN_RXTX_4_1 ((uint32_t)0x0000C000U) +#define ETH_DMAARBITRATION_RXPRIORTX ((uint32_t)0x00000002U) +/** + * @} + */ + +/** @defgroup ETH_DMA_Tx_descriptor_segment ETH DMA Tx descriptor segment + * @{ + */ +#define ETH_DMATXDESC_LASTSEGMENTS ((uint32_t)0x40000000U) /*!< Last Segment */ +#define ETH_DMATXDESC_FIRSTSEGMENT ((uint32_t)0x20000000U) /*!< First Segment */ +/** + * @} + */ + +/** @defgroup ETH_DMA_Tx_descriptor_Checksum_Insertion_Control ETH DMA Tx descriptor Checksum Insertion Control + * @{ + */ +#define ETH_DMATXDESC_CHECKSUMBYPASS ((uint32_t)0x00000000U) /*!< Checksum engine bypass */ +#define ETH_DMATXDESC_CHECKSUMIPV4HEADER ((uint32_t)0x00400000U) /*!< IPv4 header checksum insertion */ +#define ETH_DMATXDESC_CHECKSUMTCPUDPICMPSEGMENT ((uint32_t)0x00800000U) /*!< TCP/UDP/ICMP checksum insertion. 
Pseudo header checksum is assumed to be present */ +#define ETH_DMATXDESC_CHECKSUMTCPUDPICMPFULL ((uint32_t)0x00C00000U) /*!< TCP/UDP/ICMP checksum fully in hardware including pseudo header */ +/** + * @} + */ + +/** @defgroup ETH_DMA_Rx_descriptor_buffers ETH DMA Rx descriptor buffers + * @{ + */ +#define ETH_DMARXDESC_BUFFER1 ((uint32_t)0x00000000U) /*!< DMA Rx Desc Buffer1 */ +#define ETH_DMARXDESC_BUFFER2 ((uint32_t)0x00000001U) /*!< DMA Rx Desc Buffer2 */ +/** + * @} + */ + +/** @defgroup ETH_PMT_Flags ETH PMT Flags + * @{ + */ +#define ETH_PMT_FLAG_WUFFRPR ((uint32_t)0x80000000U) /*!< Wake-Up Frame Filter Register Pointer Reset */ +#define ETH_PMT_FLAG_WUFR ((uint32_t)0x00000040U) /*!< Wake-Up Frame Received */ +#define ETH_PMT_FLAG_MPR ((uint32_t)0x00000020U) /*!< Magic Packet Received */ +/** + * @} + */ + +/** @defgroup ETH_MMC_Tx_Interrupts ETH MMC Tx Interrupts + * @{ + */ +#define ETH_MMC_IT_TGF ((uint32_t)0x00200000U) /*!< When Tx good frame counter reaches half the maximum value */ +#define ETH_MMC_IT_TGFMSC ((uint32_t)0x00008000U) /*!< When Tx good multi col counter reaches half the maximum value */ +#define ETH_MMC_IT_TGFSC ((uint32_t)0x00004000U) /*!< When Tx good single col counter reaches half the maximum value */ +/** + * @} + */ + +/** @defgroup ETH_MMC_Rx_Interrupts ETH MMC Rx Interrupts + * @{ + */ +#define ETH_MMC_IT_RGUF ((uint32_t)0x10020000U) /*!< When Rx good unicast frames counter reaches half the maximum value */ +#define ETH_MMC_IT_RFAE ((uint32_t)0x10000040U) /*!< When Rx alignment error counter reaches half the maximum value */ +#define ETH_MMC_IT_RFCE ((uint32_t)0x10000020U) /*!< When Rx crc error counter reaches half the maximum value */ +/** + * @} + */ + +/** @defgroup ETH_MAC_Flags ETH MAC Flags + * @{ + */ +#define ETH_MAC_FLAG_TST ((uint32_t)0x00000200U) /*!< Time stamp trigger flag (on MAC) */ +#define ETH_MAC_FLAG_MMCT ((uint32_t)0x00000040U) /*!< MMC transmit flag */ +#define ETH_MAC_FLAG_MMCR ((uint32_t)0x00000020U) /*!< MMC receive flag */ +#define ETH_MAC_FLAG_MMC ((uint32_t)0x00000010U) /*!< MMC flag (on MAC) */ +#define ETH_MAC_FLAG_PMT ((uint32_t)0x00000008U) /*!< PMT flag (on MAC) */ +/** + * @} + */ + +/** @defgroup ETH_DMA_Flags ETH DMA Flags + * @{ + */ +#define ETH_DMA_FLAG_TST ((uint32_t)0x20000000U) /*!< Time-stamp trigger interrupt (on DMA) */ +#define ETH_DMA_FLAG_PMT ((uint32_t)0x10000000U) /*!< PMT interrupt (on DMA) */ +#define ETH_DMA_FLAG_MMC ((uint32_t)0x08000000U) /*!< MMC interrupt (on DMA) */ +#define ETH_DMA_FLAG_DATATRANSFERERROR ((uint32_t)0x00800000U) /*!< Error bits 0-Rx DMA, 1-Tx DMA */ +#define ETH_DMA_FLAG_READWRITEERROR ((uint32_t)0x01000000U) /*!< Error bits 0-write transfer, 1-read transfer */ +#define ETH_DMA_FLAG_ACCESSERROR ((uint32_t)0x02000000U) /*!< Error bits 0-data buffer, 1-desc. 
access */ +#define ETH_DMA_FLAG_NIS ((uint32_t)0x00010000U) /*!< Normal interrupt summary flag */ +#define ETH_DMA_FLAG_AIS ((uint32_t)0x00008000U) /*!< Abnormal interrupt summary flag */ +#define ETH_DMA_FLAG_ER ((uint32_t)0x00004000U) /*!< Early receive flag */ +#define ETH_DMA_FLAG_FBE ((uint32_t)0x00002000U) /*!< Fatal bus error flag */ +#define ETH_DMA_FLAG_ET ((uint32_t)0x00000400U) /*!< Early transmit flag */ +#define ETH_DMA_FLAG_RWT ((uint32_t)0x00000200U) /*!< Receive watchdog timeout flag */ +#define ETH_DMA_FLAG_RPS ((uint32_t)0x00000100U) /*!< Receive process stopped flag */ +#define ETH_DMA_FLAG_RBU ((uint32_t)0x00000080U) /*!< Receive buffer unavailable flag */ +#define ETH_DMA_FLAG_R ((uint32_t)0x00000040U) /*!< Receive flag */ +#define ETH_DMA_FLAG_TU ((uint32_t)0x00000020U) /*!< Underflow flag */ +#define ETH_DMA_FLAG_RO ((uint32_t)0x00000010U) /*!< Overflow flag */ +#define ETH_DMA_FLAG_TJT ((uint32_t)0x00000008U) /*!< Transmit jabber timeout flag */ +#define ETH_DMA_FLAG_TBU ((uint32_t)0x00000004U) /*!< Transmit buffer unavailable flag */ +#define ETH_DMA_FLAG_TPS ((uint32_t)0x00000002U) /*!< Transmit process stopped flag */ +#define ETH_DMA_FLAG_T ((uint32_t)0x00000001U) /*!< Transmit flag */ +/** + * @} + */ + +/** @defgroup ETH_MAC_Interrupts ETH MAC Interrupts + * @{ + */ +#define ETH_MAC_IT_TST ((uint32_t)0x00000200U) /*!< Time stamp trigger interrupt (on MAC) */ +#define ETH_MAC_IT_MMCT ((uint32_t)0x00000040U) /*!< MMC transmit interrupt */ +#define ETH_MAC_IT_MMCR ((uint32_t)0x00000020U) /*!< MMC receive interrupt */ +#define ETH_MAC_IT_MMC ((uint32_t)0x00000010U) /*!< MMC interrupt (on MAC) */ +#define ETH_MAC_IT_PMT ((uint32_t)0x00000008U) /*!< PMT interrupt (on MAC) */ +/** + * @} + */ + +/** @defgroup ETH_DMA_Interrupts ETH DMA Interrupts + * @{ + */ +#define ETH_DMA_IT_TST ((uint32_t)0x20000000U) /*!< Time-stamp trigger interrupt (on DMA) */ +#define ETH_DMA_IT_PMT ((uint32_t)0x10000000U) /*!< PMT interrupt (on DMA) */ +#define ETH_DMA_IT_MMC ((uint32_t)0x08000000U) /*!< MMC interrupt (on DMA) */ +#define ETH_DMA_IT_NIS ((uint32_t)0x00010000U) /*!< Normal interrupt summary */ +#define ETH_DMA_IT_AIS ((uint32_t)0x00008000U) /*!< Abnormal interrupt summary */ +#define ETH_DMA_IT_ER ((uint32_t)0x00004000U) /*!< Early receive interrupt */ +#define ETH_DMA_IT_FBE ((uint32_t)0x00002000U) /*!< Fatal bus error interrupt */ +#define ETH_DMA_IT_ET ((uint32_t)0x00000400U) /*!< Early transmit interrupt */ +#define ETH_DMA_IT_RWT ((uint32_t)0x00000200U) /*!< Receive watchdog timeout interrupt */ +#define ETH_DMA_IT_RPS ((uint32_t)0x00000100U) /*!< Receive process stopped interrupt */ +#define ETH_DMA_IT_RBU ((uint32_t)0x00000080U) /*!< Receive buffer unavailable interrupt */ +#define ETH_DMA_IT_R ((uint32_t)0x00000040U) /*!< Receive interrupt */ +#define ETH_DMA_IT_TU ((uint32_t)0x00000020U) /*!< Underflow interrupt */ +#define ETH_DMA_IT_RO ((uint32_t)0x00000010U) /*!< Overflow interrupt */ +#define ETH_DMA_IT_TJT ((uint32_t)0x00000008U) /*!< Transmit jabber timeout interrupt */ +#define ETH_DMA_IT_TBU ((uint32_t)0x00000004U) /*!< Transmit buffer unavailable interrupt */ +#define ETH_DMA_IT_TPS ((uint32_t)0x00000002U) /*!< Transmit process stopped interrupt */ +#define ETH_DMA_IT_T ((uint32_t)0x00000001U) /*!< Transmit interrupt */ +/** + * @} + */ + +/** @defgroup ETH_DMA_transmit_process_state ETH DMA transmit process state + * @{ + */ +#define ETH_DMA_TRANSMITPROCESS_STOPPED ((uint32_t)0x00000000U) /*!< Stopped - Reset or Stop Tx Command issued */ +#define 
ETH_DMA_TRANSMITPROCESS_FETCHING ((uint32_t)0x00100000U) /*!< Running - fetching the Tx descriptor */ +#define ETH_DMA_TRANSMITPROCESS_WAITING ((uint32_t)0x00200000U) /*!< Running - waiting for status */ +#define ETH_DMA_TRANSMITPROCESS_READING ((uint32_t)0x00300000U) /*!< Running - reading the data from host memory */ +#define ETH_DMA_TRANSMITPROCESS_SUSPENDED ((uint32_t)0x00600000U) /*!< Suspended - Tx Descriptor unavailable */ +#define ETH_DMA_TRANSMITPROCESS_CLOSING ((uint32_t)0x00700000U) /*!< Running - closing Rx descriptor */ + +/** + * @} + */ + + +/** @defgroup ETH_DMA_receive_process_state ETH DMA receive process state + * @{ + */ +#define ETH_DMA_RECEIVEPROCESS_STOPPED ((uint32_t)0x00000000U) /*!< Stopped - Reset or Stop Rx Command issued */ +#define ETH_DMA_RECEIVEPROCESS_FETCHING ((uint32_t)0x00020000U) /*!< Running - fetching the Rx descriptor */ +#define ETH_DMA_RECEIVEPROCESS_WAITING ((uint32_t)0x00060000U) /*!< Running - waiting for packet */ +#define ETH_DMA_RECEIVEPROCESS_SUSPENDED ((uint32_t)0x00080000U) /*!< Suspended - Rx Descriptor unavailable */ +#define ETH_DMA_RECEIVEPROCESS_CLOSING ((uint32_t)0x000A0000U) /*!< Running - closing descriptor */ +#define ETH_DMA_RECEIVEPROCESS_QUEUING ((uint32_t)0x000E0000U) /*!< Running - queuing the receive frame into host memory */ + +/** + * @} + */ + +/** @defgroup ETH_DMA_overflow ETH DMA overflow + * @{ + */ +#define ETH_DMA_OVERFLOW_RXFIFOCOUNTER ((uint32_t)0x10000000U) /*!< Overflow bit for FIFO overflow counter */ +#define ETH_DMA_OVERFLOW_MISSEDFRAMECOUNTER ((uint32_t)0x00010000U) /*!< Overflow bit for missed frame counter */ +/** + * @} + */ + +/** @defgroup ETH_EXTI_LINE_WAKEUP ETH EXTI LINE WAKEUP + * @{ + */ +#define ETH_EXTI_LINE_WAKEUP ((uint32_t)0x00080000U) /*!< External interrupt line 19 Connected to the ETH EXTI Line */ + +/** + * @} + */ + +/** + * @} + */ + +/* Exported macro ------------------------------------------------------------*/ +/** @defgroup ETH_Exported_Macros ETH Exported Macros + * @brief macros to handle interrupts and specific clock configurations + * @{ + */ + +/** @brief Reset ETH handle state + * @param __HANDLE__ specifies the ETH handle. + * @retval None + */ +#if (USE_HAL_ETH_REGISTER_CALLBACKS == 1) +#define __HAL_ETH_RESET_HANDLE_STATE(__HANDLE__) do{ \ + (__HANDLE__)->State = HAL_ETH_STATE_RESET; \ + (__HANDLE__)->MspInitCallback = NULL; \ + (__HANDLE__)->MspDeInitCallback = NULL; \ + } while(0) +#else +#define __HAL_ETH_RESET_HANDLE_STATE(__HANDLE__) ((__HANDLE__)->State = HAL_ETH_STATE_RESET) +#endif /*USE_HAL_ETH_REGISTER_CALLBACKS */ + +/** + * @brief Checks whether the specified Ethernet DMA Tx Desc flag is set or not. + * @param __HANDLE__ ETH Handle + * @param __FLAG__ specifies the flag of TDES0 to check. + * @retval the ETH_DMATxDescFlag (SET or RESET). + */ +#define __HAL_ETH_DMATXDESC_GET_FLAG(__HANDLE__, __FLAG__) ((__HANDLE__)->TxDesc->Status & (__FLAG__) == (__FLAG__)) + +/** + * @brief Checks whether the specified Ethernet DMA Rx Desc flag is set or not. + * @param __HANDLE__ ETH Handle + * @param __FLAG__ specifies the flag of RDES0 to check. + * @retval the ETH_DMATxDescFlag (SET or RESET). + */ +#define __HAL_ETH_DMARXDESC_GET_FLAG(__HANDLE__, __FLAG__) ((__HANDLE__)->RxDesc->Status & (__FLAG__) == (__FLAG__)) + +/** + * @brief Enables the specified DMA Rx Desc receive interrupt. 
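+ * @note  Illustrative sketch, assuming an already-initialized handle named heth
+ *        (the handle name is an assumption, not part of this header): clearing DIC
+ *        on the current Rx descriptor is typically paired with unmasking the DMA
+ *        receive interrupt sources defined in this file.
+ * @code
+ *        __HAL_ETH_DMARXDESC_ENABLE_IT(&heth);
+ *        __HAL_ETH_DMA_ENABLE_IT(&heth, ETH_DMA_IT_NIS | ETH_DMA_IT_R);
+ * @endcode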
+ * @param __HANDLE__ ETH Handle + * @retval None + */ +#define __HAL_ETH_DMARXDESC_ENABLE_IT(__HANDLE__) ((__HANDLE__)->RxDesc->ControlBufferSize &=(~(uint32_t)ETH_DMARXDESC_DIC)) + +/** + * @brief Disables the specified DMA Rx Desc receive interrupt. + * @param __HANDLE__ ETH Handle + * @retval None + */ +#define __HAL_ETH_DMARXDESC_DISABLE_IT(__HANDLE__) ((__HANDLE__)->RxDesc->ControlBufferSize |= ETH_DMARXDESC_DIC) + +/** + * @brief Set the specified DMA Rx Desc Own bit. + * @param __HANDLE__ ETH Handle + * @retval None + */ +#define __HAL_ETH_DMARXDESC_SET_OWN_BIT(__HANDLE__) ((__HANDLE__)->RxDesc->Status |= ETH_DMARXDESC_OWN) + +/** + * @brief Returns the specified Ethernet DMA Tx Desc collision count. + * @param __HANDLE__ ETH Handle + * @retval The Transmit descriptor collision counter value. + */ +#define __HAL_ETH_DMATXDESC_GET_COLLISION_COUNT(__HANDLE__) (((__HANDLE__)->TxDesc->Status & ETH_DMATXDESC_CC) >> ETH_DMATXDESC_COLLISION_COUNTSHIFT) + +/** + * @brief Set the specified DMA Tx Desc Own bit. + * @param __HANDLE__ ETH Handle + * @retval None + */ +#define __HAL_ETH_DMATXDESC_SET_OWN_BIT(__HANDLE__) ((__HANDLE__)->TxDesc->Status |= ETH_DMATXDESC_OWN) + +/** + * @brief Enables the specified DMA Tx Desc Transmit interrupt. + * @param __HANDLE__ ETH Handle + * @retval None + */ +#define __HAL_ETH_DMATXDESC_ENABLE_IT(__HANDLE__) ((__HANDLE__)->TxDesc->Status |= ETH_DMATXDESC_IC) + +/** + * @brief Disables the specified DMA Tx Desc Transmit interrupt. + * @param __HANDLE__ ETH Handle + * @retval None + */ +#define __HAL_ETH_DMATXDESC_DISABLE_IT(__HANDLE__) ((__HANDLE__)->TxDesc->Status &= ~ETH_DMATXDESC_IC) + +/** + * @brief Selects the specified Ethernet DMA Tx Desc Checksum Insertion. + * @param __HANDLE__ ETH Handle + * @param __CHECKSUM__ specifies is the DMA Tx desc checksum insertion. + * This parameter can be one of the following values: + * @arg ETH_DMATXDESC_CHECKSUMBYPASS : Checksum bypass + * @arg ETH_DMATXDESC_CHECKSUMIPV4HEADER : IPv4 header checksum + * @arg ETH_DMATXDESC_CHECKSUMTCPUDPICMPSEGMENT : TCP/UDP/ICMP checksum. Pseudo header checksum is assumed to be present + * @arg ETH_DMATXDESC_CHECKSUMTCPUDPICMPFULL : TCP/UDP/ICMP checksum fully in hardware including pseudo header + * @retval None + */ +#define __HAL_ETH_DMATXDESC_CHECKSUM_INSERTION(__HANDLE__, __CHECKSUM__) ((__HANDLE__)->TxDesc->Status |= (__CHECKSUM__)) + +/** + * @brief Enables the DMA Tx Desc CRC. + * @param __HANDLE__ ETH Handle + * @retval None + */ +#define __HAL_ETH_DMATXDESC_CRC_ENABLE(__HANDLE__) ((__HANDLE__)->TxDesc->Status &= ~ETH_DMATXDESC_DC) + +/** + * @brief Disables the DMA Tx Desc CRC. + * @param __HANDLE__ ETH Handle + * @retval None + */ +#define __HAL_ETH_DMATXDESC_CRC_DISABLE(__HANDLE__) ((__HANDLE__)->TxDesc->Status |= ETH_DMATXDESC_DC) + +/** + * @brief Enables the DMA Tx Desc padding for frame shorter than 64 bytes. + * @param __HANDLE__ ETH Handle + * @retval None + */ +#define __HAL_ETH_DMATXDESC_SHORT_FRAME_PADDING_ENABLE(__HANDLE__) ((__HANDLE__)->TxDesc->Status &= ~ETH_DMATXDESC_DP) + +/** + * @brief Disables the DMA Tx Desc padding for frame shorter than 64 bytes. + * @param __HANDLE__ ETH Handle + * @retval None + */ +#define __HAL_ETH_DMATXDESC_SHORT_FRAME_PADDING_DISABLE(__HANDLE__) ((__HANDLE__)->TxDesc->Status |= ETH_DMATXDESC_DP) + +/** + * @brief Enables the specified Ethernet MAC interrupts. + * @param __HANDLE__ ETH Handle + * @param __INTERRUPT__ specifies the Ethernet MAC interrupt sources to be + * enabled or disabled. 
+ * This parameter can be any combination of the following values: + * @arg ETH_MAC_IT_TST : Time stamp trigger interrupt + * @arg ETH_MAC_IT_PMT : PMT interrupt + * @retval None + */ +#define __HAL_ETH_MAC_ENABLE_IT(__HANDLE__, __INTERRUPT__) ((__HANDLE__)->Instance->MACIMR |= (__INTERRUPT__)) + +/** + * @brief Disables the specified Ethernet MAC interrupts. + * @param __HANDLE__ ETH Handle + * @param __INTERRUPT__ specifies the Ethernet MAC interrupt sources to be + * enabled or disabled. + * This parameter can be any combination of the following values: + * @arg ETH_MAC_IT_TST : Time stamp trigger interrupt + * @arg ETH_MAC_IT_PMT : PMT interrupt + * @retval None + */ +#define __HAL_ETH_MAC_DISABLE_IT(__HANDLE__, __INTERRUPT__) ((__HANDLE__)->Instance->MACIMR &= ~(__INTERRUPT__)) + +/** + * @brief Initiate a Pause Control Frame (Full-duplex only). + * @param __HANDLE__ ETH Handle + * @retval None + */ +#define __HAL_ETH_INITIATE_PAUSE_CONTROL_FRAME(__HANDLE__) ((__HANDLE__)->Instance->MACFCR |= ETH_MACFCR_FCBBPA) + +/** + * @brief Checks whether the Ethernet flow control busy bit is set or not. + * @param __HANDLE__ ETH Handle + * @retval The new state of flow control busy status bit (SET or RESET). + */ +#define __HAL_ETH_GET_FLOW_CONTROL_BUSY_STATUS(__HANDLE__) (((__HANDLE__)->Instance->MACFCR & ETH_MACFCR_FCBBPA) == ETH_MACFCR_FCBBPA) + +/** + * @brief Enables the MAC Back Pressure operation activation (Half-duplex only). + * @param __HANDLE__ ETH Handle + * @retval None + */ +#define __HAL_ETH_BACK_PRESSURE_ACTIVATION_ENABLE(__HANDLE__) ((__HANDLE__)->Instance->MACFCR |= ETH_MACFCR_FCBBPA) + +/** + * @brief Disables the MAC BackPressure operation activation (Half-duplex only). + * @param __HANDLE__ ETH Handle + * @retval None + */ +#define __HAL_ETH_BACK_PRESSURE_ACTIVATION_DISABLE(__HANDLE__) ((__HANDLE__)->Instance->MACFCR &= ~ETH_MACFCR_FCBBPA) + +/** + * @brief Checks whether the specified Ethernet MAC flag is set or not. + * @param __HANDLE__ ETH Handle + * @param __FLAG__ specifies the flag to check. + * This parameter can be one of the following values: + * @arg ETH_MAC_FLAG_TST : Time stamp trigger flag + * @arg ETH_MAC_FLAG_MMCT : MMC transmit flag + * @arg ETH_MAC_FLAG_MMCR : MMC receive flag + * @arg ETH_MAC_FLAG_MMC : MMC flag + * @arg ETH_MAC_FLAG_PMT : PMT flag + * @retval The state of Ethernet MAC flag. + */ +#define __HAL_ETH_MAC_GET_FLAG(__HANDLE__, __FLAG__) (((__HANDLE__)->Instance->MACSR &( __FLAG__)) == ( __FLAG__)) + +/** + * @brief Enables the specified Ethernet DMA interrupts. + * @param __HANDLE__ ETH Handle + * @param __INTERRUPT__ specifies the Ethernet DMA interrupt sources to be + * enabled @ref ETH_DMA_Interrupts + * @retval None + */ +#define __HAL_ETH_DMA_ENABLE_IT(__HANDLE__, __INTERRUPT__) ((__HANDLE__)->Instance->DMAIER |= (__INTERRUPT__)) + +/** + * @brief Disables the specified Ethernet DMA interrupts. + * @param __HANDLE__ ETH Handle + * @param __INTERRUPT__ specifies the Ethernet DMA interrupt sources to be + * disabled. @ref ETH_DMA_Interrupts + * @retval None + */ +#define __HAL_ETH_DMA_DISABLE_IT(__HANDLE__, __INTERRUPT__) ((__HANDLE__)->Instance->DMAIER &= ~(__INTERRUPT__)) + +/** + * @brief Clears the Ethernet DMA IT pending bit. + * @param __HANDLE__ ETH Handle + * @param __INTERRUPT__ specifies the interrupt pending bit to clear. 
@ref ETH_DMA_Interrupts + * @retval None + */ +#define __HAL_ETH_DMA_CLEAR_IT(__HANDLE__, __INTERRUPT__) ((__HANDLE__)->Instance->DMASR =(__INTERRUPT__)) + +/** + * @brief Checks whether the specified Ethernet DMA flag is set or not. +* @param __HANDLE__ ETH Handle + * @param __FLAG__ specifies the flag to check. @ref ETH_DMA_Flags + * @retval The new state of ETH_DMA_FLAG (SET or RESET). + */ +#define __HAL_ETH_DMA_GET_FLAG(__HANDLE__, __FLAG__) (((__HANDLE__)->Instance->DMASR &( __FLAG__)) == ( __FLAG__)) + +/** + * @brief Checks whether the specified Ethernet DMA flag is set or not. + * @param __HANDLE__ ETH Handle + * @param __FLAG__ specifies the flag to clear. @ref ETH_DMA_Flags + * @retval The new state of ETH_DMA_FLAG (SET or RESET). + */ +#define __HAL_ETH_DMA_CLEAR_FLAG(__HANDLE__, __FLAG__) ((__HANDLE__)->Instance->DMASR = (__FLAG__)) + +/** + * @brief Checks whether the specified Ethernet DMA overflow flag is set or not. + * @param __HANDLE__ ETH Handle + * @param __OVERFLOW__ specifies the DMA overflow flag to check. + * This parameter can be one of the following values: + * @arg ETH_DMA_OVERFLOW_RXFIFOCOUNTER : Overflow for FIFO Overflows Counter + * @arg ETH_DMA_OVERFLOW_MISSEDFRAMECOUNTER : Overflow for Buffer Unavailable Missed Frame Counter + * @retval The state of Ethernet DMA overflow Flag (SET or RESET). + */ +#define __HAL_ETH_GET_DMA_OVERFLOW_STATUS(__HANDLE__, __OVERFLOW__) (((__HANDLE__)->Instance->DMAMFBOCR & (__OVERFLOW__)) == (__OVERFLOW__)) + +/** + * @brief Set the DMA Receive status watchdog timer register value + * @param __HANDLE__ ETH Handle + * @param __VALUE__ DMA Receive status watchdog timer register value + * @retval None + */ +#define __HAL_ETH_SET_RECEIVE_WATCHDOG_TIMER(__HANDLE__, __VALUE__) ((__HANDLE__)->Instance->DMARSWTR = (__VALUE__)) + +/** + * @brief Enables any unicast packet filtered by the MAC address + * recognition to be a wake-up frame. + * @param __HANDLE__ ETH Handle. + * @retval None + */ +#define __HAL_ETH_GLOBAL_UNICAST_WAKEUP_ENABLE(__HANDLE__) ((__HANDLE__)->Instance->MACPMTCSR |= ETH_MACPMTCSR_GU) + +/** + * @brief Disables any unicast packet filtered by the MAC address + * recognition to be a wake-up frame. + * @param __HANDLE__ ETH Handle. + * @retval None + */ +#define __HAL_ETH_GLOBAL_UNICAST_WAKEUP_DISABLE(__HANDLE__) ((__HANDLE__)->Instance->MACPMTCSR &= ~ETH_MACPMTCSR_GU) + +/** + * @brief Enables the MAC Wake-Up Frame Detection. + * @param __HANDLE__ ETH Handle. + * @retval None + */ +#define __HAL_ETH_WAKEUP_FRAME_DETECTION_ENABLE(__HANDLE__) ((__HANDLE__)->Instance->MACPMTCSR |= ETH_MACPMTCSR_WFE) + +/** + * @brief Disables the MAC Wake-Up Frame Detection. + * @param __HANDLE__ ETH Handle. + * @retval None + */ +#define __HAL_ETH_WAKEUP_FRAME_DETECTION_DISABLE(__HANDLE__) ((__HANDLE__)->Instance->MACPMTCSR &= ~ETH_MACPMTCSR_WFE) + +/** + * @brief Enables the MAC Magic Packet Detection. + * @param __HANDLE__ ETH Handle. + * @retval None + */ +#define __HAL_ETH_MAGIC_PACKET_DETECTION_ENABLE(__HANDLE__) ((__HANDLE__)->Instance->MACPMTCSR |= ETH_MACPMTCSR_MPE) + +/** + * @brief Disables the MAC Magic Packet Detection. + * @param __HANDLE__ ETH Handle. + * @retval None + */ +#define __HAL_ETH_MAGIC_PACKET_DETECTION_DISABLE(__HANDLE__) ((__HANDLE__)->Instance->MACPMTCSR &= ~ETH_MACPMTCSR_WFE) + +/** + * @brief Enables the MAC Power Down. 
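+ * @note  Illustrative sketch, assuming an already-initialized handle named heth
+ *        (the handle name is an assumption, not part of this header): a typical
+ *        remote wake-up setup arms the wanted detectors before setting the PD bit;
+ *        after a wake event, __HAL_ETH_GET_PMT_FLAG_STATUS() with ETH_PMT_FLAG_MPR
+ *        or ETH_PMT_FLAG_WUFR reports the cause.
+ * @code
+ *        __HAL_ETH_MAGIC_PACKET_DETECTION_ENABLE(&heth);
+ *        __HAL_ETH_WAKEUP_FRAME_DETECTION_ENABLE(&heth);
+ *        __HAL_ETH_POWER_DOWN_ENABLE(&heth);
+ * @endcode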
+ * @param __HANDLE__ ETH Handle + * @retval None + */ +#define __HAL_ETH_POWER_DOWN_ENABLE(__HANDLE__) ((__HANDLE__)->Instance->MACPMTCSR |= ETH_MACPMTCSR_PD) + +/** + * @brief Disables the MAC Power Down. + * @param __HANDLE__ ETH Handle + * @retval None + */ +#define __HAL_ETH_POWER_DOWN_DISABLE(__HANDLE__) ((__HANDLE__)->Instance->MACPMTCSR &= ~ETH_MACPMTCSR_PD) + +/** + * @brief Checks whether the specified Ethernet PMT flag is set or not. + * @param __HANDLE__ ETH Handle. + * @param __FLAG__ specifies the flag to check. + * This parameter can be one of the following values: + * @arg ETH_PMT_FLAG_WUFFRPR : Wake-Up Frame Filter Register Pointer Reset + * @arg ETH_PMT_FLAG_WUFR : Wake-Up Frame Received + * @arg ETH_PMT_FLAG_MPR : Magic Packet Received + * @retval The new state of Ethernet PMT Flag (SET or RESET). + */ +#define __HAL_ETH_GET_PMT_FLAG_STATUS(__HANDLE__, __FLAG__) (((__HANDLE__)->Instance->MACPMTCSR &( __FLAG__)) == ( __FLAG__)) + +/** + * @brief Preset and Initialize the MMC counters to almost-full value: 0xFFFF_FFF0 (full - 16) + * @param __HANDLE__ ETH Handle. + * @retval None + */ +#define __HAL_ETH_MMC_COUNTER_FULL_PRESET(__HANDLE__) ((__HANDLE__)->Instance->MMCCR |= (ETH_MMCCR_MCFHP | ETH_MMCCR_MCP)) + +/** + * @brief Preset and Initialize the MMC counters to almost-half value: 0x7FFF_FFF0 (half - 16) + * @param __HANDLE__ ETH Handle. + * @retval None + */ +#define __HAL_ETH_MMC_COUNTER_HALF_PRESET(__HANDLE__) do{(__HANDLE__)->Instance->MMCCR &= ~ETH_MMCCR_MCFHP;\ + (__HANDLE__)->Instance->MMCCR |= ETH_MMCCR_MCP;} while (0) + +/** + * @brief Enables the MMC Counter Freeze. + * @param __HANDLE__ ETH Handle. + * @retval None + */ +#define __HAL_ETH_MMC_COUNTER_FREEZE_ENABLE(__HANDLE__) ((__HANDLE__)->Instance->MMCCR |= ETH_MMCCR_MCF) + +/** + * @brief Disables the MMC Counter Freeze. + * @param __HANDLE__ ETH Handle. + * @retval None + */ +#define __HAL_ETH_MMC_COUNTER_FREEZE_DISABLE(__HANDLE__) ((__HANDLE__)->Instance->MMCCR &= ~ETH_MMCCR_MCF) + +/** + * @brief Enables the MMC Reset On Read. + * @param __HANDLE__ ETH Handle. + * @retval None + */ +#define __HAL_ETH_ETH_MMC_RESET_ONREAD_ENABLE(__HANDLE__) ((__HANDLE__)->Instance->MMCCR |= ETH_MMCCR_ROR) + +/** + * @brief Disables the MMC Reset On Read. + * @param __HANDLE__ ETH Handle. + * @retval None + */ +#define __HAL_ETH_ETH_MMC_RESET_ONREAD_DISABLE(__HANDLE__) ((__HANDLE__)->Instance->MMCCR &= ~ETH_MMCCR_ROR) + +/** + * @brief Enables the MMC Counter Stop Rollover. + * @param __HANDLE__ ETH Handle. + * @retval None + */ +#define __HAL_ETH_ETH_MMC_COUNTER_ROLLOVER_ENABLE(__HANDLE__) ((__HANDLE__)->Instance->MMCCR &= ~ETH_MMCCR_CSR) + +/** + * @brief Disables the MMC Counter Stop Rollover. + * @param __HANDLE__ ETH Handle. + * @retval None + */ +#define __HAL_ETH_ETH_MMC_COUNTER_ROLLOVER_DISABLE(__HANDLE__) ((__HANDLE__)->Instance->MMCCR |= ETH_MMCCR_CSR) + +/** + * @brief Resets the MMC Counters. + * @param __HANDLE__ ETH Handle. + * @retval None + */ +#define __HAL_ETH_MMC_COUNTERS_RESET(__HANDLE__) ((__HANDLE__)->Instance->MMCCR |= ETH_MMCCR_CR) + +/** + * @brief Enables the specified Ethernet MMC Rx interrupts. + * @param __HANDLE__ ETH Handle. + * @param __INTERRUPT__ specifies the Ethernet MMC interrupt sources to be enabled or disabled. 
+ * This parameter can be one of the following values: + * @arg ETH_MMC_IT_RGUF : When Rx good unicast frames counter reaches half the maximum value + * @arg ETH_MMC_IT_RFAE : When Rx alignment error counter reaches half the maximum value + * @arg ETH_MMC_IT_RFCE : When Rx crc error counter reaches half the maximum value + * @retval None + */ +#define __HAL_ETH_MMC_RX_IT_ENABLE(__HANDLE__, __INTERRUPT__) (__HANDLE__)->Instance->MMCRIMR &= ~((__INTERRUPT__) & 0xEFFFFFFF) +/** + * @brief Disables the specified Ethernet MMC Rx interrupts. + * @param __HANDLE__ ETH Handle. + * @param __INTERRUPT__ specifies the Ethernet MMC interrupt sources to be enabled or disabled. + * This parameter can be one of the following values: + * @arg ETH_MMC_IT_RGUF : When Rx good unicast frames counter reaches half the maximum value + * @arg ETH_MMC_IT_RFAE : When Rx alignment error counter reaches half the maximum value + * @arg ETH_MMC_IT_RFCE : When Rx crc error counter reaches half the maximum value + * @retval None + */ +#define __HAL_ETH_MMC_RX_IT_DISABLE(__HANDLE__, __INTERRUPT__) (__HANDLE__)->Instance->MMCRIMR |= ((__INTERRUPT__) & 0xEFFFFFFF) +/** + * @brief Enables the specified Ethernet MMC Tx interrupts. + * @param __HANDLE__ ETH Handle. + * @param __INTERRUPT__ specifies the Ethernet MMC interrupt sources to be enabled or disabled. + * This parameter can be one of the following values: + * @arg ETH_MMC_IT_TGF : When Tx good frame counter reaches half the maximum value + * @arg ETH_MMC_IT_TGFMSC: When Tx good multi col counter reaches half the maximum value + * @arg ETH_MMC_IT_TGFSC : When Tx good single col counter reaches half the maximum value + * @retval None + */ +#define __HAL_ETH_MMC_TX_IT_ENABLE(__HANDLE__, __INTERRUPT__) ((__HANDLE__)->Instance->MMCRIMR &= ~ (__INTERRUPT__)) + +/** + * @brief Disables the specified Ethernet MMC Tx interrupts. + * @param __HANDLE__ ETH Handle. + * @param __INTERRUPT__ specifies the Ethernet MMC interrupt sources to be enabled or disabled. + * This parameter can be one of the following values: + * @arg ETH_MMC_IT_TGF : When Tx good frame counter reaches half the maximum value + * @arg ETH_MMC_IT_TGFMSC: When Tx good multi col counter reaches half the maximum value + * @arg ETH_MMC_IT_TGFSC : When Tx good single col counter reaches half the maximum value + * @retval None + */ +#define __HAL_ETH_MMC_TX_IT_DISABLE(__HANDLE__, __INTERRUPT__) ((__HANDLE__)->Instance->MMCRIMR |= (__INTERRUPT__)) + +/** + * @brief Enables the ETH External interrupt line. + * @retval None + */ +#define __HAL_ETH_WAKEUP_EXTI_ENABLE_IT() EXTI->IMR |= (ETH_EXTI_LINE_WAKEUP) + +/** + * @brief Disables the ETH External interrupt line. + * @retval None + */ +#define __HAL_ETH_WAKEUP_EXTI_DISABLE_IT() EXTI->IMR &= ~(ETH_EXTI_LINE_WAKEUP) + +/** + * @brief Enable event on ETH External event line. + * @retval None. + */ +#define __HAL_ETH_WAKEUP_EXTI_ENABLE_EVENT() EXTI->EMR |= (ETH_EXTI_LINE_WAKEUP) + +/** + * @brief Disable event on ETH External event line + * @retval None. + */ +#define __HAL_ETH_WAKEUP_EXTI_DISABLE_EVENT() EXTI->EMR &= ~(ETH_EXTI_LINE_WAKEUP) + +/** + * @brief Get flag of the ETH External interrupt line. + * @retval None + */ +#define __HAL_ETH_WAKEUP_EXTI_GET_FLAG() EXTI->PR & (ETH_EXTI_LINE_WAKEUP) + +/** + * @brief Clear flag of the ETH External interrupt line. + * @retval None + */ +#define __HAL_ETH_WAKEUP_EXTI_CLEAR_FLAG() EXTI->PR = (ETH_EXTI_LINE_WAKEUP) + +/** + * @brief Enables rising edge trigger to the ETH External interrupt line. 
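+ * @note  Illustrative sketch of arming the Ethernet wake-up EXTI line with the
+ *        macros in this group; the corresponding NVIC interrupt (ETH_WKUP_IRQn on
+ *        devices that provide it) must be enabled separately, and the handler
+ *        should clear the pending flag.
+ * @code
+ *        __HAL_ETH_WAKEUP_EXTI_ENABLE_RISING_EDGE_TRIGGER();
+ *        __HAL_ETH_WAKEUP_EXTI_ENABLE_IT();
+ *
+ *        if (__HAL_ETH_WAKEUP_EXTI_GET_FLAG())
+ *        {
+ *          __HAL_ETH_WAKEUP_EXTI_CLEAR_FLAG();
+ *        }
+ * @endcode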
+ * @retval None + */ +#define __HAL_ETH_WAKEUP_EXTI_ENABLE_RISING_EDGE_TRIGGER() EXTI->RTSR |= ETH_EXTI_LINE_WAKEUP + +/** + * @brief Disables the rising edge trigger to the ETH External interrupt line. + * @retval None + */ +#define __HAL_ETH_WAKEUP_EXTI_DISABLE_RISING_EDGE_TRIGGER() EXTI->RTSR &= ~(ETH_EXTI_LINE_WAKEUP) + +/** + * @brief Enables falling edge trigger to the ETH External interrupt line. + * @retval None + */ +#define __HAL_ETH_WAKEUP_EXTI_ENABLE_FALLING_EDGE_TRIGGER() EXTI->FTSR |= (ETH_EXTI_LINE_WAKEUP) + +/** + * @brief Disables falling edge trigger to the ETH External interrupt line. + * @retval None + */ +#define __HAL_ETH_WAKEUP_EXTI_DISABLE_FALLING_EDGE_TRIGGER() EXTI->FTSR &= ~(ETH_EXTI_LINE_WAKEUP) + +/** + * @brief Enables rising/falling edge trigger to the ETH External interrupt line. + * @retval None + */ +#define __HAL_ETH_WAKEUP_EXTI_ENABLE_FALLINGRISING_TRIGGER() do{EXTI->RTSR |= ETH_EXTI_LINE_WAKEUP;\ + EXTI->FTSR |= ETH_EXTI_LINE_WAKEUP;\ + }while(0) + +/** + * @brief Disables rising/falling edge trigger to the ETH External interrupt line. + * @retval None + */ +#define __HAL_ETH_WAKEUP_EXTI_DISABLE_FALLINGRISING_TRIGGER() do{EXTI->RTSR &= ~(ETH_EXTI_LINE_WAKEUP);\ + EXTI->FTSR &= ~(ETH_EXTI_LINE_WAKEUP);\ + }while(0) + +/** + * @brief Generate a Software interrupt on selected EXTI line. + * @retval None. + */ +#define __HAL_ETH_WAKEUP_EXTI_GENERATE_SWIT() EXTI->SWIER|= ETH_EXTI_LINE_WAKEUP + +/** + * @} + */ +/* Exported functions --------------------------------------------------------*/ + +/** @addtogroup ETH_Exported_Functions + * @{ + */ + +/* Initialization and de-initialization functions ****************************/ + +/** @addtogroup ETH_Exported_Functions_Group1 + * @{ + */ +HAL_StatusTypeDef HAL_ETH_Init(ETH_HandleTypeDef *heth); +HAL_StatusTypeDef HAL_ETH_DeInit(ETH_HandleTypeDef *heth); +void HAL_ETH_MspInit(ETH_HandleTypeDef *heth); +void HAL_ETH_MspDeInit(ETH_HandleTypeDef *heth); +HAL_StatusTypeDef HAL_ETH_DMATxDescListInit(ETH_HandleTypeDef *heth, ETH_DMADescTypeDef *DMATxDescTab, uint8_t* TxBuff, uint32_t TxBuffCount); +HAL_StatusTypeDef HAL_ETH_DMARxDescListInit(ETH_HandleTypeDef *heth, ETH_DMADescTypeDef *DMARxDescTab, uint8_t *RxBuff, uint32_t RxBuffCount); +/* Callbacks Register/UnRegister functions ***********************************/ +#if (USE_HAL_ETH_REGISTER_CALLBACKS == 1) +HAL_StatusTypeDef HAL_ETH_RegisterCallback(ETH_HandleTypeDef *heth, HAL_ETH_CallbackIDTypeDef CallbackID, pETH_CallbackTypeDef pCallback); +HAL_StatusTypeDef HAL_ETH_UnRegisterCallback(ETH_HandleTypeDef *heth, HAL_ETH_CallbackIDTypeDef CallbackID); +#endif /* USE_HAL_ETH_REGISTER_CALLBACKS */ + +/** + * @} + */ +/* IO operation functions ****************************************************/ + +/** @addtogroup ETH_Exported_Functions_Group2 + * @{ + */ +HAL_StatusTypeDef HAL_ETH_TransmitFrame(ETH_HandleTypeDef *heth, uint32_t FrameLength); +HAL_StatusTypeDef HAL_ETH_GetReceivedFrame(ETH_HandleTypeDef *heth); +/* Communication with PHY functions*/ +HAL_StatusTypeDef HAL_ETH_ReadPHYRegister(ETH_HandleTypeDef *heth, uint16_t PHYReg, uint32_t *RegValue); +HAL_StatusTypeDef HAL_ETH_WritePHYRegister(ETH_HandleTypeDef *heth, uint16_t PHYReg, uint32_t RegValue); +/* Non-Blocking mode: Interrupt */ +HAL_StatusTypeDef HAL_ETH_GetReceivedFrame_IT(ETH_HandleTypeDef *heth); +void HAL_ETH_IRQHandler(ETH_HandleTypeDef *heth); +/* Callback in non blocking modes (Interrupt) */ +void HAL_ETH_TxCpltCallback(ETH_HandleTypeDef *heth); +void HAL_ETH_RxCpltCallback(ETH_HandleTypeDef 
*heth); +void HAL_ETH_ErrorCallback(ETH_HandleTypeDef *heth); +/** + * @} + */ + +/* Peripheral Control functions **********************************************/ + +/** @addtogroup ETH_Exported_Functions_Group3 + * @{ + */ + +HAL_StatusTypeDef HAL_ETH_Start(ETH_HandleTypeDef *heth); +HAL_StatusTypeDef HAL_ETH_Stop(ETH_HandleTypeDef *heth); +HAL_StatusTypeDef HAL_ETH_ConfigMAC(ETH_HandleTypeDef *heth, ETH_MACInitTypeDef *macconf); +HAL_StatusTypeDef HAL_ETH_ConfigDMA(ETH_HandleTypeDef *heth, ETH_DMAInitTypeDef *dmaconf); +/** + * @} + */ + +/* Peripheral State functions ************************************************/ + +/** @addtogroup ETH_Exported_Functions_Group4 + * @{ + */ +HAL_ETH_StateTypeDef HAL_ETH_GetState(ETH_HandleTypeDef *heth); +/** + * @} + */ + +/** + * @} + */ + +/** + * @} + */ + +/** + * @} + */ +#endif /* ETH */ + +#ifdef __cplusplus +} +#endif + +#endif /* __STM32F7xx_HAL_ETH_H */ + + + diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_exti.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_exti.h new file mode 100644 index 0000000..992d22e --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_exti.h @@ -0,0 +1,317 @@ +/** + ****************************************************************************** + * @file stm32f7xx_hal_exti.h + * @author MCD Application Team + * @brief Header file of EXTI HAL module. + ****************************************************************************** + * @attention + * + * Copyright (c) 2018 STMicroelectronics. + * All rights reserved. + * + * This software is licensed under terms that can be found in the LICENSE file + * in the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. + * + ****************************************************************************** + */ + +/* Define to prevent recursive inclusion -------------------------------------*/ +#ifndef STM32F7xx_HAL_EXTI_H +#define STM32F7xx_HAL_EXTI_H + +#ifdef __cplusplus +extern "C" { +#endif + +/* Includes ------------------------------------------------------------------*/ +#include "stm32f7xx_hal_def.h" + +/** @addtogroup STM32F7xx_HAL_Driver + * @{ + */ + +/** @defgroup EXTI EXTI + * @brief EXTI HAL module driver + * @{ + */ + +/* Exported types ------------------------------------------------------------*/ + +/** @defgroup EXTI_Exported_Types EXTI Exported Types + * @{ + */ +typedef enum +{ + HAL_EXTI_COMMON_CB_ID = 0x00U +} EXTI_CallbackIDTypeDef; + +/** + * @brief EXTI Handle structure definition + */ +typedef struct +{ + uint32_t Line; /*!< Exti line number */ + void (* PendingCallback)(void); /*!< Exti pending callback */ +} EXTI_HandleTypeDef; + +/** + * @brief EXTI Configuration structure definition + */ +typedef struct +{ + uint32_t Line; /*!< The Exti line to be configured. This parameter + can be a value of @ref EXTI_Line */ + uint32_t Mode; /*!< The Exit Mode to be configured for a core. + This parameter can be a combination of @ref EXTI_Mode */ + uint32_t Trigger; /*!< The Exti Trigger to be configured. This parameter + can be a value of @ref EXTI_Trigger */ + uint32_t GPIOSel; /*!< The Exti GPIO multiplexer selection to be configured. + This parameter is only possible for line 0 to 15. 
It + can be a value of @ref EXTI_GPIOSel */ +} EXTI_ConfigTypeDef; + +/** + * @} + */ + +/* Exported constants --------------------------------------------------------*/ +/** @defgroup EXTI_Exported_Constants EXTI Exported Constants + * @{ + */ + +/** @defgroup EXTI_Line EXTI Line + * @{ + */ +#define EXTI_LINE_0 (EXTI_GPIO | 0x00u) /*!< External interrupt line 0 */ +#define EXTI_LINE_1 (EXTI_GPIO | 0x01u) /*!< External interrupt line 1 */ +#define EXTI_LINE_2 (EXTI_GPIO | 0x02u) /*!< External interrupt line 2 */ +#define EXTI_LINE_3 (EXTI_GPIO | 0x03u) /*!< External interrupt line 3 */ +#define EXTI_LINE_4 (EXTI_GPIO | 0x04u) /*!< External interrupt line 4 */ +#define EXTI_LINE_5 (EXTI_GPIO | 0x05u) /*!< External interrupt line 5 */ +#define EXTI_LINE_6 (EXTI_GPIO | 0x06u) /*!< External interrupt line 6 */ +#define EXTI_LINE_7 (EXTI_GPIO | 0x07u) /*!< External interrupt line 7 */ +#define EXTI_LINE_8 (EXTI_GPIO | 0x08u) /*!< External interrupt line 8 */ +#define EXTI_LINE_9 (EXTI_GPIO | 0x09u) /*!< External interrupt line 9 */ +#define EXTI_LINE_10 (EXTI_GPIO | 0x0Au) /*!< External interrupt line 10 */ +#define EXTI_LINE_11 (EXTI_GPIO | 0x0Bu) /*!< External interrupt line 11 */ +#define EXTI_LINE_12 (EXTI_GPIO | 0x0Cu) /*!< External interrupt line 12 */ +#define EXTI_LINE_13 (EXTI_GPIO | 0x0Du) /*!< External interrupt line 13 */ +#define EXTI_LINE_14 (EXTI_GPIO | 0x0Eu) /*!< External interrupt line 14 */ +#define EXTI_LINE_15 (EXTI_GPIO | 0x0Fu) /*!< External interrupt line 15 */ +#define EXTI_LINE_16 (EXTI_CONFIG | 0x10u) /*!< External interrupt line 16 Connected to the PVD Output */ +#define EXTI_LINE_17 (EXTI_CONFIG | 0x11u) /*!< External interrupt line 17 Connected to the RTC Alarm event */ +#define EXTI_LINE_18 (EXTI_CONFIG | 0x12u) /*!< External interrupt line 18 Connected to the USB OTG FS Wakeup from suspend event */ +#if defined(ETH) +#define EXTI_LINE_19 (EXTI_CONFIG | 0x13u) /*!< External interrupt line 19 Connected to the Ethernet Wakeup event */ +#else +#define EXTI_LINE_19 (EXTI_RESERVED | 0x13u) /*!< No interrupt supported in this line */ +#endif /* ETH */ +#define EXTI_LINE_20 (EXTI_CONFIG | 0x14u) /*!< External interrupt line 20 Connected to the USB OTG HS (configured in FS) Wakeup event */ +#define EXTI_LINE_21 (EXTI_CONFIG | 0x15u) /*!< External interrupt line 21 Connected to the RTC Tamper and Time Stamp events */ +#define EXTI_LINE_22 (EXTI_CONFIG | 0x16u) /*!< External interrupt line 22 Connected to the RTC Wakeup event */ +#define EXTI_LINE_23 (EXTI_CONFIG | 0x17u) /*!< External interrupt line 23 Connected to the LPTIM Wakeup event */ +#if defined(EXTI_IMR_IM24) +#define EXTI_LINE_24 (EXTI_CONFIG | 0x18u) /*!< External interrupt line 24 Connected to the MDIO Slave global Interrupt Wakeup event */ +#endif /* EXTI_IMR_IM24 */ +/** + * @} + */ + +/** @defgroup EXTI_Mode EXTI Mode + * @{ + */ +#define EXTI_MODE_NONE 0x00000000u +#define EXTI_MODE_INTERRUPT 0x00000001u +#define EXTI_MODE_EVENT 0x00000002u +/** + * @} + */ + +/** @defgroup EXTI_Trigger EXTI Trigger + * @{ + */ + +#define EXTI_TRIGGER_NONE 0x00000000u +#define EXTI_TRIGGER_RISING 0x00000001u +#define EXTI_TRIGGER_FALLING 0x00000002u +#define EXTI_TRIGGER_RISING_FALLING (EXTI_TRIGGER_RISING | EXTI_TRIGGER_FALLING) +/** + * @} + */ + +/** @defgroup EXTI_GPIOSel EXTI GPIOSel + * @brief + * @{ + */ +#define EXTI_GPIOA 0x00000000u +#define EXTI_GPIOB 0x00000001u +#define EXTI_GPIOC 0x00000002u +#define EXTI_GPIOD 0x00000003u +#define EXTI_GPIOE 0x00000004u +#define EXTI_GPIOF 0x00000005u +#define EXTI_GPIOG 
0x00000006u +#define EXTI_GPIOH 0x00000007u +#define EXTI_GPIOI 0x00000008u +#define EXTI_GPIOJ 0x00000009u +#if defined (GPIOK) +#define EXTI_GPIOK 0x0000000Au +#endif /* GPIOK */ + +/** + * @} + */ + +/* Exported macro ------------------------------------------------------------*/ +/** @defgroup EXTI_Exported_Macros EXTI Exported Macros + * @{ + */ + +/** + * @} + */ + +/* Private constants --------------------------------------------------------*/ +/** @defgroup EXTI_Private_Constants EXTI Private Constants + * @{ + */ +/** + * @brief EXTI Line property definition + */ +#define EXTI_PROPERTY_SHIFT 24u +#define EXTI_CONFIG (0x02uL << EXTI_PROPERTY_SHIFT) +#define EXTI_GPIO ((0x04uL << EXTI_PROPERTY_SHIFT) | EXTI_CONFIG) +#define EXTI_RESERVED (0x08uL << EXTI_PROPERTY_SHIFT) +#define EXTI_PROPERTY_MASK (EXTI_CONFIG | EXTI_GPIO) + +/** + * @brief EXTI bit usage + */ +#define EXTI_PIN_MASK 0x0000001Fu + +/** + * @brief EXTI Mask for interrupt & event mode + */ +#define EXTI_MODE_MASK (EXTI_MODE_EVENT | EXTI_MODE_INTERRUPT) + +/** + * @brief EXTI Mask for trigger possibilities + */ +#define EXTI_TRIGGER_MASK (EXTI_TRIGGER_RISING | EXTI_TRIGGER_FALLING) + +/** + * @brief EXTI Line number + */ +#if defined(EXTI_IMR_IM24) +#define EXTI_LINE_NB 25u +#else +#define EXTI_LINE_NB 24u +#endif /* EXTI_IMR_IM24 */ + + +/** + * @} + */ + +/* Private macros ------------------------------------------------------------*/ +/** @defgroup EXTI_Private_Macros EXTI Private Macros + * @{ + */ +#define IS_EXTI_LINE(__EXTI_LINE__) ((((__EXTI_LINE__) & ~(EXTI_PROPERTY_MASK | EXTI_PIN_MASK)) == 0x00u) && \ + ((((__EXTI_LINE__) & EXTI_PROPERTY_MASK) == EXTI_CONFIG) || \ + (((__EXTI_LINE__) & EXTI_PROPERTY_MASK) == EXTI_GPIO)) && \ + (((__EXTI_LINE__) & EXTI_PIN_MASK) < EXTI_LINE_NB)) + +#define IS_EXTI_MODE(__EXTI_LINE__) ((((__EXTI_LINE__) & EXTI_MODE_MASK) != 0x00u) && \ + (((__EXTI_LINE__) & ~EXTI_MODE_MASK) == 0x00u)) + +#define IS_EXTI_TRIGGER(__EXTI_LINE__) (((__EXTI_LINE__) & ~EXTI_TRIGGER_MASK) == 0x00u) + +#define IS_EXTI_PENDING_EDGE(__EXTI_LINE__) (((__EXTI_LINE__) == EXTI_TRIGGER_FALLING) || \ + ((__EXTI_LINE__) == EXTI_TRIGGER_RISING) || \ + ((__EXTI_LINE__) == EXTI_TRIGGER_RISING_FALLING)) + +#define IS_EXTI_CONFIG_LINE(__EXTI_LINE__) (((__EXTI_LINE__) & EXTI_CONFIG) != 0x00u) + +#if defined (GPIOK) +#define IS_EXTI_GPIO_PORT(__PORT__) (((__PORT__) == EXTI_GPIOA) || \ + ((__PORT__) == EXTI_GPIOB) || \ + ((__PORT__) == EXTI_GPIOC) || \ + ((__PORT__) == EXTI_GPIOD) || \ + ((__PORT__) == EXTI_GPIOE) || \ + ((__PORT__) == EXTI_GPIOF) || \ + ((__PORT__) == EXTI_GPIOG) || \ + ((__PORT__) == EXTI_GPIOH) || \ + ((__PORT__) == EXTI_GPIOI) || \ + ((__PORT__) == EXTI_GPIOJ) || \ + ((__PORT__) == EXTI_GPIOK)) +#else +#define IS_EXTI_GPIO_PORT(__PORT__) (((__PORT__) == EXTI_GPIOA) || \ + ((__PORT__) == EXTI_GPIOB) || \ + ((__PORT__) == EXTI_GPIOC) || \ + ((__PORT__) == EXTI_GPIOD) || \ + ((__PORT__) == EXTI_GPIOE) || \ + ((__PORT__) == EXTI_GPIOF) || \ + ((__PORT__) == EXTI_GPIOG) || \ + ((__PORT__) == EXTI_GPIOH) || \ + ((__PORT__) == EXTI_GPIOI) || \ + ((__PORT__) == EXTI_GPIOJ)) +#endif /* GPIOK */ + +#define IS_EXTI_GPIO_PIN(__PIN__) ((__PIN__) < 16U) +/** + * @} + */ + +/* Exported functions --------------------------------------------------------*/ +/** @defgroup EXTI_Exported_Functions EXTI Exported Functions + * @brief EXTI Exported Functions + * @{ + */ + +/** @defgroup EXTI_Exported_Functions_Group1 Configuration functions + * @brief Configuration functions + * @{ + */ +/* Configuration functions 
****************************************************/ +HAL_StatusTypeDef HAL_EXTI_SetConfigLine(EXTI_HandleTypeDef *hexti, EXTI_ConfigTypeDef *pExtiConfig); +HAL_StatusTypeDef HAL_EXTI_GetConfigLine(EXTI_HandleTypeDef *hexti, EXTI_ConfigTypeDef *pExtiConfig); +HAL_StatusTypeDef HAL_EXTI_ClearConfigLine(EXTI_HandleTypeDef *hexti); +HAL_StatusTypeDef HAL_EXTI_RegisterCallback(EXTI_HandleTypeDef *hexti, EXTI_CallbackIDTypeDef CallbackID, void (*pPendingCbfn)(void)); +HAL_StatusTypeDef HAL_EXTI_GetHandle(EXTI_HandleTypeDef *hexti, uint32_t ExtiLine); +/** + * @} + */ + +/** @defgroup EXTI_Exported_Functions_Group2 IO operation functions + * @brief IO operation functions + * @{ + */ +/* IO operation functions *****************************************************/ +void HAL_EXTI_IRQHandler(EXTI_HandleTypeDef *hexti); +uint32_t HAL_EXTI_GetPending(EXTI_HandleTypeDef *hexti, uint32_t Edge); +void HAL_EXTI_ClearPending(EXTI_HandleTypeDef *hexti, uint32_t Edge); +void HAL_EXTI_GenerateSWI(EXTI_HandleTypeDef *hexti); + +/** + * @} + */ + +/** + * @} + */ + +/** + * @} + */ + +/** + * @} + */ + +#ifdef __cplusplus +} +#endif + +#endif /* STM32F7xx_HAL_EXTI_H */ + diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_flash.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_flash.h new file mode 100644 index 0000000..39e859e --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_flash.h @@ -0,0 +1,415 @@ +/** + ****************************************************************************** + * @file stm32f7xx_hal_flash.h + * @author MCD Application Team + * @brief Header file of FLASH HAL module. + ****************************************************************************** + * @attention + * + * Copyright (c) 2017 STMicroelectronics. + * All rights reserved. + * + * This software is licensed under terms that can be found in the LICENSE file in + * the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. 
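/* ---------------------------------------------------------------------------
 * Editor's note: illustrative usage sketch only -- this is NOT part of the
 * upstream ST header above and is not added by this diff. It shows the
 * typical call sequence for the EXTI API declared in stm32f7xx_hal_exti.h
 * (HAL_EXTI_SetConfigLine / HAL_EXTI_RegisterCallback / HAL_EXTI_IRQHandler).
 * The line, port and callback names are hypothetical; NVIC priority and
 * enable for EXTI15_10_IRQn are assumed to be configured elsewhere.
 * ------------------------------------------------------------------------- */
static EXTI_HandleTypeDef hexti13;

static void Exti13PendingCallback(void)
{
  /* application reaction to the edge on line 13 goes here */
}

static void ConfigureUserButtonExti(void)
{
  EXTI_ConfigTypeDef cfg = {0};

  cfg.Line    = EXTI_LINE_13;           /* GPIO-capable line (0..15)            */
  cfg.Mode    = EXTI_MODE_INTERRUPT;    /* interrupt rather than event mode     */
  cfg.Trigger = EXTI_TRIGGER_RISING;
  cfg.GPIOSel = EXTI_GPIOC;             /* route port C pin 13 onto line 13     */

  HAL_EXTI_SetConfigLine(&hexti13, &cfg);
  HAL_EXTI_RegisterCallback(&hexti13, HAL_EXTI_COMMON_CB_ID, Exti13PendingCallback);
}

/* The vectored IRQ handler then simply forwards to the HAL, which clears the
 * pending bit and invokes the registered callback. */
void EXTI15_10_IRQHandler(void)
{
  HAL_EXTI_IRQHandler(&hexti13);
}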
+ ****************************************************************************** + */ + +/* Define to prevent recursive inclusion -------------------------------------*/ +#ifndef __STM32F7xx_HAL_FLASH_H +#define __STM32F7xx_HAL_FLASH_H + +#ifdef __cplusplus + extern "C" { +#endif + +/* Includes ------------------------------------------------------------------*/ +#include "stm32f7xx_hal_def.h" + +/** @addtogroup STM32F7xx_HAL_Driver + * @{ + */ + +/** @addtogroup FLASH + * @{ + */ + +/* Exported types ------------------------------------------------------------*/ +/** @defgroup FLASH_Exported_Types FLASH Exported Types + * @{ + */ + +/** + * @brief FLASH Procedure structure definition + */ +typedef enum +{ + FLASH_PROC_NONE = 0U, + FLASH_PROC_SECTERASE, + FLASH_PROC_MASSERASE, + FLASH_PROC_PROGRAM +} FLASH_ProcedureTypeDef; + + +/** + * @brief FLASH handle Structure definition + */ +typedef struct +{ + __IO FLASH_ProcedureTypeDef ProcedureOnGoing; /* Internal variable to indicate which procedure is ongoing or not in IT context */ + + __IO uint32_t NbSectorsToErase; /* Internal variable to save the remaining sectors to erase in IT context */ + + __IO uint8_t VoltageForErase; /* Internal variable to provide voltage range selected by user in IT context */ + + __IO uint32_t Sector; /* Internal variable to define the current sector which is erasing */ + + __IO uint32_t Address; /* Internal variable to save address selected for program */ + + HAL_LockTypeDef Lock; /* FLASH locking object */ + + __IO uint32_t ErrorCode; /* FLASH error code */ + +}FLASH_ProcessTypeDef; + +/** + * @} + */ + +/* Exported constants --------------------------------------------------------*/ +/** @defgroup FLASH_Exported_Constants FLASH Exported Constants + * @{ + */ + +/** @defgroup FLASH_Error_Code FLASH Error Code + * @brief FLASH Error Code + * @{ + */ +#define HAL_FLASH_ERROR_NONE ((uint32_t)0x00000000U) /*!< No error */ +#define HAL_FLASH_ERROR_ERS ((uint32_t)0x00000002U) /*!< Programming Sequence error */ +#define HAL_FLASH_ERROR_PGP ((uint32_t)0x00000004U) /*!< Programming Parallelism error */ +#define HAL_FLASH_ERROR_PGA ((uint32_t)0x00000008U) /*!< Programming Alignment error */ +#define HAL_FLASH_ERROR_WRP ((uint32_t)0x00000010U) /*!< Write protection error */ +#define HAL_FLASH_ERROR_OPERATION ((uint32_t)0x00000020U) /*!< Operation Error */ +#define HAL_FLASH_ERROR_RD ((uint32_t)0x00000040U) /*!< Read Protection Error */ +/** + * @} + */ + +/** @defgroup FLASH_Type_Program FLASH Type Program + * @{ + */ +#define FLASH_TYPEPROGRAM_BYTE ((uint32_t)0x00U) /*!< Program byte (8-bit) at a specified address */ +#define FLASH_TYPEPROGRAM_HALFWORD ((uint32_t)0x01U) /*!< Program a half-word (16-bit) at a specified address */ +#define FLASH_TYPEPROGRAM_WORD ((uint32_t)0x02U) /*!< Program a word (32-bit) at a specified address */ +#define FLASH_TYPEPROGRAM_DOUBLEWORD ((uint32_t)0x03U) /*!< Program a double word (64-bit) at a specified address */ +/** + * @} + */ + +/** @defgroup FLASH_Flag_definition FLASH Flag definition + * @brief Flag definition + * @{ + */ +#define FLASH_FLAG_EOP FLASH_SR_EOP /*!< FLASH End of Operation flag */ +#define FLASH_FLAG_OPERR FLASH_SR_OPERR /*!< FLASH operation Error flag */ +#define FLASH_FLAG_WRPERR FLASH_SR_WRPERR /*!< FLASH Write protected error flag */ +#define FLASH_FLAG_PGAERR FLASH_SR_PGAERR /*!< FLASH Programming Alignment error flag */ +#define FLASH_FLAG_PGPERR FLASH_SR_PGPERR /*!< FLASH Programming Parallelism error flag */ +#define FLASH_FLAG_ERSERR FLASH_SR_ERSERR /*!< 
FLASH Erasing Sequence error flag */ +#define FLASH_FLAG_BSY FLASH_SR_BSY /*!< FLASH Busy flag */ + +#if defined (FLASH_OPTCR2_PCROP) +#define FLASH_FLAG_RDERR FLASH_SR_RDERR /*!< FLASH Read protection error flag */ +#define FLASH_FLAG_ALL_ERRORS (FLASH_FLAG_OPERR | FLASH_FLAG_WRPERR | FLASH_FLAG_PGAERR | \ + FLASH_FLAG_PGPERR | FLASH_FLAG_ERSERR | FLASH_FLAG_RDERR) +#else +#define FLASH_FLAG_ALL_ERRORS (FLASH_FLAG_OPERR | FLASH_FLAG_WRPERR | FLASH_FLAG_PGAERR | \ + FLASH_FLAG_PGPERR | FLASH_FLAG_ERSERR) +#endif /* FLASH_OPTCR2_PCROP */ +/** + * @} + */ + +/** @defgroup FLASH_Interrupt_definition FLASH Interrupt definition + * @brief FLASH Interrupt definition + * @{ + */ +#define FLASH_IT_EOP FLASH_CR_EOPIE /*!< End of FLASH Operation Interrupt source */ +#define FLASH_IT_ERR ((uint32_t)0x02000000U) /*!< Error Interrupt source */ +/** + * @} + */ + +/** @defgroup FLASH_Program_Parallelism FLASH Program Parallelism + * @{ + */ +#define FLASH_PSIZE_BYTE ((uint32_t)0x00000000U) +#define FLASH_PSIZE_HALF_WORD ((uint32_t)FLASH_CR_PSIZE_0) +#define FLASH_PSIZE_WORD ((uint32_t)FLASH_CR_PSIZE_1) +#define FLASH_PSIZE_DOUBLE_WORD ((uint32_t)FLASH_CR_PSIZE) +#define CR_PSIZE_MASK ((uint32_t)0xFFFFFCFFU) +/** + * @} + */ + +/** @defgroup FLASH_Keys FLASH Keys + * @{ + */ +#define FLASH_KEY1 ((uint32_t)0x45670123U) +#define FLASH_KEY2 ((uint32_t)0xCDEF89ABU) +#define FLASH_OPT_KEY1 ((uint32_t)0x08192A3BU) +#define FLASH_OPT_KEY2 ((uint32_t)0x4C5D6E7FU) +/** + * @} + */ + +/** @defgroup FLASH_Sectors FLASH Sectors + * @{ + */ +#if (FLASH_SECTOR_TOTAL == 2) +#define FLASH_SECTOR_0 ((uint32_t)0U) /*!< Sector Number 0 */ +#define FLASH_SECTOR_1 ((uint32_t)1U) /*!< Sector Number 1 */ +#elif (FLASH_SECTOR_TOTAL == 4) +#define FLASH_SECTOR_0 ((uint32_t)0U) /*!< Sector Number 0 */ +#define FLASH_SECTOR_1 ((uint32_t)1U) /*!< Sector Number 1 */ +#define FLASH_SECTOR_2 ((uint32_t)2U) /*!< Sector Number 2 */ +#define FLASH_SECTOR_3 ((uint32_t)3U) /*!< Sector Number 3 */ +#else +#define FLASH_SECTOR_0 ((uint32_t)0U) /*!< Sector Number 0 */ +#define FLASH_SECTOR_1 ((uint32_t)1U) /*!< Sector Number 1 */ +#define FLASH_SECTOR_2 ((uint32_t)2U) /*!< Sector Number 2 */ +#define FLASH_SECTOR_3 ((uint32_t)3U) /*!< Sector Number 3 */ +#define FLASH_SECTOR_4 ((uint32_t)4U) /*!< Sector Number 4 */ +#define FLASH_SECTOR_5 ((uint32_t)5U) /*!< Sector Number 5 */ +#define FLASH_SECTOR_6 ((uint32_t)6U) /*!< Sector Number 6 */ +#define FLASH_SECTOR_7 ((uint32_t)7U) /*!< Sector Number 7 */ +#endif /* FLASH_SECTOR_TOTAL */ +/** + * @} + */ + +/** + * @} + */ + +/* Exported macro ------------------------------------------------------------*/ +/** @defgroup FLASH_Exported_Macros FLASH Exported Macros + * @{ + */ +/** + * @brief Set the FLASH Latency. + * @param __LATENCY__ FLASH Latency + * The value of this parameter depend on device used within the same series + * @retval none + */ +#define __HAL_FLASH_SET_LATENCY(__LATENCY__) \ + MODIFY_REG(FLASH->ACR, FLASH_ACR_LATENCY, (uint32_t)(__LATENCY__)) + +/** + * @brief Get the FLASH Latency. + * @retval FLASH Latency + * The value of this parameter depend on device used within the same series + */ +#define __HAL_FLASH_GET_LATENCY() (READ_BIT((FLASH->ACR), FLASH_ACR_LATENCY)) + +/** + * @brief Enable the FLASH prefetch buffer. + * @retval none + */ +#define __HAL_FLASH_PREFETCH_BUFFER_ENABLE() (FLASH->ACR |= FLASH_ACR_PRFTEN) + +/** + * @brief Disable the FLASH prefetch buffer. 
+ * @retval none + */ +#define __HAL_FLASH_PREFETCH_BUFFER_DISABLE() (FLASH->ACR &= (~FLASH_ACR_PRFTEN)) + +/** + * @brief Enable the FLASH Adaptive Real-Time memory accelerator. + * @note The ART accelerator is available only for flash access on ITCM interface. + * @retval none + */ +#define __HAL_FLASH_ART_ENABLE() SET_BIT(FLASH->ACR, FLASH_ACR_ARTEN) + +/** + * @brief Disable the FLASH Adaptive Real-Time memory accelerator. + * @retval none + */ +#define __HAL_FLASH_ART_DISABLE() CLEAR_BIT(FLASH->ACR, FLASH_ACR_ARTEN) + +/** + * @brief Resets the FLASH Adaptive Real-Time memory accelerator. + * @note This function must be used only when the Adaptive Real-Time memory accelerator + * is disabled. + * @retval None + */ +#define __HAL_FLASH_ART_RESET() (FLASH->ACR |= FLASH_ACR_ARTRST) + +/** + * @brief Enable the specified FLASH interrupt. + * @param __INTERRUPT__ FLASH interrupt + * This parameter can be any combination of the following values: + * @arg FLASH_IT_EOP: End of FLASH Operation Interrupt + * @arg FLASH_IT_ERR: Error Interrupt + * @retval none + */ +#define __HAL_FLASH_ENABLE_IT(__INTERRUPT__) (FLASH->CR |= (__INTERRUPT__)) + +/** + * @brief Disable the specified FLASH interrupt. + * @param __INTERRUPT__ FLASH interrupt + * This parameter can be any combination of the following values: + * @arg FLASH_IT_EOP: End of FLASH Operation Interrupt + * @arg FLASH_IT_ERR: Error Interrupt + * @retval none + */ +#define __HAL_FLASH_DISABLE_IT(__INTERRUPT__) (FLASH->CR &= ~(uint32_t)(__INTERRUPT__)) + +/** + * @brief Get the specified FLASH flag status. + * @param __FLAG__ specifies the FLASH flag to check. + * This parameter can be one of the following values: + * @arg FLASH_FLAG_EOP : FLASH End of Operation flag + * @arg FLASH_FLAG_OPERR : FLASH operation Error flag + * @arg FLASH_FLAG_WRPERR: FLASH Write protected error flag + * @arg FLASH_FLAG_PGAERR: FLASH Programming Alignment error flag + * @arg FLASH_FLAG_PGPERR: FLASH Programming Parallelism error flag + * @arg FLASH_FLAG_ERSERR : FLASH Erasing Sequence error flag + * @arg FLASH_FLAG_BSY : FLASH Busy flag + * @retval The new state of __FLAG__ (SET or RESET). + */ +#define __HAL_FLASH_GET_FLAG(__FLAG__) ((FLASH->SR & (__FLAG__))) + +/** + * @brief Clear the specified FLASH flag. + * @param __FLAG__ specifies the FLASH flags to clear. 
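/* ---------------------------------------------------------------------------
 * Editor's note: illustrative sketch only -- not part of the upstream header.
 * Typical use of the FLASH access macros above when raising the system clock:
 * pick a wait-state count for the new HCLK (device- and voltage-dependent,
 * see the reference manual), verify it was accepted, then enable the ART
 * accelerator and prefetch. FLASH_LATENCY_7 is just an example value
 * (216 MHz at VDD >= 2.7 V); it is defined in stm32f7xx_hal_flash_ex.h below.
 * ------------------------------------------------------------------------- */
static void ConfigureFlashForHighSpeedClock(void)
{
  __HAL_FLASH_SET_LATENCY(FLASH_LATENCY_7);

  if (__HAL_FLASH_GET_LATENCY() != FLASH_LATENCY_7)
  {
    /* latency not accepted: stop here rather than run with too few wait states */
    while (1) { }
  }

  __HAL_FLASH_ART_ENABLE();              /* Adaptive Real-Time accelerator (ITCM) */
  __HAL_FLASH_PREFETCH_BUFFER_ENABLE();  /* instruction prefetch                  */
}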
+ * This parameter can be any combination of the following values: + * @arg FLASH_FLAG_EOP : FLASH End of Operation flag + * @arg FLASH_FLAG_OPERR : FLASH operation Error flag + * @arg FLASH_FLAG_WRPERR: FLASH Write protected error flag + * @arg FLASH_FLAG_PGAERR: FLASH Programming Alignment error flag + * @arg FLASH_FLAG_PGPERR: FLASH Programming Parallelism error flag + * @arg FLASH_FLAG_ERSERR : FLASH Erasing Sequence error flag + * @retval none + */ +#define __HAL_FLASH_CLEAR_FLAG(__FLAG__) (FLASH->SR = (__FLAG__)) +/** + * @} + */ + +/* Include FLASH HAL Extension module */ +#include "stm32f7xx_hal_flash_ex.h" + +/* Exported functions --------------------------------------------------------*/ +/** @addtogroup FLASH_Exported_Functions + * @{ + */ +/** @addtogroup FLASH_Exported_Functions_Group1 + * @{ + */ +/* Program operation functions ***********************************************/ +HAL_StatusTypeDef HAL_FLASH_Program(uint32_t TypeProgram, uint32_t Address, uint64_t Data); +HAL_StatusTypeDef HAL_FLASH_Program_IT(uint32_t TypeProgram, uint32_t Address, uint64_t Data); +/* FLASH IRQ handler method */ +void HAL_FLASH_IRQHandler(void); +/* Callbacks in non blocking modes */ +void HAL_FLASH_EndOfOperationCallback(uint32_t ReturnValue); +void HAL_FLASH_OperationErrorCallback(uint32_t ReturnValue); +/** + * @} + */ + +/** @addtogroup FLASH_Exported_Functions_Group2 + * @{ + */ +/* Peripheral Control functions **********************************************/ +HAL_StatusTypeDef HAL_FLASH_Unlock(void); +HAL_StatusTypeDef HAL_FLASH_Lock(void); +HAL_StatusTypeDef HAL_FLASH_OB_Unlock(void); +HAL_StatusTypeDef HAL_FLASH_OB_Lock(void); +/* Option bytes control */ +HAL_StatusTypeDef HAL_FLASH_OB_Launch(void); +/** + * @} + */ + +/** @addtogroup FLASH_Exported_Functions_Group3 + * @{ + */ +/* Peripheral State functions ************************************************/ +uint32_t HAL_FLASH_GetError(void); +HAL_StatusTypeDef FLASH_WaitForLastOperation(uint32_t Timeout); +/** + * @} + */ + +/** + * @} + */ +/* Private types -------------------------------------------------------------*/ +/* Private variables ---------------------------------------------------------*/ +/** @defgroup FLASH_Private_Variables FLASH Private Variables + * @{ + */ + +/** + * @} + */ +/* Private constants ---------------------------------------------------------*/ +/** @defgroup FLASH_Private_Constants FLASH Private Constants + * @{ + */ + +/** + * @brief OPTCR register byte 1 (Bits[15:8]) base address + */ +#define OPTCR_BYTE1_ADDRESS ((uint32_t)0x40023C15) + +/** + * @} + */ + +/* Private macros ------------------------------------------------------------*/ +/** @defgroup FLASH_Private_Macros FLASH Private Macros + * @{ + */ + +/** @defgroup FLASH_IS_FLASH_Definitions FLASH Private macros to check input parameters + * @{ + */ +#define IS_FLASH_TYPEPROGRAM(VALUE)(((VALUE) == FLASH_TYPEPROGRAM_BYTE) || \ + ((VALUE) == FLASH_TYPEPROGRAM_HALFWORD) || \ + ((VALUE) == FLASH_TYPEPROGRAM_WORD) || \ + ((VALUE) == FLASH_TYPEPROGRAM_DOUBLEWORD)) +/** + * @} + */ + +/** + * @} + */ + +/* Private functions ---------------------------------------------------------*/ +/** @defgroup FLASH_Private_Functions FLASH Private Functions + * @{ + */ + +/** + * @} + */ + +/** + * @} + */ + +/** + * @} + */ + +#ifdef __cplusplus +} +#endif + +#endif /* __STM32F7xx_HAL_FLASH_H */ + diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_flash_ex.h 
b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_flash_ex.h new file mode 100644 index 0000000..6ec226a --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_flash_ex.h @@ -0,0 +1,697 @@ +/** + ****************************************************************************** + * @file stm32f7xx_hal_flash_ex.h + * @author MCD Application Team + * @brief Header file of FLASH HAL Extension module. + ****************************************************************************** + * @attention + * + * Copyright (c) 2017 STMicroelectronics. + * All rights reserved. + * + * This software is licensed under terms that can be found in the LICENSE file in + * the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. + ****************************************************************************** + */ + +/* Define to prevent recursive inclusion -------------------------------------*/ +#ifndef __STM32F7xx_HAL_FLASH_EX_H +#define __STM32F7xx_HAL_FLASH_EX_H + +#ifdef __cplusplus + extern "C" { +#endif + +/* Includes ------------------------------------------------------------------*/ +#include "stm32f7xx_hal_def.h" + +/** @addtogroup STM32F7xx_HAL_Driver + * @{ + */ + +/** @addtogroup FLASHEx + * @{ + */ + +/* Exported types ------------------------------------------------------------*/ +/** @defgroup FLASHEx_Exported_Types FLASH Exported Types + * @{ + */ + +/** + * @brief FLASH Erase structure definition + */ +typedef struct +{ + uint32_t TypeErase; /*!< Mass erase or sector Erase. + This parameter can be a value of @ref FLASHEx_Type_Erase */ + +#if defined (FLASH_OPTCR_nDBANK) + uint32_t Banks; /*!< Select banks to erase when Mass erase is enabled. + This parameter must be a value of @ref FLASHEx_Banks */ +#endif /* FLASH_OPTCR_nDBANK */ + + uint32_t Sector; /*!< Initial FLASH sector to erase when Mass erase is disabled + This parameter must be a value of @ref FLASHEx_Sectors */ + + uint32_t NbSectors; /*!< Number of sectors to be erased. + This parameter must be a value between 1 and (max number of sectors - value of Initial sector)*/ + + uint32_t VoltageRange;/*!< The device voltage range which defines the erase parallelism + This parameter must be a value of @ref FLASHEx_Voltage_Range */ + +} FLASH_EraseInitTypeDef; + +/** + * @brief FLASH Option Bytes Program structure definition + */ +typedef struct +{ + uint32_t OptionType; /*!< Option byte to be configured. + This parameter can be a value of @ref FLASHEx_Option_Type */ + + uint32_t WRPState; /*!< Write protection activation or deactivation. + This parameter can be a value of @ref FLASHEx_WRP_State */ + + uint32_t WRPSector; /*!< Specifies the sector(s) to be write protected. + The value of this parameter depend on device used within the same series */ + + uint32_t RDPLevel; /*!< Set the read protection level. + This parameter can be a value of @ref FLASHEx_Option_Bytes_Read_Protection */ + + uint32_t BORLevel; /*!< Set the BOR Level. + This parameter can be a value of @ref FLASHEx_BOR_Reset_Level */ + + uint32_t USERConfig; /*!< Program the FLASH User Option Byte: WWDG_SW / IWDG_SW / RST_STOP / RST_STDBY / + IWDG_FREEZE_STOP / IWDG_FREEZE_SANDBY / nDBANK / nDBOOT. + nDBANK / nDBOOT are only available for STM32F76xxx/STM32F77xxx devices */ + + uint32_t BootAddr0; /*!< Boot base address when Boot pin = 0. 
+ This parameter can be a value of @ref FLASHEx_Boot_Address */ + + uint32_t BootAddr1; /*!< Boot base address when Boot pin = 1. + This parameter can be a value of @ref FLASHEx_Boot_Address */ + +#if defined (FLASH_OPTCR2_PCROP) + uint32_t PCROPSector; /*!< Set the PCROP sector. + This parameter can be a value of @ref FLASHEx_Option_Bytes_PCROP_Sectors */ + + uint32_t PCROPRdp; /*!< Set the PCROP_RDP option. + This parameter can be a value of @ref FLASHEx_Option_Bytes_PCROP_RDP */ +#endif /* FLASH_OPTCR2_PCROP */ + +} FLASH_OBProgramInitTypeDef; + +/** + * @} + */ +/* Exported constants --------------------------------------------------------*/ + +/** @defgroup FLASHEx_Exported_Constants FLASH Exported Constants + * @{ + */ + +/** @defgroup FLASHEx_Type_Erase FLASH Type Erase + * @{ + */ +#define FLASH_TYPEERASE_SECTORS ((uint32_t)0x00U) /*!< Sectors erase only */ +#define FLASH_TYPEERASE_MASSERASE ((uint32_t)0x01U) /*!< Flash Mass erase activation */ +/** + * @} + */ + +/** @defgroup FLASHEx_Voltage_Range FLASH Voltage Range + * @{ + */ +#define FLASH_VOLTAGE_RANGE_1 ((uint32_t)0x00U) /*!< Device operating range: 1.8V to 2.1V */ +#define FLASH_VOLTAGE_RANGE_2 ((uint32_t)0x01U) /*!< Device operating range: 2.1V to 2.7V */ +#define FLASH_VOLTAGE_RANGE_3 ((uint32_t)0x02U) /*!< Device operating range: 2.7V to 3.6V */ +#define FLASH_VOLTAGE_RANGE_4 ((uint32_t)0x03U) /*!< Device operating range: 2.7V to 3.6V + External Vpp */ +/** + * @} + */ + +/** @defgroup FLASHEx_WRP_State FLASH WRP State + * @{ + */ +#define OB_WRPSTATE_DISABLE ((uint32_t)0x00U) /*!< Disable the write protection of the desired bank 1 sectors */ +#define OB_WRPSTATE_ENABLE ((uint32_t)0x01U) /*!< Enable the write protection of the desired bank 1 sectors */ +/** + * @} + */ + +/** @defgroup FLASHEx_Option_Type FLASH Option Type + * @{ + */ +#define OPTIONBYTE_WRP ((uint32_t)0x01U) /*!< WRP option byte configuration */ +#define OPTIONBYTE_RDP ((uint32_t)0x02U) /*!< RDP option byte configuration */ +#define OPTIONBYTE_USER ((uint32_t)0x04U) /*!< USER option byte configuration */ +#define OPTIONBYTE_BOR ((uint32_t)0x08U) /*!< BOR option byte configuration */ +#define OPTIONBYTE_BOOTADDR_0 ((uint32_t)0x10U) /*!< Boot 0 Address configuration */ +#define OPTIONBYTE_BOOTADDR_1 ((uint32_t)0x20U) /*!< Boot 1 Address configuration */ +#if defined (FLASH_OPTCR2_PCROP) +#define OPTIONBYTE_PCROP ((uint32_t)0x40U) /*!< PCROP configuration */ +#define OPTIONBYTE_PCROP_RDP ((uint32_t)0x80U) /*!< PCROP_RDP configuration */ +#endif /* FLASH_OPTCR2_PCROP */ +/** + * @} + */ + +/** @defgroup FLASHEx_Option_Bytes_Read_Protection FLASH Option Bytes Read Protection + * @{ + */ +#define OB_RDP_LEVEL_0 ((uint8_t)0xAAU) +#define OB_RDP_LEVEL_1 ((uint8_t)0x55U) +#define OB_RDP_LEVEL_2 ((uint8_t)0xCCU) /*!< Warning: When enabling read protection level 2 + it s no more possible to go back to level 1 or 0 */ +/** + * @} + */ + +/** @defgroup FLASHEx_Option_Bytes_WWatchdog FLASH Option Bytes WWatchdog + * @{ + */ +#define OB_WWDG_SW ((uint32_t)0x10U) /*!< Software WWDG selected */ +#define OB_WWDG_HW ((uint32_t)0x00U) /*!< Hardware WWDG selected */ +/** + * @} + */ + + +/** @defgroup FLASHEx_Option_Bytes_IWatchdog FLASH Option Bytes IWatchdog + * @{ + */ +#define OB_IWDG_SW ((uint32_t)0x20U) /*!< Software IWDG selected */ +#define OB_IWDG_HW ((uint32_t)0x00U) /*!< Hardware IWDG selected */ +/** + * @} + */ + +/** @defgroup FLASHEx_Option_Bytes_nRST_STOP FLASH Option Bytes nRST_STOP + * @{ + */ +#define OB_STOP_NO_RST ((uint32_t)0x40U) /*!< No reset 
generated when entering in STOP */ +#define OB_STOP_RST ((uint32_t)0x00U) /*!< Reset generated when entering in STOP */ +/** + * @} + */ + +/** @defgroup FLASHEx_Option_Bytes_nRST_STDBY FLASH Option Bytes nRST_STDBY + * @{ + */ +#define OB_STDBY_NO_RST ((uint32_t)0x80U) /*!< No reset generated when entering in STANDBY */ +#define OB_STDBY_RST ((uint32_t)0x00U) /*!< Reset generated when entering in STANDBY */ +/** + * @} + */ + +/** @defgroup FLASHEx_Option_Bytes_IWDG_FREEZE_STOP FLASH IWDG Counter Freeze in STOP + * @{ + */ +#define OB_IWDG_STOP_FREEZE ((uint32_t)0x00000000U) /*!< Freeze IWDG counter in STOP mode */ +#define OB_IWDG_STOP_ACTIVE ((uint32_t)0x80000000U) /*!< IWDG counter active in STOP mode */ +/** + * @} + */ + +/** @defgroup FLASHEx_Option_Bytes_IWDG_FREEZE_SANDBY FLASH IWDG Counter Freeze in STANDBY + * @{ + */ +#define OB_IWDG_STDBY_FREEZE ((uint32_t)0x00000000U) /*!< Freeze IWDG counter in STANDBY mode */ +#define OB_IWDG_STDBY_ACTIVE ((uint32_t)0x40000000U) /*!< IWDG counter active in STANDBY mode */ +/** + * @} + */ + +/** @defgroup FLASHEx_BOR_Reset_Level FLASH BOR Reset Level + * @{ + */ +#define OB_BOR_LEVEL3 ((uint32_t)0x00U) /*!< Supply voltage ranges from 2.70 to 3.60 V */ +#define OB_BOR_LEVEL2 ((uint32_t)0x04U) /*!< Supply voltage ranges from 2.40 to 2.70 V */ +#define OB_BOR_LEVEL1 ((uint32_t)0x08U) /*!< Supply voltage ranges from 2.10 to 2.40 V */ +#define OB_BOR_OFF ((uint32_t)0x0CU) /*!< Supply voltage ranges from 1.62 to 2.10 V */ +/** + * @} + */ + +#if defined (FLASH_OPTCR_nDBOOT) +/** @defgroup FLASHEx_Option_Bytes_nDBOOT FLASH Option Bytes nDBOOT + * @{ + */ +#define OB_DUAL_BOOT_DISABLE ((uint32_t)0x10000000U) /* !< Dual Boot disable. Boot according to boot address option */ +#define OB_DUAL_BOOT_ENABLE ((uint32_t)0x00000000U) /* !< Dual Boot enable. 
Boot always from system memory if boot address in flash + (Dual bank Boot mode), or RAM if Boot address option in RAM */ +/** + * @} + */ +#endif /* FLASH_OPTCR_nDBOOT */ + +#if defined (FLASH_OPTCR_nDBANK) +/** @defgroup FLASHEx_Option_Bytes_nDBank FLASH Single Bank or Dual Bank + * @{ + */ +#define OB_NDBANK_SINGLE_BANK ((uint32_t)0x20000000U) /*!< NDBANK bit is set : Single Bank mode */ +#define OB_NDBANK_DUAL_BANK ((uint32_t)0x00000000U) /*!< NDBANK bit is reset : Dual Bank mode */ +/** + * @} + */ +#endif /* FLASH_OPTCR_nDBANK */ + +/** @defgroup FLASHEx_Boot_Address FLASH Boot Address + * @{ + */ +#define OB_BOOTADDR_ITCM_RAM ((uint32_t)0x0000U) /*!< Boot from ITCM RAM (0x00000000) */ +#define OB_BOOTADDR_SYSTEM ((uint32_t)0x0040U) /*!< Boot from System memory bootloader (0x00100000) */ +#define OB_BOOTADDR_ITCM_FLASH ((uint32_t)0x0080U) /*!< Boot from Flash on ITCM interface (0x00200000) */ +#define OB_BOOTADDR_AXIM_FLASH ((uint32_t)0x2000U) /*!< Boot from Flash on AXIM interface (0x08000000) */ +#define OB_BOOTADDR_DTCM_RAM ((uint32_t)0x8000U) /*!< Boot from DTCM RAM (0x20000000) */ +#define OB_BOOTADDR_SRAM1 ((uint32_t)0x8004U) /*!< Boot from SRAM1 (0x20010000) */ +#if (SRAM2_BASE == 0x2003C000U) +#define OB_BOOTADDR_SRAM2 ((uint32_t)0x800FU) /*!< Boot from SRAM2 (0x2003C000) */ +#else +#define OB_BOOTADDR_SRAM2 ((uint32_t)0x8013U) /*!< Boot from SRAM2 (0x2004C000) */ +#endif /* SRAM2_BASE == 0x2003C000U */ +/** + * @} + */ + +/** @defgroup FLASH_Latency FLASH Latency + * @{ + */ +#define FLASH_LATENCY_0 FLASH_ACR_LATENCY_0WS /*!< FLASH Zero Latency cycle */ +#define FLASH_LATENCY_1 FLASH_ACR_LATENCY_1WS /*!< FLASH One Latency cycle */ +#define FLASH_LATENCY_2 FLASH_ACR_LATENCY_2WS /*!< FLASH Two Latency cycles */ +#define FLASH_LATENCY_3 FLASH_ACR_LATENCY_3WS /*!< FLASH Three Latency cycles */ +#define FLASH_LATENCY_4 FLASH_ACR_LATENCY_4WS /*!< FLASH Four Latency cycles */ +#define FLASH_LATENCY_5 FLASH_ACR_LATENCY_5WS /*!< FLASH Five Latency cycles */ +#define FLASH_LATENCY_6 FLASH_ACR_LATENCY_6WS /*!< FLASH Six Latency cycles */ +#define FLASH_LATENCY_7 FLASH_ACR_LATENCY_7WS /*!< FLASH Seven Latency cycles */ +#define FLASH_LATENCY_8 FLASH_ACR_LATENCY_8WS /*!< FLASH Eight Latency cycles */ +#define FLASH_LATENCY_9 FLASH_ACR_LATENCY_9WS /*!< FLASH Nine Latency cycles */ +#define FLASH_LATENCY_10 FLASH_ACR_LATENCY_10WS /*!< FLASH Ten Latency cycles */ +#define FLASH_LATENCY_11 FLASH_ACR_LATENCY_11WS /*!< FLASH Eleven Latency cycles */ +#define FLASH_LATENCY_12 FLASH_ACR_LATENCY_12WS /*!< FLASH Twelve Latency cycles */ +#define FLASH_LATENCY_13 FLASH_ACR_LATENCY_13WS /*!< FLASH Thirteen Latency cycles */ +#define FLASH_LATENCY_14 FLASH_ACR_LATENCY_14WS /*!< FLASH Fourteen Latency cycles */ +#define FLASH_LATENCY_15 FLASH_ACR_LATENCY_15WS /*!< FLASH Fifteen Latency cycles */ +/** + * @} + */ + +#if defined (FLASH_OPTCR_nDBANK) +/** @defgroup FLASHEx_Banks FLASH Banks + * @{ + */ +#define FLASH_BANK_1 ((uint32_t)0x01U) /*!< Bank 1 */ +#define FLASH_BANK_2 ((uint32_t)0x02U) /*!< Bank 2 */ +#define FLASH_BANK_BOTH ((uint32_t)(FLASH_BANK_1 | FLASH_BANK_2)) /*!< Bank1 and Bank2 */ +/** + * @} + */ +#endif /* FLASH_OPTCR_nDBANK */ + +/** @defgroup FLASHEx_MassErase_bit FLASH Mass Erase bit + * @{ + */ +#if defined (FLASH_OPTCR_nDBANK) +#define FLASH_MER_BIT (FLASH_CR_MER1 | FLASH_CR_MER2) /*!< 2 MER bits */ +#else +#define FLASH_MER_BIT (FLASH_CR_MER) /*!< only 1 MER bit */ +#endif /* FLASH_OPTCR_nDBANK */ +/** + * @} + */ + +/** @defgroup FLASHEx_Sectors FLASH Sectors + * @{ + */ +#if 
(FLASH_SECTOR_TOTAL == 24) +#define FLASH_SECTOR_8 ((uint32_t)8U) /*!< Sector Number 8 */ +#define FLASH_SECTOR_9 ((uint32_t)9U) /*!< Sector Number 9 */ +#define FLASH_SECTOR_10 ((uint32_t)10U) /*!< Sector Number 10 */ +#define FLASH_SECTOR_11 ((uint32_t)11U) /*!< Sector Number 11 */ +#define FLASH_SECTOR_12 ((uint32_t)12U) /*!< Sector Number 12 */ +#define FLASH_SECTOR_13 ((uint32_t)13U) /*!< Sector Number 13 */ +#define FLASH_SECTOR_14 ((uint32_t)14U) /*!< Sector Number 14 */ +#define FLASH_SECTOR_15 ((uint32_t)15U) /*!< Sector Number 15 */ +#define FLASH_SECTOR_16 ((uint32_t)16U) /*!< Sector Number 16 */ +#define FLASH_SECTOR_17 ((uint32_t)17U) /*!< Sector Number 17 */ +#define FLASH_SECTOR_18 ((uint32_t)18U) /*!< Sector Number 18 */ +#define FLASH_SECTOR_19 ((uint32_t)19U) /*!< Sector Number 19 */ +#define FLASH_SECTOR_20 ((uint32_t)20U) /*!< Sector Number 20 */ +#define FLASH_SECTOR_21 ((uint32_t)21U) /*!< Sector Number 21 */ +#define FLASH_SECTOR_22 ((uint32_t)22U) /*!< Sector Number 22 */ +#define FLASH_SECTOR_23 ((uint32_t)23U) /*!< Sector Number 23 */ +#endif /* FLASH_SECTOR_TOTAL == 24 */ +/** + * @} + */ + +#if (FLASH_SECTOR_TOTAL == 24) +/** @defgroup FLASHEx_Option_Bytes_Write_Protection FLASH Option Bytes Write Protection + * @note For Single Bank mode, use OB_WRP_SECTOR_x defines: In fact, in FLASH_OPTCR register, + * nWRP[11:0] bits contain the value of the write-protection option bytes for sectors 0 to 11. + * For Dual Bank mode, use OB_WRP_DB_SECTOR_x defines: In fact, in FLASH_OPTCR register, + * nWRP[11:0] bits are divided on two groups, one group dedicated for bank 1 and + * a second one dedicated for bank 2 (nWRP[i] activates Write protection on sector 2*i and 2*i+1). + * This behavior is applicable only for STM32F76xxx / STM32F77xxx devices. 
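/* ---------------------------------------------------------------------------
 * Editor's note: illustrative sketch only -- not part of the upstream header.
 * It shows how the dual-bank note above plays out with HAL_FLASHEx_OBProgram()
 * (declared further down in this header) on an STM32F76xxx/F77xxx in dual-bank
 * mode: one nWRP bit covers a sector pair, so OB_WRP_DB_SECTOR_4 and
 * OB_WRP_DB_SECTOR_5 map to the same bit and are protected together. The
 * choice of sectors is hypothetical.
 * ------------------------------------------------------------------------- */
static void EnableWriteProtectionOnSectorPair(void)
{
  FLASH_OBProgramInitTypeDef ob = {0};

  HAL_FLASH_OB_Unlock();

  ob.OptionType = OPTIONBYTE_WRP;
  ob.WRPState   = OB_WRPSTATE_ENABLE;
  ob.WRPSector  = OB_WRP_DB_SECTOR_4 | OB_WRP_DB_SECTOR_5;  /* same nWRP bit */

  if (HAL_FLASHEx_OBProgram(&ob) == HAL_OK)
  {
    HAL_FLASH_OB_Launch();   /* reload option bytes so the new WRP takes effect */
  }

  HAL_FLASH_OB_Lock();
}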
+ * @{ + */ +/* Single Bank Sectors */ +#define OB_WRP_SECTOR_0 ((uint32_t)0x00010000U) /*!< Write protection of Single Bank Sector0 */ +#define OB_WRP_SECTOR_1 ((uint32_t)0x00020000U) /*!< Write protection of Single Bank Sector1 */ +#define OB_WRP_SECTOR_2 ((uint32_t)0x00040000U) /*!< Write protection of Single Bank Sector2 */ +#define OB_WRP_SECTOR_3 ((uint32_t)0x00080000U) /*!< Write protection of Single Bank Sector3 */ +#define OB_WRP_SECTOR_4 ((uint32_t)0x00100000U) /*!< Write protection of Single Bank Sector4 */ +#define OB_WRP_SECTOR_5 ((uint32_t)0x00200000U) /*!< Write protection of Single Bank Sector5 */ +#define OB_WRP_SECTOR_6 ((uint32_t)0x00400000U) /*!< Write protection of Single Bank Sector6 */ +#define OB_WRP_SECTOR_7 ((uint32_t)0x00800000U) /*!< Write protection of Single Bank Sector7 */ +#define OB_WRP_SECTOR_8 ((uint32_t)0x01000000U) /*!< Write protection of Single Bank Sector8 */ +#define OB_WRP_SECTOR_9 ((uint32_t)0x02000000U) /*!< Write protection of Single Bank Sector9 */ +#define OB_WRP_SECTOR_10 ((uint32_t)0x04000000U) /*!< Write protection of Single Bank Sector10 */ +#define OB_WRP_SECTOR_11 ((uint32_t)0x08000000U) /*!< Write protection of Single Bank Sector11 */ +#define OB_WRP_SECTOR_All ((uint32_t)0x0FFF0000U) /*!< Write protection of all Sectors for Single Bank Flash */ + +/* Dual Bank Sectors */ +#define OB_WRP_DB_SECTOR_0 ((uint32_t)0x00010000U) /*!< Write protection of Dual Bank Sector0 */ +#define OB_WRP_DB_SECTOR_1 ((uint32_t)0x00010000U) /*!< Write protection of Dual Bank Sector1 */ +#define OB_WRP_DB_SECTOR_2 ((uint32_t)0x00020000U) /*!< Write protection of Dual Bank Sector2 */ +#define OB_WRP_DB_SECTOR_3 ((uint32_t)0x00020000U) /*!< Write protection of Dual Bank Sector3 */ +#define OB_WRP_DB_SECTOR_4 ((uint32_t)0x00040000U) /*!< Write protection of Dual Bank Sector4 */ +#define OB_WRP_DB_SECTOR_5 ((uint32_t)0x00040000U) /*!< Write protection of Dual Bank Sector5 */ +#define OB_WRP_DB_SECTOR_6 ((uint32_t)0x00080000U) /*!< Write protection of Dual Bank Sector6 */ +#define OB_WRP_DB_SECTOR_7 ((uint32_t)0x00080000U) /*!< Write protection of Dual Bank Sector7 */ +#define OB_WRP_DB_SECTOR_8 ((uint32_t)0x00100000U) /*!< Write protection of Dual Bank Sector8 */ +#define OB_WRP_DB_SECTOR_9 ((uint32_t)0x00100000U) /*!< Write protection of Dual Bank Sector9 */ +#define OB_WRP_DB_SECTOR_10 ((uint32_t)0x00200000U) /*!< Write protection of Dual Bank Sector10 */ +#define OB_WRP_DB_SECTOR_11 ((uint32_t)0x00200000U) /*!< Write protection of Dual Bank Sector11 */ +#define OB_WRP_DB_SECTOR_12 ((uint32_t)0x00400000U) /*!< Write protection of Dual Bank Sector12 */ +#define OB_WRP_DB_SECTOR_13 ((uint32_t)0x00400000U) /*!< Write protection of Dual Bank Sector13 */ +#define OB_WRP_DB_SECTOR_14 ((uint32_t)0x00800000U) /*!< Write protection of Dual Bank Sector14 */ +#define OB_WRP_DB_SECTOR_15 ((uint32_t)0x00800000U) /*!< Write protection of Dual Bank Sector15 */ +#define OB_WRP_DB_SECTOR_16 ((uint32_t)0x01000000U) /*!< Write protection of Dual Bank Sector16 */ +#define OB_WRP_DB_SECTOR_17 ((uint32_t)0x01000000U) /*!< Write protection of Dual Bank Sector17 */ +#define OB_WRP_DB_SECTOR_18 ((uint32_t)0x02000000U) /*!< Write protection of Dual Bank Sector18 */ +#define OB_WRP_DB_SECTOR_19 ((uint32_t)0x02000000U) /*!< Write protection of Dual Bank Sector19 */ +#define OB_WRP_DB_SECTOR_20 ((uint32_t)0x04000000U) /*!< Write protection of Dual Bank Sector20 */ +#define OB_WRP_DB_SECTOR_21 ((uint32_t)0x04000000U) /*!< Write protection of Dual Bank Sector21 */ +#define 
OB_WRP_DB_SECTOR_22 ((uint32_t)0x08000000U) /*!< Write protection of Dual Bank Sector22 */ +#define OB_WRP_DB_SECTOR_23 ((uint32_t)0x08000000U) /*!< Write protection of Dual Bank Sector23 */ +#define OB_WRP_DB_SECTOR_All ((uint32_t)0x0FFF0000U) /*!< Write protection of all Sectors for Dual Bank Flash */ +/** + * @} + */ +#endif /* FLASH_SECTOR_TOTAL == 24 */ + +#if (FLASH_SECTOR_TOTAL == 8) +/** @defgroup FLASHEx_Option_Bytes_Write_Protection FLASH Option Bytes Write Protection + * @{ + */ +#define OB_WRP_SECTOR_0 ((uint32_t)0x00010000U) /*!< Write protection of Sector0 */ +#define OB_WRP_SECTOR_1 ((uint32_t)0x00020000U) /*!< Write protection of Sector1 */ +#define OB_WRP_SECTOR_2 ((uint32_t)0x00040000U) /*!< Write protection of Sector2 */ +#define OB_WRP_SECTOR_3 ((uint32_t)0x00080000U) /*!< Write protection of Sector3 */ +#define OB_WRP_SECTOR_4 ((uint32_t)0x00100000U) /*!< Write protection of Sector4 */ +#define OB_WRP_SECTOR_5 ((uint32_t)0x00200000U) /*!< Write protection of Sector5 */ +#define OB_WRP_SECTOR_6 ((uint32_t)0x00400000U) /*!< Write protection of Sector6 */ +#define OB_WRP_SECTOR_7 ((uint32_t)0x00800000U) /*!< Write protection of Sector7 */ +#define OB_WRP_SECTOR_All ((uint32_t)0x00FF0000U) /*!< Write protection of all Sectors */ +/** + * @} + */ +#endif /* FLASH_SECTOR_TOTAL == 8 */ + +#if (FLASH_SECTOR_TOTAL == 4) +/** @defgroup FLASHEx_Option_Bytes_Write_Protection FLASH Option Bytes Write Protection + * @{ + */ +#define OB_WRP_SECTOR_0 ((uint32_t)0x00010000U) /*!< Write protection of Sector0 */ +#define OB_WRP_SECTOR_1 ((uint32_t)0x00020000U) /*!< Write protection of Sector1 */ +#define OB_WRP_SECTOR_2 ((uint32_t)0x00040000U) /*!< Write protection of Sector2 */ +#define OB_WRP_SECTOR_3 ((uint32_t)0x00080000U) /*!< Write protection of Sector3 */ +#define OB_WRP_SECTOR_All ((uint32_t)0x000F0000U) /*!< Write protection of all Sectors */ +/** + * @} + */ +#endif /* FLASH_SECTOR_TOTAL == 4 */ + +#if (FLASH_SECTOR_TOTAL == 2) +/** @defgroup FLASHEx_Option_Bytes_Write_Protection FLASH Option Bytes Write Protection + * @{ + */ +#define OB_WRP_SECTOR_0 ((uint32_t)0x00010000U) /*!< Write protection of Sector0 */ +#define OB_WRP_SECTOR_1 ((uint32_t)0x00020000U) /*!< Write protection of Sector1 */ +#define OB_WRP_SECTOR_All ((uint32_t)0x00030000U) /*!< Write protection of all Sectors */ +/** + * @} + */ +#endif /* FLASH_SECTOR_TOTAL == 2 */ + +#if defined (FLASH_OPTCR2_PCROP) +#if (FLASH_SECTOR_TOTAL == 8) +/** @defgroup FLASHEx_Option_Bytes_PCROP_Sectors FLASH Option Bytes PCROP Sectors + * @{ + */ +#define OB_PCROP_SECTOR_0 ((uint32_t)0x00000001U) /*!< PC Readout protection of Sector0 */ +#define OB_PCROP_SECTOR_1 ((uint32_t)0x00000002U) /*!< PC Readout protection of Sector1 */ +#define OB_PCROP_SECTOR_2 ((uint32_t)0x00000004U) /*!< PC Readout protection of Sector2 */ +#define OB_PCROP_SECTOR_3 ((uint32_t)0x00000008U) /*!< PC Readout protection of Sector3 */ +#define OB_PCROP_SECTOR_4 ((uint32_t)0x00000010U) /*!< PC Readout protection of Sector4 */ +#define OB_PCROP_SECTOR_5 ((uint32_t)0x00000020U) /*!< PC Readout protection of Sector5 */ +#define OB_PCROP_SECTOR_6 ((uint32_t)0x00000040U) /*!< PC Readout protection of Sector6 */ +#define OB_PCROP_SECTOR_7 ((uint32_t)0x00000080U) /*!< PC Readout protection of Sector7 */ +#define OB_PCROP_SECTOR_All ((uint32_t)0x000000FFU) /*!< PC Readout protection of all Sectors */ +/** + * @} + */ +#endif /* FLASH_SECTOR_TOTAL == 8 */ + +#if (FLASH_SECTOR_TOTAL == 4) +/** @defgroup FLASHEx_Option_Bytes_PCROP_Sectors FLASH Option Bytes PCROP 
Sectors + * @{ + */ +#define OB_PCROP_SECTOR_0 ((uint32_t)0x00000001U) /*!< PC Readout protection of Sector0 */ +#define OB_PCROP_SECTOR_1 ((uint32_t)0x00000002U) /*!< PC Readout protection of Sector1 */ +#define OB_PCROP_SECTOR_2 ((uint32_t)0x00000004U) /*!< PC Readout protection of Sector2 */ +#define OB_PCROP_SECTOR_3 ((uint32_t)0x00000008U) /*!< PC Readout protection of Sector3 */ +#define OB_PCROP_SECTOR_All ((uint32_t)0x0000000FU) /*!< PC Readout protection of all Sectors */ +/** + * @} + */ +#endif /* FLASH_SECTOR_TOTAL == 4 */ + +/** @defgroup FLASHEx_Option_Bytes_PCROP_RDP FLASH Option Bytes PCROP_RDP Bit + * @{ + */ +#define OB_PCROP_RDP_ENABLE ((uint32_t)0x80000000U) /*!< PCROP_RDP Enable */ +#define OB_PCROP_RDP_DISABLE ((uint32_t)0x00000000U) /*!< PCROP_RDP Disable */ +/** + * @} + */ +#endif /* FLASH_OPTCR2_PCROP */ + +/** + * @} + */ + +/* Exported macro ------------------------------------------------------------*/ +/** @defgroup FLASH_Exported_Macros FLASH Exported Macros + * @{ + */ +/** + * @brief Calculate the FLASH Boot Base Address (BOOT_ADD0 or BOOT_ADD1) + * @note Returned value BOOT_ADDx[15:0] corresponds to boot address [29:14]. + * @param __ADDRESS__ FLASH Boot Address (in the range 0x0000 0000 to 0x2004 FFFF with a granularity of 16KB) + * @retval The FLASH Boot Base Address + */ +#define __HAL_FLASH_CALC_BOOT_BASE_ADR(__ADDRESS__) ((__ADDRESS__) >> 14) + /** + * @} + */ + +/* Exported functions --------------------------------------------------------*/ +/** @addtogroup FLASHEx_Exported_Functions + * @{ + */ + +/** @addtogroup FLASHEx_Exported_Functions_Group1 + * @{ + */ +/* Extension Program operation functions *************************************/ +HAL_StatusTypeDef HAL_FLASHEx_Erase(FLASH_EraseInitTypeDef *pEraseInit, uint32_t *SectorError); +HAL_StatusTypeDef HAL_FLASHEx_Erase_IT(FLASH_EraseInitTypeDef *pEraseInit); +HAL_StatusTypeDef HAL_FLASHEx_OBProgram(FLASH_OBProgramInitTypeDef *pOBInit); +void HAL_FLASHEx_OBGetConfig(FLASH_OBProgramInitTypeDef *pOBInit); + +/** + * @} + */ + +/** + * @} + */ +/* Private types -------------------------------------------------------------*/ +/* Private variables ---------------------------------------------------------*/ +/* Private constants ---------------------------------------------------------*/ +/* Private macros ------------------------------------------------------------*/ +/** @defgroup FLASHEx_Private_Macros FLASH Private Macros + * @{ + */ + +/** @defgroup FLASHEx_IS_FLASH_Definitions FLASH Private macros to check input parameters + * @{ + */ + +#define IS_FLASH_TYPEERASE(VALUE)(((VALUE) == FLASH_TYPEERASE_SECTORS) || \ + ((VALUE) == FLASH_TYPEERASE_MASSERASE)) + +#define IS_VOLTAGERANGE(RANGE)(((RANGE) == FLASH_VOLTAGE_RANGE_1) || \ + ((RANGE) == FLASH_VOLTAGE_RANGE_2) || \ + ((RANGE) == FLASH_VOLTAGE_RANGE_3) || \ + ((RANGE) == FLASH_VOLTAGE_RANGE_4)) + +#define IS_WRPSTATE(VALUE)(((VALUE) == OB_WRPSTATE_DISABLE) || \ + ((VALUE) == OB_WRPSTATE_ENABLE)) + +#if defined (FLASH_OPTCR2_PCROP) +#define IS_OPTIONBYTE(VALUE)(((VALUE) <= (OPTIONBYTE_WRP | OPTIONBYTE_RDP | OPTIONBYTE_USER |\ + OPTIONBYTE_BOR | OPTIONBYTE_BOOTADDR_0 | OPTIONBYTE_BOOTADDR_1 |\ + OPTIONBYTE_PCROP | OPTIONBYTE_PCROP_RDP))) +#else +#define IS_OPTIONBYTE(VALUE)(((VALUE) <= (OPTIONBYTE_WRP | OPTIONBYTE_RDP | OPTIONBYTE_USER |\ + OPTIONBYTE_BOR | OPTIONBYTE_BOOTADDR_0 | OPTIONBYTE_BOOTADDR_1))) +#endif /* FLASH_OPTCR2_PCROP */ + +#define IS_OB_BOOT_ADDRESS(ADDRESS) ((ADDRESS) <= 0x8013) + +#define IS_OB_RDP_LEVEL(LEVEL) (((LEVEL) == 
OB_RDP_LEVEL_0) ||\ + ((LEVEL) == OB_RDP_LEVEL_1) ||\ + ((LEVEL) == OB_RDP_LEVEL_2)) + +#define IS_OB_WWDG_SOURCE(SOURCE) (((SOURCE) == OB_WWDG_SW) || ((SOURCE) == OB_WWDG_HW)) + +#define IS_OB_IWDG_SOURCE(SOURCE) (((SOURCE) == OB_IWDG_SW) || ((SOURCE) == OB_IWDG_HW)) + +#define IS_OB_STOP_SOURCE(SOURCE) (((SOURCE) == OB_STOP_NO_RST) || ((SOURCE) == OB_STOP_RST)) + +#define IS_OB_STDBY_SOURCE(SOURCE) (((SOURCE) == OB_STDBY_NO_RST) || ((SOURCE) == OB_STDBY_RST)) + +#define IS_OB_IWDG_STOP_FREEZE(FREEZE) (((FREEZE) == OB_IWDG_STOP_FREEZE) || ((FREEZE) == OB_IWDG_STOP_ACTIVE)) + +#define IS_OB_IWDG_STDBY_FREEZE(FREEZE) (((FREEZE) == OB_IWDG_STDBY_FREEZE) || ((FREEZE) == OB_IWDG_STDBY_ACTIVE)) + +#define IS_OB_BOR_LEVEL(LEVEL) (((LEVEL) == OB_BOR_LEVEL1) || ((LEVEL) == OB_BOR_LEVEL2) ||\ + ((LEVEL) == OB_BOR_LEVEL3) || ((LEVEL) == OB_BOR_OFF)) + +#define IS_FLASH_LATENCY(LATENCY) (((LATENCY) == FLASH_LATENCY_0) || \ + ((LATENCY) == FLASH_LATENCY_1) || \ + ((LATENCY) == FLASH_LATENCY_2) || \ + ((LATENCY) == FLASH_LATENCY_3) || \ + ((LATENCY) == FLASH_LATENCY_4) || \ + ((LATENCY) == FLASH_LATENCY_5) || \ + ((LATENCY) == FLASH_LATENCY_6) || \ + ((LATENCY) == FLASH_LATENCY_7) || \ + ((LATENCY) == FLASH_LATENCY_8) || \ + ((LATENCY) == FLASH_LATENCY_9) || \ + ((LATENCY) == FLASH_LATENCY_10) || \ + ((LATENCY) == FLASH_LATENCY_11) || \ + ((LATENCY) == FLASH_LATENCY_12) || \ + ((LATENCY) == FLASH_LATENCY_13) || \ + ((LATENCY) == FLASH_LATENCY_14) || \ + ((LATENCY) == FLASH_LATENCY_15)) + +#define IS_FLASH_ADDRESS(ADDRESS) ((((ADDRESS) >= FLASH_BASE) && ((ADDRESS) <= FLASH_END)) || \ + (((ADDRESS) >= FLASH_OTP_BASE) && ((ADDRESS) <= FLASH_OTP_END))) +#define IS_FLASH_NBSECTORS(NBSECTORS) (((NBSECTORS) != 0U) && ((NBSECTORS) <= FLASH_SECTOR_TOTAL)) + +#if (FLASH_SECTOR_TOTAL == 8) +#define IS_FLASH_SECTOR(SECTOR) (((SECTOR) == FLASH_SECTOR_0) || ((SECTOR) == FLASH_SECTOR_1) ||\ + ((SECTOR) == FLASH_SECTOR_2) || ((SECTOR) == FLASH_SECTOR_3) ||\ + ((SECTOR) == FLASH_SECTOR_4) || ((SECTOR) == FLASH_SECTOR_5) ||\ + ((SECTOR) == FLASH_SECTOR_6) || ((SECTOR) == FLASH_SECTOR_7)) + +#define IS_OB_WRP_SECTOR(SECTOR) ((((SECTOR) & 0xFF00FFFFU) == 0x00000000U) && ((SECTOR) != 0x00000000U)) +#endif /* FLASH_SECTOR_TOTAL == 8 */ + +#if (FLASH_SECTOR_TOTAL == 24) +#define IS_FLASH_SECTOR(SECTOR) (((SECTOR) == FLASH_SECTOR_0) || ((SECTOR) == FLASH_SECTOR_1) ||\ + ((SECTOR) == FLASH_SECTOR_2) || ((SECTOR) == FLASH_SECTOR_3) ||\ + ((SECTOR) == FLASH_SECTOR_4) || ((SECTOR) == FLASH_SECTOR_5) ||\ + ((SECTOR) == FLASH_SECTOR_6) || ((SECTOR) == FLASH_SECTOR_7) ||\ + ((SECTOR) == FLASH_SECTOR_8) || ((SECTOR) == FLASH_SECTOR_9) ||\ + ((SECTOR) == FLASH_SECTOR_10) || ((SECTOR) == FLASH_SECTOR_11) ||\ + ((SECTOR) == FLASH_SECTOR_12) || ((SECTOR) == FLASH_SECTOR_13) ||\ + ((SECTOR) == FLASH_SECTOR_14) || ((SECTOR) == FLASH_SECTOR_15) ||\ + ((SECTOR) == FLASH_SECTOR_16) || ((SECTOR) == FLASH_SECTOR_17) ||\ + ((SECTOR) == FLASH_SECTOR_18) || ((SECTOR) == FLASH_SECTOR_19) ||\ + ((SECTOR) == FLASH_SECTOR_20) || ((SECTOR) == FLASH_SECTOR_21) ||\ + ((SECTOR) == FLASH_SECTOR_22) || ((SECTOR) == FLASH_SECTOR_23)) + +#define IS_OB_WRP_SECTOR(SECTOR) ((((SECTOR) & 0xF000FFFFU) == 0x00000000U) && ((SECTOR) != 0x00000000U)) +#endif /* FLASH_SECTOR_TOTAL == 24 */ + +#if (FLASH_SECTOR_TOTAL == 4) +#define IS_FLASH_SECTOR(SECTOR) (((SECTOR) == FLASH_SECTOR_0) || ((SECTOR) == FLASH_SECTOR_1) ||\ + ((SECTOR) == FLASH_SECTOR_2) || ((SECTOR) == FLASH_SECTOR_3)) + +#define IS_OB_WRP_SECTOR(SECTOR) ((((SECTOR) & 0xFFF0FFFFU) == 0x00000000U) && 
((SECTOR) != 0x00000000U)) +#endif /* FLASH_SECTOR_TOTAL == 4 */ + +#if (FLASH_SECTOR_TOTAL == 2) +#define IS_FLASH_SECTOR(SECTOR) (((SECTOR) == FLASH_SECTOR_0) || ((SECTOR) == FLASH_SECTOR_1)) + +#define IS_OB_WRP_SECTOR(SECTOR) ((((SECTOR) & 0xFFFCFFFFU) == 0x00000000U) && ((SECTOR) != 0x00000000U)) +#endif /* FLASH_SECTOR_TOTAL == 2 */ + +#if defined (FLASH_OPTCR_nDBANK) +#define IS_OB_NDBANK(VALUE) (((VALUE) == OB_NDBANK_SINGLE_BANK) || \ + ((VALUE) == OB_NDBANK_DUAL_BANK)) + +#define IS_FLASH_BANK(BANK) (((BANK) == FLASH_BANK_1) || \ + ((BANK) == FLASH_BANK_2) || \ + ((BANK) == FLASH_BANK_BOTH)) +#endif /* FLASH_OPTCR_nDBANK */ + +#if defined (FLASH_OPTCR_nDBOOT) +#define IS_OB_NDBOOT(VALUE) (((VALUE) == OB_DUAL_BOOT_DISABLE) || \ + ((VALUE) == OB_DUAL_BOOT_ENABLE)) +#endif /* FLASH_OPTCR_nDBOOT */ + +#if defined (FLASH_OPTCR2_PCROP) +#define IS_OB_PCROP_SECTOR(SECTOR) (((SECTOR) & (uint32_t)0xFFFFFF00U) == 0x00000000U) +#define IS_OB_PCROP_RDP_VALUE(VALUE) (((VALUE) == OB_PCROP_RDP_DISABLE) || \ + ((VALUE) == OB_PCROP_RDP_ENABLE)) +#endif /* FLASH_OPTCR2_PCROP */ + +/** + * @} + */ + +/** + * @} + */ + +/* Private functions ---------------------------------------------------------*/ +/** @defgroup FLASHEx_Private_Functions FLASH Private Functions + * @{ + */ +void FLASH_Erase_Sector(uint32_t Sector, uint8_t VoltageRange); +/** + * @} + */ + +/** + * @} + */ + +/** + * @} + */ + +#ifdef __cplusplus +} +#endif + +#endif /* __STM32F7xx_HAL_FLASH_EX_H */ + diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_gpio.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_gpio.h new file mode 100644 index 0000000..c9dd7b2 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_gpio.h @@ -0,0 +1,323 @@ +/** + ****************************************************************************** + * @file stm32f7xx_hal_gpio.h + * @author MCD Application Team + * @brief Header file of GPIO HAL module. + ****************************************************************************** + * @attention + * + * Copyright (c) 2017 STMicroelectronics. + * All rights reserved. + * + * This software is licensed under terms that can be found in the LICENSE file + * in the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. + * + ****************************************************************************** + */ + +/* Define to prevent recursive inclusion -------------------------------------*/ +#ifndef __STM32F7xx_HAL_GPIO_H +#define __STM32F7xx_HAL_GPIO_H + +#ifdef __cplusplus + extern "C" { +#endif + +/* Includes ------------------------------------------------------------------*/ +#include "stm32f7xx_hal_def.h" + +/** @addtogroup STM32F7xx_HAL_Driver + * @{ + */ + +/** @addtogroup GPIO + * @{ + */ + +/* Exported types ------------------------------------------------------------*/ +/** @defgroup GPIO_Exported_Types GPIO Exported Types + * @{ + */ + +/** + * @brief GPIO Init structure definition + */ +typedef struct +{ + uint32_t Pin; /*!< Specifies the GPIO pins to be configured. + This parameter can be any value of @ref GPIO_pins_define */ + + uint32_t Mode; /*!< Specifies the operating mode for the selected pins. + This parameter can be a value of @ref GPIO_mode_define */ + + uint32_t Pull; /*!< Specifies the Pull-up or Pull-Down activation for the selected pins. 
+ This parameter can be a value of @ref GPIO_pull_define */ + + uint32_t Speed; /*!< Specifies the speed for the selected pins. + This parameter can be a value of @ref GPIO_speed_define */ + + uint32_t Alternate; /*!< Peripheral to be connected to the selected pins. + This parameter can be a value of @ref GPIO_Alternate_function_selection */ +}GPIO_InitTypeDef; + +/** + * @brief GPIO Bit SET and Bit RESET enumeration + */ +typedef enum +{ + GPIO_PIN_RESET = 0, + GPIO_PIN_SET +}GPIO_PinState; +/** + * @} + */ + +/* Exported constants --------------------------------------------------------*/ + +/** @defgroup GPIO_Exported_Constants GPIO Exported Constants + * @{ + */ + +/** @defgroup GPIO_pins_define GPIO pins define + * @{ + */ +#define GPIO_PIN_0 ((uint16_t)0x0001U) /* Pin 0 selected */ +#define GPIO_PIN_1 ((uint16_t)0x0002U) /* Pin 1 selected */ +#define GPIO_PIN_2 ((uint16_t)0x0004U) /* Pin 2 selected */ +#define GPIO_PIN_3 ((uint16_t)0x0008U) /* Pin 3 selected */ +#define GPIO_PIN_4 ((uint16_t)0x0010U) /* Pin 4 selected */ +#define GPIO_PIN_5 ((uint16_t)0x0020U) /* Pin 5 selected */ +#define GPIO_PIN_6 ((uint16_t)0x0040U) /* Pin 6 selected */ +#define GPIO_PIN_7 ((uint16_t)0x0080U) /* Pin 7 selected */ +#define GPIO_PIN_8 ((uint16_t)0x0100U) /* Pin 8 selected */ +#define GPIO_PIN_9 ((uint16_t)0x0200U) /* Pin 9 selected */ +#define GPIO_PIN_10 ((uint16_t)0x0400U) /* Pin 10 selected */ +#define GPIO_PIN_11 ((uint16_t)0x0800U) /* Pin 11 selected */ +#define GPIO_PIN_12 ((uint16_t)0x1000U) /* Pin 12 selected */ +#define GPIO_PIN_13 ((uint16_t)0x2000U) /* Pin 13 selected */ +#define GPIO_PIN_14 ((uint16_t)0x4000U) /* Pin 14 selected */ +#define GPIO_PIN_15 ((uint16_t)0x8000U) /* Pin 15 selected */ +#define GPIO_PIN_All ((uint16_t)0xFFFFU) /* All pins selected */ + +#define GPIO_PIN_MASK ((uint32_t)0x0000FFFFU) /* PIN mask for assert test */ +/** + * @} + */ + +/** @defgroup GPIO_mode_define GPIO mode define + * @brief GPIO Configuration Mode + * Elements values convention: 0x00WX00YZ + * - W : EXTI trigger detection on 3 bits + * - X : EXTI mode (IT or Event) on 2 bits + * - Y : Output type (Push Pull or Open Drain) on 1 bit + * - Z : GPIO mode (Input, Output, Alternate or Analog) on 2 bits + * @{ + */ +#define GPIO_MODE_INPUT MODE_INPUT /*!< Input Floating Mode */ +#define GPIO_MODE_OUTPUT_PP (MODE_OUTPUT | OUTPUT_PP) /*!< Output Push Pull Mode */ +#define GPIO_MODE_OUTPUT_OD (MODE_OUTPUT | OUTPUT_OD) /*!< Output Open Drain Mode */ +#define GPIO_MODE_AF_PP (MODE_AF | OUTPUT_PP) /*!< Alternate Function Push Pull Mode */ +#define GPIO_MODE_AF_OD (MODE_AF | OUTPUT_OD) /*!< Alternate Function Open Drain Mode */ + +#define GPIO_MODE_ANALOG MODE_ANALOG /*!< Analog Mode */ + +#define GPIO_MODE_IT_RISING (MODE_INPUT | EXTI_IT | TRIGGER_RISING) /*!< External Interrupt Mode with Rising edge trigger detection */ +#define GPIO_MODE_IT_FALLING (MODE_INPUT | EXTI_IT | TRIGGER_FALLING) /*!< External Interrupt Mode with Falling edge trigger detection */ +#define GPIO_MODE_IT_RISING_FALLING (MODE_INPUT | EXTI_IT | TRIGGER_RISING | TRIGGER_FALLING) /*!< External Interrupt Mode with Rising/Falling edge trigger detection */ + +#define GPIO_MODE_EVT_RISING (MODE_INPUT | EXTI_EVT | TRIGGER_RISING) /*!< External Event Mode with Rising edge trigger detection */ +#define GPIO_MODE_EVT_FALLING (MODE_INPUT | EXTI_EVT | TRIGGER_FALLING) /*!< External Event Mode with Falling edge trigger detection */ +#define GPIO_MODE_EVT_RISING_FALLING (MODE_INPUT | EXTI_EVT | TRIGGER_RISING | TRIGGER_FALLING) /*!< External Event 
Mode with Rising/Falling edge trigger detection */ +/** + * @} + */ + +/** @defgroup GPIO_speed_define GPIO speed define + * @brief GPIO Output Maximum frequency + * @{ + */ +#define GPIO_SPEED_FREQ_LOW ((uint32_t)0x00000000U) /*!< Low speed */ +#define GPIO_SPEED_FREQ_MEDIUM ((uint32_t)0x00000001U) /*!< Medium speed */ +#define GPIO_SPEED_FREQ_HIGH ((uint32_t)0x00000002U) /*!< Fast speed */ +#define GPIO_SPEED_FREQ_VERY_HIGH ((uint32_t)0x00000003U) /*!< High speed */ +/** + * @} + */ + + /** @defgroup GPIO_pull_define GPIO pull define + * @brief GPIO Pull-Up or Pull-Down Activation + * @{ + */ +#define GPIO_NOPULL ((uint32_t)0x00000000U) /*!< No Pull-up or Pull-down activation */ +#define GPIO_PULLUP ((uint32_t)0x00000001U) /*!< Pull-up activation */ +#define GPIO_PULLDOWN ((uint32_t)0x00000002U) /*!< Pull-down activation */ +/** + * @} + */ + +/** + * @} + */ + +/* Exported macro ------------------------------------------------------------*/ +/** @defgroup GPIO_Exported_Macros GPIO Exported Macros + * @{ + */ + +/** + * @brief Checks whether the specified EXTI line flag is set or not. + * @param __EXTI_LINE__ specifies the EXTI line flag to check. + * This parameter can be GPIO_PIN_x where x can be(0..15) + * @retval The new state of __EXTI_LINE__ (SET or RESET). + */ +#define __HAL_GPIO_EXTI_GET_FLAG(__EXTI_LINE__) (EXTI->PR & (__EXTI_LINE__)) + +/** + * @brief Clears the EXTI's line pending flags. + * @param __EXTI_LINE__ specifies the EXTI lines flags to clear. + * This parameter can be any combination of GPIO_PIN_x where x can be (0..15) + * @retval None + */ +#define __HAL_GPIO_EXTI_CLEAR_FLAG(__EXTI_LINE__) (EXTI->PR = (__EXTI_LINE__)) + +/** + * @brief Checks whether the specified EXTI line is asserted or not. + * @param __EXTI_LINE__ specifies the EXTI line to check. + * This parameter can be GPIO_PIN_x where x can be(0..15) + * @retval The new state of __EXTI_LINE__ (SET or RESET). + */ +#define __HAL_GPIO_EXTI_GET_IT(__EXTI_LINE__) (EXTI->PR & (__EXTI_LINE__)) + +/** + * @brief Clears the EXTI's line pending bits. + * @param __EXTI_LINE__ specifies the EXTI lines to clear. + * This parameter can be any combination of GPIO_PIN_x where x can be (0..15) + * @retval None + */ +#define __HAL_GPIO_EXTI_CLEAR_IT(__EXTI_LINE__) (EXTI->PR = (__EXTI_LINE__)) + +/** + * @brief Generates a Software interrupt on selected EXTI line. + * @param __EXTI_LINE__ specifies the EXTI line to check. 
+ * This parameter can be GPIO_PIN_x where x can be(0..15) + * @retval None + */ +#define __HAL_GPIO_EXTI_GENERATE_SWIT(__EXTI_LINE__) (EXTI->SWIER |= (__EXTI_LINE__)) +/** + * @} + */ + +/* Include GPIO HAL Extension module */ +#include "stm32f7xx_hal_gpio_ex.h" + +/* Exported functions --------------------------------------------------------*/ +/** @addtogroup GPIO_Exported_Functions + * @{ + */ + +/** @addtogroup GPIO_Exported_Functions_Group1 + * @{ + */ +/* Initialization and de-initialization functions *****************************/ +void HAL_GPIO_Init(GPIO_TypeDef *GPIOx, GPIO_InitTypeDef *GPIO_Init); +void HAL_GPIO_DeInit(GPIO_TypeDef *GPIOx, uint32_t GPIO_Pin); +/** + * @} + */ + +/** @addtogroup GPIO_Exported_Functions_Group2 + * @{ + */ +/* IO operation functions *****************************************************/ +GPIO_PinState HAL_GPIO_ReadPin(GPIO_TypeDef* GPIOx, uint16_t GPIO_Pin); +void HAL_GPIO_WritePin(GPIO_TypeDef* GPIOx, uint16_t GPIO_Pin, GPIO_PinState PinState); +void HAL_GPIO_TogglePin(GPIO_TypeDef* GPIOx, uint16_t GPIO_Pin); +HAL_StatusTypeDef HAL_GPIO_LockPin(GPIO_TypeDef* GPIOx, uint16_t GPIO_Pin); +void HAL_GPIO_EXTI_IRQHandler(uint16_t GPIO_Pin); +void HAL_GPIO_EXTI_Callback(uint16_t GPIO_Pin); + +/** + * @} + */ + +/** + * @} + */ +/* Private types -------------------------------------------------------------*/ +/* Private variables ---------------------------------------------------------*/ +/* Private constants ---------------------------------------------------------*/ +/** @defgroup GPIO_Private_Constants GPIO Private Constants + * @{ + */ +#define GPIO_MODE_Pos 0U +#define GPIO_MODE (0x3UL << GPIO_MODE_Pos) +#define MODE_INPUT (0x0UL << GPIO_MODE_Pos) +#define MODE_OUTPUT (0x1UL << GPIO_MODE_Pos) +#define MODE_AF (0x2UL << GPIO_MODE_Pos) +#define MODE_ANALOG (0x3UL << GPIO_MODE_Pos) +#define OUTPUT_TYPE_Pos 4U +#define OUTPUT_TYPE (0x1UL << OUTPUT_TYPE_Pos) +#define OUTPUT_PP (0x0UL << OUTPUT_TYPE_Pos) +#define OUTPUT_OD (0x1UL << OUTPUT_TYPE_Pos) +#define EXTI_MODE_Pos 16U +#define EXTI_MODE (0x3UL << EXTI_MODE_Pos) +#define EXTI_IT (0x1UL << EXTI_MODE_Pos) +#define EXTI_EVT (0x2UL << EXTI_MODE_Pos) +#define TRIGGER_MODE_Pos 20U +#define TRIGGER_MODE (0x7UL << TRIGGER_MODE_Pos) +#define TRIGGER_RISING (0x1UL << TRIGGER_MODE_Pos) +#define TRIGGER_FALLING (0x2UL << TRIGGER_MODE_Pos) +/** + * @} + */ + +/* Private macros ------------------------------------------------------------*/ +/** @defgroup GPIO_Private_Macros GPIO Private Macros + * @{ + */ +#define IS_GPIO_PIN_ACTION(ACTION) (((ACTION) == GPIO_PIN_RESET) || ((ACTION) == GPIO_PIN_SET)) +#define IS_GPIO_PIN(__PIN__) ((((uint32_t)(__PIN__) & GPIO_PIN_MASK) != 0x00U)) +#define IS_GPIO_MODE(MODE) (((MODE) == GPIO_MODE_INPUT) ||\ + ((MODE) == GPIO_MODE_OUTPUT_PP) ||\ + ((MODE) == GPIO_MODE_OUTPUT_OD) ||\ + ((MODE) == GPIO_MODE_AF_PP) ||\ + ((MODE) == GPIO_MODE_AF_OD) ||\ + ((MODE) == GPIO_MODE_IT_RISING) ||\ + ((MODE) == GPIO_MODE_IT_FALLING) ||\ + ((MODE) == GPIO_MODE_IT_RISING_FALLING) ||\ + ((MODE) == GPIO_MODE_EVT_RISING) ||\ + ((MODE) == GPIO_MODE_EVT_FALLING) ||\ + ((MODE) == GPIO_MODE_EVT_RISING_FALLING) ||\ + ((MODE) == GPIO_MODE_ANALOG)) +#define IS_GPIO_SPEED(SPEED) (((SPEED) == GPIO_SPEED_LOW) || ((SPEED) == GPIO_SPEED_MEDIUM) || \ + ((SPEED) == GPIO_SPEED_FAST) || ((SPEED) == GPIO_SPEED_HIGH)) +#define IS_GPIO_PULL(PULL) (((PULL) == GPIO_NOPULL) || ((PULL) == GPIO_PULLUP) || \ + ((PULL) == GPIO_PULLDOWN)) +/** + * @} + */ + +/* Private functions 
---------------------------------------------------------*/ +/** @defgroup GPIO_Private_Functions GPIO Private Functions + * @{ + */ + +/** + * @} + */ + +/** + * @} + */ + +/** + * @} + */ + +#ifdef __cplusplus +} +#endif + +#endif /* __STM32F7xx_HAL_GPIO_H */ + diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_gpio_ex.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_gpio_ex.h new file mode 100644 index 0000000..0fbfc7c --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_gpio_ex.h @@ -0,0 +1,656 @@ +/** + ****************************************************************************** + * @file stm32f7xx_hal_gpio_ex.h + * @author MCD Application Team + * @brief Header file of GPIO HAL Extension module. + ****************************************************************************** + * @attention + * + * Copyright (c) 2017 STMicroelectronics. + * All rights reserved. + * + * This software is licensed under terms that can be found in the LICENSE file + * in the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. + * + ****************************************************************************** + */ + +/* Define to prevent recursive inclusion -------------------------------------*/ +#ifndef __STM32F7xx_HAL_GPIO_EX_H +#define __STM32F7xx_HAL_GPIO_EX_H + +#ifdef __cplusplus + extern "C" { +#endif + +/* Includes ------------------------------------------------------------------*/ +#include "stm32f7xx_hal_def.h" + +/** @addtogroup STM32F7xx_HAL_Driver + * @{ + */ + +/** @defgroup GPIOEx GPIOEx + * @{ + */ + +/* Exported types ------------------------------------------------------------*/ + +/* Exported constants --------------------------------------------------------*/ +/** @defgroup GPIOEx_Exported_Constants GPIO Exported Constants + * @{ + */ + +/** @defgroup GPIO_Alternate_function_selection GPIO Alternate Function Selection + * @{ + */ +/*--------------- STM32F74xxx/STM32F75xxx/STM32F76xxx/STM32F77xxx -------------*/ +#if defined (STM32F745xx) || defined (STM32F746xx) || defined (STM32F756xx) || defined (STM32F765xx) || defined (STM32F767xx) ||\ + defined (STM32F769xx) || defined (STM32F777xx) || defined (STM32F779xx) || defined (STM32F750xx) +/** + * @brief AF 0 selection + */ +#define GPIO_AF0_RTC_50Hz ((uint8_t)0x00U) /* RTC_50Hz Alternate Function mapping */ +#define GPIO_AF0_MCO ((uint8_t)0x00U) /* MCO (MCO1 and MCO2) Alternate Function mapping */ +#define GPIO_AF0_SWJ ((uint8_t)0x00U) /* SWJ (SWD and JTAG) Alternate Function mapping */ +#define GPIO_AF0_TRACE ((uint8_t)0x00U) /* TRACE Alternate Function mapping */ + +/** + * @brief AF 1 selection + */ +#define GPIO_AF1_TIM1 ((uint8_t)0x01U) /* TIM1 Alternate Function mapping */ +#define GPIO_AF1_TIM2 ((uint8_t)0x01U) /* TIM2 Alternate Function mapping */ +#if defined (STM32F765xx) || defined(STM32F767xx) || defined(STM32F769xx) || defined(STM32F777xx) || defined(STM32F779xx) +#define GPIO_AF1_UART5 ((uint8_t)0x01U) /* UART5 Alternate Function mapping */ +#define GPIO_AF1_I2C4 ((uint8_t)0x01U) /* I2C4 Alternate Function mapping */ +#endif /* STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx */ + +/** + * @brief AF 2 selection + */ +#define GPIO_AF2_TIM3 ((uint8_t)0x02U) /* TIM3 Alternate Function mapping */ +#define GPIO_AF2_TIM4 ((uint8_t)0x02U) /* TIM4 Alternate Function mapping */ 
+#define GPIO_AF2_TIM5 ((uint8_t)0x02U) /* TIM5 Alternate Function mapping */ + +/** + * @brief AF 3 selection + */ +#define GPIO_AF3_TIM8 ((uint8_t)0x03U) /* TIM8 Alternate Function mapping */ +#define GPIO_AF3_TIM9 ((uint8_t)0x03U) /* TIM9 Alternate Function mapping */ +#define GPIO_AF3_TIM10 ((uint8_t)0x03U) /* TIM10 Alternate Function mapping */ +#define GPIO_AF3_TIM11 ((uint8_t)0x03U) /* TIM11 Alternate Function mapping */ +#define GPIO_AF3_LPTIM1 ((uint8_t)0x03U) /* LPTIM1 Alternate Function mapping */ +#define GPIO_AF3_CEC ((uint8_t)0x03U) /* CEC Alternate Function mapping */ +#if defined (STM32F765xx) || defined(STM32F767xx) || defined(STM32F769xx) || defined(STM32F777xx) || defined(STM32F779xx) +#define GPIO_AF3_DFSDM1 ((uint8_t)0x03U) /* DFSDM1 Alternate Function mapping */ +#endif /* STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx */ +/** + * @brief AF 4 selection + */ +#define GPIO_AF4_I2C1 ((uint8_t)0x04U) /* I2C1 Alternate Function mapping */ +#define GPIO_AF4_I2C2 ((uint8_t)0x04U) /* I2C2 Alternate Function mapping */ +#define GPIO_AF4_I2C3 ((uint8_t)0x04U) /* I2C3 Alternate Function mapping */ +#define GPIO_AF4_I2C4 ((uint8_t)0x04U) /* I2C4 Alternate Function mapping */ +#define GPIO_AF4_CEC ((uint8_t)0x04U) /* CEC Alternate Function mapping */ +#if defined (STM32F765xx) || defined(STM32F767xx) || defined(STM32F769xx) || defined(STM32F777xx) || defined(STM32F779xx) +#define GPIO_AF4_USART1 ((uint8_t)0x04) /* USART1 Alternate Function mapping */ +#endif /* STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx */ + +/** + * @brief AF 5 selection + */ +#define GPIO_AF5_SPI1 ((uint8_t)0x05U) /* SPI1 Alternate Function mapping */ +#define GPIO_AF5_SPI2 ((uint8_t)0x05U) /* SPI2/I2S2 Alternate Function mapping */ +#define GPIO_AF5_SPI3 ((uint8_t)0x05U) /* SPI3/I2S3 Alternate Function mapping */ +#define GPIO_AF5_SPI4 ((uint8_t)0x05U) /* SPI4 Alternate Function mapping */ +#define GPIO_AF5_SPI5 ((uint8_t)0x05U) /* SPI5 Alternate Function mapping */ +#define GPIO_AF5_SPI6 ((uint8_t)0x05U) /* SPI6 Alternate Function mapping */ + +/** + * @brief AF 6 selection + */ +#define GPIO_AF6_SPI3 ((uint8_t)0x06U) /* SPI3/I2S3 Alternate Function mapping */ +#define GPIO_AF6_SAI1 ((uint8_t)0x06U) /* SAI1 Alternate Function mapping */ +#if defined (STM32F765xx) || defined(STM32F767xx) || defined(STM32F769xx) || defined(STM32F777xx) || defined(STM32F779xx) +#define GPIO_AF6_UART4 ((uint8_t)0x06U) /* UART4 Alternate Function mapping */ +#define GPIO_AF6_DFSDM1 ((uint8_t)0x06U) /* DFSDM1 Alternate Function mapping */ +#endif /* STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx */ + +/** + * @brief AF 7 selection + */ +#define GPIO_AF7_USART1 ((uint8_t)0x07U) /* USART1 Alternate Function mapping */ +#define GPIO_AF7_USART2 ((uint8_t)0x07U) /* USART2 Alternate Function mapping */ +#define GPIO_AF7_USART3 ((uint8_t)0x07U) /* USART3 Alternate Function mapping */ +#define GPIO_AF7_UART5 ((uint8_t)0x07U) /* UART5 Alternate Function mapping */ +#define GPIO_AF7_SPDIFRX ((uint8_t)0x07U) /* SPDIF-RX Alternate Function mapping */ +#define GPIO_AF7_SPI2 ((uint8_t)0x07U) /* SPI2 Alternate Function mapping */ +#define GPIO_AF7_SPI3 ((uint8_t)0x07U) /* SPI3 Alternate Function mapping */ +#if defined (STM32F765xx) || defined(STM32F767xx) || defined(STM32F769xx) || defined(STM32F777xx) || defined(STM32F779xx) +#define GPIO_AF7_SPI6 ((uint8_t)0x07U) /* SPI6 Alternate Function mapping */ +#define GPIO_AF7_DFSDM1 ((uint8_t)0x07U) /* DFSDM1 Alternate Function mapping */ +#endif /* STM32F767xx || 
STM32F769xx || STM32F777xx || STM32F779xx */ + +/** + * @brief AF 8 selection + */ +#define GPIO_AF8_UART4 ((uint8_t)0x08U) /* UART4 Alternate Function mapping */ +#define GPIO_AF8_UART5 ((uint8_t)0x08U) /* UART5 Alternate Function mapping */ +#define GPIO_AF8_USART6 ((uint8_t)0x08U) /* USART6 Alternate Function mapping */ +#define GPIO_AF8_UART7 ((uint8_t)0x08U) /* UART7 Alternate Function mapping */ +#define GPIO_AF8_UART8 ((uint8_t)0x08U) /* UART8 Alternate Function mapping */ +#define GPIO_AF8_SPDIFRX ((uint8_t)0x08U) /* SPIDIF-RX Alternate Function mapping */ +#define GPIO_AF8_SAI2 ((uint8_t)0x08U) /* SAI2 Alternate Function mapping */ +#if defined (STM32F765xx) || defined(STM32F767xx) || defined(STM32F769xx) || defined(STM32F777xx) || defined(STM32F779xx) +#define GPIO_AF8_SPI6 ((uint8_t)0x08U) /* SPI6 Alternate Function mapping */ +#endif /* STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx */ + + +/** + * @brief AF 9 selection + */ +#define GPIO_AF9_CAN1 ((uint8_t)0x09U) /* CAN1 Alternate Function mapping */ +#define GPIO_AF9_CAN2 ((uint8_t)0x09U) /* CAN2 Alternate Function mapping */ +#define GPIO_AF9_TIM12 ((uint8_t)0x09U) /* TIM12 Alternate Function mapping */ +#define GPIO_AF9_TIM13 ((uint8_t)0x09U) /* TIM13 Alternate Function mapping */ +#define GPIO_AF9_TIM14 ((uint8_t)0x09U) /* TIM14 Alternate Function mapping */ +#define GPIO_AF9_QUADSPI ((uint8_t)0x09U) /* QUADSPI Alternate Function mapping */ +#if defined(STM32F746xx) || defined(STM32F756xx) || defined(STM32F767xx) || defined(STM32F769xx) || defined(STM32F777xx) || defined(STM32F779xx) || defined(STM32F750xx) +#define GPIO_AF9_LTDC ((uint8_t)0x09U) /* LCD-TFT Alternate Function mapping */ +#endif /* STM32F746xx || STM32F756xx || STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx || STM32F750xx */ +#if defined(STM32F746xx) || defined(STM32F756xx) || defined(STM32F765xx) || defined(STM32F765xx) || defined(STM32F767xx) || defined(STM32F769xx) || defined(STM32F777xx) || defined(STM32F779xx) || defined(STM32F750xx) +#define GPIO_AF9_FMC ((uint8_t)0x09U) /* FMC Alternate Function mapping */ +#endif /* STM32F746xx || STM32F756xx || STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx || STM32F750xx */ +/** + * @brief AF 10 selection + */ +#define GPIO_AF10_OTG_FS ((uint8_t)0xAU) /* OTG_FS Alternate Function mapping */ +#define GPIO_AF10_OTG_HS ((uint8_t)0xAU) /* OTG_HS Alternate Function mapping */ +#define GPIO_AF10_QUADSPI ((uint8_t)0xAU) /* QUADSPI Alternate Function mapping */ +#define GPIO_AF10_SAI2 ((uint8_t)0xAU) /* SAI2 Alternate Function mapping */ +#if defined (STM32F765xx) || defined(STM32F767xx) || defined(STM32F769xx) || defined(STM32F777xx) || defined(STM32F779xx) +#define GPIO_AF10_DFSDM1 ((uint8_t)0x0AU) /* DFSDM1 Alternate Function mapping */ +#define GPIO_AF10_SDMMC2 ((uint8_t)0x0AU) /* SDMMC2 Alternate Function mapping */ +#define GPIO_AF10_LTDC ((uint8_t)0x0AU) /* LCD-TFT Alternate Function mapping */ +#endif /* STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx */ + +/** + * @brief AF 11 selection + */ +#define GPIO_AF11_ETH ((uint8_t)0x0BU) /* ETHERNET Alternate Function mapping */ +#if defined(STM32F765xx) || defined (STM32F767xx) || defined (STM32F769xx) || defined (STM32F777xx) || defined (STM32F779xx) +#define GPIO_AF11_CAN3 ((uint8_t)0x0BU) /* CAN3 Alternate Function mapping */ +#define GPIO_AF11_SDMMC2 ((uint8_t)0x0BU) /* SDMMC2 Alternate Function mapping */ +#define GPIO_AF11_I2C4 ((uint8_t)0x0BU) /* I2C4 Alternate Function mapping */ +#endif /* STM32F767xx || STM32F769xx || 
STM32F777xx || STM32F779xx */ + +/** + * @brief AF 12 selection + */ +#define GPIO_AF12_FMC ((uint8_t)0xCU) /* FMC Alternate Function mapping */ +#define GPIO_AF12_OTG_HS_FS ((uint8_t)0xCU) /* OTG HS configured in FS, Alternate Function mapping */ +#define GPIO_AF12_SDMMC1 ((uint8_t)0xCU) /* SDMMC1 Alternate Function mapping */ +#if defined(STM32F765xx) || defined (STM32F767xx) || defined (STM32F769xx) || defined (STM32F777xx) || defined (STM32F779xx) +#define GPIO_AF12_MDIOS ((uint8_t)0xCU) /* SDMMC1 Alternate Function mapping */ +#define GPIO_AF12_UART7 ((uint8_t)0xCU) /* UART7 Alternate Function mapping */ +#endif /* STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx */ + +/** + * @brief AF 13 selection + */ +#define GPIO_AF13_DCMI ((uint8_t)0x0DU) /* DCMI Alternate Function mapping */ +#if defined (STM32F769xx) || defined (STM32F779xx) +#define GPIO_AF13_DSI ((uint8_t)0x0DU) /* DSI Alternate Function mapping */ +#endif /* STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx */ +#if defined(STM32F746xx) || defined(STM32F756xx) || defined(STM32F767xx) || defined(STM32F769xx) || defined(STM32F777xx) || defined(STM32F779xx) || defined(STM32F750xx) +#define GPIO_AF13_LTDC ((uint8_t)0x0DU) /* LTDC Alternate Function mapping */ + +/** + * @brief AF 14 selection + */ +#define GPIO_AF14_LTDC ((uint8_t)0x0EU) /* LCD-TFT Alternate Function mapping */ +#endif /* STM32F746xx || STM32F756xx || STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx || STM32F750xx */ +/** + * @brief AF 15 selection + */ +#define GPIO_AF15_EVENTOUT ((uint8_t)0x0FU) /* EVENTOUT Alternate Function mapping */ +#endif /* STM32F745xx || STM32F746xx || STM32F756xx || STM32F765xx || STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx */ +/*----------------------------------------------------------------------------*/ + +/*---------------------------- STM32F72xxx/STM32F73xxx -----------------------*/ +#if defined(STM32F722xx) || defined(STM32F723xx) || defined(STM32F732xx) || defined(STM32F733xx) || defined(STM32F730xx) + /** + * @brief AF 0 selection + */ +#define GPIO_AF0_RTC_50Hz ((uint8_t)0x00U) /* RTC_50Hz Alternate Function mapping */ +#define GPIO_AF0_MCO ((uint8_t)0x00U) /* MCO (MCO1 and MCO2) Alternate Function mapping */ +#define GPIO_AF0_SWJ ((uint8_t)0x00U) /* SWJ (SWD and JTAG) Alternate Function mapping */ +#define GPIO_AF0_TRACE ((uint8_t)0x00U) /* TRACE Alternate Function mapping */ + +/** + * @brief AF 1 selection + */ +#define GPIO_AF1_TIM1 ((uint8_t)0x01U) /* TIM1 Alternate Function mapping */ +#define GPIO_AF1_TIM2 ((uint8_t)0x01U) /* TIM2 Alternate Function mapping */ + +/** + * @brief AF 2 selection + */ +#define GPIO_AF2_TIM3 ((uint8_t)0x02U) /* TIM3 Alternate Function mapping */ +#define GPIO_AF2_TIM4 ((uint8_t)0x02U) /* TIM4 Alternate Function mapping */ +#define GPIO_AF2_TIM5 ((uint8_t)0x02U) /* TIM5 Alternate Function mapping */ + +/** + * @brief AF 3 selection + */ +#define GPIO_AF3_TIM8 ((uint8_t)0x03U) /* TIM8 Alternate Function mapping */ +#define GPIO_AF3_TIM9 ((uint8_t)0x03U) /* TIM9 Alternate Function mapping */ +#define GPIO_AF3_TIM10 ((uint8_t)0x03U) /* TIM10 Alternate Function mapping */ +#define GPIO_AF3_TIM11 ((uint8_t)0x03U) /* TIM11 Alternate Function mapping */ +#define GPIO_AF3_LPTIM1 ((uint8_t)0x03U) /* LPTIM1 Alternate Function mapping */ + +/** + * @brief AF 4 selection + */ +#define GPIO_AF4_I2C1 ((uint8_t)0x04U) /* I2C1 Alternate Function mapping */ +#define GPIO_AF4_I2C2 ((uint8_t)0x04U) /* I2C2 Alternate Function mapping */ +#define GPIO_AF4_I2C3 
((uint8_t)0x04U) /* I2C3 Alternate Function mapping */ + +/** + * @brief AF 5 selection + */ +#define GPIO_AF5_SPI1 ((uint8_t)0x05U) /* SPI1 Alternate Function mapping */ +#define GPIO_AF5_SPI2 ((uint8_t)0x05U) /* SPI2/I2S2 Alternate Function mapping */ +#define GPIO_AF5_SPI3 ((uint8_t)0x05U) /* SPI3/I2S3 Alternate Function mapping */ +#define GPIO_AF5_SPI4 ((uint8_t)0x05U) /* SPI4 Alternate Function mapping */ +#define GPIO_AF5_SPI5 ((uint8_t)0x05U) /* SPI5 Alternate Function mapping */ + +/** + * @brief AF 6 selection + */ +#define GPIO_AF6_SPI3 ((uint8_t)0x06U) /* SPI3/I2S3 Alternate Function mapping */ +#define GPIO_AF6_SAI1 ((uint8_t)0x06U) /* SAI1 Alternate Function mapping */ + +/** + * @brief AF 7 selection + */ +#define GPIO_AF7_USART1 ((uint8_t)0x07U) /* USART1 Alternate Function mapping */ +#define GPIO_AF7_USART2 ((uint8_t)0x07U) /* USART2 Alternate Function mapping */ +#define GPIO_AF7_USART3 ((uint8_t)0x07U) /* USART3 Alternate Function mapping */ +#define GPIO_AF7_UART5 ((uint8_t)0x07U) /* UART5 Alternate Function mapping */ +#define GPIO_AF7_SPI2 ((uint8_t)0x07U) /* SPI2 Alternate Function mapping */ +#define GPIO_AF7_SPI3 ((uint8_t)0x07U) /* SPI3 Alternate Function mapping */ + +/** + * @brief AF 8 selection + */ +#define GPIO_AF8_UART4 ((uint8_t)0x08U) /* UART4 Alternate Function mapping */ +#define GPIO_AF8_UART5 ((uint8_t)0x08U) /* UART5 Alternate Function mapping */ +#define GPIO_AF8_USART6 ((uint8_t)0x08U) /* USART6 Alternate Function mapping */ +#define GPIO_AF8_UART7 ((uint8_t)0x08U) /* UART7 Alternate Function mapping */ +#define GPIO_AF8_UART8 ((uint8_t)0x08U) /* UART8 Alternate Function mapping */ +#define GPIO_AF8_SAI2 ((uint8_t)0x08U) /* SAI2 Alternate Function mapping */ + +/** + * @brief AF 9 selection + */ +#define GPIO_AF9_CAN1 ((uint8_t)0x09U) /* CAN1 Alternate Function mapping */ +#define GPIO_AF9_TIM12 ((uint8_t)0x09U) /* TIM12 Alternate Function mapping */ +#define GPIO_AF9_TIM13 ((uint8_t)0x09U) /* TIM13 Alternate Function mapping */ +#define GPIO_AF9_TIM14 ((uint8_t)0x09U) /* TIM14 Alternate Function mapping */ +#define GPIO_AF9_QUADSPI ((uint8_t)0x09U) /* QUADSPI Alternate Function mapping */ + +/** + * @brief AF 10 selection + */ +#define GPIO_AF10_OTG_FS ((uint8_t)0xAU) /* OTG_FS Alternate Function mapping */ +#define GPIO_AF10_OTG_HS ((uint8_t)0xAU) /* OTG_HS Alternate Function mapping */ +#define GPIO_AF10_QUADSPI ((uint8_t)0xAU) /* QUADSPI Alternate Function mapping */ +#define GPIO_AF10_SAI2 ((uint8_t)0xAU) /* SAI2 Alternate Function mapping */ +#define GPIO_AF10_SDMMC2 ((uint8_t)0x0AU) /* SDMMC2 Alternate Function mapping */ + +/** + * @brief AF 11 selection + */ +#define GPIO_AF11_SDMMC2 ((uint8_t)0x0BU) /* SDMMC2 Alternate Function mapping */ + +/** + * @brief AF 12 selection + */ +#define GPIO_AF12_FMC ((uint8_t)0xCU) /* FMC Alternate Function mapping */ +#define GPIO_AF12_OTG_HS_FS ((uint8_t)0xCU) /* OTG HS configured in FS, Alternate Function mapping */ +#define GPIO_AF12_SDMMC1 ((uint8_t)0xCU) /* SDMMC1 Alternate Function mapping */ + +/** + * @brief AF 13 selection + */ +#define GPIO_AF13_RNG ((uint8_t)0x0DU) /* RNG Alternate Function mapping */ + +/** + * @brief AF 15 selection + */ +#define GPIO_AF15_EVENTOUT ((uint8_t)0x0FU) /* EVENTOUT Alternate Function mapping */ +#endif /* STM32F722xx || STM32F723xx || STM32F732xx || STM32F733xx || STM32F730xx */ +/*----------------------------------------------------------------------------*/ + +/** + * @} + */ + +/** + * @} + */ + +/* Exported macro 
------------------------------------------------------------*/ +/** @defgroup GPIOEx_Exported_Macros GPIO Exported Macros + * @{ + */ +/** + * @} + */ + +/* Exported functions --------------------------------------------------------*/ +/** @defgroup GPIOEx_Exported_Functions GPIO Exported Functions + * @{ + */ +/** + * @} + */ +/* Private types -------------------------------------------------------------*/ +/* Private variables ---------------------------------------------------------*/ +/* Private constants ---------------------------------------------------------*/ +/** @defgroup GPIOEx_Private_Constants GPIO Private Constants + * @{ + */ + +/** + * @brief GPIO pin available on the platform + */ +/* Defines the available pins per GPIOs */ +#define GPIOA_PIN_AVAILABLE GPIO_PIN_All +#define GPIOB_PIN_AVAILABLE GPIO_PIN_All +#define GPIOC_PIN_AVAILABLE GPIO_PIN_All +#define GPIOD_PIN_AVAILABLE GPIO_PIN_All +#define GPIOE_PIN_AVAILABLE GPIO_PIN_All +#define GPIOF_PIN_AVAILABLE GPIO_PIN_All +#define GPIOG_PIN_AVAILABLE GPIO_PIN_All +#define GPIOI_PIN_AVAILABLE GPIO_PIN_All +#define GPIOJ_PIN_AVAILABLE GPIO_PIN_All +#define GPIOH_PIN_AVAILABLE GPIO_PIN_All +#define GPIOK_PIN_AVAILABLE (GPIO_PIN_0 | GPIO_PIN_1 | GPIO_PIN_2 | GPIO_PIN_3 | GPIO_PIN_4 | \ + GPIO_PIN_5 | GPIO_PIN_6 | GPIO_PIN_7) + +/** + * @} + */ + +/* Private macros ------------------------------------------------------------*/ +/** @defgroup GPIOEx_Private_Macros GPIO Private Macros + * @{ + */ +/** @defgroup GPIOEx_Get_Port_Index GPIO Get Port Index + * @{ + */ +#if defined (STM32F745xx) || defined (STM32F746xx) || defined (STM32F756xx) || defined (STM32F765xx) ||\ + defined (STM32F767xx) || defined (STM32F769xx) || defined (STM32F777xx) || defined (STM32F779xx) ||\ + defined (STM32F750xx) +#define GPIO_GET_INDEX(__GPIOx__) (uint8_t)(((__GPIOx__) == (GPIOA))? 0U :\ + ((__GPIOx__) == (GPIOB))? 1U :\ + ((__GPIOx__) == (GPIOC))? 2U :\ + ((__GPIOx__) == (GPIOD))? 3U :\ + ((__GPIOx__) == (GPIOE))? 4U :\ + ((__GPIOx__) == (GPIOF))? 5U :\ + ((__GPIOx__) == (GPIOG))? 6U :\ + ((__GPIOx__) == (GPIOH))? 7U :\ + ((__GPIOx__) == (GPIOI))? 8U :\ + ((__GPIOx__) == (GPIOJ))? 9U : 10U) +#endif /* STM32F745xx || STM32F746xx || STM32F756xx || STM32F765xx || STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx || STM32F750xx */ + +#if defined (STM32F722xx) || defined (STM32F723xx) || defined (STM32F732xx) || defined (STM32F733xx) || defined (STM32F730xx) +#define GPIO_GET_INDEX(__GPIOx__) (uint8_t)(((__GPIOx__) == (GPIOA))? 0U :\ + ((__GPIOx__) == (GPIOB))? 1U :\ + ((__GPIOx__) == (GPIOC))? 2U :\ + ((__GPIOx__) == (GPIOD))? 3U :\ + ((__GPIOx__) == (GPIOE))? 4U :\ + ((__GPIOx__) == (GPIOF))? 5U :\ + ((__GPIOx__) == (GPIOG))? 6U :\ + ((__GPIOx__) == (GPIOH))? 
7U : 8U) +#endif /* STM32F722xx || STM32F723xx || STM32F732xx || STM32F733xx || STM32F730xx */ +/** + * @} + */ + +#define IS_GPIO_PIN_AVAILABLE(__INSTANCE__,__PIN__) \ + ((((__INSTANCE__) == GPIOA) && (((__PIN__) & (GPIOA_PIN_AVAILABLE)) != 0) && (((__PIN__) | (GPIOA_PIN_AVAILABLE)) == (GPIOA_PIN_AVAILABLE))) || \ + (((__INSTANCE__) == GPIOB) && (((__PIN__) & (GPIOB_PIN_AVAILABLE)) != 0) && (((__PIN__) | (GPIOB_PIN_AVAILABLE)) == (GPIOB_PIN_AVAILABLE))) || \ + (((__INSTANCE__) == GPIOC) && (((__PIN__) & (GPIOC_PIN_AVAILABLE)) != 0) && (((__PIN__) | (GPIOC_PIN_AVAILABLE)) == (GPIOC_PIN_AVAILABLE))) || \ + (((__INSTANCE__) == GPIOD) && (((__PIN__) & (GPIOD_PIN_AVAILABLE)) != 0) && (((__PIN__) | (GPIOD_PIN_AVAILABLE)) == (GPIOD_PIN_AVAILABLE))) || \ + (((__INSTANCE__) == GPIOE) && (((__PIN__) & (GPIOE_PIN_AVAILABLE)) != 0) && (((__PIN__) | (GPIOE_PIN_AVAILABLE)) == (GPIOE_PIN_AVAILABLE))) || \ + (((__INSTANCE__) == GPIOF) && (((__PIN__) & (GPIOF_PIN_AVAILABLE)) != 0) && (((__PIN__) | (GPIOF_PIN_AVAILABLE)) == (GPIOF_PIN_AVAILABLE))) || \ + (((__INSTANCE__) == GPIOG) && (((__PIN__) & (GPIOG_PIN_AVAILABLE)) != 0) && (((__PIN__) | (GPIOG_PIN_AVAILABLE)) == (GPIOG_PIN_AVAILABLE))) || \ + (((__INSTANCE__) == GPIOI) && (((__PIN__) & (GPIOI_PIN_AVAILABLE)) != 0) && (((__PIN__) | (GPIOI_PIN_AVAILABLE)) == (GPIOI_PIN_AVAILABLE))) || \ + (((__INSTANCE__) == GPIOJ) && (((__PIN__) & (GPIOJ_PIN_AVAILABLE)) != 0) && (((__PIN__) | (GPIOJ_PIN_AVAILABLE)) == (GPIOJ_PIN_AVAILABLE))) || \ + (((__INSTANCE__) == GPIOK) && (((__PIN__) & (GPIOK_PIN_AVAILABLE)) != 0) && (((__PIN__) | (GPIOK_PIN_AVAILABLE)) == (GPIOK_PIN_AVAILABLE))) || \ + (((__INSTANCE__) == GPIOH) && (((__PIN__) & (GPIOH_PIN_AVAILABLE)) != 0) && (((__PIN__) | (GPIOH_PIN_AVAILABLE)) == (GPIOH_PIN_AVAILABLE)))) +/** @defgroup GPIOEx_IS_Alternat_function_selection GPIO Check Alternate Function + * @{ + */ +#if defined(STM32F756xx) || defined(STM32F746xx) || defined(STM32F750xx) +#define IS_GPIO_AF(AF) (((AF) == GPIO_AF0_RTC_50Hz) || ((AF) == GPIO_AF1_TIM1) || \ + ((AF) == GPIO_AF0_SWJ) || ((AF) == GPIO_AF0_TRACE) || \ + ((AF) == GPIO_AF0_MCO) || ((AF) == GPIO_AF1_TIM2) || \ + ((AF) == GPIO_AF2_TIM3) || ((AF) == GPIO_AF2_TIM4) || \ + ((AF) == GPIO_AF2_TIM5) || ((AF) == GPIO_AF3_TIM8) || \ + ((AF) == GPIO_AF3_TIM9) || ((AF) == GPIO_AF3_TIM10) || \ + ((AF) == GPIO_AF3_TIM11) || ((AF) == GPIO_AF3_LPTIM1) || \ + ((AF) == GPIO_AF3_CEC) || ((AF) == GPIO_AF4_CEC) || \ + ((AF) == GPIO_AF4_I2C1) || ((AF) == GPIO_AF4_I2C2) || \ + ((AF) == GPIO_AF4_I2C3) || ((AF) == GPIO_AF4_I2C4) || \ + ((AF) == GPIO_AF5_SPI1) || ((AF) == GPIO_AF5_SPI2) || \ + ((AF) == GPIO_AF5_SPI3) || ((AF) == GPIO_AF5_SPI4) || \ + ((AF) == GPIO_AF5_SPI5) || ((AF) == GPIO_AF5_SPI6) || \ + ((AF) == GPIO_AF6_SPI3) || ((AF) == GPIO_AF6_SAI1) || \ + ((AF) == GPIO_AF7_SPI3) || ((AF) == GPIO_AF7_SPI2) || \ + ((AF) == GPIO_AF7_USART1) || ((AF) == GPIO_AF7_USART2) || \ + ((AF) == GPIO_AF7_USART3) || ((AF) == GPIO_AF7_UART5) || \ + ((AF) == GPIO_AF7_SPDIFRX) || ((AF) == GPIO_AF8_SPDIFRX) || \ + ((AF) == GPIO_AF8_SAI2) || ((AF) == GPIO_AF8_USART6) || \ + ((AF) == GPIO_AF8_UART4) || ((AF) == GPIO_AF8_UART5) || \ + ((AF) == GPIO_AF8_UART7) || ((AF) == GPIO_AF8_UART8) || \ + ((AF) == GPIO_AF9_CAN1) || ((AF) == GPIO_AF9_CAN2) || \ + ((AF) == GPIO_AF9_TIM12) || ((AF) == GPIO_AF9_TIM12) || \ + ((AF) == GPIO_AF9_TIM14) || ((AF) == GPIO_AF9_QUADSPI) || \ + ((AF) == GPIO_AF9_LTDC) || ((AF) == GPIO_AF10_OTG_FS) || \ + ((AF) == GPIO_AF10_OTG_HS) || ((AF) == GPIO_AF10_SAI2) || \ + ((AF) == GPIO_AF10_QUADSPI) || 
((AF) == GPIO_AF11_ETH) || \ + ((AF) == GPIO_AF12_OTG_HS_FS) || ((AF) == GPIO_AF12_SDMMC1) || \ + ((AF) == GPIO_AF12_FMC) || ((AF) == GPIO_AF15_EVENTOUT) || \ + ((AF) == GPIO_AF13_DCMI) || ((AF) == GPIO_AF14_LTDC)) +#elif defined(STM32F745xx) +#define IS_GPIO_AF(AF) (((AF) == GPIO_AF0_RTC_50Hz) || ((AF) == GPIO_AF1_TIM1) || \ + ((AF) == GPIO_AF0_SWJ) || ((AF) == GPIO_AF0_TRACE) || \ + ((AF) == GPIO_AF0_MCO) || ((AF) == GPIO_AF1_TIM2) || \ + ((AF) == GPIO_AF2_TIM3) || ((AF) == GPIO_AF2_TIM4) || \ + ((AF) == GPIO_AF2_TIM5) || ((AF) == GPIO_AF3_TIM8) || \ + ((AF) == GPIO_AF3_TIM9) || ((AF) == GPIO_AF3_TIM10) || \ + ((AF) == GPIO_AF3_TIM11) || ((AF) == GPIO_AF3_LPTIM1) || \ + ((AF) == GPIO_AF3_CEC) || ((AF) == GPIO_AF4_CEC) || \ + ((AF) == GPIO_AF4_I2C1) || ((AF) == GPIO_AF4_I2C2) || \ + ((AF) == GPIO_AF4_I2C3) || ((AF) == GPIO_AF4_I2C4) || \ + ((AF) == GPIO_AF5_SPI1) || ((AF) == GPIO_AF5_SPI2) || \ + ((AF) == GPIO_AF5_SPI3) || ((AF) == GPIO_AF5_SPI4) || \ + ((AF) == GPIO_AF5_SPI5) || ((AF) == GPIO_AF5_SPI6) || \ + ((AF) == GPIO_AF6_SPI3) || ((AF) == GPIO_AF6_SAI1) || \ + ((AF) == GPIO_AF7_SPI3) || ((AF) == GPIO_AF7_SPI2) || \ + ((AF) == GPIO_AF7_USART1) || ((AF) == GPIO_AF7_USART2) || \ + ((AF) == GPIO_AF7_USART3) || ((AF) == GPIO_AF7_UART5) || \ + ((AF) == GPIO_AF7_SPDIFRX) || ((AF) == GPIO_AF8_SPDIFRX) || \ + ((AF) == GPIO_AF8_SAI2) || ((AF) == GPIO_AF8_USART6) || \ + ((AF) == GPIO_AF8_UART4) || ((AF) == GPIO_AF8_UART5) || \ + ((AF) == GPIO_AF8_UART7) || ((AF) == GPIO_AF8_UART8) || \ + ((AF) == GPIO_AF9_CAN1) || ((AF) == GPIO_AF9_CAN2) || \ + ((AF) == GPIO_AF9_TIM12) || ((AF) == GPIO_AF9_TIM12) || \ + ((AF) == GPIO_AF9_TIM14) || ((AF) == GPIO_AF9_QUADSPI) || \ + ((AF) == GPIO_AF13_DCMI) || ((AF) == GPIO_AF10_OTG_FS) || \ + ((AF) == GPIO_AF10_OTG_HS) || ((AF) == GPIO_AF10_SAI2) || \ + ((AF) == GPIO_AF10_QUADSPI) || ((AF) == GPIO_AF11_ETH) || \ + ((AF) == GPIO_AF12_OTG_HS_FS) || ((AF) == GPIO_AF12_SDMMC1) || \ + ((AF) == GPIO_AF12_FMC) || ((AF) == GPIO_AF15_EVENTOUT)) +#elif defined(STM32F767xx) || defined(STM32F777xx) +#define IS_GPIO_AF(AF) (((AF) == GPIO_AF0_RTC_50Hz) || ((AF) == GPIO_AF1_TIM1) || \ + ((AF) == GPIO_AF0_SWJ) || ((AF) == GPIO_AF0_TRACE) || \ + ((AF) == GPIO_AF0_MCO) || ((AF) == GPIO_AF1_TIM2) || \ + ((AF) == GPIO_AF2_TIM3) || ((AF) == GPIO_AF2_TIM4) || \ + ((AF) == GPIO_AF2_TIM5) || ((AF) == GPIO_AF3_TIM8) || \ + ((AF) == GPIO_AF3_TIM9) || ((AF) == GPIO_AF3_TIM10) || \ + ((AF) == GPIO_AF3_TIM11) || ((AF) == GPIO_AF3_LPTIM1) || \ + ((AF) == GPIO_AF3_CEC) || ((AF) == GPIO_AF4_CEC) || \ + ((AF) == GPIO_AF4_I2C1) || ((AF) == GPIO_AF4_I2C2) || \ + ((AF) == GPIO_AF4_I2C3) || ((AF) == GPIO_AF4_I2C4) || \ + ((AF) == GPIO_AF5_SPI1) || ((AF) == GPIO_AF5_SPI2) || \ + ((AF) == GPIO_AF5_SPI3) || ((AF) == GPIO_AF5_SPI4) || \ + ((AF) == GPIO_AF5_SPI5) || ((AF) == GPIO_AF5_SPI6) || \ + ((AF) == GPIO_AF6_SPI3) || ((AF) == GPIO_AF6_SAI1) || \ + ((AF) == GPIO_AF7_SPI3) || ((AF) == GPIO_AF7_SPI2) || \ + ((AF) == GPIO_AF7_USART1) || ((AF) == GPIO_AF7_USART2) || \ + ((AF) == GPIO_AF7_USART3) || ((AF) == GPIO_AF7_UART5) || \ + ((AF) == GPIO_AF7_SPDIFRX) || ((AF) == GPIO_AF8_SPDIFRX) || \ + ((AF) == GPIO_AF8_SAI2) || ((AF) == GPIO_AF8_USART6) || \ + ((AF) == GPIO_AF8_UART4) || ((AF) == GPIO_AF8_UART5) || \ + ((AF) == GPIO_AF8_UART7) || ((AF) == GPIO_AF8_UART8) || \ + ((AF) == GPIO_AF9_CAN1) || ((AF) == GPIO_AF9_CAN2) || \ + ((AF) == GPIO_AF9_TIM12) || ((AF) == GPIO_AF9_TIM12) || \ + ((AF) == GPIO_AF9_TIM14) || ((AF) == GPIO_AF9_QUADSPI) || \ + ((AF) == GPIO_AF10_OTG_FS) || ((AF) == GPIO_AF9_LTDC) 
|| \ + ((AF) == GPIO_AF10_OTG_HS) || ((AF) == GPIO_AF10_SAI2) || \ + ((AF) == GPIO_AF10_QUADSPI) || ((AF) == GPIO_AF11_ETH) || \ + ((AF) == GPIO_AF10_SDMMC2) || ((AF) == GPIO_AF11_SDMMC2) || \ + ((AF) == GPIO_AF11_CAN3) || ((AF) == GPIO_AF12_OTG_HS_FS) || \ + ((AF) == GPIO_AF12_SDMMC1) || ((AF) == GPIO_AF12_FMC) || \ + ((AF) == GPIO_AF15_EVENTOUT) || ((AF) == GPIO_AF13_DCMI) || \ + ((AF) == GPIO_AF14_LTDC)) +#elif defined(STM32F769xx) || defined(STM32F779xx) +#define IS_GPIO_AF(AF) (((AF) == GPIO_AF0_RTC_50Hz) || ((AF) == GPIO_AF1_TIM1) || \ + ((AF) == GPIO_AF0_SWJ) || ((AF) == GPIO_AF0_TRACE) || \ + ((AF) == GPIO_AF0_MCO) || ((AF) == GPIO_AF1_TIM2) || \ + ((AF) == GPIO_AF2_TIM3) || ((AF) == GPIO_AF2_TIM4) || \ + ((AF) == GPIO_AF2_TIM5) || ((AF) == GPIO_AF3_TIM8) || \ + ((AF) == GPIO_AF3_TIM9) || ((AF) == GPIO_AF3_TIM10) || \ + ((AF) == GPIO_AF3_TIM11) || ((AF) == GPIO_AF3_LPTIM1) || \ + ((AF) == GPIO_AF3_CEC) || ((AF) == GPIO_AF4_CEC) || \ + ((AF) == GPIO_AF4_I2C1) || ((AF) == GPIO_AF4_I2C2) || \ + ((AF) == GPIO_AF4_I2C3) || ((AF) == GPIO_AF4_I2C4) || \ + ((AF) == GPIO_AF5_SPI1) || ((AF) == GPIO_AF5_SPI2) || \ + ((AF) == GPIO_AF5_SPI3) || ((AF) == GPIO_AF5_SPI4) || \ + ((AF) == GPIO_AF5_SPI5) || ((AF) == GPIO_AF5_SPI6) || \ + ((AF) == GPIO_AF6_SPI3) || ((AF) == GPIO_AF6_SAI1) || \ + ((AF) == GPIO_AF7_SPI3) || ((AF) == GPIO_AF7_SPI2) || \ + ((AF) == GPIO_AF7_USART1) || ((AF) == GPIO_AF7_USART2) || \ + ((AF) == GPIO_AF7_USART3) || ((AF) == GPIO_AF7_UART5) || \ + ((AF) == GPIO_AF7_SPDIFRX) || ((AF) == GPIO_AF8_SPDIFRX) || \ + ((AF) == GPIO_AF8_SAI2) || ((AF) == GPIO_AF8_USART6) || \ + ((AF) == GPIO_AF8_UART4) || ((AF) == GPIO_AF8_UART5) || \ + ((AF) == GPIO_AF8_UART7) || ((AF) == GPIO_AF8_UART8) || \ + ((AF) == GPIO_AF9_CAN1) || ((AF) == GPIO_AF9_CAN2) || \ + ((AF) == GPIO_AF9_TIM12) || ((AF) == GPIO_AF9_TIM12) || \ + ((AF) == GPIO_AF9_TIM14) || ((AF) == GPIO_AF9_QUADSPI) || \ + ((AF) == GPIO_AF9_LTDC) || ((AF) == GPIO_AF10_OTG_FS) || \ + ((AF) == GPIO_AF10_OTG_HS) || ((AF) == GPIO_AF10_SAI2) || \ + ((AF) == GPIO_AF10_QUADSPI) || ((AF) == GPIO_AF11_ETH) || \ + ((AF) == GPIO_AF10_SDMMC2) || ((AF) == GPIO_AF11_SDMMC2) || \ + ((AF) == GPIO_AF11_CAN3) || ((AF) == GPIO_AF12_OTG_HS_FS) || \ + ((AF) == GPIO_AF12_SDMMC1) || ((AF) == GPIO_AF12_FMC) || \ + ((AF) == GPIO_AF15_EVENTOUT) || ((AF) == GPIO_AF13_DCMI) || \ + ((AF) == GPIO_AF14_LTDC) || ((AF) == GPIO_AF13_DSI)) +#elif defined(STM32F765xx) +#define IS_GPIO_AF(AF) (((AF) == GPIO_AF0_RTC_50Hz) || ((AF) == GPIO_AF1_TIM1) || \ + ((AF) == GPIO_AF0_SWJ) || ((AF) == GPIO_AF0_TRACE) || \ + ((AF) == GPIO_AF0_MCO) || ((AF) == GPIO_AF1_TIM2) || \ + ((AF) == GPIO_AF2_TIM3) || ((AF) == GPIO_AF2_TIM4) || \ + ((AF) == GPIO_AF2_TIM5) || ((AF) == GPIO_AF3_TIM8) || \ + ((AF) == GPIO_AF3_TIM9) || ((AF) == GPIO_AF3_TIM10) || \ + ((AF) == GPIO_AF3_TIM11) || ((AF) == GPIO_AF3_LPTIM1) || \ + ((AF) == GPIO_AF3_CEC) || ((AF) == GPIO_AF4_CEC) || \ + ((AF) == GPIO_AF4_I2C1) || ((AF) == GPIO_AF4_I2C2) || \ + ((AF) == GPIO_AF4_I2C3) || ((AF) == GPIO_AF4_I2C4) || \ + ((AF) == GPIO_AF5_SPI1) || ((AF) == GPIO_AF5_SPI2) || \ + ((AF) == GPIO_AF5_SPI3) || ((AF) == GPIO_AF5_SPI4) || \ + ((AF) == GPIO_AF5_SPI5) || ((AF) == GPIO_AF5_SPI6) || \ + ((AF) == GPIO_AF6_SPI3) || ((AF) == GPIO_AF6_SAI1) || \ + ((AF) == GPIO_AF7_SPI3) || ((AF) == GPIO_AF7_SPI2) || \ + ((AF) == GPIO_AF7_USART1) || ((AF) == GPIO_AF7_USART2) || \ + ((AF) == GPIO_AF7_USART3) || ((AF) == GPIO_AF7_UART5) || \ + ((AF) == GPIO_AF7_SPDIFRX) || ((AF) == GPIO_AF8_SPDIFRX) || \ + ((AF) == GPIO_AF8_SAI2) || ((AF) == 
GPIO_AF8_USART6) || \ + ((AF) == GPIO_AF8_UART4) || ((AF) == GPIO_AF8_UART5) || \ + ((AF) == GPIO_AF8_UART7) || ((AF) == GPIO_AF8_UART8) || \ + ((AF) == GPIO_AF9_CAN1) || ((AF) == GPIO_AF9_CAN2) || \ + ((AF) == GPIO_AF9_TIM12) || ((AF) == GPIO_AF9_TIM12) || \ + ((AF) == GPIO_AF9_TIM14) || ((AF) == GPIO_AF9_QUADSPI) || \ + ((AF) == GPIO_AF10_OTG_HS) || ((AF) == GPIO_AF10_SAI2) || \ + ((AF) == GPIO_AF10_QUADSPI) || ((AF) == GPIO_AF11_ETH) || \ + ((AF) == GPIO_AF10_SDMMC2) || ((AF) == GPIO_AF11_SDMMC2) || \ + ((AF) == GPIO_AF11_CAN3) || ((AF) == GPIO_AF12_OTG_HS_FS) || \ + ((AF) == GPIO_AF12_SDMMC1) || ((AF) == GPIO_AF12_FMC) || \ + ((AF) == GPIO_AF15_EVENTOUT) || ((AF) == GPIO_AF13_DCMI) || \ + ((AF) == GPIO_AF10_OTG_FS)) +#elif defined (STM32F722xx) || defined (STM32F723xx) || defined (STM32F732xx) || defined (STM32F733xx) || defined (STM32F730xx) +#define IS_GPIO_AF(AF) (((AF) == GPIO_AF0_RTC_50Hz) || ((AF) == GPIO_AF1_TIM1) || \ + ((AF) == GPIO_AF0_SWJ) || ((AF) == GPIO_AF0_TRACE) || \ + ((AF) == GPIO_AF0_MCO) || ((AF) == GPIO_AF1_TIM2) || \ + ((AF) == GPIO_AF2_TIM3) || ((AF) == GPIO_AF2_TIM4) || \ + ((AF) == GPIO_AF2_TIM5) || ((AF) == GPIO_AF3_TIM8) || \ + ((AF) == GPIO_AF3_TIM9) || ((AF) == GPIO_AF3_TIM10) || \ + ((AF) == GPIO_AF3_TIM11) || ((AF) == GPIO_AF3_LPTIM1) || \ + ((AF) == GPIO_AF4_I2C1) || ((AF) == GPIO_AF4_I2C2) || \ + ((AF) == GPIO_AF4_I2C3) || ((AF) == GPIO_AF5_SPI1) || \ + ((AF) == GPIO_AF5_SPI2) || ((AF) == GPIO_AF5_SPI3) || \ + ((AF) == GPIO_AF5_SPI4) || ((AF) == GPIO_AF5_SPI5) || \ + ((AF) == GPIO_AF6_SPI3) || ((AF) == GPIO_AF6_SAI1) || \ + ((AF) == GPIO_AF7_SPI3) || ((AF) == GPIO_AF7_SPI2) || \ + ((AF) == GPIO_AF7_USART1) || ((AF) == GPIO_AF7_USART2) || \ + ((AF) == GPIO_AF7_USART3) || ((AF) == GPIO_AF7_UART5) || \ + ((AF) == GPIO_AF8_SAI2) || ((AF) == GPIO_AF8_USART6) || \ + ((AF) == GPIO_AF8_UART4) || ((AF) == GPIO_AF8_UART5) || \ + ((AF) == GPIO_AF8_UART7) || ((AF) == GPIO_AF8_UART8) || \ + ((AF) == GPIO_AF9_CAN1) || ((AF) == GPIO_AF9_TIM12) || \ + ((AF) == GPIO_AF9_TIM12) || ((AF) == GPIO_AF9_TIM14) || \ + ((AF) == GPIO_AF9_QUADSPI) || ((AF) == GPIO_AF10_OTG_HS) || \ + ((AF) == GPIO_AF10_SAI2) || ((AF) == GPIO_AF10_QUADSPI) || \ + ((AF) == GPIO_AF10_SDMMC2) || ((AF) == GPIO_AF11_SDMMC2) || \ + ((AF) == GPIO_AF12_OTG_HS_FS) || ((AF) == GPIO_AF12_SDMMC1) || \ + ((AF) == GPIO_AF12_FMC) || ((AF) == GPIO_AF15_EVENTOUT) || \ + ((AF) == GPIO_AF10_OTG_FS)) +#endif /* STM32F756xx || STM32F746xx || STM32F750xx */ +/** + * @} + */ + +/** + * @} + */ + +/* Private functions ---------------------------------------------------------*/ +/** @defgroup GPIOEx_Private_Functions GPIO Private Functions + * @{ + */ + +/** + * @} + */ + +/** + * @} + */ + +/** + * @} + */ + +#ifdef __cplusplus +} +#endif + +#endif /* __STM32F7xx_HAL_GPIO_EX_H */ + diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_i2c.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_i2c.h new file mode 100644 index 0000000..89fc40f --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_i2c.h @@ -0,0 +1,835 @@ +/** + ****************************************************************************** + * @file stm32f7xx_hal_i2c.h + * @author MCD Application Team + * @brief Header file of I2C HAL module. + ****************************************************************************** + * @attention + * + * Copyright (c) 2017 STMicroelectronics. 
+ * All rights reserved. + * + * This software is licensed under terms that can be found in the LICENSE file + * in the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. + * + ****************************************************************************** + */ + +/* Define to prevent recursive inclusion -------------------------------------*/ +#ifndef STM32F7xx_HAL_I2C_H +#define STM32F7xx_HAL_I2C_H + +#ifdef __cplusplus +extern "C" { +#endif + +/* Includes ------------------------------------------------------------------*/ +#include "stm32f7xx_hal_def.h" + +/** @addtogroup STM32F7xx_HAL_Driver + * @{ + */ + +/** @addtogroup I2C + * @{ + */ + +/* Exported types ------------------------------------------------------------*/ +/** @defgroup I2C_Exported_Types I2C Exported Types + * @{ + */ + +/** @defgroup I2C_Configuration_Structure_definition I2C Configuration Structure definition + * @brief I2C Configuration Structure definition + * @{ + */ +typedef struct +{ + uint32_t Timing; /*!< Specifies the I2C_TIMINGR_register value. + This parameter calculated by referring to I2C initialization section + in Reference manual */ + + uint32_t OwnAddress1; /*!< Specifies the first device own address. + This parameter can be a 7-bit or 10-bit address. */ + + uint32_t AddressingMode; /*!< Specifies if 7-bit or 10-bit addressing mode is selected. + This parameter can be a value of @ref I2C_ADDRESSING_MODE */ + + uint32_t DualAddressMode; /*!< Specifies if dual addressing mode is selected. + This parameter can be a value of @ref I2C_DUAL_ADDRESSING_MODE */ + + uint32_t OwnAddress2; /*!< Specifies the second device own address if dual addressing mode is selected + This parameter can be a 7-bit address. */ + + uint32_t OwnAddress2Masks; /*!< Specifies the acknowledge mask address second device own address if dual addressing + mode is selected. + This parameter can be a value of @ref I2C_OWN_ADDRESS2_MASKS */ + + uint32_t GeneralCallMode; /*!< Specifies if general call mode is selected. + This parameter can be a value of @ref I2C_GENERAL_CALL_ADDRESSING_MODE */ + + uint32_t NoStretchMode; /*!< Specifies if nostretch mode is selected. + This parameter can be a value of @ref I2C_NOSTRETCH_MODE */ + +} I2C_InitTypeDef; + +/** + * @} + */ + +/** @defgroup HAL_state_structure_definition HAL state structure definition + * @brief HAL State structure definition + * @note HAL I2C State value coding follow below described bitmap :\n + * b7-b6 Error information\n + * 00 : No Error\n + * 01 : Abort (Abort user request on going)\n + * 10 : Timeout\n + * 11 : Error\n + * b5 Peripheral initialization status\n + * 0 : Reset (peripheral not initialized)\n + * 1 : Init done (peripheral initialized and ready to use. 
HAL I2C Init function called)\n + * b4 (not used)\n + * x : Should be set to 0\n + * b3\n + * 0 : Ready or Busy (No Listen mode ongoing)\n + * 1 : Listen (peripheral in Address Listen Mode)\n + * b2 Intrinsic process state\n + * 0 : Ready\n + * 1 : Busy (peripheral busy with some configuration or internal operations)\n + * b1 Rx state\n + * 0 : Ready (no Rx operation ongoing)\n + * 1 : Busy (Rx operation ongoing)\n + * b0 Tx state\n + * 0 : Ready (no Tx operation ongoing)\n + * 1 : Busy (Tx operation ongoing) + * @{ + */ +typedef enum +{ + HAL_I2C_STATE_RESET = 0x00U, /*!< Peripheral is not yet Initialized */ + HAL_I2C_STATE_READY = 0x20U, /*!< Peripheral Initialized and ready for use */ + HAL_I2C_STATE_BUSY = 0x24U, /*!< An internal process is ongoing */ + HAL_I2C_STATE_BUSY_TX = 0x21U, /*!< Data Transmission process is ongoing */ + HAL_I2C_STATE_BUSY_RX = 0x22U, /*!< Data Reception process is ongoing */ + HAL_I2C_STATE_LISTEN = 0x28U, /*!< Address Listen Mode is ongoing */ + HAL_I2C_STATE_BUSY_TX_LISTEN = 0x29U, /*!< Address Listen Mode and Data Transmission + process is ongoing */ + HAL_I2C_STATE_BUSY_RX_LISTEN = 0x2AU, /*!< Address Listen Mode and Data Reception + process is ongoing */ + HAL_I2C_STATE_ABORT = 0x60U, /*!< Abort user request ongoing */ + HAL_I2C_STATE_TIMEOUT = 0xA0U, /*!< Timeout state */ + HAL_I2C_STATE_ERROR = 0xE0U /*!< Error */ + +} HAL_I2C_StateTypeDef; + +/** + * @} + */ + +/** @defgroup HAL_mode_structure_definition HAL mode structure definition + * @brief HAL Mode structure definition + * @note HAL I2C Mode value coding follow below described bitmap :\n + * b7 (not used)\n + * x : Should be set to 0\n + * b6\n + * 0 : None\n + * 1 : Memory (HAL I2C communication is in Memory Mode)\n + * b5\n + * 0 : None\n + * 1 : Slave (HAL I2C communication is in Slave Mode)\n + * b4\n + * 0 : None\n + * 1 : Master (HAL I2C communication is in Master Mode)\n + * b3-b2-b1-b0 (not used)\n + * xxxx : Should be set to 0000 + * @{ + */ +typedef enum +{ + HAL_I2C_MODE_NONE = 0x00U, /*!< No I2C communication on going */ + HAL_I2C_MODE_MASTER = 0x10U, /*!< I2C communication is in Master Mode */ + HAL_I2C_MODE_SLAVE = 0x20U, /*!< I2C communication is in Slave Mode */ + HAL_I2C_MODE_MEM = 0x40U /*!< I2C communication is in Memory Mode */ + +} HAL_I2C_ModeTypeDef; + +/** + * @} + */ + +/** @defgroup I2C_Error_Code_definition I2C Error Code definition + * @brief I2C Error Code definition + * @{ + */ +#define HAL_I2C_ERROR_NONE (0x00000000U) /*!< No error */ +#define HAL_I2C_ERROR_BERR (0x00000001U) /*!< BERR error */ +#define HAL_I2C_ERROR_ARLO (0x00000002U) /*!< ARLO error */ +#define HAL_I2C_ERROR_AF (0x00000004U) /*!< ACKF error */ +#define HAL_I2C_ERROR_OVR (0x00000008U) /*!< OVR error */ +#define HAL_I2C_ERROR_DMA (0x00000010U) /*!< DMA transfer error */ +#define HAL_I2C_ERROR_TIMEOUT (0x00000020U) /*!< Timeout error */ +#define HAL_I2C_ERROR_SIZE (0x00000040U) /*!< Size Management error */ +#define HAL_I2C_ERROR_DMA_PARAM (0x00000080U) /*!< DMA Parameter Error */ +#if (USE_HAL_I2C_REGISTER_CALLBACKS == 1) +#define HAL_I2C_ERROR_INVALID_CALLBACK (0x00000100U) /*!< Invalid Callback error */ +#endif /* USE_HAL_I2C_REGISTER_CALLBACKS */ +#define HAL_I2C_ERROR_INVALID_PARAM (0x00000200U) /*!< Invalid Parameters error */ +/** + * @} + */ + +/** @defgroup I2C_handle_Structure_definition I2C handle Structure definition + * @brief I2C handle Structure definition + * @{ + */ +typedef struct __I2C_HandleTypeDef +{ + I2C_TypeDef *Instance; /*!< I2C registers base address */ + + I2C_InitTypeDef 
Init; /*!< I2C communication parameters */ + + uint8_t *pBuffPtr; /*!< Pointer to I2C transfer buffer */ + + uint16_t XferSize; /*!< I2C transfer size */ + + __IO uint16_t XferCount; /*!< I2C transfer counter */ + + __IO uint32_t XferOptions; /*!< I2C sequantial transfer options, this parameter can + be a value of @ref I2C_XFEROPTIONS */ + + __IO uint32_t PreviousState; /*!< I2C communication Previous state */ + + HAL_StatusTypeDef(*XferISR)(struct __I2C_HandleTypeDef *hi2c, uint32_t ITFlags, uint32_t ITSources); + /*!< I2C transfer IRQ handler function pointer */ + + DMA_HandleTypeDef *hdmatx; /*!< I2C Tx DMA handle parameters */ + + DMA_HandleTypeDef *hdmarx; /*!< I2C Rx DMA handle parameters */ + + HAL_LockTypeDef Lock; /*!< I2C locking object */ + + __IO HAL_I2C_StateTypeDef State; /*!< I2C communication state */ + + __IO HAL_I2C_ModeTypeDef Mode; /*!< I2C communication mode */ + + __IO uint32_t ErrorCode; /*!< I2C Error code */ + + __IO uint32_t AddrEventCount; /*!< I2C Address Event counter */ + +#if (USE_HAL_I2C_REGISTER_CALLBACKS == 1) + void (* MasterTxCpltCallback)(struct __I2C_HandleTypeDef *hi2c); + /*!< I2C Master Tx Transfer completed callback */ + void (* MasterRxCpltCallback)(struct __I2C_HandleTypeDef *hi2c); + /*!< I2C Master Rx Transfer completed callback */ + void (* SlaveTxCpltCallback)(struct __I2C_HandleTypeDef *hi2c); + /*!< I2C Slave Tx Transfer completed callback */ + void (* SlaveRxCpltCallback)(struct __I2C_HandleTypeDef *hi2c); + /*!< I2C Slave Rx Transfer completed callback */ + void (* ListenCpltCallback)(struct __I2C_HandleTypeDef *hi2c); + /*!< I2C Listen Complete callback */ + void (* MemTxCpltCallback)(struct __I2C_HandleTypeDef *hi2c); + /*!< I2C Memory Tx Transfer completed callback */ + void (* MemRxCpltCallback)(struct __I2C_HandleTypeDef *hi2c); + /*!< I2C Memory Rx Transfer completed callback */ + void (* ErrorCallback)(struct __I2C_HandleTypeDef *hi2c); + /*!< I2C Error callback */ + void (* AbortCpltCallback)(struct __I2C_HandleTypeDef *hi2c); + /*!< I2C Abort callback */ + + void (* AddrCallback)(struct __I2C_HandleTypeDef *hi2c, uint8_t TransferDirection, uint16_t AddrMatchCode); + /*!< I2C Slave Address Match callback */ + + void (* MspInitCallback)(struct __I2C_HandleTypeDef *hi2c); + /*!< I2C Msp Init callback */ + void (* MspDeInitCallback)(struct __I2C_HandleTypeDef *hi2c); + /*!< I2C Msp DeInit callback */ + +#endif /* USE_HAL_I2C_REGISTER_CALLBACKS */ +} I2C_HandleTypeDef; + +#if (USE_HAL_I2C_REGISTER_CALLBACKS == 1) +/** + * @brief HAL I2C Callback ID enumeration definition + */ +typedef enum +{ + HAL_I2C_MASTER_TX_COMPLETE_CB_ID = 0x00U, /*!< I2C Master Tx Transfer completed callback ID */ + HAL_I2C_MASTER_RX_COMPLETE_CB_ID = 0x01U, /*!< I2C Master Rx Transfer completed callback ID */ + HAL_I2C_SLAVE_TX_COMPLETE_CB_ID = 0x02U, /*!< I2C Slave Tx Transfer completed callback ID */ + HAL_I2C_SLAVE_RX_COMPLETE_CB_ID = 0x03U, /*!< I2C Slave Rx Transfer completed callback ID */ + HAL_I2C_LISTEN_COMPLETE_CB_ID = 0x04U, /*!< I2C Listen Complete callback ID */ + HAL_I2C_MEM_TX_COMPLETE_CB_ID = 0x05U, /*!< I2C Memory Tx Transfer callback ID */ + HAL_I2C_MEM_RX_COMPLETE_CB_ID = 0x06U, /*!< I2C Memory Rx Transfer completed callback ID */ + HAL_I2C_ERROR_CB_ID = 0x07U, /*!< I2C Error callback ID */ + HAL_I2C_ABORT_CB_ID = 0x08U, /*!< I2C Abort callback ID */ + + HAL_I2C_MSPINIT_CB_ID = 0x09U, /*!< I2C Msp Init callback ID */ + HAL_I2C_MSPDEINIT_CB_ID = 0x0AU /*!< I2C Msp DeInit callback ID */ + +} HAL_I2C_CallbackIDTypeDef; + +/** + * @brief HAL I2C 
Callback pointer definition + */ +typedef void (*pI2C_CallbackTypeDef)(I2C_HandleTypeDef *hi2c); +/*!< pointer to an I2C callback function */ +typedef void (*pI2C_AddrCallbackTypeDef)(I2C_HandleTypeDef *hi2c, uint8_t TransferDirection, + uint16_t AddrMatchCode); +/*!< pointer to an I2C Address Match callback function */ + +#endif /* USE_HAL_I2C_REGISTER_CALLBACKS */ +/** + * @} + */ + +/** + * @} + */ +/* Exported constants --------------------------------------------------------*/ + +/** @defgroup I2C_Exported_Constants I2C Exported Constants + * @{ + */ + +/** @defgroup I2C_XFEROPTIONS I2C Sequential Transfer Options + * @{ + */ +#define I2C_FIRST_FRAME ((uint32_t)I2C_SOFTEND_MODE) +#define I2C_FIRST_AND_NEXT_FRAME ((uint32_t)(I2C_RELOAD_MODE | I2C_SOFTEND_MODE)) +#define I2C_NEXT_FRAME ((uint32_t)(I2C_RELOAD_MODE | I2C_SOFTEND_MODE)) +#define I2C_FIRST_AND_LAST_FRAME ((uint32_t)I2C_AUTOEND_MODE) +#define I2C_LAST_FRAME ((uint32_t)I2C_AUTOEND_MODE) +#define I2C_LAST_FRAME_NO_STOP ((uint32_t)I2C_SOFTEND_MODE) + +/* List of XferOptions in usage of : + * 1- Restart condition in all use cases (direction change or not) + */ +#define I2C_OTHER_FRAME (0x000000AAU) +#define I2C_OTHER_AND_LAST_FRAME (0x0000AA00U) +/** + * @} + */ + +/** @defgroup I2C_ADDRESSING_MODE I2C Addressing Mode + * @{ + */ +#define I2C_ADDRESSINGMODE_7BIT (0x00000001U) +#define I2C_ADDRESSINGMODE_10BIT (0x00000002U) +/** + * @} + */ + +/** @defgroup I2C_DUAL_ADDRESSING_MODE I2C Dual Addressing Mode + * @{ + */ +#define I2C_DUALADDRESS_DISABLE (0x00000000U) +#define I2C_DUALADDRESS_ENABLE I2C_OAR2_OA2EN +/** + * @} + */ + +/** @defgroup I2C_OWN_ADDRESS2_MASKS I2C Own Address2 Masks + * @{ + */ +#define I2C_OA2_NOMASK ((uint8_t)0x00U) +#define I2C_OA2_MASK01 ((uint8_t)0x01U) +#define I2C_OA2_MASK02 ((uint8_t)0x02U) +#define I2C_OA2_MASK03 ((uint8_t)0x03U) +#define I2C_OA2_MASK04 ((uint8_t)0x04U) +#define I2C_OA2_MASK05 ((uint8_t)0x05U) +#define I2C_OA2_MASK06 ((uint8_t)0x06U) +#define I2C_OA2_MASK07 ((uint8_t)0x07U) +/** + * @} + */ + +/** @defgroup I2C_GENERAL_CALL_ADDRESSING_MODE I2C General Call Addressing Mode + * @{ + */ +#define I2C_GENERALCALL_DISABLE (0x00000000U) +#define I2C_GENERALCALL_ENABLE I2C_CR1_GCEN +/** + * @} + */ + +/** @defgroup I2C_NOSTRETCH_MODE I2C No-Stretch Mode + * @{ + */ +#define I2C_NOSTRETCH_DISABLE (0x00000000U) +#define I2C_NOSTRETCH_ENABLE I2C_CR1_NOSTRETCH +/** + * @} + */ + +/** @defgroup I2C_MEMORY_ADDRESS_SIZE I2C Memory Address Size + * @{ + */ +#define I2C_MEMADD_SIZE_8BIT (0x00000001U) +#define I2C_MEMADD_SIZE_16BIT (0x00000002U) +/** + * @} + */ + +/** @defgroup I2C_XFERDIRECTION I2C Transfer Direction Master Point of View + * @{ + */ +#define I2C_DIRECTION_TRANSMIT (0x00000000U) +#define I2C_DIRECTION_RECEIVE (0x00000001U) +/** + * @} + */ + +/** @defgroup I2C_RELOAD_END_MODE I2C Reload End Mode + * @{ + */ +#define I2C_RELOAD_MODE I2C_CR2_RELOAD +#define I2C_AUTOEND_MODE I2C_CR2_AUTOEND +#define I2C_SOFTEND_MODE (0x00000000U) +/** + * @} + */ + +/** @defgroup I2C_START_STOP_MODE I2C Start or Stop Mode + * @{ + */ +#define I2C_NO_STARTSTOP (0x00000000U) +#define I2C_GENERATE_STOP (uint32_t)(0x80000000U | I2C_CR2_STOP) +#define I2C_GENERATE_START_READ (uint32_t)(0x80000000U | I2C_CR2_START | I2C_CR2_RD_WRN) +#define I2C_GENERATE_START_WRITE (uint32_t)(0x80000000U | I2C_CR2_START) +/** + * @} + */ + +/** @defgroup I2C_Interrupt_configuration_definition I2C Interrupt configuration definition + * @brief I2C Interrupt definition + * Elements values convention: 0xXXXXXXXX + * - XXXXXXXX 
: Interrupt control mask + * @{ + */ +#define I2C_IT_ERRI I2C_CR1_ERRIE +#define I2C_IT_TCI I2C_CR1_TCIE +#define I2C_IT_STOPI I2C_CR1_STOPIE +#define I2C_IT_NACKI I2C_CR1_NACKIE +#define I2C_IT_ADDRI I2C_CR1_ADDRIE +#define I2C_IT_RXI I2C_CR1_RXIE +#define I2C_IT_TXI I2C_CR1_TXIE +/** + * @} + */ + +/** @defgroup I2C_Flag_definition I2C Flag definition + * @{ + */ +#define I2C_FLAG_TXE I2C_ISR_TXE +#define I2C_FLAG_TXIS I2C_ISR_TXIS +#define I2C_FLAG_RXNE I2C_ISR_RXNE +#define I2C_FLAG_ADDR I2C_ISR_ADDR +#define I2C_FLAG_AF I2C_ISR_NACKF +#define I2C_FLAG_STOPF I2C_ISR_STOPF +#define I2C_FLAG_TC I2C_ISR_TC +#define I2C_FLAG_TCR I2C_ISR_TCR +#define I2C_FLAG_BERR I2C_ISR_BERR +#define I2C_FLAG_ARLO I2C_ISR_ARLO +#define I2C_FLAG_OVR I2C_ISR_OVR +#define I2C_FLAG_PECERR I2C_ISR_PECERR +#define I2C_FLAG_TIMEOUT I2C_ISR_TIMEOUT +#define I2C_FLAG_ALERT I2C_ISR_ALERT +#define I2C_FLAG_BUSY I2C_ISR_BUSY +#define I2C_FLAG_DIR I2C_ISR_DIR +/** + * @} + */ + +/** + * @} + */ + +/* Exported macros -----------------------------------------------------------*/ + +/** @defgroup I2C_Exported_Macros I2C Exported Macros + * @{ + */ + +/** @brief Reset I2C handle state. + * @param __HANDLE__ specifies the I2C Handle. + * @retval None + */ +#if (USE_HAL_I2C_REGISTER_CALLBACKS == 1) +#define __HAL_I2C_RESET_HANDLE_STATE(__HANDLE__) do{ \ + (__HANDLE__)->State = HAL_I2C_STATE_RESET; \ + (__HANDLE__)->MspInitCallback = NULL; \ + (__HANDLE__)->MspDeInitCallback = NULL; \ + } while(0) +#else +#define __HAL_I2C_RESET_HANDLE_STATE(__HANDLE__) ((__HANDLE__)->State = HAL_I2C_STATE_RESET) +#endif /* USE_HAL_I2C_REGISTER_CALLBACKS */ + +/** @brief Enable the specified I2C interrupt. + * @param __HANDLE__ specifies the I2C Handle. + * @param __INTERRUPT__ specifies the interrupt source to enable. + * This parameter can be one of the following values: + * @arg @ref I2C_IT_ERRI Errors interrupt enable + * @arg @ref I2C_IT_TCI Transfer complete interrupt enable + * @arg @ref I2C_IT_STOPI STOP detection interrupt enable + * @arg @ref I2C_IT_NACKI NACK received interrupt enable + * @arg @ref I2C_IT_ADDRI Address match interrupt enable + * @arg @ref I2C_IT_RXI RX interrupt enable + * @arg @ref I2C_IT_TXI TX interrupt enable + * + * @retval None + */ +#define __HAL_I2C_ENABLE_IT(__HANDLE__, __INTERRUPT__) ((__HANDLE__)->Instance->CR1 |= (__INTERRUPT__)) + +/** @brief Disable the specified I2C interrupt. + * @param __HANDLE__ specifies the I2C Handle. + * @param __INTERRUPT__ specifies the interrupt source to disable. + * This parameter can be one of the following values: + * @arg @ref I2C_IT_ERRI Errors interrupt enable + * @arg @ref I2C_IT_TCI Transfer complete interrupt enable + * @arg @ref I2C_IT_STOPI STOP detection interrupt enable + * @arg @ref I2C_IT_NACKI NACK received interrupt enable + * @arg @ref I2C_IT_ADDRI Address match interrupt enable + * @arg @ref I2C_IT_RXI RX interrupt enable + * @arg @ref I2C_IT_TXI TX interrupt enable + * + * @retval None + */ +#define __HAL_I2C_DISABLE_IT(__HANDLE__, __INTERRUPT__) ((__HANDLE__)->Instance->CR1 &= (~(__INTERRUPT__))) + +/** @brief Check whether the specified I2C interrupt source is enabled or not. + * @param __HANDLE__ specifies the I2C Handle. + * @param __INTERRUPT__ specifies the I2C interrupt source to check. 
+ * This parameter can be one of the following values: + * @arg @ref I2C_IT_ERRI Errors interrupt enable + * @arg @ref I2C_IT_TCI Transfer complete interrupt enable + * @arg @ref I2C_IT_STOPI STOP detection interrupt enable + * @arg @ref I2C_IT_NACKI NACK received interrupt enable + * @arg @ref I2C_IT_ADDRI Address match interrupt enable + * @arg @ref I2C_IT_RXI RX interrupt enable + * @arg @ref I2C_IT_TXI TX interrupt enable + * + * @retval The new state of __INTERRUPT__ (SET or RESET). + */ +#define __HAL_I2C_GET_IT_SOURCE(__HANDLE__, __INTERRUPT__) ((((__HANDLE__)->Instance->CR1 & \ + (__INTERRUPT__)) == (__INTERRUPT__)) ? SET : RESET) + +/** @brief Check whether the specified I2C flag is set or not. + * @param __HANDLE__ specifies the I2C Handle. + * @param __FLAG__ specifies the flag to check. + * This parameter can be one of the following values: + * @arg @ref I2C_FLAG_TXE Transmit data register empty + * @arg @ref I2C_FLAG_TXIS Transmit interrupt status + * @arg @ref I2C_FLAG_RXNE Receive data register not empty + * @arg @ref I2C_FLAG_ADDR Address matched (slave mode) + * @arg @ref I2C_FLAG_AF Acknowledge failure received flag + * @arg @ref I2C_FLAG_STOPF STOP detection flag + * @arg @ref I2C_FLAG_TC Transfer complete (master mode) + * @arg @ref I2C_FLAG_TCR Transfer complete reload + * @arg @ref I2C_FLAG_BERR Bus error + * @arg @ref I2C_FLAG_ARLO Arbitration lost + * @arg @ref I2C_FLAG_OVR Overrun/Underrun + * @arg @ref I2C_FLAG_PECERR PEC error in reception + * @arg @ref I2C_FLAG_TIMEOUT Timeout or Tlow detection flag + * @arg @ref I2C_FLAG_ALERT SMBus alert + * @arg @ref I2C_FLAG_BUSY Bus busy + * @arg @ref I2C_FLAG_DIR Transfer direction (slave mode) + * + * @retval The new state of __FLAG__ (SET or RESET). + */ +#define I2C_FLAG_MASK (0x0001FFFFU) +#define __HAL_I2C_GET_FLAG(__HANDLE__, __FLAG__) (((((__HANDLE__)->Instance->ISR) & \ + (__FLAG__)) == (__FLAG__)) ? SET : RESET) + +/** @brief Clear the I2C pending flags which are cleared by writing 1 in a specific bit. + * @param __HANDLE__ specifies the I2C Handle. + * @param __FLAG__ specifies the flag to clear. + * This parameter can be any combination of the following values: + * @arg @ref I2C_FLAG_TXE Transmit data register empty + * @arg @ref I2C_FLAG_ADDR Address matched (slave mode) + * @arg @ref I2C_FLAG_AF Acknowledge failure received flag + * @arg @ref I2C_FLAG_STOPF STOP detection flag + * @arg @ref I2C_FLAG_BERR Bus error + * @arg @ref I2C_FLAG_ARLO Arbitration lost + * @arg @ref I2C_FLAG_OVR Overrun/Underrun + * @arg @ref I2C_FLAG_PECERR PEC error in reception + * @arg @ref I2C_FLAG_TIMEOUT Timeout or Tlow detection flag + * @arg @ref I2C_FLAG_ALERT SMBus alert + * + * @retval None + */ +#define __HAL_I2C_CLEAR_FLAG(__HANDLE__, __FLAG__) (((__FLAG__) == I2C_FLAG_TXE) ? \ + ((__HANDLE__)->Instance->ISR |= (__FLAG__)) : \ + ((__HANDLE__)->Instance->ICR = (__FLAG__))) + +/** @brief Enable the specified I2C peripheral. + * @param __HANDLE__ specifies the I2C Handle. + * @retval None + */ +#define __HAL_I2C_ENABLE(__HANDLE__) (SET_BIT((__HANDLE__)->Instance->CR1, I2C_CR1_PE)) + +/** @brief Disable the specified I2C peripheral. + * @param __HANDLE__ specifies the I2C Handle. + * @retval None + */ +#define __HAL_I2C_DISABLE(__HANDLE__) (CLEAR_BIT((__HANDLE__)->Instance->CR1, I2C_CR1_PE)) + +/** @brief Generate a Non-Acknowledge I2C peripheral in Slave mode. + * @param __HANDLE__ specifies the I2C Handle. 
+ * @retval None + */ +#define __HAL_I2C_GENERATE_NACK(__HANDLE__) (SET_BIT((__HANDLE__)->Instance->CR2, I2C_CR2_NACK)) +/** + * @} + */ + +/* Include I2C HAL Extended module */ +#include "stm32f7xx_hal_i2c_ex.h" + +/* Exported functions --------------------------------------------------------*/ +/** @addtogroup I2C_Exported_Functions + * @{ + */ + +/** @addtogroup I2C_Exported_Functions_Group1 Initialization and de-initialization functions + * @{ + */ +/* Initialization and de-initialization functions******************************/ +HAL_StatusTypeDef HAL_I2C_Init(I2C_HandleTypeDef *hi2c); +HAL_StatusTypeDef HAL_I2C_DeInit(I2C_HandleTypeDef *hi2c); +void HAL_I2C_MspInit(I2C_HandleTypeDef *hi2c); +void HAL_I2C_MspDeInit(I2C_HandleTypeDef *hi2c); + +/* Callbacks Register/UnRegister functions ***********************************/ +#if (USE_HAL_I2C_REGISTER_CALLBACKS == 1) +HAL_StatusTypeDef HAL_I2C_RegisterCallback(I2C_HandleTypeDef *hi2c, HAL_I2C_CallbackIDTypeDef CallbackID, + pI2C_CallbackTypeDef pCallback); +HAL_StatusTypeDef HAL_I2C_UnRegisterCallback(I2C_HandleTypeDef *hi2c, HAL_I2C_CallbackIDTypeDef CallbackID); + +HAL_StatusTypeDef HAL_I2C_RegisterAddrCallback(I2C_HandleTypeDef *hi2c, pI2C_AddrCallbackTypeDef pCallback); +HAL_StatusTypeDef HAL_I2C_UnRegisterAddrCallback(I2C_HandleTypeDef *hi2c); +#endif /* USE_HAL_I2C_REGISTER_CALLBACKS */ +/** + * @} + */ + +/** @addtogroup I2C_Exported_Functions_Group2 Input and Output operation functions + * @{ + */ +/* IO operation functions ****************************************************/ +/******* Blocking mode: Polling */ +HAL_StatusTypeDef HAL_I2C_Master_Transmit(I2C_HandleTypeDef *hi2c, uint16_t DevAddress, uint8_t *pData, + uint16_t Size, uint32_t Timeout); +HAL_StatusTypeDef HAL_I2C_Master_Receive(I2C_HandleTypeDef *hi2c, uint16_t DevAddress, uint8_t *pData, + uint16_t Size, uint32_t Timeout); +HAL_StatusTypeDef HAL_I2C_Slave_Transmit(I2C_HandleTypeDef *hi2c, uint8_t *pData, uint16_t Size, + uint32_t Timeout); +HAL_StatusTypeDef HAL_I2C_Slave_Receive(I2C_HandleTypeDef *hi2c, uint8_t *pData, uint16_t Size, + uint32_t Timeout); +HAL_StatusTypeDef HAL_I2C_Mem_Write(I2C_HandleTypeDef *hi2c, uint16_t DevAddress, uint16_t MemAddress, + uint16_t MemAddSize, uint8_t *pData, uint16_t Size, uint32_t Timeout); +HAL_StatusTypeDef HAL_I2C_Mem_Read(I2C_HandleTypeDef *hi2c, uint16_t DevAddress, uint16_t MemAddress, + uint16_t MemAddSize, uint8_t *pData, uint16_t Size, uint32_t Timeout); +HAL_StatusTypeDef HAL_I2C_IsDeviceReady(I2C_HandleTypeDef *hi2c, uint16_t DevAddress, uint32_t Trials, + uint32_t Timeout); + +/******* Non-Blocking mode: Interrupt */ +HAL_StatusTypeDef HAL_I2C_Master_Transmit_IT(I2C_HandleTypeDef *hi2c, uint16_t DevAddress, uint8_t *pData, + uint16_t Size); +HAL_StatusTypeDef HAL_I2C_Master_Receive_IT(I2C_HandleTypeDef *hi2c, uint16_t DevAddress, uint8_t *pData, + uint16_t Size); +HAL_StatusTypeDef HAL_I2C_Slave_Transmit_IT(I2C_HandleTypeDef *hi2c, uint8_t *pData, uint16_t Size); +HAL_StatusTypeDef HAL_I2C_Slave_Receive_IT(I2C_HandleTypeDef *hi2c, uint8_t *pData, uint16_t Size); +HAL_StatusTypeDef HAL_I2C_Mem_Write_IT(I2C_HandleTypeDef *hi2c, uint16_t DevAddress, uint16_t MemAddress, + uint16_t MemAddSize, uint8_t *pData, uint16_t Size); +HAL_StatusTypeDef HAL_I2C_Mem_Read_IT(I2C_HandleTypeDef *hi2c, uint16_t DevAddress, uint16_t MemAddress, + uint16_t MemAddSize, uint8_t *pData, uint16_t Size); + +HAL_StatusTypeDef HAL_I2C_Master_Seq_Transmit_IT(I2C_HandleTypeDef *hi2c, uint16_t DevAddress, uint8_t *pData, + uint16_t Size, uint32_t 
XferOptions); +HAL_StatusTypeDef HAL_I2C_Master_Seq_Receive_IT(I2C_HandleTypeDef *hi2c, uint16_t DevAddress, uint8_t *pData, + uint16_t Size, uint32_t XferOptions); +HAL_StatusTypeDef HAL_I2C_Slave_Seq_Transmit_IT(I2C_HandleTypeDef *hi2c, uint8_t *pData, uint16_t Size, + uint32_t XferOptions); +HAL_StatusTypeDef HAL_I2C_Slave_Seq_Receive_IT(I2C_HandleTypeDef *hi2c, uint8_t *pData, uint16_t Size, + uint32_t XferOptions); +HAL_StatusTypeDef HAL_I2C_EnableListen_IT(I2C_HandleTypeDef *hi2c); +HAL_StatusTypeDef HAL_I2C_DisableListen_IT(I2C_HandleTypeDef *hi2c); +HAL_StatusTypeDef HAL_I2C_Master_Abort_IT(I2C_HandleTypeDef *hi2c, uint16_t DevAddress); + +/******* Non-Blocking mode: DMA */ +HAL_StatusTypeDef HAL_I2C_Master_Transmit_DMA(I2C_HandleTypeDef *hi2c, uint16_t DevAddress, uint8_t *pData, + uint16_t Size); +HAL_StatusTypeDef HAL_I2C_Master_Receive_DMA(I2C_HandleTypeDef *hi2c, uint16_t DevAddress, uint8_t *pData, + uint16_t Size); +HAL_StatusTypeDef HAL_I2C_Slave_Transmit_DMA(I2C_HandleTypeDef *hi2c, uint8_t *pData, uint16_t Size); +HAL_StatusTypeDef HAL_I2C_Slave_Receive_DMA(I2C_HandleTypeDef *hi2c, uint8_t *pData, uint16_t Size); +HAL_StatusTypeDef HAL_I2C_Mem_Write_DMA(I2C_HandleTypeDef *hi2c, uint16_t DevAddress, uint16_t MemAddress, + uint16_t MemAddSize, uint8_t *pData, uint16_t Size); +HAL_StatusTypeDef HAL_I2C_Mem_Read_DMA(I2C_HandleTypeDef *hi2c, uint16_t DevAddress, uint16_t MemAddress, + uint16_t MemAddSize, uint8_t *pData, uint16_t Size); + +HAL_StatusTypeDef HAL_I2C_Master_Seq_Transmit_DMA(I2C_HandleTypeDef *hi2c, uint16_t DevAddress, uint8_t *pData, + uint16_t Size, uint32_t XferOptions); +HAL_StatusTypeDef HAL_I2C_Master_Seq_Receive_DMA(I2C_HandleTypeDef *hi2c, uint16_t DevAddress, uint8_t *pData, + uint16_t Size, uint32_t XferOptions); +HAL_StatusTypeDef HAL_I2C_Slave_Seq_Transmit_DMA(I2C_HandleTypeDef *hi2c, uint8_t *pData, uint16_t Size, + uint32_t XferOptions); +HAL_StatusTypeDef HAL_I2C_Slave_Seq_Receive_DMA(I2C_HandleTypeDef *hi2c, uint8_t *pData, uint16_t Size, + uint32_t XferOptions); +/** + * @} + */ + +/** @addtogroup I2C_IRQ_Handler_and_Callbacks IRQ Handler and Callbacks + * @{ + */ +/******* I2C IRQHandler and Callbacks used in non blocking modes (Interrupt and DMA) */ +void HAL_I2C_EV_IRQHandler(I2C_HandleTypeDef *hi2c); +void HAL_I2C_ER_IRQHandler(I2C_HandleTypeDef *hi2c); +void HAL_I2C_MasterTxCpltCallback(I2C_HandleTypeDef *hi2c); +void HAL_I2C_MasterRxCpltCallback(I2C_HandleTypeDef *hi2c); +void HAL_I2C_SlaveTxCpltCallback(I2C_HandleTypeDef *hi2c); +void HAL_I2C_SlaveRxCpltCallback(I2C_HandleTypeDef *hi2c); +void HAL_I2C_AddrCallback(I2C_HandleTypeDef *hi2c, uint8_t TransferDirection, uint16_t AddrMatchCode); +void HAL_I2C_ListenCpltCallback(I2C_HandleTypeDef *hi2c); +void HAL_I2C_MemTxCpltCallback(I2C_HandleTypeDef *hi2c); +void HAL_I2C_MemRxCpltCallback(I2C_HandleTypeDef *hi2c); +void HAL_I2C_ErrorCallback(I2C_HandleTypeDef *hi2c); +void HAL_I2C_AbortCpltCallback(I2C_HandleTypeDef *hi2c); +/** + * @} + */ + +/** @addtogroup I2C_Exported_Functions_Group3 Peripheral State, Mode and Error functions + * @{ + */ +/* Peripheral State, Mode and Error functions *********************************/ +HAL_I2C_StateTypeDef HAL_I2C_GetState(I2C_HandleTypeDef *hi2c); +HAL_I2C_ModeTypeDef HAL_I2C_GetMode(I2C_HandleTypeDef *hi2c); +uint32_t HAL_I2C_GetError(I2C_HandleTypeDef *hi2c); + +/** + * @} + */ + +/** + * @} + */ + +/* Private constants ---------------------------------------------------------*/ +/** @defgroup I2C_Private_Constants I2C Private Constants + * @{ + */ + 
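
For context on how the blocking master API declared above is typically used, here is a minimal polling-mode sketch that probes a device and then reads one register. The hi2c1 handle, SENSOR_ADDR and WHO_AM_I_REG names are illustrative assumptions and not part of this patch; only the HAL calls and constants come from the declarations above.

#include "stm32f7xx_hal.h"                  /* brings in this I2C header via the HAL umbrella include */

extern I2C_HandleTypeDef hi2c1;             /* assumed: configured elsewhere with HAL_I2C_Init() */
#define SENSOR_ADDR   (0x68U << 1)          /* hypothetical 7-bit address, left-shifted as the HAL expects */
#define WHO_AM_I_REG  0x0FU                 /* hypothetical register address */

static HAL_StatusTypeDef read_who_am_i(uint8_t *value)
{
  /* Check that the device ACKs its address before starting the transfer (3 trials, 100 ms timeout). */
  if (HAL_I2C_IsDeviceReady(&hi2c1, SENSOR_ADDR, 3U, 100U) != HAL_OK)
  {
    return HAL_ERROR;
  }
  /* Combined write/read of a one-byte register address, 100 ms timeout. */
  return HAL_I2C_Mem_Read(&hi2c1, SENSOR_ADDR, WHO_AM_I_REG,
                          I2C_MEMADD_SIZE_8BIT, value, 1U, 100U);
}
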
+/** + * @} + */ + +/* Private macros ------------------------------------------------------------*/ +/** @defgroup I2C_Private_Macro I2C Private Macros + * @{ + */ + +#define IS_I2C_ADDRESSING_MODE(MODE) (((MODE) == I2C_ADDRESSINGMODE_7BIT) || \ + ((MODE) == I2C_ADDRESSINGMODE_10BIT)) + +#define IS_I2C_DUAL_ADDRESS(ADDRESS) (((ADDRESS) == I2C_DUALADDRESS_DISABLE) || \ + ((ADDRESS) == I2C_DUALADDRESS_ENABLE)) + +#define IS_I2C_OWN_ADDRESS2_MASK(MASK) (((MASK) == I2C_OA2_NOMASK) || \ + ((MASK) == I2C_OA2_MASK01) || \ + ((MASK) == I2C_OA2_MASK02) || \ + ((MASK) == I2C_OA2_MASK03) || \ + ((MASK) == I2C_OA2_MASK04) || \ + ((MASK) == I2C_OA2_MASK05) || \ + ((MASK) == I2C_OA2_MASK06) || \ + ((MASK) == I2C_OA2_MASK07)) + +#define IS_I2C_GENERAL_CALL(CALL) (((CALL) == I2C_GENERALCALL_DISABLE) || \ + ((CALL) == I2C_GENERALCALL_ENABLE)) + +#define IS_I2C_NO_STRETCH(STRETCH) (((STRETCH) == I2C_NOSTRETCH_DISABLE) || \ + ((STRETCH) == I2C_NOSTRETCH_ENABLE)) + +#define IS_I2C_MEMADD_SIZE(SIZE) (((SIZE) == I2C_MEMADD_SIZE_8BIT) || \ + ((SIZE) == I2C_MEMADD_SIZE_16BIT)) + +#define IS_TRANSFER_MODE(MODE) (((MODE) == I2C_RELOAD_MODE) || \ + ((MODE) == I2C_AUTOEND_MODE) || \ + ((MODE) == I2C_SOFTEND_MODE)) + +#define IS_TRANSFER_REQUEST(REQUEST) (((REQUEST) == I2C_GENERATE_STOP) || \ + ((REQUEST) == I2C_GENERATE_START_READ) || \ + ((REQUEST) == I2C_GENERATE_START_WRITE) || \ + ((REQUEST) == I2C_NO_STARTSTOP)) + +#define IS_I2C_TRANSFER_OPTIONS_REQUEST(REQUEST) (((REQUEST) == I2C_FIRST_FRAME) || \ + ((REQUEST) == I2C_FIRST_AND_NEXT_FRAME) || \ + ((REQUEST) == I2C_NEXT_FRAME) || \ + ((REQUEST) == I2C_FIRST_AND_LAST_FRAME) || \ + ((REQUEST) == I2C_LAST_FRAME) || \ + ((REQUEST) == I2C_LAST_FRAME_NO_STOP) || \ + IS_I2C_TRANSFER_OTHER_OPTIONS_REQUEST(REQUEST)) + +#define IS_I2C_TRANSFER_OTHER_OPTIONS_REQUEST(REQUEST) (((REQUEST) == I2C_OTHER_FRAME) || \ + ((REQUEST) == I2C_OTHER_AND_LAST_FRAME)) + +#define I2C_RESET_CR2(__HANDLE__) ((__HANDLE__)->Instance->CR2 &= \ + (uint32_t)~((uint32_t)(I2C_CR2_SADD | I2C_CR2_HEAD10R | \ + I2C_CR2_NBYTES | I2C_CR2_RELOAD | \ + I2C_CR2_RD_WRN))) + +#define I2C_GET_ADDR_MATCH(__HANDLE__) ((uint16_t)(((__HANDLE__)->Instance->ISR & I2C_ISR_ADDCODE) \ + >> 16U)) +#define I2C_GET_DIR(__HANDLE__) ((uint8_t)(((__HANDLE__)->Instance->ISR & I2C_ISR_DIR) \ + >> 16U)) +#define I2C_GET_STOP_MODE(__HANDLE__) ((__HANDLE__)->Instance->CR2 & I2C_CR2_AUTOEND) +#define I2C_GET_OWN_ADDRESS1(__HANDLE__) ((uint16_t)((__HANDLE__)->Instance->OAR1 & I2C_OAR1_OA1)) +#define I2C_GET_OWN_ADDRESS2(__HANDLE__) ((uint16_t)((__HANDLE__)->Instance->OAR2 & I2C_OAR2_OA2)) + +#define IS_I2C_OWN_ADDRESS1(ADDRESS1) ((ADDRESS1) <= 0x000003FFU) +#define IS_I2C_OWN_ADDRESS2(ADDRESS2) ((ADDRESS2) <= (uint16_t)0x00FFU) + +#define I2C_MEM_ADD_MSB(__ADDRESS__) ((uint8_t)((uint16_t)(((uint16_t)((__ADDRESS__) & \ + (uint16_t)(0xFF00U))) >> 8U))) +#define I2C_MEM_ADD_LSB(__ADDRESS__) ((uint8_t)((uint16_t)((__ADDRESS__) & (uint16_t)(0x00FFU)))) + +#define I2C_GENERATE_START(__ADDMODE__,__ADDRESS__) (((__ADDMODE__) == I2C_ADDRESSINGMODE_7BIT) ? \ + (uint32_t)((((uint32_t)(__ADDRESS__) & (I2C_CR2_SADD)) | \ + (I2C_CR2_START) | (I2C_CR2_AUTOEND)) & \ + (~I2C_CR2_RD_WRN)) : \ + (uint32_t)((((uint32_t)(__ADDRESS__) & (I2C_CR2_SADD)) | \ + (I2C_CR2_ADD10) | (I2C_CR2_START)) & \ + (~I2C_CR2_RD_WRN))) + +#define I2C_CHECK_FLAG(__ISR__, __FLAG__) ((((__ISR__) & ((__FLAG__) & I2C_FLAG_MASK)) == \ + ((__FLAG__) & I2C_FLAG_MASK)) ? SET : RESET) +#define I2C_CHECK_IT_SOURCE(__CR1__, __IT__) ((((__CR1__) & (__IT__)) == (__IT__)) ? 
SET : RESET) +/** + * @} + */ + +/* Private Functions ---------------------------------------------------------*/ +/** @defgroup I2C_Private_Functions I2C Private Functions + * @{ + */ +/* Private functions are defined in stm32f7xx_hal_i2c.c file */ +/** + * @} + */ + +/** + * @} + */ + +/** + * @} + */ + +#ifdef __cplusplus +} +#endif + + +#endif /* STM32F7xx_HAL_I2C_H */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_i2c_ex.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_i2c_ex.h new file mode 100644 index 0000000..27bdcf0 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_i2c_ex.h @@ -0,0 +1,212 @@ +/** + ****************************************************************************** + * @file stm32f7xx_hal_i2c_ex.h + * @author MCD Application Team + * @brief Header file of I2C HAL Extended module. + ****************************************************************************** + * @attention + * + * Copyright (c) 2017 STMicroelectronics. + * All rights reserved. + * + * This software is licensed under terms that can be found in the LICENSE file + * in the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. + * + ****************************************************************************** + */ + +/* Define to prevent recursive inclusion -------------------------------------*/ +#ifndef STM32F7xx_HAL_I2C_EX_H +#define STM32F7xx_HAL_I2C_EX_H + +#ifdef __cplusplus +extern "C" { +#endif + +/* Includes ------------------------------------------------------------------*/ +#include "stm32f7xx_hal_def.h" + +/** @addtogroup STM32F7xx_HAL_Driver + * @{ + */ + +/** @addtogroup I2CEx + * @{ + */ + +/* Exported types ------------------------------------------------------------*/ +/* Exported constants --------------------------------------------------------*/ +/** @defgroup I2CEx_Exported_Constants I2C Extended Exported Constants + * @{ + */ + +/** @defgroup I2CEx_Analog_Filter I2C Extended Analog Filter + * @{ + */ +#define I2C_ANALOGFILTER_ENABLE 0x00000000U +#define I2C_ANALOGFILTER_DISABLE I2C_CR1_ANFOFF +/** + * @} + */ + +/** @defgroup I2CEx_FastModePlus I2C Extended Fast Mode Plus + * @{ + */ +#define I2C_FMP_NOT_SUPPORTED 0xAAAA0000U /*!< Fast Mode Plus not supported */ +#if defined(SYSCFG_PMC_I2C_PB6_FMP) +#define I2C_FASTMODEPLUS_PB6 SYSCFG_PMC_I2C_PB6_FMP /*!< Enable Fast Mode Plus on PB6 */ +#define I2C_FASTMODEPLUS_PB7 SYSCFG_PMC_I2C_PB7_FMP /*!< Enable Fast Mode Plus on PB7 */ +#else +#define I2C_FASTMODEPLUS_PB6 (uint32_t)(0x00000004U | I2C_FMP_NOT_SUPPORTED) /*!< Fast Mode Plus PB6 not supported */ +#define I2C_FASTMODEPLUS_PB7 (uint32_t)(0x00000008U | I2C_FMP_NOT_SUPPORTED) /*!< Fast Mode Plus PB7 not supported */ +#endif /* SYSCFG_PMC_I2C_PB6_FMP */ +#if defined(SYSCFG_PMC_I2C_PB8_FMP) +#define I2C_FASTMODEPLUS_PB8 SYSCFG_PMC_I2C_PB8_FMP /*!< Enable Fast Mode Plus on PB8 */ +#define I2C_FASTMODEPLUS_PB9 SYSCFG_PMC_I2C_PB9_FMP /*!< Enable Fast Mode Plus on PB9 */ +#else +#define I2C_FASTMODEPLUS_PB8 (uint32_t)(0x00000010U | I2C_FMP_NOT_SUPPORTED) /*!< Fast Mode Plus PB8 not supported */ +#define I2C_FASTMODEPLUS_PB9 (uint32_t)(0x00000012U | I2C_FMP_NOT_SUPPORTED) /*!< Fast Mode Plus PB9 not supported */ +#endif /* SYSCFG_PMC_I2C_PB8_FMP */ +#if defined(SYSCFG_PMC_I2C1_FMP) +#define I2C_FASTMODEPLUS_I2C1 SYSCFG_PMC_I2C1_FMP /*!< Enable 
Fast Mode Plus on I2C1 pins */ +#else +#define I2C_FASTMODEPLUS_I2C1 (uint32_t)(0x00000100U | I2C_FMP_NOT_SUPPORTED) /*!< Fast Mode Plus I2C1 not supported */ +#endif /* SYSCFG_PMC_I2C1_FMP */ +#if defined(SYSCFG_PMC_I2C2_FMP) +#define I2C_FASTMODEPLUS_I2C2 SYSCFG_PMC_I2C2_FMP /*!< Enable Fast Mode Plus on I2C2 pins */ +#else +#define I2C_FASTMODEPLUS_I2C2 (uint32_t)(0x00000200U | I2C_FMP_NOT_SUPPORTED) /*!< Fast Mode Plus I2C2 not supported */ +#endif /* SYSCFG_PMC_I2C2_FMP */ +#if defined(SYSCFG_PMC_I2C3_FMP) +#define I2C_FASTMODEPLUS_I2C3 SYSCFG_PMC_I2C3_FMP /*!< Enable Fast Mode Plus on I2C3 pins */ +#else +#define I2C_FASTMODEPLUS_I2C3 (uint32_t)(0x00000400U | I2C_FMP_NOT_SUPPORTED) /*!< Fast Mode Plus I2C3 not supported */ +#endif /* SYSCFG_PMC_I2C3_FMP */ +#if defined(SYSCFG_PMC_I2C4_FMP) +#define I2C_FASTMODEPLUS_I2C4 SYSCFG_PMC_I2C4_FMP /*!< Enable Fast Mode Plus on I2C4 pins */ +#else +#define I2C_FASTMODEPLUS_I2C4 (uint32_t)(0x00000800U | I2C_FMP_NOT_SUPPORTED) /*!< Fast Mode Plus I2C4 not supported */ +#endif /* SYSCFG_PMC_I2C4_FMP */ +/** + * @} + */ + +/** + * @} + */ + +/* Exported macro ------------------------------------------------------------*/ +/** @defgroup I2CEx_Exported_Macros I2C Extended Exported Macros + * @{ + */ + +/** + * @} + */ + +/* Exported functions --------------------------------------------------------*/ +/** @addtogroup I2CEx_Exported_Functions I2C Extended Exported Functions + * @{ + */ + +/** @addtogroup I2CEx_Exported_Functions_Group1 Filter Mode Functions + * @{ + */ +/* Peripheral Control functions ************************************************/ +HAL_StatusTypeDef HAL_I2CEx_ConfigAnalogFilter(I2C_HandleTypeDef *hi2c, uint32_t AnalogFilter); +HAL_StatusTypeDef HAL_I2CEx_ConfigDigitalFilter(I2C_HandleTypeDef *hi2c, uint32_t DigitalFilter); +/** + * @} + */ +#if (defined(SYSCFG_PMC_I2C_PB6_FMP) || defined(SYSCFG_PMC_I2C_PB7_FMP)) || (defined(SYSCFG_PMC_I2C_PB8_FMP) || defined(SYSCFG_PMC_I2C_PB9_FMP)) || (defined(SYSCFG_PMC_I2C1_FMP)) || (defined(SYSCFG_PMC_I2C2_FMP)) || defined(SYSCFG_PMC_I2C3_FMP) || defined(SYSCFG_PMC_I2C4_FMP) + +/** @addtogroup I2CEx_Exported_Functions_Group3 Fast Mode Plus Functions + * @{ + */ +void HAL_I2CEx_EnableFastModePlus(uint32_t ConfigFastModePlus); +void HAL_I2CEx_DisableFastModePlus(uint32_t ConfigFastModePlus); +/** + * @} + */ +#endif /* Fast Mode Plus Availability */ + +/** + * @} + */ + +/* Private constants ---------------------------------------------------------*/ +/** @defgroup I2CEx_Private_Constants I2C Extended Private Constants + * @{ + */ + +/** + * @} + */ + +/* Private macros ------------------------------------------------------------*/ +/** @defgroup I2CEx_Private_Macro I2C Extended Private Macros + * @{ + */ +#define IS_I2C_ANALOG_FILTER(FILTER) (((FILTER) == I2C_ANALOGFILTER_ENABLE) || \ + ((FILTER) == I2C_ANALOGFILTER_DISABLE)) + +#define IS_I2C_DIGITAL_FILTER(FILTER) ((FILTER) <= 0x0000000FU) + +#if defined(SYSCFG_PMC_I2C4_FMP) +#define IS_I2C_FASTMODEPLUS(__CONFIG__) ((((__CONFIG__) & I2C_FASTMODEPLUS_PB6) == I2C_FASTMODEPLUS_PB6) || \ + (((__CONFIG__) & I2C_FASTMODEPLUS_PB7) == I2C_FASTMODEPLUS_PB7) || \ + (((__CONFIG__) & I2C_FASTMODEPLUS_PB8) == I2C_FASTMODEPLUS_PB8) || \ + (((__CONFIG__) & I2C_FASTMODEPLUS_PB9) == I2C_FASTMODEPLUS_PB9) || \ + (((__CONFIG__) & I2C_FASTMODEPLUS_I2C1) == I2C_FASTMODEPLUS_I2C1) || \ + (((__CONFIG__) & I2C_FASTMODEPLUS_I2C2) == I2C_FASTMODEPLUS_I2C2) || \ + (((__CONFIG__) & I2C_FASTMODEPLUS_I2C3) == I2C_FASTMODEPLUS_I2C3) || \ + (((__CONFIG__) & 
I2C_FASTMODEPLUS_I2C4) == I2C_FASTMODEPLUS_I2C4)) +#elif defined(SYSCFG_PMC_I2C3_FMP) +#define IS_I2C_FASTMODEPLUS(__CONFIG__) ((((__CONFIG__) & I2C_FASTMODEPLUS_PB6) == I2C_FASTMODEPLUS_PB6) || \ + (((__CONFIG__) & I2C_FASTMODEPLUS_PB7) == I2C_FASTMODEPLUS_PB7) || \ + (((__CONFIG__) & I2C_FASTMODEPLUS_PB8) == I2C_FASTMODEPLUS_PB8) || \ + (((__CONFIG__) & I2C_FASTMODEPLUS_PB9) == I2C_FASTMODEPLUS_PB9) || \ + (((__CONFIG__) & I2C_FASTMODEPLUS_I2C1) == I2C_FASTMODEPLUS_I2C1) || \ + (((__CONFIG__) & I2C_FASTMODEPLUS_I2C2) == I2C_FASTMODEPLUS_I2C2) || \ + (((__CONFIG__) & I2C_FASTMODEPLUS_I2C3) == I2C_FASTMODEPLUS_I2C3)) +#elif defined(SYSCFG_PMC_I2C2_FMP) +#define IS_I2C_FASTMODEPLUS(__CONFIG__) ((((__CONFIG__) & I2C_FASTMODEPLUS_PB6) == I2C_FASTMODEPLUS_PB6) || \ + (((__CONFIG__) & I2C_FASTMODEPLUS_PB7) == I2C_FASTMODEPLUS_PB7) || \ + (((__CONFIG__) & I2C_FASTMODEPLUS_PB8) == I2C_FASTMODEPLUS_PB8) || \ + (((__CONFIG__) & I2C_FASTMODEPLUS_PB9) == I2C_FASTMODEPLUS_PB9) || \ + (((__CONFIG__) & I2C_FASTMODEPLUS_I2C1) == I2C_FASTMODEPLUS_I2C1) || \ + (((__CONFIG__) & I2C_FASTMODEPLUS_I2C2) == I2C_FASTMODEPLUS_I2C2)) +#elif defined(SYSCFG_PMC_I2C1_FMP) +#define IS_I2C_FASTMODEPLUS(__CONFIG__) ((((__CONFIG__) & I2C_FASTMODEPLUS_PB6) == I2C_FASTMODEPLUS_PB6) || \ + (((__CONFIG__) & I2C_FASTMODEPLUS_PB7) == I2C_FASTMODEPLUS_PB7) || \ + (((__CONFIG__) & I2C_FASTMODEPLUS_PB8) == I2C_FASTMODEPLUS_PB8) || \ + (((__CONFIG__) & I2C_FASTMODEPLUS_PB9) == I2C_FASTMODEPLUS_PB9) || \ + (((__CONFIG__) & I2C_FASTMODEPLUS_I2C1) == I2C_FASTMODEPLUS_I2C1)) +#endif /* SYSCFG_PMC_I2C1_FMP && SYSCFG_PMC_I2C2_FMP && SYSCFG_PMC_I2C3_FMP && SYSCFG_PMC_I2C4_FMP */ +/** + * @} + */ + +/* Private Functions ---------------------------------------------------------*/ +/** @defgroup I2CEx_Private_Functions I2C Extended Private Functions + * @{ + */ +/* Private functions are defined in stm32f7xx_hal_i2c_ex.c file */ +/** + * @} + */ + +/** + * @} + */ + +/** + * @} + */ + +#ifdef __cplusplus +} +#endif + +#endif /* STM32F7xx_HAL_I2C_EX_H */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_pcd.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_pcd.h new file mode 100644 index 0000000..dc84fc0 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_pcd.h @@ -0,0 +1,459 @@ +/** + ****************************************************************************** + * @file stm32f7xx_hal_pcd.h + * @author MCD Application Team + * @brief Header file of PCD HAL module. + ****************************************************************************** + * @attention + * + * Copyright (c) 2017 STMicroelectronics. + * All rights reserved. + * + * This software is licensed under terms that can be found in the LICENSE file + * in the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. 
+ * + ****************************************************************************** + */ + +/* Define to prevent recursive inclusion -------------------------------------*/ +#ifndef STM32F7xx_HAL_PCD_H +#define STM32F7xx_HAL_PCD_H + +#ifdef __cplusplus +extern "C" { +#endif + +/* Includes ------------------------------------------------------------------*/ +#include "stm32f7xx_ll_usb.h" + +#if defined (USB_OTG_FS) || defined (USB_OTG_HS) + +/** @addtogroup STM32F7xx_HAL_Driver + * @{ + */ + +/** @addtogroup PCD + * @{ + */ + +/* Exported types ------------------------------------------------------------*/ +/** @defgroup PCD_Exported_Types PCD Exported Types + * @{ + */ + +/** + * @brief PCD State structure definition + */ +typedef enum +{ + HAL_PCD_STATE_RESET = 0x00, + HAL_PCD_STATE_READY = 0x01, + HAL_PCD_STATE_ERROR = 0x02, + HAL_PCD_STATE_BUSY = 0x03, + HAL_PCD_STATE_TIMEOUT = 0x04 +} PCD_StateTypeDef; + +/* Device LPM suspend state */ +typedef enum +{ + LPM_L0 = 0x00, /* on */ + LPM_L1 = 0x01, /* LPM L1 sleep */ + LPM_L2 = 0x02, /* suspend */ + LPM_L3 = 0x03, /* off */ +} PCD_LPM_StateTypeDef; + +typedef enum +{ + PCD_LPM_L0_ACTIVE = 0x00, /* on */ + PCD_LPM_L1_ACTIVE = 0x01, /* LPM L1 sleep */ +} PCD_LPM_MsgTypeDef; + +typedef enum +{ + PCD_BCD_ERROR = 0xFF, + PCD_BCD_CONTACT_DETECTION = 0xFE, + PCD_BCD_STD_DOWNSTREAM_PORT = 0xFD, + PCD_BCD_CHARGING_DOWNSTREAM_PORT = 0xFC, + PCD_BCD_DEDICATED_CHARGING_PORT = 0xFB, + PCD_BCD_DISCOVERY_COMPLETED = 0x00, + +} PCD_BCD_MsgTypeDef; + +#if defined (USB_OTG_FS) || defined (USB_OTG_HS) +typedef USB_OTG_GlobalTypeDef PCD_TypeDef; +typedef USB_OTG_CfgTypeDef PCD_InitTypeDef; +typedef USB_OTG_EPTypeDef PCD_EPTypeDef; +#endif /* defined (USB_OTG_FS) || defined (USB_OTG_HS) */ + +/** + * @brief PCD Handle Structure definition + */ +#if (USE_HAL_PCD_REGISTER_CALLBACKS == 1U) +typedef struct __PCD_HandleTypeDef +#else +typedef struct +#endif /* USE_HAL_PCD_REGISTER_CALLBACKS */ +{ + PCD_TypeDef *Instance; /*!< Register base address */ + PCD_InitTypeDef Init; /*!< PCD required parameters */ + __IO uint8_t USB_Address; /*!< USB Address */ + PCD_EPTypeDef IN_ep[16]; /*!< IN endpoint parameters */ + PCD_EPTypeDef OUT_ep[16]; /*!< OUT endpoint parameters */ + HAL_LockTypeDef Lock; /*!< PCD peripheral status */ + __IO PCD_StateTypeDef State; /*!< PCD communication state */ + __IO uint32_t ErrorCode; /*!< PCD Error code */ + uint32_t Setup[12]; /*!< Setup packet buffer */ + PCD_LPM_StateTypeDef LPM_State; /*!< LPM State */ + uint32_t BESL; + + + uint32_t lpm_active; /*!< Enable or disable the Link Power Management . 
+ This parameter can be set to ENABLE or DISABLE */ + void *pData; /*!< Pointer to upper stack Handler */ + +#if (USE_HAL_PCD_REGISTER_CALLBACKS == 1U) + void (* SOFCallback)(struct __PCD_HandleTypeDef *hpcd); /*!< USB OTG PCD SOF callback */ + void (* SetupStageCallback)(struct __PCD_HandleTypeDef *hpcd); /*!< USB OTG PCD Setup Stage callback */ + void (* ResetCallback)(struct __PCD_HandleTypeDef *hpcd); /*!< USB OTG PCD Reset callback */ + void (* SuspendCallback)(struct __PCD_HandleTypeDef *hpcd); /*!< USB OTG PCD Suspend callback */ + void (* ResumeCallback)(struct __PCD_HandleTypeDef *hpcd); /*!< USB OTG PCD Resume callback */ + void (* ConnectCallback)(struct __PCD_HandleTypeDef *hpcd); /*!< USB OTG PCD Connect callback */ + void (* DisconnectCallback)(struct __PCD_HandleTypeDef *hpcd); /*!< USB OTG PCD Disconnect callback */ + + void (* DataOutStageCallback)(struct __PCD_HandleTypeDef *hpcd, uint8_t epnum); /*!< USB OTG PCD Data OUT Stage callback */ + void (* DataInStageCallback)(struct __PCD_HandleTypeDef *hpcd, uint8_t epnum); /*!< USB OTG PCD Data IN Stage callback */ + void (* ISOOUTIncompleteCallback)(struct __PCD_HandleTypeDef *hpcd, uint8_t epnum); /*!< USB OTG PCD ISO OUT Incomplete callback */ + void (* ISOINIncompleteCallback)(struct __PCD_HandleTypeDef *hpcd, uint8_t epnum); /*!< USB OTG PCD ISO IN Incomplete callback */ + void (* LPMCallback)(struct __PCD_HandleTypeDef *hpcd, PCD_LPM_MsgTypeDef msg); /*!< USB OTG PCD LPM callback */ + + void (* MspInitCallback)(struct __PCD_HandleTypeDef *hpcd); /*!< USB OTG PCD Msp Init callback */ + void (* MspDeInitCallback)(struct __PCD_HandleTypeDef *hpcd); /*!< USB OTG PCD Msp DeInit callback */ +#endif /* USE_HAL_PCD_REGISTER_CALLBACKS */ +} PCD_HandleTypeDef; + +/** + * @} + */ + +/* Include PCD HAL Extended module */ +#include "stm32f7xx_hal_pcd_ex.h" + +/* Exported constants --------------------------------------------------------*/ +/** @defgroup PCD_Exported_Constants PCD Exported Constants + * @{ + */ + +/** @defgroup PCD_Speed PCD Speed + * @{ + */ +#define PCD_SPEED_HIGH USBD_HS_SPEED +#define PCD_SPEED_HIGH_IN_FULL USBD_HSINFS_SPEED +#define PCD_SPEED_FULL USBD_FS_SPEED +/** + * @} + */ + +/** @defgroup PCD_PHY_Module PCD PHY Module + * @{ + */ +#define PCD_PHY_ULPI 1U +#define PCD_PHY_EMBEDDED 2U +#define PCD_PHY_UTMI 3U +/** + * @} + */ + +/** @defgroup PCD_Error_Code_definition PCD Error Code definition + * @brief PCD Error Code definition + * @{ + */ +#if (USE_HAL_PCD_REGISTER_CALLBACKS == 1U) +#define HAL_PCD_ERROR_INVALID_CALLBACK (0x00000010U) /*!< Invalid Callback error */ +#endif /* USE_HAL_PCD_REGISTER_CALLBACKS */ + +/** + * @} + */ + +/** + * @} + */ + +/* Exported macros -----------------------------------------------------------*/ +/** @defgroup PCD_Exported_Macros PCD Exported Macros + * @brief macros to handle interrupts and specific clock configurations + * @{ + */ +#if defined (USB_OTG_FS) || defined (USB_OTG_HS) +#define __HAL_PCD_ENABLE(__HANDLE__) (void)USB_EnableGlobalInt ((__HANDLE__)->Instance) +#define __HAL_PCD_DISABLE(__HANDLE__) (void)USB_DisableGlobalInt ((__HANDLE__)->Instance) + +#define __HAL_PCD_GET_FLAG(__HANDLE__, __INTERRUPT__) \ + ((USB_ReadInterrupts((__HANDLE__)->Instance) & (__INTERRUPT__)) == (__INTERRUPT__)) + +#define __HAL_PCD_CLEAR_FLAG(__HANDLE__, __INTERRUPT__) (((__HANDLE__)->Instance->GINTSTS) &= (__INTERRUPT__)) +#define __HAL_PCD_IS_INVALID_INTERRUPT(__HANDLE__) (USB_ReadInterrupts((__HANDLE__)->Instance) == 0U) + +#define __HAL_PCD_UNGATE_PHYCLOCK(__HANDLE__) \ + *(__IO 
uint32_t *)((uint32_t)((__HANDLE__)->Instance) + USB_OTG_PCGCCTL_BASE) &= ~(USB_OTG_PCGCCTL_STOPCLK) + +#define __HAL_PCD_GATE_PHYCLOCK(__HANDLE__) \ + *(__IO uint32_t *)((uint32_t)((__HANDLE__)->Instance) + USB_OTG_PCGCCTL_BASE) |= USB_OTG_PCGCCTL_STOPCLK + +#define __HAL_PCD_IS_PHY_SUSPENDED(__HANDLE__) \ + ((*(__IO uint32_t *)((uint32_t)((__HANDLE__)->Instance) + USB_OTG_PCGCCTL_BASE)) & 0x10U) + +#define __HAL_USB_OTG_HS_WAKEUP_EXTI_ENABLE_IT() EXTI->IMR |= (USB_OTG_HS_WAKEUP_EXTI_LINE) +#define __HAL_USB_OTG_HS_WAKEUP_EXTI_DISABLE_IT() EXTI->IMR &= ~(USB_OTG_HS_WAKEUP_EXTI_LINE) +#define __HAL_USB_OTG_HS_WAKEUP_EXTI_GET_FLAG() EXTI->PR & (USB_OTG_HS_WAKEUP_EXTI_LINE) +#define __HAL_USB_OTG_HS_WAKEUP_EXTI_CLEAR_FLAG() EXTI->PR = (USB_OTG_HS_WAKEUP_EXTI_LINE) + +#define __HAL_USB_OTG_HS_WAKEUP_EXTI_ENABLE_RISING_EDGE() \ + do { \ + EXTI->FTSR &= ~(USB_OTG_HS_WAKEUP_EXTI_LINE); \ + EXTI->RTSR |= USB_OTG_HS_WAKEUP_EXTI_LINE; \ + } while(0U) +#define __HAL_USB_OTG_FS_WAKEUP_EXTI_ENABLE_IT() EXTI->IMR |= USB_OTG_FS_WAKEUP_EXTI_LINE +#define __HAL_USB_OTG_FS_WAKEUP_EXTI_DISABLE_IT() EXTI->IMR &= ~(USB_OTG_FS_WAKEUP_EXTI_LINE) +#define __HAL_USB_OTG_FS_WAKEUP_EXTI_GET_FLAG() EXTI->PR & (USB_OTG_FS_WAKEUP_EXTI_LINE) +#define __HAL_USB_OTG_FS_WAKEUP_EXTI_CLEAR_FLAG() EXTI->PR = USB_OTG_FS_WAKEUP_EXTI_LINE + +#define __HAL_USB_OTG_FS_WAKEUP_EXTI_ENABLE_RISING_EDGE() \ + do { \ + EXTI->FTSR &= ~(USB_OTG_FS_WAKEUP_EXTI_LINE); \ + EXTI->RTSR |= USB_OTG_FS_WAKEUP_EXTI_LINE; \ + } while(0U) +#endif /* defined (USB_OTG_FS) || defined (USB_OTG_HS) */ + + +/** + * @} + */ + +/* Exported functions --------------------------------------------------------*/ +/** @addtogroup PCD_Exported_Functions PCD Exported Functions + * @{ + */ + +/* Initialization/de-initialization functions ********************************/ +/** @addtogroup PCD_Exported_Functions_Group1 Initialization and de-initialization functions + * @{ + */ +HAL_StatusTypeDef HAL_PCD_Init(PCD_HandleTypeDef *hpcd); +HAL_StatusTypeDef HAL_PCD_DeInit(PCD_HandleTypeDef *hpcd); +void HAL_PCD_MspInit(PCD_HandleTypeDef *hpcd); +void HAL_PCD_MspDeInit(PCD_HandleTypeDef *hpcd); + +#if (USE_HAL_PCD_REGISTER_CALLBACKS == 1U) +/** @defgroup HAL_PCD_Callback_ID_enumeration_definition HAL USB OTG PCD Callback ID enumeration definition + * @brief HAL USB OTG PCD Callback ID enumeration definition + * @{ + */ +typedef enum +{ + HAL_PCD_SOF_CB_ID = 0x01, /*!< USB PCD SOF callback ID */ + HAL_PCD_SETUPSTAGE_CB_ID = 0x02, /*!< USB PCD Setup Stage callback ID */ + HAL_PCD_RESET_CB_ID = 0x03, /*!< USB PCD Reset callback ID */ + HAL_PCD_SUSPEND_CB_ID = 0x04, /*!< USB PCD Suspend callback ID */ + HAL_PCD_RESUME_CB_ID = 0x05, /*!< USB PCD Resume callback ID */ + HAL_PCD_CONNECT_CB_ID = 0x06, /*!< USB PCD Connect callback ID */ + HAL_PCD_DISCONNECT_CB_ID = 0x07, /*!< USB PCD Disconnect callback ID */ + + HAL_PCD_MSPINIT_CB_ID = 0x08, /*!< USB PCD MspInit callback ID */ + HAL_PCD_MSPDEINIT_CB_ID = 0x09 /*!< USB PCD MspDeInit callback ID */ + +} HAL_PCD_CallbackIDTypeDef; +/** + * @} + */ + +/** @defgroup HAL_PCD_Callback_pointer_definition HAL USB OTG PCD Callback pointer definition + * @brief HAL USB OTG PCD Callback pointer definition + * @{ + */ + +typedef void (*pPCD_CallbackTypeDef)(PCD_HandleTypeDef *hpcd); /*!< pointer to a common USB OTG PCD callback function */ +typedef void (*pPCD_DataOutStageCallbackTypeDef)(PCD_HandleTypeDef *hpcd, uint8_t epnum); /*!< pointer to USB OTG PCD Data OUT Stage callback */ +typedef void 
(*pPCD_DataInStageCallbackTypeDef)(PCD_HandleTypeDef *hpcd, uint8_t epnum); /*!< pointer to USB OTG PCD Data IN Stage callback */ +typedef void (*pPCD_IsoOutIncpltCallbackTypeDef)(PCD_HandleTypeDef *hpcd, uint8_t epnum); /*!< pointer to USB OTG PCD ISO OUT Incomplete callback */ +typedef void (*pPCD_IsoInIncpltCallbackTypeDef)(PCD_HandleTypeDef *hpcd, uint8_t epnum); /*!< pointer to USB OTG PCD ISO IN Incomplete callback */ +typedef void (*pPCD_LpmCallbackTypeDef)(PCD_HandleTypeDef *hpcd, PCD_LPM_MsgTypeDef msg); /*!< pointer to USB OTG PCD LPM callback */ + +/** + * @} + */ + +HAL_StatusTypeDef HAL_PCD_RegisterCallback(PCD_HandleTypeDef *hpcd, + HAL_PCD_CallbackIDTypeDef CallbackID, + pPCD_CallbackTypeDef pCallback); + +HAL_StatusTypeDef HAL_PCD_UnRegisterCallback(PCD_HandleTypeDef *hpcd, + HAL_PCD_CallbackIDTypeDef CallbackID); + +HAL_StatusTypeDef HAL_PCD_RegisterDataOutStageCallback(PCD_HandleTypeDef *hpcd, + pPCD_DataOutStageCallbackTypeDef pCallback); + +HAL_StatusTypeDef HAL_PCD_UnRegisterDataOutStageCallback(PCD_HandleTypeDef *hpcd); + +HAL_StatusTypeDef HAL_PCD_RegisterDataInStageCallback(PCD_HandleTypeDef *hpcd, + pPCD_DataInStageCallbackTypeDef pCallback); + +HAL_StatusTypeDef HAL_PCD_UnRegisterDataInStageCallback(PCD_HandleTypeDef *hpcd); + +HAL_StatusTypeDef HAL_PCD_RegisterIsoOutIncpltCallback(PCD_HandleTypeDef *hpcd, + pPCD_IsoOutIncpltCallbackTypeDef pCallback); + +HAL_StatusTypeDef HAL_PCD_UnRegisterIsoOutIncpltCallback(PCD_HandleTypeDef *hpcd); + +HAL_StatusTypeDef HAL_PCD_RegisterIsoInIncpltCallback(PCD_HandleTypeDef *hpcd, + pPCD_IsoInIncpltCallbackTypeDef pCallback); + +HAL_StatusTypeDef HAL_PCD_UnRegisterIsoInIncpltCallback(PCD_HandleTypeDef *hpcd); + +HAL_StatusTypeDef HAL_PCD_RegisterLpmCallback(PCD_HandleTypeDef *hpcd, + pPCD_LpmCallbackTypeDef pCallback); + +HAL_StatusTypeDef HAL_PCD_UnRegisterLpmCallback(PCD_HandleTypeDef *hpcd); +#endif /* USE_HAL_PCD_REGISTER_CALLBACKS */ +/** + * @} + */ + +/* I/O operation functions ***************************************************/ +/* Non-Blocking mode: Interrupt */ +/** @addtogroup PCD_Exported_Functions_Group2 Input and Output operation functions + * @{ + */ +HAL_StatusTypeDef HAL_PCD_Start(PCD_HandleTypeDef *hpcd); +HAL_StatusTypeDef HAL_PCD_Stop(PCD_HandleTypeDef *hpcd); +void HAL_PCD_IRQHandler(PCD_HandleTypeDef *hpcd); +void HAL_PCD_WKUP_IRQHandler(PCD_HandleTypeDef *hpcd); + +void HAL_PCD_SOFCallback(PCD_HandleTypeDef *hpcd); +void HAL_PCD_SetupStageCallback(PCD_HandleTypeDef *hpcd); +void HAL_PCD_ResetCallback(PCD_HandleTypeDef *hpcd); +void HAL_PCD_SuspendCallback(PCD_HandleTypeDef *hpcd); +void HAL_PCD_ResumeCallback(PCD_HandleTypeDef *hpcd); +void HAL_PCD_ConnectCallback(PCD_HandleTypeDef *hpcd); +void HAL_PCD_DisconnectCallback(PCD_HandleTypeDef *hpcd); + +void HAL_PCD_DataOutStageCallback(PCD_HandleTypeDef *hpcd, uint8_t epnum); +void HAL_PCD_DataInStageCallback(PCD_HandleTypeDef *hpcd, uint8_t epnum); +void HAL_PCD_ISOOUTIncompleteCallback(PCD_HandleTypeDef *hpcd, uint8_t epnum); +void HAL_PCD_ISOINIncompleteCallback(PCD_HandleTypeDef *hpcd, uint8_t epnum); +/** + * @} + */ + +/* Peripheral Control functions **********************************************/ +/** @addtogroup PCD_Exported_Functions_Group3 Peripheral Control functions + * @{ + */ +HAL_StatusTypeDef HAL_PCD_DevConnect(PCD_HandleTypeDef *hpcd); +HAL_StatusTypeDef HAL_PCD_DevDisconnect(PCD_HandleTypeDef *hpcd); +HAL_StatusTypeDef HAL_PCD_SetAddress(PCD_HandleTypeDef *hpcd, uint8_t address); +HAL_StatusTypeDef HAL_PCD_EP_Open(PCD_HandleTypeDef *hpcd, 
uint8_t ep_addr, + uint16_t ep_mps, uint8_t ep_type); + +HAL_StatusTypeDef HAL_PCD_EP_Close(PCD_HandleTypeDef *hpcd, uint8_t ep_addr); +HAL_StatusTypeDef HAL_PCD_EP_Receive(PCD_HandleTypeDef *hpcd, uint8_t ep_addr, + uint8_t *pBuf, uint32_t len); + +HAL_StatusTypeDef HAL_PCD_EP_Transmit(PCD_HandleTypeDef *hpcd, uint8_t ep_addr, + uint8_t *pBuf, uint32_t len); + +HAL_StatusTypeDef HAL_PCD_EP_SetStall(PCD_HandleTypeDef *hpcd, uint8_t ep_addr); +HAL_StatusTypeDef HAL_PCD_EP_ClrStall(PCD_HandleTypeDef *hpcd, uint8_t ep_addr); +HAL_StatusTypeDef HAL_PCD_EP_Flush(PCD_HandleTypeDef *hpcd, uint8_t ep_addr); +HAL_StatusTypeDef HAL_PCD_ActivateRemoteWakeup(PCD_HandleTypeDef *hpcd); +HAL_StatusTypeDef HAL_PCD_DeActivateRemoteWakeup(PCD_HandleTypeDef *hpcd); +HAL_StatusTypeDef HAL_PCD_SetTestMode(PCD_HandleTypeDef *hpcd, uint8_t testmode); + +uint32_t HAL_PCD_EP_GetRxCount(PCD_HandleTypeDef *hpcd, uint8_t ep_addr); +/** + * @} + */ + +/* Peripheral State functions ************************************************/ +/** @addtogroup PCD_Exported_Functions_Group4 Peripheral State functions + * @{ + */ +PCD_StateTypeDef HAL_PCD_GetState(PCD_HandleTypeDef *hpcd); +/** + * @} + */ + +/** + * @} + */ + +/* Private constants ---------------------------------------------------------*/ +/** @defgroup PCD_Private_Constants PCD Private Constants + * @{ + */ +/** @defgroup USB_EXTI_Line_Interrupt USB EXTI line interrupt + * @{ + */ +#if defined (USB_OTG_FS) || defined (USB_OTG_HS) +#define USB_OTG_FS_WAKEUP_EXTI_LINE (0x1U << 18) /*!< USB FS EXTI Line WakeUp Interrupt */ +#define USB_OTG_HS_WAKEUP_EXTI_LINE (0x1U << 20) /*!< USB HS EXTI Line WakeUp Interrupt */ +#endif /* defined (USB_OTG_FS) || defined (USB_OTG_HS) */ + + +/** + * @} + */ +/** + * @} + */ + +#if defined (USB_OTG_FS) || defined (USB_OTG_HS) +#ifndef USB_OTG_DOEPINT_OTEPSPR +#define USB_OTG_DOEPINT_OTEPSPR (0x1UL << 5) /*!< Status Phase Received interrupt */ +#endif /* defined USB_OTG_DOEPINT_OTEPSPR */ + +#ifndef USB_OTG_DOEPMSK_OTEPSPRM +#define USB_OTG_DOEPMSK_OTEPSPRM (0x1UL << 5) /*!< Setup Packet Received interrupt mask */ +#endif /* defined USB_OTG_DOEPMSK_OTEPSPRM */ + +#ifndef USB_OTG_DOEPINT_NAK +#define USB_OTG_DOEPINT_NAK (0x1UL << 13) /*!< NAK interrupt */ +#endif /* defined USB_OTG_DOEPINT_NAK */ + +#ifndef USB_OTG_DOEPMSK_NAKM +#define USB_OTG_DOEPMSK_NAKM (0x1UL << 13) /*!< OUT Packet NAK interrupt mask */ +#endif /* defined USB_OTG_DOEPMSK_NAKM */ + +#ifndef USB_OTG_DOEPINT_STPKTRX +#define USB_OTG_DOEPINT_STPKTRX (0x1UL << 15) /*!< Setup Packet Received interrupt */ +#endif /* defined USB_OTG_DOEPINT_STPKTRX */ + +#ifndef USB_OTG_DOEPMSK_NYETM +#define USB_OTG_DOEPMSK_NYETM (0x1UL << 14) /*!< Setup Packet Received interrupt mask */ +#endif /* defined USB_OTG_DOEPMSK_NYETM */ +#endif /* defined (USB_OTG_FS) || defined (USB_OTG_HS) */ + +/* Private macros ------------------------------------------------------------*/ +/** @defgroup PCD_Private_Macros PCD Private Macros + * @{ + */ + +/** + * @} + */ + +/** + * @} + */ + +/** + * @} + */ +#endif /* defined (USB_OTG_FS) || defined (USB_OTG_HS) */ + +#ifdef __cplusplus +} +#endif + +#endif /* STM32F7xx_HAL_PCD_H */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_pcd_ex.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_pcd_ex.h new file mode 100644 index 0000000..d33c541 --- /dev/null +++ 
b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_pcd_ex.h @@ -0,0 +1,83 @@ +/** + ****************************************************************************** + * @file stm32f7xx_hal_pcd_ex.h + * @author MCD Application Team + * @brief Header file of PCD HAL Extension module. + ****************************************************************************** + * @attention + * + * Copyright (c) 2017 STMicroelectronics. + * All rights reserved. + * + * This software is licensed under terms that can be found in the LICENSE file + * in the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. + * + ****************************************************************************** + */ + +/* Define to prevent recursive inclusion -------------------------------------*/ +#ifndef STM32F7xx_HAL_PCD_EX_H +#define STM32F7xx_HAL_PCD_EX_H + +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ + +/* Includes ------------------------------------------------------------------*/ +#include "stm32f7xx_hal_def.h" + +#if defined (USB_OTG_FS) || defined (USB_OTG_HS) +/** @addtogroup STM32F7xx_HAL_Driver + * @{ + */ + +/** @addtogroup PCDEx + * @{ + */ +/* Exported types ------------------------------------------------------------*/ +/* Exported constants --------------------------------------------------------*/ +/* Exported macros -----------------------------------------------------------*/ +/* Exported functions --------------------------------------------------------*/ +/** @addtogroup PCDEx_Exported_Functions PCDEx Exported Functions + * @{ + */ +/** @addtogroup PCDEx_Exported_Functions_Group1 Peripheral Control functions + * @{ + */ + +#if defined (USB_OTG_FS) || defined (USB_OTG_HS) +HAL_StatusTypeDef HAL_PCDEx_SetTxFiFo(PCD_HandleTypeDef *hpcd, uint8_t fifo, uint16_t size); +HAL_StatusTypeDef HAL_PCDEx_SetRxFiFo(PCD_HandleTypeDef *hpcd, uint16_t size); +#endif /* defined (USB_OTG_FS) || defined (USB_OTG_HS) */ + + +HAL_StatusTypeDef HAL_PCDEx_ActivateLPM(PCD_HandleTypeDef *hpcd); +HAL_StatusTypeDef HAL_PCDEx_DeActivateLPM(PCD_HandleTypeDef *hpcd); + +void HAL_PCDEx_LPM_Callback(PCD_HandleTypeDef *hpcd, PCD_LPM_MsgTypeDef msg); +void HAL_PCDEx_BCD_Callback(PCD_HandleTypeDef *hpcd, PCD_BCD_MsgTypeDef msg); + +/** + * @} + */ + +/** + * @} + */ + +/** + * @} + */ + +/** + * @} + */ +#endif /* defined (USB_OTG_FS) || defined (USB_OTG_HS) */ + +#ifdef __cplusplus +} +#endif /* __cplusplus */ + + +#endif /* STM32F7xx_HAL_PCD_EX_H */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_pwr.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_pwr.h new file mode 100644 index 0000000..736ff6f --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_pwr.h @@ -0,0 +1,402 @@ +/** + ****************************************************************************** + * @file stm32f7xx_hal_pwr.h + * @author MCD Application Team + * @brief Header file of PWR HAL module. + ****************************************************************************** + * @attention + * + * Copyright (c) 2017 STMicroelectronics. + * All rights reserved. + * + * This software is licensed under terms that can be found in the LICENSE file + * in the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. 
+ * + ****************************************************************************** + */ + +/* Define to prevent recursive inclusion -------------------------------------*/ +#ifndef __STM32F7xx_HAL_PWR_H +#define __STM32F7xx_HAL_PWR_H + +#ifdef __cplusplus + extern "C" { +#endif + +/* Includes ------------------------------------------------------------------*/ +#include "stm32f7xx_hal_def.h" + +/** @addtogroup STM32F7xx_HAL_Driver + * @{ + */ + +/** @addtogroup PWR + * @{ + */ + +/* Exported types ------------------------------------------------------------*/ + +/** @defgroup PWR_Exported_Types PWR Exported Types + * @{ + */ + +/** + * @brief PWR PVD configuration structure definition + */ +typedef struct +{ + uint32_t PVDLevel; /*!< PVDLevel: Specifies the PVD detection level. + This parameter can be a value of @ref PWR_PVD_detection_level */ + + uint32_t Mode; /*!< Mode: Specifies the operating mode for the selected pins. + This parameter can be a value of @ref PWR_PVD_Mode */ +}PWR_PVDTypeDef; + +/** + * @} + */ + +/* Exported constants --------------------------------------------------------*/ +/** @defgroup PWR_Exported_Constants PWR Exported Constants + * @{ + */ + +/** @defgroup PWR_PVD_detection_level PWR PVD detection level + * @{ + */ +#define PWR_PVDLEVEL_0 PWR_CR1_PLS_LEV0 +#define PWR_PVDLEVEL_1 PWR_CR1_PLS_LEV1 +#define PWR_PVDLEVEL_2 PWR_CR1_PLS_LEV2 +#define PWR_PVDLEVEL_3 PWR_CR1_PLS_LEV3 +#define PWR_PVDLEVEL_4 PWR_CR1_PLS_LEV4 +#define PWR_PVDLEVEL_5 PWR_CR1_PLS_LEV5 +#define PWR_PVDLEVEL_6 PWR_CR1_PLS_LEV6 +#define PWR_PVDLEVEL_7 PWR_CR1_PLS_LEV7/* External input analog voltage + (Compare internally to VREFINT) */ + +/** + * @} + */ + +/** @defgroup PWR_PVD_Mode PWR PVD Mode + * @{ + */ +#define PWR_PVD_MODE_NORMAL ((uint32_t)0x00000000U) /*!< basic mode is used */ +#define PWR_PVD_MODE_IT_RISING ((uint32_t)0x00010001U) /*!< External Interrupt Mode with Rising edge trigger detection */ +#define PWR_PVD_MODE_IT_FALLING ((uint32_t)0x00010002U) /*!< External Interrupt Mode with Falling edge trigger detection */ +#define PWR_PVD_MODE_IT_RISING_FALLING ((uint32_t)0x00010003U) /*!< External Interrupt Mode with Rising/Falling edge trigger detection */ +#define PWR_PVD_MODE_EVENT_RISING ((uint32_t)0x00020001U) /*!< Event Mode with Rising edge trigger detection */ +#define PWR_PVD_MODE_EVENT_FALLING ((uint32_t)0x00020002U) /*!< Event Mode with Falling edge trigger detection */ +#define PWR_PVD_MODE_EVENT_RISING_FALLING ((uint32_t)0x00020003U) /*!< Event Mode with Rising/Falling edge trigger detection */ +/** + * @} + */ + +/** @defgroup PWR_Regulator_state_in_STOP_mode PWR Regulator state in SLEEP/STOP mode + * @{ + */ +#define PWR_MAINREGULATOR_ON ((uint32_t)0x00000000U) +#define PWR_LOWPOWERREGULATOR_ON PWR_CR1_LPDS +/** + * @} + */ + +/** @defgroup PWR_SLEEP_mode_entry PWR SLEEP mode entry + * @{ + */ +#define PWR_SLEEPENTRY_WFI ((uint8_t)0x01U) +#define PWR_SLEEPENTRY_WFE ((uint8_t)0x02U) +/** + * @} + */ + +/** @defgroup PWR_STOP_mode_entry PWR STOP mode entry + * @{ + */ +#define PWR_STOPENTRY_WFI ((uint8_t)0x01U) +#define PWR_STOPENTRY_WFE ((uint8_t)0x02U) +/** + * @} + */ + +/** @defgroup PWR_Regulator_Voltage_Scale PWR Regulator Voltage Scale + * @{ + */ +#define PWR_REGULATOR_VOLTAGE_SCALE1 PWR_CR1_VOS +#define PWR_REGULATOR_VOLTAGE_SCALE2 PWR_CR1_VOS_1 +#define PWR_REGULATOR_VOLTAGE_SCALE3 PWR_CR1_VOS_0 +/** + * @} + */ + +/** @defgroup PWR_Flag PWR Flag + * @{ + */ +#define PWR_FLAG_WU PWR_CSR1_WUIF +#define PWR_FLAG_SB PWR_CSR1_SBF +#define PWR_FLAG_PVDO 
PWR_CSR1_PVDO +#define PWR_FLAG_BRR PWR_CSR1_BRR +#define PWR_FLAG_VOSRDY PWR_CSR1_VOSRDY +/** + * @} + */ + +/** + * @} + */ + +/* Exported macro ------------------------------------------------------------*/ +/** @defgroup PWR_Exported_Macro PWR Exported Macro + * @{ + */ + +/** @brief macros configure the main internal regulator output voltage. + * @param __REGULATOR__ specifies the regulator output voltage to achieve + * a tradeoff between performance and power consumption when the device does + * not operate at the maximum frequency (refer to the datasheets for more details). + * This parameter can be one of the following values: + * @arg PWR_REGULATOR_VOLTAGE_SCALE1: Regulator voltage output Scale 1 mode + * @arg PWR_REGULATOR_VOLTAGE_SCALE2: Regulator voltage output Scale 2 mode + * @arg PWR_REGULATOR_VOLTAGE_SCALE3: Regulator voltage output Scale 3 mode + * @retval None + */ +#define __HAL_PWR_VOLTAGESCALING_CONFIG(__REGULATOR__) do { \ + __IO uint32_t tmpreg; \ + MODIFY_REG(PWR->CR1, PWR_CR1_VOS, (__REGULATOR__)); \ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(PWR->CR1, PWR_CR1_VOS); \ + UNUSED(tmpreg); \ + } while(0) + +/** @brief Check PWR flag is set or not. + * @param __FLAG__ specifies the flag to check. + * This parameter can be one of the following values: + * @arg PWR_FLAG_WU: Wake Up flag. This flag indicates that a wakeup event + * was received on the internal wakeup line in standby mode (RTC alarm (Alarm A or Alarm B), + * RTC Tamper event, RTC TimeStamp event or RTC Wakeup)). + * @arg PWR_FLAG_SB: StandBy flag. This flag indicates that the system was + * resumed from StandBy mode. + * @arg PWR_FLAG_PVDO: PVD Output. This flag is valid only if PVD is enabled + * by the HAL_PWR_EnablePVD() function. The PVD is stopped by Standby mode + * For this reason, this bit is equal to 0 after Standby or reset + * until the PVDE bit is set. + * @arg PWR_FLAG_BRR: Backup regulator ready flag. This bit is not reset + * when the device wakes up from Standby mode or by a system reset + * or power reset. + * @arg PWR_FLAG_VOSRDY: This flag indicates that the Regulator voltage + * scaling output selection is ready. + * @retval The new state of __FLAG__ (TRUE or FALSE). + */ +#define __HAL_PWR_GET_FLAG(__FLAG__) ((PWR->CSR1 & (__FLAG__)) == (__FLAG__)) + +/** @brief Clear the PWR's pending flags. + * @param __FLAG__ specifies the flag to clear. + * This parameter can be one of the following values: + * @arg PWR_FLAG_SB: StandBy flag + */ +#define __HAL_PWR_CLEAR_FLAG(__FLAG__) (PWR->CR1 |= (__FLAG__) << 2) + +/** + * @brief Enable the PVD Exti Line 16. + * @retval None. + */ +#define __HAL_PWR_PVD_EXTI_ENABLE_IT() (EXTI->IMR |= (PWR_EXTI_LINE_PVD)) + +/** + * @brief Disable the PVD EXTI Line 16. + * @retval None. + */ +#define __HAL_PWR_PVD_EXTI_DISABLE_IT() (EXTI->IMR &= ~(PWR_EXTI_LINE_PVD)) + +/** + * @brief Enable event on PVD Exti Line 16. + * @retval None. + */ +#define __HAL_PWR_PVD_EXTI_ENABLE_EVENT() (EXTI->EMR |= (PWR_EXTI_LINE_PVD)) + +/** + * @brief Disable event on PVD Exti Line 16. + * @retval None. + */ +#define __HAL_PWR_PVD_EXTI_DISABLE_EVENT() (EXTI->EMR &= ~(PWR_EXTI_LINE_PVD)) + +/** + * @brief Enable the PVD Extended Interrupt Rising Trigger. + * @retval None. + */ +#define __HAL_PWR_PVD_EXTI_ENABLE_RISING_EDGE() SET_BIT(EXTI->RTSR, PWR_EXTI_LINE_PVD) + +/** + * @brief Disable the PVD Extended Interrupt Rising Trigger. + * @retval None. 
+ */ +#define __HAL_PWR_PVD_EXTI_DISABLE_RISING_EDGE() CLEAR_BIT(EXTI->RTSR, PWR_EXTI_LINE_PVD) + +/** + * @brief Enable the PVD Extended Interrupt Falling Trigger. + * @retval None. + */ +#define __HAL_PWR_PVD_EXTI_ENABLE_FALLING_EDGE() SET_BIT(EXTI->FTSR, PWR_EXTI_LINE_PVD) + + +/** + * @brief Disable the PVD Extended Interrupt Falling Trigger. + * @retval None. + */ +#define __HAL_PWR_PVD_EXTI_DISABLE_FALLING_EDGE() CLEAR_BIT(EXTI->FTSR, PWR_EXTI_LINE_PVD) + + +/** + * @brief PVD EXTI line configuration: set rising & falling edge trigger. + * @retval None. + */ +#define __HAL_PWR_PVD_EXTI_ENABLE_RISING_FALLING_EDGE() __HAL_PWR_PVD_EXTI_ENABLE_RISING_EDGE();__HAL_PWR_PVD_EXTI_ENABLE_FALLING_EDGE(); + +/** + * @brief Disable the PVD Extended Interrupt Rising & Falling Trigger. + * @retval None. + */ +#define __HAL_PWR_PVD_EXTI_DISABLE_RISING_FALLING_EDGE() __HAL_PWR_PVD_EXTI_DISABLE_RISING_EDGE();__HAL_PWR_PVD_EXTI_DISABLE_FALLING_EDGE(); + +/** + * @brief checks whether the specified PVD Exti interrupt flag is set or not. + * @retval EXTI PVD Line Status. + */ +#define __HAL_PWR_PVD_EXTI_GET_FLAG() (EXTI->PR & (PWR_EXTI_LINE_PVD)) + +/** + * @brief Clear the PVD Exti flag. + * @retval None. + */ +#define __HAL_PWR_PVD_EXTI_CLEAR_FLAG() (EXTI->PR = (PWR_EXTI_LINE_PVD)) + +/** + * @brief Generates a Software interrupt on PVD EXTI line. + * @retval None + */ +#define __HAL_PWR_PVD_EXTI_GENERATE_SWIT() (EXTI->SWIER |= (PWR_EXTI_LINE_PVD)) + +/** + * @} + */ + +/* Include PWR HAL Extension module */ +#include "stm32f7xx_hal_pwr_ex.h" + +/* Exported functions --------------------------------------------------------*/ +/** @addtogroup PWR_Exported_Functions PWR Exported Functions + * @{ + */ + +/** @addtogroup PWR_Exported_Functions_Group1 Initialization and de-initialization functions + * @{ + */ +/* Initialization and de-initialization functions *****************************/ +void HAL_PWR_DeInit(void); +void HAL_PWR_EnableBkUpAccess(void); +void HAL_PWR_DisableBkUpAccess(void); +/** + * @} + */ + +/** @addtogroup PWR_Exported_Functions_Group2 Peripheral Control functions + * @{ + */ +/* Peripheral Control functions **********************************************/ +/* PVD configuration */ +void HAL_PWR_ConfigPVD(PWR_PVDTypeDef *sConfigPVD); +void HAL_PWR_EnablePVD(void); +void HAL_PWR_DisablePVD(void); + +/* WakeUp pins configuration */ +void HAL_PWR_EnableWakeUpPin(uint32_t WakeUpPinPolarity); +void HAL_PWR_DisableWakeUpPin(uint32_t WakeUpPinx); + +/* Low Power modes entry */ +void HAL_PWR_EnterSTOPMode(uint32_t Regulator, uint8_t STOPEntry); +void HAL_PWR_EnterSLEEPMode(uint32_t Regulator, uint8_t SLEEPEntry); +void HAL_PWR_EnterSTANDBYMode(void); + +/* Power PVD IRQ Handler */ +void HAL_PWR_PVD_IRQHandler(void); +void HAL_PWR_PVDCallback(void); + +/* Cortex System Control functions *******************************************/ +void HAL_PWR_EnableSleepOnExit(void); +void HAL_PWR_DisableSleepOnExit(void); +void HAL_PWR_EnableSEVOnPend(void); +void HAL_PWR_DisableSEVOnPend(void); +/** + * @} + */ + +/** + * @} + */ + +/* Private types -------------------------------------------------------------*/ +/* Private variables ---------------------------------------------------------*/ +/* Private constants ---------------------------------------------------------*/ +/** @defgroup PWR_Private_Constants PWR Private Constants + * @{ + */ + +/** @defgroup PWR_PVD_EXTI_Line PWR PVD EXTI Line + * @{ + */ +#define PWR_EXTI_LINE_PVD ((uint32_t)EXTI_IMR_IM16) /*!< External interrupt line 16 Connected to the 
PVD EXTI Line */ +/** + * @} + */ + +/** + * @} + */ +/* Private macros ------------------------------------------------------------*/ +/** @defgroup PWR_Private_Macros PWR Private Macros + * @{ + */ + +/** @defgroup PWR_IS_PWR_Definitions PWR Private macros to check input parameters + * @{ + */ +#define IS_PWR_PVD_LEVEL(LEVEL) (((LEVEL) == PWR_PVDLEVEL_0) || ((LEVEL) == PWR_PVDLEVEL_1)|| \ + ((LEVEL) == PWR_PVDLEVEL_2) || ((LEVEL) == PWR_PVDLEVEL_3)|| \ + ((LEVEL) == PWR_PVDLEVEL_4) || ((LEVEL) == PWR_PVDLEVEL_5)|| \ + ((LEVEL) == PWR_PVDLEVEL_6) || ((LEVEL) == PWR_PVDLEVEL_7)) +#define IS_PWR_PVD_MODE(MODE) (((MODE) == PWR_PVD_MODE_IT_RISING)|| ((MODE) == PWR_PVD_MODE_IT_FALLING) || \ + ((MODE) == PWR_PVD_MODE_IT_RISING_FALLING) || ((MODE) == PWR_PVD_MODE_EVENT_RISING) || \ + ((MODE) == PWR_PVD_MODE_EVENT_FALLING) || ((MODE) == PWR_PVD_MODE_EVENT_RISING_FALLING) || \ + ((MODE) == PWR_PVD_MODE_NORMAL)) +#define IS_PWR_REGULATOR(REGULATOR) (((REGULATOR) == PWR_MAINREGULATOR_ON) || \ + ((REGULATOR) == PWR_LOWPOWERREGULATOR_ON)) +#define IS_PWR_SLEEP_ENTRY(ENTRY) (((ENTRY) == PWR_SLEEPENTRY_WFI) || ((ENTRY) == PWR_SLEEPENTRY_WFE)) +#define IS_PWR_STOP_ENTRY(ENTRY) (((ENTRY) == PWR_STOPENTRY_WFI) || ((ENTRY) == PWR_STOPENTRY_WFE)) +#define IS_PWR_REGULATOR_VOLTAGE(VOLTAGE) (((VOLTAGE) == PWR_REGULATOR_VOLTAGE_SCALE1) || \ + ((VOLTAGE) == PWR_REGULATOR_VOLTAGE_SCALE2) || \ + ((VOLTAGE) == PWR_REGULATOR_VOLTAGE_SCALE3)) + +/** + * @} + */ + +/** + * @} + */ + +/** + * @} + */ + +/** + * @} + */ + +#ifdef __cplusplus +} +#endif + + +#endif /* __STM32F7xx_HAL_PWR_H */ + diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_pwr_ex.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_pwr_ex.h new file mode 100644 index 0000000..e4a5995 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_pwr_ex.h @@ -0,0 +1,260 @@ +/** + ****************************************************************************** + * @file stm32f7xx_hal_pwr_ex.h + * @author MCD Application Team + * @brief Header file of PWR HAL Extension module. + ****************************************************************************** + * @attention + * + * Copyright (c) 2017 STMicroelectronics. + * All rights reserved. + * + * This software is licensed under terms that can be found in the LICENSE file + * in the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. 
+ * + ****************************************************************************** + */ + +/* Define to prevent recursive inclusion -------------------------------------*/ +#ifndef __STM32F7xx_HAL_PWR_EX_H +#define __STM32F7xx_HAL_PWR_EX_H + +#ifdef __cplusplus + extern "C" { +#endif + +/* Includes ------------------------------------------------------------------*/ +#include "stm32f7xx_hal_def.h" + +/** @addtogroup STM32F7xx_HAL_Driver + * @{ + */ + +/** @addtogroup PWREx + * @{ + */ + +/* Exported types ------------------------------------------------------------*/ +/* Exported constants --------------------------------------------------------*/ +/** @defgroup PWREx_Exported_Constants PWREx Exported Constants + * @{ + */ +/** @defgroup PWREx_WakeUp_Pins PWREx Wake Up Pins + * @{ + */ +#define PWR_WAKEUP_PIN1 PWR_CSR2_EWUP1 +#define PWR_WAKEUP_PIN2 PWR_CSR2_EWUP2 +#define PWR_WAKEUP_PIN3 PWR_CSR2_EWUP3 +#define PWR_WAKEUP_PIN4 PWR_CSR2_EWUP4 +#define PWR_WAKEUP_PIN5 PWR_CSR2_EWUP5 +#define PWR_WAKEUP_PIN6 PWR_CSR2_EWUP6 +#define PWR_WAKEUP_PIN1_HIGH PWR_CSR2_EWUP1 +#define PWR_WAKEUP_PIN2_HIGH PWR_CSR2_EWUP2 +#define PWR_WAKEUP_PIN3_HIGH PWR_CSR2_EWUP3 +#define PWR_WAKEUP_PIN4_HIGH PWR_CSR2_EWUP4 +#define PWR_WAKEUP_PIN5_HIGH PWR_CSR2_EWUP5 +#define PWR_WAKEUP_PIN6_HIGH PWR_CSR2_EWUP6 +#define PWR_WAKEUP_PIN1_LOW (uint32_t)((PWR_CR2_WUPP1<<6) | PWR_CSR2_EWUP1) +#define PWR_WAKEUP_PIN2_LOW (uint32_t)((PWR_CR2_WUPP2<<6) | PWR_CSR2_EWUP2) +#define PWR_WAKEUP_PIN3_LOW (uint32_t)((PWR_CR2_WUPP3<<6) | PWR_CSR2_EWUP3) +#define PWR_WAKEUP_PIN4_LOW (uint32_t)((PWR_CR2_WUPP4<<6) | PWR_CSR2_EWUP4) +#define PWR_WAKEUP_PIN5_LOW (uint32_t)((PWR_CR2_WUPP5<<6) | PWR_CSR2_EWUP5) +#define PWR_WAKEUP_PIN6_LOW (uint32_t)((PWR_CR2_WUPP6<<6) | PWR_CSR2_EWUP6) + +/** + * @} + */ + +/** @defgroup PWREx_Regulator_state_in_UnderDrive_mode PWREx Regulator state in UnderDrive mode + * @{ + */ +#define PWR_MAINREGULATOR_UNDERDRIVE_ON PWR_CR1_MRUDS +#define PWR_LOWPOWERREGULATOR_UNDERDRIVE_ON ((uint32_t)(PWR_CR1_LPDS | PWR_CR1_LPUDS)) +/** + * @} + */ + +/** @defgroup PWREx_Over_Under_Drive_Flag PWREx Over Under Drive Flag + * @{ + */ +#define PWR_FLAG_ODRDY PWR_CSR1_ODRDY +#define PWR_FLAG_ODSWRDY PWR_CSR1_ODSWRDY +#define PWR_FLAG_UDRDY PWR_CSR1_UDRDY +/** + * @} + */ + +/** @defgroup PWREx_Wakeup_Pins_Flag PWREx Wake Up Pin Flags + * @{ + */ +#define PWR_WAKEUP_PIN_FLAG1 PWR_CSR2_WUPF1 +#define PWR_WAKEUP_PIN_FLAG2 PWR_CSR2_WUPF2 +#define PWR_WAKEUP_PIN_FLAG3 PWR_CSR2_WUPF3 +#define PWR_WAKEUP_PIN_FLAG4 PWR_CSR2_WUPF4 +#define PWR_WAKEUP_PIN_FLAG5 PWR_CSR2_WUPF5 +#define PWR_WAKEUP_PIN_FLAG6 PWR_CSR2_WUPF6 +/** + * @} + */ + +/** + * @} + */ + +/* Exported macro ------------------------------------------------------------*/ +/** @defgroup PWREx_Exported_Macro PWREx Exported Macro + * @{ + */ +/** @brief Macros to enable or disable the Over drive mode. + */ +#define __HAL_PWR_OVERDRIVE_ENABLE() (PWR->CR1 |= (uint32_t)PWR_CR1_ODEN) +#define __HAL_PWR_OVERDRIVE_DISABLE() (PWR->CR1 &= (uint32_t)(~PWR_CR1_ODEN)) + +/** @brief Macros to enable or disable the Over drive switching. + */ +#define __HAL_PWR_OVERDRIVESWITCHING_ENABLE() (PWR->CR1 |= (uint32_t)PWR_CR1_ODSWEN) +#define __HAL_PWR_OVERDRIVESWITCHING_DISABLE() (PWR->CR1 &= (uint32_t)(~PWR_CR1_ODSWEN)) + +/** @brief Macros to enable or disable the Under drive mode. + * @note This mode is enabled only with STOP low power mode. + * In this mode, the 1.2V domain is preserved in reduced leakage mode. 
This + * mode is only available when the main regulator or the low power regulator + * is in low voltage mode. + * @note If the Under-drive mode was enabled, it is automatically disabled after + * exiting Stop mode. + * When the voltage regulator operates in Under-drive mode, an additional + * startup delay is induced when waking up from Stop mode. + */ +#define __HAL_PWR_UNDERDRIVE_ENABLE() (PWR->CR1 |= (uint32_t)PWR_CR1_UDEN) +#define __HAL_PWR_UNDERDRIVE_DISABLE() (PWR->CR1 &= (uint32_t)(~PWR_CR1_UDEN)) + +/** @brief Check PWR flag is set or not. + * @param __FLAG__ specifies the flag to check. + * This parameter can be one of the following values: + * @arg PWR_FLAG_ODRDY: This flag indicates that the Over-drive mode + * is ready + * @arg PWR_FLAG_ODSWRDY: This flag indicates that the Over-drive mode + * switching is ready + * @arg PWR_FLAG_UDRDY: This flag indicates that the Under-drive mode + * is enabled in Stop mode + * @retval The new state of __FLAG__ (TRUE or FALSE). + */ +#define __HAL_PWR_GET_ODRUDR_FLAG(__FLAG__) ((PWR->CSR1 & (__FLAG__)) == (__FLAG__)) + +/** @brief Clear the Under-Drive Ready flag. + */ +#define __HAL_PWR_CLEAR_ODRUDR_FLAG() (PWR->CSR1 |= (PWR_FLAG_UDRDY | PWR_CSR1_EIWUP)) + +/** @brief Check Wake Up flag is set or not. + * @param __WUFLAG__ specifies the Wake Up flag to check. + * This parameter can be one of the following values: + * @arg PWR_WAKEUP_PIN_FLAG1: Wakeup Pin Flag for PA0 + * @arg PWR_WAKEUP_PIN_FLAG2: Wakeup Pin Flag for PA2 + * @arg PWR_WAKEUP_PIN_FLAG3: Wakeup Pin Flag for PC1 + * @arg PWR_WAKEUP_PIN_FLAG4: Wakeup Pin Flag for PC13 + * @arg PWR_WAKEUP_PIN_FLAG5: Wakeup Pin Flag for PI8 + * @arg PWR_WAKEUP_PIN_FLAG6: Wakeup Pin Flag for PI11 + */ +#define __HAL_PWR_GET_WAKEUP_FLAG(__WUFLAG__) (PWR->CSR2 & (__WUFLAG__)) + +/** @brief Clear the WakeUp pins flags. + * @param __WUFLAG__ specifies the Wake Up pin flag to clear. 
+ * This parameter can be one of the following values: + * @arg PWR_WAKEUP_PIN_FLAG1: Wakeup Pin Flag for PA0 + * @arg PWR_WAKEUP_PIN_FLAG2: Wakeup Pin Flag for PA2 + * @arg PWR_WAKEUP_PIN_FLAG3: Wakeup Pin Flag for PC1 + * @arg PWR_WAKEUP_PIN_FLAG4: Wakeup Pin Flag for PC13 + * @arg PWR_WAKEUP_PIN_FLAG5: Wakeup Pin Flag for PI8 + * @arg PWR_WAKEUP_PIN_FLAG6: Wakeup Pin Flag for PI11 + */ +#define __HAL_PWR_CLEAR_WAKEUP_FLAG(__WUFLAG__) (PWR->CR2 |= (__WUFLAG__)) +/** + * @} + */ +/* Exported functions --------------------------------------------------------*/ +/** @addtogroup PWREx_Exported_Functions PWREx Exported Functions + * @{ + */ + +/** @addtogroup PWREx_Exported_Functions_Group1 + * @{ + */ +uint32_t HAL_PWREx_GetVoltageRange(void); +HAL_StatusTypeDef HAL_PWREx_ControlVoltageScaling(uint32_t VoltageScaling); + +void HAL_PWREx_EnableFlashPowerDown(void); +void HAL_PWREx_DisableFlashPowerDown(void); +HAL_StatusTypeDef HAL_PWREx_EnableBkUpReg(void); +HAL_StatusTypeDef HAL_PWREx_DisableBkUpReg(void); + +void HAL_PWREx_EnableMainRegulatorLowVoltage(void); +void HAL_PWREx_DisableMainRegulatorLowVoltage(void); +void HAL_PWREx_EnableLowRegulatorLowVoltage(void); +void HAL_PWREx_DisableLowRegulatorLowVoltage(void); + +HAL_StatusTypeDef HAL_PWREx_EnableOverDrive(void); +HAL_StatusTypeDef HAL_PWREx_DisableOverDrive(void); +HAL_StatusTypeDef HAL_PWREx_EnterUnderDriveSTOPMode(uint32_t Regulator, uint8_t STOPEntry); + +/** + * @} + */ + +/** + * @} + */ +/* Private types -------------------------------------------------------------*/ +/* Private variables ---------------------------------------------------------*/ +/* Private constants ---------------------------------------------------------*/ +/* Private macros ------------------------------------------------------------*/ +/** @defgroup PWREx_Private_Macros PWREx Private Macros + * @{ + */ + +/** @defgroup PWREx_IS_PWR_Definitions PWREx Private macros to check input parameters + * @{ + */ +#define IS_PWR_REGULATOR_UNDERDRIVE(REGULATOR) (((REGULATOR) == PWR_MAINREGULATOR_UNDERDRIVE_ON) || \ + ((REGULATOR) == PWR_LOWPOWERREGULATOR_UNDERDRIVE_ON)) +#define IS_PWR_WAKEUP_PIN(__PIN__) (((__PIN__) == PWR_WAKEUP_PIN1) || \ + ((__PIN__) == PWR_WAKEUP_PIN2) || \ + ((__PIN__) == PWR_WAKEUP_PIN3) || \ + ((__PIN__) == PWR_WAKEUP_PIN4) || \ + ((__PIN__) == PWR_WAKEUP_PIN5) || \ + ((__PIN__) == PWR_WAKEUP_PIN6) || \ + ((__PIN__) == PWR_WAKEUP_PIN1_HIGH) || \ + ((__PIN__) == PWR_WAKEUP_PIN2_HIGH) || \ + ((__PIN__) == PWR_WAKEUP_PIN3_HIGH) || \ + ((__PIN__) == PWR_WAKEUP_PIN4_HIGH) || \ + ((__PIN__) == PWR_WAKEUP_PIN5_HIGH) || \ + ((__PIN__) == PWR_WAKEUP_PIN6_HIGH) || \ + ((__PIN__) == PWR_WAKEUP_PIN1_LOW) || \ + ((__PIN__) == PWR_WAKEUP_PIN2_LOW) || \ + ((__PIN__) == PWR_WAKEUP_PIN3_LOW) || \ + ((__PIN__) == PWR_WAKEUP_PIN4_LOW) || \ + ((__PIN__) == PWR_WAKEUP_PIN5_LOW) || \ + ((__PIN__) == PWR_WAKEUP_PIN6_LOW)) +/** + * @} + */ + +/** + * @} + */ + +/** + * @} + */ + +/** + * @} + */ + +#ifdef __cplusplus +} +#endif + + +#endif /* __STM32F7xx_HAL_PWR_EX_H */ + diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_rcc.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_rcc.h new file mode 100644 index 0000000..f3309a3 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_rcc.h @@ -0,0 +1,1307 @@ +/** + ****************************************************************************** + * 
@file stm32f7xx_hal_rcc.h + * @author MCD Application Team + * @brief Header file of RCC HAL module. + ****************************************************************************** + * @attention + * + * Copyright (c) 2017 STMicroelectronics. + * All rights reserved. + * + * This software is licensed under terms that can be found in the LICENSE file in + * the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. + ****************************************************************************** + */ + +/* Define to prevent recursive inclusion -------------------------------------*/ +#ifndef __STM32F7xx_HAL_RCC_H +#define __STM32F7xx_HAL_RCC_H + +#ifdef __cplusplus + extern "C" { +#endif + +/* Includes ------------------------------------------------------------------*/ +#include "stm32f7xx_hal_def.h" + +/* Include RCC HAL Extended module */ +/* (include on top of file since RCC structures are defined in extended file) */ +#include "stm32f7xx_hal_rcc_ex.h" + +/** @addtogroup STM32F7xx_HAL_Driver + * @{ + */ + +/** @addtogroup RCC + * @{ + */ + +/* Exported types ------------------------------------------------------------*/ + +/** @defgroup RCC_Exported_Types RCC Exported Types + * @{ + */ + +/** + * @brief RCC Internal/External Oscillator (HSE, HSI, LSE and LSI) configuration structure definition + */ +typedef struct +{ + uint32_t OscillatorType; /*!< The oscillators to be configured. + This parameter can be a value of @ref RCC_Oscillator_Type */ + + uint32_t HSEState; /*!< The new state of the HSE. + This parameter can be a value of @ref RCC_HSE_Config */ + + uint32_t LSEState; /*!< The new state of the LSE. + This parameter can be a value of @ref RCC_LSE_Config */ + + uint32_t HSIState; /*!< The new state of the HSI. + This parameter can be a value of @ref RCC_HSI_Config */ + + uint32_t HSICalibrationValue; /*!< The HSI calibration trimming value (default is RCC_HSICALIBRATION_DEFAULT). + This parameter must be a number between Min_Data = 0x00 and Max_Data = 0x1F */ + + uint32_t LSIState; /*!< The new state of the LSI. + This parameter can be a value of @ref RCC_LSI_Config */ + + RCC_PLLInitTypeDef PLL; /*!< PLL structure parameters */ + +}RCC_OscInitTypeDef; + +/** + * @brief RCC System, AHB and APB buses clock configuration structure definition + */ +typedef struct +{ + uint32_t ClockType; /*!< The clock to be configured. + This parameter can be a value of @ref RCC_System_Clock_Type */ + + uint32_t SYSCLKSource; /*!< The clock source (SYSCLKS) used as system clock. + This parameter can be a value of @ref RCC_System_Clock_Source */ + + uint32_t AHBCLKDivider; /*!< The AHB clock (HCLK) divider. This clock is derived from the system clock (SYSCLK). + This parameter can be a value of @ref RCC_AHB_Clock_Source */ + + uint32_t APB1CLKDivider; /*!< The APB1 clock (PCLK1) divider. This clock is derived from the AHB clock (HCLK). + This parameter can be a value of @ref RCC_APB1_APB2_Clock_Source */ + + uint32_t APB2CLKDivider; /*!< The APB2 clock (PCLK2) divider. This clock is derived from the AHB clock (HCLK). 
+ This parameter can be a value of @ref RCC_APB1_APB2_Clock_Source */ + +}RCC_ClkInitTypeDef; + +/** + * @} + */ + +/* Exported constants --------------------------------------------------------*/ +/** @defgroup RCC_Exported_Constants RCC Exported Constants + * @{ + */ + +/** @defgroup RCC_Oscillator_Type Oscillator Type + * @{ + */ +#define RCC_OSCILLATORTYPE_NONE ((uint32_t)0x00000000U) +#define RCC_OSCILLATORTYPE_HSE ((uint32_t)0x00000001U) +#define RCC_OSCILLATORTYPE_HSI ((uint32_t)0x00000002U) +#define RCC_OSCILLATORTYPE_LSE ((uint32_t)0x00000004U) +#define RCC_OSCILLATORTYPE_LSI ((uint32_t)0x00000008U) +/** + * @} + */ + +/** @defgroup RCC_HSE_Config RCC HSE Config + * @{ + */ +#define RCC_HSE_OFF ((uint32_t)0x00000000U) +#define RCC_HSE_ON RCC_CR_HSEON +#define RCC_HSE_BYPASS ((uint32_t)(RCC_CR_HSEBYP | RCC_CR_HSEON)) +/** + * @} + */ + +/** @defgroup RCC_LSE_Config RCC LSE Config + * @{ + */ +#define RCC_LSE_OFF ((uint32_t)0x00000000U) +#define RCC_LSE_ON RCC_BDCR_LSEON +#define RCC_LSE_BYPASS ((uint32_t)(RCC_BDCR_LSEBYP | RCC_BDCR_LSEON)) +/** + * @} + */ + +/** @defgroup RCC_HSI_Config RCC HSI Config + * @{ + */ +#define RCC_HSI_OFF ((uint32_t)0x00000000U) +#define RCC_HSI_ON RCC_CR_HSION + +#define RCC_HSICALIBRATION_DEFAULT ((uint32_t)0x10U) /* Default HSI calibration trimming value */ +/** + * @} + */ + +/** @defgroup RCC_LSI_Config RCC LSI Config + * @{ + */ +#define RCC_LSI_OFF ((uint32_t)0x00000000U) +#define RCC_LSI_ON RCC_CSR_LSION +/** + * @} + */ + +/** @defgroup RCC_PLL_Config RCC PLL Config + * @{ + */ +#define RCC_PLL_NONE ((uint32_t)0x00000000U) +#define RCC_PLL_OFF ((uint32_t)0x00000001U) +#define RCC_PLL_ON ((uint32_t)0x00000002U) +/** + * @} + */ + +/** @defgroup RCC_PLLP_Clock_Divider PLLP Clock Divider + * @{ + */ +#define RCC_PLLP_DIV2 ((uint32_t)0x00000002U) +#define RCC_PLLP_DIV4 ((uint32_t)0x00000004U) +#define RCC_PLLP_DIV6 ((uint32_t)0x00000006U) +#define RCC_PLLP_DIV8 ((uint32_t)0x00000008U) +/** + * @} + */ + +/** @defgroup RCC_PLL_Clock_Source PLL Clock Source + * @{ + */ +#define RCC_PLLSOURCE_HSI RCC_PLLCFGR_PLLSRC_HSI +#define RCC_PLLSOURCE_HSE RCC_PLLCFGR_PLLSRC_HSE +/** + * @} + */ + +/** @defgroup RCC_System_Clock_Type RCC System Clock Type + * @{ + */ +#define RCC_CLOCKTYPE_SYSCLK ((uint32_t)0x00000001U) +#define RCC_CLOCKTYPE_HCLK ((uint32_t)0x00000002U) +#define RCC_CLOCKTYPE_PCLK1 ((uint32_t)0x00000004U) +#define RCC_CLOCKTYPE_PCLK2 ((uint32_t)0x00000008U) +/** + * @} + */ + +/** @defgroup RCC_System_Clock_Source RCC System Clock Source + * @{ + */ +#define RCC_SYSCLKSOURCE_HSI RCC_CFGR_SW_HSI +#define RCC_SYSCLKSOURCE_HSE RCC_CFGR_SW_HSE +#define RCC_SYSCLKSOURCE_PLLCLK RCC_CFGR_SW_PLL +/** + * @} + */ + + +/** @defgroup RCC_System_Clock_Source_Status System Clock Source Status + * @{ + */ +#define RCC_SYSCLKSOURCE_STATUS_HSI RCC_CFGR_SWS_HSI /*!< HSI used as system clock */ +#define RCC_SYSCLKSOURCE_STATUS_HSE RCC_CFGR_SWS_HSE /*!< HSE used as system clock */ +#define RCC_SYSCLKSOURCE_STATUS_PLLCLK RCC_CFGR_SWS_PLL /*!< PLL used as system clock */ +/** + * @} + */ + +/** @defgroup RCC_AHB_Clock_Source RCC AHB Clock Source + * @{ + */ +#define RCC_SYSCLK_DIV1 RCC_CFGR_HPRE_DIV1 +#define RCC_SYSCLK_DIV2 RCC_CFGR_HPRE_DIV2 +#define RCC_SYSCLK_DIV4 RCC_CFGR_HPRE_DIV4 +#define RCC_SYSCLK_DIV8 RCC_CFGR_HPRE_DIV8 +#define RCC_SYSCLK_DIV16 RCC_CFGR_HPRE_DIV16 +#define RCC_SYSCLK_DIV64 RCC_CFGR_HPRE_DIV64 +#define RCC_SYSCLK_DIV128 RCC_CFGR_HPRE_DIV128 +#define RCC_SYSCLK_DIV256 RCC_CFGR_HPRE_DIV256 +#define RCC_SYSCLK_DIV512 RCC_CFGR_HPRE_DIV512 
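+
+/* --- Illustrative usage sketch (not part of the original ST header) -------
+ * Shows how the RCC_OscInitTypeDef / RCC_ClkInitTypeDef structures and the
+ * divider constants defined above are typically consumed at startup.
+ * Assumptions: an 8 MHz HSE in bypass mode, PLL dividers for a 216 MHz
+ * SYSCLK, and FLASH_LATENCY_7 from the FLASH HAL header; these values are
+ * illustrative and are not taken from this project's SystemClock_Config().
+ * Kept inside #if 0 so it is never compiled. */
+#if 0
+static void Example_SystemClockConfig(void)
+{
+  RCC_OscInitTypeDef osc = {0};
+  RCC_ClkInitTypeDef clk = {0};
+
+  /* 8 MHz HSE bypass -> PLL: VCO input 8/4 = 2 MHz, VCO output 2*216 = 432 MHz,
+     SYSCLK 432/2 = 216 MHz, 48 MHz domain 432/9 = 48 MHz. */
+  osc.OscillatorType = RCC_OSCILLATORTYPE_HSE;
+  osc.HSEState       = RCC_HSE_BYPASS;
+  osc.PLL.PLLState   = RCC_PLL_ON;
+  osc.PLL.PLLSource  = RCC_PLLSOURCE_HSE;
+  osc.PLL.PLLM       = 4;
+  osc.PLL.PLLN       = 216;
+  osc.PLL.PLLP       = RCC_PLLP_DIV2;
+  osc.PLL.PLLQ       = 9;
+  /* Reaching 216 MHz also requires the over-drive sequence
+     (HAL_PWREx_EnableOverDrive(), declared in stm32f7xx_hal_pwr_ex.h above);
+     omitted here for brevity. */
+  if (HAL_RCC_OscConfig(&osc) != HAL_OK)
+  {
+    for (;;) { /* halt: oscillator configuration failed */ }
+  }
+
+  /* HCLK = SYSCLK/1, PCLK1 = HCLK/4, PCLK2 = HCLK/2
+     (the RCC_HCLK_DIVx APB dividers are defined just below). */
+  clk.ClockType      = RCC_CLOCKTYPE_SYSCLK | RCC_CLOCKTYPE_HCLK |
+                       RCC_CLOCKTYPE_PCLK1  | RCC_CLOCKTYPE_PCLK2;
+  clk.SYSCLKSource   = RCC_SYSCLKSOURCE_PLLCLK;
+  clk.AHBCLKDivider  = RCC_SYSCLK_DIV1;
+  clk.APB1CLKDivider = RCC_HCLK_DIV4;
+  clk.APB2CLKDivider = RCC_HCLK_DIV2;
+  if (HAL_RCC_ClockConfig(&clk, FLASH_LATENCY_7) != HAL_OK)
+  {
+    for (;;) { /* halt: bus clock configuration failed */ }
+  }
+}
+#endif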
+/** + * @} + */ + +/** @defgroup RCC_APB1_APB2_Clock_Source RCC APB1/APB2 Clock Source + * @{ + */ +#define RCC_HCLK_DIV1 RCC_CFGR_PPRE1_DIV1 +#define RCC_HCLK_DIV2 RCC_CFGR_PPRE1_DIV2 +#define RCC_HCLK_DIV4 RCC_CFGR_PPRE1_DIV4 +#define RCC_HCLK_DIV8 RCC_CFGR_PPRE1_DIV8 +#define RCC_HCLK_DIV16 RCC_CFGR_PPRE1_DIV16 +/** + * @} + */ + +/** @defgroup RCC_RTC_Clock_Source RCC RTC Clock Source + * @{ + */ +#define RCC_RTCCLKSOURCE_NO_CLK ((uint32_t)0x00000000U) +#define RCC_RTCCLKSOURCE_LSE ((uint32_t)0x00000100U) +#define RCC_RTCCLKSOURCE_LSI ((uint32_t)0x00000200U) +#define RCC_RTCCLKSOURCE_HSE_DIVX ((uint32_t)0x00000300U) +#define RCC_RTCCLKSOURCE_HSE_DIV2 ((uint32_t)0x00020300U) +#define RCC_RTCCLKSOURCE_HSE_DIV3 ((uint32_t)0x00030300U) +#define RCC_RTCCLKSOURCE_HSE_DIV4 ((uint32_t)0x00040300U) +#define RCC_RTCCLKSOURCE_HSE_DIV5 ((uint32_t)0x00050300U) +#define RCC_RTCCLKSOURCE_HSE_DIV6 ((uint32_t)0x00060300U) +#define RCC_RTCCLKSOURCE_HSE_DIV7 ((uint32_t)0x00070300U) +#define RCC_RTCCLKSOURCE_HSE_DIV8 ((uint32_t)0x00080300U) +#define RCC_RTCCLKSOURCE_HSE_DIV9 ((uint32_t)0x00090300U) +#define RCC_RTCCLKSOURCE_HSE_DIV10 ((uint32_t)0x000A0300U) +#define RCC_RTCCLKSOURCE_HSE_DIV11 ((uint32_t)0x000B0300U) +#define RCC_RTCCLKSOURCE_HSE_DIV12 ((uint32_t)0x000C0300U) +#define RCC_RTCCLKSOURCE_HSE_DIV13 ((uint32_t)0x000D0300U) +#define RCC_RTCCLKSOURCE_HSE_DIV14 ((uint32_t)0x000E0300U) +#define RCC_RTCCLKSOURCE_HSE_DIV15 ((uint32_t)0x000F0300U) +#define RCC_RTCCLKSOURCE_HSE_DIV16 ((uint32_t)0x00100300U) +#define RCC_RTCCLKSOURCE_HSE_DIV17 ((uint32_t)0x00110300U) +#define RCC_RTCCLKSOURCE_HSE_DIV18 ((uint32_t)0x00120300U) +#define RCC_RTCCLKSOURCE_HSE_DIV19 ((uint32_t)0x00130300U) +#define RCC_RTCCLKSOURCE_HSE_DIV20 ((uint32_t)0x00140300U) +#define RCC_RTCCLKSOURCE_HSE_DIV21 ((uint32_t)0x00150300U) +#define RCC_RTCCLKSOURCE_HSE_DIV22 ((uint32_t)0x00160300U) +#define RCC_RTCCLKSOURCE_HSE_DIV23 ((uint32_t)0x00170300U) +#define RCC_RTCCLKSOURCE_HSE_DIV24 ((uint32_t)0x00180300U) +#define RCC_RTCCLKSOURCE_HSE_DIV25 ((uint32_t)0x00190300U) +#define RCC_RTCCLKSOURCE_HSE_DIV26 ((uint32_t)0x001A0300U) +#define RCC_RTCCLKSOURCE_HSE_DIV27 ((uint32_t)0x001B0300U) +#define RCC_RTCCLKSOURCE_HSE_DIV28 ((uint32_t)0x001C0300U) +#define RCC_RTCCLKSOURCE_HSE_DIV29 ((uint32_t)0x001D0300U) +#define RCC_RTCCLKSOURCE_HSE_DIV30 ((uint32_t)0x001E0300U) +#define RCC_RTCCLKSOURCE_HSE_DIV31 ((uint32_t)0x001F0300U) +/** + * @} + */ + + + +/** @defgroup RCC_MCO_Index RCC MCO Index + * @{ + */ +#define RCC_MCO1 ((uint32_t)0x00000000U) +#define RCC_MCO2 ((uint32_t)0x00000001U) +/** + * @} + */ + +/** @defgroup RCC_MCO1_Clock_Source RCC MCO1 Clock Source + * @{ + */ +#define RCC_MCO1SOURCE_HSI ((uint32_t)0x00000000U) +#define RCC_MCO1SOURCE_LSE RCC_CFGR_MCO1_0 +#define RCC_MCO1SOURCE_HSE RCC_CFGR_MCO1_1 +#define RCC_MCO1SOURCE_PLLCLK RCC_CFGR_MCO1 +/** + * @} + */ + +/** @defgroup RCC_MCO2_Clock_Source RCC MCO2 Clock Source + * @{ + */ +#define RCC_MCO2SOURCE_SYSCLK ((uint32_t)0x00000000U) +#define RCC_MCO2SOURCE_PLLI2SCLK RCC_CFGR_MCO2_0 +#define RCC_MCO2SOURCE_HSE RCC_CFGR_MCO2_1 +#define RCC_MCO2SOURCE_PLLCLK RCC_CFGR_MCO2 +/** + * @} + */ + +/** @defgroup RCC_MCOx_Clock_Prescaler RCC MCO1 Clock Prescaler + * @{ + */ +#define RCC_MCODIV_1 ((uint32_t)0x00000000U) +#define RCC_MCODIV_2 RCC_CFGR_MCO1PRE_2 +#define RCC_MCODIV_3 ((uint32_t)RCC_CFGR_MCO1PRE_0 | RCC_CFGR_MCO1PRE_2) +#define RCC_MCODIV_4 ((uint32_t)RCC_CFGR_MCO1PRE_1 | RCC_CFGR_MCO1PRE_2) +#define RCC_MCODIV_5 RCC_CFGR_MCO1PRE +/** + * @} + */ + +/** @defgroup 
RCC_Interrupt RCC Interrupt + * @{ + */ +#define RCC_IT_LSIRDY ((uint8_t)0x01U) +#define RCC_IT_LSERDY ((uint8_t)0x02U) +#define RCC_IT_HSIRDY ((uint8_t)0x04U) +#define RCC_IT_HSERDY ((uint8_t)0x08U) +#define RCC_IT_PLLRDY ((uint8_t)0x10U) +#define RCC_IT_PLLI2SRDY ((uint8_t)0x20U) +#define RCC_IT_PLLSAIRDY ((uint8_t)0x40U) +#define RCC_IT_CSS ((uint8_t)0x80U) +/** + * @} + */ + +/** @defgroup RCC_Flag RCC Flags + * Elements values convention: 0XXYYYYYb + * - YYYYY : Flag position in the register + * - 0XX : Register index + * - 01: CR register + * - 10: BDCR register + * - 11: CSR register + * @{ + */ +/* Flags in the CR register */ +#define RCC_FLAG_HSIRDY ((uint8_t)0x21U) +#define RCC_FLAG_HSERDY ((uint8_t)0x31U) +#define RCC_FLAG_PLLRDY ((uint8_t)0x39U) +#define RCC_FLAG_PLLI2SRDY ((uint8_t)0x3BU) +#define RCC_FLAG_PLLSAIRDY ((uint8_t)0x3CU) + +/* Flags in the BDCR register */ +#define RCC_FLAG_LSERDY ((uint8_t)0x41U) + +/* Flags in the CSR register */ +#define RCC_FLAG_LSIRDY ((uint8_t)0x61U) +#define RCC_FLAG_BORRST ((uint8_t)0x79U) +#define RCC_FLAG_PINRST ((uint8_t)0x7AU) +#define RCC_FLAG_PORRST ((uint8_t)0x7BU) +#define RCC_FLAG_SFTRST ((uint8_t)0x7CU) +#define RCC_FLAG_IWDGRST ((uint8_t)0x7DU) +#define RCC_FLAG_WWDGRST ((uint8_t)0x7EU) +#define RCC_FLAG_LPWRRST ((uint8_t)0x7FU) +/** + * @} + */ + +/** @defgroup RCC_LSEDrive_Configuration RCC LSE Drive configurations + * @{ + */ +#define RCC_LSEDRIVE_LOW ((uint32_t)0x00000000U) +#define RCC_LSEDRIVE_MEDIUMLOW RCC_BDCR_LSEDRV_1 +#define RCC_LSEDRIVE_MEDIUMHIGH RCC_BDCR_LSEDRV_0 +#define RCC_LSEDRIVE_HIGH RCC_BDCR_LSEDRV +/** + * @} + */ + +/** + * @} + */ + +/* Exported macro ------------------------------------------------------------*/ +/** @defgroup RCC_Exported_Macros RCC Exported Macros + * @{ + */ + +/** @defgroup RCC_AHB1_Clock_Enable_Disable AHB1 Peripheral Clock Enable Disable + * @brief Enable or disable the AHB1 peripheral clock. + * @note After reset, the peripheral clock (used for registers read/write access) + * is disabled and the application software has to enable this clock before + * using it. + * @{ + */ +#define __HAL_RCC_CRC_CLK_ENABLE() do { \ + __IO uint32_t tmpreg; \ + SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_CRCEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_CRCEN);\ + UNUSED(tmpreg); \ + } while(0) + +#define __HAL_RCC_DMA1_CLK_ENABLE() do { \ + __IO uint32_t tmpreg; \ + SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_DMA1EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_DMA1EN);\ + UNUSED(tmpreg); \ + } while(0) + +#define __HAL_RCC_CRC_CLK_DISABLE() (RCC->AHB1ENR &= ~(RCC_AHB1ENR_CRCEN)) +#define __HAL_RCC_DMA1_CLK_DISABLE() (RCC->AHB1ENR &= ~(RCC_AHB1ENR_DMA1EN)) + +/** + * @} + */ + +/** @defgroup RCC_APB1_Clock_Enable_Disable APB1 Peripheral Clock Enable Disable + * @brief Enable or disable the Low Speed APB (APB1) peripheral clock. + * @note After reset, the peripheral clock (used for registers read/write access) + * is disabled and the application software has to enable this clock before + * using it. 
+ * @{ + */ +#define __HAL_RCC_WWDG_CLK_ENABLE() do { \ + __IO uint32_t tmpreg; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_WWDGEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_WWDGEN);\ + UNUSED(tmpreg); \ + } while(0) + +#define __HAL_RCC_PWR_CLK_ENABLE() do { \ + __IO uint32_t tmpreg; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_PWREN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_PWREN);\ + UNUSED(tmpreg); \ + } while(0) + +#define __HAL_RCC_WWDG_CLK_DISABLE() (RCC->APB1ENR &= ~(RCC_APB1ENR_WWDGEN)) +#define __HAL_RCC_PWR_CLK_DISABLE() (RCC->APB1ENR &= ~(RCC_APB1ENR_PWREN)) +/** + * @} + */ + +/** @defgroup RCC_APB2_Clock_Enable_Disable APB2 Peripheral Clock Enable Disable + * @brief Enable or disable the High Speed APB (APB2) peripheral clock. + * @note After reset, the peripheral clock (used for registers read/write access) + * is disabled and the application software has to enable this clock before + * using it. + * @{ + */ +#define __HAL_RCC_SYSCFG_CLK_ENABLE() do { \ + __IO uint32_t tmpreg; \ + SET_BIT(RCC->APB2ENR, RCC_APB2ENR_SYSCFGEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB2ENR, RCC_APB2ENR_SYSCFGEN);\ + UNUSED(tmpreg); \ + } while(0) + +#define __HAL_RCC_SYSCFG_CLK_DISABLE() (RCC->APB2ENR &= ~(RCC_APB2ENR_SYSCFGEN)) + +/** + * @} + */ + +/** @defgroup RCC_AHB1_Peripheral_Clock_Enable_Disable_Status AHB1 Peripheral Clock Enable Disable Status + * @brief Get the enable or disable status of the AHB1 peripheral clock. + * @note After reset, the peripheral clock (used for registers read/write access) + * is disabled and the application software has to enable this clock before + * using it. + * @{ + */ +#define __HAL_RCC_CRC_IS_CLK_ENABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_CRCEN)) != RESET) +#define __HAL_RCC_DMA1_IS_CLK_ENABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_DMA1EN)) != RESET) + +#define __HAL_RCC_CRC_IS_CLK_DISABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_CRCEN)) == RESET) +#define __HAL_RCC_DMA1_IS_CLK_DISABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_DMA1EN)) == RESET) +/** + * @} + */ + +/** @defgroup RCC_APB1_Clock_Enable_Disable_Status APB1 Peripheral Clock Enable Disable Status + * @brief Get the enable or disable status of the APB1 peripheral clock. + * @note After reset, the peripheral clock (used for registers read/write access) + * is disabled and the application software has to enable this clock before + * using it. + * @{ + */ +#define __HAL_RCC_WWDG_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_WWDGEN)) != RESET) +#define __HAL_RCC_PWR_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_PWREN)) != RESET) + +#define __HAL_RCC_WWDG_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_WWDGEN)) == RESET) +#define __HAL_RCC_PWR_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_PWREN)) == RESET) +/** + * @} + */ + +/** @defgroup RCC_APB2_Clock_Enable_Disable_Status APB2 Peripheral Clock Enable Disable Status + * @brief EGet the enable or disable status of the APB2 peripheral clock. + * @note After reset, the peripheral clock (used for registers read/write access) + * is disabled and the application software has to enable this clock before + * using it. 
+ * @{ + */ +#define __HAL_RCC_SYSCFG_IS_CLK_ENABLED() ((RCC->APB2ENR & (RCC_APB2ENR_SYSCFGEN)) != RESET) +#define __HAL_RCC_SYSCFG_IS_CLK_DISABLED() ((RCC->APB2ENR & (RCC_APB2ENR_SYSCFGEN)) == RESET) +/** + * @} + */ + +/** @defgroup RCC_Peripheral_Clock_Force_Release RCC Peripheral Clock Force Release + * @brief Force or release AHB peripheral reset. + * @{ + */ +#define __HAL_RCC_AHB1_FORCE_RESET() (RCC->AHB1RSTR = 0xFFFFFFFFU) +#define __HAL_RCC_CRC_FORCE_RESET() (RCC->AHB1RSTR |= (RCC_AHB1RSTR_CRCRST)) +#define __HAL_RCC_DMA1_FORCE_RESET() (RCC->AHB1RSTR |= (RCC_AHB1RSTR_DMA1RST)) + +#define __HAL_RCC_AHB1_RELEASE_RESET() (RCC->AHB1RSTR = 0x00U) +#define __HAL_RCC_CRC_RELEASE_RESET() (RCC->AHB1RSTR &= ~(RCC_AHB1RSTR_CRCRST)) +#define __HAL_RCC_DMA1_RELEASE_RESET() (RCC->AHB1RSTR &= ~(RCC_AHB1RSTR_DMA1RST)) +/** + * @} + */ + +/** @defgroup RCC_APB1_Force_Release_Reset APB1 Force Release Reset + * @brief Force or release APB1 peripheral reset. + * @{ + */ +#define __HAL_RCC_APB1_FORCE_RESET() (RCC->APB1RSTR = 0xFFFFFFFFU) +#define __HAL_RCC_WWDG_FORCE_RESET() (RCC->APB1RSTR |= (RCC_APB1RSTR_WWDGRST)) +#define __HAL_RCC_PWR_FORCE_RESET() (RCC->APB1RSTR |= (RCC_APB1RSTR_PWRRST)) + +#define __HAL_RCC_APB1_RELEASE_RESET() (RCC->APB1RSTR = 0x00U) +#define __HAL_RCC_WWDG_RELEASE_RESET() (RCC->APB1RSTR &= ~(RCC_APB1RSTR_WWDGRST)) +#define __HAL_RCC_PWR_RELEASE_RESET() (RCC->APB1RSTR &= ~(RCC_APB1RSTR_PWRRST)) +/** + * @} + */ + +/** @defgroup RCC_APB2_Force_Release_Reset APB2 Force Release Reset + * @brief Force or release APB2 peripheral reset. + * @{ + */ +#define __HAL_RCC_APB2_FORCE_RESET() (RCC->APB2RSTR = 0xFFFFFFFFU) +#define __HAL_RCC_SYSCFG_FORCE_RESET() (RCC->APB2RSTR |= (RCC_APB2RSTR_SYSCFGRST)) + +#define __HAL_RCC_APB2_RELEASE_RESET() (RCC->APB2RSTR = 0x00U) +#define __HAL_RCC_SYSCFG_RELEASE_RESET() (RCC->APB2RSTR &= ~(RCC_APB2RSTR_SYSCFGRST)) + +/** + * @} + */ + +/** @defgroup RCC_Peripheral_Clock_Sleep_Enable_Disable RCC Peripheral Clock Sleep Enable Disable + * @note Peripheral clock gating in SLEEP mode can be used to further reduce + * power consumption. + * @note After wakeup from SLEEP mode, the peripheral clock is enabled again. + * @note By default, all peripheral clocks are enabled during SLEEP mode. + * @{ + */ +#define __HAL_RCC_CRC_CLK_SLEEP_ENABLE() (RCC->AHB1LPENR |= (RCC_AHB1LPENR_CRCLPEN)) +#define __HAL_RCC_DMA1_CLK_SLEEP_ENABLE() (RCC->AHB1LPENR |= (RCC_AHB1LPENR_DMA1LPEN)) + +#define __HAL_RCC_CRC_CLK_SLEEP_DISABLE() (RCC->AHB1LPENR &= ~(RCC_AHB1LPENR_CRCLPEN)) +#define __HAL_RCC_DMA1_CLK_SLEEP_DISABLE() (RCC->AHB1LPENR &= ~(RCC_AHB1LPENR_DMA1LPEN)) + +/** @brief Enable or disable the APB1 peripheral clock during Low Power (Sleep) mode. + * @note Peripheral clock gating in SLEEP mode can be used to further reduce + * power consumption. + * @note After wakeup from SLEEP mode, the peripheral clock is enabled again. + * @note By default, all peripheral clocks are enabled during SLEEP mode. + */ +#define __HAL_RCC_WWDG_CLK_SLEEP_ENABLE() (RCC->APB1LPENR |= (RCC_APB1LPENR_WWDGLPEN)) +#define __HAL_RCC_PWR_CLK_SLEEP_ENABLE() (RCC->APB1LPENR |= (RCC_APB1LPENR_PWRLPEN)) + +#define __HAL_RCC_WWDG_CLK_SLEEP_DISABLE() (RCC->APB1LPENR &= ~(RCC_APB1LPENR_WWDGLPEN)) +#define __HAL_RCC_PWR_CLK_SLEEP_DISABLE() (RCC->APB1LPENR &= ~(RCC_APB1LPENR_PWRLPEN)) + +/** @brief Enable or disable the APB2 peripheral clock during Low Power (Sleep) mode. + * @note Peripheral clock gating in SLEEP mode can be used to further reduce + * power consumption. 
+ * @note After wakeup from SLEEP mode, the peripheral clock is enabled again. + * @note By default, all peripheral clocks are enabled during SLEEP mode. + */ +#define __HAL_RCC_SYSCFG_CLK_SLEEP_ENABLE() (RCC->APB2LPENR |= (RCC_APB2LPENR_SYSCFGLPEN)) +#define __HAL_RCC_SYSCFG_CLK_SLEEP_DISABLE() (RCC->APB2LPENR &= ~(RCC_APB2LPENR_SYSCFGLPEN)) + +/** + * @} + */ + +/** @defgroup RCC_AHB1_Clock_Sleep_Enable_Disable_Status AHB1 Peripheral Clock Sleep Enable Disable Status + * @brief Get the enable or disable status of the AHB1 peripheral clock during Low Power (Sleep) mode. + * @note Peripheral clock gating in SLEEP mode can be used to further reduce + * power consumption. + * @note After wakeup from SLEEP mode, the peripheral clock is enabled again. + * @note By default, all peripheral clocks are enabled during SLEEP mode. + * @{ + */ +#define __HAL_RCC_CRC_IS_CLK_SLEEP_ENABLED() ((RCC->AHB1LPENR & (RCC_AHB1LPENR_CRCLPEN)) != RESET) +#define __HAL_RCC_DMA1_IS_CLK_SLEEP_ENABLED() ((RCC->AHB1LPENR & (RCC_AHB1LPENR_DMA1LPEN)) != RESET) + +#define __HAL_RCC_CRC_IS_CLK_SLEEP_DISABLED() ((RCC->AHB1LPENR & (RCC_AHB1LPENR_CRCLPEN)) == RESET) +#define __HAL_RCC_DMA1_IS_CLK_SLEEP_DISABLED() ((RCC->AHB1LPENR & (RCC_AHB1LPENR_DMA1LPEN)) == RESET) +/** + * @} + */ + +/** @defgroup RCC_APB1_Clock_Sleep_Enable_Disable_Status APB1 Peripheral Clock Sleep Enable Disable Status + * @brief Get the enable or disable status of the APB1 peripheral clock during Low Power (Sleep) mode. + * @note Peripheral clock gating in SLEEP mode can be used to further reduce + * power consumption. + * @note After wakeup from SLEEP mode, the peripheral clock is enabled again. + * @note By default, all peripheral clocks are enabled during SLEEP mode. + * @{ + */ +#define __HAL_RCC_WWDG_IS_CLK_SLEEP_ENABLED() ((RCC->APB1LPENR & (RCC_APB1LPENR_WWDGLPEN)) != RESET) +#define __HAL_RCC_PWR_IS_CLK_SLEEP_ENABLED() ((RCC->APB1LPENR & (RCC_APB1LPENR_PWRLPEN)) != RESET) + +#define __HAL_RCC_WWDG_IS_CLK_SLEEP_DISABLED() ((RCC->APB1LPENR & (RCC_APB1LPENR_WWDGLPEN)) == RESET) +#define __HAL_RCC_PWR_IS_CLK_SLEEP_DISABLED() ((RCC->APB1LPENR & (RCC_APB1LPENR_PWRLPEN)) == RESET) +/** + * @} + */ + +/** @defgroup RCC_APB2_Clock_Sleep_Enable_Disable_Status APB2 Peripheral Clock Sleep Enable Disable Status + * @brief Get the enable or disable status of the APB2 peripheral clock during Low Power (Sleep) mode. + * @note Peripheral clock gating in SLEEP mode can be used to further reduce + * power consumption. + * @note After wakeup from SLEEP mode, the peripheral clock is enabled again. + * @note By default, all peripheral clocks are enabled during SLEEP mode. + * @{ + */ +#define __HAL_RCC_SYSCFG_IS_CLK_SLEEP_ENABLED() ((RCC->APB2LPENR & (RCC_APB2LPENR_SYSCFGLPEN)) != RESET) +#define __HAL_RCC_SYSCFG_IS_CLK_SLEEP_DISABLED() ((RCC->APB2LPENR & (RCC_APB2LPENR_SYSCFGLPEN)) == RESET) +/** + * @} + */ + +/** @defgroup RCC_HSI_Configuration HSI Configuration + * @{ + */ + +/** @brief Macros to enable or disable the Internal High Speed oscillator (HSI). + * @note The HSI is stopped by hardware when entering STOP and STANDBY modes. + * It is used (enabled by hardware) as system clock source after startup + * from Reset, wakeup from STOP and STANDBY mode, or in case of failure + * of the HSE used directly or indirectly as system clock (if the Clock + * Security System CSS is enabled). + * @note HSI can not be stopped if it is used as system clock source. In this case, + * you have to select another source of the system clock then stop the HSI. 
+ * @note After enabling the HSI, the application software should wait on HSIRDY + * flag to be set indicating that HSI clock is stable and can be used as + * system clock source. + * @note When the HSI is stopped, HSIRDY flag goes low after 6 HSI oscillator + * clock cycles. + */ +#define __HAL_RCC_HSI_ENABLE() (RCC->CR |= (RCC_CR_HSION)) +#define __HAL_RCC_HSI_DISABLE() (RCC->CR &= ~(RCC_CR_HSION)) + +/** @brief Macro to adjust the Internal High Speed oscillator (HSI) calibration value. + * @note The calibration is used to compensate for the variations in voltage + * and temperature that influence the frequency of the internal HSI RC. + * @param __HSICALIBRATIONVALUE__ specifies the calibration trimming value. + * (default is RCC_HSICALIBRATION_DEFAULT). + */ +#define __HAL_RCC_HSI_CALIBRATIONVALUE_ADJUST(__HSICALIBRATIONVALUE__) (MODIFY_REG(RCC->CR,\ + RCC_CR_HSITRIM, (uint32_t)(__HSICALIBRATIONVALUE__) << RCC_CR_HSITRIM_Pos)) +/** + * @} + */ + +/** @defgroup RCC_LSI_Configuration LSI Configuration + * @{ + */ + +/** @brief Macros to enable or disable the Internal Low Speed oscillator (LSI). + * @note After enabling the LSI, the application software should wait on + * LSIRDY flag to be set indicating that LSI clock is stable and can + * be used to clock the IWDG and/or the RTC. + * @note LSI can not be disabled if the IWDG is running. + * @note When the LSI is stopped, LSIRDY flag goes low after 6 LSI oscillator + * clock cycles. + */ +#define __HAL_RCC_LSI_ENABLE() (RCC->CSR |= (RCC_CSR_LSION)) +#define __HAL_RCC_LSI_DISABLE() (RCC->CSR &= ~(RCC_CSR_LSION)) +/** + * @} + */ + +/** @defgroup RCC_HSE_Configuration HSE Configuration + * @{ + */ +/** + * @brief Macro to configure the External High Speed oscillator (HSE). + * @note Transitions HSE Bypass to HSE On and HSE On to HSE Bypass are not + * supported by this macro. User should request a transition to HSE Off + * first and then HSE On or HSE Bypass. + * @note After enabling the HSE (RCC_HSE_ON or RCC_HSE_Bypass), the application + * software should wait on HSERDY flag to be set indicating that HSE clock + * is stable and can be used to clock the PLL and/or system clock. + * @note HSE state can not be changed if it is used directly or through the + * PLL as system clock. In this case, you have to select another source + * of the system clock then change the HSE state (ex. disable it). + * @note The HSE is stopped by hardware when entering STOP and STANDBY modes. + * @note This function reset the CSSON bit, so if the clock security system(CSS) + * was previously enabled you have to enable it again after calling this + * function. + * @param __STATE__ specifies the new state of the HSE. + * This parameter can be one of the following values: + * @arg RCC_HSE_OFF: turn OFF the HSE oscillator, HSERDY flag goes low after + * 6 HSE oscillator clock cycles. + * @arg RCC_HSE_ON: turn ON the HSE oscillator. + * @arg RCC_HSE_BYPASS: HSE oscillator bypassed with external clock. 
+ */ +#define __HAL_RCC_HSE_CONFIG(__STATE__) \ + do { \ + if ((__STATE__) == RCC_HSE_ON) \ + { \ + SET_BIT(RCC->CR, RCC_CR_HSEON); \ + } \ + else if ((__STATE__) == RCC_HSE_OFF) \ + { \ + CLEAR_BIT(RCC->CR, RCC_CR_HSEON); \ + CLEAR_BIT(RCC->CR, RCC_CR_HSEBYP); \ + } \ + else if ((__STATE__) == RCC_HSE_BYPASS) \ + { \ + SET_BIT(RCC->CR, RCC_CR_HSEBYP); \ + SET_BIT(RCC->CR, RCC_CR_HSEON); \ + } \ + else \ + { \ + CLEAR_BIT(RCC->CR, RCC_CR_HSEON); \ + CLEAR_BIT(RCC->CR, RCC_CR_HSEBYP); \ + } \ + } while(0) +/** + * @} + */ + +/** @defgroup RCC_LSE_Configuration LSE Configuration + * @{ + */ + +/** + * @brief Macro to configure the External Low Speed oscillator (LSE). + * @note Transitions LSE Bypass to LSE On and LSE On to LSE Bypass are not supported by this macro. + * User should request a transition to LSE Off first and then LSE On or LSE Bypass. + * @note As the LSE is in the Backup domain and write access is denied to + * this domain after reset, you have to enable write access using + * HAL_PWR_EnableBkUpAccess() function before to configure the LSE + * (to be done once after reset). + * @note After enabling the LSE (RCC_LSE_ON or RCC_LSE_BYPASS), the application + * software should wait on LSERDY flag to be set indicating that LSE clock + * is stable and can be used to clock the RTC. + * @param __STATE__ specifies the new state of the LSE. + * This parameter can be one of the following values: + * @arg RCC_LSE_OFF: turn OFF the LSE oscillator, LSERDY flag goes low after + * 6 LSE oscillator clock cycles. + * @arg RCC_LSE_ON: turn ON the LSE oscillator. + * @arg RCC_LSE_BYPASS: LSE oscillator bypassed with external clock. + */ +#define __HAL_RCC_LSE_CONFIG(__STATE__) \ + do { \ + if((__STATE__) == RCC_LSE_ON) \ + { \ + SET_BIT(RCC->BDCR, RCC_BDCR_LSEON); \ + } \ + else if((__STATE__) == RCC_LSE_OFF) \ + { \ + CLEAR_BIT(RCC->BDCR, RCC_BDCR_LSEON); \ + CLEAR_BIT(RCC->BDCR, RCC_BDCR_LSEBYP); \ + } \ + else if((__STATE__) == RCC_LSE_BYPASS) \ + { \ + SET_BIT(RCC->BDCR, RCC_BDCR_LSEBYP); \ + SET_BIT(RCC->BDCR, RCC_BDCR_LSEON); \ + } \ + else \ + { \ + CLEAR_BIT(RCC->BDCR, RCC_BDCR_LSEON); \ + CLEAR_BIT(RCC->BDCR, RCC_BDCR_LSEBYP); \ + } \ + } while(0) +/** + * @} + */ + +/** @defgroup RCC_Internal_RTC_Clock_Configuration RTC Clock Configuration + * @{ + */ + +/** @brief Macros to enable or disable the RTC clock. + * @note These macros must be used only after the RTC clock source was selected. + */ +#define __HAL_RCC_RTC_ENABLE() (RCC->BDCR |= (RCC_BDCR_RTCEN)) +#define __HAL_RCC_RTC_DISABLE() (RCC->BDCR &= ~(RCC_BDCR_RTCEN)) + +/** @brief Macros to configure the RTC clock (RTCCLK). + * @note As the RTC clock configuration bits are in the Backup domain and write + * access is denied to this domain after reset, you have to enable write + * access using the Power Backup Access macro before to configure + * the RTC clock source (to be done once after reset). + * @note Once the RTC clock is configured it can't be changed unless the + * Backup domain is reset using __HAL_RCC_BackupReset_RELEASE() macro, or by + * a Power On Reset (POR). + * @param __RTCCLKSource__ specifies the RTC clock source. + * This parameter can be one of the following values: + @arg @ref RCC_RTCCLKSOURCE_NO_CLK: No clock selected as RTC clock. + * @arg @ref RCC_RTCCLKSOURCE_LSE: LSE selected as RTC clock. + * @arg @ref RCC_RTCCLKSOURCE_LSI: LSI selected as RTC clock. 
+ * @arg @ref RCC_RTCCLKSOURCE_HSE_DIVX: HSE clock divided by x selected + * as RTC clock, where x:[2,31] + * @note If the LSE or LSI is used as RTC clock source, the RTC continues to + * work in STOP and STANDBY modes, and can be used as wakeup source. + * However, when the HSE clock is used as RTC clock source, the RTC + * cannot be used in STOP and STANDBY modes. + * @note The maximum input clock frequency for RTC is 1MHz (when using HSE as + * RTC clock source). + */ +#define __HAL_RCC_RTC_CLKPRESCALER(__RTCCLKSource__) (((__RTCCLKSource__) & RCC_BDCR_RTCSEL) == RCC_BDCR_RTCSEL) ? \ + MODIFY_REG(RCC->CFGR, RCC_CFGR_RTCPRE, ((__RTCCLKSource__) & 0xFFFFCFF)) : CLEAR_BIT(RCC->CFGR, RCC_CFGR_RTCPRE) + +#define __HAL_RCC_RTC_CONFIG(__RTCCLKSource__) do { __HAL_RCC_RTC_CLKPRESCALER(__RTCCLKSource__); \ + RCC->BDCR |= ((__RTCCLKSource__) & 0x00000FFF); \ + } while (0) + +/** @brief Macro to get the RTC clock source. + * @retval The clock source can be one of the following values: + * @arg @ref RCC_RTCCLKSOURCE_NO_CLK No clock selected as RTC clock + * @arg @ref RCC_RTCCLKSOURCE_LSE LSE selected as RTC clock + * @arg @ref RCC_RTCCLKSOURCE_LSI LSI selected as RTC clock + * @arg @ref RCC_RTCCLKSOURCE_HSE_DIVX HSE divided by X selected as RTC clock (X can be retrieved thanks to @ref __HAL_RCC_GET_RTC_HSE_PRESCALER() + */ +#define __HAL_RCC_GET_RTC_SOURCE() (READ_BIT(RCC->BDCR, RCC_BDCR_RTCSEL)) + +/** + * @brief Get the RTC and HSE clock divider (RTCPRE). + * @retval Returned value can be one of the following values: + * @arg @ref RCC_RTCCLKSOURCE_HSE_DIVX: HSE clock divided by x selected + * as RTC clock, where x:[2,31] + */ +#define __HAL_RCC_GET_RTC_HSE_PRESCALER() (READ_BIT(RCC->CFGR, RCC_CFGR_RTCPRE) | RCC_BDCR_RTCSEL) + +/** @brief Macros to force or release the Backup domain reset. + * @note This function resets the RTC peripheral (including the backup registers) + * and the RTC clock source selection in RCC_CSR register. + * @note The BKPSRAM is not affected by this reset. + */ +#define __HAL_RCC_BACKUPRESET_FORCE() (RCC->BDCR |= (RCC_BDCR_BDRST)) +#define __HAL_RCC_BACKUPRESET_RELEASE() (RCC->BDCR &= ~(RCC_BDCR_BDRST)) +/** + * @} + */ + +/** @defgroup RCC_PLL_Configuration PLL Configuration + * @{ + */ + +/** @brief Macros to enable or disable the main PLL. + * @note After enabling the main PLL, the application software should wait on + * PLLRDY flag to be set indicating that PLL clock is stable and can + * be used as system clock source. + * @note The main PLL can not be disabled if it is used as system clock source + * @note The main PLL is disabled by hardware when entering STOP and STANDBY modes. + */ +#define __HAL_RCC_PLL_ENABLE() SET_BIT(RCC->CR, RCC_CR_PLLON) +#define __HAL_RCC_PLL_DISABLE() CLEAR_BIT(RCC->CR, RCC_CR_PLLON) + +/** @brief Macro to configure the PLL clock source. + * @note This function must be used only when the main PLL is disabled. + * @param __PLLSOURCE__ specifies the PLL entry clock source. + * This parameter can be one of the following values: + * @arg RCC_PLLSOURCE_HSI: HSI oscillator clock selected as PLL clock entry + * @arg RCC_PLLSOURCE_HSE: HSE oscillator clock selected as PLL clock entry + * + */ +#define __HAL_RCC_PLL_PLLSOURCE_CONFIG(__PLLSOURCE__) MODIFY_REG(RCC->PLLCFGR, RCC_PLLCFGR_PLLSRC, (__PLLSOURCE__)) + +/** @brief Macro to configure the PLL multiplication factor. + * @note This function must be used only when the main PLL is disabled. 
+ * @param __PLLM__ specifies the division factor for PLL VCO input clock + * This parameter must be a number between Min_Data = 2 and Max_Data = 63. + * @note You have to set the PLLM parameter correctly to ensure that the VCO input + * frequency ranges from 1 to 2 MHz. It is recommended to select a frequency + * of 2 MHz to limit PLL jitter. + * + */ +#define __HAL_RCC_PLL_PLLM_CONFIG(__PLLM__) MODIFY_REG(RCC->PLLCFGR, RCC_PLLCFGR_PLLM, (__PLLM__)) +/** + * @} + */ + +/** @defgroup RCC_PLL_I2S_Configuration PLL I2S Configuration + * @{ + */ + +/** @brief Macro to configure the I2S clock source (I2SCLK). + * @note This function must be called before enabling the I2S APB clock. + * @param __SOURCE__ specifies the I2S clock source. + * This parameter can be one of the following values: + * @arg RCC_I2SCLKSOURCE_PLLI2S: PLLI2S clock used as I2S clock source. + * @arg RCC_I2SCLKSOURCE_EXT: External clock mapped on the I2S_CKIN pin + * used as I2S clock source. + */ +#define __HAL_RCC_I2S_CONFIG(__SOURCE__) do {RCC->CFGR &= ~(RCC_CFGR_I2SSRC); \ + RCC->CFGR |= (__SOURCE__); \ + }while(0) + +/** @brief Macros to enable or disable the PLLI2S. + * @note The PLLI2S is disabled by hardware when entering STOP and STANDBY modes. + */ +#define __HAL_RCC_PLLI2S_ENABLE() (RCC->CR |= (RCC_CR_PLLI2SON)) +#define __HAL_RCC_PLLI2S_DISABLE() (RCC->CR &= ~(RCC_CR_PLLI2SON)) +/** + * @} + */ + +/** @defgroup RCC_Get_Clock_source Get Clock source + * @{ + */ +/** + * @brief Macro to configure the system clock source. + * @param __RCC_SYSCLKSOURCE__ specifies the system clock source. + * This parameter can be one of the following values: + * - RCC_SYSCLKSOURCE_HSI: HSI oscillator is used as system clock source. + * - RCC_SYSCLKSOURCE_HSE: HSE oscillator is used as system clock source. + * - RCC_SYSCLKSOURCE_PLLCLK: PLL output is used as system clock source. + */ +#define __HAL_RCC_SYSCLK_CONFIG(__RCC_SYSCLKSOURCE__) MODIFY_REG(RCC->CFGR, RCC_CFGR_SW, (__RCC_SYSCLKSOURCE__)) + +/** @brief Macro to get the clock source used as system clock. + * @retval The clock source used as system clock. The returned value can be one + * of the following: + * - RCC_SYSCLKSOURCE_STATUS_HSI: HSI used as system clock. + * - RCC_SYSCLKSOURCE_STATUS_HSE: HSE used as system clock. + * - RCC_SYSCLKSOURCE_STATUS_PLLCLK: PLL used as system clock. + */ +#define __HAL_RCC_GET_SYSCLK_SOURCE() (RCC->CFGR & RCC_CFGR_SWS) + +/** + * @brief Macro to configures the External Low Speed oscillator (LSE) drive capability. + * @note As the LSE is in the Backup domain and write access is denied to + * this domain after reset, you have to enable write access using + * HAL_PWR_EnableBkUpAccess() function before to configure the LSE + * (to be done once after reset). + * @param __RCC_LSEDRIVE__ specifies the new state of the LSE drive capability. + * This parameter can be one of the following values: + * @arg RCC_LSEDRIVE_LOW: LSE oscillator low drive capability. + * @arg RCC_LSEDRIVE_MEDIUMLOW: LSE oscillator medium low drive capability. + * @arg RCC_LSEDRIVE_MEDIUMHIGH: LSE oscillator medium high drive capability. + * @arg RCC_LSEDRIVE_HIGH: LSE oscillator high drive capability. + * @retval None + */ +#define __HAL_RCC_LSEDRIVE_CONFIG(__RCC_LSEDRIVE__) \ + (MODIFY_REG(RCC->BDCR, RCC_BDCR_LSEDRV, (uint32_t)(__RCC_LSEDRIVE__) )) + +/** @brief Macro to get the oscillator used as PLL clock source. + * @retval The oscillator used as PLL clock source. 
The returned value can be one + * of the following: + * - RCC_PLLSOURCE_HSI: HSI oscillator is used as PLL clock source. + * - RCC_PLLSOURCE_HSE: HSE oscillator is used as PLL clock source. + */ +#define __HAL_RCC_GET_PLL_OSCSOURCE() ((uint32_t)(RCC->PLLCFGR & RCC_PLLCFGR_PLLSRC)) +/** + * @} + */ + +/** @defgroup RCCEx_MCOx_Clock_Config RCC Extended MCOx Clock Config + * @{ + */ + +/** @brief Macro to configure the MCO1 clock. + * @param __MCOCLKSOURCE__ specifies the MCO clock source. + * This parameter can be one of the following values: + * @arg RCC_MCO1SOURCE_HSI: HSI clock selected as MCO1 source + * @arg RCC_MCO1SOURCE_LSE: LSE clock selected as MCO1 source + * @arg RCC_MCO1SOURCE_HSE: HSE clock selected as MCO1 source + * @arg RCC_MCO1SOURCE_PLLCLK: main PLL clock selected as MCO1 source + * @param __MCODIV__ specifies the MCO clock prescaler. + * This parameter can be one of the following values: + * @arg RCC_MCODIV_1: no division applied to MCOx clock + * @arg RCC_MCODIV_2: division by 2 applied to MCOx clock + * @arg RCC_MCODIV_3: division by 3 applied to MCOx clock + * @arg RCC_MCODIV_4: division by 4 applied to MCOx clock + * @arg RCC_MCODIV_5: division by 5 applied to MCOx clock + */ + +#define __HAL_RCC_MCO1_CONFIG(__MCOCLKSOURCE__, __MCODIV__) \ + MODIFY_REG(RCC->CFGR, (RCC_CFGR_MCO1 | RCC_CFGR_MCO1PRE), ((__MCOCLKSOURCE__) | (__MCODIV__))) + +/** @brief Macro to configure the MCO2 clock. + * @param __MCOCLKSOURCE__ specifies the MCO clock source. + * This parameter can be one of the following values: + * @arg RCC_MCO2SOURCE_SYSCLK: System clock (SYSCLK) selected as MCO2 source + * @arg RCC_MCO2SOURCE_PLLI2SCLK: PLLI2S clock selected as MCO2 source + * @arg RCC_MCO2SOURCE_HSE: HSE clock selected as MCO2 source + * @arg RCC_MCO2SOURCE_PLLCLK: main PLL clock selected as MCO2 source + * @param __MCODIV__ specifies the MCO clock prescaler. + * This parameter can be one of the following values: + * @arg RCC_MCODIV_1: no division applied to MCOx clock + * @arg RCC_MCODIV_2: division by 2 applied to MCOx clock + * @arg RCC_MCODIV_3: division by 3 applied to MCOx clock + * @arg RCC_MCODIV_4: division by 4 applied to MCOx clock + * @arg RCC_MCODIV_5: division by 5 applied to MCOx clock + */ + +#define __HAL_RCC_MCO2_CONFIG(__MCOCLKSOURCE__, __MCODIV__) \ + MODIFY_REG(RCC->CFGR, (RCC_CFGR_MCO2 | RCC_CFGR_MCO2PRE), ((__MCOCLKSOURCE__) | ((__MCODIV__) << 3))); +/** + * @} + */ + +/** @defgroup RCC_Flags_Interrupts_Management Flags Interrupts Management + * @brief macros to manage the specified RCC Flags and interrupts. + * @{ + */ + +/** @brief Enable RCC interrupt (Perform Byte access to RCC_CIR[14:8] bits to enable + * the selected interrupts). + * @param __INTERRUPT__ specifies the RCC interrupt sources to be enabled. + * This parameter can be any combination of the following values: + * @arg RCC_IT_LSIRDY: LSI ready interrupt. + * @arg RCC_IT_LSERDY: LSE ready interrupt. + * @arg RCC_IT_HSIRDY: HSI ready interrupt. + * @arg RCC_IT_HSERDY: HSE ready interrupt. + * @arg RCC_IT_PLLRDY: Main PLL ready interrupt. + * @arg RCC_IT_PLLI2SRDY: PLLI2S ready interrupt. + */ +#define __HAL_RCC_ENABLE_IT(__INTERRUPT__) (*(__IO uint8_t *) RCC_CIR_BYTE1_ADDRESS |= (__INTERRUPT__)) + +/** @brief Disable RCC interrupt (Perform Byte access to RCC_CIR[14:8] bits to disable + * the selected interrupts). + * @param __INTERRUPT__ specifies the RCC interrupt sources to be disabled. + * This parameter can be any combination of the following values: + * @arg RCC_IT_LSIRDY: LSI ready interrupt. 
+ * @arg RCC_IT_LSERDY: LSE ready interrupt. + * @arg RCC_IT_HSIRDY: HSI ready interrupt. + * @arg RCC_IT_HSERDY: HSE ready interrupt. + * @arg RCC_IT_PLLRDY: Main PLL ready interrupt. + * @arg RCC_IT_PLLI2SRDY: PLLI2S ready interrupt. + */ +#define __HAL_RCC_DISABLE_IT(__INTERRUPT__) (*(__IO uint8_t *) RCC_CIR_BYTE1_ADDRESS &= (uint8_t)(~(__INTERRUPT__))) + +/** @brief Clear the RCC's interrupt pending bits (Perform Byte access to RCC_CIR[23:16] + * bits to clear the selected interrupt pending bits. + * @param __INTERRUPT__ specifies the interrupt pending bit to clear. + * This parameter can be any combination of the following values: + * @arg RCC_IT_LSIRDY: LSI ready interrupt. + * @arg RCC_IT_LSERDY: LSE ready interrupt. + * @arg RCC_IT_HSIRDY: HSI ready interrupt. + * @arg RCC_IT_HSERDY: HSE ready interrupt. + * @arg RCC_IT_PLLRDY: Main PLL ready interrupt. + * @arg RCC_IT_PLLI2SRDY: PLLI2S ready interrupt. + * @arg RCC_IT_CSS: Clock Security System interrupt + */ +#define __HAL_RCC_CLEAR_IT(__INTERRUPT__) (*(__IO uint8_t *) RCC_CIR_BYTE2_ADDRESS = (__INTERRUPT__)) + +/** @brief Check the RCC's interrupt has occurred or not. + * @param __INTERRUPT__ specifies the RCC interrupt source to check. + * This parameter can be one of the following values: + * @arg RCC_IT_LSIRDY: LSI ready interrupt. + * @arg RCC_IT_LSERDY: LSE ready interrupt. + * @arg RCC_IT_HSIRDY: HSI ready interrupt. + * @arg RCC_IT_HSERDY: HSE ready interrupt. + * @arg RCC_IT_PLLRDY: Main PLL ready interrupt. + * @arg RCC_IT_PLLI2SRDY: PLLI2S ready interrupt. + * @arg RCC_IT_CSS: Clock Security System interrupt + * @retval The new state of __INTERRUPT__ (TRUE or FALSE). + */ +#define __HAL_RCC_GET_IT(__INTERRUPT__) ((RCC->CIR & (__INTERRUPT__)) == (__INTERRUPT__)) + +/** @brief Set RMVF bit to clear the reset flags: RCC_FLAG_PINRST, RCC_FLAG_PORRST, + * RCC_FLAG_SFTRST, RCC_FLAG_IWDGRST, RCC_FLAG_WWDGRST and RCC_FLAG_LPWRRST. + */ +#define __HAL_RCC_CLEAR_RESET_FLAGS() (RCC->CSR |= RCC_CSR_RMVF) + +/** @brief Check RCC flag is set or not. + * @param __FLAG__ specifies the flag to check. + * This parameter can be one of the following values: + * @arg RCC_FLAG_HSIRDY: HSI oscillator clock ready. + * @arg RCC_FLAG_HSERDY: HSE oscillator clock ready. + * @arg RCC_FLAG_PLLRDY: Main PLL clock ready. + * @arg RCC_FLAG_PLLI2SRDY: PLLI2S clock ready. + * @arg RCC_FLAG_LSERDY: LSE oscillator clock ready. + * @arg RCC_FLAG_LSIRDY: LSI oscillator clock ready. + * @arg RCC_FLAG_BORRST: POR/PDR or BOR reset. + * @arg RCC_FLAG_PINRST: Pin reset. + * @arg RCC_FLAG_PORRST: POR/PDR reset. + * @arg RCC_FLAG_SFTRST: Software reset. + * @arg RCC_FLAG_IWDGRST: Independent Watchdog reset. + * @arg RCC_FLAG_WWDGRST: Window Watchdog reset. + * @arg RCC_FLAG_LPWRRST: Low Power reset. + * @retval The new state of __FLAG__ (TRUE or FALSE). + */ +#define RCC_FLAG_MASK ((uint8_t)0x1F) +#define __HAL_RCC_GET_FLAG(__FLAG__) (((((((__FLAG__) >> 5) == 1)? RCC->CR :((((__FLAG__) >> 5) == 2) ? RCC->BDCR :((((__FLAG__) >> 5) == 3)? RCC->CSR :RCC->CIR))) & ((uint32_t)1 << ((__FLAG__) & RCC_FLAG_MASK)))!= 0)? 
1 : 0) + +/** + * @} + */ + +/** + * @} + */ + +/* Include RCC HAL Extension module */ +#include "stm32f7xx_hal_rcc_ex.h" + +/* Exported functions --------------------------------------------------------*/ + /** @addtogroup RCC_Exported_Functions + * @{ + */ + +/** @addtogroup RCC_Exported_Functions_Group1 + * @{ + */ +/* Initialization and de-initialization functions ******************************/ +HAL_StatusTypeDef HAL_RCC_DeInit(void); +HAL_StatusTypeDef HAL_RCC_OscConfig(RCC_OscInitTypeDef *RCC_OscInitStruct); +HAL_StatusTypeDef HAL_RCC_ClockConfig(RCC_ClkInitTypeDef *RCC_ClkInitStruct, uint32_t FLatency); +/** + * @} + */ + +/** @addtogroup RCC_Exported_Functions_Group2 + * @{ + */ +/* Peripheral Control functions ************************************************/ +void HAL_RCC_MCOConfig(uint32_t RCC_MCOx, uint32_t RCC_MCOSource, uint32_t RCC_MCODiv); +void HAL_RCC_EnableCSS(void); +void HAL_RCC_DisableCSS(void); +uint32_t HAL_RCC_GetSysClockFreq(void); +uint32_t HAL_RCC_GetHCLKFreq(void); +uint32_t HAL_RCC_GetPCLK1Freq(void); +uint32_t HAL_RCC_GetPCLK2Freq(void); +void HAL_RCC_GetOscConfig(RCC_OscInitTypeDef *RCC_OscInitStruct); +void HAL_RCC_GetClockConfig(RCC_ClkInitTypeDef *RCC_ClkInitStruct, uint32_t *pFLatency); + +/* CSS NMI IRQ handler */ +void HAL_RCC_NMI_IRQHandler(void); + +/* User Callbacks in non blocking mode (IT mode) */ +void HAL_RCC_CSSCallback(void); +/** + * @} + */ + +/** + * @} + */ + +/* Private types -------------------------------------------------------------*/ +/* Private variables ---------------------------------------------------------*/ +/* Private constants ---------------------------------------------------------*/ +/** @defgroup RCC_Private_Constants RCC Private Constants + * @{ + */ +#define HSE_TIMEOUT_VALUE HSE_STARTUP_TIMEOUT +#define HSI_TIMEOUT_VALUE ((uint32_t)2) /* 2 ms */ +#define LSI_TIMEOUT_VALUE ((uint32_t)2) /* 2 ms */ +#define PLL_TIMEOUT_VALUE ((uint32_t)2) /* 2 ms */ +#define CLOCKSWITCH_TIMEOUT_VALUE ((uint32_t)5000) /* 5 s */ +#define PLLI2S_TIMEOUT_VALUE 100U /* Timeout value fixed to 100 ms */ +#define PLLSAI_TIMEOUT_VALUE 100U /* Timeout value fixed to 100 ms */ + +/** @defgroup RCC_BitAddress_Alias RCC BitAddress Alias + * @brief RCC registers bit address alias + * @{ + */ +/* CIR register byte 2 (Bits[15:8]) base address */ +#define RCC_CIR_BYTE1_ADDRESS ((uint32_t)(RCC_BASE + 0x0C + 0x01)) + +/* CIR register byte 3 (Bits[23:16]) base address */ +#define RCC_CIR_BYTE2_ADDRESS ((uint32_t)(RCC_BASE + 0x0C + 0x02)) + +#define RCC_DBP_TIMEOUT_VALUE ((uint32_t)100) +#define RCC_LSE_TIMEOUT_VALUE LSE_STARTUP_TIMEOUT +/** + * @} + */ +/** + * @} + */ + +/* Private macros ------------------------------------------------------------*/ +/** @addtogroup RCC_Private_Macros RCC Private Macros + * @{ + */ + +/** @defgroup RCC_IS_RCC_Definitions RCC Private macros to check input parameters + * @{ + */ +#define IS_RCC_OSCILLATORTYPE(OSCILLATOR) ((OSCILLATOR) <= 15) + +#define IS_RCC_HSE(HSE) (((HSE) == RCC_HSE_OFF) || ((HSE) == RCC_HSE_ON) || \ + ((HSE) == RCC_HSE_BYPASS)) + +#define IS_RCC_LSE(LSE) (((LSE) == RCC_LSE_OFF) || ((LSE) == RCC_LSE_ON) || \ + ((LSE) == RCC_LSE_BYPASS)) + +#define IS_RCC_HSI(HSI) (((HSI) == RCC_HSI_OFF) || ((HSI) == RCC_HSI_ON)) + +#define IS_RCC_LSI(LSI) (((LSI) == RCC_LSI_OFF) || ((LSI) == RCC_LSI_ON)) + +#define IS_RCC_PLL(PLL) (((PLL) == RCC_PLL_NONE) ||((PLL) == RCC_PLL_OFF) || ((PLL) == RCC_PLL_ON)) + +#define IS_RCC_PLLSOURCE(SOURCE) (((SOURCE) == RCC_PLLSOURCE_HSI) || \ + ((SOURCE) == RCC_PLLSOURCE_HSE)) + 
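+
+/* --- Illustrative usage sketch (not part of the original ST header) -------
+ * Demonstrates the reset-cause query macros defined earlier in this file
+ * (__HAL_RCC_GET_FLAG(), __HAL_RCC_CLEAR_RESET_FLAGS() and the RCC_FLAG_x
+ * values). In a real application this would live in main.c rather than in a
+ * header; it is kept inside #if 0 so it is never compiled. */
+#if 0
+static void Example_CheckResetCause(void)
+{
+  if (__HAL_RCC_GET_FLAG(RCC_FLAG_IWDGRST))
+  {
+    /* Independent watchdog reset: e.g. record the event or enter safe mode. */
+  }
+  else if (__HAL_RCC_GET_FLAG(RCC_FLAG_SFTRST))
+  {
+    /* Software reset, e.g. requested via NVIC_SystemReset(). */
+  }
+  else if (__HAL_RCC_GET_FLAG(RCC_FLAG_PORRST))
+  {
+    /* Cold start after a power-on reset. */
+  }
+
+  /* The flags are sticky across resets; clear them once they have been read. */
+  __HAL_RCC_CLEAR_RESET_FLAGS();
+}
+#endif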
+#define IS_RCC_SYSCLKSOURCE(SOURCE) (((SOURCE) == RCC_SYSCLKSOURCE_HSI) || \ + ((SOURCE) == RCC_SYSCLKSOURCE_HSE) || \ + ((SOURCE) == RCC_SYSCLKSOURCE_PLLCLK)) +#define IS_RCC_PLLM_VALUE(VALUE) ((2 <= (VALUE)) && ((VALUE) <= 63)) + +#define IS_RCC_PLLN_VALUE(VALUE) ((50 <= (VALUE)) && ((VALUE) <= 432)) + +#define IS_RCC_PLLP_VALUE(VALUE) (((VALUE) == RCC_PLLP_DIV2) || ((VALUE) == RCC_PLLP_DIV4) || \ + ((VALUE) == RCC_PLLP_DIV6) || ((VALUE) == RCC_PLLP_DIV8)) +#define IS_RCC_PLLQ_VALUE(VALUE) ((2 <= (VALUE)) && ((VALUE) <= 15)) + +#define IS_RCC_HCLK(HCLK) (((HCLK) == RCC_SYSCLK_DIV1) || ((HCLK) == RCC_SYSCLK_DIV2) || \ + ((HCLK) == RCC_SYSCLK_DIV4) || ((HCLK) == RCC_SYSCLK_DIV8) || \ + ((HCLK) == RCC_SYSCLK_DIV16) || ((HCLK) == RCC_SYSCLK_DIV64) || \ + ((HCLK) == RCC_SYSCLK_DIV128) || ((HCLK) == RCC_SYSCLK_DIV256) || \ + ((HCLK) == RCC_SYSCLK_DIV512)) + +#define IS_RCC_CLOCKTYPE(CLK) ((1 <= (CLK)) && ((CLK) <= 15)) + +#define IS_RCC_PCLK(PCLK) (((PCLK) == RCC_HCLK_DIV1) || ((PCLK) == RCC_HCLK_DIV2) || \ + ((PCLK) == RCC_HCLK_DIV4) || ((PCLK) == RCC_HCLK_DIV8) || \ + ((PCLK) == RCC_HCLK_DIV16)) + +#define IS_RCC_MCO(MCOX) (((MCOX) == RCC_MCO1) || ((MCOX) == RCC_MCO2)) + + +#define IS_RCC_MCO1SOURCE(SOURCE) (((SOURCE) == RCC_MCO1SOURCE_HSI) || ((SOURCE) == RCC_MCO1SOURCE_LSE) || \ + ((SOURCE) == RCC_MCO1SOURCE_HSE) || ((SOURCE) == RCC_MCO1SOURCE_PLLCLK)) + +#define IS_RCC_MCO2SOURCE(SOURCE) (((SOURCE) == RCC_MCO2SOURCE_SYSCLK) || ((SOURCE) == RCC_MCO2SOURCE_PLLI2SCLK)|| \ + ((SOURCE) == RCC_MCO2SOURCE_HSE) || ((SOURCE) == RCC_MCO2SOURCE_PLLCLK)) + +#define IS_RCC_MCODIV(DIV) (((DIV) == RCC_MCODIV_1) || ((DIV) == RCC_MCODIV_2) || \ + ((DIV) == RCC_MCODIV_3) || ((DIV) == RCC_MCODIV_4) || \ + ((DIV) == RCC_MCODIV_5)) +#define IS_RCC_CALIBRATION_VALUE(VALUE) ((VALUE) <= 0x1F) + +#define IS_RCC_RTCCLKSOURCE(SOURCE) (((SOURCE) == RCC_RTCCLKSOURCE_LSE) || ((SOURCE) == RCC_RTCCLKSOURCE_LSI) || \ + ((SOURCE) == RCC_RTCCLKSOURCE_HSE_DIV2) || ((SOURCE) == RCC_RTCCLKSOURCE_HSE_DIV3) || \ + ((SOURCE) == RCC_RTCCLKSOURCE_HSE_DIV4) || ((SOURCE) == RCC_RTCCLKSOURCE_HSE_DIV5) || \ + ((SOURCE) == RCC_RTCCLKSOURCE_HSE_DIV6) || ((SOURCE) == RCC_RTCCLKSOURCE_HSE_DIV7) || \ + ((SOURCE) == RCC_RTCCLKSOURCE_HSE_DIV8) || ((SOURCE) == RCC_RTCCLKSOURCE_HSE_DIV9) || \ + ((SOURCE) == RCC_RTCCLKSOURCE_HSE_DIV10) || ((SOURCE) == RCC_RTCCLKSOURCE_HSE_DIV11) || \ + ((SOURCE) == RCC_RTCCLKSOURCE_HSE_DIV12) || ((SOURCE) == RCC_RTCCLKSOURCE_HSE_DIV13) || \ + ((SOURCE) == RCC_RTCCLKSOURCE_HSE_DIV14) || ((SOURCE) == RCC_RTCCLKSOURCE_HSE_DIV15) || \ + ((SOURCE) == RCC_RTCCLKSOURCE_HSE_DIV16) || ((SOURCE) == RCC_RTCCLKSOURCE_HSE_DIV17) || \ + ((SOURCE) == RCC_RTCCLKSOURCE_HSE_DIV18) || ((SOURCE) == RCC_RTCCLKSOURCE_HSE_DIV19) || \ + ((SOURCE) == RCC_RTCCLKSOURCE_HSE_DIV20) || ((SOURCE) == RCC_RTCCLKSOURCE_HSE_DIV21) || \ + ((SOURCE) == RCC_RTCCLKSOURCE_HSE_DIV22) || ((SOURCE) == RCC_RTCCLKSOURCE_HSE_DIV23) || \ + ((SOURCE) == RCC_RTCCLKSOURCE_HSE_DIV24) || ((SOURCE) == RCC_RTCCLKSOURCE_HSE_DIV25) || \ + ((SOURCE) == RCC_RTCCLKSOURCE_HSE_DIV26) || ((SOURCE) == RCC_RTCCLKSOURCE_HSE_DIV27) || \ + ((SOURCE) == RCC_RTCCLKSOURCE_HSE_DIV28) || ((SOURCE) == RCC_RTCCLKSOURCE_HSE_DIV29) || \ + ((SOURCE) == RCC_RTCCLKSOURCE_HSE_DIV30) || ((SOURCE) == RCC_RTCCLKSOURCE_HSE_DIV31)) + + +#define IS_RCC_LSE_DRIVE(DRIVE) (((DRIVE) == RCC_LSEDRIVE_LOW) || \ + ((DRIVE) == RCC_LSEDRIVE_MEDIUMLOW) || \ + ((DRIVE) == RCC_LSEDRIVE_MEDIUMHIGH) || \ + ((DRIVE) == RCC_LSEDRIVE_HIGH)) +/** + * @} + */ + +/** + * @} + */ + +/** + * @} + */ + +/** + * @} + 
*/ + +#ifdef __cplusplus +} +#endif + +#endif /* __STM32F7xx_HAL_RCC_H */ + diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_rcc_ex.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_rcc_ex.h new file mode 100644 index 0000000..3aff351 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_rcc_ex.h @@ -0,0 +1,3521 @@ +/** + ****************************************************************************** + * @file stm32f7xx_hal_rcc_ex.h + * @author MCD Application Team + * @brief Header file of RCC HAL Extension module. + ****************************************************************************** + * @attention + * + * Copyright (c) 2017 STMicroelectronics. + * All rights reserved. + * + * This software is licensed under terms that can be found in the LICENSE file in + * the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. + ****************************************************************************** + */ + +/* Define to prevent recursive inclusion -------------------------------------*/ +#ifndef __STM32F7xx_HAL_RCC_EX_H +#define __STM32F7xx_HAL_RCC_EX_H + +#ifdef __cplusplus + extern "C" { +#endif + +/* Includes ------------------------------------------------------------------*/ +#include "stm32f7xx_hal_def.h" + +/** @addtogroup STM32F7xx_HAL_Driver + * @{ + */ + +/** @addtogroup RCCEx + * @{ + */ + +/* Exported types ------------------------------------------------------------*/ +/** @defgroup RCCEx_Exported_Types RCCEx Exported Types + * @{ + */ + +/** + * @brief RCC PLL configuration structure definition + */ +typedef struct +{ + uint32_t PLLState; /*!< The new state of the PLL. + This parameter can be a value of @ref RCC_PLL_Config */ + + uint32_t PLLSource; /*!< RCC_PLLSource: PLL entry clock source. + This parameter must be a value of @ref RCC_PLL_Clock_Source */ + + uint32_t PLLM; /*!< PLLM: Division factor for PLL VCO input clock. + This parameter must be a number between Min_Data = 2 and Max_Data = 63 */ + + uint32_t PLLN; /*!< PLLN: Multiplication factor for PLL VCO output clock. + This parameter must be a number between Min_Data = 50 and Max_Data = 432 */ + + uint32_t PLLP; /*!< PLLP: Division factor for main system clock (SYSCLK). + This parameter must be a value of @ref RCC_PLLP_Clock_Divider */ + + uint32_t PLLQ; /*!< PLLQ: Division factor for OTG FS, SDMMC and RNG clocks. + This parameter must be a number between Min_Data = 2 and Max_Data = 15 */ +#if defined (STM32F765xx) || defined (STM32F767xx) || defined (STM32F769xx) || defined (STM32F777xx) || defined (STM32F779xx) + uint32_t PLLR; /*!< PLLR: Division factor for DSI clock. + This parameter must be a number between Min_Data = 2 and Max_Data = 7 */ +#endif /* STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx */ + +}RCC_PLLInitTypeDef; + +/** + * @brief PLLI2S Clock structure definition + */ +typedef struct +{ + uint32_t PLLI2SN; /*!< Specifies the multiplication factor for PLLI2S VCO output clock. + This parameter must be a number between Min_Data = 50 and Max_Data = 432. + This parameter will be used only when PLLI2S is selected as Clock Source I2S or SAI */ + + uint32_t PLLI2SR; /*!< Specifies the division factor for I2S clock. + This parameter must be a number between Min_Data = 2 and Max_Data = 7. 
+ This parameter will be used only when PLLI2S is selected as Clock Source I2S or SAI */ + + uint32_t PLLI2SQ; /*!< Specifies the division factor for SAI1 clock. + This parameter must be a number between Min_Data = 2 and Max_Data = 15. + This parameter will be used only when PLLI2S is selected as Clock Source SAI */ + +#if defined (STM32F745xx) || defined (STM32F746xx) || defined (STM32F756xx) || defined (STM32F765xx) || defined (STM32F767xx) || \ + defined (STM32F769xx) || defined (STM32F777xx) || defined (STM32F779xx) || defined (STM32F750xx) + uint32_t PLLI2SP; /*!< Specifies the division factor for SPDIF-RX clock. + This parameter must be a value of @ref RCCEx_PLLI2SP_Clock_Divider. + This parameter will be used only when PLLI2S is selected as Clock Source SPDIF-RX */ +#endif /* STM32F745xx || STM32F746xx || STM32F756xx || STM32F765xx || STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx || STM32F750xx */ +}RCC_PLLI2SInitTypeDef; + +/** + * @brief PLLSAI Clock structure definition + */ +typedef struct +{ + uint32_t PLLSAIN; /*!< Specifies the multiplication factor for PLLI2S VCO output clock. + This parameter must be a number between Min_Data = 50 and Max_Data = 432. + This parameter will be used only when PLLSAI is selected as Clock Source SAI or LTDC */ + + uint32_t PLLSAIQ; /*!< Specifies the division factor for SAI1 clock. + This parameter must be a number between Min_Data = 2 and Max_Data = 15. + This parameter will be used only when PLLSAI is selected as Clock Source SAI or LTDC */ + +#if defined (STM32F745xx) || defined (STM32F746xx) || defined (STM32F756xx) || defined (STM32F765xx) || defined (STM32F767xx) || \ + defined (STM32F769xx) || defined (STM32F777xx) || defined (STM32F779xx) || defined (STM32F750xx) + uint32_t PLLSAIR; /*!< specifies the division factor for LTDC clock + This parameter must be a number between Min_Data = 2 and Max_Data = 7. + This parameter will be used only when PLLSAI is selected as Clock Source LTDC */ +#endif /* STM32F745xx || STM32F746xx || STM32F756xx || STM32F765xx || STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx || STM32F750xx */ + + uint32_t PLLSAIP; /*!< Specifies the division factor for 48MHz clock. + This parameter must be a value of @ref RCCEx_PLLSAIP_Clock_Divider + This parameter will be used only when PLLSAI is disabled */ +}RCC_PLLSAIInitTypeDef; + +/** + * @brief RCC extended clocks structure definition + */ +typedef struct +{ + uint32_t PeriphClockSelection; /*!< The Extended Clock to be configured. + This parameter can be a value of @ref RCCEx_Periph_Clock_Selection */ + + RCC_PLLI2SInitTypeDef PLLI2S; /*!< PLL I2S structure parameters. + This parameter will be used only when PLLI2S is selected as Clock Source I2S or SAI */ + + RCC_PLLSAIInitTypeDef PLLSAI; /*!< PLL SAI structure parameters. + This parameter will be used only when PLLI2S is selected as Clock Source SAI or LTDC */ + + uint32_t PLLI2SDivQ; /*!< Specifies the PLLI2S division factor for SAI1 clock. + This parameter must be a number between Min_Data = 1 and Max_Data = 32 + This parameter will be used only when PLLI2S is selected as Clock Source SAI */ + + uint32_t PLLSAIDivQ; /*!< Specifies the PLLI2S division factor for SAI1 clock. + This parameter must be a number between Min_Data = 1 and Max_Data = 32 + This parameter will be used only when PLLSAI is selected as Clock Source SAI */ + + uint32_t PLLSAIDivR; /*!< Specifies the PLLSAI division factor for LTDC clock. 
+ This parameter must be one value of @ref RCCEx_PLLSAI_DIVR */ + + uint32_t RTCClockSelection; /*!< Specifies RTC Clock source Selection. + This parameter can be a value of @ref RCC_RTC_Clock_Source */ + + uint32_t I2sClockSelection; /*!< Specifies I2S Clock source Selection. + This parameter can be a value of @ref RCCEx_I2S_Clock_Source */ + + uint32_t TIMPresSelection; /*!< Specifies TIM Clock Prescalers Selection. + This parameter can be a value of @ref RCCEx_TIM_Prescaler_Selection */ + + uint32_t Sai1ClockSelection; /*!< Specifies SAI1 Clock Prescalers Selection + This parameter can be a value of @ref RCCEx_SAI1_Clock_Source */ + + uint32_t Sai2ClockSelection; /*!< Specifies SAI2 Clock Prescalers Selection + This parameter can be a value of @ref RCCEx_SAI2_Clock_Source */ + + uint32_t Usart1ClockSelection; /*!< USART1 clock source + This parameter can be a value of @ref RCCEx_USART1_Clock_Source */ + + uint32_t Usart2ClockSelection; /*!< USART2 clock source + This parameter can be a value of @ref RCCEx_USART2_Clock_Source */ + + uint32_t Usart3ClockSelection; /*!< USART3 clock source + This parameter can be a value of @ref RCCEx_USART3_Clock_Source */ + + uint32_t Uart4ClockSelection; /*!< UART4 clock source + This parameter can be a value of @ref RCCEx_UART4_Clock_Source */ + + uint32_t Uart5ClockSelection; /*!< UART5 clock source + This parameter can be a value of @ref RCCEx_UART5_Clock_Source */ + + uint32_t Usart6ClockSelection; /*!< USART6 clock source + This parameter can be a value of @ref RCCEx_USART6_Clock_Source */ + + uint32_t Uart7ClockSelection; /*!< UART7 clock source + This parameter can be a value of @ref RCCEx_UART7_Clock_Source */ + + uint32_t Uart8ClockSelection; /*!< UART8 clock source + This parameter can be a value of @ref RCCEx_UART8_Clock_Source */ + + uint32_t I2c1ClockSelection; /*!< I2C1 clock source + This parameter can be a value of @ref RCCEx_I2C1_Clock_Source */ + + uint32_t I2c2ClockSelection; /*!< I2C2 clock source + This parameter can be a value of @ref RCCEx_I2C2_Clock_Source */ + + uint32_t I2c3ClockSelection; /*!< I2C3 clock source + This parameter can be a value of @ref RCCEx_I2C3_Clock_Source */ + + uint32_t I2c4ClockSelection; /*!< I2C4 clock source + This parameter can be a value of @ref RCCEx_I2C4_Clock_Source */ + + uint32_t Lptim1ClockSelection; /*!< Specifies LPTIM1 clock source + This parameter can be a value of @ref RCCEx_LPTIM1_Clock_Source */ + + uint32_t CecClockSelection; /*!< CEC clock source + This parameter can be a value of @ref RCCEx_CEC_Clock_Source */ + + uint32_t Clk48ClockSelection; /*!< Specifies 48Mhz clock source used by USB OTG FS, RNG and SDMMC + This parameter can be a value of @ref RCCEx_CLK48_Clock_Source */ + + uint32_t Sdmmc1ClockSelection; /*!< SDMMC1 clock source + This parameter can be a value of @ref RCCEx_SDMMC1_Clock_Source */ + +#if defined (STM32F722xx) || defined (STM32F723xx) || defined (STM32F732xx) || defined (STM32F733xx) || defined (STM32F765xx) ||\ + defined (STM32F767xx) || defined (STM32F769xx) || defined (STM32F777xx) || defined (STM32F779xx) || defined (STM32F730xx) + uint32_t Sdmmc2ClockSelection; /*!< SDMMC2 clock source + This parameter can be a value of @ref RCCEx_SDMMC2_Clock_Source */ +#endif /* STM32F722xx || STM32F723xx || STM32F732xx || STM32F733xx || STM32F765xx || STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx || STM32F730xx */ + +#if defined (STM32F765xx) || defined (STM32F767xx) || defined (STM32F769xx) || defined (STM32F777xx) || defined (STM32F779xx) + uint32_t 
Dfsdm1ClockSelection; /*!< DFSDM1 clock source + This parameter can be a value of @ref RCCEx_DFSDM1_Kernel_Clock_Source */ + + uint32_t Dfsdm1AudioClockSelection; /*!< DFSDM1 clock source + This parameter can be a value of @ref RCCEx_DFSDM1_AUDIO_Clock_Source */ +#endif /* STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx */ +}RCC_PeriphCLKInitTypeDef; +/** + * @} + */ + +/* Exported constants --------------------------------------------------------*/ +/** @defgroup RCCEx_Exported_Constants RCCEx Exported Constants + * @{ + */ + +/** @defgroup RCCEx_Periph_Clock_Selection RCC Periph Clock Selection + * @{ + */ +#define RCC_PERIPHCLK_I2S ((uint32_t)0x00000001U) +#if defined(STM32F746xx) || defined(STM32F756xx) || defined (STM32F767xx) || defined (STM32F769xx) || defined (STM32F777xx) || defined (STM32F779xx) || defined (STM32F750xx) +#define RCC_PERIPHCLK_LTDC ((uint32_t)0x00000008U) +#endif /* STM32F746xx || STM32F756xx || STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx || STM32F750xx */ +#define RCC_PERIPHCLK_TIM ((uint32_t)0x00000010U) +#define RCC_PERIPHCLK_RTC ((uint32_t)0x00000020U) +#define RCC_PERIPHCLK_USART1 ((uint32_t)0x00000040U) +#define RCC_PERIPHCLK_USART2 ((uint32_t)0x00000080U) +#define RCC_PERIPHCLK_USART3 ((uint32_t)0x00000100U) +#define RCC_PERIPHCLK_UART4 ((uint32_t)0x00000200U) +#define RCC_PERIPHCLK_UART5 ((uint32_t)0x00000400U) +#define RCC_PERIPHCLK_USART6 ((uint32_t)0x00000800U) +#define RCC_PERIPHCLK_UART7 ((uint32_t)0x00001000U) +#define RCC_PERIPHCLK_UART8 ((uint32_t)0x00002000U) +#define RCC_PERIPHCLK_I2C1 ((uint32_t)0x00004000U) +#define RCC_PERIPHCLK_I2C2 ((uint32_t)0x00008000U) +#define RCC_PERIPHCLK_I2C3 ((uint32_t)0x00010000U) +#define RCC_PERIPHCLK_I2C4 ((uint32_t)0x00020000U) +#define RCC_PERIPHCLK_LPTIM1 ((uint32_t)0x00040000U) +#define RCC_PERIPHCLK_SAI1 ((uint32_t)0x00080000U) +#define RCC_PERIPHCLK_SAI2 ((uint32_t)0x00100000U) +#define RCC_PERIPHCLK_CLK48 ((uint32_t)0x00200000U) +#define RCC_PERIPHCLK_CEC ((uint32_t)0x00400000U) +#define RCC_PERIPHCLK_SDMMC1 ((uint32_t)0x00800000U) +#define RCC_PERIPHCLK_SPDIFRX ((uint32_t)0x01000000U) +#define RCC_PERIPHCLK_PLLI2S ((uint32_t)0x02000000U) +#if defined (STM32F722xx) || defined (STM32F723xx) || defined (STM32F732xx) || defined (STM32F733xx) || defined (STM32F765xx) ||\ + defined (STM32F767xx) || defined (STM32F769xx) || defined (STM32F777xx) || defined (STM32F779xx) || defined (STM32F730xx) +#define RCC_PERIPHCLK_SDMMC2 ((uint32_t)0x04000000U) +#endif /* STM32F722xx || STM32F723xx || STM32F732xx || STM32F733xx || STM32F765xx || STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx || STM32F730xx */ +#if defined (STM32F765xx) || defined (STM32F767xx) || defined (STM32F769xx) || defined (STM32F777xx) || defined (STM32F779xx) +#define RCC_PERIPHCLK_DFSDM1 ((uint32_t)0x08000000U) +#define RCC_PERIPHCLK_DFSDM1_AUDIO ((uint32_t)0x10000000U) +#endif /* STM32F765xx || STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx */ + +/** + * @} + */ + +#if defined (STM32F745xx) || defined (STM32F746xx) || defined (STM32F756xx) || defined (STM32F765xx) || defined (STM32F767xx) || \ + defined (STM32F769xx) || defined (STM32F777xx) || defined (STM32F779xx) || defined (STM32F750xx) +/** @defgroup RCCEx_PLLI2SP_Clock_Divider RCCEx PLLI2SP Clock Divider + * @{ + */ +#define RCC_PLLI2SP_DIV2 ((uint32_t)0x00000000U) +#define RCC_PLLI2SP_DIV4 ((uint32_t)0x00000001U) +#define RCC_PLLI2SP_DIV6 ((uint32_t)0x00000002U) +#define RCC_PLLI2SP_DIV8 ((uint32_t)0x00000003U) +/** + * @} + */ +#endif /* STM32F745xx 
|| STM32F746xx || STM32F756xx || STM32F765xx || STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx || STM32F750xx */ + +/** @defgroup RCCEx_PLLSAIP_Clock_Divider RCCEx PLLSAIP Clock Divider + * @{ + */ +#define RCC_PLLSAIP_DIV2 ((uint32_t)0x00000000U) +#define RCC_PLLSAIP_DIV4 ((uint32_t)0x00000001U) +#define RCC_PLLSAIP_DIV6 ((uint32_t)0x00000002U) +#define RCC_PLLSAIP_DIV8 ((uint32_t)0x00000003U) +/** + * @} + */ + +/** @defgroup RCCEx_PLLSAI_DIVR RCCEx PLLSAI DIVR + * @{ + */ +#define RCC_PLLSAIDIVR_2 ((uint32_t)0x00000000U) +#define RCC_PLLSAIDIVR_4 RCC_DCKCFGR1_PLLSAIDIVR_0 +#define RCC_PLLSAIDIVR_8 RCC_DCKCFGR1_PLLSAIDIVR_1 +#define RCC_PLLSAIDIVR_16 RCC_DCKCFGR1_PLLSAIDIVR +/** + * @} + */ + +/** @defgroup RCCEx_I2S_Clock_Source RCCEx I2S Clock Source + * @{ + */ +#define RCC_I2SCLKSOURCE_PLLI2S ((uint32_t)0x00000000U) +#define RCC_I2SCLKSOURCE_EXT RCC_CFGR_I2SSRC + +/** + * @} + */ + +/** @defgroup RCCEx_SAI1_Clock_Source RCCEx SAI1 Clock Source + * @{ + */ +#define RCC_SAI1CLKSOURCE_PLLSAI ((uint32_t)0x00000000U) +#define RCC_SAI1CLKSOURCE_PLLI2S RCC_DCKCFGR1_SAI1SEL_0 +#define RCC_SAI1CLKSOURCE_PIN RCC_DCKCFGR1_SAI1SEL_1 +#if defined (STM32F765xx) || defined (STM32F767xx) || defined (STM32F769xx) || defined (STM32F777xx) || defined (STM32F779xx) +#define RCC_SAI1CLKSOURCE_PLLSRC RCC_DCKCFGR1_SAI1SEL +#endif /* STM32F765xx || STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx */ +/** + * @} + */ + +/** @defgroup RCCEx_SAI2_Clock_Source RCCEx SAI2 Clock Source + * @{ + */ +#define RCC_SAI2CLKSOURCE_PLLSAI ((uint32_t)0x00000000U) +#define RCC_SAI2CLKSOURCE_PLLI2S RCC_DCKCFGR1_SAI2SEL_0 +#define RCC_SAI2CLKSOURCE_PIN RCC_DCKCFGR1_SAI2SEL_1 +#if defined (STM32F765xx) || defined (STM32F767xx) || defined (STM32F769xx) || defined (STM32F777xx) || defined (STM32F779xx) +#define RCC_SAI2CLKSOURCE_PLLSRC RCC_DCKCFGR1_SAI2SEL +#endif /* STM32F765xx || STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx */ +/** + * @} + */ + +/** @defgroup RCCEx_CEC_Clock_Source RCCEx CEC Clock Source + * @{ + */ +#define RCC_CECCLKSOURCE_LSE ((uint32_t)0x00000000U) +#define RCC_CECCLKSOURCE_HSI RCC_DCKCFGR2_CECSEL /* CEC clock is HSI/488*/ +/** + * @} + */ + +/** @defgroup RCCEx_USART1_Clock_Source RCCEx USART1 Clock Source + * @{ + */ +#define RCC_USART1CLKSOURCE_PCLK2 ((uint32_t)0x00000000U) +#define RCC_USART1CLKSOURCE_SYSCLK RCC_DCKCFGR2_USART1SEL_0 +#define RCC_USART1CLKSOURCE_HSI RCC_DCKCFGR2_USART1SEL_1 +#define RCC_USART1CLKSOURCE_LSE RCC_DCKCFGR2_USART1SEL +/** + * @} + */ + +/** @defgroup RCCEx_USART2_Clock_Source RCCEx USART2 Clock Source + * @{ + */ +#define RCC_USART2CLKSOURCE_PCLK1 ((uint32_t)0x00000000U) +#define RCC_USART2CLKSOURCE_SYSCLK RCC_DCKCFGR2_USART2SEL_0 +#define RCC_USART2CLKSOURCE_HSI RCC_DCKCFGR2_USART2SEL_1 +#define RCC_USART2CLKSOURCE_LSE RCC_DCKCFGR2_USART2SEL +/** + * @} + */ + +/** @defgroup RCCEx_USART3_Clock_Source RCCEx USART3 Clock Source + * @{ + */ +#define RCC_USART3CLKSOURCE_PCLK1 ((uint32_t)0x00000000U) +#define RCC_USART3CLKSOURCE_SYSCLK RCC_DCKCFGR2_USART3SEL_0 +#define RCC_USART3CLKSOURCE_HSI RCC_DCKCFGR2_USART3SEL_1 +#define RCC_USART3CLKSOURCE_LSE RCC_DCKCFGR2_USART3SEL +/** + * @} + */ + +/** @defgroup RCCEx_UART4_Clock_Source RCCEx UART4 Clock Source + * @{ + */ +#define RCC_UART4CLKSOURCE_PCLK1 ((uint32_t)0x00000000U) +#define RCC_UART4CLKSOURCE_SYSCLK RCC_DCKCFGR2_UART4SEL_0 +#define RCC_UART4CLKSOURCE_HSI RCC_DCKCFGR2_UART4SEL_1 +#define RCC_UART4CLKSOURCE_LSE RCC_DCKCFGR2_UART4SEL +/** + * @} + */ + +/** @defgroup RCCEx_UART5_Clock_Source 
RCCEx UART5 Clock Source + * @{ + */ +#define RCC_UART5CLKSOURCE_PCLK1 ((uint32_t)0x00000000U) +#define RCC_UART5CLKSOURCE_SYSCLK RCC_DCKCFGR2_UART5SEL_0 +#define RCC_UART5CLKSOURCE_HSI RCC_DCKCFGR2_UART5SEL_1 +#define RCC_UART5CLKSOURCE_LSE RCC_DCKCFGR2_UART5SEL +/** + * @} + */ + +/** @defgroup RCCEx_USART6_Clock_Source RCCEx USART6 Clock Source + * @{ + */ +#define RCC_USART6CLKSOURCE_PCLK2 ((uint32_t)0x00000000U) +#define RCC_USART6CLKSOURCE_SYSCLK RCC_DCKCFGR2_USART6SEL_0 +#define RCC_USART6CLKSOURCE_HSI RCC_DCKCFGR2_USART6SEL_1 +#define RCC_USART6CLKSOURCE_LSE RCC_DCKCFGR2_USART6SEL +/** + * @} + */ + +/** @defgroup RCCEx_UART7_Clock_Source RCCEx UART7 Clock Source + * @{ + */ +#define RCC_UART7CLKSOURCE_PCLK1 ((uint32_t)0x00000000U) +#define RCC_UART7CLKSOURCE_SYSCLK RCC_DCKCFGR2_UART7SEL_0 +#define RCC_UART7CLKSOURCE_HSI RCC_DCKCFGR2_UART7SEL_1 +#define RCC_UART7CLKSOURCE_LSE RCC_DCKCFGR2_UART7SEL +/** + * @} + */ + +/** @defgroup RCCEx_UART8_Clock_Source RCCEx UART8 Clock Source + * @{ + */ +#define RCC_UART8CLKSOURCE_PCLK1 ((uint32_t)0x00000000U) +#define RCC_UART8CLKSOURCE_SYSCLK RCC_DCKCFGR2_UART8SEL_0 +#define RCC_UART8CLKSOURCE_HSI RCC_DCKCFGR2_UART8SEL_1 +#define RCC_UART8CLKSOURCE_LSE RCC_DCKCFGR2_UART8SEL +/** + * @} + */ + +/** @defgroup RCCEx_I2C1_Clock_Source RCCEx I2C1 Clock Source + * @{ + */ +#define RCC_I2C1CLKSOURCE_PCLK1 ((uint32_t)0x00000000U) +#define RCC_I2C1CLKSOURCE_SYSCLK RCC_DCKCFGR2_I2C1SEL_0 +#define RCC_I2C1CLKSOURCE_HSI RCC_DCKCFGR2_I2C1SEL_1 +/** + * @} + */ + +/** @defgroup RCCEx_I2C2_Clock_Source RCCEx I2C2 Clock Source + * @{ + */ +#define RCC_I2C2CLKSOURCE_PCLK1 ((uint32_t)0x00000000U) +#define RCC_I2C2CLKSOURCE_SYSCLK RCC_DCKCFGR2_I2C2SEL_0 +#define RCC_I2C2CLKSOURCE_HSI RCC_DCKCFGR2_I2C2SEL_1 + +/** + * @} + */ + +/** @defgroup RCCEx_I2C3_Clock_Source RCCEx I2C3 Clock Source + * @{ + */ +#define RCC_I2C3CLKSOURCE_PCLK1 ((uint32_t)0x00000000U) +#define RCC_I2C3CLKSOURCE_SYSCLK RCC_DCKCFGR2_I2C3SEL_0 +#define RCC_I2C3CLKSOURCE_HSI RCC_DCKCFGR2_I2C3SEL_1 +/** + * @} + */ + +/** @defgroup RCCEx_I2C4_Clock_Source RCCEx I2C4 Clock Source + * @{ + */ +#define RCC_I2C4CLKSOURCE_PCLK1 ((uint32_t)0x00000000U) +#define RCC_I2C4CLKSOURCE_SYSCLK RCC_DCKCFGR2_I2C4SEL_0 +#define RCC_I2C4CLKSOURCE_HSI RCC_DCKCFGR2_I2C4SEL_1 +/** + * @} + */ + +/** @defgroup RCCEx_LPTIM1_Clock_Source RCCEx LPTIM1 Clock Source + * @{ + */ +#define RCC_LPTIM1CLKSOURCE_PCLK1 ((uint32_t)0x00000000U) +#define RCC_LPTIM1CLKSOURCE_LSI RCC_DCKCFGR2_LPTIM1SEL_0 +#define RCC_LPTIM1CLKSOURCE_HSI RCC_DCKCFGR2_LPTIM1SEL_1 +#define RCC_LPTIM1CLKSOURCE_LSE RCC_DCKCFGR2_LPTIM1SEL + +/** + * @} + */ + +/** @defgroup RCCEx_CLK48_Clock_Source RCCEx CLK48 Clock Source + * @{ + */ +#define RCC_CLK48SOURCE_PLL ((uint32_t)0x00000000U) +#define RCC_CLK48SOURCE_PLLSAIP RCC_DCKCFGR2_CK48MSEL +/** + * @} + */ + +/** @defgroup RCCEx_TIM_Prescaler_Selection RCCEx TIM Prescaler Selection + * @{ + */ +#define RCC_TIMPRES_DESACTIVATED ((uint32_t)0x00000000U) +#define RCC_TIMPRES_ACTIVATED RCC_DCKCFGR1_TIMPRE +/** + * @} + */ + +/** @defgroup RCCEx_SDMMC1_Clock_Source RCCEx SDMMC1 Clock Source + * @{ + */ +#define RCC_SDMMC1CLKSOURCE_CLK48 ((uint32_t)0x00000000U) +#define RCC_SDMMC1CLKSOURCE_SYSCLK RCC_DCKCFGR2_SDMMC1SEL +/** + * @} + */ + +#if defined (STM32F722xx) || defined (STM32F723xx) || defined (STM32F732xx) || defined (STM32F733xx) || defined (STM32F765xx) ||\ + defined (STM32F767xx) || defined (STM32F769xx) || defined (STM32F777xx) || defined (STM32F779xx) || defined (STM32F730xx) +/** @defgroup 
RCCEx_SDMMC2_Clock_Source RCCEx SDMMC2 Clock Source + * @{ + */ +#define RCC_SDMMC2CLKSOURCE_CLK48 ((uint32_t)0x00000000U) +#define RCC_SDMMC2CLKSOURCE_SYSCLK RCC_DCKCFGR2_SDMMC2SEL +/** + * @} + */ +#endif /* STM32F722xx || STM32F723xx || STM32F732xx || STM32F733xx || STM32F765xx || STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx || STM32F730xx */ + +#if defined (STM32F765xx) || defined (STM32F767xx) || defined (STM32F769xx) || defined (STM32F777xx) || defined (STM32F779xx) +/** @defgroup RCCEx_DFSDM1_Kernel_Clock_Source RCCEx DFSDM1 Kernel Clock Source + * @{ + */ +#define RCC_DFSDM1CLKSOURCE_PCLK2 ((uint32_t)0x00000000U) +#define RCC_DFSDM1CLKSOURCE_SYSCLK RCC_DCKCFGR1_DFSDM1SEL +/** + * @} + */ + +/** @defgroup RCCEx_DFSDM1_AUDIO_Clock_Source RCCEx DFSDM1 AUDIO Clock Source + * @{ + */ +#define RCC_DFSDM1AUDIOCLKSOURCE_SAI1 ((uint32_t)0x00000000U) +#define RCC_DFSDM1AUDIOCLKSOURCE_SAI2 RCC_DCKCFGR1_ADFSDM1SEL +/** + * @} + */ +#endif /* STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx */ + +#if defined (STM32F769xx) || defined (STM32F779xx) +/** @defgroup RCCEx_DSI_Clock_Source RCC DSI Clock Source + * @{ + */ +#define RCC_DSICLKSOURCE_DSIPHY ((uint32_t)0x00000000U) +#define RCC_DSICLKSOURCE_PLLR ((uint32_t)RCC_DCKCFGR2_DSISEL) +/** + * @} + */ +#endif /* STM32F769xx || STM32F779xx */ + +/** + * @} + */ + +/* Exported macro ------------------------------------------------------------*/ +/** @defgroup RCCEx_Exported_Macros RCCEx Exported Macros + * @{ + */ +/** @defgroup RCCEx_Peripheral_Clock_Enable_Disable RCCEx_Peripheral_Clock_Enable_Disable + * @brief Enables or disables the AHB/APB peripheral clock. + * @note After reset, the peripheral clock (used for registers read/write access) + * is disabled and the application software has to enable this clock before + * using it. + * @{ + */ + +/** @brief Enables or disables the AHB1 peripheral clock. + * @note After reset, the peripheral clock (used for registers read/write access) + * is disabled and the application software has to enable this clock before + * using it. 
+ */ +#define __HAL_RCC_BKPSRAM_CLK_ENABLE() do { \ + __IO uint32_t tmpreg; \ + SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_BKPSRAMEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_BKPSRAMEN);\ + UNUSED(tmpreg); \ + } while(0) + +#define __HAL_RCC_DTCMRAMEN_CLK_ENABLE() do { \ + __IO uint32_t tmpreg; \ + SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_DTCMRAMEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_DTCMRAMEN);\ + UNUSED(tmpreg); \ + } while(0) + +#define __HAL_RCC_DMA2_CLK_ENABLE() do { \ + __IO uint32_t tmpreg; \ + SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_DMA2EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_DMA2EN);\ + UNUSED(tmpreg); \ + } while(0) + +#define __HAL_RCC_USB_OTG_HS_CLK_ENABLE() do { \ + __IO uint32_t tmpreg; \ + SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_OTGHSEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_OTGHSEN);\ + UNUSED(tmpreg); \ + } while(0) + +#define __HAL_RCC_USB_OTG_HS_ULPI_CLK_ENABLE() do { \ + __IO uint32_t tmpreg; \ + SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_OTGHSULPIEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_OTGHSULPIEN);\ + UNUSED(tmpreg); \ + } while(0) + +#define __HAL_RCC_GPIOA_CLK_ENABLE() do { \ + __IO uint32_t tmpreg; \ + SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIOAEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIOAEN);\ + UNUSED(tmpreg); \ + } while(0) + +#define __HAL_RCC_GPIOB_CLK_ENABLE() do { \ + __IO uint32_t tmpreg; \ + SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIOBEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIOBEN);\ + UNUSED(tmpreg); \ + } while(0) + +#define __HAL_RCC_GPIOC_CLK_ENABLE() do { \ + __IO uint32_t tmpreg; \ + SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIOCEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIOCEN);\ + UNUSED(tmpreg); \ + } while(0) + +#define __HAL_RCC_GPIOD_CLK_ENABLE() do { \ + __IO uint32_t tmpreg; \ + SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIODEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIODEN);\ + UNUSED(tmpreg); \ + } while(0) + +#define __HAL_RCC_GPIOE_CLK_ENABLE() do { \ + __IO uint32_t tmpreg; \ + SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIOEEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIOEEN);\ + UNUSED(tmpreg); \ + } while(0) + +#define __HAL_RCC_GPIOF_CLK_ENABLE() do { \ + __IO uint32_t tmpreg; \ + SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIOFEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIOFEN);\ + UNUSED(tmpreg); \ + } while(0) + +#define __HAL_RCC_GPIOG_CLK_ENABLE() do { \ + __IO uint32_t tmpreg; \ + SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIOGEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIOGEN);\ + UNUSED(tmpreg); \ + } while(0) + +#define __HAL_RCC_GPIOH_CLK_ENABLE() do { \ + __IO uint32_t tmpreg; \ + SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIOHEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIOHEN);\ + UNUSED(tmpreg); \ + } while(0) + +#define __HAL_RCC_GPIOI_CLK_ENABLE() do { \ + __IO uint32_t tmpreg; \ 
+ SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIOIEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIOIEN);\ + UNUSED(tmpreg); \ + } while(0) + +#if defined (STM32F745xx) || defined (STM32F746xx) || defined (STM32F756xx) || defined (STM32F765xx) ||\ + defined (STM32F767xx) || defined (STM32F769xx) || defined (STM32F777xx) || defined (STM32F779xx) ||\ + defined (STM32F750xx) +#define __HAL_RCC_GPIOJ_CLK_ENABLE() do { \ + __IO uint32_t tmpreg; \ + SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIOJEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIOJEN);\ + UNUSED(tmpreg); \ + } while(0) + +#define __HAL_RCC_GPIOK_CLK_ENABLE() do { \ + __IO uint32_t tmpreg; \ + SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIOKEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_GPIOKEN);\ + UNUSED(tmpreg); \ + } while(0) + +#define __HAL_RCC_DMA2D_CLK_ENABLE() do { \ + __IO uint32_t tmpreg; \ + SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_DMA2DEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_DMA2DEN);\ + UNUSED(tmpreg); \ + } while(0) +#endif /* STM32F745xx || STM32F746xx || STM32F756xx || STM32F765xx || STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx || STM32F750xx */ + +#define __HAL_RCC_BKPSRAM_CLK_DISABLE() (RCC->AHB1ENR &= ~(RCC_AHB1ENR_BKPSRAMEN)) +#define __HAL_RCC_DTCMRAMEN_CLK_DISABLE() (RCC->AHB1ENR &= ~(RCC_AHB1ENR_DTCMRAMEN)) +#define __HAL_RCC_DMA2_CLK_DISABLE() (RCC->AHB1ENR &= ~(RCC_AHB1ENR_DMA2EN)) +#define __HAL_RCC_USB_OTG_HS_CLK_DISABLE() (RCC->AHB1ENR &= ~(RCC_AHB1ENR_OTGHSEN)) +#define __HAL_RCC_USB_OTG_HS_ULPI_CLK_DISABLE() (RCC->AHB1ENR &= ~(RCC_AHB1ENR_OTGHSULPIEN)) +#define __HAL_RCC_GPIOA_CLK_DISABLE() (RCC->AHB1ENR &= ~(RCC_AHB1ENR_GPIOAEN)) +#define __HAL_RCC_GPIOB_CLK_DISABLE() (RCC->AHB1ENR &= ~(RCC_AHB1ENR_GPIOBEN)) +#define __HAL_RCC_GPIOC_CLK_DISABLE() (RCC->AHB1ENR &= ~(RCC_AHB1ENR_GPIOCEN)) +#define __HAL_RCC_GPIOD_CLK_DISABLE() (RCC->AHB1ENR &= ~(RCC_AHB1ENR_GPIODEN)) +#define __HAL_RCC_GPIOE_CLK_DISABLE() (RCC->AHB1ENR &= ~(RCC_AHB1ENR_GPIOEEN)) +#define __HAL_RCC_GPIOF_CLK_DISABLE() (RCC->AHB1ENR &= ~(RCC_AHB1ENR_GPIOFEN)) +#define __HAL_RCC_GPIOG_CLK_DISABLE() (RCC->AHB1ENR &= ~(RCC_AHB1ENR_GPIOGEN)) +#define __HAL_RCC_GPIOH_CLK_DISABLE() (RCC->AHB1ENR &= ~(RCC_AHB1ENR_GPIOHEN)) +#define __HAL_RCC_GPIOI_CLK_DISABLE() (RCC->AHB1ENR &= ~(RCC_AHB1ENR_GPIOIEN)) +#if defined (STM32F745xx) || defined (STM32F746xx) || defined (STM32F756xx) || defined (STM32F765xx) ||\ + defined (STM32F767xx) || defined (STM32F769xx) || defined (STM32F777xx) || defined (STM32F779xx) ||\ + defined (STM32F750xx) +#define __HAL_RCC_GPIOJ_CLK_DISABLE() (RCC->AHB1ENR &= ~(RCC_AHB1ENR_GPIOJEN)) +#define __HAL_RCC_GPIOK_CLK_DISABLE() (RCC->AHB1ENR &= ~(RCC_AHB1ENR_GPIOKEN)) +#define __HAL_RCC_DMA2D_CLK_DISABLE() (RCC->AHB1ENR &= ~(RCC_AHB1ENR_DMA2DEN)) +#endif /* STM32F745xx || STM32F746xx || STM32F756xx || STM32F765xx || STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx || STM32F750xx */ + +#if defined (STM32F745xx) || defined (STM32F746xx) || defined (STM32F756xx) || defined (STM32F765xx) ||\ + defined (STM32F767xx) || defined (STM32F769xx) || defined (STM32F777xx) || defined (STM32F779xx) ||\ + defined (STM32F750xx) +/** + * @brief Enable ETHERNET clock. 
+ */ +#define __HAL_RCC_ETHMAC_CLK_ENABLE() do { \ + __IO uint32_t tmpreg; \ + SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_ETHMACEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_ETHMACEN);\ + UNUSED(tmpreg); \ + } while(0) + +#define __HAL_RCC_ETHMACTX_CLK_ENABLE() do { \ + __IO uint32_t tmpreg; \ + SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_ETHMACTXEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_ETHMACTXEN);\ + UNUSED(tmpreg); \ + } while(0) + +#define __HAL_RCC_ETHMACRX_CLK_ENABLE() do { \ + __IO uint32_t tmpreg; \ + SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_ETHMACRXEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_ETHMACRXEN);\ + UNUSED(tmpreg); \ + } while(0) + +#define __HAL_RCC_ETHMACPTP_CLK_ENABLE() do { \ + __IO uint32_t tmpreg; \ + SET_BIT(RCC->AHB1ENR, RCC_AHB1ENR_ETHMACPTPEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB1ENR, RCC_AHB1ENR_ETHMACPTPEN);\ + UNUSED(tmpreg); \ + } while(0) + +#define __HAL_RCC_ETH_CLK_ENABLE() do { \ + __HAL_RCC_ETHMAC_CLK_ENABLE(); \ + __HAL_RCC_ETHMACTX_CLK_ENABLE(); \ + __HAL_RCC_ETHMACRX_CLK_ENABLE(); \ + } while(0) +/** + * @brief Disable ETHERNET clock. + */ +#define __HAL_RCC_ETHMAC_CLK_DISABLE() (RCC->AHB1ENR &= ~(RCC_AHB1ENR_ETHMACEN)) +#define __HAL_RCC_ETHMACTX_CLK_DISABLE() (RCC->AHB1ENR &= ~(RCC_AHB1ENR_ETHMACTXEN)) +#define __HAL_RCC_ETHMACRX_CLK_DISABLE() (RCC->AHB1ENR &= ~(RCC_AHB1ENR_ETHMACRXEN)) +#define __HAL_RCC_ETHMACPTP_CLK_DISABLE() (RCC->AHB1ENR &= ~(RCC_AHB1ENR_ETHMACPTPEN)) +#define __HAL_RCC_ETH_CLK_DISABLE() do { \ + __HAL_RCC_ETHMACTX_CLK_DISABLE(); \ + __HAL_RCC_ETHMACRX_CLK_DISABLE(); \ + __HAL_RCC_ETHMAC_CLK_DISABLE(); \ + } while(0) +#endif /* STM32F745xx || STM32F746xx || STM32F756xx || STM32F765xx || STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx || STM32F750xx */ + +/** @brief Enable or disable the AHB2 peripheral clock. + * @note After reset, the peripheral clock (used for registers read/write access) + * is disabled and the application software has to enable this clock before + * using it. 
+ */ +#if defined (STM32F745xx) || defined (STM32F746xx) || defined (STM32F756xx) || defined (STM32F765xx) ||\ + defined (STM32F767xx) || defined (STM32F769xx) || defined (STM32F777xx) || defined (STM32F779xx) ||\ + defined (STM32F750xx) +#define __HAL_RCC_DCMI_CLK_ENABLE() do { \ + __IO uint32_t tmpreg; \ + SET_BIT(RCC->AHB2ENR, RCC_AHB2ENR_DCMIEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB2ENR, RCC_AHB2ENR_DCMIEN);\ + UNUSED(tmpreg); \ + } while(0) +#define __HAL_RCC_DCMI_CLK_DISABLE() (RCC->AHB2ENR &= ~(RCC_AHB2ENR_DCMIEN)) +#endif /* STM32F745xx || STM32F746xx || STM32F756xx || STM32F765xx || STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx || STM32F750xx */ + +#if defined (STM32F767xx) || defined (STM32F769xx) || defined (STM32F777xx) || defined (STM32F779xx) +#define __HAL_RCC_JPEG_CLK_ENABLE() do { \ + __IO uint32_t tmpreg; \ + SET_BIT(RCC->AHB2ENR, RCC_AHB2ENR_JPEGEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB2ENR, RCC_AHB2ENR_JPEGEN);\ + UNUSED(tmpreg); \ + } while(0) +#define __HAL_RCC_JPEG_CLK_DISABLE() (RCC->AHB2ENR &= ~(RCC_AHB2ENR_JPEGEN)) +#endif /* STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx */ + +#define __HAL_RCC_RNG_CLK_ENABLE() do { \ + __IO uint32_t tmpreg; \ + SET_BIT(RCC->AHB2ENR, RCC_AHB2ENR_RNGEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB2ENR, RCC_AHB2ENR_RNGEN);\ + UNUSED(tmpreg); \ + } while(0) + +#define __HAL_RCC_USB_OTG_FS_CLK_ENABLE() do { \ + __IO uint32_t tmpreg; \ + SET_BIT(RCC->AHB2ENR, RCC_AHB2ENR_OTGFSEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB2ENR, RCC_AHB2ENR_OTGFSEN);\ + UNUSED(tmpreg); \ + __HAL_RCC_SYSCFG_CLK_ENABLE();\ + } while(0) + +#define __HAL_RCC_RNG_CLK_DISABLE() (RCC->AHB2ENR &= ~(RCC_AHB2ENR_RNGEN)) + +#define __HAL_RCC_USB_OTG_FS_CLK_DISABLE() (RCC->AHB2ENR &= ~(RCC_AHB2ENR_OTGFSEN)) +#if defined(STM32F756xx) || defined (STM32F777xx) || defined (STM32F779xx) || defined (STM32F750xx) +#define __HAL_RCC_CRYP_CLK_ENABLE() do { \ + __IO uint32_t tmpreg; \ + SET_BIT(RCC->AHB2ENR, RCC_AHB2ENR_CRYPEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB2ENR, RCC_AHB2ENR_CRYPEN);\ + UNUSED(tmpreg); \ + } while(0) + +#define __HAL_RCC_HASH_CLK_ENABLE() do { \ + __IO uint32_t tmpreg; \ + SET_BIT(RCC->AHB2ENR, RCC_AHB2ENR_HASHEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB2ENR, RCC_AHB2ENR_HASHEN);\ + UNUSED(tmpreg); \ + } while(0) + +#define __HAL_RCC_CRYP_CLK_DISABLE() (RCC->AHB2ENR &= ~(RCC_AHB2ENR_CRYPEN)) +#define __HAL_RCC_HASH_CLK_DISABLE() (RCC->AHB2ENR &= ~(RCC_AHB2ENR_HASHEN)) +#endif /* STM32F756x || STM32F777xx || STM32F779xx || STM32F750xx */ + +#if defined(STM32F732xx) || defined (STM32F733xx) || defined (STM32F730xx) +#define __HAL_RCC_AES_CLK_ENABLE() do { \ + __IO uint32_t tmpreg; \ + SET_BIT(RCC->AHB2ENR, RCC_AHB2ENR_AESEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB2ENR, RCC_AHB2ENR_AESEN);\ + UNUSED(tmpreg); \ + } while(0) + +#define __HAL_RCC_AES_CLK_DISABLE() (RCC->AHB2ENR &= ~(RCC_AHB2ENR_AESEN)) +#endif /* STM32F732xx || STM32F733xx || STM32F730xx */ + +/** @brief Enables or disables the AHB3 peripheral clock. + * @note After reset, the peripheral clock (used for registers read/write access) + * is disabled and the application software has to enable this clock before + * using it. 
+ */ +#define __HAL_RCC_FMC_CLK_ENABLE() do { \ + __IO uint32_t tmpreg; \ + SET_BIT(RCC->AHB3ENR, RCC_AHB3ENR_FMCEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB3ENR, RCC_AHB3ENR_FMCEN);\ + UNUSED(tmpreg); \ + } while(0) + +#define __HAL_RCC_QSPI_CLK_ENABLE() do { \ + __IO uint32_t tmpreg; \ + SET_BIT(RCC->AHB3ENR, RCC_AHB3ENR_QSPIEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->AHB3ENR, RCC_AHB3ENR_QSPIEN);\ + UNUSED(tmpreg); \ + } while(0) + +#define __HAL_RCC_FMC_CLK_DISABLE() (RCC->AHB3ENR &= ~(RCC_AHB3ENR_FMCEN)) +#define __HAL_RCC_QSPI_CLK_DISABLE() (RCC->AHB3ENR &= ~(RCC_AHB3ENR_QSPIEN)) + +/** @brief Enable or disable the Low Speed APB (APB1) peripheral clock. + * @note After reset, the peripheral clock (used for registers read/write access) + * is disabled and the application software has to enable this clock before + * using it. + */ +#define __HAL_RCC_TIM2_CLK_ENABLE() do { \ + __IO uint32_t tmpreg; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM2EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM2EN);\ + UNUSED(tmpreg); \ + } while(0) + +#define __HAL_RCC_TIM3_CLK_ENABLE() do { \ + __IO uint32_t tmpreg; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM3EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM3EN);\ + UNUSED(tmpreg); \ + } while(0) + +#define __HAL_RCC_TIM4_CLK_ENABLE() do { \ + __IO uint32_t tmpreg; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM4EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM4EN);\ + UNUSED(tmpreg); \ + } while(0) + +#define __HAL_RCC_TIM5_CLK_ENABLE() do { \ + __IO uint32_t tmpreg; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM5EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM5EN);\ + UNUSED(tmpreg); \ + } while(0) + +#define __HAL_RCC_TIM6_CLK_ENABLE() do { \ + __IO uint32_t tmpreg; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM6EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM6EN);\ + UNUSED(tmpreg); \ + } while(0) + +#define __HAL_RCC_TIM7_CLK_ENABLE() do { \ + __IO uint32_t tmpreg; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM7EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM7EN);\ + UNUSED(tmpreg); \ + } while(0) + +#define __HAL_RCC_TIM12_CLK_ENABLE() do { \ + __IO uint32_t tmpreg; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM12EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM12EN);\ + UNUSED(tmpreg); \ + } while(0) + +#define __HAL_RCC_TIM13_CLK_ENABLE() do { \ + __IO uint32_t tmpreg; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM13EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM13EN);\ + UNUSED(tmpreg); \ + } while(0) + +#define __HAL_RCC_TIM14_CLK_ENABLE() do { \ + __IO uint32_t tmpreg; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM14EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_TIM14EN);\ + UNUSED(tmpreg); \ + } while(0) + +#define __HAL_RCC_LPTIM1_CLK_ENABLE() do { \ + __IO uint32_t tmpreg; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_LPTIM1EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_LPTIM1EN);\ + UNUSED(tmpreg); \ 
+ } while(0) + +#if defined (STM32F722xx) || defined (STM32F723xx) || defined (STM32F732xx) || defined (STM32F733xx) ||\ + defined (STM32F765xx) || defined (STM32F767xx) || defined (STM32F769xx) || defined (STM32F777xx) ||\ + defined (STM32F779xx) || defined (STM32F730xx) +#define __HAL_RCC_RTC_CLK_ENABLE() do { \ + __IO uint32_t tmpreg; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_RTCEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_RTCEN);\ + UNUSED(tmpreg); \ + } while(0) +#endif /* STM32F722xx || STM32F723xx || STM32F732xx || STM32F733xx || STM32F765xx || STM32F767xx || + STM32F769xx || STM32F777xx || STM32F779xx || STM32F730xx */ + +#if defined (STM32F765xx) || defined (STM32F767xx) || defined (STM32F769xx) || defined (STM32F777xx) || defined (STM32F779xx) +#define __HAL_RCC_CAN3_CLK_ENABLE() do { \ + __IO uint32_t tmpreg; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_CAN3EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_CAN3EN);\ + UNUSED(tmpreg); \ + } while(0) +#endif /* STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx */ + +#define __HAL_RCC_SPI2_CLK_ENABLE() do { \ + __IO uint32_t tmpreg; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_SPI2EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_SPI2EN);\ + UNUSED(tmpreg); \ + } while(0) + +#define __HAL_RCC_SPI3_CLK_ENABLE() do { \ + __IO uint32_t tmpreg; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_SPI3EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_SPI3EN);\ + UNUSED(tmpreg); \ + } while(0) + +#define __HAL_RCC_USART2_CLK_ENABLE() do { \ + __IO uint32_t tmpreg; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_USART2EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_USART2EN);\ + UNUSED(tmpreg); \ + } while(0) + +#define __HAL_RCC_USART3_CLK_ENABLE() do { \ + __IO uint32_t tmpreg; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_USART3EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_USART3EN);\ + UNUSED(tmpreg); \ + } while(0) + +#define __HAL_RCC_UART4_CLK_ENABLE() do { \ + __IO uint32_t tmpreg; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_UART4EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_UART4EN);\ + UNUSED(tmpreg); \ + } while(0) + +#define __HAL_RCC_UART5_CLK_ENABLE() do { \ + __IO uint32_t tmpreg; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_UART5EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_UART5EN);\ + UNUSED(tmpreg); \ + } while(0) + +#define __HAL_RCC_I2C1_CLK_ENABLE() do { \ + __IO uint32_t tmpreg; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_I2C1EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_I2C1EN);\ + UNUSED(tmpreg); \ + } while(0) + +#define __HAL_RCC_I2C2_CLK_ENABLE() do { \ + __IO uint32_t tmpreg; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_I2C2EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_I2C2EN);\ + UNUSED(tmpreg); \ + } while(0) + +#define __HAL_RCC_I2C3_CLK_ENABLE() do { \ + __IO uint32_t tmpreg; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_I2C3EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_I2C3EN);\ + UNUSED(tmpreg); \ + } while(0) + +#define 
__HAL_RCC_CAN1_CLK_ENABLE() do { \ + __IO uint32_t tmpreg; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_CAN1EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_CAN1EN);\ + UNUSED(tmpreg); \ + } while(0) + +#define __HAL_RCC_DAC_CLK_ENABLE() do { \ + __IO uint32_t tmpreg; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_DACEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_DACEN);\ + UNUSED(tmpreg); \ + } while(0) + +#define __HAL_RCC_UART7_CLK_ENABLE() do { \ + __IO uint32_t tmpreg; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_UART7EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_UART7EN);\ + UNUSED(tmpreg); \ + } while(0) + +#define __HAL_RCC_UART8_CLK_ENABLE() do { \ + __IO uint32_t tmpreg; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_UART8EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_UART8EN);\ + UNUSED(tmpreg); \ + } while(0) + +#if defined (STM32F745xx) || defined (STM32F746xx) || defined (STM32F756xx) || defined (STM32F765xx) ||\ + defined (STM32F767xx) || defined (STM32F769xx) || defined (STM32F777xx) || defined (STM32F779xx) ||\ + defined (STM32F750xx) +#define __HAL_RCC_SPDIFRX_CLK_ENABLE() do { \ + __IO uint32_t tmpreg; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_SPDIFRXEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_SPDIFRXEN);\ + UNUSED(tmpreg); \ + } while(0) + +#define __HAL_RCC_I2C4_CLK_ENABLE() do { \ + __IO uint32_t tmpreg; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_I2C4EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_I2C4EN);\ + UNUSED(tmpreg); \ + } while(0) + +#define __HAL_RCC_CAN2_CLK_ENABLE() do { \ + __IO uint32_t tmpreg; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_CAN2EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_CAN2EN);\ + UNUSED(tmpreg); \ + } while(0) + +#define __HAL_RCC_CEC_CLK_ENABLE() do { \ + __IO uint32_t tmpreg; \ + SET_BIT(RCC->APB1ENR, RCC_APB1ENR_CECEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB1ENR, RCC_APB1ENR_CECEN);\ + UNUSED(tmpreg); \ + } while(0) +#endif /* STM32F745xx || STM32F746xx || STM32F756xx || STM32F765xx || STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx || STM32F750xx */ + +#define __HAL_RCC_TIM2_CLK_DISABLE() (RCC->APB1ENR &= ~(RCC_APB1ENR_TIM2EN)) +#define __HAL_RCC_TIM3_CLK_DISABLE() (RCC->APB1ENR &= ~(RCC_APB1ENR_TIM3EN)) +#define __HAL_RCC_TIM4_CLK_DISABLE() (RCC->APB1ENR &= ~(RCC_APB1ENR_TIM4EN)) +#define __HAL_RCC_TIM5_CLK_DISABLE() (RCC->APB1ENR &= ~(RCC_APB1ENR_TIM5EN)) +#define __HAL_RCC_TIM6_CLK_DISABLE() (RCC->APB1ENR &= ~(RCC_APB1ENR_TIM6EN)) +#define __HAL_RCC_TIM7_CLK_DISABLE() (RCC->APB1ENR &= ~(RCC_APB1ENR_TIM7EN)) +#define __HAL_RCC_TIM12_CLK_DISABLE() (RCC->APB1ENR &= ~(RCC_APB1ENR_TIM12EN)) +#define __HAL_RCC_TIM13_CLK_DISABLE() (RCC->APB1ENR &= ~(RCC_APB1ENR_TIM13EN)) +#define __HAL_RCC_TIM14_CLK_DISABLE() (RCC->APB1ENR &= ~(RCC_APB1ENR_TIM14EN)) +#define __HAL_RCC_LPTIM1_CLK_DISABLE() (RCC->APB1ENR &= ~(RCC_APB1ENR_LPTIM1EN)) +#if defined (STM32F722xx) || defined (STM32F723xx) || defined (STM32F732xx) || defined (STM32F733xx) ||\ + defined (STM32F765xx) || defined (STM32F767xx) || defined (STM32F769xx) || defined (STM32F777xx) ||\ + defined (STM32F779xx) || defined (STM32F730xx) +#define 
__HAL_RCC_RTC_CLK_DISABLE() (RCC->APB1ENR &= ~(RCC_APB1ENR_RTCEN)) +#endif /* STM32F722xx || STM32F723xx || STM32F732xx || STM32F733xx || STM32F765xx || STM32F767xx || + STM32F769xx || STM32F777xx || STM32F779xx || STM32F730xx */ +#if defined (STM32F765xx) || defined (STM32F767xx) || defined (STM32F769xx) || defined (STM32F777xx) || defined (STM32F779xx) +#define __HAL_RCC_CAN3_CLK_DISABLE() (RCC->APB1ENR &= ~(RCC_APB1ENR_CAN3EN)) +#endif /* STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx */ +#define __HAL_RCC_SPI2_CLK_DISABLE() (RCC->APB1ENR &= ~(RCC_APB1ENR_SPI2EN)) +#define __HAL_RCC_SPI3_CLK_DISABLE() (RCC->APB1ENR &= ~(RCC_APB1ENR_SPI3EN)) +#define __HAL_RCC_USART2_CLK_DISABLE() (RCC->APB1ENR &= ~(RCC_APB1ENR_USART2EN)) +#define __HAL_RCC_USART3_CLK_DISABLE() (RCC->APB1ENR &= ~(RCC_APB1ENR_USART3EN)) +#define __HAL_RCC_UART4_CLK_DISABLE() (RCC->APB1ENR &= ~(RCC_APB1ENR_UART4EN)) +#define __HAL_RCC_UART5_CLK_DISABLE() (RCC->APB1ENR &= ~(RCC_APB1ENR_UART5EN)) +#define __HAL_RCC_I2C1_CLK_DISABLE() (RCC->APB1ENR &= ~(RCC_APB1ENR_I2C1EN)) +#define __HAL_RCC_I2C2_CLK_DISABLE() (RCC->APB1ENR &= ~(RCC_APB1ENR_I2C2EN)) +#define __HAL_RCC_I2C3_CLK_DISABLE() (RCC->APB1ENR &= ~(RCC_APB1ENR_I2C3EN)) +#define __HAL_RCC_CAN1_CLK_DISABLE() (RCC->APB1ENR &= ~(RCC_APB1ENR_CAN1EN)) +#define __HAL_RCC_DAC_CLK_DISABLE() (RCC->APB1ENR &= ~(RCC_APB1ENR_DACEN)) +#define __HAL_RCC_UART7_CLK_DISABLE() (RCC->APB1ENR &= ~(RCC_APB1ENR_UART7EN)) +#define __HAL_RCC_UART8_CLK_DISABLE() (RCC->APB1ENR &= ~(RCC_APB1ENR_UART8EN)) +#if defined (STM32F745xx) || defined (STM32F746xx) || defined (STM32F756xx) || defined (STM32F765xx) ||\ + defined (STM32F767xx) || defined (STM32F769xx) || defined (STM32F777xx) || defined (STM32F779xx) ||\ + defined (STM32F750xx) +#define __HAL_RCC_SPDIFRX_CLK_DISABLE()(RCC->APB1ENR &= ~(RCC_APB1ENR_SPDIFRXEN)) +#define __HAL_RCC_I2C4_CLK_DISABLE() (RCC->APB1ENR &= ~(RCC_APB1ENR_I2C4EN)) +#define __HAL_RCC_CAN2_CLK_DISABLE() (RCC->APB1ENR &= ~(RCC_APB1ENR_CAN2EN)) +#define __HAL_RCC_CEC_CLK_DISABLE() (RCC->APB1ENR &= ~(RCC_APB1ENR_CECEN)) +#endif /* STM32F745xx || STM32F746xx || STM32F756xx || STM32F765xx || STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx || || STM32F750xx */ + +/** @brief Enable or disable the High Speed APB (APB2) peripheral clock. + * @note After reset, the peripheral clock (used for registers read/write access) + * is disabled and the application software has to enable this clock before + * using it. 
+ */ +#define __HAL_RCC_TIM1_CLK_ENABLE() do { \ + __IO uint32_t tmpreg; \ + SET_BIT(RCC->APB2ENR, RCC_APB2ENR_TIM1EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB2ENR, RCC_APB2ENR_TIM1EN);\ + UNUSED(tmpreg); \ + } while(0) + +#define __HAL_RCC_TIM8_CLK_ENABLE() do { \ + __IO uint32_t tmpreg; \ + SET_BIT(RCC->APB2ENR, RCC_APB2ENR_TIM8EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB2ENR, RCC_APB2ENR_TIM8EN);\ + UNUSED(tmpreg); \ + } while(0) + +#define __HAL_RCC_USART1_CLK_ENABLE() do { \ + __IO uint32_t tmpreg; \ + SET_BIT(RCC->APB2ENR, RCC_APB2ENR_USART1EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB2ENR, RCC_APB2ENR_USART1EN);\ + UNUSED(tmpreg); \ + } while(0) + +#define __HAL_RCC_USART6_CLK_ENABLE() do { \ + __IO uint32_t tmpreg; \ + SET_BIT(RCC->APB2ENR, RCC_APB2ENR_USART6EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB2ENR, RCC_APB2ENR_USART6EN);\ + UNUSED(tmpreg); \ + } while(0) + +#if defined (STM32F722xx) || defined (STM32F723xx) || defined (STM32F732xx) || defined (STM32F733xx) || defined (STM32F765xx) ||\ + defined (STM32F767xx) || defined (STM32F769xx) || defined (STM32F777xx) || defined (STM32F779xx) || defined (STM32F730xx) +#define __HAL_RCC_SDMMC2_CLK_ENABLE() do { \ + __IO uint32_t tmpreg; \ + SET_BIT(RCC->APB2ENR, RCC_APB2ENR_SDMMC2EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB2ENR, RCC_APB2ENR_SDMMC2EN);\ + UNUSED(tmpreg); \ + } while(0) +#endif /* STM32F722xx || STM32F723xx || STM32F732xx || STM32F733xx || STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx || || STM32F730xx */ + +#define __HAL_RCC_ADC1_CLK_ENABLE() do { \ + __IO uint32_t tmpreg; \ + SET_BIT(RCC->APB2ENR, RCC_APB2ENR_ADC1EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB2ENR, RCC_APB2ENR_ADC1EN);\ + UNUSED(tmpreg); \ + } while(0) + +#define __HAL_RCC_ADC2_CLK_ENABLE() do { \ + __IO uint32_t tmpreg; \ + SET_BIT(RCC->APB2ENR, RCC_APB2ENR_ADC2EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB2ENR, RCC_APB2ENR_ADC2EN);\ + UNUSED(tmpreg); \ + } while(0) + +#define __HAL_RCC_ADC3_CLK_ENABLE() do { \ + __IO uint32_t tmpreg; \ + SET_BIT(RCC->APB2ENR, RCC_APB2ENR_ADC3EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB2ENR, RCC_APB2ENR_ADC3EN);\ + UNUSED(tmpreg); \ + } while(0) + +#define __HAL_RCC_SDMMC1_CLK_ENABLE() do { \ + __IO uint32_t tmpreg; \ + SET_BIT(RCC->APB2ENR, RCC_APB2ENR_SDMMC1EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB2ENR, RCC_APB2ENR_SDMMC1EN);\ + UNUSED(tmpreg); \ + } while(0) + +#define __HAL_RCC_SPI1_CLK_ENABLE() do { \ + __IO uint32_t tmpreg; \ + SET_BIT(RCC->APB2ENR, RCC_APB2ENR_SPI1EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB2ENR, RCC_APB2ENR_SPI1EN);\ + UNUSED(tmpreg); \ + } while(0) + +#define __HAL_RCC_SPI4_CLK_ENABLE() do { \ + __IO uint32_t tmpreg; \ + SET_BIT(RCC->APB2ENR, RCC_APB2ENR_SPI4EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB2ENR, RCC_APB2ENR_SPI4EN);\ + UNUSED(tmpreg); \ + } while(0) + +#define __HAL_RCC_TIM9_CLK_ENABLE() do { \ + __IO uint32_t tmpreg; \ + SET_BIT(RCC->APB2ENR, RCC_APB2ENR_TIM9EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB2ENR, RCC_APB2ENR_TIM9EN);\ + 
UNUSED(tmpreg); \ + } while(0) + +#define __HAL_RCC_TIM10_CLK_ENABLE() do { \ + __IO uint32_t tmpreg; \ + SET_BIT(RCC->APB2ENR, RCC_APB2ENR_TIM10EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB2ENR, RCC_APB2ENR_TIM10EN);\ + UNUSED(tmpreg); \ + } while(0) + +#define __HAL_RCC_TIM11_CLK_ENABLE() do { \ + __IO uint32_t tmpreg; \ + SET_BIT(RCC->APB2ENR, RCC_APB2ENR_TIM11EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB2ENR, RCC_APB2ENR_TIM11EN);\ + UNUSED(tmpreg); \ + } while(0) + +#define __HAL_RCC_SPI5_CLK_ENABLE() do { \ + __IO uint32_t tmpreg; \ + SET_BIT(RCC->APB2ENR, RCC_APB2ENR_SPI5EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB2ENR, RCC_APB2ENR_SPI5EN);\ + UNUSED(tmpreg); \ + } while(0) + +#define __HAL_RCC_SPI6_CLK_ENABLE() do { \ + __IO uint32_t tmpreg; \ + SET_BIT(RCC->APB2ENR, RCC_APB2ENR_SPI6EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB2ENR, RCC_APB2ENR_SPI6EN);\ + UNUSED(tmpreg); \ + } while(0) + +#define __HAL_RCC_SAI1_CLK_ENABLE() do { \ + __IO uint32_t tmpreg; \ + SET_BIT(RCC->APB2ENR, RCC_APB2ENR_SAI1EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB2ENR, RCC_APB2ENR_SAI1EN);\ + UNUSED(tmpreg); \ + } while(0) + +#define __HAL_RCC_SAI2_CLK_ENABLE() do { \ + __IO uint32_t tmpreg; \ + SET_BIT(RCC->APB2ENR, RCC_APB2ENR_SAI2EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB2ENR, RCC_APB2ENR_SAI2EN);\ + UNUSED(tmpreg); \ + } while(0) + +#if defined (STM32F746xx) || defined (STM32F756xx) || defined (STM32F767xx) || defined (STM32F769xx) || defined (STM32F777xx) || defined (STM32F779xx) || defined (STM32F750xx) +#define __HAL_RCC_LTDC_CLK_ENABLE() do { \ + __IO uint32_t tmpreg; \ + SET_BIT(RCC->APB2ENR, RCC_APB2ENR_LTDCEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB2ENR, RCC_APB2ENR_LTDCEN);\ + UNUSED(tmpreg); \ + } while(0) +#endif /* STM32F746xx || STM32F756xx || STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx || STM32F750xx */ + +#if defined (STM32F769xx) || defined (STM32F779xx) +#define __HAL_RCC_DSI_CLK_ENABLE() do { \ + __IO uint32_t tmpreg; \ + SET_BIT(RCC->APB2ENR, RCC_APB2ENR_DSIEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB2ENR, RCC_APB2ENR_DSIEN);\ + UNUSED(tmpreg); \ + } while(0) +#endif /* STM32F769xx || STM32F779xx */ + +#if defined (STM32F765xx) || defined (STM32F767xx) || defined (STM32F769xx) || defined (STM32F777xx) || defined (STM32F779xx) +#define __HAL_RCC_DFSDM1_CLK_ENABLE() do { \ + __IO uint32_t tmpreg; \ + SET_BIT(RCC->APB2ENR, RCC_APB2ENR_DFSDM1EN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB2ENR, RCC_APB2ENR_DFSDM1EN);\ + UNUSED(tmpreg); \ + } while(0) + +#define __HAL_RCC_MDIO_CLK_ENABLE() do { \ + __IO uint32_t tmpreg; \ + SET_BIT(RCC->APB2ENR, RCC_APB2ENR_MDIOEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB2ENR, RCC_APB2ENR_MDIOEN);\ + UNUSED(tmpreg); \ + } while(0) +#endif /* STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx */ +#if defined (STM32F723xx) || defined (STM32F733xx) || defined (STM32F730xx) +#define __HAL_RCC_OTGPHYC_CLK_ENABLE() do { \ + __IO uint32_t tmpreg; \ + SET_BIT(RCC->APB2ENR, RCC_APB2ENR_OTGPHYCEN);\ + /* Delay after an RCC peripheral clock enabling */ \ + tmpreg = READ_BIT(RCC->APB2ENR, 
RCC_APB2ENR_OTGPHYCEN);\ + UNUSED(tmpreg); \ + } while(0) +#endif /* STM32F723xx || STM32F733xx || STM32F730xx */ + +#define __HAL_RCC_TIM1_CLK_DISABLE() (RCC->APB2ENR &= ~(RCC_APB2ENR_TIM1EN)) +#define __HAL_RCC_TIM8_CLK_DISABLE() (RCC->APB2ENR &= ~(RCC_APB2ENR_TIM8EN)) +#define __HAL_RCC_USART1_CLK_DISABLE() (RCC->APB2ENR &= ~(RCC_APB2ENR_USART1EN)) +#define __HAL_RCC_USART6_CLK_DISABLE() (RCC->APB2ENR &= ~(RCC_APB2ENR_USART6EN)) +#if defined (STM32F722xx) || defined (STM32F723xx) || defined (STM32F732xx) || defined (STM32F733xx) || defined (STM32F765xx) ||\ + defined (STM32F767xx) || defined (STM32F769xx) || defined (STM32F777xx) || defined (STM32F779xx) || defined (STM32F730xx) +#define __HAL_RCC_SDMMC2_CLK_DISABLE() (RCC->APB2ENR &= ~(RCC_APB2ENR_SDMMC2EN)) +#endif /* STM32F722xx || STM32F723xx || STM32F732xx || STM32F733xx || STM32F765xx || STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx || STM32F730xx */ +#define __HAL_RCC_ADC1_CLK_DISABLE() (RCC->APB2ENR &= ~(RCC_APB2ENR_ADC1EN)) +#define __HAL_RCC_ADC2_CLK_DISABLE() (RCC->APB2ENR &= ~(RCC_APB2ENR_ADC2EN)) +#define __HAL_RCC_ADC3_CLK_DISABLE() (RCC->APB2ENR &= ~(RCC_APB2ENR_ADC3EN)) +#define __HAL_RCC_SDMMC1_CLK_DISABLE() (RCC->APB2ENR &= ~(RCC_APB2ENR_SDMMC1EN)) +#define __HAL_RCC_SPI1_CLK_DISABLE() (RCC->APB2ENR &= ~(RCC_APB2ENR_SPI1EN)) +#define __HAL_RCC_SPI4_CLK_DISABLE() (RCC->APB2ENR &= ~(RCC_APB2ENR_SPI4EN)) +#define __HAL_RCC_TIM9_CLK_DISABLE() (RCC->APB2ENR &= ~(RCC_APB2ENR_TIM9EN)) +#define __HAL_RCC_TIM10_CLK_DISABLE() (RCC->APB2ENR &= ~(RCC_APB2ENR_TIM10EN)) +#define __HAL_RCC_TIM11_CLK_DISABLE() (RCC->APB2ENR &= ~(RCC_APB2ENR_TIM11EN)) +#define __HAL_RCC_SPI5_CLK_DISABLE() (RCC->APB2ENR &= ~(RCC_APB2ENR_SPI5EN)) +#define __HAL_RCC_SPI6_CLK_DISABLE() (RCC->APB2ENR &= ~(RCC_APB2ENR_SPI6EN)) +#define __HAL_RCC_SAI1_CLK_DISABLE() (RCC->APB2ENR &= ~(RCC_APB2ENR_SAI1EN)) +#define __HAL_RCC_SAI2_CLK_DISABLE() (RCC->APB2ENR &= ~(RCC_APB2ENR_SAI2EN)) +#if defined (STM32F746xx) || defined (STM32F756xx) || defined (STM32F767xx) || defined (STM32F769xx) || defined (STM32F777xx) || defined (STM32F779xx) || defined (STM32F750xx) +#define __HAL_RCC_LTDC_CLK_DISABLE() (RCC->APB2ENR &= ~(RCC_APB2ENR_LTDCEN)) +#endif /* STM32F746xx || STM32F756xx || STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx || STM32F750xx */ +#if defined (STM32F769xx) || defined (STM32F779xx) +#define __HAL_RCC_DSI_CLK_DISABLE() (RCC->APB2ENR &= ~(RCC_APB2ENR_DSIEN)) +#endif /* STM32F769xx || STM32F779xx */ +#if defined (STM32F765xx) || defined (STM32F767xx) || defined (STM32F769xx) || defined (STM32F777xx) || defined (STM32F779xx) +#define __HAL_RCC_DFSDM1_CLK_DISABLE() (RCC->APB2ENR &= ~(RCC_APB2ENR_DFSDM1EN)) +#define __HAL_RCC_MDIO_CLK_DISABLE() (RCC->APB2ENR &= ~(RCC_APB2ENR_MDIOEN)) +#endif /* STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx */ +#if defined (STM32F723xx) || defined (STM32F733xx) || defined (STM32F730xx) +#define __HAL_RCC_OTGPHYC_CLK_DISABLE() (RCC->APB2ENR &= ~(RCC_APB2ENR_OTGPHYCEN)) +#endif /* STM32F723xx || STM32F733xx || STM32F730xx */ + +/** + * @} + */ + +/** @defgroup RCCEx_Peripheral_Clock_Enable_Disable_Status Peripheral Clock Enable Disable Status + * @brief Get the enable or disable status of the AHB/APB peripheral clock. + * @note After reset, the peripheral clock (used for registers read/write access) + * is disabled and the application software has to enable this clock before + * using it. + * @{ + */ + +/** @brief Get the enable or disable status of the AHB1 peripheral clock. 
+ * @note After reset, the peripheral clock (used for registers read/write access) + * is disabled and the application software has to enable this clock before + * using it. + */ +#define __HAL_RCC_BKPSRAM_IS_CLK_ENABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_BKPSRAMEN)) != RESET) +#define __HAL_RCC_DTCMRAMEN_IS_CLK_ENABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_DTCMRAMEN)) != RESET) +#define __HAL_RCC_DMA2_IS_CLK_ENABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_DMA2EN)) != RESET) +#define __HAL_RCC_USB_OTG_HS_IS_CLK_ENABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_OTGHSEN)) != RESET) +#define __HAL_RCC_USB_OTG_HS_ULPI_IS_CLK_ENABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_OTGHSULPIEN)) != RESET) +#define __HAL_RCC_GPIOA_IS_CLK_ENABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_GPIOAEN)) != RESET) +#define __HAL_RCC_GPIOB_IS_CLK_ENABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_GPIOBEN)) != RESET) +#define __HAL_RCC_GPIOC_IS_CLK_ENABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_GPIOCEN)) != RESET) +#define __HAL_RCC_GPIOD_IS_CLK_ENABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_GPIODEN)) != RESET) +#define __HAL_RCC_GPIOE_IS_CLK_ENABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_GPIOEEN)) != RESET) +#define __HAL_RCC_GPIOF_IS_CLK_ENABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_GPIOFEN)) != RESET) +#define __HAL_RCC_GPIOG_IS_CLK_ENABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_GPIOGEN)) != RESET) +#define __HAL_RCC_GPIOH_IS_CLK_ENABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_GPIOHEN)) != RESET) +#define __HAL_RCC_GPIOI_IS_CLK_ENABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_GPIOIEN)) != RESET) +#if defined (STM32F745xx) || defined (STM32F746xx) || defined (STM32F756xx) || defined (STM32F765xx) ||\ + defined (STM32F767xx) || defined (STM32F769xx) || defined (STM32F777xx) || defined (STM32F779xx) ||\ + defined (STM32F750xx) +#define __HAL_RCC_GPIOJ_IS_CLK_ENABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_GPIOJEN)) != RESET) +#define __HAL_RCC_GPIOK_IS_CLK_ENABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_GPIOKEN)) != RESET) +#define __HAL_RCC_DMA2D_IS_CLK_ENABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_DMA2DEN)) != RESET) +#endif /* STM32F745xx || STM32F746xx || STM32F756xx || STM32F765xx || STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx || STM32F750xx */ + +#define __HAL_RCC_BKPSRAM_IS_CLK_DISABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_BKPSRAMEN)) == RESET) +#define __HAL_RCC_DTCMRAMEN_IS_CLK_DISABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_DTCMRAMEN)) == RESET) +#define __HAL_RCC_DMA2_IS_CLK_DISABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_DMA2EN)) == RESET) +#define __HAL_RCC_USB_OTG_HS_IS_CLK_DISABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_OTGHSEN)) == RESET) +#define __HAL_RCC_USB_OTG_HS_ULPI_IS_CLK_DISABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_OTGHSULPIEN)) == RESET) +#define __HAL_RCC_GPIOA_IS_CLK_DISABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_GPIOAEN)) == RESET) +#define __HAL_RCC_GPIOB_IS_CLK_DISABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_GPIOBEN)) == RESET) +#define __HAL_RCC_GPIOC_IS_CLK_DISABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_GPIOCEN)) == RESET) +#define __HAL_RCC_GPIOD_IS_CLK_DISABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_GPIODEN)) == RESET) +#define __HAL_RCC_GPIOE_IS_CLK_DISABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_GPIOEEN)) == RESET) +#define __HAL_RCC_GPIOF_IS_CLK_DISABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_GPIOFEN)) == RESET) +#define __HAL_RCC_GPIOG_IS_CLK_DISABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_GPIOGEN)) == RESET) +#define __HAL_RCC_GPIOH_IS_CLK_DISABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_GPIOHEN)) == RESET) +#define __HAL_RCC_GPIOI_IS_CLK_DISABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_GPIOIEN)) == RESET) +#if defined (STM32F745xx) || defined (STM32F746xx) || defined 
(STM32F756xx) || defined (STM32F765xx) ||\ + defined (STM32F767xx) || defined (STM32F769xx) || defined (STM32F777xx) || defined (STM32F779xx) ||\ + defined (STM32F750xx) +#define __HAL_RCC_GPIOJ_IS_CLK_DISABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_GPIOJEN)) == RESET) +#define __HAL_RCC_GPIOK_IS_CLK_DISABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_GPIOKEN)) == RESET) +#define __HAL_RCC_DMA2D_IS_CLK_DISABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_DMA2DEN)) == RESET) +#endif /* STM32F745xx || STM32F746xx || STM32F756xx || STM32F765xx || STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx || STM32F750xx */ + +#if defined (STM32F745xx) || defined (STM32F746xx) || defined (STM32F756xx) || defined (STM32F765xx) ||\ + defined (STM32F767xx) || defined (STM32F769xx) || defined (STM32F777xx) || defined (STM32F779xx) ||\ + defined (STM32F750xx) +/** + * @brief Enable ETHERNET clock. + */ +#define __HAL_RCC_ETHMAC_IS_CLK_ENABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_ETHMACEN)) != RESET) +#define __HAL_RCC_ETHMACTX_IS_CLK_ENABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_ETHMACTXEN)) != RESET) +#define __HAL_RCC_ETHMACRX_IS_CLK_ENABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_ETHMACRXEN)) != RESET) +#define __HAL_RCC_ETHMACPTP_IS_CLK_ENABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_ETHMACPTPEN)) != RESET) +#define __HAL_RCC_ETH_IS_CLK_ENABLED() (__HAL_RCC_ETHMAC_IS_CLK_ENABLED() && \ + __HAL_RCC_ETHMACTX_IS_CLK_ENABLED() && \ + __HAL_RCC_ETHMACRX_IS_CLK_ENABLED()) + +/** + * @brief Disable ETHERNET clock. + */ +#define __HAL_RCC_ETHMAC_IS_CLK_DISABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_ETHMACEN)) == RESET) +#define __HAL_RCC_ETHMACTX_IS_CLK_DISABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_ETHMACTXEN)) == RESET) +#define __HAL_RCC_ETHMACRX_IS_CLK_DISABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_ETHMACRXEN)) == RESET) +#define __HAL_RCC_ETHMACPTP_IS_CLK_DISABLED() ((RCC->AHB1ENR & (RCC_AHB1ENR_ETHMACPTPEN)) == RESET) +#define __HAL_RCC_ETH_IS_CLK_DISABLED() (__HAL_RCC_ETHMAC_IS_CLK_DISABLED() && \ + __HAL_RCC_ETHMACTX_IS_CLK_DISABLED() && \ + __HAL_RCC_ETHMACRX_IS_CLK_DISABLED()) +#endif /* STM32F745xx || STM32F746xx || STM32F756xx || STM32F765xx || STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx || STM32F750xx */ + +/** @brief Get the enable or disable status of the AHB2 peripheral clock. + * @note After reset, the peripheral clock (used for registers read/write access) + * is disabled and the application software has to enable this clock before + * using it. 
+ */ +#define __HAL_RCC_RNG_IS_CLK_ENABLED() ((RCC->AHB2ENR & (RCC_AHB2ENR_RNGEN)) != RESET) +#define __HAL_RCC_USB_OTG_FS_IS_CLK_ENABLED() ((RCC->AHB2ENR & (RCC_AHB2ENR_OTGFSEN)) != RESET) + +#define __HAL_RCC_RNG_IS_CLK_DISABLED() ((RCC->AHB2ENR & (RCC_AHB2ENR_RNGEN)) == RESET) +#define __HAL_RCC_USB_IS_OTG_FS_CLK_DISABLED() ((RCC->AHB2ENR & (RCC_AHB2ENR_OTGFSEN)) == RESET) + +#if defined(STM32F756xx) || defined (STM32F777xx) || defined (STM32F779xx) || defined (STM32F750xx) +#define __HAL_RCC_CRYP_IS_CLK_ENABLED() ((RCC->AHB2ENR & (RCC_AHB2ENR_CRYPEN)) != RESET) +#define __HAL_RCC_HASH_IS_CLK_ENABLED() ((RCC->AHB2ENR & (RCC_AHB2ENR_HASHEN)) != RESET) +#define __HAL_RCC_CRYP_IS_CLK_DISABLED() ((RCC->AHB2ENR & (RCC_AHB2ENR_CRYPEN)) == RESET) +#define __HAL_RCC_HASH_IS_CLK_DISABLED() ((RCC->AHB2ENR & (RCC_AHB2ENR_HASHEN)) == RESET) +#endif /* STM32F756xx || STM32F777xx || STM32F779xx || STM32F750xx */ + +#if defined(STM32F732xx) || defined (STM32F733xx) || defined (STM32F730xx) +#define __HAL_RCC_AES_IS_CLK_ENABLED() ((RCC->AHB2ENR & (RCC_AHB2ENR_AESEN)) != RESET) +#define __HAL_RCC_AES_IS_CLK_DISABLED() ((RCC->AHB2ENR & (RCC_AHB2ENR_AESEN)) == RESET) +#endif /* STM32F732xx || STM32F733xx || STM32F730xx */ + +#if defined (STM32F745xx) || defined (STM32F746xx) || defined (STM32F756xx) || defined (STM32F765xx) ||\ + defined (STM32F767xx) || defined (STM32F769xx) || defined (STM32F777xx) || defined (STM32F779xx) ||\ + defined (STM32F750xx) +#define __HAL_RCC_DCMI_IS_CLK_ENABLED() ((RCC->AHB2ENR & (RCC_AHB2ENR_DCMIEN)) != RESET) +#define __HAL_RCC_DCMI_IS_CLK_DISABLED() ((RCC->AHB2ENR & (RCC_AHB2ENR_DCMIEN)) == RESET) +#endif /* STM32F745xx || STM32F746xx || STM32F756xx || STM32F765xx || STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx || STM32F750xx */ + +#if defined (STM32F767xx) || defined (STM32F769xx) || defined (STM32F777xx) || defined (STM32F779xx) +#define __HAL_RCC_JPEG_IS_CLK_ENABLED() ((RCC->AHB2ENR & (RCC_AHB2ENR_JPEGEN)) != RESET) +#define __HAL_RCC_JPEG_IS_CLK_DISABLED() ((RCC->AHB2ENR & (RCC_AHB2ENR_JPEGEN)) == RESET) +#endif /* STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx */ + +/** @brief Get the enable or disable status of the AHB3 peripheral clock. + * @note After reset, the peripheral clock (used for registers read/write access) + * is disabled and the application software has to enable this clock before + * using it. + */ +#define __HAL_RCC_FMC_IS_CLK_ENABLED() ((RCC->AHB3ENR & (RCC_AHB3ENR_FMCEN)) != RESET) +#define __HAL_RCC_QSPI_IS_CLK_ENABLED() ((RCC->AHB3ENR & (RCC_AHB3ENR_QSPIEN)) != RESET) + +#define __HAL_RCC_FMC_IS_CLK_DISABLED() ((RCC->AHB3ENR & (RCC_AHB3ENR_FMCEN)) == RESET) +#define __HAL_RCC_QSPI_IS_CLK_DISABLED() ((RCC->AHB3ENR & (RCC_AHB3ENR_QSPIEN)) == RESET) + +/** @brief Get the enable or disable status of the APB1 peripheral clock. + * @note After reset, the peripheral clock (used for registers read/write access) + * is disabled and the application software has to enable this clock before + * using it. 
+ */ +#define __HAL_RCC_TIM2_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM2EN)) != RESET) +#define __HAL_RCC_TIM3_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM3EN)) != RESET) +#define __HAL_RCC_TIM4_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM4EN)) != RESET) +#define __HAL_RCC_TIM5_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM5EN)) != RESET) +#define __HAL_RCC_TIM6_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM6EN)) != RESET) +#define __HAL_RCC_TIM7_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM7EN)) != RESET) +#define __HAL_RCC_TIM12_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM12EN)) != RESET) +#define __HAL_RCC_TIM13_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM13EN)) != RESET) +#define __HAL_RCC_TIM14_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM14EN)) != RESET) +#define __HAL_RCC_LPTIM1_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_LPTIM1EN)) != RESET) +#if defined (STM32F765xx) || defined (STM32F767xx) || defined (STM32F769xx) || defined (STM32F777xx) || defined (STM32F779xx) +#define __HAL_RCC_CAN3_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_CAN3EN)) != RESET) +#endif /* STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx */ +#define __HAL_RCC_SPI2_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_SPI2EN)) != RESET) +#define __HAL_RCC_SPI3_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_SPI3EN)) != RESET) +#define __HAL_RCC_USART2_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_USART2EN)) != RESET) +#define __HAL_RCC_USART3_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_USART3EN)) != RESET) +#define __HAL_RCC_UART4_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_UART4EN)) != RESET) +#define __HAL_RCC_UART5_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_UART5EN)) != RESET) +#define __HAL_RCC_I2C1_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_I2C1EN)) != RESET) +#define __HAL_RCC_I2C2_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_I2C2EN)) != RESET) +#define __HAL_RCC_I2C3_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_I2C3EN)) != RESET) +#define __HAL_RCC_CAN1_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_CAN1EN)) != RESET) +#define __HAL_RCC_DAC_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_DACEN)) != RESET) +#define __HAL_RCC_UART7_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_UART7EN)) != RESET) +#define __HAL_RCC_UART8_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_UART8EN)) != RESET) + +#define __HAL_RCC_TIM2_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM2EN)) == RESET) +#define __HAL_RCC_TIM3_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM3EN)) == RESET) +#define __HAL_RCC_TIM4_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM4EN)) == RESET) +#define __HAL_RCC_TIM5_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM5EN)) == RESET) +#define __HAL_RCC_TIM6_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM6EN)) == RESET) +#define __HAL_RCC_TIM7_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM7EN)) == RESET) +#define __HAL_RCC_TIM12_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM12EN)) == RESET) +#define __HAL_RCC_TIM13_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM13EN)) == RESET) +#define __HAL_RCC_TIM14_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_TIM14EN)) == RESET) +#define __HAL_RCC_LPTIM1_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_LPTIM1EN)) == RESET) +#if defined (STM32F765xx) || defined (STM32F767xx) || defined (STM32F769xx) || defined (STM32F777xx) || defined (STM32F779xx) +#define __HAL_RCC_CAN3_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_CAN3EN)) == RESET) +#endif /* STM32F767xx || 
STM32F769xx || STM32F777xx || STM32F779xx */ +#define __HAL_RCC_SPI2_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_SPI2EN)) == RESET) +#define __HAL_RCC_SPI3_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_SPI3EN)) == RESET) +#define __HAL_RCC_USART2_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_USART2EN)) == RESET) +#define __HAL_RCC_USART3_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_USART3EN)) == RESET) +#define __HAL_RCC_UART4_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_UART4EN)) == RESET) +#define __HAL_RCC_UART5_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_UART5EN)) == RESET) +#define __HAL_RCC_I2C1_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_I2C1EN)) == RESET) +#define __HAL_RCC_I2C2_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_I2C2EN)) == RESET) +#define __HAL_RCC_I2C3_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_I2C3EN)) == RESET) +#define __HAL_RCC_CAN1_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_CAN1EN)) == RESET) +#define __HAL_RCC_DAC_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_DACEN)) == RESET) +#define __HAL_RCC_UART7_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_UART7EN)) == RESET) +#define __HAL_RCC_UART8_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_UART8EN)) == RESET) +#if defined (STM32F745xx) || defined (STM32F746xx) || defined (STM32F756xx) || defined (STM32F765xx) ||\ + defined (STM32F767xx) || defined (STM32F769xx) || defined (STM32F777xx) || defined (STM32F779xx) ||\ + defined (STM32F750xx) +#define __HAL_RCC_SPDIFRX_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_SPDIFRXEN)) != RESET) +#define __HAL_RCC_CAN2_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_CAN2EN)) != RESET) +#define __HAL_RCC_CEC_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_CECEN)) != RESET) +#define __HAL_RCC_I2C4_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_I2C4EN)) != RESET) + +#define __HAL_RCC_SPDIFRX_IS_CLK_DISABLED()((RCC->APB1ENR & (RCC_APB1ENR_SPDIFRXEN)) == RESET) +#define __HAL_RCC_CAN2_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_CAN2EN)) == RESET) +#define __HAL_RCC_CEC_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_CECEN)) == RESET) +#define __HAL_RCC_I2C4_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_I2C4EN)) == RESET) +#endif /* STM32F745xx || STM32F746xx || STM32F756xx || STM32F765xx || STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx || STM32F750xx */ + +#if defined (STM32F722xx) || defined (STM32F723xx) || defined (STM32F732xx) || defined (STM32F733xx) ||\ + defined (STM32F765xx) || defined (STM32F767xx) || defined (STM32F769xx) || defined (STM32F777xx) ||\ + defined (STM32F779xx) || defined (STM32F730xx) +#define __HAL_RCC_RTC_IS_CLK_ENABLED() ((RCC->APB1ENR & (RCC_APB1ENR_RTCEN)) != RESET) +#define __HAL_RCC_RTC_IS_CLK_DISABLED() ((RCC->APB1ENR & (RCC_APB1ENR_RTCEN)) == RESET) +#endif /* STM32F722xx || STM32F723xx || STM32F732xx || STM32F733xx || STM32F765xx || STM32F767xx || + STM32F769xx || STM32F777xx || STM32F779xx || STM32F730xx */ + +/** @brief Get the enable or disable status of the APB2 peripheral clock. + * @note After reset, the peripheral clock (used for registers read/write access) + * is disabled and the application software has to enable this clock before + * using it. 
+ */ +#define __HAL_RCC_TIM1_IS_CLK_ENABLED() ((RCC->APB2ENR & (RCC_APB2ENR_TIM1EN)) != RESET) +#define __HAL_RCC_TIM8_IS_CLK_ENABLED() ((RCC->APB2ENR & (RCC_APB2ENR_TIM8EN)) != RESET) +#define __HAL_RCC_USART1_IS_CLK_ENABLED() ((RCC->APB2ENR & (RCC_APB2ENR_USART1EN)) != RESET) +#define __HAL_RCC_USART6_IS_CLK_ENABLED() ((RCC->APB2ENR & (RCC_APB2ENR_USART6EN)) != RESET) +#define __HAL_RCC_ADC1_IS_CLK_ENABLED() ((RCC->APB2ENR & (RCC_APB2ENR_ADC1EN)) != RESET) +#define __HAL_RCC_ADC2_IS_CLK_ENABLED() ((RCC->APB2ENR & (RCC_APB2ENR_ADC2EN)) != RESET) +#define __HAL_RCC_ADC3_IS_CLK_ENABLED() ((RCC->APB2ENR & (RCC_APB2ENR_ADC3EN)) != RESET) +#define __HAL_RCC_SDMMC1_IS_CLK_ENABLED() ((RCC->APB2ENR & (RCC_APB2ENR_SDMMC1EN)) != RESET) +#define __HAL_RCC_SPI1_IS_CLK_ENABLED() ((RCC->APB2ENR & (RCC_APB2ENR_SPI1EN)) != RESET) +#define __HAL_RCC_SPI4_IS_CLK_ENABLED() ((RCC->APB2ENR & (RCC_APB2ENR_SPI4EN)) != RESET) +#define __HAL_RCC_TIM9_IS_CLK_ENABLED() ((RCC->APB2ENR & (RCC_APB2ENR_TIM9EN)) != RESET) +#define __HAL_RCC_TIM10_IS_CLK_ENABLED() ((RCC->APB2ENR & (RCC_APB2ENR_TIM10EN)) != RESET) +#define __HAL_RCC_TIM11_IS_CLK_ENABLED() ((RCC->APB2ENR & (RCC_APB2ENR_TIM11EN)) != RESET) +#define __HAL_RCC_SPI5_IS_CLK_ENABLED() ((RCC->APB2ENR & (RCC_APB2ENR_SPI5EN)) != RESET) +#define __HAL_RCC_SPI6_IS_CLK_ENABLED() ((RCC->APB2ENR & (RCC_APB2ENR_SPI6EN)) != RESET) +#define __HAL_RCC_SAI1_IS_CLK_ENABLED() ((RCC->APB2ENR & (RCC_APB2ENR_SAI1EN)) != RESET) +#define __HAL_RCC_SAI2_IS_CLK_ENABLED() ((RCC->APB2ENR & (RCC_APB2ENR_SAI2EN)) != RESET) +#if defined (STM32F746xx) || defined (STM32F756xx) || defined (STM32F767xx) || defined (STM32F769xx) || defined (STM32F777xx) || defined (STM32F779xx) || defined (STM32F750xx) +#define __HAL_RCC_LTDC_IS_CLK_ENABLED() ((RCC->APB2ENR & (RCC_APB2ENR_LTDCEN)) != RESET) +#endif /* STM32F746xx || STM32F756xx || STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx || STM32F750xx */ +#if defined (STM32F769xx) || defined (STM32F779xx) +#define __HAL_RCC_DSI_IS_CLK_ENABLED() ((RCC->APB2ENR & (RCC_APB2ENR_DSIEN)) != RESET) +#endif /* STM32F769xx || STM32F779xx */ +#if defined (STM32F722xx) || defined (STM32F723xx) || defined (STM32F732xx) || defined (STM32F733xx) || defined (STM32F765xx) ||\ + defined (STM32F767xx) || defined (STM32F769xx) || defined (STM32F777xx) || defined (STM32F779xx) || defined (STM32F730xx) +#define __HAL_RCC_SDMMC2_IS_CLK_ENABLED() ((RCC->APB2ENR & (RCC_APB2ENR_SDMMC2EN)) != RESET) +#endif /* STM32F722xx || STM32F723xx || STM32F732xx || STM32F733xx || STM32F765xx || STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx || STM32F730xx */ +#if defined (STM32F765xx) || defined (STM32F767xx) || defined (STM32F769xx) || defined (STM32F777xx) || defined (STM32F779xx) +#define __HAL_RCC_DFSDM1_IS_CLK_ENABLED() ((RCC->APB2ENR & (RCC_APB2ENR_DFSDM1EN)) != RESET) +#define __HAL_RCC_MDIO_IS_CLK_ENABLED() ((RCC->APB2ENR & (RCC_APB2ENR_MDIOEN)) != RESET) +#endif /* STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx */ +#if defined (STM32F723xx) || defined (STM32F733xx) || defined (STM32F730xx) +#define __HAL_RCC_OTGPHYC_IS_CLK_ENABLED() ((RCC->APB2ENR & (RCC_APB2ENR_OTGPHYCEN)) != RESET) +#endif /* STM32F723xx || STM32F733xx || STM32F730xx */ + +#define __HAL_RCC_TIM1_IS_CLK_DISABLED() ((RCC->APB2ENR & (RCC_APB2ENR_TIM1EN)) == RESET) +#define __HAL_RCC_TIM8_IS_CLK_DISABLED() ((RCC->APB2ENR & (RCC_APB2ENR_TIM8EN)) == RESET) +#define __HAL_RCC_USART1_IS_CLK_DISABLED() ((RCC->APB2ENR & (RCC_APB2ENR_USART1EN)) == RESET) +#define 
__HAL_RCC_USART6_IS_CLK_DISABLED() ((RCC->APB2ENR & (RCC_APB2ENR_USART6EN)) == RESET) +#define __HAL_RCC_ADC1_IS_CLK_DISABLED() ((RCC->APB2ENR & (RCC_APB2ENR_ADC1EN)) == RESET) +#define __HAL_RCC_ADC2_IS_CLK_DISABLED() ((RCC->APB2ENR & (RCC_APB2ENR_ADC2EN)) == RESET) +#define __HAL_RCC_ADC3_IS_CLK_DISABLED() ((RCC->APB2ENR & (RCC_APB2ENR_ADC3EN)) == RESET) +#define __HAL_RCC_SDMMC1_IS_CLK_DISABLED() ((RCC->APB2ENR & (RCC_APB2ENR_SDMMC1EN)) == RESET) +#define __HAL_RCC_SPI1_IS_CLK_DISABLED() ((RCC->APB2ENR & (RCC_APB2ENR_SPI1EN)) == RESET) +#define __HAL_RCC_SPI4_IS_CLK_DISABLED() ((RCC->APB2ENR & (RCC_APB2ENR_SPI4EN)) == RESET) +#define __HAL_RCC_TIM9_IS_CLK_DISABLED() ((RCC->APB2ENR & (RCC_APB2ENR_TIM9EN)) == RESET) +#define __HAL_RCC_TIM10_IS_CLK_DISABLED() ((RCC->APB2ENR & (RCC_APB2ENR_TIM10EN)) == RESET) +#define __HAL_RCC_TIM11_IS_CLK_DISABLED() ((RCC->APB2ENR & (RCC_APB2ENR_TIM11EN)) == RESET) +#define __HAL_RCC_SPI5_IS_CLK_DISABLED() ((RCC->APB2ENR & (RCC_APB2ENR_SPI5EN)) == RESET) +#define __HAL_RCC_SPI6_IS_CLK_DISABLED() ((RCC->APB2ENR & (RCC_APB2ENR_SPI6EN)) == RESET) +#define __HAL_RCC_SAI1_IS_CLK_DISABLED() ((RCC->APB2ENR & (RCC_APB2ENR_SAI1EN)) == RESET) +#define __HAL_RCC_SAI2_IS_CLK_DISABLED() ((RCC->APB2ENR & (RCC_APB2ENR_SAI2EN)) == RESET) +#if defined (STM32F746xx) || defined (STM32F756xx) || defined (STM32F767xx) || defined (STM32F769xx) || defined (STM32F777xx) || defined (STM32F779xx) || defined (STM32F750xx) +#define __HAL_RCC_LTDC_IS_CLK_DISABLED() ((RCC->APB2ENR & (RCC_APB2ENR_LTDCEN)) == RESET) +#endif /* STM32F746xx || STM32F756xx || STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx || STM32F750xx */ +#if defined (STM32F769xx) || defined (STM32F779xx) +#define __HAL_RCC_DSI_IS_CLK_DISABLED() ((RCC->APB2ENR & (RCC_APB2ENR_DSIEN)) == RESET) +#endif /* STM32F769xx || STM32F779xx */ +#if defined (STM32F722xx) || defined (STM32F723xx) || defined (STM32F732xx) || defined (STM32F733xx) || defined (STM32F765xx) ||\ + defined (STM32F767xx) || defined (STM32F769xx) || defined (STM32F777xx) || defined (STM32F779xx) || defined (STM32F730xx) +#define __HAL_RCC_SDMMC2_IS_CLK_DISABLED() ((RCC->APB2ENR & (RCC_APB2ENR_SDMMC2EN)) == RESET) +#endif /* STM32F722xx || STM32F723xx || STM32F732xx || STM32F733xx || STM32F765xx || STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx || STM32F730xx */ +#if defined (STM32F765xx) || defined (STM32F767xx) || defined (STM32F769xx) || defined (STM32F777xx) || defined (STM32F779xx) +#define __HAL_RCC_DFSDM1_IS_CLK_DISABLED() ((RCC->APB2ENR & (RCC_APB2ENR_DFSDM1EN)) == RESET) +#define __HAL_RCC_MDIO_IS_CLK_DISABLED() ((RCC->APB2ENR & (RCC_APB2ENR_MDIOEN)) == RESET) +#endif /* STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx */ +#if defined (STM32F723xx) || defined (STM32F733xx) || defined (STM32F730xx) +#define __HAL_RCC_OTGPHYC_IS_CLK_DISABLED() ((RCC->APB2ENR & (RCC_APB2ENR_OTGPHYCEN)) == RESET) +#endif /* STM32F723xx || STM32F733xx || STM32F730xx */ + +/** + * @} + */ + +/** @defgroup RCCEx_Force_Release_Peripheral_Reset RCCEx Force Release Peripheral Reset + * @brief Forces or releases AHB/APB peripheral reset. + * @{ + */ + +/** @brief Force or release AHB1 peripheral reset. 
+ */ +#define __HAL_RCC_DMA2_FORCE_RESET() (RCC->AHB1RSTR |= (RCC_AHB1RSTR_DMA2RST)) +#define __HAL_RCC_USB_OTG_HS_FORCE_RESET() (RCC->AHB1RSTR |= (RCC_AHB1RSTR_OTGHRST)) +#define __HAL_RCC_GPIOA_FORCE_RESET() (RCC->AHB1RSTR |= (RCC_AHB1RSTR_GPIOARST)) +#define __HAL_RCC_GPIOB_FORCE_RESET() (RCC->AHB1RSTR |= (RCC_AHB1RSTR_GPIOBRST)) +#define __HAL_RCC_GPIOC_FORCE_RESET() (RCC->AHB1RSTR |= (RCC_AHB1RSTR_GPIOCRST)) +#define __HAL_RCC_GPIOD_FORCE_RESET() (RCC->AHB1RSTR |= (RCC_AHB1RSTR_GPIODRST)) +#define __HAL_RCC_GPIOE_FORCE_RESET() (RCC->AHB1RSTR |= (RCC_AHB1RSTR_GPIOERST)) +#define __HAL_RCC_GPIOF_FORCE_RESET() (RCC->AHB1RSTR |= (RCC_AHB1RSTR_GPIOFRST)) +#define __HAL_RCC_GPIOG_FORCE_RESET() (RCC->AHB1RSTR |= (RCC_AHB1RSTR_GPIOGRST)) +#define __HAL_RCC_GPIOH_FORCE_RESET() (RCC->AHB1RSTR |= (RCC_AHB1RSTR_GPIOHRST)) +#define __HAL_RCC_GPIOI_FORCE_RESET() (RCC->AHB1RSTR |= (RCC_AHB1RSTR_GPIOIRST)) + +#define __HAL_RCC_DMA2_RELEASE_RESET() (RCC->AHB1RSTR &= ~(RCC_AHB1RSTR_DMA2RST)) +#define __HAL_RCC_USB_OTG_HS_RELEASE_RESET() (RCC->AHB1RSTR &= ~(RCC_AHB1RSTR_OTGHRST)) +#define __HAL_RCC_GPIOA_RELEASE_RESET() (RCC->AHB1RSTR &= ~(RCC_AHB1RSTR_GPIOARST)) +#define __HAL_RCC_GPIOB_RELEASE_RESET() (RCC->AHB1RSTR &= ~(RCC_AHB1RSTR_GPIOBRST)) +#define __HAL_RCC_GPIOC_RELEASE_RESET() (RCC->AHB1RSTR &= ~(RCC_AHB1RSTR_GPIOCRST)) +#define __HAL_RCC_GPIOD_RELEASE_RESET() (RCC->AHB1RSTR &= ~(RCC_AHB1RSTR_GPIODRST)) +#define __HAL_RCC_GPIOE_RELEASE_RESET() (RCC->AHB1RSTR &= ~(RCC_AHB1RSTR_GPIOERST)) +#define __HAL_RCC_GPIOF_RELEASE_RESET() (RCC->AHB1RSTR &= ~(RCC_AHB1RSTR_GPIOFRST)) +#define __HAL_RCC_GPIOG_RELEASE_RESET() (RCC->AHB1RSTR &= ~(RCC_AHB1RSTR_GPIOGRST)) +#define __HAL_RCC_GPIOH_RELEASE_RESET() (RCC->AHB1RSTR &= ~(RCC_AHB1RSTR_GPIOHRST)) +#define __HAL_RCC_GPIOI_RELEASE_RESET() (RCC->AHB1RSTR &= ~(RCC_AHB1RSTR_GPIOIRST)) + +#if defined (STM32F745xx) || defined (STM32F746xx) || defined (STM32F756xx) || defined (STM32F765xx) ||\ + defined (STM32F767xx) || defined (STM32F769xx) || defined (STM32F777xx) || defined (STM32F779xx) ||\ + defined (STM32F750xx) +#define __HAL_RCC_DMA2D_FORCE_RESET() (RCC->AHB1RSTR |= (RCC_AHB1RSTR_DMA2DRST)) +#define __HAL_RCC_ETHMAC_FORCE_RESET() (RCC->AHB1RSTR |= (RCC_AHB1RSTR_ETHMACRST)) +#define __HAL_RCC_GPIOJ_FORCE_RESET() (RCC->AHB1RSTR |= (RCC_AHB1RSTR_GPIOJRST)) +#define __HAL_RCC_GPIOK_FORCE_RESET() (RCC->AHB1RSTR |= (RCC_AHB1RSTR_GPIOKRST)) + +#define __HAL_RCC_DMA2D_RELEASE_RESET() (RCC->AHB1RSTR &= ~(RCC_AHB1RSTR_DMA2DRST)) +#define __HAL_RCC_ETHMAC_RELEASE_RESET() (RCC->AHB1RSTR &= ~(RCC_AHB1RSTR_ETHMACRST)) +#define __HAL_RCC_GPIOJ_RELEASE_RESET() (RCC->AHB1RSTR &= ~(RCC_AHB1RSTR_GPIOJRST)) +#define __HAL_RCC_GPIOK_RELEASE_RESET() (RCC->AHB1RSTR &= ~(RCC_AHB1RSTR_GPIOKRST)) +#endif /* STM32F745xx || STM32F746xx || STM32F756xx || STM32F765xx || STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx || STM32F750xx */ + +/** @brief Force or release AHB2 peripheral reset. 
+ */ +#define __HAL_RCC_AHB2_FORCE_RESET() (RCC->AHB2RSTR = 0xFFFFFFFFU) +#define __HAL_RCC_RNG_FORCE_RESET() (RCC->AHB2RSTR |= (RCC_AHB2RSTR_RNGRST)) +#define __HAL_RCC_USB_OTG_FS_FORCE_RESET() (RCC->AHB2RSTR |= (RCC_AHB2RSTR_OTGFSRST)) + +#define __HAL_RCC_AHB2_RELEASE_RESET() (RCC->AHB2RSTR = 0x00U) +#define __HAL_RCC_RNG_RELEASE_RESET() (RCC->AHB2RSTR &= ~(RCC_AHB2RSTR_RNGRST)) +#define __HAL_RCC_USB_OTG_FS_RELEASE_RESET() (RCC->AHB2RSTR &= ~(RCC_AHB2RSTR_OTGFSRST)) + +#if defined (STM32F767xx) || defined (STM32F769xx) || defined (STM32F777xx) || defined (STM32F779xx) +#define __HAL_RCC_JPEG_FORCE_RESET() (RCC->AHB2RSTR |= (RCC_AHB2RSTR_JPEGRST)) +#define __HAL_RCC_JPEG_RELEASE_RESET() (RCC->AHB2RSTR &= ~(RCC_AHB2RSTR_JPEGRST)) +#endif /* STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx */ + +#if defined(STM32F756xx) || defined (STM32F777xx) || defined (STM32F779xx) || defined (STM32F750xx) +#define __HAL_RCC_CRYP_FORCE_RESET() (RCC->AHB2RSTR |= (RCC_AHB2RSTR_CRYPRST)) +#define __HAL_RCC_HASH_FORCE_RESET() (RCC->AHB2RSTR |= (RCC_AHB2RSTR_HASHRST)) +#define __HAL_RCC_CRYP_RELEASE_RESET() (RCC->AHB2RSTR &= ~(RCC_AHB2RSTR_CRYPRST)) +#define __HAL_RCC_HASH_RELEASE_RESET() (RCC->AHB2RSTR &= ~(RCC_AHB2RSTR_HASHRST)) +#endif /* STM32F756xx || STM32F777xx || STM32F779xx || STM32F750xx */ + +#if defined(STM32F732xx) || defined (STM32F733xx) || defined (STM32F730xx) +#define __HAL_RCC_AES_FORCE_RESET() (RCC->AHB2RSTR |= (RCC_AHB2RSTR_AESRST)) +#define __HAL_RCC_AES_RELEASE_RESET() (RCC->AHB2RSTR &= ~(RCC_AHB2RSTR_AESRST)) +#endif /* STM32F732xx || STM32F733xx || STM32F730xx */ + +#if defined (STM32F745xx) || defined (STM32F746xx) || defined (STM32F756xx) || defined (STM32F765xx) ||\ + defined (STM32F767xx) || defined (STM32F769xx) || defined (STM32F777xx) || defined (STM32F779xx) ||\ + defined (STM32F750xx) +#define __HAL_RCC_DCMI_FORCE_RESET() (RCC->AHB2RSTR |= (RCC_AHB2RSTR_DCMIRST)) +#define __HAL_RCC_DCMI_RELEASE_RESET() (RCC->AHB2RSTR &= ~(RCC_AHB2RSTR_DCMIRST)) +#endif /* STM32F745xx || STM32F746xx || STM32F756xx || STM32F765xx || STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx || STM32F750xx */ + +/** @brief Force or release AHB3 peripheral reset + */ +#define __HAL_RCC_AHB3_FORCE_RESET() (RCC->AHB3RSTR = 0xFFFFFFFFU) +#define __HAL_RCC_FMC_FORCE_RESET() (RCC->AHB3RSTR |= (RCC_AHB3RSTR_FMCRST)) +#define __HAL_RCC_QSPI_FORCE_RESET() (RCC->AHB3RSTR |= (RCC_AHB3RSTR_QSPIRST)) + +#define __HAL_RCC_AHB3_RELEASE_RESET() (RCC->AHB3RSTR = 0x00U) +#define __HAL_RCC_FMC_RELEASE_RESET() (RCC->AHB3RSTR &= ~(RCC_AHB3RSTR_FMCRST)) +#define __HAL_RCC_QSPI_RELEASE_RESET() (RCC->AHB3RSTR &= ~(RCC_AHB3RSTR_QSPIRST)) + +/** @brief Force or release APB1 peripheral reset. 
+ */ +#define __HAL_RCC_TIM2_FORCE_RESET() (RCC->APB1RSTR |= (RCC_APB1RSTR_TIM2RST)) +#define __HAL_RCC_TIM3_FORCE_RESET() (RCC->APB1RSTR |= (RCC_APB1RSTR_TIM3RST)) +#define __HAL_RCC_TIM4_FORCE_RESET() (RCC->APB1RSTR |= (RCC_APB1RSTR_TIM4RST)) +#define __HAL_RCC_TIM5_FORCE_RESET() (RCC->APB1RSTR |= (RCC_APB1RSTR_TIM5RST)) +#define __HAL_RCC_TIM6_FORCE_RESET() (RCC->APB1RSTR |= (RCC_APB1RSTR_TIM6RST)) +#define __HAL_RCC_TIM7_FORCE_RESET() (RCC->APB1RSTR |= (RCC_APB1RSTR_TIM7RST)) +#define __HAL_RCC_TIM12_FORCE_RESET() (RCC->APB1RSTR |= (RCC_APB1RSTR_TIM12RST)) +#define __HAL_RCC_TIM13_FORCE_RESET() (RCC->APB1RSTR |= (RCC_APB1RSTR_TIM13RST)) +#define __HAL_RCC_TIM14_FORCE_RESET() (RCC->APB1RSTR |= (RCC_APB1RSTR_TIM14RST)) +#define __HAL_RCC_LPTIM1_FORCE_RESET() (RCC->APB1RSTR |= (RCC_APB1RSTR_LPTIM1RST)) +#if defined (STM32F765xx) || defined (STM32F767xx) || defined (STM32F769xx) || defined (STM32F777xx) || defined (STM32F779xx) +#define __HAL_RCC_CAN3_FORCE_RESET() (RCC->APB1RSTR |= (RCC_APB1RSTR_CAN3RST)) +#endif /* STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx */ +#define __HAL_RCC_SPI2_FORCE_RESET() (RCC->APB1RSTR |= (RCC_APB1RSTR_SPI2RST)) +#define __HAL_RCC_SPI3_FORCE_RESET() (RCC->APB1RSTR |= (RCC_APB1RSTR_SPI3RST)) +#define __HAL_RCC_USART2_FORCE_RESET() (RCC->APB1RSTR |= (RCC_APB1RSTR_USART2RST)) +#define __HAL_RCC_USART3_FORCE_RESET() (RCC->APB1RSTR |= (RCC_APB1RSTR_USART3RST)) +#define __HAL_RCC_UART4_FORCE_RESET() (RCC->APB1RSTR |= (RCC_APB1RSTR_UART4RST)) +#define __HAL_RCC_UART5_FORCE_RESET() (RCC->APB1RSTR |= (RCC_APB1RSTR_UART5RST)) +#define __HAL_RCC_I2C1_FORCE_RESET() (RCC->APB1RSTR |= (RCC_APB1RSTR_I2C1RST)) +#define __HAL_RCC_I2C2_FORCE_RESET() (RCC->APB1RSTR |= (RCC_APB1RSTR_I2C2RST)) +#define __HAL_RCC_I2C3_FORCE_RESET() (RCC->APB1RSTR |= (RCC_APB1RSTR_I2C3RST)) +#define __HAL_RCC_CAN1_FORCE_RESET() (RCC->APB1RSTR |= (RCC_APB1RSTR_CAN1RST)) +#define __HAL_RCC_DAC_FORCE_RESET() (RCC->APB1RSTR |= (RCC_APB1RSTR_DACRST)) +#define __HAL_RCC_UART7_FORCE_RESET() (RCC->APB1RSTR |= (RCC_APB1RSTR_UART7RST)) +#define __HAL_RCC_UART8_FORCE_RESET() (RCC->APB1RSTR |= (RCC_APB1RSTR_UART8RST)) + +#define __HAL_RCC_TIM2_RELEASE_RESET() (RCC->APB1RSTR &= ~(RCC_APB1RSTR_TIM2RST)) +#define __HAL_RCC_TIM3_RELEASE_RESET() (RCC->APB1RSTR &= ~(RCC_APB1RSTR_TIM3RST)) +#define __HAL_RCC_TIM4_RELEASE_RESET() (RCC->APB1RSTR &= ~(RCC_APB1RSTR_TIM4RST)) +#define __HAL_RCC_TIM5_RELEASE_RESET() (RCC->APB1RSTR &= ~(RCC_APB1RSTR_TIM5RST)) +#define __HAL_RCC_TIM6_RELEASE_RESET() (RCC->APB1RSTR &= ~(RCC_APB1RSTR_TIM6RST)) +#define __HAL_RCC_TIM7_RELEASE_RESET() (RCC->APB1RSTR &= ~(RCC_APB1RSTR_TIM7RST)) +#define __HAL_RCC_TIM12_RELEASE_RESET() (RCC->APB1RSTR &= ~(RCC_APB1RSTR_TIM12RST)) +#define __HAL_RCC_TIM13_RELEASE_RESET() (RCC->APB1RSTR &= ~(RCC_APB1RSTR_TIM13RST)) +#define __HAL_RCC_TIM14_RELEASE_RESET() (RCC->APB1RSTR &= ~(RCC_APB1RSTR_TIM14RST)) +#define __HAL_RCC_LPTIM1_RELEASE_RESET() (RCC->APB1RSTR &= ~(RCC_APB1RSTR_LPTIM1RST)) +#if defined (STM32F765xx) || defined (STM32F767xx) || defined (STM32F769xx) || defined (STM32F777xx) || defined (STM32F779xx) +#define __HAL_RCC_CAN3_RELEASE_RESET() (RCC->APB1RSTR &= ~(RCC_APB1RSTR_CAN3RST)) +#endif /* STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx */ +#define __HAL_RCC_SPI2_RELEASE_RESET() (RCC->APB1RSTR &= ~(RCC_APB1RSTR_SPI2RST)) +#define __HAL_RCC_SPI3_RELEASE_RESET() (RCC->APB1RSTR &= ~(RCC_APB1RSTR_SPI3RST)) +#define __HAL_RCC_USART2_RELEASE_RESET() (RCC->APB1RSTR &= ~(RCC_APB1RSTR_USART2RST)) +#define 
__HAL_RCC_USART3_RELEASE_RESET() (RCC->APB1RSTR &= ~(RCC_APB1RSTR_USART3RST)) +#define __HAL_RCC_UART4_RELEASE_RESET() (RCC->APB1RSTR &= ~(RCC_APB1RSTR_UART4RST)) +#define __HAL_RCC_UART5_RELEASE_RESET() (RCC->APB1RSTR &= ~(RCC_APB1RSTR_UART5RST)) +#define __HAL_RCC_I2C1_RELEASE_RESET() (RCC->APB1RSTR &= ~(RCC_APB1RSTR_I2C1RST)) +#define __HAL_RCC_I2C2_RELEASE_RESET() (RCC->APB1RSTR &= ~(RCC_APB1RSTR_I2C2RST)) +#define __HAL_RCC_I2C3_RELEASE_RESET() (RCC->APB1RSTR &= ~(RCC_APB1RSTR_I2C3RST)) +#define __HAL_RCC_CAN1_RELEASE_RESET() (RCC->APB1RSTR &= ~(RCC_APB1RSTR_CAN1RST)) +#define __HAL_RCC_DAC_RELEASE_RESET() (RCC->APB1RSTR &= ~(RCC_APB1RSTR_DACRST)) +#define __HAL_RCC_UART7_RELEASE_RESET() (RCC->APB1RSTR &= ~(RCC_APB1RSTR_UART7RST)) +#define __HAL_RCC_UART8_RELEASE_RESET() (RCC->APB1RSTR &= ~(RCC_APB1RSTR_UART8RST)) + +#if defined (STM32F745xx) || defined (STM32F746xx) || defined (STM32F756xx) || defined (STM32F765xx) ||\ + defined (STM32F767xx) || defined (STM32F769xx) || defined (STM32F777xx) || defined (STM32F779xx) ||\ + defined (STM32F750xx) +#define __HAL_RCC_SPDIFRX_FORCE_RESET() (RCC->APB1RSTR |= (RCC_APB1RSTR_SPDIFRXRST)) +#define __HAL_RCC_I2C4_FORCE_RESET() (RCC->APB1RSTR |= (RCC_APB1RSTR_I2C4RST)) +#define __HAL_RCC_CAN2_FORCE_RESET() (RCC->APB1RSTR |= (RCC_APB1RSTR_CAN2RST)) +#define __HAL_RCC_CEC_FORCE_RESET() (RCC->APB1RSTR |= (RCC_APB1RSTR_CECRST)) + +#define __HAL_RCC_SPDIFRX_RELEASE_RESET()(RCC->APB1RSTR &= ~(RCC_APB1RSTR_SPDIFRXRST)) +#define __HAL_RCC_I2C4_RELEASE_RESET() (RCC->APB1RSTR &= ~(RCC_APB1RSTR_I2C4RST)) +#define __HAL_RCC_CAN2_RELEASE_RESET() (RCC->APB1RSTR &= ~(RCC_APB1RSTR_CAN2RST)) +#define __HAL_RCC_CEC_RELEASE_RESET() (RCC->APB1RSTR &= ~(RCC_APB1RSTR_CECRST)) +#endif /* STM32F745xx || STM32F746xx || STM32F756xx || STM32F765xx || STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx || STM32F750xx */ + +/** @brief Force or release APB2 peripheral reset. 
+ */ +#define __HAL_RCC_TIM1_FORCE_RESET() (RCC->APB2RSTR |= (RCC_APB2RSTR_TIM1RST)) +#define __HAL_RCC_TIM8_FORCE_RESET() (RCC->APB2RSTR |= (RCC_APB2RSTR_TIM8RST)) +#define __HAL_RCC_USART1_FORCE_RESET() (RCC->APB2RSTR |= (RCC_APB2RSTR_USART1RST)) +#define __HAL_RCC_USART6_FORCE_RESET() (RCC->APB2RSTR |= (RCC_APB2RSTR_USART6RST)) +#define __HAL_RCC_ADC_FORCE_RESET() (RCC->APB2RSTR |= (RCC_APB2RSTR_ADCRST)) +#define __HAL_RCC_SDMMC1_FORCE_RESET() (RCC->APB2RSTR |= (RCC_APB2RSTR_SDMMC1RST)) +#define __HAL_RCC_SPI1_FORCE_RESET() (RCC->APB2RSTR |= (RCC_APB2RSTR_SPI1RST)) +#define __HAL_RCC_SPI4_FORCE_RESET() (RCC->APB2RSTR |= (RCC_APB2RSTR_SPI4RST)) +#define __HAL_RCC_TIM9_FORCE_RESET() (RCC->APB2RSTR |= (RCC_APB2RSTR_TIM9RST)) +#define __HAL_RCC_TIM10_FORCE_RESET() (RCC->APB2RSTR |= (RCC_APB2RSTR_TIM10RST)) +#define __HAL_RCC_TIM11_FORCE_RESET() (RCC->APB2RSTR |= (RCC_APB2RSTR_TIM11RST)) +#define __HAL_RCC_SPI5_FORCE_RESET() (RCC->APB2RSTR |= (RCC_APB2RSTR_SPI5RST)) +#define __HAL_RCC_SPI6_FORCE_RESET() (RCC->APB2RSTR |= (RCC_APB2RSTR_SPI6RST)) +#define __HAL_RCC_SAI1_FORCE_RESET() (RCC->APB2RSTR |= (RCC_APB2RSTR_SAI1RST)) +#define __HAL_RCC_SAI2_FORCE_RESET() (RCC->APB2RSTR |= (RCC_APB2RSTR_SAI2RST)) +#if defined (STM32F746xx) || defined (STM32F756xx) || defined (STM32F767xx) || defined (STM32F769xx) || defined (STM32F777xx) || defined (STM32F779xx) || defined (STM32F750xx) +#define __HAL_RCC_LTDC_FORCE_RESET() (RCC->APB2RSTR |= (RCC_APB2RSTR_LTDCRST)) +#endif /* STM32F746xx || STM32F756xx || STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx || STM32F750xx */ +#if defined (STM32F723xx) || defined (STM32F733xx) || defined (STM32F730xx) +#define __HAL_RCC_OTGPHYC_FORCE_RESET() (RCC->APB2RSTR |= (RCC_APB2RSTR_OTGPHYCRST)) +#endif /* STM32F723xx || STM32F733xx || STM32F730xx */ + +#define __HAL_RCC_TIM1_RELEASE_RESET() (RCC->APB2RSTR &= ~(RCC_APB2RSTR_TIM1RST)) +#define __HAL_RCC_TIM8_RELEASE_RESET() (RCC->APB2RSTR &= ~(RCC_APB2RSTR_TIM8RST)) +#define __HAL_RCC_USART1_RELEASE_RESET() (RCC->APB2RSTR &= ~(RCC_APB2RSTR_USART1RST)) +#define __HAL_RCC_USART6_RELEASE_RESET() (RCC->APB2RSTR &= ~(RCC_APB2RSTR_USART6RST)) +#define __HAL_RCC_ADC_RELEASE_RESET() (RCC->APB2RSTR &= ~(RCC_APB2RSTR_ADCRST)) +#define __HAL_RCC_SDMMC1_RELEASE_RESET() (RCC->APB2RSTR &= ~(RCC_APB2RSTR_SDMMC1RST)) +#define __HAL_RCC_SPI1_RELEASE_RESET() (RCC->APB2RSTR &= ~(RCC_APB2RSTR_SPI1RST)) +#define __HAL_RCC_SPI4_RELEASE_RESET() (RCC->APB2RSTR &= ~(RCC_APB2RSTR_SPI4RST)) +#define __HAL_RCC_TIM9_RELEASE_RESET() (RCC->APB2RSTR &= ~(RCC_APB2RSTR_TIM9RST)) +#define __HAL_RCC_TIM10_RELEASE_RESET() (RCC->APB2RSTR &= ~(RCC_APB2RSTR_TIM10RST)) +#define __HAL_RCC_TIM11_RELEASE_RESET() (RCC->APB2RSTR &= ~(RCC_APB2RSTR_TIM11RST)) +#define __HAL_RCC_SPI5_RELEASE_RESET() (RCC->APB2RSTR &= ~(RCC_APB2RSTR_SPI5RST)) +#define __HAL_RCC_SPI6_RELEASE_RESET() (RCC->APB2RSTR &= ~(RCC_APB2RSTR_SPI6RST)) +#define __HAL_RCC_SAI1_RELEASE_RESET() (RCC->APB2RSTR &= ~(RCC_APB2RSTR_SAI1RST)) +#define __HAL_RCC_SAI2_RELEASE_RESET() (RCC->APB2RSTR &= ~(RCC_APB2RSTR_SAI2RST)) +#if defined (STM32F746xx) || defined (STM32F756xx) || defined (STM32F767xx) || defined (STM32F769xx) || defined (STM32F777xx) || defined (STM32F779xx) || defined (STM32F750xx) +#define __HAL_RCC_LTDC_RELEASE_RESET() (RCC->APB2RSTR &= ~(RCC_APB2RSTR_LTDCRST)) +#endif /* STM32F746xx || STM32F756xx || STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx || STM32F750xx */ +#if defined (STM32F723xx) || defined (STM32F733xx) || defined (STM32F730xx) +#define 
__HAL_RCC_OTGPHYC_RELEASE_RESET() (RCC->APB2RSTR &= ~(RCC_APB2RSTR_OTGPHYCRST)) +#endif /* STM32F723xx || STM32F733xx || STM32F730xx */ + +#if defined (STM32F769xx) || defined (STM32F779xx) +#define __HAL_RCC_DSI_FORCE_RESET() (RCC->APB2RSTR |= (RCC_APB2RSTR_DSIRST)) +#define __HAL_RCC_DSI_RELEASE_RESET() (RCC->APB2RSTR &= ~(RCC_APB2RSTR_DSIRST)) +#endif /* STM32F769xx || STM32F779xx */ + +#if defined (STM32F722xx) || defined (STM32F723xx) || defined (STM32F732xx) || defined (STM32F733xx) || defined (STM32F765xx) ||\ + defined (STM32F767xx) || defined (STM32F769xx) || defined (STM32F777xx) || defined (STM32F779xx) || defined (STM32F730xx) +#define __HAL_RCC_SDMMC2_FORCE_RESET() (RCC->APB2RSTR |= (RCC_APB2RSTR_SDMMC2RST)) +#define __HAL_RCC_SDMMC2_RELEASE_RESET() (RCC->APB2RSTR &= ~(RCC_APB2RSTR_SDMMC2RST)) +#endif /* STM32F722xx || STM32F723xx || STM32F732xx || STM32F733xx || STM32F765xx || STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx || STM32F730xx */ + +#if defined (STM32F765xx) || defined (STM32F767xx) || defined (STM32F769xx) || defined (STM32F777xx) || defined (STM32F779xx) +#define __HAL_RCC_DFSDM1_FORCE_RESET() (RCC->APB2RSTR |= (RCC_APB2RSTR_DFSDM1RST)) +#define __HAL_RCC_MDIO_FORCE_RESET() (RCC->APB2RSTR |= (RCC_APB2RSTR_MDIORST)) +#define __HAL_RCC_DFSDM1_RELEASE_RESET() (RCC->APB2RSTR &= ~(RCC_APB2RSTR_DFSDM1RST)) +#define __HAL_RCC_MDIO_RELEASE_RESET() (RCC->APB2RSTR &= ~(RCC_APB2RSTR_MDIORST)) +#endif /* STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx */ +/** + * @} + */ + +/** @defgroup RCCEx_Peripheral_Clock_Sleep_Enable_Disable RCCEx Peripheral Clock Sleep Enable Disable + * @brief Enables or disables the AHB/APB peripheral clock during Low Power (Sleep) mode. + * @note Peripheral clock gating in SLEEP mode can be used to further reduce + * power consumption. + * @note After wakeup from SLEEP mode, the peripheral clock is enabled again. + * @note By default, all peripheral clocks are enabled during SLEEP mode. + * @{ + */ + +/** @brief Enable or disable the AHB1 peripheral clock during Low Power (Sleep) mode. 
+ */ +#define __HAL_RCC_FLITF_CLK_SLEEP_ENABLE() (RCC->AHB1LPENR |= (RCC_AHB1LPENR_FLITFLPEN)) +#define __HAL_RCC_AXI_CLK_SLEEP_ENABLE() (RCC->AHB1LPENR |= (RCC_AHB1LPENR_AXILPEN)) +#define __HAL_RCC_SRAM1_CLK_SLEEP_ENABLE() (RCC->AHB1LPENR |= (RCC_AHB1LPENR_SRAM1LPEN)) +#define __HAL_RCC_SRAM2_CLK_SLEEP_ENABLE() (RCC->AHB1LPENR |= (RCC_AHB1LPENR_SRAM2LPEN)) +#define __HAL_RCC_BKPSRAM_CLK_SLEEP_ENABLE() (RCC->AHB1LPENR |= (RCC_AHB1LPENR_BKPSRAMLPEN)) +#define __HAL_RCC_DTCM_CLK_SLEEP_ENABLE() (RCC->AHB1LPENR |= (RCC_AHB1LPENR_DTCMLPEN)) +#define __HAL_RCC_DMA2_CLK_SLEEP_ENABLE() (RCC->AHB1LPENR |= (RCC_AHB1LPENR_DMA2LPEN)) +#define __HAL_RCC_USB_OTG_HS_CLK_SLEEP_ENABLE() (RCC->AHB1LPENR |= (RCC_AHB1LPENR_OTGHSLPEN)) +#define __HAL_RCC_USB_OTG_HS_ULPI_CLK_SLEEP_ENABLE() (RCC->AHB1LPENR |= (RCC_AHB1LPENR_OTGHSULPILPEN)) +#define __HAL_RCC_GPIOA_CLK_SLEEP_ENABLE() (RCC->AHB1LPENR |= (RCC_AHB1LPENR_GPIOALPEN)) +#define __HAL_RCC_GPIOB_CLK_SLEEP_ENABLE() (RCC->AHB1LPENR |= (RCC_AHB1LPENR_GPIOBLPEN)) +#define __HAL_RCC_GPIOC_CLK_SLEEP_ENABLE() (RCC->AHB1LPENR |= (RCC_AHB1LPENR_GPIOCLPEN)) +#define __HAL_RCC_GPIOD_CLK_SLEEP_ENABLE() (RCC->AHB1LPENR |= (RCC_AHB1LPENR_GPIODLPEN)) +#define __HAL_RCC_GPIOE_CLK_SLEEP_ENABLE() (RCC->AHB1LPENR |= (RCC_AHB1LPENR_GPIOELPEN)) +#define __HAL_RCC_GPIOF_CLK_SLEEP_ENABLE() (RCC->AHB1LPENR |= (RCC_AHB1LPENR_GPIOFLPEN)) +#define __HAL_RCC_GPIOG_CLK_SLEEP_ENABLE() (RCC->AHB1LPENR |= (RCC_AHB1LPENR_GPIOGLPEN)) +#define __HAL_RCC_GPIOH_CLK_SLEEP_ENABLE() (RCC->AHB1LPENR |= (RCC_AHB1LPENR_GPIOHLPEN)) +#define __HAL_RCC_GPIOI_CLK_SLEEP_ENABLE() (RCC->AHB1LPENR |= (RCC_AHB1LPENR_GPIOILPEN)) + +#define __HAL_RCC_FLITF_CLK_SLEEP_DISABLE() (RCC->AHB1LPENR &= ~(RCC_AHB1LPENR_FLITFLPEN)) +#define __HAL_RCC_AXI_CLK_SLEEP_DISABLE() (RCC->AHB1LPENR &= ~(RCC_AHB1LPENR_AXILPEN)) +#define __HAL_RCC_SRAM1_CLK_SLEEP_DISABLE() (RCC->AHB1LPENR &= ~(RCC_AHB1LPENR_SRAM1LPEN)) +#define __HAL_RCC_SRAM2_CLK_SLEEP_DISABLE() (RCC->AHB1LPENR &= ~(RCC_AHB1LPENR_SRAM2LPEN)) +#define __HAL_RCC_BKPSRAM_CLK_SLEEP_DISABLE() (RCC->AHB1LPENR &= ~(RCC_AHB1LPENR_BKPSRAMLPEN)) +#define __HAL_RCC_DTCM_CLK_SLEEP_DISABLE() (RCC->AHB1LPENR &= ~(RCC_AHB1LPENR_DTCMLPEN)) +#define __HAL_RCC_DMA2_CLK_SLEEP_DISABLE() (RCC->AHB1LPENR &= ~(RCC_AHB1LPENR_DMA2LPEN)) +#define __HAL_RCC_USB_OTG_HS_CLK_SLEEP_DISABLE() (RCC->AHB1LPENR &= ~(RCC_AHB1LPENR_OTGHSLPEN)) +#define __HAL_RCC_USB_OTG_HS_ULPI_CLK_SLEEP_DISABLE() (RCC->AHB1LPENR &= ~(RCC_AHB1LPENR_OTGHSULPILPEN)) +#define __HAL_RCC_GPIOA_CLK_SLEEP_DISABLE() (RCC->AHB1LPENR &= ~(RCC_AHB1LPENR_GPIOALPEN)) +#define __HAL_RCC_GPIOB_CLK_SLEEP_DISABLE() (RCC->AHB1LPENR &= ~(RCC_AHB1LPENR_GPIOBLPEN)) +#define __HAL_RCC_GPIOC_CLK_SLEEP_DISABLE() (RCC->AHB1LPENR &= ~(RCC_AHB1LPENR_GPIOCLPEN)) +#define __HAL_RCC_GPIOD_CLK_SLEEP_DISABLE() (RCC->AHB1LPENR &= ~(RCC_AHB1LPENR_GPIODLPEN)) +#define __HAL_RCC_GPIOE_CLK_SLEEP_DISABLE() (RCC->AHB1LPENR &= ~(RCC_AHB1LPENR_GPIOELPEN)) +#define __HAL_RCC_GPIOF_CLK_SLEEP_DISABLE() (RCC->AHB1LPENR &= ~(RCC_AHB1LPENR_GPIOFLPEN)) +#define __HAL_RCC_GPIOG_CLK_SLEEP_DISABLE() (RCC->AHB1LPENR &= ~(RCC_AHB1LPENR_GPIOGLPEN)) +#define __HAL_RCC_GPIOH_CLK_SLEEP_DISABLE() (RCC->AHB1LPENR &= ~(RCC_AHB1LPENR_GPIOHLPEN)) +#define __HAL_RCC_GPIOI_CLK_SLEEP_DISABLE() (RCC->AHB1LPENR &= ~(RCC_AHB1LPENR_GPIOILPEN)) + +#if defined (STM32F745xx) || defined (STM32F746xx) || defined (STM32F756xx) || defined (STM32F765xx) ||\ + defined (STM32F767xx) || defined (STM32F769xx) || defined (STM32F777xx) || defined (STM32F779xx) ||\ + defined (STM32F750xx) 
+#define __HAL_RCC_DMA2D_CLK_SLEEP_ENABLE() (RCC->AHB1LPENR |= (RCC_AHB1LPENR_DMA2DLPEN)) +#define __HAL_RCC_ETHMAC_CLK_SLEEP_ENABLE() (RCC->AHB1LPENR |= (RCC_AHB1LPENR_ETHMACLPEN)) +#define __HAL_RCC_ETHMACTX_CLK_SLEEP_ENABLE() (RCC->AHB1LPENR |= (RCC_AHB1LPENR_ETHMACTXLPEN)) +#define __HAL_RCC_ETHMACRX_CLK_SLEEP_ENABLE() (RCC->AHB1LPENR |= (RCC_AHB1LPENR_ETHMACRXLPEN)) +#define __HAL_RCC_ETHMACPTP_CLK_SLEEP_ENABLE() (RCC->AHB1LPENR |= (RCC_AHB1LPENR_ETHMACPTPLPEN)) +#define __HAL_RCC_GPIOJ_CLK_SLEEP_ENABLE() (RCC->AHB1LPENR |= (RCC_AHB1LPENR_GPIOJLPEN)) +#define __HAL_RCC_GPIOK_CLK_SLEEP_ENABLE() (RCC->AHB1LPENR |= (RCC_AHB1LPENR_GPIOKLPEN)) + +#define __HAL_RCC_DMA2D_CLK_SLEEP_DISABLE() (RCC->AHB1LPENR &= ~(RCC_AHB1LPENR_DMA2DLPEN)) +#define __HAL_RCC_ETHMAC_CLK_SLEEP_DISABLE() (RCC->AHB1LPENR &= ~(RCC_AHB1LPENR_ETHMACLPEN)) +#define __HAL_RCC_ETHMACTX_CLK_SLEEP_DISABLE() (RCC->AHB1LPENR &= ~(RCC_AHB1LPENR_ETHMACTXLPEN)) +#define __HAL_RCC_ETHMACRX_CLK_SLEEP_DISABLE() (RCC->AHB1LPENR &= ~(RCC_AHB1LPENR_ETHMACRXLPEN)) +#define __HAL_RCC_ETHMACPTP_CLK_SLEEP_DISABLE() (RCC->AHB1LPENR &= ~(RCC_AHB1LPENR_ETHMACPTPLPEN)) +#define __HAL_RCC_GPIOJ_CLK_SLEEP_DISABLE() (RCC->AHB1LPENR &= ~(RCC_AHB1LPENR_GPIOJLPEN)) +#define __HAL_RCC_GPIOK_CLK_SLEEP_DISABLE() (RCC->AHB1LPENR &= ~(RCC_AHB1LPENR_GPIOKLPEN)) +#endif /* STM32F745xx || STM32F746xx || STM32F756xx || STM32F765xx || STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx || STM32F750xx */ + +/** @brief Enable or disable the AHB2 peripheral clock during Low Power (Sleep) mode. + * @note Peripheral clock gating in SLEEP mode can be used to further reduce + * power consumption. + * @note After wakeup from SLEEP mode, the peripheral clock is enabled again. + * @note By default, all peripheral clocks are enabled during SLEEP mode. 
+ */ +#if defined (STM32F745xx) || defined (STM32F746xx) || defined (STM32F756xx) || defined (STM32F765xx) ||\ + defined (STM32F767xx) || defined (STM32F769xx) || defined (STM32F777xx) || defined (STM32F779xx) ||\ + defined (STM32F750xx) +#define __HAL_RCC_DCMI_CLK_SLEEP_ENABLE() (RCC->AHB2LPENR |= (RCC_AHB2LPENR_DCMILPEN)) +#define __HAL_RCC_DCMI_CLK_SLEEP_DISABLE() (RCC->AHB2LPENR &= ~(RCC_AHB2LPENR_DCMILPEN)) +#endif /* STM32F745xx || STM32F746xx || STM32F756xx || STM32F765xx || STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx || STM32F750xx */ + +#if defined (STM32F767xx) || defined (STM32F769xx) || defined (STM32F777xx) || defined (STM32F779xx) +#define __HAL_RCC_JPEG_CLK_SLEEP_ENABLE() (RCC->AHB2LPENR |= (RCC_AHB2LPENR_JPEGLPEN)) +#define __HAL_RCC_JPEG_CLK_SLEEP_DISABLE() (RCC->AHB2LPENR &= ~(RCC_AHB2LPENR_JPEGLPEN)) +#endif /* STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx */ + +#define __HAL_RCC_RNG_CLK_SLEEP_ENABLE() (RCC->AHB2LPENR |= (RCC_AHB2LPENR_RNGLPEN)) +#define __HAL_RCC_RNG_CLK_SLEEP_DISABLE() (RCC->AHB2LPENR &= ~(RCC_AHB2LPENR_RNGLPEN)) + +#define __HAL_RCC_USB_OTG_FS_CLK_SLEEP_ENABLE() (RCC->AHB2LPENR |= (RCC_AHB2LPENR_OTGFSLPEN)) +#define __HAL_RCC_USB_OTG_FS_CLK_SLEEP_DISABLE() (RCC->AHB2LPENR &= ~(RCC_AHB2LPENR_OTGFSLPEN)) + +#if defined(STM32F756xx) || defined (STM32F777xx) || defined (STM32F779xx) || defined (STM32F750xx) +#define __HAL_RCC_CRYP_CLK_SLEEP_ENABLE() (RCC->AHB2LPENR |= (RCC_AHB2LPENR_CRYPLPEN)) +#define __HAL_RCC_HASH_CLK_SLEEP_ENABLE() (RCC->AHB2LPENR |= (RCC_AHB2LPENR_HASHLPEN)) + +#define __HAL_RCC_CRYP_CLK_SLEEP_DISABLE() (RCC->AHB2LPENR &= ~(RCC_AHB2LPENR_CRYPLPEN)) +#define __HAL_RCC_HASH_CLK_SLEEP_DISABLE() (RCC->AHB2LPENR &= ~(RCC_AHB2LPENR_HASHLPEN)) +#endif /* STM32F756xx || STM32F777xx || STM32F779xx || STM32F750xx */ + +#if defined(STM32F732xx) || defined (STM32F733xx) || defined (STM32F730xx) +#define __HAL_RCC_AES_CLK_SLEEP_ENABLE() (RCC->AHB2LPENR |= (RCC_AHB2LPENR_AESLPEN)) +#define __HAL_RCC_AES_CLK_SLEEP_DISABLE() (RCC->AHB2LPENR &= ~(RCC_AHB2LPENR_AESLPEN)) +#endif /* STM32F732xx || STM32F733xx || STM32F730xx */ + +/** @brief Enable or disable the AHB3 peripheral clock during Low Power (Sleep) mode. + * @note Peripheral clock gating in SLEEP mode can be used to further reduce + * power consumption. + * @note After wakeup from SLEEP mode, the peripheral clock is enabled again. + * @note By default, all peripheral clocks are enabled during SLEEP mode. + */ +#define __HAL_RCC_FMC_CLK_SLEEP_ENABLE() (RCC->AHB3LPENR |= (RCC_AHB3LPENR_FMCLPEN)) +#define __HAL_RCC_FMC_CLK_SLEEP_DISABLE() (RCC->AHB3LPENR &= ~(RCC_AHB3LPENR_FMCLPEN)) + +#define __HAL_RCC_QSPI_CLK_SLEEP_ENABLE() (RCC->AHB3LPENR |= (RCC_AHB3LPENR_QSPILPEN)) +#define __HAL_RCC_QSPI_CLK_SLEEP_DISABLE() (RCC->AHB3LPENR &= ~(RCC_AHB3LPENR_QSPILPEN)) + +/** @brief Enable or disable the APB1 peripheral clock during Low Power (Sleep) mode. + * @note Peripheral clock gating in SLEEP mode can be used to further reduce + * power consumption. + * @note After wakeup from SLEEP mode, the peripheral clock is enabled again. + * @note By default, all peripheral clocks are enabled during SLEEP mode. 
+ */ +#define __HAL_RCC_TIM2_CLK_SLEEP_ENABLE() (RCC->APB1LPENR |= (RCC_APB1LPENR_TIM2LPEN)) +#define __HAL_RCC_TIM3_CLK_SLEEP_ENABLE() (RCC->APB1LPENR |= (RCC_APB1LPENR_TIM3LPEN)) +#define __HAL_RCC_TIM4_CLK_SLEEP_ENABLE() (RCC->APB1LPENR |= (RCC_APB1LPENR_TIM4LPEN)) +#define __HAL_RCC_TIM5_CLK_SLEEP_ENABLE() (RCC->APB1LPENR |= (RCC_APB1LPENR_TIM5LPEN)) +#define __HAL_RCC_TIM6_CLK_SLEEP_ENABLE() (RCC->APB1LPENR |= (RCC_APB1LPENR_TIM6LPEN)) +#define __HAL_RCC_TIM7_CLK_SLEEP_ENABLE() (RCC->APB1LPENR |= (RCC_APB1LPENR_TIM7LPEN)) +#define __HAL_RCC_TIM12_CLK_SLEEP_ENABLE() (RCC->APB1LPENR |= (RCC_APB1LPENR_TIM12LPEN)) +#define __HAL_RCC_TIM13_CLK_SLEEP_ENABLE() (RCC->APB1LPENR |= (RCC_APB1LPENR_TIM13LPEN)) +#define __HAL_RCC_TIM14_CLK_SLEEP_ENABLE() (RCC->APB1LPENR |= (RCC_APB1LPENR_TIM14LPEN)) +#define __HAL_RCC_LPTIM1_CLK_SLEEP_ENABLE() (RCC->APB1LPENR |= (RCC_APB1LPENR_LPTIM1LPEN)) +#if defined (STM32F765xx) || defined (STM32F767xx) || defined (STM32F769xx) || defined (STM32F777xx) || defined (STM32F779xx) +#define __HAL_RCC_CAN3_CLK_SLEEP_ENABLE() (RCC->APB1LPENR |= (RCC_APB1LPENR_CAN3LPEN)) +#endif /* STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx */ +#define __HAL_RCC_SPI2_CLK_SLEEP_ENABLE() (RCC->APB1LPENR |= (RCC_APB1LPENR_SPI2LPEN)) +#define __HAL_RCC_SPI3_CLK_SLEEP_ENABLE() (RCC->APB1LPENR |= (RCC_APB1LPENR_SPI3LPEN)) +#define __HAL_RCC_USART2_CLK_SLEEP_ENABLE() (RCC->APB1LPENR |= (RCC_APB1LPENR_USART2LPEN)) +#define __HAL_RCC_USART3_CLK_SLEEP_ENABLE() (RCC->APB1LPENR |= (RCC_APB1LPENR_USART3LPEN)) +#define __HAL_RCC_UART4_CLK_SLEEP_ENABLE() (RCC->APB1LPENR |= (RCC_APB1LPENR_UART4LPEN)) +#define __HAL_RCC_UART5_CLK_SLEEP_ENABLE() (RCC->APB1LPENR |= (RCC_APB1LPENR_UART5LPEN)) +#define __HAL_RCC_I2C1_CLK_SLEEP_ENABLE() (RCC->APB1LPENR |= (RCC_APB1LPENR_I2C1LPEN)) +#define __HAL_RCC_I2C2_CLK_SLEEP_ENABLE() (RCC->APB1LPENR |= (RCC_APB1LPENR_I2C2LPEN)) +#define __HAL_RCC_I2C3_CLK_SLEEP_ENABLE() (RCC->APB1LPENR |= (RCC_APB1LPENR_I2C3LPEN)) +#define __HAL_RCC_CAN1_CLK_SLEEP_ENABLE() (RCC->APB1LPENR |= (RCC_APB1LPENR_CAN1LPEN)) +#define __HAL_RCC_DAC_CLK_SLEEP_ENABLE() (RCC->APB1LPENR |= (RCC_APB1LPENR_DACLPEN)) +#define __HAL_RCC_UART7_CLK_SLEEP_ENABLE() (RCC->APB1LPENR |= (RCC_APB1LPENR_UART7LPEN)) +#define __HAL_RCC_UART8_CLK_SLEEP_ENABLE() (RCC->APB1LPENR |= (RCC_APB1LPENR_UART8LPEN)) + +#define __HAL_RCC_TIM2_CLK_SLEEP_DISABLE() (RCC->APB1LPENR &= ~(RCC_APB1LPENR_TIM2LPEN)) +#define __HAL_RCC_TIM3_CLK_SLEEP_DISABLE() (RCC->APB1LPENR &= ~(RCC_APB1LPENR_TIM3LPEN)) +#define __HAL_RCC_TIM4_CLK_SLEEP_DISABLE() (RCC->APB1LPENR &= ~(RCC_APB1LPENR_TIM4LPEN)) +#define __HAL_RCC_TIM5_CLK_SLEEP_DISABLE() (RCC->APB1LPENR &= ~(RCC_APB1LPENR_TIM5LPEN)) +#define __HAL_RCC_TIM6_CLK_SLEEP_DISABLE() (RCC->APB1LPENR &= ~(RCC_APB1LPENR_TIM6LPEN)) +#define __HAL_RCC_TIM7_CLK_SLEEP_DISABLE() (RCC->APB1LPENR &= ~(RCC_APB1LPENR_TIM7LPEN)) +#define __HAL_RCC_TIM12_CLK_SLEEP_DISABLE() (RCC->APB1LPENR &= ~(RCC_APB1LPENR_TIM12LPEN)) +#define __HAL_RCC_TIM13_CLK_SLEEP_DISABLE() (RCC->APB1LPENR &= ~(RCC_APB1LPENR_TIM13LPEN)) +#define __HAL_RCC_TIM14_CLK_SLEEP_DISABLE() (RCC->APB1LPENR &= ~(RCC_APB1LPENR_TIM14LPEN)) +#define __HAL_RCC_LPTIM1_CLK_SLEEP_DISABLE() (RCC->APB1LPENR &= ~(RCC_APB1LPENR_LPTIM1LPEN)) +#if defined (STM32F765xx) || defined (STM32F767xx) || defined (STM32F769xx) || defined (STM32F777xx) || defined (STM32F779xx) +#define __HAL_RCC_CAN3_CLK_SLEEP_DISABLE() (RCC->APB1LPENR &= ~(RCC_APB1LPENR_CAN3LPEN)) +#endif /* STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx */ +#define 
__HAL_RCC_SPI2_CLK_SLEEP_DISABLE() (RCC->APB1LPENR &= ~(RCC_APB1LPENR_SPI2LPEN)) +#define __HAL_RCC_SPI3_CLK_SLEEP_DISABLE() (RCC->APB1LPENR &= ~(RCC_APB1LPENR_SPI3LPEN)) +#define __HAL_RCC_USART2_CLK_SLEEP_DISABLE() (RCC->APB1LPENR &= ~(RCC_APB1LPENR_USART2LPEN)) +#define __HAL_RCC_USART3_CLK_SLEEP_DISABLE() (RCC->APB1LPENR &= ~(RCC_APB1LPENR_USART3LPEN)) +#define __HAL_RCC_UART4_CLK_SLEEP_DISABLE() (RCC->APB1LPENR &= ~(RCC_APB1LPENR_UART4LPEN)) +#define __HAL_RCC_UART5_CLK_SLEEP_DISABLE() (RCC->APB1LPENR &= ~(RCC_APB1LPENR_UART5LPEN)) +#define __HAL_RCC_I2C1_CLK_SLEEP_DISABLE() (RCC->APB1LPENR &= ~(RCC_APB1LPENR_I2C1LPEN)) +#define __HAL_RCC_I2C2_CLK_SLEEP_DISABLE() (RCC->APB1LPENR &= ~(RCC_APB1LPENR_I2C2LPEN)) +#define __HAL_RCC_I2C3_CLK_SLEEP_DISABLE() (RCC->APB1LPENR &= ~(RCC_APB1LPENR_I2C3LPEN)) +#define __HAL_RCC_CAN1_CLK_SLEEP_DISABLE() (RCC->APB1LPENR &= ~(RCC_APB1LPENR_CAN1LPEN)) +#define __HAL_RCC_DAC_CLK_SLEEP_DISABLE() (RCC->APB1LPENR &= ~(RCC_APB1LPENR_DACLPEN)) +#define __HAL_RCC_UART7_CLK_SLEEP_DISABLE() (RCC->APB1LPENR &= ~(RCC_APB1LPENR_UART7LPEN)) +#define __HAL_RCC_UART8_CLK_SLEEP_DISABLE() (RCC->APB1LPENR &= ~(RCC_APB1LPENR_UART8LPEN)) + +#if defined (STM32F722xx) || defined (STM32F723xx) || defined (STM32F732xx) || defined (STM32F733xx) ||\ + defined (STM32F765xx) || defined (STM32F767xx) || defined (STM32F769xx) || defined (STM32F777xx) ||\ + defined (STM32F779xx) || defined (STM32F730xx) +#define __HAL_RCC_RTC_CLK_SLEEP_ENABLE() (RCC->APB1LPENR |= (RCC_APB1LPENR_RTCLPEN)) +#define __HAL_RCC_RTC_CLK_SLEEP_DISABLE() (RCC->APB1LPENR &= ~(RCC_APB1LPENR_RTCLPEN)) +#endif /* STM32F722xx || STM32F723xx || STM32F732xx || STM32F733xx || STM32F765xx || STM32F767xx || + STM32F769xx || STM32F777xx || STM32F779xx || STM32F730xx */ + +#if defined (STM32F745xx) || defined (STM32F746xx) || defined (STM32F756xx) || defined (STM32F765xx) ||\ + defined (STM32F767xx) || defined (STM32F769xx) || defined (STM32F777xx) || defined (STM32F779xx) ||\ + defined (STM32F750xx) +#define __HAL_RCC_SPDIFRX_CLK_SLEEP_ENABLE() (RCC->APB1LPENR |= (RCC_APB1LPENR_SPDIFRXLPEN)) +#define __HAL_RCC_I2C4_CLK_SLEEP_ENABLE() (RCC->APB1LPENR |= (RCC_APB1LPENR_I2C4LPEN)) +#define __HAL_RCC_CAN2_CLK_SLEEP_ENABLE() (RCC->APB1LPENR |= (RCC_APB1LPENR_CAN2LPEN)) +#define __HAL_RCC_CEC_CLK_SLEEP_ENABLE() (RCC->APB1LPENR |= (RCC_APB1LPENR_CECLPEN)) + +#define __HAL_RCC_SPDIFRX_CLK_SLEEP_DISABLE()(RCC->APB1LPENR &= ~(RCC_APB1LPENR_SPDIFRXLPEN)) +#define __HAL_RCC_I2C4_CLK_SLEEP_DISABLE() (RCC->APB1LPENR &= ~(RCC_APB1LPENR_I2C4LPEN)) +#define __HAL_RCC_CAN2_CLK_SLEEP_DISABLE() (RCC->APB1LPENR &= ~(RCC_APB1LPENR_CAN2LPEN)) +#define __HAL_RCC_CEC_CLK_SLEEP_DISABLE() (RCC->APB1LPENR &= ~(RCC_APB1LPENR_CECLPEN)) +#endif /* STM32F745xx || STM32F746xx || STM32F756xx || STM32F765xx || STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx || STM32F750xx */ + +/** @brief Enable or disable the APB2 peripheral clock during Low Power (Sleep) mode. + * @note Peripheral clock gating in SLEEP mode can be used to further reduce + * power consumption. + * @note After wakeup from SLEEP mode, the peripheral clock is enabled again. + * @note By default, all peripheral clocks are enabled during SLEEP mode. 
+ */ +#define __HAL_RCC_TIM1_CLK_SLEEP_ENABLE() (RCC->APB2LPENR |= (RCC_APB2LPENR_TIM1LPEN)) +#define __HAL_RCC_TIM8_CLK_SLEEP_ENABLE() (RCC->APB2LPENR |= (RCC_APB2LPENR_TIM8LPEN)) +#define __HAL_RCC_USART1_CLK_SLEEP_ENABLE() (RCC->APB2LPENR |= (RCC_APB2LPENR_USART1LPEN)) +#define __HAL_RCC_USART6_CLK_SLEEP_ENABLE() (RCC->APB2LPENR |= (RCC_APB2LPENR_USART6LPEN)) +#define __HAL_RCC_ADC1_CLK_SLEEP_ENABLE() (RCC->APB2LPENR |= (RCC_APB2LPENR_ADC1LPEN)) +#define __HAL_RCC_ADC2_CLK_SLEEP_ENABLE() (RCC->APB2LPENR |= (RCC_APB2LPENR_ADC2LPEN)) +#define __HAL_RCC_ADC3_CLK_SLEEP_ENABLE() (RCC->APB2LPENR |= (RCC_APB2LPENR_ADC3LPEN)) +#define __HAL_RCC_SDMMC1_CLK_SLEEP_ENABLE() (RCC->APB2LPENR |= (RCC_APB2LPENR_SDMMC1LPEN)) +#define __HAL_RCC_SPI1_CLK_SLEEP_ENABLE() (RCC->APB2LPENR |= (RCC_APB2LPENR_SPI1LPEN)) +#define __HAL_RCC_SPI4_CLK_SLEEP_ENABLE() (RCC->APB2LPENR |= (RCC_APB2LPENR_SPI4LPEN)) +#define __HAL_RCC_TIM9_CLK_SLEEP_ENABLE() (RCC->APB2LPENR |= (RCC_APB2LPENR_TIM9LPEN)) +#define __HAL_RCC_TIM10_CLK_SLEEP_ENABLE() (RCC->APB2LPENR |= (RCC_APB2LPENR_TIM10LPEN)) +#define __HAL_RCC_TIM11_CLK_SLEEP_ENABLE() (RCC->APB2LPENR |= (RCC_APB2LPENR_TIM11LPEN)) +#define __HAL_RCC_SPI5_CLK_SLEEP_ENABLE() (RCC->APB2LPENR |= (RCC_APB2LPENR_SPI5LPEN)) +#define __HAL_RCC_SAI1_CLK_SLEEP_ENABLE() (RCC->APB2LPENR |= (RCC_APB2LPENR_SAI1LPEN)) +#define __HAL_RCC_SAI2_CLK_SLEEP_ENABLE() (RCC->APB2LPENR |= (RCC_APB2LPENR_SAI2LPEN)) +#if defined (STM32F746xx) || defined (STM32F756xx) || defined (STM32F767xx) || defined (STM32F769xx) || defined (STM32F777xx) || defined (STM32F779xx) || defined (STM32F750xx) +#define __HAL_RCC_LTDC_CLK_SLEEP_ENABLE() (RCC->APB2LPENR |= (RCC_APB2LPENR_LTDCLPEN)) +#endif /* STM32F746xx || STM32F756xx || STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx || STM32F750xx */ + +#define __HAL_RCC_TIM1_CLK_SLEEP_DISABLE() (RCC->APB2LPENR &= ~(RCC_APB2LPENR_TIM1LPEN)) +#define __HAL_RCC_TIM8_CLK_SLEEP_DISABLE() (RCC->APB2LPENR &= ~(RCC_APB2LPENR_TIM8LPEN)) +#define __HAL_RCC_USART1_CLK_SLEEP_DISABLE() (RCC->APB2LPENR &= ~(RCC_APB2LPENR_USART1LPEN)) +#define __HAL_RCC_USART6_CLK_SLEEP_DISABLE() (RCC->APB2LPENR &= ~(RCC_APB2LPENR_USART6LPEN)) +#define __HAL_RCC_ADC1_CLK_SLEEP_DISABLE() (RCC->APB2LPENR &= ~(RCC_APB2LPENR_ADC1LPEN)) +#define __HAL_RCC_ADC2_CLK_SLEEP_DISABLE() (RCC->APB2LPENR &= ~(RCC_APB2LPENR_ADC2LPEN)) +#define __HAL_RCC_ADC3_CLK_SLEEP_DISABLE() (RCC->APB2LPENR &= ~(RCC_APB2LPENR_ADC3LPEN)) +#define __HAL_RCC_SDMMC1_CLK_SLEEP_DISABLE() (RCC->APB2LPENR &= ~(RCC_APB2LPENR_SDMMC1LPEN)) +#define __HAL_RCC_SPI1_CLK_SLEEP_DISABLE() (RCC->APB2LPENR &= ~(RCC_APB2LPENR_SPI1LPEN)) +#define __HAL_RCC_SPI4_CLK_SLEEP_DISABLE() (RCC->APB2LPENR &= ~(RCC_APB2LPENR_SPI4LPEN)) +#define __HAL_RCC_TIM9_CLK_SLEEP_DISABLE() (RCC->APB2LPENR &= ~(RCC_APB2LPENR_TIM9LPEN)) +#define __HAL_RCC_TIM10_CLK_SLEEP_DISABLE() (RCC->APB2LPENR &= ~(RCC_APB2LPENR_TIM10LPEN)) +#define __HAL_RCC_TIM11_CLK_SLEEP_DISABLE() (RCC->APB2LPENR &= ~(RCC_APB2LPENR_TIM11LPEN)) +#define __HAL_RCC_SPI5_CLK_SLEEP_DISABLE() (RCC->APB2LPENR &= ~(RCC_APB2LPENR_SPI5LPEN)) +#define __HAL_RCC_SAI1_CLK_SLEEP_DISABLE() (RCC->APB2LPENR &= ~(RCC_APB2LPENR_SAI1LPEN)) +#define __HAL_RCC_SAI2_CLK_SLEEP_DISABLE() (RCC->APB2LPENR &= ~(RCC_APB2LPENR_SAI2LPEN)) +#if defined (STM32F746xx) || defined (STM32F756xx) || defined (STM32F767xx) || defined (STM32F769xx) || defined (STM32F777xx) || defined (STM32F779xx)|| defined (STM32F750xx) +#define __HAL_RCC_LTDC_CLK_SLEEP_DISABLE() (RCC->APB2LPENR &= ~(RCC_APB2LPENR_LTDCLPEN)) +#endif /* 
STM32F746xx || STM32F756xx || STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx || STM32F750xx */ +#if defined (STM32F769xx) || defined (STM32F779xx) +#define __HAL_RCC_DSI_CLK_SLEEP_ENABLE() (RCC->APB2LPENR |= (RCC_APB2LPENR_DSILPEN)) +#define __HAL_RCC_DSI_CLK_SLEEP_DISABLE() (RCC->APB2LPENR &= ~(RCC_APB2LPENR_DSILPEN)) +#endif /* STM32F769xx || STM32F779xx */ +#if defined (STM32F765xx) || defined (STM32F767xx) || defined (STM32F769xx) || defined (STM32F777xx) || defined (STM32F779xx) +#define __HAL_RCC_DFSDM1_CLK_SLEEP_ENABLE() (RCC->APB2LPENR |= (RCC_APB2LPENR_DFSDM1LPEN)) +#define __HAL_RCC_MDIO_CLK_SLEEP_ENABLE() (RCC->APB2LPENR |= (RCC_APB2LPENR_MDIOLPEN)) +#define __HAL_RCC_DFSDM1_CLK_SLEEP_DISABLE() (RCC->APB2LPENR &= ~(RCC_APB2LPENR_DFSDM1LPEN)) +#define __HAL_RCC_MDIO_CLK_SLEEP_DISABLE() (RCC->APB2LPENR &= ~(RCC_APB2LPENR_MDIOLPEN)) +#endif /* STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx */ +#if defined (STM32F722xx) || defined (STM32F723xx) || defined (STM32F732xx) || defined (STM32F733xx) || defined (STM32F765xx) ||\ + defined (STM32F767xx) || defined (STM32F769xx) || defined (STM32F777xx) || defined (STM32F779xx) || defined (STM32F730xx) +#define __HAL_RCC_SDMMC2_CLK_SLEEP_ENABLE() (RCC->APB2LPENR |= (RCC_APB2LPENR_SDMMC2LPEN)) +#define __HAL_RCC_SDMMC2_CLK_SLEEP_DISABLE() (RCC->APB2LPENR &= ~(RCC_APB2LPENR_SDMMC2LPEN)) +#endif /* STM32F722xx || STM32F723xx || STM32F732xx || STM32F733xx || STM32F765xx || STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx || STM32F730xx */ + +#if defined (STM32F745xx) || defined (STM32F746xx) || defined (STM32F756xx) || defined (STM32F765xx) ||\ + defined (STM32F767xx) || defined (STM32F769xx) || defined (STM32F777xx) || defined (STM32F779xx) ||\ + defined (STM32F750xx) +#define __HAL_RCC_SPI6_CLK_SLEEP_ENABLE() (RCC->APB2LPENR |= (RCC_APB2LPENR_SPI6LPEN)) +#define __HAL_RCC_SPI6_CLK_SLEEP_DISABLE() (RCC->APB2LPENR &= ~(RCC_APB2LPENR_SPI6LPEN)) +#endif /* STM32F745xx || STM32F746xx || STM32F756xx || STM32F765xx || STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx || STM32F750xx */ +/** + * @} + */ + +/** @defgroup RCC_Clock_Sleep_Enable_Disable_Status AHB/APB Peripheral Clock Sleep Enable Disable Status + * @brief Get the enable or disable status of the AHB/APB peripheral clock during Low Power (Sleep) mode. + * @note Peripheral clock gating in SLEEP mode can be used to further reduce + * power consumption. + * @note After wakeup from SLEEP mode, the peripheral clock is enabled again. + * @note By default, all peripheral clocks are enabled during SLEEP mode. + * @{ + */ + +/** @brief Get the enable or disable status of the AHB1 peripheral clock during Low Power (Sleep) mode. + * @note Peripheral clock gating in SLEEP mode can be used to further reduce + * power consumption. + * @note After wakeup from SLEEP mode, the peripheral clock is enabled again. + * @note By default, all peripheral clocks are enabled during SLEEP mode. 
+ */ +#define __HAL_RCC_FLITF_IS_CLK_SLEEP_ENABLED() ((RCC->AHB1LPENR & (RCC_AHB1LPENR_FLITFLPEN)) != RESET) +#define __HAL_RCC_AXI_IS_CLK_SLEEP_ENABLED() ((RCC->AHB1LPENR & (RCC_AHB1LPENR_AXILPEN)) != RESET) +#define __HAL_RCC_SRAM1_IS_CLK_SLEEP_ENABLED() ((RCC->AHB1LPENR & (RCC_AHB1LPENR_SRAM1LPEN)) != RESET) +#define __HAL_RCC_SRAM2_IS_CLK_SLEEP_ENABLED() ((RCC->AHB1LPENR & (RCC_AHB1LPENR_SRAM2LPEN)) != RESET) +#define __HAL_RCC_BKPSRAM_IS_CLK_SLEEP_ENABLED() ((RCC->AHB1LPENR & (RCC_AHB1LPENR_BKPSRAMLPEN)) != RESET) +#define __HAL_RCC_DTCM_IS_CLK_SLEEP_ENABLED() ((RCC->AHB1LPENR & (RCC_AHB1LPENR_DTCMLPEN)) != RESET) +#define __HAL_RCC_DMA2_IS_CLK_SLEEP_ENABLED() ((RCC->AHB1LPENR & (RCC_AHB1LPENR_DMA2LPEN)) != RESET) +#define __HAL_RCC_USB_OTG_HS_IS_CLK_SLEEP_ENABLED() ((RCC->AHB1LPENR & (RCC_AHB1LPENR_OTGHSLPEN)) != RESET) +#define __HAL_RCC_USB_OTG_HS_ULPI_IS_CLK_SLEEP_ENABLED() ((RCC->AHB1LPENR & (RCC_AHB1LPENR_OTGHSULPILPEN)) != RESET) +#define __HAL_RCC_GPIOA_IS_CLK_SLEEP_ENABLED() ((RCC->AHB1LPENR & (RCC_AHB1LPENR_GPIOALPEN)) != RESET) +#define __HAL_RCC_GPIOB_IS_CLK_SLEEP_ENABLED() ((RCC->AHB1LPENR & (RCC_AHB1LPENR_GPIOBLPEN)) != RESET) +#define __HAL_RCC_GPIOC_IS_CLK_SLEEP_ENABLED() ((RCC->AHB1LPENR & (RCC_AHB1LPENR_GPIOCLPEN)) != RESET) +#define __HAL_RCC_GPIOD_IS_CLK_SLEEP_ENABLED() ((RCC->AHB1LPENR & (RCC_AHB1LPENR_GPIODLPEN)) != RESET) +#define __HAL_RCC_GPIOE_IS_CLK_SLEEP_ENABLED() ((RCC->AHB1LPENR & (RCC_AHB1LPENR_GPIOELPEN)) != RESET) +#define __HAL_RCC_GPIOF_IS_CLK_SLEEP_ENABLED() ((RCC->AHB1LPENR & (RCC_AHB1LPENR_GPIOFLPEN)) != RESET) +#define __HAL_RCC_GPIOG_IS_CLK_SLEEP_ENABLED() ((RCC->AHB1LPENR & (RCC_AHB1LPENR_GPIOGLPEN)) != RESET) +#define __HAL_RCC_GPIOH_IS_CLK_SLEEP_ENABLED() ((RCC->AHB1LPENR & (RCC_AHB1LPENR_GPIOHLPEN)) != RESET) +#define __HAL_RCC_GPIOI_IS_CLK_SLEEP_ENABLED() ((RCC->AHB1LPENR & (RCC_AHB1LPENR_GPIOILPEN)) != RESET) + +#define __HAL_RCC_FLITF_IS_CLK_SLEEP_DISABLED() ((RCC->AHB1LPENR & (RCC_AHB1LPENR_FLITFLPEN)) == RESET) +#define __HAL_RCC_AXI_IS_CLK_SLEEP_DISABLED() ((RCC->AHB1LPENR & (RCC_AHB1LPENR_AXILPEN)) == RESET) +#define __HAL_RCC_SRAM1_IS_CLK_SLEEP_DISABLED() ((RCC->AHB1LPENR & (RCC_AHB1LPENR_SRAM1LPEN)) == RESET) +#define __HAL_RCC_SRAM2_IS_CLK_SLEEP_DISABLED() ((RCC->AHB1LPENR & (RCC_AHB1LPENR_SRAM2LPEN)) == RESET) +#define __HAL_RCC_BKPSRAM_IS_CLK_SLEEP_DISABLED() ((RCC->AHB1LPENR & (RCC_AHB1LPENR_BKPSRAMLPEN)) == RESET) +#define __HAL_RCC_DTCM_IS_CLK_SLEEP_DISABLED() ((RCC->AHB1LPENR & (RCC_AHB1LPENR_DTCMLPEN)) == RESET) +#define __HAL_RCC_DMA2_IS_CLK_SLEEP_DISABLED() ((RCC->AHB1LPENR & (RCC_AHB1LPENR_DMA2LPEN)) == RESET) +#define __HAL_RCC_USB_OTG_HS_IS_CLK_SLEEP_DISABLED() ((RCC->AHB1LPENR & (RCC_AHB1LPENR_OTGHSLPEN)) == RESET) +#define __HAL_RCC_USB_OTG_HS_ULPI_IS_CLK_SLEEP_DISABLED() ((RCC->AHB1LPENR & (RCC_AHB1LPENR_OTGHSULPILPEN)) == RESET) +#define __HAL_RCC_GPIOA_IS_CLK_SLEEP_DISABLED() ((RCC->AHB1LPENR & (RCC_AHB1LPENR_GPIOALPEN)) == RESET) +#define __HAL_RCC_GPIOB_IS_CLK_SLEEP_DISABLED() ((RCC->AHB1LPENR & (RCC_AHB1LPENR_GPIOBLPEN)) == RESET) +#define __HAL_RCC_GPIOC_IS_CLK_SLEEP_DISABLED() ((RCC->AHB1LPENR & (RCC_AHB1LPENR_GPIOCLPEN)) == RESET) +#define __HAL_RCC_GPIOD_IS_CLK_SLEEP_DISABLED() ((RCC->AHB1LPENR & (RCC_AHB1LPENR_GPIODLPEN)) == RESET) +#define __HAL_RCC_GPIOE_IS_CLK_SLEEP_DISABLED() ((RCC->AHB1LPENR & (RCC_AHB1LPENR_GPIOELPEN)) == RESET) +#define __HAL_RCC_GPIOF_IS_CLK_SLEEP_DISABLED() ((RCC->AHB1LPENR & (RCC_AHB1LPENR_GPIOFLPEN)) == RESET) +#define __HAL_RCC_GPIOG_IS_CLK_SLEEP_DISABLED() ((RCC->AHB1LPENR & 
(RCC_AHB1LPENR_GPIOGLPEN)) == RESET) +#define __HAL_RCC_GPIOH_IS_CLK_SLEEP_DISABLED() ((RCC->AHB1LPENR & (RCC_AHB1LPENR_GPIOHLPEN)) == RESET) +#define __HAL_RCC_GPIOI_IS_CLK_SLEEP_DISABLED() ((RCC->AHB1LPENR & (RCC_AHB1LPENR_GPIOILPEN)) == RESET) + +#if defined (STM32F745xx) || defined (STM32F746xx) || defined (STM32F756xx) || defined (STM32F765xx) ||\ + defined (STM32F767xx) || defined (STM32F769xx) || defined (STM32F777xx) || defined (STM32F779xx) ||\ + defined (STM32F750xx) +#define __HAL_RCC_DMA2D_IS_CLK_SLEEP_ENABLED() ((RCC->AHB1LPENR & (RCC_AHB1LPENR_DMA2DLPEN)) != RESET) +#define __HAL_RCC_ETHMAC_IS_CLK_SLEEP_ENABLED() ((RCC->AHB1LPENR & (RCC_AHB1LPENR_ETHMACLPEN)) != RESET) +#define __HAL_RCC_ETHMACTX_IS_CLK_SLEEP_ENABLED() ((RCC->AHB1LPENR & (RCC_AHB1LPENR_ETHMACTXLPEN)) != RESET) +#define __HAL_RCC_ETHMACRX_IS_CLK_SLEEP_ENABLED() ((RCC->AHB1LPENR & (RCC_AHB1LPENR_ETHMACRXLPEN)) != RESET) +#define __HAL_RCC_ETHMACPTP_IS_CLK_SLEEP_ENABLED() ((RCC->AHB1LPENR & (RCC_AHB1LPENR_ETHMACPTPLPEN)) != RESET) +#define __HAL_RCC_GPIOJ_IS_CLK_SLEEP_ENABLED() ((RCC->AHB1LPENR & (RCC_AHB1LPENR_GPIOJLPEN)) != RESET) +#define __HAL_RCC_GPIOK_IS_CLK_SLEEP_ENABLED() ((RCC->AHB1LPENR & (RCC_AHB1LPENR_GPIOKLPEN)) != RESET) + +#define __HAL_RCC_DMA2D_IS_CLK_SLEEP_DISABLED() ((RCC->AHB1LPENR & (RCC_AHB1LPENR_DMA2DLPEN)) == RESET) +#define __HAL_RCC_ETHMAC_IS_CLK_SLEEP_DISABLED() ((RCC->AHB1LPENR & (RCC_AHB1LPENR_ETHMACLPEN)) == RESET) +#define __HAL_RCC_ETHMACTX_IS_CLK_SLEEP_DISABLED() ((RCC->AHB1LPENR & (RCC_AHB1LPENR_ETHMACTXLPEN)) == RESET) +#define __HAL_RCC_ETHMACRX_IS_CLK_SLEEP_DISABLED() ((RCC->AHB1LPENR & (RCC_AHB1LPENR_ETHMACRXLPEN)) == RESET) +#define __HAL_RCC_ETHMACPTP_IS_CLK_SLEEP_DISABLED() ((RCC->AHB1LPENR & (RCC_AHB1LPENR_ETHMACPTPLPEN)) == RESET) +#define __HAL_RCC_GPIOJ_IS_CLK_SLEEP_DISABLED() ((RCC->AHB1LPENR & (RCC_AHB1LPENR_GPIOJLPEN)) == RESET) +#define __HAL_RCC_GPIOK_IS_CLK_SLEEP_DISABLED() ((RCC->AHB1LPENR & (RCC_AHB1LPENR_GPIOKLPEN)) == RESET) +#endif /* STM32F745xx || STM32F746xx || STM32F756xx || STM32F765xx || STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx || STM32F750xx */ + +/** @brief Get the enable or disable status of the AHB2 peripheral clock during Low Power (Sleep) mode. + * @note Peripheral clock gating in SLEEP mode can be used to further reduce + * power consumption. + * @note After wakeup from SLEEP mode, the peripheral clock is enabled again. + * @note By default, all peripheral clocks are enabled during SLEEP mode. 
+ */ +#if defined (STM32F745xx) || defined (STM32F746xx) || defined (STM32F756xx) || defined (STM32F765xx) ||\ + defined (STM32F767xx) || defined (STM32F769xx) || defined (STM32F777xx) || defined (STM32F779xx) ||\ + defined (STM32F750xx) +#define __HAL_RCC_DCMI_IS_CLK_SLEEP_ENABLED() ((RCC->AHB2LPENR & (RCC_AHB2LPENR_DCMILPEN)) != RESET) +#define __HAL_RCC_DCMI_IS_CLK_SLEEP_DISABLED() ((RCC->AHB2LPENR & (RCC_AHB2LPENR_DCMILPEN)) == RESET) +#endif /* STM32F745xx || STM32F746xx || STM32F756xx || STM32F765xx || STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx || STM32F750xx */ + +#if defined(STM32F767xx) || defined(STM32F769xx) || defined (STM32F777xx) || defined (STM32F779xx) +#define __HAL_RCC_JPEG_IS_CLK_SLEEP_ENABLED() ((RCC->AHB2LPENR & (RCC_AHB2LPENR_JPEGLPEN)) != RESET) +#define __HAL_RCC_JPEG_IS_CLK_SLEEP_DISABLED() ((RCC->AHB2LPENR & (RCC_AHB2LPENR_JPEGLPEN)) == RESET) +#endif /* STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx */ + +#define __HAL_RCC_RNG_IS_CLK_SLEEP_ENABLED() ((RCC->AHB2LPENR & (RCC_AHB2LPENR_RNGLPEN)) != RESET) +#define __HAL_RCC_RNG_IS_CLK_SLEEP_DISABLED() ((RCC->AHB2LPENR & (RCC_AHB2LPENR_RNGLPEN)) == RESET) + +#define __HAL_RCC_USB_OTG_FS_IS_CLK_SLEEP_ENABLED() ((RCC->AHB2LPENR & (RCC_AHB2LPENR_OTGFSLPEN)) != RESET) +#define __HAL_RCC_USB_OTG_FS_IS_CLK_SLEEP_DISABLED() ((RCC->AHB2LPENR & (RCC_AHB2LPENR_OTGFSLPEN)) == RESET) + +#if defined(STM32F756xx) || defined (STM32F777xx) || defined (STM32F779xx) || defined (STM32F750xx) +#define __HAL_RCC_CRYP_IS_CLK_SLEEP_ENABLED() ((RCC->AHB2LPENR & (RCC_AHB2LPENR_CRYPLPEN)) != RESET) +#define __HAL_RCC_HASH_IS_CLK_SLEEP_ENABLED() ((RCC->AHB2LPENR & (RCC_AHB2LPENR_HASHLPEN)) != RESET) + +#define __HAL_RCC_CRYP_IS_CLK_SLEEP_DISABLED() ((RCC->AHB2LPENR & (RCC_AHB2LPENR_CRYPLPEN)) == RESET) +#define __HAL_RCC_HASH_IS_CLK_SLEEP_DISABLED() ((RCC->AHB2LPENR & (RCC_AHB2LPENR_HASHLPEN)) == RESET) +#endif /* STM32F756xx || STM32F777xx || STM32F779xx || STM32F750xx */ + +#if defined(STM32F732xx) || defined (STM32F733xx) || defined (STM32F730xx) +#define __HAL_RCC_AES_IS_CLK_SLEEP_ENABLED() ((RCC->AHB2LPENR & (RCC_AHB2LPENR_AESLPEN)) != RESET) +#define __HAL_RCC_AES_IS_CLK_SLEEP_DISABLED() ((RCC->AHB2LPENR & (RCC_AHB2LPENR_AESLPEN)) == RESET) +#endif /* STM32F732xx || STM32F733xx || STM32F730xx */ + +/** @brief Get the enable or disable status of the AHB3 peripheral clock during Low Power (Sleep) mode. + * @note Peripheral clock gating in SLEEP mode can be used to further reduce + * power consumption. + * @note After wakeup from SLEEP mode, the peripheral clock is enabled again. + * @note By default, all peripheral clocks are enabled during SLEEP mode. + */ +#define __HAL_RCC_FMC_IS_CLK_SLEEP_ENABLED() ((RCC->AHB3LPENR & (RCC_AHB3LPENR_FMCLPEN)) != RESET) +#define __HAL_RCC_FMC_IS_CLK_SLEEP_DISABLED() ((RCC->AHB3LPENR & (RCC_AHB3LPENR_FMCLPEN)) == RESET) + +#define __HAL_RCC_QSPI_IS_CLK_SLEEP_ENABLED() ((RCC->AHB3LPENR & (RCC_AHB3LPENR_QSPILPEN)) != RESET) +#define __HAL_RCC_QSPI_IS_CLK_SLEEP_DISABLED() ((RCC->AHB3LPENR & (RCC_AHB3LPENR_QSPILPEN)) == RESET) + +/** @brief Get the enable or disable status of the APB1 peripheral clock during Low Power (Sleep) mode. + * @note Peripheral clock gating in SLEEP mode can be used to further reduce + * power consumption. + * @note After wakeup from SLEEP mode, the peripheral clock is enabled again. + * @note By default, all peripheral clocks are enabled during SLEEP mode. 
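As an aside on the sleep-gating status macros above, a minimal usage sketch (an editorial illustration, not part of this diff): it pairs an AHB1 status macro from this header with the corresponding __HAL_RCC_GPIOB_CLK_SLEEP_DISABLE() macro, which is assumed here to come from the base stm32f7xx_hal_rcc.h rather than this extended header.

    /* Gate the GPIOB clock while the CPU is in SLEEP mode, then confirm the LPENR bit is clear. */
    __HAL_RCC_GPIOB_CLK_SLEEP_DISABLE();
    if (__HAL_RCC_GPIOB_IS_CLK_SLEEP_DISABLED())
    {
      /* GPIOB is unclocked only while the core sleeps; per the notes above it is clocked again after wakeup. */
    }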
+ */ +#define __HAL_RCC_TIM2_IS_CLK_SLEEP_ENABLED() ((RCC->APB1LPENR & (RCC_APB1LPENR_TIM2LPEN)) != RESET) +#define __HAL_RCC_TIM3_IS_CLK_SLEEP_ENABLED() ((RCC->APB1LPENR & (RCC_APB1LPENR_TIM3LPEN)) != RESET) +#define __HAL_RCC_TIM4_IS_CLK_SLEEP_ENABLED() ((RCC->APB1LPENR & (RCC_APB1LPENR_TIM4LPEN)) != RESET) +#define __HAL_RCC_TIM5_IS_CLK_SLEEP_ENABLED() ((RCC->APB1LPENR & (RCC_APB1LPENR_TIM5LPEN)) != RESET) +#define __HAL_RCC_TIM6_IS_CLK_SLEEP_ENABLED() ((RCC->APB1LPENR & (RCC_APB1LPENR_TIM6LPEN)) != RESET) +#define __HAL_RCC_TIM7_IS_CLK_SLEEP_ENABLED() ((RCC->APB1LPENR & (RCC_APB1LPENR_TIM7LPEN)) != RESET) +#define __HAL_RCC_TIM12_IS_CLK_SLEEP_ENABLED() ((RCC->APB1LPENR & (RCC_APB1LPENR_TIM12LPEN)) != RESET) +#define __HAL_RCC_TIM13_IS_CLK_SLEEP_ENABLED() ((RCC->APB1LPENR & (RCC_APB1LPENR_TIM13LPEN)) != RESET) +#define __HAL_RCC_TIM14_IS_CLK_SLEEP_ENABLED() ((RCC->APB1LPENR & (RCC_APB1LPENR_TIM14LPEN)) != RESET) +#define __HAL_RCC_LPTIM1_IS_CLK_SLEEP_ENABLED() ((RCC->APB1LPENR & (RCC_APB1LPENR_LPTIM1LPEN)) != RESET) +#if defined (STM32F722xx) || defined (STM32F723xx) || defined (STM32F732xx) || defined (STM32F733xx) ||\ + defined (STM32F765xx) || defined (STM32F767xx) || defined (STM32F769xx) || defined (STM32F777xx) ||\ + defined (STM32F779xx) || defined (STM32F730xx) +#define __HAL_RCC_RTC_IS_CLK_SLEEP_ENABLED() ((RCC->APB1LPENR & (RCC_APB1LPENR_RTCLPEN)) != RESET) +#endif /* STM32F722xx || STM32F723xx || STM32F732xx || STM32F733xx || STM32F765xx || STM32F767xx || + STM32F769xx || STM32F777xx || STM32F779xx || STM32F730xx */ +#if defined (STM32F765xx) || defined (STM32F767xx) || defined (STM32F769xx) || defined (STM32F777xx) || defined (STM32F779xx) +#define __HAL_RCC_CAN3_IS_CLK_SLEEP_ENABLED() ((RCC->APB1LPENR & (RCC_APB1LPENR_CAN3LPEN)) != RESET) +#endif /* STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx */ +#define __HAL_RCC_SPI2_IS_CLK_SLEEP_ENABLED() ((RCC->APB1LPENR & (RCC_APB1LPENR_SPI2LPEN)) != RESET) +#define __HAL_RCC_SPI3_IS_CLK_SLEEP_ENABLED() ((RCC->APB1LPENR & (RCC_APB1LPENR_SPI3LPEN)) != RESET) +#define __HAL_RCC_USART2_IS_CLK_SLEEP_ENABLED() ((RCC->APB1LPENR & (RCC_APB1LPENR_USART2LPEN)) != RESET) +#define __HAL_RCC_USART3_IS_CLK_SLEEP_ENABLED() ((RCC->APB1LPENR & (RCC_APB1LPENR_USART3LPEN)) != RESET) +#define __HAL_RCC_UART4_IS_CLK_SLEEP_ENABLED() ((RCC->APB1LPENR & (RCC_APB1LPENR_UART4LPEN)) != RESET) +#define __HAL_RCC_UART5_IS_CLK_SLEEP_ENABLED() ((RCC->APB1LPENR & (RCC_APB1LPENR_UART5LPEN)) != RESET) +#define __HAL_RCC_I2C1_IS_CLK_SLEEP_ENABLED() ((RCC->APB1LPENR & (RCC_APB1LPENR_I2C1LPEN)) != RESET) +#define __HAL_RCC_I2C2_IS_CLK_SLEEP_ENABLED() ((RCC->APB1LPENR & (RCC_APB1LPENR_I2C2LPEN)) != RESET) +#define __HAL_RCC_I2C3_IS_CLK_SLEEP_ENABLED() ((RCC->APB1LPENR & (RCC_APB1LPENR_I2C3LPEN)) != RESET) +#define __HAL_RCC_CAN1_IS_CLK_SLEEP_ENABLED() ((RCC->APB1LPENR & (RCC_APB1LPENR_CAN1LPEN)) != RESET) +#define __HAL_RCC_DAC_IS_CLK_SLEEP_ENABLED() ((RCC->APB1LPENR & (RCC_APB1LPENR_DACLPEN)) != RESET) +#define __HAL_RCC_UART7_IS_CLK_SLEEP_ENABLED() ((RCC->APB1LPENR & (RCC_APB1LPENR_UART7LPEN)) != RESET) +#define __HAL_RCC_UART8_IS_CLK_SLEEP_ENABLED() ((RCC->APB1LPENR & (RCC_APB1LPENR_UART8LPEN)) != RESET) + +#define __HAL_RCC_TIM2_IS_CLK_SLEEP_DISABLED() ((RCC->APB1LPENR & (RCC_APB1LPENR_TIM2LPEN)) == RESET) +#define __HAL_RCC_TIM3_IS_CLK_SLEEP_DISABLED() ((RCC->APB1LPENR & (RCC_APB1LPENR_TIM3LPEN)) == RESET) +#define __HAL_RCC_TIM4_IS_CLK_SLEEP_DISABLED() ((RCC->APB1LPENR & (RCC_APB1LPENR_TIM4LPEN)) == RESET) +#define __HAL_RCC_TIM5_IS_CLK_SLEEP_DISABLED() 
((RCC->APB1LPENR & (RCC_APB1LPENR_TIM5LPEN)) == RESET) +#define __HAL_RCC_TIM6_IS_CLK_SLEEP_DISABLED() ((RCC->APB1LPENR & (RCC_APB1LPENR_TIM6LPEN)) == RESET) +#define __HAL_RCC_TIM7_IS_CLK_SLEEP_DISABLED() ((RCC->APB1LPENR & (RCC_APB1LPENR_TIM7LPEN)) == RESET) +#define __HAL_RCC_TIM12_IS_CLK_SLEEP_DISABLED() ((RCC->APB1LPENR & (RCC_APB1LPENR_TIM12LPEN)) == RESET) +#define __HAL_RCC_TIM13_IS_CLK_SLEEP_DISABLED() ((RCC->APB1LPENR & (RCC_APB1LPENR_TIM13LPEN)) == RESET) +#define __HAL_RCC_TIM14_IS_CLK_SLEEP_DISABLED() ((RCC->APB1LPENR & (RCC_APB1LPENR_TIM14LPEN)) == RESET) +#define __HAL_RCC_LPTIM1_IS_CLK_SLEEP_DISABLED() ((RCC->APB1LPENR & (RCC_APB1LPENR_LPTIM1LPEN)) == RESET) +#if defined (STM32F722xx) || defined (STM32F723xx) || defined (STM32F732xx) || defined (STM32F733xx) ||\ + defined (STM32F765xx) || defined (STM32F767xx) || defined (STM32F769xx) || defined (STM32F777xx) ||\ + defined (STM32F779xx) || defined (STM32F730xx) +#define __HAL_RCC_RTC_IS_CLK_SLEEP_DISABLED() ((RCC->APB1LPENR & (RCC_APB1LPENR_RTCLPEN)) == RESET) +#endif /* STM32F722xx || STM32F723xx || STM32F732xx || STM32F733xx || STM32F765xx || STM32F767xx || + STM32F769xx || STM32F777xx || STM32F779xx || STM32F730xx */ +#if defined (STM32F765xx) || defined (STM32F767xx) || defined (STM32F769xx) || defined (STM32F777xx) || defined (STM32F779xx) +#define __HAL_RCC_CAN3_IS_CLK_SLEEP_DISABLED() ((RCC->APB1LPENR & (RCC_APB1LPENR_CAN3LPEN)) == RESET) +#endif /* STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx */ +#define __HAL_RCC_SPI2_IS_CLK_SLEEP_DISABLED() ((RCC->APB1LPENR & (RCC_APB1LPENR_SPI2LPEN)) == RESET) +#define __HAL_RCC_SPI3_IS_CLK_SLEEP_DISABLED() ((RCC->APB1LPENR & (RCC_APB1LPENR_SPI3LPEN)) == RESET) +#define __HAL_RCC_USART2_IS_CLK_SLEEP_DISABLED() ((RCC->APB1LPENR & (RCC_APB1LPENR_USART2LPEN)) == RESET) +#define __HAL_RCC_USART3_IS_CLK_SLEEP_DISABLED() ((RCC->APB1LPENR & (RCC_APB1LPENR_USART3LPEN)) == RESET) +#define __HAL_RCC_UART4_IS_CLK_SLEEP_DISABLED() ((RCC->APB1LPENR & (RCC_APB1LPENR_UART4LPEN)) == RESET) +#define __HAL_RCC_UART5_IS_CLK_SLEEP_DISABLED() ((RCC->APB1LPENR & (RCC_APB1LPENR_UART5LPEN)) == RESET) +#define __HAL_RCC_I2C1_IS_CLK_SLEEP_DISABLED() ((RCC->APB1LPENR & (RCC_APB1LPENR_I2C1LPEN)) == RESET) +#define __HAL_RCC_I2C2_IS_CLK_SLEEP_DISABLED() ((RCC->APB1LPENR & (RCC_APB1LPENR_I2C2LPEN)) == RESET) +#define __HAL_RCC_I2C3_IS_CLK_SLEEP_DISABLED() ((RCC->APB1LPENR & (RCC_APB1LPENR_I2C3LPEN)) == RESET) +#define __HAL_RCC_CAN1_IS_CLK_SLEEP_DISABLED() ((RCC->APB1LPENR & (RCC_APB1LPENR_CAN1LPEN)) == RESET) +#define __HAL_RCC_DAC_IS_CLK_SLEEP_DISABLED() ((RCC->APB1LPENR & (RCC_APB1LPENR_DACLPEN)) == RESET) +#define __HAL_RCC_UART7_IS_CLK_SLEEP_DISABLED() ((RCC->APB1LPENR & (RCC_APB1LPENR_UART7LPEN)) == RESET) +#define __HAL_RCC_UART8_IS_CLK_SLEEP_DISABLED() ((RCC->APB1LPENR & (RCC_APB1LPENR_UART8LPEN)) == RESET) + +#if defined (STM32F745xx) || defined (STM32F746xx) || defined (STM32F756xx) || defined (STM32F765xx) ||\ + defined (STM32F767xx) || defined (STM32F769xx) || defined (STM32F777xx) || defined (STM32F779xx) ||\ + defined (STM32F750xx) +#define __HAL_RCC_SPDIFRX_IS_CLK_SLEEP_ENABLED() ((RCC->APB1LPENR & (RCC_APB1LPENR_SPDIFRXLPEN)) != RESET) +#define __HAL_RCC_I2C4_IS_CLK_SLEEP_ENABLED() ((RCC->APB1LPENR & (RCC_APB1LPENR_I2C4LPEN)) != RESET) +#define __HAL_RCC_CAN2_IS_CLK_SLEEP_ENABLED() ((RCC->APB1LPENR & (RCC_APB1LPENR_CAN2LPEN)) != RESET) +#define __HAL_RCC_CEC_IS_CLK_SLEEP_ENABLED() ((RCC->APB1LPENR & (RCC_APB1LPENR_CECLPEN)) != RESET) + +#define 
__HAL_RCC_SPDIFRX_IS_CLK_SLEEP_DISABLED()((RCC->APB1LPENR & (RCC_APB1LPENR_SPDIFRXLPEN)) == RESET) +#define __HAL_RCC_I2C4_IS_CLK_SLEEP_DISABLED() ((RCC->APB1LPENR & (RCC_APB1LPENR_I2C4LPEN)) == RESET) +#define __HAL_RCC_CAN2_IS_CLK_SLEEP_DISABLED() ((RCC->APB1LPENR & (RCC_APB1LPENR_CAN2LPEN)) == RESET) +#define __HAL_RCC_CEC_IS_CLK_SLEEP_DISABLED() ((RCC->APB1LPENR & (RCC_APB1LPENR_CECLPEN)) == RESET) +#endif /* STM32F745xx || STM32F746xx || STM32F756xx || STM32F765xx || STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx || STM32F750xx */ + +/** @brief Get the enable or disable status of the APB2 peripheral clock during Low Power (Sleep) mode. + * @note Peripheral clock gating in SLEEP mode can be used to further reduce + * power consumption. + * @note After wakeup from SLEEP mode, the peripheral clock is enabled again. + * @note By default, all peripheral clocks are enabled during SLEEP mode. + */ +#define __HAL_RCC_TIM1_IS_CLK_SLEEP_ENABLED() ((RCC->APB2LPENR & (RCC_APB2LPENR_TIM1LPEN)) != RESET) +#define __HAL_RCC_TIM8_IS_CLK_SLEEP_ENABLED() ((RCC->APB2LPENR & (RCC_APB2LPENR_TIM8LPEN)) != RESET) +#define __HAL_RCC_USART1_IS_CLK_SLEEP_ENABLED() ((RCC->APB2LPENR & (RCC_APB2LPENR_USART1LPEN)) != RESET) +#define __HAL_RCC_USART6_IS_CLK_SLEEP_ENABLED() ((RCC->APB2LPENR & (RCC_APB2LPENR_USART6LPEN)) != RESET) +#define __HAL_RCC_ADC1_IS_CLK_SLEEP_ENABLED() ((RCC->APB2LPENR & (RCC_APB2LPENR_ADC1LPEN)) != RESET) +#define __HAL_RCC_ADC2_IS_CLK_SLEEP_ENABLED() ((RCC->APB2LPENR & (RCC_APB2LPENR_ADC2LPEN)) != RESET) +#define __HAL_RCC_ADC3_IS_CLK_SLEEP_ENABLED() ((RCC->APB2LPENR & (RCC_APB2LPENR_ADC3LPEN)) != RESET) +#define __HAL_RCC_SDMMC1_IS_CLK_SLEEP_ENABLED() ((RCC->APB2LPENR & (RCC_APB2LPENR_SDMMC1LPEN)) != RESET) +#define __HAL_RCC_SPI1_IS_CLK_SLEEP_ENABLED() ((RCC->APB2LPENR & (RCC_APB2LPENR_SPI1LPEN)) != RESET) +#define __HAL_RCC_SPI4_IS_CLK_SLEEP_ENABLED() ((RCC->APB2LPENR & (RCC_APB2LPENR_SPI4LPEN)) != RESET) +#define __HAL_RCC_TIM9_IS_CLK_SLEEP_ENABLED() ((RCC->APB2LPENR & (RCC_APB2LPENR_TIM9LPEN)) != RESET) +#define __HAL_RCC_TIM10_IS_CLK_SLEEP_ENABLED() ((RCC->APB2LPENR & (RCC_APB2LPENR_TIM10LPEN)) != RESET) +#define __HAL_RCC_TIM11_IS_CLK_SLEEP_ENABLED() ((RCC->APB2LPENR & (RCC_APB2LPENR_TIM11LPEN)) != RESET) +#define __HAL_RCC_SPI5_IS_CLK_SLEEP_ENABLED() ((RCC->APB2LPENR & (RCC_APB2LPENR_SPI5LPEN)) != RESET) +#define __HAL_RCC_SAI1_IS_CLK_SLEEP_ENABLED() ((RCC->APB2LPENR & (RCC_APB2LPENR_SAI1LPEN)) != RESET) +#define __HAL_RCC_SAI2_IS_CLK_SLEEP_ENABLED() ((RCC->APB2LPENR & (RCC_APB2LPENR_SAI2LPEN)) != RESET) +#if defined (STM32F746xx) || defined (STM32F756xx) || defined (STM32F767xx) || defined (STM32F769xx) || defined (STM32F777xx) || defined (STM32F779xx) || defined (STM32F750xx) +#define __HAL_RCC_LTDC_IS_CLK_SLEEP_ENABLED() ((RCC->APB2LPENR & (RCC_APB2LPENR_LTDCLPEN)) != RESET) +#endif /* STM32F746xx || STM32F756xx || STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx || STM32F750xx */ +#if defined (STM32F769xx) || defined (STM32F779xx) +#define __HAL_RCC_DSI_IS_CLK_SLEEP_ENABLED() ((RCC->APB2LPENR & (RCC_APB2LPENR_DSILPEN)) != RESET) +#endif /* STM32F769xx || STM32F779xx */ +#if defined (STM32F722xx) || defined (STM32F723xx) || defined (STM32F732xx) || defined (STM32F733xx) || defined (STM32F765xx) ||\ + defined (STM32F767xx) || defined (STM32F769xx) || defined (STM32F777xx) || defined (STM32F779xx) || defined (STM32F730xx) +#define __HAL_RCC_SDMMC2_IS_CLK_SLEEP_ENABLED() ((RCC->APB2LPENR & (RCC_APB2LPENR_SDMMC2LPEN)) != RESET) +#endif /* STM32F722xx || STM32F723xx 
|| STM32F732xx || STM32F733xx || STM32F765xx || STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx || STM32F730xx */ +#if defined (STM32F765xx) || defined (STM32F767xx) || defined (STM32F769xx) || defined (STM32F777xx) || defined (STM32F779xx) +#define __HAL_RCC_DFSDM1_IS_CLK_SLEEP_ENABLED() ((RCC->APB2LPENR & (RCC_APB2LPENR_DFSDM1LPEN)) != RESET) +#define __HAL_RCC_MDIO_IS_CLK_SLEEP_ENABLED() ((RCC->APB2LPENR & (RCC_APB2LPENR_MDIOLPEN)) != RESET) +#endif /* STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx */ + +#define __HAL_RCC_TIM1_IS_CLK_SLEEP_DISABLED() ((RCC->APB2LPENR & (RCC_APB2LPENR_TIM1LPEN)) == RESET) +#define __HAL_RCC_TIM8_IS_CLK_SLEEP_DISABLED() ((RCC->APB2LPENR & (RCC_APB2LPENR_TIM8LPEN)) == RESET) +#define __HAL_RCC_USART1_IS_CLK_SLEEP_DISABLED() ((RCC->APB2LPENR & (RCC_APB2LPENR_USART1LPEN)) == RESET) +#define __HAL_RCC_USART6_IS_CLK_SLEEP_DISABLED() ((RCC->APB2LPENR & (RCC_APB2LPENR_USART6LPEN)) == RESET) +#define __HAL_RCC_ADC1_IS_CLK_SLEEP_DISABLED() ((RCC->APB2LPENR & (RCC_APB2LPENR_ADC1LPEN)) == RESET) +#define __HAL_RCC_ADC2_IS_CLK_SLEEP_DISABLED() ((RCC->APB2LPENR & (RCC_APB2LPENR_ADC2LPEN)) == RESET) +#define __HAL_RCC_ADC3_IS_CLK_SLEEP_DISABLED() ((RCC->APB2LPENR & (RCC_APB2LPENR_ADC3LPEN)) == RESET) +#define __HAL_RCC_SDMMC1_IS_CLK_SLEEP_DISABLED() ((RCC->APB2LPENR & (RCC_APB2LPENR_SDMMC1LPEN)) == RESET) +#define __HAL_RCC_SPI1_IS_CLK_SLEEP_DISABLED() ((RCC->APB2LPENR & (RCC_APB2LPENR_SPI1LPEN)) == RESET) +#define __HAL_RCC_SPI4_IS_CLK_SLEEP_DISABLED() ((RCC->APB2LPENR & (RCC_APB2LPENR_SPI4LPEN)) == RESET) +#define __HAL_RCC_TIM9_IS_CLK_SLEEP_DISABLED() ((RCC->APB2LPENR & (RCC_APB2LPENR_TIM9LPEN)) == RESET) +#define __HAL_RCC_TIM10_IS_CLK_SLEEP_DISABLED() ((RCC->APB2LPENR & (RCC_APB2LPENR_TIM10LPEN)) == RESET) +#define __HAL_RCC_TIM11_IS_CLK_SLEEP_DISABLED() ((RCC->APB2LPENR & (RCC_APB2LPENR_TIM11LPEN)) == RESET) +#define __HAL_RCC_SPI5_IS_CLK_SLEEP_DISABLED() ((RCC->APB2LPENR & (RCC_APB2LPENR_SPI5LPEN)) == RESET) +#define __HAL_RCC_SAI1_IS_CLK_SLEEP_DISABLED() ((RCC->APB2LPENR & (RCC_APB2LPENR_SAI1LPEN)) == RESET) +#define __HAL_RCC_SAI2_IS_CLK_SLEEP_DISABLED() ((RCC->APB2LPENR & (RCC_APB2LPENR_SAI2LPEN)) == RESET) +#if defined (STM32F746xx) || defined (STM32F756xx) || defined (STM32F767xx) || defined (STM32F769xx) || defined (STM32F777xx) || defined (STM32F779xx) || defined (STM32F750xx) +#define __HAL_RCC_LTDC_IS_CLK_SLEEP_DISABLED() ((RCC->APB2LPENR & (RCC_APB2LPENR_LTDCLPEN)) == RESET) +#endif /* STM32F746xx || STM32F756xx || STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx || STM32F750xx */ +#if defined (STM32F769xx) || defined (STM32F779xx) +#define __HAL_RCC_DSI_IS_CLK_SLEEP_DISABLED() ((RCC->APB2LPENR & (RCC_APB2LPENR_DSILPEN)) == RESET) +#endif /* STM32F769xx || STM32F779xx */ +#if defined (STM32F722xx) || defined (STM32F723xx) || defined (STM32F732xx) || defined (STM32F733xx) || defined (STM32F765xx) ||\ + defined (STM32F767xx) || defined (STM32F769xx) || defined (STM32F777xx) || defined (STM32F779xx) || defined (STM32F730xx) +#define __HAL_RCC_SDMMC2_IS_CLK_SLEEP_DISABLED() ((RCC->APB2LPENR & (RCC_APB2LPENR_SDMMC2LPEN)) == RESET) +#endif /* STM32F722xx || STM32F723xx || STM32F732xx || STM32F733xx || STM32F765xx || STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx || STM32F730xx */ +#if defined (STM32F765xx) || defined (STM32F767xx) || defined (STM32F769xx) || defined (STM32F777xx) || defined (STM32F779xx) +#define __HAL_RCC_DFSDM1_IS_CLK_SLEEP_DISABLED() ((RCC->APB2LPENR & (RCC_APB2LPENR_DFSDM1LPEN)) == RESET) +#define 
__HAL_RCC_MDIO_IS_CLK_SLEEP_DISABLED() ((RCC->APB2LPENR & (RCC_APB2LPENR_MDIOLPEN)) == RESET) +#endif /* STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx */ + +#if defined (STM32F745xx) || defined (STM32F746xx) || defined (STM32F756xx) || defined (STM32F765xx) ||\ + defined (STM32F767xx) || defined (STM32F769xx) || defined (STM32F777xx) || defined (STM32F779xx) ||\ + defined (STM32F750xx) +#define __HAL_RCC_SPI6_IS_CLK_SLEEP_ENABLED() ((RCC->APB2LPENR & (RCC_APB2LPENR_SPI6LPEN)) != RESET) +#define __HAL_RCC_SPI6_IS_CLK_SLEEP_DISABLED() ((RCC->APB2LPENR & (RCC_APB2LPENR_SPI6LPEN)) == RESET) +#endif /* STM32F745xx || STM32F746xx || STM32F756xx || STM32F765xx || STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx || STM32F750xx */ +/** + * @} + */ + +/*------------------------------- PLL Configuration --------------------------*/ +#if defined (STM32F765xx) || defined (STM32F767xx) || defined (STM32F769xx) || defined (STM32F777xx) || defined (STM32F779xx) +/** @brief Macro to configure the main PLL clock source, multiplication and division factors. + * @note This function must be used only when the main PLL is disabled. + * @param __RCC_PLLSource__ specifies the PLL entry clock source. + * This parameter can be one of the following values: + * @arg RCC_PLLSOURCE_HSI: HSI oscillator clock selected as PLL clock entry + * @arg RCC_PLLSOURCE_HSE: HSE oscillator clock selected as PLL clock entry + * @note This clock source (RCC_PLLSource) is common for the main PLL and PLLI2S. + * @param __PLLM__ specifies the division factor for PLL VCO input clock + * This parameter must be a number between Min_Data = 2 and Max_Data = 63. + * @note You have to set the PLLM parameter correctly to ensure that the VCO input + * frequency ranges from 1 to 2 MHz. It is recommended to select a frequency + * of 2 MHz to limit PLL jitter. + * @param __PLLN__ specifies the multiplication factor for PLL VCO output clock + * This parameter must be a number between Min_Data = 50 and Max_Data = 432. + * @note You have to set the PLLN parameter correctly to ensure that the VCO + * output frequency is between 100 and 432 MHz. + * @param __PLLP__ specifies the division factor for main system clock (SYSCLK) + * This parameter must be a number in the range {2, 4, 6, or 8}. + * @note You have to set the PLLP parameter correctly to not exceed 216 MHz on + * the System clock frequency. + * @param __PLLQ__ specifies the division factor for OTG FS, SDMMC and RNG clocks + * This parameter must be a number between Min_Data = 2 and Max_Data = 15. + * @note If the USB OTG FS is used in your application, you have to set the + * PLLQ parameter correctly to have 48 MHz clock for the USB. However, + * the SDMMC and RNG need a frequency lower than or equal to 48 MHz to work + * correctly. + * @param __PLLR__ specifies the division factor for DSI clock + * This parameter must be a number between Min_Data = 2 and Max_Data = 7. + */ +#define __HAL_RCC_PLL_CONFIG(__RCC_PLLSource__, __PLLM__, __PLLN__, __PLLP__, __PLLQ__,__PLLR__) \ + (RCC->PLLCFGR = ((__RCC_PLLSource__) | (__PLLM__) | \ + ((__PLLN__) << RCC_PLLCFGR_PLLN_Pos) | \ + ((((__PLLP__) >> 1) -1) << RCC_PLLCFGR_PLLP_Pos) | \ + ((__PLLQ__) << RCC_PLLCFGR_PLLQ_Pos) | \ + ((__PLLR__) << RCC_PLLCFGR_PLLR_Pos))) +#else +/** @brief Macro to configure the main PLL clock source, multiplication and division factors. + * @note This function must be used only when the main PLL is disabled. + * @param __RCC_PLLSource__ specifies the PLL entry clock source. 
+ * This parameter can be one of the following values: + * @arg RCC_PLLSOURCE_HSI: HSI oscillator clock selected as PLL clock entry + * @arg RCC_PLLSOURCE_HSE: HSE oscillator clock selected as PLL clock entry + * @note This clock source (RCC_PLLSource) is common for the main PLL and PLLI2S. + * @param __PLLM__ specifies the division factor for PLL VCO input clock + * This parameter must be a number between Min_Data = 2 and Max_Data = 63. + * @note You have to set the PLLM parameter correctly to ensure that the VCO input + * frequency ranges from 1 to 2 MHz. It is recommended to select a frequency + * of 2 MHz to limit PLL jitter. + * @param __PLLN__ specifies the multiplication factor for PLL VCO output clock + * This parameter must be a number between Min_Data = 50 and Max_Data = 432. + * @note You have to set the PLLN parameter correctly to ensure that the VCO + * output frequency is between 100 and 432 MHz. + * @param __PLLP__ specifies the division factor for main system clock (SYSCLK) + * This parameter must be a number in the range {2, 4, 6, or 8}. + * @note You have to set the PLLP parameter correctly to not exceed 216 MHz on + * the System clock frequency. + * @param __PLLQ__ specifies the division factor for OTG FS, SDMMC and RNG clocks + * This parameter must be a number between Min_Data = 2 and Max_Data = 15. + * @note If the USB OTG FS is used in your application, you have to set the + * PLLQ parameter correctly to have 48 MHz clock for the USB. However, + * the SDMMC and RNG need a frequency lower than or equal to 48 MHz to work + * correctly. + */ +#define __HAL_RCC_PLL_CONFIG(__RCC_PLLSource__, __PLLM__, __PLLN__, __PLLP__, __PLLQ__) \ + (RCC->PLLCFGR = (0x20000000 | (__RCC_PLLSource__) | (__PLLM__)| \ + ((__PLLN__) << RCC_PLLCFGR_PLLN_Pos) | \ + ((((__PLLP__) >> 1) -1) << RCC_PLLCFGR_PLLP_Pos) | \ + ((__PLLQ__) << RCC_PLLCFGR_PLLQ_Pos))) +#endif /* STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx */ +/*---------------------------------------------------------------------------------------------*/ + +/** @brief Macro to configure the Timers clocks prescalers + * @param __PRESC__ specifies the Timers clocks prescalers selection + * This parameter can be one of the following values: + * @arg RCC_TIMPRES_DESACTIVATED: The Timers kernels clocks prescaler is + * equal to HPRE if PPREx is corresponding to division by 1 or 2, + * else it is equal to [(HPRE * PPREx) / 2] if PPREx is corresponding to + * division by 4 or more. + * @arg RCC_TIMPRES_ACTIVATED: The Timers kernels clocks prescaler is + * equal to HPRE if PPREx is corresponding to division by 1, 2 or 4, + * else it is equal to [(HPRE * PPREx) / 4] if PPREx is corresponding + * to division by 8 or more. + */ +#define __HAL_RCC_TIMCLKPRESCALER(__PRESC__) do {RCC->DCKCFGR1 &= ~(RCC_DCKCFGR1_TIMPRE);\ + RCC->DCKCFGR1 |= (__PRESC__); \ + }while(0) + +/** @brief Macros to Enable or Disable the PLLISAI. + * @note The PLLSAI is disabled by hardware when entering STOP and STANDBY modes. + */ +#define __HAL_RCC_PLLSAI_ENABLE() (RCC->CR |= (RCC_CR_PLLSAION)) +#define __HAL_RCC_PLLSAI_DISABLE() (RCC->CR &= ~(RCC_CR_PLLSAION)) + +#if defined (STM32F722xx) || defined (STM32F723xx) || defined (STM32F732xx) || defined (STM32F733xx) || defined (STM32F730xx) +/** @brief Macro to configure the PLLSAI clock multiplication and division factors. + * @note This function must be used only when the PLLSAI is disabled. 
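To make the PLL constraints above concrete, a hedged example (editorial, not taken from this diff), assuming the 8 MHz HSE found on NUCLEO-F767ZI-class boards such as the F767 target used elsewhere in this repository, and therefore the six-argument form of __HAL_RCC_PLL_CONFIG:

    /* 8 MHz HSE / M=4 -> 2 MHz VCO input (the recommended value);
     * x N=216 -> 432 MHz VCO output; / P=2 -> 216 MHz SYSCLK (the maximum);
     * / Q=9 -> 48 MHz for USB OTG FS / SDMMC / RNG;
     * R only matters if the DSI peripheral is clocked from the PLL R output. */
    __HAL_RCC_PLL_CONFIG(RCC_PLLSOURCE_HSE, 4, 216, 2, 9, 7);

In application code this register write is normally performed by HAL_RCC_OscConfig(); the direct macro call is shown only to make the M/N/P/Q arithmetic explicit.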
+ * @note PLLSAI clock source is common with the main PLL (configured in + * RCC_PLLConfig function ) + * @param __PLLSAIN__ specifies the multiplication factor for PLLSAI VCO output clock. + * This parameter must be a number between Min_Data = 50 and Max_Data = 432. + * @note You have to set the PLLSAIN parameter correctly to ensure that the VCO + * output frequency is between Min_Data = 100 and Max_Data = 432 MHz. + * @param __PLLSAIP__ specifies the division factor for USB, RNG, SDMMC clocks + * This parameter can be a value of @ref RCCEx_PLLSAIP_Clock_Divider. + * @param __PLLSAIQ__ specifies the division factor for SAI clock + * This parameter must be a number between Min_Data = 2 and Max_Data = 15. + */ +#define __HAL_RCC_PLLSAI_CONFIG(__PLLSAIN__, __PLLSAIP__, __PLLSAIQ__) \ + (RCC->PLLSAICFGR = ((__PLLSAIN__) << RCC_PLLSAICFGR_PLLSAIN_Pos) |\ + ((__PLLSAIP__) << RCC_PLLSAICFGR_PLLSAIP_Pos) |\ + ((__PLLSAIQ__) << RCC_PLLSAICFGR_PLLSAIQ_Pos)) + +/** @brief Macro to configure the PLLI2S clock multiplication and division factors. + * @note This macro must be used only when the PLLI2S is disabled. + * @note PLLI2S clock source is common with the main PLL (configured in + * HAL_RCC_ClockConfig() API) + * @param __PLLI2SN__ specifies the multiplication factor for PLLI2S VCO output clock. + * This parameter must be a number between Min_Data = 50 and Max_Data = 432. + * @note You have to set the PLLI2SN parameter correctly to ensure that the VCO + * output frequency is between Min_Data = 100 and Max_Data = 432 MHz. + * @param __PLLI2SQ__ specifies the division factor for SAI clock. + * This parameter must be a number between Min_Data = 2 and Max_Data = 15. + * @param __PLLI2SR__ specifies the division factor for I2S clock + * This parameter must be a number between Min_Data = 2 and Max_Data = 7. + * @note You have to set the PLLI2SR parameter correctly to not exceed 192 MHz + * on the I2S clock frequency. + */ +#define __HAL_RCC_PLLI2S_CONFIG(__PLLI2SN__, __PLLI2SQ__, __PLLI2SR__) \ + (RCC->PLLI2SCFGR = ((__PLLI2SN__) << RCC_PLLI2SCFGR_PLLI2SN_Pos) |\ + ((__PLLI2SQ__) << RCC_PLLI2SCFGR_PLLI2SQ_Pos) |\ + ((__PLLI2SR__) << RCC_PLLI2SCFGR_PLLI2SR_Pos)) +#else +/** @brief Macro to configure the PLLSAI clock multiplication and division factors. + * @note This function must be used only when the PLLSAI is disabled. + * @note PLLSAI clock source is common with the main PLL (configured in + * RCC_PLLConfig function ) + * @param __PLLSAIN__ specifies the multiplication factor for PLLSAI VCO output clock. + * This parameter must be a number between Min_Data = 50 and Max_Data = 432. + * @note You have to set the PLLSAIN parameter correctly to ensure that the VCO + * output frequency is between Min_Data = 100 and Max_Data = 432 MHz. + * @param __PLLSAIP__ specifies the division factor for USB, RNG, SDMMC clocks + * This parameter can be a value of @ref RCCEx_PLLSAIP_Clock_Divider. + * @param __PLLSAIQ__ specifies the division factor for SAI clock + * This parameter must be a number between Min_Data = 2 and Max_Data = 15. + * @param __PLLSAIR__ specifies the division factor for LTDC clock + * This parameter must be a number between Min_Data = 2 and Max_Data = 7. 
+ */ +#define __HAL_RCC_PLLSAI_CONFIG(__PLLSAIN__, __PLLSAIP__, __PLLSAIQ__, __PLLSAIR__) \ + (RCC->PLLSAICFGR = ((__PLLSAIN__) << RCC_PLLSAICFGR_PLLSAIN_Pos) |\ + ((__PLLSAIP__) << RCC_PLLSAICFGR_PLLSAIP_Pos) |\ + ((__PLLSAIQ__) << RCC_PLLSAICFGR_PLLSAIQ_Pos) |\ + ((__PLLSAIR__) << RCC_PLLSAICFGR_PLLSAIR_Pos)) + +/** @brief Macro to configure the PLLI2S clock multiplication and division factors. + * @note This macro must be used only when the PLLI2S is disabled. + * @note PLLI2S clock source is common with the main PLL (configured in + * HAL_RCC_ClockConfig() API) + * @param __PLLI2SN__ specifies the multiplication factor for PLLI2S VCO output clock. + * This parameter must be a number between Min_Data = 50 and Max_Data = 432. + * @note You have to set the PLLI2SN parameter correctly to ensure that the VCO + * output frequency is between Min_Data = 100 and Max_Data = 432 MHz. + * @param __PLLI2SP__ specifies the division factor for SPDDIF-RX clock. + * This parameter can be a value of @ref RCCEx_PLLI2SP_Clock_Divider. + * @param __PLLI2SQ__ specifies the division factor for SAI clock. + * This parameter must be a number between Min_Data = 2 and Max_Data = 15. + * @param __PLLI2SR__ specifies the division factor for I2S clock + * This parameter must be a number between Min_Data = 2 and Max_Data = 7. + * @note You have to set the PLLI2SR parameter correctly to not exceed 192 MHz + * on the I2S clock frequency. + */ +#define __HAL_RCC_PLLI2S_CONFIG(__PLLI2SN__, __PLLI2SP__, __PLLI2SQ__, __PLLI2SR__) \ + (RCC->PLLI2SCFGR = ((__PLLI2SN__) << RCC_PLLI2SCFGR_PLLI2SN_Pos) |\ + ((__PLLI2SP__) << RCC_PLLI2SCFGR_PLLI2SP_Pos) |\ + ((__PLLI2SQ__) << RCC_PLLI2SCFGR_PLLI2SQ_Pos) |\ + ((__PLLI2SR__) << RCC_PLLI2SCFGR_PLLI2SR_Pos)) +#endif /* STM32F722xx || STM32F723xx || STM32F732xx || STM32F733xx || STM32F730xx */ + +/** @brief Macro to configure the SAI clock Divider coming from PLLI2S. + * @note This function must be called before enabling the PLLI2S. + * @param __PLLI2SDivQ__ specifies the PLLI2S division factor for SAI1 clock . + * This parameter must be a number between 1 and 32. + * SAI1 clock frequency = f(PLLI2SQ) / __PLLI2SDivQ__ + */ +#define __HAL_RCC_PLLI2S_PLLSAICLKDIVQ_CONFIG(__PLLI2SDivQ__) (MODIFY_REG(RCC->DCKCFGR1, RCC_DCKCFGR1_PLLI2SDIVQ, (__PLLI2SDivQ__)-1)) + +/** @brief Macro to configure the SAI clock Divider coming from PLLSAI. + * @note This function must be called before enabling the PLLSAI. + * @param __PLLSAIDivQ__ specifies the PLLSAI division factor for SAI1 clock . + * This parameter must be a number between Min_Data = 1 and Max_Data = 32. + * SAI1 clock frequency = f(PLLSAIQ) / __PLLSAIDivQ__ + */ +#define __HAL_RCC_PLLSAI_PLLSAICLKDIVQ_CONFIG(__PLLSAIDivQ__) (MODIFY_REG(RCC->DCKCFGR1, RCC_DCKCFGR1_PLLSAIDIVQ, ((__PLLSAIDivQ__)-1)<<8)) + +#if defined (STM32F745xx) || defined (STM32F746xx) || defined (STM32F756xx) || defined (STM32F765xx) ||\ + defined (STM32F767xx) || defined (STM32F769xx) || defined (STM32F777xx) || defined (STM32F779xx) ||\ + defined (STM32F750xx) +/** @brief Macro to configure the LTDC clock Divider coming from PLLSAI. + * @note This function must be called before enabling the PLLSAI. + * @param __PLLSAIDivR__ specifies the PLLSAI division factor for LTDC clock . + * This parameter can be a value of @ref RCCEx_PLLSAI_DIVR. 
+ * LTDC clock frequency = f(PLLSAIR) / __PLLSAIDivR__ + */ +#define __HAL_RCC_PLLSAI_PLLSAICLKDIVR_CONFIG(__PLLSAIDivR__)\ + MODIFY_REG(RCC->DCKCFGR1, RCC_DCKCFGR1_PLLSAIDIVR, (uint32_t)(__PLLSAIDivR__)) +#endif /* STM32F745xx || STM32F746xx || STM32F756xx || STM32F765xx || STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx || STM32F750xx */ + +/** @brief Macro to configure SAI1 clock source selection. + * @note This function must be called before enabling PLLSAI, PLLI2S and + * the SAI clock. + * @param __SOURCE__ specifies the SAI1 clock source. + * This parameter can be one of the following values: + * @arg RCC_SAI1CLKSOURCE_PLLI2S: PLLI2S_Q clock divided by PLLI2SDIVQ used + * as SAI1 clock. + * @arg RCC_SAI1CLKSOURCE_PLLSAI: PLLISAI_Q clock divided by PLLSAIDIVQ used + * as SAI1 clock. + * @arg RCC_SAI1CLKSOURCE_PIN: External clock mapped on the I2S_CKIN pin + * used as SAI1 clock. + * @arg RCC_SAI1CLKSOURCE_PLLSRC: HSI or HSE depending from PLL Source clock + * used as SAI1 clock. + * @note The RCC_SAI1CLKSOURCE_PLLSRC value is only available with STM32F767/769/777/779xx Devices + */ +#define __HAL_RCC_SAI1_CONFIG(__SOURCE__)\ + MODIFY_REG(RCC->DCKCFGR1, RCC_DCKCFGR1_SAI1SEL, (uint32_t)(__SOURCE__)) + +/** @brief Macro to get the SAI1 clock source. + * @retval The clock source can be one of the following values: + * @arg RCC_SAI1CLKSOURCE_PLLI2S: PLLI2S_Q clock divided by PLLI2SDIVQ used + * as SAI1 clock. + * @arg RCC_SAI1CLKSOURCE_PLLSAI: PLLISAI_Q clock divided by PLLSAIDIVQ used + * as SAI1 clock. + * @arg RCC_SAI1CLKSOURCE_PIN: External clock mapped on the I2S_CKIN pin + * used as SAI1 clock. + * @arg RCC_SAI1CLKSOURCE_PLLSRC: HSI or HSE depending from PLL Source clock + * used as SAI1 clock. + * @note The RCC_SAI1CLKSOURCE_PLLSRC value is only available with STM32F767/769/777/779xx Devices + */ +#define __HAL_RCC_GET_SAI1_SOURCE() ((uint32_t)(READ_BIT(RCC->DCKCFGR1, RCC_DCKCFGR1_SAI1SEL))) + +/** @brief Macro to configure SAI2 clock source selection. + * @note This function must be called before enabling PLLSAI, PLLI2S and + * the SAI clock. + * @param __SOURCE__ specifies the SAI2 clock source. + * This parameter can be one of the following values: + * @arg RCC_SAI2CLKSOURCE_PLLI2S: PLLI2S_Q clock divided by PLLI2SDIVQ used + * as SAI2 clock. + * @arg RCC_SAI2CLKSOURCE_PLLSAI: PLLISAI_Q clock divided by PLLSAIDIVQ used + * as SAI2 clock. + * @arg RCC_SAI2CLKSOURCE_PIN: External clock mapped on the I2S_CKIN pin + * used as SAI2 clock. + * @arg RCC_SAI2CLKSOURCE_PLLSRC: HSI or HSE depending from PLL Source clock + * used as SAI2 clock. + * @note The RCC_SAI2CLKSOURCE_PLLSRC value is only available with STM32F767/769/777/779xx Devices + */ +#define __HAL_RCC_SAI2_CONFIG(__SOURCE__)\ + MODIFY_REG(RCC->DCKCFGR1, RCC_DCKCFGR1_SAI2SEL, (uint32_t)(__SOURCE__)) + + +/** @brief Macro to get the SAI2 clock source. + * @retval The clock source can be one of the following values: + * @arg RCC_SAI2CLKSOURCE_PLLI2S: PLLI2S_Q clock divided by PLLI2SDIVQ used + * as SAI2 clock. + * @arg RCC_SAI2CLKSOURCE_PLLSAI: PLLISAI_Q clock divided by PLLSAIDIVQ used + * as SAI2 clock. + * @arg RCC_SAI2CLKSOURCE_PIN: External clock mapped on the I2S_CKIN pin + * used as SAI2 clock. + * @arg RCC_SAI2CLKSOURCE_PLLSRC: HSI or HSE depending from PLL Source clock + * used as SAI2 clock. 
+ * @note The RCC_SAI2CLKSOURCE_PLLSRC value is only available with STM32F767/769/777/779xx Devices + */ +#define __HAL_RCC_GET_SAI2_SOURCE() ((uint32_t)(READ_BIT(RCC->DCKCFGR1, RCC_DCKCFGR1_SAI2SEL))) + + +/** @brief Enable PLLSAI_RDY interrupt. + */ +#define __HAL_RCC_PLLSAI_ENABLE_IT() (RCC->CIR |= (RCC_CIR_PLLSAIRDYIE)) + +/** @brief Disable PLLSAI_RDY interrupt. + */ +#define __HAL_RCC_PLLSAI_DISABLE_IT() (RCC->CIR &= ~(RCC_CIR_PLLSAIRDYIE)) + +/** @brief Clear the PLLSAI RDY interrupt pending bits. + */ +#define __HAL_RCC_PLLSAI_CLEAR_IT() (RCC->CIR |= (RCC_CIR_PLLSAIRDYF)) + +/** @brief Check the PLLSAI RDY interrupt has occurred or not. + * @retval The new state (TRUE or FALSE). + */ +#define __HAL_RCC_PLLSAI_GET_IT() ((RCC->CIR & (RCC_CIR_PLLSAIRDYIE)) == (RCC_CIR_PLLSAIRDYIE)) + +/** @brief Check PLLSAI RDY flag is set or not. + * @retval The new state (TRUE or FALSE). + */ +#define __HAL_RCC_PLLSAI_GET_FLAG() ((RCC->CR & (RCC_CR_PLLSAIRDY)) == (RCC_CR_PLLSAIRDY)) + +/** @brief Macro to Get I2S clock source selection. + * @retval The clock source can be one of the following values: + * @arg RCC_I2SCLKSOURCE_PLLI2S: PLLI2S VCO output clock divided by PLLI2SR used as I2S clock. + * @arg RCC_I2SCLKSOURCE_EXT: External clock mapped on the I2S_CKIN pin used as I2S clock source + */ +#define __HAL_RCC_GET_I2SCLKSOURCE() (READ_BIT(RCC->CFGR, RCC_CFGR_I2SSRC)) + +/** @brief Macro to configure the I2C1 clock (I2C1CLK). + * + * @param __I2C1_CLKSOURCE__ specifies the I2C1 clock source. + * This parameter can be one of the following values: + * @arg RCC_I2C1CLKSOURCE_PCLK1: PCLK1 selected as I2C1 clock + * @arg RCC_I2C1CLKSOURCE_HSI: HSI selected as I2C1 clock + * @arg RCC_I2C1CLKSOURCE_SYSCLK: System Clock selected as I2C1 clock + */ +#define __HAL_RCC_I2C1_CONFIG(__I2C1_CLKSOURCE__) \ + MODIFY_REG(RCC->DCKCFGR2, RCC_DCKCFGR2_I2C1SEL, (uint32_t)(__I2C1_CLKSOURCE__)) + +/** @brief Macro to get the I2C1 clock source. + * @retval The clock source can be one of the following values: + * @arg RCC_I2C1CLKSOURCE_PCLK1: PCLK1 selected as I2C1 clock + * @arg RCC_I2C1CLKSOURCE_HSI: HSI selected as I2C1 clock + * @arg RCC_I2C1CLKSOURCE_SYSCLK: System Clock selected as I2C1 clock + */ +#define __HAL_RCC_GET_I2C1_SOURCE() ((uint32_t)(READ_BIT(RCC->DCKCFGR2, RCC_DCKCFGR2_I2C1SEL))) + +/** @brief Macro to configure the I2C2 clock (I2C2CLK). + * + * @param __I2C2_CLKSOURCE__ specifies the I2C2 clock source. + * This parameter can be one of the following values: + * @arg RCC_I2C2CLKSOURCE_PCLK1: PCLK1 selected as I2C2 clock + * @arg RCC_I2C2CLKSOURCE_HSI: HSI selected as I2C2 clock + * @arg RCC_I2C2CLKSOURCE_SYSCLK: System Clock selected as I2C2 clock + */ +#define __HAL_RCC_I2C2_CONFIG(__I2C2_CLKSOURCE__) \ + MODIFY_REG(RCC->DCKCFGR2, RCC_DCKCFGR2_I2C2SEL, (uint32_t)(__I2C2_CLKSOURCE__)) + +/** @brief Macro to get the I2C2 clock source. + * @retval The clock source can be one of the following values: + * @arg RCC_I2C2CLKSOURCE_PCLK1: PCLK1 selected as I2C2 clock + * @arg RCC_I2C2CLKSOURCE_HSI: HSI selected as I2C2 clock + * @arg RCC_I2C2CLKSOURCE_SYSCLK: System Clock selected as I2C2 clock + */ +#define __HAL_RCC_GET_I2C2_SOURCE() ((uint32_t)(READ_BIT(RCC->DCKCFGR2, RCC_DCKCFGR2_I2C2SEL))) + +/** @brief Macro to configure the I2C3 clock (I2C3CLK). + * + * @param __I2C3_CLKSOURCE__ specifies the I2C3 clock source. 
+ * This parameter can be one of the following values: + * @arg RCC_I2C3CLKSOURCE_PCLK1: PCLK1 selected as I2C3 clock + * @arg RCC_I2C3CLKSOURCE_HSI: HSI selected as I2C3 clock + * @arg RCC_I2C3CLKSOURCE_SYSCLK: System Clock selected as I2C3 clock + */ +#define __HAL_RCC_I2C3_CONFIG(__I2C3_CLKSOURCE__) \ + MODIFY_REG(RCC->DCKCFGR2, RCC_DCKCFGR2_I2C3SEL, (uint32_t)(__I2C3_CLKSOURCE__)) + +/** @brief macro to get the I2C3 clock source. + * @retval The clock source can be one of the following values: + * @arg RCC_I2C3CLKSOURCE_PCLK1: PCLK1 selected as I2C3 clock + * @arg RCC_I2C3CLKSOURCE_HSI: HSI selected as I2C3 clock + * @arg RCC_I2C3CLKSOURCE_SYSCLK: System Clock selected as I2C3 clock + */ +#define __HAL_RCC_GET_I2C3_SOURCE() ((uint32_t)(READ_BIT(RCC->DCKCFGR2, RCC_DCKCFGR2_I2C3SEL))) + +/** @brief Macro to configure the I2C4 clock (I2C4CLK). + * + * @param __I2C4_CLKSOURCE__ specifies the I2C4 clock source. + * This parameter can be one of the following values: + * @arg RCC_I2C4CLKSOURCE_PCLK1: PCLK1 selected as I2C4 clock + * @arg RCC_I2C4CLKSOURCE_HSI: HSI selected as I2C4 clock + * @arg RCC_I2C4CLKSOURCE_SYSCLK: System Clock selected as I2C4 clock + */ +#define __HAL_RCC_I2C4_CONFIG(__I2C4_CLKSOURCE__) \ + MODIFY_REG(RCC->DCKCFGR2, RCC_DCKCFGR2_I2C4SEL, (uint32_t)(__I2C4_CLKSOURCE__)) + +/** @brief macro to get the I2C4 clock source. + * @retval The clock source can be one of the following values: + * @arg RCC_I2C4CLKSOURCE_PCLK1: PCLK1 selected as I2C4 clock + * @arg RCC_I2C4CLKSOURCE_HSI: HSI selected as I2C4 clock + * @arg RCC_I2C4CLKSOURCE_SYSCLK: System Clock selected as I2C4 clock + */ +#define __HAL_RCC_GET_I2C4_SOURCE() ((uint32_t)(READ_BIT(RCC->DCKCFGR2, RCC_DCKCFGR2_I2C4SEL))) + +/** @brief Macro to configure the USART1 clock (USART1CLK). + * + * @param __USART1_CLKSOURCE__ specifies the USART1 clock source. + * This parameter can be one of the following values: + * @arg RCC_USART1CLKSOURCE_PCLK2: PCLK2 selected as USART1 clock + * @arg RCC_USART1CLKSOURCE_HSI: HSI selected as USART1 clock + * @arg RCC_USART1CLKSOURCE_SYSCLK: System Clock selected as USART1 clock + * @arg RCC_USART1CLKSOURCE_LSE: LSE selected as USART1 clock + */ +#define __HAL_RCC_USART1_CONFIG(__USART1_CLKSOURCE__) \ + MODIFY_REG(RCC->DCKCFGR2, RCC_DCKCFGR2_USART1SEL, (uint32_t)(__USART1_CLKSOURCE__)) + +/** @brief macro to get the USART1 clock source. + * @retval The clock source can be one of the following values: + * @arg RCC_USART1CLKSOURCE_PCLK2: PCLK2 selected as USART1 clock + * @arg RCC_USART1CLKSOURCE_HSI: HSI selected as USART1 clock + * @arg RCC_USART1CLKSOURCE_SYSCLK: System Clock selected as USART1 clock + * @arg RCC_USART1CLKSOURCE_LSE: LSE selected as USART1 clock + */ +#define __HAL_RCC_GET_USART1_SOURCE() ((uint32_t)(READ_BIT(RCC->DCKCFGR2, RCC_DCKCFGR2_USART1SEL))) + +/** @brief Macro to configure the USART2 clock (USART2CLK). + * + * @param __USART2_CLKSOURCE__ specifies the USART2 clock source. + * This parameter can be one of the following values: + * @arg RCC_USART2CLKSOURCE_PCLK1: PCLK1 selected as USART2 clock + * @arg RCC_USART2CLKSOURCE_HSI: HSI selected as USART2 clock + * @arg RCC_USART2CLKSOURCE_SYSCLK: System Clock selected as USART2 clock + * @arg RCC_USART2CLKSOURCE_LSE: LSE selected as USART2 clock + */ +#define __HAL_RCC_USART2_CONFIG(__USART2_CLKSOURCE__) \ + MODIFY_REG(RCC->DCKCFGR2, RCC_DCKCFGR2_USART2SEL, (uint32_t)(__USART2_CLKSOURCE__)) + +/** @brief macro to get the USART2 clock source. 
+ * @retval The clock source can be one of the following values: + * @arg RCC_USART2CLKSOURCE_PCLK1: PCLK1 selected as USART2 clock + * @arg RCC_USART2CLKSOURCE_HSI: HSI selected as USART2 clock + * @arg RCC_USART2CLKSOURCE_SYSCLK: System Clock selected as USART2 clock + * @arg RCC_USART2CLKSOURCE_LSE: LSE selected as USART2 clock + */ +#define __HAL_RCC_GET_USART2_SOURCE() ((uint32_t)(READ_BIT(RCC->DCKCFGR2, RCC_DCKCFGR2_USART2SEL))) + +/** @brief Macro to configure the USART3 clock (USART3CLK). + * + * @param __USART3_CLKSOURCE__ specifies the USART3 clock source. + * This parameter can be one of the following values: + * @arg RCC_USART3CLKSOURCE_PCLK1: PCLK1 selected as USART3 clock + * @arg RCC_USART3CLKSOURCE_HSI: HSI selected as USART3 clock + * @arg RCC_USART3CLKSOURCE_SYSCLK: System Clock selected as USART3 clock + * @arg RCC_USART3CLKSOURCE_LSE: LSE selected as USART3 clock + */ +#define __HAL_RCC_USART3_CONFIG(__USART3_CLKSOURCE__) \ + MODIFY_REG(RCC->DCKCFGR2, RCC_DCKCFGR2_USART3SEL, (uint32_t)(__USART3_CLKSOURCE__)) + +/** @brief macro to get the USART3 clock source. + * @retval The clock source can be one of the following values: + * @arg RCC_USART3CLKSOURCE_PCLK1: PCLK1 selected as USART3 clock + * @arg RCC_USART3CLKSOURCE_HSI: HSI selected as USART3 clock + * @arg RCC_USART3CLKSOURCE_SYSCLK: System Clock selected as USART3 clock + * @arg RCC_USART3CLKSOURCE_LSE: LSE selected as USART3 clock + */ +#define __HAL_RCC_GET_USART3_SOURCE() ((uint32_t)(READ_BIT(RCC->DCKCFGR2, RCC_DCKCFGR2_USART3SEL))) + + /** @brief Macro to configure the UART4 clock (UART4CLK). + * + * @param __UART4_CLKSOURCE__ specifies the UART4 clock source. + * This parameter can be one of the following values: + * @arg RCC_UART4CLKSOURCE_PCLK1: PCLK1 selected as UART4 clock + * @arg RCC_UART4CLKSOURCE_HSI: HSI selected as UART4 clock + * @arg RCC_UART4CLKSOURCE_SYSCLK: System Clock selected as UART4 clock + * @arg RCC_UART4CLKSOURCE_LSE: LSE selected as UART4 clock + */ +#define __HAL_RCC_UART4_CONFIG(__UART4_CLKSOURCE__) \ + MODIFY_REG(RCC->DCKCFGR2, RCC_DCKCFGR2_UART4SEL, (uint32_t)(__UART4_CLKSOURCE__)) + +/** @brief macro to get the UART4 clock source. + * @retval The clock source can be one of the following values: + * @arg RCC_UART4CLKSOURCE_PCLK1: PCLK1 selected as UART4 clock + * @arg RCC_UART4CLKSOURCE_HSI: HSI selected as UART4 clock + * @arg RCC_UART4CLKSOURCE_SYSCLK: System Clock selected as UART4 clock + * @arg RCC_UART4CLKSOURCE_LSE: LSE selected as UART4 clock + */ +#define __HAL_RCC_GET_UART4_SOURCE() ((uint32_t)(READ_BIT(RCC->DCKCFGR2, RCC_DCKCFGR2_UART4SEL))) + + /** @brief Macro to configure the UART5 clock (UART5CLK). + * + * @param __UART5_CLKSOURCE__ specifies the UART5 clock source. + * This parameter can be one of the following values: + * @arg RCC_UART5CLKSOURCE_PCLK1: PCLK1 selected as UART5 clock + * @arg RCC_UART5CLKSOURCE_HSI: HSI selected as UART5 clock + * @arg RCC_UART5CLKSOURCE_SYSCLK: System Clock selected as UART5 clock + * @arg RCC_UART5CLKSOURCE_LSE: LSE selected as UART5 clock + */ +#define __HAL_RCC_UART5_CONFIG(__UART5_CLKSOURCE__) \ + MODIFY_REG(RCC->DCKCFGR2, RCC_DCKCFGR2_UART5SEL, (uint32_t)(__UART5_CLKSOURCE__)) + +/** @brief macro to get the UART5 clock source. 
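These DCKCFGR2 selection macros all follow the same pattern, so one illustration covers the family (editorial sketch; the clock choice is an assumption, not something this diff configures). USART3, which this project's main.c initializes as huart3, could be moved onto the HSI kernel clock so its baud rate no longer depends on PCLK1:

    __HAL_RCC_USART3_CONFIG(RCC_USART3CLKSOURCE_HSI);   /* select HSI as the USART3 kernel clock */
    if (__HAL_RCC_GET_USART3_SOURCE() == RCC_USART3CLKSOURCE_HSI)
    {
      /* the baud-rate divider programmed by the UART driver is now relative to the 16 MHz HSI */
    }

In CubeMX-generated code the same selection is usually made through HAL_RCCEx_PeriphCLKConfig(), whose prototype appears further down in this header.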
+ * @retval The clock source can be one of the following values: + * @arg RCC_UART5CLKSOURCE_PCLK1: PCLK1 selected as UART5 clock + * @arg RCC_UART5CLKSOURCE_HSI: HSI selected as UART5 clock + * @arg RCC_UART5CLKSOURCE_SYSCLK: System Clock selected as UART5 clock + * @arg RCC_UART5CLKSOURCE_LSE: LSE selected as UART5 clock + */ +#define __HAL_RCC_GET_UART5_SOURCE() ((uint32_t)(READ_BIT(RCC->DCKCFGR2, RCC_DCKCFGR2_UART5SEL))) + + /** @brief Macro to configure the USART6 clock (USART6CLK). + * + * @param __USART6_CLKSOURCE__ specifies the USART6 clock source. + * This parameter can be one of the following values: + * @arg RCC_USART6CLKSOURCE_PCLK1: PCLK1 selected as USART6 clock + * @arg RCC_USART6CLKSOURCE_HSI: HSI selected as USART6 clock + * @arg RCC_USART6CLKSOURCE_SYSCLK: System Clock selected as USART6 clock + * @arg RCC_USART6CLKSOURCE_LSE: LSE selected as USART6 clock + */ +#define __HAL_RCC_USART6_CONFIG(__USART6_CLKSOURCE__) \ + MODIFY_REG(RCC->DCKCFGR2, RCC_DCKCFGR2_USART6SEL, (uint32_t)(__USART6_CLKSOURCE__)) + +/** @brief macro to get the USART6 clock source. + * @retval The clock source can be one of the following values: + * @arg RCC_USART6CLKSOURCE_PCLK1: PCLK1 selected as USART6 clock + * @arg RCC_USART6CLKSOURCE_HSI: HSI selected as USART6 clock + * @arg RCC_USART6CLKSOURCE_SYSCLK: System Clock selected as USART6 clock + * @arg RCC_USART6CLKSOURCE_LSE: LSE selected as USART6 clock + */ +#define __HAL_RCC_GET_USART6_SOURCE() ((uint32_t)(READ_BIT(RCC->DCKCFGR2, RCC_DCKCFGR2_USART6SEL))) + + /** @brief Macro to configure the UART7 clock (UART7CLK). + * + * @param __UART7_CLKSOURCE__ specifies the UART7 clock source. + * This parameter can be one of the following values: + * @arg RCC_UART7CLKSOURCE_PCLK1: PCLK1 selected as UART7 clock + * @arg RCC_UART7CLKSOURCE_HSI: HSI selected as UART7 clock + * @arg RCC_UART7CLKSOURCE_SYSCLK: System Clock selected as UART7 clock + * @arg RCC_UART7CLKSOURCE_LSE: LSE selected as UART7 clock + */ +#define __HAL_RCC_UART7_CONFIG(__UART7_CLKSOURCE__) \ + MODIFY_REG(RCC->DCKCFGR2, RCC_DCKCFGR2_UART7SEL, (uint32_t)(__UART7_CLKSOURCE__)) + +/** @brief macro to get the UART7 clock source. + * @retval The clock source can be one of the following values: + * @arg RCC_UART7CLKSOURCE_PCLK1: PCLK1 selected as UART7 clock + * @arg RCC_UART7CLKSOURCE_HSI: HSI selected as UART7 clock + * @arg RCC_UART7CLKSOURCE_SYSCLK: System Clock selected as UART7 clock + * @arg RCC_UART7CLKSOURCE_LSE: LSE selected as UART7 clock + */ +#define __HAL_RCC_GET_UART7_SOURCE() ((uint32_t)(READ_BIT(RCC->DCKCFGR2, RCC_DCKCFGR2_UART7SEL))) + +/** @brief Macro to configure the UART8 clock (UART8CLK). + * + * @param __UART8_CLKSOURCE__ specifies the UART8 clock source. + * This parameter can be one of the following values: + * @arg RCC_UART8CLKSOURCE_PCLK1: PCLK1 selected as UART8 clock + * @arg RCC_UART8CLKSOURCE_HSI: HSI selected as UART8 clock + * @arg RCC_UART8CLKSOURCE_SYSCLK: System Clock selected as UART8 clock + * @arg RCC_UART8CLKSOURCE_LSE: LSE selected as UART8 clock + */ +#define __HAL_RCC_UART8_CONFIG(__UART8_CLKSOURCE__) \ + MODIFY_REG(RCC->DCKCFGR2, RCC_DCKCFGR2_UART8SEL, (uint32_t)(__UART8_CLKSOURCE__)) + +/** @brief macro to get the UART8 clock source. 
+ * @retval The clock source can be one of the following values: + * @arg RCC_UART8CLKSOURCE_PCLK1: PCLK1 selected as UART8 clock + * @arg RCC_UART8CLKSOURCE_HSI: HSI selected as UART8 clock + * @arg RCC_UART8CLKSOURCE_SYSCLK: System Clock selected as UART8 clock + * @arg RCC_UART8CLKSOURCE_LSE: LSE selected as UART8 clock + */ +#define __HAL_RCC_GET_UART8_SOURCE() ((uint32_t)(READ_BIT(RCC->DCKCFGR2, RCC_DCKCFGR2_UART8SEL))) + +/** @brief Macro to configure the LPTIM1 clock (LPTIM1CLK). + * + * @param __LPTIM1_CLKSOURCE__ specifies the LPTIM1 clock source. + * This parameter can be one of the following values: + * @arg RCC_LPTIM1CLKSOURCE_PCLK1: PCLK selected as LPTIM1 clock + * @arg RCC_LPTIM1CLKSOURCE_HSI: HSI selected as LPTIM1 clock + * @arg RCC_LPTIM1CLKSOURCE_LSI: LSI selected as LPTIM1 clock + * @arg RCC_LPTIM1CLKSOURCE_LSE: LSE selected as LPTIM1 clock + */ +#define __HAL_RCC_LPTIM1_CONFIG(__LPTIM1_CLKSOURCE__) \ + MODIFY_REG(RCC->DCKCFGR2, RCC_DCKCFGR2_LPTIM1SEL, (uint32_t)(__LPTIM1_CLKSOURCE__)) + +/** @brief macro to get the LPTIM1 clock source. + * @retval The clock source can be one of the following values: + * @arg RCC_LPTIM1CLKSOURCE_PCLK1: PCLK selected as LPTIM1 clock + * @arg RCC_LPTIM1CLKSOURCE_HSI: HSI selected as LPTIM1 clock + * @arg RCC_LPTIM1CLKSOURCE_LSI: LSI selected as LPTIM1 clock + * @arg RCC_LPTIM1CLKSOURCE_LSE: LSE selected as LPTIM1 clock + */ +#define __HAL_RCC_GET_LPTIM1_SOURCE() ((uint32_t)(READ_BIT(RCC->DCKCFGR2, RCC_DCKCFGR2_LPTIM1SEL))) + +/** @brief Macro to configure the CEC clock (CECCLK). + * + * @param __CEC_CLKSOURCE__ specifies the CEC clock source. + * This parameter can be one of the following values: + * @arg RCC_CECCLKSOURCE_LSE: LSE selected as CEC clock + * @arg RCC_CECCLKSOURCE_HSI: HSI divided by 488 selected as CEC clock + */ +#define __HAL_RCC_CEC_CONFIG(__CEC_CLKSOURCE__) \ + MODIFY_REG(RCC->DCKCFGR2, RCC_DCKCFGR2_CECSEL, (uint32_t)(__CEC_CLKSOURCE__)) + +/** @brief macro to get the CEC clock source. + * @retval The clock source can be one of the following values: + * @arg RCC_CECCLKSOURCE_LSE: LSE selected as CEC clock + * @arg RCC_CECCLKSOURCE_HSI: HSI selected as CEC clock + */ +#define __HAL_RCC_GET_CEC_SOURCE() ((uint32_t)(READ_BIT(RCC->DCKCFGR2, RCC_DCKCFGR2_CECSEL))) + +/** @brief Macro to configure the CLK48 source (CLK48CLK). + * + * @param __CLK48_SOURCE__ specifies the CLK48 clock source. + * This parameter can be one of the following values: + * @arg RCC_CLK48SOURCE_PLL: PLL selected as CLK48 source + * @arg RCC_CLK48SOURCE_PLLSAIP: PLLSAIP selected as CLK48 source + */ +#define __HAL_RCC_CLK48_CONFIG(__CLK48_SOURCE__) \ + MODIFY_REG(RCC->DCKCFGR2, RCC_DCKCFGR2_CK48MSEL, (uint32_t)(__CLK48_SOURCE__)) + +/** @brief macro to get the CLK48 source. + * @retval The clock source can be one of the following values: + * @arg RCC_CLK48SOURCE_PLL: PLL used as CLK48 source + * @arg RCC_CLK48SOURCE_PLLSAIP: PLLSAIP used as CLK48 source + */ +#define __HAL_RCC_GET_CLK48_SOURCE() ((uint32_t)(READ_BIT(RCC->DCKCFGR2, RCC_DCKCFGR2_CK48MSEL))) + +/** @brief Macro to configure the SDMMC1 clock (SDMMC1CLK). + * + * @param __SDMMC1_CLKSOURCE__ specifies the SDMMC1 clock source. 
+ * This parameter can be one of the following values: + * @arg RCC_SDMMC1CLKSOURCE_CLK48: CLK48 selected as SDMMC clock + * @arg RCC_SDMMC1CLKSOURCE_SYSCLK: SYSCLK selected as SDMMC clock + */ +#define __HAL_RCC_SDMMC1_CONFIG(__SDMMC1_CLKSOURCE__) \ + MODIFY_REG(RCC->DCKCFGR2, RCC_DCKCFGR2_SDMMC1SEL, (uint32_t)(__SDMMC1_CLKSOURCE__)) + +/** @brief macro to get the SDMMC1 clock source. + * @retval The clock source can be one of the following values: + * @arg RCC_SDMMC1CLKSOURCE_CLK48: CLK48 selected as SDMMC1 clock + * @arg RCC_SDMMC1CLKSOURCE_SYSCLK: SYSCLK selected as SDMMC1 clock + */ +#define __HAL_RCC_GET_SDMMC1_SOURCE() ((uint32_t)(READ_BIT(RCC->DCKCFGR2, RCC_DCKCFGR2_SDMMC1SEL))) + +#if defined (STM32F722xx) || defined (STM32F723xx) || defined (STM32F732xx) || defined (STM32F733xx) || defined (STM32F765xx) ||\ + defined (STM32F767xx) || defined (STM32F769xx) || defined (STM32F777xx) || defined (STM32F779xx) || defined (STM32F730xx) +/** @brief Macro to configure the SDMMC2 clock (SDMMC2CLK). + * @param __SDMMC2_CLKSOURCE__ specifies the SDMMC2 clock source. + * This parameter can be one of the following values: + * @arg RCC_SDMMC2CLKSOURCE_CLK48: CLK48 selected as SDMMC2 clock + * @arg RCC_SDMMC2CLKSOURCE_SYSCLK: SYSCLK selected as SDMMC2 clock + */ +#define __HAL_RCC_SDMMC2_CONFIG(__SDMMC2_CLKSOURCE__) \ + MODIFY_REG(RCC->DCKCFGR2, RCC_DCKCFGR2_SDMMC2SEL, (uint32_t)(__SDMMC2_CLKSOURCE__)) + +/** @brief macro to get the SDMMC2 clock source. + * @retval The clock source can be one of the following values: + * @arg RCC_SDMMC2CLKSOURCE_CLK48: CLK48 selected as SDMMC2 clock + * @arg RCC_SDMMC2CLKSOURCE_SYSCLK: SYSCLK selected as SDMMC2 clock + */ +#define __HAL_RCC_GET_SDMMC2_SOURCE() ((uint32_t)(READ_BIT(RCC->DCKCFGR2, RCC_DCKCFGR2_SDMMC2SEL))) +#endif /* STM32F722xx || STM32F723xx || STM32F732xx || STM32F733xx || STM32F765xx || STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx || STM32F730xx */ + +#if defined (STM32F765xx) || defined (STM32F767xx) || defined (STM32F769xx) || defined (STM32F777xx) || defined (STM32F779xx) +/** @brief Macro to configure the DFSDM1 clock + * @param __DFSDM1_CLKSOURCE__ specifies the DFSDM1 clock source. + * This parameter can be one of the following values: + * @arg RCC_DFSDM1CLKSOURCE_PCLK2: PCLK2 Clock selected as DFSDM clock + * @arg RCC_DFSDMCLKSOURCE_SYSCLK: System Clock selected as DFSDM clock + */ +#define __HAL_RCC_DFSDM1_CONFIG(__DFSDM1_CLKSOURCE__) \ + MODIFY_REG(RCC->DCKCFGR1, RCC_DCKCFGR1_DFSDM1SEL, (uint32_t)(__DFSDM1_CLKSOURCE__)) + +/** @brief Macro to get the DFSDM1 clock source. + * @retval The clock source can be one of the following values: + * @arg RCC_DFSDM1CLKSOURCE_PCLK2: PCLK2 Clock selected as DFSDM1 clock + * @arg RCC_DFSDM1CLKSOURCE_SYSCLK: System Clock selected as DFSDM1 clock + */ +#define __HAL_RCC_GET_DFSDM1_SOURCE() ((uint32_t)(READ_BIT(RCC->DCKCFGR1, RCC_DCKCFGR1_DFSDM1SEL))) + +/** @brief Macro to configure the DFSDM1 Audio clock + * @param __DFSDM1AUDIO_CLKSOURCE__ specifies the DFSDM1 Audio clock source. + * This parameter can be one of the following values: + * @arg RCC_DFSDM1AUDIOCLKSOURCE_SAI1: SAI1 Clock selected as DFSDM1 Audio clock + * @arg RCC_DFSDM1AUDIOCLKSOURCE_SAI2: SAI2 Clock selected as DFSDM1 Audio clock + */ +#define __HAL_RCC_DFSDM1AUDIO_CONFIG(__DFSDM1AUDIO_CLKSOURCE__) \ + MODIFY_REG(RCC->DCKCFGR1, RCC_DCKCFGR1_ADFSDM1SEL, (uint32_t)(__DFSDM1AUDIO_CLKSOURCE__)) + +/** @brief Macro to get the DFSDM1 Audio clock source. 
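The 48 MHz clock tree is where these selections chain together; a brief sketch (editorial illustration, not part of the diff) of routing CLK48 from the PLLSAI P output and then feeding it to SDMMC1, using only macros defined above:

    __HAL_RCC_CLK48_CONFIG(RCC_CLK48SOURCE_PLLSAIP);      /* CLK48 <- PLLSAI P output */
    __HAL_RCC_SDMMC1_CONFIG(RCC_SDMMC1CLKSOURCE_CLK48);   /* SDMMC1 kernel clock <- CLK48 */

For this routing to yield a valid 48 MHz clock, the PLLSAI must already have been configured (see __HAL_RCC_PLLSAI_CONFIG above) and enabled with __HAL_RCC_PLLSAI_ENABLE().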
+ * @retval The clock source can be one of the following values: + * @arg RCC_DFSDM1AUDIOCLKSOURCE_SAI1: SAI1 Clock selected as DFSDM1 Audio clock + * @arg RCC_DFSDM1AUDIOCLKSOURCE_SAI2: SAI2 Clock selected as DFSDM1 Audio clock + */ +#define __HAL_RCC_GET_DFSDM1AUDIO_SOURCE() ((uint32_t)(READ_BIT(RCC->DCKCFGR1, RCC_DCKCFGR1_ADFSDM1SEL))) +#endif /* STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx */ + +#if defined (STM32F769xx) || defined (STM32F779xx) +/** @brief Macro to configure the DSI clock. + * @param __DSI_CLKSOURCE__ specifies the DSI clock source. + * This parameter can be one of the following values: + * @arg RCC_DSICLKSOURCE_PLLR: PLLR output used as DSI clock. + * @arg RCC_DSICLKSOURCE_DSIPHY: DSI-PHY output used as DSI clock. + */ +#define __HAL_RCC_DSI_CONFIG(__DSI_CLKSOURCE__) (MODIFY_REG(RCC->DCKCFGR2, RCC_DCKCFGR2_DSISEL, (uint32_t)(__DSI_CLKSOURCE__))) + +/** @brief Macro to Get the DSI clock. + * @retval The clock source can be one of the following values: + * @arg RCC_DSICLKSOURCE_PLLR: PLLR output used as DSI clock. + * @arg RCC_DSICLKSOURCE_DSIPHY: DSI-PHY output used as DSI clock. + */ +#define __HAL_RCC_GET_DSI_SOURCE() (READ_BIT(RCC->DCKCFGR2, RCC_DCKCFGR2_DSISEL)) +#endif /* STM32F769xx || STM32F779xx */ +/** + * @} + */ + +/* Exported functions --------------------------------------------------------*/ +/** @addtogroup RCCEx_Exported_Functions_Group1 + * @{ + */ +HAL_StatusTypeDef HAL_RCCEx_PeriphCLKConfig(RCC_PeriphCLKInitTypeDef *PeriphClkInit); +void HAL_RCCEx_GetPeriphCLKConfig(RCC_PeriphCLKInitTypeDef *PeriphClkInit); +uint32_t HAL_RCCEx_GetPeriphCLKFreq(uint32_t PeriphClk); +HAL_StatusTypeDef HAL_RCCEx_EnablePLLI2S(RCC_PLLI2SInitTypeDef *PLLI2SInit); +HAL_StatusTypeDef HAL_RCCEx_DisablePLLI2S(void); +HAL_StatusTypeDef HAL_RCCEx_EnablePLLSAI(RCC_PLLSAIInitTypeDef *PLLSAIInit); +HAL_StatusTypeDef HAL_RCCEx_DisablePLLSAI(void); +/** + * @} + */ +/* Private macros ------------------------------------------------------------*/ +/** @addtogroup RCCEx_Private_Macros RCCEx Private Macros + * @{ + */ +/** @defgroup RCCEx_IS_RCC_Definitions RCC Private macros to check input parameters + * @{ + */ +#if defined(STM32F756xx) || defined(STM32F746xx) || defined(STM32F750xx) +#define IS_RCC_PERIPHCLOCK(SELECTION) \ + ((((SELECTION) & RCC_PERIPHCLK_I2S) == RCC_PERIPHCLK_I2S) || \ + (((SELECTION) & RCC_PERIPHCLK_LTDC) == RCC_PERIPHCLK_LTDC) || \ + (((SELECTION) & RCC_PERIPHCLK_TIM) == RCC_PERIPHCLK_TIM) || \ + (((SELECTION) & RCC_PERIPHCLK_USART1) == RCC_PERIPHCLK_USART1) || \ + (((SELECTION) & RCC_PERIPHCLK_USART2) == RCC_PERIPHCLK_USART2) || \ + (((SELECTION) & RCC_PERIPHCLK_USART3) == RCC_PERIPHCLK_USART3) || \ + (((SELECTION) & RCC_PERIPHCLK_UART4) == RCC_PERIPHCLK_UART4) || \ + (((SELECTION) & RCC_PERIPHCLK_UART5) == RCC_PERIPHCLK_UART5) || \ + (((SELECTION) & RCC_PERIPHCLK_USART6) == RCC_PERIPHCLK_USART6) || \ + (((SELECTION) & RCC_PERIPHCLK_UART7) == RCC_PERIPHCLK_UART7) || \ + (((SELECTION) & RCC_PERIPHCLK_UART8) == RCC_PERIPHCLK_UART8) || \ + (((SELECTION) & RCC_PERIPHCLK_I2C1) == RCC_PERIPHCLK_I2C1) || \ + (((SELECTION) & RCC_PERIPHCLK_I2C2) == RCC_PERIPHCLK_I2C2) || \ + (((SELECTION) & RCC_PERIPHCLK_I2C3) == RCC_PERIPHCLK_I2C3) || \ + (((SELECTION) & RCC_PERIPHCLK_I2C4) == RCC_PERIPHCLK_I2C4) || \ + (((SELECTION) & RCC_PERIPHCLK_LPTIM1) == RCC_PERIPHCLK_LPTIM1) || \ + (((SELECTION) & RCC_PERIPHCLK_SAI1) == RCC_PERIPHCLK_SAI1) || \ + (((SELECTION) & RCC_PERIPHCLK_SAI2) == RCC_PERIPHCLK_SAI2) || \ + (((SELECTION) & RCC_PERIPHCLK_CLK48) == 
RCC_PERIPHCLK_CLK48) || \ + (((SELECTION) & RCC_PERIPHCLK_CEC) == RCC_PERIPHCLK_CEC) || \ + (((SELECTION) & RCC_PERIPHCLK_SDMMC1) == RCC_PERIPHCLK_SDMMC1) || \ + (((SELECTION) & RCC_PERIPHCLK_SPDIFRX) == RCC_PERIPHCLK_SPDIFRX) || \ + (((SELECTION) & RCC_PERIPHCLK_RTC) == RCC_PERIPHCLK_RTC)) +#elif defined(STM32F745xx) +#define IS_RCC_PERIPHCLOCK(SELECTION) \ + ((((SELECTION) & RCC_PERIPHCLK_I2S) == RCC_PERIPHCLK_I2S) || \ + (((SELECTION) & RCC_PERIPHCLK_TIM) == RCC_PERIPHCLK_TIM) || \ + (((SELECTION) & RCC_PERIPHCLK_USART1) == RCC_PERIPHCLK_USART1) || \ + (((SELECTION) & RCC_PERIPHCLK_USART2) == RCC_PERIPHCLK_USART2) || \ + (((SELECTION) & RCC_PERIPHCLK_USART3) == RCC_PERIPHCLK_USART3) || \ + (((SELECTION) & RCC_PERIPHCLK_UART4) == RCC_PERIPHCLK_UART4) || \ + (((SELECTION) & RCC_PERIPHCLK_UART5) == RCC_PERIPHCLK_UART5) || \ + (((SELECTION) & RCC_PERIPHCLK_USART6) == RCC_PERIPHCLK_USART6) || \ + (((SELECTION) & RCC_PERIPHCLK_UART7) == RCC_PERIPHCLK_UART7) || \ + (((SELECTION) & RCC_PERIPHCLK_UART8) == RCC_PERIPHCLK_UART8) || \ + (((SELECTION) & RCC_PERIPHCLK_I2C1) == RCC_PERIPHCLK_I2C1) || \ + (((SELECTION) & RCC_PERIPHCLK_I2C2) == RCC_PERIPHCLK_I2C2) || \ + (((SELECTION) & RCC_PERIPHCLK_I2C3) == RCC_PERIPHCLK_I2C3) || \ + (((SELECTION) & RCC_PERIPHCLK_I2C4) == RCC_PERIPHCLK_I2C4) || \ + (((SELECTION) & RCC_PERIPHCLK_LPTIM1) == RCC_PERIPHCLK_LPTIM1) || \ + (((SELECTION) & RCC_PERIPHCLK_SAI1) == RCC_PERIPHCLK_SAI1) || \ + (((SELECTION) & RCC_PERIPHCLK_SAI2) == RCC_PERIPHCLK_SAI2) || \ + (((SELECTION) & RCC_PERIPHCLK_CLK48) == RCC_PERIPHCLK_CLK48) || \ + (((SELECTION) & RCC_PERIPHCLK_CEC) == RCC_PERIPHCLK_CEC) || \ + (((SELECTION) & RCC_PERIPHCLK_SDMMC1) == RCC_PERIPHCLK_SDMMC1) || \ + (((SELECTION) & RCC_PERIPHCLK_SPDIFRX) == RCC_PERIPHCLK_SPDIFRX) || \ + (((SELECTION) & RCC_PERIPHCLK_RTC) == RCC_PERIPHCLK_RTC)) +#elif defined (STM32F767xx) || defined (STM32F769xx) || defined (STM32F777xx) || defined (STM32F779xx) +#define IS_RCC_PERIPHCLOCK(SELECTION) \ + ((((SELECTION) & RCC_PERIPHCLK_I2S) == RCC_PERIPHCLK_I2S) || \ + (((SELECTION) & RCC_PERIPHCLK_LTDC) == RCC_PERIPHCLK_LTDC) || \ + (((SELECTION) & RCC_PERIPHCLK_TIM) == RCC_PERIPHCLK_TIM) || \ + (((SELECTION) & RCC_PERIPHCLK_USART1) == RCC_PERIPHCLK_USART1) || \ + (((SELECTION) & RCC_PERIPHCLK_USART2) == RCC_PERIPHCLK_USART2) || \ + (((SELECTION) & RCC_PERIPHCLK_USART3) == RCC_PERIPHCLK_USART3) || \ + (((SELECTION) & RCC_PERIPHCLK_UART4) == RCC_PERIPHCLK_UART4) || \ + (((SELECTION) & RCC_PERIPHCLK_UART5) == RCC_PERIPHCLK_UART5) || \ + (((SELECTION) & RCC_PERIPHCLK_USART6) == RCC_PERIPHCLK_USART6) || \ + (((SELECTION) & RCC_PERIPHCLK_UART7) == RCC_PERIPHCLK_UART7) || \ + (((SELECTION) & RCC_PERIPHCLK_UART8) == RCC_PERIPHCLK_UART8) || \ + (((SELECTION) & RCC_PERIPHCLK_I2C1) == RCC_PERIPHCLK_I2C1) || \ + (((SELECTION) & RCC_PERIPHCLK_I2C2) == RCC_PERIPHCLK_I2C2) || \ + (((SELECTION) & RCC_PERIPHCLK_I2C3) == RCC_PERIPHCLK_I2C3) || \ + (((SELECTION) & RCC_PERIPHCLK_I2C4) == RCC_PERIPHCLK_I2C4) || \ + (((SELECTION) & RCC_PERIPHCLK_LPTIM1) == RCC_PERIPHCLK_LPTIM1) || \ + (((SELECTION) & RCC_PERIPHCLK_SAI1) == RCC_PERIPHCLK_SAI1) || \ + (((SELECTION) & RCC_PERIPHCLK_SAI2) == RCC_PERIPHCLK_SAI2) || \ + (((SELECTION) & RCC_PERIPHCLK_CLK48) == RCC_PERIPHCLK_CLK48) || \ + (((SELECTION) & RCC_PERIPHCLK_CEC) == RCC_PERIPHCLK_CEC) || \ + (((SELECTION) & RCC_PERIPHCLK_SDMMC1) == RCC_PERIPHCLK_SDMMC1) || \ + (((SELECTION) & RCC_PERIPHCLK_SDMMC2) == RCC_PERIPHCLK_SDMMC2) || \ + (((SELECTION) & RCC_PERIPHCLK_DFSDM1) == RCC_PERIPHCLK_DFSDM1) || \ + 
(((SELECTION) & RCC_PERIPHCLK_DFSDM1_AUDIO) == RCC_PERIPHCLK_DFSDM1_AUDIO) || \ + (((SELECTION) & RCC_PERIPHCLK_SPDIFRX) == RCC_PERIPHCLK_SPDIFRX) || \ + (((SELECTION) & RCC_PERIPHCLK_RTC) == RCC_PERIPHCLK_RTC)) +#elif defined (STM32F765xx) +#define IS_RCC_PERIPHCLOCK(SELECTION) \ + ((((SELECTION) & RCC_PERIPHCLK_I2S) == RCC_PERIPHCLK_I2S) || \ + (((SELECTION) & RCC_PERIPHCLK_TIM) == RCC_PERIPHCLK_TIM) || \ + (((SELECTION) & RCC_PERIPHCLK_USART1) == RCC_PERIPHCLK_USART1) || \ + (((SELECTION) & RCC_PERIPHCLK_USART2) == RCC_PERIPHCLK_USART2) || \ + (((SELECTION) & RCC_PERIPHCLK_USART3) == RCC_PERIPHCLK_USART3) || \ + (((SELECTION) & RCC_PERIPHCLK_UART4) == RCC_PERIPHCLK_UART4) || \ + (((SELECTION) & RCC_PERIPHCLK_UART5) == RCC_PERIPHCLK_UART5) || \ + (((SELECTION) & RCC_PERIPHCLK_USART6) == RCC_PERIPHCLK_USART6) || \ + (((SELECTION) & RCC_PERIPHCLK_UART7) == RCC_PERIPHCLK_UART7) || \ + (((SELECTION) & RCC_PERIPHCLK_UART8) == RCC_PERIPHCLK_UART8) || \ + (((SELECTION) & RCC_PERIPHCLK_I2C1) == RCC_PERIPHCLK_I2C1) || \ + (((SELECTION) & RCC_PERIPHCLK_I2C2) == RCC_PERIPHCLK_I2C2) || \ + (((SELECTION) & RCC_PERIPHCLK_I2C3) == RCC_PERIPHCLK_I2C3) || \ + (((SELECTION) & RCC_PERIPHCLK_I2C4) == RCC_PERIPHCLK_I2C4) || \ + (((SELECTION) & RCC_PERIPHCLK_LPTIM1) == RCC_PERIPHCLK_LPTIM1) || \ + (((SELECTION) & RCC_PERIPHCLK_SAI1) == RCC_PERIPHCLK_SAI1) || \ + (((SELECTION) & RCC_PERIPHCLK_SAI2) == RCC_PERIPHCLK_SAI2) || \ + (((SELECTION) & RCC_PERIPHCLK_CLK48) == RCC_PERIPHCLK_CLK48) || \ + (((SELECTION) & RCC_PERIPHCLK_CEC) == RCC_PERIPHCLK_CEC) || \ + (((SELECTION) & RCC_PERIPHCLK_SDMMC1) == RCC_PERIPHCLK_SDMMC1) || \ + (((SELECTION) & RCC_PERIPHCLK_SDMMC2) == RCC_PERIPHCLK_SDMMC2) || \ + (((SELECTION) & RCC_PERIPHCLK_DFSDM1) == RCC_PERIPHCLK_DFSDM1) || \ + (((SELECTION) & RCC_PERIPHCLK_DFSDM1_AUDIO) == RCC_PERIPHCLK_DFSDM1_AUDIO) || \ + (((SELECTION) & RCC_PERIPHCLK_SPDIFRX) == RCC_PERIPHCLK_SPDIFRX) || \ + (((SELECTION) & RCC_PERIPHCLK_RTC) == RCC_PERIPHCLK_RTC)) +#elif defined (STM32F722xx) || defined (STM32F723xx) || defined (STM32F732xx) || defined (STM32F733xx) || defined (STM32F730xx) +#define IS_RCC_PERIPHCLOCK(SELECTION) \ + ((((SELECTION) & RCC_PERIPHCLK_I2S) == RCC_PERIPHCLK_I2S) || \ + (((SELECTION) & RCC_PERIPHCLK_TIM) == RCC_PERIPHCLK_TIM) || \ + (((SELECTION) & RCC_PERIPHCLK_USART1) == RCC_PERIPHCLK_USART1) || \ + (((SELECTION) & RCC_PERIPHCLK_USART2) == RCC_PERIPHCLK_USART2) || \ + (((SELECTION) & RCC_PERIPHCLK_USART3) == RCC_PERIPHCLK_USART3) || \ + (((SELECTION) & RCC_PERIPHCLK_UART4) == RCC_PERIPHCLK_UART4) || \ + (((SELECTION) & RCC_PERIPHCLK_UART5) == RCC_PERIPHCLK_UART5) || \ + (((SELECTION) & RCC_PERIPHCLK_USART6) == RCC_PERIPHCLK_USART6) || \ + (((SELECTION) & RCC_PERIPHCLK_UART7) == RCC_PERIPHCLK_UART7) || \ + (((SELECTION) & RCC_PERIPHCLK_UART8) == RCC_PERIPHCLK_UART8) || \ + (((SELECTION) & RCC_PERIPHCLK_I2C1) == RCC_PERIPHCLK_I2C1) || \ + (((SELECTION) & RCC_PERIPHCLK_I2C2) == RCC_PERIPHCLK_I2C2) || \ + (((SELECTION) & RCC_PERIPHCLK_I2C3) == RCC_PERIPHCLK_I2C3) || \ + (((SELECTION) & RCC_PERIPHCLK_LPTIM1) == RCC_PERIPHCLK_LPTIM1) || \ + (((SELECTION) & RCC_PERIPHCLK_SAI1) == RCC_PERIPHCLK_SAI1) || \ + (((SELECTION) & RCC_PERIPHCLK_SAI2) == RCC_PERIPHCLK_SAI2) || \ + (((SELECTION) & RCC_PERIPHCLK_CLK48) == RCC_PERIPHCLK_CLK48) || \ + (((SELECTION) & RCC_PERIPHCLK_SDMMC1) == RCC_PERIPHCLK_SDMMC1) || \ + (((SELECTION) & RCC_PERIPHCLK_SDMMC2) == RCC_PERIPHCLK_SDMMC2) || \ + (((SELECTION) & RCC_PERIPHCLK_RTC) == RCC_PERIPHCLK_RTC)) +#endif /* STM32F746xx || STM32F756xx || 
STM32F750xx */ +#define IS_RCC_PLLI2SN_VALUE(VALUE) ((50 <= (VALUE)) && ((VALUE) <= 432)) +#if defined (STM32F745xx) || defined (STM32F746xx) || defined (STM32F756xx) || defined (STM32F765xx) || defined (STM32F767xx) || \ + defined (STM32F769xx) || defined (STM32F777xx) || defined (STM32F779xx) || defined (STM32F750xx) +#define IS_RCC_PLLI2SP_VALUE(VALUE) (((VALUE) == RCC_PLLI2SP_DIV2) ||\ + ((VALUE) == RCC_PLLI2SP_DIV4) ||\ + ((VALUE) == RCC_PLLI2SP_DIV6) ||\ + ((VALUE) == RCC_PLLI2SP_DIV8)) +#endif /* STM32F745xx || STM32F746xx || STM32F756xx || STM32F765xx || STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx || STM32F750xx */ +#define IS_RCC_PLLI2SQ_VALUE(VALUE) ((2 <= (VALUE)) && ((VALUE) <= 15)) +#define IS_RCC_PLLI2SR_VALUE(VALUE) ((2 <= (VALUE)) && ((VALUE) <= 7)) + +#define IS_RCC_PLLSAIN_VALUE(VALUE) ((50 <= (VALUE)) && ((VALUE) <= 432)) +#define IS_RCC_PLLSAIP_VALUE(VALUE) (((VALUE) == RCC_PLLSAIP_DIV2) ||\ + ((VALUE) == RCC_PLLSAIP_DIV4) ||\ + ((VALUE) == RCC_PLLSAIP_DIV6) ||\ + ((VALUE) == RCC_PLLSAIP_DIV8)) +#define IS_RCC_PLLSAIQ_VALUE(VALUE) ((2 <= (VALUE)) && ((VALUE) <= 15)) +#define IS_RCC_PLLSAIR_VALUE(VALUE) ((2 <= (VALUE)) && ((VALUE) <= 7)) + +#define IS_RCC_PLLSAI_DIVQ_VALUE(VALUE) ((1 <= (VALUE)) && ((VALUE) <= 32)) + +#define IS_RCC_PLLI2S_DIVQ_VALUE(VALUE) ((1 <= (VALUE)) && ((VALUE) <= 32)) + +#define IS_RCC_PLLSAI_DIVR_VALUE(VALUE) (((VALUE) == RCC_PLLSAIDIVR_2) ||\ + ((VALUE) == RCC_PLLSAIDIVR_4) ||\ + ((VALUE) == RCC_PLLSAIDIVR_8) ||\ + ((VALUE) == RCC_PLLSAIDIVR_16)) +#define IS_RCC_I2SCLKSOURCE(SOURCE) (((SOURCE) == RCC_I2SCLKSOURCE_PLLI2S) || \ + ((SOURCE) == RCC_I2SCLKSOURCE_EXT)) + +#define IS_RCC_SDMMC1CLKSOURCE(SOURCE) (((SOURCE) == RCC_SDMMC1CLKSOURCE_SYSCLK) || \ + ((SOURCE) == RCC_SDMMC1CLKSOURCE_CLK48)) + +#define IS_RCC_CECCLKSOURCE(SOURCE) (((SOURCE) == RCC_CECCLKSOURCE_HSI) || \ + ((SOURCE) == RCC_CECCLKSOURCE_LSE)) +#define IS_RCC_USART1CLKSOURCE(SOURCE) \ + (((SOURCE) == RCC_USART1CLKSOURCE_PCLK2) || \ + ((SOURCE) == RCC_USART1CLKSOURCE_SYSCLK) || \ + ((SOURCE) == RCC_USART1CLKSOURCE_LSE) || \ + ((SOURCE) == RCC_USART1CLKSOURCE_HSI)) + +#define IS_RCC_USART2CLKSOURCE(SOURCE) \ + (((SOURCE) == RCC_USART2CLKSOURCE_PCLK1) || \ + ((SOURCE) == RCC_USART2CLKSOURCE_SYSCLK) || \ + ((SOURCE) == RCC_USART2CLKSOURCE_LSE) || \ + ((SOURCE) == RCC_USART2CLKSOURCE_HSI)) +#define IS_RCC_USART3CLKSOURCE(SOURCE) \ + (((SOURCE) == RCC_USART3CLKSOURCE_PCLK1) || \ + ((SOURCE) == RCC_USART3CLKSOURCE_SYSCLK) || \ + ((SOURCE) == RCC_USART3CLKSOURCE_LSE) || \ + ((SOURCE) == RCC_USART3CLKSOURCE_HSI)) + +#define IS_RCC_UART4CLKSOURCE(SOURCE) \ + (((SOURCE) == RCC_UART4CLKSOURCE_PCLK1) || \ + ((SOURCE) == RCC_UART4CLKSOURCE_SYSCLK) || \ + ((SOURCE) == RCC_UART4CLKSOURCE_LSE) || \ + ((SOURCE) == RCC_UART4CLKSOURCE_HSI)) + +#define IS_RCC_UART5CLKSOURCE(SOURCE) \ + (((SOURCE) == RCC_UART5CLKSOURCE_PCLK1) || \ + ((SOURCE) == RCC_UART5CLKSOURCE_SYSCLK) || \ + ((SOURCE) == RCC_UART5CLKSOURCE_LSE) || \ + ((SOURCE) == RCC_UART5CLKSOURCE_HSI)) + +#define IS_RCC_USART6CLKSOURCE(SOURCE) \ + (((SOURCE) == RCC_USART6CLKSOURCE_PCLK2) || \ + ((SOURCE) == RCC_USART6CLKSOURCE_SYSCLK) || \ + ((SOURCE) == RCC_USART6CLKSOURCE_LSE) || \ + ((SOURCE) == RCC_USART6CLKSOURCE_HSI)) + +#define IS_RCC_UART7CLKSOURCE(SOURCE) \ + (((SOURCE) == RCC_UART7CLKSOURCE_PCLK1) || \ + ((SOURCE) == RCC_UART7CLKSOURCE_SYSCLK) || \ + ((SOURCE) == RCC_UART7CLKSOURCE_LSE) || \ + ((SOURCE) == RCC_UART7CLKSOURCE_HSI)) + +#define IS_RCC_UART8CLKSOURCE(SOURCE) \ + (((SOURCE) == RCC_UART8CLKSOURCE_PCLK1) || \ + 
((SOURCE) == RCC_UART8CLKSOURCE_SYSCLK) || \ + ((SOURCE) == RCC_UART8CLKSOURCE_LSE) || \ + ((SOURCE) == RCC_UART8CLKSOURCE_HSI)) +#define IS_RCC_I2C1CLKSOURCE(SOURCE) \ + (((SOURCE) == RCC_I2C1CLKSOURCE_PCLK1) || \ + ((SOURCE) == RCC_I2C1CLKSOURCE_SYSCLK)|| \ + ((SOURCE) == RCC_I2C1CLKSOURCE_HSI)) +#define IS_RCC_I2C2CLKSOURCE(SOURCE) \ + (((SOURCE) == RCC_I2C2CLKSOURCE_PCLK1) || \ + ((SOURCE) == RCC_I2C2CLKSOURCE_SYSCLK)|| \ + ((SOURCE) == RCC_I2C2CLKSOURCE_HSI)) + +#define IS_RCC_I2C3CLKSOURCE(SOURCE) \ + (((SOURCE) == RCC_I2C3CLKSOURCE_PCLK1) || \ + ((SOURCE) == RCC_I2C3CLKSOURCE_SYSCLK)|| \ + ((SOURCE) == RCC_I2C3CLKSOURCE_HSI)) +#define IS_RCC_I2C4CLKSOURCE(SOURCE) \ + (((SOURCE) == RCC_I2C4CLKSOURCE_PCLK1) || \ + ((SOURCE) == RCC_I2C4CLKSOURCE_SYSCLK)|| \ + ((SOURCE) == RCC_I2C4CLKSOURCE_HSI)) +#define IS_RCC_LPTIM1CLK(SOURCE) \ + (((SOURCE) == RCC_LPTIM1CLKSOURCE_PCLK1) || \ + ((SOURCE) == RCC_LPTIM1CLKSOURCE_LSI) || \ + ((SOURCE) == RCC_LPTIM1CLKSOURCE_HSI) || \ + ((SOURCE) == RCC_LPTIM1CLKSOURCE_LSE)) +#define IS_RCC_CLK48SOURCE(SOURCE) \ + (((SOURCE) == RCC_CLK48SOURCE_PLLSAIP) || \ + ((SOURCE) == RCC_CLK48SOURCE_PLL)) +#define IS_RCC_TIMPRES(VALUE) \ + (((VALUE) == RCC_TIMPRES_DESACTIVATED) || \ + ((VALUE) == RCC_TIMPRES_ACTIVATED)) + +#if defined (STM32F722xx) || defined (STM32F723xx) || defined (STM32F732xx) || defined (STM32F733xx) || defined (STM32F745xx) ||\ + defined (STM32F746xx) || defined (STM32F756xx) || defined (STM32F730xx) || defined (STM32F750xx) +#define IS_RCC_SAI1CLKSOURCE(SOURCE) (((SOURCE) == RCC_SAI1CLKSOURCE_PLLSAI) || \ + ((SOURCE) == RCC_SAI1CLKSOURCE_PLLI2S) || \ + ((SOURCE) == RCC_SAI1CLKSOURCE_PIN)) +#define IS_RCC_SAI2CLKSOURCE(SOURCE) (((SOURCE) == RCC_SAI2CLKSOURCE_PLLSAI) || \ + ((SOURCE) == RCC_SAI2CLKSOURCE_PLLI2S) || \ + ((SOURCE) == RCC_SAI2CLKSOURCE_PIN)) +#endif /* STM32F722xx || STM32F723xx || STM32F732xx || STM32F733xx || STM32F745xx || STM32F746xx || STM32F756xx || STM32F750xx || STM32F730xx */ + +#if defined (STM32F765xx) || defined (STM32F767xx) || defined (STM32F769xx) || defined (STM32F777xx) || defined (STM32F779xx) +#define IS_RCC_PLLR_VALUE(VALUE) ((2 <= (VALUE)) && ((VALUE) <= 7)) + +#define IS_RCC_SAI1CLKSOURCE(SOURCE) (((SOURCE) == RCC_SAI1CLKSOURCE_PLLSAI) || \ + ((SOURCE) == RCC_SAI1CLKSOURCE_PLLI2S) || \ + ((SOURCE) == RCC_SAI1CLKSOURCE_PIN) || \ + ((SOURCE) == RCC_SAI1CLKSOURCE_PLLSRC)) + +#define IS_RCC_SAI2CLKSOURCE(SOURCE) (((SOURCE) == RCC_SAI2CLKSOURCE_PLLSAI) || \ + ((SOURCE) == RCC_SAI2CLKSOURCE_PLLI2S) || \ + ((SOURCE) == RCC_SAI2CLKSOURCE_PIN) || \ + ((SOURCE) == RCC_SAI2CLKSOURCE_PLLSRC)) + +#define IS_RCC_DFSDM1CLKSOURCE(SOURCE) (((SOURCE) == RCC_DFSDM1CLKSOURCE_PCLK2) || \ + ((SOURCE) == RCC_DFSDM1CLKSOURCE_SYSCLK)) + +#define IS_RCC_DFSDM1AUDIOCLKSOURCE(SOURCE) (((SOURCE) == RCC_DFSDM1AUDIOCLKSOURCE_SAI1) || \ + ((SOURCE) == RCC_DFSDM1AUDIOCLKSOURCE_SAI2)) +#endif /* STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx */ + +#if defined (STM32F722xx) || defined (STM32F723xx) || defined (STM32F732xx) || defined (STM32F733xx) || defined (STM32F765xx) ||\ + defined (STM32F767xx) || defined (STM32F769xx) || defined (STM32F777xx) || defined (STM32F779xx) || defined (STM32F730xx) +#define IS_RCC_SDMMC2CLKSOURCE(SOURCE) (((SOURCE) == RCC_SDMMC2CLKSOURCE_SYSCLK) || \ + ((SOURCE) == RCC_SDMMC2CLKSOURCE_CLK48)) +#endif /* STM32F722xx || STM32F723xx || STM32F732xx || STM32F733xx || STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx || STM32F730xx */ + +#if defined (STM32F767xx) || defined (STM32F769xx) || defined 
(STM32F777xx) || defined (STM32F779xx) +#define IS_RCC_DSIBYTELANECLKSOURCE(SOURCE) (((SOURCE) == RCC_DSICLKSOURCE_PLLR) ||\ + ((SOURCE) == RCC_DSICLKSOURCE_DSIPHY)) +#endif /* STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx */ + +/** + * @} + */ + +/** + * @} + */ + +/** + * @} + */ + +/** + * @} + */ +#ifdef __cplusplus +} +#endif + +#endif /* __STM32F7xx_HAL_RCC_EX_H */ + diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_tim.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_tim.h new file mode 100644 index 0000000..8e41f77 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_tim.h @@ -0,0 +1,2386 @@ +/** + ****************************************************************************** + * @file stm32f7xx_hal_tim.h + * @author MCD Application Team + * @brief Header file of TIM HAL module. + ****************************************************************************** + * @attention + * + * Copyright (c) 2017 STMicroelectronics. + * All rights reserved. + * + * This software is licensed under terms that can be found in the LICENSE file + * in the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. + * + ****************************************************************************** + */ + +/* Define to prevent recursive inclusion -------------------------------------*/ +#ifndef STM32F7xx_HAL_TIM_H +#define STM32F7xx_HAL_TIM_H + +#ifdef __cplusplus +extern "C" { +#endif + +/* Includes ------------------------------------------------------------------*/ +#include "stm32f7xx_hal_def.h" + +/** @addtogroup STM32F7xx_HAL_Driver + * @{ + */ + +/** @addtogroup TIM + * @{ + */ + +/* Exported types ------------------------------------------------------------*/ +/** @defgroup TIM_Exported_Types TIM Exported Types + * @{ + */ + +/** + * @brief TIM Time base Configuration Structure definition + */ +typedef struct +{ + uint32_t Prescaler; /*!< Specifies the prescaler value used to divide the TIM clock. + This parameter can be a number between Min_Data = 0x0000 and Max_Data = 0xFFFF */ + + uint32_t CounterMode; /*!< Specifies the counter mode. + This parameter can be a value of @ref TIM_Counter_Mode */ + + uint32_t Period; /*!< Specifies the period value to be loaded into the active + Auto-Reload Register at the next update event. + This parameter can be a number between Min_Data = 0x0000 and Max_Data = 0xFFFF. */ + + uint32_t ClockDivision; /*!< Specifies the clock division. + This parameter can be a value of @ref TIM_ClockDivision */ + + uint32_t RepetitionCounter; /*!< Specifies the repetition counter value. Each time the RCR downcounter + reaches zero, an update event is generated and counting restarts + from the RCR value (N). + This means in PWM mode that (N+1) corresponds to: + - the number of PWM periods in edge-aligned mode + - the number of half PWM period in center-aligned mode + GP timers: this parameter must be a number between Min_Data = 0x00 and + Max_Data = 0xFF. + Advanced timers: this parameter must be a number between Min_Data = 0x0000 and + Max_Data = 0xFFFF. */ + + uint32_t AutoReloadPreload; /*!< Specifies the auto-reload preload. 
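+                                   (A minimal time-base sketch built on this structure is shown below;
+                                   TIM3, the htim3 handle and the 108 MHz APB1 timer kernel clock are
+                                   assumptions about the target, not requirements.)
+                                   @code
+                                   TIM_HandleTypeDef htim3 = {0};
+                                   htim3.Instance               = TIM3;
+                                   htim3.Init.Prescaler         = 108U - 1U;   // assumed 108 MHz kernel clock -> 1 MHz counter
+                                   htim3.Init.CounterMode       = TIM_COUNTERMODE_UP;
+                                   htim3.Init.Period            = 1000U - 1U;  // 1 kHz update event
+                                   htim3.Init.ClockDivision     = TIM_CLOCKDIVISION_DIV1;
+                                   htim3.Init.RepetitionCounter = 0U;
+                                   htim3.Init.AutoReloadPreload = TIM_AUTORELOAD_PRELOAD_ENABLE;
+                                   if (HAL_TIM_Base_Init(&htim3) == HAL_OK)
+                                   {
+                                     (void)HAL_TIM_Base_Start(&htim3);
+                                   }
+                                   @endcode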
+ This parameter can be a value of @ref TIM_AutoReloadPreload */ +} TIM_Base_InitTypeDef; + +/** + * @brief TIM Output Compare Configuration Structure definition + */ +typedef struct +{ + uint32_t OCMode; /*!< Specifies the TIM mode. + This parameter can be a value of @ref TIM_Output_Compare_and_PWM_modes */ + + uint32_t Pulse; /*!< Specifies the pulse value to be loaded into the Capture Compare Register. + This parameter can be a number between Min_Data = 0x0000 and Max_Data = 0xFFFF */ + + uint32_t OCPolarity; /*!< Specifies the output polarity. + This parameter can be a value of @ref TIM_Output_Compare_Polarity */ + + uint32_t OCNPolarity; /*!< Specifies the complementary output polarity. + This parameter can be a value of @ref TIM_Output_Compare_N_Polarity + @note This parameter is valid only for timer instances supporting break feature. */ + + uint32_t OCFastMode; /*!< Specifies the Fast mode state. + This parameter can be a value of @ref TIM_Output_Fast_State + @note This parameter is valid only in PWM1 and PWM2 mode. */ + + + uint32_t OCIdleState; /*!< Specifies the TIM Output Compare pin state during Idle state. + This parameter can be a value of @ref TIM_Output_Compare_Idle_State + @note This parameter is valid only for timer instances supporting break feature. */ + + uint32_t OCNIdleState; /*!< Specifies the TIM Output Compare pin state during Idle state. + This parameter can be a value of @ref TIM_Output_Compare_N_Idle_State + @note This parameter is valid only for timer instances supporting break feature. */ +} TIM_OC_InitTypeDef; + +/** + * @brief TIM One Pulse Mode Configuration Structure definition + */ +typedef struct +{ + uint32_t OCMode; /*!< Specifies the TIM mode. + This parameter can be a value of @ref TIM_Output_Compare_and_PWM_modes */ + + uint32_t Pulse; /*!< Specifies the pulse value to be loaded into the Capture Compare Register. + This parameter can be a number between Min_Data = 0x0000 and Max_Data = 0xFFFF */ + + uint32_t OCPolarity; /*!< Specifies the output polarity. + This parameter can be a value of @ref TIM_Output_Compare_Polarity */ + + uint32_t OCNPolarity; /*!< Specifies the complementary output polarity. + This parameter can be a value of @ref TIM_Output_Compare_N_Polarity + @note This parameter is valid only for timer instances supporting break feature. */ + + uint32_t OCIdleState; /*!< Specifies the TIM Output Compare pin state during Idle state. + This parameter can be a value of @ref TIM_Output_Compare_Idle_State + @note This parameter is valid only for timer instances supporting break feature. */ + + uint32_t OCNIdleState; /*!< Specifies the TIM Output Compare pin state during Idle state. + This parameter can be a value of @ref TIM_Output_Compare_N_Idle_State + @note This parameter is valid only for timer instances supporting break feature. */ + + uint32_t ICPolarity; /*!< Specifies the active edge of the input signal. + This parameter can be a value of @ref TIM_Input_Capture_Polarity */ + + uint32_t ICSelection; /*!< Specifies the input. + This parameter can be a value of @ref TIM_Input_Capture_Selection */ + + uint32_t ICFilter; /*!< Specifies the input capture filter. + This parameter can be a number between Min_Data = 0x0 and Max_Data = 0xF */ +} TIM_OnePulse_InitTypeDef; + +/** + * @brief TIM Input Capture Configuration Structure definition + */ +typedef struct +{ + uint32_t ICPolarity; /*!< Specifies the active edge of the input signal. 
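+                              (For illustration only: this structure is normally filled and applied per
+                              channel as sketched below; htim3 is an assumed handle that has already been
+                              initialised with HAL_TIM_IC_Init().)
+                              @code
+                              TIM_IC_InitTypeDef sConfigIC = {0};
+                              sConfigIC.ICPolarity  = TIM_ICPOLARITY_RISING;
+                              sConfigIC.ICSelection = TIM_ICSELECTION_DIRECTTI;
+                              sConfigIC.ICPrescaler = TIM_ICPSC_DIV1;
+                              sConfigIC.ICFilter    = 0x0U;
+                              if (HAL_TIM_IC_ConfigChannel(&htim3, &sConfigIC, TIM_CHANNEL_1) == HAL_OK)
+                              {
+                                (void)HAL_TIM_IC_Start_IT(&htim3, TIM_CHANNEL_1);
+                              }
+                              @endcode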
+ This parameter can be a value of @ref TIM_Input_Capture_Polarity */ + + uint32_t ICSelection; /*!< Specifies the input. + This parameter can be a value of @ref TIM_Input_Capture_Selection */ + + uint32_t ICPrescaler; /*!< Specifies the Input Capture Prescaler. + This parameter can be a value of @ref TIM_Input_Capture_Prescaler */ + + uint32_t ICFilter; /*!< Specifies the input capture filter. + This parameter can be a number between Min_Data = 0x0 and Max_Data = 0xF */ +} TIM_IC_InitTypeDef; + +/** + * @brief TIM Encoder Configuration Structure definition + */ +typedef struct +{ + uint32_t EncoderMode; /*!< Specifies the active edge of the input signal. + This parameter can be a value of @ref TIM_Encoder_Mode */ + + uint32_t IC1Polarity; /*!< Specifies the active edge of the input signal. + This parameter can be a value of @ref TIM_Encoder_Input_Polarity */ + + uint32_t IC1Selection; /*!< Specifies the input. + This parameter can be a value of @ref TIM_Input_Capture_Selection */ + + uint32_t IC1Prescaler; /*!< Specifies the Input Capture Prescaler. + This parameter can be a value of @ref TIM_Input_Capture_Prescaler */ + + uint32_t IC1Filter; /*!< Specifies the input capture filter. + This parameter can be a number between Min_Data = 0x0 and Max_Data = 0xF */ + + uint32_t IC2Polarity; /*!< Specifies the active edge of the input signal. + This parameter can be a value of @ref TIM_Encoder_Input_Polarity */ + + uint32_t IC2Selection; /*!< Specifies the input. + This parameter can be a value of @ref TIM_Input_Capture_Selection */ + + uint32_t IC2Prescaler; /*!< Specifies the Input Capture Prescaler. + This parameter can be a value of @ref TIM_Input_Capture_Prescaler */ + + uint32_t IC2Filter; /*!< Specifies the input capture filter. + This parameter can be a number between Min_Data = 0x0 and Max_Data = 0xF */ +} TIM_Encoder_InitTypeDef; + +/** + * @brief Clock Configuration Handle Structure definition + */ +typedef struct +{ + uint32_t ClockSource; /*!< TIM clock sources + This parameter can be a value of @ref TIM_Clock_Source */ + uint32_t ClockPolarity; /*!< TIM clock polarity + This parameter can be a value of @ref TIM_Clock_Polarity */ + uint32_t ClockPrescaler; /*!< TIM clock prescaler + This parameter can be a value of @ref TIM_Clock_Prescaler */ + uint32_t ClockFilter; /*!< TIM clock filter + This parameter can be a number between Min_Data = 0x0 and Max_Data = 0xF */ +} TIM_ClockConfigTypeDef; + +/** + * @brief TIM Clear Input Configuration Handle Structure definition + */ +typedef struct +{ + uint32_t ClearInputState; /*!< TIM clear Input state + This parameter can be ENABLE or DISABLE */ + uint32_t ClearInputSource; /*!< TIM clear Input sources + This parameter can be a value of @ref TIM_ClearInput_Source */ + uint32_t ClearInputPolarity; /*!< TIM Clear Input polarity + This parameter can be a value of @ref TIM_ClearInput_Polarity */ + uint32_t ClearInputPrescaler; /*!< TIM Clear Input prescaler + This parameter must be 0: When OCRef clear feature is used with ETR source, + ETR prescaler must be off */ + uint32_t ClearInputFilter; /*!< TIM Clear Input filter + This parameter can be a number between Min_Data = 0x0 and Max_Data = 0xF */ +} TIM_ClearInputConfigTypeDef; + +/** + * @brief TIM Master configuration Structure definition + * @note Advanced timers provide TRGO2 internal line which is redirected + * to the ADC + */ +typedef struct +{ + uint32_t MasterOutputTrigger; /*!< Trigger output (TRGO) selection + This parameter can be a value of @ref TIM_Master_Mode_Selection */ + 
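+  /* Hedged usage sketch: this structure is normally passed to
+     HAL_TIMEx_MasterConfigSynchronization() (declared in stm32f7xx_hal_tim_ex.h).
+     The update-event TRGO selection and the htim handle below are assumptions,
+     not a recommended configuration.
+
+       TIM_MasterConfigTypeDef sMasterConfig = {0};
+       sMasterConfig.MasterOutputTrigger  = TIM_TRGO_UPDATE;
+       sMasterConfig.MasterOutputTrigger2 = TIM_TRGO2_UPDATE;
+       sMasterConfig.MasterSlaveMode      = TIM_MASTERSLAVEMODE_DISABLE;
+       (void)HAL_TIMEx_MasterConfigSynchronization(&htim, &sMasterConfig);
+  */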
uint32_t MasterOutputTrigger2; /*!< Trigger output2 (TRGO2) selection + This parameter can be a value of @ref TIM_Master_Mode_Selection_2 */ + uint32_t MasterSlaveMode; /*!< Master/slave mode selection + This parameter can be a value of @ref TIM_Master_Slave_Mode + @note When the Master/slave mode is enabled, the effect of + an event on the trigger input (TRGI) is delayed to allow a + perfect synchronization between the current timer and its + slaves (through TRGO). It is not mandatory in case of timer + synchronization mode. */ +} TIM_MasterConfigTypeDef; + +/** + * @brief TIM Slave configuration Structure definition + */ +typedef struct +{ + uint32_t SlaveMode; /*!< Slave mode selection + This parameter can be a value of @ref TIM_Slave_Mode */ + uint32_t InputTrigger; /*!< Input Trigger source + This parameter can be a value of @ref TIM_Trigger_Selection */ + uint32_t TriggerPolarity; /*!< Input Trigger polarity + This parameter can be a value of @ref TIM_Trigger_Polarity */ + uint32_t TriggerPrescaler; /*!< Input trigger prescaler + This parameter can be a value of @ref TIM_Trigger_Prescaler */ + uint32_t TriggerFilter; /*!< Input trigger filter + This parameter can be a number between Min_Data = 0x0 and Max_Data = 0xF */ + +} TIM_SlaveConfigTypeDef; + +/** + * @brief TIM Break input(s) and Dead time configuration Structure definition + * @note 2 break inputs can be configured (BKIN and BKIN2) with configurable + * filter and polarity. + */ +typedef struct +{ + uint32_t OffStateRunMode; /*!< TIM off state in run mode, This parameter can be a value of @ref TIM_OSSR_Off_State_Selection_for_Run_mode_state */ + + uint32_t OffStateIDLEMode; /*!< TIM off state in IDLE mode, This parameter can be a value of @ref TIM_OSSI_Off_State_Selection_for_Idle_mode_state */ + + uint32_t LockLevel; /*!< TIM Lock level, This parameter can be a value of @ref TIM_Lock_level */ + + uint32_t DeadTime; /*!< TIM dead Time, This parameter can be a number between Min_Data = 0x00 and Max_Data = 0xFF */ + + uint32_t BreakState; /*!< TIM Break State, This parameter can be a value of @ref TIM_Break_Input_enable_disable */ + + uint32_t BreakPolarity; /*!< TIM Break input polarity, This parameter can be a value of @ref TIM_Break_Polarity */ + + uint32_t BreakFilter; /*!< Specifies the break input filter.This parameter can be a number between Min_Data = 0x0 and Max_Data = 0xF */ + + uint32_t Break2State; /*!< TIM Break2 State, This parameter can be a value of @ref TIM_Break2_Input_enable_disable */ + + uint32_t Break2Polarity; /*!< TIM Break2 input polarity, This parameter can be a value of @ref TIM_Break2_Polarity */ + + uint32_t Break2Filter; /*!< TIM break2 input filter.This parameter can be a number between Min_Data = 0x0 and Max_Data = 0xF */ + + uint32_t AutomaticOutput; /*!< TIM Automatic Output Enable state, This parameter can be a value of @ref TIM_AOE_Bit_Set_Reset */ + +} TIM_BreakDeadTimeConfigTypeDef; + +/** + * @brief HAL State structures definition + */ +typedef enum +{ + HAL_TIM_STATE_RESET = 0x00U, /*!< Peripheral not yet initialized or disabled */ + HAL_TIM_STATE_READY = 0x01U, /*!< Peripheral Initialized and ready for use */ + HAL_TIM_STATE_BUSY = 0x02U, /*!< An internal process is ongoing */ + HAL_TIM_STATE_TIMEOUT = 0x03U, /*!< Timeout state */ + HAL_TIM_STATE_ERROR = 0x04U /*!< Reception process is ongoing */ +} HAL_TIM_StateTypeDef; + +/** + * @brief TIM Channel States definition + */ +typedef enum +{ + HAL_TIM_CHANNEL_STATE_RESET = 0x00U, /*!< TIM Channel initial state */ + 
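+  /* Illustrative only (htim3 is an assumed, already-initialised handle): the
+     peripheral-level state defined above can be polled before starting a new
+     operation, e.g.
+
+       if (HAL_TIM_Base_GetState(&htim3) == HAL_TIM_STATE_READY)
+       {
+         (void)HAL_TIM_Base_Start_IT(&htim3);
+       }
+  */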
HAL_TIM_CHANNEL_STATE_READY = 0x01U, /*!< TIM Channel ready for use */ + HAL_TIM_CHANNEL_STATE_BUSY = 0x02U, /*!< An internal process is ongoing on the TIM channel */ +} HAL_TIM_ChannelStateTypeDef; + +/** + * @brief DMA Burst States definition + */ +typedef enum +{ + HAL_DMA_BURST_STATE_RESET = 0x00U, /*!< DMA Burst initial state */ + HAL_DMA_BURST_STATE_READY = 0x01U, /*!< DMA Burst ready for use */ + HAL_DMA_BURST_STATE_BUSY = 0x02U, /*!< Ongoing DMA Burst */ +} HAL_TIM_DMABurstStateTypeDef; + +/** + * @brief HAL Active channel structures definition + */ +typedef enum +{ + HAL_TIM_ACTIVE_CHANNEL_1 = 0x01U, /*!< The active channel is 1 */ + HAL_TIM_ACTIVE_CHANNEL_2 = 0x02U, /*!< The active channel is 2 */ + HAL_TIM_ACTIVE_CHANNEL_3 = 0x04U, /*!< The active channel is 3 */ + HAL_TIM_ACTIVE_CHANNEL_4 = 0x08U, /*!< The active channel is 4 */ + HAL_TIM_ACTIVE_CHANNEL_5 = 0x10U, /*!< The active channel is 5 */ + HAL_TIM_ACTIVE_CHANNEL_6 = 0x20U, /*!< The active channel is 6 */ + HAL_TIM_ACTIVE_CHANNEL_CLEARED = 0x00U /*!< All active channels cleared */ +} HAL_TIM_ActiveChannel; + +/** + * @brief TIM Time Base Handle Structure definition + */ +#if (USE_HAL_TIM_REGISTER_CALLBACKS == 1) +typedef struct __TIM_HandleTypeDef +#else +typedef struct +#endif /* USE_HAL_TIM_REGISTER_CALLBACKS */ +{ + TIM_TypeDef *Instance; /*!< Register base address */ + TIM_Base_InitTypeDef Init; /*!< TIM Time Base required parameters */ + HAL_TIM_ActiveChannel Channel; /*!< Active channel */ + DMA_HandleTypeDef *hdma[7]; /*!< DMA Handlers array + This array is accessed by a @ref DMA_Handle_index */ + HAL_LockTypeDef Lock; /*!< Locking object */ + __IO HAL_TIM_StateTypeDef State; /*!< TIM operation state */ + __IO HAL_TIM_ChannelStateTypeDef ChannelState[6]; /*!< TIM channel operation state */ + __IO HAL_TIM_ChannelStateTypeDef ChannelNState[4]; /*!< TIM complementary channel operation state */ + __IO HAL_TIM_DMABurstStateTypeDef DMABurstState; /*!< DMA burst operation state */ + +#if (USE_HAL_TIM_REGISTER_CALLBACKS == 1) + void (* Base_MspInitCallback)(struct __TIM_HandleTypeDef *htim); /*!< TIM Base Msp Init Callback */ + void (* Base_MspDeInitCallback)(struct __TIM_HandleTypeDef *htim); /*!< TIM Base Msp DeInit Callback */ + void (* IC_MspInitCallback)(struct __TIM_HandleTypeDef *htim); /*!< TIM IC Msp Init Callback */ + void (* IC_MspDeInitCallback)(struct __TIM_HandleTypeDef *htim); /*!< TIM IC Msp DeInit Callback */ + void (* OC_MspInitCallback)(struct __TIM_HandleTypeDef *htim); /*!< TIM OC Msp Init Callback */ + void (* OC_MspDeInitCallback)(struct __TIM_HandleTypeDef *htim); /*!< TIM OC Msp DeInit Callback */ + void (* PWM_MspInitCallback)(struct __TIM_HandleTypeDef *htim); /*!< TIM PWM Msp Init Callback */ + void (* PWM_MspDeInitCallback)(struct __TIM_HandleTypeDef *htim); /*!< TIM PWM Msp DeInit Callback */ + void (* OnePulse_MspInitCallback)(struct __TIM_HandleTypeDef *htim); /*!< TIM One Pulse Msp Init Callback */ + void (* OnePulse_MspDeInitCallback)(struct __TIM_HandleTypeDef *htim); /*!< TIM One Pulse Msp DeInit Callback */ + void (* Encoder_MspInitCallback)(struct __TIM_HandleTypeDef *htim); /*!< TIM Encoder Msp Init Callback */ + void (* Encoder_MspDeInitCallback)(struct __TIM_HandleTypeDef *htim); /*!< TIM Encoder Msp DeInit Callback */ + void (* HallSensor_MspInitCallback)(struct __TIM_HandleTypeDef *htim); /*!< TIM Hall Sensor Msp Init Callback */ + void (* HallSensor_MspDeInitCallback)(struct __TIM_HandleTypeDef *htim); /*!< TIM Hall Sensor Msp DeInit Callback */ + void (* 
PeriodElapsedCallback)(struct __TIM_HandleTypeDef *htim); /*!< TIM Period Elapsed Callback */ + void (* PeriodElapsedHalfCpltCallback)(struct __TIM_HandleTypeDef *htim); /*!< TIM Period Elapsed half complete Callback */ + void (* TriggerCallback)(struct __TIM_HandleTypeDef *htim); /*!< TIM Trigger Callback */ + void (* TriggerHalfCpltCallback)(struct __TIM_HandleTypeDef *htim); /*!< TIM Trigger half complete Callback */ + void (* IC_CaptureCallback)(struct __TIM_HandleTypeDef *htim); /*!< TIM Input Capture Callback */ + void (* IC_CaptureHalfCpltCallback)(struct __TIM_HandleTypeDef *htim); /*!< TIM Input Capture half complete Callback */ + void (* OC_DelayElapsedCallback)(struct __TIM_HandleTypeDef *htim); /*!< TIM Output Compare Delay Elapsed Callback */ + void (* PWM_PulseFinishedCallback)(struct __TIM_HandleTypeDef *htim); /*!< TIM PWM Pulse Finished Callback */ + void (* PWM_PulseFinishedHalfCpltCallback)(struct __TIM_HandleTypeDef *htim); /*!< TIM PWM Pulse Finished half complete Callback */ + void (* ErrorCallback)(struct __TIM_HandleTypeDef *htim); /*!< TIM Error Callback */ + void (* CommutationCallback)(struct __TIM_HandleTypeDef *htim); /*!< TIM Commutation Callback */ + void (* CommutationHalfCpltCallback)(struct __TIM_HandleTypeDef *htim); /*!< TIM Commutation half complete Callback */ + void (* BreakCallback)(struct __TIM_HandleTypeDef *htim); /*!< TIM Break Callback */ + void (* Break2Callback)(struct __TIM_HandleTypeDef *htim); /*!< TIM Break2 Callback */ +#endif /* USE_HAL_TIM_REGISTER_CALLBACKS */ +} TIM_HandleTypeDef; + +#if (USE_HAL_TIM_REGISTER_CALLBACKS == 1) +/** + * @brief HAL TIM Callback ID enumeration definition + */ +typedef enum +{ + HAL_TIM_BASE_MSPINIT_CB_ID = 0x00U /*!< TIM Base MspInit Callback ID */ + , HAL_TIM_BASE_MSPDEINIT_CB_ID = 0x01U /*!< TIM Base MspDeInit Callback ID */ + , HAL_TIM_IC_MSPINIT_CB_ID = 0x02U /*!< TIM IC MspInit Callback ID */ + , HAL_TIM_IC_MSPDEINIT_CB_ID = 0x03U /*!< TIM IC MspDeInit Callback ID */ + , HAL_TIM_OC_MSPINIT_CB_ID = 0x04U /*!< TIM OC MspInit Callback ID */ + , HAL_TIM_OC_MSPDEINIT_CB_ID = 0x05U /*!< TIM OC MspDeInit Callback ID */ + , HAL_TIM_PWM_MSPINIT_CB_ID = 0x06U /*!< TIM PWM MspInit Callback ID */ + , HAL_TIM_PWM_MSPDEINIT_CB_ID = 0x07U /*!< TIM PWM MspDeInit Callback ID */ + , HAL_TIM_ONE_PULSE_MSPINIT_CB_ID = 0x08U /*!< TIM One Pulse MspInit Callback ID */ + , HAL_TIM_ONE_PULSE_MSPDEINIT_CB_ID = 0x09U /*!< TIM One Pulse MspDeInit Callback ID */ + , HAL_TIM_ENCODER_MSPINIT_CB_ID = 0x0AU /*!< TIM Encoder MspInit Callback ID */ + , HAL_TIM_ENCODER_MSPDEINIT_CB_ID = 0x0BU /*!< TIM Encoder MspDeInit Callback ID */ + , HAL_TIM_HALL_SENSOR_MSPINIT_CB_ID = 0x0CU /*!< TIM Hall Sensor MspDeInit Callback ID */ + , HAL_TIM_HALL_SENSOR_MSPDEINIT_CB_ID = 0x0DU /*!< TIM Hall Sensor MspDeInit Callback ID */ + , HAL_TIM_PERIOD_ELAPSED_CB_ID = 0x0EU /*!< TIM Period Elapsed Callback ID */ + , HAL_TIM_PERIOD_ELAPSED_HALF_CB_ID = 0x0FU /*!< TIM Period Elapsed half complete Callback ID */ + , HAL_TIM_TRIGGER_CB_ID = 0x10U /*!< TIM Trigger Callback ID */ + , HAL_TIM_TRIGGER_HALF_CB_ID = 0x11U /*!< TIM Trigger half complete Callback ID */ + + , HAL_TIM_IC_CAPTURE_CB_ID = 0x12U /*!< TIM Input Capture Callback ID */ + , HAL_TIM_IC_CAPTURE_HALF_CB_ID = 0x13U /*!< TIM Input Capture half complete Callback ID */ + , HAL_TIM_OC_DELAY_ELAPSED_CB_ID = 0x14U /*!< TIM Output Compare Delay Elapsed Callback ID */ + , HAL_TIM_PWM_PULSE_FINISHED_CB_ID = 0x15U /*!< TIM PWM Pulse Finished Callback ID */ + , HAL_TIM_PWM_PULSE_FINISHED_HALF_CB_ID = 0x16U 
/*!< TIM PWM Pulse Finished half complete Callback ID */ + , HAL_TIM_ERROR_CB_ID = 0x17U /*!< TIM Error Callback ID */ + , HAL_TIM_COMMUTATION_CB_ID = 0x18U /*!< TIM Commutation Callback ID */ + , HAL_TIM_COMMUTATION_HALF_CB_ID = 0x19U /*!< TIM Commutation half complete Callback ID */ + , HAL_TIM_BREAK_CB_ID = 0x1AU /*!< TIM Break Callback ID */ + , HAL_TIM_BREAK2_CB_ID = 0x1BU /*!< TIM Break2 Callback ID */ +} HAL_TIM_CallbackIDTypeDef; + +/** + * @brief HAL TIM Callback pointer definition + */ +typedef void (*pTIM_CallbackTypeDef)(TIM_HandleTypeDef *htim); /*!< pointer to the TIM callback function */ + +#endif /* USE_HAL_TIM_REGISTER_CALLBACKS */ + +/** + * @} + */ +/* End of exported types -----------------------------------------------------*/ + +/* Exported constants --------------------------------------------------------*/ +/** @defgroup TIM_Exported_Constants TIM Exported Constants + * @{ + */ + +/** @defgroup TIM_ClearInput_Source TIM Clear Input Source + * @{ + */ +#define TIM_CLEARINPUTSOURCE_NONE 0x00000000U /*!< OCREF_CLR is disabled */ +#define TIM_CLEARINPUTSOURCE_ETR 0x00000001U /*!< OCREF_CLR is connected to ETRF input */ +/** + * @} + */ + +/** @defgroup TIM_DMA_Base_address TIM DMA Base Address + * @{ + */ +#define TIM_DMABASE_CR1 0x00000000U +#define TIM_DMABASE_CR2 0x00000001U +#define TIM_DMABASE_SMCR 0x00000002U +#define TIM_DMABASE_DIER 0x00000003U +#define TIM_DMABASE_SR 0x00000004U +#define TIM_DMABASE_EGR 0x00000005U +#define TIM_DMABASE_CCMR1 0x00000006U +#define TIM_DMABASE_CCMR2 0x00000007U +#define TIM_DMABASE_CCER 0x00000008U +#define TIM_DMABASE_CNT 0x00000009U +#define TIM_DMABASE_PSC 0x0000000AU +#define TIM_DMABASE_ARR 0x0000000BU +#define TIM_DMABASE_RCR 0x0000000CU +#define TIM_DMABASE_CCR1 0x0000000DU +#define TIM_DMABASE_CCR2 0x0000000EU +#define TIM_DMABASE_CCR3 0x0000000FU +#define TIM_DMABASE_CCR4 0x00000010U +#define TIM_DMABASE_BDTR 0x00000011U +#define TIM_DMABASE_DCR 0x00000012U +#define TIM_DMABASE_DMAR 0x00000013U +#define TIM_DMABASE_OR 0x00000014U +#define TIM_DMABASE_CCMR3 0x00000015U +#define TIM_DMABASE_CCR5 0x00000016U +#define TIM_DMABASE_CCR6 0x00000017U +#if defined(TIM_BREAK_INPUT_SUPPORT) +#define TIM_DMABASE_AF1 0x00000018U +#define TIM_DMABASE_AF2 0x00000019U +#endif /* TIM_BREAK_INPUT_SUPPORT */ +/** + * @} + */ + +/** @defgroup TIM_Event_Source TIM Event Source + * @{ + */ +#define TIM_EVENTSOURCE_UPDATE TIM_EGR_UG /*!< Reinitialize the counter and generates an update of the registers */ +#define TIM_EVENTSOURCE_CC1 TIM_EGR_CC1G /*!< A capture/compare event is generated on channel 1 */ +#define TIM_EVENTSOURCE_CC2 TIM_EGR_CC2G /*!< A capture/compare event is generated on channel 2 */ +#define TIM_EVENTSOURCE_CC3 TIM_EGR_CC3G /*!< A capture/compare event is generated on channel 3 */ +#define TIM_EVENTSOURCE_CC4 TIM_EGR_CC4G /*!< A capture/compare event is generated on channel 4 */ +#define TIM_EVENTSOURCE_COM TIM_EGR_COMG /*!< A commutation event is generated */ +#define TIM_EVENTSOURCE_TRIGGER TIM_EGR_TG /*!< A trigger event is generated */ +#define TIM_EVENTSOURCE_BREAK TIM_EGR_BG /*!< A break event is generated */ +#define TIM_EVENTSOURCE_BREAK2 TIM_EGR_B2G /*!< A break 2 event is generated */ +/** + * @} + */ + +/** @defgroup TIM_Input_Channel_Polarity TIM Input Channel polarity + * @{ + */ +#define TIM_INPUTCHANNELPOLARITY_RISING 0x00000000U /*!< Polarity for TIx source */ +#define TIM_INPUTCHANNELPOLARITY_FALLING TIM_CCER_CC1P /*!< Polarity for TIx source */ +#define TIM_INPUTCHANNELPOLARITY_BOTHEDGE (TIM_CCER_CC1P | 
TIM_CCER_CC1NP) /*!< Polarity for TIx source */ +/** + * @} + */ + +/** @defgroup TIM_ETR_Polarity TIM ETR Polarity + * @{ + */ +#define TIM_ETRPOLARITY_INVERTED TIM_SMCR_ETP /*!< Polarity for ETR source */ +#define TIM_ETRPOLARITY_NONINVERTED 0x00000000U /*!< Polarity for ETR source */ +/** + * @} + */ + +/** @defgroup TIM_ETR_Prescaler TIM ETR Prescaler + * @{ + */ +#define TIM_ETRPRESCALER_DIV1 0x00000000U /*!< No prescaler is used */ +#define TIM_ETRPRESCALER_DIV2 TIM_SMCR_ETPS_0 /*!< ETR input source is divided by 2 */ +#define TIM_ETRPRESCALER_DIV4 TIM_SMCR_ETPS_1 /*!< ETR input source is divided by 4 */ +#define TIM_ETRPRESCALER_DIV8 TIM_SMCR_ETPS /*!< ETR input source is divided by 8 */ +/** + * @} + */ + +/** @defgroup TIM_Counter_Mode TIM Counter Mode + * @{ + */ +#define TIM_COUNTERMODE_UP 0x00000000U /*!< Counter used as up-counter */ +#define TIM_COUNTERMODE_DOWN TIM_CR1_DIR /*!< Counter used as down-counter */ +#define TIM_COUNTERMODE_CENTERALIGNED1 TIM_CR1_CMS_0 /*!< Center-aligned mode 1 */ +#define TIM_COUNTERMODE_CENTERALIGNED2 TIM_CR1_CMS_1 /*!< Center-aligned mode 2 */ +#define TIM_COUNTERMODE_CENTERALIGNED3 TIM_CR1_CMS /*!< Center-aligned mode 3 */ +/** + * @} + */ + +/** @defgroup TIM_Update_Interrupt_Flag_Remap TIM Update Interrupt Flag Remap + * @{ + */ +#define TIM_UIFREMAP_DISABLE 0x00000000U /*!< Update interrupt flag remap disabled */ +#define TIM_UIFREMAP_ENABLE TIM_CR1_UIFREMAP /*!< Update interrupt flag remap enabled */ +/** + * @} + */ + +/** @defgroup TIM_ClockDivision TIM Clock Division + * @{ + */ +#define TIM_CLOCKDIVISION_DIV1 0x00000000U /*!< Clock division: tDTS=tCK_INT */ +#define TIM_CLOCKDIVISION_DIV2 TIM_CR1_CKD_0 /*!< Clock division: tDTS=2*tCK_INT */ +#define TIM_CLOCKDIVISION_DIV4 TIM_CR1_CKD_1 /*!< Clock division: tDTS=4*tCK_INT */ +/** + * @} + */ + +/** @defgroup TIM_Output_Compare_State TIM Output Compare State + * @{ + */ +#define TIM_OUTPUTSTATE_DISABLE 0x00000000U /*!< Capture/Compare 1 output disabled */ +#define TIM_OUTPUTSTATE_ENABLE TIM_CCER_CC1E /*!< Capture/Compare 1 output enabled */ +/** + * @} + */ + +/** @defgroup TIM_AutoReloadPreload TIM Auto-Reload Preload + * @{ + */ +#define TIM_AUTORELOAD_PRELOAD_DISABLE 0x00000000U /*!< TIMx_ARR register is not buffered */ +#define TIM_AUTORELOAD_PRELOAD_ENABLE TIM_CR1_ARPE /*!< TIMx_ARR register is buffered */ + +/** + * @} + */ + +/** @defgroup TIM_Output_Fast_State TIM Output Fast State + * @{ + */ +#define TIM_OCFAST_DISABLE 0x00000000U /*!< Output Compare fast disable */ +#define TIM_OCFAST_ENABLE TIM_CCMR1_OC1FE /*!< Output Compare fast enable */ +/** + * @} + */ + +/** @defgroup TIM_Output_Compare_N_State TIM Complementary Output Compare State + * @{ + */ +#define TIM_OUTPUTNSTATE_DISABLE 0x00000000U /*!< OCxN is disabled */ +#define TIM_OUTPUTNSTATE_ENABLE TIM_CCER_CC1NE /*!< OCxN is enabled */ +/** + * @} + */ + +/** @defgroup TIM_Output_Compare_Polarity TIM Output Compare Polarity + * @{ + */ +#define TIM_OCPOLARITY_HIGH 0x00000000U /*!< Capture/Compare output polarity */ +#define TIM_OCPOLARITY_LOW TIM_CCER_CC1P /*!< Capture/Compare output polarity */ +/** + * @} + */ + +/** @defgroup TIM_Output_Compare_N_Polarity TIM Complementary Output Compare Polarity + * @{ + */ +#define TIM_OCNPOLARITY_HIGH 0x00000000U /*!< Capture/Compare complementary output polarity */ +#define TIM_OCNPOLARITY_LOW TIM_CCER_CC1NP /*!< Capture/Compare complementary output polarity */ +/** + * @} + */ + +/** @defgroup TIM_Output_Compare_Idle_State TIM Output Compare Idle State + * @{ + */ +#define 
TIM_OCIDLESTATE_SET TIM_CR2_OIS1 /*!< Output Idle state: OCx=1 when MOE=0 */ +#define TIM_OCIDLESTATE_RESET 0x00000000U /*!< Output Idle state: OCx=0 when MOE=0 */ +/** + * @} + */ + +/** @defgroup TIM_Output_Compare_N_Idle_State TIM Complementary Output Compare Idle State + * @{ + */ +#define TIM_OCNIDLESTATE_SET TIM_CR2_OIS1N /*!< Complementary output Idle state: OCxN=1 when MOE=0 */ +#define TIM_OCNIDLESTATE_RESET 0x00000000U /*!< Complementary output Idle state: OCxN=0 when MOE=0 */ +/** + * @} + */ + +/** @defgroup TIM_Input_Capture_Polarity TIM Input Capture Polarity + * @{ + */ +#define TIM_ICPOLARITY_RISING TIM_INPUTCHANNELPOLARITY_RISING /*!< Capture triggered by rising edge on timer input */ +#define TIM_ICPOLARITY_FALLING TIM_INPUTCHANNELPOLARITY_FALLING /*!< Capture triggered by falling edge on timer input */ +#define TIM_ICPOLARITY_BOTHEDGE TIM_INPUTCHANNELPOLARITY_BOTHEDGE /*!< Capture triggered by both rising and falling edges on timer input*/ +/** + * @} + */ + +/** @defgroup TIM_Encoder_Input_Polarity TIM Encoder Input Polarity + * @{ + */ +#define TIM_ENCODERINPUTPOLARITY_RISING TIM_INPUTCHANNELPOLARITY_RISING /*!< Encoder input with rising edge polarity */ +#define TIM_ENCODERINPUTPOLARITY_FALLING TIM_INPUTCHANNELPOLARITY_FALLING /*!< Encoder input with falling edge polarity */ +/** + * @} + */ + +/** @defgroup TIM_Input_Capture_Selection TIM Input Capture Selection + * @{ + */ +#define TIM_ICSELECTION_DIRECTTI TIM_CCMR1_CC1S_0 /*!< TIM Input 1, 2, 3 or 4 is selected to be connected to IC1, IC2, IC3 or IC4, respectively */ +#define TIM_ICSELECTION_INDIRECTTI TIM_CCMR1_CC1S_1 /*!< TIM Input 1, 2, 3 or 4 is selected to be connected to IC2, IC1, IC4 or IC3, respectively */ +#define TIM_ICSELECTION_TRC TIM_CCMR1_CC1S /*!< TIM Input 1, 2, 3 or 4 is selected to be connected to TRC */ +/** + * @} + */ + +/** @defgroup TIM_Input_Capture_Prescaler TIM Input Capture Prescaler + * @{ + */ +#define TIM_ICPSC_DIV1 0x00000000U /*!< Capture performed each time an edge is detected on the capture input */ +#define TIM_ICPSC_DIV2 TIM_CCMR1_IC1PSC_0 /*!< Capture performed once every 2 events */ +#define TIM_ICPSC_DIV4 TIM_CCMR1_IC1PSC_1 /*!< Capture performed once every 4 events */ +#define TIM_ICPSC_DIV8 TIM_CCMR1_IC1PSC /*!< Capture performed once every 8 events */ +/** + * @} + */ + +/** @defgroup TIM_One_Pulse_Mode TIM One Pulse Mode + * @{ + */ +#define TIM_OPMODE_SINGLE TIM_CR1_OPM /*!< Counter stops counting at the next update event */ +#define TIM_OPMODE_REPETITIVE 0x00000000U /*!< Counter is not stopped at update event */ +/** + * @} + */ + +/** @defgroup TIM_Encoder_Mode TIM Encoder Mode + * @{ + */ +#define TIM_ENCODERMODE_TI1 TIM_SMCR_SMS_0 /*!< Quadrature encoder mode 1, x2 mode, counts up/down on TI1FP1 edge depending on TI2FP2 level */ +#define TIM_ENCODERMODE_TI2 TIM_SMCR_SMS_1 /*!< Quadrature encoder mode 2, x2 mode, counts up/down on TI2FP2 edge depending on TI1FP1 level. */ +#define TIM_ENCODERMODE_TI12 (TIM_SMCR_SMS_1 | TIM_SMCR_SMS_0) /*!< Quadrature encoder mode 3, x4 mode, counts up/down on both TI1FP1 and TI2FP2 edges depending on the level of the other input. 
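+            A minimal quadrature-encoder sketch using this mode (the htim3 handle,
+            zero filters and rising polarities are assumptions; htim3.Init must be
+            set up as for a normal time base beforehand):
+            @code
+            TIM_Encoder_InitTypeDef sEncoderConfig = {0};
+            sEncoderConfig.EncoderMode  = TIM_ENCODERMODE_TI12;
+            sEncoderConfig.IC1Polarity  = TIM_ENCODERINPUTPOLARITY_RISING;
+            sEncoderConfig.IC1Selection = TIM_ICSELECTION_DIRECTTI;
+            sEncoderConfig.IC1Prescaler = TIM_ICPSC_DIV1;
+            sEncoderConfig.IC1Filter    = 0x0U;
+            sEncoderConfig.IC2Polarity  = TIM_ENCODERINPUTPOLARITY_RISING;
+            sEncoderConfig.IC2Selection = TIM_ICSELECTION_DIRECTTI;
+            sEncoderConfig.IC2Prescaler = TIM_ICPSC_DIV1;
+            sEncoderConfig.IC2Filter    = 0x0U;
+            if (HAL_TIM_Encoder_Init(&htim3, &sEncoderConfig) == HAL_OK)
+            {
+              (void)HAL_TIM_Encoder_Start(&htim3, TIM_CHANNEL_ALL);
+            }
+            @endcode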
*/
+/**
+  * @}
+  */
+
+/** @defgroup TIM_Interrupt_definition TIM interrupt Definition
+  * @{
+  */
+#define TIM_IT_UPDATE      TIM_DIER_UIE     /*!< Update interrupt            */
+#define TIM_IT_CC1         TIM_DIER_CC1IE   /*!< Capture/Compare 1 interrupt */
+#define TIM_IT_CC2         TIM_DIER_CC2IE   /*!< Capture/Compare 2 interrupt */
+#define TIM_IT_CC3         TIM_DIER_CC3IE   /*!< Capture/Compare 3 interrupt */
+#define TIM_IT_CC4         TIM_DIER_CC4IE   /*!< Capture/Compare 4 interrupt */
+#define TIM_IT_COM         TIM_DIER_COMIE   /*!< Commutation interrupt       */
+#define TIM_IT_TRIGGER     TIM_DIER_TIE     /*!< Trigger interrupt           */
+#define TIM_IT_BREAK       TIM_DIER_BIE     /*!< Break interrupt             */
+/**
+  * @}
+  */
+
+/** @defgroup TIM_Commutation_Source TIM Commutation Source
+  * @{
+  */
+#define TIM_COMMUTATION_TRGI      TIM_CR2_CCUS   /*!< When Capture/compare control bits are preloaded, they are updated by setting the COMG bit or when a rising edge occurs on trigger input */
+#define TIM_COMMUTATION_SOFTWARE  0x00000000U    /*!< When Capture/compare control bits are preloaded, they are updated by setting the COMG bit */
+/**
+  * @}
+  */
+
+/** @defgroup TIM_DMA_sources TIM DMA Sources
+  * @{
+  */
+#define TIM_DMA_UPDATE     TIM_DIER_UDE     /*!< DMA request is triggered by the update event */
+#define TIM_DMA_CC1        TIM_DIER_CC1DE   /*!< DMA request is triggered by the capture/compare match 1 event */
+#define TIM_DMA_CC2        TIM_DIER_CC2DE   /*!< DMA request is triggered by the capture/compare match 2 event */
+#define TIM_DMA_CC3        TIM_DIER_CC3DE   /*!< DMA request is triggered by the capture/compare match 3 event */
+#define TIM_DMA_CC4        TIM_DIER_CC4DE   /*!< DMA request is triggered by the capture/compare match 4 event */
+#define TIM_DMA_COM        TIM_DIER_COMDE   /*!< DMA request is triggered by the commutation event */
+#define TIM_DMA_TRIGGER    TIM_DIER_TDE     /*!< DMA request is triggered by the trigger event */
+/**
+  * @}
+  */
+
+/** @defgroup TIM_Flag_definition TIM Flag Definition
+  * @{
+  */
+#define TIM_FLAG_UPDATE          TIM_SR_UIF    /*!< Update interrupt flag              */
+#define TIM_FLAG_CC1             TIM_SR_CC1IF  /*!< Capture/Compare 1 interrupt flag   */
+#define TIM_FLAG_CC2             TIM_SR_CC2IF  /*!< Capture/Compare 2 interrupt flag   */
+#define TIM_FLAG_CC3             TIM_SR_CC3IF  /*!< Capture/Compare 3 interrupt flag   */
+#define TIM_FLAG_CC4             TIM_SR_CC4IF  /*!< Capture/Compare 4 interrupt flag   */
+#define TIM_FLAG_CC5             TIM_SR_CC5IF  /*!< Capture/Compare 5 interrupt flag   */
+#define TIM_FLAG_CC6             TIM_SR_CC6IF  /*!< Capture/Compare 6 interrupt flag   */
+#define TIM_FLAG_COM             TIM_SR_COMIF  /*!< Commutation interrupt flag         */
+#define TIM_FLAG_TRIGGER         TIM_SR_TIF    /*!< Trigger interrupt flag             */
+#define TIM_FLAG_BREAK           TIM_SR_BIF    /*!< Break interrupt flag               */
+#define TIM_FLAG_BREAK2          TIM_SR_B2IF   /*!< Break 2 interrupt flag             */
+#define TIM_FLAG_SYSTEM_BREAK    TIM_SR_SBIF   /*!< System Break interrupt flag        */
+#define TIM_FLAG_CC1OF           TIM_SR_CC1OF  /*!< Capture 1 overcapture flag         */
+#define TIM_FLAG_CC2OF           TIM_SR_CC2OF  /*!< Capture 2 overcapture flag         */
+#define TIM_FLAG_CC3OF           TIM_SR_CC3OF  /*!< Capture 3 overcapture flag         */
+#define TIM_FLAG_CC4OF           TIM_SR_CC4OF  /*!< Capture 4 overcapture flag         */
+/**
+  * @}
+  */
+
+/** @defgroup TIM_Channel TIM Channel
+  * @{
+  */
+#define TIM_CHANNEL_1      0x00000000U   /*!< Capture/compare channel 1 identifier */
+#define TIM_CHANNEL_2      0x00000004U   /*!< Capture/compare channel 2 identifier */
+#define TIM_CHANNEL_3      0x00000008U   /*!< Capture/compare channel 3 identifier */
+#define TIM_CHANNEL_4      0x0000000CU   /*!< Capture/compare channel 4 identifier */
+#define TIM_CHANNEL_5      0x00000010U   /*!< Compare channel 5 identifier
*/ +#define TIM_CHANNEL_6 0x00000014U /*!< Compare channel 6 identifier */ +#define TIM_CHANNEL_ALL 0x0000003CU /*!< Global Capture/compare channel identifier */ +/** + * @} + */ + +/** @defgroup TIM_Clock_Source TIM Clock Source + * @{ + */ +#define TIM_CLOCKSOURCE_ETRMODE2 TIM_SMCR_ETPS_1 /*!< External clock source mode 2 */ +#define TIM_CLOCKSOURCE_INTERNAL TIM_SMCR_ETPS_0 /*!< Internal clock source */ +#define TIM_CLOCKSOURCE_ITR0 TIM_TS_ITR0 /*!< External clock source mode 1 (ITR0) */ +#define TIM_CLOCKSOURCE_ITR1 TIM_TS_ITR1 /*!< External clock source mode 1 (ITR1) */ +#define TIM_CLOCKSOURCE_ITR2 TIM_TS_ITR2 /*!< External clock source mode 1 (ITR2) */ +#define TIM_CLOCKSOURCE_ITR3 TIM_TS_ITR3 /*!< External clock source mode 1 (ITR3) */ +#define TIM_CLOCKSOURCE_TI1ED TIM_TS_TI1F_ED /*!< External clock source mode 1 (TTI1FP1 + edge detect.) */ +#define TIM_CLOCKSOURCE_TI1 TIM_TS_TI1FP1 /*!< External clock source mode 1 (TTI1FP1) */ +#define TIM_CLOCKSOURCE_TI2 TIM_TS_TI2FP2 /*!< External clock source mode 1 (TTI2FP2) */ +#define TIM_CLOCKSOURCE_ETRMODE1 TIM_TS_ETRF /*!< External clock source mode 1 (ETRF) */ +/** + * @} + */ + +/** @defgroup TIM_Clock_Polarity TIM Clock Polarity + * @{ + */ +#define TIM_CLOCKPOLARITY_INVERTED TIM_ETRPOLARITY_INVERTED /*!< Polarity for ETRx clock sources */ +#define TIM_CLOCKPOLARITY_NONINVERTED TIM_ETRPOLARITY_NONINVERTED /*!< Polarity for ETRx clock sources */ +#define TIM_CLOCKPOLARITY_RISING TIM_INPUTCHANNELPOLARITY_RISING /*!< Polarity for TIx clock sources */ +#define TIM_CLOCKPOLARITY_FALLING TIM_INPUTCHANNELPOLARITY_FALLING /*!< Polarity for TIx clock sources */ +#define TIM_CLOCKPOLARITY_BOTHEDGE TIM_INPUTCHANNELPOLARITY_BOTHEDGE /*!< Polarity for TIx clock sources */ +/** + * @} + */ + +/** @defgroup TIM_Clock_Prescaler TIM Clock Prescaler + * @{ + */ +#define TIM_CLOCKPRESCALER_DIV1 TIM_ETRPRESCALER_DIV1 /*!< No prescaler is used */ +#define TIM_CLOCKPRESCALER_DIV2 TIM_ETRPRESCALER_DIV2 /*!< Prescaler for External ETR Clock: Capture performed once every 2 events. */ +#define TIM_CLOCKPRESCALER_DIV4 TIM_ETRPRESCALER_DIV4 /*!< Prescaler for External ETR Clock: Capture performed once every 4 events. */ +#define TIM_CLOCKPRESCALER_DIV8 TIM_ETRPRESCALER_DIV8 /*!< Prescaler for External ETR Clock: Capture performed once every 8 events. */ +/** + * @} + */ + +/** @defgroup TIM_ClearInput_Polarity TIM Clear Input Polarity + * @{ + */ +#define TIM_CLEARINPUTPOLARITY_INVERTED TIM_ETRPOLARITY_INVERTED /*!< Polarity for ETRx pin */ +#define TIM_CLEARINPUTPOLARITY_NONINVERTED TIM_ETRPOLARITY_NONINVERTED /*!< Polarity for ETRx pin */ +/** + * @} + */ + +/** @defgroup TIM_ClearInput_Prescaler TIM Clear Input Prescaler + * @{ + */ +#define TIM_CLEARINPUTPRESCALER_DIV1 TIM_ETRPRESCALER_DIV1 /*!< No prescaler is used */ +#define TIM_CLEARINPUTPRESCALER_DIV2 TIM_ETRPRESCALER_DIV2 /*!< Prescaler for External ETR pin: Capture performed once every 2 events. */ +#define TIM_CLEARINPUTPRESCALER_DIV4 TIM_ETRPRESCALER_DIV4 /*!< Prescaler for External ETR pin: Capture performed once every 4 events. */ +#define TIM_CLEARINPUTPRESCALER_DIV8 TIM_ETRPRESCALER_DIV8 /*!< Prescaler for External ETR pin: Capture performed once every 8 events. 
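+                                       For illustration only (htim1 is an assumed, already-initialised handle):
+                                       these clear-input values are normally applied through
+                                       HAL_TIM_ConfigOCrefClear().
+                                       @code
+                                       TIM_ClearInputConfigTypeDef sClearInput = {0};
+                                       sClearInput.ClearInputState     = ENABLE;
+                                       sClearInput.ClearInputSource    = TIM_CLEARINPUTSOURCE_ETR;
+                                       sClearInput.ClearInputPolarity  = TIM_CLEARINPUTPOLARITY_NONINVERTED;
+                                       sClearInput.ClearInputPrescaler = TIM_CLEARINPUTPRESCALER_DIV1;  // prescaler must stay off with ETR
+                                       sClearInput.ClearInputFilter    = 0x0U;
+                                       (void)HAL_TIM_ConfigOCrefClear(&htim1, &sClearInput, TIM_CHANNEL_1);
+                                       @endcode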
*/ +/** + * @} + */ + +/** @defgroup TIM_OSSR_Off_State_Selection_for_Run_mode_state TIM OSSR OffState Selection for Run mode state + * @{ + */ +#define TIM_OSSR_ENABLE TIM_BDTR_OSSR /*!< When inactive, OC/OCN outputs are enabled (still controlled by the timer) */ +#define TIM_OSSR_DISABLE 0x00000000U /*!< When inactive, OC/OCN outputs are disabled (not controlled any longer by the timer) */ +/** + * @} + */ + +/** @defgroup TIM_OSSI_Off_State_Selection_for_Idle_mode_state TIM OSSI OffState Selection for Idle mode state + * @{ + */ +#define TIM_OSSI_ENABLE TIM_BDTR_OSSI /*!< When inactive, OC/OCN outputs are enabled (still controlled by the timer) */ +#define TIM_OSSI_DISABLE 0x00000000U /*!< When inactive, OC/OCN outputs are disabled (not controlled any longer by the timer) */ +/** + * @} + */ +/** @defgroup TIM_Lock_level TIM Lock level + * @{ + */ +#define TIM_LOCKLEVEL_OFF 0x00000000U /*!< LOCK OFF */ +#define TIM_LOCKLEVEL_1 TIM_BDTR_LOCK_0 /*!< LOCK Level 1 */ +#define TIM_LOCKLEVEL_2 TIM_BDTR_LOCK_1 /*!< LOCK Level 2 */ +#define TIM_LOCKLEVEL_3 TIM_BDTR_LOCK /*!< LOCK Level 3 */ +/** + * @} + */ + +/** @defgroup TIM_Break_Input_enable_disable TIM Break Input Enable + * @{ + */ +#define TIM_BREAK_ENABLE TIM_BDTR_BKE /*!< Break input BRK is enabled */ +#define TIM_BREAK_DISABLE 0x00000000U /*!< Break input BRK is disabled */ +/** + * @} + */ + +/** @defgroup TIM_Break_Polarity TIM Break Input Polarity + * @{ + */ +#define TIM_BREAKPOLARITY_LOW 0x00000000U /*!< Break input BRK is active low */ +#define TIM_BREAKPOLARITY_HIGH TIM_BDTR_BKP /*!< Break input BRK is active high */ +/** + * @} + */ + +/** @defgroup TIM_Break2_Input_enable_disable TIM Break input 2 Enable + * @{ + */ +#define TIM_BREAK2_DISABLE 0x00000000U /*!< Break input BRK2 is disabled */ +#define TIM_BREAK2_ENABLE TIM_BDTR_BK2E /*!< Break input BRK2 is enabled */ +/** + * @} + */ + +/** @defgroup TIM_Break2_Polarity TIM Break Input 2 Polarity + * @{ + */ +#define TIM_BREAK2POLARITY_LOW 0x00000000U /*!< Break input BRK2 is active low */ +#define TIM_BREAK2POLARITY_HIGH TIM_BDTR_BK2P /*!< Break input BRK2 is active high */ +/** + * @} + */ + +/** @defgroup TIM_AOE_Bit_Set_Reset TIM Automatic Output Enable + * @{ + */ +#define TIM_AUTOMATICOUTPUT_DISABLE 0x00000000U /*!< MOE can be set only by software */ +#define TIM_AUTOMATICOUTPUT_ENABLE TIM_BDTR_AOE /*!< MOE can be set by software or automatically at the next update event (if none of the break inputs BRK and BRK2 is active) */ +/** + * @} + */ + +/** @defgroup TIM_Group_Channel5 TIM Group Channel 5 and Channel 1, 2 or 3 + * @{ + */ +#define TIM_GROUPCH5_NONE 0x00000000U /*!< No effect of OC5REF on OC1REFC, OC2REFC and OC3REFC */ +#define TIM_GROUPCH5_OC1REFC TIM_CCR5_GC5C1 /*!< OC1REFC is the logical AND of OC1REFC and OC5REF */ +#define TIM_GROUPCH5_OC2REFC TIM_CCR5_GC5C2 /*!< OC2REFC is the logical AND of OC2REFC and OC5REF */ +#define TIM_GROUPCH5_OC3REFC TIM_CCR5_GC5C3 /*!< OC3REFC is the logical AND of OC3REFC and OC5REF */ +/** + * @} + */ + +/** @defgroup TIM_Master_Mode_Selection TIM Master Mode Selection + * @{ + */ +#define TIM_TRGO_RESET 0x00000000U /*!< TIMx_EGR.UG bit is used as trigger output (TRGO) */ +#define TIM_TRGO_ENABLE TIM_CR2_MMS_0 /*!< TIMx_CR1.CEN bit is used as trigger output (TRGO) */ +#define TIM_TRGO_UPDATE TIM_CR2_MMS_1 /*!< Update event is used as trigger output (TRGO) */ +#define TIM_TRGO_OC1 (TIM_CR2_MMS_1 | TIM_CR2_MMS_0) /*!< Capture or a compare match 1 is used as trigger output (TRGO) */ +#define TIM_TRGO_OC1REF TIM_CR2_MMS_2 /*!< 
OC1REF signal is used as trigger output (TRGO) */ +#define TIM_TRGO_OC2REF (TIM_CR2_MMS_2 | TIM_CR2_MMS_0) /*!< OC2REF signal is used as trigger output(TRGO) */ +#define TIM_TRGO_OC3REF (TIM_CR2_MMS_2 | TIM_CR2_MMS_1) /*!< OC3REF signal is used as trigger output(TRGO) */ +#define TIM_TRGO_OC4REF (TIM_CR2_MMS_2 | TIM_CR2_MMS_1 | TIM_CR2_MMS_0) /*!< OC4REF signal is used as trigger output(TRGO) */ +/** + * @} + */ + +/** @defgroup TIM_Master_Mode_Selection_2 TIM Master Mode Selection 2 (TRGO2) + * @{ + */ +#define TIM_TRGO2_RESET 0x00000000U /*!< TIMx_EGR.UG bit is used as trigger output (TRGO2) */ +#define TIM_TRGO2_ENABLE TIM_CR2_MMS2_0 /*!< TIMx_CR1.CEN bit is used as trigger output (TRGO2) */ +#define TIM_TRGO2_UPDATE TIM_CR2_MMS2_1 /*!< Update event is used as trigger output (TRGO2) */ +#define TIM_TRGO2_OC1 (TIM_CR2_MMS2_1 | TIM_CR2_MMS2_0) /*!< Capture or a compare match 1 is used as trigger output (TRGO2) */ +#define TIM_TRGO2_OC1REF TIM_CR2_MMS2_2 /*!< OC1REF signal is used as trigger output (TRGO2) */ +#define TIM_TRGO2_OC2REF (TIM_CR2_MMS2_2 | TIM_CR2_MMS2_0) /*!< OC2REF signal is used as trigger output (TRGO2) */ +#define TIM_TRGO2_OC3REF (TIM_CR2_MMS2_2 | TIM_CR2_MMS2_1) /*!< OC3REF signal is used as trigger output (TRGO2) */ +#define TIM_TRGO2_OC4REF (TIM_CR2_MMS2_2 | TIM_CR2_MMS2_1 | TIM_CR2_MMS2_0) /*!< OC4REF signal is used as trigger output (TRGO2) */ +#define TIM_TRGO2_OC5REF TIM_CR2_MMS2_3 /*!< OC5REF signal is used as trigger output (TRGO2) */ +#define TIM_TRGO2_OC6REF (TIM_CR2_MMS2_3 | TIM_CR2_MMS2_0) /*!< OC6REF signal is used as trigger output (TRGO2) */ +#define TIM_TRGO2_OC4REF_RISINGFALLING (TIM_CR2_MMS2_3 | TIM_CR2_MMS2_1) /*!< OC4REF rising or falling edges generate pulses on TRGO2 */ +#define TIM_TRGO2_OC6REF_RISINGFALLING (TIM_CR2_MMS2_3 | TIM_CR2_MMS2_1 | TIM_CR2_MMS2_0) /*!< OC6REF rising or falling edges generate pulses on TRGO2 */ +#define TIM_TRGO2_OC4REF_RISING_OC6REF_RISING (TIM_CR2_MMS2_3 | TIM_CR2_MMS2_2) /*!< OC4REF or OC6REF rising edges generate pulses on TRGO2 */ +#define TIM_TRGO2_OC4REF_RISING_OC6REF_FALLING (TIM_CR2_MMS2_3 | TIM_CR2_MMS2_2 | TIM_CR2_MMS2_0) /*!< OC4REF rising or OC6REF falling edges generate pulses on TRGO2 */ +#define TIM_TRGO2_OC5REF_RISING_OC6REF_RISING (TIM_CR2_MMS2_3 | TIM_CR2_MMS2_2 |TIM_CR2_MMS2_1) /*!< OC5REF or OC6REF rising edges generate pulses on TRGO2 */ +#define TIM_TRGO2_OC5REF_RISING_OC6REF_FALLING (TIM_CR2_MMS2_3 | TIM_CR2_MMS2_2 | TIM_CR2_MMS2_1 | TIM_CR2_MMS2_0) /*!< OC5REF or OC6REF rising edges generate pulses on TRGO2 */ +/** + * @} + */ + +/** @defgroup TIM_Master_Slave_Mode TIM Master/Slave Mode + * @{ + */ +#define TIM_MASTERSLAVEMODE_ENABLE TIM_SMCR_MSM /*!< No action */ +#define TIM_MASTERSLAVEMODE_DISABLE 0x00000000U /*!< Master/slave mode is selected */ +/** + * @} + */ + +/** @defgroup TIM_Slave_Mode TIM Slave mode + * @{ + */ +#define TIM_SLAVEMODE_DISABLE 0x00000000U /*!< Slave mode disabled */ +#define TIM_SLAVEMODE_RESET TIM_SMCR_SMS_2 /*!< Reset Mode */ +#define TIM_SLAVEMODE_GATED (TIM_SMCR_SMS_2 | TIM_SMCR_SMS_0) /*!< Gated Mode */ +#define TIM_SLAVEMODE_TRIGGER (TIM_SMCR_SMS_2 | TIM_SMCR_SMS_1) /*!< Trigger Mode */ +#define TIM_SLAVEMODE_EXTERNAL1 (TIM_SMCR_SMS_2 | TIM_SMCR_SMS_1 | TIM_SMCR_SMS_0) /*!< External Clock Mode 1 */ +#define TIM_SLAVEMODE_COMBINED_RESETTRIGGER TIM_SMCR_SMS_3 /*!< Combined reset + trigger mode */ +/** + * @} + */ + +/** @defgroup TIM_Output_Compare_and_PWM_modes TIM Output Compare and PWM Modes + * @{ + */ +#define TIM_OCMODE_TIMING 0x00000000U /*!< Frozen */ 
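/*
 * Editor's note -- illustrative sketch only, not part of the vendored ST header.
 * The output-compare / PWM mode constants in this group are normally passed through
 * TIM_OC_InitTypeDef.OCMode when a channel is configured with HAL_TIM_PWM_ConfigChannel()
 * (declared further down in this header). A minimal PWM setup, assuming a timer handle
 * `htim3` that has already been initialised elsewhere with HAL_TIM_PWM_Init(), could look like:
 *
 *   TIM_OC_InitTypeDef sConfigOC = {0};
 *   sConfigOC.OCMode     = TIM_OCMODE_PWM1;        // PWM mode 1 (defined just below)
 *   sConfigOC.Pulse      = 500;                    // compare value, i.e. the duty cycle
 *   sConfigOC.OCPolarity = TIM_OCPOLARITY_HIGH;
 *   sConfigOC.OCFastMode = TIM_OCFAST_DISABLE;
 *   HAL_TIM_PWM_ConfigChannel(&htim3, &sConfigOC, TIM_CHANNEL_1);
 *   HAL_TIM_PWM_Start(&htim3, TIM_CHANNEL_1);
 *
 * The duty cycle can later be adjusted at run time with
 * __HAL_TIM_SET_COMPARE(&htim3, TIM_CHANNEL_1, <new compare value>), defined later in this file.
 */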
+#define TIM_OCMODE_ACTIVE TIM_CCMR1_OC1M_0 /*!< Set channel to active level on match */ +#define TIM_OCMODE_INACTIVE TIM_CCMR1_OC1M_1 /*!< Set channel to inactive level on match */ +#define TIM_OCMODE_TOGGLE (TIM_CCMR1_OC1M_1 | TIM_CCMR1_OC1M_0) /*!< Toggle */ +#define TIM_OCMODE_PWM1 (TIM_CCMR1_OC1M_2 | TIM_CCMR1_OC1M_1) /*!< PWM mode 1 */ +#define TIM_OCMODE_PWM2 (TIM_CCMR1_OC1M_2 | TIM_CCMR1_OC1M_1 | TIM_CCMR1_OC1M_0) /*!< PWM mode 2 */ +#define TIM_OCMODE_FORCED_ACTIVE (TIM_CCMR1_OC1M_2 | TIM_CCMR1_OC1M_0) /*!< Force active level */ +#define TIM_OCMODE_FORCED_INACTIVE TIM_CCMR1_OC1M_2 /*!< Force inactive level */ +#define TIM_OCMODE_RETRIGERRABLE_OPM1 TIM_CCMR1_OC1M_3 /*!< Retrigerrable OPM mode 1 */ +#define TIM_OCMODE_RETRIGERRABLE_OPM2 (TIM_CCMR1_OC1M_3 | TIM_CCMR1_OC1M_0) /*!< Retrigerrable OPM mode 2 */ +#define TIM_OCMODE_COMBINED_PWM1 (TIM_CCMR1_OC1M_3 | TIM_CCMR1_OC1M_2) /*!< Combined PWM mode 1 */ +#define TIM_OCMODE_COMBINED_PWM2 (TIM_CCMR1_OC1M_3 | TIM_CCMR1_OC1M_0 | TIM_CCMR1_OC1M_2) /*!< Combined PWM mode 2 */ +#define TIM_OCMODE_ASSYMETRIC_PWM1 (TIM_CCMR1_OC1M_3 | TIM_CCMR1_OC1M_1 | TIM_CCMR1_OC1M_2) /*!< Asymmetric PWM mode 1 */ +#define TIM_OCMODE_ASSYMETRIC_PWM2 TIM_CCMR1_OC1M /*!< Asymmetric PWM mode 2 */ +/** + * @} + */ + +/** @defgroup TIM_Trigger_Selection TIM Trigger Selection + * @{ + */ +#define TIM_TS_ITR0 0x00000000U /*!< Internal Trigger 0 (ITR0) */ +#define TIM_TS_ITR1 TIM_SMCR_TS_0 /*!< Internal Trigger 1 (ITR1) */ +#define TIM_TS_ITR2 TIM_SMCR_TS_1 /*!< Internal Trigger 2 (ITR2) */ +#define TIM_TS_ITR3 (TIM_SMCR_TS_0 | TIM_SMCR_TS_1) /*!< Internal Trigger 3 (ITR3) */ +#define TIM_TS_TI1F_ED TIM_SMCR_TS_2 /*!< TI1 Edge Detector (TI1F_ED) */ +#define TIM_TS_TI1FP1 (TIM_SMCR_TS_0 | TIM_SMCR_TS_2) /*!< Filtered Timer Input 1 (TI1FP1) */ +#define TIM_TS_TI2FP2 (TIM_SMCR_TS_1 | TIM_SMCR_TS_2) /*!< Filtered Timer Input 2 (TI2FP2) */ +#define TIM_TS_ETRF (TIM_SMCR_TS_0 | TIM_SMCR_TS_1 | TIM_SMCR_TS_2) /*!< Filtered External Trigger input (ETRF) */ +#define TIM_TS_NONE 0x0000FFFFU /*!< No trigger selected */ +/** + * @} + */ + +/** @defgroup TIM_Trigger_Polarity TIM Trigger Polarity + * @{ + */ +#define TIM_TRIGGERPOLARITY_INVERTED TIM_ETRPOLARITY_INVERTED /*!< Polarity for ETRx trigger sources */ +#define TIM_TRIGGERPOLARITY_NONINVERTED TIM_ETRPOLARITY_NONINVERTED /*!< Polarity for ETRx trigger sources */ +#define TIM_TRIGGERPOLARITY_RISING TIM_INPUTCHANNELPOLARITY_RISING /*!< Polarity for TIxFPx or TI1_ED trigger sources */ +#define TIM_TRIGGERPOLARITY_FALLING TIM_INPUTCHANNELPOLARITY_FALLING /*!< Polarity for TIxFPx or TI1_ED trigger sources */ +#define TIM_TRIGGERPOLARITY_BOTHEDGE TIM_INPUTCHANNELPOLARITY_BOTHEDGE /*!< Polarity for TIxFPx or TI1_ED trigger sources */ +/** + * @} + */ + +/** @defgroup TIM_Trigger_Prescaler TIM Trigger Prescaler + * @{ + */ +#define TIM_TRIGGERPRESCALER_DIV1 TIM_ETRPRESCALER_DIV1 /*!< No prescaler is used */ +#define TIM_TRIGGERPRESCALER_DIV2 TIM_ETRPRESCALER_DIV2 /*!< Prescaler for External ETR Trigger: Capture performed once every 2 events. */ +#define TIM_TRIGGERPRESCALER_DIV4 TIM_ETRPRESCALER_DIV4 /*!< Prescaler for External ETR Trigger: Capture performed once every 4 events. */ +#define TIM_TRIGGERPRESCALER_DIV8 TIM_ETRPRESCALER_DIV8 /*!< Prescaler for External ETR Trigger: Capture performed once every 8 events. 
*/ +/** + * @} + */ + +/** @defgroup TIM_TI1_Selection TIM TI1 Input Selection + * @{ + */ +#define TIM_TI1SELECTION_CH1 0x00000000U /*!< The TIMx_CH1 pin is connected to TI1 input */ +#define TIM_TI1SELECTION_XORCOMBINATION TIM_CR2_TI1S /*!< The TIMx_CH1, CH2 and CH3 pins are connected to the TI1 input (XOR combination) */ +/** + * @} + */ + +/** @defgroup TIM_DMA_Burst_Length TIM DMA Burst Length + * @{ + */ +#define TIM_DMABURSTLENGTH_1TRANSFER 0x00000000U /*!< The transfer is done to 1 register starting from TIMx_CR1 + TIMx_DCR.DBA */ +#define TIM_DMABURSTLENGTH_2TRANSFERS 0x00000100U /*!< The transfer is done to 2 registers starting from TIMx_CR1 + TIMx_DCR.DBA */ +#define TIM_DMABURSTLENGTH_3TRANSFERS 0x00000200U /*!< The transfer is done to 3 registers starting from TIMx_CR1 + TIMx_DCR.DBA */ +#define TIM_DMABURSTLENGTH_4TRANSFERS 0x00000300U /*!< The transfer is done to 4 registers starting from TIMx_CR1 + TIMx_DCR.DBA */ +#define TIM_DMABURSTLENGTH_5TRANSFERS 0x00000400U /*!< The transfer is done to 5 registers starting from TIMx_CR1 + TIMx_DCR.DBA */ +#define TIM_DMABURSTLENGTH_6TRANSFERS 0x00000500U /*!< The transfer is done to 6 registers starting from TIMx_CR1 + TIMx_DCR.DBA */ +#define TIM_DMABURSTLENGTH_7TRANSFERS 0x00000600U /*!< The transfer is done to 7 registers starting from TIMx_CR1 + TIMx_DCR.DBA */ +#define TIM_DMABURSTLENGTH_8TRANSFERS 0x00000700U /*!< The transfer is done to 8 registers starting from TIMx_CR1 + TIMx_DCR.DBA */ +#define TIM_DMABURSTLENGTH_9TRANSFERS 0x00000800U /*!< The transfer is done to 9 registers starting from TIMx_CR1 + TIMx_DCR.DBA */ +#define TIM_DMABURSTLENGTH_10TRANSFERS 0x00000900U /*!< The transfer is done to 10 registers starting from TIMx_CR1 + TIMx_DCR.DBA */ +#define TIM_DMABURSTLENGTH_11TRANSFERS 0x00000A00U /*!< The transfer is done to 11 registers starting from TIMx_CR1 + TIMx_DCR.DBA */ +#define TIM_DMABURSTLENGTH_12TRANSFERS 0x00000B00U /*!< The transfer is done to 12 registers starting from TIMx_CR1 + TIMx_DCR.DBA */ +#define TIM_DMABURSTLENGTH_13TRANSFERS 0x00000C00U /*!< The transfer is done to 13 registers starting from TIMx_CR1 + TIMx_DCR.DBA */ +#define TIM_DMABURSTLENGTH_14TRANSFERS 0x00000D00U /*!< The transfer is done to 14 registers starting from TIMx_CR1 + TIMx_DCR.DBA */ +#define TIM_DMABURSTLENGTH_15TRANSFERS 0x00000E00U /*!< The transfer is done to 15 registers starting from TIMx_CR1 + TIMx_DCR.DBA */ +#define TIM_DMABURSTLENGTH_16TRANSFERS 0x00000F00U /*!< The transfer is done to 16 registers starting from TIMx_CR1 + TIMx_DCR.DBA */ +#define TIM_DMABURSTLENGTH_17TRANSFERS 0x00001000U /*!< The transfer is done to 17 registers starting from TIMx_CR1 + TIMx_DCR.DBA */ +#define TIM_DMABURSTLENGTH_18TRANSFERS 0x00001100U /*!< The transfer is done to 18 registers starting from TIMx_CR1 + TIMx_DCR.DBA */ +/** + * @} + */ + +/** @defgroup DMA_Handle_index TIM DMA Handle Index + * @{ + */ +#define TIM_DMA_ID_UPDATE ((uint16_t) 0x0000) /*!< Index of the DMA handle used for Update DMA requests */ +#define TIM_DMA_ID_CC1 ((uint16_t) 0x0001) /*!< Index of the DMA handle used for Capture/Compare 1 DMA requests */ +#define TIM_DMA_ID_CC2 ((uint16_t) 0x0002) /*!< Index of the DMA handle used for Capture/Compare 2 DMA requests */ +#define TIM_DMA_ID_CC3 ((uint16_t) 0x0003) /*!< Index of the DMA handle used for Capture/Compare 3 DMA requests */ +#define TIM_DMA_ID_CC4 ((uint16_t) 0x0004) /*!< Index of the DMA handle used for Capture/Compare 4 DMA requests */ +#define TIM_DMA_ID_COMMUTATION ((uint16_t) 0x0005) /*!< Index of the DMA 
handle used for Commutation DMA requests */ +#define TIM_DMA_ID_TRIGGER ((uint16_t) 0x0006) /*!< Index of the DMA handle used for Trigger DMA requests */ +/** + * @} + */ + +/** @defgroup Channel_CC_State TIM Capture/Compare Channel State + * @{ + */ +#define TIM_CCx_ENABLE 0x00000001U /*!< Input or output channel is enabled */ +#define TIM_CCx_DISABLE 0x00000000U /*!< Input or output channel is disabled */ +#define TIM_CCxN_ENABLE 0x00000004U /*!< Complementary output channel is enabled */ +#define TIM_CCxN_DISABLE 0x00000000U /*!< Complementary output channel is enabled */ +/** + * @} + */ + +/** @defgroup TIM_Break_System TIM Break System + * @{ + */ +#define TIM_BREAK_SYSTEM_ECC SYSCFG_CFGR2_ECCL /*!< Enables and locks the ECC error signal with Break Input of TIM1/8/15/16/17 */ +#define TIM_BREAK_SYSTEM_PVD SYSCFG_CFGR2_PVDL /*!< Enables and locks the PVD connection with TIM1/8/15/16/17 Break Input and also the PVDE and PLS bits of the Power Control Interface */ +#define TIM_BREAK_SYSTEM_SRAM_PARITY_ERROR SYSCFG_CFGR2_SPL /*!< Enables and locks the SRAM_PARITY error signal with Break Input of TIM1/8/15/16/17 */ +#define TIM_BREAK_SYSTEM_LOCKUP SYSCFG_CFGR2_CLL /*!< Enables and locks the LOCKUP output of CortexM4 with Break Input of TIM1/8/15/16/17 */ +/** + * @} + */ + +/** + * @} + */ +/* End of exported constants -------------------------------------------------*/ + +/* Exported macros -----------------------------------------------------------*/ +/** @defgroup TIM_Exported_Macros TIM Exported Macros + * @{ + */ + +/** @brief Reset TIM handle state. + * @param __HANDLE__ TIM handle. + * @retval None + */ +#if (USE_HAL_TIM_REGISTER_CALLBACKS == 1) +#define __HAL_TIM_RESET_HANDLE_STATE(__HANDLE__) do { \ + (__HANDLE__)->State = HAL_TIM_STATE_RESET; \ + (__HANDLE__)->ChannelState[0] = HAL_TIM_CHANNEL_STATE_RESET; \ + (__HANDLE__)->ChannelState[1] = HAL_TIM_CHANNEL_STATE_RESET; \ + (__HANDLE__)->ChannelState[2] = HAL_TIM_CHANNEL_STATE_RESET; \ + (__HANDLE__)->ChannelState[3] = HAL_TIM_CHANNEL_STATE_RESET; \ + (__HANDLE__)->ChannelState[4] = HAL_TIM_CHANNEL_STATE_RESET; \ + (__HANDLE__)->ChannelState[5] = HAL_TIM_CHANNEL_STATE_RESET; \ + (__HANDLE__)->ChannelNState[0] = HAL_TIM_CHANNEL_STATE_RESET; \ + (__HANDLE__)->ChannelNState[1] = HAL_TIM_CHANNEL_STATE_RESET; \ + (__HANDLE__)->ChannelNState[2] = HAL_TIM_CHANNEL_STATE_RESET; \ + (__HANDLE__)->ChannelNState[3] = HAL_TIM_CHANNEL_STATE_RESET; \ + (__HANDLE__)->DMABurstState = HAL_DMA_BURST_STATE_RESET; \ + (__HANDLE__)->Base_MspInitCallback = NULL; \ + (__HANDLE__)->Base_MspDeInitCallback = NULL; \ + (__HANDLE__)->IC_MspInitCallback = NULL; \ + (__HANDLE__)->IC_MspDeInitCallback = NULL; \ + (__HANDLE__)->OC_MspInitCallback = NULL; \ + (__HANDLE__)->OC_MspDeInitCallback = NULL; \ + (__HANDLE__)->PWM_MspInitCallback = NULL; \ + (__HANDLE__)->PWM_MspDeInitCallback = NULL; \ + (__HANDLE__)->OnePulse_MspInitCallback = NULL; \ + (__HANDLE__)->OnePulse_MspDeInitCallback = NULL; \ + (__HANDLE__)->Encoder_MspInitCallback = NULL; \ + (__HANDLE__)->Encoder_MspDeInitCallback = NULL; \ + (__HANDLE__)->HallSensor_MspInitCallback = NULL; \ + (__HANDLE__)->HallSensor_MspDeInitCallback = NULL; \ + } while(0) +#else +#define __HAL_TIM_RESET_HANDLE_STATE(__HANDLE__) do { \ + (__HANDLE__)->State = HAL_TIM_STATE_RESET; \ + (__HANDLE__)->ChannelState[0] = HAL_TIM_CHANNEL_STATE_RESET; \ + (__HANDLE__)->ChannelState[1] = HAL_TIM_CHANNEL_STATE_RESET; \ + (__HANDLE__)->ChannelState[2] = HAL_TIM_CHANNEL_STATE_RESET; \ + (__HANDLE__)->ChannelState[3] = 
HAL_TIM_CHANNEL_STATE_RESET; \ + (__HANDLE__)->ChannelState[4] = HAL_TIM_CHANNEL_STATE_RESET; \ + (__HANDLE__)->ChannelState[5] = HAL_TIM_CHANNEL_STATE_RESET; \ + (__HANDLE__)->ChannelNState[0] = HAL_TIM_CHANNEL_STATE_RESET; \ + (__HANDLE__)->ChannelNState[1] = HAL_TIM_CHANNEL_STATE_RESET; \ + (__HANDLE__)->ChannelNState[2] = HAL_TIM_CHANNEL_STATE_RESET; \ + (__HANDLE__)->ChannelNState[3] = HAL_TIM_CHANNEL_STATE_RESET; \ + (__HANDLE__)->DMABurstState = HAL_DMA_BURST_STATE_RESET; \ + } while(0) +#endif /* USE_HAL_TIM_REGISTER_CALLBACKS */ + +/** + * @brief Enable the TIM peripheral. + * @param __HANDLE__ TIM handle + * @retval None + */ +#define __HAL_TIM_ENABLE(__HANDLE__) ((__HANDLE__)->Instance->CR1|=(TIM_CR1_CEN)) + +/** + * @brief Enable the TIM main Output. + * @param __HANDLE__ TIM handle + * @retval None + */ +#define __HAL_TIM_MOE_ENABLE(__HANDLE__) ((__HANDLE__)->Instance->BDTR|=(TIM_BDTR_MOE)) + +/** + * @brief Disable the TIM peripheral. + * @param __HANDLE__ TIM handle + * @retval None + */ +#define __HAL_TIM_DISABLE(__HANDLE__) \ + do { \ + if (((__HANDLE__)->Instance->CCER & TIM_CCER_CCxE_MASK) == 0UL) \ + { \ + if(((__HANDLE__)->Instance->CCER & TIM_CCER_CCxNE_MASK) == 0UL) \ + { \ + (__HANDLE__)->Instance->CR1 &= ~(TIM_CR1_CEN); \ + } \ + } \ + } while(0) + +/** + * @brief Disable the TIM main Output. + * @param __HANDLE__ TIM handle + * @retval None + * @note The Main Output Enable of a timer instance is disabled only if all the CCx and CCxN channels have been + * disabled + */ +#define __HAL_TIM_MOE_DISABLE(__HANDLE__) \ + do { \ + if (((__HANDLE__)->Instance->CCER & TIM_CCER_CCxE_MASK) == 0UL) \ + { \ + if(((__HANDLE__)->Instance->CCER & TIM_CCER_CCxNE_MASK) == 0UL) \ + { \ + (__HANDLE__)->Instance->BDTR &= ~(TIM_BDTR_MOE); \ + } \ + } \ + } while(0) + +/** + * @brief Disable the TIM main Output. + * @param __HANDLE__ TIM handle + * @retval None + * @note The Main Output Enable of a timer instance is disabled unconditionally + */ +#define __HAL_TIM_MOE_DISABLE_UNCONDITIONALLY(__HANDLE__) (__HANDLE__)->Instance->BDTR &= ~(TIM_BDTR_MOE) + +/** @brief Enable the specified TIM interrupt. + * @param __HANDLE__ specifies the TIM Handle. + * @param __INTERRUPT__ specifies the TIM interrupt source to enable. + * This parameter can be one of the following values: + * @arg TIM_IT_UPDATE: Update interrupt + * @arg TIM_IT_CC1: Capture/Compare 1 interrupt + * @arg TIM_IT_CC2: Capture/Compare 2 interrupt + * @arg TIM_IT_CC3: Capture/Compare 3 interrupt + * @arg TIM_IT_CC4: Capture/Compare 4 interrupt + * @arg TIM_IT_COM: Commutation interrupt + * @arg TIM_IT_TRIGGER: Trigger interrupt + * @arg TIM_IT_BREAK: Break interrupt + * @retval None + */ +#define __HAL_TIM_ENABLE_IT(__HANDLE__, __INTERRUPT__) ((__HANDLE__)->Instance->DIER |= (__INTERRUPT__)) + +/** @brief Disable the specified TIM interrupt. + * @param __HANDLE__ specifies the TIM Handle. + * @param __INTERRUPT__ specifies the TIM interrupt source to disable. 
+ * This parameter can be one of the following values: + * @arg TIM_IT_UPDATE: Update interrupt + * @arg TIM_IT_CC1: Capture/Compare 1 interrupt + * @arg TIM_IT_CC2: Capture/Compare 2 interrupt + * @arg TIM_IT_CC3: Capture/Compare 3 interrupt + * @arg TIM_IT_CC4: Capture/Compare 4 interrupt + * @arg TIM_IT_COM: Commutation interrupt + * @arg TIM_IT_TRIGGER: Trigger interrupt + * @arg TIM_IT_BREAK: Break interrupt + * @retval None + */ +#define __HAL_TIM_DISABLE_IT(__HANDLE__, __INTERRUPT__) ((__HANDLE__)->Instance->DIER &= ~(__INTERRUPT__)) + +/** @brief Enable the specified DMA request. + * @param __HANDLE__ specifies the TIM Handle. + * @param __DMA__ specifies the TIM DMA request to enable. + * This parameter can be one of the following values: + * @arg TIM_DMA_UPDATE: Update DMA request + * @arg TIM_DMA_CC1: Capture/Compare 1 DMA request + * @arg TIM_DMA_CC2: Capture/Compare 2 DMA request + * @arg TIM_DMA_CC3: Capture/Compare 3 DMA request + * @arg TIM_DMA_CC4: Capture/Compare 4 DMA request + * @arg TIM_DMA_COM: Commutation DMA request + * @arg TIM_DMA_TRIGGER: Trigger DMA request + * @retval None + */ +#define __HAL_TIM_ENABLE_DMA(__HANDLE__, __DMA__) ((__HANDLE__)->Instance->DIER |= (__DMA__)) + +/** @brief Disable the specified DMA request. + * @param __HANDLE__ specifies the TIM Handle. + * @param __DMA__ specifies the TIM DMA request to disable. + * This parameter can be one of the following values: + * @arg TIM_DMA_UPDATE: Update DMA request + * @arg TIM_DMA_CC1: Capture/Compare 1 DMA request + * @arg TIM_DMA_CC2: Capture/Compare 2 DMA request + * @arg TIM_DMA_CC3: Capture/Compare 3 DMA request + * @arg TIM_DMA_CC4: Capture/Compare 4 DMA request + * @arg TIM_DMA_COM: Commutation DMA request + * @arg TIM_DMA_TRIGGER: Trigger DMA request + * @retval None + */ +#define __HAL_TIM_DISABLE_DMA(__HANDLE__, __DMA__) ((__HANDLE__)->Instance->DIER &= ~(__DMA__)) + +/** @brief Check whether the specified TIM interrupt flag is set or not. + * @param __HANDLE__ specifies the TIM Handle. + * @param __FLAG__ specifies the TIM interrupt flag to check. + * This parameter can be one of the following values: + * @arg TIM_FLAG_UPDATE: Update interrupt flag + * @arg TIM_FLAG_CC1: Capture/Compare 1 interrupt flag + * @arg TIM_FLAG_CC2: Capture/Compare 2 interrupt flag + * @arg TIM_FLAG_CC3: Capture/Compare 3 interrupt flag + * @arg TIM_FLAG_CC4: Capture/Compare 4 interrupt flag + * @arg TIM_FLAG_CC5: Compare 5 interrupt flag + * @arg TIM_FLAG_CC6: Compare 6 interrupt flag + * @arg TIM_FLAG_COM: Commutation interrupt flag + * @arg TIM_FLAG_TRIGGER: Trigger interrupt flag + * @arg TIM_FLAG_BREAK: Break interrupt flag + * @arg TIM_FLAG_BREAK2: Break 2 interrupt flag + * @arg TIM_FLAG_SYSTEM_BREAK: System Break interrupt flag + * @arg TIM_FLAG_CC1OF: Capture/Compare 1 overcapture flag + * @arg TIM_FLAG_CC2OF: Capture/Compare 2 overcapture flag + * @arg TIM_FLAG_CC3OF: Capture/Compare 3 overcapture flag + * @arg TIM_FLAG_CC4OF: Capture/Compare 4 overcapture flag + * @retval The new state of __FLAG__ (TRUE or FALSE). + */ +#define __HAL_TIM_GET_FLAG(__HANDLE__, __FLAG__) (((__HANDLE__)->Instance->SR &(__FLAG__)) == (__FLAG__)) + +/** @brief Clear the specified TIM interrupt flag. + * @param __HANDLE__ specifies the TIM Handle. + * @param __FLAG__ specifies the TIM interrupt flag to clear. 
+ * This parameter can be one of the following values: + * @arg TIM_FLAG_UPDATE: Update interrupt flag + * @arg TIM_FLAG_CC1: Capture/Compare 1 interrupt flag + * @arg TIM_FLAG_CC2: Capture/Compare 2 interrupt flag + * @arg TIM_FLAG_CC3: Capture/Compare 3 interrupt flag + * @arg TIM_FLAG_CC4: Capture/Compare 4 interrupt flag + * @arg TIM_FLAG_CC5: Compare 5 interrupt flag + * @arg TIM_FLAG_CC6: Compare 6 interrupt flag + * @arg TIM_FLAG_COM: Commutation interrupt flag + * @arg TIM_FLAG_TRIGGER: Trigger interrupt flag + * @arg TIM_FLAG_BREAK: Break interrupt flag + * @arg TIM_FLAG_BREAK2: Break 2 interrupt flag + * @arg TIM_FLAG_SYSTEM_BREAK: System Break interrupt flag + * @arg TIM_FLAG_CC1OF: Capture/Compare 1 overcapture flag + * @arg TIM_FLAG_CC2OF: Capture/Compare 2 overcapture flag + * @arg TIM_FLAG_CC3OF: Capture/Compare 3 overcapture flag + * @arg TIM_FLAG_CC4OF: Capture/Compare 4 overcapture flag + * @retval The new state of __FLAG__ (TRUE or FALSE). + */ +#define __HAL_TIM_CLEAR_FLAG(__HANDLE__, __FLAG__) ((__HANDLE__)->Instance->SR = ~(__FLAG__)) + +/** + * @brief Check whether the specified TIM interrupt source is enabled or not. + * @param __HANDLE__ TIM handle + * @param __INTERRUPT__ specifies the TIM interrupt source to check. + * This parameter can be one of the following values: + * @arg TIM_IT_UPDATE: Update interrupt + * @arg TIM_IT_CC1: Capture/Compare 1 interrupt + * @arg TIM_IT_CC2: Capture/Compare 2 interrupt + * @arg TIM_IT_CC3: Capture/Compare 3 interrupt + * @arg TIM_IT_CC4: Capture/Compare 4 interrupt + * @arg TIM_IT_COM: Commutation interrupt + * @arg TIM_IT_TRIGGER: Trigger interrupt + * @arg TIM_IT_BREAK: Break interrupt + * @retval The state of TIM_IT (SET or RESET). + */ +#define __HAL_TIM_GET_IT_SOURCE(__HANDLE__, __INTERRUPT__) ((((__HANDLE__)->Instance->DIER & (__INTERRUPT__)) \ + == (__INTERRUPT__)) ? SET : RESET) + +/** @brief Clear the TIM interrupt pending bits. + * @param __HANDLE__ TIM handle + * @param __INTERRUPT__ specifies the interrupt pending bit to clear. + * This parameter can be one of the following values: + * @arg TIM_IT_UPDATE: Update interrupt + * @arg TIM_IT_CC1: Capture/Compare 1 interrupt + * @arg TIM_IT_CC2: Capture/Compare 2 interrupt + * @arg TIM_IT_CC3: Capture/Compare 3 interrupt + * @arg TIM_IT_CC4: Capture/Compare 4 interrupt + * @arg TIM_IT_COM: Commutation interrupt + * @arg TIM_IT_TRIGGER: Trigger interrupt + * @arg TIM_IT_BREAK: Break interrupt + * @retval None + */ +#define __HAL_TIM_CLEAR_IT(__HANDLE__, __INTERRUPT__) ((__HANDLE__)->Instance->SR = ~(__INTERRUPT__)) + +/** + * @brief Force a continuous copy of the update interrupt flag (UIF) into the timer counter register (bit 31). + * @note This allows both the counter value and a potential roll-over condition signalled by the UIFCPY flag to be read + * in an atomic way. + * @param __HANDLE__ TIM handle. + * @retval None +mode. + */ +#define __HAL_TIM_UIFREMAP_ENABLE(__HANDLE__) (((__HANDLE__)->Instance->CR1 |= TIM_CR1_UIFREMAP)) + +/** + * @brief Disable update interrupt flag (UIF) remapping. + * @param __HANDLE__ TIM handle. + * @retval None +mode. + */ +#define __HAL_TIM_UIFREMAP_DISABLE(__HANDLE__) (((__HANDLE__)->Instance->CR1 &= ~TIM_CR1_UIFREMAP)) + +/** + * @brief Get update interrupt flag (UIF) copy status. + * @param __COUNTER__ Counter value. + * @retval The state of UIFCPY (TRUE or FALSE). +mode. 
+ */ +#define __HAL_TIM_GET_UIFCPY(__COUNTER__) (((__COUNTER__) & (TIM_CNT_UIFCPY)) == (TIM_CNT_UIFCPY)) + +/** + * @brief Indicates whether or not the TIM Counter is used as downcounter. + * @param __HANDLE__ TIM handle. + * @retval False (Counter used as upcounter) or True (Counter used as downcounter) + * @note This macro is particularly useful to get the counting mode when the timer operates in Center-aligned mode + * or Encoder mode. + */ +#define __HAL_TIM_IS_TIM_COUNTING_DOWN(__HANDLE__) (((__HANDLE__)->Instance->CR1 &(TIM_CR1_DIR)) == (TIM_CR1_DIR)) + +/** + * @brief Set the TIM Prescaler on runtime. + * @param __HANDLE__ TIM handle. + * @param __PRESC__ specifies the Prescaler new value. + * @retval None + */ +#define __HAL_TIM_SET_PRESCALER(__HANDLE__, __PRESC__) ((__HANDLE__)->Instance->PSC = (__PRESC__)) + +/** + * @brief Set the TIM Counter Register value on runtime. + * Note Please check if the bit 31 of CNT register is used as UIF copy or not, this may affect the counter range in + * case of 32 bits counter TIM instance. + * Bit 31 of CNT can be enabled/disabled using __HAL_TIM_UIFREMAP_ENABLE()/__HAL_TIM_UIFREMAP_DISABLE() macros. + * @param __HANDLE__ TIM handle. + * @param __COUNTER__ specifies the Counter register new value. + * @retval None + */ +#define __HAL_TIM_SET_COUNTER(__HANDLE__, __COUNTER__) ((__HANDLE__)->Instance->CNT = (__COUNTER__)) + +/** + * @brief Get the TIM Counter Register value on runtime. + * @param __HANDLE__ TIM handle. + * @retval 16-bit or 32-bit value of the timer counter register (TIMx_CNT) + */ +#define __HAL_TIM_GET_COUNTER(__HANDLE__) ((__HANDLE__)->Instance->CNT) + +/** + * @brief Set the TIM Autoreload Register value on runtime without calling another time any Init function. + * @param __HANDLE__ TIM handle. + * @param __AUTORELOAD__ specifies the Counter register new value. + * @retval None + */ +#define __HAL_TIM_SET_AUTORELOAD(__HANDLE__, __AUTORELOAD__) \ + do{ \ + (__HANDLE__)->Instance->ARR = (__AUTORELOAD__); \ + (__HANDLE__)->Init.Period = (__AUTORELOAD__); \ + } while(0) + +/** + * @brief Get the TIM Autoreload Register value on runtime. + * @param __HANDLE__ TIM handle. + * @retval 16-bit or 32-bit value of the timer auto-reload register(TIMx_ARR) + */ +#define __HAL_TIM_GET_AUTORELOAD(__HANDLE__) ((__HANDLE__)->Instance->ARR) + +/** + * @brief Set the TIM Clock Division value on runtime without calling another time any Init function. + * @param __HANDLE__ TIM handle. + * @param __CKD__ specifies the clock division value. + * This parameter can be one of the following value: + * @arg TIM_CLOCKDIVISION_DIV1: tDTS=tCK_INT + * @arg TIM_CLOCKDIVISION_DIV2: tDTS=2*tCK_INT + * @arg TIM_CLOCKDIVISION_DIV4: tDTS=4*tCK_INT + * @retval None + */ +#define __HAL_TIM_SET_CLOCKDIVISION(__HANDLE__, __CKD__) \ + do{ \ + (__HANDLE__)->Instance->CR1 &= (~TIM_CR1_CKD); \ + (__HANDLE__)->Instance->CR1 |= (__CKD__); \ + (__HANDLE__)->Init.ClockDivision = (__CKD__); \ + } while(0) + +/** + * @brief Get the TIM Clock Division value on runtime. + * @param __HANDLE__ TIM handle. + * @retval The clock division can be one of the following values: + * @arg TIM_CLOCKDIVISION_DIV1: tDTS=tCK_INT + * @arg TIM_CLOCKDIVISION_DIV2: tDTS=2*tCK_INT + * @arg TIM_CLOCKDIVISION_DIV4: tDTS=4*tCK_INT + */ +#define __HAL_TIM_GET_CLOCKDIVISION(__HANDLE__) ((__HANDLE__)->Instance->CR1 & TIM_CR1_CKD) + +/** + * @brief Set the TIM Input Capture prescaler on runtime without calling another time HAL_TIM_IC_ConfigChannel() + * function. + * @param __HANDLE__ TIM handle. 
+ * @param __CHANNEL__ TIM Channels to be configured. + * This parameter can be one of the following values: + * @arg TIM_CHANNEL_1: TIM Channel 1 selected + * @arg TIM_CHANNEL_2: TIM Channel 2 selected + * @arg TIM_CHANNEL_3: TIM Channel 3 selected + * @arg TIM_CHANNEL_4: TIM Channel 4 selected + * @param __ICPSC__ specifies the Input Capture4 prescaler new value. + * This parameter can be one of the following values: + * @arg TIM_ICPSC_DIV1: no prescaler + * @arg TIM_ICPSC_DIV2: capture is done once every 2 events + * @arg TIM_ICPSC_DIV4: capture is done once every 4 events + * @arg TIM_ICPSC_DIV8: capture is done once every 8 events + * @retval None + */ +#define __HAL_TIM_SET_ICPRESCALER(__HANDLE__, __CHANNEL__, __ICPSC__) \ + do{ \ + TIM_RESET_ICPRESCALERVALUE((__HANDLE__), (__CHANNEL__)); \ + TIM_SET_ICPRESCALERVALUE((__HANDLE__), (__CHANNEL__), (__ICPSC__)); \ + } while(0) + +/** + * @brief Get the TIM Input Capture prescaler on runtime. + * @param __HANDLE__ TIM handle. + * @param __CHANNEL__ TIM Channels to be configured. + * This parameter can be one of the following values: + * @arg TIM_CHANNEL_1: get input capture 1 prescaler value + * @arg TIM_CHANNEL_2: get input capture 2 prescaler value + * @arg TIM_CHANNEL_3: get input capture 3 prescaler value + * @arg TIM_CHANNEL_4: get input capture 4 prescaler value + * @retval The input capture prescaler can be one of the following values: + * @arg TIM_ICPSC_DIV1: no prescaler + * @arg TIM_ICPSC_DIV2: capture is done once every 2 events + * @arg TIM_ICPSC_DIV4: capture is done once every 4 events + * @arg TIM_ICPSC_DIV8: capture is done once every 8 events + */ +#define __HAL_TIM_GET_ICPRESCALER(__HANDLE__, __CHANNEL__) \ + (((__CHANNEL__) == TIM_CHANNEL_1) ? ((__HANDLE__)->Instance->CCMR1 & TIM_CCMR1_IC1PSC) :\ + ((__CHANNEL__) == TIM_CHANNEL_2) ? (((__HANDLE__)->Instance->CCMR1 & TIM_CCMR1_IC2PSC) >> 8U) :\ + ((__CHANNEL__) == TIM_CHANNEL_3) ? ((__HANDLE__)->Instance->CCMR2 & TIM_CCMR2_IC3PSC) :\ + (((__HANDLE__)->Instance->CCMR2 & TIM_CCMR2_IC4PSC)) >> 8U) + +/** + * @brief Set the TIM Capture Compare Register value on runtime without calling another time ConfigChannel function. + * @param __HANDLE__ TIM handle. + * @param __CHANNEL__ TIM Channels to be configured. + * This parameter can be one of the following values: + * @arg TIM_CHANNEL_1: TIM Channel 1 selected + * @arg TIM_CHANNEL_2: TIM Channel 2 selected + * @arg TIM_CHANNEL_3: TIM Channel 3 selected + * @arg TIM_CHANNEL_4: TIM Channel 4 selected + * @arg TIM_CHANNEL_5: TIM Channel 5 selected + * @arg TIM_CHANNEL_6: TIM Channel 6 selected + * @param __COMPARE__ specifies the Capture Compare register new value. + * @retval None + */ +#define __HAL_TIM_SET_COMPARE(__HANDLE__, __CHANNEL__, __COMPARE__) \ + (((__CHANNEL__) == TIM_CHANNEL_1) ? ((__HANDLE__)->Instance->CCR1 = (__COMPARE__)) :\ + ((__CHANNEL__) == TIM_CHANNEL_2) ? ((__HANDLE__)->Instance->CCR2 = (__COMPARE__)) :\ + ((__CHANNEL__) == TIM_CHANNEL_3) ? ((__HANDLE__)->Instance->CCR3 = (__COMPARE__)) :\ + ((__CHANNEL__) == TIM_CHANNEL_4) ? ((__HANDLE__)->Instance->CCR4 = (__COMPARE__)) :\ + ((__CHANNEL__) == TIM_CHANNEL_5) ? ((__HANDLE__)->Instance->CCR5 = (__COMPARE__)) :\ + ((__HANDLE__)->Instance->CCR6 = (__COMPARE__))) + +/** + * @brief Get the TIM Capture Compare Register value on runtime. + * @param __HANDLE__ TIM handle. 
+ * @param __CHANNEL__ TIM Channel associated with the capture compare register + * This parameter can be one of the following values: + * @arg TIM_CHANNEL_1: get capture/compare 1 register value + * @arg TIM_CHANNEL_2: get capture/compare 2 register value + * @arg TIM_CHANNEL_3: get capture/compare 3 register value + * @arg TIM_CHANNEL_4: get capture/compare 4 register value + * @arg TIM_CHANNEL_5: get capture/compare 5 register value + * @arg TIM_CHANNEL_6: get capture/compare 6 register value + * @retval 16-bit or 32-bit value of the capture/compare register (TIMx_CCRy) + */ +#define __HAL_TIM_GET_COMPARE(__HANDLE__, __CHANNEL__) \ + (((__CHANNEL__) == TIM_CHANNEL_1) ? ((__HANDLE__)->Instance->CCR1) :\ + ((__CHANNEL__) == TIM_CHANNEL_2) ? ((__HANDLE__)->Instance->CCR2) :\ + ((__CHANNEL__) == TIM_CHANNEL_3) ? ((__HANDLE__)->Instance->CCR3) :\ + ((__CHANNEL__) == TIM_CHANNEL_4) ? ((__HANDLE__)->Instance->CCR4) :\ + ((__CHANNEL__) == TIM_CHANNEL_5) ? ((__HANDLE__)->Instance->CCR5) :\ + ((__HANDLE__)->Instance->CCR6)) + +/** + * @brief Set the TIM Output compare preload. + * @param __HANDLE__ TIM handle. + * @param __CHANNEL__ TIM Channels to be configured. + * This parameter can be one of the following values: + * @arg TIM_CHANNEL_1: TIM Channel 1 selected + * @arg TIM_CHANNEL_2: TIM Channel 2 selected + * @arg TIM_CHANNEL_3: TIM Channel 3 selected + * @arg TIM_CHANNEL_4: TIM Channel 4 selected + * @arg TIM_CHANNEL_5: TIM Channel 5 selected + * @arg TIM_CHANNEL_6: TIM Channel 6 selected + * @retval None + */ +#define __HAL_TIM_ENABLE_OCxPRELOAD(__HANDLE__, __CHANNEL__) \ + (((__CHANNEL__) == TIM_CHANNEL_1) ? ((__HANDLE__)->Instance->CCMR1 |= TIM_CCMR1_OC1PE) :\ + ((__CHANNEL__) == TIM_CHANNEL_2) ? ((__HANDLE__)->Instance->CCMR1 |= TIM_CCMR1_OC2PE) :\ + ((__CHANNEL__) == TIM_CHANNEL_3) ? ((__HANDLE__)->Instance->CCMR2 |= TIM_CCMR2_OC3PE) :\ + ((__CHANNEL__) == TIM_CHANNEL_4) ? ((__HANDLE__)->Instance->CCMR2 |= TIM_CCMR2_OC4PE) :\ + ((__CHANNEL__) == TIM_CHANNEL_5) ? ((__HANDLE__)->Instance->CCMR3 |= TIM_CCMR3_OC5PE) :\ + ((__HANDLE__)->Instance->CCMR3 |= TIM_CCMR3_OC6PE)) + +/** + * @brief Reset the TIM Output compare preload. + * @param __HANDLE__ TIM handle. + * @param __CHANNEL__ TIM Channels to be configured. + * This parameter can be one of the following values: + * @arg TIM_CHANNEL_1: TIM Channel 1 selected + * @arg TIM_CHANNEL_2: TIM Channel 2 selected + * @arg TIM_CHANNEL_3: TIM Channel 3 selected + * @arg TIM_CHANNEL_4: TIM Channel 4 selected + * @arg TIM_CHANNEL_5: TIM Channel 5 selected + * @arg TIM_CHANNEL_6: TIM Channel 6 selected + * @retval None + */ +#define __HAL_TIM_DISABLE_OCxPRELOAD(__HANDLE__, __CHANNEL__) \ + (((__CHANNEL__) == TIM_CHANNEL_1) ? ((__HANDLE__)->Instance->CCMR1 &= ~TIM_CCMR1_OC1PE) :\ + ((__CHANNEL__) == TIM_CHANNEL_2) ? ((__HANDLE__)->Instance->CCMR1 &= ~TIM_CCMR1_OC2PE) :\ + ((__CHANNEL__) == TIM_CHANNEL_3) ? ((__HANDLE__)->Instance->CCMR2 &= ~TIM_CCMR2_OC3PE) :\ + ((__CHANNEL__) == TIM_CHANNEL_4) ? ((__HANDLE__)->Instance->CCMR2 &= ~TIM_CCMR2_OC4PE) :\ + ((__CHANNEL__) == TIM_CHANNEL_5) ? ((__HANDLE__)->Instance->CCMR3 &= ~TIM_CCMR3_OC5PE) :\ + ((__HANDLE__)->Instance->CCMR3 &= ~TIM_CCMR3_OC6PE)) + +/** + * @brief Enable fast mode for a given channel. + * @param __HANDLE__ TIM handle. + * @param __CHANNEL__ TIM Channels to be configured. 
+ * This parameter can be one of the following values: + * @arg TIM_CHANNEL_1: TIM Channel 1 selected + * @arg TIM_CHANNEL_2: TIM Channel 2 selected + * @arg TIM_CHANNEL_3: TIM Channel 3 selected + * @arg TIM_CHANNEL_4: TIM Channel 4 selected + * @arg TIM_CHANNEL_5: TIM Channel 5 selected + * @arg TIM_CHANNEL_6: TIM Channel 6 selected + * @note When fast mode is enabled an active edge on the trigger input acts + * like a compare match on CCx output. Delay to sample the trigger + * input and to activate CCx output is reduced to 3 clock cycles. + * @note Fast mode acts only if the channel is configured in PWM1 or PWM2 mode. + * @retval None + */ +#define __HAL_TIM_ENABLE_OCxFAST(__HANDLE__, __CHANNEL__) \ + (((__CHANNEL__) == TIM_CHANNEL_1) ? ((__HANDLE__)->Instance->CCMR1 |= TIM_CCMR1_OC1FE) :\ + ((__CHANNEL__) == TIM_CHANNEL_2) ? ((__HANDLE__)->Instance->CCMR1 |= TIM_CCMR1_OC2FE) :\ + ((__CHANNEL__) == TIM_CHANNEL_3) ? ((__HANDLE__)->Instance->CCMR2 |= TIM_CCMR2_OC3FE) :\ + ((__CHANNEL__) == TIM_CHANNEL_4) ? ((__HANDLE__)->Instance->CCMR2 |= TIM_CCMR2_OC4FE) :\ + ((__CHANNEL__) == TIM_CHANNEL_5) ? ((__HANDLE__)->Instance->CCMR3 |= TIM_CCMR3_OC5FE) :\ + ((__HANDLE__)->Instance->CCMR3 |= TIM_CCMR3_OC6FE)) + +/** + * @brief Disable fast mode for a given channel. + * @param __HANDLE__ TIM handle. + * @param __CHANNEL__ TIM Channels to be configured. + * This parameter can be one of the following values: + * @arg TIM_CHANNEL_1: TIM Channel 1 selected + * @arg TIM_CHANNEL_2: TIM Channel 2 selected + * @arg TIM_CHANNEL_3: TIM Channel 3 selected + * @arg TIM_CHANNEL_4: TIM Channel 4 selected + * @arg TIM_CHANNEL_5: TIM Channel 5 selected + * @arg TIM_CHANNEL_6: TIM Channel 6 selected + * @note When fast mode is disabled CCx output behaves normally depending + * on counter and CCRx values even when the trigger is ON. The minimum + * delay to activate CCx output when an active edge occurs on the + * trigger input is 5 clock cycles. + * @retval None + */ +#define __HAL_TIM_DISABLE_OCxFAST(__HANDLE__, __CHANNEL__) \ + (((__CHANNEL__) == TIM_CHANNEL_1) ? ((__HANDLE__)->Instance->CCMR1 &= ~TIM_CCMR1_OC1FE) :\ + ((__CHANNEL__) == TIM_CHANNEL_2) ? ((__HANDLE__)->Instance->CCMR1 &= ~TIM_CCMR1_OC2FE) :\ + ((__CHANNEL__) == TIM_CHANNEL_3) ? ((__HANDLE__)->Instance->CCMR2 &= ~TIM_CCMR2_OC3FE) :\ + ((__CHANNEL__) == TIM_CHANNEL_4) ? ((__HANDLE__)->Instance->CCMR2 &= ~TIM_CCMR2_OC4FE) :\ + ((__CHANNEL__) == TIM_CHANNEL_5) ? ((__HANDLE__)->Instance->CCMR3 &= ~TIM_CCMR3_OC5FE) :\ + ((__HANDLE__)->Instance->CCMR3 &= ~TIM_CCMR3_OC6FE)) + +/** + * @brief Set the Update Request Source (URS) bit of the TIMx_CR1 register. + * @param __HANDLE__ TIM handle. + * @note When the URS bit of the TIMx_CR1 register is set, only counter + * overflow/underflow generates an update interrupt or DMA request (if + * enabled) + * @retval None + */ +#define __HAL_TIM_URS_ENABLE(__HANDLE__) ((__HANDLE__)->Instance->CR1|= TIM_CR1_URS) + +/** + * @brief Reset the Update Request Source (URS) bit of the TIMx_CR1 register. + * @param __HANDLE__ TIM handle. + * @note When the URS bit of the TIMx_CR1 register is reset, any of the + * following events generate an update interrupt or DMA request (if + * enabled): + * _ Counter overflow underflow + * _ Setting the UG bit + * _ Update generation through the slave mode controller + * @retval None + */ +#define __HAL_TIM_URS_DISABLE(__HANDLE__) ((__HANDLE__)->Instance->CR1&=~TIM_CR1_URS) + +/** + * @brief Set the TIM Capture x input polarity on runtime. + * @param __HANDLE__ TIM handle. 
+ * @param __CHANNEL__ TIM Channels to be configured. + * This parameter can be one of the following values: + * @arg TIM_CHANNEL_1: TIM Channel 1 selected + * @arg TIM_CHANNEL_2: TIM Channel 2 selected + * @arg TIM_CHANNEL_3: TIM Channel 3 selected + * @arg TIM_CHANNEL_4: TIM Channel 4 selected + * @param __POLARITY__ Polarity for TIx source + * @arg TIM_INPUTCHANNELPOLARITY_RISING: Rising Edge + * @arg TIM_INPUTCHANNELPOLARITY_FALLING: Falling Edge + * @arg TIM_INPUTCHANNELPOLARITY_BOTHEDGE: Rising and Falling Edge + * @retval None + */ +#define __HAL_TIM_SET_CAPTUREPOLARITY(__HANDLE__, __CHANNEL__, __POLARITY__) \ + do{ \ + TIM_RESET_CAPTUREPOLARITY((__HANDLE__), (__CHANNEL__)); \ + TIM_SET_CAPTUREPOLARITY((__HANDLE__), (__CHANNEL__), (__POLARITY__)); \ + }while(0) + +/** + * @} + */ +/* End of exported macros ----------------------------------------------------*/ + +/* Private constants ---------------------------------------------------------*/ +/** @defgroup TIM_Private_Constants TIM Private Constants + * @{ + */ +/* The counter of a timer instance is disabled only if all the CCx and CCxN + channels have been disabled */ +#define TIM_CCER_CCxE_MASK ((uint32_t)(TIM_CCER_CC1E | TIM_CCER_CC2E | TIM_CCER_CC3E | TIM_CCER_CC4E)) +#define TIM_CCER_CCxNE_MASK ((uint32_t)(TIM_CCER_CC1NE | TIM_CCER_CC2NE | TIM_CCER_CC3NE)) +/** + * @} + */ +/* End of private constants --------------------------------------------------*/ + +/* Private macros ------------------------------------------------------------*/ +/** @defgroup TIM_Private_Macros TIM Private Macros + * @{ + */ +#define IS_TIM_CLEARINPUT_SOURCE(__MODE__) (((__MODE__) == TIM_CLEARINPUTSOURCE_NONE) || \ + ((__MODE__) == TIM_CLEARINPUTSOURCE_ETR)) + +#if defined(TIM_AF1_BKINE)&&defined(TIM_AF2_BKINE) +#define IS_TIM_DMA_BASE(__BASE__) (((__BASE__) == TIM_DMABASE_CR1) || \ + ((__BASE__) == TIM_DMABASE_CR2) || \ + ((__BASE__) == TIM_DMABASE_SMCR) || \ + ((__BASE__) == TIM_DMABASE_DIER) || \ + ((__BASE__) == TIM_DMABASE_SR) || \ + ((__BASE__) == TIM_DMABASE_EGR) || \ + ((__BASE__) == TIM_DMABASE_CCMR1) || \ + ((__BASE__) == TIM_DMABASE_CCMR2) || \ + ((__BASE__) == TIM_DMABASE_CCER) || \ + ((__BASE__) == TIM_DMABASE_CNT) || \ + ((__BASE__) == TIM_DMABASE_PSC) || \ + ((__BASE__) == TIM_DMABASE_ARR) || \ + ((__BASE__) == TIM_DMABASE_RCR) || \ + ((__BASE__) == TIM_DMABASE_CCR1) || \ + ((__BASE__) == TIM_DMABASE_CCR2) || \ + ((__BASE__) == TIM_DMABASE_CCR3) || \ + ((__BASE__) == TIM_DMABASE_CCR4) || \ + ((__BASE__) == TIM_DMABASE_BDTR) || \ + ((__BASE__) == TIM_DMABASE_OR) || \ + ((__BASE__) == TIM_DMABASE_CCMR3) || \ + ((__BASE__) == TIM_DMABASE_CCR5) || \ + ((__BASE__) == TIM_DMABASE_CCR6) || \ + ((__BASE__) == TIM_DMABASE_AF1) || \ + ((__BASE__) == TIM_DMABASE_AF2)) +#else +#define IS_TIM_DMA_BASE(__BASE__) (((__BASE__) == TIM_DMABASE_CR1) || \ + ((__BASE__) == TIM_DMABASE_CR2) || \ + ((__BASE__) == TIM_DMABASE_SMCR) || \ + ((__BASE__) == TIM_DMABASE_DIER) || \ + ((__BASE__) == TIM_DMABASE_SR) || \ + ((__BASE__) == TIM_DMABASE_EGR) || \ + ((__BASE__) == TIM_DMABASE_CCMR1) || \ + ((__BASE__) == TIM_DMABASE_CCMR2) || \ + ((__BASE__) == TIM_DMABASE_CCER) || \ + ((__BASE__) == TIM_DMABASE_CNT) || \ + ((__BASE__) == TIM_DMABASE_PSC) || \ + ((__BASE__) == TIM_DMABASE_ARR) || \ + ((__BASE__) == TIM_DMABASE_RCR) || \ + ((__BASE__) == TIM_DMABASE_CCR1) || \ + ((__BASE__) == TIM_DMABASE_CCR2) || \ + ((__BASE__) == TIM_DMABASE_CCR3) || \ + ((__BASE__) == TIM_DMABASE_CCR4) || \ + ((__BASE__) == TIM_DMABASE_BDTR) || \ + ((__BASE__) == TIM_DMABASE_OR) || \ + 
((__BASE__) == TIM_DMABASE_CCMR3) || \ + ((__BASE__) == TIM_DMABASE_CCR5) || \ + ((__BASE__) == TIM_DMABASE_CCR6)) +#endif /* TIM_AF1_BKINE && TIM_AF1_BKINE */ + +#define IS_TIM_EVENT_SOURCE(__SOURCE__) ((((__SOURCE__) & 0xFFFFFE00U) == 0x00000000U) && ((__SOURCE__) != 0x00000000U)) + +#define IS_TIM_COUNTER_MODE(__MODE__) (((__MODE__) == TIM_COUNTERMODE_UP) || \ + ((__MODE__) == TIM_COUNTERMODE_DOWN) || \ + ((__MODE__) == TIM_COUNTERMODE_CENTERALIGNED1) || \ + ((__MODE__) == TIM_COUNTERMODE_CENTERALIGNED2) || \ + ((__MODE__) == TIM_COUNTERMODE_CENTERALIGNED3)) + +#define IS_TIM_UIFREMAP_MODE(__MODE__) (((__MODE__) == TIM_UIFREMAP_DISABLE) || \ + ((__MODE__) == TIM_UIFREMAP_ENALE)) + +#define IS_TIM_CLOCKDIVISION_DIV(__DIV__) (((__DIV__) == TIM_CLOCKDIVISION_DIV1) || \ + ((__DIV__) == TIM_CLOCKDIVISION_DIV2) || \ + ((__DIV__) == TIM_CLOCKDIVISION_DIV4)) + +#define IS_TIM_AUTORELOAD_PRELOAD(PRELOAD) (((PRELOAD) == TIM_AUTORELOAD_PRELOAD_DISABLE) || \ + ((PRELOAD) == TIM_AUTORELOAD_PRELOAD_ENABLE)) + +#define IS_TIM_FAST_STATE(__STATE__) (((__STATE__) == TIM_OCFAST_DISABLE) || \ + ((__STATE__) == TIM_OCFAST_ENABLE)) + +#define IS_TIM_OC_POLARITY(__POLARITY__) (((__POLARITY__) == TIM_OCPOLARITY_HIGH) || \ + ((__POLARITY__) == TIM_OCPOLARITY_LOW)) + +#define IS_TIM_OCN_POLARITY(__POLARITY__) (((__POLARITY__) == TIM_OCNPOLARITY_HIGH) || \ + ((__POLARITY__) == TIM_OCNPOLARITY_LOW)) + +#define IS_TIM_OCIDLE_STATE(__STATE__) (((__STATE__) == TIM_OCIDLESTATE_SET) || \ + ((__STATE__) == TIM_OCIDLESTATE_RESET)) + +#define IS_TIM_OCNIDLE_STATE(__STATE__) (((__STATE__) == TIM_OCNIDLESTATE_SET) || \ + ((__STATE__) == TIM_OCNIDLESTATE_RESET)) + +#define IS_TIM_ENCODERINPUT_POLARITY(__POLARITY__) (((__POLARITY__) == TIM_ENCODERINPUTPOLARITY_RISING) || \ + ((__POLARITY__) == TIM_ENCODERINPUTPOLARITY_FALLING)) + +#define IS_TIM_IC_POLARITY(__POLARITY__) (((__POLARITY__) == TIM_ICPOLARITY_RISING) || \ + ((__POLARITY__) == TIM_ICPOLARITY_FALLING) || \ + ((__POLARITY__) == TIM_ICPOLARITY_BOTHEDGE)) + +#define IS_TIM_IC_SELECTION(__SELECTION__) (((__SELECTION__) == TIM_ICSELECTION_DIRECTTI) || \ + ((__SELECTION__) == TIM_ICSELECTION_INDIRECTTI) || \ + ((__SELECTION__) == TIM_ICSELECTION_TRC)) + +#define IS_TIM_IC_PRESCALER(__PRESCALER__) (((__PRESCALER__) == TIM_ICPSC_DIV1) || \ + ((__PRESCALER__) == TIM_ICPSC_DIV2) || \ + ((__PRESCALER__) == TIM_ICPSC_DIV4) || \ + ((__PRESCALER__) == TIM_ICPSC_DIV8)) + +#define IS_TIM_OPM_MODE(__MODE__) (((__MODE__) == TIM_OPMODE_SINGLE) || \ + ((__MODE__) == TIM_OPMODE_REPETITIVE)) + +#define IS_TIM_ENCODER_MODE(__MODE__) (((__MODE__) == TIM_ENCODERMODE_TI1) || \ + ((__MODE__) == TIM_ENCODERMODE_TI2) || \ + ((__MODE__) == TIM_ENCODERMODE_TI12)) + +#define IS_TIM_DMA_SOURCE(__SOURCE__) ((((__SOURCE__) & 0xFFFF80FFU) == 0x00000000U) && ((__SOURCE__) != 0x00000000U)) + +#define IS_TIM_CHANNELS(__CHANNEL__) (((__CHANNEL__) == TIM_CHANNEL_1) || \ + ((__CHANNEL__) == TIM_CHANNEL_2) || \ + ((__CHANNEL__) == TIM_CHANNEL_3) || \ + ((__CHANNEL__) == TIM_CHANNEL_4) || \ + ((__CHANNEL__) == TIM_CHANNEL_5) || \ + ((__CHANNEL__) == TIM_CHANNEL_6) || \ + ((__CHANNEL__) == TIM_CHANNEL_ALL)) + +#define IS_TIM_OPM_CHANNELS(__CHANNEL__) (((__CHANNEL__) == TIM_CHANNEL_1) || \ + ((__CHANNEL__) == TIM_CHANNEL_2)) + +#define IS_TIM_COMPLEMENTARY_CHANNELS(__CHANNEL__) (((__CHANNEL__) == TIM_CHANNEL_1) || \ + ((__CHANNEL__) == TIM_CHANNEL_2) || \ + ((__CHANNEL__) == TIM_CHANNEL_3)) + +#define IS_TIM_CLOCKSOURCE(__CLOCK__) (((__CLOCK__) == TIM_CLOCKSOURCE_INTERNAL) || \ + ((__CLOCK__) == 
TIM_CLOCKSOURCE_ETRMODE2) || \ + ((__CLOCK__) == TIM_CLOCKSOURCE_ITR0) || \ + ((__CLOCK__) == TIM_CLOCKSOURCE_ITR1) || \ + ((__CLOCK__) == TIM_CLOCKSOURCE_ITR2) || \ + ((__CLOCK__) == TIM_CLOCKSOURCE_ITR3) || \ + ((__CLOCK__) == TIM_CLOCKSOURCE_TI1ED) || \ + ((__CLOCK__) == TIM_CLOCKSOURCE_TI1) || \ + ((__CLOCK__) == TIM_CLOCKSOURCE_TI2) || \ + ((__CLOCK__) == TIM_CLOCKSOURCE_ETRMODE1)) + +#define IS_TIM_CLOCKPOLARITY(__POLARITY__) (((__POLARITY__) == TIM_CLOCKPOLARITY_INVERTED) || \ + ((__POLARITY__) == TIM_CLOCKPOLARITY_NONINVERTED) || \ + ((__POLARITY__) == TIM_CLOCKPOLARITY_RISING) || \ + ((__POLARITY__) == TIM_CLOCKPOLARITY_FALLING) || \ + ((__POLARITY__) == TIM_CLOCKPOLARITY_BOTHEDGE)) + +#define IS_TIM_CLOCKPRESCALER(__PRESCALER__) (((__PRESCALER__) == TIM_CLOCKPRESCALER_DIV1) || \ + ((__PRESCALER__) == TIM_CLOCKPRESCALER_DIV2) || \ + ((__PRESCALER__) == TIM_CLOCKPRESCALER_DIV4) || \ + ((__PRESCALER__) == TIM_CLOCKPRESCALER_DIV8)) + +#define IS_TIM_CLOCKFILTER(__ICFILTER__) ((__ICFILTER__) <= 0xFU) + +#define IS_TIM_CLEARINPUT_POLARITY(__POLARITY__) (((__POLARITY__) == TIM_CLEARINPUTPOLARITY_INVERTED) || \ + ((__POLARITY__) == TIM_CLEARINPUTPOLARITY_NONINVERTED)) + +#define IS_TIM_CLEARINPUT_PRESCALER(__PRESCALER__) (((__PRESCALER__) == TIM_CLEARINPUTPRESCALER_DIV1) || \ + ((__PRESCALER__) == TIM_CLEARINPUTPRESCALER_DIV2) || \ + ((__PRESCALER__) == TIM_CLEARINPUTPRESCALER_DIV4) || \ + ((__PRESCALER__) == TIM_CLEARINPUTPRESCALER_DIV8)) + +#define IS_TIM_CLEARINPUT_FILTER(__ICFILTER__) ((__ICFILTER__) <= 0xFU) + +#define IS_TIM_OSSR_STATE(__STATE__) (((__STATE__) == TIM_OSSR_ENABLE) || \ + ((__STATE__) == TIM_OSSR_DISABLE)) + +#define IS_TIM_OSSI_STATE(__STATE__) (((__STATE__) == TIM_OSSI_ENABLE) || \ + ((__STATE__) == TIM_OSSI_DISABLE)) + +#define IS_TIM_LOCK_LEVEL(__LEVEL__) (((__LEVEL__) == TIM_LOCKLEVEL_OFF) || \ + ((__LEVEL__) == TIM_LOCKLEVEL_1) || \ + ((__LEVEL__) == TIM_LOCKLEVEL_2) || \ + ((__LEVEL__) == TIM_LOCKLEVEL_3)) + +#define IS_TIM_BREAK_FILTER(__BRKFILTER__) ((__BRKFILTER__) <= 0xFUL) + + +#define IS_TIM_BREAK_STATE(__STATE__) (((__STATE__) == TIM_BREAK_ENABLE) || \ + ((__STATE__) == TIM_BREAK_DISABLE)) + +#define IS_TIM_BREAK_POLARITY(__POLARITY__) (((__POLARITY__) == TIM_BREAKPOLARITY_LOW) || \ + ((__POLARITY__) == TIM_BREAKPOLARITY_HIGH)) + +#define IS_TIM_BREAK2_STATE(__STATE__) (((__STATE__) == TIM_BREAK2_ENABLE) || \ + ((__STATE__) == TIM_BREAK2_DISABLE)) + +#define IS_TIM_BREAK2_POLARITY(__POLARITY__) (((__POLARITY__) == TIM_BREAK2POLARITY_LOW) || \ + ((__POLARITY__) == TIM_BREAK2POLARITY_HIGH)) + +#define IS_TIM_AUTOMATIC_OUTPUT_STATE(__STATE__) (((__STATE__) == TIM_AUTOMATICOUTPUT_ENABLE) || \ + ((__STATE__) == TIM_AUTOMATICOUTPUT_DISABLE)) + +#define IS_TIM_GROUPCH5(__OCREF__) ((((__OCREF__) & 0x1FFFFFFFU) == 0x00000000U)) + +#define IS_TIM_TRGO_SOURCE(__SOURCE__) (((__SOURCE__) == TIM_TRGO_RESET) || \ + ((__SOURCE__) == TIM_TRGO_ENABLE) || \ + ((__SOURCE__) == TIM_TRGO_UPDATE) || \ + ((__SOURCE__) == TIM_TRGO_OC1) || \ + ((__SOURCE__) == TIM_TRGO_OC1REF) || \ + ((__SOURCE__) == TIM_TRGO_OC2REF) || \ + ((__SOURCE__) == TIM_TRGO_OC3REF) || \ + ((__SOURCE__) == TIM_TRGO_OC4REF)) + +#define IS_TIM_TRGO2_SOURCE(__SOURCE__) (((__SOURCE__) == TIM_TRGO2_RESET) || \ + ((__SOURCE__) == TIM_TRGO2_ENABLE) || \ + ((__SOURCE__) == TIM_TRGO2_UPDATE) || \ + ((__SOURCE__) == TIM_TRGO2_OC1) || \ + ((__SOURCE__) == TIM_TRGO2_OC1REF) || \ + ((__SOURCE__) == TIM_TRGO2_OC2REF) || \ + ((__SOURCE__) == TIM_TRGO2_OC3REF) || \ + ((__SOURCE__) == TIM_TRGO2_OC3REF) || \ + 
((__SOURCE__) == TIM_TRGO2_OC4REF) || \ + ((__SOURCE__) == TIM_TRGO2_OC5REF) || \ + ((__SOURCE__) == TIM_TRGO2_OC6REF) || \ + ((__SOURCE__) == TIM_TRGO2_OC4REF_RISINGFALLING) || \ + ((__SOURCE__) == TIM_TRGO2_OC6REF_RISINGFALLING) || \ + ((__SOURCE__) == TIM_TRGO2_OC4REF_RISING_OC6REF_RISING) || \ + ((__SOURCE__) == TIM_TRGO2_OC4REF_RISING_OC6REF_FALLING) || \ + ((__SOURCE__) == TIM_TRGO2_OC5REF_RISING_OC6REF_RISING) || \ + ((__SOURCE__) == TIM_TRGO2_OC5REF_RISING_OC6REF_FALLING)) + +#define IS_TIM_MSM_STATE(__STATE__) (((__STATE__) == TIM_MASTERSLAVEMODE_ENABLE) || \ + ((__STATE__) == TIM_MASTERSLAVEMODE_DISABLE)) + +#define IS_TIM_SLAVE_MODE(__MODE__) (((__MODE__) == TIM_SLAVEMODE_DISABLE) || \ + ((__MODE__) == TIM_SLAVEMODE_RESET) || \ + ((__MODE__) == TIM_SLAVEMODE_GATED) || \ + ((__MODE__) == TIM_SLAVEMODE_TRIGGER) || \ + ((__MODE__) == TIM_SLAVEMODE_EXTERNAL1) || \ + ((__MODE__) == TIM_SLAVEMODE_COMBINED_RESETTRIGGER)) + +#define IS_TIM_PWM_MODE(__MODE__) (((__MODE__) == TIM_OCMODE_PWM1) || \ + ((__MODE__) == TIM_OCMODE_PWM2) || \ + ((__MODE__) == TIM_OCMODE_COMBINED_PWM1) || \ + ((__MODE__) == TIM_OCMODE_COMBINED_PWM2) || \ + ((__MODE__) == TIM_OCMODE_ASSYMETRIC_PWM1) || \ + ((__MODE__) == TIM_OCMODE_ASSYMETRIC_PWM2)) + +#define IS_TIM_OC_MODE(__MODE__) (((__MODE__) == TIM_OCMODE_TIMING) || \ + ((__MODE__) == TIM_OCMODE_ACTIVE) || \ + ((__MODE__) == TIM_OCMODE_INACTIVE) || \ + ((__MODE__) == TIM_OCMODE_TOGGLE) || \ + ((__MODE__) == TIM_OCMODE_FORCED_ACTIVE) || \ + ((__MODE__) == TIM_OCMODE_FORCED_INACTIVE) || \ + ((__MODE__) == TIM_OCMODE_RETRIGERRABLE_OPM1) || \ + ((__MODE__) == TIM_OCMODE_RETRIGERRABLE_OPM2)) + +#define IS_TIM_TRIGGER_SELECTION(__SELECTION__) (((__SELECTION__) == TIM_TS_ITR0) || \ + ((__SELECTION__) == TIM_TS_ITR1) || \ + ((__SELECTION__) == TIM_TS_ITR2) || \ + ((__SELECTION__) == TIM_TS_ITR3) || \ + ((__SELECTION__) == TIM_TS_TI1F_ED) || \ + ((__SELECTION__) == TIM_TS_TI1FP1) || \ + ((__SELECTION__) == TIM_TS_TI2FP2) || \ + ((__SELECTION__) == TIM_TS_ETRF)) + +#define IS_TIM_INTERNAL_TRIGGEREVENT_SELECTION(__SELECTION__) (((__SELECTION__) == TIM_TS_ITR0) || \ + ((__SELECTION__) == TIM_TS_ITR1) || \ + ((__SELECTION__) == TIM_TS_ITR2) || \ + ((__SELECTION__) == TIM_TS_ITR3) || \ + ((__SELECTION__) == TIM_TS_NONE)) + +#define IS_TIM_TRIGGERPOLARITY(__POLARITY__) (((__POLARITY__) == TIM_TRIGGERPOLARITY_INVERTED ) || \ + ((__POLARITY__) == TIM_TRIGGERPOLARITY_NONINVERTED) || \ + ((__POLARITY__) == TIM_TRIGGERPOLARITY_RISING ) || \ + ((__POLARITY__) == TIM_TRIGGERPOLARITY_FALLING ) || \ + ((__POLARITY__) == TIM_TRIGGERPOLARITY_BOTHEDGE )) + +#define IS_TIM_TRIGGERPRESCALER(__PRESCALER__) (((__PRESCALER__) == TIM_TRIGGERPRESCALER_DIV1) || \ + ((__PRESCALER__) == TIM_TRIGGERPRESCALER_DIV2) || \ + ((__PRESCALER__) == TIM_TRIGGERPRESCALER_DIV4) || \ + ((__PRESCALER__) == TIM_TRIGGERPRESCALER_DIV8)) + +#define IS_TIM_TRIGGERFILTER(__ICFILTER__) ((__ICFILTER__) <= 0xFU) + +#define IS_TIM_TI1SELECTION(__TI1SELECTION__) (((__TI1SELECTION__) == TIM_TI1SELECTION_CH1) || \ + ((__TI1SELECTION__) == TIM_TI1SELECTION_XORCOMBINATION)) + +#define IS_TIM_DMA_LENGTH(__LENGTH__) (((__LENGTH__) == TIM_DMABURSTLENGTH_1TRANSFER) || \ + ((__LENGTH__) == TIM_DMABURSTLENGTH_2TRANSFERS) || \ + ((__LENGTH__) == TIM_DMABURSTLENGTH_3TRANSFERS) || \ + ((__LENGTH__) == TIM_DMABURSTLENGTH_4TRANSFERS) || \ + ((__LENGTH__) == TIM_DMABURSTLENGTH_5TRANSFERS) || \ + ((__LENGTH__) == TIM_DMABURSTLENGTH_6TRANSFERS) || \ + ((__LENGTH__) == TIM_DMABURSTLENGTH_7TRANSFERS) || \ + ((__LENGTH__) == 
TIM_DMABURSTLENGTH_8TRANSFERS) || \ + ((__LENGTH__) == TIM_DMABURSTLENGTH_9TRANSFERS) || \ + ((__LENGTH__) == TIM_DMABURSTLENGTH_10TRANSFERS) || \ + ((__LENGTH__) == TIM_DMABURSTLENGTH_11TRANSFERS) || \ + ((__LENGTH__) == TIM_DMABURSTLENGTH_12TRANSFERS) || \ + ((__LENGTH__) == TIM_DMABURSTLENGTH_13TRANSFERS) || \ + ((__LENGTH__) == TIM_DMABURSTLENGTH_14TRANSFERS) || \ + ((__LENGTH__) == TIM_DMABURSTLENGTH_15TRANSFERS) || \ + ((__LENGTH__) == TIM_DMABURSTLENGTH_16TRANSFERS) || \ + ((__LENGTH__) == TIM_DMABURSTLENGTH_17TRANSFERS) || \ + ((__LENGTH__) == TIM_DMABURSTLENGTH_18TRANSFERS)) + +#define IS_TIM_DMA_DATA_LENGTH(LENGTH) (((LENGTH) >= 0x1U) && ((LENGTH) < 0x10000U)) + +#define IS_TIM_IC_FILTER(__ICFILTER__) ((__ICFILTER__) <= 0xFU) + +#define IS_TIM_DEADTIME(__DEADTIME__) ((__DEADTIME__) <= 0xFFU) + +#define IS_TIM_BREAK_SYSTEM(__CONFIG__) (((__CONFIG__) == TIM_BREAK_SYSTEM_ECC) || \ + ((__CONFIG__) == TIM_BREAK_SYSTEM_PVD) || \ + ((__CONFIG__) == TIM_BREAK_SYSTEM_SRAM_PARITY_ERROR) || \ + ((__CONFIG__) == TIM_BREAK_SYSTEM_LOCKUP)) + +#define IS_TIM_SLAVEMODE_TRIGGER_ENABLED(__TRIGGER__) (((__TRIGGER__) == TIM_SLAVEMODE_TRIGGER) || \ + ((__TRIGGER__) == TIM_SLAVEMODE_COMBINED_RESETTRIGGER)) + +#define TIM_SET_ICPRESCALERVALUE(__HANDLE__, __CHANNEL__, __ICPSC__) \ + (((__CHANNEL__) == TIM_CHANNEL_1) ? ((__HANDLE__)->Instance->CCMR1 |= (__ICPSC__)) :\ + ((__CHANNEL__) == TIM_CHANNEL_2) ? ((__HANDLE__)->Instance->CCMR1 |= ((__ICPSC__) << 8U)) :\ + ((__CHANNEL__) == TIM_CHANNEL_3) ? ((__HANDLE__)->Instance->CCMR2 |= (__ICPSC__)) :\ + ((__HANDLE__)->Instance->CCMR2 |= ((__ICPSC__) << 8U))) + +#define TIM_RESET_ICPRESCALERVALUE(__HANDLE__, __CHANNEL__) \ + (((__CHANNEL__) == TIM_CHANNEL_1) ? ((__HANDLE__)->Instance->CCMR1 &= ~TIM_CCMR1_IC1PSC) :\ + ((__CHANNEL__) == TIM_CHANNEL_2) ? ((__HANDLE__)->Instance->CCMR1 &= ~TIM_CCMR1_IC2PSC) :\ + ((__CHANNEL__) == TIM_CHANNEL_3) ? ((__HANDLE__)->Instance->CCMR2 &= ~TIM_CCMR2_IC3PSC) :\ + ((__HANDLE__)->Instance->CCMR2 &= ~TIM_CCMR2_IC4PSC)) + +#define TIM_SET_CAPTUREPOLARITY(__HANDLE__, __CHANNEL__, __POLARITY__) \ + (((__CHANNEL__) == TIM_CHANNEL_1) ? ((__HANDLE__)->Instance->CCER |= (__POLARITY__)) :\ + ((__CHANNEL__) == TIM_CHANNEL_2) ? ((__HANDLE__)->Instance->CCER |= ((__POLARITY__) << 4U)) :\ + ((__CHANNEL__) == TIM_CHANNEL_3) ? ((__HANDLE__)->Instance->CCER |= ((__POLARITY__) << 8U)) :\ + ((__HANDLE__)->Instance->CCER |= (((__POLARITY__) << 12U)))) + +#define TIM_RESET_CAPTUREPOLARITY(__HANDLE__, __CHANNEL__) \ + (((__CHANNEL__) == TIM_CHANNEL_1) ? ((__HANDLE__)->Instance->CCER &= ~(TIM_CCER_CC1P | TIM_CCER_CC1NP)) :\ + ((__CHANNEL__) == TIM_CHANNEL_2) ? ((__HANDLE__)->Instance->CCER &= ~(TIM_CCER_CC2P | TIM_CCER_CC2NP)) :\ + ((__CHANNEL__) == TIM_CHANNEL_3) ? ((__HANDLE__)->Instance->CCER &= ~(TIM_CCER_CC3P | TIM_CCER_CC3NP)) :\ + ((__HANDLE__)->Instance->CCER &= ~(TIM_CCER_CC4P | TIM_CCER_CC4NP))) + +#define TIM_CHANNEL_STATE_GET(__HANDLE__, __CHANNEL__)\ + (((__CHANNEL__) == TIM_CHANNEL_1) ? (__HANDLE__)->ChannelState[0] :\ + ((__CHANNEL__) == TIM_CHANNEL_2) ? (__HANDLE__)->ChannelState[1] :\ + ((__CHANNEL__) == TIM_CHANNEL_3) ? (__HANDLE__)->ChannelState[2] :\ + ((__CHANNEL__) == TIM_CHANNEL_4) ? (__HANDLE__)->ChannelState[3] :\ + ((__CHANNEL__) == TIM_CHANNEL_5) ? (__HANDLE__)->ChannelState[4] :\ + (__HANDLE__)->ChannelState[5]) + +#define TIM_CHANNEL_STATE_SET(__HANDLE__, __CHANNEL__, __CHANNEL_STATE__) \ + (((__CHANNEL__) == TIM_CHANNEL_1) ? ((__HANDLE__)->ChannelState[0] = (__CHANNEL_STATE__)) :\ + ((__CHANNEL__) == TIM_CHANNEL_2) ? 
((__HANDLE__)->ChannelState[1] = (__CHANNEL_STATE__)) :\ + ((__CHANNEL__) == TIM_CHANNEL_3) ? ((__HANDLE__)->ChannelState[2] = (__CHANNEL_STATE__)) :\ + ((__CHANNEL__) == TIM_CHANNEL_4) ? ((__HANDLE__)->ChannelState[3] = (__CHANNEL_STATE__)) :\ + ((__CHANNEL__) == TIM_CHANNEL_5) ? ((__HANDLE__)->ChannelState[4] = (__CHANNEL_STATE__)) :\ + ((__HANDLE__)->ChannelState[5] = (__CHANNEL_STATE__))) + +#define TIM_CHANNEL_STATE_SET_ALL(__HANDLE__, __CHANNEL_STATE__) do { \ + (__HANDLE__)->ChannelState[0] = \ + (__CHANNEL_STATE__); \ + (__HANDLE__)->ChannelState[1] = \ + (__CHANNEL_STATE__); \ + (__HANDLE__)->ChannelState[2] = \ + (__CHANNEL_STATE__); \ + (__HANDLE__)->ChannelState[3] = \ + (__CHANNEL_STATE__); \ + (__HANDLE__)->ChannelState[4] = \ + (__CHANNEL_STATE__); \ + (__HANDLE__)->ChannelState[5] = \ + (__CHANNEL_STATE__); \ + } while(0) + +#define TIM_CHANNEL_N_STATE_GET(__HANDLE__, __CHANNEL__)\ + (((__CHANNEL__) == TIM_CHANNEL_1) ? (__HANDLE__)->ChannelNState[0] :\ + ((__CHANNEL__) == TIM_CHANNEL_2) ? (__HANDLE__)->ChannelNState[1] :\ + ((__CHANNEL__) == TIM_CHANNEL_3) ? (__HANDLE__)->ChannelNState[2] :\ + (__HANDLE__)->ChannelNState[3]) + +#define TIM_CHANNEL_N_STATE_SET(__HANDLE__, __CHANNEL__, __CHANNEL_STATE__) \ + (((__CHANNEL__) == TIM_CHANNEL_1) ? ((__HANDLE__)->ChannelNState[0] = (__CHANNEL_STATE__)) :\ + ((__CHANNEL__) == TIM_CHANNEL_2) ? ((__HANDLE__)->ChannelNState[1] = (__CHANNEL_STATE__)) :\ + ((__CHANNEL__) == TIM_CHANNEL_3) ? ((__HANDLE__)->ChannelNState[2] = (__CHANNEL_STATE__)) :\ + ((__HANDLE__)->ChannelNState[3] = (__CHANNEL_STATE__))) + +#define TIM_CHANNEL_N_STATE_SET_ALL(__HANDLE__, __CHANNEL_STATE__) do { \ + (__HANDLE__)->ChannelNState[0] = \ + (__CHANNEL_STATE__); \ + (__HANDLE__)->ChannelNState[1] = \ + (__CHANNEL_STATE__); \ + (__HANDLE__)->ChannelNState[2] = \ + (__CHANNEL_STATE__); \ + (__HANDLE__)->ChannelNState[3] = \ + (__CHANNEL_STATE__); \ + } while(0) + +/** + * @} + */ +/* End of private macros -----------------------------------------------------*/ + +/* Include TIM HAL Extended module */ +#include "stm32f7xx_hal_tim_ex.h" + +/* Exported functions --------------------------------------------------------*/ +/** @addtogroup TIM_Exported_Functions TIM Exported Functions + * @{ + */ + +/** @addtogroup TIM_Exported_Functions_Group1 TIM Time Base functions + * @brief Time Base functions + * @{ + */ +/* Time Base functions ********************************************************/ +HAL_StatusTypeDef HAL_TIM_Base_Init(TIM_HandleTypeDef *htim); +HAL_StatusTypeDef HAL_TIM_Base_DeInit(TIM_HandleTypeDef *htim); +void HAL_TIM_Base_MspInit(TIM_HandleTypeDef *htim); +void HAL_TIM_Base_MspDeInit(TIM_HandleTypeDef *htim); +/* Blocking mode: Polling */ +HAL_StatusTypeDef HAL_TIM_Base_Start(TIM_HandleTypeDef *htim); +HAL_StatusTypeDef HAL_TIM_Base_Stop(TIM_HandleTypeDef *htim); +/* Non-Blocking mode: Interrupt */ +HAL_StatusTypeDef HAL_TIM_Base_Start_IT(TIM_HandleTypeDef *htim); +HAL_StatusTypeDef HAL_TIM_Base_Stop_IT(TIM_HandleTypeDef *htim); +/* Non-Blocking mode: DMA */ +HAL_StatusTypeDef HAL_TIM_Base_Start_DMA(TIM_HandleTypeDef *htim, uint32_t *pData, uint16_t Length); +HAL_StatusTypeDef HAL_TIM_Base_Stop_DMA(TIM_HandleTypeDef *htim); +/** + * @} + */ + +/** @addtogroup TIM_Exported_Functions_Group2 TIM Output Compare functions + * @brief TIM Output Compare functions + * @{ + */ +/* Timer Output Compare functions *********************************************/ +HAL_StatusTypeDef HAL_TIM_OC_Init(TIM_HandleTypeDef *htim); +HAL_StatusTypeDef 
HAL_TIM_OC_DeInit(TIM_HandleTypeDef *htim); +void HAL_TIM_OC_MspInit(TIM_HandleTypeDef *htim); +void HAL_TIM_OC_MspDeInit(TIM_HandleTypeDef *htim); +/* Blocking mode: Polling */ +HAL_StatusTypeDef HAL_TIM_OC_Start(TIM_HandleTypeDef *htim, uint32_t Channel); +HAL_StatusTypeDef HAL_TIM_OC_Stop(TIM_HandleTypeDef *htim, uint32_t Channel); +/* Non-Blocking mode: Interrupt */ +HAL_StatusTypeDef HAL_TIM_OC_Start_IT(TIM_HandleTypeDef *htim, uint32_t Channel); +HAL_StatusTypeDef HAL_TIM_OC_Stop_IT(TIM_HandleTypeDef *htim, uint32_t Channel); +/* Non-Blocking mode: DMA */ +HAL_StatusTypeDef HAL_TIM_OC_Start_DMA(TIM_HandleTypeDef *htim, uint32_t Channel, uint32_t *pData, uint16_t Length); +HAL_StatusTypeDef HAL_TIM_OC_Stop_DMA(TIM_HandleTypeDef *htim, uint32_t Channel); +/** + * @} + */ + +/** @addtogroup TIM_Exported_Functions_Group3 TIM PWM functions + * @brief TIM PWM functions + * @{ + */ +/* Timer PWM functions ********************************************************/ +HAL_StatusTypeDef HAL_TIM_PWM_Init(TIM_HandleTypeDef *htim); +HAL_StatusTypeDef HAL_TIM_PWM_DeInit(TIM_HandleTypeDef *htim); +void HAL_TIM_PWM_MspInit(TIM_HandleTypeDef *htim); +void HAL_TIM_PWM_MspDeInit(TIM_HandleTypeDef *htim); +/* Blocking mode: Polling */ +HAL_StatusTypeDef HAL_TIM_PWM_Start(TIM_HandleTypeDef *htim, uint32_t Channel); +HAL_StatusTypeDef HAL_TIM_PWM_Stop(TIM_HandleTypeDef *htim, uint32_t Channel); +/* Non-Blocking mode: Interrupt */ +HAL_StatusTypeDef HAL_TIM_PWM_Start_IT(TIM_HandleTypeDef *htim, uint32_t Channel); +HAL_StatusTypeDef HAL_TIM_PWM_Stop_IT(TIM_HandleTypeDef *htim, uint32_t Channel); +/* Non-Blocking mode: DMA */ +HAL_StatusTypeDef HAL_TIM_PWM_Start_DMA(TIM_HandleTypeDef *htim, uint32_t Channel, uint32_t *pData, uint16_t Length); +HAL_StatusTypeDef HAL_TIM_PWM_Stop_DMA(TIM_HandleTypeDef *htim, uint32_t Channel); +/** + * @} + */ + +/** @addtogroup TIM_Exported_Functions_Group4 TIM Input Capture functions + * @brief TIM Input Capture functions + * @{ + */ +/* Timer Input Capture functions **********************************************/ +HAL_StatusTypeDef HAL_TIM_IC_Init(TIM_HandleTypeDef *htim); +HAL_StatusTypeDef HAL_TIM_IC_DeInit(TIM_HandleTypeDef *htim); +void HAL_TIM_IC_MspInit(TIM_HandleTypeDef *htim); +void HAL_TIM_IC_MspDeInit(TIM_HandleTypeDef *htim); +/* Blocking mode: Polling */ +HAL_StatusTypeDef HAL_TIM_IC_Start(TIM_HandleTypeDef *htim, uint32_t Channel); +HAL_StatusTypeDef HAL_TIM_IC_Stop(TIM_HandleTypeDef *htim, uint32_t Channel); +/* Non-Blocking mode: Interrupt */ +HAL_StatusTypeDef HAL_TIM_IC_Start_IT(TIM_HandleTypeDef *htim, uint32_t Channel); +HAL_StatusTypeDef HAL_TIM_IC_Stop_IT(TIM_HandleTypeDef *htim, uint32_t Channel); +/* Non-Blocking mode: DMA */ +HAL_StatusTypeDef HAL_TIM_IC_Start_DMA(TIM_HandleTypeDef *htim, uint32_t Channel, uint32_t *pData, uint16_t Length); +HAL_StatusTypeDef HAL_TIM_IC_Stop_DMA(TIM_HandleTypeDef *htim, uint32_t Channel); +/** + * @} + */ + +/** @addtogroup TIM_Exported_Functions_Group5 TIM One Pulse functions + * @brief TIM One Pulse functions + * @{ + */ +/* Timer One Pulse functions **************************************************/ +HAL_StatusTypeDef HAL_TIM_OnePulse_Init(TIM_HandleTypeDef *htim, uint32_t OnePulseMode); +HAL_StatusTypeDef HAL_TIM_OnePulse_DeInit(TIM_HandleTypeDef *htim); +void HAL_TIM_OnePulse_MspInit(TIM_HandleTypeDef *htim); +void HAL_TIM_OnePulse_MspDeInit(TIM_HandleTypeDef *htim); +/* Blocking mode: Polling */ +HAL_StatusTypeDef HAL_TIM_OnePulse_Start(TIM_HandleTypeDef *htim, uint32_t OutputChannel); +HAL_StatusTypeDef 
HAL_TIM_OnePulse_Stop(TIM_HandleTypeDef *htim, uint32_t OutputChannel); +/* Non-Blocking mode: Interrupt */ +HAL_StatusTypeDef HAL_TIM_OnePulse_Start_IT(TIM_HandleTypeDef *htim, uint32_t OutputChannel); +HAL_StatusTypeDef HAL_TIM_OnePulse_Stop_IT(TIM_HandleTypeDef *htim, uint32_t OutputChannel); +/** + * @} + */ + +/** @addtogroup TIM_Exported_Functions_Group6 TIM Encoder functions + * @brief TIM Encoder functions + * @{ + */ +/* Timer Encoder functions ****************************************************/ +HAL_StatusTypeDef HAL_TIM_Encoder_Init(TIM_HandleTypeDef *htim, TIM_Encoder_InitTypeDef *sConfig); +HAL_StatusTypeDef HAL_TIM_Encoder_DeInit(TIM_HandleTypeDef *htim); +void HAL_TIM_Encoder_MspInit(TIM_HandleTypeDef *htim); +void HAL_TIM_Encoder_MspDeInit(TIM_HandleTypeDef *htim); +/* Blocking mode: Polling */ +HAL_StatusTypeDef HAL_TIM_Encoder_Start(TIM_HandleTypeDef *htim, uint32_t Channel); +HAL_StatusTypeDef HAL_TIM_Encoder_Stop(TIM_HandleTypeDef *htim, uint32_t Channel); +/* Non-Blocking mode: Interrupt */ +HAL_StatusTypeDef HAL_TIM_Encoder_Start_IT(TIM_HandleTypeDef *htim, uint32_t Channel); +HAL_StatusTypeDef HAL_TIM_Encoder_Stop_IT(TIM_HandleTypeDef *htim, uint32_t Channel); +/* Non-Blocking mode: DMA */ +HAL_StatusTypeDef HAL_TIM_Encoder_Start_DMA(TIM_HandleTypeDef *htim, uint32_t Channel, uint32_t *pData1, + uint32_t *pData2, uint16_t Length); +HAL_StatusTypeDef HAL_TIM_Encoder_Stop_DMA(TIM_HandleTypeDef *htim, uint32_t Channel); +/** + * @} + */ + +/** @addtogroup TIM_Exported_Functions_Group7 TIM IRQ handler management + * @brief IRQ handler management + * @{ + */ +/* Interrupt Handler functions ***********************************************/ +void HAL_TIM_IRQHandler(TIM_HandleTypeDef *htim); +/** + * @} + */ + +/** @defgroup TIM_Exported_Functions_Group8 TIM Peripheral Control functions + * @brief Peripheral Control functions + * @{ + */ +/* Control functions *********************************************************/ +HAL_StatusTypeDef HAL_TIM_OC_ConfigChannel(TIM_HandleTypeDef *htim, TIM_OC_InitTypeDef *sConfig, uint32_t Channel); +HAL_StatusTypeDef HAL_TIM_PWM_ConfigChannel(TIM_HandleTypeDef *htim, TIM_OC_InitTypeDef *sConfig, uint32_t Channel); +HAL_StatusTypeDef HAL_TIM_IC_ConfigChannel(TIM_HandleTypeDef *htim, TIM_IC_InitTypeDef *sConfig, uint32_t Channel); +HAL_StatusTypeDef HAL_TIM_OnePulse_ConfigChannel(TIM_HandleTypeDef *htim, TIM_OnePulse_InitTypeDef *sConfig, + uint32_t OutputChannel, uint32_t InputChannel); +HAL_StatusTypeDef HAL_TIM_ConfigOCrefClear(TIM_HandleTypeDef *htim, TIM_ClearInputConfigTypeDef *sClearInputConfig, + uint32_t Channel); +HAL_StatusTypeDef HAL_TIM_ConfigClockSource(TIM_HandleTypeDef *htim, TIM_ClockConfigTypeDef *sClockSourceConfig); +HAL_StatusTypeDef HAL_TIM_ConfigTI1Input(TIM_HandleTypeDef *htim, uint32_t TI1_Selection); +HAL_StatusTypeDef HAL_TIM_SlaveConfigSynchro(TIM_HandleTypeDef *htim, TIM_SlaveConfigTypeDef *sSlaveConfig); +HAL_StatusTypeDef HAL_TIM_SlaveConfigSynchro_IT(TIM_HandleTypeDef *htim, TIM_SlaveConfigTypeDef *sSlaveConfig); +HAL_StatusTypeDef HAL_TIM_DMABurst_WriteStart(TIM_HandleTypeDef *htim, uint32_t BurstBaseAddress, + uint32_t BurstRequestSrc, uint32_t *BurstBuffer, uint32_t BurstLength); +HAL_StatusTypeDef HAL_TIM_DMABurst_MultiWriteStart(TIM_HandleTypeDef *htim, uint32_t BurstBaseAddress, + uint32_t BurstRequestSrc, uint32_t *BurstBuffer, + uint32_t BurstLength, uint32_t DataLength); +HAL_StatusTypeDef HAL_TIM_DMABurst_WriteStop(TIM_HandleTypeDef *htim, uint32_t BurstRequestSrc); +HAL_StatusTypeDef 
HAL_TIM_DMABurst_ReadStart(TIM_HandleTypeDef *htim, uint32_t BurstBaseAddress, + uint32_t BurstRequestSrc, uint32_t *BurstBuffer, uint32_t BurstLength); +HAL_StatusTypeDef HAL_TIM_DMABurst_MultiReadStart(TIM_HandleTypeDef *htim, uint32_t BurstBaseAddress, + uint32_t BurstRequestSrc, uint32_t *BurstBuffer, + uint32_t BurstLength, uint32_t DataLength); +HAL_StatusTypeDef HAL_TIM_DMABurst_ReadStop(TIM_HandleTypeDef *htim, uint32_t BurstRequestSrc); +HAL_StatusTypeDef HAL_TIM_GenerateEvent(TIM_HandleTypeDef *htim, uint32_t EventSource); +uint32_t HAL_TIM_ReadCapturedValue(TIM_HandleTypeDef *htim, uint32_t Channel); +/** + * @} + */ + +/** @defgroup TIM_Exported_Functions_Group9 TIM Callbacks functions + * @brief TIM Callbacks functions + * @{ + */ +/* Callback in non blocking modes (Interrupt and DMA) *************************/ +void HAL_TIM_PeriodElapsedCallback(TIM_HandleTypeDef *htim); +void HAL_TIM_PeriodElapsedHalfCpltCallback(TIM_HandleTypeDef *htim); +void HAL_TIM_OC_DelayElapsedCallback(TIM_HandleTypeDef *htim); +void HAL_TIM_IC_CaptureCallback(TIM_HandleTypeDef *htim); +void HAL_TIM_IC_CaptureHalfCpltCallback(TIM_HandleTypeDef *htim); +void HAL_TIM_PWM_PulseFinishedCallback(TIM_HandleTypeDef *htim); +void HAL_TIM_PWM_PulseFinishedHalfCpltCallback(TIM_HandleTypeDef *htim); +void HAL_TIM_TriggerCallback(TIM_HandleTypeDef *htim); +void HAL_TIM_TriggerHalfCpltCallback(TIM_HandleTypeDef *htim); +void HAL_TIM_ErrorCallback(TIM_HandleTypeDef *htim); + +/* Callbacks Register/UnRegister functions ***********************************/ +#if (USE_HAL_TIM_REGISTER_CALLBACKS == 1) +HAL_StatusTypeDef HAL_TIM_RegisterCallback(TIM_HandleTypeDef *htim, HAL_TIM_CallbackIDTypeDef CallbackID, + pTIM_CallbackTypeDef pCallback); +HAL_StatusTypeDef HAL_TIM_UnRegisterCallback(TIM_HandleTypeDef *htim, HAL_TIM_CallbackIDTypeDef CallbackID); +#endif /* USE_HAL_TIM_REGISTER_CALLBACKS */ + +/** + * @} + */ + +/** @defgroup TIM_Exported_Functions_Group10 TIM Peripheral State functions + * @brief Peripheral State functions + * @{ + */ +/* Peripheral State functions ************************************************/ +HAL_TIM_StateTypeDef HAL_TIM_Base_GetState(TIM_HandleTypeDef *htim); +HAL_TIM_StateTypeDef HAL_TIM_OC_GetState(TIM_HandleTypeDef *htim); +HAL_TIM_StateTypeDef HAL_TIM_PWM_GetState(TIM_HandleTypeDef *htim); +HAL_TIM_StateTypeDef HAL_TIM_IC_GetState(TIM_HandleTypeDef *htim); +HAL_TIM_StateTypeDef HAL_TIM_OnePulse_GetState(TIM_HandleTypeDef *htim); +HAL_TIM_StateTypeDef HAL_TIM_Encoder_GetState(TIM_HandleTypeDef *htim); + +/* Peripheral Channel state functions ************************************************/ +HAL_TIM_ActiveChannel HAL_TIM_GetActiveChannel(TIM_HandleTypeDef *htim); +HAL_TIM_ChannelStateTypeDef HAL_TIM_GetChannelState(TIM_HandleTypeDef *htim, uint32_t Channel); +HAL_TIM_DMABurstStateTypeDef HAL_TIM_DMABurstState(TIM_HandleTypeDef *htim); +/** + * @} + */ + +/** + * @} + */ +/* End of exported functions -------------------------------------------------*/ + +/* Private functions----------------------------------------------------------*/ +/** @defgroup TIM_Private_Functions TIM Private Functions + * @{ + */ +void TIM_Base_SetConfig(TIM_TypeDef *TIMx, TIM_Base_InitTypeDef *Structure); +void TIM_TI1_SetConfig(TIM_TypeDef *TIMx, uint32_t TIM_ICPolarity, uint32_t TIM_ICSelection, uint32_t TIM_ICFilter); +void TIM_OC2_SetConfig(TIM_TypeDef *TIMx, TIM_OC_InitTypeDef *OC_Config); +void TIM_ETR_SetConfig(TIM_TypeDef *TIMx, uint32_t TIM_ExtTRGPrescaler, + uint32_t TIM_ExtTRGPolarity, uint32_t 
ExtTRGFilter); + +void TIM_DMADelayPulseHalfCplt(DMA_HandleTypeDef *hdma); +void TIM_DMAError(DMA_HandleTypeDef *hdma); +void TIM_DMACaptureCplt(DMA_HandleTypeDef *hdma); +void TIM_DMACaptureHalfCplt(DMA_HandleTypeDef *hdma); +void TIM_CCxChannelCmd(TIM_TypeDef *TIMx, uint32_t Channel, uint32_t ChannelState); + +#if (USE_HAL_TIM_REGISTER_CALLBACKS == 1) +void TIM_ResetCallback(TIM_HandleTypeDef *htim); +#endif /* USE_HAL_TIM_REGISTER_CALLBACKS */ + +/** + * @} + */ +/* End of private functions --------------------------------------------------*/ + +/** + * @} + */ + +/** + * @} + */ + +#ifdef __cplusplus +} +#endif + +#endif /* STM32F7xx_HAL_TIM_H */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_tim_ex.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_tim_ex.h new file mode 100644 index 0000000..7e832d3 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_tim_ex.h @@ -0,0 +1,358 @@ +/** + ****************************************************************************** + * @file stm32f7xx_hal_tim_ex.h + * @author MCD Application Team + * @brief Header file of TIM HAL Extended module. + ****************************************************************************** + * @attention + * + * Copyright (c) 2017 STMicroelectronics. + * All rights reserved. + * + * This software is licensed under terms that can be found in the LICENSE file + * in the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. + * + ****************************************************************************** + */ + +/* Define to prevent recursive inclusion -------------------------------------*/ +#ifndef STM32F7xx_HAL_TIM_EX_H +#define STM32F7xx_HAL_TIM_EX_H + +#ifdef __cplusplus +extern "C" { +#endif + +/* Includes ------------------------------------------------------------------*/ +#include "stm32f7xx_hal_def.h" + +/** @addtogroup STM32F7xx_HAL_Driver + * @{ + */ + +/** @addtogroup TIMEx + * @{ + */ + +/* Exported types ------------------------------------------------------------*/ +/** @defgroup TIMEx_Exported_Types TIM Extended Exported Types + * @{ + */ + +/** + * @brief TIM Hall sensor Configuration Structure definition + */ + +typedef struct +{ + uint32_t IC1Polarity; /*!< Specifies the active edge of the input signal. + This parameter can be a value of @ref TIM_Input_Capture_Polarity */ + + uint32_t IC1Prescaler; /*!< Specifies the Input Capture Prescaler. + This parameter can be a value of @ref TIM_Input_Capture_Prescaler */ + + uint32_t IC1Filter; /*!< Specifies the input capture filter. + This parameter can be a number between Min_Data = 0x0 and Max_Data = 0xF */ + + uint32_t Commutation_Delay; /*!< Specifies the pulse value to be loaded into the Capture Compare Register. + This parameter can be a number between Min_Data = 0x0000 and Max_Data = 0xFFFF */ +} TIM_HallSensor_InitTypeDef; +#if defined(TIM_BREAK_INPUT_SUPPORT) + +/** + * @brief TIM Break/Break2 input configuration + */ +typedef struct +{ + uint32_t Source; /*!< Specifies the source of the timer break input. + This parameter can be a value of @ref TIMEx_Break_Input_Source */ + uint32_t Enable; /*!< Specifies whether or not the break input source is enabled. + This parameter can be a value of @ref TIMEx_Break_Input_Source_Enable */ + uint32_t Polarity; /*!< Specifies the break input source polarity. 
+ This parameter can be a value of @ref TIMEx_Break_Input_Source_Polarity + Not relevant when analog watchdog output of the DFSDM1 used as break input source */ +} TIMEx_BreakInputConfigTypeDef; + +#endif /* TIM_BREAK_INPUT_SUPPORT */ +/** + * @} + */ +/* End of exported types -----------------------------------------------------*/ + +/* Exported constants --------------------------------------------------------*/ +/** @defgroup TIMEx_Exported_Constants TIM Extended Exported Constants + * @{ + */ + +/** @defgroup TIMEx_Remap TIM Extended Remapping + * @{ + */ +#define TIM_TIM2_TIM8_TRGO (0x00000000U) +#define TIM_TIM2_ETH_PTP (0x00000400U) +#define TIM_TIM2_USBFS_SOF (0x00000800U) +#define TIM_TIM2_USBHS_SOF (0x00000C00U) +#define TIM_TIM5_GPIO (0x00000000U) +#define TIM_TIM5_LSI (0x00000040U) +#define TIM_TIM5_LSE (0x00000080U) +#define TIM_TIM5_RTC (0x000000C0U) +#define TIM_TIM11_GPIO (0x00000000U) +#define TIM_TIM11_SPDIFRX (0x00000001U) +#define TIM_TIM11_HSE (0x00000002U) +#define TIM_TIM11_MCO1 (0x00000003U) +/** + * @} + */ +#if defined(TIM_BREAK_INPUT_SUPPORT) + +/** @defgroup TIMEx_Break_Input TIM Extended Break input + * @{ + */ +#define TIM_BREAKINPUT_BRK 0x00000001U /*!< Timer break input */ +#define TIM_BREAKINPUT_BRK2 0x00000002U /*!< Timer break2 input */ +/** + * @} + */ + +/** @defgroup TIMEx_Break_Input_Source TIM Extended Break input source + * @{ + */ +#define TIM_BREAKINPUTSOURCE_BKIN (0x00000001U) /* !< An external source (GPIO) is connected to the BKIN pin */ +#define TIM_BREAKINPUTSOURCE_DFSDM1 (0x00000008U) /* !< The analog watchdog output of the DFSDM1 peripheral is connected to the break input */ +/** + * @} + */ + +/** @defgroup TIMEx_Break_Input_Source_Enable TIM Extended Break input source enabling + * @{ + */ +#define TIM_BREAKINPUTSOURCE_DISABLE 0x00000000U /*!< Break input source is disabled */ +#define TIM_BREAKINPUTSOURCE_ENABLE 0x00000001U /*!< Break input source is enabled */ +/** + * @} + */ + +/** @defgroup TIMEx_Break_Input_Source_Polarity TIM Extended Break input polarity + * @{ + */ +#define TIM_BREAKINPUTSOURCE_POLARITY_LOW 0x00000001U /*!< Break input source is active low */ +#define TIM_BREAKINPUTSOURCE_POLARITY_HIGH 0x00000000U /*!< Break input source is active_high */ +/** + * @} + */ +#endif /* TIM_BREAK_INPUT_SUPPORT */ + +/** + * @} + */ +/* End of exported constants -------------------------------------------------*/ + +/* Exported macro ------------------------------------------------------------*/ +/** @defgroup TIMEx_Exported_Macros TIM Extended Exported Macros + * @{ + */ + +/** + * @} + */ +/* End of exported macro -----------------------------------------------------*/ + +/* Private macro -------------------------------------------------------------*/ +/** @defgroup TIMEx_Private_Macros TIM Extended Private Macros + * @{ + */ +#define IS_TIM_REMAP(__TIM_REMAP__) (((__TIM_REMAP__) == TIM_TIM2_TIM8_TRGO)||\ + ((__TIM_REMAP__) == TIM_TIM2_ETH_PTP) ||\ + ((__TIM_REMAP__) == TIM_TIM2_USBFS_SOF)||\ + ((__TIM_REMAP__) == TIM_TIM2_USBHS_SOF)||\ + ((__TIM_REMAP__) == TIM_TIM5_GPIO) ||\ + ((__TIM_REMAP__) == TIM_TIM5_LSI) ||\ + ((__TIM_REMAP__) == TIM_TIM5_LSE) ||\ + ((__TIM_REMAP__) == TIM_TIM5_RTC) ||\ + ((__TIM_REMAP__) == TIM_TIM11_GPIO) ||\ + ((__TIM_REMAP__) == TIM_TIM11_SPDIFRX) ||\ + ((__TIM_REMAP__) == TIM_TIM11_HSE) ||\ + ((__TIM_REMAP__) == TIM_TIM11_MCO1)) +#if defined(TIM_BREAK_INPUT_SUPPORT) + +#define IS_TIM_BREAKINPUT(__BREAKINPUT__) (((__BREAKINPUT__) == TIM_BREAKINPUT_BRK) || \ + ((__BREAKINPUT__) == TIM_BREAKINPUT_BRK2)) + 
+#define IS_TIM_BREAKINPUTSOURCE(__SOURCE__) (((__SOURCE__) == TIM_BREAKINPUTSOURCE_BKIN) || \ + ((__SOURCE__) == TIM_BREAKINPUTSOURCE_DFSDM1)) + +#define IS_TIM_BREAKINPUTSOURCE_STATE(__STATE__) (((__STATE__) == TIM_BREAKINPUTSOURCE_DISABLE) || \ + ((__STATE__) == TIM_BREAKINPUTSOURCE_ENABLE)) + +#define IS_TIM_BREAKINPUTSOURCE_POLARITY(__POLARITY__) (((__POLARITY__) == TIM_BREAKINPUTSOURCE_POLARITY_LOW) || \ + ((__POLARITY__) == TIM_BREAKINPUTSOURCE_POLARITY_HIGH)) +#endif /* TIM_BREAK_INPUT_SUPPORT */ + +/** + * @} + */ +/* End of private macro ------------------------------------------------------*/ + +/* Exported functions --------------------------------------------------------*/ +/** @addtogroup TIMEx_Exported_Functions TIM Extended Exported Functions + * @{ + */ + +/** @addtogroup TIMEx_Exported_Functions_Group1 Extended Timer Hall Sensor functions + * @brief Timer Hall Sensor functions + * @{ + */ +/* Timer Hall Sensor functions **********************************************/ +HAL_StatusTypeDef HAL_TIMEx_HallSensor_Init(TIM_HandleTypeDef *htim, TIM_HallSensor_InitTypeDef *sConfig); +HAL_StatusTypeDef HAL_TIMEx_HallSensor_DeInit(TIM_HandleTypeDef *htim); + +void HAL_TIMEx_HallSensor_MspInit(TIM_HandleTypeDef *htim); +void HAL_TIMEx_HallSensor_MspDeInit(TIM_HandleTypeDef *htim); + +/* Blocking mode: Polling */ +HAL_StatusTypeDef HAL_TIMEx_HallSensor_Start(TIM_HandleTypeDef *htim); +HAL_StatusTypeDef HAL_TIMEx_HallSensor_Stop(TIM_HandleTypeDef *htim); +/* Non-Blocking mode: Interrupt */ +HAL_StatusTypeDef HAL_TIMEx_HallSensor_Start_IT(TIM_HandleTypeDef *htim); +HAL_StatusTypeDef HAL_TIMEx_HallSensor_Stop_IT(TIM_HandleTypeDef *htim); +/* Non-Blocking mode: DMA */ +HAL_StatusTypeDef HAL_TIMEx_HallSensor_Start_DMA(TIM_HandleTypeDef *htim, uint32_t *pData, uint16_t Length); +HAL_StatusTypeDef HAL_TIMEx_HallSensor_Stop_DMA(TIM_HandleTypeDef *htim); +/** + * @} + */ + +/** @addtogroup TIMEx_Exported_Functions_Group2 Extended Timer Complementary Output Compare functions + * @brief Timer Complementary Output Compare functions + * @{ + */ +/* Timer Complementary Output Compare functions *****************************/ +/* Blocking mode: Polling */ +HAL_StatusTypeDef HAL_TIMEx_OCN_Start(TIM_HandleTypeDef *htim, uint32_t Channel); +HAL_StatusTypeDef HAL_TIMEx_OCN_Stop(TIM_HandleTypeDef *htim, uint32_t Channel); + +/* Non-Blocking mode: Interrupt */ +HAL_StatusTypeDef HAL_TIMEx_OCN_Start_IT(TIM_HandleTypeDef *htim, uint32_t Channel); +HAL_StatusTypeDef HAL_TIMEx_OCN_Stop_IT(TIM_HandleTypeDef *htim, uint32_t Channel); + +/* Non-Blocking mode: DMA */ +HAL_StatusTypeDef HAL_TIMEx_OCN_Start_DMA(TIM_HandleTypeDef *htim, uint32_t Channel, uint32_t *pData, uint16_t Length); +HAL_StatusTypeDef HAL_TIMEx_OCN_Stop_DMA(TIM_HandleTypeDef *htim, uint32_t Channel); +/** + * @} + */ + +/** @addtogroup TIMEx_Exported_Functions_Group3 Extended Timer Complementary PWM functions + * @brief Timer Complementary PWM functions + * @{ + */ +/* Timer Complementary PWM functions ****************************************/ +/* Blocking mode: Polling */ +HAL_StatusTypeDef HAL_TIMEx_PWMN_Start(TIM_HandleTypeDef *htim, uint32_t Channel); +HAL_StatusTypeDef HAL_TIMEx_PWMN_Stop(TIM_HandleTypeDef *htim, uint32_t Channel); + +/* Non-Blocking mode: Interrupt */ +HAL_StatusTypeDef HAL_TIMEx_PWMN_Start_IT(TIM_HandleTypeDef *htim, uint32_t Channel); +HAL_StatusTypeDef HAL_TIMEx_PWMN_Stop_IT(TIM_HandleTypeDef *htim, uint32_t Channel); +/* Non-Blocking mode: DMA */ +HAL_StatusTypeDef HAL_TIMEx_PWMN_Start_DMA(TIM_HandleTypeDef *htim, 
uint32_t Channel, uint32_t *pData, uint16_t Length); +HAL_StatusTypeDef HAL_TIMEx_PWMN_Stop_DMA(TIM_HandleTypeDef *htim, uint32_t Channel); +/** + * @} + */ + +/** @addtogroup TIMEx_Exported_Functions_Group4 Extended Timer Complementary One Pulse functions + * @brief Timer Complementary One Pulse functions + * @{ + */ +/* Timer Complementary One Pulse functions **********************************/ +/* Blocking mode: Polling */ +HAL_StatusTypeDef HAL_TIMEx_OnePulseN_Start(TIM_HandleTypeDef *htim, uint32_t OutputChannel); +HAL_StatusTypeDef HAL_TIMEx_OnePulseN_Stop(TIM_HandleTypeDef *htim, uint32_t OutputChannel); + +/* Non-Blocking mode: Interrupt */ +HAL_StatusTypeDef HAL_TIMEx_OnePulseN_Start_IT(TIM_HandleTypeDef *htim, uint32_t OutputChannel); +HAL_StatusTypeDef HAL_TIMEx_OnePulseN_Stop_IT(TIM_HandleTypeDef *htim, uint32_t OutputChannel); +/** + * @} + */ + +/** @addtogroup TIMEx_Exported_Functions_Group5 Extended Peripheral Control functions + * @brief Peripheral Control functions + * @{ + */ +/* Extended Control functions ************************************************/ +HAL_StatusTypeDef HAL_TIMEx_ConfigCommutEvent(TIM_HandleTypeDef *htim, uint32_t InputTrigger, + uint32_t CommutationSource); +HAL_StatusTypeDef HAL_TIMEx_ConfigCommutEvent_IT(TIM_HandleTypeDef *htim, uint32_t InputTrigger, + uint32_t CommutationSource); +HAL_StatusTypeDef HAL_TIMEx_ConfigCommutEvent_DMA(TIM_HandleTypeDef *htim, uint32_t InputTrigger, + uint32_t CommutationSource); +HAL_StatusTypeDef HAL_TIMEx_MasterConfigSynchronization(TIM_HandleTypeDef *htim, + TIM_MasterConfigTypeDef *sMasterConfig); +HAL_StatusTypeDef HAL_TIMEx_ConfigBreakDeadTime(TIM_HandleTypeDef *htim, + TIM_BreakDeadTimeConfigTypeDef *sBreakDeadTimeConfig); +#if defined(TIM_BREAK_INPUT_SUPPORT) +HAL_StatusTypeDef HAL_TIMEx_ConfigBreakInput(TIM_HandleTypeDef *htim, uint32_t BreakInput, + TIMEx_BreakInputConfigTypeDef *sBreakInputConfig); +#endif /* TIM_BREAK_INPUT_SUPPORT */ +HAL_StatusTypeDef HAL_TIMEx_GroupChannel5(TIM_HandleTypeDef *htim, uint32_t Channels); +HAL_StatusTypeDef HAL_TIMEx_RemapConfig(TIM_HandleTypeDef *htim, uint32_t Remap); +/** + * @} + */ + +/** @addtogroup TIMEx_Exported_Functions_Group6 Extended Callbacks functions + * @brief Extended Callbacks functions + * @{ + */ +/* Extended Callback **********************************************************/ +void HAL_TIMEx_CommutCallback(TIM_HandleTypeDef *htim); +void HAL_TIMEx_CommutHalfCpltCallback(TIM_HandleTypeDef *htim); +void HAL_TIMEx_BreakCallback(TIM_HandleTypeDef *htim); +void HAL_TIMEx_Break2Callback(TIM_HandleTypeDef *htim); +/** + * @} + */ + +/** @addtogroup TIMEx_Exported_Functions_Group7 Extended Peripheral State functions + * @brief Extended Peripheral State functions + * @{ + */ +/* Extended Peripheral State functions ***************************************/ +HAL_TIM_StateTypeDef HAL_TIMEx_HallSensor_GetState(TIM_HandleTypeDef *htim); +HAL_TIM_ChannelStateTypeDef HAL_TIMEx_GetChannelNState(TIM_HandleTypeDef *htim, uint32_t ChannelN); +/** + * @} + */ + +/** + * @} + */ +/* End of exported functions -------------------------------------------------*/ + +/* Private functions----------------------------------------------------------*/ +/** @addtogroup TIMEx_Private_Functions TIM Extended Private Functions + * @{ + */ +void TIMEx_DMACommutationCplt(DMA_HandleTypeDef *hdma); +void TIMEx_DMACommutationHalfCplt(DMA_HandleTypeDef *hdma); +/** + * @} + */ +/* End of private functions --------------------------------------------------*/ + +/** + * @} + */ + +/** + * @} + */ 
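/*
 * [Editor's note -- illustrative sketch only, not part of this patch or of the ST header.]
 * A minimal example of how the complementary-PWM and break/dead-time APIs declared
 * above might be driven from application code. The timer instance (TIM1), prescaler,
 * period, pulse and dead-time values are assumptions chosen for illustration; clock
 * and GPIO setup is assumed to happen in HAL_TIM_PWM_MspInit().
 */
#include "stm32f7xx_hal.h"

static TIM_HandleTypeDef htim1;

static void complementary_pwm_sketch(void)
{
  TIM_OC_InitTypeDef oc = {0};
  TIM_BreakDeadTimeConfigTypeDef bdt = {0};

  /* Base time settings (placeholder values for the sketch). */
  htim1.Instance               = TIM1;
  htim1.Init.Prescaler         = 0;
  htim1.Init.CounterMode       = TIM_COUNTERMODE_UP;
  htim1.Init.Period            = 999;
  htim1.Init.ClockDivision     = TIM_CLOCKDIVISION_DIV1;
  htim1.Init.RepetitionCounter = 0;
  if (HAL_TIM_PWM_Init(&htim1) != HAL_OK) { return; }

  /* Channel 1 in PWM mode 1, roughly 50% duty for the assumed period. */
  oc.OCMode       = TIM_OCMODE_PWM1;
  oc.Pulse        = 500;
  oc.OCPolarity   = TIM_OCPOLARITY_HIGH;
  oc.OCNPolarity  = TIM_OCNPOLARITY_HIGH;
  oc.OCFastMode   = TIM_OCFAST_DISABLE;
  oc.OCIdleState  = TIM_OCIDLESTATE_RESET;
  oc.OCNIdleState = TIM_OCNIDLESTATE_RESET;
  if (HAL_TIM_PWM_ConfigChannel(&htim1, &oc, TIM_CHANNEL_1) != HAL_OK) { return; }

  /* Dead time between CH1 and CH1N; the break input is left disabled here.
     The dead-time value is a placeholder (see IS_TIM_DEADTIME()). */
  bdt.OffStateRunMode  = TIM_OSSR_DISABLE;
  bdt.OffStateIDLEMode = TIM_OSSI_DISABLE;
  bdt.LockLevel        = TIM_LOCKLEVEL_OFF;
  bdt.DeadTime         = 100;
  bdt.BreakState       = TIM_BREAK_DISABLE;
  bdt.BreakPolarity    = TIM_BREAKPOLARITY_HIGH;
  bdt.AutomaticOutput  = TIM_AUTOMATICOUTPUT_DISABLE;
  if (HAL_TIMEx_ConfigBreakDeadTime(&htim1, &bdt) != HAL_OK) { return; }

  /* Start the main output and its complementary output. */
  HAL_TIM_PWM_Start(&htim1, TIM_CHANNEL_1);
  HAL_TIMEx_PWMN_Start(&htim1, TIM_CHANNEL_1);
}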
+ +#ifdef __cplusplus +} +#endif + + +#endif /* STM32F7xx_HAL_TIM_EX_H */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_uart.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_uart.h new file mode 100644 index 0000000..df88cc2 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_uart.h @@ -0,0 +1,1632 @@ +/** + ****************************************************************************** + * @file stm32f7xx_hal_uart.h + * @author MCD Application Team + * @brief Header file of UART HAL module. + ****************************************************************************** + * @attention + * + * Copyright (c) 2017 STMicroelectronics. + * All rights reserved. + * + * This software is licensed under terms that can be found in the LICENSE file + * in the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. + * + ****************************************************************************** + */ + +/* Define to prevent recursive inclusion -------------------------------------*/ +#ifndef STM32F7xx_HAL_UART_H +#define STM32F7xx_HAL_UART_H + +#ifdef __cplusplus +extern "C" { +#endif + +/* Includes ------------------------------------------------------------------*/ +#include "stm32f7xx_hal_def.h" + +/** @addtogroup STM32F7xx_HAL_Driver + * @{ + */ + +/** @addtogroup UART + * @{ + */ + +/* Exported types ------------------------------------------------------------*/ +/** @defgroup UART_Exported_Types UART Exported Types + * @{ + */ + +/** + * @brief UART Init Structure definition + */ +typedef struct +{ + uint32_t BaudRate; /*!< This member configures the UART communication baud rate. + The baud rate register is computed using the following formula: + - If oversampling is 16 or in LIN mode, + Baud Rate Register = ((uart_ker_ck) / ((huart->Init.BaudRate))) + - If oversampling is 8, + Baud Rate Register[15:4] = ((2 * uart_ker_ck) / + ((huart->Init.BaudRate)))[15:4] + Baud Rate Register[3] = 0 + Baud Rate Register[2:0] = (((2 * uart_ker_ck) / + ((huart->Init.BaudRate)))[3:0]) >> 1 + where uart_ker_ck is the UART input clock */ + + uint32_t WordLength; /*!< Specifies the number of data bits transmitted or received in a frame. + This parameter can be a value of @ref UARTEx_Word_Length. */ + + uint32_t StopBits; /*!< Specifies the number of stop bits transmitted. + This parameter can be a value of @ref UART_Stop_Bits. */ + + uint32_t Parity; /*!< Specifies the parity mode. + This parameter can be a value of @ref UART_Parity + @note When parity is enabled, the computed parity is inserted + at the MSB position of the transmitted data (9th bit when + the word length is set to 9 data bits; 8th bit when the + word length is set to 8 data bits). */ + + uint32_t Mode; /*!< Specifies whether the Receive or Transmit mode is enabled or disabled. + This parameter can be a value of @ref UART_Mode. */ + + uint32_t HwFlowCtl; /*!< Specifies whether the hardware flow control mode is enabled + or disabled. + This parameter can be a value of @ref UART_Hardware_Flow_Control. */ + + uint32_t OverSampling; /*!< Specifies whether the Over sampling 8 is enabled or disabled, + to achieve higher speed (up to f_PCLK/8). + This parameter can be a value of @ref UART_Over_Sampling. 
*/ + + uint32_t OneBitSampling; /*!< Specifies whether a single sample or three samples' majority vote is selected. + Selecting the single sample method increases the receiver tolerance to clock + deviations. This parameter can be a value of @ref UART_OneBit_Sampling. */ + + +} UART_InitTypeDef; + +/** + * @brief UART Advanced Features initialization structure definition + */ +typedef struct +{ + uint32_t AdvFeatureInit; /*!< Specifies which advanced UART features is initialized. Several + Advanced Features may be initialized at the same time . + This parameter can be a value of + @ref UART_Advanced_Features_Initialization_Type. */ + + uint32_t TxPinLevelInvert; /*!< Specifies whether the TX pin active level is inverted. + This parameter can be a value of @ref UART_Tx_Inv. */ + + uint32_t RxPinLevelInvert; /*!< Specifies whether the RX pin active level is inverted. + This parameter can be a value of @ref UART_Rx_Inv. */ + + uint32_t DataInvert; /*!< Specifies whether data are inverted (positive/direct logic + vs negative/inverted logic). + This parameter can be a value of @ref UART_Data_Inv. */ + + uint32_t Swap; /*!< Specifies whether TX and RX pins are swapped. + This parameter can be a value of @ref UART_Rx_Tx_Swap. */ + + uint32_t OverrunDisable; /*!< Specifies whether the reception overrun detection is disabled. + This parameter can be a value of @ref UART_Overrun_Disable. */ + + uint32_t DMADisableonRxError; /*!< Specifies whether the DMA is disabled in case of reception error. + This parameter can be a value of @ref UART_DMA_Disable_on_Rx_Error. */ + + uint32_t AutoBaudRateEnable; /*!< Specifies whether auto Baud rate detection is enabled. + This parameter can be a value of @ref UART_AutoBaudRate_Enable. */ + + uint32_t AutoBaudRateMode; /*!< If auto Baud rate detection is enabled, specifies how the rate + detection is carried out. + This parameter can be a value of @ref UART_AutoBaud_Rate_Mode. */ + + uint32_t MSBFirst; /*!< Specifies whether MSB is sent first on UART line. + This parameter can be a value of @ref UART_MSB_First. */ +} UART_AdvFeatureInitTypeDef; + +/** + * @brief HAL UART State definition + * @note HAL UART State value is a combination of 2 different substates: + * gState and RxState (see @ref UART_State_Definition). + * - gState contains UART state information related to global Handle management + * and also information related to Tx operations. + * gState value coding follow below described bitmap : + * b7-b6 Error information + * 00 : No Error + * 01 : (Not Used) + * 10 : Timeout + * 11 : Error + * b5 Peripheral initialization status + * 0 : Reset (Peripheral not initialized) + * 1 : Init done (Peripheral initialized. HAL UART Init function already called) + * b4-b3 (not used) + * xx : Should be set to 00 + * b2 Intrinsic process state + * 0 : Ready + * 1 : Busy (Peripheral busy with some configuration or internal operations) + * b1 (not used) + * x : Should be set to 0 + * b0 Tx state + * 0 : Ready (no Tx operation ongoing) + * 1 : Busy (Tx operation ongoing) + * - RxState contains information related to Rx operations. + * RxState value coding follow below described bitmap : + * b7-b6 (not used) + * xx : Should be set to 00 + * b5 Peripheral initialization status + * 0 : Reset (Peripheral not initialized) + * 1 : Init done (Peripheral initialized) + * b4-b2 (not used) + * xxx : Should be set to 000 + * b1 Rx state + * 0 : Ready (no Rx operation ongoing) + * 1 : Busy (Rx operation ongoing) + * b0 (not used) + * x : Should be set to 0. 
+ */ +typedef uint32_t HAL_UART_StateTypeDef; + +/** + * @brief UART clock sources definition + */ +typedef enum +{ + UART_CLOCKSOURCE_PCLK1 = 0x00U, /*!< PCLK1 clock source */ + UART_CLOCKSOURCE_PCLK2 = 0x01U, /*!< PCLK2 clock source */ + UART_CLOCKSOURCE_HSI = 0x02U, /*!< HSI clock source */ + UART_CLOCKSOURCE_SYSCLK = 0x04U, /*!< SYSCLK clock source */ + UART_CLOCKSOURCE_LSE = 0x08U, /*!< LSE clock source */ + UART_CLOCKSOURCE_UNDEFINED = 0x10U /*!< Undefined clock source */ +} UART_ClockSourceTypeDef; + +/** + * @brief HAL UART Reception type definition + * @note HAL UART Reception type value aims to identify which type of Reception is ongoing. + * It is expected to admit following values : + * HAL_UART_RECEPTION_STANDARD = 0x00U, + * HAL_UART_RECEPTION_TOIDLE = 0x01U, + * HAL_UART_RECEPTION_TORTO = 0x02U, + * HAL_UART_RECEPTION_TOCHARMATCH = 0x03U, + */ +typedef uint32_t HAL_UART_RxTypeTypeDef; + +/** + * @brief UART handle Structure definition + */ +typedef struct __UART_HandleTypeDef +{ + USART_TypeDef *Instance; /*!< UART registers base address */ + + UART_InitTypeDef Init; /*!< UART communication parameters */ + + UART_AdvFeatureInitTypeDef AdvancedInit; /*!< UART Advanced Features initialization parameters */ + + const uint8_t *pTxBuffPtr; /*!< Pointer to UART Tx transfer Buffer */ + + uint16_t TxXferSize; /*!< UART Tx Transfer size */ + + __IO uint16_t TxXferCount; /*!< UART Tx Transfer Counter */ + + uint8_t *pRxBuffPtr; /*!< Pointer to UART Rx transfer Buffer */ + + uint16_t RxXferSize; /*!< UART Rx Transfer size */ + + __IO uint16_t RxXferCount; /*!< UART Rx Transfer Counter */ + + uint16_t Mask; /*!< UART Rx RDR register mask */ + + __IO HAL_UART_RxTypeTypeDef ReceptionType; /*!< Type of ongoing reception */ + + void (*RxISR)(struct __UART_HandleTypeDef *huart); /*!< Function pointer on Rx IRQ handler */ + + void (*TxISR)(struct __UART_HandleTypeDef *huart); /*!< Function pointer on Tx IRQ handler */ + + DMA_HandleTypeDef *hdmatx; /*!< UART Tx DMA Handle parameters */ + + DMA_HandleTypeDef *hdmarx; /*!< UART Rx DMA Handle parameters */ + + HAL_LockTypeDef Lock; /*!< Locking object */ + + __IO HAL_UART_StateTypeDef gState; /*!< UART state information related to global Handle management + and also related to Tx operations. This parameter + can be a value of @ref HAL_UART_StateTypeDef */ + + __IO HAL_UART_StateTypeDef RxState; /*!< UART state information related to Rx operations. 
This + parameter can be a value of @ref HAL_UART_StateTypeDef */ + + __IO uint32_t ErrorCode; /*!< UART Error code */ + +#if (USE_HAL_UART_REGISTER_CALLBACKS == 1) + void (* TxHalfCpltCallback)(struct __UART_HandleTypeDef *huart); /*!< UART Tx Half Complete Callback */ + void (* TxCpltCallback)(struct __UART_HandleTypeDef *huart); /*!< UART Tx Complete Callback */ + void (* RxHalfCpltCallback)(struct __UART_HandleTypeDef *huart); /*!< UART Rx Half Complete Callback */ + void (* RxCpltCallback)(struct __UART_HandleTypeDef *huart); /*!< UART Rx Complete Callback */ + void (* ErrorCallback)(struct __UART_HandleTypeDef *huart); /*!< UART Error Callback */ + void (* AbortCpltCallback)(struct __UART_HandleTypeDef *huart); /*!< UART Abort Complete Callback */ + void (* AbortTransmitCpltCallback)(struct __UART_HandleTypeDef *huart); /*!< UART Abort Transmit Complete Callback */ + void (* AbortReceiveCpltCallback)(struct __UART_HandleTypeDef *huart); /*!< UART Abort Receive Complete Callback */ +#if defined(USART_CR1_UESM) +#if defined(USART_CR3_WUFIE) + void (* WakeupCallback)(struct __UART_HandleTypeDef *huart); /*!< UART Wakeup Callback */ +#endif /* USART_CR3_WUFIE */ +#endif /* USART_CR1_UESM */ + void (* RxEventCallback)(struct __UART_HandleTypeDef *huart, uint16_t Pos); /*!< UART Reception Event Callback */ + + void (* MspInitCallback)(struct __UART_HandleTypeDef *huart); /*!< UART Msp Init callback */ + void (* MspDeInitCallback)(struct __UART_HandleTypeDef *huart); /*!< UART Msp DeInit callback */ +#endif /* USE_HAL_UART_REGISTER_CALLBACKS */ + +} UART_HandleTypeDef; + +#if (USE_HAL_UART_REGISTER_CALLBACKS == 1) +/** + * @brief HAL UART Callback ID enumeration definition + */ +typedef enum +{ + HAL_UART_TX_HALFCOMPLETE_CB_ID = 0x00U, /*!< UART Tx Half Complete Callback ID */ + HAL_UART_TX_COMPLETE_CB_ID = 0x01U, /*!< UART Tx Complete Callback ID */ + HAL_UART_RX_HALFCOMPLETE_CB_ID = 0x02U, /*!< UART Rx Half Complete Callback ID */ + HAL_UART_RX_COMPLETE_CB_ID = 0x03U, /*!< UART Rx Complete Callback ID */ + HAL_UART_ERROR_CB_ID = 0x04U, /*!< UART Error Callback ID */ + HAL_UART_ABORT_COMPLETE_CB_ID = 0x05U, /*!< UART Abort Complete Callback ID */ + HAL_UART_ABORT_TRANSMIT_COMPLETE_CB_ID = 0x06U, /*!< UART Abort Transmit Complete Callback ID */ + HAL_UART_ABORT_RECEIVE_COMPLETE_CB_ID = 0x07U, /*!< UART Abort Receive Complete Callback ID */ + HAL_UART_WAKEUP_CB_ID = 0x08U, /*!< UART Wakeup Callback ID */ + + HAL_UART_MSPINIT_CB_ID = 0x0BU, /*!< UART MspInit callback ID */ + HAL_UART_MSPDEINIT_CB_ID = 0x0CU /*!< UART MspDeInit callback ID */ + +} HAL_UART_CallbackIDTypeDef; + +/** + * @brief HAL UART Callback pointer definition + */ +typedef void (*pUART_CallbackTypeDef)(UART_HandleTypeDef *huart); /*!< pointer to an UART callback function */ +typedef void (*pUART_RxEventCallbackTypeDef) +(struct __UART_HandleTypeDef *huart, uint16_t Pos); /*!< pointer to a UART Rx Event specific callback function */ + +#endif /* USE_HAL_UART_REGISTER_CALLBACKS */ + +/** + * @} + */ + +/* Exported constants --------------------------------------------------------*/ +/** @defgroup UART_Exported_Constants UART Exported Constants + * @{ + */ + +/** @defgroup UART_State_Definition UART State Code Definition + * @{ + */ +#define HAL_UART_STATE_RESET 0x00000000U /*!< Peripheral is not initialized + Value is allowed for gState and RxState */ +#define HAL_UART_STATE_READY 0x00000020U /*!< Peripheral Initialized and ready for use + Value is allowed for gState and RxState */ +#define HAL_UART_STATE_BUSY 0x00000024U /*!< 
an internal process is ongoing + Value is allowed for gState only */ +#define HAL_UART_STATE_BUSY_TX 0x00000021U /*!< Data Transmission process is ongoing + Value is allowed for gState only */ +#define HAL_UART_STATE_BUSY_RX 0x00000022U /*!< Data Reception process is ongoing + Value is allowed for RxState only */ +#define HAL_UART_STATE_BUSY_TX_RX 0x00000023U /*!< Data Transmission and Reception process is ongoing + Not to be used for neither gState nor RxState.Value is result + of combination (Or) between gState and RxState values */ +#define HAL_UART_STATE_TIMEOUT 0x000000A0U /*!< Timeout state + Value is allowed for gState only */ +#define HAL_UART_STATE_ERROR 0x000000E0U /*!< Error + Value is allowed for gState only */ +/** + * @} + */ + +/** @defgroup UART_Error_Definition UART Error Definition + * @{ + */ +#define HAL_UART_ERROR_NONE (0x00000000U) /*!< No error */ +#define HAL_UART_ERROR_PE (0x00000001U) /*!< Parity error */ +#define HAL_UART_ERROR_NE (0x00000002U) /*!< Noise error */ +#define HAL_UART_ERROR_FE (0x00000004U) /*!< Frame error */ +#define HAL_UART_ERROR_ORE (0x00000008U) /*!< Overrun error */ +#define HAL_UART_ERROR_DMA (0x00000010U) /*!< DMA transfer error */ +#define HAL_UART_ERROR_RTO (0x00000020U) /*!< Receiver Timeout error */ + +#if (USE_HAL_UART_REGISTER_CALLBACKS == 1) +#define HAL_UART_ERROR_INVALID_CALLBACK (0x00000040U) /*!< Invalid Callback error */ +#endif /* USE_HAL_UART_REGISTER_CALLBACKS */ +/** + * @} + */ + +/** @defgroup UART_Stop_Bits UART Number of Stop Bits + * @{ + */ +#define UART_STOPBITS_0_5 USART_CR2_STOP_0 /*!< UART frame with 0.5 stop bit */ +#define UART_STOPBITS_1 0x00000000U /*!< UART frame with 1 stop bit */ +#define UART_STOPBITS_1_5 (USART_CR2_STOP_0 | USART_CR2_STOP_1) /*!< UART frame with 1.5 stop bits */ +#define UART_STOPBITS_2 USART_CR2_STOP_1 /*!< UART frame with 2 stop bits */ +/** + * @} + */ + +/** @defgroup UART_Parity UART Parity + * @{ + */ +#define UART_PARITY_NONE 0x00000000U /*!< No parity */ +#define UART_PARITY_EVEN USART_CR1_PCE /*!< Even parity */ +#define UART_PARITY_ODD (USART_CR1_PCE | USART_CR1_PS) /*!< Odd parity */ +/** + * @} + */ + +/** @defgroup UART_Hardware_Flow_Control UART Hardware Flow Control + * @{ + */ +#define UART_HWCONTROL_NONE 0x00000000U /*!< No hardware control */ +#define UART_HWCONTROL_RTS USART_CR3_RTSE /*!< Request To Send */ +#define UART_HWCONTROL_CTS USART_CR3_CTSE /*!< Clear To Send */ +#define UART_HWCONTROL_RTS_CTS (USART_CR3_RTSE | USART_CR3_CTSE) /*!< Request and Clear To Send */ +/** + * @} + */ + +/** @defgroup UART_Mode UART Transfer Mode + * @{ + */ +#define UART_MODE_RX USART_CR1_RE /*!< RX mode */ +#define UART_MODE_TX USART_CR1_TE /*!< TX mode */ +#define UART_MODE_TX_RX (USART_CR1_TE |USART_CR1_RE) /*!< RX and TX mode */ +/** + * @} + */ + +/** @defgroup UART_State UART State + * @{ + */ +#define UART_STATE_DISABLE 0x00000000U /*!< UART disabled */ +#define UART_STATE_ENABLE USART_CR1_UE /*!< UART enabled */ +/** + * @} + */ + +/** @defgroup UART_Over_Sampling UART Over Sampling + * @{ + */ +#define UART_OVERSAMPLING_16 0x00000000U /*!< Oversampling by 16 */ +#define UART_OVERSAMPLING_8 USART_CR1_OVER8 /*!< Oversampling by 8 */ +/** + * @} + */ + +/** @defgroup UART_OneBit_Sampling UART One Bit Sampling Method + * @{ + */ +#define UART_ONE_BIT_SAMPLE_DISABLE 0x00000000U /*!< One-bit sampling disable */ +#define UART_ONE_BIT_SAMPLE_ENABLE USART_CR3_ONEBIT /*!< One-bit sampling enable */ +/** + * @} + */ + +/** @defgroup UART_AutoBaud_Rate_Mode UART Advanced Feature AutoBaud 
Rate Mode + * @{ + */ +#define UART_ADVFEATURE_AUTOBAUDRATE_ONSTARTBIT 0x00000000U /*!< Auto Baud rate detection + on start bit */ +#define UART_ADVFEATURE_AUTOBAUDRATE_ONFALLINGEDGE USART_CR2_ABRMODE_0 /*!< Auto Baud rate detection + on falling edge */ +#define UART_ADVFEATURE_AUTOBAUDRATE_ON0X7FFRAME USART_CR2_ABRMODE_1 /*!< Auto Baud rate detection + on 0x7F frame detection */ +#define UART_ADVFEATURE_AUTOBAUDRATE_ON0X55FRAME USART_CR2_ABRMODE /*!< Auto Baud rate detection + on 0x55 frame detection */ +/** + * @} + */ + +/** @defgroup UART_Receiver_Timeout UART Receiver Timeout + * @{ + */ +#define UART_RECEIVER_TIMEOUT_DISABLE 0x00000000U /*!< UART Receiver Timeout disable */ +#define UART_RECEIVER_TIMEOUT_ENABLE USART_CR2_RTOEN /*!< UART Receiver Timeout enable */ +/** + * @} + */ + +/** @defgroup UART_LIN UART Local Interconnection Network mode + * @{ + */ +#define UART_LIN_DISABLE 0x00000000U /*!< Local Interconnect Network disable */ +#define UART_LIN_ENABLE USART_CR2_LINEN /*!< Local Interconnect Network enable */ +/** + * @} + */ + +/** @defgroup UART_LIN_Break_Detection UART LIN Break Detection + * @{ + */ +#define UART_LINBREAKDETECTLENGTH_10B 0x00000000U /*!< LIN 10-bit break detection length */ +#define UART_LINBREAKDETECTLENGTH_11B USART_CR2_LBDL /*!< LIN 11-bit break detection length */ +/** + * @} + */ + +/** @defgroup UART_DMA_Tx UART DMA Tx + * @{ + */ +#define UART_DMA_TX_DISABLE 0x00000000U /*!< UART DMA TX disabled */ +#define UART_DMA_TX_ENABLE USART_CR3_DMAT /*!< UART DMA TX enabled */ +/** + * @} + */ + +/** @defgroup UART_DMA_Rx UART DMA Rx + * @{ + */ +#define UART_DMA_RX_DISABLE 0x00000000U /*!< UART DMA RX disabled */ +#define UART_DMA_RX_ENABLE USART_CR3_DMAR /*!< UART DMA RX enabled */ +/** + * @} + */ + +/** @defgroup UART_Half_Duplex_Selection UART Half Duplex Selection + * @{ + */ +#define UART_HALF_DUPLEX_DISABLE 0x00000000U /*!< UART half-duplex disabled */ +#define UART_HALF_DUPLEX_ENABLE USART_CR3_HDSEL /*!< UART half-duplex enabled */ +/** + * @} + */ + +/** @defgroup UART_WakeUp_Methods UART WakeUp Methods + * @{ + */ +#define UART_WAKEUPMETHOD_IDLELINE 0x00000000U /*!< UART wake-up on idle line */ +#define UART_WAKEUPMETHOD_ADDRESSMARK USART_CR1_WAKE /*!< UART wake-up on address mark */ +/** + * @} + */ + +/** @defgroup UART_Request_Parameters UART Request Parameters + * @{ + */ +#define UART_AUTOBAUD_REQUEST USART_RQR_ABRRQ /*!< Auto-Baud Rate Request */ +#define UART_SENDBREAK_REQUEST USART_RQR_SBKRQ /*!< Send Break Request */ +#define UART_MUTE_MODE_REQUEST USART_RQR_MMRQ /*!< Mute Mode Request */ +#define UART_RXDATA_FLUSH_REQUEST USART_RQR_RXFRQ /*!< Receive Data flush Request */ +#define UART_TXDATA_FLUSH_REQUEST USART_RQR_TXFRQ /*!< Transmit data flush Request */ +/** + * @} + */ + +/** @defgroup UART_Advanced_Features_Initialization_Type UART Advanced Feature Initialization Type + * @{ + */ +#define UART_ADVFEATURE_NO_INIT 0x00000000U /*!< No advanced feature initialization */ +#define UART_ADVFEATURE_TXINVERT_INIT 0x00000001U /*!< TX pin active level inversion */ +#define UART_ADVFEATURE_RXINVERT_INIT 0x00000002U /*!< RX pin active level inversion */ +#define UART_ADVFEATURE_DATAINVERT_INIT 0x00000004U /*!< Binary data inversion */ +#define UART_ADVFEATURE_SWAP_INIT 0x00000008U /*!< TX/RX pins swap */ +#define UART_ADVFEATURE_RXOVERRUNDISABLE_INIT 0x00000010U /*!< RX overrun disable */ +#define UART_ADVFEATURE_DMADISABLEONERROR_INIT 0x00000020U /*!< DMA disable on Reception Error */ +#define UART_ADVFEATURE_AUTOBAUDRATE_INIT 0x00000040U 
/*!< Auto Baud rate detection initialization */ +#define UART_ADVFEATURE_MSBFIRST_INIT 0x00000080U /*!< Most significant bit sent/received first */ +/** + * @} + */ + +/** @defgroup UART_Tx_Inv UART Advanced Feature TX Pin Active Level Inversion + * @{ + */ +#define UART_ADVFEATURE_TXINV_DISABLE 0x00000000U /*!< TX pin active level inversion disable */ +#define UART_ADVFEATURE_TXINV_ENABLE USART_CR2_TXINV /*!< TX pin active level inversion enable */ +/** + * @} + */ + +/** @defgroup UART_Rx_Inv UART Advanced Feature RX Pin Active Level Inversion + * @{ + */ +#define UART_ADVFEATURE_RXINV_DISABLE 0x00000000U /*!< RX pin active level inversion disable */ +#define UART_ADVFEATURE_RXINV_ENABLE USART_CR2_RXINV /*!< RX pin active level inversion enable */ +/** + * @} + */ + +/** @defgroup UART_Data_Inv UART Advanced Feature Binary Data Inversion + * @{ + */ +#define UART_ADVFEATURE_DATAINV_DISABLE 0x00000000U /*!< Binary data inversion disable */ +#define UART_ADVFEATURE_DATAINV_ENABLE USART_CR2_DATAINV /*!< Binary data inversion enable */ +/** + * @} + */ + +/** @defgroup UART_Rx_Tx_Swap UART Advanced Feature RX TX Pins Swap + * @{ + */ +#define UART_ADVFEATURE_SWAP_DISABLE 0x00000000U /*!< TX/RX pins swap disable */ +#define UART_ADVFEATURE_SWAP_ENABLE USART_CR2_SWAP /*!< TX/RX pins swap enable */ +/** + * @} + */ + +/** @defgroup UART_Overrun_Disable UART Advanced Feature Overrun Disable + * @{ + */ +#define UART_ADVFEATURE_OVERRUN_ENABLE 0x00000000U /*!< RX overrun enable */ +#define UART_ADVFEATURE_OVERRUN_DISABLE USART_CR3_OVRDIS /*!< RX overrun disable */ +/** + * @} + */ + +/** @defgroup UART_AutoBaudRate_Enable UART Advanced Feature Auto BaudRate Enable + * @{ + */ +#define UART_ADVFEATURE_AUTOBAUDRATE_DISABLE 0x00000000U /*!< RX Auto Baud rate detection enable */ +#define UART_ADVFEATURE_AUTOBAUDRATE_ENABLE USART_CR2_ABREN /*!< RX Auto Baud rate detection disable */ +/** + * @} + */ + +/** @defgroup UART_DMA_Disable_on_Rx_Error UART Advanced Feature DMA Disable On Rx Error + * @{ + */ +#define UART_ADVFEATURE_DMA_ENABLEONRXERROR 0x00000000U /*!< DMA enable on Reception Error */ +#define UART_ADVFEATURE_DMA_DISABLEONRXERROR USART_CR3_DDRE /*!< DMA disable on Reception Error */ +/** + * @} + */ + +/** @defgroup UART_MSB_First UART Advanced Feature MSB First + * @{ + */ +#define UART_ADVFEATURE_MSBFIRST_DISABLE 0x00000000U /*!< Most significant bit sent/received + first disable */ +#define UART_ADVFEATURE_MSBFIRST_ENABLE USART_CR2_MSBFIRST /*!< Most significant bit sent/received + first enable */ +/** + * @} + */ +#if defined(USART_CR1_UESM) + +/** @defgroup UART_Stop_Mode_Enable UART Advanced Feature Stop Mode Enable + * @{ + */ +#define UART_ADVFEATURE_STOPMODE_DISABLE 0x00000000U /*!< UART stop mode disable */ +#define UART_ADVFEATURE_STOPMODE_ENABLE USART_CR1_UESM /*!< UART stop mode enable */ +/** + * @} + */ +#endif /* USART_CR1_UESM */ + +/** @defgroup UART_Mute_Mode UART Advanced Feature Mute Mode Enable + * @{ + */ +#define UART_ADVFEATURE_MUTEMODE_DISABLE 0x00000000U /*!< UART mute mode disable */ +#define UART_ADVFEATURE_MUTEMODE_ENABLE USART_CR1_MME /*!< UART mute mode enable */ +/** + * @} + */ + +/** @defgroup UART_CR2_ADDRESS_LSB_POS UART Address-matching LSB Position In CR2 Register + * @{ + */ +#define UART_CR2_ADDRESS_LSB_POS 24U /*!< UART address-matching LSB position in CR2 register */ +/** + * @} + */ +#if defined(USART_CR1_UESM) + +/** @defgroup UART_WakeUp_from_Stop_Selection UART WakeUp From Stop Selection + * @{ + */ +#if defined(USART_CR3_WUS) +#define 
UART_WAKEUP_ON_ADDRESS 0x00000000U /*!< UART wake-up on address */ +#define UART_WAKEUP_ON_STARTBIT USART_CR3_WUS_1 /*!< UART wake-up on start bit */ +#define UART_WAKEUP_ON_READDATA_NONEMPTY USART_CR3_WUS /*!< UART wake-up on receive data register + not empty or RXFIFO is not empty */ +#else +#define UART_WAKEUP_ON_ADDRESS 0x00000000U /*!< UART wake-up on address */ +#define UART_WAKEUP_ON_READDATA_NONEMPTY 0x00000001U /*!< UART wake-up on receive data register + not empty or RXFIFO is not empty */ +#endif /* USART_CR3_WUS */ +/** + * @} + */ +#endif /* USART_CR1_UESM */ + +/** @defgroup UART_DriverEnable_Polarity UART DriverEnable Polarity + * @{ + */ +#define UART_DE_POLARITY_HIGH 0x00000000U /*!< Driver enable signal is active high */ +#define UART_DE_POLARITY_LOW USART_CR3_DEP /*!< Driver enable signal is active low */ +/** + * @} + */ + +/** @defgroup UART_CR1_DEAT_ADDRESS_LSB_POS UART Driver Enable Assertion Time LSB Position In CR1 Register + * @{ + */ +#define UART_CR1_DEAT_ADDRESS_LSB_POS 21U /*!< UART Driver Enable assertion time LSB + position in CR1 register */ +/** + * @} + */ + +/** @defgroup UART_CR1_DEDT_ADDRESS_LSB_POS UART Driver Enable DeAssertion Time LSB Position In CR1 Register + * @{ + */ +#define UART_CR1_DEDT_ADDRESS_LSB_POS 16U /*!< UART Driver Enable de-assertion time LSB + position in CR1 register */ +/** + * @} + */ + +/** @defgroup UART_Interruption_Mask UART Interruptions Flag Mask + * @{ + */ +#define UART_IT_MASK 0x001FU /*!< UART interruptions flags mask */ +/** + * @} + */ + +/** @defgroup UART_TimeOut_Value UART polling-based communications time-out value + * @{ + */ +#define HAL_UART_TIMEOUT_VALUE 0x1FFFFFFU /*!< UART polling-based communications time-out value */ +/** + * @} + */ + +/** @defgroup UART_Flags UART Status Flags + * Elements values convention: 0xXXXX + * - 0xXXXX : Flag mask in the ISR register + * @{ + */ +#if defined(USART_ISR_REACK) +#define UART_FLAG_REACK USART_ISR_REACK /*!< UART receive enable acknowledge flag */ +#endif /* USART_ISR_REACK */ +#define UART_FLAG_TEACK USART_ISR_TEACK /*!< UART transmit enable acknowledge flag */ +#if defined(USART_CR1_UESM) +#if defined(USART_CR3_WUFIE) +#define UART_FLAG_WUF USART_ISR_WUF /*!< UART wake-up from stop mode flag */ +#endif /* USART_CR3_WUFIE */ +#endif /* USART_CR1_UESM */ +#define UART_FLAG_RWU USART_ISR_RWU /*!< UART receiver wake-up from mute mode flag */ +#define UART_FLAG_SBKF USART_ISR_SBKF /*!< UART send break flag */ +#define UART_FLAG_CMF USART_ISR_CMF /*!< UART character match flag */ +#define UART_FLAG_BUSY USART_ISR_BUSY /*!< UART busy flag */ +#define UART_FLAG_ABRF USART_ISR_ABRF /*!< UART auto Baud rate flag */ +#define UART_FLAG_ABRE USART_ISR_ABRE /*!< UART auto Baud rate error */ +#define UART_FLAG_RTOF USART_ISR_RTOF /*!< UART receiver timeout flag */ +#define UART_FLAG_CTS USART_ISR_CTS /*!< UART clear to send flag */ +#define UART_FLAG_CTSIF USART_ISR_CTSIF /*!< UART clear to send interrupt flag */ +#define UART_FLAG_LBDF USART_ISR_LBDF /*!< UART LIN break detection flag */ +#define UART_FLAG_TXE USART_ISR_TXE /*!< UART transmit data register empty */ +#define UART_FLAG_TC USART_ISR_TC /*!< UART transmission complete */ +#define UART_FLAG_RXNE USART_ISR_RXNE /*!< UART read data register not empty */ +#define UART_FLAG_IDLE USART_ISR_IDLE /*!< UART idle flag */ +#define UART_FLAG_ORE USART_ISR_ORE /*!< UART overrun error */ +#define UART_FLAG_NE USART_ISR_NE /*!< UART noise error */ +#define UART_FLAG_FE USART_ISR_FE /*!< UART frame error */ +#define UART_FLAG_PE 
USART_ISR_PE /*!< UART parity error */ +/** + * @} + */ + +/** @defgroup UART_Interrupt_definition UART Interrupts Definition + * Elements values convention: 000ZZZZZ0XXYYYYYb + * - YYYYY : Interrupt source position in the XX register (5bits) + * - XX : Interrupt source register (2bits) + * - 01: CR1 register + * - 10: CR2 register + * - 11: CR3 register + * - ZZZZZ : Flag position in the ISR register(5bits) + * Elements values convention: 000000000XXYYYYYb + * - YYYYY : Interrupt source position in the XX register (5bits) + * - XX : Interrupt source register (2bits) + * - 01: CR1 register + * - 10: CR2 register + * - 11: CR3 register + * Elements values convention: 0000ZZZZ00000000b + * - ZZZZ : Flag position in the ISR register(4bits) + * @{ + */ +#define UART_IT_PE 0x0028U /*!< UART parity error interruption */ +#define UART_IT_TXE 0x0727U /*!< UART transmit data register empty interruption */ +#define UART_IT_TC 0x0626U /*!< UART transmission complete interruption */ +#define UART_IT_RXNE 0x0525U /*!< UART read data register not empty interruption */ +#define UART_IT_IDLE 0x0424U /*!< UART idle interruption */ +#define UART_IT_LBD 0x0846U /*!< UART LIN break detection interruption */ +#define UART_IT_CTS 0x096AU /*!< UART CTS interruption */ +#define UART_IT_CM 0x112EU /*!< UART character match interruption */ +#if defined(USART_CR1_UESM) +#if defined(USART_CR3_WUFIE) +#define UART_IT_WUF 0x1476U /*!< UART wake-up from stop mode interruption */ +#endif /* USART_CR3_WUFIE */ +#endif /* USART_CR1_UESM */ +#define UART_IT_RTO 0x0B3AU /*!< UART receiver timeout interruption */ + +#define UART_IT_ERR 0x0060U /*!< UART error interruption */ + +#define UART_IT_ORE 0x0300U /*!< UART overrun error interruption */ +#define UART_IT_NE 0x0200U /*!< UART noise error interruption */ +#define UART_IT_FE 0x0100U /*!< UART frame error interruption */ +/** + * @} + */ + +/** @defgroup UART_IT_CLEAR_Flags UART Interruption Clear Flags + * @{ + */ +#define UART_CLEAR_PEF USART_ICR_PECF /*!< Parity Error Clear Flag */ +#define UART_CLEAR_FEF USART_ICR_FECF /*!< Framing Error Clear Flag */ +#define UART_CLEAR_NEF USART_ICR_NCF /*!< Noise Error detected Clear Flag */ +#define UART_CLEAR_OREF USART_ICR_ORECF /*!< Overrun Error Clear Flag */ +#define UART_CLEAR_IDLEF USART_ICR_IDLECF /*!< IDLE line detected Clear Flag */ +#define UART_CLEAR_TCF USART_ICR_TCCF /*!< Transmission Complete Clear Flag */ +#define UART_CLEAR_LBDF USART_ICR_LBDCF /*!< LIN Break Detection Clear Flag */ +#define UART_CLEAR_CTSF USART_ICR_CTSCF /*!< CTS Interrupt Clear Flag */ +#define UART_CLEAR_CMF USART_ICR_CMCF /*!< Character Match Clear Flag */ +#if defined(USART_CR1_UESM) +#if defined(USART_CR3_WUFIE) +#define UART_CLEAR_WUF USART_ICR_WUCF /*!< Wake Up from stop mode Clear Flag */ +#endif /* USART_CR3_WUFIE */ +#endif /* USART_CR1_UESM */ +#define UART_CLEAR_RTOF USART_ICR_RTOCF /*!< UART receiver timeout clear flag */ +/** + * @} + */ + +/** @defgroup UART_RECEPTION_TYPE_Values UART Reception type values + * @{ + */ +#define HAL_UART_RECEPTION_STANDARD (0x00000000U) /*!< Standard reception */ +#define HAL_UART_RECEPTION_TOIDLE (0x00000001U) /*!< Reception till completion or IDLE event */ +#define HAL_UART_RECEPTION_TORTO (0x00000002U) /*!< Reception till completion or RTO event */ +#define HAL_UART_RECEPTION_TOCHARMATCH (0x00000003U) /*!< Reception till completion or CM event */ +/** + * @} + */ + +/** + * @} + */ + +/* Exported macros -----------------------------------------------------------*/ +/** @defgroup 
UART_Exported_Macros UART Exported Macros + * @{ + */ + +/** @brief Reset UART handle states. + * @param __HANDLE__ UART handle. + * @retval None + */ +#if (USE_HAL_UART_REGISTER_CALLBACKS == 1) +#define __HAL_UART_RESET_HANDLE_STATE(__HANDLE__) do{ \ + (__HANDLE__)->gState = HAL_UART_STATE_RESET; \ + (__HANDLE__)->RxState = HAL_UART_STATE_RESET; \ + (__HANDLE__)->MspInitCallback = NULL; \ + (__HANDLE__)->MspDeInitCallback = NULL; \ + } while(0U) +#else +#define __HAL_UART_RESET_HANDLE_STATE(__HANDLE__) do{ \ + (__HANDLE__)->gState = HAL_UART_STATE_RESET; \ + (__HANDLE__)->RxState = HAL_UART_STATE_RESET; \ + } while(0U) +#endif /*USE_HAL_UART_REGISTER_CALLBACKS */ + +/** @brief Flush the UART Data registers. + * @param __HANDLE__ specifies the UART Handle. + * @retval None + */ +#define __HAL_UART_FLUSH_DRREGISTER(__HANDLE__) \ + do{ \ + SET_BIT((__HANDLE__)->Instance->RQR, UART_RXDATA_FLUSH_REQUEST); \ + SET_BIT((__HANDLE__)->Instance->RQR, UART_TXDATA_FLUSH_REQUEST); \ + } while(0U) + +/** @brief Clear the specified UART pending flag. + * @param __HANDLE__ specifies the UART Handle. + * @param __FLAG__ specifies the flag to check. + * This parameter can be any combination of the following values: + * @arg @ref UART_CLEAR_PEF Parity Error Clear Flag + * @arg @ref UART_CLEAR_FEF Framing Error Clear Flag + * @arg @ref UART_CLEAR_NEF Noise detected Clear Flag + * @arg @ref UART_CLEAR_OREF Overrun Error Clear Flag + * @arg @ref UART_CLEAR_IDLEF IDLE line detected Clear Flag + * @arg @ref UART_CLEAR_TCF Transmission Complete Clear Flag + * @arg @ref UART_CLEAR_RTOF Receiver Timeout clear flag + * @arg @ref UART_CLEAR_LBDF LIN Break Detection Clear Flag + * @arg @ref UART_CLEAR_CTSF CTS Interrupt Clear Flag + * @arg @ref UART_CLEAR_CMF Character Match Clear Flag +#if defined(USART_CR1_UESM) +#if defined(USART_CR3_WUFIE) + * @arg @ref UART_CLEAR_WUF Wake Up from stop mode Clear Flag +#endif +#endif + * @retval None + */ +#define __HAL_UART_CLEAR_FLAG(__HANDLE__, __FLAG__) ((__HANDLE__)->Instance->ICR = (__FLAG__)) + +/** @brief Clear the UART PE pending flag. + * @param __HANDLE__ specifies the UART Handle. + * @retval None + */ +#define __HAL_UART_CLEAR_PEFLAG(__HANDLE__) __HAL_UART_CLEAR_FLAG((__HANDLE__), UART_CLEAR_PEF) + +/** @brief Clear the UART FE pending flag. + * @param __HANDLE__ specifies the UART Handle. + * @retval None + */ +#define __HAL_UART_CLEAR_FEFLAG(__HANDLE__) __HAL_UART_CLEAR_FLAG((__HANDLE__), UART_CLEAR_FEF) + +/** @brief Clear the UART NE pending flag. + * @param __HANDLE__ specifies the UART Handle. + * @retval None + */ +#define __HAL_UART_CLEAR_NEFLAG(__HANDLE__) __HAL_UART_CLEAR_FLAG((__HANDLE__), UART_CLEAR_NEF) + +/** @brief Clear the UART ORE pending flag. + * @param __HANDLE__ specifies the UART Handle. + * @retval None + */ +#define __HAL_UART_CLEAR_OREFLAG(__HANDLE__) __HAL_UART_CLEAR_FLAG((__HANDLE__), UART_CLEAR_OREF) + +/** @brief Clear the UART IDLE pending flag. + * @param __HANDLE__ specifies the UART Handle. + * @retval None + */ +#define __HAL_UART_CLEAR_IDLEFLAG(__HANDLE__) __HAL_UART_CLEAR_FLAG((__HANDLE__), UART_CLEAR_IDLEF) + + +/** @brief Check whether the specified UART flag is set or not. + * @param __HANDLE__ specifies the UART Handle. + * @param __FLAG__ specifies the flag to check. 
+ * This parameter can be one of the following values: +#if defined(USART_ISR_REACK) + * @arg @ref UART_FLAG_REACK Receive enable acknowledge flag +#endif + * @arg @ref UART_FLAG_TEACK Transmit enable acknowledge flag +#if defined(USART_CR1_UESM) +#if defined(USART_CR3_WUFIE) + * @arg @ref UART_FLAG_WUF Wake up from stop mode flag +#endif +#endif + * @arg @ref UART_FLAG_RWU Receiver wake up flag (if the UART in mute mode) + * @arg @ref UART_FLAG_SBKF Send Break flag + * @arg @ref UART_FLAG_CMF Character match flag + * @arg @ref UART_FLAG_BUSY Busy flag + * @arg @ref UART_FLAG_ABRF Auto Baud rate detection flag + * @arg @ref UART_FLAG_ABRE Auto Baud rate detection error flag + * @arg @ref UART_FLAG_CTS CTS Change flag + * @arg @ref UART_FLAG_LBDF LIN Break detection flag + * @arg @ref UART_FLAG_TXE Transmit data register empty flag + * @arg @ref UART_FLAG_TC Transmission Complete flag + * @arg @ref UART_FLAG_RXNE Receive data register not empty flag + * @arg @ref UART_FLAG_RTOF Receiver Timeout flag + * @arg @ref UART_FLAG_IDLE Idle Line detection flag + * @arg @ref UART_FLAG_ORE Overrun Error flag + * @arg @ref UART_FLAG_NE Noise Error flag + * @arg @ref UART_FLAG_FE Framing Error flag + * @arg @ref UART_FLAG_PE Parity Error flag + * @retval The new state of __FLAG__ (TRUE or FALSE). + */ +#define __HAL_UART_GET_FLAG(__HANDLE__, __FLAG__) (((__HANDLE__)->Instance->ISR & (__FLAG__)) == (__FLAG__)) + +/** @brief Enable the specified UART interrupt. + * @param __HANDLE__ specifies the UART Handle. + * @param __INTERRUPT__ specifies the UART interrupt source to enable. + * This parameter can be one of the following values: +#if defined(USART_CR1_UESM) +#if defined(USART_CR3_WUFIE) + * @arg @ref UART_IT_WUF Wakeup from stop mode interrupt +#endif +#endif + * @arg @ref UART_IT_CM Character match interrupt + * @arg @ref UART_IT_CTS CTS change interrupt + * @arg @ref UART_IT_LBD LIN Break detection interrupt + * @arg @ref UART_IT_TXE Transmit Data Register empty interrupt + * @arg @ref UART_IT_TC Transmission complete interrupt + * @arg @ref UART_IT_RXNE Receive Data register not empty interrupt + * @arg @ref UART_IT_RTO Receive Timeout interrupt + * @arg @ref UART_IT_IDLE Idle line detection interrupt + * @arg @ref UART_IT_PE Parity Error interrupt + * @arg @ref UART_IT_ERR Error interrupt (frame error, noise error, overrun error) + * @retval None + */ +#define __HAL_UART_ENABLE_IT(__HANDLE__, __INTERRUPT__) (\ + ((((uint8_t)(__INTERRUPT__)) >> 5U) == 1U)?\ + ((__HANDLE__)->Instance->CR1 |= (1U <<\ + ((__INTERRUPT__) & UART_IT_MASK))): \ + ((((uint8_t)(__INTERRUPT__)) >> 5U) == 2U)?\ + ((__HANDLE__)->Instance->CR2 |= (1U <<\ + ((__INTERRUPT__) & UART_IT_MASK))): \ + ((__HANDLE__)->Instance->CR3 |= (1U <<\ + ((__INTERRUPT__) & UART_IT_MASK)))) + +/** @brief Disable the specified UART interrupt. + * @param __HANDLE__ specifies the UART Handle. + * @param __INTERRUPT__ specifies the UART interrupt source to disable. 
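
/* Editor's sketch (not part of this patch): the UART_IT_x constants above encode
 * both the ISR flag position and the owning control register, e.g. UART_IT_RXNE
 * (0x0525U) decodes to "ISR bit 5, CR1 bit 5" and UART_IT_ERR (0x0060U) to
 * "CR3 bit 0 (EIE)", so __HAL_UART_ENABLE_IT routes the write to the right
 * register by itself. */
static void uart_enable_rx_interrupts(UART_HandleTypeDef *huart)
{
  __HAL_UART_ENABLE_IT(huart, UART_IT_RXNE);  /* RXNEIE, routed to CR1 */
  __HAL_UART_ENABLE_IT(huart, UART_IT_ERR);   /* EIE,    routed to CR3 */
}
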
+ * This parameter can be one of the following values: +#if defined(USART_CR1_UESM) +#if defined(USART_CR3_WUFIE) + * @arg @ref UART_IT_WUF Wakeup from stop mode interrupt +#endif +#endif + * @arg @ref UART_IT_CM Character match interrupt + * @arg @ref UART_IT_CTS CTS change interrupt + * @arg @ref UART_IT_LBD LIN Break detection interrupt + * @arg @ref UART_IT_TXE Transmit Data Register empty interrupt + * @arg @ref UART_IT_TC Transmission complete interrupt + * @arg @ref UART_IT_RXNE Receive Data register not empty interrupt + * @arg @ref UART_IT_RTO Receive Timeout interrupt + * @arg @ref UART_IT_IDLE Idle line detection interrupt + * @arg @ref UART_IT_PE Parity Error interrupt + * @arg @ref UART_IT_ERR Error interrupt (Frame error, noise error, overrun error) + * @retval None + */ +#define __HAL_UART_DISABLE_IT(__HANDLE__, __INTERRUPT__) (\ + ((((uint8_t)(__INTERRUPT__)) >> 5U) == 1U)?\ + ((__HANDLE__)->Instance->CR1 &= ~ (1U <<\ + ((__INTERRUPT__) & UART_IT_MASK))): \ + ((((uint8_t)(__INTERRUPT__)) >> 5U) == 2U)?\ + ((__HANDLE__)->Instance->CR2 &= ~ (1U <<\ + ((__INTERRUPT__) & UART_IT_MASK))): \ + ((__HANDLE__)->Instance->CR3 &= ~ (1U <<\ + ((__INTERRUPT__) & UART_IT_MASK)))) + +/** @brief Check whether the specified UART interrupt has occurred or not. + * @param __HANDLE__ specifies the UART Handle. + * @param __INTERRUPT__ specifies the UART interrupt to check. + * This parameter can be one of the following values: +#if defined(USART_CR1_UESM) +#if defined(USART_CR3_WUFIE) + * @arg @ref UART_IT_WUF Wakeup from stop mode interrupt +#endif +#endif + * @arg @ref UART_IT_CM Character match interrupt + * @arg @ref UART_IT_CTS CTS change interrupt + * @arg @ref UART_IT_LBD LIN Break detection interrupt + * @arg @ref UART_IT_TXE Transmit Data Register empty interrupt + * @arg @ref UART_IT_TC Transmission complete interrupt + * @arg @ref UART_IT_RXNE Receive Data register not empty interrupt + * @arg @ref UART_IT_RTO Receive Timeout interrupt + * @arg @ref UART_IT_IDLE Idle line detection interrupt + * @arg @ref UART_IT_PE Parity Error interrupt + * @arg @ref UART_IT_ERR Error interrupt (Frame error, noise error, overrun error) + * @retval The new state of __INTERRUPT__ (SET or RESET). + */ +#define __HAL_UART_GET_IT(__HANDLE__, __INTERRUPT__) ((((__HANDLE__)->Instance->ISR\ + & (1U << ((__INTERRUPT__)>> 8U))) != RESET) ? SET : RESET) + +/** @brief Check whether the specified UART interrupt source is enabled or not. + * @param __HANDLE__ specifies the UART Handle. + * @param __INTERRUPT__ specifies the UART interrupt source to check. + * This parameter can be one of the following values: +#if defined(USART_CR1_UESM) +#if defined(USART_CR3_WUFIE) + * @arg @ref UART_IT_WUF Wakeup from stop mode interrupt +#endif +#endif + * @arg @ref UART_IT_CM Character match interrupt + * @arg @ref UART_IT_CTS CTS change interrupt + * @arg @ref UART_IT_LBD LIN Break detection interrupt + * @arg @ref UART_IT_TXE Transmit Data Register empty interrupt + * @arg @ref UART_IT_TC Transmission complete interrupt + * @arg @ref UART_IT_RXNE Receive Data register not empty interrupt + * @arg @ref UART_IT_RTO Receive Timeout interrupt + * @arg @ref UART_IT_IDLE Idle line detection interrupt + * @arg @ref UART_IT_PE Parity Error interrupt + * @arg @ref UART_IT_ERR Error interrupt (Frame error, noise error, overrun error) + * @retval The new state of __INTERRUPT__ (SET or RESET). 
+ */ +#define __HAL_UART_GET_IT_SOURCE(__HANDLE__, __INTERRUPT__) ((((((((uint8_t)(__INTERRUPT__)) >> 5U) == 1U) ?\ + (__HANDLE__)->Instance->CR1 : \ + (((((uint8_t)(__INTERRUPT__)) >> 5U) == 2U) ?\ + (__HANDLE__)->Instance->CR2 : \ + (__HANDLE__)->Instance->CR3)) & (1U <<\ + (((uint16_t)(__INTERRUPT__)) &\ + UART_IT_MASK))) != RESET) ? SET : RESET) + +/** @brief Clear the specified UART ISR flag, in setting the proper ICR register flag. + * @param __HANDLE__ specifies the UART Handle. + * @param __IT_CLEAR__ specifies the interrupt clear register flag that needs to be set + * to clear the corresponding interrupt + * This parameter can be one of the following values: + * @arg @ref UART_CLEAR_PEF Parity Error Clear Flag + * @arg @ref UART_CLEAR_FEF Framing Error Clear Flag + * @arg @ref UART_CLEAR_NEF Noise detected Clear Flag + * @arg @ref UART_CLEAR_OREF Overrun Error Clear Flag + * @arg @ref UART_CLEAR_IDLEF IDLE line detected Clear Flag + * @arg @ref UART_CLEAR_RTOF Receiver timeout clear flag + * @arg @ref UART_CLEAR_TCF Transmission Complete Clear Flag + * @arg @ref UART_CLEAR_LBDF LIN Break Detection Clear Flag + * @arg @ref UART_CLEAR_CTSF CTS Interrupt Clear Flag + * @arg @ref UART_CLEAR_CMF Character Match Clear Flag +#if defined(USART_CR1_UESM) +#if defined(USART_CR3_WUFIE) + * @arg @ref UART_CLEAR_WUF Wake Up from stop mode Clear Flag +#endif +#endif + * @retval None + */ +#define __HAL_UART_CLEAR_IT(__HANDLE__, __IT_CLEAR__) ((__HANDLE__)->Instance->ICR = (uint32_t)(__IT_CLEAR__)) + +/** @brief Set a specific UART request flag. + * @param __HANDLE__ specifies the UART Handle. + * @param __REQ__ specifies the request flag to set + * This parameter can be one of the following values: + * @arg @ref UART_AUTOBAUD_REQUEST Auto-Baud Rate Request + * @arg @ref UART_SENDBREAK_REQUEST Send Break Request + * @arg @ref UART_MUTE_MODE_REQUEST Mute Mode Request + * @arg @ref UART_RXDATA_FLUSH_REQUEST Receive Data flush Request + * @arg @ref UART_TXDATA_FLUSH_REQUEST Transmit data flush Request + * @retval None + */ +#define __HAL_UART_SEND_REQ(__HANDLE__, __REQ__) ((__HANDLE__)->Instance->RQR |= (uint16_t)(__REQ__)) + +/** @brief Enable the UART one bit sample method. + * @param __HANDLE__ specifies the UART Handle. + * @retval None + */ +#define __HAL_UART_ONE_BIT_SAMPLE_ENABLE(__HANDLE__) ((__HANDLE__)->Instance->CR3|= USART_CR3_ONEBIT) + +/** @brief Disable the UART one bit sample method. + * @param __HANDLE__ specifies the UART Handle. + * @retval None + */ +#define __HAL_UART_ONE_BIT_SAMPLE_DISABLE(__HANDLE__) ((__HANDLE__)->Instance->CR3 &= ~USART_CR3_ONEBIT) + +/** @brief Enable UART. + * @param __HANDLE__ specifies the UART Handle. + * @retval None + */ +#define __HAL_UART_ENABLE(__HANDLE__) ((__HANDLE__)->Instance->CR1 |= USART_CR1_UE) + +/** @brief Disable UART. + * @param __HANDLE__ specifies the UART Handle. + * @retval None + */ +#define __HAL_UART_DISABLE(__HANDLE__) ((__HANDLE__)->Instance->CR1 &= ~USART_CR1_UE) + +/** @brief Enable CTS flow control. + * @note This macro allows to enable CTS hardware flow control for a given UART instance, + * without need to call HAL_UART_Init() function. + * As involving direct access to UART registers, usage of this macro should be fully endorsed by user. 
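
/* Editor's sketch (not part of this patch): the usual "event raised AND source
 * enabled -> handle, then acknowledge" pattern built from the macros above,
 * shown here for the character-match event. */
static void uart_service_character_match(UART_HandleTypeDef *huart)
{
  if ((__HAL_UART_GET_IT(huart, UART_IT_CM) != RESET) &&
      (__HAL_UART_GET_IT_SOURCE(huart, UART_IT_CM) != RESET))
  {
    __HAL_UART_CLEAR_IT(huart, UART_CLEAR_CMF);  /* acknowledge via ICR */
    /* ... application-specific handling of the matched address/character ... */
  }
}
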
+ * @note As macro is expected to be used for modifying CTS Hw flow control feature activation, without need + * for USART instance Deinit/Init, following conditions for macro call should be fulfilled : + * - UART instance should have already been initialised (through call of HAL_UART_Init() ) + * - macro could only be called when corresponding UART instance is disabled + * (i.e. __HAL_UART_DISABLE(__HANDLE__)) and should be followed by an Enable + * macro (i.e. __HAL_UART_ENABLE(__HANDLE__)). + * @param __HANDLE__ specifies the UART Handle. + * @retval None + */ +#define __HAL_UART_HWCONTROL_CTS_ENABLE(__HANDLE__) \ + do{ \ + ATOMIC_SET_BIT((__HANDLE__)->Instance->CR3, USART_CR3_CTSE); \ + (__HANDLE__)->Init.HwFlowCtl |= USART_CR3_CTSE; \ + } while(0U) + +/** @brief Disable CTS flow control. + * @note This macro allows to disable CTS hardware flow control for a given UART instance, + * without need to call HAL_UART_Init() function. + * As involving direct access to UART registers, usage of this macro should be fully endorsed by user. + * @note As macro is expected to be used for modifying CTS Hw flow control feature activation, without need + * for USART instance Deinit/Init, following conditions for macro call should be fulfilled : + * - UART instance should have already been initialised (through call of HAL_UART_Init() ) + * - macro could only be called when corresponding UART instance is disabled + * (i.e. __HAL_UART_DISABLE(__HANDLE__)) and should be followed by an Enable + * macro (i.e. __HAL_UART_ENABLE(__HANDLE__)). + * @param __HANDLE__ specifies the UART Handle. + * @retval None + */ +#define __HAL_UART_HWCONTROL_CTS_DISABLE(__HANDLE__) \ + do{ \ + ATOMIC_CLEAR_BIT((__HANDLE__)->Instance->CR3, USART_CR3_CTSE); \ + (__HANDLE__)->Init.HwFlowCtl &= ~(USART_CR3_CTSE); \ + } while(0U) + +/** @brief Enable RTS flow control. + * @note This macro allows to enable RTS hardware flow control for a given UART instance, + * without need to call HAL_UART_Init() function. + * As involving direct access to UART registers, usage of this macro should be fully endorsed by user. + * @note As macro is expected to be used for modifying RTS Hw flow control feature activation, without need + * for USART instance Deinit/Init, following conditions for macro call should be fulfilled : + * - UART instance should have already been initialised (through call of HAL_UART_Init() ) + * - macro could only be called when corresponding UART instance is disabled + * (i.e. __HAL_UART_DISABLE(__HANDLE__)) and should be followed by an Enable + * macro (i.e. __HAL_UART_ENABLE(__HANDLE__)). + * @param __HANDLE__ specifies the UART Handle. + * @retval None + */ +#define __HAL_UART_HWCONTROL_RTS_ENABLE(__HANDLE__) \ + do{ \ + ATOMIC_SET_BIT((__HANDLE__)->Instance->CR3, USART_CR3_RTSE); \ + (__HANDLE__)->Init.HwFlowCtl |= USART_CR3_RTSE; \ + } while(0U) + +/** @brief Disable RTS flow control. + * @note This macro allows to disable RTS hardware flow control for a given UART instance, + * without need to call HAL_UART_Init() function. + * As involving direct access to UART registers, usage of this macro should be fully endorsed by user. + * @note As macro is expected to be used for modifying RTS Hw flow control feature activation, without need + * for USART instance Deinit/Init, following conditions for macro call should be fulfilled : + * - UART instance should have already been initialised (through call of HAL_UART_Init() ) + * - macro could only be called when corresponding UART instance is disabled + * (i.e. 
__HAL_UART_DISABLE(__HANDLE__)) and should be followed by an Enable + * macro (i.e. __HAL_UART_ENABLE(__HANDLE__)). + * @param __HANDLE__ specifies the UART Handle. + * @retval None + */ +#define __HAL_UART_HWCONTROL_RTS_DISABLE(__HANDLE__) \ + do{ \ + ATOMIC_CLEAR_BIT((__HANDLE__)->Instance->CR3, USART_CR3_RTSE);\ + (__HANDLE__)->Init.HwFlowCtl &= ~(USART_CR3_RTSE); \ + } while(0U) +/** + * @} + */ + +/* Private macros --------------------------------------------------------*/ +/** @defgroup UART_Private_Macros UART Private Macros + * @{ + */ + + +/** @brief BRR division operation to set BRR register in 8-bit oversampling mode. + * @param __PCLK__ UART clock. + * @param __BAUD__ Baud rate set by the user. + * @retval Division result + */ +#define UART_DIV_SAMPLING8(__PCLK__, __BAUD__) ((((__PCLK__)*2U) + ((__BAUD__)/2U)) / (__BAUD__)) + +/** @brief BRR division operation to set BRR register in 16-bit oversampling mode. + * @param __PCLK__ UART clock. + * @param __BAUD__ Baud rate set by the user. + * @retval Division result + */ +#define UART_DIV_SAMPLING16(__PCLK__, __BAUD__) (((__PCLK__) + ((__BAUD__)/2U)) / (__BAUD__)) + + +/** @brief Check UART Baud rate. + * @param __BAUDRATE__ Baudrate specified by the user. + * The maximum Baud Rate is derived from the maximum clock on F7 (i.e. 216 MHz) + * divided by the smallest oversampling used on the USART (i.e. 8) + * @retval SET (__BAUDRATE__ is valid) or RESET (__BAUDRATE__ is invalid) + */ +#define IS_UART_BAUDRATE(__BAUDRATE__) ((__BAUDRATE__) < 27000001U) + +/** @brief Check UART assertion time. + * @param __TIME__ 5-bit value assertion time. + * @retval Test result (TRUE or FALSE). + */ +#define IS_UART_ASSERTIONTIME(__TIME__) ((__TIME__) <= 0x1FU) + +/** @brief Check UART deassertion time. + * @param __TIME__ 5-bit value deassertion time. + * @retval Test result (TRUE or FALSE). + */ +#define IS_UART_DEASSERTIONTIME(__TIME__) ((__TIME__) <= 0x1FU) + +/** + * @brief Ensure that UART frame number of stop bits is valid. + * @param __STOPBITS__ UART frame number of stop bits. + * @retval SET (__STOPBITS__ is valid) or RESET (__STOPBITS__ is invalid) + */ +#define IS_UART_STOPBITS(__STOPBITS__) (((__STOPBITS__) == UART_STOPBITS_0_5) || \ + ((__STOPBITS__) == UART_STOPBITS_1) || \ + ((__STOPBITS__) == UART_STOPBITS_1_5) || \ + ((__STOPBITS__) == UART_STOPBITS_2)) + + +/** + * @brief Ensure that UART frame parity is valid. + * @param __PARITY__ UART frame parity. + * @retval SET (__PARITY__ is valid) or RESET (__PARITY__ is invalid) + */ +#define IS_UART_PARITY(__PARITY__) (((__PARITY__) == UART_PARITY_NONE) || \ + ((__PARITY__) == UART_PARITY_EVEN) || \ + ((__PARITY__) == UART_PARITY_ODD)) + +/** + * @brief Ensure that UART hardware flow control is valid. + * @param __CONTROL__ UART hardware flow control. + * @retval SET (__CONTROL__ is valid) or RESET (__CONTROL__ is invalid) + */ +#define IS_UART_HARDWARE_FLOW_CONTROL(__CONTROL__)\ + (((__CONTROL__) == UART_HWCONTROL_NONE) || \ + ((__CONTROL__) == UART_HWCONTROL_RTS) || \ + ((__CONTROL__) == UART_HWCONTROL_CTS) || \ + ((__CONTROL__) == UART_HWCONTROL_RTS_CTS)) + +/** + * @brief Ensure that UART communication mode is valid. + * @param __MODE__ UART communication mode. + * @retval SET (__MODE__ is valid) or RESET (__MODE__ is invalid) + */ +#define IS_UART_MODE(__MODE__) ((((__MODE__) & (~((uint32_t)(UART_MODE_TX_RX)))) == 0x00U) && ((__MODE__) != 0x00U)) + +/** + * @brief Ensure that UART state is valid. + * @param __STATE__ UART state. 
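
/* Editor's note (not part of this patch): a worked example of the divider macros
 * above. Assuming a UART kernel clock of 108 MHz (the real value depends on the
 * RCC/clock-source configuration) and 115200 baud:
 *   UART_DIV_SAMPLING16(108000000U, 115200U) = (108000000 + 57600) / 115200
 *                                            = 938   (0x03AA written to BRR)
 *   UART_DIV_SAMPLING8 (108000000U, 115200U) = (216000000 + 57600) / 115200
 *                                            = 1875
 * Both are rounded integer divisions; the "+ __BAUD__/2" term provides the rounding. */
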
+ * @retval SET (__STATE__ is valid) or RESET (__STATE__ is invalid) + */ +#define IS_UART_STATE(__STATE__) (((__STATE__) == UART_STATE_DISABLE) || \ + ((__STATE__) == UART_STATE_ENABLE)) + +/** + * @brief Ensure that UART oversampling is valid. + * @param __SAMPLING__ UART oversampling. + * @retval SET (__SAMPLING__ is valid) or RESET (__SAMPLING__ is invalid) + */ +#define IS_UART_OVERSAMPLING(__SAMPLING__) (((__SAMPLING__) == UART_OVERSAMPLING_16) || \ + ((__SAMPLING__) == UART_OVERSAMPLING_8)) + +/** + * @brief Ensure that UART frame sampling is valid. + * @param __ONEBIT__ UART frame sampling. + * @retval SET (__ONEBIT__ is valid) or RESET (__ONEBIT__ is invalid) + */ +#define IS_UART_ONE_BIT_SAMPLE(__ONEBIT__) (((__ONEBIT__) == UART_ONE_BIT_SAMPLE_DISABLE) || \ + ((__ONEBIT__) == UART_ONE_BIT_SAMPLE_ENABLE)) + +/** + * @brief Ensure that UART auto Baud rate detection mode is valid. + * @param __MODE__ UART auto Baud rate detection mode. + * @retval SET (__MODE__ is valid) or RESET (__MODE__ is invalid) + */ +#define IS_UART_ADVFEATURE_AUTOBAUDRATEMODE(__MODE__) (((__MODE__) == UART_ADVFEATURE_AUTOBAUDRATE_ONSTARTBIT) || \ + ((__MODE__) == UART_ADVFEATURE_AUTOBAUDRATE_ONFALLINGEDGE) || \ + ((__MODE__) == UART_ADVFEATURE_AUTOBAUDRATE_ON0X7FFRAME) || \ + ((__MODE__) == UART_ADVFEATURE_AUTOBAUDRATE_ON0X55FRAME)) + +/** + * @brief Ensure that UART receiver timeout setting is valid. + * @param __TIMEOUT__ UART receiver timeout setting. + * @retval SET (__TIMEOUT__ is valid) or RESET (__TIMEOUT__ is invalid) + */ +#define IS_UART_RECEIVER_TIMEOUT(__TIMEOUT__) (((__TIMEOUT__) == UART_RECEIVER_TIMEOUT_DISABLE) || \ + ((__TIMEOUT__) == UART_RECEIVER_TIMEOUT_ENABLE)) + +/** @brief Check the receiver timeout value. + * @note The maximum UART receiver timeout value is 0xFFFFFF. + * @param __TIMEOUTVALUE__ receiver timeout value. + * @retval Test result (TRUE or FALSE) + */ +#define IS_UART_RECEIVER_TIMEOUT_VALUE(__TIMEOUTVALUE__) ((__TIMEOUTVALUE__) <= 0xFFFFFFU) + +/** + * @brief Ensure that UART LIN state is valid. + * @param __LIN__ UART LIN state. + * @retval SET (__LIN__ is valid) or RESET (__LIN__ is invalid) + */ +#define IS_UART_LIN(__LIN__) (((__LIN__) == UART_LIN_DISABLE) || \ + ((__LIN__) == UART_LIN_ENABLE)) + +/** + * @brief Ensure that UART LIN break detection length is valid. + * @param __LENGTH__ UART LIN break detection length. + * @retval SET (__LENGTH__ is valid) or RESET (__LENGTH__ is invalid) + */ +#define IS_UART_LIN_BREAK_DETECT_LENGTH(__LENGTH__) (((__LENGTH__) == UART_LINBREAKDETECTLENGTH_10B) || \ + ((__LENGTH__) == UART_LINBREAKDETECTLENGTH_11B)) + +/** + * @brief Ensure that UART DMA TX state is valid. + * @param __DMATX__ UART DMA TX state. + * @retval SET (__DMATX__ is valid) or RESET (__DMATX__ is invalid) + */ +#define IS_UART_DMA_TX(__DMATX__) (((__DMATX__) == UART_DMA_TX_DISABLE) || \ + ((__DMATX__) == UART_DMA_TX_ENABLE)) + +/** + * @brief Ensure that UART DMA RX state is valid. + * @param __DMARX__ UART DMA RX state. + * @retval SET (__DMARX__ is valid) or RESET (__DMARX__ is invalid) + */ +#define IS_UART_DMA_RX(__DMARX__) (((__DMARX__) == UART_DMA_RX_DISABLE) || \ + ((__DMARX__) == UART_DMA_RX_ENABLE)) + +/** + * @brief Ensure that UART half-duplex state is valid. + * @param __HDSEL__ UART half-duplex state. 
+ * @retval SET (__HDSEL__ is valid) or RESET (__HDSEL__ is invalid) + */ +#define IS_UART_HALF_DUPLEX(__HDSEL__) (((__HDSEL__) == UART_HALF_DUPLEX_DISABLE) || \ + ((__HDSEL__) == UART_HALF_DUPLEX_ENABLE)) + +/** + * @brief Ensure that UART wake-up method is valid. + * @param __WAKEUP__ UART wake-up method . + * @retval SET (__WAKEUP__ is valid) or RESET (__WAKEUP__ is invalid) + */ +#define IS_UART_WAKEUPMETHOD(__WAKEUP__) (((__WAKEUP__) == UART_WAKEUPMETHOD_IDLELINE) || \ + ((__WAKEUP__) == UART_WAKEUPMETHOD_ADDRESSMARK)) + +/** + * @brief Ensure that UART request parameter is valid. + * @param __PARAM__ UART request parameter. + * @retval SET (__PARAM__ is valid) or RESET (__PARAM__ is invalid) + */ +#define IS_UART_REQUEST_PARAMETER(__PARAM__) (((__PARAM__) == UART_AUTOBAUD_REQUEST) || \ + ((__PARAM__) == UART_SENDBREAK_REQUEST) || \ + ((__PARAM__) == UART_MUTE_MODE_REQUEST) || \ + ((__PARAM__) == UART_RXDATA_FLUSH_REQUEST) || \ + ((__PARAM__) == UART_TXDATA_FLUSH_REQUEST)) + +/** + * @brief Ensure that UART advanced features initialization is valid. + * @param __INIT__ UART advanced features initialization. + * @retval SET (__INIT__ is valid) or RESET (__INIT__ is invalid) + */ +#define IS_UART_ADVFEATURE_INIT(__INIT__) ((__INIT__) <= (UART_ADVFEATURE_NO_INIT | \ + UART_ADVFEATURE_TXINVERT_INIT | \ + UART_ADVFEATURE_RXINVERT_INIT | \ + UART_ADVFEATURE_DATAINVERT_INIT | \ + UART_ADVFEATURE_SWAP_INIT | \ + UART_ADVFEATURE_RXOVERRUNDISABLE_INIT | \ + UART_ADVFEATURE_DMADISABLEONERROR_INIT | \ + UART_ADVFEATURE_AUTOBAUDRATE_INIT | \ + UART_ADVFEATURE_MSBFIRST_INIT)) + +/** + * @brief Ensure that UART frame TX inversion setting is valid. + * @param __TXINV__ UART frame TX inversion setting. + * @retval SET (__TXINV__ is valid) or RESET (__TXINV__ is invalid) + */ +#define IS_UART_ADVFEATURE_TXINV(__TXINV__) (((__TXINV__) == UART_ADVFEATURE_TXINV_DISABLE) || \ + ((__TXINV__) == UART_ADVFEATURE_TXINV_ENABLE)) + +/** + * @brief Ensure that UART frame RX inversion setting is valid. + * @param __RXINV__ UART frame RX inversion setting. + * @retval SET (__RXINV__ is valid) or RESET (__RXINV__ is invalid) + */ +#define IS_UART_ADVFEATURE_RXINV(__RXINV__) (((__RXINV__) == UART_ADVFEATURE_RXINV_DISABLE) || \ + ((__RXINV__) == UART_ADVFEATURE_RXINV_ENABLE)) + +/** + * @brief Ensure that UART frame data inversion setting is valid. + * @param __DATAINV__ UART frame data inversion setting. + * @retval SET (__DATAINV__ is valid) or RESET (__DATAINV__ is invalid) + */ +#define IS_UART_ADVFEATURE_DATAINV(__DATAINV__) (((__DATAINV__) == UART_ADVFEATURE_DATAINV_DISABLE) || \ + ((__DATAINV__) == UART_ADVFEATURE_DATAINV_ENABLE)) + +/** + * @brief Ensure that UART frame RX/TX pins swap setting is valid. + * @param __SWAP__ UART frame RX/TX pins swap setting. + * @retval SET (__SWAP__ is valid) or RESET (__SWAP__ is invalid) + */ +#define IS_UART_ADVFEATURE_SWAP(__SWAP__) (((__SWAP__) == UART_ADVFEATURE_SWAP_DISABLE) || \ + ((__SWAP__) == UART_ADVFEATURE_SWAP_ENABLE)) + +/** + * @brief Ensure that UART frame overrun setting is valid. + * @param __OVERRUN__ UART frame overrun setting. + * @retval SET (__OVERRUN__ is valid) or RESET (__OVERRUN__ is invalid) + */ +#define IS_UART_OVERRUN(__OVERRUN__) (((__OVERRUN__) == UART_ADVFEATURE_OVERRUN_ENABLE) || \ + ((__OVERRUN__) == UART_ADVFEATURE_OVERRUN_DISABLE)) + +/** + * @brief Ensure that UART auto Baud rate state is valid. + * @param __AUTOBAUDRATE__ UART auto Baud rate state. 
+ * @retval SET (__AUTOBAUDRATE__ is valid) or RESET (__AUTOBAUDRATE__ is invalid) + */ +#define IS_UART_ADVFEATURE_AUTOBAUDRATE(__AUTOBAUDRATE__) (((__AUTOBAUDRATE__) == \ + UART_ADVFEATURE_AUTOBAUDRATE_DISABLE) || \ + ((__AUTOBAUDRATE__) == UART_ADVFEATURE_AUTOBAUDRATE_ENABLE)) + +/** + * @brief Ensure that UART DMA enabling or disabling on error setting is valid. + * @param __DMA__ UART DMA enabling or disabling on error setting. + * @retval SET (__DMA__ is valid) or RESET (__DMA__ is invalid) + */ +#define IS_UART_ADVFEATURE_DMAONRXERROR(__DMA__) (((__DMA__) == UART_ADVFEATURE_DMA_ENABLEONRXERROR) || \ + ((__DMA__) == UART_ADVFEATURE_DMA_DISABLEONRXERROR)) + +/** + * @brief Ensure that UART frame MSB first setting is valid. + * @param __MSBFIRST__ UART frame MSB first setting. + * @retval SET (__MSBFIRST__ is valid) or RESET (__MSBFIRST__ is invalid) + */ +#define IS_UART_ADVFEATURE_MSBFIRST(__MSBFIRST__) (((__MSBFIRST__) == UART_ADVFEATURE_MSBFIRST_DISABLE) || \ + ((__MSBFIRST__) == UART_ADVFEATURE_MSBFIRST_ENABLE)) + +#if defined(USART_CR1_UESM) +/** + * @brief Ensure that UART stop mode state is valid. + * @param __STOPMODE__ UART stop mode state. + * @retval SET (__STOPMODE__ is valid) or RESET (__STOPMODE__ is invalid) + */ +#define IS_UART_ADVFEATURE_STOPMODE(__STOPMODE__) (((__STOPMODE__) == UART_ADVFEATURE_STOPMODE_DISABLE) || \ + ((__STOPMODE__) == UART_ADVFEATURE_STOPMODE_ENABLE)) + +#endif /* USART_CR1_UESM */ +/** + * @brief Ensure that UART mute mode state is valid. + * @param __MUTE__ UART mute mode state. + * @retval SET (__MUTE__ is valid) or RESET (__MUTE__ is invalid) + */ +#define IS_UART_MUTE_MODE(__MUTE__) (((__MUTE__) == UART_ADVFEATURE_MUTEMODE_DISABLE) || \ + ((__MUTE__) == UART_ADVFEATURE_MUTEMODE_ENABLE)) +#if defined(USART_CR1_UESM) + +/** + * @brief Ensure that UART wake-up selection is valid. + * @param __WAKE__ UART wake-up selection. + * @retval SET (__WAKE__ is valid) or RESET (__WAKE__ is invalid) + */ +#if defined(USART_CR3_WUFIE) +#define IS_UART_WAKEUP_SELECTION(__WAKE__) (((__WAKE__) == UART_WAKEUP_ON_ADDRESS) || \ + ((__WAKE__) == UART_WAKEUP_ON_STARTBIT) || \ + ((__WAKE__) == UART_WAKEUP_ON_READDATA_NONEMPTY)) +#else +#define IS_UART_WAKEUP_SELECTION(__WAKE__) (((__WAKE__) == UART_WAKEUP_ON_ADDRESS) || \ + ((__WAKE__) == UART_WAKEUP_ON_READDATA_NONEMPTY)) +#endif /* USART_CR3_WUFIE */ +#endif /* USART_CR1_UESM */ + +/** + * @brief Ensure that UART driver enable polarity is valid. + * @param __POLARITY__ UART driver enable polarity. 
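
/* Editor's sketch (not part of this patch): the IS_UART_ADVFEATURE_* checks above
 * validate the handle's AdvancedInit field. A hypothetical setup that swaps the
 * TX/RX pins and inverts the RX line before (re)initialising the peripheral,
 * assuming the Init fields are already filled in: */
static void uart_configure_advanced_features(UART_HandleTypeDef *huart)
{
  huart->AdvancedInit.AdvFeatureInit   = UART_ADVFEATURE_SWAP_INIT |
                                         UART_ADVFEATURE_RXINVERT_INIT;
  huart->AdvancedInit.Swap             = UART_ADVFEATURE_SWAP_ENABLE;
  huart->AdvancedInit.RxPinLevelInvert = UART_ADVFEATURE_RXINV_ENABLE;

  (void)HAL_UART_Init(huart);  /* UART_AdvFeatureConfig() applies these fields */
}
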
+ * @retval SET (__POLARITY__ is valid) or RESET (__POLARITY__ is invalid) + */ +#define IS_UART_DE_POLARITY(__POLARITY__) (((__POLARITY__) == UART_DE_POLARITY_HIGH) || \ + ((__POLARITY__) == UART_DE_POLARITY_LOW)) + + +/** + * @} + */ + +/* Include UART HAL Extended module */ +#include "stm32f7xx_hal_uart_ex.h" + +/* Exported functions --------------------------------------------------------*/ +/** @addtogroup UART_Exported_Functions UART Exported Functions + * @{ + */ + +/** @addtogroup UART_Exported_Functions_Group1 Initialization and de-initialization functions + * @{ + */ + +/* Initialization and de-initialization functions ****************************/ +HAL_StatusTypeDef HAL_UART_Init(UART_HandleTypeDef *huart); +HAL_StatusTypeDef HAL_HalfDuplex_Init(UART_HandleTypeDef *huart); +HAL_StatusTypeDef HAL_LIN_Init(UART_HandleTypeDef *huart, uint32_t BreakDetectLength); +HAL_StatusTypeDef HAL_MultiProcessor_Init(UART_HandleTypeDef *huart, uint8_t Address, uint32_t WakeUpMethod); +HAL_StatusTypeDef HAL_UART_DeInit(UART_HandleTypeDef *huart); +void HAL_UART_MspInit(UART_HandleTypeDef *huart); +void HAL_UART_MspDeInit(UART_HandleTypeDef *huart); + +/* Callbacks Register/UnRegister functions ***********************************/ +#if (USE_HAL_UART_REGISTER_CALLBACKS == 1) +HAL_StatusTypeDef HAL_UART_RegisterCallback(UART_HandleTypeDef *huart, HAL_UART_CallbackIDTypeDef CallbackID, + pUART_CallbackTypeDef pCallback); +HAL_StatusTypeDef HAL_UART_UnRegisterCallback(UART_HandleTypeDef *huart, HAL_UART_CallbackIDTypeDef CallbackID); + +HAL_StatusTypeDef HAL_UART_RegisterRxEventCallback(UART_HandleTypeDef *huart, pUART_RxEventCallbackTypeDef pCallback); +HAL_StatusTypeDef HAL_UART_UnRegisterRxEventCallback(UART_HandleTypeDef *huart); +#endif /* USE_HAL_UART_REGISTER_CALLBACKS */ + +/** + * @} + */ + +/** @addtogroup UART_Exported_Functions_Group2 IO operation functions + * @{ + */ + +/* IO operation functions *****************************************************/ +HAL_StatusTypeDef HAL_UART_Transmit(UART_HandleTypeDef *huart, const uint8_t *pData, uint16_t Size, uint32_t Timeout); +HAL_StatusTypeDef HAL_UART_Receive(UART_HandleTypeDef *huart, uint8_t *pData, uint16_t Size, uint32_t Timeout); +HAL_StatusTypeDef HAL_UART_Transmit_IT(UART_HandleTypeDef *huart, const uint8_t *pData, uint16_t Size); +HAL_StatusTypeDef HAL_UART_Receive_IT(UART_HandleTypeDef *huart, uint8_t *pData, uint16_t Size); +HAL_StatusTypeDef HAL_UART_Transmit_DMA(UART_HandleTypeDef *huart, const uint8_t *pData, uint16_t Size); +HAL_StatusTypeDef HAL_UART_Receive_DMA(UART_HandleTypeDef *huart, uint8_t *pData, uint16_t Size); +HAL_StatusTypeDef HAL_UART_DMAPause(UART_HandleTypeDef *huart); +HAL_StatusTypeDef HAL_UART_DMAResume(UART_HandleTypeDef *huart); +HAL_StatusTypeDef HAL_UART_DMAStop(UART_HandleTypeDef *huart); +/* Transfer Abort functions */ +HAL_StatusTypeDef HAL_UART_Abort(UART_HandleTypeDef *huart); +HAL_StatusTypeDef HAL_UART_AbortTransmit(UART_HandleTypeDef *huart); +HAL_StatusTypeDef HAL_UART_AbortReceive(UART_HandleTypeDef *huart); +HAL_StatusTypeDef HAL_UART_Abort_IT(UART_HandleTypeDef *huart); +HAL_StatusTypeDef HAL_UART_AbortTransmit_IT(UART_HandleTypeDef *huart); +HAL_StatusTypeDef HAL_UART_AbortReceive_IT(UART_HandleTypeDef *huart); + +void HAL_UART_IRQHandler(UART_HandleTypeDef *huart); +void HAL_UART_TxHalfCpltCallback(UART_HandleTypeDef *huart); +void HAL_UART_TxCpltCallback(UART_HandleTypeDef *huart); +void HAL_UART_RxHalfCpltCallback(UART_HandleTypeDef *huart); +void HAL_UART_RxCpltCallback(UART_HandleTypeDef 
*huart); +void HAL_UART_ErrorCallback(UART_HandleTypeDef *huart); +void HAL_UART_AbortCpltCallback(UART_HandleTypeDef *huart); +void HAL_UART_AbortTransmitCpltCallback(UART_HandleTypeDef *huart); +void HAL_UART_AbortReceiveCpltCallback(UART_HandleTypeDef *huart); + +void HAL_UARTEx_RxEventCallback(UART_HandleTypeDef *huart, uint16_t Size); + +/** + * @} + */ + +/** @addtogroup UART_Exported_Functions_Group3 Peripheral Control functions + * @{ + */ + +/* Peripheral Control functions ************************************************/ +void HAL_UART_ReceiverTimeout_Config(UART_HandleTypeDef *huart, uint32_t TimeoutValue); +HAL_StatusTypeDef HAL_UART_EnableReceiverTimeout(UART_HandleTypeDef *huart); +HAL_StatusTypeDef HAL_UART_DisableReceiverTimeout(UART_HandleTypeDef *huart); + +HAL_StatusTypeDef HAL_LIN_SendBreak(UART_HandleTypeDef *huart); +HAL_StatusTypeDef HAL_MultiProcessor_EnableMuteMode(UART_HandleTypeDef *huart); +HAL_StatusTypeDef HAL_MultiProcessor_DisableMuteMode(UART_HandleTypeDef *huart); +void HAL_MultiProcessor_EnterMuteMode(UART_HandleTypeDef *huart); +HAL_StatusTypeDef HAL_HalfDuplex_EnableTransmitter(UART_HandleTypeDef *huart); +HAL_StatusTypeDef HAL_HalfDuplex_EnableReceiver(UART_HandleTypeDef *huart); + +/** + * @} + */ + +/** @addtogroup UART_Exported_Functions_Group4 Peripheral State and Error functions + * @{ + */ + +/* Peripheral State and Errors functions **************************************************/ +HAL_UART_StateTypeDef HAL_UART_GetState(UART_HandleTypeDef *huart); +uint32_t HAL_UART_GetError(UART_HandleTypeDef *huart); + +/** + * @} + */ + +/** + * @} + */ + +/* Private functions -----------------------------------------------------------*/ +/** @addtogroup UART_Private_Functions UART Private Functions + * @{ + */ +#if (USE_HAL_UART_REGISTER_CALLBACKS == 1) +void UART_InitCallbacksToDefault(UART_HandleTypeDef *huart); +#endif /* USE_HAL_UART_REGISTER_CALLBACKS */ +HAL_StatusTypeDef UART_SetConfig(UART_HandleTypeDef *huart); +HAL_StatusTypeDef UART_CheckIdleState(UART_HandleTypeDef *huart); +HAL_StatusTypeDef UART_WaitOnFlagUntilTimeout(UART_HandleTypeDef *huart, uint32_t Flag, FlagStatus Status, + uint32_t Tickstart, uint32_t Timeout); +void UART_AdvFeatureConfig(UART_HandleTypeDef *huart); +HAL_StatusTypeDef UART_Start_Receive_IT(UART_HandleTypeDef *huart, uint8_t *pData, uint16_t Size); +HAL_StatusTypeDef UART_Start_Receive_DMA(UART_HandleTypeDef *huart, uint8_t *pData, uint16_t Size); + +/** + * @} + */ + +/* Private variables -----------------------------------------------------------*/ +/** + * @} + */ + +/** + * @} + */ + +#ifdef __cplusplus +} +#endif + +#endif /* STM32F7xx_HAL_UART_H */ + diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_uart_ex.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_uart_ex.h new file mode 100644 index 0000000..7b3b374 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_hal_uart_ex.h @@ -0,0 +1,428 @@ +/** + ****************************************************************************** + * @file stm32f7xx_hal_uart_ex.h + * @author MCD Application Team + * @brief Header file of UART HAL Extended module. + ****************************************************************************** + * @attention + * + * Copyright (c) 2017 STMicroelectronics. + * All rights reserved. 
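
/* Editor's sketch (not part of this patch): minimal use of the blocking and
 * interrupt-driven I/O prototypes declared above; `rx_byte` is a placeholder
 * and the blocking echo inside the callback is only for illustration. */
static uint8_t rx_byte;

static void uart_echo_start(UART_HandleTypeDef *huart)
{
  static const uint8_t banner[] = "ready\r\n";

  (void)HAL_UART_Transmit(huart, banner, sizeof(banner) - 1U, 100U);  /* blocking, 100 ms timeout */
  (void)HAL_UART_Receive_IT(huart, &rx_byte, 1U);                     /* arm 1-byte IT reception  */
}

/* Weak callback re-implemented by the application; called once per completed reception. */
void HAL_UART_RxCpltCallback(UART_HandleTypeDef *huart)
{
  (void)HAL_UART_Transmit(huart, &rx_byte, 1U, 10U);                  /* echo the byte back */
  (void)HAL_UART_Receive_IT(huart, &rx_byte, 1U);                     /* re-arm reception   */
}
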
+ * + * This software is licensed under terms that can be found in the LICENSE file + * in the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. + * + ****************************************************************************** + */ + +/* Define to prevent recursive inclusion -------------------------------------*/ +#ifndef STM32F7xx_HAL_UART_EX_H +#define STM32F7xx_HAL_UART_EX_H + +#ifdef __cplusplus +extern "C" { +#endif + +/* Includes ------------------------------------------------------------------*/ +#include "stm32f7xx_hal_def.h" + +/** @addtogroup STM32F7xx_HAL_Driver + * @{ + */ + +/** @addtogroup UARTEx + * @{ + */ + +/* Exported types ------------------------------------------------------------*/ +/** @defgroup UARTEx_Exported_Types UARTEx Exported Types + * @{ + */ + +#if defined(USART_CR1_UESM) +/** + * @brief UART wake up from stop mode parameters + */ +typedef struct +{ + uint32_t WakeUpEvent; /*!< Specifies which event will activate the Wakeup from Stop mode flag (WUF). + This parameter can be a value of @ref UART_WakeUp_from_Stop_Selection. + If set to UART_WAKEUP_ON_ADDRESS, the two other fields below must + be filled up. */ + + uint16_t AddressLength; /*!< Specifies whether the address is 4 or 7-bit long. + This parameter can be a value of @ref UARTEx_WakeUp_Address_Length. */ + + uint8_t Address; /*!< UART/USART node address (7-bit long max). */ +} UART_WakeUpTypeDef; + +#endif /* USART_CR1_UESM */ +/** + * @} + */ + +/* Exported constants --------------------------------------------------------*/ +/** @defgroup UARTEx_Exported_Constants UARTEx Exported Constants + * @{ + */ + +/** @defgroup UARTEx_Word_Length UARTEx Word Length + * @{ + */ +#define UART_WORDLENGTH_7B USART_CR1_M1 /*!< 7-bit long UART frame */ +#define UART_WORDLENGTH_8B 0x00000000U /*!< 8-bit long UART frame */ +#define UART_WORDLENGTH_9B USART_CR1_M0 /*!< 9-bit long UART frame */ +/** + * @} + */ + +/** @defgroup UARTEx_WakeUp_Address_Length UARTEx WakeUp Address Length + * @{ + */ +#define UART_ADDRESS_DETECT_4B 0x00000000U /*!< 4-bit long wake-up address */ +#define UART_ADDRESS_DETECT_7B USART_CR2_ADDM7 /*!< 7-bit long wake-up address */ +/** + * @} + */ + +/** + * @} + */ + +/* Exported macros -----------------------------------------------------------*/ +/* Exported functions --------------------------------------------------------*/ +/** @addtogroup UARTEx_Exported_Functions + * @{ + */ + +/** @addtogroup UARTEx_Exported_Functions_Group1 + * @{ + */ + +/* Initialization and de-initialization functions ****************************/ +HAL_StatusTypeDef HAL_RS485Ex_Init(UART_HandleTypeDef *huart, uint32_t Polarity, uint32_t AssertionTime, + uint32_t DeassertionTime); + +/** + * @} + */ + +/** @addtogroup UARTEx_Exported_Functions_Group2 + * @{ + */ + +#if defined(USART_CR1_UESM) +void HAL_UARTEx_WakeupCallback(UART_HandleTypeDef *huart); + +#endif /* USART_CR1_UESM */ +/** + * @} + */ + +/** @addtogroup UARTEx_Exported_Functions_Group3 + * @{ + */ + +/* Peripheral Control functions **********************************************/ +#if defined(USART_CR1_UESM) +HAL_StatusTypeDef HAL_UARTEx_StopModeWakeUpSourceConfig(UART_HandleTypeDef *huart, UART_WakeUpTypeDef WakeUpSelection); +HAL_StatusTypeDef HAL_UARTEx_EnableStopMode(UART_HandleTypeDef *huart); +HAL_StatusTypeDef HAL_UARTEx_DisableStopMode(UART_HandleTypeDef *huart); + +#endif/* USART_CR1_UESM */ +#if defined(USART_CR3_UCESM) +HAL_StatusTypeDef 
HAL_UARTEx_EnableClockStopMode(UART_HandleTypeDef *huart); +HAL_StatusTypeDef HAL_UARTEx_DisableClockStopMode(UART_HandleTypeDef *huart); + +#endif /* USART_CR3_UCESM */ +HAL_StatusTypeDef HAL_MultiProcessorEx_AddressLength_Set(UART_HandleTypeDef *huart, uint32_t AddressLength); + + +HAL_StatusTypeDef HAL_UARTEx_ReceiveToIdle(UART_HandleTypeDef *huart, uint8_t *pData, uint16_t Size, uint16_t *RxLen, + uint32_t Timeout); +HAL_StatusTypeDef HAL_UARTEx_ReceiveToIdle_IT(UART_HandleTypeDef *huart, uint8_t *pData, uint16_t Size); +HAL_StatusTypeDef HAL_UARTEx_ReceiveToIdle_DMA(UART_HandleTypeDef *huart, uint8_t *pData, uint16_t Size); + + +/** + * @} + */ + +/** + * @} + */ + +/* Private macros ------------------------------------------------------------*/ +/** @defgroup UARTEx_Private_Macros UARTEx Private Macros + * @{ + */ + +/** @brief Report the UART clock source. + * @param __HANDLE__ specifies the UART Handle. + * @param __CLOCKSOURCE__ output variable. + * @retval UART clocking source, written in __CLOCKSOURCE__. + */ +#define UART_GETCLOCKSOURCE(__HANDLE__,__CLOCKSOURCE__) \ + do { \ + if((__HANDLE__)->Instance == USART1) \ + { \ + switch(__HAL_RCC_GET_USART1_SOURCE()) \ + { \ + case RCC_USART1CLKSOURCE_PCLK2: \ + (__CLOCKSOURCE__) = UART_CLOCKSOURCE_PCLK2; \ + break; \ + case RCC_USART1CLKSOURCE_HSI: \ + (__CLOCKSOURCE__) = UART_CLOCKSOURCE_HSI; \ + break; \ + case RCC_USART1CLKSOURCE_SYSCLK: \ + (__CLOCKSOURCE__) = UART_CLOCKSOURCE_SYSCLK; \ + break; \ + case RCC_USART1CLKSOURCE_LSE: \ + (__CLOCKSOURCE__) = UART_CLOCKSOURCE_LSE; \ + break; \ + default: \ + (__CLOCKSOURCE__) = UART_CLOCKSOURCE_UNDEFINED; \ + break; \ + } \ + } \ + else if((__HANDLE__)->Instance == USART2) \ + { \ + switch(__HAL_RCC_GET_USART2_SOURCE()) \ + { \ + case RCC_USART2CLKSOURCE_PCLK1: \ + (__CLOCKSOURCE__) = UART_CLOCKSOURCE_PCLK1; \ + break; \ + case RCC_USART2CLKSOURCE_HSI: \ + (__CLOCKSOURCE__) = UART_CLOCKSOURCE_HSI; \ + break; \ + case RCC_USART2CLKSOURCE_SYSCLK: \ + (__CLOCKSOURCE__) = UART_CLOCKSOURCE_SYSCLK; \ + break; \ + case RCC_USART2CLKSOURCE_LSE: \ + (__CLOCKSOURCE__) = UART_CLOCKSOURCE_LSE; \ + break; \ + default: \ + (__CLOCKSOURCE__) = UART_CLOCKSOURCE_UNDEFINED; \ + break; \ + } \ + } \ + else if((__HANDLE__)->Instance == USART3) \ + { \ + switch(__HAL_RCC_GET_USART3_SOURCE()) \ + { \ + case RCC_USART3CLKSOURCE_PCLK1: \ + (__CLOCKSOURCE__) = UART_CLOCKSOURCE_PCLK1; \ + break; \ + case RCC_USART3CLKSOURCE_HSI: \ + (__CLOCKSOURCE__) = UART_CLOCKSOURCE_HSI; \ + break; \ + case RCC_USART3CLKSOURCE_SYSCLK: \ + (__CLOCKSOURCE__) = UART_CLOCKSOURCE_SYSCLK; \ + break; \ + case RCC_USART3CLKSOURCE_LSE: \ + (__CLOCKSOURCE__) = UART_CLOCKSOURCE_LSE; \ + break; \ + default: \ + (__CLOCKSOURCE__) = UART_CLOCKSOURCE_UNDEFINED; \ + break; \ + } \ + } \ + else if((__HANDLE__)->Instance == UART4) \ + { \ + switch(__HAL_RCC_GET_UART4_SOURCE()) \ + { \ + case RCC_UART4CLKSOURCE_PCLK1: \ + (__CLOCKSOURCE__) = UART_CLOCKSOURCE_PCLK1; \ + break; \ + case RCC_UART4CLKSOURCE_HSI: \ + (__CLOCKSOURCE__) = UART_CLOCKSOURCE_HSI; \ + break; \ + case RCC_UART4CLKSOURCE_SYSCLK: \ + (__CLOCKSOURCE__) = UART_CLOCKSOURCE_SYSCLK; \ + break; \ + case RCC_UART4CLKSOURCE_LSE: \ + (__CLOCKSOURCE__) = UART_CLOCKSOURCE_LSE; \ + break; \ + default: \ + (__CLOCKSOURCE__) = UART_CLOCKSOURCE_UNDEFINED; \ + break; \ + } \ + } \ + else if ((__HANDLE__)->Instance == UART5) \ + { \ + switch(__HAL_RCC_GET_UART5_SOURCE()) \ + { \ + case RCC_UART5CLKSOURCE_PCLK1: \ + (__CLOCKSOURCE__) = UART_CLOCKSOURCE_PCLK1; \ + break; \ + case 
RCC_UART5CLKSOURCE_HSI: \ + (__CLOCKSOURCE__) = UART_CLOCKSOURCE_HSI; \ + break; \ + case RCC_UART5CLKSOURCE_SYSCLK: \ + (__CLOCKSOURCE__) = UART_CLOCKSOURCE_SYSCLK; \ + break; \ + case RCC_UART5CLKSOURCE_LSE: \ + (__CLOCKSOURCE__) = UART_CLOCKSOURCE_LSE; \ + break; \ + default: \ + (__CLOCKSOURCE__) = UART_CLOCKSOURCE_UNDEFINED; \ + break; \ + } \ + } \ + else if((__HANDLE__)->Instance == USART6) \ + { \ + switch(__HAL_RCC_GET_USART6_SOURCE()) \ + { \ + case RCC_USART6CLKSOURCE_PCLK2: \ + (__CLOCKSOURCE__) = UART_CLOCKSOURCE_PCLK2; \ + break; \ + case RCC_USART6CLKSOURCE_HSI: \ + (__CLOCKSOURCE__) = UART_CLOCKSOURCE_HSI; \ + break; \ + case RCC_USART6CLKSOURCE_SYSCLK: \ + (__CLOCKSOURCE__) = UART_CLOCKSOURCE_SYSCLK; \ + break; \ + case RCC_USART6CLKSOURCE_LSE: \ + (__CLOCKSOURCE__) = UART_CLOCKSOURCE_LSE; \ + break; \ + default: \ + (__CLOCKSOURCE__) = UART_CLOCKSOURCE_UNDEFINED; \ + break; \ + } \ + } \ + else if ((__HANDLE__)->Instance == UART7) \ + { \ + switch(__HAL_RCC_GET_UART7_SOURCE()) \ + { \ + case RCC_UART7CLKSOURCE_PCLK1: \ + (__CLOCKSOURCE__) = UART_CLOCKSOURCE_PCLK1; \ + break; \ + case RCC_UART7CLKSOURCE_HSI: \ + (__CLOCKSOURCE__) = UART_CLOCKSOURCE_HSI; \ + break; \ + case RCC_UART7CLKSOURCE_SYSCLK: \ + (__CLOCKSOURCE__) = UART_CLOCKSOURCE_SYSCLK; \ + break; \ + case RCC_UART7CLKSOURCE_LSE: \ + (__CLOCKSOURCE__) = UART_CLOCKSOURCE_LSE; \ + break; \ + default: \ + (__CLOCKSOURCE__) = UART_CLOCKSOURCE_UNDEFINED; \ + break; \ + } \ + } \ + else if ((__HANDLE__)->Instance == UART8) \ + { \ + switch(__HAL_RCC_GET_UART8_SOURCE()) \ + { \ + case RCC_UART8CLKSOURCE_PCLK1: \ + (__CLOCKSOURCE__) = UART_CLOCKSOURCE_PCLK1; \ + break; \ + case RCC_UART8CLKSOURCE_HSI: \ + (__CLOCKSOURCE__) = UART_CLOCKSOURCE_HSI; \ + break; \ + case RCC_UART8CLKSOURCE_SYSCLK: \ + (__CLOCKSOURCE__) = UART_CLOCKSOURCE_SYSCLK; \ + break; \ + case RCC_UART8CLKSOURCE_LSE: \ + (__CLOCKSOURCE__) = UART_CLOCKSOURCE_LSE; \ + break; \ + default: \ + (__CLOCKSOURCE__) = UART_CLOCKSOURCE_UNDEFINED; \ + break; \ + } \ + } \ + else \ + { \ + (__CLOCKSOURCE__) = UART_CLOCKSOURCE_UNDEFINED; \ + } \ + } while(0U) + +/** @brief Report the UART mask to apply to retrieve the received data + * according to the word length and to the parity bits activation. + * @note If PCE = 1, the parity bit is not included in the data extracted + * by the reception API(). + * This masking operation is not carried out in the case of + * DMA transfers. + * @param __HANDLE__ specifies the UART Handle. + * @retval None, the mask to apply to UART RDR register is stored in (__HANDLE__)->Mask field. + */ +#define UART_MASK_COMPUTATION(__HANDLE__) \ + do { \ + if ((__HANDLE__)->Init.WordLength == UART_WORDLENGTH_9B) \ + { \ + if ((__HANDLE__)->Init.Parity == UART_PARITY_NONE) \ + { \ + (__HANDLE__)->Mask = 0x01FFU ; \ + } \ + else \ + { \ + (__HANDLE__)->Mask = 0x00FFU ; \ + } \ + } \ + else if ((__HANDLE__)->Init.WordLength == UART_WORDLENGTH_8B) \ + { \ + if ((__HANDLE__)->Init.Parity == UART_PARITY_NONE) \ + { \ + (__HANDLE__)->Mask = 0x00FFU ; \ + } \ + else \ + { \ + (__HANDLE__)->Mask = 0x007FU ; \ + } \ + } \ + else if ((__HANDLE__)->Init.WordLength == UART_WORDLENGTH_7B) \ + { \ + if ((__HANDLE__)->Init.Parity == UART_PARITY_NONE) \ + { \ + (__HANDLE__)->Mask = 0x007FU ; \ + } \ + else \ + { \ + (__HANDLE__)->Mask = 0x003FU ; \ + } \ + } \ + else \ + { \ + (__HANDLE__)->Mask = 0x0000U; \ + } \ + } while(0U) + +/** + * @brief Ensure that UART frame length is valid. + * @param __LENGTH__ UART frame length. 
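
/* Editor's sketch (not part of this patch): receiving a frame of unknown length
 * with the ReceiveToIdle API declared above; the transfer completes either when
 * the buffer fills or when an IDLE gap is detected on the RX line. `rx_frame`
 * is a placeholder buffer. */
static uint8_t rx_frame[64];

static void uart_start_frame_reception(UART_HandleTypeDef *huart)
{
  (void)HAL_UARTEx_ReceiveToIdle_IT(huart, rx_frame, (uint16_t)sizeof(rx_frame));
}

/* Weak callback re-implemented by the application; Size is the number of bytes
 * actually received when the event fired. */
void HAL_UARTEx_RxEventCallback(UART_HandleTypeDef *huart, uint16_t Size)
{
  /* ... process rx_frame[0 .. Size-1], then re-arm ... */
  (void)HAL_UARTEx_ReceiveToIdle_IT(huart, rx_frame, (uint16_t)sizeof(rx_frame));
}
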
+ * @retval SET (__LENGTH__ is valid) or RESET (__LENGTH__ is invalid) + */ +#define IS_UART_WORD_LENGTH(__LENGTH__) (((__LENGTH__) == UART_WORDLENGTH_7B) || \ + ((__LENGTH__) == UART_WORDLENGTH_8B) || \ + ((__LENGTH__) == UART_WORDLENGTH_9B)) + +/** + * @brief Ensure that UART wake-up address length is valid. + * @param __ADDRESS__ UART wake-up address length. + * @retval SET (__ADDRESS__ is valid) or RESET (__ADDRESS__ is invalid) + */ +#define IS_UART_ADDRESSLENGTH_DETECT(__ADDRESS__) (((__ADDRESS__) == UART_ADDRESS_DETECT_4B) || \ + ((__ADDRESS__) == UART_ADDRESS_DETECT_7B)) + +/** + * @} + */ + +/* Private functions ---------------------------------------------------------*/ + +/** + * @} + */ + +/** + * @} + */ + +#ifdef __cplusplus +} +#endif + +#endif /* STM32F7xx_HAL_UART_EX_H */ + diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_ll_usb.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_ll_usb.h new file mode 100644 index 0000000..5091a2c --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Drivers/STM32F7xx_HAL_Driver/Inc/stm32f7xx_ll_usb.h @@ -0,0 +1,537 @@ +/** + ****************************************************************************** + * @file stm32f7xx_ll_usb.h + * @author MCD Application Team + * @brief Header file of USB Low Layer HAL module. + ****************************************************************************** + * @attention + * + * Copyright (c) 2017 STMicroelectronics. + * All rights reserved. + * + * This software is licensed under terms that can be found in the LICENSE file + * in the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. + * + ****************************************************************************** + */ + +/* Define to prevent recursive inclusion -------------------------------------*/ +#ifndef STM32F7xx_LL_USB_H +#define STM32F7xx_LL_USB_H + +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ + +/* Includes ------------------------------------------------------------------*/ +#include "stm32f7xx_hal_def.h" + +#if defined (USB_OTG_FS) || defined (USB_OTG_HS) +/** @addtogroup STM32F7xx_HAL_Driver + * @{ + */ + +/** @addtogroup USB_LL + * @{ + */ + +/* Exported types ------------------------------------------------------------*/ + +/** + * @brief USB Mode definition + */ +#if defined (USB_OTG_FS) || defined (USB_OTG_HS) + +typedef enum +{ + USB_DEVICE_MODE = 0, + USB_HOST_MODE = 1, + USB_DRD_MODE = 2 +} USB_OTG_ModeTypeDef; + +/** + * @brief URB States definition + */ +typedef enum +{ + URB_IDLE = 0, + URB_DONE, + URB_NOTREADY, + URB_NYET, + URB_ERROR, + URB_STALL +} USB_OTG_URBStateTypeDef; + +/** + * @brief Host channel States definition + */ +typedef enum +{ + HC_IDLE = 0, + HC_XFRC, + HC_HALTED, + HC_NAK, + HC_NYET, + HC_STALL, + HC_XACTERR, + HC_BBLERR, + HC_DATATGLERR +} USB_OTG_HCStateTypeDef; + + +/** + * @brief USB Instance Initialization Structure definition + */ +typedef struct +{ + uint32_t dev_endpoints; /*!< Device Endpoints number. + This parameter depends on the used USB core. + This parameter must be a number between Min_Data = 1 and Max_Data = 15 */ + + uint32_t Host_channels; /*!< Host Channels number. + This parameter Depends on the used USB core. + This parameter must be a number between Min_Data = 1 and Max_Data = 15 */ + + uint32_t speed; /*!< USB Core speed. 
+ This parameter can be any value of @ref PCD_Speed/HCD_Speed + (HCD_SPEED_xxx, HCD_SPEED_xxx) */ + + uint32_t dma_enable; /*!< Enable or disable of the USB embedded DMA used only for OTG HS. */ + + uint32_t ep0_mps; /*!< Set the Endpoint 0 Max Packet size. */ + + uint32_t phy_itface; /*!< Select the used PHY interface. + This parameter can be any value of @ref PCD_PHY_Module/HCD_PHY_Module */ + + uint32_t Sof_enable; /*!< Enable or disable the output of the SOF signal. */ + + uint32_t low_power_enable; /*!< Enable or disable the low power mode. */ + + uint32_t lpm_enable; /*!< Enable or disable Link Power Management. */ + + uint32_t battery_charging_enable; /*!< Enable or disable Battery charging. */ + + uint32_t vbus_sensing_enable; /*!< Enable or disable the VBUS Sensing feature. */ + + uint32_t use_dedicated_ep1; /*!< Enable or disable the use of the dedicated EP1 interrupt. */ + + uint32_t use_external_vbus; /*!< Enable or disable the use of the external VBUS. */ + +} USB_OTG_CfgTypeDef; + +typedef struct +{ + uint8_t num; /*!< Endpoint number + This parameter must be a number between Min_Data = 1 and Max_Data = 15 */ + + uint8_t is_in; /*!< Endpoint direction + This parameter must be a number between Min_Data = 0 and Max_Data = 1 */ + + uint8_t is_stall; /*!< Endpoint stall condition + This parameter must be a number between Min_Data = 0 and Max_Data = 1 */ + + uint8_t type; /*!< Endpoint type + This parameter can be any value of @ref USB_LL_EP_Type */ + + uint8_t data_pid_start; /*!< Initial data PID + This parameter must be a number between Min_Data = 0 and Max_Data = 1 */ + + uint8_t even_odd_frame; /*!< IFrame parity + This parameter must be a number between Min_Data = 0 and Max_Data = 1 */ + + uint16_t tx_fifo_num; /*!< Transmission FIFO number + This parameter must be a number between Min_Data = 1 and Max_Data = 15 */ + + uint32_t maxpacket; /*!< Endpoint Max packet size + This parameter must be a number between Min_Data = 0 and Max_Data = 64KB */ + + uint8_t *xfer_buff; /*!< Pointer to transfer buffer */ + + uint32_t dma_addr; /*!< 32 bits aligned transfer buffer address */ + + uint32_t xfer_len; /*!< Current transfer length */ + + uint32_t xfer_count; /*!< Partial transfer length in case of multi packet transfer */ +} USB_OTG_EPTypeDef; + +typedef struct +{ + uint8_t dev_addr; /*!< USB device address. + This parameter must be a number between Min_Data = 1 and Max_Data = 255 */ + + uint8_t ch_num; /*!< Host channel number. + This parameter must be a number between Min_Data = 1 and Max_Data = 15 */ + + uint8_t ep_num; /*!< Endpoint number. + This parameter must be a number between Min_Data = 1 and Max_Data = 15 */ + + uint8_t ep_is_in; /*!< Endpoint direction + This parameter must be a number between Min_Data = 0 and Max_Data = 1 */ + + uint8_t speed; /*!< USB Host Channel speed. + This parameter can be any value of @ref HCD_Device_Speed: + (HCD_DEVICE_SPEED_xxx) */ + + uint8_t do_ping; /*!< Enable or disable the use of the PING protocol for HS mode. */ + + uint8_t process_ping; /*!< Execute the PING protocol for HS mode. */ + + uint8_t ep_type; /*!< Endpoint Type. + This parameter can be any value of @ref USB_LL_EP_Type */ + + uint16_t max_packet; /*!< Endpoint Max packet size. + This parameter must be a number between Min_Data = 0 and Max_Data = 64KB */ + + uint8_t data_pid; /*!< Initial data PID. + This parameter must be a number between Min_Data = 0 and Max_Data = 1 */ + + uint8_t *xfer_buff; /*!< Pointer to transfer buffer. 
*/ + + uint32_t XferSize; /*!< OTG Channel transfer size. */ + + uint32_t xfer_len; /*!< Current transfer length. */ + + uint32_t xfer_count; /*!< Partial transfer length in case of multi packet transfer. */ + + uint8_t toggle_in; /*!< IN transfer current toggle flag. + This parameter must be a number between Min_Data = 0 and Max_Data = 1 */ + + uint8_t toggle_out; /*!< OUT transfer current toggle flag + This parameter must be a number between Min_Data = 0 and Max_Data = 1 */ + + uint32_t dma_addr; /*!< 32 bits aligned transfer buffer address. */ + + uint32_t ErrCnt; /*!< Host channel error count. */ + + USB_OTG_URBStateTypeDef urb_state; /*!< URB state. + This parameter can be any value of @ref USB_OTG_URBStateTypeDef */ + + USB_OTG_HCStateTypeDef state; /*!< Host Channel state. + This parameter can be any value of @ref USB_OTG_HCStateTypeDef */ +} USB_OTG_HCTypeDef; +#endif /* defined (USB_OTG_FS) || defined (USB_OTG_HS) */ + + +/* Exported constants --------------------------------------------------------*/ + +/** @defgroup PCD_Exported_Constants PCD Exported Constants + * @{ + */ + +#if defined (USB_OTG_FS) || defined (USB_OTG_HS) +/** @defgroup USB_OTG_CORE VERSION ID + * @{ + */ +#define USB_OTG_CORE_ID_300A 0x4F54300AU +#define USB_OTG_CORE_ID_310A 0x4F54310AU +/** + * @} + */ + +/** @defgroup USB_Core_Mode_ USB Core Mode + * @{ + */ +#define USB_OTG_MODE_DEVICE 0U +#define USB_OTG_MODE_HOST 1U +#define USB_OTG_MODE_DRD 2U +/** + * @} + */ + +/** @defgroup USB_LL Device Speed + * @{ + */ +#define USBD_HS_SPEED 0U +#define USBD_HSINFS_SPEED 1U +#define USBH_HS_SPEED 0U +#define USBD_FS_SPEED 2U +#define USBH_FSLS_SPEED 1U +/** + * @} + */ + +/** @defgroup USB_LL_Core_Speed USB Low Layer Core Speed + * @{ + */ +#define USB_OTG_SPEED_HIGH 0U +#define USB_OTG_SPEED_HIGH_IN_FULL 1U +#define USB_OTG_SPEED_FULL 3U +/** + * @} + */ + +/** @defgroup USB_LL_Core_PHY USB Low Layer Core PHY + * @{ + */ +#define USB_OTG_ULPI_PHY 1U +#define USB_OTG_EMBEDDED_PHY 2U +#define USB_OTG_HS_EMBEDDED_PHY 3U +#if !defined (USB_HS_PHYC_TUNE_VALUE) +#define USB_HS_PHYC_TUNE_VALUE 0x00000F13U /*!< Value of USB HS PHY Tune */ +#endif /* USB_HS_PHYC_TUNE_VALUE */ +/** + * @} + */ + +/** @defgroup USB_LL_Turnaround_Timeout Turnaround Timeout Value + * @{ + */ +#ifndef USBD_HS_TRDT_VALUE +#define USBD_HS_TRDT_VALUE 9U +#endif /* USBD_HS_TRDT_VALUE */ +#ifndef USBD_FS_TRDT_VALUE +#define USBD_FS_TRDT_VALUE 5U +#define USBD_DEFAULT_TRDT_VALUE 9U +#endif /* USBD_HS_TRDT_VALUE */ +/** + * @} + */ + +/** @defgroup USB_LL_Core_MPS USB Low Layer Core MPS + * @{ + */ +#define USB_OTG_HS_MAX_PACKET_SIZE 512U +#define USB_OTG_FS_MAX_PACKET_SIZE 64U +#define USB_OTG_MAX_EP0_SIZE 64U +/** + * @} + */ + +/** @defgroup USB_LL_Core_PHY_Frequency USB Low Layer Core PHY Frequency + * @{ + */ +#define DSTS_ENUMSPD_HS_PHY_30MHZ_OR_60MHZ (0U << 1) +#define DSTS_ENUMSPD_FS_PHY_30MHZ_OR_60MHZ (1U << 1) +#define DSTS_ENUMSPD_FS_PHY_48MHZ (3U << 1) +/** + * @} + */ + +/** @defgroup USB_LL_CORE_Frame_Interval USB Low Layer Core Frame Interval + * @{ + */ +#define DCFG_FRAME_INTERVAL_80 0U +#define DCFG_FRAME_INTERVAL_85 1U +#define DCFG_FRAME_INTERVAL_90 2U +#define DCFG_FRAME_INTERVAL_95 3U +/** + * @} + */ + +/** @defgroup USB_LL_EP0_MPS USB Low Layer EP0 MPS + * @{ + */ +#define EP_MPS_64 0U +#define EP_MPS_32 1U +#define EP_MPS_16 2U +#define EP_MPS_8 3U +/** + * @} + */ + +/** @defgroup USB_LL_EP_Speed USB Low Layer EP Speed + * @{ + */ +#define EP_SPEED_LOW 0U +#define EP_SPEED_FULL 1U +#define EP_SPEED_HIGH 2U +/** + * @} + */ + 
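
/* Editor's sketch (not part of this patch): a plausible USB_OTG_CfgTypeDef fill
 * for a Full-Speed device on the embedded PHY, using the constants above. In the
 * HAL this structure is normally populated by the PCD layer from PCD_InitTypeDef;
 * the endpoint count and the other values here are illustrative assumptions only. */
static void usb_fs_device_config(USB_OTG_CfgTypeDef *cfg)
{
  cfg->dev_endpoints           = 6U;                   /* assumed FS core endpoint count */
  cfg->speed                   = USBD_FS_SPEED;        /* device full speed              */
  cfg->phy_itface              = USB_OTG_EMBEDDED_PHY; /* on-chip FS transceiver         */
  cfg->ep0_mps                 = USB_OTG_MAX_EP0_SIZE; /* 64-byte EP0 packets            */
  cfg->dma_enable              = 0U;                   /* internal DMA is OTG_HS only    */
  cfg->Sof_enable              = 0U;
  cfg->low_power_enable        = 0U;
  cfg->lpm_enable              = 0U;
  cfg->battery_charging_enable = 0U;
  cfg->vbus_sensing_enable     = 0U;
  cfg->use_dedicated_ep1       = 0U;
  cfg->use_external_vbus       = 0U;
}
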
+/** @defgroup USB_LL_EP_Type USB Low Layer EP Type + * @{ + */ +#define EP_TYPE_CTRL 0U +#define EP_TYPE_ISOC 1U +#define EP_TYPE_BULK 2U +#define EP_TYPE_INTR 3U +#define EP_TYPE_MSK 3U +/** + * @} + */ + +/** @defgroup USB_LL_STS_Defines USB Low Layer STS Defines + * @{ + */ +#define STS_GOUT_NAK 1U +#define STS_DATA_UPDT 2U +#define STS_XFER_COMP 3U +#define STS_SETUP_COMP 4U +#define STS_SETUP_UPDT 6U +/** + * @} + */ + +/** @defgroup USB_LL_HCFG_SPEED_Defines USB Low Layer HCFG Speed Defines + * @{ + */ +#define HCFG_30_60_MHZ 0U +#define HCFG_48_MHZ 1U +#define HCFG_6_MHZ 2U +/** + * @} + */ + +/** @defgroup USB_LL_HPRT0_PRTSPD_SPEED_Defines USB Low Layer HPRT0 PRTSPD Speed Defines + * @{ + */ +#define HPRT0_PRTSPD_HIGH_SPEED 0U +#define HPRT0_PRTSPD_FULL_SPEED 1U +#define HPRT0_PRTSPD_LOW_SPEED 2U +/** + * @} + */ + +#define HCCHAR_CTRL 0U +#define HCCHAR_ISOC 1U +#define HCCHAR_BULK 2U +#define HCCHAR_INTR 3U + +#define HC_PID_DATA0 0U +#define HC_PID_DATA2 1U +#define HC_PID_DATA1 2U +#define HC_PID_SETUP 3U + +#define GRXSTS_PKTSTS_IN 2U +#define GRXSTS_PKTSTS_IN_XFER_COMP 3U +#define GRXSTS_PKTSTS_DATA_TOGGLE_ERR 5U +#define GRXSTS_PKTSTS_CH_HALTED 7U + +#define TEST_J 1U +#define TEST_K 2U +#define TEST_SE0_NAK 3U +#define TEST_PACKET 4U +#define TEST_FORCE_EN 5U + +#define USBx_PCGCCTL *(__IO uint32_t *)((uint32_t)USBx_BASE + USB_OTG_PCGCCTL_BASE) +#define USBx_HPRT0 *(__IO uint32_t *)((uint32_t)USBx_BASE + USB_OTG_HOST_PORT_BASE) + +#define USBx_DEVICE ((USB_OTG_DeviceTypeDef *)(USBx_BASE + USB_OTG_DEVICE_BASE)) +#define USBx_INEP(i) ((USB_OTG_INEndpointTypeDef *)(USBx_BASE\ + + USB_OTG_IN_ENDPOINT_BASE + ((i) * USB_OTG_EP_REG_SIZE))) + +#define USBx_OUTEP(i) ((USB_OTG_OUTEndpointTypeDef *)(USBx_BASE\ + + USB_OTG_OUT_ENDPOINT_BASE + ((i) * USB_OTG_EP_REG_SIZE))) + +#define USBx_DFIFO(i) *(__IO uint32_t *)(USBx_BASE + USB_OTG_FIFO_BASE + ((i) * USB_OTG_FIFO_SIZE)) + +#define USBx_HOST ((USB_OTG_HostTypeDef *)(USBx_BASE + USB_OTG_HOST_BASE)) +#define USBx_HC(i) ((USB_OTG_HostChannelTypeDef *)(USBx_BASE\ + + USB_OTG_HOST_CHANNEL_BASE\ + + ((i) * USB_OTG_HOST_CHANNEL_SIZE))) + +#ifdef USB_HS_PHYC /* Legacy name for USBPHYC defined in CMSIS device but USBCPHYC used in USB driver to determine if peripheral is present or not */ +#define USBPHYC USB_HS_PHYC +#endif +#endif /* defined (USB_OTG_FS) || defined (USB_OTG_HS) */ + +#define EP_ADDR_MSK 0xFU + +#ifndef USE_USB_DOUBLE_BUFFER +#define USE_USB_DOUBLE_BUFFER 1U +#endif /* USE_USB_DOUBLE_BUFFER */ +/** + * @} + */ + +/* Exported macro ------------------------------------------------------------*/ +/** @defgroup USB_LL_Exported_Macros USB Low Layer Exported Macros + * @{ + */ +#if defined (USB_OTG_FS) || defined (USB_OTG_HS) +#define USB_MASK_INTERRUPT(__INSTANCE__, __INTERRUPT__) ((__INSTANCE__)->GINTMSK &= ~(__INTERRUPT__)) +#define USB_UNMASK_INTERRUPT(__INSTANCE__, __INTERRUPT__) ((__INSTANCE__)->GINTMSK |= (__INTERRUPT__)) + +#define CLEAR_IN_EP_INTR(__EPNUM__, __INTERRUPT__) (USBx_INEP(__EPNUM__)->DIEPINT = (__INTERRUPT__)) +#define CLEAR_OUT_EP_INTR(__EPNUM__, __INTERRUPT__) (USBx_OUTEP(__EPNUM__)->DOEPINT = (__INTERRUPT__)) +#endif /* defined (USB_OTG_FS) || defined (USB_OTG_HS) */ +/** + * @} + */ + +/* Exported functions --------------------------------------------------------*/ +/** @addtogroup USB_LL_Exported_Functions USB Low Layer Exported Functions + * @{ + */ +#if defined (USB_OTG_FS) || defined (USB_OTG_HS) +HAL_StatusTypeDef USB_CoreInit(USB_OTG_GlobalTypeDef *USBx, USB_OTG_CfgTypeDef cfg); +HAL_StatusTypeDef 
USB_DevInit(USB_OTG_GlobalTypeDef *USBx, USB_OTG_CfgTypeDef cfg); +HAL_StatusTypeDef USB_EnableGlobalInt(USB_OTG_GlobalTypeDef *USBx); +HAL_StatusTypeDef USB_DisableGlobalInt(USB_OTG_GlobalTypeDef *USBx); +HAL_StatusTypeDef USB_SetTurnaroundTime(USB_OTG_GlobalTypeDef *USBx, uint32_t hclk, uint8_t speed); +HAL_StatusTypeDef USB_SetCurrentMode(USB_OTG_GlobalTypeDef *USBx, USB_OTG_ModeTypeDef mode); +HAL_StatusTypeDef USB_SetDevSpeed(USB_OTG_GlobalTypeDef *USBx, uint8_t speed); +HAL_StatusTypeDef USB_FlushRxFifo(USB_OTG_GlobalTypeDef *USBx); +HAL_StatusTypeDef USB_FlushTxFifo(USB_OTG_GlobalTypeDef *USBx, uint32_t num); +HAL_StatusTypeDef USB_ActivateEndpoint(USB_OTG_GlobalTypeDef *USBx, USB_OTG_EPTypeDef *ep); +HAL_StatusTypeDef USB_DeactivateEndpoint(USB_OTG_GlobalTypeDef *USBx, USB_OTG_EPTypeDef *ep); +HAL_StatusTypeDef USB_ActivateDedicatedEndpoint(USB_OTG_GlobalTypeDef *USBx, USB_OTG_EPTypeDef *ep); +HAL_StatusTypeDef USB_DeactivateDedicatedEndpoint(USB_OTG_GlobalTypeDef *USBx, USB_OTG_EPTypeDef *ep); +HAL_StatusTypeDef USB_EPStartXfer(USB_OTG_GlobalTypeDef *USBx, USB_OTG_EPTypeDef *ep, uint8_t dma); +HAL_StatusTypeDef USB_EP0StartXfer(USB_OTG_GlobalTypeDef *USBx, USB_OTG_EPTypeDef *ep, uint8_t dma); +HAL_StatusTypeDef USB_WritePacket(USB_OTG_GlobalTypeDef *USBx, uint8_t *src, + uint8_t ch_ep_num, uint16_t len, uint8_t dma); + +void *USB_ReadPacket(USB_OTG_GlobalTypeDef *USBx, uint8_t *dest, uint16_t len); +HAL_StatusTypeDef USB_EPSetStall(USB_OTG_GlobalTypeDef *USBx, USB_OTG_EPTypeDef *ep); +HAL_StatusTypeDef USB_EPClearStall(USB_OTG_GlobalTypeDef *USBx, USB_OTG_EPTypeDef *ep); +HAL_StatusTypeDef USB_SetDevAddress(USB_OTG_GlobalTypeDef *USBx, uint8_t address); +HAL_StatusTypeDef USB_DevConnect(USB_OTG_GlobalTypeDef *USBx); +HAL_StatusTypeDef USB_DevDisconnect(USB_OTG_GlobalTypeDef *USBx); +HAL_StatusTypeDef USB_StopDevice(USB_OTG_GlobalTypeDef *USBx); +HAL_StatusTypeDef USB_ActivateSetup(USB_OTG_GlobalTypeDef *USBx); +HAL_StatusTypeDef USB_EP0_OutStart(USB_OTG_GlobalTypeDef *USBx, uint8_t dma, uint8_t *psetup); +uint8_t USB_GetDevSpeed(USB_OTG_GlobalTypeDef *USBx); +uint32_t USB_GetMode(USB_OTG_GlobalTypeDef *USBx); +uint32_t USB_ReadInterrupts(USB_OTG_GlobalTypeDef *USBx); +uint32_t USB_ReadDevAllOutEpInterrupt(USB_OTG_GlobalTypeDef *USBx); +uint32_t USB_ReadDevOutEPInterrupt(USB_OTG_GlobalTypeDef *USBx, uint8_t epnum); +uint32_t USB_ReadDevAllInEpInterrupt(USB_OTG_GlobalTypeDef *USBx); +uint32_t USB_ReadDevInEPInterrupt(USB_OTG_GlobalTypeDef *USBx, uint8_t epnum); +void USB_ClearInterrupts(USB_OTG_GlobalTypeDef *USBx, uint32_t interrupt); + +HAL_StatusTypeDef USB_HostInit(USB_OTG_GlobalTypeDef *USBx, USB_OTG_CfgTypeDef cfg); +HAL_StatusTypeDef USB_InitFSLSPClkSel(USB_OTG_GlobalTypeDef *USBx, uint8_t freq); +HAL_StatusTypeDef USB_ResetPort(USB_OTG_GlobalTypeDef *USBx); +HAL_StatusTypeDef USB_DriveVbus(USB_OTG_GlobalTypeDef *USBx, uint8_t state); +uint32_t USB_GetHostSpeed(USB_OTG_GlobalTypeDef *USBx); +uint32_t USB_GetCurrentFrame(USB_OTG_GlobalTypeDef *USBx); +HAL_StatusTypeDef USB_HC_Init(USB_OTG_GlobalTypeDef *USBx, uint8_t ch_num, + uint8_t epnum, uint8_t dev_address, uint8_t speed, + uint8_t ep_type, uint16_t mps); +HAL_StatusTypeDef USB_HC_StartXfer(USB_OTG_GlobalTypeDef *USBx, + USB_OTG_HCTypeDef *hc, uint8_t dma); + +uint32_t USB_HC_ReadInterrupt(USB_OTG_GlobalTypeDef *USBx); +HAL_StatusTypeDef USB_HC_Halt(USB_OTG_GlobalTypeDef *USBx, uint8_t hc_num); +HAL_StatusTypeDef USB_DoPing(USB_OTG_GlobalTypeDef *USBx, uint8_t ch_num); +HAL_StatusTypeDef USB_StopHost(USB_OTG_GlobalTypeDef 
*USBx); +HAL_StatusTypeDef USB_ActivateRemoteWakeup(USB_OTG_GlobalTypeDef *USBx); +HAL_StatusTypeDef USB_DeActivateRemoteWakeup(USB_OTG_GlobalTypeDef *USBx); +#endif /* defined (USB_OTG_FS) || defined (USB_OTG_HS) */ + +/** + * @} + */ + +/** + * @} + */ + +/** + * @} + */ + +/** + * @} + */ +#endif /* defined (USB_OTG_FS) || defined (USB_OTG_HS) */ + +#ifdef __cplusplus +} +#endif /* __cplusplus */ + + +#endif /* STM32F7xx_LL_USB_H */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal.c b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal.c new file mode 100644 index 0000000..fbd2946 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal.c @@ -0,0 +1,619 @@ +/** + ****************************************************************************** + * @file stm32f7xx_hal.c + * @author MCD Application Team + * @brief HAL module driver. + * This is the common part of the HAL initialization + * + ****************************************************************************** + * @attention + * + * Copyright (c) 2017 STMicroelectronics. + * All rights reserved. + * + * This software is licensed under terms that can be found in the LICENSE file + * in the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. + * + ****************************************************************************** + @verbatim + ============================================================================== + ##### How to use this driver ##### + ============================================================================== + [..] + The common HAL driver contains a set of generic and common APIs that can be + used by the PPP peripheral drivers and the user to start using the HAL. + [..] + The HAL contains two APIs' categories: + (+) Common HAL APIs + (+) Services HAL APIs + + @endverbatim + ****************************************************************************** + */ + +/* Includes ------------------------------------------------------------------*/ +#include "stm32f7xx_hal.h" + +/** @addtogroup STM32F7xx_HAL_Driver + * @{ + */ + +/** @defgroup HAL HAL + * @brief HAL module driver. 
+ * @{ + */ + +/* Private typedef -----------------------------------------------------------*/ +/* Private define ------------------------------------------------------------*/ +/** @addtogroup HAL_Private_Constants + * @{ + */ +/** + * @brief STM32F7xx HAL Driver version number V1.2.10 + */ +#define __STM32F7xx_HAL_VERSION_MAIN (0x01) /*!< [31:24] main version */ +#define __STM32F7xx_HAL_VERSION_SUB1 (0x02) /*!< [23:16] sub1 version */ +#define __STM32F7xx_HAL_VERSION_SUB2 (0x0A) /*!< [15:8] sub2 version */ +#define __STM32F7xx_HAL_VERSION_RC (0x00) /*!< [7:0] release candidate */ +#define __STM32F7xx_HAL_VERSION ((__STM32F7xx_HAL_VERSION_MAIN << 24)\ + |(__STM32F7xx_HAL_VERSION_SUB1 << 16)\ + |(__STM32F7xx_HAL_VERSION_SUB2 << 8 )\ + |(__STM32F7xx_HAL_VERSION_RC)) + +#define IDCODE_DEVID_MASK ((uint32_t)0x00000FFF) +/** + * @} + */ + +/* Private macro -------------------------------------------------------------*/ +/* Exported variables ---------------------------------------------------------*/ +/** @addtogroup HAL_Exported_Variables + * @{ + */ +__IO uint32_t uwTick; +uint32_t uwTickPrio = (1UL << __NVIC_PRIO_BITS); /* Invalid PRIO */ +HAL_TickFreqTypeDef uwTickFreq = HAL_TICK_FREQ_DEFAULT; /* 1KHz */ +/** + * @} + */ + +/* Private function prototypes -----------------------------------------------*/ +/* Private functions ---------------------------------------------------------*/ + +/** @defgroup HAL_Exported_Functions HAL Exported Functions + * @{ + */ + +/** @defgroup HAL_Exported_Functions_Group1 Initialization and de-initialization Functions + * @brief Initialization and de-initialization functions + * +@verbatim + =============================================================================== + ##### Initialization and Configuration functions ##### + =============================================================================== + [..] This section provides functions allowing to: + (+) Initializes the Flash interface the NVIC allocation and initial clock + configuration. It initializes the systick also when timeout is needed + and the backup domain when enabled. + (+) De-Initializes common part of the HAL. + (+) Configure the time base source to have 1ms time base with a dedicated + Tick interrupt priority. + (++) SysTick timer is used by default as source of time base, but user + can eventually implement his proper time base source (a general purpose + timer for example or other time source), keeping in mind that Time base + duration should be kept 1ms since PPP_TIMEOUT_VALUEs are defined and + handled in milliseconds basis. + (++) Time base configuration function (HAL_InitTick ()) is called automatically + at the beginning of the program after reset by HAL_Init() or at any time + when clock is configured, by HAL_RCC_ClockConfig(). + (++) Source of time base is configured to generate interrupts at regular + time intervals. Care must be taken if HAL_Delay() is called from a + peripheral ISR process, the Tick interrupt line must have higher priority + (numerically lower) than the peripheral interrupt. Otherwise the caller + ISR process will be blocked. + (++) functions affecting time base configurations are declared as __weak + to make override possible in case of other implementations in user file. 
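      (++) Editor's illustrative sketch (not part of the original ST note): an application that
           replaces SysTick by another 1 ms source typically redefines HAL_InitTick() in a user
           file, keeping the same prototype but dropping __weak. Assuming a hypothetical user
           helper my_timebase_start_1ms() that starts a 1 ms periodic timer at the requested
           IRQ priority, the override can be as small as:

             HAL_StatusTypeDef HAL_InitTick(uint32_t TickPriority)
             {
               my_timebase_start_1ms(TickPriority);
               uwTickPrio = TickPriority;
               return HAL_OK;
             }

           HAL_IncTick() is then called from that timer's interrupt handler instead of
           SysTick_Handler().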
+@endverbatim + * @{ + */ + +/** + * @brief This function is used to initialize the HAL Library; it must be the first + * instruction to be executed in the main program (before to call any other + * HAL function), it performs the following: + * Configure the Flash prefetch, and instruction cache through ART accelerator. + * Configures the SysTick to generate an interrupt each 1 millisecond, + * which is clocked by the HSI (at this stage, the clock is not yet + * configured and thus the system is running from the internal HSI at 16 MHz). + * Set NVIC Group Priority to 4. + * Calls the HAL_MspInit() callback function defined in user file + * "stm32f7xx_hal_msp.c" to do the global low level hardware initialization + * + * @note SysTick is used as time base for the HAL_Delay() function, the application + * need to ensure that the SysTick time base is always set to 1 millisecond + * to have correct HAL operation. + * @retval HAL status + */ +HAL_StatusTypeDef HAL_Init(void) +{ + /* Configure Instruction cache through ART accelerator */ +#if (ART_ACCLERATOR_ENABLE != 0) + __HAL_FLASH_ART_ENABLE(); +#endif /* ART_ACCLERATOR_ENABLE */ + + /* Configure Flash prefetch */ +#if (PREFETCH_ENABLE != 0U) + __HAL_FLASH_PREFETCH_BUFFER_ENABLE(); +#endif /* PREFETCH_ENABLE */ + + /* Set Interrupt Group Priority */ + HAL_NVIC_SetPriorityGrouping(NVIC_PRIORITYGROUP_4); + + /* Use systick as time base source and configure 1ms tick (default clock after Reset is HSI) */ + HAL_InitTick(TICK_INT_PRIORITY); + + /* Init the low level hardware */ + HAL_MspInit(); + + /* Return function status */ + return HAL_OK; +} + +/** + * @brief This function de-Initializes common part of the HAL and stops the systick. + * This function is optional. + * @retval HAL status + */ +HAL_StatusTypeDef HAL_DeInit(void) +{ + /* Reset of all peripherals */ + __HAL_RCC_APB1_FORCE_RESET(); + __HAL_RCC_APB1_RELEASE_RESET(); + + __HAL_RCC_APB2_FORCE_RESET(); + __HAL_RCC_APB2_RELEASE_RESET(); + + __HAL_RCC_AHB1_FORCE_RESET(); + __HAL_RCC_AHB1_RELEASE_RESET(); + + __HAL_RCC_AHB2_FORCE_RESET(); + __HAL_RCC_AHB2_RELEASE_RESET(); + + __HAL_RCC_AHB3_FORCE_RESET(); + __HAL_RCC_AHB3_RELEASE_RESET(); + + /* De-Init the low level hardware */ + HAL_MspDeInit(); + + /* Return function status */ + return HAL_OK; +} + +/** + * @brief Initialize the MSP. + * @retval None + */ +__weak void HAL_MspInit(void) +{ + /* NOTE : This function should not be modified, when the callback is needed, + the HAL_MspInit could be implemented in the user file + */ +} + +/** + * @brief DeInitializes the MSP. + * @retval None + */ +__weak void HAL_MspDeInit(void) +{ + /* NOTE : This function should not be modified, when the callback is needed, + the HAL_MspDeInit could be implemented in the user file + */ +} + +/** + * @brief This function configures the source of the time base. + * The time source is configured to have 1ms time base with a dedicated + * Tick interrupt priority. + * @note This function is called automatically at the beginning of program after + * reset by HAL_Init() or at any time when clock is reconfigured by HAL_RCC_ClockConfig(). + * @note In the default implementation, SysTick timer is the source of time base. + * It is used to generate interrupts at regular time intervals. + * Care must be taken if HAL_Delay() is called from a peripheral ISR process, + * The SysTick interrupt must have higher priority (numerically lower) + * than the peripheral interrupt. Otherwise the caller ISR process will be blocked. 
+ * The function is declared as __weak to be overwritten in case of other + * implementation in user file. + * @param TickPriority Tick interrupt priority. + * @retval HAL status + */ +__weak HAL_StatusTypeDef HAL_InitTick(uint32_t TickPriority) +{ + /* Configure the SysTick to have interrupt in 1ms time basis*/ + if (HAL_SYSTICK_Config(SystemCoreClock / (1000U / uwTickFreq)) > 0U) + { + return HAL_ERROR; + } + + /* Configure the SysTick IRQ priority */ + if (TickPriority < (1UL << __NVIC_PRIO_BITS)) + { + HAL_NVIC_SetPriority(SysTick_IRQn, TickPriority, 0U); + uwTickPrio = TickPriority; + } + else + { + return HAL_ERROR; + } + + /* Return function status */ + return HAL_OK; +} + +/** + * @} + */ + +/** @defgroup HAL_Exported_Functions_Group2 HAL Control functions + * @brief HAL Control functions + * +@verbatim + =============================================================================== + ##### HAL Control functions ##### + =============================================================================== + [..] This section provides functions allowing to: + (+) Provide a tick value in millisecond + (+) Provide a blocking delay in millisecond + (+) Suspend the time base source interrupt + (+) Resume the time base source interrupt + (+) Get the HAL API driver version + (+) Get the device identifier + (+) Get the device revision identifier + (+) Enable/Disable Debug module during SLEEP mode + (+) Enable/Disable Debug module during STOP mode + (+) Enable/Disable Debug module during STANDBY mode + +@endverbatim + * @{ + */ + +/** + * @brief This function is called to increment a global variable "uwTick" + * used as application time base. + * @note In the default implementation, this variable is incremented each 1ms + * in SysTick ISR. + * @note This function is declared as __weak to be overwritten in case of other + * implementations in user file. + * @retval None + */ +__weak void HAL_IncTick(void) +{ + uwTick += uwTickFreq; +} + +/** + * @brief Provides a tick value in millisecond. + * @note This function is declared as __weak to be overwritten in case of other + * implementations in user file. + * @retval tick value + */ +__weak uint32_t HAL_GetTick(void) +{ + return uwTick; +} + +/** + * @brief This function returns a tick priority. + * @retval tick priority + */ +uint32_t HAL_GetTickPrio(void) +{ + return uwTickPrio; +} + +/** + * @brief Set new tick Freq. + * @retval Status + */ +HAL_StatusTypeDef HAL_SetTickFreq(HAL_TickFreqTypeDef Freq) +{ + HAL_StatusTypeDef status = HAL_OK; + HAL_TickFreqTypeDef prevTickFreq; + + assert_param(IS_TICKFREQ(Freq)); + + if (uwTickFreq != Freq) + { + /* Back up uwTickFreq frequency */ + prevTickFreq = uwTickFreq; + + /* Update uwTickFreq global variable used by HAL_InitTick() */ + uwTickFreq = Freq; + + /* Apply the new tick Freq */ + status = HAL_InitTick(uwTickPrio); + + if (status != HAL_OK) + { + /* Restore previous tick frequency */ + uwTickFreq = prevTickFreq; + } + } + + return status; +} + +/** + * @brief Return tick frequency. + * @retval tick period in Hz + */ +HAL_TickFreqTypeDef HAL_GetTickFreq(void) +{ + return uwTickFreq; +} + +/** + * @brief This function provides minimum delay (in milliseconds) based + * on variable incremented. + * @note In the default implementation , SysTick timer is the source of time base. + * It is used to generate interrupts at regular time intervals where uwTick + * is incremented. + * @note This function is declared as __weak to be overwritten in case of other + * implementations in user file. 
+ * @param Delay specifies the delay time length, in milliseconds. + * @retval None + */ +__weak void HAL_Delay(uint32_t Delay) +{ + uint32_t tickstart = HAL_GetTick(); + uint32_t wait = Delay; + + /* Add a freq to guarantee minimum wait */ + if (wait < HAL_MAX_DELAY) + { + wait += (uint32_t)(uwTickFreq); + } + + while ((HAL_GetTick() - tickstart) < wait) + { + } +} + +/** + * @brief Suspend Tick increment. + * @note In the default implementation , SysTick timer is the source of time base. It is + * used to generate interrupts at regular time intervals. Once HAL_SuspendTick() + * is called, the SysTick interrupt will be disabled and so Tick increment + * is suspended. + * @note This function is declared as __weak to be overwritten in case of other + * implementations in user file. + * @retval None + */ +__weak void HAL_SuspendTick(void) +{ + /* Disable SysTick Interrupt */ + SysTick->CTRL &= ~SysTick_CTRL_TICKINT_Msk; +} + +/** + * @brief Resume Tick increment. + * @note In the default implementation , SysTick timer is the source of time base. It is + * used to generate interrupts at regular time intervals. Once HAL_ResumeTick() + * is called, the SysTick interrupt will be enabled and so Tick increment + * is resumed. + * @note This function is declared as __weak to be overwritten in case of other + * implementations in user file. + * @retval None + */ +__weak void HAL_ResumeTick(void) +{ + /* Enable SysTick Interrupt */ + SysTick->CTRL |= SysTick_CTRL_TICKINT_Msk; +} + +/** + * @brief Returns the HAL revision + * @retval version : 0xXYZR (8bits for each decimal, R for RC) + */ +uint32_t HAL_GetHalVersion(void) +{ + return __STM32F7xx_HAL_VERSION; +} + +/** + * @brief Returns the device revision identifier. + * @retval Device revision identifier + */ +uint32_t HAL_GetREVID(void) +{ + return((DBGMCU->IDCODE) >> 16U); +} + +/** + * @brief Returns the device identifier. 
+ * @retval Device identifier + */ +uint32_t HAL_GetDEVID(void) +{ + return((DBGMCU->IDCODE) & IDCODE_DEVID_MASK); +} + +/** + * @brief Returns first word of the unique device identifier (UID based on 96 bits) + * @retval Device identifier + */ +uint32_t HAL_GetUIDw0(void) +{ + return(READ_REG(*((uint32_t *)UID_BASE))); +} + +/** + * @brief Returns second word of the unique device identifier (UID based on 96 bits) + * @retval Device identifier + */ +uint32_t HAL_GetUIDw1(void) +{ + return(READ_REG(*((uint32_t *)(UID_BASE + 4U)))); +} + +/** + * @brief Returns third word of the unique device identifier (UID based on 96 bits) + * @retval Device identifier + */ +uint32_t HAL_GetUIDw2(void) +{ + return(READ_REG(*((uint32_t *)(UID_BASE + 8U)))); +} + +/** + * @brief Enable the Debug Module during SLEEP mode + * @retval None + */ +void HAL_DBGMCU_EnableDBGSleepMode(void) +{ + SET_BIT(DBGMCU->CR, DBGMCU_CR_DBG_SLEEP); +} + +/** + * @brief Disable the Debug Module during SLEEP mode + * @retval None + */ +void HAL_DBGMCU_DisableDBGSleepMode(void) +{ + CLEAR_BIT(DBGMCU->CR, DBGMCU_CR_DBG_SLEEP); +} + +/** + * @brief Enable the Debug Module during STOP mode + * @retval None + */ +void HAL_DBGMCU_EnableDBGStopMode(void) +{ + SET_BIT(DBGMCU->CR, DBGMCU_CR_DBG_STOP); +} + +/** + * @brief Disable the Debug Module during STOP mode + * @retval None + */ +void HAL_DBGMCU_DisableDBGStopMode(void) +{ + CLEAR_BIT(DBGMCU->CR, DBGMCU_CR_DBG_STOP); +} + +/** + * @brief Enable the Debug Module during STANDBY mode + * @retval None + */ +void HAL_DBGMCU_EnableDBGStandbyMode(void) +{ + SET_BIT(DBGMCU->CR, DBGMCU_CR_DBG_STANDBY); +} + +/** + * @brief Disable the Debug Module during STANDBY mode + * @retval None + */ +void HAL_DBGMCU_DisableDBGStandbyMode(void) +{ + CLEAR_BIT(DBGMCU->CR, DBGMCU_CR_DBG_STANDBY); +} + +/** + * @brief Enables the I/O Compensation Cell. + * @note The I/O compensation cell can be used only when the device supply + * voltage ranges from 2.4 to 3.6 V. + * @retval None + */ +void HAL_EnableCompensationCell(void) +{ + SYSCFG->CMPCR |= SYSCFG_CMPCR_CMP_PD; +} + +/** + * @brief Power-down the I/O Compensation Cell. + * @note The I/O compensation cell can be used only when the device supply + * voltage ranges from 2.4 to 3.6 V. + * @retval None + */ +void HAL_DisableCompensationCell(void) +{ + SYSCFG->CMPCR &= (uint32_t)~((uint32_t)SYSCFG_CMPCR_CMP_PD); +} + +/** + * @brief Enables the FMC Memory Mapping Swapping. + * + * @note SDRAM is accessible at 0x60000000 + * and NOR/RAM is accessible at 0xC0000000 + * + * @retval None + */ +void HAL_EnableFMCMemorySwapping(void) +{ + SYSCFG->MEMRMP |= SYSCFG_MEMRMP_SWP_FMC_0; +} + +/** + * @brief Disables the FMC Memory Mapping Swapping + * + * @note SDRAM is accessible at 0xC0000000 (default mapping) + * and NOR/RAM is accessible at 0x60000000 (default mapping) + * + * @retval None + */ +void HAL_DisableFMCMemorySwapping(void) +{ + SYSCFG->MEMRMP &= (uint32_t)~((uint32_t)SYSCFG_MEMRMP_SWP_FMC); +} + +#if defined (STM32F765xx) || defined (STM32F767xx) || defined (STM32F769xx) || defined (STM32F777xx) || defined (STM32F779xx) +/** +* @brief Enable the Internal FLASH Bank Swapping. +* +* @note This function can be used only for STM32F77xx/STM32F76xx devices. 
+* +* @note Flash Bank2 mapped at 0x08000000 (AXI) (aliased at 0x00200000 (TCM)) +* and Flash Bank1 mapped at 0x08100000 (AXI) (aliased at 0x00300000 (TCM)) +* +* @retval None +*/ +void HAL_EnableMemorySwappingBank(void) +{ + SET_BIT(SYSCFG->MEMRMP, SYSCFG_MEMRMP_SWP_FB); +} + +/** +* @brief Disable the Internal FLASH Bank Swapping. +* +* @note This function can be used only for STM32F77xx/STM32F76xx devices. +* +* @note The default state : Flash Bank1 mapped at 0x08000000 (AXI) (aliased at 0x00200000 (TCM)) +* and Flash Bank2 mapped at 0x08100000 (AXI)( aliased at 0x00300000 (TCM)) +* +* @retval None +*/ +void HAL_DisableMemorySwappingBank(void) +{ + CLEAR_BIT(SYSCFG->MEMRMP, SYSCFG_MEMRMP_SWP_FB); +} +#endif /* STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx */ + +/** + * @} + */ + +/** + * @} + */ + +/** + * @} + */ + +/** + * @} + */ + + diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_cortex.c b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_cortex.c new file mode 100644 index 0000000..c729a7a --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_cortex.c @@ -0,0 +1,503 @@ +/** + ****************************************************************************** + * @file stm32f7xx_hal_cortex.c + * @author MCD Application Team + * @brief CORTEX HAL module driver. + * This file provides firmware functions to manage the following + * functionalities of the CORTEX: + * + Initialization and de-initialization functions + * + Peripheral Control functions + * + @verbatim + ============================================================================== + ##### How to use this driver ##### + ============================================================================== + + [..] + *** How to configure Interrupts using CORTEX HAL driver *** + =========================================================== + [..] + This section provides functions allowing to configure the NVIC interrupts (IRQ). + The Cortex-M4 exceptions are managed by CMSIS functions. + + (#) Configure the NVIC Priority Grouping using HAL_NVIC_SetPriorityGrouping() + function according to the following table. + (#) Configure the priority of the selected IRQ Channels using HAL_NVIC_SetPriority(). + (#) Enable the selected IRQ Channels using HAL_NVIC_EnableIRQ(). + (#) please refer to programming manual for details in how to configure priority. + + -@- When the NVIC_PRIORITYGROUP_0 is selected, IRQ preemption is no more possible. + The pending IRQ priority will be managed only by the sub priority. + + -@- IRQ priority order (sorted by highest to lowest priority): + (+@) Lowest preemption priority + (+@) Lowest sub priority + (+@) Lowest hardware priority (IRQ number) + + [..] + *** How to configure Systick using CORTEX HAL driver *** + ======================================================== + [..] + Setup SysTick Timer for time base. + + (+) The HAL_SYSTICK_Config() function calls the SysTick_Config() function which + is a CMSIS function that: + (++) Configures the SysTick Reload register with value passed as function parameter. + (++) Configures the SysTick IRQ priority to the lowest value (0x0F). + (++) Resets the SysTick Counter register. + (++) Configures the SysTick Counter clock source to be Core Clock Source (HCLK). + (++) Enables the SysTick Interrupt. + (++) Starts the SysTick Counter. 
+ + (+) You can change the SysTick Clock source to be HCLK_Div8 by calling the macro + __HAL_CORTEX_SYSTICKCLK_CONFIG(SYSTICK_CLKSOURCE_HCLK_DIV8) just after the + HAL_SYSTICK_Config() function call. The __HAL_CORTEX_SYSTICKCLK_CONFIG() macro is defined + inside the stm32f7xx_hal_cortex.h file. + + (+) You can change the SysTick IRQ priority by calling the + HAL_NVIC_SetPriority(SysTick_IRQn,...) function just after the HAL_SYSTICK_Config() function + call. The HAL_NVIC_SetPriority() call the NVIC_SetPriority() function which is a CMSIS function. + + (+) To adjust the SysTick time base, use the following formula: + + Reload Value = SysTick Counter Clock (Hz) x Desired Time base (s) + (++) Reload Value is the parameter to be passed for HAL_SYSTICK_Config() function + (++) Reload Value should not exceed 0xFFFFFF + + @endverbatim + ****************************************************************************** + * @attention + * + * Copyright (c) 2017 STMicroelectronics. + * All rights reserved. + * + * This software is licensed under terms that can be found in the LICENSE file in + * the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. + * + ****************************************************************************** + */ + +/* Includes ------------------------------------------------------------------*/ +#include "stm32f7xx_hal.h" + +/** @addtogroup STM32F7xx_HAL_Driver + * @{ + */ + +/** @defgroup CORTEX CORTEX + * @brief CORTEX HAL module driver + * @{ + */ + +#ifdef HAL_CORTEX_MODULE_ENABLED + +/* Private types -------------------------------------------------------------*/ +/* Private variables ---------------------------------------------------------*/ +/* Private constants ---------------------------------------------------------*/ +/* Private macros ------------------------------------------------------------*/ +/* Private functions ---------------------------------------------------------*/ +/* Exported functions --------------------------------------------------------*/ + +/** @defgroup CORTEX_Exported_Functions CORTEX Exported Functions + * @{ + */ + + +/** @defgroup CORTEX_Exported_Functions_Group1 Initialization and de-initialization functions + * @brief Initialization and Configuration functions + * +@verbatim + ============================================================================== + ##### Initialization and de-initialization functions ##### + ============================================================================== + [..] + This section provides the CORTEX HAL driver functions allowing to configure Interrupts + Systick functionalities + +@endverbatim + * @{ + */ + + +/** + * @brief Sets the priority grouping field (preemption priority and subpriority) + * using the required unlock sequence. + * @param PriorityGroup The priority grouping bits length. + * This parameter can be one of the following values: + * @arg NVIC_PRIORITYGROUP_0: 0 bits for preemption priority + * 4 bits for subpriority + * @arg NVIC_PRIORITYGROUP_1: 1 bits for preemption priority + * 3 bits for subpriority + * @arg NVIC_PRIORITYGROUP_2: 2 bits for preemption priority + * 2 bits for subpriority + * @arg NVIC_PRIORITYGROUP_3: 3 bits for preemption priority + * 1 bits for subpriority + * @arg NVIC_PRIORITYGROUP_4: 4 bits for preemption priority + * 0 bits for subpriority + * @note When the NVIC_PriorityGroup_0 is selected, IRQ preemption is no more possible. + * The pending IRQ priority will be managed only by the subpriority. 
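  * @note  Editor's illustrative sketch (not from the original ST comment): the configuration
  *        sequence described in the driver header above is typically
  *          HAL_NVIC_SetPriorityGrouping(NVIC_PRIORITYGROUP_4);
  *          HAL_NVIC_SetPriority(USART3_IRQn, 5, 0);
  *          HAL_NVIC_EnableIRQ(USART3_IRQn);
  *        where USART3_IRQn and the priority value 5 are arbitrary examples.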
+ * @retval None + */ +void HAL_NVIC_SetPriorityGrouping(uint32_t PriorityGroup) +{ + /* Check the parameters */ + assert_param(IS_NVIC_PRIORITY_GROUP(PriorityGroup)); + + /* Set the PRIGROUP[10:8] bits according to the PriorityGroup parameter value */ + NVIC_SetPriorityGrouping(PriorityGroup); +} + +/** + * @brief Sets the priority of an interrupt. + * @param IRQn External interrupt number. + * This parameter can be an enumerator of IRQn_Type enumeration + * (For the complete STM32 Devices IRQ Channels list, please refer to the appropriate CMSIS device file (stm32f7xxxx.h)) + * @param PreemptPriority The preemption priority for the IRQn channel. + * This parameter can be a value between 0 and 15 + * A lower priority value indicates a higher priority + * @param SubPriority the subpriority level for the IRQ channel. + * This parameter can be a value between 0 and 15 + * A lower priority value indicates a higher priority. + * @retval None + */ +void HAL_NVIC_SetPriority(IRQn_Type IRQn, uint32_t PreemptPriority, uint32_t SubPriority) +{ + uint32_t prioritygroup = 0x00; + + /* Check the parameters */ + assert_param(IS_NVIC_SUB_PRIORITY(SubPriority)); + assert_param(IS_NVIC_PREEMPTION_PRIORITY(PreemptPriority)); + + prioritygroup = NVIC_GetPriorityGrouping(); + + NVIC_SetPriority(IRQn, NVIC_EncodePriority(prioritygroup, PreemptPriority, SubPriority)); +} + +/** + * @brief Enables a device specific interrupt in the NVIC interrupt controller. + * @note To configure interrupts priority correctly, the NVIC_PriorityGroupConfig() + * function should be called before. + * @param IRQn External interrupt number. + * This parameter can be an enumerator of IRQn_Type enumeration + * (For the complete STM32 Devices IRQ Channels list, please refer to the appropriate CMSIS device file (stm32f7xxxx.h)) + * @retval None + */ +void HAL_NVIC_EnableIRQ(IRQn_Type IRQn) +{ + /* Check the parameters */ + assert_param(IS_NVIC_DEVICE_IRQ(IRQn)); + + /* Enable interrupt */ + NVIC_EnableIRQ(IRQn); +} + +/** + * @brief Disables a device specific interrupt in the NVIC interrupt controller. + * @param IRQn External interrupt number. + * This parameter can be an enumerator of IRQn_Type enumeration + * (For the complete STM32 Devices IRQ Channels list, please refer to the appropriate CMSIS device file (stm32f7xxxx.h)) + * @retval None + */ +void HAL_NVIC_DisableIRQ(IRQn_Type IRQn) +{ + /* Check the parameters */ + assert_param(IS_NVIC_DEVICE_IRQ(IRQn)); + + /* Disable interrupt */ + NVIC_DisableIRQ(IRQn); +} + +/** + * @brief Initiates a system reset request to reset the MCU. + * @retval None + */ +void HAL_NVIC_SystemReset(void) +{ + /* System Reset */ + NVIC_SystemReset(); +} + +/** + * @brief Initializes the System Timer and its interrupt, and starts the System Tick Timer. + * Counter is in free running mode to generate periodic interrupts. + * @param TicksNumb Specifies the ticks Number of ticks between two interrupts. + * @retval status: - 0 Function succeeded. + * - 1 Function failed. + */ +uint32_t HAL_SYSTICK_Config(uint32_t TicksNumb) +{ + return SysTick_Config(TicksNumb); +} +/** + * @} + */ + +/** @defgroup CORTEX_Exported_Functions_Group2 Peripheral Control functions + * @brief Cortex control functions + * +@verbatim + ============================================================================== + ##### Peripheral Control functions ##### + ============================================================================== + [..] 
+ This subsection provides a set of functions allowing to control the CORTEX + (NVIC, SYSTICK, MPU) functionalities. + + +@endverbatim + * @{ + */ + +#if (__MPU_PRESENT == 1) +/** + * @brief Disables the MPU + * @retval None + */ +void HAL_MPU_Disable(void) +{ + /* Make sure outstanding transfers are done */ + __DMB(); + + /* Disable fault exceptions */ + SCB->SHCSR &= ~SCB_SHCSR_MEMFAULTENA_Msk; + + /* Disable the MPU and clear the control register*/ + MPU->CTRL = 0; +} + +/** + * @brief Enables the MPU + * @param MPU_Control Specifies the control mode of the MPU during hard fault, + * NMI, FAULTMASK and privileged access to the default memory + * This parameter can be one of the following values: + * @arg MPU_HFNMI_PRIVDEF_NONE + * @arg MPU_HARDFAULT_NMI + * @arg MPU_PRIVILEGED_DEFAULT + * @arg MPU_HFNMI_PRIVDEF + * @retval None + */ +void HAL_MPU_Enable(uint32_t MPU_Control) +{ + /* Enable the MPU */ + MPU->CTRL = MPU_Control | MPU_CTRL_ENABLE_Msk; + + /* Enable fault exceptions */ + SCB->SHCSR |= SCB_SHCSR_MEMFAULTENA_Msk; + + /* Ensure MPU setting take effects */ + __DSB(); + __ISB(); +} + +/** + * @brief Initializes and configures the Region and the memory to be protected. + * @param MPU_Init Pointer to a MPU_Region_InitTypeDef structure that contains + * the initialization and configuration information. + * @retval None + */ +void HAL_MPU_ConfigRegion(MPU_Region_InitTypeDef *MPU_Init) +{ + /* Check the parameters */ + assert_param(IS_MPU_REGION_NUMBER(MPU_Init->Number)); + assert_param(IS_MPU_REGION_ENABLE(MPU_Init->Enable)); + + /* Set the Region number */ + MPU->RNR = MPU_Init->Number; + + if ((MPU_Init->Enable) != RESET) + { + /* Check the parameters */ + assert_param(IS_MPU_INSTRUCTION_ACCESS(MPU_Init->DisableExec)); + assert_param(IS_MPU_REGION_PERMISSION_ATTRIBUTE(MPU_Init->AccessPermission)); + assert_param(IS_MPU_TEX_LEVEL(MPU_Init->TypeExtField)); + assert_param(IS_MPU_ACCESS_SHAREABLE(MPU_Init->IsShareable)); + assert_param(IS_MPU_ACCESS_CACHEABLE(MPU_Init->IsCacheable)); + assert_param(IS_MPU_ACCESS_BUFFERABLE(MPU_Init->IsBufferable)); + assert_param(IS_MPU_SUB_REGION_DISABLE(MPU_Init->SubRegionDisable)); + assert_param(IS_MPU_REGION_SIZE(MPU_Init->Size)); + + MPU->RBAR = MPU_Init->BaseAddress; + MPU->RASR = ((uint32_t)MPU_Init->DisableExec << MPU_RASR_XN_Pos) | + ((uint32_t)MPU_Init->AccessPermission << MPU_RASR_AP_Pos) | + ((uint32_t)MPU_Init->TypeExtField << MPU_RASR_TEX_Pos) | + ((uint32_t)MPU_Init->IsShareable << MPU_RASR_S_Pos) | + ((uint32_t)MPU_Init->IsCacheable << MPU_RASR_C_Pos) | + ((uint32_t)MPU_Init->IsBufferable << MPU_RASR_B_Pos) | + ((uint32_t)MPU_Init->SubRegionDisable << MPU_RASR_SRD_Pos) | + ((uint32_t)MPU_Init->Size << MPU_RASR_SIZE_Pos) | + ((uint32_t)MPU_Init->Enable << MPU_RASR_ENABLE_Pos); + } + else + { + MPU->RBAR = 0x00; + MPU->RASR = 0x00; + } +} +#endif /* __MPU_PRESENT */ + +/** + * @brief Gets the priority grouping field from the NVIC Interrupt Controller. + * @retval Priority grouping field (SCB->AIRCR [10:8] PRIGROUP field) + */ +uint32_t HAL_NVIC_GetPriorityGrouping(void) +{ + /* Get the PRIGROUP[10:8] field value */ + return NVIC_GetPriorityGrouping(); +} + +/** + * @brief Gets the priority of an interrupt. + * @param IRQn External interrupt number. + * This parameter can be an enumerator of IRQn_Type enumeration + * (For the complete STM32 Devices IRQ Channels list, please refer to the appropriate CMSIS device file (stm32f7xxxx.h)) + * @param PriorityGroup the priority grouping bits length. 
+ * This parameter can be one of the following values: + * @arg NVIC_PRIORITYGROUP_0: 0 bits for preemption priority + * 4 bits for subpriority + * @arg NVIC_PRIORITYGROUP_1: 1 bits for preemption priority + * 3 bits for subpriority + * @arg NVIC_PRIORITYGROUP_2: 2 bits for preemption priority + * 2 bits for subpriority + * @arg NVIC_PRIORITYGROUP_3: 3 bits for preemption priority + * 1 bits for subpriority + * @arg NVIC_PRIORITYGROUP_4: 4 bits for preemption priority + * 0 bits for subpriority + * @param pPreemptPriority Pointer on the Preemptive priority value (starting from 0). + * @param pSubPriority Pointer on the Subpriority value (starting from 0). + * @retval None + */ +void HAL_NVIC_GetPriority(IRQn_Type IRQn, uint32_t PriorityGroup, uint32_t *pPreemptPriority, uint32_t *pSubPriority) +{ + /* Check the parameters */ + assert_param(IS_NVIC_PRIORITY_GROUP(PriorityGroup)); + /* Get priority for Cortex-M system or device specific interrupts */ + NVIC_DecodePriority(NVIC_GetPriority(IRQn), PriorityGroup, pPreemptPriority, pSubPriority); +} + +/** + * @brief Sets Pending bit of an external interrupt. + * @param IRQn External interrupt number + * This parameter can be an enumerator of IRQn_Type enumeration + * (For the complete STM32 Devices IRQ Channels list, please refer to the appropriate CMSIS device file (stm32f7xxxx.h)) + * @retval None + */ +void HAL_NVIC_SetPendingIRQ(IRQn_Type IRQn) +{ + /* Check the parameters */ + assert_param(IS_NVIC_DEVICE_IRQ(IRQn)); + + /* Set interrupt pending */ + NVIC_SetPendingIRQ(IRQn); +} + +/** + * @brief Gets Pending Interrupt (reads the pending register in the NVIC + * and returns the pending bit for the specified interrupt). + * @param IRQn External interrupt number. + * This parameter can be an enumerator of IRQn_Type enumeration + * (For the complete STM32 Devices IRQ Channels list, please refer to the appropriate CMSIS device file (stm32f7xxxx.h)) + * @retval status: - 0 Interrupt status is not pending. + * - 1 Interrupt status is pending. + */ +uint32_t HAL_NVIC_GetPendingIRQ(IRQn_Type IRQn) +{ + /* Check the parameters */ + assert_param(IS_NVIC_DEVICE_IRQ(IRQn)); + + /* Return 1 if pending else 0 */ + return NVIC_GetPendingIRQ(IRQn); +} + +/** + * @brief Clears the pending bit of an external interrupt. + * @param IRQn External interrupt number. + * This parameter can be an enumerator of IRQn_Type enumeration + * (For the complete STM32 Devices IRQ Channels list, please refer to the appropriate CMSIS device file (stm32f7xxxx.h)) + * @retval None + */ +void HAL_NVIC_ClearPendingIRQ(IRQn_Type IRQn) +{ + /* Check the parameters */ + assert_param(IS_NVIC_DEVICE_IRQ(IRQn)); + + /* Clear pending interrupt */ + NVIC_ClearPendingIRQ(IRQn); +} + +/** + * @brief Gets active interrupt ( reads the active register in NVIC and returns the active bit). + * @param IRQn External interrupt number + * This parameter can be an enumerator of IRQn_Type enumeration + * (For the complete STM32 Devices IRQ Channels list, please refer to the appropriate CMSIS device file (stm32f7xxxx.h)) + * @retval status: - 0 Interrupt status is not pending. + * - 1 Interrupt status is pending. + */ +uint32_t HAL_NVIC_GetActive(IRQn_Type IRQn) +{ + /* Check the parameters */ + assert_param(IS_NVIC_DEVICE_IRQ(IRQn)); + + /* Return 1 if active else 0 */ + return NVIC_GetActive(IRQn); +} + +/** + * @brief Configures the SysTick clock source. + * @param CLKSource specifies the SysTick clock source. 
+ * This parameter can be one of the following values: + * @arg SYSTICK_CLKSOURCE_HCLK_DIV8: AHB clock divided by 8 selected as SysTick clock source. + * @arg SYSTICK_CLKSOURCE_HCLK: AHB clock selected as SysTick clock source. + * @retval None + */ +void HAL_SYSTICK_CLKSourceConfig(uint32_t CLKSource) +{ + /* Check the parameters */ + assert_param(IS_SYSTICK_CLK_SOURCE(CLKSource)); + if (CLKSource == SYSTICK_CLKSOURCE_HCLK) + { + SysTick->CTRL |= SYSTICK_CLKSOURCE_HCLK; + } + else + { + SysTick->CTRL &= ~SYSTICK_CLKSOURCE_HCLK; + } +} + +/** + * @brief This function handles SYSTICK interrupt request. + * @retval None + */ +void HAL_SYSTICK_IRQHandler(void) +{ + HAL_SYSTICK_Callback(); +} + +/** + * @brief SYSTICK callback. + * @retval None + */ +__weak void HAL_SYSTICK_Callback(void) +{ + /* NOTE : This function Should not be modified, when the callback is needed, + the HAL_SYSTICK_Callback could be implemented in the user file + */ +} + +/** + * @} + */ + +/** + * @} + */ + +#endif /* HAL_CORTEX_MODULE_ENABLED */ +/** + * @} + */ + +/** + * @} + */ + diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_dma.c b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_dma.c new file mode 100644 index 0000000..f3229b7 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_dma.c @@ -0,0 +1,1312 @@ +/** + ****************************************************************************** + * @file stm32f7xx_hal_dma.c + * @author MCD Application Team + * @brief DMA HAL module driver. + * + * This file provides firmware functions to manage the following + * functionalities of the Direct Memory Access (DMA) peripheral: + * + Initialization and de-initialization functions + * + IO operation functions + * + Peripheral State and errors functions + @verbatim + ============================================================================== + ##### How to use this driver ##### + ============================================================================== + [..] + (#) Enable and configure the peripheral to be connected to the DMA Stream + (except for internal SRAM/FLASH memories: no initialization is + necessary) please refer to Reference manual for connection between peripherals + and DMA requests. + + (#) For a given Stream, program the required configuration through the following parameters: + Transfer Direction, Source and Destination data formats, + Circular, Normal or peripheral flow control mode, Stream Priority level, + Source and Destination Increment mode, FIFO mode and its Threshold (if needed), + Burst mode for Source and/or Destination (if needed) using HAL_DMA_Init() function. + + -@- Prior to HAL_DMA_Init() the clock must be enabled for DMA through the following macros: + __HAL_RCC_DMA1_CLK_ENABLE() or __HAL_RCC_DMA2_CLK_ENABLE(). + + *** Polling mode IO operation *** + ================================= + [..] + (+) Use HAL_DMA_Start() to start DMA transfer after the configuration of Source + address and destination address and the Length of data to be transferred. + (+) Use HAL_DMA_PollForTransfer() to poll for the end of current transfer, in this + case a fixed Timeout can be configured by User depending from his application. + (+) Use HAL_DMA_Abort() function to abort the current transfer. + + *** Interrupt mode IO operation *** + =================================== + [..] 
+ (+) Configure the DMA interrupt priority using HAL_NVIC_SetPriority() + (+) Enable the DMA IRQ handler using HAL_NVIC_EnableIRQ() + (+) Select Callbacks functions using HAL_DMA_RegisterCallback() + (+) Use HAL_DMA_Start_IT() to start DMA transfer after the configuration of + Source address and destination address and the Length of data to be transferred. In this + case the DMA interrupt is configured + (+) Use HAL_DMA_IRQHandler() called under DMA_IRQHandler() Interrupt subroutine + (+) At the end of data transfer HAL_DMA_IRQHandler() function is executed and user can + add his own function by customization of function pointer XferCpltCallback and + XferErrorCallback (i.e a member of DMA handle structure). + [..] + (#) Use HAL_DMA_GetState() function to return the DMA state and HAL_DMA_GetError() in case of error + detection. + + (#) Use HAL_DMA_Abort_IT() function to abort the current transfer + + -@- In Memory-to-Memory transfer mode, Circular mode is not allowed. + + -@- The FIFO is used mainly to reduce bus usage and to allow data packing/unpacking: it is + possible to set different Data Sizes for the Peripheral and the Memory (ie. you can set + Half-Word data size for the peripheral to access its data register and set Word data size + for the Memory to gain in access time. Each two half words will be packed and written in + a single access to a Word in the Memory). + + -@- When FIFO is disabled, it is not allowed to configure different Data Sizes for Source + and Destination. In this case the Peripheral Data Size will be applied to both Source + and Destination. + + *** DMA HAL driver macros list *** + ============================================= + [..] + Below the list of most used macros in DMA HAL driver. + + (+) __HAL_DMA_ENABLE: Enable the specified DMA Stream. + (+) __HAL_DMA_DISABLE: Disable the specified DMA Stream. + (+) __HAL_DMA_GET_IT_SOURCE: Check whether the specified DMA Stream interrupt has occurred or not. + + [..] + (@) You can refer to the DMA HAL driver header file for more useful macros + + @endverbatim + ****************************************************************************** + * @attention + * + * Copyright (c) 2017 STMicroelectronics. + * All rights reserved. + * + * This software is licensed under terms that can be found in the LICENSE file in + * the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. 
+ * + ****************************************************************************** + */ + +/* Includes ------------------------------------------------------------------*/ +#include "stm32f7xx_hal.h" + +/** @addtogroup STM32F7xx_HAL_Driver + * @{ + */ + +/** @defgroup DMA DMA + * @brief DMA HAL module driver + * @{ + */ + +#ifdef HAL_DMA_MODULE_ENABLED + +/* Private types -------------------------------------------------------------*/ +typedef struct +{ + __IO uint32_t ISR; /*!< DMA interrupt status register */ + __IO uint32_t Reserved0; + __IO uint32_t IFCR; /*!< DMA interrupt flag clear register */ +} DMA_Base_Registers; + +/* Private variables ---------------------------------------------------------*/ +/* Private constants ---------------------------------------------------------*/ +/** @addtogroup DMA_Private_Constants + * @{ + */ + #define HAL_TIMEOUT_DMA_ABORT ((uint32_t)5) /* 5 ms */ +/** + * @} + */ +/* Private macros ------------------------------------------------------------*/ +/* Private functions ---------------------------------------------------------*/ +/** @addtogroup DMA_Private_Functions + * @{ + */ +static void DMA_SetConfig(DMA_HandleTypeDef *hdma, uint32_t SrcAddress, uint32_t DstAddress, uint32_t DataLength); +static uint32_t DMA_CalcBaseAndBitshift(DMA_HandleTypeDef *hdma); +static HAL_StatusTypeDef DMA_CheckFifoParam(DMA_HandleTypeDef *hdma); + +/** + * @} + */ + +/* Exported functions ---------------------------------------------------------*/ +/** @addtogroup DMA_Exported_Functions + * @{ + */ + +/** @addtogroup DMA_Exported_Functions_Group1 + * +@verbatim + =============================================================================== + ##### Initialization and de-initialization functions ##### + =============================================================================== + [..] + This section provides functions allowing to initialize the DMA Stream source + and destination addresses, incrementation and data sizes, transfer direction, + circular/normal mode selection, memory-to-memory mode selection and Stream priority value. + [..] + The HAL_DMA_Init() function follows the DMA configuration procedures as described in + reference manual. + +@endverbatim + * @{ + */ + +/** + * @brief Initialize the DMA according to the specified + * parameters in the DMA_InitTypeDef and create the associated handle. + * @param hdma Pointer to a DMA_HandleTypeDef structure that contains + * the configuration information for the specified DMA Stream. 
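  * @note   Editor's illustrative sketch (not from the original ST comment): a minimal
  *         memory-to-memory configuration, assuming DMA2 Stream0 / Channel 0 is free, is
  *           DMA_HandleTypeDef hdma;
  *           __HAL_RCC_DMA2_CLK_ENABLE();
  *           hdma.Instance                 = DMA2_Stream0;
  *           hdma.Init.Channel             = DMA_CHANNEL_0;
  *           hdma.Init.Direction           = DMA_MEMORY_TO_MEMORY;
  *           hdma.Init.PeriphInc           = DMA_PINC_ENABLE;
  *           hdma.Init.MemInc              = DMA_MINC_ENABLE;
  *           hdma.Init.PeriphDataAlignment = DMA_PDATAALIGN_WORD;
  *           hdma.Init.MemDataAlignment    = DMA_MDATAALIGN_WORD;
  *           hdma.Init.Mode                = DMA_NORMAL;
  *           hdma.Init.Priority            = DMA_PRIORITY_LOW;
  *           hdma.Init.FIFOMode            = DMA_FIFOMODE_ENABLE;
  *           hdma.Init.FIFOThreshold       = DMA_FIFO_THRESHOLD_FULL;
  *           hdma.Init.MemBurst            = DMA_MBURST_SINGLE;
  *           hdma.Init.PeriphBurst         = DMA_PBURST_SINGLE;
  *           HAL_DMA_Init(&hdma);
  *         The FIFO is enabled here because memory-to-memory transfers cannot use direct mode.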
+ * @retval HAL status + */ +HAL_StatusTypeDef HAL_DMA_Init(DMA_HandleTypeDef *hdma) +{ + uint32_t tmp = 0U; + uint32_t tickstart = HAL_GetTick(); + DMA_Base_Registers *regs; + + /* Check the DMA peripheral state */ + if(hdma == NULL) + { + return HAL_ERROR; + } + + /* Check the parameters */ + assert_param(IS_DMA_STREAM_ALL_INSTANCE(hdma->Instance)); + assert_param(IS_DMA_CHANNEL(hdma->Init.Channel)); + assert_param(IS_DMA_DIRECTION(hdma->Init.Direction)); + assert_param(IS_DMA_PERIPHERAL_INC_STATE(hdma->Init.PeriphInc)); + assert_param(IS_DMA_MEMORY_INC_STATE(hdma->Init.MemInc)); + assert_param(IS_DMA_PERIPHERAL_DATA_SIZE(hdma->Init.PeriphDataAlignment)); + assert_param(IS_DMA_MEMORY_DATA_SIZE(hdma->Init.MemDataAlignment)); + assert_param(IS_DMA_MODE(hdma->Init.Mode)); + assert_param(IS_DMA_PRIORITY(hdma->Init.Priority)); + assert_param(IS_DMA_FIFO_MODE_STATE(hdma->Init.FIFOMode)); + /* Check the memory burst, peripheral burst and FIFO threshold parameters only + when FIFO mode is enabled */ + if(hdma->Init.FIFOMode != DMA_FIFOMODE_DISABLE) + { + assert_param(IS_DMA_FIFO_THRESHOLD(hdma->Init.FIFOThreshold)); + assert_param(IS_DMA_MEMORY_BURST(hdma->Init.MemBurst)); + assert_param(IS_DMA_PERIPHERAL_BURST(hdma->Init.PeriphBurst)); + } + + /* Change DMA peripheral state */ + hdma->State = HAL_DMA_STATE_BUSY; + + /* Allocate lock resource */ + __HAL_UNLOCK(hdma); + + + /* Disable the peripheral */ + __HAL_DMA_DISABLE(hdma); + + /* Check if the DMA Stream is effectively disabled */ + while((hdma->Instance->CR & DMA_SxCR_EN) != RESET) + { + /* Check for the Timeout */ + if((HAL_GetTick() - tickstart ) > HAL_TIMEOUT_DMA_ABORT) + { + /* Update error code */ + hdma->ErrorCode = HAL_DMA_ERROR_TIMEOUT; + + /* Change the DMA state */ + hdma->State = HAL_DMA_STATE_TIMEOUT; + + return HAL_TIMEOUT; + } + } + + /* Get the CR register value */ + tmp = hdma->Instance->CR; + + /* Clear CHSEL, MBURST, PBURST, PL, MSIZE, PSIZE, MINC, PINC, CIRC, DIR, CT and DBM bits */ + tmp &= ((uint32_t)~(DMA_SxCR_CHSEL | DMA_SxCR_MBURST | DMA_SxCR_PBURST | \ + DMA_SxCR_PL | DMA_SxCR_MSIZE | DMA_SxCR_PSIZE | \ + DMA_SxCR_MINC | DMA_SxCR_PINC | DMA_SxCR_CIRC | \ + DMA_SxCR_DIR | DMA_SxCR_CT | DMA_SxCR_DBM)); + + /* Prepare the DMA Stream configuration */ + tmp |= hdma->Init.Channel | hdma->Init.Direction | + hdma->Init.PeriphInc | hdma->Init.MemInc | + hdma->Init.PeriphDataAlignment | hdma->Init.MemDataAlignment | + hdma->Init.Mode | hdma->Init.Priority; + + /* the Memory burst and peripheral burst are not used when the FIFO is disabled */ + if(hdma->Init.FIFOMode == DMA_FIFOMODE_ENABLE) + { + /* Get memory burst and peripheral burst */ + tmp |= hdma->Init.MemBurst | hdma->Init.PeriphBurst; + } + + /* Write to DMA Stream CR register */ + hdma->Instance->CR = tmp; + + /* Get the FCR register value */ + tmp = hdma->Instance->FCR; + + /* Clear Direct mode and FIFO threshold bits */ + tmp &= (uint32_t)~(DMA_SxFCR_DMDIS | DMA_SxFCR_FTH); + + /* Prepare the DMA Stream FIFO configuration */ + tmp |= hdma->Init.FIFOMode; + + /* The FIFO threshold is not used when the FIFO mode is disabled */ + if(hdma->Init.FIFOMode == DMA_FIFOMODE_ENABLE) + { + /* Get the FIFO threshold */ + tmp |= hdma->Init.FIFOThreshold; + + /* Check compatibility between FIFO threshold level and size of the memory burst */ + /* for INCR4, INCR8, INCR16 bursts */ + if (hdma->Init.MemBurst != DMA_MBURST_SINGLE) + { + if (DMA_CheckFifoParam(hdma) != HAL_OK) + { + /* Update error code */ + hdma->ErrorCode = HAL_DMA_ERROR_PARAM; + + /* Change the DMA state */ + 
hdma->State = HAL_DMA_STATE_READY; + + return HAL_ERROR; + } + } + } + + /* Write to DMA Stream FCR */ + hdma->Instance->FCR = tmp; + + /* Initialize StreamBaseAddress and StreamIndex parameters to be used to calculate + DMA steam Base Address needed by HAL_DMA_IRQHandler() and HAL_DMA_PollForTransfer() */ + regs = (DMA_Base_Registers *)DMA_CalcBaseAndBitshift(hdma); + + /* Clear all interrupt flags */ + regs->IFCR = 0x3FU << hdma->StreamIndex; + + /* Initialize the error code */ + hdma->ErrorCode = HAL_DMA_ERROR_NONE; + + /* Initialize the DMA state */ + hdma->State = HAL_DMA_STATE_READY; + + return HAL_OK; +} + +/** + * @brief DeInitializes the DMA peripheral + * @param hdma pointer to a DMA_HandleTypeDef structure that contains + * the configuration information for the specified DMA Stream. + * @retval HAL status + */ +HAL_StatusTypeDef HAL_DMA_DeInit(DMA_HandleTypeDef *hdma) +{ + DMA_Base_Registers *regs; + + /* Check the DMA peripheral state */ + if(hdma == NULL) + { + return HAL_ERROR; + } + + /* Check the DMA peripheral state */ + if(hdma->State == HAL_DMA_STATE_BUSY) + { + /* Return error status */ + return HAL_BUSY; + } + + /* Check the parameters */ + assert_param(IS_DMA_STREAM_ALL_INSTANCE(hdma->Instance)); + + /* Disable the selected DMA Streamx */ + __HAL_DMA_DISABLE(hdma); + + /* Reset DMA Streamx control register */ + hdma->Instance->CR = 0U; + + /* Reset DMA Streamx number of data to transfer register */ + hdma->Instance->NDTR = 0U; + + /* Reset DMA Streamx peripheral address register */ + hdma->Instance->PAR = 0U; + + /* Reset DMA Streamx memory 0 address register */ + hdma->Instance->M0AR = 0U; + + /* Reset DMA Streamx memory 1 address register */ + hdma->Instance->M1AR = 0U; + + /* Reset DMA Streamx FIFO control register */ + hdma->Instance->FCR = (uint32_t)0x00000021U; + + /* Get DMA steam Base Address */ + regs = (DMA_Base_Registers *)DMA_CalcBaseAndBitshift(hdma); + + /* Clear all interrupt flags at correct offset within the register */ + regs->IFCR = 0x3FU << hdma->StreamIndex; + + /* Clean all callbacks */ + hdma->XferCpltCallback = NULL; + hdma->XferHalfCpltCallback = NULL; + hdma->XferM1CpltCallback = NULL; + hdma->XferM1HalfCpltCallback = NULL; + hdma->XferErrorCallback = NULL; + hdma->XferAbortCallback = NULL; + + /* Reset the error code */ + hdma->ErrorCode = HAL_DMA_ERROR_NONE; + + /* Reset the DMA state */ + hdma->State = HAL_DMA_STATE_RESET; + + /* Release Lock */ + __HAL_UNLOCK(hdma); + + return HAL_OK; +} + +/** + * @} + */ + +/** @addtogroup DMA_Exported_Functions_Group2 + * +@verbatim + =============================================================================== + ##### IO operation functions ##### + =============================================================================== + [..] This section provides functions allowing to: + (+) Configure the source, destination address and data length and Start DMA transfer + (+) Configure the source, destination address and data length and + Start DMA transfer with interrupt + (+) Abort DMA transfer + (+) Poll for transfer complete + (+) Handle DMA interrupt request + +@endverbatim + * @{ + */ + +/** + * @brief Starts the DMA Transfer. + * @param hdma pointer to a DMA_HandleTypeDef structure that contains + * the configuration information for the specified DMA Stream. 
+ * @param SrcAddress The source memory Buffer address + * @param DstAddress The destination memory Buffer address + * @param DataLength The length of data to be transferred from source to destination + * @retval HAL status + */ +HAL_StatusTypeDef HAL_DMA_Start(DMA_HandleTypeDef *hdma, uint32_t SrcAddress, uint32_t DstAddress, uint32_t DataLength) +{ + HAL_StatusTypeDef status = HAL_OK; + + /* Check the parameters */ + assert_param(IS_DMA_BUFFER_SIZE(DataLength)); + + /* Process locked */ + __HAL_LOCK(hdma); + + if(HAL_DMA_STATE_READY == hdma->State) + { + /* Change DMA peripheral state */ + hdma->State = HAL_DMA_STATE_BUSY; + + /* Initialize the error code */ + hdma->ErrorCode = HAL_DMA_ERROR_NONE; + + /* Configure the source, destination address and the data length */ + DMA_SetConfig(hdma, SrcAddress, DstAddress, DataLength); + + /* Enable the Peripheral */ + __HAL_DMA_ENABLE(hdma); + } + else + { + /* Process unlocked */ + __HAL_UNLOCK(hdma); + + /* Return error status */ + status = HAL_BUSY; + } + return status; +} + +/** + * @brief Start the DMA Transfer with interrupt enabled. + * @param hdma pointer to a DMA_HandleTypeDef structure that contains + * the configuration information for the specified DMA Stream. + * @param SrcAddress The source memory Buffer address + * @param DstAddress The destination memory Buffer address + * @param DataLength The length of data to be transferred from source to destination + * @retval HAL status + */ +HAL_StatusTypeDef HAL_DMA_Start_IT(DMA_HandleTypeDef *hdma, uint32_t SrcAddress, uint32_t DstAddress, uint32_t DataLength) +{ + HAL_StatusTypeDef status = HAL_OK; + + /* calculate DMA base and stream number */ + DMA_Base_Registers *regs = (DMA_Base_Registers *)hdma->StreamBaseAddress; + + /* Check the parameters */ + assert_param(IS_DMA_BUFFER_SIZE(DataLength)); + + /* Process locked */ + __HAL_LOCK(hdma); + + if(HAL_DMA_STATE_READY == hdma->State) + { + /* Change DMA peripheral state */ + hdma->State = HAL_DMA_STATE_BUSY; + + /* Initialize the error code */ + hdma->ErrorCode = HAL_DMA_ERROR_NONE; + + /* Configure the source, destination address and the data length */ + DMA_SetConfig(hdma, SrcAddress, DstAddress, DataLength); + + /* Clear all interrupt flags at correct offset within the register */ + regs->IFCR = 0x3FU << hdma->StreamIndex; + + /* Enable Common interrupts*/ + hdma->Instance->CR |= DMA_IT_TC | DMA_IT_TE | DMA_IT_DME; + hdma->Instance->FCR |= DMA_IT_FE; + + if(hdma->XferHalfCpltCallback != NULL) + { + hdma->Instance->CR |= DMA_IT_HT; + } + + /* Enable the Peripheral */ + __HAL_DMA_ENABLE(hdma); + } + else + { + /* Process unlocked */ + __HAL_UNLOCK(hdma); + + /* Return error status */ + status = HAL_BUSY; + } + + return status; +} + +/** + * @brief Aborts the DMA Transfer. + * @param hdma pointer to a DMA_HandleTypeDef structure that contains + * the configuration information for the specified DMA Stream. + * + * @note After disabling a DMA Stream, a check for wait until the DMA Stream is + * effectively disabled is added. If a Stream is disabled + * while a data transfer is ongoing, the current data will be transferred + * and the Stream will be effectively disabled only after the transfer of + * this single data is finished. 
+ * @retval HAL status + */ +HAL_StatusTypeDef HAL_DMA_Abort(DMA_HandleTypeDef *hdma) +{ + /* calculate DMA base and stream number */ + DMA_Base_Registers *regs = (DMA_Base_Registers *)hdma->StreamBaseAddress; + + uint32_t tickstart = HAL_GetTick(); + + if(hdma->State != HAL_DMA_STATE_BUSY) + { + hdma->ErrorCode = HAL_DMA_ERROR_NO_XFER; + + /* Process Unlocked */ + __HAL_UNLOCK(hdma); + + return HAL_ERROR; + } + else + { + /* Disable all the transfer interrupts */ + hdma->Instance->CR &= ~(DMA_IT_TC | DMA_IT_TE | DMA_IT_DME); + hdma->Instance->FCR &= ~(DMA_IT_FE); + + if((hdma->XferHalfCpltCallback != NULL) || (hdma->XferM1HalfCpltCallback != NULL)) + { + hdma->Instance->CR &= ~(DMA_IT_HT); + } + + /* Disable the stream */ + __HAL_DMA_DISABLE(hdma); + + /* Check if the DMA Stream is effectively disabled */ + while((hdma->Instance->CR & DMA_SxCR_EN) != RESET) + { + /* Check for the Timeout */ + if((HAL_GetTick() - tickstart ) > HAL_TIMEOUT_DMA_ABORT) + { + /* Update error code */ + hdma->ErrorCode = HAL_DMA_ERROR_TIMEOUT; + + /* Change the DMA state */ + hdma->State = HAL_DMA_STATE_TIMEOUT; + + /* Process Unlocked */ + __HAL_UNLOCK(hdma); + + return HAL_TIMEOUT; + } + } + + /* Clear all interrupt flags at correct offset within the register */ + regs->IFCR = 0x3FU << hdma->StreamIndex; + + /* Change the DMA state*/ + hdma->State = HAL_DMA_STATE_READY; + + /* Process Unlocked */ + __HAL_UNLOCK(hdma); + + } + return HAL_OK; +} + +/** + * @brief Aborts the DMA Transfer in Interrupt mode. + * @param hdma pointer to a DMA_HandleTypeDef structure that contains + * the configuration information for the specified DMA Stream. + * @retval HAL status + */ +HAL_StatusTypeDef HAL_DMA_Abort_IT(DMA_HandleTypeDef *hdma) +{ + if(hdma->State != HAL_DMA_STATE_BUSY) + { + hdma->ErrorCode = HAL_DMA_ERROR_NO_XFER; + return HAL_ERROR; + } + else + { + /* Set Abort State */ + hdma->State = HAL_DMA_STATE_ABORT; + + /* Disable the stream */ + __HAL_DMA_DISABLE(hdma); + } + + return HAL_OK; +} + +/** + * @brief Polling for transfer complete. + * @param hdma pointer to a DMA_HandleTypeDef structure that contains + * the configuration information for the specified DMA Stream. + * @param CompleteLevel Specifies the DMA level complete. + * @note The polling mode is kept in this version for legacy. it is recommended to use the IT model instead. + * This model could be used for debug purpose. + * @note The HAL_DMA_PollForTransfer API cannot be used in circular and double buffering mode (automatic circular mode). + * @param Timeout Timeout duration. 
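A sketch of the non-blocking abort path; the abort callback is invoked from HAL_DMA_IRQHandler() once the stream has actually stopped. Names are illustrative.

#include "stm32f7xx_hal.h"

static void dma_abort_done(DMA_HandleTypeDef *hdma)
{
  /* stream disabled, flags cleared, state back to HAL_DMA_STATE_READY */
}

static void cancel_transfer(DMA_HandleTypeDef *hdma)
{
  hdma->XferAbortCallback = dma_abort_done;

  if (HAL_DMA_Abort_IT(hdma) != HAL_OK)
  {
    /* HAL_ERROR: nothing to abort, the stream was not in HAL_DMA_STATE_BUSY */
  }
}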
+ * @retval HAL status + */ +HAL_StatusTypeDef HAL_DMA_PollForTransfer(DMA_HandleTypeDef *hdma, HAL_DMA_LevelCompleteTypeDef CompleteLevel, uint32_t Timeout) +{ + HAL_StatusTypeDef status = HAL_OK; + uint32_t mask_cpltlevel; + uint32_t tickstart = HAL_GetTick(); + uint32_t tmpisr; + + /* calculate DMA base and stream number */ + DMA_Base_Registers *regs; + + if(HAL_DMA_STATE_BUSY != hdma->State) + { + /* No transfer ongoing */ + hdma->ErrorCode = HAL_DMA_ERROR_NO_XFER; + __HAL_UNLOCK(hdma); + return HAL_ERROR; + } + + /* Polling mode not supported in circular mode and double buffering mode */ + if ((hdma->Instance->CR & DMA_SxCR_CIRC) != RESET) + { + hdma->ErrorCode = HAL_DMA_ERROR_NOT_SUPPORTED; + return HAL_ERROR; + } + + /* Get the level transfer complete flag */ + if(CompleteLevel == HAL_DMA_FULL_TRANSFER) + { + /* Transfer Complete flag */ + mask_cpltlevel = DMA_FLAG_TCIF0_4 << hdma->StreamIndex; + } + else + { + /* Half Transfer Complete flag */ + mask_cpltlevel = DMA_FLAG_HTIF0_4 << hdma->StreamIndex; + } + + regs = (DMA_Base_Registers *)hdma->StreamBaseAddress; + tmpisr = regs->ISR; + + while(((tmpisr & mask_cpltlevel) == RESET) && ((hdma->ErrorCode & HAL_DMA_ERROR_TE) == RESET)) + { + /* Check for the Timeout (Not applicable in circular mode)*/ + if(Timeout != HAL_MAX_DELAY) + { + if((Timeout == 0)||((HAL_GetTick() - tickstart ) > Timeout)) + { + /* Update error code */ + hdma->ErrorCode = HAL_DMA_ERROR_TIMEOUT; + + /* Change the DMA state */ + hdma->State = HAL_DMA_STATE_READY; + + /* Process Unlocked */ + __HAL_UNLOCK(hdma); + + return HAL_TIMEOUT; + } + } + + /* Get the ISR register value */ + tmpisr = regs->ISR; + + if((tmpisr & (DMA_FLAG_TEIF0_4 << hdma->StreamIndex)) != RESET) + { + /* Update error code */ + hdma->ErrorCode |= HAL_DMA_ERROR_TE; + + /* Clear the transfer error flag */ + regs->IFCR = DMA_FLAG_TEIF0_4 << hdma->StreamIndex; + } + + if((tmpisr & (DMA_FLAG_FEIF0_4 << hdma->StreamIndex)) != RESET) + { + /* Update error code */ + hdma->ErrorCode |= HAL_DMA_ERROR_FE; + + /* Clear the FIFO error flag */ + regs->IFCR = DMA_FLAG_FEIF0_4 << hdma->StreamIndex; + } + + if((tmpisr & (DMA_FLAG_DMEIF0_4 << hdma->StreamIndex)) != RESET) + { + /* Update error code */ + hdma->ErrorCode |= HAL_DMA_ERROR_DME; + + /* Clear the Direct Mode error flag */ + regs->IFCR = DMA_FLAG_DMEIF0_4 << hdma->StreamIndex; + } + } + + if(hdma->ErrorCode != HAL_DMA_ERROR_NONE) + { + if((hdma->ErrorCode & HAL_DMA_ERROR_TE) != RESET) + { + HAL_DMA_Abort(hdma); + + /* Clear the half transfer and transfer complete flags */ + regs->IFCR = (DMA_FLAG_HTIF0_4 | DMA_FLAG_TCIF0_4) << hdma->StreamIndex; + + /* Change the DMA state */ + hdma->State= HAL_DMA_STATE_READY; + + /* Process Unlocked */ + __HAL_UNLOCK(hdma); + + return HAL_ERROR; + } + } + + /* Get the level transfer complete flag */ + if(CompleteLevel == HAL_DMA_FULL_TRANSFER) + { + /* Clear the half transfer and transfer complete flags */ + regs->IFCR = (DMA_FLAG_HTIF0_4 | DMA_FLAG_TCIF0_4) << hdma->StreamIndex; + + hdma->State = HAL_DMA_STATE_READY; + + /* Process Unlocked */ + __HAL_UNLOCK(hdma); + + } + else + { + /* Clear the half transfer flag */ + regs->IFCR = (DMA_FLAG_HTIF0_4) << hdma->StreamIndex; + } + + return status; +} + +/** + * @brief Handles DMA interrupt request. + * @param hdma pointer to a DMA_HandleTypeDef structure that contains + * the configuration information for the specified DMA Stream. 
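For the polling model kept for legacy use, a blocking helper could look like this (valid only when the stream is not in circular or double-buffer mode); the 100 ms timeout is arbitrary.

#include "stm32f7xx_hal.h"

static HAL_StatusTypeDef dma_copy_blocking(DMA_HandleTypeDef *hdma,
                                           uint32_t src, uint32_t dst, uint32_t len)
{
  HAL_StatusTypeDef st = HAL_DMA_Start(hdma, src, dst, len);

  if (st == HAL_OK)
  {
    /* HAL_TIMEOUT after 100 ms, HAL_ERROR on a transfer error (details via HAL_DMA_GetError()) */
    st = HAL_DMA_PollForTransfer(hdma, HAL_DMA_FULL_TRANSFER, 100U);
  }
  return st;
}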
+ * @retval None + */ +void HAL_DMA_IRQHandler(DMA_HandleTypeDef *hdma) +{ + uint32_t tmpisr; + __IO uint32_t count = 0; + uint32_t timeout = SystemCoreClock / 9600; + + /* calculate DMA base and stream number */ + DMA_Base_Registers *regs = (DMA_Base_Registers *)hdma->StreamBaseAddress; + + tmpisr = regs->ISR; + + /* Transfer Error Interrupt management ***************************************/ + if ((tmpisr & (DMA_FLAG_TEIF0_4 << hdma->StreamIndex)) != RESET) + { + if(__HAL_DMA_GET_IT_SOURCE(hdma, DMA_IT_TE) != RESET) + { + /* Disable the transfer error interrupt */ + hdma->Instance->CR &= ~(DMA_IT_TE); + + /* Clear the transfer error flag */ + regs->IFCR = DMA_FLAG_TEIF0_4 << hdma->StreamIndex; + + /* Update error code */ + hdma->ErrorCode |= HAL_DMA_ERROR_TE; + } + } + /* FIFO Error Interrupt management ******************************************/ + if ((tmpisr & (DMA_FLAG_FEIF0_4 << hdma->StreamIndex)) != RESET) + { + if(__HAL_DMA_GET_IT_SOURCE(hdma, DMA_IT_FE) != RESET) + { + /* Clear the FIFO error flag */ + regs->IFCR = DMA_FLAG_FEIF0_4 << hdma->StreamIndex; + + /* Update error code */ + hdma->ErrorCode |= HAL_DMA_ERROR_FE; + } + } + /* Direct Mode Error Interrupt management ***********************************/ + if ((tmpisr & (DMA_FLAG_DMEIF0_4 << hdma->StreamIndex)) != RESET) + { + if(__HAL_DMA_GET_IT_SOURCE(hdma, DMA_IT_DME) != RESET) + { + /* Clear the direct mode error flag */ + regs->IFCR = DMA_FLAG_DMEIF0_4 << hdma->StreamIndex; + + /* Update error code */ + hdma->ErrorCode |= HAL_DMA_ERROR_DME; + } + } + /* Half Transfer Complete Interrupt management ******************************/ + if ((tmpisr & (DMA_FLAG_HTIF0_4 << hdma->StreamIndex)) != RESET) + { + if(__HAL_DMA_GET_IT_SOURCE(hdma, DMA_IT_HT) != RESET) + { + /* Clear the half transfer complete flag */ + regs->IFCR = DMA_FLAG_HTIF0_4 << hdma->StreamIndex; + + /* Multi_Buffering mode enabled */ + if(((hdma->Instance->CR) & (uint32_t)(DMA_SxCR_DBM)) != RESET) + { + /* Current memory buffer used is Memory 0 */ + if((hdma->Instance->CR & DMA_SxCR_CT) == RESET) + { + if(hdma->XferHalfCpltCallback != NULL) + { + /* Half transfer callback */ + hdma->XferHalfCpltCallback(hdma); + } + } + /* Current memory buffer used is Memory 1 */ + else + { + if(hdma->XferM1HalfCpltCallback != NULL) + { + /* Half transfer callback */ + hdma->XferM1HalfCpltCallback(hdma); + } + } + } + else + { + /* Disable the half transfer interrupt if the DMA mode is not CIRCULAR */ + if((hdma->Instance->CR & DMA_SxCR_CIRC) == RESET) + { + /* Disable the half transfer interrupt */ + hdma->Instance->CR &= ~(DMA_IT_HT); + } + + if(hdma->XferHalfCpltCallback != NULL) + { + /* Half transfer callback */ + hdma->XferHalfCpltCallback(hdma); + } + } + } + } + /* Transfer Complete Interrupt management ***********************************/ + if ((tmpisr & (DMA_FLAG_TCIF0_4 << hdma->StreamIndex)) != RESET) + { + if(__HAL_DMA_GET_IT_SOURCE(hdma, DMA_IT_TC) != RESET) + { + /* Clear the transfer complete flag */ + regs->IFCR = DMA_FLAG_TCIF0_4 << hdma->StreamIndex; + + if(HAL_DMA_STATE_ABORT == hdma->State) + { + /* Disable all the transfer interrupts */ + hdma->Instance->CR &= ~(DMA_IT_TC | DMA_IT_TE | DMA_IT_DME); + hdma->Instance->FCR &= ~(DMA_IT_FE); + + if((hdma->XferHalfCpltCallback != NULL) || (hdma->XferM1HalfCpltCallback != NULL)) + { + hdma->Instance->CR &= ~(DMA_IT_HT); + } + + /* Clear all interrupt flags at correct offset within the register */ + regs->IFCR = 0x3FU << hdma->StreamIndex; + + /* Change the DMA state */ + hdma->State = HAL_DMA_STATE_READY; + + /* 
Process Unlocked */ + __HAL_UNLOCK(hdma); + + if(hdma->XferAbortCallback != NULL) + { + hdma->XferAbortCallback(hdma); + } + return; + } + + if(((hdma->Instance->CR) & (uint32_t)(DMA_SxCR_DBM)) != RESET) + { + /* Current memory buffer used is Memory 0 */ + if((hdma->Instance->CR & DMA_SxCR_CT) == RESET) + { + if(hdma->XferM1CpltCallback != NULL) + { + /* Transfer complete Callback for memory1 */ + hdma->XferM1CpltCallback(hdma); + } + } + /* Current memory buffer used is Memory 1 */ + else + { + if(hdma->XferCpltCallback != NULL) + { + /* Transfer complete Callback for memory0 */ + hdma->XferCpltCallback(hdma); + } + } + } + /* Disable the transfer complete interrupt if the DMA mode is not CIRCULAR */ + else + { + if((hdma->Instance->CR & DMA_SxCR_CIRC) == RESET) + { + /* Disable the transfer complete interrupt */ + hdma->Instance->CR &= ~(DMA_IT_TC); + + /* Change the DMA state */ + hdma->State = HAL_DMA_STATE_READY; + + /* Process Unlocked */ + __HAL_UNLOCK(hdma); + + } + + if(hdma->XferCpltCallback != NULL) + { + /* Transfer complete callback */ + hdma->XferCpltCallback(hdma); + } + } + } + } + + /* manage error case */ + if(hdma->ErrorCode != HAL_DMA_ERROR_NONE) + { + if((hdma->ErrorCode & HAL_DMA_ERROR_TE) != RESET) + { + hdma->State = HAL_DMA_STATE_ABORT; + + /* Disable the stream */ + __HAL_DMA_DISABLE(hdma); + + do + { + if (++count > timeout) + { + break; + } + } + while((hdma->Instance->CR & DMA_SxCR_EN) != RESET); + + /* Change the DMA state */ + hdma->State = HAL_DMA_STATE_READY; + + /* Process Unlocked */ + __HAL_UNLOCK(hdma); + + } + + if(hdma->XferErrorCallback != NULL) + { + /* Transfer error callback */ + hdma->XferErrorCallback(hdma); + } + } +} + +/** + * @brief Register callbacks + * @param hdma pointer to a DMA_HandleTypeDef structure that contains + * the configuration information for the specified DMA Stream. + * @param CallbackID User Callback identifier + * a DMA_HandleTypeDef structure as parameter. + * @param pCallback pointer to private callbacsk function which has pointer to + * a DMA_HandleTypeDef structure as parameter. + * @retval HAL status + */ +HAL_StatusTypeDef HAL_DMA_RegisterCallback(DMA_HandleTypeDef *hdma, HAL_DMA_CallbackIDTypeDef CallbackID, void (* pCallback)(DMA_HandleTypeDef *_hdma)) +{ + + HAL_StatusTypeDef status = HAL_OK; + + /* Process locked */ + __HAL_LOCK(hdma); + + if(HAL_DMA_STATE_READY == hdma->State) + { + switch (CallbackID) + { + case HAL_DMA_XFER_CPLT_CB_ID: + hdma->XferCpltCallback = pCallback; + break; + + case HAL_DMA_XFER_HALFCPLT_CB_ID: + hdma->XferHalfCpltCallback = pCallback; + break; + + case HAL_DMA_XFER_M1CPLT_CB_ID: + hdma->XferM1CpltCallback = pCallback; + break; + + case HAL_DMA_XFER_M1HALFCPLT_CB_ID: + hdma->XferM1HalfCpltCallback = pCallback; + break; + + case HAL_DMA_XFER_ERROR_CB_ID: + hdma->XferErrorCallback = pCallback; + break; + + case HAL_DMA_XFER_ABORT_CB_ID: + hdma->XferAbortCallback = pCallback; + break; + + default: + /* Return error status */ + status = HAL_ERROR; + break; + } + } + else + { + /* Return error status */ + status = HAL_ERROR; + } + + /* Release Lock */ + __HAL_UNLOCK(hdma); + + return status; +} + +/** + * @brief UnRegister callbacks + * @param hdma pointer to a DMA_HandleTypeDef structure that contains + * the configuration information for the specified DMA Stream. + * @param CallbackID User Callback identifier + * a HAL_DMA_CallbackIDTypeDef ENUM as parameter. 
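To route stream interrupts into this handler and hook user callbacks by ID, glue code along these lines is typically needed; the stream (DMA2_Stream7), handle name and interrupt priority are assumptions.

#include "stm32f7xx_hal.h"

extern DMA_HandleTypeDef hdma_usart1_tx;

static void dma_tc_cb(DMA_HandleTypeDef *hdma)  { /* transfer complete */ }
static void dma_err_cb(DMA_HandleTypeDef *hdma) { uint32_t e = HAL_DMA_GetError(hdma); (void)e; }

void DMA2_Stream7_IRQHandler(void)
{
  HAL_DMA_IRQHandler(&hdma_usart1_tx);   /* dispatches TC/HT/TE/FE/DME to the registered callbacks */
}

static void dma_attach_callbacks(void)
{
  /* registration is accepted only while the stream is in HAL_DMA_STATE_READY */
  HAL_DMA_RegisterCallback(&hdma_usart1_tx, HAL_DMA_XFER_CPLT_CB_ID,  dma_tc_cb);
  HAL_DMA_RegisterCallback(&hdma_usart1_tx, HAL_DMA_XFER_ERROR_CB_ID, dma_err_cb);

  HAL_NVIC_SetPriority(DMA2_Stream7_IRQn, 5U, 0U);
  HAL_NVIC_EnableIRQ(DMA2_Stream7_IRQn);
}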
+ * @retval HAL status + */ +HAL_StatusTypeDef HAL_DMA_UnRegisterCallback(DMA_HandleTypeDef *hdma, HAL_DMA_CallbackIDTypeDef CallbackID) +{ + HAL_StatusTypeDef status = HAL_OK; + + /* Process locked */ + __HAL_LOCK(hdma); + + if(HAL_DMA_STATE_READY == hdma->State) + { + switch (CallbackID) + { + case HAL_DMA_XFER_CPLT_CB_ID: + hdma->XferCpltCallback = NULL; + break; + + case HAL_DMA_XFER_HALFCPLT_CB_ID: + hdma->XferHalfCpltCallback = NULL; + break; + + case HAL_DMA_XFER_M1CPLT_CB_ID: + hdma->XferM1CpltCallback = NULL; + break; + + case HAL_DMA_XFER_M1HALFCPLT_CB_ID: + hdma->XferM1HalfCpltCallback = NULL; + break; + + case HAL_DMA_XFER_ERROR_CB_ID: + hdma->XferErrorCallback = NULL; + break; + + case HAL_DMA_XFER_ABORT_CB_ID: + hdma->XferAbortCallback = NULL; + break; + + case HAL_DMA_XFER_ALL_CB_ID: + hdma->XferCpltCallback = NULL; + hdma->XferHalfCpltCallback = NULL; + hdma->XferM1CpltCallback = NULL; + hdma->XferM1HalfCpltCallback = NULL; + hdma->XferErrorCallback = NULL; + hdma->XferAbortCallback = NULL; + break; + + default: + status = HAL_ERROR; + break; + } + } + else + { + status = HAL_ERROR; + } + + /* Release Lock */ + __HAL_UNLOCK(hdma); + + return status; +} + +/** + * @} + */ + +/** @addtogroup DMA_Exported_Functions_Group3 + * +@verbatim + =============================================================================== + ##### State and Errors functions ##### + =============================================================================== + [..] + This subsection provides functions allowing to + (+) Check the DMA state + (+) Get error code + +@endverbatim + * @{ + */ + +/** + * @brief Returns the DMA state. + * @param hdma pointer to a DMA_HandleTypeDef structure that contains + * the configuration information for the specified DMA Stream. + * @retval HAL state + */ +HAL_DMA_StateTypeDef HAL_DMA_GetState(DMA_HandleTypeDef *hdma) +{ + return hdma->State; +} + +/** + * @brief Return the DMA error code + * @param hdma pointer to a DMA_HandleTypeDef structure that contains + * the configuration information for the specified DMA Stream. + * @retval DMA Error Code + */ +uint32_t HAL_DMA_GetError(DMA_HandleTypeDef *hdma) +{ + return hdma->ErrorCode; +} + +/** + * @} + */ + +/** + * @} + */ + +/** @addtogroup DMA_Private_Functions + * @{ + */ + +/** + * @brief Sets the DMA Transfer parameter. + * @param hdma pointer to a DMA_HandleTypeDef structure that contains + * the configuration information for the specified DMA Stream. 
+ * @param SrcAddress The source memory Buffer address + * @param DstAddress The destination memory Buffer address + * @param DataLength The length of data to be transferred from source to destination + * @retval HAL status + */ +static void DMA_SetConfig(DMA_HandleTypeDef *hdma, uint32_t SrcAddress, uint32_t DstAddress, uint32_t DataLength) +{ + /* Clear DBM bit */ + hdma->Instance->CR &= (uint32_t)(~DMA_SxCR_DBM); + + /* Configure DMA Stream data length */ + hdma->Instance->NDTR = DataLength; + + /* Memory to Peripheral */ + if((hdma->Init.Direction) == DMA_MEMORY_TO_PERIPH) + { + /* Configure DMA Stream destination address */ + hdma->Instance->PAR = DstAddress; + + /* Configure DMA Stream source address */ + hdma->Instance->M0AR = SrcAddress; + } + /* Peripheral to Memory */ + else + { + /* Configure DMA Stream source address */ + hdma->Instance->PAR = SrcAddress; + + /* Configure DMA Stream destination address */ + hdma->Instance->M0AR = DstAddress; + } +} + +/** + * @brief Returns the DMA Stream base address depending on stream number + * @param hdma pointer to a DMA_HandleTypeDef structure that contains + * the configuration information for the specified DMA Stream. + * @retval Stream base address + */ +static uint32_t DMA_CalcBaseAndBitshift(DMA_HandleTypeDef *hdma) +{ + uint32_t stream_number = (((uint32_t)hdma->Instance & 0xFFU) - 16U) / 24U; + + /* lookup table for necessary bitshift of flags within status registers */ + static const uint8_t flagBitshiftOffset[8U] = {0U, 6U, 16U, 22U, 0U, 6U, 16U, 22U}; + hdma->StreamIndex = flagBitshiftOffset[stream_number]; + + if (stream_number > 3U) + { + /* return pointer to HISR and HIFCR */ + hdma->StreamBaseAddress = (((uint32_t)hdma->Instance & (uint32_t)(~0x3FFU)) + 4U); + } + else + { + /* return pointer to LISR and LIFCR */ + hdma->StreamBaseAddress = ((uint32_t)hdma->Instance & (uint32_t)(~0x3FFU)); + } + + return hdma->StreamBaseAddress; +} + +/** + * @brief Check compatibility between FIFO threshold level and size of the memory burst + * @param hdma pointer to a DMA_HandleTypeDef structure that contains + * the configuration information for the specified DMA Stream. 
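To make the base/bit-shift computation in DMA_CalcBaseAndBitshift() concrete, here is the arithmetic traced for one stream, assuming the usual STM32F7 memory map:

/* Worked example (assumed addresses): DMA1_Stream5 sits at 0x4002 6088.
 *   stream_number = (0x88 - 16U) / 24U        = 5
 *   StreamIndex   = flagBitshiftOffset[5]     = 6
 *   stream_number > 3, so StreamBaseAddress = (0x40026088 & ~0x3FFU) + 4U = 0x40026004 (the HISR/HIFCR pair)
 * Hence DMA_FLAG_TCIF0_4 (bit 5) shifted left by 6 selects bit 11 of HISR, i.e. TCIF5. */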
+ * @retval HAL status + */ +static HAL_StatusTypeDef DMA_CheckFifoParam(DMA_HandleTypeDef *hdma) +{ + HAL_StatusTypeDef status = HAL_OK; + uint32_t tmp = hdma->Init.FIFOThreshold; + + /* Memory Data size equal to Byte */ + if(hdma->Init.MemDataAlignment == DMA_MDATAALIGN_BYTE) + { + switch (tmp) + { + case DMA_FIFO_THRESHOLD_1QUARTERFULL: + case DMA_FIFO_THRESHOLD_3QUARTERSFULL: + if ((hdma->Init.MemBurst & DMA_SxCR_MBURST_1) == DMA_SxCR_MBURST_1) + { + status = HAL_ERROR; + } + break; + case DMA_FIFO_THRESHOLD_HALFFULL: + if (hdma->Init.MemBurst == DMA_MBURST_INC16) + { + status = HAL_ERROR; + } + break; + case DMA_FIFO_THRESHOLD_FULL: + break; + default: + break; + } + } + + /* Memory Data size equal to Half-Word */ + else if (hdma->Init.MemDataAlignment == DMA_MDATAALIGN_HALFWORD) + { + switch (tmp) + { + case DMA_FIFO_THRESHOLD_1QUARTERFULL: + case DMA_FIFO_THRESHOLD_3QUARTERSFULL: + status = HAL_ERROR; + break; + case DMA_FIFO_THRESHOLD_HALFFULL: + if ((hdma->Init.MemBurst & DMA_SxCR_MBURST_1) == DMA_SxCR_MBURST_1) + { + status = HAL_ERROR; + } + break; + case DMA_FIFO_THRESHOLD_FULL: + if (hdma->Init.MemBurst == DMA_MBURST_INC16) + { + status = HAL_ERROR; + } + break; + default: + break; + } + } + + /* Memory Data size equal to Word */ + else + { + switch (tmp) + { + case DMA_FIFO_THRESHOLD_1QUARTERFULL: + case DMA_FIFO_THRESHOLD_HALFFULL: + case DMA_FIFO_THRESHOLD_3QUARTERSFULL: + status = HAL_ERROR; + break; + case DMA_FIFO_THRESHOLD_FULL: + if ((hdma->Init.MemBurst & DMA_SxCR_MBURST_1) == DMA_SxCR_MBURST_1) + { + status = HAL_ERROR; + } + break; + default: + break; + } + } + + return status; +} + +/** + * @} + */ + +#endif /* HAL_DMA_MODULE_ENABLED */ +/** + * @} + */ + +/** + * @} + */ + diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_dma_ex.c b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_dma_ex.c new file mode 100644 index 0000000..8aadcc4 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_dma_ex.c @@ -0,0 +1,308 @@ +/** + ****************************************************************************** + * @file stm32f7xx_hal_dma_ex.c + * @author MCD Application Team + * @brief DMA Extension HAL module driver + * This file provides firmware functions to manage the following + * functionalities of the DMA Extension peripheral: + * + Extended features functions + * + @verbatim + ============================================================================== + ##### How to use this driver ##### + ============================================================================== + [..] + The DMA Extension HAL driver can be used as follows: + (+) Start a multi buffer transfer using the HAL_DMA_MultiBufferStart() function + for polling mode or HAL_DMA_MultiBufferStart_IT() for interrupt mode. + + -@- In Memory-to-Memory transfer mode, Multi (Double) Buffer mode is not allowed. + -@- When Multi (Double) Buffer mode is enabled, the transfer is circular by default. + -@- In Multi (Double) buffer mode, it is possible to update the base address for + the AHB memory port on the fly (DMA_SxM0AR or DMA_SxM1AR) when the stream is enabled. + + @endverbatim + ****************************************************************************** + * @attention + * + * Copyright (c) 2017 STMicroelectronics. + * All rights reserved. 
+ * + * This software is licensed under terms that can be found in the LICENSE file in + * the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. + * + ****************************************************************************** + */ + +/* Includes ------------------------------------------------------------------*/ +#include "stm32f7xx_hal.h" + +/** @addtogroup STM32F7xx_HAL_Driver + * @{ + */ + +/** @defgroup DMAEx DMAEx + * @brief DMA Extended HAL module driver + * @{ + */ + +#ifdef HAL_DMA_MODULE_ENABLED + +/* Private types -------------------------------------------------------------*/ +/* Private variables ---------------------------------------------------------*/ +/* Private Constants ---------------------------------------------------------*/ +/* Private macros ------------------------------------------------------------*/ +/* Private functions ---------------------------------------------------------*/ +/** @addtogroup DMAEx_Private_Functions + * @{ + */ + +static void DMA_MultiBufferSetConfig(DMA_HandleTypeDef *hdma, uint32_t SrcAddress, uint32_t DstAddress, uint32_t DataLength); + +/** + * @} + */ + +/* Exported functions ---------------------------------------------------------*/ + +/** @addtogroup DMAEx_Exported_Functions + * @{ + */ + + +/** @addtogroup DMAEx_Exported_Functions_Group1 + * +@verbatim + =============================================================================== + ##### Extended features functions ##### + =============================================================================== + [..] This section provides functions allowing to: + (+) Configure the source, destination address and data length and + Start MultiBuffer DMA transfer + (+) Configure the source, destination address and data length and + Start MultiBuffer DMA transfer with interrupt + (+) Change on the fly the memory0 or memory1 address. + +@endverbatim + * @{ + */ + + +/** + * @brief Starts the multi_buffer DMA Transfer. + * @param hdma pointer to a DMA_HandleTypeDef structure that contains + * the configuration information for the specified DMA Stream. 
+ * @param SrcAddress The source memory Buffer address + * @param DstAddress The destination memory Buffer address + * @param SecondMemAddress The second memory Buffer address in case of multi buffer Transfer + * @param DataLength The length of data to be transferred from source to destination + * @retval HAL status + */ +HAL_StatusTypeDef HAL_DMAEx_MultiBufferStart(DMA_HandleTypeDef *hdma, uint32_t SrcAddress, uint32_t DstAddress, uint32_t SecondMemAddress, uint32_t DataLength) +{ + HAL_StatusTypeDef status = HAL_OK; + + /* Check the parameters */ + assert_param(IS_DMA_BUFFER_SIZE(DataLength)); + + /* Memory-to-memory transfer not supported in double buffering mode */ + if (hdma->Init.Direction == DMA_MEMORY_TO_MEMORY) + { + hdma->ErrorCode = HAL_DMA_ERROR_NOT_SUPPORTED; + status = HAL_ERROR; + } + else + { + /* Process Locked */ + __HAL_LOCK(hdma); + + if(HAL_DMA_STATE_READY == hdma->State) + { + /* Change DMA peripheral state */ + hdma->State = HAL_DMA_STATE_BUSY; + + /* Enable the double buffer mode */ + hdma->Instance->CR |= (uint32_t)DMA_SxCR_DBM; + + /* Configure DMA Stream destination address */ + hdma->Instance->M1AR = SecondMemAddress; + + /* Configure the source, destination address and the data length */ + DMA_MultiBufferSetConfig(hdma, SrcAddress, DstAddress, DataLength); + + /* Enable the peripheral */ + __HAL_DMA_ENABLE(hdma); + } + else + { + /* Return error status */ + status = HAL_BUSY; + } + } + return status; +} + +/** + * @brief Starts the multi_buffer DMA Transfer with interrupt enabled. + * @param hdma pointer to a DMA_HandleTypeDef structure that contains + * the configuration information for the specified DMA Stream. + * @param SrcAddress The source memory Buffer address + * @param DstAddress The destination memory Buffer address + * @param SecondMemAddress The second memory Buffer address in case of multi buffer Transfer + * @param DataLength The length of data to be transferred from source to destination + * @retval HAL status + */ +HAL_StatusTypeDef HAL_DMAEx_MultiBufferStart_IT(DMA_HandleTypeDef *hdma, uint32_t SrcAddress, uint32_t DstAddress, uint32_t SecondMemAddress, uint32_t DataLength) +{ + HAL_StatusTypeDef status = HAL_OK; + + /* Check the parameters */ + assert_param(IS_DMA_BUFFER_SIZE(DataLength)); + + /* Memory-to-memory transfer not supported in double buffering mode */ + if (hdma->Init.Direction == DMA_MEMORY_TO_MEMORY) + { + hdma->ErrorCode = HAL_DMA_ERROR_NOT_SUPPORTED; + return HAL_ERROR; + } + + /* Process locked */ + __HAL_LOCK(hdma); + + if(HAL_DMA_STATE_READY == hdma->State) + { + /* Change DMA peripheral state */ + hdma->State = HAL_DMA_STATE_BUSY; + + /* Initialize the error code */ + hdma->ErrorCode = HAL_DMA_ERROR_NONE; + + /* Enable the Double buffer mode */ + hdma->Instance->CR |= (uint32_t)DMA_SxCR_DBM; + + /* Configure DMA Stream destination address */ + hdma->Instance->M1AR = SecondMemAddress; + + /* Configure the source, destination address and the data length */ + DMA_MultiBufferSetConfig(hdma, SrcAddress, DstAddress, DataLength); + + /* Clear all flags */ + __HAL_DMA_CLEAR_FLAG (hdma, __HAL_DMA_GET_TC_FLAG_INDEX(hdma)); + __HAL_DMA_CLEAR_FLAG (hdma, __HAL_DMA_GET_HT_FLAG_INDEX(hdma)); + __HAL_DMA_CLEAR_FLAG (hdma, __HAL_DMA_GET_TE_FLAG_INDEX(hdma)); + __HAL_DMA_CLEAR_FLAG (hdma, __HAL_DMA_GET_DME_FLAG_INDEX(hdma)); + __HAL_DMA_CLEAR_FLAG (hdma, __HAL_DMA_GET_FE_FLAG_INDEX(hdma)); + + /* Enable Common interrupts*/ + hdma->Instance->CR |= DMA_IT_TC | DMA_IT_TE | DMA_IT_DME; + hdma->Instance->FCR |= DMA_IT_FE; + + 
if((hdma->XferHalfCpltCallback != NULL) || (hdma->XferM1HalfCpltCallback != NULL)) + { + hdma->Instance->CR |= DMA_IT_HT; + } + + /* Enable the peripheral */ + __HAL_DMA_ENABLE(hdma); + } + else + { + /* Process unlocked */ + __HAL_UNLOCK(hdma); + + /* Return error status */ + status = HAL_BUSY; + } + return status; +} + +/** + * @brief Change the memory0 or memory1 address on the fly. + * @param hdma pointer to a DMA_HandleTypeDef structure that contains + * the configuration information for the specified DMA Stream. + * @param Address The new address + * @param memory the memory to be changed, This parameter can be one of + * the following values: + * MEMORY0 / + * MEMORY1 + * @note The MEMORY0 address can be changed only when the current transfer use + * MEMORY1 and the MEMORY1 address can be changed only when the current + * transfer use MEMORY0. + * @retval HAL status + */ +HAL_StatusTypeDef HAL_DMAEx_ChangeMemory(DMA_HandleTypeDef *hdma, uint32_t Address, HAL_DMA_MemoryTypeDef memory) +{ + if(memory == MEMORY0) + { + /* change the memory0 address */ + hdma->Instance->M0AR = Address; + } + else + { + /* change the memory1 address */ + hdma->Instance->M1AR = Address; + } + + return HAL_OK; +} + +/** + * @} + */ + +/** + * @} + */ + +/** @addtogroup DMAEx_Private_Functions + * @{ + */ + +/** + * @brief Set the DMA Transfer parameter. + * @param hdma pointer to a DMA_HandleTypeDef structure that contains + * the configuration information for the specified DMA Stream. + * @param SrcAddress The source memory Buffer address + * @param DstAddress The destination memory Buffer address + * @param DataLength The length of data to be transferred from source to destination + * @retval HAL status + */ +static void DMA_MultiBufferSetConfig(DMA_HandleTypeDef *hdma, uint32_t SrcAddress, uint32_t DstAddress, uint32_t DataLength) +{ + /* Configure DMA Stream data length */ + hdma->Instance->NDTR = DataLength; + + /* Peripheral to Memory */ + if((hdma->Init.Direction) == DMA_MEMORY_TO_PERIPH) + { + /* Configure DMA Stream destination address */ + hdma->Instance->PAR = DstAddress; + + /* Configure DMA Stream source address */ + hdma->Instance->M0AR = SrcAddress; + } + /* Memory to Peripheral */ + else + { + /* Configure DMA Stream source address */ + hdma->Instance->PAR = SrcAddress; + + /* Configure DMA Stream destination address */ + hdma->Instance->M0AR = DstAddress; + } +} + +/** + * @} + */ + +#endif /* HAL_DMA_MODULE_ENABLED */ +/** + * @} + */ + +/** + * @} + */ + diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_eth.c b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_eth.c new file mode 100644 index 0000000..1a21815 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_eth.c @@ -0,0 +1,2290 @@ +/** + ****************************************************************************** + * @file stm32f7xx_hal_eth.c + * @author MCD Application Team + * @brief ETH HAL module driver. + * This file provides firmware functions to manage the following + * functionalities of the Ethernet (ETH) peripheral: + * + Initialization and de-initialization functions + * + IO operation functions + * + Peripheral Control functions + * + Peripheral State and Errors functions + * + ****************************************************************************** + * @attention + * + * Copyright (c) 2017 STMicroelectronics. + * All rights reserved. 
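A double-buffer (ping-pong) reception sketch built on the multi-buffer start functions above; the ADC data register as source, the buffer sizes and the callback names are assumptions, and the stream is presumed configured with Init.Direction = DMA_PERIPH_TO_MEMORY.

#include "stm32f7xx_hal.h"

#define CAPTURE_LEN 256U
static uint16_t buf0[CAPTURE_LEN];
static uint16_t buf1[CAPTURE_LEN];

static void buf0_full(DMA_HandleTypeDef *hdma) { /* memory 0 filled; the DMA is now writing memory 1 */ }
static void buf1_full(DMA_HandleTypeDef *hdma) { /* memory 1 filled; the DMA is now writing memory 0 */ }

static HAL_StatusTypeDef start_ping_pong(DMA_HandleTypeDef *hdma)
{
  hdma->XferCpltCallback   = buf0_full;    /* memory-0 transfer complete */
  hdma->XferM1CpltCallback = buf1_full;    /* memory-1 transfer complete */

  /* not allowed for memory-to-memory; once DBM is set the transfer is circular, and the idle
     buffer can be swapped on the fly with HAL_DMAEx_ChangeMemory() */
  return HAL_DMAEx_MultiBufferStart_IT(hdma,
                                       (uint32_t)&ADC1->DR,   /* peripheral source (assumed) */
                                       (uint32_t)buf0,        /* memory 0 */
                                       (uint32_t)buf1,        /* memory 1 */
                                       CAPTURE_LEN);
}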
+ * + * This software is licensed under terms that can be found in the LICENSE file + * in the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. + * + ****************************************************************************** + @verbatim + ============================================================================== + ##### How to use this driver ##### + ============================================================================== + [..] + (#)Declare a ETH_HandleTypeDef handle structure, for example: + ETH_HandleTypeDef heth; + + (#)Fill parameters of Init structure in heth handle + + (#)Call HAL_ETH_Init() API to initialize the Ethernet peripheral (MAC, DMA, ...) + + (#)Initialize the ETH low level resources through the HAL_ETH_MspInit() API: + (##) Enable the Ethernet interface clock using + (+++) __HAL_RCC_ETHMAC_CLK_ENABLE(); + (+++) __HAL_RCC_ETHMACTX_CLK_ENABLE(); + (+++) __HAL_RCC_ETHMACRX_CLK_ENABLE(); + + (##) Initialize the related GPIO clocks + (##) Configure Ethernet pin-out + (##) Configure Ethernet NVIC interrupt (IT mode) + + (#)Initialize Ethernet DMA Descriptors in chain mode and point to allocated buffers: + (##) HAL_ETH_DMATxDescListInit(); for Transmission process + (##) HAL_ETH_DMARxDescListInit(); for Reception process + + (#)Enable MAC and DMA transmission and reception: + (##) HAL_ETH_Start(); + + (#)Prepare ETH DMA TX Descriptors and give the hand to ETH DMA to transfer + the frame to MAC TX FIFO: + (##) HAL_ETH_TransmitFrame(); + + (#)Poll for a received frame in ETH RX DMA Descriptors and get received + frame parameters + (##) HAL_ETH_GetReceivedFrame(); (should be called into an infinite loop) + + (#) Get a received frame when an ETH RX interrupt occurs: + (##) HAL_ETH_GetReceivedFrame_IT(); (called in IT mode only) + + (#) Communicate with external PHY device: + (##) Read a specific register from the PHY + HAL_ETH_ReadPHYRegister(); + (##) Write data to a specific RHY register: + HAL_ETH_WritePHYRegister(); + + (#) Configure the Ethernet MAC after ETH peripheral initialization + HAL_ETH_ConfigMAC(); all MAC parameters should be filled. + + (#) Configure the Ethernet DMA after ETH peripheral initialization + HAL_ETH_ConfigDMA(); all DMA parameters should be filled. + +*** Callback registration *** + ============================================= + + The compilation define USE_HAL_ETH_REGISTER_CALLBACKS when set to 1 + allows the user to configure dynamically the driver callbacks. + Use Function @ref HAL_ETH_RegisterCallback() to register an interrupt callback. + + Function @ref HAL_ETH_RegisterCallback() allows to register following callbacks: + (+) TxCpltCallback : Tx Complete Callback. + (+) RxCpltCallback : Rx Complete Callback. + (+) DMAErrorCallback : DMA Error Callback. + (+) MspInitCallback : MspInit Callback. + (+) MspDeInitCallback: MspDeInit Callback. + + This function takes as parameters the HAL peripheral handle, the Callback ID + and a pointer to the user callback function. + + Use function @ref HAL_ETH_UnRegisterCallback() to reset a callback to the default + weak function. + @ref HAL_ETH_UnRegisterCallback takes as parameters the HAL peripheral handle, + and the Callback ID. + This function allows to reset following callbacks: + (+) TxCpltCallback : Tx Complete Callback. + (+) RxCpltCallback : Rx Complete Callback. + (+) DMAErrorCallback : DMA Error Callback. + (+) MspInitCallback : MspInit Callback. + (+) MspDeInitCallback: MspDeInit Callback. 
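For the registration mechanism just listed, a sketch of hooking a user Rx-complete callback (handle and function names are illustrative):

#include "stm32f7xx_hal.h"

#if (USE_HAL_ETH_REGISTER_CALLBACKS == 1)
extern ETH_HandleTypeDef heth;

static void app_rx_complete(ETH_HandleTypeDef *h)
{
  /* a frame is pending; fetch it with HAL_ETH_GetReceivedFrame_IT() */
}

static void eth_attach_callbacks(void)
{
  /* accepted only in HAL_ETH_STATE_READY, i.e. after HAL_ETH_Init() */
  if (HAL_ETH_RegisterCallback(&heth, HAL_ETH_RX_COMPLETE_CB_ID, app_rx_complete) != HAL_OK)
  {
    /* refused: NULL pointer, unknown ID or wrong state */
  }
}
#endif /* USE_HAL_ETH_REGISTER_CALLBACKS */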
+ + By default, after the HAL_ETH_Init and when the state is HAL_ETH_STATE_RESET + all callbacks are set to the corresponding weak functions: + examples @ref HAL_ETH_TxCpltCallback(), @ref HAL_ETH_RxCpltCallback(). + Exception done for MspInit and MspDeInit functions that are + reset to the legacy weak function in the HAL_ETH_Init/ @ref HAL_ETH_DeInit only when + these callbacks are null (not registered beforehand). + if not, MspInit or MspDeInit are not null, the HAL_ETH_Init/ @ref HAL_ETH_DeInit + keep and use the user MspInit/MspDeInit callbacks (registered beforehand) + + Callbacks can be registered/unregistered in HAL_ETH_STATE_READY state only. + Exception done MspInit/MspDeInit that can be registered/unregistered + in HAL_ETH_STATE_READY or HAL_ETH_STATE_RESET state, + thus registered (user) MspInit/DeInit callbacks can be used during the Init/DeInit. + In that case first register the MspInit/MspDeInit user callbacks + using @ref HAL_ETH_RegisterCallback() before calling @ref HAL_ETH_DeInit + or HAL_ETH_Init function. + + When The compilation define USE_HAL_ETH_REGISTER_CALLBACKS is set to 0 or + not defined, the callback registration feature is not available and all callbacks + are set to the corresponding weak functions. + + @endverbatim + ****************************************************************************** + */ + +/* Includes ------------------------------------------------------------------*/ +#include "stm32f7xx_hal.h" + +/** @addtogroup STM32F7xx_HAL_Driver + * @{ + */ + +/** @defgroup ETH ETH + * @brief ETH HAL module driver + * @{ + */ + +#ifdef HAL_ETH_MODULE_ENABLED +#if defined (ETH) + +/* Private typedef -----------------------------------------------------------*/ +/* Private define ------------------------------------------------------------*/ +/** @defgroup ETH_Private_Constants ETH Private Constants + * @{ + */ +#define ETH_TIMEOUT_SWRESET ((uint32_t)500) +#define ETH_TIMEOUT_LINKED_STATE ((uint32_t)5000) +#define ETH_TIMEOUT_AUTONEGO_COMPLETED ((uint32_t)5000) + +/** + * @} + */ +/* Private macro -------------------------------------------------------------*/ +/* Private variables ---------------------------------------------------------*/ +/* Private function prototypes -----------------------------------------------*/ +/** @defgroup ETH_Private_Functions ETH Private Functions + * @{ + */ +static void ETH_MACDMAConfig(ETH_HandleTypeDef *heth, uint32_t err); +static void ETH_MACAddressConfig(ETH_HandleTypeDef *heth, uint32_t MacAddr, uint8_t *Addr); +static void ETH_MACReceptionEnable(ETH_HandleTypeDef *heth); +static void ETH_MACReceptionDisable(ETH_HandleTypeDef *heth); +static void ETH_MACTransmissionEnable(ETH_HandleTypeDef *heth); +static void ETH_MACTransmissionDisable(ETH_HandleTypeDef *heth); +static void ETH_DMATransmissionEnable(ETH_HandleTypeDef *heth); +static void ETH_DMATransmissionDisable(ETH_HandleTypeDef *heth); +static void ETH_DMAReceptionEnable(ETH_HandleTypeDef *heth); +static void ETH_DMAReceptionDisable(ETH_HandleTypeDef *heth); +static void ETH_FlushTransmitFIFO(ETH_HandleTypeDef *heth); +#if (USE_HAL_ETH_REGISTER_CALLBACKS == 1) +static void ETH_InitCallbacksToDefault(ETH_HandleTypeDef *heth); +#endif /* USE_HAL_ETH_REGISTER_CALLBACKS */ + +/** + * @} + */ +/* Private functions ---------------------------------------------------------*/ + +/** @defgroup ETH_Exported_Functions ETH Exported Functions + * @{ + */ + +/** @defgroup ETH_Exported_Functions_Group1 Initialization and de-initialization functions + * @brief Initialization and 
Configuration functions + * + @verbatim + =============================================================================== + ##### Initialization and de-initialization functions ##### + =============================================================================== + [..] This section provides functions allowing to: + (+) Initialize and configure the Ethernet peripheral + (+) De-initialize the Ethernet peripheral + + @endverbatim + * @{ + */ + +/** + * @brief Initializes the Ethernet MAC and DMA according to default + * parameters. + * @param heth pointer to a ETH_HandleTypeDef structure that contains + * the configuration information for ETHERNET module + * @retval HAL status + */ +HAL_StatusTypeDef HAL_ETH_Init(ETH_HandleTypeDef *heth) +{ + uint32_t tempreg = 0, phyreg = 0; + uint32_t hclk = 60000000; + uint32_t tickstart = 0; + uint32_t err = ETH_SUCCESS; + + /* Check the ETH peripheral state */ + if(heth == NULL) + { + return HAL_ERROR; + } + + /* Check parameters */ + assert_param(IS_ETH_AUTONEGOTIATION(heth->Init.AutoNegotiation)); + assert_param(IS_ETH_RX_MODE(heth->Init.RxMode)); + assert_param(IS_ETH_CHECKSUM_MODE(heth->Init.ChecksumMode)); + assert_param(IS_ETH_MEDIA_INTERFACE(heth->Init.MediaInterface)); + + if(heth->State == HAL_ETH_STATE_RESET) + { + /* Allocate lock resource and initialize it */ + heth->Lock = HAL_UNLOCKED; +#if (USE_HAL_ETH_REGISTER_CALLBACKS == 1) + ETH_InitCallbacksToDefault(heth); + + if(heth->MspInitCallback == NULL) + { + /* Init the low level hardware : GPIO, CLOCK, NVIC. */ + heth->MspInitCallback = HAL_ETH_MspInit; + } + heth->MspInitCallback(heth); + +#else + /* Init the low level hardware : GPIO, CLOCK, NVIC. */ + HAL_ETH_MspInit(heth); +#endif /* USE_HAL_ETH_REGISTER_CALLBACKS */ + } + + /* Enable SYSCFG Clock */ + __HAL_RCC_SYSCFG_CLK_ENABLE(); + + /* Select MII or RMII Mode*/ + SYSCFG->PMC &= ~(SYSCFG_PMC_MII_RMII_SEL); + SYSCFG->PMC |= (uint32_t)heth->Init.MediaInterface; + + /* Ethernet Software reset */ + /* Set the SWR bit: resets all MAC subsystem internal registers and logic */ + /* After reset all the registers holds their respective reset values */ + (heth->Instance)->DMABMR |= ETH_DMABMR_SR; + + /* Get tick */ + tickstart = HAL_GetTick(); + + /* Wait for software reset */ + while (((heth->Instance)->DMABMR & ETH_DMABMR_SR) != (uint32_t)RESET) + { + /* Check for the Timeout */ + if((HAL_GetTick() - tickstart ) > ETH_TIMEOUT_SWRESET) + { + heth->State= HAL_ETH_STATE_TIMEOUT; + + /* Process Unlocked */ + __HAL_UNLOCK(heth); + + /* Note: The SWR is not performed if the ETH_RX_CLK or the ETH_TX_CLK are + not available, please check your external PHY or the IO configuration */ + + return HAL_TIMEOUT; + } + } + + /*-------------------------------- MAC Initialization ----------------------*/ + /* Get the ETHERNET MACMIIAR value */ + tempreg = (heth->Instance)->MACMIIAR; + /* Clear CSR Clock Range CR[2:0] bits */ + tempreg &= ETH_MACMIIAR_CR_MASK; + + /* Get hclk frequency value */ + hclk = HAL_RCC_GetHCLKFreq(); + + /* Set CR bits depending on hclk value */ + if((hclk >= 20000000)&&(hclk < 35000000)) + { + /* CSR Clock Range between 20-35 MHz */ + tempreg |= (uint32_t)ETH_MACMIIAR_CR_Div16; + } + else if((hclk >= 35000000)&&(hclk < 60000000)) + { + /* CSR Clock Range between 35-60 MHz */ + tempreg |= (uint32_t)ETH_MACMIIAR_CR_Div26; + } + else if((hclk >= 60000000)&&(hclk < 100000000)) + { + /* CSR Clock Range between 60-100 MHz */ + tempreg |= (uint32_t)ETH_MACMIIAR_CR_Div42; + } + else if((hclk >= 100000000)&&(hclk < 150000000)) + { + /* CSR 
Clock Range between 100-150 MHz */ + tempreg |= (uint32_t)ETH_MACMIIAR_CR_Div62; + } + else /* ((hclk >= 150000000)&&(hclk <= 216000000)) */ + { + /* CSR Clock Range between 150-216 MHz */ + tempreg |= (uint32_t)ETH_MACMIIAR_CR_Div102; + } + + /* Write to ETHERNET MAC MIIAR: Configure the ETHERNET CSR Clock Range */ + (heth->Instance)->MACMIIAR = (uint32_t)tempreg; + + /*-------------------- PHY initialization and configuration ----------------*/ + /* Put the PHY in reset mode */ + if((HAL_ETH_WritePHYRegister(heth, PHY_BCR, PHY_RESET)) != HAL_OK) + { + /* In case of write timeout */ + err = ETH_ERROR; + + /* Config MAC and DMA */ + ETH_MACDMAConfig(heth, err); + + /* Set the ETH peripheral state to READY */ + heth->State = HAL_ETH_STATE_READY; + + /* Return HAL_ERROR */ + return HAL_ERROR; + } + + /* Delay to assure PHY reset */ + HAL_Delay(PHY_RESET_DELAY); + + if((heth->Init).AutoNegotiation != ETH_AUTONEGOTIATION_DISABLE) + { + /* Get tick */ + tickstart = HAL_GetTick(); + + /* We wait for linked status */ + do + { + HAL_ETH_ReadPHYRegister(heth, PHY_BSR, &phyreg); + + /* Check for the Timeout */ + if((HAL_GetTick() - tickstart ) > ETH_TIMEOUT_LINKED_STATE) + { + /* In case of write timeout */ + err = ETH_ERROR; + + /* Config MAC and DMA */ + ETH_MACDMAConfig(heth, err); + + heth->State= HAL_ETH_STATE_READY; + + /* Process Unlocked */ + __HAL_UNLOCK(heth); + + return HAL_TIMEOUT; + } + } while (((phyreg & PHY_LINKED_STATUS) != PHY_LINKED_STATUS)); + + + /* Enable Auto-Negotiation */ + if((HAL_ETH_WritePHYRegister(heth, PHY_BCR, PHY_AUTONEGOTIATION)) != HAL_OK) + { + /* In case of write timeout */ + err = ETH_ERROR; + + /* Config MAC and DMA */ + ETH_MACDMAConfig(heth, err); + + /* Set the ETH peripheral state to READY */ + heth->State = HAL_ETH_STATE_READY; + + /* Return HAL_ERROR */ + return HAL_ERROR; + } + + /* Get tick */ + tickstart = HAL_GetTick(); + + /* Wait until the auto-negotiation will be completed */ + do + { + HAL_ETH_ReadPHYRegister(heth, PHY_BSR, &phyreg); + + /* Check for the Timeout */ + if((HAL_GetTick() - tickstart ) > ETH_TIMEOUT_AUTONEGO_COMPLETED) + { + /* In case of write timeout */ + err = ETH_ERROR; + + /* Config MAC and DMA */ + ETH_MACDMAConfig(heth, err); + + heth->State= HAL_ETH_STATE_READY; + + /* Process Unlocked */ + __HAL_UNLOCK(heth); + + return HAL_TIMEOUT; + } + + } while (((phyreg & PHY_AUTONEGO_COMPLETE) != PHY_AUTONEGO_COMPLETE)); + + /* Read the result of the auto-negotiation */ + if((HAL_ETH_ReadPHYRegister(heth, PHY_SR, &phyreg)) != HAL_OK) + { + /* In case of write timeout */ + err = ETH_ERROR; + + /* Config MAC and DMA */ + ETH_MACDMAConfig(heth, err); + + /* Set the ETH peripheral state to READY */ + heth->State = HAL_ETH_STATE_READY; + + /* Return HAL_ERROR */ + return HAL_ERROR; + } + + /* Configure the MAC with the Duplex Mode fixed by the auto-negotiation process */ + if((phyreg & PHY_DUPLEX_STATUS) != (uint32_t)RESET) + { + /* Set Ethernet duplex mode to Full-duplex following the auto-negotiation */ + (heth->Init).DuplexMode = ETH_MODE_FULLDUPLEX; + } + else + { + /* Set Ethernet duplex mode to Half-duplex following the auto-negotiation */ + (heth->Init).DuplexMode = ETH_MODE_HALFDUPLEX; + } + /* Configure the MAC with the speed fixed by the auto-negotiation process */ + if((phyreg & PHY_SPEED_STATUS) == PHY_SPEED_STATUS) + { + /* Set Ethernet speed to 10M following the auto-negotiation */ + (heth->Init).Speed = ETH_SPEED_10M; + } + else + { + /* Set Ethernet speed to 100M following the auto-negotiation */ + (heth->Init).Speed = 
ETH_SPEED_100M; + } + } + else /* AutoNegotiation Disable */ + { + /* Check parameters */ + assert_param(IS_ETH_SPEED(heth->Init.Speed)); + assert_param(IS_ETH_DUPLEX_MODE(heth->Init.DuplexMode)); + + /* Set MAC Speed and Duplex Mode */ + if(HAL_ETH_WritePHYRegister(heth, PHY_BCR, ((uint16_t)((heth->Init).DuplexMode >> 3) | + (uint16_t)((heth->Init).Speed >> 1))) != HAL_OK) + { + /* In case of write timeout */ + err = ETH_ERROR; + + /* Config MAC and DMA */ + ETH_MACDMAConfig(heth, err); + + /* Set the ETH peripheral state to READY */ + heth->State = HAL_ETH_STATE_READY; + + /* Return HAL_ERROR */ + return HAL_ERROR; + } + + /* Delay to assure PHY configuration */ + HAL_Delay(PHY_CONFIG_DELAY); + } + + /* Config MAC and DMA */ + ETH_MACDMAConfig(heth, err); + + /* Set ETH HAL State to Ready */ + heth->State= HAL_ETH_STATE_READY; + + /* Return function status */ + return HAL_OK; +} + +/** + * @brief De-Initializes the ETH peripheral. + * @param heth pointer to a ETH_HandleTypeDef structure that contains + * the configuration information for ETHERNET module + * @retval HAL status + */ +HAL_StatusTypeDef HAL_ETH_DeInit(ETH_HandleTypeDef *heth) +{ + /* Set the ETH peripheral state to BUSY */ + heth->State = HAL_ETH_STATE_BUSY; + +#if (USE_HAL_ETH_REGISTER_CALLBACKS == 1) + if(heth->MspDeInitCallback == NULL) + { + heth->MspDeInitCallback = HAL_ETH_MspDeInit; + } + /* De-Init the low level hardware : GPIO, CLOCK, NVIC. */ + heth->MspDeInitCallback(heth); +#else + /* De-Init the low level hardware : GPIO, CLOCK, NVIC. */ + HAL_ETH_MspDeInit(heth); +#endif + + /* Set ETH HAL state to Disabled */ + heth->State= HAL_ETH_STATE_RESET; + + /* Release Lock */ + __HAL_UNLOCK(heth); + + /* Return function status */ + return HAL_OK; +} + +/** + * @brief Initializes the DMA Tx descriptors in chain mode. 
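A sketch of filling the handle and calling HAL_ETH_Init(); the MAC address, PHY address and RMII choice are placeholders, and the MACAddr/PhyAddress field names are taken from the usual legacy F7 ETH init structure rather than from this excerpt.

#include "stm32f7xx_hal.h"

static ETH_HandleTypeDef heth;
static uint8_t mac_addr[6] = { 0x00, 0x80, 0xE1, 0x00, 0x00, 0x00 };

static void eth_bring_up(void)
{
  heth.Instance             = ETH;
  heth.Init.MACAddr         = mac_addr;
  heth.Init.AutoNegotiation = ETH_AUTONEGOTIATION_ENABLE;
  heth.Init.Speed           = ETH_SPEED_100M;        /* overwritten by the auto-negotiation result */
  heth.Init.DuplexMode      = ETH_MODE_FULLDUPLEX;   /* overwritten by the auto-negotiation result */
  heth.Init.MediaInterface  = ETH_MEDIA_INTERFACE_RMII;
  heth.Init.RxMode          = ETH_RXINTERRUPT_MODE;
  heth.Init.ChecksumMode    = ETH_CHECKSUM_BY_HARDWARE;
  heth.Init.PhyAddress      = 0U;                    /* depends on the board's PHY strapping */

  if (HAL_ETH_Init(&heth) != HAL_OK)
  {
    /* PHY not responding or ETH clocks not present (see the software-reset timeout note above) */
  }
}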
+ * @param heth pointer to a ETH_HandleTypeDef structure that contains + * the configuration information for ETHERNET module + * @param DMATxDescTab Pointer to the first Tx desc list + * @param TxBuff Pointer to the first TxBuffer list + * @param TxBuffCount Number of the used Tx desc in the list + * @retval HAL status + */ +HAL_StatusTypeDef HAL_ETH_DMATxDescListInit(ETH_HandleTypeDef *heth, ETH_DMADescTypeDef *DMATxDescTab, uint8_t *TxBuff, uint32_t TxBuffCount) +{ + uint32_t i = 0; + ETH_DMADescTypeDef *dmatxdesc; + + /* Process Locked */ + __HAL_LOCK(heth); + + /* Set the ETH peripheral state to BUSY */ + heth->State = HAL_ETH_STATE_BUSY; + + /* Set the DMATxDescToSet pointer with the first one of the DMATxDescTab list */ + heth->TxDesc = DMATxDescTab; + + /* Fill each DMATxDesc descriptor with the right values */ + for(i=0; i < TxBuffCount; i++) + { + /* Get the pointer on the member (i) of the Tx Desc list */ + dmatxdesc = DMATxDescTab + i; + + /* Set Second Address Chained bit */ + dmatxdesc->Status = ETH_DMATXDESC_TCH; + + /* Set Buffer1 address pointer */ + dmatxdesc->Buffer1Addr = (uint32_t)(&TxBuff[i*ETH_TX_BUF_SIZE]); + + if ((heth->Init).ChecksumMode == ETH_CHECKSUM_BY_HARDWARE) + { + /* Set the DMA Tx descriptors checksum insertion */ + dmatxdesc->Status |= ETH_DMATXDESC_CHECKSUMTCPUDPICMPFULL; + } + + /* Initialize the next descriptor with the Next Descriptor Polling Enable */ + if(i < (TxBuffCount-1)) + { + /* Set next descriptor address register with next descriptor base address */ + dmatxdesc->Buffer2NextDescAddr = (uint32_t)(DMATxDescTab+i+1); + } + else + { + /* For last descriptor, set next descriptor address register equal to the first descriptor base address */ + dmatxdesc->Buffer2NextDescAddr = (uint32_t) DMATxDescTab; + } + } + + /* Set Transmit Descriptor List Address Register */ + (heth->Instance)->DMATDLAR = (uint32_t) DMATxDescTab; + + /* Set ETH HAL State to Ready */ + heth->State= HAL_ETH_STATE_READY; + + /* Process Unlocked */ + __HAL_UNLOCK(heth); + + /* Return function status */ + return HAL_OK; +} + +/** + * @brief Initializes the DMA Rx descriptors in chain mode. 
+ * @param heth pointer to a ETH_HandleTypeDef structure that contains + * the configuration information for ETHERNET module + * @param DMARxDescTab Pointer to the first Rx desc list + * @param RxBuff Pointer to the first RxBuffer list + * @param RxBuffCount Number of the used Rx desc in the list + * @retval HAL status + */ +HAL_StatusTypeDef HAL_ETH_DMARxDescListInit(ETH_HandleTypeDef *heth, ETH_DMADescTypeDef *DMARxDescTab, uint8_t *RxBuff, uint32_t RxBuffCount) +{ + uint32_t i = 0; + ETH_DMADescTypeDef *DMARxDesc; + + /* Process Locked */ + __HAL_LOCK(heth); + + /* Set the ETH peripheral state to BUSY */ + heth->State = HAL_ETH_STATE_BUSY; + + /* Set the Ethernet RxDesc pointer with the first one of the DMARxDescTab list */ + heth->RxDesc = DMARxDescTab; + + /* Fill each DMARxDesc descriptor with the right values */ + for(i=0; i < RxBuffCount; i++) + { + /* Get the pointer on the member (i) of the Rx Desc list */ + DMARxDesc = DMARxDescTab+i; + + /* Set Own bit of the Rx descriptor Status */ + DMARxDesc->Status = ETH_DMARXDESC_OWN; + + /* Set Buffer1 size and Second Address Chained bit */ + DMARxDesc->ControlBufferSize = ETH_DMARXDESC_RCH | ETH_RX_BUF_SIZE; + + /* Set Buffer1 address pointer */ + DMARxDesc->Buffer1Addr = (uint32_t)(&RxBuff[i*ETH_RX_BUF_SIZE]); + + if((heth->Init).RxMode == ETH_RXINTERRUPT_MODE) + { + /* Enable Ethernet DMA Rx Descriptor interrupt */ + DMARxDesc->ControlBufferSize &= ~ETH_DMARXDESC_DIC; + } + + /* Initialize the next descriptor with the Next Descriptor Polling Enable */ + if(i < (RxBuffCount-1)) + { + /* Set next descriptor address register with next descriptor base address */ + DMARxDesc->Buffer2NextDescAddr = (uint32_t)(DMARxDescTab+i+1); + } + else + { + /* For last descriptor, set next descriptor address register equal to the first descriptor base address */ + DMARxDesc->Buffer2NextDescAddr = (uint32_t)(DMARxDescTab); + } + } + + /* Set Receive Descriptor List Address Register */ + (heth->Instance)->DMARDLAR = (uint32_t) DMARxDescTab; + + /* Set ETH HAL State to Ready */ + heth->State= HAL_ETH_STATE_READY; + + /* Process Unlocked */ + __HAL_UNLOCK(heth); + + /* Return function status */ + return HAL_OK; +} + +/** + * @brief Initializes the ETH MSP. + * @param heth pointer to a ETH_HandleTypeDef structure that contains + * the configuration information for ETHERNET module + * @retval None + */ +__weak void HAL_ETH_MspInit(ETH_HandleTypeDef *heth) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(heth); + + /* NOTE : This function Should not be modified, when the callback is needed, + the HAL_ETH_MspInit could be implemented in the user file + */ +} + +/** + * @brief DeInitializes ETH MSP. 
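Following the descriptor-list step from the usage notes, the rings and buffers can be allocated and the MAC/DMA started as below; ETH_TXBUFNB / ETH_RXBUFNB are assumed to come from the HAL configuration header, and on cache-enabled F7 parts the descriptors and buffers normally need to live in a DMA-safe (non-cached or cache-maintained) region, which is not shown here.

#include "stm32f7xx_hal.h"

static ETH_DMADescTypeDef DMATxDscrTab[ETH_TXBUFNB];
static ETH_DMADescTypeDef DMARxDscrTab[ETH_RXBUFNB];
static uint8_t Tx_Buff[ETH_TXBUFNB][ETH_TX_BUF_SIZE];
static uint8_t Rx_Buff[ETH_RXBUFNB][ETH_RX_BUF_SIZE];

static void eth_start(ETH_HandleTypeDef *h)
{
  HAL_ETH_DMATxDescListInit(h, DMATxDscrTab, &Tx_Buff[0][0], ETH_TXBUFNB);
  HAL_ETH_DMARxDescListInit(h, DMARxDscrTab, &Rx_Buff[0][0], ETH_RXBUFNB);

  HAL_ETH_Start(h);   /* enables MAC and DMA transmission/reception */
}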
+ * @param heth pointer to a ETH_HandleTypeDef structure that contains + * the configuration information for ETHERNET module + * @retval None + */ +__weak void HAL_ETH_MspDeInit(ETH_HandleTypeDef *heth) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(heth); + + /* NOTE : This function Should not be modified, when the callback is needed, + the HAL_ETH_MspDeInit could be implemented in the user file + */ +} + +#if (USE_HAL_ETH_REGISTER_CALLBACKS == 1) +/** + * @brief Register a User ETH Callback + * To be used instead of the weak predefined callback + * @param heth eth handle + * @param CallbackID ID of the callback to be registered + * This parameter can be one of the following values: + * @arg @ref HAL_ETH_TX_COMPLETE_CB_ID Tx Complete Callback ID + * @arg @ref HAL_ETH_RX_COMPLETE_CB_ID Rx Complete Callback ID + * @arg @ref HAL_ETH_DMA_ERROR_CB_ID DMA Error Callback ID + * @arg @ref HAL_ETH_MSPINIT_CB_ID MspInit callback ID + * @arg @ref HAL_ETH_MSPDEINIT_CB_ID MspDeInit callback ID + * @param pCallback pointer to the Callback function + * @retval status + */ +HAL_StatusTypeDef HAL_ETH_RegisterCallback(ETH_HandleTypeDef *heth, HAL_ETH_CallbackIDTypeDef CallbackID, pETH_CallbackTypeDef pCallback) +{ + HAL_StatusTypeDef status = HAL_OK; + + if(pCallback == NULL) + { + return HAL_ERROR; + } + /* Process locked */ + __HAL_LOCK(heth); + + if(heth->State == HAL_ETH_STATE_READY) + { + switch (CallbackID) + { + case HAL_ETH_TX_COMPLETE_CB_ID : + heth->TxCpltCallback = pCallback; + break; + + case HAL_ETH_RX_COMPLETE_CB_ID : + heth->RxCpltCallback = pCallback; + break; + + case HAL_ETH_DMA_ERROR_CB_ID : + heth->DMAErrorCallback = pCallback; + break; + + case HAL_ETH_MSPINIT_CB_ID : + heth->MspInitCallback = pCallback; + break; + + case HAL_ETH_MSPDEINIT_CB_ID : + heth->MspDeInitCallback = pCallback; + break; + + default : + /* Return error status */ + status = HAL_ERROR; + break; + } + } + else if(heth->State == HAL_ETH_STATE_RESET) + { + switch (CallbackID) + { + case HAL_ETH_MSPINIT_CB_ID : + heth->MspInitCallback = pCallback; + break; + + case HAL_ETH_MSPDEINIT_CB_ID : + heth->MspDeInitCallback = pCallback; + break; + + default : + /* Return error status */ + status = HAL_ERROR; + break; + } + } + else + { + /* Return error status */ + status = HAL_ERROR; + } + + /* Release Lock */ + __HAL_UNLOCK(heth); + + return status; +} + +/** + * @brief Unregister an ETH Callback + * ETH callabck is redirected to the weak predefined callback + * @param heth eth handle + * @param CallbackID ID of the callback to be unregistered + * This parameter can be one of the following values: + * @arg @ref HAL_ETH_TX_COMPLETE_CB_ID Tx Complete Callback ID + * @arg @ref HAL_ETH_RX_COMPLETE_CB_ID Rx Complete Callback ID + * @arg @ref HAL_ETH_DMA_ERROR_CB_ID DMA Error Callback ID + * @arg @ref HAL_ETH_MSPINIT_CB_ID MspInit callback ID + * @arg @ref HAL_ETH_MSPDEINIT_CB_ID MspDeInit callback ID + * @retval status + */ +HAL_StatusTypeDef HAL_ETH_UnRegisterCallback(ETH_HandleTypeDef *heth, HAL_ETH_CallbackIDTypeDef CallbackID) +{ + HAL_StatusTypeDef status = HAL_OK; + + /* Process locked */ + __HAL_LOCK(heth); + + if(heth->State == HAL_ETH_STATE_READY) + { + switch (CallbackID) + { + case HAL_ETH_TX_COMPLETE_CB_ID : + heth->TxCpltCallback = HAL_ETH_TxCpltCallback; + break; + + case HAL_ETH_RX_COMPLETE_CB_ID : + heth->RxCpltCallback = HAL_ETH_RxCpltCallback; + break; + + case HAL_ETH_DMA_ERROR_CB_ID : + heth->DMAErrorCallback = HAL_ETH_ErrorCallback; + break; + + case HAL_ETH_MSPINIT_CB_ID : + 
heth->MspInitCallback = HAL_ETH_MspInit; + break; + + case HAL_ETH_MSPDEINIT_CB_ID : + heth->MspDeInitCallback = HAL_ETH_MspDeInit; + break; + + default : + /* Return error status */ + status = HAL_ERROR; + break; + } + } + else if(heth->State == HAL_ETH_STATE_RESET) + { + switch (CallbackID) + { + case HAL_ETH_MSPINIT_CB_ID : + heth->MspInitCallback = HAL_ETH_MspInit; + break; + + case HAL_ETH_MSPDEINIT_CB_ID : + heth->MspDeInitCallback = HAL_ETH_MspDeInit; + break; + + default : + /* Return error status */ + status = HAL_ERROR; + break; + } + } + else + { + /* Return error status */ + status = HAL_ERROR; + } + + /* Release Lock */ + __HAL_UNLOCK(heth); + + return status; +} +#endif /* USE_HAL_ETH_REGISTER_CALLBACKS */ + +/** + * @} + */ + +/** @defgroup ETH_Exported_Functions_Group2 IO operation functions + * @brief Data transfers functions + * + @verbatim + ============================================================================== + ##### IO operation functions ##### + ============================================================================== + [..] This section provides functions allowing to: + (+) Transmit a frame + HAL_ETH_TransmitFrame(); + (+) Receive a frame + HAL_ETH_GetReceivedFrame(); + HAL_ETH_GetReceivedFrame_IT(); + (+) Read from an External PHY register + HAL_ETH_ReadPHYRegister(); + (+) Write to an External PHY register + HAL_ETH_WritePHYRegister(); + + @endverbatim + + * @{ + */ + +/** + * @brief Sends an Ethernet frame. + * @param heth pointer to a ETH_HandleTypeDef structure that contains + * the configuration information for ETHERNET module + * @param FrameLength Amount of data to be sent + * @retval HAL status + */ +HAL_StatusTypeDef HAL_ETH_TransmitFrame(ETH_HandleTypeDef *heth, uint32_t FrameLength) +{ + uint32_t bufcount = 0, size = 0, i = 0; + + /* Process Locked */ + __HAL_LOCK(heth); + + /* Set the ETH peripheral state to BUSY */ + heth->State = HAL_ETH_STATE_BUSY; + + if (FrameLength == 0) + { + /* Set ETH HAL state to READY */ + heth->State = HAL_ETH_STATE_READY; + + /* Process Unlocked */ + __HAL_UNLOCK(heth); + + return HAL_ERROR; + } + + /* Check if the descriptor is owned by the ETHERNET DMA (when set) or CPU (when reset) */ + if(((heth->TxDesc)->Status & ETH_DMATXDESC_OWN) != (uint32_t)RESET) + { + /* OWN bit set */ + heth->State = HAL_ETH_STATE_BUSY_TX; + + /* Process Unlocked */ + __HAL_UNLOCK(heth); + + return HAL_ERROR; + } + + /* Get the number of needed Tx buffers for the current frame */ + if (FrameLength > ETH_TX_BUF_SIZE) + { + bufcount = FrameLength/ETH_TX_BUF_SIZE; + if (FrameLength % ETH_TX_BUF_SIZE) + { + bufcount++; + } + } + else + { + bufcount = 1; + } + if (bufcount == 1) + { + /* Set LAST and FIRST segment */ + heth->TxDesc->Status |=ETH_DMATXDESC_FS|ETH_DMATXDESC_LS; + /* Set frame size */ + heth->TxDesc->ControlBufferSize = (FrameLength & ETH_DMATXDESC_TBS1); + /* Set Own bit of the Tx descriptor Status: gives the buffer back to ETHERNET DMA */ + heth->TxDesc->Status |= ETH_DMATXDESC_OWN; + /* Point to next descriptor */ + heth->TxDesc= (ETH_DMADescTypeDef *)(heth->TxDesc->Buffer2NextDescAddr); + } + else + { + for (i=0; i< bufcount; i++) + { + /* Clear FIRST and LAST segment bits */ + heth->TxDesc->Status &= ~(ETH_DMATXDESC_FS | ETH_DMATXDESC_LS); + + if (i == 0) + { + /* Setting the first segment bit */ + heth->TxDesc->Status |= ETH_DMATXDESC_FS; + } + + /* Program size */ + heth->TxDesc->ControlBufferSize = (ETH_TX_BUF_SIZE & ETH_DMATXDESC_TBS1); + + if (i == (bufcount-1)) + { + /* Setting the last segment bit */ + 
heth->TxDesc->Status |= ETH_DMATXDESC_LS; + size = FrameLength - (bufcount-1)*ETH_TX_BUF_SIZE; + heth->TxDesc->ControlBufferSize = (size & ETH_DMATXDESC_TBS1); + } + + /* Set Own bit of the Tx descriptor Status: gives the buffer back to ETHERNET DMA */ + heth->TxDesc->Status |= ETH_DMATXDESC_OWN; + /* point to next descriptor */ + heth->TxDesc = (ETH_DMADescTypeDef *)(heth->TxDesc->Buffer2NextDescAddr); + } + } + + /* When Tx Buffer unavailable flag is set: clear it and resume transmission */ + if (((heth->Instance)->DMASR & ETH_DMASR_TBUS) != (uint32_t)RESET) + { + /* Clear TBUS ETHERNET DMA flag */ + (heth->Instance)->DMASR = ETH_DMASR_TBUS; + /* Resume DMA transmission*/ + (heth->Instance)->DMATPDR = 0; + } + + /* Set ETH HAL State to Ready */ + heth->State = HAL_ETH_STATE_READY; + + /* Process Unlocked */ + __HAL_UNLOCK(heth); + + /* Return function status */ + return HAL_OK; +} + +/** + * @brief Checks for received frames. + * @param heth pointer to a ETH_HandleTypeDef structure that contains + * the configuration information for ETHERNET module + * @retval HAL status + */ +HAL_StatusTypeDef HAL_ETH_GetReceivedFrame(ETH_HandleTypeDef *heth) +{ + uint32_t framelength = 0; + + /* Process Locked */ + __HAL_LOCK(heth); + + /* Check the ETH state to BUSY */ + heth->State = HAL_ETH_STATE_BUSY; + + /* Check if segment is not owned by DMA */ + /* (((heth->RxDesc->Status & ETH_DMARXDESC_OWN) == (uint32_t)RESET) && ((heth->RxDesc->Status & ETH_DMARXDESC_LS) != (uint32_t)RESET)) */ + if(((heth->RxDesc->Status & ETH_DMARXDESC_OWN) == (uint32_t)RESET)) + { + /* Check if last segment */ + if(((heth->RxDesc->Status & ETH_DMARXDESC_LS) != (uint32_t)RESET)) + { + /* increment segment count */ + (heth->RxFrameInfos).SegCount++; + + /* Check if last segment is first segment: one segment contains the frame */ + if ((heth->RxFrameInfos).SegCount == 1) + { + (heth->RxFrameInfos).FSRxDesc =heth->RxDesc; + } + + heth->RxFrameInfos.LSRxDesc = heth->RxDesc; + + /* Get the Frame Length of the received packet: substruct 4 bytes of the CRC */ + framelength = (((heth->RxDesc)->Status & ETH_DMARXDESC_FL) >> ETH_DMARXDESC_FRAMELENGTHSHIFT) - 4; + heth->RxFrameInfos.length = framelength; + + /* Get the address of the buffer start address */ + heth->RxFrameInfos.buffer = ((heth->RxFrameInfos).FSRxDesc)->Buffer1Addr; + /* point to next descriptor */ + heth->RxDesc = (ETH_DMADescTypeDef*) ((heth->RxDesc)->Buffer2NextDescAddr); + + /* Set HAL State to Ready */ + heth->State = HAL_ETH_STATE_READY; + + /* Process Unlocked */ + __HAL_UNLOCK(heth); + + /* Return function status */ + return HAL_OK; + } + /* Check if first segment */ + else if((heth->RxDesc->Status & ETH_DMARXDESC_FS) != (uint32_t)RESET) + { + (heth->RxFrameInfos).FSRxDesc = heth->RxDesc; + (heth->RxFrameInfos).LSRxDesc = NULL; + (heth->RxFrameInfos).SegCount = 1; + /* Point to next descriptor */ + heth->RxDesc = (ETH_DMADescTypeDef*) (heth->RxDesc->Buffer2NextDescAddr); + } + /* Check if intermediate segment */ + else + { + (heth->RxFrameInfos).SegCount++; + /* Point to next descriptor */ + heth->RxDesc = (ETH_DMADescTypeDef*) (heth->RxDesc->Buffer2NextDescAddr); + } + } + + /* Set ETH HAL State to Ready */ + heth->State = HAL_ETH_STATE_READY; + + /* Process Unlocked */ + __HAL_UNLOCK(heth); + + /* Return function status */ + return HAL_ERROR; +} + +/** + * @brief Gets the Received frame in interrupt mode. 
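+  *         Scans the Rx descriptors currently owned by the CPU and fills heth->RxFrameInfos
+  *         (first/last descriptor, segment count, frame length, buffer address) once the last
+  *         segment of a frame has been reached.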
+ * @param heth pointer to a ETH_HandleTypeDef structure that contains + * the configuration information for ETHERNET module + * @retval HAL status + */ +HAL_StatusTypeDef HAL_ETH_GetReceivedFrame_IT(ETH_HandleTypeDef *heth) +{ + uint32_t descriptorscancounter = 0; + + /* Process Locked */ + __HAL_LOCK(heth); + + /* Set ETH HAL State to BUSY */ + heth->State = HAL_ETH_STATE_BUSY; + + /* Scan descriptors owned by CPU */ + while (((heth->RxDesc->Status & ETH_DMARXDESC_OWN) == (uint32_t)RESET) && (descriptorscancounter < ETH_RXBUFNB)) + { + /* Just for security */ + descriptorscancounter++; + + /* Check if first segment in frame */ + /* ((heth->RxDesc->Status & ETH_DMARXDESC_FS) != (uint32_t)RESET) && ((heth->RxDesc->Status & ETH_DMARXDESC_LS) == (uint32_t)RESET)) */ + if((heth->RxDesc->Status & (ETH_DMARXDESC_FS | ETH_DMARXDESC_LS)) == (uint32_t)ETH_DMARXDESC_FS) + { + heth->RxFrameInfos.FSRxDesc = heth->RxDesc; + heth->RxFrameInfos.SegCount = 1; + /* Point to next descriptor */ + heth->RxDesc = (ETH_DMADescTypeDef*) (heth->RxDesc->Buffer2NextDescAddr); + } + /* Check if intermediate segment */ + /* ((heth->RxDesc->Status & ETH_DMARXDESC_LS) == (uint32_t)RESET)&& ((heth->RxDesc->Status & ETH_DMARXDESC_FS) == (uint32_t)RESET)) */ + else if ((heth->RxDesc->Status & (ETH_DMARXDESC_LS | ETH_DMARXDESC_FS)) == (uint32_t)RESET) + { + /* Increment segment count */ + (heth->RxFrameInfos.SegCount)++; + /* Point to next descriptor */ + heth->RxDesc = (ETH_DMADescTypeDef*)(heth->RxDesc->Buffer2NextDescAddr); + } + /* Should be last segment */ + else + { + /* Last segment */ + heth->RxFrameInfos.LSRxDesc = heth->RxDesc; + + /* Increment segment count */ + (heth->RxFrameInfos.SegCount)++; + + /* Check if last segment is first segment: one segment contains the frame */ + if ((heth->RxFrameInfos.SegCount) == 1) + { + heth->RxFrameInfos.FSRxDesc = heth->RxDesc; + } + + /* Get the Frame Length of the received packet: substruct 4 bytes of the CRC */ + heth->RxFrameInfos.length = (((heth->RxDesc)->Status & ETH_DMARXDESC_FL) >> ETH_DMARXDESC_FRAMELENGTHSHIFT) - 4; + + /* Get the address of the buffer start address */ + heth->RxFrameInfos.buffer =((heth->RxFrameInfos).FSRxDesc)->Buffer1Addr; + + /* Point to next descriptor */ + heth->RxDesc = (ETH_DMADescTypeDef*) (heth->RxDesc->Buffer2NextDescAddr); + + /* Set HAL State to Ready */ + heth->State = HAL_ETH_STATE_READY; + + /* Process Unlocked */ + __HAL_UNLOCK(heth); + + /* Return function status */ + return HAL_OK; + } + } + + /* Set HAL State to Ready */ + heth->State = HAL_ETH_STATE_READY; + + /* Process Unlocked */ + __HAL_UNLOCK(heth); + + /* Return function status */ + return HAL_ERROR; +} + +/** + * @brief This function handles ETH interrupt request. 
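+  *         Calls the Rx complete, Tx complete or DMA error callback depending on the active
+  *         DMA flag, then clears the corresponding interrupt pending bits.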
+ * @param heth pointer to a ETH_HandleTypeDef structure that contains + * the configuration information for ETHERNET module + * @retval HAL status + */ +void HAL_ETH_IRQHandler(ETH_HandleTypeDef *heth) +{ + /* Frame received */ + if (__HAL_ETH_DMA_GET_FLAG(heth, ETH_DMA_FLAG_R)) + { +#if (USE_HAL_ETH_REGISTER_CALLBACKS == 1) + /*Call registered Receive complete callback*/ + heth->RxCpltCallback(heth); +#else + /* Receive complete callback */ + HAL_ETH_RxCpltCallback(heth); +#endif /* USE_HAL_ETH_REGISTER_CALLBACKS */ + + /* Clear the Eth DMA Rx IT pending bits */ + __HAL_ETH_DMA_CLEAR_IT(heth, ETH_DMA_IT_R); + + /* Set HAL State to Ready */ + heth->State = HAL_ETH_STATE_READY; + + /* Process Unlocked */ + __HAL_UNLOCK(heth); + + } + /* Frame transmitted */ + else if (__HAL_ETH_DMA_GET_FLAG(heth, ETH_DMA_FLAG_T)) + { +#if (USE_HAL_ETH_REGISTER_CALLBACKS == 1) + /* Call resgistered Transfer complete callback*/ + heth->TxCpltCallback(heth); +#else + /* Transfer complete callback */ + HAL_ETH_TxCpltCallback(heth); +#endif /* USE_HAL_ETH_REGISTER_CALLBACKS */ + + /* Clear the Eth DMA Tx IT pending bits */ + __HAL_ETH_DMA_CLEAR_IT(heth, ETH_DMA_IT_T); + + /* Set HAL State to Ready */ + heth->State = HAL_ETH_STATE_READY; + + /* Process Unlocked */ + __HAL_UNLOCK(heth); + } + + /* Clear the interrupt flags */ + __HAL_ETH_DMA_CLEAR_IT(heth, ETH_DMA_IT_NIS); + + /* ETH DMA Error */ + if(__HAL_ETH_DMA_GET_FLAG(heth, ETH_DMA_FLAG_AIS)) + { +#if (USE_HAL_ETH_REGISTER_CALLBACKS == 1) + heth->DMAErrorCallback(heth); +#else + /* Ethernet Error callback */ + HAL_ETH_ErrorCallback(heth); +#endif /* USE_HAL_ETH_REGISTER_CALLBACKS */ + + /* Clear the interrupt flags */ + __HAL_ETH_DMA_CLEAR_IT(heth, ETH_DMA_FLAG_AIS); + + /* Set HAL State to Ready */ + heth->State = HAL_ETH_STATE_READY; + + /* Process Unlocked */ + __HAL_UNLOCK(heth); + } +} + +/** + * @brief Tx Transfer completed callbacks. + * @param heth pointer to a ETH_HandleTypeDef structure that contains + * the configuration information for ETHERNET module + * @retval None + */ +__weak void HAL_ETH_TxCpltCallback(ETH_HandleTypeDef *heth) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(heth); + + /* NOTE : This function Should not be modified, when the callback is needed, + the HAL_ETH_TxCpltCallback could be implemented in the user file + */ +} + +/** + * @brief Rx Transfer completed callbacks. 
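+  *         Called from HAL_ETH_IRQHandler() when a received frame has set ETH_DMA_FLAG_R.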
+ * @param heth pointer to a ETH_HandleTypeDef structure that contains + * the configuration information for ETHERNET module + * @retval None + */ +__weak void HAL_ETH_RxCpltCallback(ETH_HandleTypeDef *heth) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(heth); + + /* NOTE : This function Should not be modified, when the callback is needed, + the HAL_ETH_RxCpltCallback could be implemented in the user file + */ +} + +/** + * @brief Ethernet transfer error callbacks + * @param heth pointer to a ETH_HandleTypeDef structure that contains + * the configuration information for ETHERNET module + * @retval None + */ +__weak void HAL_ETH_ErrorCallback(ETH_HandleTypeDef *heth) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(heth); + + /* NOTE : This function Should not be modified, when the callback is needed, + the HAL_ETH_ErrorCallback could be implemented in the user file + */ +} + +/** + * @brief Reads a PHY register + * @param heth pointer to a ETH_HandleTypeDef structure that contains + * the configuration information for ETHERNET module + * @param PHYReg PHY register address, is the index of one of the 32 PHY register. + * This parameter can be one of the following values: + * PHY_BCR: Transceiver Basic Control Register, + * PHY_BSR: Transceiver Basic Status Register. + * More PHY register could be read depending on the used PHY + * @param RegValue PHY register value + * @retval HAL status + */ +HAL_StatusTypeDef HAL_ETH_ReadPHYRegister(ETH_HandleTypeDef *heth, uint16_t PHYReg, uint32_t *RegValue) +{ + uint32_t tmpreg = 0; + uint32_t tickstart = 0; + + /* Check parameters */ + assert_param(IS_ETH_PHY_ADDRESS(heth->Init.PhyAddress)); + + /* Check the ETH peripheral state */ + if(heth->State == HAL_ETH_STATE_BUSY_RD) + { + return HAL_BUSY; + } + /* Set ETH HAL State to BUSY_RD */ + heth->State = HAL_ETH_STATE_BUSY_RD; + + /* Get the ETHERNET MACMIIAR value */ + tmpreg = heth->Instance->MACMIIAR; + + /* Keep only the CSR Clock Range CR[2:0] bits value */ + tmpreg &= ~ETH_MACMIIAR_CR_MASK; + + /* Prepare the MII address register value */ + tmpreg |=(((uint32_t)heth->Init.PhyAddress << 11) & ETH_MACMIIAR_PA); /* Set the PHY device address */ + tmpreg |=(((uint32_t)PHYReg<<6) & ETH_MACMIIAR_MR); /* Set the PHY register address */ + tmpreg &= ~ETH_MACMIIAR_MW; /* Set the read mode */ + tmpreg |= ETH_MACMIIAR_MB; /* Set the MII Busy bit */ + + /* Write the result value into the MII Address register */ + heth->Instance->MACMIIAR = tmpreg; + + /* Get tick */ + tickstart = HAL_GetTick(); + + /* Check for the Busy flag */ + while((tmpreg & ETH_MACMIIAR_MB) == ETH_MACMIIAR_MB) + { + /* Check for the Timeout */ + if((HAL_GetTick() - tickstart ) > PHY_READ_TO) + { + heth->State= HAL_ETH_STATE_READY; + + /* Process Unlocked */ + __HAL_UNLOCK(heth); + + return HAL_TIMEOUT; + } + + tmpreg = heth->Instance->MACMIIAR; + } + + /* Get MACMIIDR value */ + *RegValue = (uint16_t)(heth->Instance->MACMIIDR); + + /* Set ETH HAL State to READY */ + heth->State = HAL_ETH_STATE_READY; + + /* Return function status */ + return HAL_OK; +} + +/** + * @brief Writes to a PHY register. + * @param heth pointer to a ETH_HandleTypeDef structure that contains + * the configuration information for ETHERNET module + * @param PHYReg PHY register address, is the index of one of the 32 PHY register. + * This parameter can be one of the following values: + * PHY_BCR: Transceiver Control Register. 
+ * More PHY register could be written depending on the used PHY + * @param RegValue the value to write + * @retval HAL status + */ +HAL_StatusTypeDef HAL_ETH_WritePHYRegister(ETH_HandleTypeDef *heth, uint16_t PHYReg, uint32_t RegValue) +{ + uint32_t tmpreg = 0; + uint32_t tickstart = 0; + + /* Check parameters */ + assert_param(IS_ETH_PHY_ADDRESS(heth->Init.PhyAddress)); + + /* Check the ETH peripheral state */ + if(heth->State == HAL_ETH_STATE_BUSY_WR) + { + return HAL_BUSY; + } + /* Set ETH HAL State to BUSY_WR */ + heth->State = HAL_ETH_STATE_BUSY_WR; + + /* Get the ETHERNET MACMIIAR value */ + tmpreg = heth->Instance->MACMIIAR; + + /* Keep only the CSR Clock Range CR[2:0] bits value */ + tmpreg &= ~ETH_MACMIIAR_CR_MASK; + + /* Prepare the MII register address value */ + tmpreg |=(((uint32_t)heth->Init.PhyAddress<<11) & ETH_MACMIIAR_PA); /* Set the PHY device address */ + tmpreg |=(((uint32_t)PHYReg<<6) & ETH_MACMIIAR_MR); /* Set the PHY register address */ + tmpreg |= ETH_MACMIIAR_MW; /* Set the write mode */ + tmpreg |= ETH_MACMIIAR_MB; /* Set the MII Busy bit */ + + /* Give the value to the MII data register */ + heth->Instance->MACMIIDR = (uint16_t)RegValue; + + /* Write the result value into the MII Address register */ + heth->Instance->MACMIIAR = tmpreg; + + /* Get tick */ + tickstart = HAL_GetTick(); + + /* Check for the Busy flag */ + while((tmpreg & ETH_MACMIIAR_MB) == ETH_MACMIIAR_MB) + { + /* Check for the Timeout */ + if((HAL_GetTick() - tickstart ) > PHY_WRITE_TO) + { + heth->State= HAL_ETH_STATE_READY; + + /* Process Unlocked */ + __HAL_UNLOCK(heth); + + return HAL_TIMEOUT; + } + + tmpreg = heth->Instance->MACMIIAR; + } + + /* Set ETH HAL State to READY */ + heth->State = HAL_ETH_STATE_READY; + + /* Return function status */ + return HAL_OK; +} + +/** + * @} + */ + +/** @defgroup ETH_Exported_Functions_Group3 Peripheral Control functions + * @brief Peripheral Control functions + * +@verbatim + =============================================================================== + ##### Peripheral Control functions ##### + =============================================================================== + [..] This section provides functions allowing to: + (+) Enable MAC and DMA transmission and reception. + HAL_ETH_Start(); + (+) Disable MAC and DMA transmission and reception. 
+ HAL_ETH_Stop(); + (+) Set the MAC configuration in runtime mode + HAL_ETH_ConfigMAC(); + (+) Set the DMA configuration in runtime mode + HAL_ETH_ConfigDMA(); + +@endverbatim + * @{ + */ + + /** + * @brief Enables Ethernet MAC and DMA reception/transmission + * @param heth pointer to a ETH_HandleTypeDef structure that contains + * the configuration information for ETHERNET module + * @retval HAL status + */ +HAL_StatusTypeDef HAL_ETH_Start(ETH_HandleTypeDef *heth) +{ + /* Process Locked */ + __HAL_LOCK(heth); + + /* Set the ETH peripheral state to BUSY */ + heth->State = HAL_ETH_STATE_BUSY; + + /* Enable transmit state machine of the MAC for transmission on the MII */ + ETH_MACTransmissionEnable(heth); + + /* Enable receive state machine of the MAC for reception from the MII */ + ETH_MACReceptionEnable(heth); + + /* Flush Transmit FIFO */ + ETH_FlushTransmitFIFO(heth); + + /* Start DMA transmission */ + ETH_DMATransmissionEnable(heth); + + /* Start DMA reception */ + ETH_DMAReceptionEnable(heth); + + /* Set the ETH state to READY*/ + heth->State= HAL_ETH_STATE_READY; + + /* Process Unlocked */ + __HAL_UNLOCK(heth); + + /* Return function status */ + return HAL_OK; +} + +/** + * @brief Stop Ethernet MAC and DMA reception/transmission + * @param heth pointer to a ETH_HandleTypeDef structure that contains + * the configuration information for ETHERNET module + * @retval HAL status + */ +HAL_StatusTypeDef HAL_ETH_Stop(ETH_HandleTypeDef *heth) +{ + /* Process Locked */ + __HAL_LOCK(heth); + + /* Set the ETH peripheral state to BUSY */ + heth->State = HAL_ETH_STATE_BUSY; + + /* Stop DMA transmission */ + ETH_DMATransmissionDisable(heth); + + /* Stop DMA reception */ + ETH_DMAReceptionDisable(heth); + + /* Disable receive state machine of the MAC for reception from the MII */ + ETH_MACReceptionDisable(heth); + + /* Flush Transmit FIFO */ + ETH_FlushTransmitFIFO(heth); + + /* Disable transmit state machine of the MAC for transmission on the MII */ + ETH_MACTransmissionDisable(heth); + + /* Set the ETH state*/ + heth->State = HAL_ETH_STATE_READY; + + /* Process Unlocked */ + __HAL_UNLOCK(heth); + + /* Return function status */ + return HAL_OK; +} + +/** + * @brief Set ETH MAC Configuration. 
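+  * @note   Passing NULL as macconf updates only the Speed and DuplexMode values taken from heth->Init.
+  *         Illustrative sketch (example values only, not part of this driver), e.g. after the PHY
+  *         renegotiates the link:
+  *           heth->Init.Speed      = ETH_SPEED_10M;
+  *           heth->Init.DuplexMode = ETH_MODE_HALFDUPLEX;
+  *           HAL_ETH_ConfigMAC(heth, NULL);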
+ * @param heth pointer to a ETH_HandleTypeDef structure that contains + * the configuration information for ETHERNET module + * @param macconf MAC Configuration structure + * @retval HAL status + */ +HAL_StatusTypeDef HAL_ETH_ConfigMAC(ETH_HandleTypeDef *heth, ETH_MACInitTypeDef *macconf) +{ + uint32_t tmpreg = 0; + + /* Process Locked */ + __HAL_LOCK(heth); + + /* Set the ETH peripheral state to BUSY */ + heth->State= HAL_ETH_STATE_BUSY; + + assert_param(IS_ETH_SPEED(heth->Init.Speed)); + assert_param(IS_ETH_DUPLEX_MODE(heth->Init.DuplexMode)); + + if (macconf != NULL) + { + /* Check the parameters */ + assert_param(IS_ETH_WATCHDOG(macconf->Watchdog)); + assert_param(IS_ETH_JABBER(macconf->Jabber)); + assert_param(IS_ETH_INTER_FRAME_GAP(macconf->InterFrameGap)); + assert_param(IS_ETH_CARRIER_SENSE(macconf->CarrierSense)); + assert_param(IS_ETH_RECEIVE_OWN(macconf->ReceiveOwn)); + assert_param(IS_ETH_LOOPBACK_MODE(macconf->LoopbackMode)); + assert_param(IS_ETH_CHECKSUM_OFFLOAD(macconf->ChecksumOffload)); + assert_param(IS_ETH_RETRY_TRANSMISSION(macconf->RetryTransmission)); + assert_param(IS_ETH_AUTOMATIC_PADCRC_STRIP(macconf->AutomaticPadCRCStrip)); + assert_param(IS_ETH_BACKOFF_LIMIT(macconf->BackOffLimit)); + assert_param(IS_ETH_DEFERRAL_CHECK(macconf->DeferralCheck)); + assert_param(IS_ETH_RECEIVE_ALL(macconf->ReceiveAll)); + assert_param(IS_ETH_SOURCE_ADDR_FILTER(macconf->SourceAddrFilter)); + assert_param(IS_ETH_CONTROL_FRAMES(macconf->PassControlFrames)); + assert_param(IS_ETH_BROADCAST_FRAMES_RECEPTION(macconf->BroadcastFramesReception)); + assert_param(IS_ETH_DESTINATION_ADDR_FILTER(macconf->DestinationAddrFilter)); + assert_param(IS_ETH_PROMISCUOUS_MODE(macconf->PromiscuousMode)); + assert_param(IS_ETH_MULTICAST_FRAMES_FILTER(macconf->MulticastFramesFilter)); + assert_param(IS_ETH_UNICAST_FRAMES_FILTER(macconf->UnicastFramesFilter)); + assert_param(IS_ETH_PAUSE_TIME(macconf->PauseTime)); + assert_param(IS_ETH_ZEROQUANTA_PAUSE(macconf->ZeroQuantaPause)); + assert_param(IS_ETH_PAUSE_LOW_THRESHOLD(macconf->PauseLowThreshold)); + assert_param(IS_ETH_UNICAST_PAUSE_FRAME_DETECT(macconf->UnicastPauseFrameDetect)); + assert_param(IS_ETH_RECEIVE_FLOWCONTROL(macconf->ReceiveFlowControl)); + assert_param(IS_ETH_TRANSMIT_FLOWCONTROL(macconf->TransmitFlowControl)); + assert_param(IS_ETH_VLAN_TAG_COMPARISON(macconf->VLANTagComparison)); + assert_param(IS_ETH_VLAN_TAG_IDENTIFIER(macconf->VLANTagIdentifier)); + + /*------------------------ ETHERNET MACCR Configuration --------------------*/ + /* Get the ETHERNET MACCR value */ + tmpreg = (heth->Instance)->MACCR; + /* Clear WD, PCE, PS, TE and RE bits */ + tmpreg &= ETH_MACCR_CLEAR_MASK; + + tmpreg |= (uint32_t)(macconf->Watchdog | + macconf->Jabber | + macconf->InterFrameGap | + macconf->CarrierSense | + (heth->Init).Speed | + macconf->ReceiveOwn | + macconf->LoopbackMode | + (heth->Init).DuplexMode | + macconf->ChecksumOffload | + macconf->RetryTransmission | + macconf->AutomaticPadCRCStrip | + macconf->BackOffLimit | + macconf->DeferralCheck); + + /* Write to ETHERNET MACCR */ + (heth->Instance)->MACCR = (uint32_t)tmpreg; + + /* Wait until the write operation will be taken into account : + at least four TX_CLK/RX_CLK clock cycles */ + tmpreg = (heth->Instance)->MACCR; + HAL_Delay(ETH_REG_WRITE_DELAY); + (heth->Instance)->MACCR = tmpreg; + + /*----------------------- ETHERNET MACFFR Configuration --------------------*/ + /* Write to ETHERNET MACFFR */ + (heth->Instance)->MACFFR = (uint32_t)(macconf->ReceiveAll | + macconf->SourceAddrFilter | + 
macconf->PassControlFrames | + macconf->BroadcastFramesReception | + macconf->DestinationAddrFilter | + macconf->PromiscuousMode | + macconf->MulticastFramesFilter | + macconf->UnicastFramesFilter); + + /* Wait until the write operation will be taken into account : + at least four TX_CLK/RX_CLK clock cycles */ + tmpreg = (heth->Instance)->MACFFR; + HAL_Delay(ETH_REG_WRITE_DELAY); + (heth->Instance)->MACFFR = tmpreg; + + /*--------------- ETHERNET MACHTHR and MACHTLR Configuration ---------------*/ + /* Write to ETHERNET MACHTHR */ + (heth->Instance)->MACHTHR = (uint32_t)macconf->HashTableHigh; + + /* Write to ETHERNET MACHTLR */ + (heth->Instance)->MACHTLR = (uint32_t)macconf->HashTableLow; + /*----------------------- ETHERNET MACFCR Configuration --------------------*/ + + /* Get the ETHERNET MACFCR value */ + tmpreg = (heth->Instance)->MACFCR; + /* Clear xx bits */ + tmpreg &= ETH_MACFCR_CLEAR_MASK; + + tmpreg |= (uint32_t)((macconf->PauseTime << 16) | + macconf->ZeroQuantaPause | + macconf->PauseLowThreshold | + macconf->UnicastPauseFrameDetect | + macconf->ReceiveFlowControl | + macconf->TransmitFlowControl); + + /* Write to ETHERNET MACFCR */ + (heth->Instance)->MACFCR = (uint32_t)tmpreg; + + /* Wait until the write operation will be taken into account : + at least four TX_CLK/RX_CLK clock cycles */ + tmpreg = (heth->Instance)->MACFCR; + HAL_Delay(ETH_REG_WRITE_DELAY); + (heth->Instance)->MACFCR = tmpreg; + + /*----------------------- ETHERNET MACVLANTR Configuration -----------------*/ + (heth->Instance)->MACVLANTR = (uint32_t)(macconf->VLANTagComparison | + macconf->VLANTagIdentifier); + + /* Wait until the write operation will be taken into account : + at least four TX_CLK/RX_CLK clock cycles */ + tmpreg = (heth->Instance)->MACVLANTR; + HAL_Delay(ETH_REG_WRITE_DELAY); + (heth->Instance)->MACVLANTR = tmpreg; + } + else /* macconf == NULL : here we just configure Speed and Duplex mode */ + { + /*------------------------ ETHERNET MACCR Configuration --------------------*/ + /* Get the ETHERNET MACCR value */ + tmpreg = (heth->Instance)->MACCR; + + /* Clear FES and DM bits */ + tmpreg &= ~((uint32_t)0x00004800); + + tmpreg |= (uint32_t)(heth->Init.Speed | heth->Init.DuplexMode); + + /* Write to ETHERNET MACCR */ + (heth->Instance)->MACCR = (uint32_t)tmpreg; + + /* Wait until the write operation will be taken into account: + at least four TX_CLK/RX_CLK clock cycles */ + tmpreg = (heth->Instance)->MACCR; + HAL_Delay(ETH_REG_WRITE_DELAY); + (heth->Instance)->MACCR = tmpreg; + } + + /* Set the ETH state to Ready */ + heth->State= HAL_ETH_STATE_READY; + + /* Process Unlocked */ + __HAL_UNLOCK(heth); + + /* Return function status */ + return HAL_OK; +} + +/** + * @brief Sets ETH DMA Configuration. 
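+  * @note   Unlike HAL_ETH_ConfigMAC(), dmaconf must point to a valid structure; its fields are
+  *         applied to the DMA operation mode (DMAOMR) and bus mode (DMABMR) registers.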
+ * @param heth pointer to a ETH_HandleTypeDef structure that contains + * the configuration information for ETHERNET module + * @param dmaconf DMA Configuration structure + * @retval HAL status + */ +HAL_StatusTypeDef HAL_ETH_ConfigDMA(ETH_HandleTypeDef *heth, ETH_DMAInitTypeDef *dmaconf) +{ + uint32_t tmpreg = 0; + + /* Process Locked */ + __HAL_LOCK(heth); + + /* Set the ETH peripheral state to BUSY */ + heth->State= HAL_ETH_STATE_BUSY; + + /* Check parameters */ + assert_param(IS_ETH_DROP_TCPIP_CHECKSUM_FRAME(dmaconf->DropTCPIPChecksumErrorFrame)); + assert_param(IS_ETH_RECEIVE_STORE_FORWARD(dmaconf->ReceiveStoreForward)); + assert_param(IS_ETH_FLUSH_RECEIVE_FRAME(dmaconf->FlushReceivedFrame)); + assert_param(IS_ETH_TRANSMIT_STORE_FORWARD(dmaconf->TransmitStoreForward)); + assert_param(IS_ETH_TRANSMIT_THRESHOLD_CONTROL(dmaconf->TransmitThresholdControl)); + assert_param(IS_ETH_FORWARD_ERROR_FRAMES(dmaconf->ForwardErrorFrames)); + assert_param(IS_ETH_FORWARD_UNDERSIZED_GOOD_FRAMES(dmaconf->ForwardUndersizedGoodFrames)); + assert_param(IS_ETH_RECEIVE_THRESHOLD_CONTROL(dmaconf->ReceiveThresholdControl)); + assert_param(IS_ETH_SECOND_FRAME_OPERATE(dmaconf->SecondFrameOperate)); + assert_param(IS_ETH_ADDRESS_ALIGNED_BEATS(dmaconf->AddressAlignedBeats)); + assert_param(IS_ETH_FIXED_BURST(dmaconf->FixedBurst)); + assert_param(IS_ETH_RXDMA_BURST_LENGTH(dmaconf->RxDMABurstLength)); + assert_param(IS_ETH_TXDMA_BURST_LENGTH(dmaconf->TxDMABurstLength)); + assert_param(IS_ETH_ENHANCED_DESCRIPTOR_FORMAT(dmaconf->EnhancedDescriptorFormat)); + assert_param(IS_ETH_DMA_DESC_SKIP_LENGTH(dmaconf->DescriptorSkipLength)); + assert_param(IS_ETH_DMA_ARBITRATION_ROUNDROBIN_RXTX(dmaconf->DMAArbitration)); + + /*----------------------- ETHERNET DMAOMR Configuration --------------------*/ + /* Get the ETHERNET DMAOMR value */ + tmpreg = (heth->Instance)->DMAOMR; + /* Clear xx bits */ + tmpreg &= ETH_DMAOMR_CLEAR_MASK; + + tmpreg |= (uint32_t)(dmaconf->DropTCPIPChecksumErrorFrame | + dmaconf->ReceiveStoreForward | + dmaconf->FlushReceivedFrame | + dmaconf->TransmitStoreForward | + dmaconf->TransmitThresholdControl | + dmaconf->ForwardErrorFrames | + dmaconf->ForwardUndersizedGoodFrames | + dmaconf->ReceiveThresholdControl | + dmaconf->SecondFrameOperate); + + /* Write to ETHERNET DMAOMR */ + (heth->Instance)->DMAOMR = (uint32_t)tmpreg; + + /* Wait until the write operation will be taken into account: + at least four TX_CLK/RX_CLK clock cycles */ + tmpreg = (heth->Instance)->DMAOMR; + HAL_Delay(ETH_REG_WRITE_DELAY); + (heth->Instance)->DMAOMR = tmpreg; + + /*----------------------- ETHERNET DMABMR Configuration --------------------*/ + (heth->Instance)->DMABMR = (uint32_t)(dmaconf->AddressAlignedBeats | + dmaconf->FixedBurst | + dmaconf->RxDMABurstLength | /* !! 
if 4xPBL is selected for Tx or Rx it is applied for the other */ + dmaconf->TxDMABurstLength | + dmaconf->EnhancedDescriptorFormat | + (dmaconf->DescriptorSkipLength << 2) | + dmaconf->DMAArbitration | + ETH_DMABMR_USP); /* Enable use of separate PBL for Rx and Tx */ + + /* Wait until the write operation will be taken into account: + at least four TX_CLK/RX_CLK clock cycles */ + tmpreg = (heth->Instance)->DMABMR; + HAL_Delay(ETH_REG_WRITE_DELAY); + (heth->Instance)->DMABMR = tmpreg; + + /* Set the ETH state to Ready */ + heth->State= HAL_ETH_STATE_READY; + + /* Process Unlocked */ + __HAL_UNLOCK(heth); + + /* Return function status */ + return HAL_OK; +} + +/** + * @} + */ + +/** @defgroup ETH_Exported_Functions_Group4 Peripheral State functions + * @brief Peripheral State functions + * + @verbatim + =============================================================================== + ##### Peripheral State functions ##### + =============================================================================== + [..] + This subsection permits to get in run-time the status of the peripheral + and the data flow. + (+) Get the ETH handle state: + HAL_ETH_GetState(); + + + @endverbatim + * @{ + */ + +/** + * @brief Return the ETH HAL state + * @param heth pointer to a ETH_HandleTypeDef structure that contains + * the configuration information for ETHERNET module + * @retval HAL state + */ +HAL_ETH_StateTypeDef HAL_ETH_GetState(ETH_HandleTypeDef *heth) +{ + /* Return ETH state */ + return heth->State; +} + +/** + * @} + */ + +/** + * @} + */ + +/** @addtogroup ETH_Private_Functions + * @{ + */ + +/** + * @brief Configures Ethernet MAC and DMA with default parameters. + * @param heth pointer to a ETH_HandleTypeDef structure that contains + * the configuration information for ETHERNET module + * @param err Ethernet Init error + * @retval HAL status + */ +static void ETH_MACDMAConfig(ETH_HandleTypeDef *heth, uint32_t err) +{ + ETH_MACInitTypeDef macinit; + ETH_DMAInitTypeDef dmainit; + uint32_t tmpreg = 0; + + if (err != ETH_SUCCESS) /* Auto-negotiation failed */ + { + /* Set Ethernet duplex mode to Full-duplex */ + (heth->Init).DuplexMode = ETH_MODE_FULLDUPLEX; + + /* Set Ethernet speed to 100M */ + (heth->Init).Speed = ETH_SPEED_100M; + } + + /* Ethernet MAC default initialization **************************************/ + macinit.Watchdog = ETH_WATCHDOG_ENABLE; + macinit.Jabber = ETH_JABBER_ENABLE; + macinit.InterFrameGap = ETH_INTERFRAMEGAP_96BIT; + macinit.CarrierSense = ETH_CARRIERSENCE_ENABLE; + macinit.ReceiveOwn = ETH_RECEIVEOWN_ENABLE; + macinit.LoopbackMode = ETH_LOOPBACKMODE_DISABLE; + if(heth->Init.ChecksumMode == ETH_CHECKSUM_BY_HARDWARE) + { + macinit.ChecksumOffload = ETH_CHECKSUMOFFLAOD_ENABLE; + } + else + { + macinit.ChecksumOffload = ETH_CHECKSUMOFFLAOD_DISABLE; + } + macinit.RetryTransmission = ETH_RETRYTRANSMISSION_DISABLE; + macinit.AutomaticPadCRCStrip = ETH_AUTOMATICPADCRCSTRIP_DISABLE; + macinit.BackOffLimit = ETH_BACKOFFLIMIT_10; + macinit.DeferralCheck = ETH_DEFFERRALCHECK_DISABLE; + macinit.ReceiveAll = ETH_RECEIVEAll_DISABLE; + macinit.SourceAddrFilter = ETH_SOURCEADDRFILTER_DISABLE; + macinit.PassControlFrames = ETH_PASSCONTROLFRAMES_BLOCKALL; + macinit.BroadcastFramesReception = ETH_BROADCASTFRAMESRECEPTION_ENABLE; + macinit.DestinationAddrFilter = ETH_DESTINATIONADDRFILTER_NORMAL; + macinit.PromiscuousMode = ETH_PROMISCUOUS_MODE_DISABLE; + macinit.MulticastFramesFilter = ETH_MULTICASTFRAMESFILTER_NONE; + macinit.UnicastFramesFilter = ETH_UNICASTFRAMESFILTER_PERFECT; + 
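+  /* Defaults below leave the hash-table filter empty and MAC flow control disabled (VLAN tag 0) */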
macinit.HashTableHigh = 0x0; + macinit.HashTableLow = 0x0; + macinit.PauseTime = 0x0; + macinit.ZeroQuantaPause = ETH_ZEROQUANTAPAUSE_DISABLE; + macinit.PauseLowThreshold = ETH_PAUSELOWTHRESHOLD_MINUS4; + macinit.UnicastPauseFrameDetect = ETH_UNICASTPAUSEFRAMEDETECT_DISABLE; + macinit.ReceiveFlowControl = ETH_RECEIVEFLOWCONTROL_DISABLE; + macinit.TransmitFlowControl = ETH_TRANSMITFLOWCONTROL_DISABLE; + macinit.VLANTagComparison = ETH_VLANTAGCOMPARISON_16BIT; + macinit.VLANTagIdentifier = 0x0; + + /*------------------------ ETHERNET MACCR Configuration --------------------*/ + /* Get the ETHERNET MACCR value */ + tmpreg = (heth->Instance)->MACCR; + /* Clear WD, PCE, PS, TE and RE bits */ + tmpreg &= ETH_MACCR_CLEAR_MASK; + /* Set the WD bit according to ETH Watchdog value */ + /* Set the JD: bit according to ETH Jabber value */ + /* Set the IFG bit according to ETH InterFrameGap value */ + /* Set the DCRS bit according to ETH CarrierSense value */ + /* Set the FES bit according to ETH Speed value */ + /* Set the DO bit according to ETH ReceiveOwn value */ + /* Set the LM bit according to ETH LoopbackMode value */ + /* Set the DM bit according to ETH Mode value */ + /* Set the IPCO bit according to ETH ChecksumOffload value */ + /* Set the DR bit according to ETH RetryTransmission value */ + /* Set the ACS bit according to ETH AutomaticPadCRCStrip value */ + /* Set the BL bit according to ETH BackOffLimit value */ + /* Set the DC bit according to ETH DeferralCheck value */ + tmpreg |= (uint32_t)(macinit.Watchdog | + macinit.Jabber | + macinit.InterFrameGap | + macinit.CarrierSense | + (heth->Init).Speed | + macinit.ReceiveOwn | + macinit.LoopbackMode | + (heth->Init).DuplexMode | + macinit.ChecksumOffload | + macinit.RetryTransmission | + macinit.AutomaticPadCRCStrip | + macinit.BackOffLimit | + macinit.DeferralCheck); + + /* Write to ETHERNET MACCR */ + (heth->Instance)->MACCR = (uint32_t)tmpreg; + + /* Wait until the write operation will be taken into account: + at least four TX_CLK/RX_CLK clock cycles */ + tmpreg = (heth->Instance)->MACCR; + HAL_Delay(ETH_REG_WRITE_DELAY); + (heth->Instance)->MACCR = tmpreg; + + /*----------------------- ETHERNET MACFFR Configuration --------------------*/ + /* Set the RA bit according to ETH ReceiveAll value */ + /* Set the SAF and SAIF bits according to ETH SourceAddrFilter value */ + /* Set the PCF bit according to ETH PassControlFrames value */ + /* Set the DBF bit according to ETH BroadcastFramesReception value */ + /* Set the DAIF bit according to ETH DestinationAddrFilter value */ + /* Set the PR bit according to ETH PromiscuousMode value */ + /* Set the PM, HMC and HPF bits according to ETH MulticastFramesFilter value */ + /* Set the HUC and HPF bits according to ETH UnicastFramesFilter value */ + /* Write to ETHERNET MACFFR */ + (heth->Instance)->MACFFR = (uint32_t)(macinit.ReceiveAll | + macinit.SourceAddrFilter | + macinit.PassControlFrames | + macinit.BroadcastFramesReception | + macinit.DestinationAddrFilter | + macinit.PromiscuousMode | + macinit.MulticastFramesFilter | + macinit.UnicastFramesFilter); + + /* Wait until the write operation will be taken into account: + at least four TX_CLK/RX_CLK clock cycles */ + tmpreg = (heth->Instance)->MACFFR; + HAL_Delay(ETH_REG_WRITE_DELAY); + (heth->Instance)->MACFFR = tmpreg; + + /*--------------- ETHERNET MACHTHR and MACHTLR Configuration --------------*/ + /* Write to ETHERNET MACHTHR */ + (heth->Instance)->MACHTHR = (uint32_t)macinit.HashTableHigh; + + /* Write to ETHERNET MACHTLR */ + 
(heth->Instance)->MACHTLR = (uint32_t)macinit.HashTableLow; + /*----------------------- ETHERNET MACFCR Configuration -------------------*/ + + /* Get the ETHERNET MACFCR value */ + tmpreg = (heth->Instance)->MACFCR; + /* Clear xx bits */ + tmpreg &= ETH_MACFCR_CLEAR_MASK; + + /* Set the PT bit according to ETH PauseTime value */ + /* Set the DZPQ bit according to ETH ZeroQuantaPause value */ + /* Set the PLT bit according to ETH PauseLowThreshold value */ + /* Set the UP bit according to ETH UnicastPauseFrameDetect value */ + /* Set the RFE bit according to ETH ReceiveFlowControl value */ + /* Set the TFE bit according to ETH TransmitFlowControl value */ + tmpreg |= (uint32_t)((macinit.PauseTime << 16) | + macinit.ZeroQuantaPause | + macinit.PauseLowThreshold | + macinit.UnicastPauseFrameDetect | + macinit.ReceiveFlowControl | + macinit.TransmitFlowControl); + + /* Write to ETHERNET MACFCR */ + (heth->Instance)->MACFCR = (uint32_t)tmpreg; + + /* Wait until the write operation will be taken into account: + at least four TX_CLK/RX_CLK clock cycles */ + tmpreg = (heth->Instance)->MACFCR; + HAL_Delay(ETH_REG_WRITE_DELAY); + (heth->Instance)->MACFCR = tmpreg; + + /*----------------------- ETHERNET MACVLANTR Configuration ----------------*/ + /* Set the ETV bit according to ETH VLANTagComparison value */ + /* Set the VL bit according to ETH VLANTagIdentifier value */ + (heth->Instance)->MACVLANTR = (uint32_t)(macinit.VLANTagComparison | + macinit.VLANTagIdentifier); + + /* Wait until the write operation will be taken into account: + at least four TX_CLK/RX_CLK clock cycles */ + tmpreg = (heth->Instance)->MACVLANTR; + HAL_Delay(ETH_REG_WRITE_DELAY); + (heth->Instance)->MACVLANTR = tmpreg; + + /* Ethernet DMA default initialization ************************************/ + dmainit.DropTCPIPChecksumErrorFrame = ETH_DROPTCPIPCHECKSUMERRORFRAME_ENABLE; + dmainit.ReceiveStoreForward = ETH_RECEIVESTOREFORWARD_ENABLE; + dmainit.FlushReceivedFrame = ETH_FLUSHRECEIVEDFRAME_ENABLE; + dmainit.TransmitStoreForward = ETH_TRANSMITSTOREFORWARD_ENABLE; + dmainit.TransmitThresholdControl = ETH_TRANSMITTHRESHOLDCONTROL_64BYTES; + dmainit.ForwardErrorFrames = ETH_FORWARDERRORFRAMES_DISABLE; + dmainit.ForwardUndersizedGoodFrames = ETH_FORWARDUNDERSIZEDGOODFRAMES_DISABLE; + dmainit.ReceiveThresholdControl = ETH_RECEIVEDTHRESHOLDCONTROL_64BYTES; + dmainit.SecondFrameOperate = ETH_SECONDFRAMEOPERARTE_ENABLE; + dmainit.AddressAlignedBeats = ETH_ADDRESSALIGNEDBEATS_ENABLE; + dmainit.FixedBurst = ETH_FIXEDBURST_ENABLE; + dmainit.RxDMABurstLength = ETH_RXDMABURSTLENGTH_32BEAT; + dmainit.TxDMABurstLength = ETH_TXDMABURSTLENGTH_32BEAT; + dmainit.EnhancedDescriptorFormat = ETH_DMAENHANCEDDESCRIPTOR_ENABLE; + dmainit.DescriptorSkipLength = 0x0; + dmainit.DMAArbitration = ETH_DMAARBITRATION_ROUNDROBIN_RXTX_1_1; + + /* Get the ETHERNET DMAOMR value */ + tmpreg = (heth->Instance)->DMAOMR; + /* Clear xx bits */ + tmpreg &= ETH_DMAOMR_CLEAR_MASK; + + /* Set the DT bit according to ETH DropTCPIPChecksumErrorFrame value */ + /* Set the RSF bit according to ETH ReceiveStoreForward value */ + /* Set the DFF bit according to ETH FlushReceivedFrame value */ + /* Set the TSF bit according to ETH TransmitStoreForward value */ + /* Set the TTC bit according to ETH TransmitThresholdControl value */ + /* Set the FEF bit according to ETH ForwardErrorFrames value */ + /* Set the FUF bit according to ETH ForwardUndersizedGoodFrames value */ + /* Set the RTC bit according to ETH ReceiveThresholdControl value */ + /* Set the OSF bit according to 
ETH SecondFrameOperate value */ + tmpreg |= (uint32_t)(dmainit.DropTCPIPChecksumErrorFrame | + dmainit.ReceiveStoreForward | + dmainit.FlushReceivedFrame | + dmainit.TransmitStoreForward | + dmainit.TransmitThresholdControl | + dmainit.ForwardErrorFrames | + dmainit.ForwardUndersizedGoodFrames | + dmainit.ReceiveThresholdControl | + dmainit.SecondFrameOperate); + + /* Write to ETHERNET DMAOMR */ + (heth->Instance)->DMAOMR = (uint32_t)tmpreg; + + /* Wait until the write operation will be taken into account: + at least four TX_CLK/RX_CLK clock cycles */ + tmpreg = (heth->Instance)->DMAOMR; + HAL_Delay(ETH_REG_WRITE_DELAY); + (heth->Instance)->DMAOMR = tmpreg; + + /*----------------------- ETHERNET DMABMR Configuration ------------------*/ + /* Set the AAL bit according to ETH AddressAlignedBeats value */ + /* Set the FB bit according to ETH FixedBurst value */ + /* Set the RPBL and 4*PBL bits according to ETH RxDMABurstLength value */ + /* Set the PBL and 4*PBL bits according to ETH TxDMABurstLength value */ + /* Set the Enhanced DMA descriptors bit according to ETH EnhancedDescriptorFormat value*/ + /* Set the DSL bit according to ETH DesciptorSkipLength value */ + /* Set the PR and DA bits according to ETH DMAArbitration value */ + (heth->Instance)->DMABMR = (uint32_t)(dmainit.AddressAlignedBeats | + dmainit.FixedBurst | + dmainit.RxDMABurstLength | /* !! if 4xPBL is selected for Tx or Rx it is applied for the other */ + dmainit.TxDMABurstLength | + dmainit.EnhancedDescriptorFormat | + (dmainit.DescriptorSkipLength << 2) | + dmainit.DMAArbitration | + ETH_DMABMR_USP); /* Enable use of separate PBL for Rx and Tx */ + + /* Wait until the write operation will be taken into account: + at least four TX_CLK/RX_CLK clock cycles */ + tmpreg = (heth->Instance)->DMABMR; + HAL_Delay(ETH_REG_WRITE_DELAY); + (heth->Instance)->DMABMR = tmpreg; + + if((heth->Init).RxMode == ETH_RXINTERRUPT_MODE) + { + /* Enable the Ethernet Rx Interrupt */ + __HAL_ETH_DMA_ENABLE_IT((heth), ETH_DMA_IT_NIS | ETH_DMA_IT_R); + } + + /* Initialize MAC address in ethernet MAC */ + ETH_MACAddressConfig(heth, ETH_MAC_ADDRESS0, heth->Init.MACAddr); +} + +/** + * @brief Configures the selected MAC address. + * @param heth pointer to a ETH_HandleTypeDef structure that contains + * the configuration information for ETHERNET module + * @param MacAddr The MAC address to configure + * This parameter can be one of the following values: + * @arg ETH_MAC_Address0: MAC Address0 + * @arg ETH_MAC_Address1: MAC Address1 + * @arg ETH_MAC_Address2: MAC Address2 + * @arg ETH_MAC_Address3: MAC Address3 + * @param Addr Pointer to MAC address buffer data (6 bytes) + * @retval HAL status + */ +static void ETH_MACAddressConfig(ETH_HandleTypeDef *heth, uint32_t MacAddr, uint8_t *Addr) +{ + uint32_t tmpreg; + + /* Check the parameters */ + assert_param(IS_ETH_MAC_ADDRESS0123(MacAddr)); + + /* Calculate the selected MAC address high register */ + tmpreg = ((uint32_t)Addr[5] << 8) | (uint32_t)Addr[4]; + /* Load the selected MAC address high register */ + (*(__IO uint32_t *)((uint32_t)(ETH_MAC_ADDR_HBASE + MacAddr))) = tmpreg; + /* Calculate the selected MAC address low register */ + tmpreg = ((uint32_t)Addr[3] << 24) | ((uint32_t)Addr[2] << 16) | ((uint32_t)Addr[1] << 8) | Addr[0]; + + /* Load the selected MAC address low register */ + (*(__IO uint32_t *)((uint32_t)(ETH_MAC_ADDR_LBASE + MacAddr))) = tmpreg; +} + +/** + * @brief Enables the MAC transmission. 
+ * @param heth pointer to a ETH_HandleTypeDef structure that contains + * the configuration information for ETHERNET module + * @retval None + */ +static void ETH_MACTransmissionEnable(ETH_HandleTypeDef *heth) +{ + __IO uint32_t tmpreg = 0; + + /* Enable the MAC transmission */ + (heth->Instance)->MACCR |= ETH_MACCR_TE; + + /* Wait until the write operation will be taken into account: + at least four TX_CLK/RX_CLK clock cycles */ + tmpreg = (heth->Instance)->MACCR; + HAL_Delay(ETH_REG_WRITE_DELAY); + (heth->Instance)->MACCR = tmpreg; +} + +/** + * @brief Disables the MAC transmission. + * @param heth pointer to a ETH_HandleTypeDef structure that contains + * the configuration information for ETHERNET module + * @retval None + */ +static void ETH_MACTransmissionDisable(ETH_HandleTypeDef *heth) +{ + __IO uint32_t tmpreg = 0; + + /* Disable the MAC transmission */ + (heth->Instance)->MACCR &= ~ETH_MACCR_TE; + + /* Wait until the write operation will be taken into account: + at least four TX_CLK/RX_CLK clock cycles */ + tmpreg = (heth->Instance)->MACCR; + HAL_Delay(ETH_REG_WRITE_DELAY); + (heth->Instance)->MACCR = tmpreg; +} + +/** + * @brief Enables the MAC reception. + * @param heth pointer to a ETH_HandleTypeDef structure that contains + * the configuration information for ETHERNET module + * @retval None + */ +static void ETH_MACReceptionEnable(ETH_HandleTypeDef *heth) +{ + __IO uint32_t tmpreg = 0; + + /* Enable the MAC reception */ + (heth->Instance)->MACCR |= ETH_MACCR_RE; + + /* Wait until the write operation will be taken into account: + at least four TX_CLK/RX_CLK clock cycles */ + tmpreg = (heth->Instance)->MACCR; + HAL_Delay(ETH_REG_WRITE_DELAY); + (heth->Instance)->MACCR = tmpreg; +} + +/** + * @brief Disables the MAC reception. + * @param heth pointer to a ETH_HandleTypeDef structure that contains + * the configuration information for ETHERNET module + * @retval None + */ +static void ETH_MACReceptionDisable(ETH_HandleTypeDef *heth) +{ + __IO uint32_t tmpreg = 0; + + /* Disable the MAC reception */ + (heth->Instance)->MACCR &= ~ETH_MACCR_RE; + + /* Wait until the write operation will be taken into account: + at least four TX_CLK/RX_CLK clock cycles */ + tmpreg = (heth->Instance)->MACCR; + HAL_Delay(ETH_REG_WRITE_DELAY); + (heth->Instance)->MACCR = tmpreg; +} + +/** + * @brief Enables the DMA transmission. + * @param heth pointer to a ETH_HandleTypeDef structure that contains + * the configuration information for ETHERNET module + * @retval None + */ +static void ETH_DMATransmissionEnable(ETH_HandleTypeDef *heth) +{ + /* Enable the DMA transmission */ + (heth->Instance)->DMAOMR |= ETH_DMAOMR_ST; +} + +/** + * @brief Disables the DMA transmission. + * @param heth pointer to a ETH_HandleTypeDef structure that contains + * the configuration information for ETHERNET module + * @retval None + */ +static void ETH_DMATransmissionDisable(ETH_HandleTypeDef *heth) +{ + /* Disable the DMA transmission */ + (heth->Instance)->DMAOMR &= ~ETH_DMAOMR_ST; +} + +/** + * @brief Enables the DMA reception. + * @param heth pointer to a ETH_HandleTypeDef structure that contains + * the configuration information for ETHERNET module + * @retval None + */ +static void ETH_DMAReceptionEnable(ETH_HandleTypeDef *heth) +{ + /* Enable the DMA reception */ + (heth->Instance)->DMAOMR |= ETH_DMAOMR_SR; +} + +/** + * @brief Disables the DMA reception. 
+ * @param heth pointer to a ETH_HandleTypeDef structure that contains + * the configuration information for ETHERNET module + * @retval None + */ +static void ETH_DMAReceptionDisable(ETH_HandleTypeDef *heth) +{ + /* Disable the DMA reception */ + (heth->Instance)->DMAOMR &= ~ETH_DMAOMR_SR; +} + +/** + * @brief Clears the ETHERNET transmit FIFO. + * @param heth pointer to a ETH_HandleTypeDef structure that contains + * the configuration information for ETHERNET module + * @retval None + */ +static void ETH_FlushTransmitFIFO(ETH_HandleTypeDef *heth) +{ + __IO uint32_t tmpreg = 0; + + /* Set the Flush Transmit FIFO bit */ + (heth->Instance)->DMAOMR |= ETH_DMAOMR_FTF; + + /* Wait until the write operation will be taken into account: + at least four TX_CLK/RX_CLK clock cycles */ + tmpreg = (heth->Instance)->DMAOMR; + HAL_Delay(ETH_REG_WRITE_DELAY); + (heth->Instance)->DMAOMR = tmpreg; +} + +#if (USE_HAL_ETH_REGISTER_CALLBACKS == 1) +static void ETH_InitCallbacksToDefault(ETH_HandleTypeDef *heth) +{ + /* Init the ETH Callback settings */ + heth->TxCpltCallback = HAL_ETH_TxCpltCallback; /* Legacy weak TxCpltCallback */ + heth->RxCpltCallback = HAL_ETH_RxCpltCallback; /* Legacy weak RxCpltCallback */ + heth->DMAErrorCallback = HAL_ETH_ErrorCallback; /* Legacy weak DMAErrorCallback */ +} +#endif /* USE_HAL_ETH_REGISTER_CALLBACKS */ + +/** + * @} + */ + +#endif /* ETH */ +#endif /* HAL_ETH_MODULE_ENABLED */ +/** + * @} + */ + +/** + * @} + */ + diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_exti.c b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_exti.c new file mode 100644 index 0000000..ecf0395 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_exti.c @@ -0,0 +1,547 @@ +/** + ****************************************************************************** + * @file stm32F7xx_hal_exti.c + * @author MCD Application Team + * @brief EXTI HAL module driver. + * This file provides firmware functions to manage the following + * functionalities of the Extended Interrupts and events controller (EXTI) peripheral: + * + Initialization and de-initialization functions + * + IO operation functions + * + ****************************************************************************** + * @attention + * + * Copyright (c) 2018 STMicroelectronics. + * All rights reserved. + * + * This software is licensed under terms that can be found in the LICENSE file + * in the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. + * + ****************************************************************************** + @verbatim + ============================================================================== + ##### EXTI Peripheral features ##### + ============================================================================== + [..] + (+) Each Exti line can be configured within this driver. + + (+) Exti line can be configured in 3 different modes + (++) Interrupt + (++) Event + (++) Both of them + + (+) Configurable Exti lines can be configured with 3 different triggers + (++) Rising + (++) Falling + (++) Both of them + + (+) When set in interrupt mode, configurable Exti lines have two different + interrupts pending registers which allow to distinguish which transition + occurs: + (++) Rising edge pending interrupt + (++) Falling + + (+) Exti lines 0 to 15 are linked to gpio pin number 0 to 15. 
Gpio port can + be selected through multiplexer. + + ##### How to use this driver ##### + ============================================================================== + [..] + + (#) Configure the EXTI line using HAL_EXTI_SetConfigLine(). + (++) Choose the interrupt line number by setting "Line" member from + EXTI_ConfigTypeDef structure. + (++) Configure the interrupt and/or event mode using "Mode" member from + EXTI_ConfigTypeDef structure. + (++) For configurable lines, configure rising and/or falling trigger + "Trigger" member from EXTI_ConfigTypeDef structure. + (++) For Exti lines linked to gpio, choose gpio port using "GPIOSel" + member from GPIO_InitTypeDef structure. + + (#) Get current Exti configuration of a dedicated line using + HAL_EXTI_GetConfigLine(). + (++) Provide exiting handle as parameter. + (++) Provide pointer on EXTI_ConfigTypeDef structure as second parameter. + + (#) Clear Exti configuration of a dedicated line using HAL_EXTI_GetConfigLine(). + (++) Provide exiting handle as parameter. + + (#) Register callback to treat Exti interrupts using HAL_EXTI_RegisterCallback(). + (++) Provide exiting handle as first parameter. + (++) Provide which callback will be registered using one value from + EXTI_CallbackIDTypeDef. + (++) Provide callback function pointer. + + (#) Get interrupt pending bit using HAL_EXTI_GetPending(). + + (#) Clear interrupt pending bit using HAL_EXTI_GetPending(). + + (#) Generate software interrupt using HAL_EXTI_GenerateSWI(). + + @endverbatim + */ + +/* Includes ------------------------------------------------------------------*/ +#include "stm32f7xx_hal.h" + +/** @addtogroup STM32F7xx_HAL_Driver + * @{ + */ + +/** @addtogroup EXTI + * @{ + */ +/** MISRA C:2012 deviation rule has been granted for following rule: + * Rule-18.1_b - Medium: Array `EXTICR' 1st subscript interval [0,7] may be out + * of bounds [0,3] in following API : + * HAL_EXTI_SetConfigLine + * HAL_EXTI_GetConfigLine + * HAL_EXTI_ClearConfigLine + */ + +#ifdef HAL_EXTI_MODULE_ENABLED + +/* Private typedef -----------------------------------------------------------*/ +/* Private defines -----------------------------------------------------------*/ +/** @defgroup EXTI_Private_Constants EXTI Private Constants + * @{ + */ + +/** + * @} + */ + +/* Private macros ------------------------------------------------------------*/ +/* Private variables ---------------------------------------------------------*/ +/* Private function prototypes -----------------------------------------------*/ +/* Exported functions --------------------------------------------------------*/ + +/** @addtogroup EXTI_Exported_Functions + * @{ + */ + +/** @addtogroup EXTI_Exported_Functions_Group1 + * @brief Configuration functions + * +@verbatim + =============================================================================== + ##### Configuration functions ##### + =============================================================================== + +@endverbatim + * @{ + */ + +/** + * @brief Set configuration of a dedicated Exti line. + * @param hexti Exti handle. + * @param pExtiConfig Pointer on EXTI configuration to be set. + * @retval HAL Status. 
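+  * @note   Illustrative usage sketch (line, trigger and port are example values, not part of this driver):
+  *           EXTI_ConfigTypeDef cfg = {0};
+  *           cfg.Line    = EXTI_LINE_13;
+  *           cfg.Mode    = EXTI_MODE_INTERRUPT;
+  *           cfg.Trigger = EXTI_TRIGGER_FALLING;
+  *           cfg.GPIOSel = EXTI_GPIOC;
+  *           HAL_EXTI_SetConfigLine(&hexti, &cfg);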
+ */ +HAL_StatusTypeDef HAL_EXTI_SetConfigLine(EXTI_HandleTypeDef *hexti, EXTI_ConfigTypeDef *pExtiConfig) +{ + uint32_t regval; + uint32_t linepos; + uint32_t maskline; + + /* Check null pointer */ + if ((hexti == NULL) || (pExtiConfig == NULL)) + { + return HAL_ERROR; + } + + /* Check parameters */ + assert_param(IS_EXTI_LINE(pExtiConfig->Line)); + assert_param(IS_EXTI_MODE(pExtiConfig->Mode)); + + /* Assign line number to handle */ + hexti->Line = pExtiConfig->Line; + + /* Compute line mask */ + linepos = (pExtiConfig->Line & EXTI_PIN_MASK); + maskline = (1uL << linepos); + + /* Configure triggers for configurable lines */ + if ((pExtiConfig->Line & EXTI_CONFIG) != 0x00u) + { + assert_param(IS_EXTI_TRIGGER(pExtiConfig->Trigger)); + + /* Configure rising trigger */ + /* Mask or set line */ + if ((pExtiConfig->Trigger & EXTI_TRIGGER_RISING) != 0x00u) + { + EXTI->RTSR |= maskline; + } + else + { + EXTI->RTSR &= ~maskline; + } + + /* Configure falling trigger */ + /* Mask or set line */ + if ((pExtiConfig->Trigger & EXTI_TRIGGER_FALLING) != 0x00u) + { + EXTI->FTSR |= maskline; + } + else + { + EXTI->FTSR &= ~maskline; + } + + + /* Configure gpio port selection in case of gpio exti line */ + if ((pExtiConfig->Line & EXTI_GPIO) == EXTI_GPIO) + { + assert_param(IS_EXTI_GPIO_PORT(pExtiConfig->GPIOSel)); + assert_param(IS_EXTI_GPIO_PIN(linepos)); + + regval = SYSCFG->EXTICR[linepos >> 2u]; + regval &= ~(SYSCFG_EXTICR1_EXTI0 << (SYSCFG_EXTICR1_EXTI1_Pos * (linepos & 0x03u))); + regval |= (pExtiConfig->GPIOSel << (SYSCFG_EXTICR1_EXTI1_Pos * (linepos & 0x03u))); + SYSCFG->EXTICR[linepos >> 2u] = regval; + } + } + + /* Configure interrupt mode : read current mode */ + /* Mask or set line */ + if ((pExtiConfig->Mode & EXTI_MODE_INTERRUPT) != 0x00u) + { + EXTI->IMR |= maskline; + } + else + { + EXTI->IMR &= ~maskline; + } + + /* Configure event mode : read current mode */ + /* Mask or set line */ + if ((pExtiConfig->Mode & EXTI_MODE_EVENT) != 0x00u) + { + EXTI->EMR |= maskline; + } + else + { + EXTI->EMR &= ~maskline; + } + + return HAL_OK; +} + +/** + * @brief Get configuration of a dedicated Exti line. + * @param hexti Exti handle. + * @param pExtiConfig Pointer on structure to store Exti configuration. + * @retval HAL Status. 
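+  * @note   Mode, Trigger and GPIOSel are rebuilt from the current IMR/EMR, RTSR/FTSR and
+  *         SYSCFG EXTICR register contents.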
+ */ +HAL_StatusTypeDef HAL_EXTI_GetConfigLine(EXTI_HandleTypeDef *hexti, EXTI_ConfigTypeDef *pExtiConfig) +{ + uint32_t regval; + uint32_t linepos; + uint32_t maskline; + + /* Check null pointer */ + if ((hexti == NULL) || (pExtiConfig == NULL)) + { + return HAL_ERROR; + } + + /* Check the parameter */ + assert_param(IS_EXTI_LINE(hexti->Line)); + + /* Store handle line number to configuration structure */ + pExtiConfig->Line = hexti->Line; + + /* Compute line mask */ + linepos = (pExtiConfig->Line & EXTI_PIN_MASK); + maskline = (1uL << linepos); + + /* 1] Get core mode : interrupt */ + + /* Check if selected line is enable */ + if ((EXTI->IMR & maskline) != 0x00u) + { + pExtiConfig->Mode = EXTI_MODE_INTERRUPT; + } + else + { + pExtiConfig->Mode = EXTI_MODE_NONE; + } + + /* Get event mode */ + /* Check if selected line is enable */ + if ((EXTI->EMR & maskline) != 0x00u) + { + pExtiConfig->Mode |= EXTI_MODE_EVENT; + } + + /* Get default Trigger and GPIOSel configuration */ + pExtiConfig->Trigger = EXTI_TRIGGER_NONE; + pExtiConfig->GPIOSel = 0x00u; + + /* 2] Get trigger for configurable lines : rising */ + if ((pExtiConfig->Line & EXTI_CONFIG) != 0x00u) + { + /* Check if configuration of selected line is enable */ + if ((EXTI->RTSR & maskline) != 0x00u) + { + pExtiConfig->Trigger = EXTI_TRIGGER_RISING; + } + + /* Get falling configuration */ + /* Check if configuration of selected line is enable */ + if ((EXTI->FTSR & maskline) != 0x00u) + { + pExtiConfig->Trigger |= EXTI_TRIGGER_FALLING; + } + + /* Get Gpio port selection for gpio lines */ + if ((pExtiConfig->Line & EXTI_GPIO) == EXTI_GPIO) + { + assert_param(IS_EXTI_GPIO_PIN(linepos)); + + regval = SYSCFG->EXTICR[linepos >> 2u]; + pExtiConfig->GPIOSel = ((regval << (SYSCFG_EXTICR1_EXTI1_Pos * (3uL - (linepos & 0x03u)))) >> 24); + } + } + + return HAL_OK; +} + +/** + * @brief Clear whole configuration of a dedicated Exti line. + * @param hexti Exti handle. + * @retval HAL Status. + */ +HAL_StatusTypeDef HAL_EXTI_ClearConfigLine(EXTI_HandleTypeDef *hexti) +{ + uint32_t regval; + uint32_t linepos; + uint32_t maskline; + + /* Check null pointer */ + if (hexti == NULL) + { + return HAL_ERROR; + } + + /* Check the parameter */ + assert_param(IS_EXTI_LINE(hexti->Line)); + + /* compute line mask */ + linepos = (hexti->Line & EXTI_PIN_MASK); + maskline = (1uL << linepos); + + /* 1] Clear interrupt mode */ + EXTI->IMR = (EXTI->IMR & ~maskline); + + /* 2] Clear event mode */ + EXTI->EMR = (EXTI->EMR & ~maskline); + + /* 3] Clear triggers in case of configurable lines */ + if ((hexti->Line & EXTI_CONFIG) != 0x00u) + { + EXTI->RTSR = (EXTI->RTSR & ~maskline); + EXTI->FTSR = (EXTI->FTSR & ~maskline); + + /* Get Gpio port selection for gpio lines */ + if ((hexti->Line & EXTI_GPIO) == EXTI_GPIO) + { + assert_param(IS_EXTI_GPIO_PIN(linepos)); + + regval = SYSCFG->EXTICR[linepos >> 2u]; + regval &= ~(SYSCFG_EXTICR1_EXTI0 << (SYSCFG_EXTICR1_EXTI1_Pos * (linepos & 0x03u))); + SYSCFG->EXTICR[linepos >> 2u] = regval; + } + } + + return HAL_OK; +} + +/** + * @brief Register callback for a dedicated Exti line. + * @param hexti Exti handle. + * @param CallbackID User callback identifier. + * This parameter can be one of @arg @ref EXTI_CallbackIDTypeDef values. + * @param pPendingCbfn function pointer to be stored as callback. + * @retval HAL Status. 
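+  * @note   Only HAL_EXTI_COMMON_CB_ID is supported here; the registered function is invoked from
+  *         HAL_EXTI_IRQHandler() when the line's pending bit is set. Illustrative call (callback
+  *         name is a user-defined example):
+  *           HAL_EXTI_RegisterCallback(&hexti, HAL_EXTI_COMMON_CB_ID, MyExtiLineCallback);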
+ */ +HAL_StatusTypeDef HAL_EXTI_RegisterCallback(EXTI_HandleTypeDef *hexti, EXTI_CallbackIDTypeDef CallbackID, void (*pPendingCbfn)(void)) +{ + HAL_StatusTypeDef status = HAL_OK; + + switch (CallbackID) + { + case HAL_EXTI_COMMON_CB_ID: + hexti->PendingCallback = pPendingCbfn; + break; + + default: + status = HAL_ERROR; + break; + } + + return status; +} + +/** + * @brief Store line number as handle private field. + * @param hexti Exti handle. + * @param ExtiLine Exti line number. + * This parameter can be from 0 to @ref EXTI_LINE_NB. + * @retval HAL Status. + */ +HAL_StatusTypeDef HAL_EXTI_GetHandle(EXTI_HandleTypeDef *hexti, uint32_t ExtiLine) +{ + /* Check the parameters */ + assert_param(IS_EXTI_LINE(ExtiLine)); + + /* Check null pointer */ + if (hexti == NULL) + { + return HAL_ERROR; + } + else + { + /* Store line number as handle private field */ + hexti->Line = ExtiLine; + + return HAL_OK; + } +} + +/** + * @} + */ + +/** @addtogroup EXTI_Exported_Functions_Group2 + * @brief EXTI IO functions. + * +@verbatim + =============================================================================== + ##### IO operation functions ##### + =============================================================================== + +@endverbatim + * @{ + */ + +/** + * @brief Handle EXTI interrupt request. + * @param hexti Exti handle. + * @retval none. + */ +void HAL_EXTI_IRQHandler(EXTI_HandleTypeDef *hexti) +{ + uint32_t regval; + uint32_t maskline; + + /* Compute line mask */ + maskline = (1uL << (hexti->Line & EXTI_PIN_MASK)); + + /* Get pending bit */ + regval = (EXTI->PR & maskline); + if (regval != 0x00u) + { + /* Clear pending bit */ + EXTI->PR = maskline; + + /* Call callback */ + if (hexti->PendingCallback != NULL) + { + hexti->PendingCallback(); + } + } +} + +/** + * @brief Get interrupt pending bit of a dedicated line. + * @param hexti Exti handle. + * @param Edge Specify which pending edge as to be checked. + * This parameter can be one of the following values: + * @arg @ref EXTI_TRIGGER_RISING_FALLING + * This parameter is kept for compatibility with other series. + * @retval 1 if interrupt is pending else 0. + */ +uint32_t HAL_EXTI_GetPending(EXTI_HandleTypeDef *hexti, uint32_t Edge) +{ + uint32_t regval; + uint32_t linepos; + uint32_t maskline; + + /* Check parameters */ + assert_param(IS_EXTI_LINE(hexti->Line)); + assert_param(IS_EXTI_CONFIG_LINE(hexti->Line)); + assert_param(IS_EXTI_PENDING_EDGE(Edge)); + + /* Compute line mask */ + linepos = (hexti->Line & EXTI_PIN_MASK); + maskline = (1uL << linepos); + + /* return 1 if bit is set else 0 */ + regval = ((EXTI->PR & maskline) >> linepos); + return regval; +} + +/** + * @brief Clear interrupt pending bit of a dedicated line. + * @param hexti Exti handle. + * @param Edge Specify which pending edge as to be clear. + * This parameter can be one of the following values: + * @arg @ref EXTI_TRIGGER_RISING_FALLING + * This parameter is kept for compatibility with other series. + * @retval None. + */ +void HAL_EXTI_ClearPending(EXTI_HandleTypeDef *hexti, uint32_t Edge) +{ + uint32_t maskline; + + /* Check parameters */ + assert_param(IS_EXTI_LINE(hexti->Line)); + assert_param(IS_EXTI_CONFIG_LINE(hexti->Line)); + assert_param(IS_EXTI_PENDING_EDGE(Edge)); + + /* Compute line mask */ + maskline = (1uL << (hexti->Line & EXTI_PIN_MASK)); + + /* Clear Pending bit */ + EXTI->PR = maskline; +} + +/** + * @brief Generate a software interrupt for a dedicated line. + * @param hexti Exti handle. + * @retval None. 
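+  * @note   Writing the line mask to SWIER sets the line's pending bit; the interrupt is actually
+  *         taken only if the line is unmasked in IMR (interrupt mode enabled).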
+ */ +void HAL_EXTI_GenerateSWI(EXTI_HandleTypeDef *hexti) +{ + uint32_t maskline; + + /* Check parameters */ + assert_param(IS_EXTI_LINE(hexti->Line)); + assert_param(IS_EXTI_CONFIG_LINE(hexti->Line)); + + /* Compute line mask */ + maskline = (1uL << (hexti->Line & EXTI_PIN_MASK)); + + /* Generate Software interrupt */ + EXTI->SWIER = maskline; +} + +/** + * @} + */ + +/** + * @} + */ + +#endif /* HAL_EXTI_MODULE_ENABLED */ +/** + * @} + */ + +/** + * @} + */ + diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_flash.c b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_flash.c new file mode 100644 index 0000000..d82d629 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_flash.c @@ -0,0 +1,819 @@ +/** + ****************************************************************************** + * @file stm32f7xx_hal_flash.c + * @author MCD Application Team + * @brief FLASH HAL module driver. + * This file provides firmware functions to manage the following + * functionalities of the internal FLASH memory: + * + Program operations functions + * + Memory Control functions + * + Peripheral Errors functions + * + @verbatim + ============================================================================== + ##### FLASH peripheral features ##### + ============================================================================== + + [..] The Flash memory interface manages CPU AHB I-Code and D-Code accesses + to the Flash memory. It implements the erase and program Flash memory operations + and the read and write protection mechanisms. + + [..] The Flash memory interface accelerates code execution with a system of instruction + prefetch and cache lines. + + [..] The FLASH main features are: + (+) Flash memory read operations + (+) Flash memory program/erase operations + (+) Read / write protections + (+) Prefetch on I-Code + (+) 64 cache lines of 128 bits on I-Code + (+) 8 cache lines of 128 bits on D-Code + + ##### How to use this driver ##### + ============================================================================== + [..] + This driver provides functions and macros to configure and program the FLASH + memory of all STM32F7xx devices. + + (#) FLASH Memory IO Programming functions: + (++) Lock and Unlock the FLASH interface using HAL_FLASH_Unlock() and + HAL_FLASH_Lock() functions + (++) Program functions: byte, half word, word and double word + (++) There Two modes of programming : + (+++) Polling mode using HAL_FLASH_Program() function + (+++) Interrupt mode using HAL_FLASH_Program_IT() function + + (#) Interrupts and flags management functions : + (++) Handle FLASH interrupts by calling HAL_FLASH_IRQHandler() + (++) Wait for last FLASH operation according to its status + (++) Get error flag status by calling HAL_SetErrorCode() + [..] + In addition to these functions, this driver includes a set of macros allowing + to handle the following operations: + (+) Set the latency + (+) Enable/Disable the prefetch buffer + (+) Enable/Disable the Instruction cache and the Data cache + (+) Reset the Instruction cache and the Data cache + (+) Enable/Disable the FLASH interrupts + (+) Monitor the FLASH flags status + [..] + (@) For any Flash memory program operation (erase or program), the CPU clock frequency + (HCLK) must be at least 1MHz. 
+ (@) The contents of the Flash memory are not guaranteed if a device reset occurs during + a Flash memory operation. + (@) Any attempt to read the Flash memory while it is being written or erased, causes the + bus to stall. Read operations are processed correctly once the program operation has + completed. This means that code or data fetches cannot be performed while a write/erase + operation is ongoing. + + @endverbatim + ****************************************************************************** + * @attention + * + * Copyright (c) 2017 STMicroelectronics. + * All rights reserved. + * + * This software is licensed under terms that can be found in the LICENSE file in + * the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. + ****************************************************************************** + */ + +/* Includes ------------------------------------------------------------------*/ +#include "stm32f7xx_hal.h" + +/** @addtogroup STM32F7xx_HAL_Driver + * @{ + */ + +/** @defgroup FLASH FLASH + * @brief FLASH HAL module driver + * @{ + */ + +#ifdef HAL_FLASH_MODULE_ENABLED + +/* Private typedef -----------------------------------------------------------*/ +/* Private define ------------------------------------------------------------*/ +/** @addtogroup FLASH_Private_Constants + * @{ + */ +#define SECTOR_MASK ((uint32_t)0xFFFFFF07U) +#define FLASH_TIMEOUT_VALUE ((uint32_t)50000U)/* 50 s */ +/** + * @} + */ +/* Private macro -------------------------------------------------------------*/ +/* Private variables ---------------------------------------------------------*/ +/** @addtogroup FLASH_Private_Variables + * @{ + */ +/* Variable used for Erase sectors under interruption */ +FLASH_ProcessTypeDef pFlash; +/** + * @} + */ + +/* Private function prototypes -----------------------------------------------*/ +/** @addtogroup FLASH_Private_Functions + * @{ + */ +/* Program operations */ +static void FLASH_Program_DoubleWord(uint32_t Address, uint64_t Data); +static void FLASH_Program_Word(uint32_t Address, uint32_t Data); +static void FLASH_Program_HalfWord(uint32_t Address, uint16_t Data); +static void FLASH_Program_Byte(uint32_t Address, uint8_t Data); +static void FLASH_SetErrorCode(void); + +HAL_StatusTypeDef FLASH_WaitForLastOperation(uint32_t Timeout); +/** + * @} + */ + +/* Exported functions --------------------------------------------------------*/ +/** @defgroup FLASH_Exported_Functions FLASH Exported Functions + * @{ + */ + +/** @defgroup FLASH_Exported_Functions_Group1 Programming operation functions + * @brief Programming operation functions + * +@verbatim + =============================================================================== + ##### Programming operation functions ##### + =============================================================================== + [..] + This subsection provides a set of functions allowing to manage the FLASH + program operations. + +@endverbatim + * @{ + */ + +/** + * @brief Program byte, halfword, word or double word at a specified address + * @param TypeProgram Indicate the way to program at a specified address. + * This parameter can be a value of @ref FLASH_Type_Program + * @param Address specifies the address to be programmed. 
+ * @param Data specifies the data to be programmed + * + * @retval HAL_StatusTypeDef HAL Status + */ +HAL_StatusTypeDef HAL_FLASH_Program(uint32_t TypeProgram, uint32_t Address, uint64_t Data) +{ + HAL_StatusTypeDef status = HAL_ERROR; + + /* Process Locked */ + __HAL_LOCK(&pFlash); + + /* Check the parameters */ + assert_param(IS_FLASH_TYPEPROGRAM(TypeProgram)); + + /* Wait for last operation to be completed */ + status = FLASH_WaitForLastOperation((uint32_t)FLASH_TIMEOUT_VALUE); + + if(status == HAL_OK) + { + switch(TypeProgram) + { + case FLASH_TYPEPROGRAM_BYTE : + { + /*Program byte (8-bit) at a specified address.*/ + FLASH_Program_Byte(Address, (uint8_t) Data); + break; + } + + case FLASH_TYPEPROGRAM_HALFWORD : + { + /*Program halfword (16-bit) at a specified address.*/ + FLASH_Program_HalfWord(Address, (uint16_t) Data); + break; + } + + case FLASH_TYPEPROGRAM_WORD : + { + /*Program word (32-bit) at a specified address.*/ + FLASH_Program_Word(Address, (uint32_t) Data); + break; + } + + case FLASH_TYPEPROGRAM_DOUBLEWORD : + { + /*Program double word (64-bit) at a specified address.*/ + FLASH_Program_DoubleWord(Address, Data); + break; + } + default : + break; + } + /* Wait for last operation to be completed */ + status = FLASH_WaitForLastOperation((uint32_t)FLASH_TIMEOUT_VALUE); + + /* If the program operation is completed, disable the PG Bit */ + FLASH->CR &= (~FLASH_CR_PG); + } + + /* Process Unlocked */ + __HAL_UNLOCK(&pFlash); + + return status; +} + +/** + * @brief Program byte, halfword, word or double word at a specified address with interrupt enabled. + * @param TypeProgram Indicate the way to program at a specified address. + * This parameter can be a value of @ref FLASH_Type_Program + * @param Address specifies the address to be programmed. + * @param Data specifies the data to be programmed + * + * @retval HAL Status + */ +HAL_StatusTypeDef HAL_FLASH_Program_IT(uint32_t TypeProgram, uint32_t Address, uint64_t Data) +{ + HAL_StatusTypeDef status = HAL_OK; + + /* Process Locked */ + __HAL_LOCK(&pFlash); + + /* Check the parameters */ + assert_param(IS_FLASH_TYPEPROGRAM(TypeProgram)); + + /* Enable End of FLASH Operation interrupt */ + __HAL_FLASH_ENABLE_IT(FLASH_IT_EOP); + + /* Enable Error source interrupt */ + __HAL_FLASH_ENABLE_IT(FLASH_IT_ERR); + + /* Clear pending flags (if any) */ + __HAL_FLASH_CLEAR_FLAG(FLASH_FLAG_EOP | FLASH_FLAG_OPERR | FLASH_FLAG_WRPERR |\ + FLASH_FLAG_PGAERR | FLASH_FLAG_PGPERR| FLASH_FLAG_ERSERR); + + pFlash.ProcedureOnGoing = FLASH_PROC_PROGRAM; + pFlash.Address = Address; + + switch(TypeProgram) + { + case FLASH_TYPEPROGRAM_BYTE : + { + /*Program byte (8-bit) at a specified address.*/ + FLASH_Program_Byte(Address, (uint8_t) Data); + break; + } + + case FLASH_TYPEPROGRAM_HALFWORD : + { + /*Program halfword (16-bit) at a specified address.*/ + FLASH_Program_HalfWord(Address, (uint16_t) Data); + break; + } + + case FLASH_TYPEPROGRAM_WORD : + { + /*Program word (32-bit) at a specified address.*/ + FLASH_Program_Word(Address, (uint32_t) Data); + break; + } + + case FLASH_TYPEPROGRAM_DOUBLEWORD : + { + /*Program double word (64-bit) at a specified address.*/ + FLASH_Program_DoubleWord(Address, Data); + break; + } + default : + break; + } + return status; +} + +/** + * @brief This function handles FLASH interrupt request. 
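+ * @note  Interrupt-mode sketch (illustrative, not part of the original ST
+ *        driver): the vectored FLASH interrupt must forward to this handler,
+ *        and completion/errors are reported through the weak
+ *        HAL_FLASH_EndOfOperationCallback() / HAL_FLASH_OperationErrorCallback().
+ *        The vector name, target address and data below are assumptions.
+ * @code
+ * void FLASH_IRQHandler(void)              // startup-file vector name
+ * {
+ *   HAL_FLASH_IRQHandler();
+ * }
+ *
+ * // Application side (placeholder address/data; NVIC set up beforehand with
+ * // HAL_NVIC_SetPriority(FLASH_IRQn, 5, 0); HAL_NVIC_EnableIRQ(FLASH_IRQn);):
+ * HAL_FLASH_Unlock();
+ * HAL_FLASH_Program_IT(FLASH_TYPEPROGRAM_WORD, 0x080C0000U, 0x12345678U);
+ * // ... HAL_FLASH_EndOfOperationCallback() fires when programming completes ...
+ * @endcode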
+ * @retval None + */ +void HAL_FLASH_IRQHandler(void) +{ + uint32_t temp = 0; + + /* If the program operation is completed, disable the PG Bit */ + FLASH->CR &= (~FLASH_CR_PG); + + /* If the erase operation is completed, disable the SER Bit */ + FLASH->CR &= (~FLASH_CR_SER); + FLASH->CR &= SECTOR_MASK; + + /* if the erase operation is completed, disable the MER Bit */ + FLASH->CR &= (~FLASH_MER_BIT); + + /* Check FLASH End of Operation flag */ + if(__HAL_FLASH_GET_FLAG(FLASH_FLAG_EOP) != RESET) + { + /* Clear FLASH End of Operation pending bit */ + __HAL_FLASH_CLEAR_FLAG(FLASH_FLAG_EOP); + + switch (pFlash.ProcedureOnGoing) + { + case FLASH_PROC_SECTERASE : + { + /* Nb of sector to erased can be decreased */ + pFlash.NbSectorsToErase--; + + /* Check if there are still sectors to erase */ + if(pFlash.NbSectorsToErase != 0) + { + temp = pFlash.Sector; + /* Indicate user which sector has been erased */ + HAL_FLASH_EndOfOperationCallback(temp); + + /* Increment sector number */ + temp = ++pFlash.Sector; + FLASH_Erase_Sector(temp, pFlash.VoltageForErase); + } + else + { + /* No more sectors to Erase, user callback can be called.*/ + /* Reset Sector and stop Erase sectors procedure */ + pFlash.Sector = temp = 0xFFFFFFFFU; + /* FLASH EOP interrupt user callback */ + HAL_FLASH_EndOfOperationCallback(temp); + /* Sector Erase procedure is completed */ + pFlash.ProcedureOnGoing = FLASH_PROC_NONE; + } + break; + } + + case FLASH_PROC_MASSERASE : + { + /* MassErase ended. Return the selected bank : in this product we don't have Banks */ + /* FLASH EOP interrupt user callback */ + HAL_FLASH_EndOfOperationCallback(0); + /* MAss Erase procedure is completed */ + pFlash.ProcedureOnGoing = FLASH_PROC_NONE; + break; + } + + case FLASH_PROC_PROGRAM : + { + /*Program ended. 
Return the selected address*/ + /* FLASH EOP interrupt user callback */ + HAL_FLASH_EndOfOperationCallback(pFlash.Address); + /* Programming procedure is completed */ + pFlash.ProcedureOnGoing = FLASH_PROC_NONE; + break; + } + default : + break; + } + } + + /* Check FLASH operation error flags */ + if(__HAL_FLASH_GET_FLAG(FLASH_FLAG_ALL_ERRORS) != RESET) + { + switch (pFlash.ProcedureOnGoing) + { + case FLASH_PROC_SECTERASE : + { + /* return the faulty sector */ + temp = pFlash.Sector; + pFlash.Sector = 0xFFFFFFFFU; + break; + } + case FLASH_PROC_MASSERASE : + { + /* No return in case of Mass Erase */ + temp = 0; + break; + } + case FLASH_PROC_PROGRAM : + { + /*return the faulty address*/ + temp = pFlash.Address; + break; + } + default : + break; + } + /*Save the Error code*/ + FLASH_SetErrorCode(); + + /* FLASH error interrupt user callback */ + HAL_FLASH_OperationErrorCallback(temp); + + /*Stop the procedure ongoing */ + pFlash.ProcedureOnGoing = FLASH_PROC_NONE; + } + + if(pFlash.ProcedureOnGoing == FLASH_PROC_NONE) + { + /* Disable End of FLASH Operation interrupt */ + __HAL_FLASH_DISABLE_IT(FLASH_IT_EOP); + + /* Disable Error source interrupt */ + __HAL_FLASH_DISABLE_IT(FLASH_IT_ERR); + + /* Process Unlocked */ + __HAL_UNLOCK(&pFlash); + } + +} + +/** + * @brief FLASH end of operation interrupt callback + * @param ReturnValue The value saved in this parameter depends on the ongoing procedure + * - Sectors Erase: Sector which has been erased (if 0xFFFFFFFF, it means that + * all the selected sectors have been erased) + * - Program : Address which was selected for data program + * - Mass Erase : No return value expected + * @retval None + */ +__weak void HAL_FLASH_EndOfOperationCallback(uint32_t ReturnValue) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(ReturnValue); + /* NOTE : This function Should not be modified, when the callback is needed, + the HAL_FLASH_EndOfOperationCallback could be implemented in the user file + */ +} + +/** + * @brief FLASH operation error interrupt callback + * @param ReturnValue The value saved in this parameter depends on the ongoing procedure + * - Sectors Erase: Sector which has been erased (if 0xFFFFFFFF, it means that + * all the selected sectors have been erased) + * - Program : Address which was selected for data program + * - Mass Erase : No return value expected + * @retval None + */ +__weak void HAL_FLASH_OperationErrorCallback(uint32_t ReturnValue) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(ReturnValue); + /* NOTE : This function Should not be modified, when the callback is needed, + the HAL_FLASH_OperationErrorCallback could be implemented in the user file + */ +} + +/** + * @} + */ + +/** @defgroup FLASH_Exported_Functions_Group2 Peripheral Control functions + * @brief management functions + * +@verbatim + =============================================================================== + ##### Peripheral Control functions ##### + =============================================================================== + [..] + This subsection provides a set of functions allowing to control the FLASH + memory operations. 
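+
+    [..]
+      A typical polling-mode write sequence (illustrative sketch, user code,
+      not part of the original ST driver; the address and data below are
+      placeholder values):
+
+        HAL_FLASH_Unlock();
+        /* optional: erase the target sector first with HAL_FLASHEx_Erase()
+           from stm32f7xx_hal_flash_ex.c */
+        if (HAL_FLASH_Program(FLASH_TYPEPROGRAM_WORD, 0x080C0000U, 0x12345678U) != HAL_OK)
+        {
+          /* inspect HAL_FLASH_GetError() for the failure cause */
+        }
+        HAL_FLASH_Lock();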
+ +@endverbatim + * @{ + */ + +/** + * @brief Unlock the FLASH control register access + * @retval HAL Status + */ +HAL_StatusTypeDef HAL_FLASH_Unlock(void) +{ + HAL_StatusTypeDef status = HAL_OK; + + if(READ_BIT(FLASH->CR, FLASH_CR_LOCK) != RESET) + { + /* Authorize the FLASH Registers access */ + WRITE_REG(FLASH->KEYR, FLASH_KEY1); + WRITE_REG(FLASH->KEYR, FLASH_KEY2); + + /* Verify Flash is unlocked */ + if(READ_BIT(FLASH->CR, FLASH_CR_LOCK) != RESET) + { + status = HAL_ERROR; + } + } + + return status; +} + +/** + * @brief Locks the FLASH control register access + * @retval HAL Status + */ +HAL_StatusTypeDef HAL_FLASH_Lock(void) +{ + /* Set the LOCK Bit to lock the FLASH Registers access */ + FLASH->CR |= FLASH_CR_LOCK; + + return HAL_OK; +} + +/** + * @brief Unlock the FLASH Option Control Registers access. + * @retval HAL Status + */ +HAL_StatusTypeDef HAL_FLASH_OB_Unlock(void) +{ + if((FLASH->OPTCR & FLASH_OPTCR_OPTLOCK) != RESET) + { + /* Authorizes the Option Byte register programming */ + FLASH->OPTKEYR = FLASH_OPT_KEY1; + FLASH->OPTKEYR = FLASH_OPT_KEY2; + } + else + { + return HAL_ERROR; + } + + return HAL_OK; +} + +/** + * @brief Lock the FLASH Option Control Registers access. + * @retval HAL Status + */ +HAL_StatusTypeDef HAL_FLASH_OB_Lock(void) +{ + /* Set the OPTLOCK Bit to lock the FLASH Option Byte Registers access */ + FLASH->OPTCR |= FLASH_OPTCR_OPTLOCK; + + return HAL_OK; +} + +/** + * @brief Launch the option byte loading. + * @retval HAL Status + */ +HAL_StatusTypeDef HAL_FLASH_OB_Launch(void) +{ + /* Set the OPTSTRT bit in OPTCR register */ + FLASH->OPTCR |= FLASH_OPTCR_OPTSTRT; + + /* Wait for last operation to be completed */ + return(FLASH_WaitForLastOperation((uint32_t)FLASH_TIMEOUT_VALUE)); +} + +/** + * @} + */ + +/** @defgroup FLASH_Exported_Functions_Group3 Peripheral State and Errors functions + * @brief Peripheral Errors functions + * +@verbatim + =============================================================================== + ##### Peripheral Errors functions ##### + =============================================================================== + [..] + This subsection permits to get in run-time Errors of the FLASH peripheral. + +@endverbatim + * @{ + */ + +/** + * @brief Get the specific FLASH error flag. + * @retval FLASH_ErrorCode: The returned value can be: + * @arg FLASH_ERROR_ERS: FLASH Erasing Sequence error flag + * @arg FLASH_ERROR_PGP: FLASH Programming Parallelism error flag + * @arg FLASH_ERROR_PGA: FLASH Programming Alignment error flag + * @arg FLASH_ERROR_WRP: FLASH Write protected error flag + * @arg FLASH_ERROR_OPERATION: FLASH operation Error flag + */ +uint32_t HAL_FLASH_GetError(void) +{ + return pFlash.ErrorCode; +} + +/** + * @} + */ + +/** + * @brief Wait for a FLASH operation to complete. + * @param Timeout maximum flash operationtimeout + * @retval HAL Status + */ +HAL_StatusTypeDef FLASH_WaitForLastOperation(uint32_t Timeout) +{ + uint32_t tickstart = 0; + + /* Clear Error Code */ + pFlash.ErrorCode = HAL_FLASH_ERROR_NONE; + + /* Wait for the FLASH operation to complete by polling on BUSY flag to be reset. 
+ Even if the FLASH operation fails, the BUSY flag will be reset and an error + flag will be set */ + /* Get tick */ + tickstart = HAL_GetTick(); + + while(__HAL_FLASH_GET_FLAG(FLASH_FLAG_BSY) != RESET) + { + if(Timeout != HAL_MAX_DELAY) + { + if((Timeout == 0)||((HAL_GetTick() - tickstart ) > Timeout)) + { + return HAL_TIMEOUT; + } + } + } + + if(__HAL_FLASH_GET_FLAG(FLASH_FLAG_ALL_ERRORS) != RESET) + { + /*Save the error code*/ + FLASH_SetErrorCode(); + return HAL_ERROR; + } + + /* Check FLASH End of Operation flag */ + if (__HAL_FLASH_GET_FLAG(FLASH_FLAG_EOP) != RESET) + { + /* Clear FLASH End of Operation pending bit */ + __HAL_FLASH_CLEAR_FLAG(FLASH_FLAG_EOP); + } + + /* If there is an error flag set */ + return HAL_OK; + +} + +/** + * @brief Program a double word (64-bit) at a specified address. + * @note This function must be used when the device voltage range is from + * 2.7V to 3.6V and an External Vpp is present. + * + * @note If an erase and a program operations are requested simultaneously, + * the erase operation is performed before the program one. + * + * @param Address specifies the address to be programmed. + * @param Data specifies the data to be programmed. + * @retval None + */ +static void FLASH_Program_DoubleWord(uint32_t Address, uint64_t Data) +{ + /* Check the parameters */ + assert_param(IS_FLASH_ADDRESS(Address)); + + /* If the previous operation is completed, proceed to program the new data */ + FLASH->CR &= CR_PSIZE_MASK; + FLASH->CR |= FLASH_PSIZE_DOUBLE_WORD; + FLASH->CR |= FLASH_CR_PG; + + /* Program first word */ + *(__IO uint32_t*)Address = (uint32_t)Data; + /* Barrier to ensure programming is performed in 2 steps, in right order + (independently of compiler optimization behavior) */ + __ISB(); + + /* Program second word */ + *(__IO uint32_t*)(Address+4) = (uint32_t)(Data >> 32); + + /* Data synchronous Barrier (DSB) Just after the write operation + This will force the CPU to respect the sequence of instruction (no optimization).*/ + __DSB(); +} + + +/** + * @brief Program word (32-bit) at a specified address. + * @note This function must be used when the device voltage range is from + * 2.7V to 3.3V. + * + * @note If an erase and a program operations are requested simultaneously, + * the erase operation is performed before the program one. + * + * @param Address specifies the address to be programmed. + * @param Data specifies the data to be programmed. + * @retval None + */ +static void FLASH_Program_Word(uint32_t Address, uint32_t Data) +{ + /* Check the parameters */ + assert_param(IS_FLASH_ADDRESS(Address)); + + /* If the previous operation is completed, proceed to program the new data */ + FLASH->CR &= CR_PSIZE_MASK; + FLASH->CR |= FLASH_PSIZE_WORD; + FLASH->CR |= FLASH_CR_PG; + + *(__IO uint32_t*)Address = Data; + + /* Data synchronous Barrier (DSB) Just after the write operation + This will force the CPU to respect the sequence of instruction (no optimization).*/ + __DSB(); +} + +/** + * @brief Program a half-word (16-bit) at a specified address. + * @note This function must be used when the device voltage range is from + * 2.1V to 3.6V. + * + * @note If an erase and a program operations are requested simultaneously, + * the erase operation is performed before the program one. + * + * @param Address specifies the address to be programmed. + * @param Data specifies the data to be programmed. 
+ * @retval None + */ +static void FLASH_Program_HalfWord(uint32_t Address, uint16_t Data) +{ + /* Check the parameters */ + assert_param(IS_FLASH_ADDRESS(Address)); + + /* If the previous operation is completed, proceed to program the new data */ + FLASH->CR &= CR_PSIZE_MASK; + FLASH->CR |= FLASH_PSIZE_HALF_WORD; + FLASH->CR |= FLASH_CR_PG; + + *(__IO uint16_t*)Address = Data; + + /* Data synchronous Barrier (DSB) Just after the write operation + This will force the CPU to respect the sequence of instruction (no optimization).*/ + __DSB(); + +} + +/** + * @brief Program byte (8-bit) at a specified address. + * @note This function must be used when the device voltage range is from + * 1.7V to 3.6V. + * + * @note If an erase and a program operations are requested simultaneously, + * the erase operation is performed before the program one. + * + * @param Address specifies the address to be programmed. + * @param Data specifies the data to be programmed. + * @retval None + */ +static void FLASH_Program_Byte(uint32_t Address, uint8_t Data) +{ + /* Check the parameters */ + assert_param(IS_FLASH_ADDRESS(Address)); + + /* If the previous operation is completed, proceed to program the new data */ + FLASH->CR &= CR_PSIZE_MASK; + FLASH->CR |= FLASH_PSIZE_BYTE; + FLASH->CR |= FLASH_CR_PG; + + *(__IO uint8_t*)Address = Data; + + /* Data synchronous Barrier (DSB) Just after the write operation + This will force the CPU to respect the sequence of instruction (no optimization).*/ + __DSB(); +} + +/** + * @brief Set the specific FLASH error flag. + * @retval None + */ +static void FLASH_SetErrorCode(void) +{ + if(__HAL_FLASH_GET_FLAG(FLASH_FLAG_OPERR) != RESET) + { + pFlash.ErrorCode |= HAL_FLASH_ERROR_OPERATION; + } + + if(__HAL_FLASH_GET_FLAG(FLASH_FLAG_WRPERR) != RESET) + { + pFlash.ErrorCode |= HAL_FLASH_ERROR_WRP; + } + + if(__HAL_FLASH_GET_FLAG(FLASH_FLAG_PGAERR) != RESET) + { + pFlash.ErrorCode |= HAL_FLASH_ERROR_PGA; + } + + if(__HAL_FLASH_GET_FLAG(FLASH_FLAG_PGPERR) != RESET) + { + pFlash.ErrorCode |= HAL_FLASH_ERROR_PGP; + } + + if(__HAL_FLASH_GET_FLAG(FLASH_FLAG_ERSERR) != RESET) + { + pFlash.ErrorCode |= HAL_FLASH_ERROR_ERS; + } + +#if defined (FLASH_OPTCR2_PCROP) + if(__HAL_FLASH_GET_FLAG(FLASH_FLAG_RDERR) != RESET) + { + pFlash.ErrorCode |= HAL_FLASH_ERROR_RD; + } +#endif /* FLASH_OPTCR2_PCROP */ + + /* Clear error programming flags */ + __HAL_FLASH_CLEAR_FLAG(FLASH_FLAG_ALL_ERRORS); +} + +/** + * @} + */ + +#endif /* HAL_FLASH_MODULE_ENABLED */ + +/** + * @} + */ + +/** + * @} + */ + diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_flash_ex.c b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_flash_ex.c new file mode 100644 index 0000000..b10c0d0 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_flash_ex.c @@ -0,0 +1,1119 @@ +/** + ****************************************************************************** + * @file stm32f7xx_hal_flash_ex.c + * @author MCD Application Team + * @brief Extended FLASH HAL module driver. + * This file provides firmware functions to manage the following + * functionalities of the FLASH extension peripheral: + * + Extended programming operations functions + * + @verbatim + ============================================================================== + ##### Flash Extension features ##### + ============================================================================== + + [..] 
Comparing to other previous devices, the FLASH interface for STM32F76xx/STM32F77xx + devices contains the following additional features + + (+) Capacity up to 2 Mbyte with dual bank architecture supporting read-while-write + capability (RWW) + (+) Dual bank memory organization + (+) Dual boot mode + + ##### How to use this driver ##### + ============================================================================== + [..] This driver provides functions to configure and program the FLASH memory + of all STM32F7xx devices. It includes + (#) FLASH Memory Erase functions: + (++) Lock and Unlock the FLASH interface using HAL_FLASH_Unlock() and + HAL_FLASH_Lock() functions + (++) Erase function: Erase sector, erase all sectors + (++) There are two modes of erase : + (+++) Polling Mode using HAL_FLASHEx_Erase() + (+++) Interrupt Mode using HAL_FLASHEx_Erase_IT() + + (#) Option Bytes Programming functions: Use HAL_FLASHEx_OBProgram() to : + (++) Set/Reset the write protection + (++) Set the Read protection Level + (++) Set the BOR level + (++) Program the user Option Bytes + + @endverbatim + ****************************************************************************** + * @attention + * + * Copyright (c) 2017 STMicroelectronics. + * All rights reserved. + * + * This software is licensed under terms that can be found in the LICENSE file in + * the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. + ****************************************************************************** + */ + +/* Includes ------------------------------------------------------------------*/ +#include "stm32f7xx_hal.h" + +/** @addtogroup STM32F7xx_HAL_Driver + * @{ + */ + +/** @defgroup FLASHEx FLASHEx + * @brief FLASH HAL Extension module driver + * @{ + */ + +#ifdef HAL_FLASH_MODULE_ENABLED + +/* Private typedef -----------------------------------------------------------*/ +/* Private define ------------------------------------------------------------*/ +/** @addtogroup FLASHEx_Private_Constants + * @{ + */ +#define SECTOR_MASK 0xFFFFFF07U +#define FLASH_TIMEOUT_VALUE 50000U/* 50 s */ +/** + * @} + */ + +/* Private macro -------------------------------------------------------------*/ +/* Private variables ---------------------------------------------------------*/ +/** @addtogroup FLASHEx_Private_Variables + * @{ + */ +extern FLASH_ProcessTypeDef pFlash; +/** + * @} + */ + +/* Private function prototypes -----------------------------------------------*/ +/** @addtogroup FLASHEx_Private_Functions + * @{ + */ +/* Option bytes control */ +static HAL_StatusTypeDef FLASH_OB_EnableWRP(uint32_t WRPSector); +static HAL_StatusTypeDef FLASH_OB_DisableWRP(uint32_t WRPSector); +static HAL_StatusTypeDef FLASH_OB_RDP_LevelConfig(uint8_t Level); +static HAL_StatusTypeDef FLASH_OB_BOR_LevelConfig(uint8_t Level); +static HAL_StatusTypeDef FLASH_OB_BootAddressConfig(uint32_t BootOption, uint32_t Address); +static uint32_t FLASH_OB_GetUser(void); +static uint32_t FLASH_OB_GetWRP(void); +static uint8_t FLASH_OB_GetRDP(void); +static uint32_t FLASH_OB_GetBOR(void); +static uint32_t FLASH_OB_GetBootAddress(uint32_t BootOption); + +#if defined (FLASH_OPTCR_nDBANK) +static void FLASH_MassErase(uint8_t VoltageRange, uint32_t Banks); +static HAL_StatusTypeDef FLASH_OB_UserConfig(uint32_t Wwdg, uint32_t Iwdg, uint32_t Stop, uint32_t Stdby, uint32_t Iwdgstop, \ + uint32_t Iwdgstdby, uint32_t NDBank, uint32_t NDBoot); +#else +static void FLASH_MassErase(uint8_t VoltageRange); +static 
HAL_StatusTypeDef FLASH_OB_UserConfig(uint32_t Wwdg, uint32_t Iwdg, uint32_t Stop, uint32_t Stdby, uint32_t Iwdgstop, uint32_t Iwdgstdby); +#endif /* FLASH_OPTCR_nDBANK */ + +#if defined (FLASH_OPTCR2_PCROP) +static HAL_StatusTypeDef FLASH_OB_PCROP_Config(uint32_t PCROPSector); +static HAL_StatusTypeDef FLASH_OB_PCROP_RDP_Config(uint32_t Pcrop_Rdp); +static uint32_t FLASH_OB_GetPCROP(void); +static uint32_t FLASH_OB_GetPCROPRDP(void); +#endif /* FLASH_OPTCR2_PCROP */ + +extern HAL_StatusTypeDef FLASH_WaitForLastOperation(uint32_t Timeout); +/** + * @} + */ + +/* Exported functions --------------------------------------------------------*/ +/** @defgroup FLASHEx_Exported_Functions FLASHEx Exported Functions + * @{ + */ + +/** @defgroup FLASHEx_Exported_Functions_Group1 Extended IO operation functions + * @brief Extended IO operation functions + * +@verbatim + =============================================================================== + ##### Extended programming operation functions ##### + =============================================================================== + [..] + This subsection provides a set of functions allowing to manage the Extension FLASH + programming operations Operations. + +@endverbatim + * @{ + */ +/** + * @brief Perform a mass erase or erase the specified FLASH memory sectors + * @param[in] pEraseInit pointer to an FLASH_EraseInitTypeDef structure that + * contains the configuration information for the erasing. + * + * @param[out] SectorError pointer to variable that + * contains the configuration information on faulty sector in case of error + * (0xFFFFFFFF means that all the sectors have been correctly erased) + * + * @retval HAL Status + */ +HAL_StatusTypeDef HAL_FLASHEx_Erase(FLASH_EraseInitTypeDef *pEraseInit, uint32_t *SectorError) +{ + HAL_StatusTypeDef status = HAL_ERROR; + uint32_t index = 0; + + /* Process Locked */ + __HAL_LOCK(&pFlash); + + /* Check the parameters */ + assert_param(IS_FLASH_TYPEERASE(pEraseInit->TypeErase)); + + /* Wait for last operation to be completed */ + status = FLASH_WaitForLastOperation((uint32_t)FLASH_TIMEOUT_VALUE); + + if(status == HAL_OK) + { + /*Initialization of SectorError variable*/ + *SectorError = 0xFFFFFFFFU; + + if(pEraseInit->TypeErase == FLASH_TYPEERASE_MASSERASE) + { + /*Mass erase to be done*/ +#if defined (FLASH_OPTCR_nDBANK) + FLASH_MassErase((uint8_t) pEraseInit->VoltageRange, pEraseInit->Banks); +#else + FLASH_MassErase((uint8_t) pEraseInit->VoltageRange); +#endif /* FLASH_OPTCR_nDBANK */ + + /* Wait for last operation to be completed */ + status = FLASH_WaitForLastOperation((uint32_t)FLASH_TIMEOUT_VALUE); + + /* if the erase operation is completed, disable the MER Bit */ + FLASH->CR &= (~FLASH_MER_BIT); + } + else + { + /* Check the parameters */ + assert_param(IS_FLASH_NBSECTORS(pEraseInit->NbSectors + pEraseInit->Sector)); + + /* Erase by sector by sector to be done*/ + for(index = pEraseInit->Sector; index < (pEraseInit->NbSectors + pEraseInit->Sector); index++) + { + FLASH_Erase_Sector(index, (uint8_t) pEraseInit->VoltageRange); + + /* Wait for last operation to be completed */ + status = FLASH_WaitForLastOperation((uint32_t)FLASH_TIMEOUT_VALUE); + + /* If the erase operation is completed, disable the SER Bit and SNB Bits */ + CLEAR_BIT(FLASH->CR, (FLASH_CR_SER | FLASH_CR_SNB)); + + if(status != HAL_OK) + { + /* In case of error, stop erase procedure and return the faulty sector*/ + *SectorError = index; + break; + } + } + } + } + + /* Process Unlocked */ + __HAL_UNLOCK(&pFlash); + + return status; 
+} + +/** + * @brief Perform a mass erase or erase the specified FLASH memory sectors with interrupt enabled + * @param pEraseInit pointer to an FLASH_EraseInitTypeDef structure that + * contains the configuration information for the erasing. + * + * @retval HAL Status + */ +HAL_StatusTypeDef HAL_FLASHEx_Erase_IT(FLASH_EraseInitTypeDef *pEraseInit) +{ + HAL_StatusTypeDef status = HAL_OK; + + /* Process Locked */ + __HAL_LOCK(&pFlash); + + /* Check the parameters */ + assert_param(IS_FLASH_TYPEERASE(pEraseInit->TypeErase)); + + /* Enable End of FLASH Operation interrupt */ + __HAL_FLASH_ENABLE_IT(FLASH_IT_EOP); + + /* Enable Error source interrupt */ + __HAL_FLASH_ENABLE_IT(FLASH_IT_ERR); + + /* Clear pending flags (if any) */ + __HAL_FLASH_CLEAR_FLAG(FLASH_FLAG_EOP | FLASH_FLAG_OPERR | FLASH_FLAG_WRPERR |\ + FLASH_FLAG_PGAERR | FLASH_FLAG_PGPERR| FLASH_FLAG_ERSERR); + + if(pEraseInit->TypeErase == FLASH_TYPEERASE_MASSERASE) + { + /*Mass erase to be done*/ + pFlash.ProcedureOnGoing = FLASH_PROC_MASSERASE; +#if defined (FLASH_OPTCR_nDBANK) + FLASH_MassErase((uint8_t) pEraseInit->VoltageRange, pEraseInit->Banks); +#else + FLASH_MassErase((uint8_t) pEraseInit->VoltageRange); +#endif /* FLASH_OPTCR_nDBANK */ + } + else + { + /* Erase by sector to be done*/ + + /* Check the parameters */ + assert_param(IS_FLASH_NBSECTORS(pEraseInit->NbSectors + pEraseInit->Sector)); + + pFlash.ProcedureOnGoing = FLASH_PROC_SECTERASE; + pFlash.NbSectorsToErase = pEraseInit->NbSectors; + pFlash.Sector = pEraseInit->Sector; + pFlash.VoltageForErase = (uint8_t)pEraseInit->VoltageRange; + + /*Erase 1st sector and wait for IT*/ + FLASH_Erase_Sector(pEraseInit->Sector, pEraseInit->VoltageRange); + } + + return status; +} + +/** + * @brief Program option bytes + * @param pOBInit pointer to an FLASH_OBInitStruct structure that + * contains the configuration information for the programming. 
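+ * @note  Usage sketch (illustrative, not part of the original ST driver):
+ *        raising the BOR threshold through the option bytes. Other fields of
+ *        FLASH_OBProgramInitTypeDef are driven the same way via OptionType
+ *        flags.
+ * @code
+ * FLASH_OBProgramInitTypeDef ob = {0};
+ *
+ * HAL_FLASH_OB_Unlock();
+ * ob.OptionType = OPTIONBYTE_BOR;
+ * ob.BORLevel   = OB_BOR_LEVEL3;            // 2.7 V .. 3.6 V threshold
+ * if (HAL_FLASHEx_OBProgram(&ob) == HAL_OK)
+ * {
+ *   HAL_FLASH_OB_Launch();                  // reload the option bytes
+ * }
+ * HAL_FLASH_OB_Lock();
+ * @endcode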
+ * + * @retval HAL Status + */ +HAL_StatusTypeDef HAL_FLASHEx_OBProgram(FLASH_OBProgramInitTypeDef *pOBInit) +{ + HAL_StatusTypeDef status = HAL_ERROR; + + /* Process Locked */ + __HAL_LOCK(&pFlash); + + /* Check the parameters */ + assert_param(IS_OPTIONBYTE(pOBInit->OptionType)); + + /* Write protection configuration */ + if((pOBInit->OptionType & OPTIONBYTE_WRP) == OPTIONBYTE_WRP) + { + assert_param(IS_WRPSTATE(pOBInit->WRPState)); + if(pOBInit->WRPState == OB_WRPSTATE_ENABLE) + { + /*Enable of Write protection on the selected Sector*/ + status = FLASH_OB_EnableWRP(pOBInit->WRPSector); + } + else + { + /*Disable of Write protection on the selected Sector*/ + status = FLASH_OB_DisableWRP(pOBInit->WRPSector); + } + } + + /* Read protection configuration */ + if((pOBInit->OptionType & OPTIONBYTE_RDP) == OPTIONBYTE_RDP) + { + status = FLASH_OB_RDP_LevelConfig(pOBInit->RDPLevel); + } + + /* USER configuration */ + if((pOBInit->OptionType & OPTIONBYTE_USER) == OPTIONBYTE_USER) + { +#if defined (FLASH_OPTCR_nDBANK) + status = FLASH_OB_UserConfig(pOBInit->USERConfig & OB_WWDG_SW, + pOBInit->USERConfig & OB_IWDG_SW, + pOBInit->USERConfig & OB_STOP_NO_RST, + pOBInit->USERConfig & OB_STDBY_NO_RST, + pOBInit->USERConfig & OB_IWDG_STOP_ACTIVE, + pOBInit->USERConfig & OB_IWDG_STDBY_ACTIVE, + pOBInit->USERConfig & OB_NDBANK_SINGLE_BANK, + pOBInit->USERConfig & OB_DUAL_BOOT_DISABLE); +#else + status = FLASH_OB_UserConfig(pOBInit->USERConfig & OB_WWDG_SW, + pOBInit->USERConfig & OB_IWDG_SW, + pOBInit->USERConfig & OB_STOP_NO_RST, + pOBInit->USERConfig & OB_STDBY_NO_RST, + pOBInit->USERConfig & OB_IWDG_STOP_ACTIVE, + pOBInit->USERConfig & OB_IWDG_STDBY_ACTIVE); +#endif /* FLASH_OPTCR_nDBANK */ + } + + /* BOR Level configuration */ + if((pOBInit->OptionType & OPTIONBYTE_BOR) == OPTIONBYTE_BOR) + { + status = FLASH_OB_BOR_LevelConfig(pOBInit->BORLevel); + } + + /* Boot 0 Address configuration */ + if((pOBInit->OptionType & OPTIONBYTE_BOOTADDR_0) == OPTIONBYTE_BOOTADDR_0) + { + status = FLASH_OB_BootAddressConfig(OPTIONBYTE_BOOTADDR_0, pOBInit->BootAddr0); + } + + /* Boot 1 Address configuration */ + if((pOBInit->OptionType & OPTIONBYTE_BOOTADDR_1) == OPTIONBYTE_BOOTADDR_1) + { + status = FLASH_OB_BootAddressConfig(OPTIONBYTE_BOOTADDR_1, pOBInit->BootAddr1); + } + +#if defined (FLASH_OPTCR2_PCROP) + /* PCROP configuration */ + if((pOBInit->OptionType & OPTIONBYTE_PCROP) == OPTIONBYTE_PCROP) + { + status = FLASH_OB_PCROP_Config(pOBInit->PCROPSector); + } + + /* PCROP_RDP configuration */ + if((pOBInit->OptionType & OPTIONBYTE_PCROP_RDP) == OPTIONBYTE_PCROP_RDP) + { + status = FLASH_OB_PCROP_RDP_Config(pOBInit->PCROPRdp); + } +#endif /* FLASH_OPTCR2_PCROP */ + + /* Process Unlocked */ + __HAL_UNLOCK(&pFlash); + + return status; +} + +/** + * @brief Get the Option byte configuration + * @param pOBInit pointer to an FLASH_OBInitStruct structure that + * contains the configuration information for the programming. 
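+ * @note  Read-back sketch (illustrative, not part of the original ST driver):
+ * @code
+ * FLASH_OBProgramInitTypeDef ob_now = {0};
+ *
+ * HAL_FLASHEx_OBGetConfig(&ob_now);
+ * if (ob_now.RDPLevel == OB_RDP_LEVEL_0)
+ * {
+ *   // readout protection is currently disabled
+ * }
+ * @endcode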
+ * + * @retval None + */ +void HAL_FLASHEx_OBGetConfig(FLASH_OBProgramInitTypeDef *pOBInit) +{ + pOBInit->OptionType = OPTIONBYTE_WRP | OPTIONBYTE_RDP | OPTIONBYTE_USER |\ + OPTIONBYTE_BOR | OPTIONBYTE_BOOTADDR_0 | OPTIONBYTE_BOOTADDR_1; + + /*Get WRP*/ + pOBInit->WRPSector = FLASH_OB_GetWRP(); + + /*Get RDP Level*/ + pOBInit->RDPLevel = FLASH_OB_GetRDP(); + + /*Get USER*/ + pOBInit->USERConfig = FLASH_OB_GetUser(); + + /*Get BOR Level*/ + pOBInit->BORLevel = FLASH_OB_GetBOR(); + + /*Get Boot Address when Boot pin = 0 */ + pOBInit->BootAddr0 = FLASH_OB_GetBootAddress(OPTIONBYTE_BOOTADDR_0); + + /*Get Boot Address when Boot pin = 1 */ + pOBInit->BootAddr1 = FLASH_OB_GetBootAddress(OPTIONBYTE_BOOTADDR_1); + +#if defined (FLASH_OPTCR2_PCROP) + /*Get PCROP Sectors */ + pOBInit->PCROPSector = FLASH_OB_GetPCROP(); + + /*Get PCROP_RDP Value */ + pOBInit->PCROPRdp = FLASH_OB_GetPCROPRDP(); +#endif /* FLASH_OPTCR2_PCROP */ +} +/** + * @} + */ + +#if defined (FLASH_OPTCR_nDBANK) +/** + * @brief Full erase of FLASH memory sectors + * @param VoltageRange The device voltage range which defines the erase parallelism. + * This parameter can be one of the following values: + * @arg VOLTAGE_RANGE_1: when the device voltage range is 1.8V to 2.1V, + * the operation will be done by byte (8-bit) + * @arg VOLTAGE_RANGE_2: when the device voltage range is 2.1V to 2.7V, + * the operation will be done by half word (16-bit) + * @arg VOLTAGE_RANGE_3: when the device voltage range is 2.7V to 3.6V, + * the operation will be done by word (32-bit) + * @arg VOLTAGE_RANGE_4: when the device voltage range is 2.7V to 3.6V + External Vpp, + * the operation will be done by double word (64-bit) + * @param Banks Banks to be erased + * This parameter can be one of the following values: + * @arg FLASH_BANK_1: Bank1 to be erased + * @arg FLASH_BANK_2: Bank2 to be erased + * @arg FLASH_BANK_BOTH: Bank1 and Bank2 to be erased + * + * @retval HAL Status + */ +static void FLASH_MassErase(uint8_t VoltageRange, uint32_t Banks) +{ + /* Check the parameters */ + assert_param(IS_VOLTAGERANGE(VoltageRange)); + assert_param(IS_FLASH_BANK(Banks)); + + /* if the previous operation is completed, proceed to erase all sectors */ + FLASH->CR &= CR_PSIZE_MASK; + if(Banks == FLASH_BANK_BOTH) + { + /* bank1 & bank2 will be erased*/ + FLASH->CR |= FLASH_MER_BIT; + } + else if(Banks == FLASH_BANK_2) + { + /*Only bank2 will be erased*/ + FLASH->CR |= FLASH_CR_MER2; + } + else + { + /*Only bank1 will be erased*/ + FLASH->CR |= FLASH_CR_MER1; + } + FLASH->CR |= FLASH_CR_STRT | ((uint32_t)VoltageRange <<8); + /* Data synchronous Barrier (DSB) Just after the write operation + This will force the CPU to respect the sequence of instruction (no optimization).*/ + __DSB(); +} + +/** + * @brief Erase the specified FLASH memory sector + * @param Sector FLASH sector to erase + * The value of this parameter depend on device used within the same series + * @param VoltageRange The device voltage range which defines the erase parallelism. 
+ * This parameter can be one of the following values: + * @arg FLASH_VOLTAGE_RANGE_1: when the device voltage range is 1.8V to 2.1V, + * the operation will be done by byte (8-bit) + * @arg FLASH_VOLTAGE_RANGE_2: when the device voltage range is 2.1V to 2.7V, + * the operation will be done by half word (16-bit) + * @arg FLASH_VOLTAGE_RANGE_3: when the device voltage range is 2.7V to 3.6V, + * the operation will be done by word (32-bit) + * @arg FLASH_VOLTAGE_RANGE_4: when the device voltage range is 2.7V to 3.6V + External Vpp, + * the operation will be done by double word (64-bit) + * + * @retval None + */ +void FLASH_Erase_Sector(uint32_t Sector, uint8_t VoltageRange) +{ + uint32_t tmp_psize = 0; + + /* Check the parameters */ + assert_param(IS_FLASH_SECTOR(Sector)); + assert_param(IS_VOLTAGERANGE(VoltageRange)); + + if(VoltageRange == FLASH_VOLTAGE_RANGE_1) + { + tmp_psize = FLASH_PSIZE_BYTE; + } + else if(VoltageRange == FLASH_VOLTAGE_RANGE_2) + { + tmp_psize = FLASH_PSIZE_HALF_WORD; + } + else if(VoltageRange == FLASH_VOLTAGE_RANGE_3) + { + tmp_psize = FLASH_PSIZE_WORD; + } + else + { + tmp_psize = FLASH_PSIZE_DOUBLE_WORD; + } + + /* Need to add offset of 4 when sector higher than FLASH_SECTOR_11 */ + if(Sector > FLASH_SECTOR_11) + { + Sector += 4; + } + + /* If the previous operation is completed, proceed to erase the sector */ + FLASH->CR &= CR_PSIZE_MASK; + FLASH->CR |= tmp_psize; + CLEAR_BIT(FLASH->CR, FLASH_CR_SNB); + FLASH->CR |= FLASH_CR_SER | (Sector << FLASH_CR_SNB_Pos); + FLASH->CR |= FLASH_CR_STRT; + + /* Data synchronous Barrier (DSB) Just after the write operation + This will force the CPU to respect the sequence of instruction (no optimization).*/ + __DSB(); +} + +/** + * @brief Return the FLASH Write Protection Option Bytes value. + * @retval uint32_t FLASH Write Protection Option Bytes value + */ +static uint32_t FLASH_OB_GetWRP(void) +{ + /* Return the FLASH write protection Register value */ + return ((uint32_t)(FLASH->OPTCR & 0x0FFF0000)); +} + +/** + * @brief Program the FLASH User Option Byte: IWDG_SW / RST_STOP / RST_STDBY. + * @param Wwdg Selects the IWDG mode + * This parameter can be one of the following values: + * @arg OB_WWDG_SW: Software WWDG selected + * @arg OB_WWDG_HW: Hardware WWDG selected + * @param Iwdg Selects the WWDG mode + * This parameter can be one of the following values: + * @arg OB_IWDG_SW: Software IWDG selected + * @arg OB_IWDG_HW: Hardware IWDG selected + * @param Stop Reset event when entering STOP mode. + * This parameter can be one of the following values: + * @arg OB_STOP_NO_RST: No reset generated when entering in STOP + * @arg OB_STOP_RST: Reset generated when entering in STOP + * @param Stdby Reset event when entering Standby mode. + * This parameter can be one of the following values: + * @arg OB_STDBY_NO_RST: No reset generated when entering in STANDBY + * @arg OB_STDBY_RST: Reset generated when entering in STANDBY + * @param Iwdgstop Independent watchdog counter freeze in Stop mode. + * This parameter can be one of the following values: + * @arg OB_IWDG_STOP_FREEZE: Freeze IWDG counter in STOP + * @arg OB_IWDG_STOP_ACTIVE: IWDG counter active in STOP + * @param Iwdgstdby Independent watchdog counter freeze in standby mode. + * This parameter can be one of the following values: + * @arg OB_IWDG_STDBY_FREEZE: Freeze IWDG counter in STANDBY + * @arg OB_IWDG_STDBY_ACTIVE: IWDG counter active in STANDBY + * @param NDBank Flash Single Bank mode enabled. 
+ * This parameter can be one of the following values: + * @arg OB_NDBANK_SINGLE_BANK: enable 256 bits mode (Flash is a single bank) + * @arg OB_NDBANK_DUAL_BANK: disable 256 bits mode (Flash is a dual bank in 128 bits mode) + * @param NDBoot Flash Dual boot mode disable. + * This parameter can be one of the following values: + * @arg OB_DUAL_BOOT_DISABLE: Disable Dual Boot + * @arg OB_DUAL_BOOT_ENABLE: Enable Dual Boot + + * @retval HAL Status + */ +static HAL_StatusTypeDef FLASH_OB_UserConfig(uint32_t Wwdg, uint32_t Iwdg, uint32_t Stop, uint32_t Stdby, uint32_t Iwdgstop, \ + uint32_t Iwdgstdby, uint32_t NDBank, uint32_t NDBoot) +{ + uint32_t useroptionmask = 0x00; + uint32_t useroptionvalue = 0x00; + + HAL_StatusTypeDef status = HAL_OK; + + /* Check the parameters */ + assert_param(IS_OB_WWDG_SOURCE(Wwdg)); + assert_param(IS_OB_IWDG_SOURCE(Iwdg)); + assert_param(IS_OB_STOP_SOURCE(Stop)); + assert_param(IS_OB_STDBY_SOURCE(Stdby)); + assert_param(IS_OB_IWDG_STOP_FREEZE(Iwdgstop)); + assert_param(IS_OB_IWDG_STDBY_FREEZE(Iwdgstdby)); + assert_param(IS_OB_NDBANK(NDBank)); + assert_param(IS_OB_NDBOOT(NDBoot)); + + /* Wait for last operation to be completed */ + status = FLASH_WaitForLastOperation((uint32_t)FLASH_TIMEOUT_VALUE); + + if(status == HAL_OK) + { + useroptionmask = (FLASH_OPTCR_WWDG_SW | FLASH_OPTCR_IWDG_SW | FLASH_OPTCR_nRST_STOP | \ + FLASH_OPTCR_nRST_STDBY | FLASH_OPTCR_IWDG_STOP | FLASH_OPTCR_IWDG_STDBY | \ + FLASH_OPTCR_nDBOOT | FLASH_OPTCR_nDBANK); + + useroptionvalue = (Iwdg | Wwdg | Stop | Stdby | Iwdgstop | Iwdgstdby | NDBoot | NDBank); + + /* Update User Option Byte */ + MODIFY_REG(FLASH->OPTCR, useroptionmask, useroptionvalue); + } + + return status; +} + +/** + * @brief Return the FLASH User Option Byte value. + * @retval uint32_t FLASH User Option Bytes values: WWDG_SW(Bit4), IWDG_SW(Bit5), nRST_STOP(Bit6), + * nRST_STDBY(Bit7), nDBOOT(Bit28), nDBANK(Bit29), IWDG_STDBY(Bit30) and IWDG_STOP(Bit31). + */ +static uint32_t FLASH_OB_GetUser(void) +{ + /* Return the User Option Byte */ + return ((uint32_t)(FLASH->OPTCR & 0xF00000F0U)); +} +#else + +/** + * @brief Full erase of FLASH memory sectors + * @param VoltageRange The device voltage range which defines the erase parallelism. 
+ * This parameter can be one of the following values: + * @arg VOLTAGE_RANGE_1: when the device voltage range is 1.8V to 2.1V, + * the operation will be done by byte (8-bit) + * @arg VOLTAGE_RANGE_2: when the device voltage range is 2.1V to 2.7V, + * the operation will be done by half word (16-bit) + * @arg VOLTAGE_RANGE_3: when the device voltage range is 2.7V to 3.6V, + * the operation will be done by word (32-bit) + * @arg VOLTAGE_RANGE_4: when the device voltage range is 2.7V to 3.6V + External Vpp, + * the operation will be done by double word (64-bit) + * + * @retval HAL Status + */ +static void FLASH_MassErase(uint8_t VoltageRange) +{ + /* Check the parameters */ + assert_param(IS_VOLTAGERANGE(VoltageRange)); + + /* if the previous operation is completed, proceed to erase all sectors */ + FLASH->CR &= CR_PSIZE_MASK; + FLASH->CR |= FLASH_CR_MER; + FLASH->CR |= FLASH_CR_STRT | ((uint32_t)VoltageRange <<8); + /* Data synchronous Barrier (DSB) Just after the write operation + This will force the CPU to respect the sequence of instruction (no optimization).*/ + __DSB(); +} + +/** + * @brief Erase the specified FLASH memory sector + * @param Sector FLASH sector to erase + * The value of this parameter depend on device used within the same series + * @param VoltageRange The device voltage range which defines the erase parallelism. + * This parameter can be one of the following values: + * @arg FLASH_VOLTAGE_RANGE_1: when the device voltage range is 1.8V to 2.1V, + * the operation will be done by byte (8-bit) + * @arg FLASH_VOLTAGE_RANGE_2: when the device voltage range is 2.1V to 2.7V, + * the operation will be done by half word (16-bit) + * @arg FLASH_VOLTAGE_RANGE_3: when the device voltage range is 2.7V to 3.6V, + * the operation will be done by word (32-bit) + * @arg FLASH_VOLTAGE_RANGE_4: when the device voltage range is 2.7V to 3.6V + External Vpp, + * the operation will be done by double word (64-bit) + * + * @retval None + */ +void FLASH_Erase_Sector(uint32_t Sector, uint8_t VoltageRange) +{ + uint32_t tmp_psize = 0; + + /* Check the parameters */ + assert_param(IS_FLASH_SECTOR(Sector)); + assert_param(IS_VOLTAGERANGE(VoltageRange)); + + if(VoltageRange == FLASH_VOLTAGE_RANGE_1) + { + tmp_psize = FLASH_PSIZE_BYTE; + } + else if(VoltageRange == FLASH_VOLTAGE_RANGE_2) + { + tmp_psize = FLASH_PSIZE_HALF_WORD; + } + else if(VoltageRange == FLASH_VOLTAGE_RANGE_3) + { + tmp_psize = FLASH_PSIZE_WORD; + } + else + { + tmp_psize = FLASH_PSIZE_DOUBLE_WORD; + } + + /* If the previous operation is completed, proceed to erase the sector */ + FLASH->CR &= CR_PSIZE_MASK; + FLASH->CR |= tmp_psize; + FLASH->CR &= SECTOR_MASK; + FLASH->CR |= FLASH_CR_SER | (Sector << FLASH_CR_SNB_Pos); + FLASH->CR |= FLASH_CR_STRT; + + /* Data synchronous Barrier (DSB) Just after the write operation + This will force the CPU to respect the sequence of instruction (no optimization).*/ + __DSB(); +} + +/** + * @brief Return the FLASH Write Protection Option Bytes value. + * @retval uint32_t FLASH Write Protection Option Bytes value + */ +static uint32_t FLASH_OB_GetWRP(void) +{ + /* Return the FLASH write protection Register value */ + return ((uint32_t)(FLASH->OPTCR & 0x00FF0000)); +} + +/** + * @brief Program the FLASH User Option Byte: IWDG_SW / RST_STOP / RST_STDBY. 
+ * @param Wwdg Selects the IWDG mode + * This parameter can be one of the following values: + * @arg OB_WWDG_SW: Software WWDG selected + * @arg OB_WWDG_HW: Hardware WWDG selected + * @param Iwdg Selects the WWDG mode + * This parameter can be one of the following values: + * @arg OB_IWDG_SW: Software IWDG selected + * @arg OB_IWDG_HW: Hardware IWDG selected + * @param Stop Reset event when entering STOP mode. + * This parameter can be one of the following values: + * @arg OB_STOP_NO_RST: No reset generated when entering in STOP + * @arg OB_STOP_RST: Reset generated when entering in STOP + * @param Stdby Reset event when entering Standby mode. + * This parameter can be one of the following values: + * @arg OB_STDBY_NO_RST: No reset generated when entering in STANDBY + * @arg OB_STDBY_RST: Reset generated when entering in STANDBY + * @param Iwdgstop Independent watchdog counter freeze in Stop mode. + * This parameter can be one of the following values: + * @arg OB_IWDG_STOP_FREEZE: Freeze IWDG counter in STOP + * @arg OB_IWDG_STOP_ACTIVE: IWDG counter active in STOP + * @param Iwdgstdby Independent watchdog counter freeze in standby mode. + * This parameter can be one of the following values: + * @arg OB_IWDG_STDBY_FREEZE: Freeze IWDG counter in STANDBY + * @arg OB_IWDG_STDBY_ACTIVE: IWDG counter active in STANDBY + * @retval HAL Status + */ +static HAL_StatusTypeDef FLASH_OB_UserConfig(uint32_t Wwdg, uint32_t Iwdg, uint32_t Stop, uint32_t Stdby, uint32_t Iwdgstop, uint32_t Iwdgstdby) +{ + uint32_t useroptionmask = 0x00; + uint32_t useroptionvalue = 0x00; + + HAL_StatusTypeDef status = HAL_OK; + + /* Check the parameters */ + assert_param(IS_OB_WWDG_SOURCE(Wwdg)); + assert_param(IS_OB_IWDG_SOURCE(Iwdg)); + assert_param(IS_OB_STOP_SOURCE(Stop)); + assert_param(IS_OB_STDBY_SOURCE(Stdby)); + assert_param(IS_OB_IWDG_STOP_FREEZE(Iwdgstop)); + assert_param(IS_OB_IWDG_STDBY_FREEZE(Iwdgstdby)); + + /* Wait for last operation to be completed */ + status = FLASH_WaitForLastOperation((uint32_t)FLASH_TIMEOUT_VALUE); + + if(status == HAL_OK) + { + useroptionmask = (FLASH_OPTCR_WWDG_SW | FLASH_OPTCR_IWDG_SW | FLASH_OPTCR_nRST_STOP | \ + FLASH_OPTCR_nRST_STDBY | FLASH_OPTCR_IWDG_STOP | FLASH_OPTCR_IWDG_STDBY); + + useroptionvalue = (Iwdg | Wwdg | Stop | Stdby | Iwdgstop | Iwdgstdby); + + /* Update User Option Byte */ + MODIFY_REG(FLASH->OPTCR, useroptionmask, useroptionvalue); + } + + return status; + +} + +/** + * @brief Return the FLASH User Option Byte value. + * @retval uint32_t FLASH User Option Bytes values: WWDG_SW(Bit4), IWDG_SW(Bit5), nRST_STOP(Bit6), + * nRST_STDBY(Bit7), IWDG_STDBY(Bit30) and IWDG_STOP(Bit31). + */ +static uint32_t FLASH_OB_GetUser(void) +{ + /* Return the User Option Byte */ + return ((uint32_t)(FLASH->OPTCR & 0xC00000F0U)); +} +#endif /* FLASH_OPTCR_nDBANK */ + +/** + * @brief Enable the write protection of the desired bank1 or bank2 sectors + * + * @note When the memory read protection level is selected (RDP level = 1), + * it is not possible to program or erase the flash sector i if CortexM7 + * debug features are connected or boot code is executed in RAM, even if nWRPi = 1 + * + * @param WRPSector specifies the sector(s) to be write protected. 
+ * This parameter can be one of the following values: + * @arg WRPSector: A value between OB_WRP_SECTOR_0 and OB_WRP_SECTOR_7 (for STM32F74xxx/STM32F75xxx devices) + * or a value between OB_WRP_SECTOR_0 and OB_WRP_SECTOR_11 (in Single Bank mode for STM32F76xxx/STM32F77xxx devices) + * or a value between OB_WRP_DB_SECTOR_0 and OB_WRP_DB_SECTOR_23 (in Dual Bank mode for STM32F76xxx/STM32F77xxx devices) + * @arg OB_WRP_SECTOR_All + * + * @retval HAL FLASH State + */ +static HAL_StatusTypeDef FLASH_OB_EnableWRP(uint32_t WRPSector) +{ + HAL_StatusTypeDef status = HAL_OK; + + /* Check the parameters */ + assert_param(IS_OB_WRP_SECTOR(WRPSector)); + + /* Wait for last operation to be completed */ + status = FLASH_WaitForLastOperation((uint32_t)FLASH_TIMEOUT_VALUE); + + if(status == HAL_OK) + { + /*Write protection enabled on sectors */ + FLASH->OPTCR &= (~WRPSector); + } + + return status; +} + +/** + * @brief Disable the write protection of the desired bank1 or bank 2 sectors + * + * @note When the memory read protection level is selected (RDP level = 1), + * it is not possible to program or erase the flash sector i if CortexM4 + * debug features are connected or boot code is executed in RAM, even if nWRPi = 1 + * + * @param WRPSector specifies the sector(s) to be write protected. + * This parameter can be one of the following values: + * @arg WRPSector: A value between OB_WRP_SECTOR_0 and OB_WRP_SECTOR_7 (for STM32F74xxx/STM32F75xxx devices) + * or a value between OB_WRP_SECTOR_0 and OB_WRP_SECTOR_11 (in Single Bank mode for STM32F76xxx/STM32F77xxx devices) + * or a value between OB_WRP_DB_SECTOR_0 and OB_WRP_DB_SECTOR_23 (in Dual Bank mode for STM32F76xxx/STM32F77xxx devices) + * @arg OB_WRP_Sector_All + * + * + * @retval HAL Status + */ +static HAL_StatusTypeDef FLASH_OB_DisableWRP(uint32_t WRPSector) +{ + HAL_StatusTypeDef status = HAL_OK; + + /* Check the parameters */ + assert_param(IS_OB_WRP_SECTOR(WRPSector)); + + /* Wait for last operation to be completed */ + status = FLASH_WaitForLastOperation((uint32_t)FLASH_TIMEOUT_VALUE); + + if(status == HAL_OK) + { + /* Write protection disabled on sectors */ + FLASH->OPTCR |= (WRPSector); + } + + return status; +} + +/** + * @brief Set the read protection level. + * @param Level specifies the read protection level. + * This parameter can be one of the following values: + * @arg OB_RDP_LEVEL_0: No protection + * @arg OB_RDP_LEVEL_1: Read protection of the memory + * @arg OB_RDP_LEVEL_2: Full chip protection + * + * @note WARNING: When enabling OB_RDP level 2 it's no more possible to go back to level 1 or 0 + * + * @retval HAL Status + */ +static HAL_StatusTypeDef FLASH_OB_RDP_LevelConfig(uint8_t Level) +{ + HAL_StatusTypeDef status = HAL_OK; + + /* Check the parameters */ + assert_param(IS_OB_RDP_LEVEL(Level)); + + /* Wait for last operation to be completed */ + status = FLASH_WaitForLastOperation((uint32_t)FLASH_TIMEOUT_VALUE); + + if(status == HAL_OK) + { + *(__IO uint8_t*)OPTCR_BYTE1_ADDRESS = Level; + } + + return status; +} + +/** + * @brief Set the BOR Level. + * @param Level specifies the Option Bytes BOR Reset Level. 
+ * This parameter can be one of the following values: + * @arg OB_BOR_LEVEL3: Supply voltage ranges from 2.7 to 3.6 V + * @arg OB_BOR_LEVEL2: Supply voltage ranges from 2.4 to 2.7 V + * @arg OB_BOR_LEVEL1: Supply voltage ranges from 2.1 to 2.4 V + * @arg OB_BOR_OFF: Supply voltage ranges from 1.62 to 2.1 V + * @retval HAL Status + */ +static HAL_StatusTypeDef FLASH_OB_BOR_LevelConfig(uint8_t Level) +{ + /* Check the parameters */ + assert_param(IS_OB_BOR_LEVEL(Level)); + + /* Set the BOR Level */ + MODIFY_REG(FLASH->OPTCR, FLASH_OPTCR_BOR_LEV, Level); + + return HAL_OK; + +} + +/** + * @brief Configure Boot base address. + * + * @param BootOption specifies Boot base address depending from Boot pin = 0 or pin = 1 + * This parameter can be one of the following values: + * @arg OPTIONBYTE_BOOTADDR_0 : Boot address based when Boot pin = 0 + * @arg OPTIONBYTE_BOOTADDR_1 : Boot address based when Boot pin = 1 + * @param Address specifies Boot base address + * This parameter can be one of the following values: + * @arg OB_BOOTADDR_ITCM_RAM : Boot from ITCM RAM (0x00000000) + * @arg OB_BOOTADDR_SYSTEM : Boot from System memory bootloader (0x00100000) + * @arg OB_BOOTADDR_ITCM_FLASH : Boot from Flash on ITCM interface (0x00200000) + * @arg OB_BOOTADDR_AXIM_FLASH : Boot from Flash on AXIM interface (0x08000000) + * @arg OB_BOOTADDR_DTCM_RAM : Boot from DTCM RAM (0x20000000) + * @arg OB_BOOTADDR_SRAM1 : Boot from SRAM1 (0x20010000) + * @arg OB_BOOTADDR_SRAM2 : Boot from SRAM2 (0x2004C000) + * + * @retval HAL Status + */ +static HAL_StatusTypeDef FLASH_OB_BootAddressConfig(uint32_t BootOption, uint32_t Address) +{ + HAL_StatusTypeDef status = HAL_OK; + + /* Check the parameters */ + assert_param(IS_OB_BOOT_ADDRESS(Address)); + + /* Wait for last operation to be completed */ + status = FLASH_WaitForLastOperation((uint32_t)FLASH_TIMEOUT_VALUE); + + if(status == HAL_OK) + { + if(BootOption == OPTIONBYTE_BOOTADDR_0) + { + MODIFY_REG(FLASH->OPTCR1, FLASH_OPTCR1_BOOT_ADD0, Address); + } + else + { + MODIFY_REG(FLASH->OPTCR1, FLASH_OPTCR1_BOOT_ADD1, (Address << 16)); + } + } + + return status; +} + +/** + * @brief Returns the FLASH Read Protection level. + * @retval FlagStatus FLASH ReadOut Protection Status: + * This parameter can be one of the following values: + * @arg OB_RDP_LEVEL_0: No protection + * @arg OB_RDP_LEVEL_1: Read protection of the memory + * @arg OB_RDP_LEVEL_2: Full chip protection + */ +static uint8_t FLASH_OB_GetRDP(void) +{ + uint8_t readstatus = OB_RDP_LEVEL_0; + + if ((*(__IO uint8_t*)(OPTCR_BYTE1_ADDRESS)) == OB_RDP_LEVEL_0) + { + readstatus = OB_RDP_LEVEL_0; + } + else if ((*(__IO uint8_t*)(OPTCR_BYTE1_ADDRESS)) == OB_RDP_LEVEL_2) + { + readstatus = OB_RDP_LEVEL_2; + } + else + { + readstatus = OB_RDP_LEVEL_1; + } + + return readstatus; +} + +/** + * @brief Returns the FLASH BOR level. + * @retval uint32_t The FLASH BOR level: + * - OB_BOR_LEVEL3: Supply voltage ranges from 2.7 to 3.6 V + * - OB_BOR_LEVEL2: Supply voltage ranges from 2.4 to 2.7 V + * - OB_BOR_LEVEL1: Supply voltage ranges from 2.1 to 2.4 V + * - OB_BOR_OFF : Supply voltage ranges from 1.62 to 2.1 V + */ +static uint32_t FLASH_OB_GetBOR(void) +{ + /* Return the FLASH BOR level */ + return ((uint32_t)(FLASH->OPTCR & 0x0C)); +} + +/** + * @brief Configure Boot base address. 
+ * + * @param BootOption specifies Boot base address depending from Boot pin = 0 or pin = 1 + * This parameter can be one of the following values: + * @arg OPTIONBYTE_BOOTADDR_0 : Boot address based when Boot pin = 0 + * @arg OPTIONBYTE_BOOTADDR_1 : Boot address based when Boot pin = 1 + * + * @retval uint32_t Boot Base Address: + * - OB_BOOTADDR_ITCM_RAM : Boot from ITCM RAM (0x00000000) + * - OB_BOOTADDR_SYSTEM : Boot from System memory bootloader (0x00100000) + * - OB_BOOTADDR_ITCM_FLASH : Boot from Flash on ITCM interface (0x00200000) + * - OB_BOOTADDR_AXIM_FLASH : Boot from Flash on AXIM interface (0x08000000) + * - OB_BOOTADDR_DTCM_RAM : Boot from DTCM RAM (0x20000000) + * - OB_BOOTADDR_SRAM1 : Boot from SRAM1 (0x20010000) + * - OB_BOOTADDR_SRAM2 : Boot from SRAM2 (0x2004C000) + */ +static uint32_t FLASH_OB_GetBootAddress(uint32_t BootOption) +{ + uint32_t Address = 0; + + /* Return the Boot base Address */ + if(BootOption == OPTIONBYTE_BOOTADDR_0) + { + Address = FLASH->OPTCR1 & FLASH_OPTCR1_BOOT_ADD0; + } + else + { + Address = ((FLASH->OPTCR1 & FLASH_OPTCR1_BOOT_ADD1) >> 16); + } + + return Address; +} + +#if defined (FLASH_OPTCR2_PCROP) +/** + * @brief Set the PCROP protection for sectors. + * @param PCROPSector specifies the sector(s) to be PCROP protected. + * This parameter can be one of the following values: + * @arg OB_PCROP_SECTOR_x: A value between OB_PCROP_SECTOR_0 and OB_PCROP_SECTOR_7 + * @arg OB_PCROP_SECTOR_ALL + * + * @retval HAL Status + */ +static HAL_StatusTypeDef FLASH_OB_PCROP_Config(uint32_t PCROPSector) +{ + HAL_StatusTypeDef status = HAL_OK; + + /* Check the parameters */ + assert_param(IS_OB_PCROP_SECTOR(PCROPSector)); + + /* Wait for last operation to be completed */ + status = FLASH_WaitForLastOperation((uint32_t)FLASH_TIMEOUT_VALUE); + + if(status == HAL_OK) + { + MODIFY_REG(FLASH->OPTCR2, FLASH_OPTCR2_PCROP, PCROPSector); + } + + return status; +} + +/** + * @brief Set the PCROP_RDP value + * @param Pcrop_Rdp specifies the PCROP_RDP bit value. + * + * @retval HAL Status + */ +static HAL_StatusTypeDef FLASH_OB_PCROP_RDP_Config(uint32_t Pcrop_Rdp) +{ + HAL_StatusTypeDef status = HAL_OK; + + /* Check the parameters */ + assert_param(IS_OB_PCROP_RDP_VALUE(Pcrop_Rdp)); + + /* Wait for last operation to be completed */ + status = FLASH_WaitForLastOperation((uint32_t)FLASH_TIMEOUT_VALUE); + + if(status == HAL_OK) + { + MODIFY_REG(FLASH->OPTCR2, FLASH_OPTCR2_PCROP_RDP, Pcrop_Rdp); + } + + return status; +} + +/** + * @brief Return the FLASH PCROP Protection Option Bytes value. + * @retval uint32_t FLASH PCROP Protection Option Bytes value + */ +static uint32_t FLASH_OB_GetPCROP(void) +{ + /* Return the FLASH write protection Register value */ + return ((uint32_t)(FLASH->OPTCR2 & FLASH_OPTCR2_PCROP)); +} + +/** + * @brief Return the FLASH PCROP_RDP option byte value. 
+ * @retval uint32_t FLASH PCROP_RDP option byte value + */ +static uint32_t FLASH_OB_GetPCROPRDP(void) +{ + /* Return the FLASH write protection Register value */ + return ((uint32_t)(FLASH->OPTCR2 & FLASH_OPTCR2_PCROP_RDP)); +} +#endif /* FLASH_OPTCR2_PCROP */ + +/** + * @} + */ + +#endif /* HAL_FLASH_MODULE_ENABLED */ + +/** + * @} + */ + +/** + * @} + */ + diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_gpio.c b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_gpio.c new file mode 100644 index 0000000..d67de6c --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_gpio.c @@ -0,0 +1,527 @@ +/** + ****************************************************************************** + * @file stm32f7xx_hal_gpio.c + * @author MCD Application Team + * @brief GPIO HAL module driver. + * This file provides firmware functions to manage the following + * functionalities of the General Purpose Input/Output (GPIO) peripheral: + * + Initialization and de-initialization functions + * + IO operation functions + * + ****************************************************************************** + * @attention + * + * Copyright (c) 2017 STMicroelectronics. + * All rights reserved. + * + * This software is licensed under terms that can be found in the LICENSE file + * in the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. + * + ****************************************************************************** + @verbatim + ============================================================================== + ##### GPIO Peripheral features ##### + ============================================================================== + [..] + Subject to the specific hardware characteristics of each I/O port listed in the datasheet, each + port bit of the General Purpose IO (GPIO) Ports, can be individually configured by software + in several modes: + (+) Input mode + (+) Analog mode + (+) Output mode + (+) Alternate function mode + (+) External interrupt/event lines + + [..] + During and just after reset, the alternate functions and external interrupt + lines are not active and the I/O ports are configured in input floating mode. + + [..] + All GPIO pins have weak internal pull-up and pull-down resistors, which can be + activated or not. + + [..] + In Output or Alternate mode, each IO can be configured on open-drain or push-pull + type and the IO speed can be selected depending on the VDD value. + + [..] + All ports have external interrupt/event capability. To use external interrupt + lines, the port must be configured in input mode. All available GPIO pins are + connected to the 16 external interrupt/event lines from EXTI0 to EXTI15. + + [..] + The external interrupt/event controller consists of up to 23 edge detectors + (16 lines are connected to GPIO) for generating event/interrupt requests (each + input line can be independently configured to select the type (interrupt or event) + and the corresponding trigger event (rising or falling or both). Each line can + also be masked independently. + + ##### How to use this driver ##### + ============================================================================== + [..] + (#) Enable the GPIO AHB clock using the following function: __HAL_RCC_GPIOx_CLK_ENABLE(). + + (#) Configure the GPIO pin(s) using HAL_GPIO_Init(). 
+ (++) Configure the IO mode using "Mode" member from GPIO_InitTypeDef structure + (++) Activate Pull-up, Pull-down resistor using "Pull" member from GPIO_InitTypeDef + structure. + (++) In case of Output or alternate function mode selection: the speed is + configured through "Speed" member from GPIO_InitTypeDef structure. + (++) In alternate mode is selection, the alternate function connected to the IO + is configured through "Alternate" member from GPIO_InitTypeDef structure. + (++) Analog mode is required when a pin is to be used as ADC channel + or DAC output. + (++) In case of external interrupt/event selection the "Mode" member from + GPIO_InitTypeDef structure select the type (interrupt or event) and + the corresponding trigger event (rising or falling or both). + + (#) In case of external interrupt/event mode selection, configure NVIC IRQ priority + mapped to the EXTI line using HAL_NVIC_SetPriority() and enable it using + HAL_NVIC_EnableIRQ(). + + (#) To get the level of a pin configured in input mode use HAL_GPIO_ReadPin(). + + (#) To set/reset the level of a pin configured in output mode use + HAL_GPIO_WritePin()/HAL_GPIO_TogglePin(). + + (#) To lock pin configuration until next reset use HAL_GPIO_LockPin(). + + + (#) During and just after reset, the alternate functions are not + active and the GPIO pins are configured in input floating mode (except JTAG + pins). + + (#) The LSE oscillator pins OSC32_IN and OSC32_OUT can be used as general purpose + (PC14 and PC15, respectively) when the LSE oscillator is off. The LSE has + priority over the GPIO function. + + (#) The HSE oscillator pins OSC_IN/OSC_OUT can be used as + general purpose PH0 and PH1, respectively, when the HSE oscillator is off. + The HSE has priority over the GPIO function. + + @endverbatim + ****************************************************************************** + */ + +/* Includes ------------------------------------------------------------------*/ +#include "stm32f7xx_hal.h" + +/** @addtogroup STM32F7xx_HAL_Driver + * @{ + */ + +/** @defgroup GPIO GPIO + * @brief GPIO HAL module driver + * @{ + */ + +#ifdef HAL_GPIO_MODULE_ENABLED + +/* Private typedef -----------------------------------------------------------*/ +/* Private define ------------------------------------------------------------*/ +/** @addtogroup GPIO_Private_Constants GPIO Private Constants + * @{ + */ + +#define GPIO_NUMBER ((uint32_t)16U) +/** + * @} + */ +/* Private macro -------------------------------------------------------------*/ +/* Private variables ---------------------------------------------------------*/ +/* Private function prototypes -----------------------------------------------*/ +/* Private functions ---------------------------------------------------------*/ +/* Exported functions --------------------------------------------------------*/ +/** @defgroup GPIO_Exported_Functions GPIO Exported Functions + * @{ + */ + +/** @defgroup GPIO_Exported_Functions_Group1 Initialization and de-initialization functions + * @brief Initialization and Configuration functions + * +@verbatim + =============================================================================== + ##### Initialization and de-initialization functions ##### + =============================================================================== + [..] + This section provides functions allowing to initialize and de-initialize the GPIOs + to be ready for use. 
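    [..]
        As a rough usage sketch (not part of the original ST documentation; it assumes
        "stm32f7xx_hal.h" is included and that a user LED sits on PB0, as on a NUCLEO-F767ZI
        board), a typical pin configuration could look like:

          void example_led_init(void)
          {
            GPIO_InitTypeDef led = {0};

            /* The port clock must be enabled before HAL_GPIO_Init() is called */
            __HAL_RCC_GPIOB_CLK_ENABLE();

            led.Pin   = GPIO_PIN_0;
            led.Mode  = GPIO_MODE_OUTPUT_PP;     /* push-pull output */
            led.Pull  = GPIO_NOPULL;
            led.Speed = GPIO_SPEED_FREQ_LOW;
            HAL_GPIO_Init(GPIOB, &led);

            /* Drive the pin high (LED on, assuming active-high wiring) */
            HAL_GPIO_WritePin(GPIOB, GPIO_PIN_0, GPIO_PIN_SET);
          }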
+ +@endverbatim + * @{ + */ + +/** + * @brief Initializes the GPIOx peripheral according to the specified parameters in the GPIO_Init. + * @param GPIOx where x can be (A..K) to select the GPIO peripheral. + * @param GPIO_Init pointer to a GPIO_InitTypeDef structure that contains + * the configuration information for the specified GPIO peripheral. + * @retval None + */ +void HAL_GPIO_Init(GPIO_TypeDef *GPIOx, GPIO_InitTypeDef *GPIO_Init) +{ + uint32_t position = 0x00; + uint32_t ioposition = 0x00; + uint32_t iocurrent = 0x00; + uint32_t temp = 0x00; + + /* Check the parameters */ + assert_param(IS_GPIO_ALL_INSTANCE(GPIOx)); + assert_param(IS_GPIO_PIN(GPIO_Init->Pin)); + assert_param(IS_GPIO_MODE(GPIO_Init->Mode)); + + /* Configure the port pins */ + for(position = 0; position < GPIO_NUMBER; position++) + { + /* Get the IO position */ + ioposition = ((uint32_t)0x01) << position; + /* Get the current IO position */ + iocurrent = (uint32_t)(GPIO_Init->Pin) & ioposition; + + if(iocurrent == ioposition) + { + /*--------------------- GPIO Mode Configuration ------------------------*/ + /* In case of Output or Alternate function mode selection */ + if(((GPIO_Init->Mode & GPIO_MODE) == MODE_OUTPUT) || ((GPIO_Init->Mode & GPIO_MODE) == MODE_AF)) + { + /* Check the Speed parameter */ + assert_param(IS_GPIO_SPEED(GPIO_Init->Speed)); + /* Configure the IO Speed */ + temp = GPIOx->OSPEEDR; + temp &= ~(GPIO_OSPEEDER_OSPEEDR0 << (position * 2)); + temp |= (GPIO_Init->Speed << (position * 2)); + GPIOx->OSPEEDR = temp; + + /* Configure the IO Output Type */ + temp = GPIOx->OTYPER; + temp &= ~(GPIO_OTYPER_OT_0 << position) ; + temp |= (((GPIO_Init->Mode & OUTPUT_TYPE) >> OUTPUT_TYPE_Pos) << position); + GPIOx->OTYPER = temp; + } + + if((GPIO_Init->Mode & GPIO_MODE) != MODE_ANALOG) + { + /* Check the Pull parameter */ + assert_param(IS_GPIO_PULL(GPIO_Init->Pull)); + + /* Activate the Pull-up or Pull down resistor for the current IO */ + temp = GPIOx->PUPDR; + temp &= ~(GPIO_PUPDR_PUPDR0 << (position * 2)); + temp |= ((GPIO_Init->Pull) << (position * 2)); + GPIOx->PUPDR = temp; + } + + /* In case of Alternate function mode selection */ + if((GPIO_Init->Mode & GPIO_MODE) == MODE_AF) + { + /* Check the Alternate function parameter */ + assert_param(IS_GPIO_AF(GPIO_Init->Alternate)); + + /* Configure Alternate function mapped with the current IO */ + temp = GPIOx->AFR[position >> 3]; + temp &= ~((uint32_t)0xF << ((uint32_t)(position & (uint32_t)0x07) * 4)) ; + temp |= ((uint32_t)(GPIO_Init->Alternate) << (((uint32_t)position & (uint32_t)0x07) * 4)); + GPIOx->AFR[position >> 3] = temp; + } + + /* Configure IO Direction mode (Input, Output, Alternate or Analog) */ + temp = GPIOx->MODER; + temp &= ~(GPIO_MODER_MODER0 << (position * 2)); + temp |= ((GPIO_Init->Mode & GPIO_MODE) << (position * 2)); + GPIOx->MODER = temp; + + /*--------------------- EXTI Mode Configuration ------------------------*/ + /* Configure the External Interrupt or event for the current IO */ + if((GPIO_Init->Mode & EXTI_MODE) != 0x00u) + { + /* Enable SYSCFG Clock */ + __HAL_RCC_SYSCFG_CLK_ENABLE(); + + temp = SYSCFG->EXTICR[position >> 2]; + temp &= ~(((uint32_t)0x0F) << (4 * (position & 0x03))); + temp |= ((uint32_t)(GPIO_GET_INDEX(GPIOx)) << (4 * (position & 0x03))); + SYSCFG->EXTICR[position >> 2] = temp; + + /* Clear Rising Falling edge configuration */ + temp = EXTI->RTSR; + temp &= ~((uint32_t)iocurrent); + if((GPIO_Init->Mode & TRIGGER_RISING) != 0x00u) + { + temp |= iocurrent; + } + EXTI->RTSR = temp; + + temp = EXTI->FTSR; + temp &= 
~((uint32_t)iocurrent); + if((GPIO_Init->Mode & TRIGGER_FALLING) != 0x00u) + { + temp |= iocurrent; + } + EXTI->FTSR = temp; + + temp = EXTI->EMR; + temp &= ~((uint32_t)iocurrent); + if((GPIO_Init->Mode & EXTI_EVT) != 0x00u) + { + temp |= iocurrent; + } + EXTI->EMR = temp; + + /* Clear EXTI line configuration */ + temp = EXTI->IMR; + temp &= ~((uint32_t)iocurrent); + if((GPIO_Init->Mode & EXTI_IT) != 0x00u) + { + temp |= iocurrent; + } + EXTI->IMR = temp; + } + } + } +} + +/** + * @brief De-initializes the GPIOx peripheral registers to their default reset values. + * @param GPIOx where x can be (A..K) to select the GPIO peripheral. + * @param GPIO_Pin specifies the port bit to be written. + * This parameter can be one of GPIO_PIN_x where x can be (0..15). + * @retval None + */ +void HAL_GPIO_DeInit(GPIO_TypeDef *GPIOx, uint32_t GPIO_Pin) +{ + uint32_t position; + uint32_t ioposition = 0x00; + uint32_t iocurrent = 0x00; + uint32_t tmp = 0x00; + + /* Check the parameters */ + assert_param(IS_GPIO_ALL_INSTANCE(GPIOx)); + + /* Configure the port pins */ + for(position = 0; position < GPIO_NUMBER; position++) + { + /* Get the IO position */ + ioposition = ((uint32_t)0x01) << position; + /* Get the current IO position */ + iocurrent = (GPIO_Pin) & ioposition; + + if(iocurrent == ioposition) + { + /*------------------------- EXTI Mode Configuration --------------------*/ + tmp = SYSCFG->EXTICR[position >> 2]; + tmp &= (((uint32_t)0x0F) << (4 * (position & 0x03))); + if(tmp == ((uint32_t)(GPIO_GET_INDEX(GPIOx)) << (4 * (position & 0x03)))) + { + /* Clear EXTI line configuration */ + EXTI->IMR &= ~((uint32_t)iocurrent); + EXTI->EMR &= ~((uint32_t)iocurrent); + + /* Clear Rising Falling edge configuration */ + EXTI->FTSR &= ~((uint32_t)iocurrent); + EXTI->RTSR &= ~((uint32_t)iocurrent); + + /* Configure the External Interrupt or event for the current IO */ + tmp = ((uint32_t)0x0F) << (4 * (position & 0x03)); + SYSCFG->EXTICR[position >> 2] &= ~tmp; + } + /*------------------------- GPIO Mode Configuration --------------------*/ + /* Configure IO Direction in Input Floating Mode */ + GPIOx->MODER &= ~(GPIO_MODER_MODER0 << (position * 2)); + + /* Configure the default Alternate Function in current IO */ + GPIOx->AFR[position >> 3] &= ~((uint32_t)0xF << ((uint32_t)(position & (uint32_t)0x07) * 4)) ; + + /* Deactivate the Pull-up and Pull-down resistor for the current IO */ + GPIOx->PUPDR &= ~(GPIO_PUPDR_PUPDR0 << (position * 2)); + + /* Configure the default value IO Output Type */ + GPIOx->OTYPER &= ~(GPIO_OTYPER_OT_0 << position) ; + + /* Configure the default value for IO Speed */ + GPIOx->OSPEEDR &= ~(GPIO_OSPEEDER_OSPEEDR0 << (position * 2)); + } + } +} + +/** + * @} + */ + +/** @defgroup GPIO_Exported_Functions_Group2 IO operation functions + * @brief GPIO Read and Write + * +@verbatim + =============================================================================== + ##### IO operation functions ##### + =============================================================================== + +@endverbatim + * @{ + */ + +/** + * @brief Reads the specified input port pin. + * @param GPIOx where x can be (A..K) to select the GPIO peripheral. + * @param GPIO_Pin specifies the port bit to read. + * This parameter can be GPIO_PIN_x where x can be (0..15). + * @retval The input port pin value. 
+ */ +GPIO_PinState HAL_GPIO_ReadPin(GPIO_TypeDef* GPIOx, uint16_t GPIO_Pin) +{ + GPIO_PinState bitstatus; + + /* Check the parameters */ + assert_param(IS_GPIO_PIN(GPIO_Pin)); + + if((GPIOx->IDR & GPIO_Pin) != (uint32_t)GPIO_PIN_RESET) + { + bitstatus = GPIO_PIN_SET; + } + else + { + bitstatus = GPIO_PIN_RESET; + } + return bitstatus; +} + +/** + * @brief Sets or clears the selected data port bit. + * + * @note This function uses GPIOx_BSRR register to allow atomic read/modify + * accesses. In this way, there is no risk of an IRQ occurring between + * the read and the modify access. + * + * @param GPIOx where x can be (A..K) to select the GPIO peripheral. + * @param GPIO_Pin specifies the port bit to be written. + * This parameter can be one of GPIO_PIN_x where x can be (0..15). + * @param PinState specifies the value to be written to the selected bit. + * This parameter can be one of the GPIO_PinState enum values: + * @arg GPIO_PIN_RESET: to clear the port pin + * @arg GPIO_PIN_SET: to set the port pin + * @retval None + */ +void HAL_GPIO_WritePin(GPIO_TypeDef* GPIOx, uint16_t GPIO_Pin, GPIO_PinState PinState) +{ + /* Check the parameters */ + assert_param(IS_GPIO_PIN(GPIO_Pin)); + assert_param(IS_GPIO_PIN_ACTION(PinState)); + + if(PinState != GPIO_PIN_RESET) + { + GPIOx->BSRR = GPIO_Pin; + } + else + { + GPIOx->BSRR = (uint32_t)GPIO_Pin << 16; + } +} + +/** + * @brief Toggles the specified GPIO pins. + * @param GPIOx Where x can be (A..I) to select the GPIO peripheral. + * @param GPIO_Pin Specifies the pins to be toggled. + * @retval None + */ +void HAL_GPIO_TogglePin(GPIO_TypeDef* GPIOx, uint16_t GPIO_Pin) +{ + uint32_t odr; + + /* Check the parameters */ + assert_param(IS_GPIO_PIN(GPIO_Pin)); + + /* get current Output Data Register value */ + odr = GPIOx->ODR; + + /* Set selected pins that were at low level, and reset ones that were high */ + GPIOx->BSRR = ((odr & GPIO_Pin) << GPIO_NUMBER) | (~odr & GPIO_Pin); +} + +/** + * @brief Locks GPIO Pins configuration registers. + * @note The locked registers are GPIOx_MODER, GPIOx_OTYPER, GPIOx_OSPEEDR, + * GPIOx_PUPDR, GPIOx_AFRL and GPIOx_AFRH. + * @note The configuration of the locked GPIO pins can no longer be modified + * until the next reset. + * @param GPIOx where x can be (A..F) to select the GPIO peripheral for STM32F7 family + * @param GPIO_Pin specifies the port bit to be locked. + * This parameter can be any combination of GPIO_PIN_x where x can be (0..15). + * @retval None + */ +HAL_StatusTypeDef HAL_GPIO_LockPin(GPIO_TypeDef* GPIOx, uint16_t GPIO_Pin) +{ + __IO uint32_t tmp = GPIO_LCKR_LCKK; + + /* Check the parameters */ + assert_param(IS_GPIO_PIN(GPIO_Pin)); + + /* Apply lock key write sequence */ + tmp |= GPIO_Pin; + /* Set LCKx bit(s): LCKK='1' + LCK[15-0] */ + GPIOx->LCKR = tmp; + /* Reset LCKx bit(s): LCKK='0' + LCK[15-0] */ + GPIOx->LCKR = GPIO_Pin; + /* Set LCKx bit(s): LCKK='1' + LCK[15-0] */ + GPIOx->LCKR = tmp; + /* Read LCKR register. This read is mandatory to complete key lock sequence */ + tmp = GPIOx->LCKR; + + /* Read again in order to confirm lock is active */ + if((GPIOx->LCKR & GPIO_LCKR_LCKK) != RESET) + { + return HAL_OK; + } + else + { + return HAL_ERROR; + } +} + +/** + * @brief This function handles EXTI interrupt request. 
+ * @param GPIO_Pin Specifies the pins connected EXTI line + * @retval None + */ +void HAL_GPIO_EXTI_IRQHandler(uint16_t GPIO_Pin) +{ + /* EXTI line interrupt detected */ + if(__HAL_GPIO_EXTI_GET_IT(GPIO_Pin) != RESET) + { + __HAL_GPIO_EXTI_CLEAR_IT(GPIO_Pin); + HAL_GPIO_EXTI_Callback(GPIO_Pin); + } +} + +/** + * @brief EXTI line detection callbacks. + * @param GPIO_Pin Specifies the pins connected EXTI line + * @retval None + */ +__weak void HAL_GPIO_EXTI_Callback(uint16_t GPIO_Pin) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(GPIO_Pin); + + /* NOTE: This function Should not be modified, when the callback is needed, + the HAL_GPIO_EXTI_Callback could be implemented in the user file + */ +} + +/** + * @} + */ + + +/** + * @} + */ + +#endif /* HAL_GPIO_MODULE_ENABLED */ +/** + * @} + */ + +/** + * @} + */ + diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_i2c.c b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_i2c.c new file mode 100644 index 0000000..df361f8 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_i2c.c @@ -0,0 +1,6905 @@ +/** + ****************************************************************************** + * @file stm32f7xx_hal_i2c.c + * @author MCD Application Team + * @brief I2C HAL module driver. + * This file provides firmware functions to manage the following + * functionalities of the Inter Integrated Circuit (I2C) peripheral: + * + Initialization and de-initialization functions + * + IO operation functions + * + Peripheral State and Errors functions + * + ****************************************************************************** + * @attention + * + * Copyright (c) 2017 STMicroelectronics. + * All rights reserved. + * + * This software is licensed under terms that can be found in the LICENSE file + * in the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. + * + ****************************************************************************** + @verbatim + ============================================================================== + ##### How to use this driver ##### + ============================================================================== + [..] 
+ The I2C HAL driver can be used as follows: + + (#) Declare a I2C_HandleTypeDef handle structure, for example: + I2C_HandleTypeDef hi2c; + + (#)Initialize the I2C low level resources by implementing the HAL_I2C_MspInit() API: + (##) Enable the I2Cx interface clock + (##) I2C pins configuration + (+++) Enable the clock for the I2C GPIOs + (+++) Configure I2C pins as alternate function open-drain + (##) NVIC configuration if you need to use interrupt process + (+++) Configure the I2Cx interrupt priority + (+++) Enable the NVIC I2C IRQ Channel + (##) DMA Configuration if you need to use DMA process + (+++) Declare a DMA_HandleTypeDef handle structure for + the transmit or receive stream + (+++) Enable the DMAx interface clock using + (+++) Configure the DMA handle parameters + (+++) Configure the DMA Tx or Rx stream + (+++) Associate the initialized DMA handle to the hi2c DMA Tx or Rx handle + (+++) Configure the priority and enable the NVIC for the transfer complete interrupt on + the DMA Tx or Rx stream + + (#) Configure the Communication Clock Timing, Own Address1, Master Addressing mode, Dual Addressing mode, + Own Address2, Own Address2 Mask, General call and Nostretch mode in the hi2c Init structure. + + (#) Initialize the I2C registers by calling the HAL_I2C_Init(), configures also the low level Hardware + (GPIO, CLOCK, NVIC...etc) by calling the customized HAL_I2C_MspInit(&hi2c) API. + + (#) To check if target device is ready for communication, use the function HAL_I2C_IsDeviceReady() + + (#) For I2C IO and IO MEM operations, three operation modes are available within this driver : + + *** Polling mode IO operation *** + ================================= + [..] + (+) Transmit in master mode an amount of data in blocking mode using HAL_I2C_Master_Transmit() + (+) Receive in master mode an amount of data in blocking mode using HAL_I2C_Master_Receive() + (+) Transmit in slave mode an amount of data in blocking mode using HAL_I2C_Slave_Transmit() + (+) Receive in slave mode an amount of data in blocking mode using HAL_I2C_Slave_Receive() + + *** Polling mode IO MEM operation *** + ===================================== + [..] + (+) Write an amount of data in blocking mode to a specific memory address using HAL_I2C_Mem_Write() + (+) Read an amount of data in blocking mode from a specific memory address using HAL_I2C_Mem_Read() + + + *** Interrupt mode IO operation *** + =================================== + [..] 
+ (+) Transmit in master mode an amount of data in non-blocking mode using HAL_I2C_Master_Transmit_IT() + (+) At transmission end of transfer, HAL_I2C_MasterTxCpltCallback() is executed and users can + add their own code by customization of function pointer HAL_I2C_MasterTxCpltCallback() + (+) Receive in master mode an amount of data in non-blocking mode using HAL_I2C_Master_Receive_IT() + (+) At reception end of transfer, HAL_I2C_MasterRxCpltCallback() is executed and users can + add their own code by customization of function pointer HAL_I2C_MasterRxCpltCallback() + (+) Transmit in slave mode an amount of data in non-blocking mode using HAL_I2C_Slave_Transmit_IT() + (+) At transmission end of transfer, HAL_I2C_SlaveTxCpltCallback() is executed and users can + add their own code by customization of function pointer HAL_I2C_SlaveTxCpltCallback() + (+) Receive in slave mode an amount of data in non-blocking mode using HAL_I2C_Slave_Receive_IT() + (+) At reception end of transfer, HAL_I2C_SlaveRxCpltCallback() is executed and users can + add their own code by customization of function pointer HAL_I2C_SlaveRxCpltCallback() + (+) In case of transfer Error, HAL_I2C_ErrorCallback() function is executed and users can + add their own code by customization of function pointer HAL_I2C_ErrorCallback() + (+) Abort a master I2C process communication with Interrupt using HAL_I2C_Master_Abort_IT() + (+) End of abort process, HAL_I2C_AbortCpltCallback() is executed and users can + add their own code by customization of function pointer HAL_I2C_AbortCpltCallback() + (+) Discard a slave I2C process communication using __HAL_I2C_GENERATE_NACK() macro. + This action will inform Master to generate a Stop condition to discard the communication. + + + *** Interrupt mode or DMA mode IO sequential operation *** + ========================================================== + [..] + (@) These interfaces allow to manage a sequential transfer with a repeated start condition + when a direction change during transfer + [..] 
+      (+) A specific option field manages the different steps of a sequential transfer
+      (+) Option field values are defined through I2C_XFEROPTIONS and are listed below:
+      (++) I2C_FIRST_AND_LAST_FRAME: No sequential usage, the behaviour is the same as the associated
+           interfaces in non-sequential mode
+      (++) I2C_FIRST_FRAME: Sequential usage, this option manages a sequence with start condition, address
+           and data to transfer, without a final stop condition
+      (++) I2C_FIRST_AND_NEXT_FRAME: Sequential usage (Master only), this option manages a sequence with
+           start condition, address and data to transfer without a final stop condition, and then
+           permits calling the same master sequential interface several times
+           (like HAL_I2C_Master_Seq_Transmit_IT() then HAL_I2C_Master_Seq_Transmit_IT()
+           or HAL_I2C_Master_Seq_Transmit_DMA() then HAL_I2C_Master_Seq_Transmit_DMA())
+      (++) I2C_NEXT_FRAME: Sequential usage, this option manages a sequence with a restart condition,
+           address and new data to transfer if the direction changes, or only the new data to transfer
+           if the direction does not change, without a final stop condition in both cases
+      (++) I2C_LAST_FRAME: Sequential usage, this option manages a sequence with a restart condition,
+           address and new data to transfer if the direction changes, or only the new data to transfer
+           if the direction does not change, with a final stop condition in both cases
+      (++) I2C_LAST_FRAME_NO_STOP: Sequential usage (Master only), this option manages a restart condition
+           after several calls of the same master sequential interface
+           (used together with option I2C_FIRST_AND_NEXT_FRAME).
+           Typical usage: transfer several bytes one by one using
+             HAL_I2C_Master_Seq_Transmit_IT
+             or HAL_I2C_Master_Seq_Receive_IT
+             or HAL_I2C_Master_Seq_Transmit_DMA
+             or HAL_I2C_Master_Seq_Receive_DMA
+             with option I2C_FIRST_AND_NEXT_FRAME then I2C_NEXT_FRAME.
+           Using I2C_LAST_FRAME_NO_STOP on the last Transmit or Receive sequence then permits
+           calling the opposite interface (Receive or Transmit) without stopping the communication,
+           and so generates a restart condition.
+      (++) I2C_OTHER_FRAME: Sequential usage (Master only), this option manages a restart condition after
+           each call of the same master sequential interface.
+           Typical usage: transfer several bytes one by one, with a restart and the slave address
+           between each byte, using
+             HAL_I2C_Master_Seq_Transmit_IT
+             or HAL_I2C_Master_Seq_Receive_IT
+             or HAL_I2C_Master_Seq_Transmit_DMA
+             or HAL_I2C_Master_Seq_Receive_DMA
+             with option I2C_FIRST_FRAME then I2C_OTHER_FRAME.
+           Using I2C_OTHER_AND_LAST_FRAME on the last frame then helps the automatic generation
+           of the STOP condition.
+    [..]
+      (@) A short illustrative sketch of such a two-phase sequential transfer is given right below.
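    [..]
        As a rough illustration (not part of the original ST documentation; the 7-bit slave
        address 0x48, the index byte and the handle hi2c1 are placeholders for an already
        initialized setup), a "write one index byte, then read back with a repeated start"
        sequence could look like:

          extern I2C_HandleTypeDef hi2c1;        /* assumed initialized with HAL_I2C_Init() */
          static uint8_t reg_index = 0x00U;
          static uint8_t rx_buf[2];

          void example_seq_read_start(void)
          {
            /* Phase 1: send the index byte and keep the bus busy (no STOP condition) */
            (void)HAL_I2C_Master_Seq_Transmit_IT(&hi2c1, (uint16_t)(0x48U << 1),
                                                 &reg_index, 1U, I2C_FIRST_FRAME);
          }

          /* Phase 2: on Tx completion, issue a repeated start, read two bytes, then the final STOP */
          void HAL_I2C_MasterTxCpltCallback(I2C_HandleTypeDef *hi2c)
          {
            (void)HAL_I2C_Master_Seq_Receive_IT(hi2c, (uint16_t)(0x48U << 1),
                                                rx_buf, (uint16_t)sizeof(rx_buf), I2C_LAST_FRAME);
          }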
+ + (+) Different sequential I2C interfaces are listed below: + (++) Sequential transmit in master I2C mode an amount of data in non-blocking mode using + HAL_I2C_Master_Seq_Transmit_IT() or using HAL_I2C_Master_Seq_Transmit_DMA() + (+++) At transmission end of current frame transfer, HAL_I2C_MasterTxCpltCallback() is executed and + users can add their own code by customization of function pointer HAL_I2C_MasterTxCpltCallback() + (++) Sequential receive in master I2C mode an amount of data in non-blocking mode using + HAL_I2C_Master_Seq_Receive_IT() or using HAL_I2C_Master_Seq_Receive_DMA() + (+++) At reception end of current frame transfer, HAL_I2C_MasterRxCpltCallback() is executed and users can + add their own code by customization of function pointer HAL_I2C_MasterRxCpltCallback() + (++) Abort a master IT or DMA I2C process communication with Interrupt using HAL_I2C_Master_Abort_IT() + (+++) End of abort process, HAL_I2C_AbortCpltCallback() is executed and users can + add their own code by customization of function pointer HAL_I2C_AbortCpltCallback() + (++) Enable/disable the Address listen mode in slave I2C mode using HAL_I2C_EnableListen_IT() + HAL_I2C_DisableListen_IT() + (+++) When address slave I2C match, HAL_I2C_AddrCallback() is executed and users can + add their own code to check the Address Match Code and the transmission direction request by master + (Write/Read). + (+++) At Listen mode end HAL_I2C_ListenCpltCallback() is executed and users can + add their own code by customization of function pointer HAL_I2C_ListenCpltCallback() + (++) Sequential transmit in slave I2C mode an amount of data in non-blocking mode using + HAL_I2C_Slave_Seq_Transmit_IT() or using HAL_I2C_Slave_Seq_Transmit_DMA() + (+++) At transmission end of current frame transfer, HAL_I2C_SlaveTxCpltCallback() is executed and + users can add their own code by customization of function pointer HAL_I2C_SlaveTxCpltCallback() + (++) Sequential receive in slave I2C mode an amount of data in non-blocking mode using + HAL_I2C_Slave_Seq_Receive_IT() or using HAL_I2C_Slave_Seq_Receive_DMA() + (+++) At reception end of current frame transfer, HAL_I2C_SlaveRxCpltCallback() is executed and users can + add their own code by customization of function pointer HAL_I2C_SlaveRxCpltCallback() + (++) In case of transfer Error, HAL_I2C_ErrorCallback() function is executed and users can + add their own code by customization of function pointer HAL_I2C_ErrorCallback() + (++) Discard a slave I2C process communication using __HAL_I2C_GENERATE_NACK() macro. + This action will inform Master to generate a Stop condition to discard the communication. + + *** Interrupt mode IO MEM operation *** + ======================================= + [..] 
+ (+) Write an amount of data in non-blocking mode with Interrupt to a specific memory address using + HAL_I2C_Mem_Write_IT() + (+) At Memory end of write transfer, HAL_I2C_MemTxCpltCallback() is executed and users can + add their own code by customization of function pointer HAL_I2C_MemTxCpltCallback() + (+) Read an amount of data in non-blocking mode with Interrupt from a specific memory address using + HAL_I2C_Mem_Read_IT() + (+) At Memory end of read transfer, HAL_I2C_MemRxCpltCallback() is executed and users can + add their own code by customization of function pointer HAL_I2C_MemRxCpltCallback() + (+) In case of transfer Error, HAL_I2C_ErrorCallback() function is executed and users can + add their own code by customization of function pointer HAL_I2C_ErrorCallback() + + *** DMA mode IO operation *** + ============================== + [..] + (+) Transmit in master mode an amount of data in non-blocking mode (DMA) using + HAL_I2C_Master_Transmit_DMA() + (+) At transmission end of transfer, HAL_I2C_MasterTxCpltCallback() is executed and users can + add their own code by customization of function pointer HAL_I2C_MasterTxCpltCallback() + (+) Receive in master mode an amount of data in non-blocking mode (DMA) using + HAL_I2C_Master_Receive_DMA() + (+) At reception end of transfer, HAL_I2C_MasterRxCpltCallback() is executed and users can + add their own code by customization of function pointer HAL_I2C_MasterRxCpltCallback() + (+) Transmit in slave mode an amount of data in non-blocking mode (DMA) using + HAL_I2C_Slave_Transmit_DMA() + (+) At transmission end of transfer, HAL_I2C_SlaveTxCpltCallback() is executed and users can + add their own code by customization of function pointer HAL_I2C_SlaveTxCpltCallback() + (+) Receive in slave mode an amount of data in non-blocking mode (DMA) using + HAL_I2C_Slave_Receive_DMA() + (+) At reception end of transfer, HAL_I2C_SlaveRxCpltCallback() is executed and users can + add their own code by customization of function pointer HAL_I2C_SlaveRxCpltCallback() + (+) In case of transfer Error, HAL_I2C_ErrorCallback() function is executed and users can + add their own code by customization of function pointer HAL_I2C_ErrorCallback() + (+) Abort a master I2C process communication with Interrupt using HAL_I2C_Master_Abort_IT() + (+) End of abort process, HAL_I2C_AbortCpltCallback() is executed and users can + add their own code by customization of function pointer HAL_I2C_AbortCpltCallback() + (+) Discard a slave I2C process communication using __HAL_I2C_GENERATE_NACK() macro. + This action will inform Master to generate a Stop condition to discard the communication. + + *** DMA mode IO MEM operation *** + ================================= + [..] 
+ (+) Write an amount of data in non-blocking mode with DMA to a specific memory address using + HAL_I2C_Mem_Write_DMA() + (+) At Memory end of write transfer, HAL_I2C_MemTxCpltCallback() is executed and users can + add their own code by customization of function pointer HAL_I2C_MemTxCpltCallback() + (+) Read an amount of data in non-blocking mode with DMA from a specific memory address using + HAL_I2C_Mem_Read_DMA() + (+) At Memory end of read transfer, HAL_I2C_MemRxCpltCallback() is executed and users can + add their own code by customization of function pointer HAL_I2C_MemRxCpltCallback() + (+) In case of transfer Error, HAL_I2C_ErrorCallback() function is executed and users can + add their own code by customization of function pointer HAL_I2C_ErrorCallback() + + + *** I2C HAL driver macros list *** + ================================== + [..] + Below the list of most used macros in I2C HAL driver. + + (+) __HAL_I2C_ENABLE: Enable the I2C peripheral + (+) __HAL_I2C_DISABLE: Disable the I2C peripheral + (+) __HAL_I2C_GENERATE_NACK: Generate a Non-Acknowledge I2C peripheral in Slave mode + (+) __HAL_I2C_GET_FLAG: Check whether the specified I2C flag is set or not + (+) __HAL_I2C_CLEAR_FLAG: Clear the specified I2C pending flag + (+) __HAL_I2C_ENABLE_IT: Enable the specified I2C interrupt + (+) __HAL_I2C_DISABLE_IT: Disable the specified I2C interrupt + + *** Callback registration *** + ============================================= + [..] + The compilation flag USE_HAL_I2C_REGISTER_CALLBACKS when set to 1 + allows the user to configure dynamically the driver callbacks. + Use Functions HAL_I2C_RegisterCallback() or HAL_I2C_RegisterAddrCallback() + to register an interrupt callback. + [..] + Function HAL_I2C_RegisterCallback() allows to register following callbacks: + (+) MasterTxCpltCallback : callback for Master transmission end of transfer. + (+) MasterRxCpltCallback : callback for Master reception end of transfer. + (+) SlaveTxCpltCallback : callback for Slave transmission end of transfer. + (+) SlaveRxCpltCallback : callback for Slave reception end of transfer. + (+) ListenCpltCallback : callback for end of listen mode. + (+) MemTxCpltCallback : callback for Memory transmission end of transfer. + (+) MemRxCpltCallback : callback for Memory reception end of transfer. + (+) ErrorCallback : callback for error detection. + (+) AbortCpltCallback : callback for abort completion process. + (+) MspInitCallback : callback for Msp Init. + (+) MspDeInitCallback : callback for Msp DeInit. + This function takes as parameters the HAL peripheral handle, the Callback ID + and a pointer to the user callback function. + [..] + For specific callback AddrCallback use dedicated register callbacks : HAL_I2C_RegisterAddrCallback(). + [..] + Use function HAL_I2C_UnRegisterCallback to reset a callback to the default + weak function. + HAL_I2C_UnRegisterCallback takes as parameters the HAL peripheral handle, + and the Callback ID. + This function allows to reset following callbacks: + (+) MasterTxCpltCallback : callback for Master transmission end of transfer. + (+) MasterRxCpltCallback : callback for Master reception end of transfer. + (+) SlaveTxCpltCallback : callback for Slave transmission end of transfer. + (+) SlaveRxCpltCallback : callback for Slave reception end of transfer. + (+) ListenCpltCallback : callback for end of listen mode. + (+) MemTxCpltCallback : callback for Memory transmission end of transfer. + (+) MemRxCpltCallback : callback for Memory reception end of transfer. 
+ (+) ErrorCallback : callback for error detection. + (+) AbortCpltCallback : callback for abort completion process. + (+) MspInitCallback : callback for Msp Init. + (+) MspDeInitCallback : callback for Msp DeInit. + [..] + For callback AddrCallback use dedicated register callbacks : HAL_I2C_UnRegisterAddrCallback(). + [..] + By default, after the HAL_I2C_Init() and when the state is HAL_I2C_STATE_RESET + all callbacks are set to the corresponding weak functions: + examples HAL_I2C_MasterTxCpltCallback(), HAL_I2C_MasterRxCpltCallback(). + Exception done for MspInit and MspDeInit functions that are + reset to the legacy weak functions in the HAL_I2C_Init()/ HAL_I2C_DeInit() only when + these callbacks are null (not registered beforehand). + If MspInit or MspDeInit are not null, the HAL_I2C_Init()/ HAL_I2C_DeInit() + keep and use the user MspInit/MspDeInit callbacks (registered beforehand) whatever the state. + [..] + Callbacks can be registered/unregistered in HAL_I2C_STATE_READY state only. + Exception done MspInit/MspDeInit functions that can be registered/unregistered + in HAL_I2C_STATE_READY or HAL_I2C_STATE_RESET state, + thus registered (user) MspInit/DeInit callbacks can be used during the Init/DeInit. + Then, the user first registers the MspInit/MspDeInit user callbacks + using HAL_I2C_RegisterCallback() before calling HAL_I2C_DeInit() + or HAL_I2C_Init() function. + [..] + When the compilation flag USE_HAL_I2C_REGISTER_CALLBACKS is set to 0 or + not defined, the callback registration feature is not available and all callbacks + are set to the corresponding weak functions. + + [..] + (@) You can refer to the I2C HAL driver header file for more useful macros + + @endverbatim + */ + +/* Includes ------------------------------------------------------------------*/ +#include "stm32f7xx_hal.h" + +/** @addtogroup STM32F7xx_HAL_Driver + * @{ + */ + +/** @defgroup I2C I2C + * @brief I2C HAL module driver + * @{ + */ + +#ifdef HAL_I2C_MODULE_ENABLED + +/* Private typedef -----------------------------------------------------------*/ +/* Private define ------------------------------------------------------------*/ + +/** @defgroup I2C_Private_Define I2C Private Define + * @{ + */ +#define TIMING_CLEAR_MASK (0xF0FFFFFFU) /*!< I2C TIMING clear register Mask */ +#define I2C_TIMEOUT_ADDR (10000U) /*!< 10 s */ +#define I2C_TIMEOUT_BUSY (25U) /*!< 25 ms */ +#define I2C_TIMEOUT_DIR (25U) /*!< 25 ms */ +#define I2C_TIMEOUT_RXNE (25U) /*!< 25 ms */ +#define I2C_TIMEOUT_STOPF (25U) /*!< 25 ms */ +#define I2C_TIMEOUT_TC (25U) /*!< 25 ms */ +#define I2C_TIMEOUT_TCR (25U) /*!< 25 ms */ +#define I2C_TIMEOUT_TXIS (25U) /*!< 25 ms */ +#define I2C_TIMEOUT_FLAG (25U) /*!< 25 ms */ + +#define MAX_NBYTE_SIZE 255U +#define SLAVE_ADDR_SHIFT 7U +#define SLAVE_ADDR_MSK 0x06U + +/* Private define for @ref PreviousState usage */ +#define I2C_STATE_MSK ((uint32_t)((uint32_t)((uint32_t)HAL_I2C_STATE_BUSY_TX | \ + (uint32_t)HAL_I2C_STATE_BUSY_RX) & \ + (uint32_t)(~((uint32_t)HAL_I2C_STATE_READY)))) +/*!< Mask State define, keep only RX and TX bits */ +#define I2C_STATE_NONE ((uint32_t)(HAL_I2C_MODE_NONE)) +/*!< Default Value */ +#define I2C_STATE_MASTER_BUSY_TX ((uint32_t)(((uint32_t)HAL_I2C_STATE_BUSY_TX & I2C_STATE_MSK) | \ + (uint32_t)HAL_I2C_MODE_MASTER)) +/*!< Master Busy TX, combinaison of State LSB and Mode enum */ +#define I2C_STATE_MASTER_BUSY_RX ((uint32_t)(((uint32_t)HAL_I2C_STATE_BUSY_RX & I2C_STATE_MSK) | \ + (uint32_t)HAL_I2C_MODE_MASTER)) +/*!< Master Busy RX, combinaison of State LSB and Mode enum */ 
+#define I2C_STATE_SLAVE_BUSY_TX ((uint32_t)(((uint32_t)HAL_I2C_STATE_BUSY_TX & I2C_STATE_MSK) | \ + (uint32_t)HAL_I2C_MODE_SLAVE)) +/*!< Slave Busy TX, combinaison of State LSB and Mode enum */ +#define I2C_STATE_SLAVE_BUSY_RX ((uint32_t)(((uint32_t)HAL_I2C_STATE_BUSY_RX & I2C_STATE_MSK) | \ + (uint32_t)HAL_I2C_MODE_SLAVE)) +/*!< Slave Busy RX, combinaison of State LSB and Mode enum */ +#define I2C_STATE_MEM_BUSY_TX ((uint32_t)(((uint32_t)HAL_I2C_STATE_BUSY_TX & I2C_STATE_MSK) | \ + (uint32_t)HAL_I2C_MODE_MEM)) +/*!< Memory Busy TX, combinaison of State LSB and Mode enum */ +#define I2C_STATE_MEM_BUSY_RX ((uint32_t)(((uint32_t)HAL_I2C_STATE_BUSY_RX & I2C_STATE_MSK) | \ + (uint32_t)HAL_I2C_MODE_MEM)) +/*!< Memory Busy RX, combinaison of State LSB and Mode enum */ + + +/* Private define to centralize the enable/disable of Interrupts */ +#define I2C_XFER_TX_IT (uint16_t)(0x0001U) /*!< Bit field can be combinated with + @ref I2C_XFER_LISTEN_IT */ +#define I2C_XFER_RX_IT (uint16_t)(0x0002U) /*!< Bit field can be combinated with + @ref I2C_XFER_LISTEN_IT */ +#define I2C_XFER_LISTEN_IT (uint16_t)(0x8000U) /*!< Bit field can be combinated with @ref I2C_XFER_TX_IT + and @ref I2C_XFER_RX_IT */ + +#define I2C_XFER_ERROR_IT (uint16_t)(0x0010U) /*!< Bit definition to manage addition of global Error + and NACK treatment */ +#define I2C_XFER_CPLT_IT (uint16_t)(0x0020U) /*!< Bit definition to manage only STOP evenement */ +#define I2C_XFER_RELOAD_IT (uint16_t)(0x0040U) /*!< Bit definition to manage only Reload of NBYTE */ + +/* Private define Sequential Transfer Options default/reset value */ +#define I2C_NO_OPTION_FRAME (0xFFFF0000U) +/** + * @} + */ + +/* Private macro -------------------------------------------------------------*/ +/* Macro to get remaining data to transfer on DMA side */ +#define I2C_GET_DMA_REMAIN_DATA(__HANDLE__) __HAL_DMA_GET_COUNTER(__HANDLE__) + +/* Private variables ---------------------------------------------------------*/ +/* Private function prototypes -----------------------------------------------*/ + +/** @defgroup I2C_Private_Functions I2C Private Functions + * @{ + */ +/* Private functions to handle DMA transfer */ +static void I2C_DMAMasterTransmitCplt(DMA_HandleTypeDef *hdma); +static void I2C_DMAMasterReceiveCplt(DMA_HandleTypeDef *hdma); +static void I2C_DMASlaveTransmitCplt(DMA_HandleTypeDef *hdma); +static void I2C_DMASlaveReceiveCplt(DMA_HandleTypeDef *hdma); +static void I2C_DMAError(DMA_HandleTypeDef *hdma); +static void I2C_DMAAbort(DMA_HandleTypeDef *hdma); + +/* Private functions to handle IT transfer */ +static void I2C_ITAddrCplt(I2C_HandleTypeDef *hi2c, uint32_t ITFlags); +static void I2C_ITMasterSeqCplt(I2C_HandleTypeDef *hi2c); +static void I2C_ITSlaveSeqCplt(I2C_HandleTypeDef *hi2c); +static void I2C_ITMasterCplt(I2C_HandleTypeDef *hi2c, uint32_t ITFlags); +static void I2C_ITSlaveCplt(I2C_HandleTypeDef *hi2c, uint32_t ITFlags); +static void I2C_ITListenCplt(I2C_HandleTypeDef *hi2c, uint32_t ITFlags); +static void I2C_ITError(I2C_HandleTypeDef *hi2c, uint32_t ErrorCode); + +/* Private functions to handle IT transfer */ +static HAL_StatusTypeDef I2C_RequestMemoryWrite(I2C_HandleTypeDef *hi2c, uint16_t DevAddress, + uint16_t MemAddress, uint16_t MemAddSize, uint32_t Timeout, + uint32_t Tickstart); +static HAL_StatusTypeDef I2C_RequestMemoryRead(I2C_HandleTypeDef *hi2c, uint16_t DevAddress, + uint16_t MemAddress, uint16_t MemAddSize, uint32_t Timeout, + uint32_t Tickstart); + +/* Private functions for I2C transfer IRQ handler */ +static HAL_StatusTypeDef 
I2C_Master_ISR_IT(struct __I2C_HandleTypeDef *hi2c, uint32_t ITFlags, + uint32_t ITSources); +static HAL_StatusTypeDef I2C_Slave_ISR_IT(struct __I2C_HandleTypeDef *hi2c, uint32_t ITFlags, + uint32_t ITSources); +static HAL_StatusTypeDef I2C_Master_ISR_DMA(struct __I2C_HandleTypeDef *hi2c, uint32_t ITFlags, + uint32_t ITSources); +static HAL_StatusTypeDef I2C_Slave_ISR_DMA(struct __I2C_HandleTypeDef *hi2c, uint32_t ITFlags, + uint32_t ITSources); + +/* Private functions to handle flags during polling transfer */ +static HAL_StatusTypeDef I2C_WaitOnFlagUntilTimeout(I2C_HandleTypeDef *hi2c, uint32_t Flag, FlagStatus Status, + uint32_t Timeout, uint32_t Tickstart); +static HAL_StatusTypeDef I2C_WaitOnTXISFlagUntilTimeout(I2C_HandleTypeDef *hi2c, uint32_t Timeout, + uint32_t Tickstart); +static HAL_StatusTypeDef I2C_WaitOnRXNEFlagUntilTimeout(I2C_HandleTypeDef *hi2c, uint32_t Timeout, + uint32_t Tickstart); +static HAL_StatusTypeDef I2C_WaitOnSTOPFlagUntilTimeout(I2C_HandleTypeDef *hi2c, uint32_t Timeout, + uint32_t Tickstart); +static HAL_StatusTypeDef I2C_IsErrorOccurred(I2C_HandleTypeDef *hi2c, uint32_t Timeout, + uint32_t Tickstart); + +/* Private functions to centralize the enable/disable of Interrupts */ +static void I2C_Enable_IRQ(I2C_HandleTypeDef *hi2c, uint16_t InterruptRequest); +static void I2C_Disable_IRQ(I2C_HandleTypeDef *hi2c, uint16_t InterruptRequest); + +/* Private function to treat different error callback */ +static void I2C_TreatErrorCallback(I2C_HandleTypeDef *hi2c); + +/* Private function to flush TXDR register */ +static void I2C_Flush_TXDR(I2C_HandleTypeDef *hi2c); + +/* Private function to handle start, restart or stop a transfer */ +static void I2C_TransferConfig(I2C_HandleTypeDef *hi2c, uint16_t DevAddress, uint8_t Size, uint32_t Mode, + uint32_t Request); + +/* Private function to Convert Specific options */ +static void I2C_ConvertOtherXferOptions(I2C_HandleTypeDef *hi2c); +/** + * @} + */ + +/* Exported functions --------------------------------------------------------*/ + +/** @defgroup I2C_Exported_Functions I2C Exported Functions + * @{ + */ + +/** @defgroup I2C_Exported_Functions_Group1 Initialization and de-initialization functions + * @brief Initialization and Configuration functions + * +@verbatim + =============================================================================== + ##### Initialization and de-initialization functions ##### + =============================================================================== + [..] This subsection provides a set of functions allowing to initialize and + deinitialize the I2Cx peripheral: + + (+) User must Implement HAL_I2C_MspInit() function in which he configures + all related peripherals resources (CLOCK, GPIO, DMA, IT and NVIC ). + + (+) Call the function HAL_I2C_Init() to configure the selected device with + the selected configuration: + (++) Clock Timing + (++) Own Address 1 + (++) Addressing mode (Master, Slave) + (++) Dual Addressing mode + (++) Own Address 2 + (++) Own Address 2 Mask + (++) General call mode + (++) Nostretch mode + + (+) Call the function HAL_I2C_DeInit() to restore the default configuration + of the selected I2Cx peripheral. + +@endverbatim + * @{ + */ + +/** + * @brief Initializes the I2C according to the specified parameters + * in the I2C_InitTypeDef and initialize the associated handle. + * @param hi2c Pointer to a I2C_HandleTypeDef structure that contains + * the configuration information for the specified I2C. 
+ * @retval HAL status + */ +HAL_StatusTypeDef HAL_I2C_Init(I2C_HandleTypeDef *hi2c) +{ + /* Check the I2C handle allocation */ + if (hi2c == NULL) + { + return HAL_ERROR; + } + + /* Check the parameters */ + assert_param(IS_I2C_ALL_INSTANCE(hi2c->Instance)); + assert_param(IS_I2C_OWN_ADDRESS1(hi2c->Init.OwnAddress1)); + assert_param(IS_I2C_ADDRESSING_MODE(hi2c->Init.AddressingMode)); + assert_param(IS_I2C_DUAL_ADDRESS(hi2c->Init.DualAddressMode)); + assert_param(IS_I2C_OWN_ADDRESS2(hi2c->Init.OwnAddress2)); + assert_param(IS_I2C_OWN_ADDRESS2_MASK(hi2c->Init.OwnAddress2Masks)); + assert_param(IS_I2C_GENERAL_CALL(hi2c->Init.GeneralCallMode)); + assert_param(IS_I2C_NO_STRETCH(hi2c->Init.NoStretchMode)); + + if (hi2c->State == HAL_I2C_STATE_RESET) + { + /* Allocate lock resource and initialize it */ + hi2c->Lock = HAL_UNLOCKED; + +#if (USE_HAL_I2C_REGISTER_CALLBACKS == 1) + /* Init the I2C Callback settings */ + hi2c->MasterTxCpltCallback = HAL_I2C_MasterTxCpltCallback; /* Legacy weak MasterTxCpltCallback */ + hi2c->MasterRxCpltCallback = HAL_I2C_MasterRxCpltCallback; /* Legacy weak MasterRxCpltCallback */ + hi2c->SlaveTxCpltCallback = HAL_I2C_SlaveTxCpltCallback; /* Legacy weak SlaveTxCpltCallback */ + hi2c->SlaveRxCpltCallback = HAL_I2C_SlaveRxCpltCallback; /* Legacy weak SlaveRxCpltCallback */ + hi2c->ListenCpltCallback = HAL_I2C_ListenCpltCallback; /* Legacy weak ListenCpltCallback */ + hi2c->MemTxCpltCallback = HAL_I2C_MemTxCpltCallback; /* Legacy weak MemTxCpltCallback */ + hi2c->MemRxCpltCallback = HAL_I2C_MemRxCpltCallback; /* Legacy weak MemRxCpltCallback */ + hi2c->ErrorCallback = HAL_I2C_ErrorCallback; /* Legacy weak ErrorCallback */ + hi2c->AbortCpltCallback = HAL_I2C_AbortCpltCallback; /* Legacy weak AbortCpltCallback */ + hi2c->AddrCallback = HAL_I2C_AddrCallback; /* Legacy weak AddrCallback */ + + if (hi2c->MspInitCallback == NULL) + { + hi2c->MspInitCallback = HAL_I2C_MspInit; /* Legacy weak MspInit */ + } + + /* Init the low level hardware : GPIO, CLOCK, CORTEX...etc */ + hi2c->MspInitCallback(hi2c); +#else + /* Init the low level hardware : GPIO, CLOCK, CORTEX...etc */ + HAL_I2C_MspInit(hi2c); +#endif /* USE_HAL_I2C_REGISTER_CALLBACKS */ + } + + hi2c->State = HAL_I2C_STATE_BUSY; + + /* Disable the selected I2C peripheral */ + __HAL_I2C_DISABLE(hi2c); + + /*---------------------------- I2Cx TIMINGR Configuration ------------------*/ + /* Configure I2Cx: Frequency range */ + hi2c->Instance->TIMINGR = hi2c->Init.Timing & TIMING_CLEAR_MASK; + + /*---------------------------- I2Cx OAR1 Configuration ---------------------*/ + /* Disable Own Address1 before set the Own Address1 configuration */ + hi2c->Instance->OAR1 &= ~I2C_OAR1_OA1EN; + + /* Configure I2Cx: Own Address1 and ack own address1 mode */ + if (hi2c->Init.AddressingMode == I2C_ADDRESSINGMODE_7BIT) + { + hi2c->Instance->OAR1 = (I2C_OAR1_OA1EN | hi2c->Init.OwnAddress1); + } + else /* I2C_ADDRESSINGMODE_10BIT */ + { + hi2c->Instance->OAR1 = (I2C_OAR1_OA1EN | I2C_OAR1_OA1MODE | hi2c->Init.OwnAddress1); + } + + /*---------------------------- I2Cx CR2 Configuration ----------------------*/ + /* Configure I2Cx: Addressing Master mode */ + if (hi2c->Init.AddressingMode == I2C_ADDRESSINGMODE_10BIT) + { + hi2c->Instance->CR2 = (I2C_CR2_ADD10); + } + /* Enable the AUTOEND by default, and enable NACK (should be disable only during Slave process */ + hi2c->Instance->CR2 |= (I2C_CR2_AUTOEND | I2C_CR2_NACK); + + /*---------------------------- I2Cx OAR2 Configuration ---------------------*/ + /* Disable Own Address2 before set the Own 
Address2 configuration */ + hi2c->Instance->OAR2 &= ~I2C_DUALADDRESS_ENABLE; + + /* Configure I2Cx: Dual mode and Own Address2 */ + hi2c->Instance->OAR2 = (hi2c->Init.DualAddressMode | hi2c->Init.OwnAddress2 | \ + (hi2c->Init.OwnAddress2Masks << 8)); + + /*---------------------------- I2Cx CR1 Configuration ----------------------*/ + /* Configure I2Cx: Generalcall and NoStretch mode */ + hi2c->Instance->CR1 = (hi2c->Init.GeneralCallMode | hi2c->Init.NoStretchMode); + + /* Enable the selected I2C peripheral */ + __HAL_I2C_ENABLE(hi2c); + + hi2c->ErrorCode = HAL_I2C_ERROR_NONE; + hi2c->State = HAL_I2C_STATE_READY; + hi2c->PreviousState = I2C_STATE_NONE; + hi2c->Mode = HAL_I2C_MODE_NONE; + + return HAL_OK; +} + +/** + * @brief DeInitialize the I2C peripheral. + * @param hi2c Pointer to a I2C_HandleTypeDef structure that contains + * the configuration information for the specified I2C. + * @retval HAL status + */ +HAL_StatusTypeDef HAL_I2C_DeInit(I2C_HandleTypeDef *hi2c) +{ + /* Check the I2C handle allocation */ + if (hi2c == NULL) + { + return HAL_ERROR; + } + + /* Check the parameters */ + assert_param(IS_I2C_ALL_INSTANCE(hi2c->Instance)); + + hi2c->State = HAL_I2C_STATE_BUSY; + + /* Disable the I2C Peripheral Clock */ + __HAL_I2C_DISABLE(hi2c); + +#if (USE_HAL_I2C_REGISTER_CALLBACKS == 1) + if (hi2c->MspDeInitCallback == NULL) + { + hi2c->MspDeInitCallback = HAL_I2C_MspDeInit; /* Legacy weak MspDeInit */ + } + + /* DeInit the low level hardware: GPIO, CLOCK, NVIC */ + hi2c->MspDeInitCallback(hi2c); +#else + /* DeInit the low level hardware: GPIO, CLOCK, NVIC */ + HAL_I2C_MspDeInit(hi2c); +#endif /* USE_HAL_I2C_REGISTER_CALLBACKS */ + + hi2c->ErrorCode = HAL_I2C_ERROR_NONE; + hi2c->State = HAL_I2C_STATE_RESET; + hi2c->PreviousState = I2C_STATE_NONE; + hi2c->Mode = HAL_I2C_MODE_NONE; + + /* Release Lock */ + __HAL_UNLOCK(hi2c); + + return HAL_OK; +} + +/** + * @brief Initialize the I2C MSP. + * @param hi2c Pointer to a I2C_HandleTypeDef structure that contains + * the configuration information for the specified I2C. + * @retval None + */ +__weak void HAL_I2C_MspInit(I2C_HandleTypeDef *hi2c) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(hi2c); + + /* NOTE : This function should not be modified, when the callback is needed, + the HAL_I2C_MspInit could be implemented in the user file + */ +} + +/** + * @brief DeInitialize the I2C MSP. + * @param hi2c Pointer to a I2C_HandleTypeDef structure that contains + * the configuration information for the specified I2C. + * @retval None + */ +__weak void HAL_I2C_MspDeInit(I2C_HandleTypeDef *hi2c) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(hi2c); + + /* NOTE : This function should not be modified, when the callback is needed, + the HAL_I2C_MspDeInit could be implemented in the user file + */ +} + +#if (USE_HAL_I2C_REGISTER_CALLBACKS == 1) +/** + * @brief Register a User I2C Callback + * To be used instead of the weak predefined callback + * @param hi2c Pointer to a I2C_HandleTypeDef structure that contains + * the configuration information for the specified I2C. 
+ * @param CallbackID ID of the callback to be registered + * This parameter can be one of the following values: + * @arg @ref HAL_I2C_MASTER_TX_COMPLETE_CB_ID Master Tx Transfer completed callback ID + * @arg @ref HAL_I2C_MASTER_RX_COMPLETE_CB_ID Master Rx Transfer completed callback ID + * @arg @ref HAL_I2C_SLAVE_TX_COMPLETE_CB_ID Slave Tx Transfer completed callback ID + * @arg @ref HAL_I2C_SLAVE_RX_COMPLETE_CB_ID Slave Rx Transfer completed callback ID + * @arg @ref HAL_I2C_LISTEN_COMPLETE_CB_ID Listen Complete callback ID + * @arg @ref HAL_I2C_MEM_TX_COMPLETE_CB_ID Memory Tx Transfer callback ID + * @arg @ref HAL_I2C_MEM_RX_COMPLETE_CB_ID Memory Rx Transfer completed callback ID + * @arg @ref HAL_I2C_ERROR_CB_ID Error callback ID + * @arg @ref HAL_I2C_ABORT_CB_ID Abort callback ID + * @arg @ref HAL_I2C_MSPINIT_CB_ID MspInit callback ID + * @arg @ref HAL_I2C_MSPDEINIT_CB_ID MspDeInit callback ID + * @param pCallback pointer to the Callback function + * @retval HAL status + */ +HAL_StatusTypeDef HAL_I2C_RegisterCallback(I2C_HandleTypeDef *hi2c, HAL_I2C_CallbackIDTypeDef CallbackID, + pI2C_CallbackTypeDef pCallback) +{ + HAL_StatusTypeDef status = HAL_OK; + + if (pCallback == NULL) + { + /* Update the error code */ + hi2c->ErrorCode |= HAL_I2C_ERROR_INVALID_CALLBACK; + + return HAL_ERROR; + } + /* Process locked */ + __HAL_LOCK(hi2c); + + if (HAL_I2C_STATE_READY == hi2c->State) + { + switch (CallbackID) + { + case HAL_I2C_MASTER_TX_COMPLETE_CB_ID : + hi2c->MasterTxCpltCallback = pCallback; + break; + + case HAL_I2C_MASTER_RX_COMPLETE_CB_ID : + hi2c->MasterRxCpltCallback = pCallback; + break; + + case HAL_I2C_SLAVE_TX_COMPLETE_CB_ID : + hi2c->SlaveTxCpltCallback = pCallback; + break; + + case HAL_I2C_SLAVE_RX_COMPLETE_CB_ID : + hi2c->SlaveRxCpltCallback = pCallback; + break; + + case HAL_I2C_LISTEN_COMPLETE_CB_ID : + hi2c->ListenCpltCallback = pCallback; + break; + + case HAL_I2C_MEM_TX_COMPLETE_CB_ID : + hi2c->MemTxCpltCallback = pCallback; + break; + + case HAL_I2C_MEM_RX_COMPLETE_CB_ID : + hi2c->MemRxCpltCallback = pCallback; + break; + + case HAL_I2C_ERROR_CB_ID : + hi2c->ErrorCallback = pCallback; + break; + + case HAL_I2C_ABORT_CB_ID : + hi2c->AbortCpltCallback = pCallback; + break; + + case HAL_I2C_MSPINIT_CB_ID : + hi2c->MspInitCallback = pCallback; + break; + + case HAL_I2C_MSPDEINIT_CB_ID : + hi2c->MspDeInitCallback = pCallback; + break; + + default : + /* Update the error code */ + hi2c->ErrorCode |= HAL_I2C_ERROR_INVALID_CALLBACK; + + /* Return error status */ + status = HAL_ERROR; + break; + } + } + else if (HAL_I2C_STATE_RESET == hi2c->State) + { + switch (CallbackID) + { + case HAL_I2C_MSPINIT_CB_ID : + hi2c->MspInitCallback = pCallback; + break; + + case HAL_I2C_MSPDEINIT_CB_ID : + hi2c->MspDeInitCallback = pCallback; + break; + + default : + /* Update the error code */ + hi2c->ErrorCode |= HAL_I2C_ERROR_INVALID_CALLBACK; + + /* Return error status */ + status = HAL_ERROR; + break; + } + } + else + { + /* Update the error code */ + hi2c->ErrorCode |= HAL_I2C_ERROR_INVALID_CALLBACK; + + /* Return error status */ + status = HAL_ERROR; + } + + /* Release Lock */ + __HAL_UNLOCK(hi2c); + return status; +} + +/** + * @brief Unregister an I2C Callback + * I2C callback is redirected to the weak predefined callback + * @param hi2c Pointer to a I2C_HandleTypeDef structure that contains + * the configuration information for the specified I2C. 
+ * @param CallbackID ID of the callback to be unregistered + * This parameter can be one of the following values: + * This parameter can be one of the following values: + * @arg @ref HAL_I2C_MASTER_TX_COMPLETE_CB_ID Master Tx Transfer completed callback ID + * @arg @ref HAL_I2C_MASTER_RX_COMPLETE_CB_ID Master Rx Transfer completed callback ID + * @arg @ref HAL_I2C_SLAVE_TX_COMPLETE_CB_ID Slave Tx Transfer completed callback ID + * @arg @ref HAL_I2C_SLAVE_RX_COMPLETE_CB_ID Slave Rx Transfer completed callback ID + * @arg @ref HAL_I2C_LISTEN_COMPLETE_CB_ID Listen Complete callback ID + * @arg @ref HAL_I2C_MEM_TX_COMPLETE_CB_ID Memory Tx Transfer callback ID + * @arg @ref HAL_I2C_MEM_RX_COMPLETE_CB_ID Memory Rx Transfer completed callback ID + * @arg @ref HAL_I2C_ERROR_CB_ID Error callback ID + * @arg @ref HAL_I2C_ABORT_CB_ID Abort callback ID + * @arg @ref HAL_I2C_MSPINIT_CB_ID MspInit callback ID + * @arg @ref HAL_I2C_MSPDEINIT_CB_ID MspDeInit callback ID + * @retval HAL status + */ +HAL_StatusTypeDef HAL_I2C_UnRegisterCallback(I2C_HandleTypeDef *hi2c, HAL_I2C_CallbackIDTypeDef CallbackID) +{ + HAL_StatusTypeDef status = HAL_OK; + + /* Process locked */ + __HAL_LOCK(hi2c); + + if (HAL_I2C_STATE_READY == hi2c->State) + { + switch (CallbackID) + { + case HAL_I2C_MASTER_TX_COMPLETE_CB_ID : + hi2c->MasterTxCpltCallback = HAL_I2C_MasterTxCpltCallback; /* Legacy weak MasterTxCpltCallback */ + break; + + case HAL_I2C_MASTER_RX_COMPLETE_CB_ID : + hi2c->MasterRxCpltCallback = HAL_I2C_MasterRxCpltCallback; /* Legacy weak MasterRxCpltCallback */ + break; + + case HAL_I2C_SLAVE_TX_COMPLETE_CB_ID : + hi2c->SlaveTxCpltCallback = HAL_I2C_SlaveTxCpltCallback; /* Legacy weak SlaveTxCpltCallback */ + break; + + case HAL_I2C_SLAVE_RX_COMPLETE_CB_ID : + hi2c->SlaveRxCpltCallback = HAL_I2C_SlaveRxCpltCallback; /* Legacy weak SlaveRxCpltCallback */ + break; + + case HAL_I2C_LISTEN_COMPLETE_CB_ID : + hi2c->ListenCpltCallback = HAL_I2C_ListenCpltCallback; /* Legacy weak ListenCpltCallback */ + break; + + case HAL_I2C_MEM_TX_COMPLETE_CB_ID : + hi2c->MemTxCpltCallback = HAL_I2C_MemTxCpltCallback; /* Legacy weak MemTxCpltCallback */ + break; + + case HAL_I2C_MEM_RX_COMPLETE_CB_ID : + hi2c->MemRxCpltCallback = HAL_I2C_MemRxCpltCallback; /* Legacy weak MemRxCpltCallback */ + break; + + case HAL_I2C_ERROR_CB_ID : + hi2c->ErrorCallback = HAL_I2C_ErrorCallback; /* Legacy weak ErrorCallback */ + break; + + case HAL_I2C_ABORT_CB_ID : + hi2c->AbortCpltCallback = HAL_I2C_AbortCpltCallback; /* Legacy weak AbortCpltCallback */ + break; + + case HAL_I2C_MSPINIT_CB_ID : + hi2c->MspInitCallback = HAL_I2C_MspInit; /* Legacy weak MspInit */ + break; + + case HAL_I2C_MSPDEINIT_CB_ID : + hi2c->MspDeInitCallback = HAL_I2C_MspDeInit; /* Legacy weak MspDeInit */ + break; + + default : + /* Update the error code */ + hi2c->ErrorCode |= HAL_I2C_ERROR_INVALID_CALLBACK; + + /* Return error status */ + status = HAL_ERROR; + break; + } + } + else if (HAL_I2C_STATE_RESET == hi2c->State) + { + switch (CallbackID) + { + case HAL_I2C_MSPINIT_CB_ID : + hi2c->MspInitCallback = HAL_I2C_MspInit; /* Legacy weak MspInit */ + break; + + case HAL_I2C_MSPDEINIT_CB_ID : + hi2c->MspDeInitCallback = HAL_I2C_MspDeInit; /* Legacy weak MspDeInit */ + break; + + default : + /* Update the error code */ + hi2c->ErrorCode |= HAL_I2C_ERROR_INVALID_CALLBACK; + + /* Return error status */ + status = HAL_ERROR; + break; + } + } + else + { + /* Update the error code */ + hi2c->ErrorCode |= HAL_I2C_ERROR_INVALID_CALLBACK; + + /* Return error status */ + status = 
HAL_ERROR; + } + + /* Release Lock */ + __HAL_UNLOCK(hi2c); + return status; +} + +/** + * @brief Register the Slave Address Match I2C Callback + * To be used instead of the weak HAL_I2C_AddrCallback() predefined callback + * @param hi2c Pointer to a I2C_HandleTypeDef structure that contains + * the configuration information for the specified I2C. + * @param pCallback pointer to the Address Match Callback function + * @retval HAL status + */ +HAL_StatusTypeDef HAL_I2C_RegisterAddrCallback(I2C_HandleTypeDef *hi2c, pI2C_AddrCallbackTypeDef pCallback) +{ + HAL_StatusTypeDef status = HAL_OK; + + if (pCallback == NULL) + { + /* Update the error code */ + hi2c->ErrorCode |= HAL_I2C_ERROR_INVALID_CALLBACK; + + return HAL_ERROR; + } + /* Process locked */ + __HAL_LOCK(hi2c); + + if (HAL_I2C_STATE_READY == hi2c->State) + { + hi2c->AddrCallback = pCallback; + } + else + { + /* Update the error code */ + hi2c->ErrorCode |= HAL_I2C_ERROR_INVALID_CALLBACK; + + /* Return error status */ + status = HAL_ERROR; + } + + /* Release Lock */ + __HAL_UNLOCK(hi2c); + return status; +} + +/** + * @brief UnRegister the Slave Address Match I2C Callback + * Info Ready I2C Callback is redirected to the weak HAL_I2C_AddrCallback() predefined callback + * @param hi2c Pointer to a I2C_HandleTypeDef structure that contains + * the configuration information for the specified I2C. + * @retval HAL status + */ +HAL_StatusTypeDef HAL_I2C_UnRegisterAddrCallback(I2C_HandleTypeDef *hi2c) +{ + HAL_StatusTypeDef status = HAL_OK; + + /* Process locked */ + __HAL_LOCK(hi2c); + + if (HAL_I2C_STATE_READY == hi2c->State) + { + hi2c->AddrCallback = HAL_I2C_AddrCallback; /* Legacy weak AddrCallback */ + } + else + { + /* Update the error code */ + hi2c->ErrorCode |= HAL_I2C_ERROR_INVALID_CALLBACK; + + /* Return error status */ + status = HAL_ERROR; + } + + /* Release Lock */ + __HAL_UNLOCK(hi2c); + return status; +} + +#endif /* USE_HAL_I2C_REGISTER_CALLBACKS */ + +/** + * @} + */ + +/** @defgroup I2C_Exported_Functions_Group2 Input and Output operation functions + * @brief Data transfers functions + * +@verbatim + =============================================================================== + ##### IO operation functions ##### + =============================================================================== + [..] + This subsection provides a set of functions allowing to manage the I2C data + transfers. + + (#) There are two modes of transfer: + (++) Blocking mode : The communication is performed in the polling mode. + The status of all data processing is returned by the same function + after finishing transfer. + (++) No-Blocking mode : The communication is performed using Interrupts + or DMA. These functions return the status of the transfer startup. + The end of the data processing will be indicated through the + dedicated I2C IRQ when using Interrupt mode or the DMA IRQ when + using DMA mode. 
+ + (#) Blocking mode functions are : + (++) HAL_I2C_Master_Transmit() + (++) HAL_I2C_Master_Receive() + (++) HAL_I2C_Slave_Transmit() + (++) HAL_I2C_Slave_Receive() + (++) HAL_I2C_Mem_Write() + (++) HAL_I2C_Mem_Read() + (++) HAL_I2C_IsDeviceReady() + + (#) No-Blocking mode functions with Interrupt are : + (++) HAL_I2C_Master_Transmit_IT() + (++) HAL_I2C_Master_Receive_IT() + (++) HAL_I2C_Slave_Transmit_IT() + (++) HAL_I2C_Slave_Receive_IT() + (++) HAL_I2C_Mem_Write_IT() + (++) HAL_I2C_Mem_Read_IT() + (++) HAL_I2C_Master_Seq_Transmit_IT() + (++) HAL_I2C_Master_Seq_Receive_IT() + (++) HAL_I2C_Slave_Seq_Transmit_IT() + (++) HAL_I2C_Slave_Seq_Receive_IT() + (++) HAL_I2C_EnableListen_IT() + (++) HAL_I2C_DisableListen_IT() + (++) HAL_I2C_Master_Abort_IT() + + (#) No-Blocking mode functions with DMA are : + (++) HAL_I2C_Master_Transmit_DMA() + (++) HAL_I2C_Master_Receive_DMA() + (++) HAL_I2C_Slave_Transmit_DMA() + (++) HAL_I2C_Slave_Receive_DMA() + (++) HAL_I2C_Mem_Write_DMA() + (++) HAL_I2C_Mem_Read_DMA() + (++) HAL_I2C_Master_Seq_Transmit_DMA() + (++) HAL_I2C_Master_Seq_Receive_DMA() + (++) HAL_I2C_Slave_Seq_Transmit_DMA() + (++) HAL_I2C_Slave_Seq_Receive_DMA() + + (#) A set of Transfer Complete Callbacks are provided in non Blocking mode: + (++) HAL_I2C_MasterTxCpltCallback() + (++) HAL_I2C_MasterRxCpltCallback() + (++) HAL_I2C_SlaveTxCpltCallback() + (++) HAL_I2C_SlaveRxCpltCallback() + (++) HAL_I2C_MemTxCpltCallback() + (++) HAL_I2C_MemRxCpltCallback() + (++) HAL_I2C_AddrCallback() + (++) HAL_I2C_ListenCpltCallback() + (++) HAL_I2C_ErrorCallback() + (++) HAL_I2C_AbortCpltCallback() + +@endverbatim + * @{ + */ + +/** + * @brief Transmits in master mode an amount of data in blocking mode. + * @param hi2c Pointer to a I2C_HandleTypeDef structure that contains + * the configuration information for the specified I2C. 
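+  *
+  * @note  Editor's illustrative sketch (not part of the original ST documentation): assuming
+  *        the usual CubeMX includes, a hypothetical handle hi2c1 initialized elsewhere and a
+  *        device whose 7-bit address is 0x48, a blocking two-byte write with a 100 ms
+  *        timeout looks like this (note the left shift of the 7-bit address):
+  * @code
+  *        extern I2C_HandleTypeDef hi2c1;            // hypothetical handle, configured elsewhere
+  *
+  *        void App_WriteSensorRegister(void)
+  *        {
+  *          uint8_t cmd[2] = { 0x01U, 0x80U };       // example payload: register + value
+  *          if (HAL_I2C_Master_Transmit(&hi2c1, (uint16_t)(0x48U << 1),
+  *                                      cmd, (uint16_t)sizeof(cmd), 100U) != HAL_OK)
+  *          {
+  *            // NACK, busy bus or timeout; inspect hi2c1.ErrorCode
+  *          }
+  *        }
+  * @endcode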
+ * @param DevAddress Target device address: The device 7 bits address value + * in datasheet must be shifted to the left before calling the interface + * @param pData Pointer to data buffer + * @param Size Amount of data to be sent + * @param Timeout Timeout duration + * @retval HAL status + */ +HAL_StatusTypeDef HAL_I2C_Master_Transmit(I2C_HandleTypeDef *hi2c, uint16_t DevAddress, uint8_t *pData, + uint16_t Size, uint32_t Timeout) +{ + uint32_t tickstart; + + if (hi2c->State == HAL_I2C_STATE_READY) + { + /* Process Locked */ + __HAL_LOCK(hi2c); + + /* Init tickstart for timeout management*/ + tickstart = HAL_GetTick(); + + if (I2C_WaitOnFlagUntilTimeout(hi2c, I2C_FLAG_BUSY, SET, I2C_TIMEOUT_BUSY, tickstart) != HAL_OK) + { + return HAL_ERROR; + } + + hi2c->State = HAL_I2C_STATE_BUSY_TX; + hi2c->Mode = HAL_I2C_MODE_MASTER; + hi2c->ErrorCode = HAL_I2C_ERROR_NONE; + + /* Prepare transfer parameters */ + hi2c->pBuffPtr = pData; + hi2c->XferCount = Size; + hi2c->XferISR = NULL; + + /* Send Slave Address */ + /* Set NBYTES to write and reload if hi2c->XferCount > MAX_NBYTE_SIZE and generate RESTART */ + if (hi2c->XferCount > MAX_NBYTE_SIZE) + { + hi2c->XferSize = MAX_NBYTE_SIZE; + I2C_TransferConfig(hi2c, DevAddress, (uint8_t)hi2c->XferSize, I2C_RELOAD_MODE, + I2C_GENERATE_START_WRITE); + } + else + { + hi2c->XferSize = hi2c->XferCount; + I2C_TransferConfig(hi2c, DevAddress, (uint8_t)hi2c->XferSize, I2C_AUTOEND_MODE, + I2C_GENERATE_START_WRITE); + } + + while (hi2c->XferCount > 0U) + { + /* Wait until TXIS flag is set */ + if (I2C_WaitOnTXISFlagUntilTimeout(hi2c, Timeout, tickstart) != HAL_OK) + { + return HAL_ERROR; + } + /* Write data to TXDR */ + hi2c->Instance->TXDR = *hi2c->pBuffPtr; + + /* Increment Buffer pointer */ + hi2c->pBuffPtr++; + + hi2c->XferCount--; + hi2c->XferSize--; + + if ((hi2c->XferCount != 0U) && (hi2c->XferSize == 0U)) + { + /* Wait until TCR flag is set */ + if (I2C_WaitOnFlagUntilTimeout(hi2c, I2C_FLAG_TCR, RESET, Timeout, tickstart) != HAL_OK) + { + return HAL_ERROR; + } + + if (hi2c->XferCount > MAX_NBYTE_SIZE) + { + hi2c->XferSize = MAX_NBYTE_SIZE; + I2C_TransferConfig(hi2c, DevAddress, (uint8_t)hi2c->XferSize, I2C_RELOAD_MODE, + I2C_NO_STARTSTOP); + } + else + { + hi2c->XferSize = hi2c->XferCount; + I2C_TransferConfig(hi2c, DevAddress, (uint8_t)hi2c->XferSize, I2C_AUTOEND_MODE, + I2C_NO_STARTSTOP); + } + } + } + + /* No need to Check TC flag, with AUTOEND mode the stop is automatically generated */ + /* Wait until STOPF flag is set */ + if (I2C_WaitOnSTOPFlagUntilTimeout(hi2c, Timeout, tickstart) != HAL_OK) + { + return HAL_ERROR; + } + + /* Clear STOP Flag */ + __HAL_I2C_CLEAR_FLAG(hi2c, I2C_FLAG_STOPF); + + /* Clear Configuration Register 2 */ + I2C_RESET_CR2(hi2c); + + hi2c->State = HAL_I2C_STATE_READY; + hi2c->Mode = HAL_I2C_MODE_NONE; + + /* Process Unlocked */ + __HAL_UNLOCK(hi2c); + + return HAL_OK; + } + else + { + return HAL_BUSY; + } +} + +/** + * @brief Receives in master mode an amount of data in blocking mode. + * @param hi2c Pointer to a I2C_HandleTypeDef structure that contains + * the configuration information for the specified I2C. 
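+  *
+  * @note  Editor's illustrative sketch (not ST documentation): a blocking read of two bytes
+  *        from the same hypothetical 7-bit address 0x48, again shifted left by one:
+  * @code
+  *        extern I2C_HandleTypeDef hi2c1;            // assumed configured elsewhere
+  *
+  *        void App_ReadSensorValue(void)
+  *        {
+  *          uint8_t rx[2] = { 0U };
+  *          if (HAL_I2C_Master_Receive(&hi2c1, (uint16_t)(0x48U << 1),
+  *                                     rx, (uint16_t)sizeof(rx), 100U) == HAL_OK)
+  *          {
+  *            // rx[0] and rx[1] now hold the bytes clocked in from the device
+  *          }
+  *        }
+  * @endcode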
+ * @param DevAddress Target device address: The device 7 bits address value + * in datasheet must be shifted to the left before calling the interface + * @param pData Pointer to data buffer + * @param Size Amount of data to be sent + * @param Timeout Timeout duration + * @retval HAL status + */ +HAL_StatusTypeDef HAL_I2C_Master_Receive(I2C_HandleTypeDef *hi2c, uint16_t DevAddress, uint8_t *pData, + uint16_t Size, uint32_t Timeout) +{ + uint32_t tickstart; + + if (hi2c->State == HAL_I2C_STATE_READY) + { + /* Process Locked */ + __HAL_LOCK(hi2c); + + /* Init tickstart for timeout management*/ + tickstart = HAL_GetTick(); + + if (I2C_WaitOnFlagUntilTimeout(hi2c, I2C_FLAG_BUSY, SET, I2C_TIMEOUT_BUSY, tickstart) != HAL_OK) + { + return HAL_ERROR; + } + + hi2c->State = HAL_I2C_STATE_BUSY_RX; + hi2c->Mode = HAL_I2C_MODE_MASTER; + hi2c->ErrorCode = HAL_I2C_ERROR_NONE; + + /* Prepare transfer parameters */ + hi2c->pBuffPtr = pData; + hi2c->XferCount = Size; + hi2c->XferISR = NULL; + + /* Send Slave Address */ + /* Set NBYTES to write and reload if hi2c->XferCount > MAX_NBYTE_SIZE and generate RESTART */ + if (hi2c->XferCount > MAX_NBYTE_SIZE) + { + hi2c->XferSize = MAX_NBYTE_SIZE; + I2C_TransferConfig(hi2c, DevAddress, (uint8_t)hi2c->XferSize, I2C_RELOAD_MODE, + I2C_GENERATE_START_READ); + } + else + { + hi2c->XferSize = hi2c->XferCount; + I2C_TransferConfig(hi2c, DevAddress, (uint8_t)hi2c->XferSize, I2C_AUTOEND_MODE, + I2C_GENERATE_START_READ); + } + + while (hi2c->XferCount > 0U) + { + /* Wait until RXNE flag is set */ + if (I2C_WaitOnRXNEFlagUntilTimeout(hi2c, Timeout, tickstart) != HAL_OK) + { + return HAL_ERROR; + } + + /* Read data from RXDR */ + *hi2c->pBuffPtr = (uint8_t)hi2c->Instance->RXDR; + + /* Increment Buffer pointer */ + hi2c->pBuffPtr++; + + hi2c->XferSize--; + hi2c->XferCount--; + + if ((hi2c->XferCount != 0U) && (hi2c->XferSize == 0U)) + { + /* Wait until TCR flag is set */ + if (I2C_WaitOnFlagUntilTimeout(hi2c, I2C_FLAG_TCR, RESET, Timeout, tickstart) != HAL_OK) + { + return HAL_ERROR; + } + + if (hi2c->XferCount > MAX_NBYTE_SIZE) + { + hi2c->XferSize = MAX_NBYTE_SIZE; + I2C_TransferConfig(hi2c, DevAddress, (uint8_t)hi2c->XferSize, I2C_RELOAD_MODE, + I2C_NO_STARTSTOP); + } + else + { + hi2c->XferSize = hi2c->XferCount; + I2C_TransferConfig(hi2c, DevAddress, (uint8_t)hi2c->XferSize, I2C_AUTOEND_MODE, + I2C_NO_STARTSTOP); + } + } + } + + /* No need to Check TC flag, with AUTOEND mode the stop is automatically generated */ + /* Wait until STOPF flag is set */ + if (I2C_WaitOnSTOPFlagUntilTimeout(hi2c, Timeout, tickstart) != HAL_OK) + { + return HAL_ERROR; + } + + /* Clear STOP Flag */ + __HAL_I2C_CLEAR_FLAG(hi2c, I2C_FLAG_STOPF); + + /* Clear Configuration Register 2 */ + I2C_RESET_CR2(hi2c); + + hi2c->State = HAL_I2C_STATE_READY; + hi2c->Mode = HAL_I2C_MODE_NONE; + + /* Process Unlocked */ + __HAL_UNLOCK(hi2c); + + return HAL_OK; + } + else + { + return HAL_BUSY; + } +} + +/** + * @brief Transmits in slave mode an amount of data in blocking mode. + * @param hi2c Pointer to a I2C_HandleTypeDef structure that contains + * the configuration information for the specified I2C. 
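+  *
+  * @note  Editor's illustrative sketch (not ST documentation): in slave mode this call blocks
+  *        until a master addresses the device in read direction (or the timeout expires) and
+  *        then shifts out the prepared bytes:
+  * @code
+  *        extern I2C_HandleTypeDef hi2c1;            // assumed configured as a slave elsewhere
+  *
+  *        void App_AnswerMasterRequest(void)
+  *        {
+  *          uint8_t reply[4] = { 0x01U, 0x02U, 0x03U, 0x04U };
+  *          if (HAL_I2C_Slave_Transmit(&hi2c1, reply, (uint16_t)sizeof(reply), 1000U) != HAL_OK)
+  *          {
+  *            // no master request within 1 s, or the master NACKed before all bytes were sent
+  *          }
+  *        }
+  * @endcode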
+ * @param pData Pointer to data buffer + * @param Size Amount of data to be sent + * @param Timeout Timeout duration + * @retval HAL status + */ +HAL_StatusTypeDef HAL_I2C_Slave_Transmit(I2C_HandleTypeDef *hi2c, uint8_t *pData, uint16_t Size, + uint32_t Timeout) +{ + uint32_t tickstart; + + if (hi2c->State == HAL_I2C_STATE_READY) + { + if ((pData == NULL) || (Size == 0U)) + { + hi2c->ErrorCode = HAL_I2C_ERROR_INVALID_PARAM; + return HAL_ERROR; + } + /* Process Locked */ + __HAL_LOCK(hi2c); + + /* Init tickstart for timeout management*/ + tickstart = HAL_GetTick(); + + hi2c->State = HAL_I2C_STATE_BUSY_TX; + hi2c->Mode = HAL_I2C_MODE_SLAVE; + hi2c->ErrorCode = HAL_I2C_ERROR_NONE; + + /* Prepare transfer parameters */ + hi2c->pBuffPtr = pData; + hi2c->XferCount = Size; + hi2c->XferISR = NULL; + + /* Enable Address Acknowledge */ + hi2c->Instance->CR2 &= ~I2C_CR2_NACK; + + /* Wait until ADDR flag is set */ + if (I2C_WaitOnFlagUntilTimeout(hi2c, I2C_FLAG_ADDR, RESET, Timeout, tickstart) != HAL_OK) + { + /* Disable Address Acknowledge */ + hi2c->Instance->CR2 |= I2C_CR2_NACK; + return HAL_ERROR; + } + + /* Clear ADDR flag */ + __HAL_I2C_CLEAR_FLAG(hi2c, I2C_FLAG_ADDR); + + /* If 10bit addressing mode is selected */ + if (hi2c->Init.AddressingMode == I2C_ADDRESSINGMODE_10BIT) + { + /* Wait until ADDR flag is set */ + if (I2C_WaitOnFlagUntilTimeout(hi2c, I2C_FLAG_ADDR, RESET, Timeout, tickstart) != HAL_OK) + { + /* Disable Address Acknowledge */ + hi2c->Instance->CR2 |= I2C_CR2_NACK; + return HAL_ERROR; + } + + /* Clear ADDR flag */ + __HAL_I2C_CLEAR_FLAG(hi2c, I2C_FLAG_ADDR); + } + + /* Wait until DIR flag is set Transmitter mode */ + if (I2C_WaitOnFlagUntilTimeout(hi2c, I2C_FLAG_DIR, RESET, Timeout, tickstart) != HAL_OK) + { + /* Disable Address Acknowledge */ + hi2c->Instance->CR2 |= I2C_CR2_NACK; + return HAL_ERROR; + } + + while (hi2c->XferCount > 0U) + { + /* Wait until TXIS flag is set */ + if (I2C_WaitOnTXISFlagUntilTimeout(hi2c, Timeout, tickstart) != HAL_OK) + { + /* Disable Address Acknowledge */ + hi2c->Instance->CR2 |= I2C_CR2_NACK; + return HAL_ERROR; + } + + /* Write data to TXDR */ + hi2c->Instance->TXDR = *hi2c->pBuffPtr; + + /* Increment Buffer pointer */ + hi2c->pBuffPtr++; + + hi2c->XferCount--; + } + + /* Wait until AF flag is set */ + if (I2C_WaitOnFlagUntilTimeout(hi2c, I2C_FLAG_AF, RESET, Timeout, tickstart) != HAL_OK) + { + /* Disable Address Acknowledge */ + hi2c->Instance->CR2 |= I2C_CR2_NACK; + return HAL_ERROR; + } + + /* Flush TX register */ + I2C_Flush_TXDR(hi2c); + + /* Clear AF flag */ + __HAL_I2C_CLEAR_FLAG(hi2c, I2C_FLAG_AF); + + /* Wait until STOP flag is set */ + if (I2C_WaitOnSTOPFlagUntilTimeout(hi2c, Timeout, tickstart) != HAL_OK) + { + /* Disable Address Acknowledge */ + hi2c->Instance->CR2 |= I2C_CR2_NACK; + + return HAL_ERROR; + } + + /* Clear STOP flag */ + __HAL_I2C_CLEAR_FLAG(hi2c, I2C_FLAG_STOPF); + + /* Wait until BUSY flag is reset */ + if (I2C_WaitOnFlagUntilTimeout(hi2c, I2C_FLAG_BUSY, SET, Timeout, tickstart) != HAL_OK) + { + /* Disable Address Acknowledge */ + hi2c->Instance->CR2 |= I2C_CR2_NACK; + return HAL_ERROR; + } + + /* Disable Address Acknowledge */ + hi2c->Instance->CR2 |= I2C_CR2_NACK; + + hi2c->State = HAL_I2C_STATE_READY; + hi2c->Mode = HAL_I2C_MODE_NONE; + + /* Process Unlocked */ + __HAL_UNLOCK(hi2c); + + return HAL_OK; + } + else + { + return HAL_BUSY; + } +} + +/** + * @brief Receive in slave mode an amount of data in blocking mode + * @param hi2c Pointer to a I2C_HandleTypeDef structure that contains + * the configuration 
information for the specified I2C. + * @param pData Pointer to data buffer + * @param Size Amount of data to be sent + * @param Timeout Timeout duration + * @retval HAL status + */ +HAL_StatusTypeDef HAL_I2C_Slave_Receive(I2C_HandleTypeDef *hi2c, uint8_t *pData, uint16_t Size, + uint32_t Timeout) +{ + uint32_t tickstart; + + if (hi2c->State == HAL_I2C_STATE_READY) + { + if ((pData == NULL) || (Size == 0U)) + { + hi2c->ErrorCode = HAL_I2C_ERROR_INVALID_PARAM; + return HAL_ERROR; + } + /* Process Locked */ + __HAL_LOCK(hi2c); + + /* Init tickstart for timeout management*/ + tickstart = HAL_GetTick(); + + hi2c->State = HAL_I2C_STATE_BUSY_RX; + hi2c->Mode = HAL_I2C_MODE_SLAVE; + hi2c->ErrorCode = HAL_I2C_ERROR_NONE; + + /* Prepare transfer parameters */ + hi2c->pBuffPtr = pData; + hi2c->XferCount = Size; + hi2c->XferSize = hi2c->XferCount; + hi2c->XferISR = NULL; + + /* Enable Address Acknowledge */ + hi2c->Instance->CR2 &= ~I2C_CR2_NACK; + + /* Wait until ADDR flag is set */ + if (I2C_WaitOnFlagUntilTimeout(hi2c, I2C_FLAG_ADDR, RESET, Timeout, tickstart) != HAL_OK) + { + /* Disable Address Acknowledge */ + hi2c->Instance->CR2 |= I2C_CR2_NACK; + return HAL_ERROR; + } + + /* Clear ADDR flag */ + __HAL_I2C_CLEAR_FLAG(hi2c, I2C_FLAG_ADDR); + + /* Wait until DIR flag is reset Receiver mode */ + if (I2C_WaitOnFlagUntilTimeout(hi2c, I2C_FLAG_DIR, SET, Timeout, tickstart) != HAL_OK) + { + /* Disable Address Acknowledge */ + hi2c->Instance->CR2 |= I2C_CR2_NACK; + return HAL_ERROR; + } + + while (hi2c->XferCount > 0U) + { + /* Wait until RXNE flag is set */ + if (I2C_WaitOnRXNEFlagUntilTimeout(hi2c, Timeout, tickstart) != HAL_OK) + { + /* Disable Address Acknowledge */ + hi2c->Instance->CR2 |= I2C_CR2_NACK; + + /* Store Last receive data if any */ + if (__HAL_I2C_GET_FLAG(hi2c, I2C_FLAG_RXNE) == SET) + { + /* Read data from RXDR */ + *hi2c->pBuffPtr = (uint8_t)hi2c->Instance->RXDR; + + /* Increment Buffer pointer */ + hi2c->pBuffPtr++; + + hi2c->XferCount--; + hi2c->XferSize--; + } + + return HAL_ERROR; + } + + /* Read data from RXDR */ + *hi2c->pBuffPtr = (uint8_t)hi2c->Instance->RXDR; + + /* Increment Buffer pointer */ + hi2c->pBuffPtr++; + + hi2c->XferCount--; + hi2c->XferSize--; + } + + /* Wait until STOP flag is set */ + if (I2C_WaitOnSTOPFlagUntilTimeout(hi2c, Timeout, tickstart) != HAL_OK) + { + /* Disable Address Acknowledge */ + hi2c->Instance->CR2 |= I2C_CR2_NACK; + return HAL_ERROR; + } + + /* Clear STOP flag */ + __HAL_I2C_CLEAR_FLAG(hi2c, I2C_FLAG_STOPF); + + /* Wait until BUSY flag is reset */ + if (I2C_WaitOnFlagUntilTimeout(hi2c, I2C_FLAG_BUSY, SET, Timeout, tickstart) != HAL_OK) + { + /* Disable Address Acknowledge */ + hi2c->Instance->CR2 |= I2C_CR2_NACK; + return HAL_ERROR; + } + + /* Disable Address Acknowledge */ + hi2c->Instance->CR2 |= I2C_CR2_NACK; + + hi2c->State = HAL_I2C_STATE_READY; + hi2c->Mode = HAL_I2C_MODE_NONE; + + /* Process Unlocked */ + __HAL_UNLOCK(hi2c); + + return HAL_OK; + } + else + { + return HAL_BUSY; + } +} + +/** + * @brief Transmit in master mode an amount of data in non-blocking mode with Interrupt + * @param hi2c Pointer to a I2C_HandleTypeDef structure that contains + * the configuration information for the specified I2C. 
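+  *
+  * @note  Editor's illustrative sketch (not ST documentation): the _IT variant only starts the
+  *        transfer; completion is reported via HAL_I2C_MasterTxCpltCallback(). The sketch
+  *        assumes the I2C event/error interrupts are enabled in the NVIC and routed to
+  *        HAL_I2C_EV_IRQHandler()/HAL_I2C_ER_IRQHandler() (as CubeMX generates), and that
+  *        USE_HAL_I2C_REGISTER_CALLBACKS is 0 so the weak callback can simply be redefined:
+  * @code
+  *        extern I2C_HandleTypeDef hi2c1;            // hypothetical handle, configured elsewhere
+  *        static volatile uint8_t tx_done = 0U;
+  *        static uint8_t cmd[2] = { 0x01U, 0x80U };  // must stay valid until completion
+  *
+  *        void HAL_I2C_MasterTxCpltCallback(I2C_HandleTypeDef *hi2c)
+  *        {
+  *          tx_done = 1U;                            // runs in the I2C interrupt context
+  *        }
+  *
+  *        void App_StartTransmit(void)
+  *        {
+  *          tx_done = 0U;
+  *          if (HAL_I2C_Master_Transmit_IT(&hi2c1, (uint16_t)(0x48U << 1),
+  *                                         cmd, (uint16_t)sizeof(cmd)) == HAL_OK)
+  *          {
+  *            while (tx_done == 0U) { }              // or do useful work instead of polling
+  *          }
+  *        }
+  * @endcode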
+ * @param DevAddress Target device address: The device 7 bits address value + * in datasheet must be shifted to the left before calling the interface + * @param pData Pointer to data buffer + * @param Size Amount of data to be sent + * @retval HAL status + */ +HAL_StatusTypeDef HAL_I2C_Master_Transmit_IT(I2C_HandleTypeDef *hi2c, uint16_t DevAddress, uint8_t *pData, + uint16_t Size) +{ + uint32_t xfermode; + + if (hi2c->State == HAL_I2C_STATE_READY) + { + if (__HAL_I2C_GET_FLAG(hi2c, I2C_FLAG_BUSY) == SET) + { + return HAL_BUSY; + } + + /* Process Locked */ + __HAL_LOCK(hi2c); + + hi2c->State = HAL_I2C_STATE_BUSY_TX; + hi2c->Mode = HAL_I2C_MODE_MASTER; + hi2c->ErrorCode = HAL_I2C_ERROR_NONE; + + /* Prepare transfer parameters */ + hi2c->pBuffPtr = pData; + hi2c->XferCount = Size; + hi2c->XferOptions = I2C_NO_OPTION_FRAME; + hi2c->XferISR = I2C_Master_ISR_IT; + + if (hi2c->XferCount > MAX_NBYTE_SIZE) + { + hi2c->XferSize = MAX_NBYTE_SIZE; + xfermode = I2C_RELOAD_MODE; + } + else + { + hi2c->XferSize = hi2c->XferCount; + xfermode = I2C_AUTOEND_MODE; + } + + /* Send Slave Address */ + /* Set NBYTES to write and reload if hi2c->XferCount > MAX_NBYTE_SIZE */ + I2C_TransferConfig(hi2c, DevAddress, (uint8_t)hi2c->XferSize, xfermode, I2C_GENERATE_START_WRITE); + + /* Process Unlocked */ + __HAL_UNLOCK(hi2c); + + /* Note : The I2C interrupts must be enabled after unlocking current process + to avoid the risk of I2C interrupt handle execution before current + process unlock */ + + /* Enable ERR, TC, STOP, NACK, TXI interrupt */ + /* possible to enable all of these */ + /* I2C_IT_ERRI | I2C_IT_TCI | I2C_IT_STOPI | I2C_IT_NACKI | + I2C_IT_ADDRI | I2C_IT_RXI | I2C_IT_TXI */ + I2C_Enable_IRQ(hi2c, I2C_XFER_TX_IT); + + return HAL_OK; + } + else + { + return HAL_BUSY; + } +} + +/** + * @brief Receive in master mode an amount of data in non-blocking mode with Interrupt + * @param hi2c Pointer to a I2C_HandleTypeDef structure that contains + * the configuration information for the specified I2C. 
+ * @param DevAddress Target device address: The device 7 bits address value + * in datasheet must be shifted to the left before calling the interface + * @param pData Pointer to data buffer + * @param Size Amount of data to be sent + * @retval HAL status + */ +HAL_StatusTypeDef HAL_I2C_Master_Receive_IT(I2C_HandleTypeDef *hi2c, uint16_t DevAddress, uint8_t *pData, + uint16_t Size) +{ + uint32_t xfermode; + + if (hi2c->State == HAL_I2C_STATE_READY) + { + if (__HAL_I2C_GET_FLAG(hi2c, I2C_FLAG_BUSY) == SET) + { + return HAL_BUSY; + } + + /* Process Locked */ + __HAL_LOCK(hi2c); + + hi2c->State = HAL_I2C_STATE_BUSY_RX; + hi2c->Mode = HAL_I2C_MODE_MASTER; + hi2c->ErrorCode = HAL_I2C_ERROR_NONE; + + /* Prepare transfer parameters */ + hi2c->pBuffPtr = pData; + hi2c->XferCount = Size; + hi2c->XferOptions = I2C_NO_OPTION_FRAME; + hi2c->XferISR = I2C_Master_ISR_IT; + + if (hi2c->XferCount > MAX_NBYTE_SIZE) + { + hi2c->XferSize = MAX_NBYTE_SIZE; + xfermode = I2C_RELOAD_MODE; + } + else + { + hi2c->XferSize = hi2c->XferCount; + xfermode = I2C_AUTOEND_MODE; + } + + /* Send Slave Address */ + /* Set NBYTES to write and reload if hi2c->XferCount > MAX_NBYTE_SIZE */ + I2C_TransferConfig(hi2c, DevAddress, (uint8_t)hi2c->XferSize, xfermode, I2C_GENERATE_START_READ); + + /* Process Unlocked */ + __HAL_UNLOCK(hi2c); + + /* Note : The I2C interrupts must be enabled after unlocking current process + to avoid the risk of I2C interrupt handle execution before current + process unlock */ + + /* Enable ERR, TC, STOP, NACK, RXI interrupt */ + /* possible to enable all of these */ + /* I2C_IT_ERRI | I2C_IT_TCI | I2C_IT_STOPI | I2C_IT_NACKI | + I2C_IT_ADDRI | I2C_IT_RXI | I2C_IT_TXI */ + I2C_Enable_IRQ(hi2c, I2C_XFER_RX_IT); + + return HAL_OK; + } + else + { + return HAL_BUSY; + } +} + +/** + * @brief Transmit in slave mode an amount of data in non-blocking mode with Interrupt + * @param hi2c Pointer to a I2C_HandleTypeDef structure that contains + * the configuration information for the specified I2C. + * @param pData Pointer to data buffer + * @param Size Amount of data to be sent + * @retval HAL status + */ +HAL_StatusTypeDef HAL_I2C_Slave_Transmit_IT(I2C_HandleTypeDef *hi2c, uint8_t *pData, uint16_t Size) +{ + if (hi2c->State == HAL_I2C_STATE_READY) + { + /* Process Locked */ + __HAL_LOCK(hi2c); + + hi2c->State = HAL_I2C_STATE_BUSY_TX; + hi2c->Mode = HAL_I2C_MODE_SLAVE; + hi2c->ErrorCode = HAL_I2C_ERROR_NONE; + + /* Enable Address Acknowledge */ + hi2c->Instance->CR2 &= ~I2C_CR2_NACK; + + /* Prepare transfer parameters */ + hi2c->pBuffPtr = pData; + hi2c->XferCount = Size; + hi2c->XferSize = hi2c->XferCount; + hi2c->XferOptions = I2C_NO_OPTION_FRAME; + hi2c->XferISR = I2C_Slave_ISR_IT; + + /* Process Unlocked */ + __HAL_UNLOCK(hi2c); + + /* Note : The I2C interrupts must be enabled after unlocking current process + to avoid the risk of I2C interrupt handle execution before current + process unlock */ + + /* Enable ERR, TC, STOP, NACK, TXI interrupt */ + /* possible to enable all of these */ + /* I2C_IT_ERRI | I2C_IT_TCI | I2C_IT_STOPI | I2C_IT_NACKI | + I2C_IT_ADDRI | I2C_IT_RXI | I2C_IT_TXI */ + I2C_Enable_IRQ(hi2c, I2C_XFER_TX_IT | I2C_XFER_LISTEN_IT); + + return HAL_OK; + } + else + { + return HAL_BUSY; + } +} + +/** + * @brief Receive in slave mode an amount of data in non-blocking mode with Interrupt + * @param hi2c Pointer to a I2C_HandleTypeDef structure that contains + * the configuration information for the specified I2C. 
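+  *
+  * @note  Editor's illustrative sketch (not ST documentation): arming a slave reception and
+  *        consuming the data once the weak completion callback signals it (same interrupt
+  *        wiring assumptions as for the master _IT calls, USE_HAL_I2C_REGISTER_CALLBACKS = 0):
+  * @code
+  *        extern I2C_HandleTypeDef hi2c1;            // assumed configured as a slave elsewhere
+  *        static uint8_t slave_rx[8];                // must stay valid while the transfer runs
+  *        static volatile uint8_t rx_ready = 0U;
+  *
+  *        void HAL_I2C_SlaveRxCpltCallback(I2C_HandleTypeDef *hi2c)
+  *        {
+  *          rx_ready = 1U;                           // slave_rx now holds the master's bytes
+  *        }
+  *
+  *        void App_ArmSlaveReception(void)
+  *        {
+  *          rx_ready = 0U;
+  *          (void)HAL_I2C_Slave_Receive_IT(&hi2c1, slave_rx, (uint16_t)sizeof(slave_rx));
+  *          // process slave_rx in the main loop once rx_ready becomes 1, then re-arm
+  *        }
+  * @endcode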
+ * @param pData Pointer to data buffer + * @param Size Amount of data to be sent + * @retval HAL status + */ +HAL_StatusTypeDef HAL_I2C_Slave_Receive_IT(I2C_HandleTypeDef *hi2c, uint8_t *pData, uint16_t Size) +{ + if (hi2c->State == HAL_I2C_STATE_READY) + { + /* Process Locked */ + __HAL_LOCK(hi2c); + + hi2c->State = HAL_I2C_STATE_BUSY_RX; + hi2c->Mode = HAL_I2C_MODE_SLAVE; + hi2c->ErrorCode = HAL_I2C_ERROR_NONE; + + /* Enable Address Acknowledge */ + hi2c->Instance->CR2 &= ~I2C_CR2_NACK; + + /* Prepare transfer parameters */ + hi2c->pBuffPtr = pData; + hi2c->XferCount = Size; + hi2c->XferSize = hi2c->XferCount; + hi2c->XferOptions = I2C_NO_OPTION_FRAME; + hi2c->XferISR = I2C_Slave_ISR_IT; + + /* Process Unlocked */ + __HAL_UNLOCK(hi2c); + + /* Note : The I2C interrupts must be enabled after unlocking current process + to avoid the risk of I2C interrupt handle execution before current + process unlock */ + + /* Enable ERR, TC, STOP, NACK, RXI interrupt */ + /* possible to enable all of these */ + /* I2C_IT_ERRI | I2C_IT_TCI | I2C_IT_STOPI | I2C_IT_NACKI | + I2C_IT_ADDRI | I2C_IT_RXI | I2C_IT_TXI */ + I2C_Enable_IRQ(hi2c, I2C_XFER_RX_IT | I2C_XFER_LISTEN_IT); + + return HAL_OK; + } + else + { + return HAL_BUSY; + } +} + +/** + * @brief Transmit in master mode an amount of data in non-blocking mode with DMA + * @param hi2c Pointer to a I2C_HandleTypeDef structure that contains + * the configuration information for the specified I2C. + * @param DevAddress Target device address: The device 7 bits address value + * in datasheet must be shifted to the left before calling the interface + * @param pData Pointer to data buffer + * @param Size Amount of data to be sent + * @retval HAL status + */ +HAL_StatusTypeDef HAL_I2C_Master_Transmit_DMA(I2C_HandleTypeDef *hi2c, uint16_t DevAddress, uint8_t *pData, + uint16_t Size) +{ + uint32_t xfermode; + HAL_StatusTypeDef dmaxferstatus; + + if (hi2c->State == HAL_I2C_STATE_READY) + { + if (__HAL_I2C_GET_FLAG(hi2c, I2C_FLAG_BUSY) == SET) + { + return HAL_BUSY; + } + + /* Process Locked */ + __HAL_LOCK(hi2c); + + hi2c->State = HAL_I2C_STATE_BUSY_TX; + hi2c->Mode = HAL_I2C_MODE_MASTER; + hi2c->ErrorCode = HAL_I2C_ERROR_NONE; + + /* Prepare transfer parameters */ + hi2c->pBuffPtr = pData; + hi2c->XferCount = Size; + hi2c->XferOptions = I2C_NO_OPTION_FRAME; + hi2c->XferISR = I2C_Master_ISR_DMA; + + if (hi2c->XferCount > MAX_NBYTE_SIZE) + { + hi2c->XferSize = MAX_NBYTE_SIZE; + xfermode = I2C_RELOAD_MODE; + } + else + { + hi2c->XferSize = hi2c->XferCount; + xfermode = I2C_AUTOEND_MODE; + } + + if (hi2c->XferSize > 0U) + { + if (hi2c->hdmatx != NULL) + { + /* Set the I2C DMA transfer complete callback */ + hi2c->hdmatx->XferCpltCallback = I2C_DMAMasterTransmitCplt; + + /* Set the DMA error callback */ + hi2c->hdmatx->XferErrorCallback = I2C_DMAError; + + /* Set the unused DMA callbacks to NULL */ + hi2c->hdmatx->XferHalfCpltCallback = NULL; + hi2c->hdmatx->XferAbortCallback = NULL; + + /* Enable the DMA stream */ + dmaxferstatus = HAL_DMA_Start_IT(hi2c->hdmatx, (uint32_t)pData, (uint32_t)&hi2c->Instance->TXDR, + hi2c->XferSize); + } + else + { + /* Update I2C state */ + hi2c->State = HAL_I2C_STATE_READY; + hi2c->Mode = HAL_I2C_MODE_NONE; + + /* Update I2C error code */ + hi2c->ErrorCode |= HAL_I2C_ERROR_DMA_PARAM; + + /* Process Unlocked */ + __HAL_UNLOCK(hi2c); + + return HAL_ERROR; + } + + if (dmaxferstatus == HAL_OK) + { + /* Send Slave Address */ + /* Set NBYTES to write and reload if hi2c->XferCount > MAX_NBYTE_SIZE and generate RESTART */ + 
I2C_TransferConfig(hi2c, DevAddress, (uint8_t)hi2c->XferSize, xfermode, I2C_GENERATE_START_WRITE); + + /* Update XferCount value */ + hi2c->XferCount -= hi2c->XferSize; + + /* Process Unlocked */ + __HAL_UNLOCK(hi2c); + + /* Note : The I2C interrupts must be enabled after unlocking current process + to avoid the risk of I2C interrupt handle execution before current + process unlock */ + /* Enable ERR and NACK interrupts */ + I2C_Enable_IRQ(hi2c, I2C_XFER_ERROR_IT); + + /* Enable DMA Request */ + hi2c->Instance->CR1 |= I2C_CR1_TXDMAEN; + } + else + { + /* Update I2C state */ + hi2c->State = HAL_I2C_STATE_READY; + hi2c->Mode = HAL_I2C_MODE_NONE; + + /* Update I2C error code */ + hi2c->ErrorCode |= HAL_I2C_ERROR_DMA; + + /* Process Unlocked */ + __HAL_UNLOCK(hi2c); + + return HAL_ERROR; + } + } + else + { + /* Update Transfer ISR function pointer */ + hi2c->XferISR = I2C_Master_ISR_IT; + + /* Send Slave Address */ + /* Set NBYTES to write and generate START condition */ + I2C_TransferConfig(hi2c, DevAddress, (uint8_t)hi2c->XferSize, I2C_AUTOEND_MODE, + I2C_GENERATE_START_WRITE); + + /* Process Unlocked */ + __HAL_UNLOCK(hi2c); + + /* Note : The I2C interrupts must be enabled after unlocking current process + to avoid the risk of I2C interrupt handle execution before current + process unlock */ + /* Enable ERR, TC, STOP, NACK, TXI interrupt */ + /* possible to enable all of these */ + /* I2C_IT_ERRI | I2C_IT_TCI | I2C_IT_STOPI | I2C_IT_NACKI | + I2C_IT_ADDRI | I2C_IT_RXI | I2C_IT_TXI */ + I2C_Enable_IRQ(hi2c, I2C_XFER_TX_IT); + } + + return HAL_OK; + } + else + { + return HAL_BUSY; + } +} + +/** + * @brief Receive in master mode an amount of data in non-blocking mode with DMA + * @param hi2c Pointer to a I2C_HandleTypeDef structure that contains + * the configuration information for the specified I2C. 
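+  *
+  * @note  Editor's illustrative sketch (not ST documentation): the DMA variant additionally
+  *        requires hi2c->hdmarx to be linked to a configured DMA channel beforehand (typically
+  *        via __HAL_LINKDMA() in HAL_I2C_MspInit(), as CubeMX generates); completion is then
+  *        signalled through HAL_I2C_MasterRxCpltCallback():
+  * @code
+  *        extern I2C_HandleTypeDef hi2c1;            // assumed: hdmarx already linked in MspInit
+  *        static uint8_t dma_rx[32];                 // must stay valid until the callback fires
+  *
+  *        void HAL_I2C_MasterRxCpltCallback(I2C_HandleTypeDef *hi2c)
+  *        {
+  *          // dma_rx is now filled; hand it to the application here
+  *        }
+  *
+  *        void App_StartDmaRead(void)
+  *        {
+  *          if (HAL_I2C_Master_Receive_DMA(&hi2c1, (uint16_t)(0x48U << 1),
+  *                                         dma_rx, (uint16_t)sizeof(dma_rx)) != HAL_OK)
+  *          {
+  *            // missing DMA handle or peripheral busy; see hi2c1.ErrorCode
+  *          }
+  *        }
+  * @endcode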
+ * @param DevAddress Target device address: The device 7 bits address value + * in datasheet must be shifted to the left before calling the interface + * @param pData Pointer to data buffer + * @param Size Amount of data to be sent + * @retval HAL status + */ +HAL_StatusTypeDef HAL_I2C_Master_Receive_DMA(I2C_HandleTypeDef *hi2c, uint16_t DevAddress, uint8_t *pData, + uint16_t Size) +{ + uint32_t xfermode; + HAL_StatusTypeDef dmaxferstatus; + + if (hi2c->State == HAL_I2C_STATE_READY) + { + if (__HAL_I2C_GET_FLAG(hi2c, I2C_FLAG_BUSY) == SET) + { + return HAL_BUSY; + } + + /* Process Locked */ + __HAL_LOCK(hi2c); + + hi2c->State = HAL_I2C_STATE_BUSY_RX; + hi2c->Mode = HAL_I2C_MODE_MASTER; + hi2c->ErrorCode = HAL_I2C_ERROR_NONE; + + /* Prepare transfer parameters */ + hi2c->pBuffPtr = pData; + hi2c->XferCount = Size; + hi2c->XferOptions = I2C_NO_OPTION_FRAME; + hi2c->XferISR = I2C_Master_ISR_DMA; + + if (hi2c->XferCount > MAX_NBYTE_SIZE) + { + hi2c->XferSize = MAX_NBYTE_SIZE; + xfermode = I2C_RELOAD_MODE; + } + else + { + hi2c->XferSize = hi2c->XferCount; + xfermode = I2C_AUTOEND_MODE; + } + + if (hi2c->XferSize > 0U) + { + if (hi2c->hdmarx != NULL) + { + /* Set the I2C DMA transfer complete callback */ + hi2c->hdmarx->XferCpltCallback = I2C_DMAMasterReceiveCplt; + + /* Set the DMA error callback */ + hi2c->hdmarx->XferErrorCallback = I2C_DMAError; + + /* Set the unused DMA callbacks to NULL */ + hi2c->hdmarx->XferHalfCpltCallback = NULL; + hi2c->hdmarx->XferAbortCallback = NULL; + + /* Enable the DMA stream */ + dmaxferstatus = HAL_DMA_Start_IT(hi2c->hdmarx, (uint32_t)&hi2c->Instance->RXDR, (uint32_t)pData, + hi2c->XferSize); + } + else + { + /* Update I2C state */ + hi2c->State = HAL_I2C_STATE_READY; + hi2c->Mode = HAL_I2C_MODE_NONE; + + /* Update I2C error code */ + hi2c->ErrorCode |= HAL_I2C_ERROR_DMA_PARAM; + + /* Process Unlocked */ + __HAL_UNLOCK(hi2c); + + return HAL_ERROR; + } + + if (dmaxferstatus == HAL_OK) + { + /* Send Slave Address */ + /* Set NBYTES to read and reload if hi2c->XferCount > MAX_NBYTE_SIZE and generate RESTART */ + I2C_TransferConfig(hi2c, DevAddress, (uint8_t)hi2c->XferSize, xfermode, I2C_GENERATE_START_READ); + + /* Update XferCount value */ + hi2c->XferCount -= hi2c->XferSize; + + /* Process Unlocked */ + __HAL_UNLOCK(hi2c); + + /* Note : The I2C interrupts must be enabled after unlocking current process + to avoid the risk of I2C interrupt handle execution before current + process unlock */ + /* Enable ERR and NACK interrupts */ + I2C_Enable_IRQ(hi2c, I2C_XFER_ERROR_IT); + + /* Enable DMA Request */ + hi2c->Instance->CR1 |= I2C_CR1_RXDMAEN; + } + else + { + /* Update I2C state */ + hi2c->State = HAL_I2C_STATE_READY; + hi2c->Mode = HAL_I2C_MODE_NONE; + + /* Update I2C error code */ + hi2c->ErrorCode |= HAL_I2C_ERROR_DMA; + + /* Process Unlocked */ + __HAL_UNLOCK(hi2c); + + return HAL_ERROR; + } + } + else + { + /* Update Transfer ISR function pointer */ + hi2c->XferISR = I2C_Master_ISR_IT; + + /* Send Slave Address */ + /* Set NBYTES to read and generate START condition */ + I2C_TransferConfig(hi2c, DevAddress, (uint8_t)hi2c->XferSize, I2C_AUTOEND_MODE, + I2C_GENERATE_START_READ); + + /* Process Unlocked */ + __HAL_UNLOCK(hi2c); + + /* Note : The I2C interrupts must be enabled after unlocking current process + to avoid the risk of I2C interrupt handle execution before current + process unlock */ + /* Enable ERR, TC, STOP, NACK, TXI interrupt */ + /* possible to enable all of these */ + /* I2C_IT_ERRI | I2C_IT_TCI | I2C_IT_STOPI | I2C_IT_NACKI | + I2C_IT_ADDRI | 
I2C_IT_RXI | I2C_IT_TXI */ + I2C_Enable_IRQ(hi2c, I2C_XFER_TX_IT); + } + + return HAL_OK; + } + else + { + return HAL_BUSY; + } +} + +/** + * @brief Transmit in slave mode an amount of data in non-blocking mode with DMA + * @param hi2c Pointer to a I2C_HandleTypeDef structure that contains + * the configuration information for the specified I2C. + * @param pData Pointer to data buffer + * @param Size Amount of data to be sent + * @retval HAL status + */ +HAL_StatusTypeDef HAL_I2C_Slave_Transmit_DMA(I2C_HandleTypeDef *hi2c, uint8_t *pData, uint16_t Size) +{ + HAL_StatusTypeDef dmaxferstatus; + + if (hi2c->State == HAL_I2C_STATE_READY) + { + if ((pData == NULL) || (Size == 0U)) + { + hi2c->ErrorCode = HAL_I2C_ERROR_INVALID_PARAM; + return HAL_ERROR; + } + /* Process Locked */ + __HAL_LOCK(hi2c); + + hi2c->State = HAL_I2C_STATE_BUSY_TX; + hi2c->Mode = HAL_I2C_MODE_SLAVE; + hi2c->ErrorCode = HAL_I2C_ERROR_NONE; + + /* Prepare transfer parameters */ + hi2c->pBuffPtr = pData; + hi2c->XferCount = Size; + hi2c->XferSize = hi2c->XferCount; + hi2c->XferOptions = I2C_NO_OPTION_FRAME; + hi2c->XferISR = I2C_Slave_ISR_DMA; + + if (hi2c->hdmatx != NULL) + { + /* Set the I2C DMA transfer complete callback */ + hi2c->hdmatx->XferCpltCallback = I2C_DMASlaveTransmitCplt; + + /* Set the DMA error callback */ + hi2c->hdmatx->XferErrorCallback = I2C_DMAError; + + /* Set the unused DMA callbacks to NULL */ + hi2c->hdmatx->XferHalfCpltCallback = NULL; + hi2c->hdmatx->XferAbortCallback = NULL; + + /* Enable the DMA stream */ + dmaxferstatus = HAL_DMA_Start_IT(hi2c->hdmatx, (uint32_t)pData, (uint32_t)&hi2c->Instance->TXDR, + hi2c->XferSize); + } + else + { + /* Update I2C state */ + hi2c->State = HAL_I2C_STATE_LISTEN; + hi2c->Mode = HAL_I2C_MODE_NONE; + + /* Update I2C error code */ + hi2c->ErrorCode |= HAL_I2C_ERROR_DMA_PARAM; + + /* Process Unlocked */ + __HAL_UNLOCK(hi2c); + + return HAL_ERROR; + } + + if (dmaxferstatus == HAL_OK) + { + /* Enable Address Acknowledge */ + hi2c->Instance->CR2 &= ~I2C_CR2_NACK; + + /* Process Unlocked */ + __HAL_UNLOCK(hi2c); + + /* Note : The I2C interrupts must be enabled after unlocking current process + to avoid the risk of I2C interrupt handle execution before current + process unlock */ + /* Enable ERR, STOP, NACK, ADDR interrupts */ + I2C_Enable_IRQ(hi2c, I2C_XFER_LISTEN_IT); + + /* Enable DMA Request */ + hi2c->Instance->CR1 |= I2C_CR1_TXDMAEN; + } + else + { + /* Update I2C state */ + hi2c->State = HAL_I2C_STATE_LISTEN; + hi2c->Mode = HAL_I2C_MODE_NONE; + + /* Update I2C error code */ + hi2c->ErrorCode |= HAL_I2C_ERROR_DMA; + + /* Process Unlocked */ + __HAL_UNLOCK(hi2c); + + return HAL_ERROR; + } + + return HAL_OK; + } + else + { + return HAL_BUSY; + } +} + +/** + * @brief Receive in slave mode an amount of data in non-blocking mode with DMA + * @param hi2c Pointer to a I2C_HandleTypeDef structure that contains + * the configuration information for the specified I2C. 
+ * @param pData Pointer to data buffer + * @param Size Amount of data to be sent + * @retval HAL status + */ +HAL_StatusTypeDef HAL_I2C_Slave_Receive_DMA(I2C_HandleTypeDef *hi2c, uint8_t *pData, uint16_t Size) +{ + HAL_StatusTypeDef dmaxferstatus; + + if (hi2c->State == HAL_I2C_STATE_READY) + { + if ((pData == NULL) || (Size == 0U)) + { + hi2c->ErrorCode = HAL_I2C_ERROR_INVALID_PARAM; + return HAL_ERROR; + } + /* Process Locked */ + __HAL_LOCK(hi2c); + + hi2c->State = HAL_I2C_STATE_BUSY_RX; + hi2c->Mode = HAL_I2C_MODE_SLAVE; + hi2c->ErrorCode = HAL_I2C_ERROR_NONE; + + /* Prepare transfer parameters */ + hi2c->pBuffPtr = pData; + hi2c->XferCount = Size; + hi2c->XferSize = hi2c->XferCount; + hi2c->XferOptions = I2C_NO_OPTION_FRAME; + hi2c->XferISR = I2C_Slave_ISR_DMA; + + if (hi2c->hdmarx != NULL) + { + /* Set the I2C DMA transfer complete callback */ + hi2c->hdmarx->XferCpltCallback = I2C_DMASlaveReceiveCplt; + + /* Set the DMA error callback */ + hi2c->hdmarx->XferErrorCallback = I2C_DMAError; + + /* Set the unused DMA callbacks to NULL */ + hi2c->hdmarx->XferHalfCpltCallback = NULL; + hi2c->hdmarx->XferAbortCallback = NULL; + + /* Enable the DMA stream */ + dmaxferstatus = HAL_DMA_Start_IT(hi2c->hdmarx, (uint32_t)&hi2c->Instance->RXDR, (uint32_t)pData, + hi2c->XferSize); + } + else + { + /* Update I2C state */ + hi2c->State = HAL_I2C_STATE_LISTEN; + hi2c->Mode = HAL_I2C_MODE_NONE; + + /* Update I2C error code */ + hi2c->ErrorCode |= HAL_I2C_ERROR_DMA_PARAM; + + /* Process Unlocked */ + __HAL_UNLOCK(hi2c); + + return HAL_ERROR; + } + + if (dmaxferstatus == HAL_OK) + { + /* Enable Address Acknowledge */ + hi2c->Instance->CR2 &= ~I2C_CR2_NACK; + + /* Process Unlocked */ + __HAL_UNLOCK(hi2c); + + /* Note : The I2C interrupts must be enabled after unlocking current process + to avoid the risk of I2C interrupt handle execution before current + process unlock */ + /* Enable ERR, STOP, NACK, ADDR interrupts */ + I2C_Enable_IRQ(hi2c, I2C_XFER_LISTEN_IT); + + /* Enable DMA Request */ + hi2c->Instance->CR1 |= I2C_CR1_RXDMAEN; + } + else + { + /* Update I2C state */ + hi2c->State = HAL_I2C_STATE_LISTEN; + hi2c->Mode = HAL_I2C_MODE_NONE; + + /* Update I2C error code */ + hi2c->ErrorCode |= HAL_I2C_ERROR_DMA; + + /* Process Unlocked */ + __HAL_UNLOCK(hi2c); + + return HAL_ERROR; + } + + return HAL_OK; + } + else + { + return HAL_BUSY; + } +} +/** + * @brief Write an amount of data in blocking mode to a specific memory address + * @param hi2c Pointer to a I2C_HandleTypeDef structure that contains + * the configuration information for the specified I2C. 
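+  *
+  * @note  Editor's illustrative sketch (not ST documentation): a typical use is writing a page
+  *        to an I2C EEPROM with 16-bit internal addressing and then polling with
+  *        HAL_I2C_IsDeviceReady() until the internal write cycle finishes (the 7-bit address
+  *        0x50, the memory address and the page size are assumptions of the example):
+  * @code
+  *        extern I2C_HandleTypeDef hi2c1;            // assumed configured elsewhere
+  *
+  *        void App_WriteEepromPage(void)
+  *        {
+  *          uint8_t page[16] = { 0U };
+  *          if (HAL_I2C_Mem_Write(&hi2c1, (uint16_t)(0x50U << 1), 0x0100U,
+  *                                I2C_MEMADD_SIZE_16BIT, page, (uint16_t)sizeof(page),
+  *                                100U) == HAL_OK)
+  *          {
+  *            // wait until the EEPROM acknowledges again, i.e. the write cycle is over
+  *            (void)HAL_I2C_IsDeviceReady(&hi2c1, (uint16_t)(0x50U << 1), 10U, 10U);
+  *          }
+  *        }
+  * @endcode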
+ * @param DevAddress Target device address: The device 7 bits address value + * in datasheet must be shifted to the left before calling the interface + * @param MemAddress Internal memory address + * @param MemAddSize Size of internal memory address + * @param pData Pointer to data buffer + * @param Size Amount of data to be sent + * @param Timeout Timeout duration + * @retval HAL status + */ +HAL_StatusTypeDef HAL_I2C_Mem_Write(I2C_HandleTypeDef *hi2c, uint16_t DevAddress, uint16_t MemAddress, + uint16_t MemAddSize, uint8_t *pData, uint16_t Size, uint32_t Timeout) +{ + uint32_t tickstart; + + /* Check the parameters */ + assert_param(IS_I2C_MEMADD_SIZE(MemAddSize)); + + if (hi2c->State == HAL_I2C_STATE_READY) + { + if ((pData == NULL) || (Size == 0U)) + { + hi2c->ErrorCode = HAL_I2C_ERROR_INVALID_PARAM; + return HAL_ERROR; + } + + /* Process Locked */ + __HAL_LOCK(hi2c); + + /* Init tickstart for timeout management*/ + tickstart = HAL_GetTick(); + + if (I2C_WaitOnFlagUntilTimeout(hi2c, I2C_FLAG_BUSY, SET, I2C_TIMEOUT_BUSY, tickstart) != HAL_OK) + { + return HAL_ERROR; + } + + hi2c->State = HAL_I2C_STATE_BUSY_TX; + hi2c->Mode = HAL_I2C_MODE_MEM; + hi2c->ErrorCode = HAL_I2C_ERROR_NONE; + + /* Prepare transfer parameters */ + hi2c->pBuffPtr = pData; + hi2c->XferCount = Size; + hi2c->XferISR = NULL; + + /* Send Slave Address and Memory Address */ + if (I2C_RequestMemoryWrite(hi2c, DevAddress, MemAddress, MemAddSize, Timeout, tickstart) != HAL_OK) + { + /* Process Unlocked */ + __HAL_UNLOCK(hi2c); + return HAL_ERROR; + } + + /* Set NBYTES to write and reload if hi2c->XferCount > MAX_NBYTE_SIZE */ + if (hi2c->XferCount > MAX_NBYTE_SIZE) + { + hi2c->XferSize = MAX_NBYTE_SIZE; + I2C_TransferConfig(hi2c, DevAddress, (uint8_t)hi2c->XferSize, I2C_RELOAD_MODE, I2C_NO_STARTSTOP); + } + else + { + hi2c->XferSize = hi2c->XferCount; + I2C_TransferConfig(hi2c, DevAddress, (uint8_t)hi2c->XferSize, I2C_AUTOEND_MODE, I2C_NO_STARTSTOP); + } + + do + { + /* Wait until TXIS flag is set */ + if (I2C_WaitOnTXISFlagUntilTimeout(hi2c, Timeout, tickstart) != HAL_OK) + { + return HAL_ERROR; + } + + /* Write data to TXDR */ + hi2c->Instance->TXDR = *hi2c->pBuffPtr; + + /* Increment Buffer pointer */ + hi2c->pBuffPtr++; + + hi2c->XferCount--; + hi2c->XferSize--; + + if ((hi2c->XferCount != 0U) && (hi2c->XferSize == 0U)) + { + /* Wait until TCR flag is set */ + if (I2C_WaitOnFlagUntilTimeout(hi2c, I2C_FLAG_TCR, RESET, Timeout, tickstart) != HAL_OK) + { + return HAL_ERROR; + } + + if (hi2c->XferCount > MAX_NBYTE_SIZE) + { + hi2c->XferSize = MAX_NBYTE_SIZE; + I2C_TransferConfig(hi2c, DevAddress, (uint8_t)hi2c->XferSize, I2C_RELOAD_MODE, + I2C_NO_STARTSTOP); + } + else + { + hi2c->XferSize = hi2c->XferCount; + I2C_TransferConfig(hi2c, DevAddress, (uint8_t)hi2c->XferSize, I2C_AUTOEND_MODE, + I2C_NO_STARTSTOP); + } + } + + } while (hi2c->XferCount > 0U); + + /* No need to Check TC flag, with AUTOEND mode the stop is automatically generated */ + /* Wait until STOPF flag is reset */ + if (I2C_WaitOnSTOPFlagUntilTimeout(hi2c, Timeout, tickstart) != HAL_OK) + { + return HAL_ERROR; + } + + /* Clear STOP Flag */ + __HAL_I2C_CLEAR_FLAG(hi2c, I2C_FLAG_STOPF); + + /* Clear Configuration Register 2 */ + I2C_RESET_CR2(hi2c); + + hi2c->State = HAL_I2C_STATE_READY; + hi2c->Mode = HAL_I2C_MODE_NONE; + + /* Process Unlocked */ + __HAL_UNLOCK(hi2c); + + return HAL_OK; + } + else + { + return HAL_BUSY; + } +} + +/** + * @brief Read an amount of data in blocking mode from a specific memory address + * @param hi2c Pointer to a 
I2C_HandleTypeDef structure that contains + * the configuration information for the specified I2C. + * @param DevAddress Target device address: The device 7 bits address value + * in datasheet must be shifted to the left before calling the interface + * @param MemAddress Internal memory address + * @param MemAddSize Size of internal memory address + * @param pData Pointer to data buffer + * @param Size Amount of data to be sent + * @param Timeout Timeout duration + * @retval HAL status + */ +HAL_StatusTypeDef HAL_I2C_Mem_Read(I2C_HandleTypeDef *hi2c, uint16_t DevAddress, uint16_t MemAddress, + uint16_t MemAddSize, uint8_t *pData, uint16_t Size, uint32_t Timeout) +{ + uint32_t tickstart; + + /* Check the parameters */ + assert_param(IS_I2C_MEMADD_SIZE(MemAddSize)); + + if (hi2c->State == HAL_I2C_STATE_READY) + { + if ((pData == NULL) || (Size == 0U)) + { + hi2c->ErrorCode = HAL_I2C_ERROR_INVALID_PARAM; + return HAL_ERROR; + } + + /* Process Locked */ + __HAL_LOCK(hi2c); + + /* Init tickstart for timeout management*/ + tickstart = HAL_GetTick(); + + if (I2C_WaitOnFlagUntilTimeout(hi2c, I2C_FLAG_BUSY, SET, I2C_TIMEOUT_BUSY, tickstart) != HAL_OK) + { + return HAL_ERROR; + } + + hi2c->State = HAL_I2C_STATE_BUSY_RX; + hi2c->Mode = HAL_I2C_MODE_MEM; + hi2c->ErrorCode = HAL_I2C_ERROR_NONE; + + /* Prepare transfer parameters */ + hi2c->pBuffPtr = pData; + hi2c->XferCount = Size; + hi2c->XferISR = NULL; + + /* Send Slave Address and Memory Address */ + if (I2C_RequestMemoryRead(hi2c, DevAddress, MemAddress, MemAddSize, Timeout, tickstart) != HAL_OK) + { + /* Process Unlocked */ + __HAL_UNLOCK(hi2c); + return HAL_ERROR; + } + + /* Send Slave Address */ + /* Set NBYTES to write and reload if hi2c->XferCount > MAX_NBYTE_SIZE and generate RESTART */ + if (hi2c->XferCount > MAX_NBYTE_SIZE) + { + hi2c->XferSize = MAX_NBYTE_SIZE; + I2C_TransferConfig(hi2c, DevAddress, (uint8_t)hi2c->XferSize, I2C_RELOAD_MODE, + I2C_GENERATE_START_READ); + } + else + { + hi2c->XferSize = hi2c->XferCount; + I2C_TransferConfig(hi2c, DevAddress, (uint8_t)hi2c->XferSize, I2C_AUTOEND_MODE, + I2C_GENERATE_START_READ); + } + + do + { + /* Wait until RXNE flag is set */ + if (I2C_WaitOnFlagUntilTimeout(hi2c, I2C_FLAG_RXNE, RESET, Timeout, tickstart) != HAL_OK) + { + return HAL_ERROR; + } + + /* Read data from RXDR */ + *hi2c->pBuffPtr = (uint8_t)hi2c->Instance->RXDR; + + /* Increment Buffer pointer */ + hi2c->pBuffPtr++; + + hi2c->XferSize--; + hi2c->XferCount--; + + if ((hi2c->XferCount != 0U) && (hi2c->XferSize == 0U)) + { + /* Wait until TCR flag is set */ + if (I2C_WaitOnFlagUntilTimeout(hi2c, I2C_FLAG_TCR, RESET, Timeout, tickstart) != HAL_OK) + { + return HAL_ERROR; + } + + if (hi2c->XferCount > MAX_NBYTE_SIZE) + { + hi2c->XferSize = MAX_NBYTE_SIZE; + I2C_TransferConfig(hi2c, DevAddress, (uint8_t) hi2c->XferSize, I2C_RELOAD_MODE, + I2C_NO_STARTSTOP); + } + else + { + hi2c->XferSize = hi2c->XferCount; + I2C_TransferConfig(hi2c, DevAddress, (uint8_t)hi2c->XferSize, I2C_AUTOEND_MODE, + I2C_NO_STARTSTOP); + } + } + } while (hi2c->XferCount > 0U); + + /* No need to Check TC flag, with AUTOEND mode the stop is automatically generated */ + /* Wait until STOPF flag is reset */ + if (I2C_WaitOnSTOPFlagUntilTimeout(hi2c, Timeout, tickstart) != HAL_OK) + { + return HAL_ERROR; + } + + /* Clear STOP Flag */ + __HAL_I2C_CLEAR_FLAG(hi2c, I2C_FLAG_STOPF); + + /* Clear Configuration Register 2 */ + I2C_RESET_CR2(hi2c); + + hi2c->State = HAL_I2C_STATE_READY; + hi2c->Mode = HAL_I2C_MODE_NONE; + + /* Process Unlocked */ + __HAL_UNLOCK(hi2c); + + 
return HAL_OK; + } + else + { + return HAL_BUSY; + } +} +/** + * @brief Write an amount of data in non-blocking mode with Interrupt to a specific memory address + * @param hi2c Pointer to a I2C_HandleTypeDef structure that contains + * the configuration information for the specified I2C. + * @param DevAddress Target device address: The device 7 bits address value + * in datasheet must be shifted to the left before calling the interface + * @param MemAddress Internal memory address + * @param MemAddSize Size of internal memory address + * @param pData Pointer to data buffer + * @param Size Amount of data to be sent + * @retval HAL status + */ +HAL_StatusTypeDef HAL_I2C_Mem_Write_IT(I2C_HandleTypeDef *hi2c, uint16_t DevAddress, uint16_t MemAddress, + uint16_t MemAddSize, uint8_t *pData, uint16_t Size) +{ + uint32_t tickstart; + uint32_t xfermode; + + /* Check the parameters */ + assert_param(IS_I2C_MEMADD_SIZE(MemAddSize)); + + if (hi2c->State == HAL_I2C_STATE_READY) + { + if ((pData == NULL) || (Size == 0U)) + { + hi2c->ErrorCode = HAL_I2C_ERROR_INVALID_PARAM; + return HAL_ERROR; + } + + if (__HAL_I2C_GET_FLAG(hi2c, I2C_FLAG_BUSY) == SET) + { + return HAL_BUSY; + } + + /* Process Locked */ + __HAL_LOCK(hi2c); + + /* Init tickstart for timeout management*/ + tickstart = HAL_GetTick(); + + hi2c->State = HAL_I2C_STATE_BUSY_TX; + hi2c->Mode = HAL_I2C_MODE_MEM; + hi2c->ErrorCode = HAL_I2C_ERROR_NONE; + + /* Prepare transfer parameters */ + hi2c->pBuffPtr = pData; + hi2c->XferCount = Size; + hi2c->XferOptions = I2C_NO_OPTION_FRAME; + hi2c->XferISR = I2C_Master_ISR_IT; + + if (hi2c->XferCount > MAX_NBYTE_SIZE) + { + hi2c->XferSize = MAX_NBYTE_SIZE; + xfermode = I2C_RELOAD_MODE; + } + else + { + hi2c->XferSize = hi2c->XferCount; + xfermode = I2C_AUTOEND_MODE; + } + + /* Send Slave Address and Memory Address */ + if (I2C_RequestMemoryWrite(hi2c, DevAddress, MemAddress, MemAddSize, I2C_TIMEOUT_FLAG, tickstart) + != HAL_OK) + { + /* Process Unlocked */ + __HAL_UNLOCK(hi2c); + return HAL_ERROR; + } + + /* Set NBYTES to write and reload if hi2c->XferCount > MAX_NBYTE_SIZE and generate RESTART */ + I2C_TransferConfig(hi2c, DevAddress, (uint8_t)hi2c->XferSize, xfermode, I2C_NO_STARTSTOP); + + /* Process Unlocked */ + __HAL_UNLOCK(hi2c); + + /* Note : The I2C interrupts must be enabled after unlocking current process + to avoid the risk of I2C interrupt handle execution before current + process unlock */ + + /* Enable ERR, TC, STOP, NACK, TXI interrupt */ + /* possible to enable all of these */ + /* I2C_IT_ERRI | I2C_IT_TCI | I2C_IT_STOPI | I2C_IT_NACKI | + I2C_IT_ADDRI | I2C_IT_RXI | I2C_IT_TXI */ + I2C_Enable_IRQ(hi2c, I2C_XFER_TX_IT); + + return HAL_OK; + } + else + { + return HAL_BUSY; + } +} + +/** + * @brief Read an amount of data in non-blocking mode with Interrupt from a specific memory address + * @param hi2c Pointer to a I2C_HandleTypeDef structure that contains + * the configuration information for the specified I2C. 
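+  *
+  * @note  Editor's illustrative sketch (not ST documentation): a non-blocking register read;
+  *        the value becomes valid when HAL_I2C_MemRxCpltCallback() runs (interrupt wiring and
+  *        USE_HAL_I2C_REGISTER_CALLBACKS = 0 assumed, register address 0x0F is an example):
+  * @code
+  *        extern I2C_HandleTypeDef hi2c1;            // assumed configured elsewhere
+  *        static uint8_t reg_val;                    // must stay valid until the callback fires
+  *
+  *        void HAL_I2C_MemRxCpltCallback(I2C_HandleTypeDef *hi2c)
+  *        {
+  *          // reg_val now holds the register content read from the device
+  *        }
+  *
+  *        void App_StartRegisterRead(void)
+  *        {
+  *          (void)HAL_I2C_Mem_Read_IT(&hi2c1, (uint16_t)(0x48U << 1),
+  *                                    0x0FU, I2C_MEMADD_SIZE_8BIT, &reg_val, 1U);
+  *        }
+  * @endcode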
+ * @param DevAddress Target device address: The device 7 bits address value + * in datasheet must be shifted to the left before calling the interface + * @param MemAddress Internal memory address + * @param MemAddSize Size of internal memory address + * @param pData Pointer to data buffer + * @param Size Amount of data to be sent + * @retval HAL status + */ +HAL_StatusTypeDef HAL_I2C_Mem_Read_IT(I2C_HandleTypeDef *hi2c, uint16_t DevAddress, uint16_t MemAddress, + uint16_t MemAddSize, uint8_t *pData, uint16_t Size) +{ + uint32_t tickstart; + uint32_t xfermode; + + /* Check the parameters */ + assert_param(IS_I2C_MEMADD_SIZE(MemAddSize)); + + if (hi2c->State == HAL_I2C_STATE_READY) + { + if ((pData == NULL) || (Size == 0U)) + { + hi2c->ErrorCode = HAL_I2C_ERROR_INVALID_PARAM; + return HAL_ERROR; + } + + if (__HAL_I2C_GET_FLAG(hi2c, I2C_FLAG_BUSY) == SET) + { + return HAL_BUSY; + } + + /* Process Locked */ + __HAL_LOCK(hi2c); + + /* Init tickstart for timeout management*/ + tickstart = HAL_GetTick(); + + hi2c->State = HAL_I2C_STATE_BUSY_RX; + hi2c->Mode = HAL_I2C_MODE_MEM; + hi2c->ErrorCode = HAL_I2C_ERROR_NONE; + + /* Prepare transfer parameters */ + hi2c->pBuffPtr = pData; + hi2c->XferCount = Size; + hi2c->XferOptions = I2C_NO_OPTION_FRAME; + hi2c->XferISR = I2C_Master_ISR_IT; + + if (hi2c->XferCount > MAX_NBYTE_SIZE) + { + hi2c->XferSize = MAX_NBYTE_SIZE; + xfermode = I2C_RELOAD_MODE; + } + else + { + hi2c->XferSize = hi2c->XferCount; + xfermode = I2C_AUTOEND_MODE; + } + + /* Send Slave Address and Memory Address */ + if (I2C_RequestMemoryRead(hi2c, DevAddress, MemAddress, MemAddSize, I2C_TIMEOUT_FLAG, tickstart) != HAL_OK) + { + /* Process Unlocked */ + __HAL_UNLOCK(hi2c); + return HAL_ERROR; + } + + /* Set NBYTES to write and reload if hi2c->XferCount > MAX_NBYTE_SIZE and generate RESTART */ + I2C_TransferConfig(hi2c, DevAddress, (uint8_t)hi2c->XferSize, xfermode, I2C_GENERATE_START_READ); + + /* Process Unlocked */ + __HAL_UNLOCK(hi2c); + + /* Note : The I2C interrupts must be enabled after unlocking current process + to avoid the risk of I2C interrupt handle execution before current + process unlock */ + + /* Enable ERR, TC, STOP, NACK, RXI interrupt */ + /* possible to enable all of these */ + /* I2C_IT_ERRI | I2C_IT_TCI | I2C_IT_STOPI | I2C_IT_NACKI | + I2C_IT_ADDRI | I2C_IT_RXI | I2C_IT_TXI */ + I2C_Enable_IRQ(hi2c, I2C_XFER_RX_IT); + + return HAL_OK; + } + else + { + return HAL_BUSY; + } +} +/** + * @brief Write an amount of data in non-blocking mode with DMA to a specific memory address + * @param hi2c Pointer to a I2C_HandleTypeDef structure that contains + * the configuration information for the specified I2C. 
+ * @param DevAddress Target device address: The device 7 bits address value + * in datasheet must be shifted to the left before calling the interface + * @param MemAddress Internal memory address + * @param MemAddSize Size of internal memory address + * @param pData Pointer to data buffer + * @param Size Amount of data to be sent + * @retval HAL status + */ +HAL_StatusTypeDef HAL_I2C_Mem_Write_DMA(I2C_HandleTypeDef *hi2c, uint16_t DevAddress, uint16_t MemAddress, + uint16_t MemAddSize, uint8_t *pData, uint16_t Size) +{ + uint32_t tickstart; + uint32_t xfermode; + HAL_StatusTypeDef dmaxferstatus; + + /* Check the parameters */ + assert_param(IS_I2C_MEMADD_SIZE(MemAddSize)); + + if (hi2c->State == HAL_I2C_STATE_READY) + { + if ((pData == NULL) || (Size == 0U)) + { + hi2c->ErrorCode = HAL_I2C_ERROR_INVALID_PARAM; + return HAL_ERROR; + } + + if (__HAL_I2C_GET_FLAG(hi2c, I2C_FLAG_BUSY) == SET) + { + return HAL_BUSY; + } + + /* Process Locked */ + __HAL_LOCK(hi2c); + + /* Init tickstart for timeout management*/ + tickstart = HAL_GetTick(); + + hi2c->State = HAL_I2C_STATE_BUSY_TX; + hi2c->Mode = HAL_I2C_MODE_MEM; + hi2c->ErrorCode = HAL_I2C_ERROR_NONE; + + /* Prepare transfer parameters */ + hi2c->pBuffPtr = pData; + hi2c->XferCount = Size; + hi2c->XferOptions = I2C_NO_OPTION_FRAME; + hi2c->XferISR = I2C_Master_ISR_DMA; + + if (hi2c->XferCount > MAX_NBYTE_SIZE) + { + hi2c->XferSize = MAX_NBYTE_SIZE; + xfermode = I2C_RELOAD_MODE; + } + else + { + hi2c->XferSize = hi2c->XferCount; + xfermode = I2C_AUTOEND_MODE; + } + + /* Send Slave Address and Memory Address */ + if (I2C_RequestMemoryWrite(hi2c, DevAddress, MemAddress, MemAddSize, I2C_TIMEOUT_FLAG, tickstart) + != HAL_OK) + { + /* Process Unlocked */ + __HAL_UNLOCK(hi2c); + return HAL_ERROR; + } + + + if (hi2c->hdmatx != NULL) + { + /* Set the I2C DMA transfer complete callback */ + hi2c->hdmatx->XferCpltCallback = I2C_DMAMasterTransmitCplt; + + /* Set the DMA error callback */ + hi2c->hdmatx->XferErrorCallback = I2C_DMAError; + + /* Set the unused DMA callbacks to NULL */ + hi2c->hdmatx->XferHalfCpltCallback = NULL; + hi2c->hdmatx->XferAbortCallback = NULL; + + /* Enable the DMA stream */ + dmaxferstatus = HAL_DMA_Start_IT(hi2c->hdmatx, (uint32_t)pData, (uint32_t)&hi2c->Instance->TXDR, + hi2c->XferSize); + } + else + { + /* Update I2C state */ + hi2c->State = HAL_I2C_STATE_READY; + hi2c->Mode = HAL_I2C_MODE_NONE; + + /* Update I2C error code */ + hi2c->ErrorCode |= HAL_I2C_ERROR_DMA_PARAM; + + /* Process Unlocked */ + __HAL_UNLOCK(hi2c); + + return HAL_ERROR; + } + + if (dmaxferstatus == HAL_OK) + { + /* Send Slave Address */ + /* Set NBYTES to write and reload if hi2c->XferCount > MAX_NBYTE_SIZE and generate RESTART */ + I2C_TransferConfig(hi2c, DevAddress, (uint8_t)hi2c->XferSize, xfermode, I2C_NO_STARTSTOP); + + /* Update XferCount value */ + hi2c->XferCount -= hi2c->XferSize; + + /* Process Unlocked */ + __HAL_UNLOCK(hi2c); + + /* Note : The I2C interrupts must be enabled after unlocking current process + to avoid the risk of I2C interrupt handle execution before current + process unlock */ + /* Enable ERR and NACK interrupts */ + I2C_Enable_IRQ(hi2c, I2C_XFER_ERROR_IT); + + /* Enable DMA Request */ + hi2c->Instance->CR1 |= I2C_CR1_TXDMAEN; + } + else + { + /* Update I2C state */ + hi2c->State = HAL_I2C_STATE_READY; + hi2c->Mode = HAL_I2C_MODE_NONE; + + /* Update I2C error code */ + hi2c->ErrorCode |= HAL_I2C_ERROR_DMA; + + /* Process Unlocked */ + __HAL_UNLOCK(hi2c); + + return HAL_ERROR; + } + + return HAL_OK; + } + else + { + return 
HAL_BUSY; + } +} + +/** + * @brief Reads an amount of data in non-blocking mode with DMA from a specific memory address. + * @param hi2c Pointer to a I2C_HandleTypeDef structure that contains + * the configuration information for the specified I2C. + * @param DevAddress Target device address: The device 7 bits address value + * in datasheet must be shifted to the left before calling the interface + * @param MemAddress Internal memory address + * @param MemAddSize Size of internal memory address + * @param pData Pointer to data buffer + * @param Size Amount of data to be read + * @retval HAL status + */ +HAL_StatusTypeDef HAL_I2C_Mem_Read_DMA(I2C_HandleTypeDef *hi2c, uint16_t DevAddress, uint16_t MemAddress, + uint16_t MemAddSize, uint8_t *pData, uint16_t Size) +{ + uint32_t tickstart; + uint32_t xfermode; + HAL_StatusTypeDef dmaxferstatus; + + /* Check the parameters */ + assert_param(IS_I2C_MEMADD_SIZE(MemAddSize)); + + if (hi2c->State == HAL_I2C_STATE_READY) + { + if ((pData == NULL) || (Size == 0U)) + { + hi2c->ErrorCode = HAL_I2C_ERROR_INVALID_PARAM; + return HAL_ERROR; + } + + if (__HAL_I2C_GET_FLAG(hi2c, I2C_FLAG_BUSY) == SET) + { + return HAL_BUSY; + } + + /* Process Locked */ + __HAL_LOCK(hi2c); + + /* Init tickstart for timeout management*/ + tickstart = HAL_GetTick(); + + hi2c->State = HAL_I2C_STATE_BUSY_RX; + hi2c->Mode = HAL_I2C_MODE_MEM; + hi2c->ErrorCode = HAL_I2C_ERROR_NONE; + + /* Prepare transfer parameters */ + hi2c->pBuffPtr = pData; + hi2c->XferCount = Size; + hi2c->XferOptions = I2C_NO_OPTION_FRAME; + hi2c->XferISR = I2C_Master_ISR_DMA; + + if (hi2c->XferCount > MAX_NBYTE_SIZE) + { + hi2c->XferSize = MAX_NBYTE_SIZE; + xfermode = I2C_RELOAD_MODE; + } + else + { + hi2c->XferSize = hi2c->XferCount; + xfermode = I2C_AUTOEND_MODE; + } + + /* Send Slave Address and Memory Address */ + if (I2C_RequestMemoryRead(hi2c, DevAddress, MemAddress, MemAddSize, I2C_TIMEOUT_FLAG, tickstart) != HAL_OK) + { + /* Process Unlocked */ + __HAL_UNLOCK(hi2c); + return HAL_ERROR; + } + + if (hi2c->hdmarx != NULL) + { + /* Set the I2C DMA transfer complete callback */ + hi2c->hdmarx->XferCpltCallback = I2C_DMAMasterReceiveCplt; + + /* Set the DMA error callback */ + hi2c->hdmarx->XferErrorCallback = I2C_DMAError; + + /* Set the unused DMA callbacks to NULL */ + hi2c->hdmarx->XferHalfCpltCallback = NULL; + hi2c->hdmarx->XferAbortCallback = NULL; + + /* Enable the DMA stream */ + dmaxferstatus = HAL_DMA_Start_IT(hi2c->hdmarx, (uint32_t)&hi2c->Instance->RXDR, (uint32_t)pData, + hi2c->XferSize); + } + else + { + /* Update I2C state */ + hi2c->State = HAL_I2C_STATE_READY; + hi2c->Mode = HAL_I2C_MODE_NONE; + + /* Update I2C error code */ + hi2c->ErrorCode |= HAL_I2C_ERROR_DMA_PARAM; + + /* Process Unlocked */ + __HAL_UNLOCK(hi2c); + + return HAL_ERROR; + } + + if (dmaxferstatus == HAL_OK) + { + /* Set NBYTES to write and reload if hi2c->XferCount > MAX_NBYTE_SIZE and generate RESTART */ + I2C_TransferConfig(hi2c, DevAddress, (uint8_t)hi2c->XferSize, xfermode, I2C_GENERATE_START_READ); + + /* Update XferCount value */ + hi2c->XferCount -= hi2c->XferSize; + + /* Process Unlocked */ + __HAL_UNLOCK(hi2c); + + /* Note : The I2C interrupts must be enabled after unlocking current process + to avoid the risk of I2C interrupt handle execution before current + process unlock */ + /* Enable ERR and NACK interrupts */ + I2C_Enable_IRQ(hi2c, I2C_XFER_ERROR_IT); + + /* Enable DMA Request */ + hi2c->Instance->CR1 |= I2C_CR1_RXDMAEN; + } + else + { + /* Update I2C state */ + hi2c->State = HAL_I2C_STATE_READY; + 
hi2c->Mode = HAL_I2C_MODE_NONE; + + /* Update I2C error code */ + hi2c->ErrorCode |= HAL_I2C_ERROR_DMA; + + /* Process Unlocked */ + __HAL_UNLOCK(hi2c); + + return HAL_ERROR; + } + + return HAL_OK; + } + else + { + return HAL_BUSY; + } +} + +/** + * @brief Checks if target device is ready for communication. + * @note This function is used with Memory devices + * @param hi2c Pointer to a I2C_HandleTypeDef structure that contains + * the configuration information for the specified I2C. + * @param DevAddress Target device address: The device 7 bits address value + * in datasheet must be shifted to the left before calling the interface + * @param Trials Number of trials + * @param Timeout Timeout duration + * @retval HAL status + */ +HAL_StatusTypeDef HAL_I2C_IsDeviceReady(I2C_HandleTypeDef *hi2c, uint16_t DevAddress, uint32_t Trials, + uint32_t Timeout) +{ + uint32_t tickstart; + + __IO uint32_t I2C_Trials = 0UL; + + FlagStatus tmp1; + FlagStatus tmp2; + + if (hi2c->State == HAL_I2C_STATE_READY) + { + if (__HAL_I2C_GET_FLAG(hi2c, I2C_FLAG_BUSY) == SET) + { + return HAL_BUSY; + } + + /* Process Locked */ + __HAL_LOCK(hi2c); + + hi2c->State = HAL_I2C_STATE_BUSY; + hi2c->ErrorCode = HAL_I2C_ERROR_NONE; + + do + { + /* Generate Start */ + hi2c->Instance->CR2 = I2C_GENERATE_START(hi2c->Init.AddressingMode, DevAddress); + + /* No need to Check TC flag, with AUTOEND mode the stop is automatically generated */ + /* Wait until STOPF flag is set or a NACK flag is set*/ + tickstart = HAL_GetTick(); + + tmp1 = __HAL_I2C_GET_FLAG(hi2c, I2C_FLAG_STOPF); + tmp2 = __HAL_I2C_GET_FLAG(hi2c, I2C_FLAG_AF); + + while ((tmp1 == RESET) && (tmp2 == RESET)) + { + if (Timeout != HAL_MAX_DELAY) + { + if (((HAL_GetTick() - tickstart) > Timeout) || (Timeout == 0U)) + { + /* Update I2C state */ + hi2c->State = HAL_I2C_STATE_READY; + + /* Update I2C error code */ + hi2c->ErrorCode |= HAL_I2C_ERROR_TIMEOUT; + + /* Process Unlocked */ + __HAL_UNLOCK(hi2c); + + return HAL_ERROR; + } + } + + tmp1 = __HAL_I2C_GET_FLAG(hi2c, I2C_FLAG_STOPF); + tmp2 = __HAL_I2C_GET_FLAG(hi2c, I2C_FLAG_AF); + } + + /* Check if the NACKF flag has not been set */ + if (__HAL_I2C_GET_FLAG(hi2c, I2C_FLAG_AF) == RESET) + { + /* Wait until STOPF flag is reset */ + if (I2C_WaitOnFlagUntilTimeout(hi2c, I2C_FLAG_STOPF, RESET, Timeout, tickstart) != HAL_OK) + { + return HAL_ERROR; + } + + /* Clear STOP Flag */ + __HAL_I2C_CLEAR_FLAG(hi2c, I2C_FLAG_STOPF); + + /* Device is ready */ + hi2c->State = HAL_I2C_STATE_READY; + + /* Process Unlocked */ + __HAL_UNLOCK(hi2c); + + return HAL_OK; + } + else + { + /* Wait until STOPF flag is reset */ + if (I2C_WaitOnFlagUntilTimeout(hi2c, I2C_FLAG_STOPF, RESET, Timeout, tickstart) != HAL_OK) + { + return HAL_ERROR; + } + + /* Clear NACK Flag */ + __HAL_I2C_CLEAR_FLAG(hi2c, I2C_FLAG_AF); + + /* Clear STOP Flag, auto generated with autoend*/ + __HAL_I2C_CLEAR_FLAG(hi2c, I2C_FLAG_STOPF); + } + + /* Check if the maximum allowed number of trials has been reached */ + if (I2C_Trials == Trials) + { + /* Generate Stop */ + hi2c->Instance->CR2 |= I2C_CR2_STOP; + + /* Wait until STOPF flag is reset */ + if (I2C_WaitOnFlagUntilTimeout(hi2c, I2C_FLAG_STOPF, RESET, Timeout, tickstart) != HAL_OK) + { + return HAL_ERROR; + } + + /* Clear STOP Flag */ + __HAL_I2C_CLEAR_FLAG(hi2c, I2C_FLAG_STOPF); + } + + /* Increment Trials */ + I2C_Trials++; + } while (I2C_Trials < Trials); + + /* Update I2C state */ + hi2c->State = HAL_I2C_STATE_READY; + + /* Update I2C error code */ + hi2c->ErrorCode |= HAL_I2C_ERROR_TIMEOUT; + + /* Process Unlocked 
*/ + __HAL_UNLOCK(hi2c); + + return HAL_ERROR; + } + else + { + return HAL_BUSY; + } +} + +/** + * @brief Sequential transmit in master I2C mode an amount of data in non-blocking mode with Interrupt. + * @note This interface allow to manage repeated start condition when a direction change during transfer + * @param hi2c Pointer to a I2C_HandleTypeDef structure that contains + * the configuration information for the specified I2C. + * @param DevAddress Target device address: The device 7 bits address value + * in datasheet must be shifted to the left before calling the interface + * @param pData Pointer to data buffer + * @param Size Amount of data to be sent + * @param XferOptions Options of Transfer, value of @ref I2C_XFEROPTIONS + * @retval HAL status + */ +HAL_StatusTypeDef HAL_I2C_Master_Seq_Transmit_IT(I2C_HandleTypeDef *hi2c, uint16_t DevAddress, uint8_t *pData, + uint16_t Size, uint32_t XferOptions) +{ + uint32_t xfermode; + uint32_t xferrequest = I2C_GENERATE_START_WRITE; + + /* Check the parameters */ + assert_param(IS_I2C_TRANSFER_OPTIONS_REQUEST(XferOptions)); + + if (hi2c->State == HAL_I2C_STATE_READY) + { + /* Process Locked */ + __HAL_LOCK(hi2c); + + hi2c->State = HAL_I2C_STATE_BUSY_TX; + hi2c->Mode = HAL_I2C_MODE_MASTER; + hi2c->ErrorCode = HAL_I2C_ERROR_NONE; + + /* Prepare transfer parameters */ + hi2c->pBuffPtr = pData; + hi2c->XferCount = Size; + hi2c->XferOptions = XferOptions; + hi2c->XferISR = I2C_Master_ISR_IT; + + /* If hi2c->XferCount > MAX_NBYTE_SIZE, use reload mode */ + if (hi2c->XferCount > MAX_NBYTE_SIZE) + { + hi2c->XferSize = MAX_NBYTE_SIZE; + xfermode = I2C_RELOAD_MODE; + } + else + { + hi2c->XferSize = hi2c->XferCount; + xfermode = hi2c->XferOptions; + } + + /* If transfer direction not change and there is no request to start another frame, + do not generate Restart Condition */ + /* Mean Previous state is same as current state */ + if ((hi2c->PreviousState == I2C_STATE_MASTER_BUSY_TX) && \ + (IS_I2C_TRANSFER_OTHER_OPTIONS_REQUEST(XferOptions) == 0)) + { + xferrequest = I2C_NO_STARTSTOP; + } + else + { + /* Convert OTHER_xxx XferOptions if any */ + I2C_ConvertOtherXferOptions(hi2c); + + /* Update xfermode accordingly if no reload is necessary */ + if (hi2c->XferCount <= MAX_NBYTE_SIZE) + { + xfermode = hi2c->XferOptions; + } + } + + /* Send Slave Address and set NBYTES to write */ + I2C_TransferConfig(hi2c, DevAddress, (uint8_t)hi2c->XferSize, xfermode, xferrequest); + + /* Process Unlocked */ + __HAL_UNLOCK(hi2c); + + /* Note : The I2C interrupts must be enabled after unlocking current process + to avoid the risk of I2C interrupt handle execution before current + process unlock */ + I2C_Enable_IRQ(hi2c, I2C_XFER_TX_IT); + + return HAL_OK; + } + else + { + return HAL_BUSY; + } +} + +/** + * @brief Sequential transmit in master I2C mode an amount of data in non-blocking mode with DMA. + * @note This interface allow to manage repeated start condition when a direction change during transfer + * @param hi2c Pointer to a I2C_HandleTypeDef structure that contains + * the configuration information for the specified I2C. 
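+  * @note   A minimal usage sketch of the sequential API (assuming a handle hi2c1 and a
+  *         device at 7-bit address 0x68; names and values are illustrative only): the
+  *         register address is sent as a first frame, and the matching
+  *         HAL_I2C_Master_Seq_Receive_DMA(..., I2C_LAST_FRAME) call is then issued from
+  *         HAL_I2C_MasterTxCpltCallback() so the read happens under a repeated START.
+  * @code
+  *         static uint8_t reg_addr = 0x3BU;
+  *         HAL_I2C_Master_Seq_Transmit_DMA(&hi2c1, (uint16_t)(0x68U << 1),
+  *                                         &reg_addr, 1U, I2C_FIRST_FRAME);
+  * @endcode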
+ * @param DevAddress Target device address: The device 7 bits address value + * in datasheet must be shifted to the left before calling the interface + * @param pData Pointer to data buffer + * @param Size Amount of data to be sent + * @param XferOptions Options of Transfer, value of @ref I2C_XFEROPTIONS + * @retval HAL status + */ +HAL_StatusTypeDef HAL_I2C_Master_Seq_Transmit_DMA(I2C_HandleTypeDef *hi2c, uint16_t DevAddress, uint8_t *pData, + uint16_t Size, uint32_t XferOptions) +{ + uint32_t xfermode; + uint32_t xferrequest = I2C_GENERATE_START_WRITE; + HAL_StatusTypeDef dmaxferstatus; + + /* Check the parameters */ + assert_param(IS_I2C_TRANSFER_OPTIONS_REQUEST(XferOptions)); + + if (hi2c->State == HAL_I2C_STATE_READY) + { + /* Process Locked */ + __HAL_LOCK(hi2c); + + hi2c->State = HAL_I2C_STATE_BUSY_TX; + hi2c->Mode = HAL_I2C_MODE_MASTER; + hi2c->ErrorCode = HAL_I2C_ERROR_NONE; + + /* Prepare transfer parameters */ + hi2c->pBuffPtr = pData; + hi2c->XferCount = Size; + hi2c->XferOptions = XferOptions; + hi2c->XferISR = I2C_Master_ISR_DMA; + + /* If hi2c->XferCount > MAX_NBYTE_SIZE, use reload mode */ + if (hi2c->XferCount > MAX_NBYTE_SIZE) + { + hi2c->XferSize = MAX_NBYTE_SIZE; + xfermode = I2C_RELOAD_MODE; + } + else + { + hi2c->XferSize = hi2c->XferCount; + xfermode = hi2c->XferOptions; + } + + /* If transfer direction not change and there is no request to start another frame, + do not generate Restart Condition */ + /* Mean Previous state is same as current state */ + if ((hi2c->PreviousState == I2C_STATE_MASTER_BUSY_TX) && \ + (IS_I2C_TRANSFER_OTHER_OPTIONS_REQUEST(XferOptions) == 0)) + { + xferrequest = I2C_NO_STARTSTOP; + } + else + { + /* Convert OTHER_xxx XferOptions if any */ + I2C_ConvertOtherXferOptions(hi2c); + + /* Update xfermode accordingly if no reload is necessary */ + if (hi2c->XferCount <= MAX_NBYTE_SIZE) + { + xfermode = hi2c->XferOptions; + } + } + + if (hi2c->XferSize > 0U) + { + if (hi2c->hdmatx != NULL) + { + /* Set the I2C DMA transfer complete callback */ + hi2c->hdmatx->XferCpltCallback = I2C_DMAMasterTransmitCplt; + + /* Set the DMA error callback */ + hi2c->hdmatx->XferErrorCallback = I2C_DMAError; + + /* Set the unused DMA callbacks to NULL */ + hi2c->hdmatx->XferHalfCpltCallback = NULL; + hi2c->hdmatx->XferAbortCallback = NULL; + + /* Enable the DMA stream */ + dmaxferstatus = HAL_DMA_Start_IT(hi2c->hdmatx, (uint32_t)pData, (uint32_t)&hi2c->Instance->TXDR, + hi2c->XferSize); + } + else + { + /* Update I2C state */ + hi2c->State = HAL_I2C_STATE_READY; + hi2c->Mode = HAL_I2C_MODE_NONE; + + /* Update I2C error code */ + hi2c->ErrorCode |= HAL_I2C_ERROR_DMA_PARAM; + + /* Process Unlocked */ + __HAL_UNLOCK(hi2c); + + return HAL_ERROR; + } + + if (dmaxferstatus == HAL_OK) + { + /* Send Slave Address and set NBYTES to write */ + I2C_TransferConfig(hi2c, DevAddress, (uint8_t)hi2c->XferSize, xfermode, xferrequest); + + /* Update XferCount value */ + hi2c->XferCount -= hi2c->XferSize; + + /* Process Unlocked */ + __HAL_UNLOCK(hi2c); + + /* Note : The I2C interrupts must be enabled after unlocking current process + to avoid the risk of I2C interrupt handle execution before current + process unlock */ + /* Enable ERR and NACK interrupts */ + I2C_Enable_IRQ(hi2c, I2C_XFER_ERROR_IT); + + /* Enable DMA Request */ + hi2c->Instance->CR1 |= I2C_CR1_TXDMAEN; + } + else + { + /* Update I2C state */ + hi2c->State = HAL_I2C_STATE_READY; + hi2c->Mode = HAL_I2C_MODE_NONE; + + /* Update I2C error code */ + hi2c->ErrorCode |= HAL_I2C_ERROR_DMA; + + /* Process Unlocked */ + 
__HAL_UNLOCK(hi2c); + + return HAL_ERROR; + } + } + else + { + /* Update Transfer ISR function pointer */ + hi2c->XferISR = I2C_Master_ISR_IT; + + /* Send Slave Address */ + /* Set NBYTES to write and generate START condition */ + I2C_TransferConfig(hi2c, DevAddress, (uint8_t)hi2c->XferSize, I2C_AUTOEND_MODE, + I2C_GENERATE_START_WRITE); + + /* Process Unlocked */ + __HAL_UNLOCK(hi2c); + + /* Note : The I2C interrupts must be enabled after unlocking current process + to avoid the risk of I2C interrupt handle execution before current + process unlock */ + /* Enable ERR, TC, STOP, NACK, TXI interrupt */ + /* possible to enable all of these */ + /* I2C_IT_ERRI | I2C_IT_TCI | I2C_IT_STOPI | I2C_IT_NACKI | + I2C_IT_ADDRI | I2C_IT_RXI | I2C_IT_TXI */ + I2C_Enable_IRQ(hi2c, I2C_XFER_TX_IT); + } + + return HAL_OK; + } + else + { + return HAL_BUSY; + } +} + +/** + * @brief Sequential receive in master I2C mode an amount of data in non-blocking mode with Interrupt + * @note This interface allow to manage repeated start condition when a direction change during transfer + * @param hi2c Pointer to a I2C_HandleTypeDef structure that contains + * the configuration information for the specified I2C. + * @param DevAddress Target device address: The device 7 bits address value + * in datasheet must be shifted to the left before calling the interface + * @param pData Pointer to data buffer + * @param Size Amount of data to be sent + * @param XferOptions Options of Transfer, value of @ref I2C_XFEROPTIONS + * @retval HAL status + */ +HAL_StatusTypeDef HAL_I2C_Master_Seq_Receive_IT(I2C_HandleTypeDef *hi2c, uint16_t DevAddress, uint8_t *pData, + uint16_t Size, uint32_t XferOptions) +{ + uint32_t xfermode; + uint32_t xferrequest = I2C_GENERATE_START_READ; + + /* Check the parameters */ + assert_param(IS_I2C_TRANSFER_OPTIONS_REQUEST(XferOptions)); + + if (hi2c->State == HAL_I2C_STATE_READY) + { + /* Process Locked */ + __HAL_LOCK(hi2c); + + hi2c->State = HAL_I2C_STATE_BUSY_RX; + hi2c->Mode = HAL_I2C_MODE_MASTER; + hi2c->ErrorCode = HAL_I2C_ERROR_NONE; + + /* Prepare transfer parameters */ + hi2c->pBuffPtr = pData; + hi2c->XferCount = Size; + hi2c->XferOptions = XferOptions; + hi2c->XferISR = I2C_Master_ISR_IT; + + /* If hi2c->XferCount > MAX_NBYTE_SIZE, use reload mode */ + if (hi2c->XferCount > MAX_NBYTE_SIZE) + { + hi2c->XferSize = MAX_NBYTE_SIZE; + xfermode = I2C_RELOAD_MODE; + } + else + { + hi2c->XferSize = hi2c->XferCount; + xfermode = hi2c->XferOptions; + } + + /* If transfer direction not change and there is no request to start another frame, + do not generate Restart Condition */ + /* Mean Previous state is same as current state */ + if ((hi2c->PreviousState == I2C_STATE_MASTER_BUSY_RX) && \ + (IS_I2C_TRANSFER_OTHER_OPTIONS_REQUEST(XferOptions) == 0)) + { + xferrequest = I2C_NO_STARTSTOP; + } + else + { + /* Convert OTHER_xxx XferOptions if any */ + I2C_ConvertOtherXferOptions(hi2c); + + /* Update xfermode accordingly if no reload is necessary */ + if (hi2c->XferCount <= MAX_NBYTE_SIZE) + { + xfermode = hi2c->XferOptions; + } + } + + /* Send Slave Address and set NBYTES to read */ + I2C_TransferConfig(hi2c, DevAddress, (uint8_t)hi2c->XferSize, xfermode, xferrequest); + + /* Process Unlocked */ + __HAL_UNLOCK(hi2c); + + /* Note : The I2C interrupts must be enabled after unlocking current process + to avoid the risk of I2C interrupt handle execution before current + process unlock */ + I2C_Enable_IRQ(hi2c, I2C_XFER_RX_IT); + + return HAL_OK; + } + else + { + return HAL_BUSY; + } +} + +/** + * @brief 
Sequential receive in master I2C mode an amount of data in non-blocking mode with DMA + * @note This interface allow to manage repeated start condition when a direction change during transfer + * @param hi2c Pointer to a I2C_HandleTypeDef structure that contains + * the configuration information for the specified I2C. + * @param DevAddress Target device address: The device 7 bits address value + * in datasheet must be shifted to the left before calling the interface + * @param pData Pointer to data buffer + * @param Size Amount of data to be sent + * @param XferOptions Options of Transfer, value of @ref I2C_XFEROPTIONS + * @retval HAL status + */ +HAL_StatusTypeDef HAL_I2C_Master_Seq_Receive_DMA(I2C_HandleTypeDef *hi2c, uint16_t DevAddress, uint8_t *pData, + uint16_t Size, uint32_t XferOptions) +{ + uint32_t xfermode; + uint32_t xferrequest = I2C_GENERATE_START_READ; + HAL_StatusTypeDef dmaxferstatus; + + /* Check the parameters */ + assert_param(IS_I2C_TRANSFER_OPTIONS_REQUEST(XferOptions)); + + if (hi2c->State == HAL_I2C_STATE_READY) + { + /* Process Locked */ + __HAL_LOCK(hi2c); + + hi2c->State = HAL_I2C_STATE_BUSY_RX; + hi2c->Mode = HAL_I2C_MODE_MASTER; + hi2c->ErrorCode = HAL_I2C_ERROR_NONE; + + /* Prepare transfer parameters */ + hi2c->pBuffPtr = pData; + hi2c->XferCount = Size; + hi2c->XferOptions = XferOptions; + hi2c->XferISR = I2C_Master_ISR_DMA; + + /* If hi2c->XferCount > MAX_NBYTE_SIZE, use reload mode */ + if (hi2c->XferCount > MAX_NBYTE_SIZE) + { + hi2c->XferSize = MAX_NBYTE_SIZE; + xfermode = I2C_RELOAD_MODE; + } + else + { + hi2c->XferSize = hi2c->XferCount; + xfermode = hi2c->XferOptions; + } + + /* If transfer direction not change and there is no request to start another frame, + do not generate Restart Condition */ + /* Mean Previous state is same as current state */ + if ((hi2c->PreviousState == I2C_STATE_MASTER_BUSY_RX) && \ + (IS_I2C_TRANSFER_OTHER_OPTIONS_REQUEST(XferOptions) == 0)) + { + xferrequest = I2C_NO_STARTSTOP; + } + else + { + /* Convert OTHER_xxx XferOptions if any */ + I2C_ConvertOtherXferOptions(hi2c); + + /* Update xfermode accordingly if no reload is necessary */ + if (hi2c->XferCount <= MAX_NBYTE_SIZE) + { + xfermode = hi2c->XferOptions; + } + } + + if (hi2c->XferSize > 0U) + { + if (hi2c->hdmarx != NULL) + { + /* Set the I2C DMA transfer complete callback */ + hi2c->hdmarx->XferCpltCallback = I2C_DMAMasterReceiveCplt; + + /* Set the DMA error callback */ + hi2c->hdmarx->XferErrorCallback = I2C_DMAError; + + /* Set the unused DMA callbacks to NULL */ + hi2c->hdmarx->XferHalfCpltCallback = NULL; + hi2c->hdmarx->XferAbortCallback = NULL; + + /* Enable the DMA stream */ + dmaxferstatus = HAL_DMA_Start_IT(hi2c->hdmarx, (uint32_t)&hi2c->Instance->RXDR, (uint32_t)pData, + hi2c->XferSize); + } + else + { + /* Update I2C state */ + hi2c->State = HAL_I2C_STATE_READY; + hi2c->Mode = HAL_I2C_MODE_NONE; + + /* Update I2C error code */ + hi2c->ErrorCode |= HAL_I2C_ERROR_DMA_PARAM; + + /* Process Unlocked */ + __HAL_UNLOCK(hi2c); + + return HAL_ERROR; + } + + if (dmaxferstatus == HAL_OK) + { + /* Send Slave Address and set NBYTES to read */ + I2C_TransferConfig(hi2c, DevAddress, (uint8_t)hi2c->XferSize, xfermode, xferrequest); + + /* Update XferCount value */ + hi2c->XferCount -= hi2c->XferSize; + + /* Process Unlocked */ + __HAL_UNLOCK(hi2c); + + /* Note : The I2C interrupts must be enabled after unlocking current process + to avoid the risk of I2C interrupt handle execution before current + process unlock */ + /* Enable ERR and NACK interrupts */ + 
I2C_Enable_IRQ(hi2c, I2C_XFER_ERROR_IT); + + /* Enable DMA Request */ + hi2c->Instance->CR1 |= I2C_CR1_RXDMAEN; + } + else + { + /* Update I2C state */ + hi2c->State = HAL_I2C_STATE_READY; + hi2c->Mode = HAL_I2C_MODE_NONE; + + /* Update I2C error code */ + hi2c->ErrorCode |= HAL_I2C_ERROR_DMA; + + /* Process Unlocked */ + __HAL_UNLOCK(hi2c); + + return HAL_ERROR; + } + } + else + { + /* Update Transfer ISR function pointer */ + hi2c->XferISR = I2C_Master_ISR_IT; + + /* Send Slave Address */ + /* Set NBYTES to read and generate START condition */ + I2C_TransferConfig(hi2c, DevAddress, (uint8_t)hi2c->XferSize, I2C_AUTOEND_MODE, + I2C_GENERATE_START_READ); + + /* Process Unlocked */ + __HAL_UNLOCK(hi2c); + + /* Note : The I2C interrupts must be enabled after unlocking current process + to avoid the risk of I2C interrupt handle execution before current + process unlock */ + /* Enable ERR, TC, STOP, NACK, TXI interrupt */ + /* possible to enable all of these */ + /* I2C_IT_ERRI | I2C_IT_TCI | I2C_IT_STOPI | I2C_IT_NACKI | + I2C_IT_ADDRI | I2C_IT_RXI | I2C_IT_TXI */ + I2C_Enable_IRQ(hi2c, I2C_XFER_TX_IT); + } + + return HAL_OK; + } + else + { + return HAL_BUSY; + } +} + +/** + * @brief Sequential transmit in slave/device I2C mode an amount of data in non-blocking mode with Interrupt + * @note This interface allow to manage repeated start condition when a direction change during transfer + * @param hi2c Pointer to a I2C_HandleTypeDef structure that contains + * the configuration information for the specified I2C. + * @param pData Pointer to data buffer + * @param Size Amount of data to be sent + * @param XferOptions Options of Transfer, value of @ref I2C_XFEROPTIONS + * @retval HAL status + */ +HAL_StatusTypeDef HAL_I2C_Slave_Seq_Transmit_IT(I2C_HandleTypeDef *hi2c, uint8_t *pData, uint16_t Size, + uint32_t XferOptions) +{ + /* Check the parameters */ + assert_param(IS_I2C_TRANSFER_OPTIONS_REQUEST(XferOptions)); + + if (((uint32_t)hi2c->State & (uint32_t)HAL_I2C_STATE_LISTEN) == (uint32_t)HAL_I2C_STATE_LISTEN) + { + if ((pData == NULL) || (Size == 0U)) + { + hi2c->ErrorCode = HAL_I2C_ERROR_INVALID_PARAM; + return HAL_ERROR; + } + + /* Disable Interrupts, to prevent preemption during treatment in case of multicall */ + I2C_Disable_IRQ(hi2c, I2C_XFER_LISTEN_IT | I2C_XFER_TX_IT); + + /* Process Locked */ + __HAL_LOCK(hi2c); + + /* I2C cannot manage full duplex exchange so disable previous IT enabled if any */ + /* and then toggle the HAL slave RX state to TX state */ + if (hi2c->State == HAL_I2C_STATE_BUSY_RX_LISTEN) + { + /* Disable associated Interrupts */ + I2C_Disable_IRQ(hi2c, I2C_XFER_RX_IT); + + /* Abort DMA Xfer if any */ + if ((hi2c->Instance->CR1 & I2C_CR1_RXDMAEN) == I2C_CR1_RXDMAEN) + { + hi2c->Instance->CR1 &= ~I2C_CR1_RXDMAEN; + + if (hi2c->hdmarx != NULL) + { + /* Set the I2C DMA Abort callback : + will lead to call HAL_I2C_ErrorCallback() at end of DMA abort procedure */ + hi2c->hdmarx->XferAbortCallback = I2C_DMAAbort; + + /* Abort DMA RX */ + if (HAL_DMA_Abort_IT(hi2c->hdmarx) != HAL_OK) + { + /* Call Directly XferAbortCallback function in case of error */ + hi2c->hdmarx->XferAbortCallback(hi2c->hdmarx); + } + } + } + } + + hi2c->State = HAL_I2C_STATE_BUSY_TX_LISTEN; + hi2c->Mode = HAL_I2C_MODE_SLAVE; + hi2c->ErrorCode = HAL_I2C_ERROR_NONE; + + /* Enable Address Acknowledge */ + hi2c->Instance->CR2 &= ~I2C_CR2_NACK; + + /* Prepare transfer parameters */ + hi2c->pBuffPtr = pData; + hi2c->XferCount = Size; + hi2c->XferSize = hi2c->XferCount; + hi2c->XferOptions = XferOptions; + 
hi2c->XferISR = I2C_Slave_ISR_IT; + + if (I2C_GET_DIR(hi2c) == I2C_DIRECTION_RECEIVE) + { + /* Clear ADDR flag after prepare the transfer parameters */ + /* This action will generate an acknowledge to the Master */ + __HAL_I2C_CLEAR_FLAG(hi2c, I2C_FLAG_ADDR); + } + + /* Process Unlocked */ + __HAL_UNLOCK(hi2c); + + /* Note : The I2C interrupts must be enabled after unlocking current process + to avoid the risk of I2C interrupt handle execution before current + process unlock */ + /* REnable ADDR interrupt */ + I2C_Enable_IRQ(hi2c, I2C_XFER_TX_IT | I2C_XFER_LISTEN_IT); + + return HAL_OK; + } + else + { + return HAL_ERROR; + } +} + +/** + * @brief Sequential transmit in slave/device I2C mode an amount of data in non-blocking mode with DMA + * @note This interface allow to manage repeated start condition when a direction change during transfer + * @param hi2c Pointer to a I2C_HandleTypeDef structure that contains + * the configuration information for the specified I2C. + * @param pData Pointer to data buffer + * @param Size Amount of data to be sent + * @param XferOptions Options of Transfer, value of @ref I2C_XFEROPTIONS + * @retval HAL status + */ +HAL_StatusTypeDef HAL_I2C_Slave_Seq_Transmit_DMA(I2C_HandleTypeDef *hi2c, uint8_t *pData, uint16_t Size, + uint32_t XferOptions) +{ + HAL_StatusTypeDef dmaxferstatus; + + /* Check the parameters */ + assert_param(IS_I2C_TRANSFER_OPTIONS_REQUEST(XferOptions)); + + if (((uint32_t)hi2c->State & (uint32_t)HAL_I2C_STATE_LISTEN) == (uint32_t)HAL_I2C_STATE_LISTEN) + { + if ((pData == NULL) || (Size == 0U)) + { + hi2c->ErrorCode = HAL_I2C_ERROR_INVALID_PARAM; + return HAL_ERROR; + } + + /* Process Locked */ + __HAL_LOCK(hi2c); + + /* Disable Interrupts, to prevent preemption during treatment in case of multicall */ + I2C_Disable_IRQ(hi2c, I2C_XFER_LISTEN_IT | I2C_XFER_TX_IT); + + /* I2C cannot manage full duplex exchange so disable previous IT enabled if any */ + /* and then toggle the HAL slave RX state to TX state */ + if (hi2c->State == HAL_I2C_STATE_BUSY_RX_LISTEN) + { + /* Disable associated Interrupts */ + I2C_Disable_IRQ(hi2c, I2C_XFER_RX_IT); + + if ((hi2c->Instance->CR1 & I2C_CR1_RXDMAEN) == I2C_CR1_RXDMAEN) + { + /* Abort DMA Xfer if any */ + if (hi2c->hdmarx != NULL) + { + hi2c->Instance->CR1 &= ~I2C_CR1_RXDMAEN; + + /* Set the I2C DMA Abort callback : + will lead to call HAL_I2C_ErrorCallback() at end of DMA abort procedure */ + hi2c->hdmarx->XferAbortCallback = I2C_DMAAbort; + + /* Abort DMA RX */ + if (HAL_DMA_Abort_IT(hi2c->hdmarx) != HAL_OK) + { + /* Call Directly XferAbortCallback function in case of error */ + hi2c->hdmarx->XferAbortCallback(hi2c->hdmarx); + } + } + } + } + else if (hi2c->State == HAL_I2C_STATE_BUSY_TX_LISTEN) + { + if ((hi2c->Instance->CR1 & I2C_CR1_TXDMAEN) == I2C_CR1_TXDMAEN) + { + hi2c->Instance->CR1 &= ~I2C_CR1_TXDMAEN; + + /* Abort DMA Xfer if any */ + if (hi2c->hdmatx != NULL) + { + /* Set the I2C DMA Abort callback : + will lead to call HAL_I2C_ErrorCallback() at end of DMA abort procedure */ + hi2c->hdmatx->XferAbortCallback = I2C_DMAAbort; + + /* Abort DMA TX */ + if (HAL_DMA_Abort_IT(hi2c->hdmatx) != HAL_OK) + { + /* Call Directly XferAbortCallback function in case of error */ + hi2c->hdmatx->XferAbortCallback(hi2c->hdmatx); + } + } + } + } + else + { + /* Nothing to do */ + } + + hi2c->State = HAL_I2C_STATE_BUSY_TX_LISTEN; + hi2c->Mode = HAL_I2C_MODE_SLAVE; + hi2c->ErrorCode = HAL_I2C_ERROR_NONE; + + /* Enable Address Acknowledge */ + hi2c->Instance->CR2 &= ~I2C_CR2_NACK; + + /* Prepare transfer parameters 
*/ + hi2c->pBuffPtr = pData; + hi2c->XferCount = Size; + hi2c->XferSize = hi2c->XferCount; + hi2c->XferOptions = XferOptions; + hi2c->XferISR = I2C_Slave_ISR_DMA; + + if (hi2c->hdmatx != NULL) + { + /* Set the I2C DMA transfer complete callback */ + hi2c->hdmatx->XferCpltCallback = I2C_DMASlaveTransmitCplt; + + /* Set the DMA error callback */ + hi2c->hdmatx->XferErrorCallback = I2C_DMAError; + + /* Set the unused DMA callbacks to NULL */ + hi2c->hdmatx->XferHalfCpltCallback = NULL; + hi2c->hdmatx->XferAbortCallback = NULL; + + /* Enable the DMA stream */ + dmaxferstatus = HAL_DMA_Start_IT(hi2c->hdmatx, (uint32_t)pData, (uint32_t)&hi2c->Instance->TXDR, + hi2c->XferSize); + } + else + { + /* Update I2C state */ + hi2c->State = HAL_I2C_STATE_LISTEN; + hi2c->Mode = HAL_I2C_MODE_NONE; + + /* Update I2C error code */ + hi2c->ErrorCode |= HAL_I2C_ERROR_DMA_PARAM; + + /* Process Unlocked */ + __HAL_UNLOCK(hi2c); + + return HAL_ERROR; + } + + if (dmaxferstatus == HAL_OK) + { + /* Update XferCount value */ + hi2c->XferCount -= hi2c->XferSize; + + /* Reset XferSize */ + hi2c->XferSize = 0; + } + else + { + /* Update I2C state */ + hi2c->State = HAL_I2C_STATE_LISTEN; + hi2c->Mode = HAL_I2C_MODE_NONE; + + /* Update I2C error code */ + hi2c->ErrorCode |= HAL_I2C_ERROR_DMA; + + /* Process Unlocked */ + __HAL_UNLOCK(hi2c); + + return HAL_ERROR; + } + + if (I2C_GET_DIR(hi2c) == I2C_DIRECTION_RECEIVE) + { + /* Clear ADDR flag after prepare the transfer parameters */ + /* This action will generate an acknowledge to the Master */ + __HAL_I2C_CLEAR_FLAG(hi2c, I2C_FLAG_ADDR); + } + + /* Process Unlocked */ + __HAL_UNLOCK(hi2c); + + /* Enable DMA Request */ + hi2c->Instance->CR1 |= I2C_CR1_TXDMAEN; + + /* Note : The I2C interrupts must be enabled after unlocking current process + to avoid the risk of I2C interrupt handle execution before current + process unlock */ + /* Enable ERR, STOP, NACK, ADDR interrupts */ + I2C_Enable_IRQ(hi2c, I2C_XFER_LISTEN_IT); + + return HAL_OK; + } + else + { + return HAL_ERROR; + } +} + +/** + * @brief Sequential receive in slave/device I2C mode an amount of data in non-blocking mode with Interrupt + * @note This interface allow to manage repeated start condition when a direction change during transfer + * @param hi2c Pointer to a I2C_HandleTypeDef structure that contains + * the configuration information for the specified I2C. 
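+  * @note   A minimal usage sketch (assuming listen mode was started with
+  *         HAL_I2C_EnableListen_IT() and a user buffer rx_buf exists; names are
+  *         illustrative only): the call is typically made from HAL_I2C_AddrCallback()
+  *         once the master addresses the slave in write direction.
+  * @code
+  *         void HAL_I2C_AddrCallback(I2C_HandleTypeDef *hi2c, uint8_t TransferDirection,
+  *                                   uint16_t AddrMatchCode)
+  *         {
+  *           if (TransferDirection == I2C_DIRECTION_TRANSMIT)  /* master writes to us */
+  *           {
+  *             HAL_I2C_Slave_Seq_Receive_IT(hi2c, rx_buf, sizeof(rx_buf), I2C_FIRST_FRAME);
+  *           }
+  *         }
+  * @endcode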
+ * @param pData Pointer to data buffer + * @param Size Amount of data to be sent + * @param XferOptions Options of Transfer, value of @ref I2C_XFEROPTIONS + * @retval HAL status + */ +HAL_StatusTypeDef HAL_I2C_Slave_Seq_Receive_IT(I2C_HandleTypeDef *hi2c, uint8_t *pData, uint16_t Size, + uint32_t XferOptions) +{ + /* Check the parameters */ + assert_param(IS_I2C_TRANSFER_OPTIONS_REQUEST(XferOptions)); + + if (((uint32_t)hi2c->State & (uint32_t)HAL_I2C_STATE_LISTEN) == (uint32_t)HAL_I2C_STATE_LISTEN) + { + if ((pData == NULL) || (Size == 0U)) + { + hi2c->ErrorCode = HAL_I2C_ERROR_INVALID_PARAM; + return HAL_ERROR; + } + + /* Disable Interrupts, to prevent preemption during treatment in case of multicall */ + I2C_Disable_IRQ(hi2c, I2C_XFER_LISTEN_IT | I2C_XFER_RX_IT); + + /* Process Locked */ + __HAL_LOCK(hi2c); + + /* I2C cannot manage full duplex exchange so disable previous IT enabled if any */ + /* and then toggle the HAL slave TX state to RX state */ + if (hi2c->State == HAL_I2C_STATE_BUSY_TX_LISTEN) + { + /* Disable associated Interrupts */ + I2C_Disable_IRQ(hi2c, I2C_XFER_TX_IT); + + if ((hi2c->Instance->CR1 & I2C_CR1_TXDMAEN) == I2C_CR1_TXDMAEN) + { + hi2c->Instance->CR1 &= ~I2C_CR1_TXDMAEN; + + /* Abort DMA Xfer if any */ + if (hi2c->hdmatx != NULL) + { + /* Set the I2C DMA Abort callback : + will lead to call HAL_I2C_ErrorCallback() at end of DMA abort procedure */ + hi2c->hdmatx->XferAbortCallback = I2C_DMAAbort; + + /* Abort DMA TX */ + if (HAL_DMA_Abort_IT(hi2c->hdmatx) != HAL_OK) + { + /* Call Directly XferAbortCallback function in case of error */ + hi2c->hdmatx->XferAbortCallback(hi2c->hdmatx); + } + } + } + } + + hi2c->State = HAL_I2C_STATE_BUSY_RX_LISTEN; + hi2c->Mode = HAL_I2C_MODE_SLAVE; + hi2c->ErrorCode = HAL_I2C_ERROR_NONE; + + /* Enable Address Acknowledge */ + hi2c->Instance->CR2 &= ~I2C_CR2_NACK; + + /* Prepare transfer parameters */ + hi2c->pBuffPtr = pData; + hi2c->XferCount = Size; + hi2c->XferSize = hi2c->XferCount; + hi2c->XferOptions = XferOptions; + hi2c->XferISR = I2C_Slave_ISR_IT; + + if (I2C_GET_DIR(hi2c) == I2C_DIRECTION_TRANSMIT) + { + /* Clear ADDR flag after prepare the transfer parameters */ + /* This action will generate an acknowledge to the Master */ + __HAL_I2C_CLEAR_FLAG(hi2c, I2C_FLAG_ADDR); + } + + /* Process Unlocked */ + __HAL_UNLOCK(hi2c); + + /* Note : The I2C interrupts must be enabled after unlocking current process + to avoid the risk of I2C interrupt handle execution before current + process unlock */ + /* REnable ADDR interrupt */ + I2C_Enable_IRQ(hi2c, I2C_XFER_RX_IT | I2C_XFER_LISTEN_IT); + + return HAL_OK; + } + else + { + return HAL_ERROR; + } +} + +/** + * @brief Sequential receive in slave/device I2C mode an amount of data in non-blocking mode with DMA + * @note This interface allow to manage repeated start condition when a direction change during transfer + * @param hi2c Pointer to a I2C_HandleTypeDef structure that contains + * the configuration information for the specified I2C. 
+ * @param pData Pointer to data buffer + * @param Size Amount of data to be sent + * @param XferOptions Options of Transfer, value of @ref I2C_XFEROPTIONS + * @retval HAL status + */ +HAL_StatusTypeDef HAL_I2C_Slave_Seq_Receive_DMA(I2C_HandleTypeDef *hi2c, uint8_t *pData, uint16_t Size, + uint32_t XferOptions) +{ + HAL_StatusTypeDef dmaxferstatus; + + /* Check the parameters */ + assert_param(IS_I2C_TRANSFER_OPTIONS_REQUEST(XferOptions)); + + if (((uint32_t)hi2c->State & (uint32_t)HAL_I2C_STATE_LISTEN) == (uint32_t)HAL_I2C_STATE_LISTEN) + { + if ((pData == NULL) || (Size == 0U)) + { + hi2c->ErrorCode = HAL_I2C_ERROR_INVALID_PARAM; + return HAL_ERROR; + } + + /* Disable Interrupts, to prevent preemption during treatment in case of multicall */ + I2C_Disable_IRQ(hi2c, I2C_XFER_LISTEN_IT | I2C_XFER_RX_IT); + + /* Process Locked */ + __HAL_LOCK(hi2c); + + /* I2C cannot manage full duplex exchange so disable previous IT enabled if any */ + /* and then toggle the HAL slave TX state to RX state */ + if (hi2c->State == HAL_I2C_STATE_BUSY_TX_LISTEN) + { + /* Disable associated Interrupts */ + I2C_Disable_IRQ(hi2c, I2C_XFER_TX_IT); + + if ((hi2c->Instance->CR1 & I2C_CR1_TXDMAEN) == I2C_CR1_TXDMAEN) + { + /* Abort DMA Xfer if any */ + if (hi2c->hdmatx != NULL) + { + hi2c->Instance->CR1 &= ~I2C_CR1_TXDMAEN; + + /* Set the I2C DMA Abort callback : + will lead to call HAL_I2C_ErrorCallback() at end of DMA abort procedure */ + hi2c->hdmatx->XferAbortCallback = I2C_DMAAbort; + + /* Abort DMA TX */ + if (HAL_DMA_Abort_IT(hi2c->hdmatx) != HAL_OK) + { + /* Call Directly XferAbortCallback function in case of error */ + hi2c->hdmatx->XferAbortCallback(hi2c->hdmatx); + } + } + } + } + else if (hi2c->State == HAL_I2C_STATE_BUSY_RX_LISTEN) + { + if ((hi2c->Instance->CR1 & I2C_CR1_RXDMAEN) == I2C_CR1_RXDMAEN) + { + hi2c->Instance->CR1 &= ~I2C_CR1_RXDMAEN; + + /* Abort DMA Xfer if any */ + if (hi2c->hdmarx != NULL) + { + /* Set the I2C DMA Abort callback : + will lead to call HAL_I2C_ErrorCallback() at end of DMA abort procedure */ + hi2c->hdmarx->XferAbortCallback = I2C_DMAAbort; + + /* Abort DMA RX */ + if (HAL_DMA_Abort_IT(hi2c->hdmarx) != HAL_OK) + { + /* Call Directly XferAbortCallback function in case of error */ + hi2c->hdmarx->XferAbortCallback(hi2c->hdmarx); + } + } + } + } + else + { + /* Nothing to do */ + } + + hi2c->State = HAL_I2C_STATE_BUSY_RX_LISTEN; + hi2c->Mode = HAL_I2C_MODE_SLAVE; + hi2c->ErrorCode = HAL_I2C_ERROR_NONE; + + /* Enable Address Acknowledge */ + hi2c->Instance->CR2 &= ~I2C_CR2_NACK; + + /* Prepare transfer parameters */ + hi2c->pBuffPtr = pData; + hi2c->XferCount = Size; + hi2c->XferSize = hi2c->XferCount; + hi2c->XferOptions = XferOptions; + hi2c->XferISR = I2C_Slave_ISR_DMA; + + if (hi2c->hdmarx != NULL) + { + /* Set the I2C DMA transfer complete callback */ + hi2c->hdmarx->XferCpltCallback = I2C_DMASlaveReceiveCplt; + + /* Set the DMA error callback */ + hi2c->hdmarx->XferErrorCallback = I2C_DMAError; + + /* Set the unused DMA callbacks to NULL */ + hi2c->hdmarx->XferHalfCpltCallback = NULL; + hi2c->hdmarx->XferAbortCallback = NULL; + + /* Enable the DMA stream */ + dmaxferstatus = HAL_DMA_Start_IT(hi2c->hdmarx, (uint32_t)&hi2c->Instance->RXDR, + (uint32_t)pData, hi2c->XferSize); + } + else + { + /* Update I2C state */ + hi2c->State = HAL_I2C_STATE_LISTEN; + hi2c->Mode = HAL_I2C_MODE_NONE; + + /* Update I2C error code */ + hi2c->ErrorCode |= HAL_I2C_ERROR_DMA_PARAM; + + /* Process Unlocked */ + __HAL_UNLOCK(hi2c); + + return HAL_ERROR; + } + + if (dmaxferstatus == HAL_OK) + { + 
/* Update XferCount value */ + hi2c->XferCount -= hi2c->XferSize; + + /* Reset XferSize */ + hi2c->XferSize = 0; + } + else + { + /* Update I2C state */ + hi2c->State = HAL_I2C_STATE_LISTEN; + hi2c->Mode = HAL_I2C_MODE_NONE; + + /* Update I2C error code */ + hi2c->ErrorCode |= HAL_I2C_ERROR_DMA; + + /* Process Unlocked */ + __HAL_UNLOCK(hi2c); + + return HAL_ERROR; + } + + if (I2C_GET_DIR(hi2c) == I2C_DIRECTION_TRANSMIT) + { + /* Clear ADDR flag after prepare the transfer parameters */ + /* This action will generate an acknowledge to the Master */ + __HAL_I2C_CLEAR_FLAG(hi2c, I2C_FLAG_ADDR); + } + + /* Process Unlocked */ + __HAL_UNLOCK(hi2c); + + /* Enable DMA Request */ + hi2c->Instance->CR1 |= I2C_CR1_RXDMAEN; + + /* Note : The I2C interrupts must be enabled after unlocking current process + to avoid the risk of I2C interrupt handle execution before current + process unlock */ + /* REnable ADDR interrupt */ + I2C_Enable_IRQ(hi2c, I2C_XFER_RX_IT | I2C_XFER_LISTEN_IT); + + return HAL_OK; + } + else + { + return HAL_ERROR; + } +} + +/** + * @brief Enable the Address listen mode with Interrupt. + * @param hi2c Pointer to a I2C_HandleTypeDef structure that contains + * the configuration information for the specified I2C. + * @retval HAL status + */ +HAL_StatusTypeDef HAL_I2C_EnableListen_IT(I2C_HandleTypeDef *hi2c) +{ + if (hi2c->State == HAL_I2C_STATE_READY) + { + hi2c->State = HAL_I2C_STATE_LISTEN; + hi2c->XferISR = I2C_Slave_ISR_IT; + + /* Enable the Address Match interrupt */ + I2C_Enable_IRQ(hi2c, I2C_XFER_LISTEN_IT); + + return HAL_OK; + } + else + { + return HAL_BUSY; + } +} + +/** + * @brief Disable the Address listen mode with Interrupt. + * @param hi2c Pointer to a I2C_HandleTypeDef structure that contains + * the configuration information for the specified I2C + * @retval HAL status + */ +HAL_StatusTypeDef HAL_I2C_DisableListen_IT(I2C_HandleTypeDef *hi2c) +{ + /* Declaration of tmp to prevent undefined behavior of volatile usage */ + uint32_t tmp; + + /* Disable Address listen mode only if a transfer is not ongoing */ + if (hi2c->State == HAL_I2C_STATE_LISTEN) + { + tmp = (uint32_t)(hi2c->State) & I2C_STATE_MSK; + hi2c->PreviousState = tmp | (uint32_t)(hi2c->Mode); + hi2c->State = HAL_I2C_STATE_READY; + hi2c->Mode = HAL_I2C_MODE_NONE; + hi2c->XferISR = NULL; + + /* Disable the Address Match interrupt */ + I2C_Disable_IRQ(hi2c, I2C_XFER_LISTEN_IT); + + return HAL_OK; + } + else + { + return HAL_BUSY; + } +} + +/** + * @brief Abort a master I2C IT or DMA process communication with Interrupt. + * @param hi2c Pointer to a I2C_HandleTypeDef structure that contains + * the configuration information for the specified I2C. 
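+  * @note   A minimal usage sketch (assuming a handle hi2c1 and a target at 7-bit address
+  *         0x68; names and values are illustrative only): abort an IT/DMA transfer that
+  *         did not finish within an application deadline, then wait for
+  *         HAL_I2C_AbortCpltCallback() to confirm the end of the abort sequence.
+  * @code
+  *         if (HAL_I2C_GetState(&hi2c1) != HAL_I2C_STATE_READY)
+  *         {
+  *           (void)HAL_I2C_Master_Abort_IT(&hi2c1, (uint16_t)(0x68U << 1));
+  *         }
+  * @endcode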
+ * @param DevAddress Target device address: The device 7 bits address value + * in datasheet must be shifted to the left before calling the interface + * @retval HAL status + */ +HAL_StatusTypeDef HAL_I2C_Master_Abort_IT(I2C_HandleTypeDef *hi2c, uint16_t DevAddress) +{ + if (hi2c->Mode == HAL_I2C_MODE_MASTER) + { + /* Process Locked */ + __HAL_LOCK(hi2c); + + /* Disable Interrupts and Store Previous state */ + if (hi2c->State == HAL_I2C_STATE_BUSY_TX) + { + I2C_Disable_IRQ(hi2c, I2C_XFER_TX_IT); + hi2c->PreviousState = I2C_STATE_MASTER_BUSY_TX; + } + else if (hi2c->State == HAL_I2C_STATE_BUSY_RX) + { + I2C_Disable_IRQ(hi2c, I2C_XFER_RX_IT); + hi2c->PreviousState = I2C_STATE_MASTER_BUSY_RX; + } + else + { + /* Do nothing */ + } + + /* Set State at HAL_I2C_STATE_ABORT */ + hi2c->State = HAL_I2C_STATE_ABORT; + + /* Set NBYTES to 1 to generate a dummy read on I2C peripheral */ + /* Set AUTOEND mode, this will generate a NACK then STOP condition to abort the current transfer */ + I2C_TransferConfig(hi2c, DevAddress, 1, I2C_AUTOEND_MODE, I2C_GENERATE_STOP); + + /* Process Unlocked */ + __HAL_UNLOCK(hi2c); + + /* Note : The I2C interrupts must be enabled after unlocking current process + to avoid the risk of I2C interrupt handle execution before current + process unlock */ + I2C_Enable_IRQ(hi2c, I2C_XFER_CPLT_IT); + + return HAL_OK; + } + else + { + /* Wrong usage of abort function */ + /* This function should be used only in case of abort monitored by master device */ + return HAL_ERROR; + } +} + +/** + * @} + */ + +/** @defgroup I2C_IRQ_Handler_and_Callbacks IRQ Handler and Callbacks + * @{ + */ + +/** + * @brief This function handles I2C event interrupt request. + * @param hi2c Pointer to a I2C_HandleTypeDef structure that contains + * the configuration information for the specified I2C. + * @retval None + */ +void HAL_I2C_EV_IRQHandler(I2C_HandleTypeDef *hi2c) +{ + /* Get current IT Flags and IT sources value */ + uint32_t itflags = READ_REG(hi2c->Instance->ISR); + uint32_t itsources = READ_REG(hi2c->Instance->CR1); + + /* I2C events treatment -------------------------------------*/ + if (hi2c->XferISR != NULL) + { + hi2c->XferISR(hi2c, itflags, itsources); + } +} + +/** + * @brief This function handles I2C error interrupt request. + * @param hi2c Pointer to a I2C_HandleTypeDef structure that contains + * the configuration information for the specified I2C. 
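+  * @note   A minimal wiring sketch (assuming the I2C1 instance and a handle hi2c1; the
+  *         file name and instance are illustrative only): both handlers are normally
+  *         called from the vector-table handlers, e.g. in the CubeMX-generated
+  *         stm32f7xx_it.c.
+  * @code
+  *         void I2C1_EV_IRQHandler(void) { HAL_I2C_EV_IRQHandler(&hi2c1); }
+  *         void I2C1_ER_IRQHandler(void) { HAL_I2C_ER_IRQHandler(&hi2c1); }
+  * @endcode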
+ * @retval None + */ +void HAL_I2C_ER_IRQHandler(I2C_HandleTypeDef *hi2c) +{ + uint32_t itflags = READ_REG(hi2c->Instance->ISR); + uint32_t itsources = READ_REG(hi2c->Instance->CR1); + uint32_t tmperror; + + /* I2C Bus error interrupt occurred ------------------------------------*/ + if ((I2C_CHECK_FLAG(itflags, I2C_FLAG_BERR) != RESET) && \ + (I2C_CHECK_IT_SOURCE(itsources, I2C_IT_ERRI) != RESET)) + { + hi2c->ErrorCode |= HAL_I2C_ERROR_BERR; + + /* Clear BERR flag */ + __HAL_I2C_CLEAR_FLAG(hi2c, I2C_FLAG_BERR); + } + + /* I2C Over-Run/Under-Run interrupt occurred ----------------------------------------*/ + if ((I2C_CHECK_FLAG(itflags, I2C_FLAG_OVR) != RESET) && \ + (I2C_CHECK_IT_SOURCE(itsources, I2C_IT_ERRI) != RESET)) + { + hi2c->ErrorCode |= HAL_I2C_ERROR_OVR; + + /* Clear OVR flag */ + __HAL_I2C_CLEAR_FLAG(hi2c, I2C_FLAG_OVR); + } + + /* I2C Arbitration Loss error interrupt occurred -------------------------------------*/ + if ((I2C_CHECK_FLAG(itflags, I2C_FLAG_ARLO) != RESET) && \ + (I2C_CHECK_IT_SOURCE(itsources, I2C_IT_ERRI) != RESET)) + { + hi2c->ErrorCode |= HAL_I2C_ERROR_ARLO; + + /* Clear ARLO flag */ + __HAL_I2C_CLEAR_FLAG(hi2c, I2C_FLAG_ARLO); + } + + /* Store current volatile hi2c->ErrorCode, misra rule */ + tmperror = hi2c->ErrorCode; + + /* Call the Error Callback in case of Error detected */ + if ((tmperror & (HAL_I2C_ERROR_BERR | HAL_I2C_ERROR_OVR | HAL_I2C_ERROR_ARLO)) != HAL_I2C_ERROR_NONE) + { + I2C_ITError(hi2c, tmperror); + } +} + +/** + * @brief Master Tx Transfer completed callback. + * @param hi2c Pointer to a I2C_HandleTypeDef structure that contains + * the configuration information for the specified I2C. + * @retval None + */ +__weak void HAL_I2C_MasterTxCpltCallback(I2C_HandleTypeDef *hi2c) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(hi2c); + + /* NOTE : This function should not be modified, when the callback is needed, + the HAL_I2C_MasterTxCpltCallback could be implemented in the user file + */ +} + +/** + * @brief Master Rx Transfer completed callback. + * @param hi2c Pointer to a I2C_HandleTypeDef structure that contains + * the configuration information for the specified I2C. + * @retval None + */ +__weak void HAL_I2C_MasterRxCpltCallback(I2C_HandleTypeDef *hi2c) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(hi2c); + + /* NOTE : This function should not be modified, when the callback is needed, + the HAL_I2C_MasterRxCpltCallback could be implemented in the user file + */ +} + +/** @brief Slave Tx Transfer completed callback. + * @param hi2c Pointer to a I2C_HandleTypeDef structure that contains + * the configuration information for the specified I2C. + * @retval None + */ +__weak void HAL_I2C_SlaveTxCpltCallback(I2C_HandleTypeDef *hi2c) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(hi2c); + + /* NOTE : This function should not be modified, when the callback is needed, + the HAL_I2C_SlaveTxCpltCallback could be implemented in the user file + */ +} + +/** + * @brief Slave Rx Transfer completed callback. + * @param hi2c Pointer to a I2C_HandleTypeDef structure that contains + * the configuration information for the specified I2C. 
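+  * @note   A minimal override sketch (rx_done and I2C1 are illustrative only): defining
+  *         this function in a user file replaces the weak implementation below and is
+  *         called once a slave reception completes.
+  * @code
+  *         volatile uint8_t rx_done = 0U;
+  *         void HAL_I2C_SlaveRxCpltCallback(I2C_HandleTypeDef *hi2c)
+  *         {
+  *           if (hi2c->Instance == I2C1) { rx_done = 1U; }
+  *         }
+  * @endcode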
+ * @retval None + */ +__weak void HAL_I2C_SlaveRxCpltCallback(I2C_HandleTypeDef *hi2c) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(hi2c); + + /* NOTE : This function should not be modified, when the callback is needed, + the HAL_I2C_SlaveRxCpltCallback could be implemented in the user file + */ +} + +/** + * @brief Slave Address Match callback. + * @param hi2c Pointer to a I2C_HandleTypeDef structure that contains + * the configuration information for the specified I2C. + * @param TransferDirection Master request Transfer Direction (Write/Read), value of @ref I2C_XFERDIRECTION + * @param AddrMatchCode Address Match Code + * @retval None + */ +__weak void HAL_I2C_AddrCallback(I2C_HandleTypeDef *hi2c, uint8_t TransferDirection, uint16_t AddrMatchCode) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(hi2c); + UNUSED(TransferDirection); + UNUSED(AddrMatchCode); + + /* NOTE : This function should not be modified, when the callback is needed, + the HAL_I2C_AddrCallback() could be implemented in the user file + */ +} + +/** + * @brief Listen Complete callback. + * @param hi2c Pointer to a I2C_HandleTypeDef structure that contains + * the configuration information for the specified I2C. + * @retval None + */ +__weak void HAL_I2C_ListenCpltCallback(I2C_HandleTypeDef *hi2c) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(hi2c); + + /* NOTE : This function should not be modified, when the callback is needed, + the HAL_I2C_ListenCpltCallback() could be implemented in the user file + */ +} + +/** + * @brief Memory Tx Transfer completed callback. + * @param hi2c Pointer to a I2C_HandleTypeDef structure that contains + * the configuration information for the specified I2C. + * @retval None + */ +__weak void HAL_I2C_MemTxCpltCallback(I2C_HandleTypeDef *hi2c) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(hi2c); + + /* NOTE : This function should not be modified, when the callback is needed, + the HAL_I2C_MemTxCpltCallback could be implemented in the user file + */ +} + +/** + * @brief Memory Rx Transfer completed callback. + * @param hi2c Pointer to a I2C_HandleTypeDef structure that contains + * the configuration information for the specified I2C. + * @retval None + */ +__weak void HAL_I2C_MemRxCpltCallback(I2C_HandleTypeDef *hi2c) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(hi2c); + + /* NOTE : This function should not be modified, when the callback is needed, + the HAL_I2C_MemRxCpltCallback could be implemented in the user file + */ +} + +/** + * @brief I2C error callback. + * @param hi2c Pointer to a I2C_HandleTypeDef structure that contains + * the configuration information for the specified I2C. + * @retval None + */ +__weak void HAL_I2C_ErrorCallback(I2C_HandleTypeDef *hi2c) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(hi2c); + + /* NOTE : This function should not be modified, when the callback is needed, + the HAL_I2C_ErrorCallback could be implemented in the user file + */ +} + +/** + * @brief I2C abort callback. + * @param hi2c Pointer to a I2C_HandleTypeDef structure that contains + * the configuration information for the specified I2C. 
+ * @retval None + */ +__weak void HAL_I2C_AbortCpltCallback(I2C_HandleTypeDef *hi2c) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(hi2c); + + /* NOTE : This function should not be modified, when the callback is needed, + the HAL_I2C_AbortCpltCallback could be implemented in the user file + */ +} + +/** + * @} + */ + +/** @defgroup I2C_Exported_Functions_Group3 Peripheral State, Mode and Error functions + * @brief Peripheral State, Mode and Error functions + * +@verbatim + =============================================================================== + ##### Peripheral State, Mode and Error functions ##### + =============================================================================== + [..] + This subsection permit to get in run-time the status of the peripheral + and the data flow. + +@endverbatim + * @{ + */ + +/** + * @brief Return the I2C handle state. + * @param hi2c Pointer to a I2C_HandleTypeDef structure that contains + * the configuration information for the specified I2C. + * @retval HAL state + */ +HAL_I2C_StateTypeDef HAL_I2C_GetState(I2C_HandleTypeDef *hi2c) +{ + /* Return I2C handle state */ + return hi2c->State; +} + +/** + * @brief Returns the I2C Master, Slave, Memory or no mode. + * @param hi2c Pointer to a I2C_HandleTypeDef structure that contains + * the configuration information for I2C module + * @retval HAL mode + */ +HAL_I2C_ModeTypeDef HAL_I2C_GetMode(I2C_HandleTypeDef *hi2c) +{ + return hi2c->Mode; +} + +/** + * @brief Return the I2C error code. + * @param hi2c Pointer to a I2C_HandleTypeDef structure that contains + * the configuration information for the specified I2C. + * @retval I2C Error Code + */ +uint32_t HAL_I2C_GetError(I2C_HandleTypeDef *hi2c) +{ + return hi2c->ErrorCode; +} + +/** + * @} + */ + +/** + * @} + */ + +/** @addtogroup I2C_Private_Functions + * @{ + */ + +/** + * @brief Interrupt Sub-Routine which handle the Interrupt Flags Master Mode with Interrupt. + * @param hi2c Pointer to a I2C_HandleTypeDef structure that contains + * the configuration information for the specified I2C. + * @param ITFlags Interrupt flags to handle. + * @param ITSources Interrupt sources enabled. 
+ * @retval HAL status + */ +static HAL_StatusTypeDef I2C_Master_ISR_IT(struct __I2C_HandleTypeDef *hi2c, uint32_t ITFlags, + uint32_t ITSources) +{ + uint16_t devaddress; + uint32_t tmpITFlags = ITFlags; + + /* Process Locked */ + __HAL_LOCK(hi2c); + + if ((I2C_CHECK_FLAG(tmpITFlags, I2C_FLAG_AF) != RESET) && \ + (I2C_CHECK_IT_SOURCE(ITSources, I2C_IT_NACKI) != RESET)) + { + /* Clear NACK Flag */ + __HAL_I2C_CLEAR_FLAG(hi2c, I2C_FLAG_AF); + + /* Set corresponding Error Code */ + /* No need to generate STOP, it is automatically done */ + /* Error callback will be send during stop flag treatment */ + hi2c->ErrorCode |= HAL_I2C_ERROR_AF; + + /* Flush TX register */ + I2C_Flush_TXDR(hi2c); + } + else if ((I2C_CHECK_FLAG(tmpITFlags, I2C_FLAG_RXNE) != RESET) && \ + (I2C_CHECK_IT_SOURCE(ITSources, I2C_IT_RXI) != RESET)) + { + /* Remove RXNE flag on temporary variable as read done */ + tmpITFlags &= ~I2C_FLAG_RXNE; + + /* Read data from RXDR */ + *hi2c->pBuffPtr = (uint8_t)hi2c->Instance->RXDR; + + /* Increment Buffer pointer */ + hi2c->pBuffPtr++; + + hi2c->XferSize--; + hi2c->XferCount--; + } + else if ((I2C_CHECK_FLAG(tmpITFlags, I2C_FLAG_TXIS) != RESET) && \ + (I2C_CHECK_IT_SOURCE(ITSources, I2C_IT_TXI) != RESET)) + { + /* Write data to TXDR */ + hi2c->Instance->TXDR = *hi2c->pBuffPtr; + + /* Increment Buffer pointer */ + hi2c->pBuffPtr++; + + hi2c->XferSize--; + hi2c->XferCount--; + } + else if ((I2C_CHECK_FLAG(tmpITFlags, I2C_FLAG_TCR) != RESET) && \ + (I2C_CHECK_IT_SOURCE(ITSources, I2C_IT_TCI) != RESET)) + { + if ((hi2c->XferCount != 0U) && (hi2c->XferSize == 0U)) + { + devaddress = (uint16_t)(hi2c->Instance->CR2 & I2C_CR2_SADD); + + if (hi2c->XferCount > MAX_NBYTE_SIZE) + { + hi2c->XferSize = MAX_NBYTE_SIZE; + I2C_TransferConfig(hi2c, devaddress, (uint8_t)hi2c->XferSize, I2C_RELOAD_MODE, I2C_NO_STARTSTOP); + } + else + { + hi2c->XferSize = hi2c->XferCount; + if (hi2c->XferOptions != I2C_NO_OPTION_FRAME) + { + I2C_TransferConfig(hi2c, devaddress, (uint8_t)hi2c->XferSize, + hi2c->XferOptions, I2C_NO_STARTSTOP); + } + else + { + I2C_TransferConfig(hi2c, devaddress, (uint8_t)hi2c->XferSize, + I2C_AUTOEND_MODE, I2C_NO_STARTSTOP); + } + } + } + else + { + /* Call TxCpltCallback() if no stop mode is set */ + if (I2C_GET_STOP_MODE(hi2c) != I2C_AUTOEND_MODE) + { + /* Call I2C Master Sequential complete process */ + I2C_ITMasterSeqCplt(hi2c); + } + else + { + /* Wrong size Status regarding TCR flag event */ + /* Call the corresponding callback to inform upper layer of End of Transfer */ + I2C_ITError(hi2c, HAL_I2C_ERROR_SIZE); + } + } + } + else if ((I2C_CHECK_FLAG(tmpITFlags, I2C_FLAG_TC) != RESET) && \ + (I2C_CHECK_IT_SOURCE(ITSources, I2C_IT_TCI) != RESET)) + { + if (hi2c->XferCount == 0U) + { + if (I2C_GET_STOP_MODE(hi2c) != I2C_AUTOEND_MODE) + { + /* Generate a stop condition in case of no transfer option */ + if (hi2c->XferOptions == I2C_NO_OPTION_FRAME) + { + /* Generate Stop */ + hi2c->Instance->CR2 |= I2C_CR2_STOP; + } + else + { + /* Call I2C Master Sequential complete process */ + I2C_ITMasterSeqCplt(hi2c); + } + } + } + else + { + /* Wrong size Status regarding TC flag event */ + /* Call the corresponding callback to inform upper layer of End of Transfer */ + I2C_ITError(hi2c, HAL_I2C_ERROR_SIZE); + } + } + else + { + /* Nothing to do */ + } + + if ((I2C_CHECK_FLAG(tmpITFlags, I2C_FLAG_STOPF) != RESET) && \ + (I2C_CHECK_IT_SOURCE(ITSources, I2C_IT_STOPI) != RESET)) + { + /* Call I2C Master complete process */ + I2C_ITMasterCplt(hi2c, tmpITFlags); + } + + /* Process Unlocked */ + 
__HAL_UNLOCK(hi2c); + + return HAL_OK; +} + +/** + * @brief Interrupt Sub-Routine which handle the Interrupt Flags Slave Mode with Interrupt. + * @param hi2c Pointer to a I2C_HandleTypeDef structure that contains + * the configuration information for the specified I2C. + * @param ITFlags Interrupt flags to handle. + * @param ITSources Interrupt sources enabled. + * @retval HAL status + */ +static HAL_StatusTypeDef I2C_Slave_ISR_IT(struct __I2C_HandleTypeDef *hi2c, uint32_t ITFlags, + uint32_t ITSources) +{ + uint32_t tmpoptions = hi2c->XferOptions; + uint32_t tmpITFlags = ITFlags; + + /* Process locked */ + __HAL_LOCK(hi2c); + + /* Check if STOPF is set */ + if ((I2C_CHECK_FLAG(tmpITFlags, I2C_FLAG_STOPF) != RESET) && \ + (I2C_CHECK_IT_SOURCE(ITSources, I2C_IT_STOPI) != RESET)) + { + /* Call I2C Slave complete process */ + I2C_ITSlaveCplt(hi2c, tmpITFlags); + } + + if ((I2C_CHECK_FLAG(tmpITFlags, I2C_FLAG_AF) != RESET) && \ + (I2C_CHECK_IT_SOURCE(ITSources, I2C_IT_NACKI) != RESET)) + { + /* Check that I2C transfer finished */ + /* if yes, normal use case, a NACK is sent by the MASTER when Transfer is finished */ + /* Mean XferCount == 0*/ + /* So clear Flag NACKF only */ + if (hi2c->XferCount == 0U) + { + if ((hi2c->State == HAL_I2C_STATE_LISTEN) && (tmpoptions == I2C_FIRST_AND_LAST_FRAME)) + /* Same action must be done for (tmpoptions == I2C_LAST_FRAME) which removed for + Warning[Pa134]: left and right operands are identical */ + { + /* Call I2C Listen complete process */ + I2C_ITListenCplt(hi2c, tmpITFlags); + } + else if ((hi2c->State == HAL_I2C_STATE_BUSY_TX_LISTEN) && (tmpoptions != I2C_NO_OPTION_FRAME)) + { + /* Clear NACK Flag */ + __HAL_I2C_CLEAR_FLAG(hi2c, I2C_FLAG_AF); + + /* Flush TX register */ + I2C_Flush_TXDR(hi2c); + + /* Last Byte is Transmitted */ + /* Call I2C Slave Sequential complete process */ + I2C_ITSlaveSeqCplt(hi2c); + } + else + { + /* Clear NACK Flag */ + __HAL_I2C_CLEAR_FLAG(hi2c, I2C_FLAG_AF); + } + } + else + { + /* if no, error use case, a Non-Acknowledge of last Data is generated by the MASTER*/ + /* Clear NACK Flag */ + __HAL_I2C_CLEAR_FLAG(hi2c, I2C_FLAG_AF); + + /* Set ErrorCode corresponding to a Non-Acknowledge */ + hi2c->ErrorCode |= HAL_I2C_ERROR_AF; + + if ((tmpoptions == I2C_FIRST_FRAME) || (tmpoptions == I2C_NEXT_FRAME)) + { + /* Call the corresponding callback to inform upper layer of End of Transfer */ + I2C_ITError(hi2c, hi2c->ErrorCode); + } + } + } + else if ((I2C_CHECK_FLAG(tmpITFlags, I2C_FLAG_RXNE) != RESET) && \ + (I2C_CHECK_IT_SOURCE(ITSources, I2C_IT_RXI) != RESET)) + { + if (hi2c->XferCount > 0U) + { + /* Read data from RXDR */ + *hi2c->pBuffPtr = (uint8_t)hi2c->Instance->RXDR; + + /* Increment Buffer pointer */ + hi2c->pBuffPtr++; + + hi2c->XferSize--; + hi2c->XferCount--; + } + + if ((hi2c->XferCount == 0U) && \ + (tmpoptions != I2C_NO_OPTION_FRAME)) + { + /* Call I2C Slave Sequential complete process */ + I2C_ITSlaveSeqCplt(hi2c); + } + } + else if ((I2C_CHECK_FLAG(tmpITFlags, I2C_FLAG_ADDR) != RESET) && \ + (I2C_CHECK_IT_SOURCE(ITSources, I2C_IT_ADDRI) != RESET)) + { + I2C_ITAddrCplt(hi2c, tmpITFlags); + } + else if ((I2C_CHECK_FLAG(tmpITFlags, I2C_FLAG_TXIS) != RESET) && \ + (I2C_CHECK_IT_SOURCE(ITSources, I2C_IT_TXI) != RESET)) + { + /* Write data to TXDR only if XferCount not reach "0" */ + /* A TXIS flag can be set, during STOP treatment */ + /* Check if all Data have already been sent */ + /* If it is the case, this last write in TXDR is not sent, correspond to a dummy TXIS event */ + if (hi2c->XferCount > 0U) + { + /* Write 
data to TXDR */ + hi2c->Instance->TXDR = *hi2c->pBuffPtr; + + /* Increment Buffer pointer */ + hi2c->pBuffPtr++; + + hi2c->XferCount--; + hi2c->XferSize--; + } + else + { + if ((tmpoptions == I2C_NEXT_FRAME) || (tmpoptions == I2C_FIRST_FRAME)) + { + /* Last Byte is Transmitted */ + /* Call I2C Slave Sequential complete process */ + I2C_ITSlaveSeqCplt(hi2c); + } + } + } + else + { + /* Nothing to do */ + } + + /* Process Unlocked */ + __HAL_UNLOCK(hi2c); + + return HAL_OK; +} + +/** + * @brief Interrupt Sub-Routine which handle the Interrupt Flags Master Mode with DMA. + * @param hi2c Pointer to a I2C_HandleTypeDef structure that contains + * the configuration information for the specified I2C. + * @param ITFlags Interrupt flags to handle. + * @param ITSources Interrupt sources enabled. + * @retval HAL status + */ +static HAL_StatusTypeDef I2C_Master_ISR_DMA(struct __I2C_HandleTypeDef *hi2c, uint32_t ITFlags, + uint32_t ITSources) +{ + uint16_t devaddress; + uint32_t xfermode; + + /* Process Locked */ + __HAL_LOCK(hi2c); + + if ((I2C_CHECK_FLAG(ITFlags, I2C_FLAG_AF) != RESET) && \ + (I2C_CHECK_IT_SOURCE(ITSources, I2C_IT_NACKI) != RESET)) + { + /* Clear NACK Flag */ + __HAL_I2C_CLEAR_FLAG(hi2c, I2C_FLAG_AF); + + /* Set corresponding Error Code */ + hi2c->ErrorCode |= HAL_I2C_ERROR_AF; + + /* No need to generate STOP, it is automatically done */ + /* But enable STOP interrupt, to treat it */ + /* Error callback will be send during stop flag treatment */ + I2C_Enable_IRQ(hi2c, I2C_XFER_CPLT_IT); + + /* Flush TX register */ + I2C_Flush_TXDR(hi2c); + } + else if ((I2C_CHECK_FLAG(ITFlags, I2C_FLAG_TCR) != RESET) && \ + (I2C_CHECK_IT_SOURCE(ITSources, I2C_IT_TCI) != RESET)) + { + /* Disable TC interrupt */ + __HAL_I2C_DISABLE_IT(hi2c, I2C_IT_TCI); + + if (hi2c->XferCount != 0U) + { + /* Recover Slave address */ + devaddress = (uint16_t)(hi2c->Instance->CR2 & I2C_CR2_SADD); + + /* Prepare the new XferSize to transfer */ + if (hi2c->XferCount > MAX_NBYTE_SIZE) + { + hi2c->XferSize = MAX_NBYTE_SIZE; + xfermode = I2C_RELOAD_MODE; + } + else + { + hi2c->XferSize = hi2c->XferCount; + if (hi2c->XferOptions != I2C_NO_OPTION_FRAME) + { + xfermode = hi2c->XferOptions; + } + else + { + xfermode = I2C_AUTOEND_MODE; + } + } + + /* Set the new XferSize in Nbytes register */ + I2C_TransferConfig(hi2c, devaddress, (uint8_t)hi2c->XferSize, xfermode, I2C_NO_STARTSTOP); + + /* Update XferCount value */ + hi2c->XferCount -= hi2c->XferSize; + + /* Enable DMA Request */ + if (hi2c->State == HAL_I2C_STATE_BUSY_RX) + { + hi2c->Instance->CR1 |= I2C_CR1_RXDMAEN; + } + else + { + hi2c->Instance->CR1 |= I2C_CR1_TXDMAEN; + } + } + else + { + /* Call TxCpltCallback() if no stop mode is set */ + if (I2C_GET_STOP_MODE(hi2c) != I2C_AUTOEND_MODE) + { + /* Call I2C Master Sequential complete process */ + I2C_ITMasterSeqCplt(hi2c); + } + else + { + /* Wrong size Status regarding TCR flag event */ + /* Call the corresponding callback to inform upper layer of End of Transfer */ + I2C_ITError(hi2c, HAL_I2C_ERROR_SIZE); + } + } + } + else if ((I2C_CHECK_FLAG(ITFlags, I2C_FLAG_TC) != RESET) && \ + (I2C_CHECK_IT_SOURCE(ITSources, I2C_IT_TCI) != RESET)) + { + if (hi2c->XferCount == 0U) + { + if (I2C_GET_STOP_MODE(hi2c) != I2C_AUTOEND_MODE) + { + /* Generate a stop condition in case of no transfer option */ + if (hi2c->XferOptions == I2C_NO_OPTION_FRAME) + { + /* Generate Stop */ + hi2c->Instance->CR2 |= I2C_CR2_STOP; + } + else + { + /* Call I2C Master Sequential complete process */ + I2C_ITMasterSeqCplt(hi2c); + } + } + } + else + { + /* 
Wrong size Status regarding TC flag event */ + /* Call the corresponding callback to inform upper layer of End of Transfer */ + I2C_ITError(hi2c, HAL_I2C_ERROR_SIZE); + } + } + else if ((I2C_CHECK_FLAG(ITFlags, I2C_FLAG_STOPF) != RESET) && \ + (I2C_CHECK_IT_SOURCE(ITSources, I2C_IT_STOPI) != RESET)) + { + /* Call I2C Master complete process */ + I2C_ITMasterCplt(hi2c, ITFlags); + } + else + { + /* Nothing to do */ + } + + /* Process Unlocked */ + __HAL_UNLOCK(hi2c); + + return HAL_OK; +} + +/** + * @brief Interrupt Sub-Routine which handle the Interrupt Flags Slave Mode with DMA. + * @param hi2c Pointer to a I2C_HandleTypeDef structure that contains + * the configuration information for the specified I2C. + * @param ITFlags Interrupt flags to handle. + * @param ITSources Interrupt sources enabled. + * @retval HAL status + */ +static HAL_StatusTypeDef I2C_Slave_ISR_DMA(struct __I2C_HandleTypeDef *hi2c, uint32_t ITFlags, + uint32_t ITSources) +{ + uint32_t tmpoptions = hi2c->XferOptions; + uint32_t treatdmanack = 0U; + HAL_I2C_StateTypeDef tmpstate; + + /* Process locked */ + __HAL_LOCK(hi2c); + + /* Check if STOPF is set */ + if ((I2C_CHECK_FLAG(ITFlags, I2C_FLAG_STOPF) != RESET) && \ + (I2C_CHECK_IT_SOURCE(ITSources, I2C_IT_STOPI) != RESET)) + { + /* Call I2C Slave complete process */ + I2C_ITSlaveCplt(hi2c, ITFlags); + } + + if ((I2C_CHECK_FLAG(ITFlags, I2C_FLAG_AF) != RESET) && \ + (I2C_CHECK_IT_SOURCE(ITSources, I2C_IT_NACKI) != RESET)) + { + /* Check that I2C transfer finished */ + /* if yes, normal use case, a NACK is sent by the MASTER when Transfer is finished */ + /* Mean XferCount == 0 */ + /* So clear Flag NACKF only */ + if ((I2C_CHECK_IT_SOURCE(ITSources, I2C_CR1_TXDMAEN) != RESET) || + (I2C_CHECK_IT_SOURCE(ITSources, I2C_CR1_RXDMAEN) != RESET)) + { + /* Split check of hdmarx, for MISRA compliance */ + if (hi2c->hdmarx != NULL) + { + if (I2C_CHECK_IT_SOURCE(ITSources, I2C_CR1_RXDMAEN) != RESET) + { + if (I2C_GET_DMA_REMAIN_DATA(hi2c->hdmarx) == 0U) + { + treatdmanack = 1U; + } + } + } + + /* Split check of hdmatx, for MISRA compliance */ + if (hi2c->hdmatx != NULL) + { + if (I2C_CHECK_IT_SOURCE(ITSources, I2C_CR1_TXDMAEN) != RESET) + { + if (I2C_GET_DMA_REMAIN_DATA(hi2c->hdmatx) == 0U) + { + treatdmanack = 1U; + } + } + } + + if (treatdmanack == 1U) + { + if ((hi2c->State == HAL_I2C_STATE_LISTEN) && (tmpoptions == I2C_FIRST_AND_LAST_FRAME)) + /* Same action must be done for (tmpoptions == I2C_LAST_FRAME) which removed for + Warning[Pa134]: left and right operands are identical */ + { + /* Call I2C Listen complete process */ + I2C_ITListenCplt(hi2c, ITFlags); + } + else if ((hi2c->State == HAL_I2C_STATE_BUSY_TX_LISTEN) && (tmpoptions != I2C_NO_OPTION_FRAME)) + { + /* Clear NACK Flag */ + __HAL_I2C_CLEAR_FLAG(hi2c, I2C_FLAG_AF); + + /* Flush TX register */ + I2C_Flush_TXDR(hi2c); + + /* Last Byte is Transmitted */ + /* Call I2C Slave Sequential complete process */ + I2C_ITSlaveSeqCplt(hi2c); + } + else + { + /* Clear NACK Flag */ + __HAL_I2C_CLEAR_FLAG(hi2c, I2C_FLAG_AF); + } + } + else + { + /* if no, error use case, a Non-Acknowledge of last Data is generated by the MASTER*/ + /* Clear NACK Flag */ + __HAL_I2C_CLEAR_FLAG(hi2c, I2C_FLAG_AF); + + /* Set ErrorCode corresponding to a Non-Acknowledge */ + hi2c->ErrorCode |= HAL_I2C_ERROR_AF; + + /* Store current hi2c->State, solve MISRA2012-Rule-13.5 */ + tmpstate = hi2c->State; + + if ((tmpoptions == I2C_FIRST_FRAME) || (tmpoptions == I2C_NEXT_FRAME)) + { + if ((tmpstate == HAL_I2C_STATE_BUSY_TX) || (tmpstate == 
HAL_I2C_STATE_BUSY_TX_LISTEN)) + { + hi2c->PreviousState = I2C_STATE_SLAVE_BUSY_TX; + } + else if ((tmpstate == HAL_I2C_STATE_BUSY_RX) || (tmpstate == HAL_I2C_STATE_BUSY_RX_LISTEN)) + { + hi2c->PreviousState = I2C_STATE_SLAVE_BUSY_RX; + } + else + { + /* Do nothing */ + } + + /* Call the corresponding callback to inform upper layer of End of Transfer */ + I2C_ITError(hi2c, hi2c->ErrorCode); + } + } + } + else + { + /* Only Clear NACK Flag, no DMA treatment is pending */ + __HAL_I2C_CLEAR_FLAG(hi2c, I2C_FLAG_AF); + } + } + else if ((I2C_CHECK_FLAG(ITFlags, I2C_FLAG_ADDR) != RESET) && \ + (I2C_CHECK_IT_SOURCE(ITSources, I2C_IT_ADDRI) != RESET)) + { + I2C_ITAddrCplt(hi2c, ITFlags); + } + else + { + /* Nothing to do */ + } + + /* Process Unlocked */ + __HAL_UNLOCK(hi2c); + + return HAL_OK; +} + +/** + * @brief Master sends target device address followed by internal memory address for write request. + * @param hi2c Pointer to a I2C_HandleTypeDef structure that contains + * the configuration information for the specified I2C. + * @param DevAddress Target device address: The device 7 bits address value + * in datasheet must be shifted to the left before calling the interface + * @param MemAddress Internal memory address + * @param MemAddSize Size of internal memory address + * @param Timeout Timeout duration + * @param Tickstart Tick start value + * @retval HAL status + */ +static HAL_StatusTypeDef I2C_RequestMemoryWrite(I2C_HandleTypeDef *hi2c, uint16_t DevAddress, + uint16_t MemAddress, uint16_t MemAddSize, uint32_t Timeout, + uint32_t Tickstart) +{ + I2C_TransferConfig(hi2c, DevAddress, (uint8_t)MemAddSize, I2C_RELOAD_MODE, I2C_GENERATE_START_WRITE); + + /* Wait until TXIS flag is set */ + if (I2C_WaitOnTXISFlagUntilTimeout(hi2c, Timeout, Tickstart) != HAL_OK) + { + return HAL_ERROR; + } + + /* If Memory address size is 8Bit */ + if (MemAddSize == I2C_MEMADD_SIZE_8BIT) + { + /* Send Memory Address */ + hi2c->Instance->TXDR = I2C_MEM_ADD_LSB(MemAddress); + } + /* If Memory address size is 16Bit */ + else + { + /* Send MSB of Memory Address */ + hi2c->Instance->TXDR = I2C_MEM_ADD_MSB(MemAddress); + + /* Wait until TXIS flag is set */ + if (I2C_WaitOnTXISFlagUntilTimeout(hi2c, Timeout, Tickstart) != HAL_OK) + { + return HAL_ERROR; + } + + /* Send LSB of Memory Address */ + hi2c->Instance->TXDR = I2C_MEM_ADD_LSB(MemAddress); + } + + /* Wait until TCR flag is set */ + if (I2C_WaitOnFlagUntilTimeout(hi2c, I2C_FLAG_TCR, RESET, Timeout, Tickstart) != HAL_OK) + { + return HAL_ERROR; + } + + return HAL_OK; +} + +/** + * @brief Master sends target device address followed by internal memory address for read request. + * @param hi2c Pointer to a I2C_HandleTypeDef structure that contains + * the configuration information for the specified I2C. 
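+  * @note   Illustrative sketch: this request phase is normally reached through the
+  *         public HAL_I2C_Mem_Read() API, which first transmits the internal memory
+  *         address (as done here) and then restarts in receive mode. The handle name
+  *         hi2c1, the 0x50 device address and the buffer size are placeholder
+  *         assumptions, not part of this driver.
+  * @code
+  *   extern I2C_HandleTypeDef hi2c1;   // assumed handle, configured elsewhere
+  *   uint8_t data[16];
+  *
+  *   // Read 16 bytes starting at internal address 0x0000 of a 7-bit 0x50 device.
+  *   // The 7-bit address is shifted left by one, as required by the HAL API.
+  *   if (HAL_I2C_Mem_Read(&hi2c1, (uint16_t)(0x50U << 1), 0x0000U,
+  *                        I2C_MEMADD_SIZE_16BIT, data, sizeof(data), 100U) != HAL_OK)
+  *   {
+  *     // On failure, HAL_I2C_GetError(&hi2c1) reports the error code set by this driver.
+  *   }
+  * @endcode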
+ * @param DevAddress Target device address: The device 7 bits address value + * in datasheet must be shifted to the left before calling the interface + * @param MemAddress Internal memory address + * @param MemAddSize Size of internal memory address + * @param Timeout Timeout duration + * @param Tickstart Tick start value + * @retval HAL status + */ +static HAL_StatusTypeDef I2C_RequestMemoryRead(I2C_HandleTypeDef *hi2c, uint16_t DevAddress, + uint16_t MemAddress, uint16_t MemAddSize, uint32_t Timeout, + uint32_t Tickstart) +{ + I2C_TransferConfig(hi2c, DevAddress, (uint8_t)MemAddSize, I2C_SOFTEND_MODE, I2C_GENERATE_START_WRITE); + + /* Wait until TXIS flag is set */ + if (I2C_WaitOnTXISFlagUntilTimeout(hi2c, Timeout, Tickstart) != HAL_OK) + { + return HAL_ERROR; + } + + /* If Memory address size is 8Bit */ + if (MemAddSize == I2C_MEMADD_SIZE_8BIT) + { + /* Send Memory Address */ + hi2c->Instance->TXDR = I2C_MEM_ADD_LSB(MemAddress); + } + /* If Memory address size is 16Bit */ + else + { + /* Send MSB of Memory Address */ + hi2c->Instance->TXDR = I2C_MEM_ADD_MSB(MemAddress); + + /* Wait until TXIS flag is set */ + if (I2C_WaitOnTXISFlagUntilTimeout(hi2c, Timeout, Tickstart) != HAL_OK) + { + return HAL_ERROR; + } + + /* Send LSB of Memory Address */ + hi2c->Instance->TXDR = I2C_MEM_ADD_LSB(MemAddress); + } + + /* Wait until TC flag is set */ + if (I2C_WaitOnFlagUntilTimeout(hi2c, I2C_FLAG_TC, RESET, Timeout, Tickstart) != HAL_OK) + { + return HAL_ERROR; + } + + return HAL_OK; +} + +/** + * @brief I2C Address complete process callback. + * @param hi2c I2C handle. + * @param ITFlags Interrupt flags to handle. + * @retval None + */ +static void I2C_ITAddrCplt(I2C_HandleTypeDef *hi2c, uint32_t ITFlags) +{ + uint8_t transferdirection; + uint16_t slaveaddrcode; + uint16_t ownadd1code; + uint16_t ownadd2code; + + /* Prevent unused argument(s) compilation warning */ + UNUSED(ITFlags); + + /* In case of Listen state, need to inform upper layer of address match code event */ + if (((uint32_t)hi2c->State & (uint32_t)HAL_I2C_STATE_LISTEN) == (uint32_t)HAL_I2C_STATE_LISTEN) + { + transferdirection = I2C_GET_DIR(hi2c); + slaveaddrcode = I2C_GET_ADDR_MATCH(hi2c); + ownadd1code = I2C_GET_OWN_ADDRESS1(hi2c); + ownadd2code = I2C_GET_OWN_ADDRESS2(hi2c); + + /* If 10bits addressing mode is selected */ + if (hi2c->Init.AddressingMode == I2C_ADDRESSINGMODE_10BIT) + { + if ((slaveaddrcode & SLAVE_ADDR_MSK) == ((ownadd1code >> SLAVE_ADDR_SHIFT) & SLAVE_ADDR_MSK)) + { + slaveaddrcode = ownadd1code; + hi2c->AddrEventCount++; + if (hi2c->AddrEventCount == 2U) + { + /* Reset Address Event counter */ + hi2c->AddrEventCount = 0U; + + /* Clear ADDR flag */ + __HAL_I2C_CLEAR_FLAG(hi2c, I2C_FLAG_ADDR); + + /* Process Unlocked */ + __HAL_UNLOCK(hi2c); + + /* Call Slave Addr callback */ +#if (USE_HAL_I2C_REGISTER_CALLBACKS == 1) + hi2c->AddrCallback(hi2c, transferdirection, slaveaddrcode); +#else + HAL_I2C_AddrCallback(hi2c, transferdirection, slaveaddrcode); +#endif /* USE_HAL_I2C_REGISTER_CALLBACKS */ + } + } + else + { + slaveaddrcode = ownadd2code; + + /* Disable ADDR Interrupts */ + I2C_Disable_IRQ(hi2c, I2C_XFER_LISTEN_IT); + + /* Process Unlocked */ + __HAL_UNLOCK(hi2c); + + /* Call Slave Addr callback */ +#if (USE_HAL_I2C_REGISTER_CALLBACKS == 1) + hi2c->AddrCallback(hi2c, transferdirection, slaveaddrcode); +#else + HAL_I2C_AddrCallback(hi2c, transferdirection, slaveaddrcode); +#endif /* USE_HAL_I2C_REGISTER_CALLBACKS */ + } + } + /* else 7 bits addressing mode is selected */ + else + { + /* Disable ADDR Interrupts 
*/ + I2C_Disable_IRQ(hi2c, I2C_XFER_LISTEN_IT); + + /* Process Unlocked */ + __HAL_UNLOCK(hi2c); + + /* Call Slave Addr callback */ +#if (USE_HAL_I2C_REGISTER_CALLBACKS == 1) + hi2c->AddrCallback(hi2c, transferdirection, slaveaddrcode); +#else + HAL_I2C_AddrCallback(hi2c, transferdirection, slaveaddrcode); +#endif /* USE_HAL_I2C_REGISTER_CALLBACKS */ + } + } + /* Else clear address flag only */ + else + { + /* Clear ADDR flag */ + __HAL_I2C_CLEAR_FLAG(hi2c, I2C_FLAG_ADDR); + + /* Process Unlocked */ + __HAL_UNLOCK(hi2c); + } +} + +/** + * @brief I2C Master sequential complete process. + * @param hi2c I2C handle. + * @retval None + */ +static void I2C_ITMasterSeqCplt(I2C_HandleTypeDef *hi2c) +{ + /* Reset I2C handle mode */ + hi2c->Mode = HAL_I2C_MODE_NONE; + + /* No Generate Stop, to permit restart mode */ + /* The stop will be done at the end of transfer, when I2C_AUTOEND_MODE enable */ + if (hi2c->State == HAL_I2C_STATE_BUSY_TX) + { + hi2c->State = HAL_I2C_STATE_READY; + hi2c->PreviousState = I2C_STATE_MASTER_BUSY_TX; + hi2c->XferISR = NULL; + + /* Disable Interrupts */ + I2C_Disable_IRQ(hi2c, I2C_XFER_TX_IT); + + /* Process Unlocked */ + __HAL_UNLOCK(hi2c); + + /* Call the corresponding callback to inform upper layer of End of Transfer */ +#if (USE_HAL_I2C_REGISTER_CALLBACKS == 1) + hi2c->MasterTxCpltCallback(hi2c); +#else + HAL_I2C_MasterTxCpltCallback(hi2c); +#endif /* USE_HAL_I2C_REGISTER_CALLBACKS */ + } + /* hi2c->State == HAL_I2C_STATE_BUSY_RX */ + else + { + hi2c->State = HAL_I2C_STATE_READY; + hi2c->PreviousState = I2C_STATE_MASTER_BUSY_RX; + hi2c->XferISR = NULL; + + /* Disable Interrupts */ + I2C_Disable_IRQ(hi2c, I2C_XFER_RX_IT); + + /* Process Unlocked */ + __HAL_UNLOCK(hi2c); + + /* Call the corresponding callback to inform upper layer of End of Transfer */ +#if (USE_HAL_I2C_REGISTER_CALLBACKS == 1) + hi2c->MasterRxCpltCallback(hi2c); +#else + HAL_I2C_MasterRxCpltCallback(hi2c); +#endif /* USE_HAL_I2C_REGISTER_CALLBACKS */ + } +} + +/** + * @brief I2C Slave sequential complete process. + * @param hi2c I2C handle. 
+ * @retval None + */ +static void I2C_ITSlaveSeqCplt(I2C_HandleTypeDef *hi2c) +{ + uint32_t tmpcr1value = READ_REG(hi2c->Instance->CR1); + + /* Reset I2C handle mode */ + hi2c->Mode = HAL_I2C_MODE_NONE; + + /* If a DMA is ongoing, Update handle size context */ + if (I2C_CHECK_IT_SOURCE(tmpcr1value, I2C_CR1_TXDMAEN) != RESET) + { + /* Disable DMA Request */ + hi2c->Instance->CR1 &= ~I2C_CR1_TXDMAEN; + } + else if (I2C_CHECK_IT_SOURCE(tmpcr1value, I2C_CR1_RXDMAEN) != RESET) + { + /* Disable DMA Request */ + hi2c->Instance->CR1 &= ~I2C_CR1_RXDMAEN; + } + else + { + /* Do nothing */ + } + + if (hi2c->State == HAL_I2C_STATE_BUSY_TX_LISTEN) + { + /* Remove HAL_I2C_STATE_SLAVE_BUSY_TX, keep only HAL_I2C_STATE_LISTEN */ + hi2c->State = HAL_I2C_STATE_LISTEN; + hi2c->PreviousState = I2C_STATE_SLAVE_BUSY_TX; + + /* Disable Interrupts */ + I2C_Disable_IRQ(hi2c, I2C_XFER_TX_IT); + + /* Process Unlocked */ + __HAL_UNLOCK(hi2c); + + /* Call the corresponding callback to inform upper layer of End of Transfer */ +#if (USE_HAL_I2C_REGISTER_CALLBACKS == 1) + hi2c->SlaveTxCpltCallback(hi2c); +#else + HAL_I2C_SlaveTxCpltCallback(hi2c); +#endif /* USE_HAL_I2C_REGISTER_CALLBACKS */ + } + + else if (hi2c->State == HAL_I2C_STATE_BUSY_RX_LISTEN) + { + /* Remove HAL_I2C_STATE_SLAVE_BUSY_RX, keep only HAL_I2C_STATE_LISTEN */ + hi2c->State = HAL_I2C_STATE_LISTEN; + hi2c->PreviousState = I2C_STATE_SLAVE_BUSY_RX; + + /* Disable Interrupts */ + I2C_Disable_IRQ(hi2c, I2C_XFER_RX_IT); + + /* Process Unlocked */ + __HAL_UNLOCK(hi2c); + + /* Call the corresponding callback to inform upper layer of End of Transfer */ +#if (USE_HAL_I2C_REGISTER_CALLBACKS == 1) + hi2c->SlaveRxCpltCallback(hi2c); +#else + HAL_I2C_SlaveRxCpltCallback(hi2c); +#endif /* USE_HAL_I2C_REGISTER_CALLBACKS */ + } + else + { + /* Nothing to do */ + } +} + +/** + * @brief I2C Master complete process. + * @param hi2c I2C handle. + * @param ITFlags Interrupt flags to handle. 
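+  * @note   Illustrative sketch: this helper ends a master transfer by invoking the
+  *         (weak) master complete callbacks, which application code may override.
+  *         The rx_done flag and the I2C1 instance check below are placeholder
+  *         assumptions.
+  * @code
+  *   volatile uint8_t rx_done = 0U;   // set in interrupt context, polled by the application
+  *
+  *   void HAL_I2C_MasterRxCpltCallback(I2C_HandleTypeDef *hi2c)
+  *   {
+  *     if (hi2c->Instance == I2C1)    // react only to the expected instance
+  *     {
+  *       rx_done = 1U;
+  *     }
+  *   }
+  * @endcode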
+ * @retval None + */ +static void I2C_ITMasterCplt(I2C_HandleTypeDef *hi2c, uint32_t ITFlags) +{ + uint32_t tmperror; + uint32_t tmpITFlags = ITFlags; + __IO uint32_t tmpreg; + + /* Clear STOP Flag */ + __HAL_I2C_CLEAR_FLAG(hi2c, I2C_FLAG_STOPF); + + /* Disable Interrupts and Store Previous state */ + if (hi2c->State == HAL_I2C_STATE_BUSY_TX) + { + I2C_Disable_IRQ(hi2c, I2C_XFER_TX_IT); + hi2c->PreviousState = I2C_STATE_MASTER_BUSY_TX; + } + else if (hi2c->State == HAL_I2C_STATE_BUSY_RX) + { + I2C_Disable_IRQ(hi2c, I2C_XFER_RX_IT); + hi2c->PreviousState = I2C_STATE_MASTER_BUSY_RX; + } + else + { + /* Do nothing */ + } + + /* Clear Configuration Register 2 */ + I2C_RESET_CR2(hi2c); + + /* Reset handle parameters */ + hi2c->XferISR = NULL; + hi2c->XferOptions = I2C_NO_OPTION_FRAME; + + if (I2C_CHECK_FLAG(tmpITFlags, I2C_FLAG_AF) != RESET) + { + /* Clear NACK Flag */ + __HAL_I2C_CLEAR_FLAG(hi2c, I2C_FLAG_AF); + + /* Set acknowledge error code */ + hi2c->ErrorCode |= HAL_I2C_ERROR_AF; + } + + /* Fetch Last receive data if any */ + if ((hi2c->State == HAL_I2C_STATE_ABORT) && (I2C_CHECK_FLAG(tmpITFlags, I2C_FLAG_RXNE) != RESET)) + { + /* Read data from RXDR */ + tmpreg = (uint8_t)hi2c->Instance->RXDR; + UNUSED(tmpreg); + } + + /* Flush TX register */ + I2C_Flush_TXDR(hi2c); + + /* Store current volatile hi2c->ErrorCode, misra rule */ + tmperror = hi2c->ErrorCode; + + /* Call the corresponding callback to inform upper layer of End of Transfer */ + if ((hi2c->State == HAL_I2C_STATE_ABORT) || (tmperror != HAL_I2C_ERROR_NONE)) + { + /* Call the corresponding callback to inform upper layer of End of Transfer */ + I2C_ITError(hi2c, hi2c->ErrorCode); + } + /* hi2c->State == HAL_I2C_STATE_BUSY_TX */ + else if (hi2c->State == HAL_I2C_STATE_BUSY_TX) + { + hi2c->State = HAL_I2C_STATE_READY; + hi2c->PreviousState = I2C_STATE_NONE; + + if (hi2c->Mode == HAL_I2C_MODE_MEM) + { + hi2c->Mode = HAL_I2C_MODE_NONE; + + /* Process Unlocked */ + __HAL_UNLOCK(hi2c); + + /* Call the corresponding callback to inform upper layer of End of Transfer */ +#if (USE_HAL_I2C_REGISTER_CALLBACKS == 1) + hi2c->MemTxCpltCallback(hi2c); +#else + HAL_I2C_MemTxCpltCallback(hi2c); +#endif /* USE_HAL_I2C_REGISTER_CALLBACKS */ + } + else + { + hi2c->Mode = HAL_I2C_MODE_NONE; + + /* Process Unlocked */ + __HAL_UNLOCK(hi2c); + + /* Call the corresponding callback to inform upper layer of End of Transfer */ +#if (USE_HAL_I2C_REGISTER_CALLBACKS == 1) + hi2c->MasterTxCpltCallback(hi2c); +#else + HAL_I2C_MasterTxCpltCallback(hi2c); +#endif /* USE_HAL_I2C_REGISTER_CALLBACKS */ + } + } + /* hi2c->State == HAL_I2C_STATE_BUSY_RX */ + else if (hi2c->State == HAL_I2C_STATE_BUSY_RX) + { + hi2c->State = HAL_I2C_STATE_READY; + hi2c->PreviousState = I2C_STATE_NONE; + + if (hi2c->Mode == HAL_I2C_MODE_MEM) + { + hi2c->Mode = HAL_I2C_MODE_NONE; + + /* Process Unlocked */ + __HAL_UNLOCK(hi2c); + + /* Call the corresponding callback to inform upper layer of End of Transfer */ +#if (USE_HAL_I2C_REGISTER_CALLBACKS == 1) + hi2c->MemRxCpltCallback(hi2c); +#else + HAL_I2C_MemRxCpltCallback(hi2c); +#endif /* USE_HAL_I2C_REGISTER_CALLBACKS */ + } + else + { + hi2c->Mode = HAL_I2C_MODE_NONE; + + /* Process Unlocked */ + __HAL_UNLOCK(hi2c); + + /* Call the corresponding callback to inform upper layer of End of Transfer */ +#if (USE_HAL_I2C_REGISTER_CALLBACKS == 1) + hi2c->MasterRxCpltCallback(hi2c); +#else + HAL_I2C_MasterRxCpltCallback(hi2c); +#endif /* USE_HAL_I2C_REGISTER_CALLBACKS */ + } + } + else + { + /* Nothing to do */ + } +} + +/** + * @brief I2C Slave 
complete process. + * @param hi2c I2C handle. + * @param ITFlags Interrupt flags to handle. + * @retval None + */ +static void I2C_ITSlaveCplt(I2C_HandleTypeDef *hi2c, uint32_t ITFlags) +{ + uint32_t tmpcr1value = READ_REG(hi2c->Instance->CR1); + uint32_t tmpITFlags = ITFlags; + HAL_I2C_StateTypeDef tmpstate = hi2c->State; + + /* Clear STOP Flag */ + __HAL_I2C_CLEAR_FLAG(hi2c, I2C_FLAG_STOPF); + + /* Disable Interrupts and Store Previous state */ + if ((tmpstate == HAL_I2C_STATE_BUSY_TX) || (tmpstate == HAL_I2C_STATE_BUSY_TX_LISTEN)) + { + I2C_Disable_IRQ(hi2c, I2C_XFER_LISTEN_IT | I2C_XFER_TX_IT); + hi2c->PreviousState = I2C_STATE_SLAVE_BUSY_TX; + } + else if ((tmpstate == HAL_I2C_STATE_BUSY_RX) || (tmpstate == HAL_I2C_STATE_BUSY_RX_LISTEN)) + { + I2C_Disable_IRQ(hi2c, I2C_XFER_LISTEN_IT | I2C_XFER_RX_IT); + hi2c->PreviousState = I2C_STATE_SLAVE_BUSY_RX; + } + else + { + /* Do nothing */ + } + + /* Disable Address Acknowledge */ + hi2c->Instance->CR2 |= I2C_CR2_NACK; + + /* Clear Configuration Register 2 */ + I2C_RESET_CR2(hi2c); + + /* Flush TX register */ + I2C_Flush_TXDR(hi2c); + + /* If a DMA is ongoing, Update handle size context */ + if (I2C_CHECK_IT_SOURCE(tmpcr1value, I2C_CR1_TXDMAEN) != RESET) + { + /* Disable DMA Request */ + hi2c->Instance->CR1 &= ~I2C_CR1_TXDMAEN; + + if (hi2c->hdmatx != NULL) + { + hi2c->XferCount = (uint16_t)I2C_GET_DMA_REMAIN_DATA(hi2c->hdmatx); + } + } + else if (I2C_CHECK_IT_SOURCE(tmpcr1value, I2C_CR1_RXDMAEN) != RESET) + { + /* Disable DMA Request */ + hi2c->Instance->CR1 &= ~I2C_CR1_RXDMAEN; + + if (hi2c->hdmarx != NULL) + { + hi2c->XferCount = (uint16_t)I2C_GET_DMA_REMAIN_DATA(hi2c->hdmarx); + } + } + else + { + /* Do nothing */ + } + + /* Store Last receive data if any */ + if (I2C_CHECK_FLAG(tmpITFlags, I2C_FLAG_RXNE) != RESET) + { + /* Remove RXNE flag on temporary variable as read done */ + tmpITFlags &= ~I2C_FLAG_RXNE; + + /* Read data from RXDR */ + *hi2c->pBuffPtr = (uint8_t)hi2c->Instance->RXDR; + + /* Increment Buffer pointer */ + hi2c->pBuffPtr++; + + if ((hi2c->XferSize > 0U)) + { + hi2c->XferSize--; + hi2c->XferCount--; + } + } + + /* All data are not transferred, so set error code accordingly */ + if (hi2c->XferCount != 0U) + { + /* Set ErrorCode corresponding to a Non-Acknowledge */ + hi2c->ErrorCode |= HAL_I2C_ERROR_AF; + } + + hi2c->Mode = HAL_I2C_MODE_NONE; + hi2c->XferISR = NULL; + + if (hi2c->ErrorCode != HAL_I2C_ERROR_NONE) + { + /* Call the corresponding callback to inform upper layer of End of Transfer */ + I2C_ITError(hi2c, hi2c->ErrorCode); + + /* Call the Listen Complete callback, to inform upper layer of the end of Listen usecase */ + if (hi2c->State == HAL_I2C_STATE_LISTEN) + { + /* Call I2C Listen complete process */ + I2C_ITListenCplt(hi2c, tmpITFlags); + } + } + else if (hi2c->XferOptions != I2C_NO_OPTION_FRAME) + { + /* Call the Sequential Complete callback, to inform upper layer of the end of Transfer */ + I2C_ITSlaveSeqCplt(hi2c); + + hi2c->XferOptions = I2C_NO_OPTION_FRAME; + hi2c->State = HAL_I2C_STATE_READY; + hi2c->PreviousState = I2C_STATE_NONE; + + /* Process Unlocked */ + __HAL_UNLOCK(hi2c); + + /* Call the Listen Complete callback, to inform upper layer of the end of Listen usecase */ +#if (USE_HAL_I2C_REGISTER_CALLBACKS == 1) + hi2c->ListenCpltCallback(hi2c); +#else + HAL_I2C_ListenCpltCallback(hi2c); +#endif /* USE_HAL_I2C_REGISTER_CALLBACKS */ + } + /* Call the corresponding callback to inform upper layer of End of Transfer */ + else if (hi2c->State == HAL_I2C_STATE_BUSY_RX) + { + hi2c->State = 
HAL_I2C_STATE_READY; + hi2c->PreviousState = I2C_STATE_NONE; + + /* Process Unlocked */ + __HAL_UNLOCK(hi2c); + + /* Call the corresponding callback to inform upper layer of End of Transfer */ +#if (USE_HAL_I2C_REGISTER_CALLBACKS == 1) + hi2c->SlaveRxCpltCallback(hi2c); +#else + HAL_I2C_SlaveRxCpltCallback(hi2c); +#endif /* USE_HAL_I2C_REGISTER_CALLBACKS */ + } + else + { + hi2c->State = HAL_I2C_STATE_READY; + hi2c->PreviousState = I2C_STATE_NONE; + + /* Process Unlocked */ + __HAL_UNLOCK(hi2c); + + /* Call the corresponding callback to inform upper layer of End of Transfer */ +#if (USE_HAL_I2C_REGISTER_CALLBACKS == 1) + hi2c->SlaveTxCpltCallback(hi2c); +#else + HAL_I2C_SlaveTxCpltCallback(hi2c); +#endif /* USE_HAL_I2C_REGISTER_CALLBACKS */ + } +} + +/** + * @brief I2C Listen complete process. + * @param hi2c I2C handle. + * @param ITFlags Interrupt flags to handle. + * @retval None + */ +static void I2C_ITListenCplt(I2C_HandleTypeDef *hi2c, uint32_t ITFlags) +{ + /* Reset handle parameters */ + hi2c->XferOptions = I2C_NO_OPTION_FRAME; + hi2c->PreviousState = I2C_STATE_NONE; + hi2c->State = HAL_I2C_STATE_READY; + hi2c->Mode = HAL_I2C_MODE_NONE; + hi2c->XferISR = NULL; + + /* Store Last receive data if any */ + if (I2C_CHECK_FLAG(ITFlags, I2C_FLAG_RXNE) != RESET) + { + /* Read data from RXDR */ + *hi2c->pBuffPtr = (uint8_t)hi2c->Instance->RXDR; + + /* Increment Buffer pointer */ + hi2c->pBuffPtr++; + + if ((hi2c->XferSize > 0U)) + { + hi2c->XferSize--; + hi2c->XferCount--; + + /* Set ErrorCode corresponding to a Non-Acknowledge */ + hi2c->ErrorCode |= HAL_I2C_ERROR_AF; + } + } + + /* Disable all Interrupts*/ + I2C_Disable_IRQ(hi2c, I2C_XFER_LISTEN_IT | I2C_XFER_RX_IT | I2C_XFER_TX_IT); + + /* Clear NACK Flag */ + __HAL_I2C_CLEAR_FLAG(hi2c, I2C_FLAG_AF); + + /* Process Unlocked */ + __HAL_UNLOCK(hi2c); + + /* Call the Listen Complete callback, to inform upper layer of the end of Listen usecase */ +#if (USE_HAL_I2C_REGISTER_CALLBACKS == 1) + hi2c->ListenCpltCallback(hi2c); +#else + HAL_I2C_ListenCpltCallback(hi2c); +#endif /* USE_HAL_I2C_REGISTER_CALLBACKS */ +} + +/** + * @brief I2C interrupts error process. + * @param hi2c I2C handle. + * @param ErrorCode Error code to handle. 
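+  * @note   Illustrative sketch: errors processed here are ultimately reported to the
+  *         application through the weak HAL_I2C_ErrorCallback(), which can be
+  *         overridden to inspect HAL_I2C_GetError(). The recovery comment below is a
+  *         placeholder; no specific recovery strategy is implied by this driver.
+  * @code
+  *   void HAL_I2C_ErrorCallback(I2C_HandleTypeDef *hi2c)
+  *   {
+  *     uint32_t err = HAL_I2C_GetError(hi2c);
+  *
+  *     if ((err & HAL_I2C_ERROR_AF) != 0U)
+  *     {
+  *       // Slave did not acknowledge: e.g. retry later or mark the device as absent.
+  *     }
+  *   }
+  * @endcode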
+ * @retval None + */ +static void I2C_ITError(I2C_HandleTypeDef *hi2c, uint32_t ErrorCode) +{ + HAL_I2C_StateTypeDef tmpstate = hi2c->State; + uint32_t tmppreviousstate; + + /* Reset handle parameters */ + hi2c->Mode = HAL_I2C_MODE_NONE; + hi2c->XferOptions = I2C_NO_OPTION_FRAME; + hi2c->XferCount = 0U; + + /* Set new error code */ + hi2c->ErrorCode |= ErrorCode; + + /* Disable Interrupts */ + if ((tmpstate == HAL_I2C_STATE_LISTEN) || + (tmpstate == HAL_I2C_STATE_BUSY_TX_LISTEN) || + (tmpstate == HAL_I2C_STATE_BUSY_RX_LISTEN)) + { + /* Disable all interrupts, except interrupts related to LISTEN state */ + I2C_Disable_IRQ(hi2c, I2C_XFER_RX_IT | I2C_XFER_TX_IT); + + /* keep HAL_I2C_STATE_LISTEN if set */ + hi2c->State = HAL_I2C_STATE_LISTEN; + hi2c->XferISR = I2C_Slave_ISR_IT; + } + else + { + /* Disable all interrupts */ + I2C_Disable_IRQ(hi2c, I2C_XFER_LISTEN_IT | I2C_XFER_RX_IT | I2C_XFER_TX_IT); + + /* If state is an abort treatment on going, don't change state */ + /* This change will be do later */ + if (hi2c->State != HAL_I2C_STATE_ABORT) + { + /* Set HAL_I2C_STATE_READY */ + hi2c->State = HAL_I2C_STATE_READY; + } + hi2c->XferISR = NULL; + } + + /* Abort DMA TX transfer if any */ + tmppreviousstate = hi2c->PreviousState; + if ((hi2c->hdmatx != NULL) && ((tmppreviousstate == I2C_STATE_MASTER_BUSY_TX) || \ + (tmppreviousstate == I2C_STATE_SLAVE_BUSY_TX))) + { + if ((hi2c->Instance->CR1 & I2C_CR1_TXDMAEN) == I2C_CR1_TXDMAEN) + { + hi2c->Instance->CR1 &= ~I2C_CR1_TXDMAEN; + } + + if (HAL_DMA_GetState(hi2c->hdmatx) != HAL_DMA_STATE_READY) + { + /* Set the I2C DMA Abort callback : + will lead to call HAL_I2C_ErrorCallback() at end of DMA abort procedure */ + hi2c->hdmatx->XferAbortCallback = I2C_DMAAbort; + + /* Process Unlocked */ + __HAL_UNLOCK(hi2c); + + /* Abort DMA TX */ + if (HAL_DMA_Abort_IT(hi2c->hdmatx) != HAL_OK) + { + /* Call Directly XferAbortCallback function in case of error */ + hi2c->hdmatx->XferAbortCallback(hi2c->hdmatx); + } + } + else + { + I2C_TreatErrorCallback(hi2c); + } + } + /* Abort DMA RX transfer if any */ + else if ((hi2c->hdmarx != NULL) && ((tmppreviousstate == I2C_STATE_MASTER_BUSY_RX) || \ + (tmppreviousstate == I2C_STATE_SLAVE_BUSY_RX))) + { + if ((hi2c->Instance->CR1 & I2C_CR1_RXDMAEN) == I2C_CR1_RXDMAEN) + { + hi2c->Instance->CR1 &= ~I2C_CR1_RXDMAEN; + } + + if (HAL_DMA_GetState(hi2c->hdmarx) != HAL_DMA_STATE_READY) + { + /* Set the I2C DMA Abort callback : + will lead to call HAL_I2C_ErrorCallback() at end of DMA abort procedure */ + hi2c->hdmarx->XferAbortCallback = I2C_DMAAbort; + + /* Process Unlocked */ + __HAL_UNLOCK(hi2c); + + /* Abort DMA RX */ + if (HAL_DMA_Abort_IT(hi2c->hdmarx) != HAL_OK) + { + /* Call Directly hi2c->hdmarx->XferAbortCallback function in case of error */ + hi2c->hdmarx->XferAbortCallback(hi2c->hdmarx); + } + } + else + { + I2C_TreatErrorCallback(hi2c); + } + } + else + { + I2C_TreatErrorCallback(hi2c); + } +} + +/** + * @brief I2C Error callback treatment. + * @param hi2c I2C handle. 
+ * @retval None + */ +static void I2C_TreatErrorCallback(I2C_HandleTypeDef *hi2c) +{ + if (hi2c->State == HAL_I2C_STATE_ABORT) + { + hi2c->State = HAL_I2C_STATE_READY; + hi2c->PreviousState = I2C_STATE_NONE; + + /* Process Unlocked */ + __HAL_UNLOCK(hi2c); + + /* Call the corresponding callback to inform upper layer of End of Transfer */ +#if (USE_HAL_I2C_REGISTER_CALLBACKS == 1) + hi2c->AbortCpltCallback(hi2c); +#else + HAL_I2C_AbortCpltCallback(hi2c); +#endif /* USE_HAL_I2C_REGISTER_CALLBACKS */ + } + else + { + hi2c->PreviousState = I2C_STATE_NONE; + + /* Process Unlocked */ + __HAL_UNLOCK(hi2c); + + /* Call the corresponding callback to inform upper layer of End of Transfer */ +#if (USE_HAL_I2C_REGISTER_CALLBACKS == 1) + hi2c->ErrorCallback(hi2c); +#else + HAL_I2C_ErrorCallback(hi2c); +#endif /* USE_HAL_I2C_REGISTER_CALLBACKS */ + } +} + +/** + * @brief I2C Tx data register flush process. + * @param hi2c I2C handle. + * @retval None + */ +static void I2C_Flush_TXDR(I2C_HandleTypeDef *hi2c) +{ + /* If a pending TXIS flag is set */ + /* Write a dummy data in TXDR to clear it */ + if (__HAL_I2C_GET_FLAG(hi2c, I2C_FLAG_TXIS) != RESET) + { + hi2c->Instance->TXDR = 0x00U; + } + + /* Flush TX register if not empty */ + if (__HAL_I2C_GET_FLAG(hi2c, I2C_FLAG_TXE) == RESET) + { + __HAL_I2C_CLEAR_FLAG(hi2c, I2C_FLAG_TXE); + } +} + +/** + * @brief DMA I2C master transmit process complete callback. + * @param hdma DMA handle + * @retval None + */ +static void I2C_DMAMasterTransmitCplt(DMA_HandleTypeDef *hdma) +{ + /* Derogation MISRAC2012-Rule-11.5 */ + I2C_HandleTypeDef *hi2c = (I2C_HandleTypeDef *)(((DMA_HandleTypeDef *)hdma)->Parent); + + /* Disable DMA Request */ + hi2c->Instance->CR1 &= ~I2C_CR1_TXDMAEN; + + /* If last transfer, enable STOP interrupt */ + if (hi2c->XferCount == 0U) + { + /* Enable STOP interrupt */ + I2C_Enable_IRQ(hi2c, I2C_XFER_CPLT_IT); + } + /* else prepare a new DMA transfer and enable TCReload interrupt */ + else + { + /* Update Buffer pointer */ + hi2c->pBuffPtr += hi2c->XferSize; + + /* Set the XferSize to transfer */ + if (hi2c->XferCount > MAX_NBYTE_SIZE) + { + hi2c->XferSize = MAX_NBYTE_SIZE; + } + else + { + hi2c->XferSize = hi2c->XferCount; + } + + /* Enable the DMA stream */ + if (HAL_DMA_Start_IT(hi2c->hdmatx, (uint32_t)hi2c->pBuffPtr, (uint32_t)&hi2c->Instance->TXDR, + hi2c->XferSize) != HAL_OK) + { + /* Call the corresponding callback to inform upper layer of End of Transfer */ + I2C_ITError(hi2c, HAL_I2C_ERROR_DMA); + } + else + { + /* Enable TC interrupts */ + I2C_Enable_IRQ(hi2c, I2C_XFER_RELOAD_IT); + } + } +} + +/** + * @brief DMA I2C slave transmit process complete callback. + * @param hdma DMA handle + * @retval None + */ +static void I2C_DMASlaveTransmitCplt(DMA_HandleTypeDef *hdma) +{ + /* Derogation MISRAC2012-Rule-11.5 */ + I2C_HandleTypeDef *hi2c = (I2C_HandleTypeDef *)(((DMA_HandleTypeDef *)hdma)->Parent); + uint32_t tmpoptions = hi2c->XferOptions; + + if ((tmpoptions == I2C_NEXT_FRAME) || (tmpoptions == I2C_FIRST_FRAME)) + { + /* Disable DMA Request */ + hi2c->Instance->CR1 &= ~I2C_CR1_TXDMAEN; + + /* Last Byte is Transmitted */ + /* Call I2C Slave Sequential complete process */ + I2C_ITSlaveSeqCplt(hi2c); + } + else + { + /* No specific action, Master fully manage the generation of STOP condition */ + /* Mean that this generation can arrive at any time, at the end or during DMA process */ + /* So STOP condition should be manage through Interrupt treatment */ + } +} + +/** + * @brief DMA I2C master receive process complete callback. 
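+  * @note   Illustrative sketch of the call that normally leads to this completion
+  *         path: a non-blocking receive started with HAL_I2C_Master_Receive_DMA(),
+  *         assuming the RX DMA channel has already been linked to the handle
+  *         (hi2c->hdmarx). Handle, address and buffer names are placeholder
+  *         assumptions.
+  * @code
+  *   extern I2C_HandleTypeDef hi2c1;   // assumed handle with hdmarx linked
+  *   static uint8_t rx_buf[64];
+  *
+  *   if (HAL_I2C_Master_Receive_DMA(&hi2c1, (uint16_t)(0x50U << 1),
+  *                                  rx_buf, sizeof(rx_buf)) != HAL_OK)
+  *   {
+  *     // Transfer could not be started; inspect HAL_I2C_GetError(&hi2c1).
+  *   }
+  *   // Completion is later signalled via HAL_I2C_MasterRxCpltCallback().
+  * @endcode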
+ * @param hdma DMA handle + * @retval None + */ +static void I2C_DMAMasterReceiveCplt(DMA_HandleTypeDef *hdma) +{ + /* Derogation MISRAC2012-Rule-11.5 */ + I2C_HandleTypeDef *hi2c = (I2C_HandleTypeDef *)(((DMA_HandleTypeDef *)hdma)->Parent); + + /* Disable DMA Request */ + hi2c->Instance->CR1 &= ~I2C_CR1_RXDMAEN; + + /* If last transfer, enable STOP interrupt */ + if (hi2c->XferCount == 0U) + { + /* Enable STOP interrupt */ + I2C_Enable_IRQ(hi2c, I2C_XFER_CPLT_IT); + } + /* else prepare a new DMA transfer and enable TCReload interrupt */ + else + { + /* Update Buffer pointer */ + hi2c->pBuffPtr += hi2c->XferSize; + + /* Set the XferSize to transfer */ + if (hi2c->XferCount > MAX_NBYTE_SIZE) + { + hi2c->XferSize = MAX_NBYTE_SIZE; + } + else + { + hi2c->XferSize = hi2c->XferCount; + } + + /* Enable the DMA stream */ + if (HAL_DMA_Start_IT(hi2c->hdmarx, (uint32_t)&hi2c->Instance->RXDR, (uint32_t)hi2c->pBuffPtr, + hi2c->XferSize) != HAL_OK) + { + /* Call the corresponding callback to inform upper layer of End of Transfer */ + I2C_ITError(hi2c, HAL_I2C_ERROR_DMA); + } + else + { + /* Enable TC interrupts */ + I2C_Enable_IRQ(hi2c, I2C_XFER_RELOAD_IT); + } + } +} + +/** + * @brief DMA I2C slave receive process complete callback. + * @param hdma DMA handle + * @retval None + */ +static void I2C_DMASlaveReceiveCplt(DMA_HandleTypeDef *hdma) +{ + /* Derogation MISRAC2012-Rule-11.5 */ + I2C_HandleTypeDef *hi2c = (I2C_HandleTypeDef *)(((DMA_HandleTypeDef *)hdma)->Parent); + uint32_t tmpoptions = hi2c->XferOptions; + + if ((I2C_GET_DMA_REMAIN_DATA(hi2c->hdmarx) == 0U) && \ + (tmpoptions != I2C_NO_OPTION_FRAME)) + { + /* Disable DMA Request */ + hi2c->Instance->CR1 &= ~I2C_CR1_RXDMAEN; + + /* Call I2C Slave Sequential complete process */ + I2C_ITSlaveSeqCplt(hi2c); + } + else + { + /* No specific action, Master fully manage the generation of STOP condition */ + /* Mean that this generation can arrive at any time, at the end or during DMA process */ + /* So STOP condition should be manage through Interrupt treatment */ + } +} + +/** + * @brief DMA I2C communication error callback. + * @param hdma DMA handle + * @retval None + */ +static void I2C_DMAError(DMA_HandleTypeDef *hdma) +{ + uint32_t treatdmaerror = 0U; + /* Derogation MISRAC2012-Rule-11.5 */ + I2C_HandleTypeDef *hi2c = (I2C_HandleTypeDef *)(((DMA_HandleTypeDef *)hdma)->Parent); + + if (hi2c->hdmatx != NULL) + { + if (I2C_GET_DMA_REMAIN_DATA(hi2c->hdmatx) == 0U) + { + treatdmaerror = 1U; + } + } + + if (hi2c->hdmarx != NULL) + { + if (I2C_GET_DMA_REMAIN_DATA(hi2c->hdmarx) == 0U) + { + treatdmaerror = 1U; + } + } + + /* Check if a FIFO error is detected, if true normal use case, so no specific action to perform */ + if (!((HAL_DMA_GetError(hdma) == HAL_DMA_ERROR_FE)) && (treatdmaerror != 0U)) + { + /* Disable Acknowledge */ + hi2c->Instance->CR2 |= I2C_CR2_NACK; + + /* Call the corresponding callback to inform upper layer of End of Transfer */ + I2C_ITError(hi2c, HAL_I2C_ERROR_DMA); + } +} + +/** + * @brief DMA I2C communication abort callback + * (To be called at end of DMA Abort procedure). + * @param hdma DMA handle. 
+ * @retval None + */ +static void I2C_DMAAbort(DMA_HandleTypeDef *hdma) +{ + /* Derogation MISRAC2012-Rule-11.5 */ + I2C_HandleTypeDef *hi2c = (I2C_HandleTypeDef *)(((DMA_HandleTypeDef *)hdma)->Parent); + + /* Reset AbortCpltCallback */ + if (hi2c->hdmatx != NULL) + { + hi2c->hdmatx->XferAbortCallback = NULL; + } + if (hi2c->hdmarx != NULL) + { + hi2c->hdmarx->XferAbortCallback = NULL; + } + + I2C_TreatErrorCallback(hi2c); +} + +/** + * @brief This function handles I2C Communication Timeout. It waits + * until a flag is no longer in the specified status. + * @param hi2c Pointer to a I2C_HandleTypeDef structure that contains + * the configuration information for the specified I2C. + * @param Flag Specifies the I2C flag to check. + * @param Status The actual Flag status (SET or RESET). + * @param Timeout Timeout duration + * @param Tickstart Tick start value + * @retval HAL status + */ +static HAL_StatusTypeDef I2C_WaitOnFlagUntilTimeout(I2C_HandleTypeDef *hi2c, uint32_t Flag, FlagStatus Status, + uint32_t Timeout, uint32_t Tickstart) +{ + while (__HAL_I2C_GET_FLAG(hi2c, Flag) == Status) + { + /* Check for the Timeout */ + if (Timeout != HAL_MAX_DELAY) + { + if (((HAL_GetTick() - Tickstart) > Timeout) || (Timeout == 0U)) + { + hi2c->ErrorCode |= HAL_I2C_ERROR_TIMEOUT; + hi2c->State = HAL_I2C_STATE_READY; + hi2c->Mode = HAL_I2C_MODE_NONE; + + /* Process Unlocked */ + __HAL_UNLOCK(hi2c); + return HAL_ERROR; + } + } + } + return HAL_OK; +} + +/** + * @brief This function handles I2C Communication Timeout for specific usage of TXIS flag. + * @param hi2c Pointer to a I2C_HandleTypeDef structure that contains + * the configuration information for the specified I2C. + * @param Timeout Timeout duration + * @param Tickstart Tick start value + * @retval HAL status + */ +static HAL_StatusTypeDef I2C_WaitOnTXISFlagUntilTimeout(I2C_HandleTypeDef *hi2c, uint32_t Timeout, + uint32_t Tickstart) +{ + while (__HAL_I2C_GET_FLAG(hi2c, I2C_FLAG_TXIS) == RESET) + { + /* Check if an error is detected */ + if (I2C_IsErrorOccurred(hi2c, Timeout, Tickstart) != HAL_OK) + { + return HAL_ERROR; + } + + /* Check for the Timeout */ + if (Timeout != HAL_MAX_DELAY) + { + if (((HAL_GetTick() - Tickstart) > Timeout) || (Timeout == 0U)) + { + hi2c->ErrorCode |= HAL_I2C_ERROR_TIMEOUT; + hi2c->State = HAL_I2C_STATE_READY; + hi2c->Mode = HAL_I2C_MODE_NONE; + + /* Process Unlocked */ + __HAL_UNLOCK(hi2c); + + return HAL_ERROR; + } + } + } + return HAL_OK; +} + +/** + * @brief This function handles I2C Communication Timeout for specific usage of STOP flag. + * @param hi2c Pointer to a I2C_HandleTypeDef structure that contains + * the configuration information for the specified I2C. + * @param Timeout Timeout duration + * @param Tickstart Tick start value + * @retval HAL status + */ +static HAL_StatusTypeDef I2C_WaitOnSTOPFlagUntilTimeout(I2C_HandleTypeDef *hi2c, uint32_t Timeout, + uint32_t Tickstart) +{ + while (__HAL_I2C_GET_FLAG(hi2c, I2C_FLAG_STOPF) == RESET) + { + /* Check if an error is detected */ + if (I2C_IsErrorOccurred(hi2c, Timeout, Tickstart) != HAL_OK) + { + return HAL_ERROR; + } + + /* Check for the Timeout */ + if (((HAL_GetTick() - Tickstart) > Timeout) || (Timeout == 0U)) + { + hi2c->ErrorCode |= HAL_I2C_ERROR_TIMEOUT; + hi2c->State = HAL_I2C_STATE_READY; + hi2c->Mode = HAL_I2C_MODE_NONE; + + /* Process Unlocked */ + __HAL_UNLOCK(hi2c); + + return HAL_ERROR; + } + } + return HAL_OK; +} + +/** + * @brief This function handles I2C Communication Timeout for specific usage of RXNE flag. 
+ * @param hi2c Pointer to a I2C_HandleTypeDef structure that contains + * the configuration information for the specified I2C. + * @param Timeout Timeout duration + * @param Tickstart Tick start value + * @retval HAL status + */ +static HAL_StatusTypeDef I2C_WaitOnRXNEFlagUntilTimeout(I2C_HandleTypeDef *hi2c, uint32_t Timeout, + uint32_t Tickstart) +{ + while (__HAL_I2C_GET_FLAG(hi2c, I2C_FLAG_RXNE) == RESET) + { + /* Check if an error is detected */ + if (I2C_IsErrorOccurred(hi2c, Timeout, Tickstart) != HAL_OK) + { + return HAL_ERROR; + } + + /* Check if a STOPF is detected */ + if (__HAL_I2C_GET_FLAG(hi2c, I2C_FLAG_STOPF) == SET) + { + /* Check if an RXNE is pending */ + /* Store Last receive data if any */ + if ((__HAL_I2C_GET_FLAG(hi2c, I2C_FLAG_RXNE) == SET) && (hi2c->XferSize > 0U)) + { + /* Return HAL_OK */ + /* The Reading of data from RXDR will be done in caller function */ + return HAL_OK; + } + else + { + if (__HAL_I2C_GET_FLAG(hi2c, I2C_FLAG_AF) == SET) + { + __HAL_I2C_CLEAR_FLAG(hi2c, I2C_FLAG_AF); + hi2c->ErrorCode = HAL_I2C_ERROR_AF; + } + else + { + hi2c->ErrorCode = HAL_I2C_ERROR_NONE; + } + + /* Clear STOP Flag */ + __HAL_I2C_CLEAR_FLAG(hi2c, I2C_FLAG_STOPF); + + /* Clear Configuration Register 2 */ + I2C_RESET_CR2(hi2c); + + hi2c->State = HAL_I2C_STATE_READY; + hi2c->Mode = HAL_I2C_MODE_NONE; + + /* Process Unlocked */ + __HAL_UNLOCK(hi2c); + + return HAL_ERROR; + } + } + + /* Check for the Timeout */ + if (((HAL_GetTick() - Tickstart) > Timeout) || (Timeout == 0U)) + { + hi2c->ErrorCode |= HAL_I2C_ERROR_TIMEOUT; + hi2c->State = HAL_I2C_STATE_READY; + + /* Process Unlocked */ + __HAL_UNLOCK(hi2c); + + return HAL_ERROR; + } + } + return HAL_OK; +} + +/** + * @brief This function handles errors detection during an I2C Communication. + * @param hi2c Pointer to a I2C_HandleTypeDef structure that contains + * the configuration information for the specified I2C. 
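+  * @note   Illustrative sketch: when a blocking transfer fails because an error was
+  *         detected here, the accumulated error code can be read back with
+  *         HAL_I2C_GetError(). Handle name and slave address are placeholder
+  *         assumptions.
+  * @code
+  *   extern I2C_HandleTypeDef hi2c1;   // assumed handle
+  *   uint8_t cmd = 0x01U;
+  *
+  *   if (HAL_I2C_Master_Transmit(&hi2c1, (uint16_t)(0x3CU << 1), &cmd, 1U, 50U) != HAL_OK)
+  *   {
+  *     if ((HAL_I2C_GetError(&hi2c1) & HAL_I2C_ERROR_BERR) != 0U)
+  *     {
+  *       // A bus error was detected during the transfer.
+  *     }
+  *   }
+  * @endcode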
+ * @param Timeout Timeout duration + * @param Tickstart Tick start value + * @retval HAL status + */ +static HAL_StatusTypeDef I2C_IsErrorOccurred(I2C_HandleTypeDef *hi2c, uint32_t Timeout, uint32_t Tickstart) +{ + HAL_StatusTypeDef status = HAL_OK; + uint32_t itflag = hi2c->Instance->ISR; + uint32_t error_code = 0; + uint32_t tickstart = Tickstart; + uint32_t tmp1; + HAL_I2C_ModeTypeDef tmp2; + + if (HAL_IS_BIT_SET(itflag, I2C_FLAG_AF)) + { + /* Clear NACKF Flag */ + __HAL_I2C_CLEAR_FLAG(hi2c, I2C_FLAG_AF); + + /* Wait until STOP Flag is set or timeout occurred */ + /* AutoEnd should be initiate after AF */ + while ((__HAL_I2C_GET_FLAG(hi2c, I2C_FLAG_STOPF) == RESET) && (status == HAL_OK)) + { + /* Check for the Timeout */ + if (Timeout != HAL_MAX_DELAY) + { + if (((HAL_GetTick() - tickstart) > Timeout) || (Timeout == 0U)) + { + tmp1 = (uint32_t)(hi2c->Instance->CR2 & I2C_CR2_STOP); + tmp2 = hi2c->Mode; + + /* In case of I2C still busy, try to regenerate a STOP manually */ + if ((__HAL_I2C_GET_FLAG(hi2c, I2C_FLAG_BUSY) != RESET) && \ + (tmp1 != I2C_CR2_STOP) && \ + (tmp2 != HAL_I2C_MODE_SLAVE)) + { + /* Generate Stop */ + hi2c->Instance->CR2 |= I2C_CR2_STOP; + + /* Update Tick with new reference */ + tickstart = HAL_GetTick(); + } + + while (__HAL_I2C_GET_FLAG(hi2c, I2C_FLAG_STOPF) == RESET) + { + /* Check for the Timeout */ + if ((HAL_GetTick() - tickstart) > I2C_TIMEOUT_STOPF) + { + hi2c->ErrorCode |= HAL_I2C_ERROR_TIMEOUT; + hi2c->State = HAL_I2C_STATE_READY; + hi2c->Mode = HAL_I2C_MODE_NONE; + + /* Process Unlocked */ + __HAL_UNLOCK(hi2c); + + status = HAL_ERROR; + } + } + } + } + } + + /* In case STOP Flag is detected, clear it */ + if (status == HAL_OK) + { + /* Clear STOP Flag */ + __HAL_I2C_CLEAR_FLAG(hi2c, I2C_FLAG_STOPF); + } + + error_code |= HAL_I2C_ERROR_AF; + + status = HAL_ERROR; + } + + /* Refresh Content of Status register */ + itflag = hi2c->Instance->ISR; + + /* Then verify if an additional errors occurs */ + /* Check if a Bus error occurred */ + if (HAL_IS_BIT_SET(itflag, I2C_FLAG_BERR)) + { + error_code |= HAL_I2C_ERROR_BERR; + + /* Clear BERR flag */ + __HAL_I2C_CLEAR_FLAG(hi2c, I2C_FLAG_BERR); + + status = HAL_ERROR; + } + + /* Check if an Over-Run/Under-Run error occurred */ + if (HAL_IS_BIT_SET(itflag, I2C_FLAG_OVR)) + { + error_code |= HAL_I2C_ERROR_OVR; + + /* Clear OVR flag */ + __HAL_I2C_CLEAR_FLAG(hi2c, I2C_FLAG_OVR); + + status = HAL_ERROR; + } + + /* Check if an Arbitration Loss error occurred */ + if (HAL_IS_BIT_SET(itflag, I2C_FLAG_ARLO)) + { + error_code |= HAL_I2C_ERROR_ARLO; + + /* Clear ARLO flag */ + __HAL_I2C_CLEAR_FLAG(hi2c, I2C_FLAG_ARLO); + + status = HAL_ERROR; + } + + if (status != HAL_OK) + { + /* Flush TX register */ + I2C_Flush_TXDR(hi2c); + + /* Clear Configuration Register 2 */ + I2C_RESET_CR2(hi2c); + + hi2c->ErrorCode |= error_code; + hi2c->State = HAL_I2C_STATE_READY; + hi2c->Mode = HAL_I2C_MODE_NONE; + + /* Process Unlocked */ + __HAL_UNLOCK(hi2c); + } + + return status; +} + +/** + * @brief Handles I2Cx communication when starting transfer or during transfer (TC or TCR flag are set). + * @param hi2c I2C handle. + * @param DevAddress Specifies the slave address to be programmed. + * @param Size Specifies the number of bytes to be programmed. + * This parameter must be a value between 0 and 255. + * @param Mode New state of the I2C START condition generation. + * This parameter can be one of the following values: + * @arg @ref I2C_RELOAD_MODE Enable Reload mode . + * @arg @ref I2C_AUTOEND_MODE Enable Automatic end mode. 
+ * @arg @ref I2C_SOFTEND_MODE Enable Software end mode. + * @param Request New state of the I2C START condition generation. + * This parameter can be one of the following values: + * @arg @ref I2C_NO_STARTSTOP Don't Generate stop and start condition. + * @arg @ref I2C_GENERATE_STOP Generate stop condition (Size should be set to 0). + * @arg @ref I2C_GENERATE_START_READ Generate Restart for read request. + * @arg @ref I2C_GENERATE_START_WRITE Generate Restart for write request. + * @retval None + */ +static void I2C_TransferConfig(I2C_HandleTypeDef *hi2c, uint16_t DevAddress, uint8_t Size, uint32_t Mode, + uint32_t Request) +{ + /* Check the parameters */ + assert_param(IS_I2C_ALL_INSTANCE(hi2c->Instance)); + assert_param(IS_TRANSFER_MODE(Mode)); + assert_param(IS_TRANSFER_REQUEST(Request)); + + /* Declaration of tmp to prevent undefined behavior of volatile usage */ + uint32_t tmp = ((uint32_t)(((uint32_t)DevAddress & I2C_CR2_SADD) | \ + (((uint32_t)Size << I2C_CR2_NBYTES_Pos) & I2C_CR2_NBYTES) | \ + (uint32_t)Mode | (uint32_t)Request) & (~0x80000000U)); + + /* update CR2 register */ + MODIFY_REG(hi2c->Instance->CR2, \ + ((I2C_CR2_SADD | I2C_CR2_NBYTES | I2C_CR2_RELOAD | I2C_CR2_AUTOEND | \ + (I2C_CR2_RD_WRN & (uint32_t)(Request >> (31U - I2C_CR2_RD_WRN_Pos))) | \ + I2C_CR2_START | I2C_CR2_STOP)), tmp); +} + +/** + * @brief Manage the enabling of Interrupts. + * @param hi2c Pointer to a I2C_HandleTypeDef structure that contains + * the configuration information for the specified I2C. + * @param InterruptRequest Value of @ref I2C_Interrupt_configuration_definition. + * @retval None + */ +static void I2C_Enable_IRQ(I2C_HandleTypeDef *hi2c, uint16_t InterruptRequest) +{ + uint32_t tmpisr = 0U; + + if ((hi2c->XferISR == I2C_Master_ISR_DMA) || \ + (hi2c->XferISR == I2C_Slave_ISR_DMA)) + { + if ((InterruptRequest & I2C_XFER_LISTEN_IT) == I2C_XFER_LISTEN_IT) + { + /* Enable ERR, STOP, NACK and ADDR interrupts */ + tmpisr |= I2C_IT_ADDRI | I2C_IT_STOPI | I2C_IT_NACKI | I2C_IT_ERRI; + } + + if (InterruptRequest == I2C_XFER_ERROR_IT) + { + /* Enable ERR and NACK interrupts */ + tmpisr |= I2C_IT_ERRI | I2C_IT_NACKI; + } + + if (InterruptRequest == I2C_XFER_CPLT_IT) + { + /* Enable STOP interrupts */ + tmpisr |= (I2C_IT_STOPI | I2C_IT_TCI); + } + + if (InterruptRequest == I2C_XFER_RELOAD_IT) + { + /* Enable TC interrupts */ + tmpisr |= I2C_IT_TCI; + } + } + else + { + if ((InterruptRequest & I2C_XFER_LISTEN_IT) == I2C_XFER_LISTEN_IT) + { + /* Enable ERR, STOP, NACK, and ADDR interrupts */ + tmpisr |= I2C_IT_ADDRI | I2C_IT_STOPI | I2C_IT_NACKI | I2C_IT_ERRI; + } + + if ((InterruptRequest & I2C_XFER_TX_IT) == I2C_XFER_TX_IT) + { + /* Enable ERR, TC, STOP, NACK and RXI interrupts */ + tmpisr |= I2C_IT_ERRI | I2C_IT_TCI | I2C_IT_STOPI | I2C_IT_NACKI | I2C_IT_TXI; + } + + if ((InterruptRequest & I2C_XFER_RX_IT) == I2C_XFER_RX_IT) + { + /* Enable ERR, TC, STOP, NACK and TXI interrupts */ + tmpisr |= I2C_IT_ERRI | I2C_IT_TCI | I2C_IT_STOPI | I2C_IT_NACKI | I2C_IT_RXI; + } + + if (InterruptRequest == I2C_XFER_CPLT_IT) + { + /* Enable STOP interrupts */ + tmpisr |= I2C_IT_STOPI; + } + } + + /* Enable interrupts only at the end */ + /* to avoid the risk of I2C interrupt handle execution before */ + /* all interrupts requested done */ + __HAL_I2C_ENABLE_IT(hi2c, tmpisr); +} + +/** + * @brief Manage the disabling of Interrupts. + * @param hi2c Pointer to a I2C_HandleTypeDef structure that contains + * the configuration information for the specified I2C. 
+ * @param InterruptRequest Value of @ref I2C_Interrupt_configuration_definition. + * @retval None + */ +static void I2C_Disable_IRQ(I2C_HandleTypeDef *hi2c, uint16_t InterruptRequest) +{ + uint32_t tmpisr = 0U; + + if ((InterruptRequest & I2C_XFER_TX_IT) == I2C_XFER_TX_IT) + { + /* Disable TC and TXI interrupts */ + tmpisr |= I2C_IT_TCI | I2C_IT_TXI; + + if (((uint32_t)hi2c->State & (uint32_t)HAL_I2C_STATE_LISTEN) != (uint32_t)HAL_I2C_STATE_LISTEN) + { + /* Disable NACK and STOP interrupts */ + tmpisr |= I2C_IT_STOPI | I2C_IT_NACKI | I2C_IT_ERRI; + } + } + + if ((InterruptRequest & I2C_XFER_RX_IT) == I2C_XFER_RX_IT) + { + /* Disable TC and RXI interrupts */ + tmpisr |= I2C_IT_TCI | I2C_IT_RXI; + + if (((uint32_t)hi2c->State & (uint32_t)HAL_I2C_STATE_LISTEN) != (uint32_t)HAL_I2C_STATE_LISTEN) + { + /* Disable NACK and STOP interrupts */ + tmpisr |= I2C_IT_STOPI | I2C_IT_NACKI | I2C_IT_ERRI; + } + } + + if ((InterruptRequest & I2C_XFER_LISTEN_IT) == I2C_XFER_LISTEN_IT) + { + /* Disable ADDR, NACK and STOP interrupts */ + tmpisr |= I2C_IT_ADDRI | I2C_IT_STOPI | I2C_IT_NACKI | I2C_IT_ERRI; + } + + if (InterruptRequest == I2C_XFER_ERROR_IT) + { + /* Enable ERR and NACK interrupts */ + tmpisr |= I2C_IT_ERRI | I2C_IT_NACKI; + } + + if (InterruptRequest == I2C_XFER_CPLT_IT) + { + /* Enable STOP interrupts */ + tmpisr |= I2C_IT_STOPI; + } + + if (InterruptRequest == I2C_XFER_RELOAD_IT) + { + /* Enable TC interrupts */ + tmpisr |= I2C_IT_TCI; + } + + /* Disable interrupts only at the end */ + /* to avoid a breaking situation like at "t" time */ + /* all disable interrupts request are not done */ + __HAL_I2C_DISABLE_IT(hi2c, tmpisr); +} + +/** + * @brief Convert I2Cx OTHER_xxx XferOptions to functional XferOptions. + * @param hi2c I2C handle. + * @retval None + */ +static void I2C_ConvertOtherXferOptions(I2C_HandleTypeDef *hi2c) +{ + /* if user set XferOptions to I2C_OTHER_FRAME */ + /* it request implicitly to generate a restart condition */ + /* set XferOptions to I2C_FIRST_FRAME */ + if (hi2c->XferOptions == I2C_OTHER_FRAME) + { + hi2c->XferOptions = I2C_FIRST_FRAME; + } + /* else if user set XferOptions to I2C_OTHER_AND_LAST_FRAME */ + /* it request implicitly to generate a restart condition */ + /* then generate a stop condition at the end of transfer */ + /* set XferOptions to I2C_FIRST_AND_LAST_FRAME */ + else if (hi2c->XferOptions == I2C_OTHER_AND_LAST_FRAME) + { + hi2c->XferOptions = I2C_FIRST_AND_LAST_FRAME; + } + else + { + /* Nothing to do */ + } +} + +/** + * @} + */ + +#endif /* HAL_I2C_MODULE_ENABLED */ +/** + * @} + */ + +/** + * @} + */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_i2c_ex.c b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_i2c_ex.c new file mode 100644 index 0000000..035731f --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_i2c_ex.c @@ -0,0 +1,270 @@ +/** + ****************************************************************************** + * @file stm32f7xx_hal_i2c_ex.c + * @author MCD Application Team + * @brief I2C Extended HAL module driver. + * This file provides firmware functions to manage the following + * functionalities of I2C Extended peripheral: + * + Filter Mode Functions + * + FastModePlus Functions + * + ****************************************************************************** + * @attention + * + * Copyright (c) 2017 STMicroelectronics. + * All rights reserved. 
+ * + * This software is licensed under terms that can be found in the LICENSE file + * in the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. + * + ****************************************************************************** + @verbatim + ============================================================================== + ##### I2C peripheral Extended features ##### + ============================================================================== + + [..] Comparing to other previous devices, the I2C interface for STM32F7xx + devices contains the following additional features + + (+) Possibility to disable or enable Analog Noise Filter + (+) Use of a configured Digital Noise Filter + (+) Disable or enable Fast Mode Plus + + ##### How to use this driver ##### + ============================================================================== + [..] This driver provides functions to: + (#) Configure I2C Analog noise filter using the function HAL_I2CEx_ConfigAnalogFilter() + (#) Configure I2C Digital noise filter using the function HAL_I2CEx_ConfigDigitalFilter() + (#) Configure the enable or disable of fast mode plus driving capability using the functions : + (++) HAL_I2CEx_EnableFastModePlus() + (++) HAL_I2CEx_DisableFastModePlus() + @endverbatim + */ + +/* Includes ------------------------------------------------------------------*/ +#include "stm32f7xx_hal.h" + +/** @addtogroup STM32F7xx_HAL_Driver + * @{ + */ + +/** @defgroup I2CEx I2CEx + * @brief I2C Extended HAL module driver + * @{ + */ + +#ifdef HAL_I2C_MODULE_ENABLED + +/* Private typedef -----------------------------------------------------------*/ +/* Private define ------------------------------------------------------------*/ +/* Private macro -------------------------------------------------------------*/ +/* Private variables ---------------------------------------------------------*/ +/* Private function prototypes -----------------------------------------------*/ +/* Private functions ---------------------------------------------------------*/ + +/** @defgroup I2CEx_Exported_Functions I2C Extended Exported Functions + * @{ + */ + +/** @defgroup I2CEx_Exported_Functions_Group1 Filter Mode Functions + * @brief Filter Mode Functions + * +@verbatim + =============================================================================== + ##### Filter Mode Functions ##### + =============================================================================== + [..] This section provides functions allowing to: + (+) Configure Noise Filters + +@endverbatim + * @{ + */ + +/** + * @brief Configure I2C Analog noise filter. + * @param hi2c Pointer to a I2C_HandleTypeDef structure that contains + * the configuration information for the specified I2Cx peripheral. + * @param AnalogFilter New state of the Analog filter. 
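+  * @note   Illustrative usage sketch; the handle name hi2c1 and the Error_Handler()
+  *         hook are placeholder assumptions. The filters are expected to be
+  *         (re)configured while no transfer is ongoing, typically right after
+  *         HAL_I2C_Init().
+  * @code
+  *   extern I2C_HandleTypeDef hi2c1;   // assumed handle
+  *
+  *   // Keep the analog noise filter enabled (reset default)...
+  *   if (HAL_I2CEx_ConfigAnalogFilter(&hi2c1, I2C_ANALOGFILTER_ENABLE) != HAL_OK)
+  *   {
+  *     Error_Handler();                // application-defined error hook (assumed)
+  *   }
+  *   // ...and add a digital filter of 2 I2CCLK periods on top of it.
+  *   if (HAL_I2CEx_ConfigDigitalFilter(&hi2c1, 2U) != HAL_OK)
+  *   {
+  *     Error_Handler();
+  *   }
+  * @endcode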
+ * @retval HAL status + */ +HAL_StatusTypeDef HAL_I2CEx_ConfigAnalogFilter(I2C_HandleTypeDef *hi2c, uint32_t AnalogFilter) +{ + /* Check the parameters */ + assert_param(IS_I2C_ALL_INSTANCE(hi2c->Instance)); + assert_param(IS_I2C_ANALOG_FILTER(AnalogFilter)); + + if (hi2c->State == HAL_I2C_STATE_READY) + { + /* Process Locked */ + __HAL_LOCK(hi2c); + + hi2c->State = HAL_I2C_STATE_BUSY; + + /* Disable the selected I2C peripheral */ + __HAL_I2C_DISABLE(hi2c); + + /* Reset I2Cx ANOFF bit */ + hi2c->Instance->CR1 &= ~(I2C_CR1_ANFOFF); + + /* Set analog filter bit*/ + hi2c->Instance->CR1 |= AnalogFilter; + + __HAL_I2C_ENABLE(hi2c); + + hi2c->State = HAL_I2C_STATE_READY; + + /* Process Unlocked */ + __HAL_UNLOCK(hi2c); + + return HAL_OK; + } + else + { + return HAL_BUSY; + } +} + +/** + * @brief Configure I2C Digital noise filter. + * @param hi2c Pointer to a I2C_HandleTypeDef structure that contains + * the configuration information for the specified I2Cx peripheral. + * @param DigitalFilter Coefficient of digital noise filter between Min_Data=0x00 and Max_Data=0x0F. + * @retval HAL status + */ +HAL_StatusTypeDef HAL_I2CEx_ConfigDigitalFilter(I2C_HandleTypeDef *hi2c, uint32_t DigitalFilter) +{ + uint32_t tmpreg; + + /* Check the parameters */ + assert_param(IS_I2C_ALL_INSTANCE(hi2c->Instance)); + assert_param(IS_I2C_DIGITAL_FILTER(DigitalFilter)); + + if (hi2c->State == HAL_I2C_STATE_READY) + { + /* Process Locked */ + __HAL_LOCK(hi2c); + + hi2c->State = HAL_I2C_STATE_BUSY; + + /* Disable the selected I2C peripheral */ + __HAL_I2C_DISABLE(hi2c); + + /* Get the old register value */ + tmpreg = hi2c->Instance->CR1; + + /* Reset I2Cx DNF bits [11:8] */ + tmpreg &= ~(I2C_CR1_DNF); + + /* Set I2Cx DNF coefficient */ + tmpreg |= DigitalFilter << 8U; + + /* Store the new register value */ + hi2c->Instance->CR1 = tmpreg; + + __HAL_I2C_ENABLE(hi2c); + + hi2c->State = HAL_I2C_STATE_READY; + + /* Process Unlocked */ + __HAL_UNLOCK(hi2c); + + return HAL_OK; + } + else + { + return HAL_BUSY; + } +} +/** + * @} + */ +#if (defined(SYSCFG_PMC_I2C_PB6_FMP) || defined(SYSCFG_PMC_I2C_PB7_FMP)) || (defined(SYSCFG_PMC_I2C_PB8_FMP) || defined(SYSCFG_PMC_I2C_PB9_FMP)) || (defined(SYSCFG_PMC_I2C1_FMP)) || (defined(SYSCFG_PMC_I2C2_FMP)) || defined(SYSCFG_PMC_I2C3_FMP) || defined(SYSCFG_PMC_I2C4_FMP) + +/** @defgroup I2CEx_Exported_Functions_Group3 Fast Mode Plus Functions + * @brief Fast Mode Plus Functions + * +@verbatim + =============================================================================== + ##### Fast Mode Plus Functions ##### + =============================================================================== + [..] This section provides functions allowing to: + (+) Configure Fast Mode Plus + +@endverbatim + * @{ + */ + +/** + * @brief Enable the I2C fast mode plus driving capability. + * @param ConfigFastModePlus Selects the pin. + * This parameter can be one of the @ref I2CEx_FastModePlus values + * @note For I2C1, fast mode plus driving capability can be enabled on all selected + * I2C1 pins using I2C_FASTMODEPLUS_I2C1 parameter or independently + * on each one of the following pins PB6, PB7, PB8 and PB9. + * @note For remaining I2C1 pins (PA14, PA15...) fast mode plus driving capability + * can be enabled only by using I2C_FASTMODEPLUS_I2C1 parameter. + * @note For all I2C2 pins fast mode plus driving capability can be enabled + * only by using I2C_FASTMODEPLUS_I2C2 parameter. + * @note For all I2C3 pins fast mode plus driving capability can be enabled + * only by using I2C_FASTMODEPLUS_I2C3 parameter. 
+ * @note For all I2C4 pins fast mode plus driving capability can be enabled + * only by using I2C_FASTMODEPLUS_I2C4 parameter. + * @retval None + */ +void HAL_I2CEx_EnableFastModePlus(uint32_t ConfigFastModePlus) +{ + /* Check the parameter */ + assert_param(IS_I2C_FASTMODEPLUS(ConfigFastModePlus)); + + /* Enable SYSCFG clock */ + __HAL_RCC_SYSCFG_CLK_ENABLE(); + + /* Enable fast mode plus driving capability for selected pin */ + SET_BIT(SYSCFG->PMC, (uint32_t)ConfigFastModePlus); +} + +/** + * @brief Disable the I2C fast mode plus driving capability. + * @param ConfigFastModePlus Selects the pin. + * This parameter can be one of the @ref I2CEx_FastModePlus values + * @note For I2C1, fast mode plus driving capability can be disabled on all selected + * I2C1 pins using I2C_FASTMODEPLUS_I2C1 parameter or independently + * on each one of the following pins PB6, PB7, PB8 and PB9. + * @note For remaining I2C1 pins (PA14, PA15...) fast mode plus driving capability + * can be disabled only by using I2C_FASTMODEPLUS_I2C1 parameter. + * @note For all I2C2 pins fast mode plus driving capability can be disabled + * only by using I2C_FASTMODEPLUS_I2C2 parameter. + * @note For all I2C3 pins fast mode plus driving capability can be disabled + * only by using I2C_FASTMODEPLUS_I2C3 parameter. + * @note For all I2C4 pins fast mode plus driving capability can be disabled + * only by using I2C_FASTMODEPLUS_I2C4 parameter. + * @retval None + */ +void HAL_I2CEx_DisableFastModePlus(uint32_t ConfigFastModePlus) +{ + /* Check the parameter */ + assert_param(IS_I2C_FASTMODEPLUS(ConfigFastModePlus)); + + /* Enable SYSCFG clock */ + __HAL_RCC_SYSCFG_CLK_ENABLE(); + + /* Disable fast mode plus driving capability for selected pin */ + CLEAR_BIT(SYSCFG->PMC, (uint32_t)ConfigFastModePlus); +} +/** + * @} + */ +#endif /* Fast Mode Plus Availability */ +/** + * @} + */ + +#endif /* HAL_I2C_MODULE_ENABLED */ +/** + * @} + */ + +/** + * @} + */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_pcd.c b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_pcd.c new file mode 100644 index 0000000..e7555be --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_pcd.c @@ -0,0 +1,2170 @@ +/** + ****************************************************************************** + * @file stm32f7xx_hal_pcd.c + * @author MCD Application Team + * @brief PCD HAL module driver. + * This file provides firmware functions to manage the following + * functionalities of the USB Peripheral Controller: + * + Initialization and de-initialization functions + * + IO operation functions + * + Peripheral Control functions + * + Peripheral State functions + * + ****************************************************************************** + * @attention + * + * Copyright (c) 2017 STMicroelectronics. + * All rights reserved. + * + * This software is licensed under terms that can be found in the LICENSE file + * in the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. + * + ****************************************************************************** + @verbatim + ============================================================================== + ##### How to use this driver ##### + ============================================================================== + [..] 
+ The PCD HAL driver can be used as follows: + + (#) Declare a PCD_HandleTypeDef handle structure, for example: + PCD_HandleTypeDef hpcd; + + (#) Fill parameters of Init structure in HCD handle + + (#) Call HAL_PCD_Init() API to initialize the PCD peripheral (Core, Device core, ...) + + (#) Initialize the PCD low level resources through the HAL_PCD_MspInit() API: + (##) Enable the PCD/USB Low Level interface clock using + (+++) __HAL_RCC_USB_OTG_FS_CLK_ENABLE(); + (+++) __HAL_RCC_USB_OTG_HS_CLK_ENABLE(); (For High Speed Mode) + + (##) Initialize the related GPIO clocks + (##) Configure PCD pin-out + (##) Configure PCD NVIC interrupt + + (#)Associate the Upper USB device stack to the HAL PCD Driver: + (##) hpcd.pData = pdev; + + (#)Enable PCD transmission and reception: + (##) HAL_PCD_Start(); + + @endverbatim + ****************************************************************************** + */ + +/* Includes ------------------------------------------------------------------*/ +#include "stm32f7xx_hal.h" + +/** @addtogroup STM32F7xx_HAL_Driver + * @{ + */ + +/** @defgroup PCD PCD + * @brief PCD HAL module driver + * @{ + */ + +#ifdef HAL_PCD_MODULE_ENABLED + +#if defined (USB_OTG_FS) || defined (USB_OTG_HS) + +/* Private types -------------------------------------------------------------*/ +/* Private variables ---------------------------------------------------------*/ +/* Private constants ---------------------------------------------------------*/ +/* Private macros ------------------------------------------------------------*/ +/** @defgroup PCD_Private_Macros PCD Private Macros + * @{ + */ +#define PCD_MIN(a, b) (((a) < (b)) ? (a) : (b)) +#define PCD_MAX(a, b) (((a) > (b)) ? (a) : (b)) +/** + * @} + */ + +/* Private functions prototypes ----------------------------------------------*/ +/** @defgroup PCD_Private_Functions PCD Private Functions + * @{ + */ +#if defined (USB_OTG_FS) || defined (USB_OTG_HS) +static HAL_StatusTypeDef PCD_WriteEmptyTxFifo(PCD_HandleTypeDef *hpcd, uint32_t epnum); +static HAL_StatusTypeDef PCD_EP_OutXfrComplete_int(PCD_HandleTypeDef *hpcd, uint32_t epnum); +static HAL_StatusTypeDef PCD_EP_OutSetupPacket_int(PCD_HandleTypeDef *hpcd, uint32_t epnum); +#endif /* defined (USB_OTG_FS) || defined (USB_OTG_HS) */ +/** + * @} + */ + +/* Exported functions --------------------------------------------------------*/ +/** @defgroup PCD_Exported_Functions PCD Exported Functions + * @{ + */ + +/** @defgroup PCD_Exported_Functions_Group1 Initialization and de-initialization functions + * @brief Initialization and Configuration functions + * +@verbatim + =============================================================================== + ##### Initialization and de-initialization functions ##### + =============================================================================== + [..] This section provides functions allowing to: + +@endverbatim + * @{ + */ + +/** + * @brief Initializes the PCD according to the specified + * parameters in the PCD_InitTypeDef and initialize the associated handle. 
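+  * @note  A minimal bring-up sketch following the steps above; the handle
+  *        name, endpoint count and FIFO sizes (in 32-bit words) are
+  *        illustrative, and Error_Handler() stands in for the application's
+  *        error hook:
+  * @code
+  *   PCD_HandleTypeDef hpcd_USB_OTG_FS;
+  *
+  *   hpcd_USB_OTG_FS.Instance                 = USB_OTG_FS;
+  *   hpcd_USB_OTG_FS.Init.dev_endpoints       = 6;
+  *   hpcd_USB_OTG_FS.Init.speed               = PCD_SPEED_FULL;
+  *   hpcd_USB_OTG_FS.Init.phy_itface          = PCD_PHY_EMBEDDED;
+  *   hpcd_USB_OTG_FS.Init.dma_enable          = DISABLE;
+  *   hpcd_USB_OTG_FS.Init.Sof_enable          = DISABLE;
+  *   hpcd_USB_OTG_FS.Init.low_power_enable    = DISABLE;
+  *   hpcd_USB_OTG_FS.Init.lpm_enable          = DISABLE;
+  *   hpcd_USB_OTG_FS.Init.vbus_sensing_enable = DISABLE;
+  *   hpcd_USB_OTG_FS.Init.use_dedicated_ep1   = DISABLE;
+  *   if (HAL_PCD_Init(&hpcd_USB_OTG_FS) != HAL_OK)
+  *   {
+  *     Error_Handler();
+  *   }
+  *
+  *   HAL_PCDEx_SetRxFiFo(&hpcd_USB_OTG_FS, 0x80);
+  *   HAL_PCDEx_SetTxFiFo(&hpcd_USB_OTG_FS, 0, 0x40);
+  *   HAL_PCDEx_SetTxFiFo(&hpcd_USB_OTG_FS, 1, 0x80);
+  *
+  *   HAL_PCD_Start(&hpcd_USB_OTG_FS);
+  * @endcode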
+ * @param hpcd PCD handle + * @retval HAL status + */ +HAL_StatusTypeDef HAL_PCD_Init(PCD_HandleTypeDef *hpcd) +{ + USB_OTG_GlobalTypeDef *USBx; + uint8_t i; + + /* Check the PCD handle allocation */ + if (hpcd == NULL) + { + return HAL_ERROR; + } + + /* Check the parameters */ + assert_param(IS_PCD_ALL_INSTANCE(hpcd->Instance)); + + USBx = hpcd->Instance; + + if (hpcd->State == HAL_PCD_STATE_RESET) + { + /* Allocate lock resource and initialize it */ + hpcd->Lock = HAL_UNLOCKED; + +#if (USE_HAL_PCD_REGISTER_CALLBACKS == 1U) + hpcd->SOFCallback = HAL_PCD_SOFCallback; + hpcd->SetupStageCallback = HAL_PCD_SetupStageCallback; + hpcd->ResetCallback = HAL_PCD_ResetCallback; + hpcd->SuspendCallback = HAL_PCD_SuspendCallback; + hpcd->ResumeCallback = HAL_PCD_ResumeCallback; + hpcd->ConnectCallback = HAL_PCD_ConnectCallback; + hpcd->DisconnectCallback = HAL_PCD_DisconnectCallback; + hpcd->DataOutStageCallback = HAL_PCD_DataOutStageCallback; + hpcd->DataInStageCallback = HAL_PCD_DataInStageCallback; + hpcd->ISOOUTIncompleteCallback = HAL_PCD_ISOOUTIncompleteCallback; + hpcd->ISOINIncompleteCallback = HAL_PCD_ISOINIncompleteCallback; + + if (hpcd->MspInitCallback == NULL) + { + hpcd->MspInitCallback = HAL_PCD_MspInit; + } + + /* Init the low level hardware */ + hpcd->MspInitCallback(hpcd); +#else + /* Init the low level hardware : GPIO, CLOCK, NVIC... */ + HAL_PCD_MspInit(hpcd); +#endif /* (USE_HAL_PCD_REGISTER_CALLBACKS) */ + } + + hpcd->State = HAL_PCD_STATE_BUSY; + + /* Disable DMA mode for FS instance */ + if ((USBx->CID & (0x1U << 8)) == 0U) + { + hpcd->Init.dma_enable = 0U; + } + + /* Disable the Interrupts */ + __HAL_PCD_DISABLE(hpcd); + + /*Init the Core (common init.) */ + if (USB_CoreInit(hpcd->Instance, hpcd->Init) != HAL_OK) + { + hpcd->State = HAL_PCD_STATE_ERROR; + return HAL_ERROR; + } + + /* Force Device Mode*/ + (void)USB_SetCurrentMode(hpcd->Instance, USB_DEVICE_MODE); + + /* Init endpoints structures */ + for (i = 0U; i < hpcd->Init.dev_endpoints; i++) + { + /* Init ep structure */ + hpcd->IN_ep[i].is_in = 1U; + hpcd->IN_ep[i].num = i; + hpcd->IN_ep[i].tx_fifo_num = i; + /* Control until ep is activated */ + hpcd->IN_ep[i].type = EP_TYPE_CTRL; + hpcd->IN_ep[i].maxpacket = 0U; + hpcd->IN_ep[i].xfer_buff = 0U; + hpcd->IN_ep[i].xfer_len = 0U; + } + + for (i = 0U; i < hpcd->Init.dev_endpoints; i++) + { + hpcd->OUT_ep[i].is_in = 0U; + hpcd->OUT_ep[i].num = i; + /* Control until ep is activated */ + hpcd->OUT_ep[i].type = EP_TYPE_CTRL; + hpcd->OUT_ep[i].maxpacket = 0U; + hpcd->OUT_ep[i].xfer_buff = 0U; + hpcd->OUT_ep[i].xfer_len = 0U; + } + + /* Init Device */ + if (USB_DevInit(hpcd->Instance, hpcd->Init) != HAL_OK) + { + hpcd->State = HAL_PCD_STATE_ERROR; + return HAL_ERROR; + } + + hpcd->USB_Address = 0U; + hpcd->State = HAL_PCD_STATE_READY; + + /* Activate LPM */ + if (hpcd->Init.lpm_enable == 1U) + { + (void)HAL_PCDEx_ActivateLPM(hpcd); + } + + (void)USB_DevDisconnect(hpcd->Instance); + + return HAL_OK; +} + +/** + * @brief DeInitializes the PCD peripheral. 
+ * @param hpcd PCD handle + * @retval HAL status + */ +HAL_StatusTypeDef HAL_PCD_DeInit(PCD_HandleTypeDef *hpcd) +{ + /* Check the PCD handle allocation */ + if (hpcd == NULL) + { + return HAL_ERROR; + } + + hpcd->State = HAL_PCD_STATE_BUSY; + + /* Stop Device */ + if (USB_StopDevice(hpcd->Instance) != HAL_OK) + { + return HAL_ERROR; + } + +#if (USE_HAL_PCD_REGISTER_CALLBACKS == 1U) + if (hpcd->MspDeInitCallback == NULL) + { + hpcd->MspDeInitCallback = HAL_PCD_MspDeInit; /* Legacy weak MspDeInit */ + } + + /* DeInit the low level hardware */ + hpcd->MspDeInitCallback(hpcd); +#else + /* DeInit the low level hardware: CLOCK, NVIC.*/ + HAL_PCD_MspDeInit(hpcd); +#endif /* USE_HAL_PCD_REGISTER_CALLBACKS */ + + hpcd->State = HAL_PCD_STATE_RESET; + + return HAL_OK; +} + +/** + * @brief Initializes the PCD MSP. + * @param hpcd PCD handle + * @retval None + */ +__weak void HAL_PCD_MspInit(PCD_HandleTypeDef *hpcd) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(hpcd); + + /* NOTE : This function should not be modified, when the callback is needed, + the HAL_PCD_MspInit could be implemented in the user file + */ +} + +/** + * @brief DeInitializes PCD MSP. + * @param hpcd PCD handle + * @retval None + */ +__weak void HAL_PCD_MspDeInit(PCD_HandleTypeDef *hpcd) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(hpcd); + + /* NOTE : This function should not be modified, when the callback is needed, + the HAL_PCD_MspDeInit could be implemented in the user file + */ +} + +#if (USE_HAL_PCD_REGISTER_CALLBACKS == 1U) +/** + * @brief Register a User USB PCD Callback + * To be used instead of the weak predefined callback + * @param hpcd USB PCD handle + * @param CallbackID ID of the callback to be registered + * This parameter can be one of the following values: + * @arg @ref HAL_PCD_SOF_CB_ID USB PCD SOF callback ID + * @arg @ref HAL_PCD_SETUPSTAGE_CB_ID USB PCD Setup callback ID + * @arg @ref HAL_PCD_RESET_CB_ID USB PCD Reset callback ID + * @arg @ref HAL_PCD_SUSPEND_CB_ID USB PCD Suspend callback ID + * @arg @ref HAL_PCD_RESUME_CB_ID USB PCD Resume callback ID + * @arg @ref HAL_PCD_CONNECT_CB_ID USB PCD Connect callback ID + * @arg @ref HAL_PCD_DISCONNECT_CB_ID OTG PCD Disconnect callback ID + * @arg @ref HAL_PCD_MSPINIT_CB_ID MspDeInit callback ID + * @arg @ref HAL_PCD_MSPDEINIT_CB_ID MspDeInit callback ID + * @param pCallback pointer to the Callback function + * @retval HAL status + */ +HAL_StatusTypeDef HAL_PCD_RegisterCallback(PCD_HandleTypeDef *hpcd, + HAL_PCD_CallbackIDTypeDef CallbackID, + pPCD_CallbackTypeDef pCallback) +{ + HAL_StatusTypeDef status = HAL_OK; + + if (pCallback == NULL) + { + /* Update the error code */ + hpcd->ErrorCode |= HAL_PCD_ERROR_INVALID_CALLBACK; + return HAL_ERROR; + } + /* Process locked */ + __HAL_LOCK(hpcd); + + if (hpcd->State == HAL_PCD_STATE_READY) + { + switch (CallbackID) + { + case HAL_PCD_SOF_CB_ID : + hpcd->SOFCallback = pCallback; + break; + + case HAL_PCD_SETUPSTAGE_CB_ID : + hpcd->SetupStageCallback = pCallback; + break; + + case HAL_PCD_RESET_CB_ID : + hpcd->ResetCallback = pCallback; + break; + + case HAL_PCD_SUSPEND_CB_ID : + hpcd->SuspendCallback = pCallback; + break; + + case HAL_PCD_RESUME_CB_ID : + hpcd->ResumeCallback = pCallback; + break; + + case HAL_PCD_CONNECT_CB_ID : + hpcd->ConnectCallback = pCallback; + break; + + case HAL_PCD_DISCONNECT_CB_ID : + hpcd->DisconnectCallback = pCallback; + break; + + case HAL_PCD_MSPINIT_CB_ID : + hpcd->MspInitCallback = pCallback; + break; + + case HAL_PCD_MSPDEINIT_CB_ID : + 
hpcd->MspDeInitCallback = pCallback; + break; + + default : + /* Update the error code */ + hpcd->ErrorCode |= HAL_PCD_ERROR_INVALID_CALLBACK; + /* Return error status */ + status = HAL_ERROR; + break; + } + } + else if (hpcd->State == HAL_PCD_STATE_RESET) + { + switch (CallbackID) + { + case HAL_PCD_MSPINIT_CB_ID : + hpcd->MspInitCallback = pCallback; + break; + + case HAL_PCD_MSPDEINIT_CB_ID : + hpcd->MspDeInitCallback = pCallback; + break; + + default : + /* Update the error code */ + hpcd->ErrorCode |= HAL_PCD_ERROR_INVALID_CALLBACK; + /* Return error status */ + status = HAL_ERROR; + break; + } + } + else + { + /* Update the error code */ + hpcd->ErrorCode |= HAL_PCD_ERROR_INVALID_CALLBACK; + /* Return error status */ + status = HAL_ERROR; + } + + /* Release Lock */ + __HAL_UNLOCK(hpcd); + return status; +} + +/** + * @brief Unregister an USB PCD Callback + * USB PCD callabck is redirected to the weak predefined callback + * @param hpcd USB PCD handle + * @param CallbackID ID of the callback to be unregistered + * This parameter can be one of the following values: + * @arg @ref HAL_PCD_SOF_CB_ID USB PCD SOF callback ID + * @arg @ref HAL_PCD_SETUPSTAGE_CB_ID USB PCD Setup callback ID + * @arg @ref HAL_PCD_RESET_CB_ID USB PCD Reset callback ID + * @arg @ref HAL_PCD_SUSPEND_CB_ID USB PCD Suspend callback ID + * @arg @ref HAL_PCD_RESUME_CB_ID USB PCD Resume callback ID + * @arg @ref HAL_PCD_CONNECT_CB_ID USB PCD Connect callback ID + * @arg @ref HAL_PCD_DISCONNECT_CB_ID OTG PCD Disconnect callback ID + * @arg @ref HAL_PCD_MSPINIT_CB_ID MspDeInit callback ID + * @arg @ref HAL_PCD_MSPDEINIT_CB_ID MspDeInit callback ID + * @retval HAL status + */ +HAL_StatusTypeDef HAL_PCD_UnRegisterCallback(PCD_HandleTypeDef *hpcd, HAL_PCD_CallbackIDTypeDef CallbackID) +{ + HAL_StatusTypeDef status = HAL_OK; + + /* Process locked */ + __HAL_LOCK(hpcd); + + /* Setup Legacy weak Callbacks */ + if (hpcd->State == HAL_PCD_STATE_READY) + { + switch (CallbackID) + { + case HAL_PCD_SOF_CB_ID : + hpcd->SOFCallback = HAL_PCD_SOFCallback; + break; + + case HAL_PCD_SETUPSTAGE_CB_ID : + hpcd->SetupStageCallback = HAL_PCD_SetupStageCallback; + break; + + case HAL_PCD_RESET_CB_ID : + hpcd->ResetCallback = HAL_PCD_ResetCallback; + break; + + case HAL_PCD_SUSPEND_CB_ID : + hpcd->SuspendCallback = HAL_PCD_SuspendCallback; + break; + + case HAL_PCD_RESUME_CB_ID : + hpcd->ResumeCallback = HAL_PCD_ResumeCallback; + break; + + case HAL_PCD_CONNECT_CB_ID : + hpcd->ConnectCallback = HAL_PCD_ConnectCallback; + break; + + case HAL_PCD_DISCONNECT_CB_ID : + hpcd->DisconnectCallback = HAL_PCD_DisconnectCallback; + break; + + case HAL_PCD_MSPINIT_CB_ID : + hpcd->MspInitCallback = HAL_PCD_MspInit; + break; + + case HAL_PCD_MSPDEINIT_CB_ID : + hpcd->MspDeInitCallback = HAL_PCD_MspDeInit; + break; + + default : + /* Update the error code */ + hpcd->ErrorCode |= HAL_PCD_ERROR_INVALID_CALLBACK; + + /* Return error status */ + status = HAL_ERROR; + break; + } + } + else if (hpcd->State == HAL_PCD_STATE_RESET) + { + switch (CallbackID) + { + case HAL_PCD_MSPINIT_CB_ID : + hpcd->MspInitCallback = HAL_PCD_MspInit; + break; + + case HAL_PCD_MSPDEINIT_CB_ID : + hpcd->MspDeInitCallback = HAL_PCD_MspDeInit; + break; + + default : + /* Update the error code */ + hpcd->ErrorCode |= HAL_PCD_ERROR_INVALID_CALLBACK; + + /* Return error status */ + status = HAL_ERROR; + break; + } + } + else + { + /* Update the error code */ + hpcd->ErrorCode |= HAL_PCD_ERROR_INVALID_CALLBACK; + + /* Return error status */ + status = HAL_ERROR; + } + + /* Release Lock */ 
+ __HAL_UNLOCK(hpcd); + return status; +} + +/** + * @brief Register USB PCD Data OUT Stage Callback + * To be used instead of the weak HAL_PCD_DataOutStageCallback() predefined callback + * @param hpcd PCD handle + * @param pCallback pointer to the USB PCD Data OUT Stage Callback function + * @retval HAL status + */ +HAL_StatusTypeDef HAL_PCD_RegisterDataOutStageCallback(PCD_HandleTypeDef *hpcd, + pPCD_DataOutStageCallbackTypeDef pCallback) +{ + HAL_StatusTypeDef status = HAL_OK; + + if (pCallback == NULL) + { + /* Update the error code */ + hpcd->ErrorCode |= HAL_PCD_ERROR_INVALID_CALLBACK; + + return HAL_ERROR; + } + + /* Process locked */ + __HAL_LOCK(hpcd); + + if (hpcd->State == HAL_PCD_STATE_READY) + { + hpcd->DataOutStageCallback = pCallback; + } + else + { + /* Update the error code */ + hpcd->ErrorCode |= HAL_PCD_ERROR_INVALID_CALLBACK; + + /* Return error status */ + status = HAL_ERROR; + } + + /* Release Lock */ + __HAL_UNLOCK(hpcd); + + return status; +} + +/** + * @brief Unregister the USB PCD Data OUT Stage Callback + * USB PCD Data OUT Stage Callback is redirected to the weak HAL_PCD_DataOutStageCallback() predefined callback + * @param hpcd PCD handle + * @retval HAL status + */ +HAL_StatusTypeDef HAL_PCD_UnRegisterDataOutStageCallback(PCD_HandleTypeDef *hpcd) +{ + HAL_StatusTypeDef status = HAL_OK; + + /* Process locked */ + __HAL_LOCK(hpcd); + + if (hpcd->State == HAL_PCD_STATE_READY) + { + hpcd->DataOutStageCallback = HAL_PCD_DataOutStageCallback; /* Legacy weak DataOutStageCallback */ + } + else + { + /* Update the error code */ + hpcd->ErrorCode |= HAL_PCD_ERROR_INVALID_CALLBACK; + + /* Return error status */ + status = HAL_ERROR; + } + + /* Release Lock */ + __HAL_UNLOCK(hpcd); + + return status; +} + +/** + * @brief Register USB PCD Data IN Stage Callback + * To be used instead of the weak HAL_PCD_DataInStageCallback() predefined callback + * @param hpcd PCD handle + * @param pCallback pointer to the USB PCD Data IN Stage Callback function + * @retval HAL status + */ +HAL_StatusTypeDef HAL_PCD_RegisterDataInStageCallback(PCD_HandleTypeDef *hpcd, + pPCD_DataInStageCallbackTypeDef pCallback) +{ + HAL_StatusTypeDef status = HAL_OK; + + if (pCallback == NULL) + { + /* Update the error code */ + hpcd->ErrorCode |= HAL_PCD_ERROR_INVALID_CALLBACK; + + return HAL_ERROR; + } + + /* Process locked */ + __HAL_LOCK(hpcd); + + if (hpcd->State == HAL_PCD_STATE_READY) + { + hpcd->DataInStageCallback = pCallback; + } + else + { + /* Update the error code */ + hpcd->ErrorCode |= HAL_PCD_ERROR_INVALID_CALLBACK; + + /* Return error status */ + status = HAL_ERROR; + } + + /* Release Lock */ + __HAL_UNLOCK(hpcd); + + return status; +} + +/** + * @brief Unregister the USB PCD Data IN Stage Callback + * USB PCD Data OUT Stage Callback is redirected to the weak HAL_PCD_DataInStageCallback() predefined callback + * @param hpcd PCD handle + * @retval HAL status + */ +HAL_StatusTypeDef HAL_PCD_UnRegisterDataInStageCallback(PCD_HandleTypeDef *hpcd) +{ + HAL_StatusTypeDef status = HAL_OK; + + /* Process locked */ + __HAL_LOCK(hpcd); + + if (hpcd->State == HAL_PCD_STATE_READY) + { + hpcd->DataInStageCallback = HAL_PCD_DataInStageCallback; /* Legacy weak DataInStageCallback */ + } + else + { + /* Update the error code */ + hpcd->ErrorCode |= HAL_PCD_ERROR_INVALID_CALLBACK; + + /* Return error status */ + status = HAL_ERROR; + } + + /* Release Lock */ + __HAL_UNLOCK(hpcd); + + return status; +} + +/** + * @brief Register USB PCD Iso OUT incomplete Callback + * To be used instead of the weak 
HAL_PCD_ISOOUTIncompleteCallback() predefined callback + * @param hpcd PCD handle + * @param pCallback pointer to the USB PCD Iso OUT incomplete Callback function + * @retval HAL status + */ +HAL_StatusTypeDef HAL_PCD_RegisterIsoOutIncpltCallback(PCD_HandleTypeDef *hpcd, + pPCD_IsoOutIncpltCallbackTypeDef pCallback) +{ + HAL_StatusTypeDef status = HAL_OK; + + if (pCallback == NULL) + { + /* Update the error code */ + hpcd->ErrorCode |= HAL_PCD_ERROR_INVALID_CALLBACK; + + return HAL_ERROR; + } + + /* Process locked */ + __HAL_LOCK(hpcd); + + if (hpcd->State == HAL_PCD_STATE_READY) + { + hpcd->ISOOUTIncompleteCallback = pCallback; + } + else + { + /* Update the error code */ + hpcd->ErrorCode |= HAL_PCD_ERROR_INVALID_CALLBACK; + + /* Return error status */ + status = HAL_ERROR; + } + + /* Release Lock */ + __HAL_UNLOCK(hpcd); + + return status; +} + +/** + * @brief Unregister the USB PCD Iso OUT incomplete Callback + * USB PCD Iso OUT incomplete Callback is redirected + * to the weak HAL_PCD_ISOOUTIncompleteCallback() predefined callback + * @param hpcd PCD handle + * @retval HAL status + */ +HAL_StatusTypeDef HAL_PCD_UnRegisterIsoOutIncpltCallback(PCD_HandleTypeDef *hpcd) +{ + HAL_StatusTypeDef status = HAL_OK; + + /* Process locked */ + __HAL_LOCK(hpcd); + + if (hpcd->State == HAL_PCD_STATE_READY) + { + hpcd->ISOOUTIncompleteCallback = HAL_PCD_ISOOUTIncompleteCallback; /* Legacy weak ISOOUTIncompleteCallback */ + } + else + { + /* Update the error code */ + hpcd->ErrorCode |= HAL_PCD_ERROR_INVALID_CALLBACK; + + /* Return error status */ + status = HAL_ERROR; + } + + /* Release Lock */ + __HAL_UNLOCK(hpcd); + + return status; +} + +/** + * @brief Register USB PCD Iso IN incomplete Callback + * To be used instead of the weak HAL_PCD_ISOINIncompleteCallback() predefined callback + * @param hpcd PCD handle + * @param pCallback pointer to the USB PCD Iso IN incomplete Callback function + * @retval HAL status + */ +HAL_StatusTypeDef HAL_PCD_RegisterIsoInIncpltCallback(PCD_HandleTypeDef *hpcd, + pPCD_IsoInIncpltCallbackTypeDef pCallback) +{ + HAL_StatusTypeDef status = HAL_OK; + + if (pCallback == NULL) + { + /* Update the error code */ + hpcd->ErrorCode |= HAL_PCD_ERROR_INVALID_CALLBACK; + + return HAL_ERROR; + } + + /* Process locked */ + __HAL_LOCK(hpcd); + + if (hpcd->State == HAL_PCD_STATE_READY) + { + hpcd->ISOINIncompleteCallback = pCallback; + } + else + { + /* Update the error code */ + hpcd->ErrorCode |= HAL_PCD_ERROR_INVALID_CALLBACK; + + /* Return error status */ + status = HAL_ERROR; + } + + /* Release Lock */ + __HAL_UNLOCK(hpcd); + + return status; +} + +/** + * @brief Unregister the USB PCD Iso IN incomplete Callback + * USB PCD Iso IN incomplete Callback is redirected + * to the weak HAL_PCD_ISOINIncompleteCallback() predefined callback + * @param hpcd PCD handle + * @retval HAL status + */ +HAL_StatusTypeDef HAL_PCD_UnRegisterIsoInIncpltCallback(PCD_HandleTypeDef *hpcd) +{ + HAL_StatusTypeDef status = HAL_OK; + + /* Process locked */ + __HAL_LOCK(hpcd); + + if (hpcd->State == HAL_PCD_STATE_READY) + { + hpcd->ISOINIncompleteCallback = HAL_PCD_ISOINIncompleteCallback; /* Legacy weak ISOINIncompleteCallback */ + } + else + { + /* Update the error code */ + hpcd->ErrorCode |= HAL_PCD_ERROR_INVALID_CALLBACK; + + /* Return error status */ + status = HAL_ERROR; + } + + /* Release Lock */ + __HAL_UNLOCK(hpcd); + + return status; +} + +/** + * @brief Register USB PCD LPM Callback + * To be used instead of the weak HAL_PCDEx_LPM_Callback() predefined callback + * @param hpcd PCD handle + 
* @param pCallback pointer to the USB PCD LPM Callback function + * @retval HAL status + */ +HAL_StatusTypeDef HAL_PCD_RegisterLpmCallback(PCD_HandleTypeDef *hpcd, pPCD_LpmCallbackTypeDef pCallback) +{ + HAL_StatusTypeDef status = HAL_OK; + + if (pCallback == NULL) + { + /* Update the error code */ + hpcd->ErrorCode |= HAL_PCD_ERROR_INVALID_CALLBACK; + + return HAL_ERROR; + } + + /* Process locked */ + __HAL_LOCK(hpcd); + + if (hpcd->State == HAL_PCD_STATE_READY) + { + hpcd->LPMCallback = pCallback; + } + else + { + /* Update the error code */ + hpcd->ErrorCode |= HAL_PCD_ERROR_INVALID_CALLBACK; + + /* Return error status */ + status = HAL_ERROR; + } + + /* Release Lock */ + __HAL_UNLOCK(hpcd); + + return status; +} + +/** + * @brief Unregister the USB PCD LPM Callback + * USB LPM Callback is redirected to the weak HAL_PCDEx_LPM_Callback() predefined callback + * @param hpcd PCD handle + * @retval HAL status + */ +HAL_StatusTypeDef HAL_PCD_UnRegisterLpmCallback(PCD_HandleTypeDef *hpcd) +{ + HAL_StatusTypeDef status = HAL_OK; + + /* Process locked */ + __HAL_LOCK(hpcd); + + if (hpcd->State == HAL_PCD_STATE_READY) + { + hpcd->LPMCallback = HAL_PCDEx_LPM_Callback; /* Legacy weak HAL_PCDEx_LPM_Callback */ + } + else + { + /* Update the error code */ + hpcd->ErrorCode |= HAL_PCD_ERROR_INVALID_CALLBACK; + + /* Return error status */ + status = HAL_ERROR; + } + + /* Release Lock */ + __HAL_UNLOCK(hpcd); + + return status; +} +#endif /* USE_HAL_PCD_REGISTER_CALLBACKS */ + +/** + * @} + */ + +/** @defgroup PCD_Exported_Functions_Group2 Input and Output operation functions + * @brief Data transfers functions + * +@verbatim + =============================================================================== + ##### IO operation functions ##### + =============================================================================== + [..] + This subsection provides a set of functions allowing to manage the PCD data + transfers. + +@endverbatim + * @{ + */ + +/** + * @brief Start the USB device + * @param hpcd PCD handle + * @retval HAL status + */ +HAL_StatusTypeDef HAL_PCD_Start(PCD_HandleTypeDef *hpcd) +{ + __HAL_LOCK(hpcd); + __HAL_PCD_ENABLE(hpcd); + (void)USB_DevConnect(hpcd->Instance); + __HAL_UNLOCK(hpcd); + + return HAL_OK; +} + +/** + * @brief Stop the USB device. + * @param hpcd PCD handle + * @retval HAL status + */ +HAL_StatusTypeDef HAL_PCD_Stop(PCD_HandleTypeDef *hpcd) +{ + __HAL_LOCK(hpcd); + __HAL_PCD_DISABLE(hpcd); + (void)USB_DevDisconnect(hpcd->Instance); + + (void)USB_FlushTxFifo(hpcd->Instance, 0x10U); + + __HAL_UNLOCK(hpcd); + + return HAL_OK; +} + +#if defined (USB_OTG_FS) || defined (USB_OTG_HS) +/** + * @brief Handles PCD interrupt request. 
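+  * @note  This handler is meant to be called from the USB OTG interrupt
+  *        service routine; a typical hook for the FS instance (handle name
+  *        illustrative):
+  * @code
+  *   void OTG_FS_IRQHandler(void)
+  *   {
+  *     HAL_PCD_IRQHandler(&hpcd_USB_OTG_FS);
+  *   }
+  * @endcode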
+ * @param hpcd PCD handle + * @retval HAL status + */ +void HAL_PCD_IRQHandler(PCD_HandleTypeDef *hpcd) +{ + USB_OTG_GlobalTypeDef *USBx = hpcd->Instance; + uint32_t USBx_BASE = (uint32_t)USBx; + USB_OTG_EPTypeDef *ep; + uint32_t i; + uint32_t ep_intr; + uint32_t epint; + uint32_t epnum; + uint32_t fifoemptymsk; + uint32_t temp; + + /* ensure that we are in device mode */ + if (USB_GetMode(hpcd->Instance) == USB_OTG_MODE_DEVICE) + { + /* avoid spurious interrupt */ + if (__HAL_PCD_IS_INVALID_INTERRUPT(hpcd)) + { + return; + } + + if (__HAL_PCD_GET_FLAG(hpcd, USB_OTG_GINTSTS_MMIS)) + { + /* incorrect mode, acknowledge the interrupt */ + __HAL_PCD_CLEAR_FLAG(hpcd, USB_OTG_GINTSTS_MMIS); + } + + /* Handle RxQLevel Interrupt */ + if (__HAL_PCD_GET_FLAG(hpcd, USB_OTG_GINTSTS_RXFLVL)) + { + USB_MASK_INTERRUPT(hpcd->Instance, USB_OTG_GINTSTS_RXFLVL); + + temp = USBx->GRXSTSP; + + ep = &hpcd->OUT_ep[temp & USB_OTG_GRXSTSP_EPNUM]; + + if (((temp & USB_OTG_GRXSTSP_PKTSTS) >> 17) == STS_DATA_UPDT) + { + if ((temp & USB_OTG_GRXSTSP_BCNT) != 0U) + { + (void)USB_ReadPacket(USBx, ep->xfer_buff, + (uint16_t)((temp & USB_OTG_GRXSTSP_BCNT) >> 4)); + + ep->xfer_buff += (temp & USB_OTG_GRXSTSP_BCNT) >> 4; + ep->xfer_count += (temp & USB_OTG_GRXSTSP_BCNT) >> 4; + } + } + else if (((temp & USB_OTG_GRXSTSP_PKTSTS) >> 17) == STS_SETUP_UPDT) + { + (void)USB_ReadPacket(USBx, (uint8_t *)hpcd->Setup, 8U); + ep->xfer_count += (temp & USB_OTG_GRXSTSP_BCNT) >> 4; + } + else + { + /* ... */ + } + + USB_UNMASK_INTERRUPT(hpcd->Instance, USB_OTG_GINTSTS_RXFLVL); + } + + if (__HAL_PCD_GET_FLAG(hpcd, USB_OTG_GINTSTS_OEPINT)) + { + epnum = 0U; + + /* Read in the device interrupt bits */ + ep_intr = USB_ReadDevAllOutEpInterrupt(hpcd->Instance); + + while (ep_intr != 0U) + { + if ((ep_intr & 0x1U) != 0U) + { + epint = USB_ReadDevOutEPInterrupt(hpcd->Instance, (uint8_t)epnum); + + if ((epint & USB_OTG_DOEPINT_XFRC) == USB_OTG_DOEPINT_XFRC) + { + CLEAR_OUT_EP_INTR(epnum, USB_OTG_DOEPINT_XFRC); + (void)PCD_EP_OutXfrComplete_int(hpcd, epnum); + } + + if ((epint & USB_OTG_DOEPINT_STUP) == USB_OTG_DOEPINT_STUP) + { + CLEAR_OUT_EP_INTR(epnum, USB_OTG_DOEPINT_STUP); + /* Class B setup phase done for previous decoded setup */ + (void)PCD_EP_OutSetupPacket_int(hpcd, epnum); + } + + if ((epint & USB_OTG_DOEPINT_OTEPDIS) == USB_OTG_DOEPINT_OTEPDIS) + { + CLEAR_OUT_EP_INTR(epnum, USB_OTG_DOEPINT_OTEPDIS); + } + + /* Clear Status Phase Received interrupt */ + if ((epint & USB_OTG_DOEPINT_OTEPSPR) == USB_OTG_DOEPINT_OTEPSPR) + { + CLEAR_OUT_EP_INTR(epnum, USB_OTG_DOEPINT_OTEPSPR); + } + + /* Clear OUT NAK interrupt */ + if ((epint & USB_OTG_DOEPINT_NAK) == USB_OTG_DOEPINT_NAK) + { + CLEAR_OUT_EP_INTR(epnum, USB_OTG_DOEPINT_NAK); + } + } + epnum++; + ep_intr >>= 1U; + } + } + + if (__HAL_PCD_GET_FLAG(hpcd, USB_OTG_GINTSTS_IEPINT)) + { + /* Read in the device interrupt bits */ + ep_intr = USB_ReadDevAllInEpInterrupt(hpcd->Instance); + + epnum = 0U; + + while (ep_intr != 0U) + { + if ((ep_intr & 0x1U) != 0U) /* In ITR */ + { + epint = USB_ReadDevInEPInterrupt(hpcd->Instance, (uint8_t)epnum); + + if ((epint & USB_OTG_DIEPINT_XFRC) == USB_OTG_DIEPINT_XFRC) + { + fifoemptymsk = (uint32_t)(0x1UL << (epnum & EP_ADDR_MSK)); + USBx_DEVICE->DIEPEMPMSK &= ~fifoemptymsk; + + CLEAR_IN_EP_INTR(epnum, USB_OTG_DIEPINT_XFRC); + + if (hpcd->Init.dma_enable == 1U) + { + hpcd->IN_ep[epnum].xfer_buff += hpcd->IN_ep[epnum].maxpacket; + + /* this is ZLP, so prepare EP0 for next setup */ + if ((epnum == 0U) && (hpcd->IN_ep[epnum].xfer_len == 0U)) + { + /* prepare to 
rx more setup packets */ + (void)USB_EP0_OutStart(hpcd->Instance, 1U, (uint8_t *)hpcd->Setup); + } + } + +#if (USE_HAL_PCD_REGISTER_CALLBACKS == 1U) + hpcd->DataInStageCallback(hpcd, (uint8_t)epnum); +#else + HAL_PCD_DataInStageCallback(hpcd, (uint8_t)epnum); +#endif /* USE_HAL_PCD_REGISTER_CALLBACKS */ + } + if ((epint & USB_OTG_DIEPINT_TOC) == USB_OTG_DIEPINT_TOC) + { + CLEAR_IN_EP_INTR(epnum, USB_OTG_DIEPINT_TOC); + } + if ((epint & USB_OTG_DIEPINT_ITTXFE) == USB_OTG_DIEPINT_ITTXFE) + { + CLEAR_IN_EP_INTR(epnum, USB_OTG_DIEPINT_ITTXFE); + } + if ((epint & USB_OTG_DIEPINT_INEPNE) == USB_OTG_DIEPINT_INEPNE) + { + CLEAR_IN_EP_INTR(epnum, USB_OTG_DIEPINT_INEPNE); + } + if ((epint & USB_OTG_DIEPINT_EPDISD) == USB_OTG_DIEPINT_EPDISD) + { + CLEAR_IN_EP_INTR(epnum, USB_OTG_DIEPINT_EPDISD); + } + if ((epint & USB_OTG_DIEPINT_TXFE) == USB_OTG_DIEPINT_TXFE) + { + (void)PCD_WriteEmptyTxFifo(hpcd, epnum); + } + } + epnum++; + ep_intr >>= 1U; + } + } + + /* Handle Resume Interrupt */ + if (__HAL_PCD_GET_FLAG(hpcd, USB_OTG_GINTSTS_WKUINT)) + { + /* Clear the Remote Wake-up Signaling */ + USBx_DEVICE->DCTL &= ~USB_OTG_DCTL_RWUSIG; + + if (hpcd->LPM_State == LPM_L1) + { + hpcd->LPM_State = LPM_L0; + +#if (USE_HAL_PCD_REGISTER_CALLBACKS == 1U) + hpcd->LPMCallback(hpcd, PCD_LPM_L0_ACTIVE); +#else + HAL_PCDEx_LPM_Callback(hpcd, PCD_LPM_L0_ACTIVE); +#endif /* USE_HAL_PCD_REGISTER_CALLBACKS */ + } + else + { +#if (USE_HAL_PCD_REGISTER_CALLBACKS == 1U) + hpcd->ResumeCallback(hpcd); +#else + HAL_PCD_ResumeCallback(hpcd); +#endif /* USE_HAL_PCD_REGISTER_CALLBACKS */ + } + + __HAL_PCD_CLEAR_FLAG(hpcd, USB_OTG_GINTSTS_WKUINT); + } + + /* Handle Suspend Interrupt */ + if (__HAL_PCD_GET_FLAG(hpcd, USB_OTG_GINTSTS_USBSUSP)) + { + if ((USBx_DEVICE->DSTS & USB_OTG_DSTS_SUSPSTS) == USB_OTG_DSTS_SUSPSTS) + { +#if (USE_HAL_PCD_REGISTER_CALLBACKS == 1U) + hpcd->SuspendCallback(hpcd); +#else + HAL_PCD_SuspendCallback(hpcd); +#endif /* USE_HAL_PCD_REGISTER_CALLBACKS */ + } + __HAL_PCD_CLEAR_FLAG(hpcd, USB_OTG_GINTSTS_USBSUSP); + } + + /* Handle LPM Interrupt */ + if (__HAL_PCD_GET_FLAG(hpcd, USB_OTG_GINTSTS_LPMINT)) + { + __HAL_PCD_CLEAR_FLAG(hpcd, USB_OTG_GINTSTS_LPMINT); + + if (hpcd->LPM_State == LPM_L0) + { + hpcd->LPM_State = LPM_L1; + hpcd->BESL = (hpcd->Instance->GLPMCFG & USB_OTG_GLPMCFG_BESL) >> 2U; + +#if (USE_HAL_PCD_REGISTER_CALLBACKS == 1U) + hpcd->LPMCallback(hpcd, PCD_LPM_L1_ACTIVE); +#else + HAL_PCDEx_LPM_Callback(hpcd, PCD_LPM_L1_ACTIVE); +#endif /* USE_HAL_PCD_REGISTER_CALLBACKS */ + } + else + { +#if (USE_HAL_PCD_REGISTER_CALLBACKS == 1U) + hpcd->SuspendCallback(hpcd); +#else + HAL_PCD_SuspendCallback(hpcd); +#endif /* USE_HAL_PCD_REGISTER_CALLBACKS */ + } + } + + /* Handle Reset Interrupt */ + if (__HAL_PCD_GET_FLAG(hpcd, USB_OTG_GINTSTS_USBRST)) + { + USBx_DEVICE->DCTL &= ~USB_OTG_DCTL_RWUSIG; + (void)USB_FlushTxFifo(hpcd->Instance, 0x10U); + + for (i = 0U; i < hpcd->Init.dev_endpoints; i++) + { + USBx_INEP(i)->DIEPINT = 0xFB7FU; + USBx_INEP(i)->DIEPCTL &= ~USB_OTG_DIEPCTL_STALL; + USBx_OUTEP(i)->DOEPINT = 0xFB7FU; + USBx_OUTEP(i)->DOEPCTL &= ~USB_OTG_DOEPCTL_STALL; + USBx_OUTEP(i)->DOEPCTL |= USB_OTG_DOEPCTL_SNAK; + } + USBx_DEVICE->DAINTMSK |= 0x10001U; + + if (hpcd->Init.use_dedicated_ep1 != 0U) + { + USBx_DEVICE->DOUTEP1MSK |= USB_OTG_DOEPMSK_STUPM | + USB_OTG_DOEPMSK_XFRCM | + USB_OTG_DOEPMSK_EPDM; + + USBx_DEVICE->DINEP1MSK |= USB_OTG_DIEPMSK_TOM | + USB_OTG_DIEPMSK_XFRCM | + USB_OTG_DIEPMSK_EPDM; + } + else + { + USBx_DEVICE->DOEPMSK |= USB_OTG_DOEPMSK_STUPM | + USB_OTG_DOEPMSK_XFRCM | + 
USB_OTG_DOEPMSK_EPDM | + USB_OTG_DOEPMSK_OTEPSPRM | + USB_OTG_DOEPMSK_NAKM; + + USBx_DEVICE->DIEPMSK |= USB_OTG_DIEPMSK_TOM | + USB_OTG_DIEPMSK_XFRCM | + USB_OTG_DIEPMSK_EPDM; + } + + /* Set Default Address to 0 */ + USBx_DEVICE->DCFG &= ~USB_OTG_DCFG_DAD; + + /* setup EP0 to receive SETUP packets */ + (void)USB_EP0_OutStart(hpcd->Instance, (uint8_t)hpcd->Init.dma_enable, + (uint8_t *)hpcd->Setup); + + __HAL_PCD_CLEAR_FLAG(hpcd, USB_OTG_GINTSTS_USBRST); + } + + /* Handle Enumeration done Interrupt */ + if (__HAL_PCD_GET_FLAG(hpcd, USB_OTG_GINTSTS_ENUMDNE)) + { + (void)USB_ActivateSetup(hpcd->Instance); + hpcd->Init.speed = USB_GetDevSpeed(hpcd->Instance); + + /* Set USB Turnaround time */ + (void)USB_SetTurnaroundTime(hpcd->Instance, + HAL_RCC_GetHCLKFreq(), + (uint8_t)hpcd->Init.speed); + +#if (USE_HAL_PCD_REGISTER_CALLBACKS == 1U) + hpcd->ResetCallback(hpcd); +#else + HAL_PCD_ResetCallback(hpcd); +#endif /* USE_HAL_PCD_REGISTER_CALLBACKS */ + + __HAL_PCD_CLEAR_FLAG(hpcd, USB_OTG_GINTSTS_ENUMDNE); + } + + /* Handle SOF Interrupt */ + if (__HAL_PCD_GET_FLAG(hpcd, USB_OTG_GINTSTS_SOF)) + { +#if (USE_HAL_PCD_REGISTER_CALLBACKS == 1U) + hpcd->SOFCallback(hpcd); +#else + HAL_PCD_SOFCallback(hpcd); +#endif /* USE_HAL_PCD_REGISTER_CALLBACKS */ + + __HAL_PCD_CLEAR_FLAG(hpcd, USB_OTG_GINTSTS_SOF); + } + + /* Handle Incomplete ISO IN Interrupt */ + if (__HAL_PCD_GET_FLAG(hpcd, USB_OTG_GINTSTS_IISOIXFR)) + { + /* Keep application checking the corresponding Iso IN endpoint + causing the incomplete Interrupt */ + epnum = 0U; + +#if (USE_HAL_PCD_REGISTER_CALLBACKS == 1U) + hpcd->ISOINIncompleteCallback(hpcd, (uint8_t)epnum); +#else + HAL_PCD_ISOINIncompleteCallback(hpcd, (uint8_t)epnum); +#endif /* USE_HAL_PCD_REGISTER_CALLBACKS */ + + __HAL_PCD_CLEAR_FLAG(hpcd, USB_OTG_GINTSTS_IISOIXFR); + } + + /* Handle Incomplete ISO OUT Interrupt */ + if (__HAL_PCD_GET_FLAG(hpcd, USB_OTG_GINTSTS_PXFR_INCOMPISOOUT)) + { + /* Keep application checking the corresponding Iso OUT endpoint + causing the incomplete Interrupt */ + epnum = 0U; + +#if (USE_HAL_PCD_REGISTER_CALLBACKS == 1U) + hpcd->ISOOUTIncompleteCallback(hpcd, (uint8_t)epnum); +#else + HAL_PCD_ISOOUTIncompleteCallback(hpcd, (uint8_t)epnum); +#endif /* USE_HAL_PCD_REGISTER_CALLBACKS */ + + __HAL_PCD_CLEAR_FLAG(hpcd, USB_OTG_GINTSTS_PXFR_INCOMPISOOUT); + } + + /* Handle Connection event Interrupt */ + if (__HAL_PCD_GET_FLAG(hpcd, USB_OTG_GINTSTS_SRQINT)) + { +#if (USE_HAL_PCD_REGISTER_CALLBACKS == 1U) + hpcd->ConnectCallback(hpcd); +#else + HAL_PCD_ConnectCallback(hpcd); +#endif /* USE_HAL_PCD_REGISTER_CALLBACKS */ + + __HAL_PCD_CLEAR_FLAG(hpcd, USB_OTG_GINTSTS_SRQINT); + } + + /* Handle Disconnection event Interrupt */ + if (__HAL_PCD_GET_FLAG(hpcd, USB_OTG_GINTSTS_OTGINT)) + { + temp = hpcd->Instance->GOTGINT; + + if ((temp & USB_OTG_GOTGINT_SEDET) == USB_OTG_GOTGINT_SEDET) + { +#if (USE_HAL_PCD_REGISTER_CALLBACKS == 1U) + hpcd->DisconnectCallback(hpcd); +#else + HAL_PCD_DisconnectCallback(hpcd); +#endif /* USE_HAL_PCD_REGISTER_CALLBACKS */ + } + hpcd->Instance->GOTGINT |= temp; + } + } +} + + +/** + * @brief Handles PCD Wakeup interrupt request. 
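+  * @note  The wakeup event is signalled through the EXTI controller, so this
+  *        handler is normally called from the corresponding wakeup interrupt
+  *        service routine; an illustrative hook for the FS instance:
+  * @code
+  *   void OTG_FS_WKUP_IRQHandler(void)
+  *   {
+  *     HAL_PCD_WKUP_IRQHandler(&hpcd_USB_OTG_FS);
+  *   }
+  * @endcode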
+ * @param hpcd PCD handle + * @retval HAL status + */ +void HAL_PCD_WKUP_IRQHandler(PCD_HandleTypeDef *hpcd) +{ + USB_OTG_GlobalTypeDef *USBx; + + USBx = hpcd->Instance; + + if ((USBx->CID & (0x1U << 8)) == 0U) + { + /* Clear EXTI pending Bit */ + __HAL_USB_OTG_FS_WAKEUP_EXTI_CLEAR_FLAG(); + } + else + { + /* Clear EXTI pending Bit */ + __HAL_USB_OTG_HS_WAKEUP_EXTI_CLEAR_FLAG(); + } +} +#endif /* defined (USB_OTG_FS) || defined (USB_OTG_HS) */ + + +/** + * @brief Data OUT stage callback. + * @param hpcd PCD handle + * @param epnum endpoint number + * @retval None + */ +__weak void HAL_PCD_DataOutStageCallback(PCD_HandleTypeDef *hpcd, uint8_t epnum) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(hpcd); + UNUSED(epnum); + + /* NOTE : This function should not be modified, when the callback is needed, + the HAL_PCD_DataOutStageCallback could be implemented in the user file + */ +} + +/** + * @brief Data IN stage callback + * @param hpcd PCD handle + * @param epnum endpoint number + * @retval None + */ +__weak void HAL_PCD_DataInStageCallback(PCD_HandleTypeDef *hpcd, uint8_t epnum) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(hpcd); + UNUSED(epnum); + + /* NOTE : This function should not be modified, when the callback is needed, + the HAL_PCD_DataInStageCallback could be implemented in the user file + */ +} +/** + * @brief Setup stage callback + * @param hpcd PCD handle + * @retval None + */ +__weak void HAL_PCD_SetupStageCallback(PCD_HandleTypeDef *hpcd) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(hpcd); + + /* NOTE : This function should not be modified, when the callback is needed, + the HAL_PCD_SetupStageCallback could be implemented in the user file + */ +} + +/** + * @brief USB Start Of Frame callback. + * @param hpcd PCD handle + * @retval None + */ +__weak void HAL_PCD_SOFCallback(PCD_HandleTypeDef *hpcd) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(hpcd); + + /* NOTE : This function should not be modified, when the callback is needed, + the HAL_PCD_SOFCallback could be implemented in the user file + */ +} + +/** + * @brief USB Reset callback. + * @param hpcd PCD handle + * @retval None + */ +__weak void HAL_PCD_ResetCallback(PCD_HandleTypeDef *hpcd) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(hpcd); + + /* NOTE : This function should not be modified, when the callback is needed, + the HAL_PCD_ResetCallback could be implemented in the user file + */ +} + +/** + * @brief Suspend event callback. + * @param hpcd PCD handle + * @retval None + */ +__weak void HAL_PCD_SuspendCallback(PCD_HandleTypeDef *hpcd) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(hpcd); + + /* NOTE : This function should not be modified, when the callback is needed, + the HAL_PCD_SuspendCallback could be implemented in the user file + */ +} + +/** + * @brief Resume event callback. + * @param hpcd PCD handle + * @retval None + */ +__weak void HAL_PCD_ResumeCallback(PCD_HandleTypeDef *hpcd) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(hpcd); + + /* NOTE : This function should not be modified, when the callback is needed, + the HAL_PCD_ResumeCallback could be implemented in the user file + */ +} + +/** + * @brief Incomplete ISO OUT callback. 
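+  * @note  Like the other HAL_PCD_*Callback() functions, this is a __weak
+  *        definition intended to be overridden by the upper USB device stack;
+  *        a sketch assuming ST's USB Device Library as the upper layer
+  *        (USBD_LL_IsoOUTIncomplete() is not part of this driver):
+  * @code
+  *   void HAL_PCD_ISOOUTIncompleteCallback(PCD_HandleTypeDef *hpcd, uint8_t epnum)
+  *   {
+  *     USBD_LL_IsoOUTIncomplete((USBD_HandleTypeDef *)hpcd->pData, epnum);
+  *   }
+  * @endcode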
+ * @param hpcd PCD handle + * @param epnum endpoint number + * @retval None + */ +__weak void HAL_PCD_ISOOUTIncompleteCallback(PCD_HandleTypeDef *hpcd, uint8_t epnum) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(hpcd); + UNUSED(epnum); + + /* NOTE : This function should not be modified, when the callback is needed, + the HAL_PCD_ISOOUTIncompleteCallback could be implemented in the user file + */ +} + +/** + * @brief Incomplete ISO IN callback. + * @param hpcd PCD handle + * @param epnum endpoint number + * @retval None + */ +__weak void HAL_PCD_ISOINIncompleteCallback(PCD_HandleTypeDef *hpcd, uint8_t epnum) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(hpcd); + UNUSED(epnum); + + /* NOTE : This function should not be modified, when the callback is needed, + the HAL_PCD_ISOINIncompleteCallback could be implemented in the user file + */ +} + +/** + * @brief Connection event callback. + * @param hpcd PCD handle + * @retval None + */ +__weak void HAL_PCD_ConnectCallback(PCD_HandleTypeDef *hpcd) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(hpcd); + + /* NOTE : This function should not be modified, when the callback is needed, + the HAL_PCD_ConnectCallback could be implemented in the user file + */ +} + +/** + * @brief Disconnection event callback. + * @param hpcd PCD handle + * @retval None + */ +__weak void HAL_PCD_DisconnectCallback(PCD_HandleTypeDef *hpcd) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(hpcd); + + /* NOTE : This function should not be modified, when the callback is needed, + the HAL_PCD_DisconnectCallback could be implemented in the user file + */ +} + +/** + * @} + */ + +/** @defgroup PCD_Exported_Functions_Group3 Peripheral Control functions + * @brief management functions + * +@verbatim + =============================================================================== + ##### Peripheral Control functions ##### + =============================================================================== + [..] + This subsection provides a set of functions allowing to control the PCD data + transfers. + +@endverbatim + * @{ + */ + +/** + * @brief Connect the USB device + * @param hpcd PCD handle + * @retval HAL status + */ +HAL_StatusTypeDef HAL_PCD_DevConnect(PCD_HandleTypeDef *hpcd) +{ + __HAL_LOCK(hpcd); + (void)USB_DevConnect(hpcd->Instance); + __HAL_UNLOCK(hpcd); + + return HAL_OK; +} + +/** + * @brief Disconnect the USB device. + * @param hpcd PCD handle + * @retval HAL status + */ +HAL_StatusTypeDef HAL_PCD_DevDisconnect(PCD_HandleTypeDef *hpcd) +{ + __HAL_LOCK(hpcd); + (void)USB_DevDisconnect(hpcd->Instance); + __HAL_UNLOCK(hpcd); + + return HAL_OK; +} + +/** + * @brief Set the USB Device address. + * @param hpcd PCD handle + * @param address new device address + * @retval HAL status + */ +HAL_StatusTypeDef HAL_PCD_SetAddress(PCD_HandleTypeDef *hpcd, uint8_t address) +{ + __HAL_LOCK(hpcd); + hpcd->USB_Address = address; + (void)USB_SetDevAddress(hpcd->Instance, address); + __HAL_UNLOCK(hpcd); + + return HAL_OK; +} +/** + * @brief Open and configure an endpoint. 
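+  * @note  Bit 7 of ep_addr selects the direction (0x80 = IN, 0x00 = OUT); an
+  *        illustrative bulk endpoint pair with a 64-byte max packet size:
+  * @code
+  *   (void)HAL_PCD_EP_Open(&hpcd_USB_OTG_FS, 0x81U, 64U, EP_TYPE_BULK);
+  *   (void)HAL_PCD_EP_Open(&hpcd_USB_OTG_FS, 0x01U, 64U, EP_TYPE_BULK);
+  * @endcode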
+ * @param hpcd PCD handle + * @param ep_addr endpoint address + * @param ep_mps endpoint max packet size + * @param ep_type endpoint type + * @retval HAL status + */ +HAL_StatusTypeDef HAL_PCD_EP_Open(PCD_HandleTypeDef *hpcd, uint8_t ep_addr, + uint16_t ep_mps, uint8_t ep_type) +{ + HAL_StatusTypeDef ret = HAL_OK; + PCD_EPTypeDef *ep; + + if ((ep_addr & 0x80U) == 0x80U) + { + ep = &hpcd->IN_ep[ep_addr & EP_ADDR_MSK]; + ep->is_in = 1U; + } + else + { + ep = &hpcd->OUT_ep[ep_addr & EP_ADDR_MSK]; + ep->is_in = 0U; + } + + ep->num = ep_addr & EP_ADDR_MSK; + ep->maxpacket = ep_mps; + ep->type = ep_type; + + if (ep->is_in != 0U) + { + /* Assign a Tx FIFO */ + ep->tx_fifo_num = ep->num; + } + /* Set initial data PID. */ + if (ep_type == EP_TYPE_BULK) + { + ep->data_pid_start = 0U; + } + + __HAL_LOCK(hpcd); + (void)USB_ActivateEndpoint(hpcd->Instance, ep); + __HAL_UNLOCK(hpcd); + + return ret; +} + +/** + * @brief Deactivate an endpoint. + * @param hpcd PCD handle + * @param ep_addr endpoint address + * @retval HAL status + */ +HAL_StatusTypeDef HAL_PCD_EP_Close(PCD_HandleTypeDef *hpcd, uint8_t ep_addr) +{ + PCD_EPTypeDef *ep; + + if ((ep_addr & 0x80U) == 0x80U) + { + ep = &hpcd->IN_ep[ep_addr & EP_ADDR_MSK]; + ep->is_in = 1U; + } + else + { + ep = &hpcd->OUT_ep[ep_addr & EP_ADDR_MSK]; + ep->is_in = 0U; + } + ep->num = ep_addr & EP_ADDR_MSK; + + __HAL_LOCK(hpcd); + (void)USB_DeactivateEndpoint(hpcd->Instance, ep); + __HAL_UNLOCK(hpcd); + return HAL_OK; +} + + +/** + * @brief Receive an amount of data. + * @param hpcd PCD handle + * @param ep_addr endpoint address + * @param pBuf pointer to the reception buffer + * @param len amount of data to be received + * @retval HAL status + */ +HAL_StatusTypeDef HAL_PCD_EP_Receive(PCD_HandleTypeDef *hpcd, uint8_t ep_addr, uint8_t *pBuf, uint32_t len) +{ + PCD_EPTypeDef *ep; + + ep = &hpcd->OUT_ep[ep_addr & EP_ADDR_MSK]; + + /*setup and start the Xfer */ + ep->xfer_buff = pBuf; + ep->xfer_len = len; + ep->xfer_count = 0U; + ep->is_in = 0U; + ep->num = ep_addr & EP_ADDR_MSK; + + if (hpcd->Init.dma_enable == 1U) + { + ep->dma_addr = (uint32_t)pBuf; + } + + if ((ep_addr & EP_ADDR_MSK) == 0U) + { + (void)USB_EP0StartXfer(hpcd->Instance, ep, (uint8_t)hpcd->Init.dma_enable); + } + else + { + (void)USB_EPStartXfer(hpcd->Instance, ep, (uint8_t)hpcd->Init.dma_enable); + } + + return HAL_OK; +} + +/** + * @brief Get Received Data Size + * @param hpcd PCD handle + * @param ep_addr endpoint address + * @retval Data Size + */ +uint32_t HAL_PCD_EP_GetRxCount(PCD_HandleTypeDef *hpcd, uint8_t ep_addr) +{ + return hpcd->OUT_ep[ep_addr & EP_ADDR_MSK].xfer_count; +} +/** + * @brief Send an amount of data + * @param hpcd PCD handle + * @param ep_addr endpoint address + * @param pBuf pointer to the transmission buffer + * @param len amount of data to be sent + * @retval HAL status + */ +HAL_StatusTypeDef HAL_PCD_EP_Transmit(PCD_HandleTypeDef *hpcd, uint8_t ep_addr, uint8_t *pBuf, uint32_t len) +{ + PCD_EPTypeDef *ep; + + ep = &hpcd->IN_ep[ep_addr & EP_ADDR_MSK]; + + /*setup and start the Xfer */ + ep->xfer_buff = pBuf; + ep->xfer_len = len; + ep->xfer_count = 0U; + ep->is_in = 1U; + ep->num = ep_addr & EP_ADDR_MSK; + + if (hpcd->Init.dma_enable == 1U) + { + ep->dma_addr = (uint32_t)pBuf; + } + + if ((ep_addr & EP_ADDR_MSK) == 0U) + { + (void)USB_EP0StartXfer(hpcd->Instance, ep, (uint8_t)hpcd->Init.dma_enable); + } + else + { + (void)USB_EPStartXfer(hpcd->Instance, ep, (uint8_t)hpcd->Init.dma_enable); + } + + return HAL_OK; +} + +/** + * @brief Set a STALL condition over an 
endpoint + * @param hpcd PCD handle + * @param ep_addr endpoint address + * @retval HAL status + */ +HAL_StatusTypeDef HAL_PCD_EP_SetStall(PCD_HandleTypeDef *hpcd, uint8_t ep_addr) +{ + PCD_EPTypeDef *ep; + + if (((uint32_t)ep_addr & EP_ADDR_MSK) > hpcd->Init.dev_endpoints) + { + return HAL_ERROR; + } + + if ((0x80U & ep_addr) == 0x80U) + { + ep = &hpcd->IN_ep[ep_addr & EP_ADDR_MSK]; + ep->is_in = 1U; + } + else + { + ep = &hpcd->OUT_ep[ep_addr]; + ep->is_in = 0U; + } + + ep->is_stall = 1U; + ep->num = ep_addr & EP_ADDR_MSK; + + __HAL_LOCK(hpcd); + + (void)USB_EPSetStall(hpcd->Instance, ep); + + if ((ep_addr & EP_ADDR_MSK) == 0U) + { + (void)USB_EP0_OutStart(hpcd->Instance, (uint8_t)hpcd->Init.dma_enable, (uint8_t *)hpcd->Setup); + } + + __HAL_UNLOCK(hpcd); + + return HAL_OK; +} + +/** + * @brief Clear a STALL condition over in an endpoint + * @param hpcd PCD handle + * @param ep_addr endpoint address + * @retval HAL status + */ +HAL_StatusTypeDef HAL_PCD_EP_ClrStall(PCD_HandleTypeDef *hpcd, uint8_t ep_addr) +{ + PCD_EPTypeDef *ep; + + if (((uint32_t)ep_addr & 0x0FU) > hpcd->Init.dev_endpoints) + { + return HAL_ERROR; + } + + if ((0x80U & ep_addr) == 0x80U) + { + ep = &hpcd->IN_ep[ep_addr & EP_ADDR_MSK]; + ep->is_in = 1U; + } + else + { + ep = &hpcd->OUT_ep[ep_addr & EP_ADDR_MSK]; + ep->is_in = 0U; + } + + ep->is_stall = 0U; + ep->num = ep_addr & EP_ADDR_MSK; + + __HAL_LOCK(hpcd); + (void)USB_EPClearStall(hpcd->Instance, ep); + __HAL_UNLOCK(hpcd); + + return HAL_OK; +} + +/** + * @brief Flush an endpoint + * @param hpcd PCD handle + * @param ep_addr endpoint address + * @retval HAL status + */ +HAL_StatusTypeDef HAL_PCD_EP_Flush(PCD_HandleTypeDef *hpcd, uint8_t ep_addr) +{ + __HAL_LOCK(hpcd); + + if ((ep_addr & 0x80U) == 0x80U) + { + (void)USB_FlushTxFifo(hpcd->Instance, (uint32_t)ep_addr & EP_ADDR_MSK); + } + else + { + (void)USB_FlushRxFifo(hpcd->Instance); + } + + __HAL_UNLOCK(hpcd); + + return HAL_OK; +} + +/** + * @brief Activate remote wakeup signalling + * @param hpcd PCD handle + * @retval HAL status + */ +HAL_StatusTypeDef HAL_PCD_ActivateRemoteWakeup(PCD_HandleTypeDef *hpcd) +{ + return (USB_ActivateRemoteWakeup(hpcd->Instance)); +} + +/** + * @brief De-activate remote wakeup signalling. + * @param hpcd PCD handle + * @retval HAL status + */ +HAL_StatusTypeDef HAL_PCD_DeActivateRemoteWakeup(PCD_HandleTypeDef *hpcd) +{ + return (USB_DeActivateRemoteWakeup(hpcd->Instance)); +} + +/** + * @} + */ + +/** @defgroup PCD_Exported_Functions_Group4 Peripheral State functions + * @brief Peripheral State functions + * +@verbatim + =============================================================================== + ##### Peripheral State functions ##### + =============================================================================== + [..] + This subsection permits to get in run-time the status of the peripheral + and the data flow. + +@endverbatim + * @{ + */ + +/** + * @brief Return the PCD handle state. + * @param hpcd PCD handle + * @retval HAL state + */ +PCD_StateTypeDef HAL_PCD_GetState(PCD_HandleTypeDef *hpcd) +{ + return hpcd->State; +} + +/** + * @brief Set the USB Device high speed test mode. 
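+  * @note  The test mode selector is the value received in a USB 2.0
+  *        SET_FEATURE(TEST_MODE) request; it is written to the TCTL field of
+  *        DCTL, which is why it is shifted left by 4 below. Illustrative call:
+  * @code
+  *   (void)HAL_PCD_SetTestMode(&hpcd_USB_OTG_FS, TEST_J);
+  * @endcode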
+ * @param hpcd PCD handle + * @param address test mode + * @retval HAL status + */ +HAL_StatusTypeDef HAL_PCD_SetTestMode(PCD_HandleTypeDef *hpcd, uint8_t testmode) +{ + USB_OTG_GlobalTypeDef *USBx = hpcd->Instance; + uint32_t USBx_BASE = (uint32_t)USBx; + + switch (testmode) + { + case TEST_J: + case TEST_K: + case TEST_SE0_NAK: + case TEST_PACKET: + case TEST_FORCE_EN: + USBx_DEVICE->DCTL |= testmode << 4; + break; + + default: + break; + } + + return HAL_OK; +} +/** + * @} + */ + +/** + * @} + */ + +/* Private functions ---------------------------------------------------------*/ +/** @addtogroup PCD_Private_Functions + * @{ + */ +#if defined (USB_OTG_FS) || defined (USB_OTG_HS) +/** + * @brief Check FIFO for the next packet to be loaded. + * @param hpcd PCD handle + * @param epnum endpoint number + * @retval HAL status + */ +static HAL_StatusTypeDef PCD_WriteEmptyTxFifo(PCD_HandleTypeDef *hpcd, uint32_t epnum) +{ + USB_OTG_GlobalTypeDef *USBx = hpcd->Instance; + uint32_t USBx_BASE = (uint32_t)USBx; + USB_OTG_EPTypeDef *ep; + uint32_t len; + uint32_t len32b; + uint32_t fifoemptymsk; + + ep = &hpcd->IN_ep[epnum]; + + if (ep->xfer_count > ep->xfer_len) + { + return HAL_ERROR; + } + + len = ep->xfer_len - ep->xfer_count; + + if (len > ep->maxpacket) + { + len = ep->maxpacket; + } + + len32b = (len + 3U) / 4U; + + while (((USBx_INEP(epnum)->DTXFSTS & USB_OTG_DTXFSTS_INEPTFSAV) >= len32b) && + (ep->xfer_count < ep->xfer_len) && (ep->xfer_len != 0U)) + { + /* Write the FIFO */ + len = ep->xfer_len - ep->xfer_count; + + if (len > ep->maxpacket) + { + len = ep->maxpacket; + } + len32b = (len + 3U) / 4U; + + (void)USB_WritePacket(USBx, ep->xfer_buff, (uint8_t)epnum, (uint16_t)len, + (uint8_t)hpcd->Init.dma_enable); + + ep->xfer_buff += len; + ep->xfer_count += len; + } + + if (ep->xfer_len <= ep->xfer_count) + { + fifoemptymsk = (uint32_t)(0x1UL << (epnum & EP_ADDR_MSK)); + USBx_DEVICE->DIEPEMPMSK &= ~fifoemptymsk; + } + + return HAL_OK; +} + + +/** + * @brief process EP OUT transfer complete interrupt. 
+ * @param hpcd PCD handle + * @param epnum endpoint number + * @retval HAL status + */ +static HAL_StatusTypeDef PCD_EP_OutXfrComplete_int(PCD_HandleTypeDef *hpcd, uint32_t epnum) +{ + USB_OTG_GlobalTypeDef *USBx = hpcd->Instance; + uint32_t USBx_BASE = (uint32_t)USBx; + uint32_t gSNPSiD = *(__IO uint32_t *)(&USBx->CID + 0x1U); + uint32_t DoepintReg = USBx_OUTEP(epnum)->DOEPINT; + + if (hpcd->Init.dma_enable == 1U) + { + if ((DoepintReg & USB_OTG_DOEPINT_STUP) == USB_OTG_DOEPINT_STUP) /* Class C */ + { + /* StupPktRcvd = 1 this is a setup packet */ + if ((gSNPSiD > USB_OTG_CORE_ID_300A) && + ((DoepintReg & USB_OTG_DOEPINT_STPKTRX) == USB_OTG_DOEPINT_STPKTRX)) + { + CLEAR_OUT_EP_INTR(epnum, USB_OTG_DOEPINT_STPKTRX); + } + } + else if ((DoepintReg & USB_OTG_DOEPINT_OTEPSPR) == USB_OTG_DOEPINT_OTEPSPR) /* Class E */ + { + CLEAR_OUT_EP_INTR(epnum, USB_OTG_DOEPINT_OTEPSPR); + } + else if ((DoepintReg & (USB_OTG_DOEPINT_STUP | USB_OTG_DOEPINT_OTEPSPR)) == 0U) + { + /* StupPktRcvd = 1 this is a setup packet */ + if ((gSNPSiD > USB_OTG_CORE_ID_300A) && + ((DoepintReg & USB_OTG_DOEPINT_STPKTRX) == USB_OTG_DOEPINT_STPKTRX)) + { + CLEAR_OUT_EP_INTR(epnum, USB_OTG_DOEPINT_STPKTRX); + } + else + { + /* out data packet received over EP0 */ + hpcd->OUT_ep[epnum].xfer_count = + hpcd->OUT_ep[epnum].maxpacket - + (USBx_OUTEP(epnum)->DOEPTSIZ & USB_OTG_DOEPTSIZ_XFRSIZ); + + hpcd->OUT_ep[epnum].xfer_buff += hpcd->OUT_ep[epnum].maxpacket; + + if ((epnum == 0U) && (hpcd->OUT_ep[epnum].xfer_len == 0U)) + { + /* this is ZLP, so prepare EP0 for next setup */ + (void)USB_EP0_OutStart(hpcd->Instance, 1U, (uint8_t *)hpcd->Setup); + } +#if (USE_HAL_PCD_REGISTER_CALLBACKS == 1U) + hpcd->DataOutStageCallback(hpcd, (uint8_t)epnum); +#else + HAL_PCD_DataOutStageCallback(hpcd, (uint8_t)epnum); +#endif /* USE_HAL_PCD_REGISTER_CALLBACKS */ + } + } + else + { + /* ... */ + } + } + else + { + if (gSNPSiD == USB_OTG_CORE_ID_310A) + { + /* StupPktRcvd = 1 this is a setup packet */ + if ((DoepintReg & USB_OTG_DOEPINT_STPKTRX) == USB_OTG_DOEPINT_STPKTRX) + { + CLEAR_OUT_EP_INTR(epnum, USB_OTG_DOEPINT_STPKTRX); + } + else + { + if ((DoepintReg & USB_OTG_DOEPINT_OTEPSPR) == USB_OTG_DOEPINT_OTEPSPR) + { + CLEAR_OUT_EP_INTR(epnum, USB_OTG_DOEPINT_OTEPSPR); + } + +#if (USE_HAL_PCD_REGISTER_CALLBACKS == 1U) + hpcd->DataOutStageCallback(hpcd, (uint8_t)epnum); +#else + HAL_PCD_DataOutStageCallback(hpcd, (uint8_t)epnum); +#endif /* USE_HAL_PCD_REGISTER_CALLBACKS */ + } + } + else + { + if ((epnum == 0U) && (hpcd->OUT_ep[epnum].xfer_len == 0U)) + { + /* this is ZLP, so prepare EP0 for next setup */ + (void)USB_EP0_OutStart(hpcd->Instance, 0U, (uint8_t *)hpcd->Setup); + } + +#if (USE_HAL_PCD_REGISTER_CALLBACKS == 1U) + hpcd->DataOutStageCallback(hpcd, (uint8_t)epnum); +#else + HAL_PCD_DataOutStageCallback(hpcd, (uint8_t)epnum); +#endif /* USE_HAL_PCD_REGISTER_CALLBACKS */ + } + } + + return HAL_OK; +} + + +/** + * @brief process EP OUT setup packet received interrupt. 
+ * @param hpcd PCD handle + * @param epnum endpoint number + * @retval HAL status + */ +static HAL_StatusTypeDef PCD_EP_OutSetupPacket_int(PCD_HandleTypeDef *hpcd, uint32_t epnum) +{ + USB_OTG_GlobalTypeDef *USBx = hpcd->Instance; + uint32_t USBx_BASE = (uint32_t)USBx; + uint32_t gSNPSiD = *(__IO uint32_t *)(&USBx->CID + 0x1U); + uint32_t DoepintReg = USBx_OUTEP(epnum)->DOEPINT; + + if ((gSNPSiD > USB_OTG_CORE_ID_300A) && + ((DoepintReg & USB_OTG_DOEPINT_STPKTRX) == USB_OTG_DOEPINT_STPKTRX)) + { + CLEAR_OUT_EP_INTR(epnum, USB_OTG_DOEPINT_STPKTRX); + } + + /* Inform the upper layer that a setup packet is available */ +#if (USE_HAL_PCD_REGISTER_CALLBACKS == 1U) + hpcd->SetupStageCallback(hpcd); +#else + HAL_PCD_SetupStageCallback(hpcd); +#endif /* USE_HAL_PCD_REGISTER_CALLBACKS */ + + if ((gSNPSiD > USB_OTG_CORE_ID_300A) && (hpcd->Init.dma_enable == 1U)) + { + (void)USB_EP0_OutStart(hpcd->Instance, 1U, (uint8_t *)hpcd->Setup); + } + + return HAL_OK; +} +#endif /* defined (USB_OTG_FS) || defined (USB_OTG_HS) */ + + +/** + * @} + */ +#endif /* defined (USB_OTG_FS) || defined (USB_OTG_HS) */ +#endif /* HAL_PCD_MODULE_ENABLED */ + +/** + * @} + */ + +/** + * @} + */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_pcd_ex.c b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_pcd_ex.c new file mode 100644 index 0000000..b9eb434 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_pcd_ex.c @@ -0,0 +1,204 @@ +/** + ****************************************************************************** + * @file stm32f7xx_hal_pcd_ex.c + * @author MCD Application Team + * @brief PCD Extended HAL module driver. + * This file provides firmware functions to manage the following + * functionalities of the USB Peripheral Controller: + * + Extended features functions + * + ****************************************************************************** + * @attention + * + * Copyright (c) 2017 STMicroelectronics. + * All rights reserved. + * + * This software is licensed under terms that can be found in the LICENSE file + * in the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. 
+ * + ****************************************************************************** + */ + +/* Includes ------------------------------------------------------------------*/ +#include "stm32f7xx_hal.h" + +/** @addtogroup STM32F7xx_HAL_Driver + * @{ + */ + +/** @defgroup PCDEx PCDEx + * @brief PCD Extended HAL module driver + * @{ + */ + +#ifdef HAL_PCD_MODULE_ENABLED + +#if defined (USB_OTG_FS) || defined (USB_OTG_HS) +/* Private types -------------------------------------------------------------*/ +/* Private variables ---------------------------------------------------------*/ +/* Private constants ---------------------------------------------------------*/ +/* Private macros ------------------------------------------------------------*/ +/* Private functions ---------------------------------------------------------*/ +/* Exported functions --------------------------------------------------------*/ + +/** @defgroup PCDEx_Exported_Functions PCDEx Exported Functions + * @{ + */ + +/** @defgroup PCDEx_Exported_Functions_Group1 Peripheral Control functions + * @brief PCDEx control functions + * +@verbatim + =============================================================================== + ##### Extended features functions ##### + =============================================================================== + [..] This section provides functions allowing to: + (+) Update FIFO configuration + +@endverbatim + * @{ + */ +#if defined (USB_OTG_FS) || defined (USB_OTG_HS) +/** + * @brief Set Tx FIFO + * @param hpcd PCD handle + * @param fifo The number of Tx fifo + * @param size Fifo size + * @retval HAL status + */ +HAL_StatusTypeDef HAL_PCDEx_SetTxFiFo(PCD_HandleTypeDef *hpcd, uint8_t fifo, uint16_t size) +{ + uint8_t i; + uint32_t Tx_Offset; + + /* TXn min size = 16 words. (n : Transmit FIFO index) + When a TxFIFO is not used, the Configuration should be as follows: + case 1 : n > m and Txn is not used (n,m : Transmit FIFO indexes) + --> Txm can use the space allocated for Txn. + case2 : n < m and Txn is not used (n,m : Transmit FIFO indexes) + --> Txn should be configured with the minimum space of 16 words + The FIFO is used optimally when used TxFIFOs are allocated in the top + of the FIFO.Ex: use EP1 and EP2 as IN instead of EP1 and EP3 as IN ones. + When DMA is used 3n * FIFO locations should be reserved for internal DMA registers */ + + Tx_Offset = hpcd->Instance->GRXFSIZ; + + if (fifo == 0U) + { + hpcd->Instance->DIEPTXF0_HNPTXFSIZ = ((uint32_t)size << 16) | Tx_Offset; + } + else + { + Tx_Offset += (hpcd->Instance->DIEPTXF0_HNPTXFSIZ) >> 16; + for (i = 0U; i < (fifo - 1U); i++) + { + Tx_Offset += (hpcd->Instance->DIEPTXF[i] >> 16); + } + + /* Multiply Tx_Size by 2 to get higher performance */ + hpcd->Instance->DIEPTXF[fifo - 1U] = ((uint32_t)size << 16) | Tx_Offset; + } + + return HAL_OK; +} + +/** + * @brief Set Rx FIFO + * @param hpcd PCD handle + * @param size Size of Rx fifo + * @retval HAL status + */ +HAL_StatusTypeDef HAL_PCDEx_SetRxFiFo(PCD_HandleTypeDef *hpcd, uint16_t size) +{ + hpcd->Instance->GRXFSIZ = size; + + return HAL_OK; +} + +/** + * @brief Activate LPM feature. 
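HAL_PCDEx_SetRxFiFo()/HAL_PCDEx_SetTxFiFo() above only program the FIFO registers; the split itself is chosen by the application. A hedged sketch of a common FS-core layout, called once after HAL_PCD_Init(); the handle name hpcd and the endpoint usage are assumptions, and sizes are in 32-bit words:

#include "stm32f7xx_hal.h"

extern PCD_HandleTypeDef hpcd;               // assumed application handle

static void usb_fifo_layout_example(void)
{
  HAL_PCDEx_SetRxFiFo(&hpcd, 0x80);          // shared RX FIFO: 128 words
  HAL_PCDEx_SetTxFiFo(&hpcd, 0, 0x40);       // EP0 IN FIFO: 64 words
  HAL_PCDEx_SetTxFiFo(&hpcd, 1, 0x80);       // EP1 IN FIFO: 128 words -> 320 words, the FS FIFO total
}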
+ * @param hpcd PCD handle + * @retval HAL status + */ +HAL_StatusTypeDef HAL_PCDEx_ActivateLPM(PCD_HandleTypeDef *hpcd) +{ + USB_OTG_GlobalTypeDef *USBx = hpcd->Instance; + + hpcd->lpm_active = 1U; + hpcd->LPM_State = LPM_L0; + USBx->GINTMSK |= USB_OTG_GINTMSK_LPMINTM; + USBx->GLPMCFG |= (USB_OTG_GLPMCFG_LPMEN | USB_OTG_GLPMCFG_LPMACK | USB_OTG_GLPMCFG_ENBESL); + + return HAL_OK; +} + +/** + * @brief Deactivate LPM feature. + * @param hpcd PCD handle + * @retval HAL status + */ +HAL_StatusTypeDef HAL_PCDEx_DeActivateLPM(PCD_HandleTypeDef *hpcd) +{ + USB_OTG_GlobalTypeDef *USBx = hpcd->Instance; + + hpcd->lpm_active = 0U; + USBx->GINTMSK &= ~USB_OTG_GINTMSK_LPMINTM; + USBx->GLPMCFG &= ~(USB_OTG_GLPMCFG_LPMEN | USB_OTG_GLPMCFG_LPMACK | USB_OTG_GLPMCFG_ENBESL); + + return HAL_OK; +} + +#endif /* defined (USB_OTG_FS) || defined (USB_OTG_HS) */ + +/** + * @brief Send LPM message to user layer callback. + * @param hpcd PCD handle + * @param msg LPM message + * @retval HAL status + */ +__weak void HAL_PCDEx_LPM_Callback(PCD_HandleTypeDef *hpcd, PCD_LPM_MsgTypeDef msg) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(hpcd); + UNUSED(msg); + + /* NOTE : This function should not be modified, when the callback is needed, + the HAL_PCDEx_LPM_Callback could be implemented in the user file + */ +} + +/** + * @brief Send BatteryCharging message to user layer callback. + * @param hpcd PCD handle + * @param msg LPM message + * @retval HAL status + */ +__weak void HAL_PCDEx_BCD_Callback(PCD_HandleTypeDef *hpcd, PCD_BCD_MsgTypeDef msg) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(hpcd); + UNUSED(msg); + + /* NOTE : This function should not be modified, when the callback is needed, + the HAL_PCDEx_BCD_Callback could be implemented in the user file + */ +} + +/** + * @} + */ + +/** + * @} + */ +#endif /* defined (USB_OTG_FS) || defined (USB_OTG_HS) */ +#endif /* HAL_PCD_MODULE_ENABLED */ + +/** + * @} + */ + +/** + * @} + */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_pwr.c b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_pwr.c new file mode 100644 index 0000000..bc0f29c --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_pwr.c @@ -0,0 +1,597 @@ +/** + ****************************************************************************** + * @file stm32f7xx_hal_pwr.c + * @author MCD Application Team + * @brief PWR HAL module driver. + * This file provides firmware functions to manage the following + * functionalities of the Power Controller (PWR) peripheral: + * + Initialization and de-initialization functions + * + Peripheral Control functions + * + ****************************************************************************** + * @attention + * + * Copyright (c) 2017 STMicroelectronics. + * All rights reserved. + * + * This software is licensed under terms that can be found in the LICENSE file + * in the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. 
+ * + ****************************************************************************** + */ + +/* Includes ------------------------------------------------------------------*/ +#include "stm32f7xx_hal.h" + +/** @addtogroup STM32F7xx_HAL_Driver + * @{ + */ + +/** @defgroup PWR PWR + * @brief PWR HAL module driver + * @{ + */ + +#ifdef HAL_PWR_MODULE_ENABLED + +/* Private typedef -----------------------------------------------------------*/ +/* Private define ------------------------------------------------------------*/ +/** @addtogroup PWR_Private_Constants + * @{ + */ + +/** @defgroup PWR_PVD_Mode_Mask PWR PVD Mode Mask + * @{ + */ +#define PVD_MODE_IT ((uint32_t)0x00010000U) +#define PVD_MODE_EVT ((uint32_t)0x00020000U) +#define PVD_RISING_EDGE ((uint32_t)0x00000001U) +#define PVD_FALLING_EDGE ((uint32_t)0x00000002U) +/** + * @} + */ + +/** @defgroup PWR_ENABLE_WUP_Mask PWR Enable WUP Mask + * @{ + */ +#define PWR_EWUP_MASK ((uint32_t)0x00003F00) +/** + * @} + */ + +/** + * @} + */ +/* Private macro -------------------------------------------------------------*/ +/* Private variables ---------------------------------------------------------*/ +/* Private function prototypes -----------------------------------------------*/ +/* Private functions ---------------------------------------------------------*/ + +/** @defgroup PWR_Exported_Functions PWR Exported Functions + * @{ + */ + +/** @defgroup PWR_Exported_Functions_Group1 Initialization and de-initialization functions + * @brief Initialization and de-initialization functions + * +@verbatim + =============================================================================== + ##### Initialization and de-initialization functions ##### + =============================================================================== + [..] + After reset, the backup domain (RTC registers, RTC backup data + registers and backup SRAM) is protected against possible unwanted + write accesses. + To enable access to the RTC Domain and RTC registers, proceed as follows: + (+) Enable the Power Controller (PWR) APB1 interface clock using the + __HAL_RCC_PWR_CLK_ENABLE() macro. + (+) Enable access to RTC domain using the HAL_PWR_EnableBkUpAccess() function. + +@endverbatim + * @{ + */ + +/** + * @brief Deinitializes the HAL PWR peripheral registers to their default reset values. + * @retval None + */ +void HAL_PWR_DeInit(void) +{ + __HAL_RCC_PWR_FORCE_RESET(); + __HAL_RCC_PWR_RELEASE_RESET(); +} + +/** + * @brief Enables access to the backup domain (RTC registers, RTC + * backup data registers and backup SRAM). + * @note If the HSE divided by 2, 3, ..31 is used as the RTC clock, the + * Backup Domain Access should be kept enabled. + * @retval None + */ +void HAL_PWR_EnableBkUpAccess(void) +{ + /* Enable access to RTC and backup registers */ + SET_BIT(PWR->CR1, PWR_CR1_DBP); +} + +/** + * @brief Disables access to the backup domain (RTC registers, RTC + * backup data registers and backup SRAM). + * @note If the HSE divided by 2, 3, ..31 is used as the RTC clock, the + * Backup Domain Access should be kept enabled. 
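As noted above, the PWR APB1 interface clock has to be running before the backup domain can be unlocked. A minimal sketch of the usual ordering, assuming the actual RTC / backup-register setup happens elsewhere:

#include "stm32f7xx_hal.h"

static void backup_domain_unlock_example(void)
{
  __HAL_RCC_PWR_CLK_ENABLE();    // PWR interface clock first
  HAL_PWR_EnableBkUpAccess();    // then lift backup-domain write protection
  // ... RTC, backup registers or backup SRAM configuration would follow ...
}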
+ * @retval None + */ +void HAL_PWR_DisableBkUpAccess(void) +{ + /* Disable access to RTC and backup registers */ + CLEAR_BIT(PWR->CR1, PWR_CR1_DBP); +} + +/** + * @} + */ + +/** @defgroup PWR_Exported_Functions_Group2 Peripheral Control functions + * @brief Low Power modes configuration functions + * +@verbatim + + =============================================================================== + ##### Peripheral Control functions ##### + =============================================================================== + + *** PVD configuration *** + ========================= + [..] + (+) The PVD is used to monitor the VDD power supply by comparing it to a + threshold selected by the PVD Level (PLS[2:0] bits in the PWR_CR). + (+) A PVDO flag is available to indicate if VDD/VDDA is higher or lower + than the PVD threshold. This event is internally connected to the EXTI + line16 and can generate an interrupt if enabled. This is done through + __HAL_PWR_PVD_EXTI_ENABLE_IT() macro. + (+) The PVD is stopped in Standby mode. + + *** Wake-up pin configuration *** + ================================ + [..] + (+) Wake-up pin is used to wake up the system from Standby mode. This pin is + forced in input pull-down configuration and is active on rising edges. + (+) There are up to 6 Wake-up pin in the STM32F7 devices family + + *** Low Power modes configuration *** + ===================================== + [..] + The devices feature 3 low-power modes: + (+) Sleep mode: Cortex-M7 core stopped, peripherals kept running. + (+) Stop mode: all clocks are stopped, regulator running, regulator + in low power mode + (+) Standby mode: 1.2V domain powered off. + + *** Sleep mode *** + ================== + [..] + (+) Entry: + The Sleep mode is entered by using the HAL_PWR_EnterSLEEPMode(PWR_MAINREGULATOR_ON, PWR_SLEEPENTRY_WFI) + functions with + (++) PWR_SLEEPENTRY_WFI: enter SLEEP mode with WFI instruction + (++) PWR_SLEEPENTRY_WFE: enter SLEEP mode with WFE instruction + + -@@- The Regulator parameter is not used for the STM32F7 family + and is kept as parameter just to maintain compatibility with the + lower power families (STM32L). + (+) Exit: + Any peripheral interrupt acknowledged by the nested vectored interrupt + controller (NVIC) can wake up the device from Sleep mode. + + *** Stop mode *** + ================= + [..] + In Stop mode, all clocks in the 1.2V domain are stopped, the PLL, the HSI, + and the HSE RC oscillators are disabled. Internal SRAM and register contents + are preserved. + The voltage regulator can be configured either in normal or low-power mode. + To minimize the consumption In Stop mode, FLASH can be powered off before + entering the Stop mode using the HAL_PWREx_EnableFlashPowerDown() function. + It can be switched on again by software after exiting the Stop mode using + the HAL_PWREx_DisableFlashPowerDown() function. + + (+) Entry: + The Stop mode is entered using the HAL_PWR_EnterSTOPMode(PWR_MAINREGULATOR_ON) + function with: + (++) Main regulator ON. + (++) Low Power regulator ON. + (+) Exit: + Any EXTI Line (Internal or External) configured in Interrupt/Event mode. + + *** Standby mode *** + ==================== + [..] + (+) + The Standby mode allows to achieve the lowest power consumption. It is based + on the Cortex-M7 deep sleep mode, with the voltage regulator disabled. + The 1.2V domain is consequently powered off. The PLL, the HSI oscillator and + the HSE oscillator are also switched off. 
SRAM and register contents are lost + except for the RTC registers, RTC backup registers, backup SRAM and Standby + circuitry. + + The voltage regulator is OFF. + + (++) Entry: + (+++) The Standby mode is entered using the HAL_PWR_EnterSTANDBYMode() function. + (++) Exit: + (+++) WKUP pin rising or falling edge, RTC alarm (Alarm A and Alarm B), RTC + wakeup, tamper event, time stamp event, external reset in NRST pin, IWDG reset. + + *** Auto-wakeup (AWU) from low-power mode *** + ============================================= + [..] + + (+) The MCU can be woken up from low-power mode by an RTC Alarm event, an RTC + Wakeup event, a tamper event or a time-stamp event, without depending on + an external interrupt (Auto-wakeup mode). + + (+) RTC auto-wakeup (AWU) from the Stop and Standby modes + + (++) To wake up from the Stop mode with an RTC alarm event, it is necessary to + configure the RTC to generate the RTC alarm using the HAL_RTC_SetAlarm_IT() function. + + (++) To wake up from the Stop mode with an RTC Tamper or time stamp event, it + is necessary to configure the RTC to detect the tamper or time stamp event using the + HAL_RTCEx_SetTimeStamp_IT() or HAL_RTCEx_SetTamper_IT() functions. + + (++) To wake up from the Stop mode with an RTC WakeUp event, it is necessary to + configure the RTC to generate the RTC WakeUp event using the HAL_RTCEx_SetWakeUpTimer_IT() function. + +@endverbatim + * @{ + */ + +/** + * @brief Configures the voltage threshold detected by the Power Voltage Detector(PVD). + * @param sConfigPVD pointer to an PWR_PVDTypeDef structure that contains the configuration + * information for the PVD. + * @note Refer to the electrical characteristics of your device datasheet for + * more details about the voltage threshold corresponding to each + * detection level. + * @retval None + */ +void HAL_PWR_ConfigPVD(PWR_PVDTypeDef *sConfigPVD) +{ + /* Check the parameters */ + assert_param(IS_PWR_PVD_LEVEL(sConfigPVD->PVDLevel)); + assert_param(IS_PWR_PVD_MODE(sConfigPVD->Mode)); + + /* Set PLS[7:5] bits according to PVDLevel value */ + MODIFY_REG(PWR->CR1, PWR_CR1_PLS, sConfigPVD->PVDLevel); + + /* Clear any previous config. Keep it clear if no event or IT mode is selected */ + __HAL_PWR_PVD_EXTI_DISABLE_EVENT(); + __HAL_PWR_PVD_EXTI_DISABLE_IT(); + __HAL_PWR_PVD_EXTI_DISABLE_RISING_EDGE(); + __HAL_PWR_PVD_EXTI_DISABLE_FALLING_EDGE(); + + /* Configure interrupt mode */ + if((sConfigPVD->Mode & PVD_MODE_IT) == PVD_MODE_IT) + { + __HAL_PWR_PVD_EXTI_ENABLE_IT(); + } + + /* Configure event mode */ + if((sConfigPVD->Mode & PVD_MODE_EVT) == PVD_MODE_EVT) + { + __HAL_PWR_PVD_EXTI_ENABLE_EVENT(); + } + + /* Configure the edge */ + if((sConfigPVD->Mode & PVD_RISING_EDGE) == PVD_RISING_EDGE) + { + __HAL_PWR_PVD_EXTI_ENABLE_RISING_EDGE(); + } + + if((sConfigPVD->Mode & PVD_FALLING_EDGE) == PVD_FALLING_EDGE) + { + __HAL_PWR_PVD_EXTI_ENABLE_FALLING_EDGE(); + } +} + +/** + * @brief Enables the Power Voltage Detector(PVD). + * @retval None + */ +void HAL_PWR_EnablePVD(void) +{ + /* Enable the power voltage detector */ + SET_BIT(PWR->CR1, PWR_CR1_PVDE); +} + +/** + * @brief Disables the Power Voltage Detector(PVD). + * @retval None + */ +void HAL_PWR_DisablePVD(void) +{ + /* Disable the power voltage detector */ + CLEAR_BIT(PWR->CR1, PWR_CR1_PVDE); +} + +/** + * @brief Enable the WakeUp PINx functionality. + * @param WakeUpPinPolarity Specifies which Wake-Up pin to enable. 
+ * This parameter can be one of the following legacy values, which sets the default polarity: + * detection on high level (rising edge): + * @arg PWR_WAKEUP_PIN1, PWR_WAKEUP_PIN2, PWR_WAKEUP_PIN3, PWR_WAKEUP_PIN4, PWR_WAKEUP_PIN5, PWR_WAKEUP_PIN6 + * or one of the following value where the user can explicitly states the enabled pin and + * the chosen polarity + * @arg PWR_WAKEUP_PIN1_HIGH or PWR_WAKEUP_PIN1_LOW + * @arg PWR_WAKEUP_PIN2_HIGH or PWR_WAKEUP_PIN2_LOW + * @arg PWR_WAKEUP_PIN3_HIGH or PWR_WAKEUP_PIN3_LOW + * @arg PWR_WAKEUP_PIN4_HIGH or PWR_WAKEUP_PIN4_LOW + * @arg PWR_WAKEUP_PIN5_HIGH or PWR_WAKEUP_PIN5_LOW + * @arg PWR_WAKEUP_PIN6_HIGH or PWR_WAKEUP_PIN6_LOW + * @note PWR_WAKEUP_PINx and PWR_WAKEUP_PINx_HIGH are equivalent. + * @retval None + */ +void HAL_PWR_EnableWakeUpPin(uint32_t WakeUpPinPolarity) +{ + assert_param(IS_PWR_WAKEUP_PIN(WakeUpPinPolarity)); + + /* Enable wake-up pin */ + SET_BIT(PWR->CSR2, (PWR_EWUP_MASK & WakeUpPinPolarity)); + + /* Specifies the Wake-Up pin polarity for the event detection + (rising or falling edge) */ + MODIFY_REG(PWR->CR2, (PWR_EWUP_MASK & WakeUpPinPolarity), (WakeUpPinPolarity >> 0x06)); +} + +/** + * @brief Disables the WakeUp PINx functionality. + * @param WakeUpPinx Specifies the Power Wake-Up pin to disable. + * This parameter can be one of the following values: + * @arg PWR_WAKEUP_PIN1 + * @arg PWR_WAKEUP_PIN2 + * @arg PWR_WAKEUP_PIN3 + * @arg PWR_WAKEUP_PIN4 + * @arg PWR_WAKEUP_PIN5 + * @arg PWR_WAKEUP_PIN6 + * @retval None + */ +void HAL_PWR_DisableWakeUpPin(uint32_t WakeUpPinx) +{ + assert_param(IS_PWR_WAKEUP_PIN(WakeUpPinx)); + + CLEAR_BIT(PWR->CSR2, WakeUpPinx); +} + +/** + * @brief Enters Sleep mode. + * + * @note In Sleep mode, all I/O pins keep the same state as in Run mode. + * + * @note In Sleep mode, the systick is stopped to avoid exit from this mode with + * systick interrupt when used as time base for Timeout + * + * @param Regulator Specifies the regulator state in SLEEP mode. + * This parameter can be one of the following values: + * @arg PWR_MAINREGULATOR_ON: SLEEP mode with regulator ON + * @arg PWR_LOWPOWERREGULATOR_ON: SLEEP mode with low power regulator ON + * @note This parameter is not used for the STM32F7 family and is kept as parameter + * just to maintain compatibility with the lower power families. + * @param SLEEPEntry Specifies if SLEEP mode in entered with WFI or WFE instruction. + * This parameter can be one of the following values: + * @arg PWR_SLEEPENTRY_WFI: enter SLEEP mode with WFI instruction + * @arg PWR_SLEEPENTRY_WFE: enter SLEEP mode with WFE instruction + * @retval None + */ +void HAL_PWR_EnterSLEEPMode(uint32_t Regulator, uint8_t SLEEPEntry) +{ + /* Check the parameters */ + assert_param(IS_PWR_REGULATOR(Regulator)); + assert_param(IS_PWR_SLEEP_ENTRY(SLEEPEntry)); + + /* Clear SLEEPDEEP bit of Cortex System Control Register */ + CLEAR_BIT(SCB->SCR, ((uint32_t)SCB_SCR_SLEEPDEEP_Msk)); + + /* Ensure that all instructions done before entering SLEEP mode */ + __DSB(); + __ISB(); + + /* Select SLEEP mode entry -------------------------------------------------*/ + if(SLEEPEntry == PWR_SLEEPENTRY_WFI) + { + /* Request Wait For Interrupt */ + __WFI(); + } + else + { + /* Request Wait For Event */ + __SEV(); + __WFE(); + __WFE(); + } +} + +/** + * @brief Enters Stop mode. + * @note In Stop mode, all I/O pins keep the same state as in Run mode. + * @note When exiting Stop mode by issuing an interrupt or a wakeup event, + * the HSI RC oscillator is selected as system clock. 
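A minimal sketch of Sleep-mode entry as described here, assuming the application suspends the HAL time base itself so the SysTick interrupt does not wake the core immediately:

#include "stm32f7xx_hal.h"

static void enter_sleep_example(void)
{
  HAL_SuspendTick();                                                 // stop the HAL time base
  HAL_PWR_EnterSLEEPMode(PWR_MAINREGULATOR_ON, PWR_SLEEPENTRY_WFI);  // wait for any enabled interrupt
  HAL_ResumeTick();                                                  // back in Run mode
}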
+ * @note When the voltage regulator operates in low power mode, an additional + * startup delay is incurred when waking up from Stop mode. + * By keeping the internal regulator ON during Stop mode, the consumption + * is higher although the startup time is reduced. + * @param Regulator Specifies the regulator state in Stop mode. + * This parameter can be one of the following values: + * @arg PWR_MAINREGULATOR_ON: Stop mode with regulator ON + * @arg PWR_LOWPOWERREGULATOR_ON: Stop mode with low power regulator ON + * @param STOPEntry Specifies if Stop mode in entered with WFI or WFE instruction. + * This parameter can be one of the following values: + * @arg PWR_STOPENTRY_WFI: Enter Stop mode with WFI instruction + * @arg PWR_STOPENTRY_WFE: Enter Stop mode with WFE instruction + * @retval None + */ +void HAL_PWR_EnterSTOPMode(uint32_t Regulator, uint8_t STOPEntry) +{ + uint32_t tmpreg = 0; + + /* Check the parameters */ + assert_param(IS_PWR_REGULATOR(Regulator)); + assert_param(IS_PWR_STOP_ENTRY(STOPEntry)); + + /* Select the regulator state in Stop mode ---------------------------------*/ + tmpreg = PWR->CR1; + /* Clear PDDS and LPDS bits */ + tmpreg &= (uint32_t)~(PWR_CR1_PDDS | PWR_CR1_LPDS); + + /* Set LPDS, MRLVDS and LPLVDS bits according to Regulator value */ + tmpreg |= Regulator; + + /* Store the new value */ + PWR->CR1 = tmpreg; + + /* Set SLEEPDEEP bit of Cortex System Control Register */ + SCB->SCR |= SCB_SCR_SLEEPDEEP_Msk; + + /* Ensure that all instructions done before entering STOP mode */ + __DSB(); + __ISB(); + + /* Select Stop mode entry --------------------------------------------------*/ + if(STOPEntry == PWR_STOPENTRY_WFI) + { + /* Request Wait For Interrupt */ + __WFI(); + } + else + { + /* Request Wait For Event */ + __SEV(); + __WFE(); + __WFE(); + } + /* Reset SLEEPDEEP bit of Cortex System Control Register */ + SCB->SCR &= (uint32_t)~((uint32_t)SCB_SCR_SLEEPDEEP_Msk); +} + +/** + * @brief Enters Standby mode. + * @note In Standby mode, all I/O pins are high impedance except for: + * - Reset pad (still available) + * - RTC_AF1 pin (PC13) if configured for tamper, time-stamp, RTC + * Alarm out, or RTC clock calibration out. + * - RTC_AF2 pin (PI8) if configured for tamper or time-stamp. + * - WKUP pins if enabled. + * @retval None + */ +void HAL_PWR_EnterSTANDBYMode(void) +{ + /* Select Standby mode */ + PWR->CR1 |= PWR_CR1_PDDS; + + /* Set SLEEPDEEP bit of Cortex System Control Register */ + SCB->SCR |= SCB_SCR_SLEEPDEEP_Msk; + + /* This option is used to ensure that store operations are completed */ +#if defined ( __CC_ARM) + __force_stores(); +#endif + /* Request Wait For Interrupt */ + __WFI(); +} + +/** + * @brief This function handles the PWR PVD interrupt request. + * @note This API should be called under the PVD_IRQHandler(). + * @retval None + */ +void HAL_PWR_PVD_IRQHandler(void) +{ + /* Check PWR Exti flag */ + if(__HAL_PWR_PVD_EXTI_GET_FLAG() != RESET) + { + /* PWR PVD interrupt user callback */ + HAL_PWR_PVDCallback(); + + /* Clear PWR Exti pending bit */ + __HAL_PWR_PVD_EXTI_CLEAR_FLAG(); + } +} + +/** + * @brief PWR PVD interrupt callback + * @retval None + */ +__weak void HAL_PWR_PVDCallback(void) +{ + /* NOTE : This function Should not be modified, when the callback is needed, + the HAL_PWR_PVDCallback could be implemented in the user file + */ +} + +/** + * @brief Indicates Sleep-On-Exit when returning from Handler mode to Thread mode. + * @note Set SLEEPONEXIT bit of SCR register. 
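A hedged sketch of Stop- and Standby-mode entry built on the functions above; SystemClock_Config() stands in for the application's own clock-setup routine (an assumption), needed because the HSI is the system clock after a Stop-mode wake-up:

#include "stm32f7xx_hal.h"

extern void SystemClock_Config(void);          // application clock setup (assumed)

static void enter_stop_then_standby_example(void)
{
  // Stop mode: wakes on any EXTI line configured in interrupt mode.
  HAL_PWR_EnterSTOPMode(PWR_LOWPOWERREGULATOR_ON, PWR_STOPENTRY_WFI);
  SystemClock_Config();                        // restore HSE/PLL after waking up on HSI

  // Standby mode: wake on WKUP pin 1, execution restarts as after reset.
  HAL_PWR_EnableWakeUpPin(PWR_WAKEUP_PIN1);
  HAL_PWR_EnterSTANDBYMode();
}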
When this bit is set, the processor + * re-enters SLEEP mode when an interruption handling is over. + * Setting this bit is useful when the processor is expected to run only on + * interruptions handling. + * @retval None + */ +void HAL_PWR_EnableSleepOnExit(void) +{ + /* Set SLEEPONEXIT bit of Cortex System Control Register */ + SET_BIT(SCB->SCR, ((uint32_t)SCB_SCR_SLEEPONEXIT_Msk)); +} + +/** + * @brief Disables Sleep-On-Exit feature when returning from Handler mode to Thread mode. + * @note Clears SLEEPONEXIT bit of SCR register. When this bit is set, the processor + * re-enters SLEEP mode when an interruption handling is over. + * @retval None + */ +void HAL_PWR_DisableSleepOnExit(void) +{ + /* Clear SLEEPONEXIT bit of Cortex System Control Register */ + CLEAR_BIT(SCB->SCR, ((uint32_t)SCB_SCR_SLEEPONEXIT_Msk)); +} + +/** + * @brief Enables CORTEX M4 SEVONPEND bit. + * @note Sets SEVONPEND bit of SCR register. When this bit is set, this causes + * WFE to wake up when an interrupt moves from inactive to pended. + * @retval None + */ +void HAL_PWR_EnableSEVOnPend(void) +{ + /* Set SEVONPEND bit of Cortex System Control Register */ + SET_BIT(SCB->SCR, ((uint32_t)SCB_SCR_SEVONPEND_Msk)); +} + +/** + * @brief Disables CORTEX M4 SEVONPEND bit. + * @note Clears SEVONPEND bit of SCR register. When this bit is set, this causes + * WFE to wake up when an interrupt moves from inactive to pended. + * @retval None + */ +void HAL_PWR_DisableSEVOnPend(void) +{ + /* Clear SEVONPEND bit of Cortex System Control Register */ + CLEAR_BIT(SCB->SCR, ((uint32_t)SCB_SCR_SEVONPEND_Msk)); +} + +/** + * @} + */ + +/** + * @} + */ + +#endif /* HAL_PWR_MODULE_ENABLED */ +/** + * @} + */ + +/** + * @} + */ + diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_pwr_ex.c b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_pwr_ex.c new file mode 100644 index 0000000..f426aaa --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_pwr_ex.c @@ -0,0 +1,552 @@ +/** + ****************************************************************************** + * @file stm32f7xx_hal_pwr_ex.c + * @author MCD Application Team + * @brief Extended PWR HAL module driver. + * This file provides firmware functions to manage the following + * functionalities of PWR extension peripheral: + * + Peripheral Extended features functions + * + ****************************************************************************** + * @attention + * + * Copyright (c) 2017 STMicroelectronics. + * All rights reserved. + * + * This software is licensed under terms that can be found in the LICENSE file + * in the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. 
+ * + ****************************************************************************** + */ + +/* Includes ------------------------------------------------------------------*/ +#include "stm32f7xx_hal.h" + +/** @addtogroup STM32F7xx_HAL_Driver + * @{ + */ + +/** @defgroup PWREx PWREx + * @brief PWR HAL module driver + * @{ + */ + +#ifdef HAL_PWR_MODULE_ENABLED + +/* Private typedef -----------------------------------------------------------*/ +/* Private define ------------------------------------------------------------*/ +/** @addtogroup PWREx_Private_Constants + * @{ + */ +#define PWR_OVERDRIVE_TIMEOUT_VALUE 1000 +#define PWR_UDERDRIVE_TIMEOUT_VALUE 1000 +#define PWR_BKPREG_TIMEOUT_VALUE 1000 +#define PWR_VOSRDY_TIMEOUT_VALUE 1000 +/** + * @} + */ + +/* Private macro -------------------------------------------------------------*/ +/* Private variables ---------------------------------------------------------*/ +/* Private function prototypes -----------------------------------------------*/ +/* Private functions ---------------------------------------------------------*/ +/** @defgroup PWREx_Exported_Functions PWREx Exported Functions + * @{ + */ + +/** @defgroup PWREx_Exported_Functions_Group1 Peripheral Extended features functions + * @brief Peripheral Extended features functions + * +@verbatim + + =============================================================================== + ##### Peripheral extended features functions ##### + =============================================================================== + + *** Main and Backup Regulators configuration *** + ================================================ + [..] + (+) The backup domain includes 4 Kbytes of backup SRAM accessible only from + the CPU, and address in 32-bit, 16-bit or 8-bit mode. Its content is + retained even in Standby or VBAT mode when the low power backup regulator + is enabled. It can be considered as an internal EEPROM when VBAT is + always present. You can use the HAL_PWREx_EnableBkUpReg() function to + enable the low power backup regulator. + + (+) When the backup domain is supplied by VDD (analog switch connected to VDD) + the backup SRAM is powered from VDD which replaces the VBAT power supply to + save battery life. + + (+) The backup SRAM is not mass erased by a tamper event. It is read + protected to prevent confidential data, such as cryptographic private + key, from being accessed. The backup SRAM can be erased only through + the Flash interface when a protection level change from level 1 to + level 0 is requested. + -@- Refer to the description of Read protection (RDP) in the Flash + programming manual. + + (+) The main internal regulator can be configured to have a tradeoff between + performance and power consumption when the device does not operate at + the maximum frequency. This is done through __HAL_PWR_MAINREGULATORMODE_CONFIG() + macro which configure VOS bit in PWR_CR register + + Refer to the product datasheets for more details. + + *** FLASH Power Down configuration **** + ======================================= + [..] + (+) By setting the FPDS bit in the PWR_CR register by using the + HAL_PWREx_EnableFlashPowerDown() function, the Flash memory also enters power + down mode when the device enters Stop mode. When the Flash memory + is in power down mode, an additional startup delay is incurred when + waking up from Stop mode. + + *** Over-Drive and Under-Drive configuration **** + ================================================= + [..] 
+ (+) In Run mode: the main regulator has 2 operating modes available: + (++) Normal mode: The CPU and core logic operate at maximum frequency at a given + voltage scaling (scale 1, scale 2 or scale 3) + (++) Over-drive mode: This mode allows the CPU and the core logic to operate at a + higher frequency than the normal mode for a given voltage scaling (scale 1, + scale 2 or scale 3). This mode is enabled through HAL_PWREx_EnableOverDrive() function and + disabled by HAL_PWREx_DisableOverDrive() function, to enter or exit from Over-drive mode please follow + the sequence described in Reference manual. + + (+) In Stop mode: the main regulator or low power regulator supplies a low power + voltage to the 1.2V domain, thus preserving the content of registers + and internal SRAM. 2 operating modes are available: + (++) Normal mode: the 1.2V domain is preserved in nominal leakage mode. This mode is only + available when the main regulator or the low power regulator is used in Scale 3 or + low voltage mode. + (++) Under-drive mode: the 1.2V domain is preserved in reduced leakage mode. This mode is only + available when the main regulator or the low power regulator is in low voltage mode. + +@endverbatim + * @{ + */ + +/** + * @brief Enables the Backup Regulator. + * @retval HAL status + */ +HAL_StatusTypeDef HAL_PWREx_EnableBkUpReg(void) +{ + uint32_t tickstart = 0; + + /* Enable Backup regulator */ + PWR->CSR1 |= PWR_CSR1_BRE; + + /* Workaround for the following hardware bug: */ + /* Id 19: PWR : No STANDBY wake-up when Back-up RAM enabled (ref. Errata Sheet p23) */ + PWR->CSR1 |= PWR_CSR1_EIWUP; + + /* Get tick */ + tickstart = HAL_GetTick(); + + /* Wait till Backup regulator ready flag is set */ + while(__HAL_PWR_GET_FLAG(PWR_FLAG_BRR) == RESET) + { + if((HAL_GetTick() - tickstart ) > PWR_BKPREG_TIMEOUT_VALUE) + { + return HAL_TIMEOUT; + } + } + return HAL_OK; +} + +/** + * @brief Disables the Backup Regulator. + * @retval HAL status + */ +HAL_StatusTypeDef HAL_PWREx_DisableBkUpReg(void) +{ + uint32_t tickstart = 0; + + /* Disable Backup regulator */ + PWR->CSR1 &= (uint32_t)~((uint32_t)PWR_CSR1_BRE); + + /* Workaround for the following hardware bug: */ + /* Id 19: PWR : No STANDBY wake-up when Back-up RAM enabled (ref. Errata Sheet p23) */ + PWR->CSR1 |= PWR_CSR1_EIWUP; + + /* Get tick */ + tickstart = HAL_GetTick(); + + /* Wait till Backup regulator ready flag is set */ + while(__HAL_PWR_GET_FLAG(PWR_FLAG_BRR) != RESET) + { + if((HAL_GetTick() - tickstart ) > PWR_BKPREG_TIMEOUT_VALUE) + { + return HAL_TIMEOUT; + } + } + return HAL_OK; +} + +/** + * @brief Enables the Flash Power Down in Stop mode. + * @retval None + */ +void HAL_PWREx_EnableFlashPowerDown(void) +{ + /* Enable the Flash Power Down */ + PWR->CR1 |= PWR_CR1_FPDS; +} + +/** + * @brief Disables the Flash Power Down in Stop mode. + * @retval None + */ +void HAL_PWREx_DisableFlashPowerDown(void) +{ + /* Disable the Flash Power Down */ + PWR->CR1 &= (uint32_t)~((uint32_t)PWR_CR1_FPDS); +} + +/** + * @brief Enables Main Regulator low voltage mode. + * @retval None + */ +void HAL_PWREx_EnableMainRegulatorLowVoltage(void) +{ + /* Enable Main regulator low voltage */ + PWR->CR1 |= PWR_CR1_MRUDS; +} + +/** + * @brief Disables Main Regulator low voltage mode. + * @retval None + */ +void HAL_PWREx_DisableMainRegulatorLowVoltage(void) +{ + /* Disable Main regulator low voltage */ + PWR->CR1 &= (uint32_t)~((uint32_t)PWR_CR1_MRUDS); +} + +/** + * @brief Enables Low Power Regulator low voltage mode. 
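Tying the backup-regulator and backup-SRAM description above together, a minimal sketch of keeping the 4 KB backup SRAM content alive across Standby/VBAT; error handling is left as an assumption:

#include "stm32f7xx_hal.h"

static void backup_sram_retention_example(void)
{
  __HAL_RCC_PWR_CLK_ENABLE();                  // PWR interface clock
  HAL_PWR_EnableBkUpAccess();                  // unlock the backup domain
  __HAL_RCC_BKPSRAM_CLK_ENABLE();              // clock the backup SRAM
  if (HAL_PWREx_EnableBkUpReg() != HAL_OK)     // low-power backup regulator keeps its content
  {
    // timeout: handle the error in the application
  }
}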
+ * @retval None + */ +void HAL_PWREx_EnableLowRegulatorLowVoltage(void) +{ + /* Enable low power regulator */ + PWR->CR1 |= PWR_CR1_LPUDS; +} + +/** + * @brief Disables Low Power Regulator low voltage mode. + * @retval None + */ +void HAL_PWREx_DisableLowRegulatorLowVoltage(void) +{ + /* Disable low power regulator */ + PWR->CR1 &= (uint32_t)~((uint32_t)PWR_CR1_LPUDS); +} + +/** + * @brief Activates the Over-Drive mode. + * @note This mode allows the CPU and the core logic to operate at a higher frequency + * than the normal mode for a given voltage scaling (scale 1, scale 2 or scale 3). + * @note It is recommended to enter or exit Over-drive mode when the application is not running + * critical tasks and when the system clock source is either HSI or HSE. + * During the Over-drive switch activation, no peripheral clocks should be enabled. + * The peripheral clocks must be enabled once the Over-drive mode is activated. + * @retval HAL status + */ +HAL_StatusTypeDef HAL_PWREx_EnableOverDrive(void) +{ + uint32_t tickstart = 0; + + __HAL_RCC_PWR_CLK_ENABLE(); + + /* Enable the Over-drive to extend the clock frequency to 216 MHz */ + __HAL_PWR_OVERDRIVE_ENABLE(); + + /* Get tick */ + tickstart = HAL_GetTick(); + + while(!__HAL_PWR_GET_FLAG(PWR_FLAG_ODRDY)) + { + if((HAL_GetTick() - tickstart ) > PWR_OVERDRIVE_TIMEOUT_VALUE) + { + return HAL_TIMEOUT; + } + } + + /* Enable the Over-drive switch */ + __HAL_PWR_OVERDRIVESWITCHING_ENABLE(); + + /* Get tick */ + tickstart = HAL_GetTick(); + + while(!__HAL_PWR_GET_FLAG(PWR_FLAG_ODSWRDY)) + { + if((HAL_GetTick() - tickstart ) > PWR_OVERDRIVE_TIMEOUT_VALUE) + { + return HAL_TIMEOUT; + } + } + return HAL_OK; +} + +/** + * @brief Deactivates the Over-Drive mode. + * @note This mode allows the CPU and the core logic to operate at a higher frequency + * than the normal mode for a given voltage scaling (scale 1, scale 2 or scale 3). + * @note It is recommended to enter or exit Over-drive mode when the application is not running + * critical tasks and when the system clock source is either HSI or HSE. + * During the Over-drive switch activation, no peripheral clocks should be enabled. + * The peripheral clocks must be enabled once the Over-drive mode is activated. + * @retval HAL status + */ +HAL_StatusTypeDef HAL_PWREx_DisableOverDrive(void) +{ + uint32_t tickstart = 0; + + __HAL_RCC_PWR_CLK_ENABLE(); + + /* Disable the Over-drive switch */ + __HAL_PWR_OVERDRIVESWITCHING_DISABLE(); + + /* Get tick */ + tickstart = HAL_GetTick(); + + while(__HAL_PWR_GET_FLAG(PWR_FLAG_ODSWRDY)) + { + if((HAL_GetTick() - tickstart ) > PWR_OVERDRIVE_TIMEOUT_VALUE) + { + return HAL_TIMEOUT; + } + } + + /* Disable the Over-drive */ + __HAL_PWR_OVERDRIVE_DISABLE(); + + /* Get tick */ + tickstart = HAL_GetTick(); + + while(__HAL_PWR_GET_FLAG(PWR_FLAG_ODRDY)) + { + if((HAL_GetTick() - tickstart ) > PWR_OVERDRIVE_TIMEOUT_VALUE) + { + return HAL_TIMEOUT; + } + } + + return HAL_OK; +} + +/** + * @brief Enters in Under-Drive STOP mode. + * + * @note This mode can be selected only when the Under-Drive is already active + * + * @note This mode is enabled only with STOP low power mode. + * In this mode, the 1.2V domain is preserved in reduced leakage mode. This + * mode is only available when the main regulator or the low power regulator + * is in low voltage mode + * + * @note If the Under-drive mode was enabled, it is automatically disabled after + * exiting Stop mode. 
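The over-drive sequence described above usually sits inside the application's clock-setup code. A hedged sketch of the ordering only; the PLL and bus configuration calls are placeholders, not taken from this diff:

#include "stm32f7xx_hal.h"

static void overdrive_216mhz_example(void)
{
  __HAL_RCC_PWR_CLK_ENABLE();
  __HAL_PWR_VOLTAGESCALING_CONFIG(PWR_REGULATOR_VOLTAGE_SCALE1);   // scale 1 is required for 216 MHz

  // ... HAL_RCC_OscConfig() with the PLL programmed for 216 MHz goes here ...

  if (HAL_PWREx_EnableOverDrive() != HAL_OK)
  {
    // timeout: handle the error in the application
  }

  // ... HAL_RCC_ClockConfig() then switches SYSCLK to the PLL output ...
}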
+ * When the voltage regulator operates in Under-drive mode, an additional + * startup delay is induced when waking up from Stop mode. + * + * @note In Stop mode, all I/O pins keep the same state as in Run mode. + * + * @note When exiting Stop mode by issuing an interrupt or a wakeup event, + * the HSI RC oscillator is selected as system clock. + * + * @note When the voltage regulator operates in low power mode, an additional + * startup delay is incurred when waking up from Stop mode. + * By keeping the internal regulator ON during Stop mode, the consumption + * is higher although the startup time is reduced. + * + * @param Regulator specifies the regulator state in STOP mode. + * This parameter can be one of the following values: + * @arg PWR_MAINREGULATOR_UNDERDRIVE_ON: Main Regulator in under-drive mode + * and Flash memory in power-down when the device is in Stop under-drive mode + * @arg PWR_LOWPOWERREGULATOR_UNDERDRIVE_ON: Low Power Regulator in under-drive mode + * and Flash memory in power-down when the device is in Stop under-drive mode + * @param STOPEntry specifies if STOP mode in entered with WFI or WFE instruction. + * This parameter can be one of the following values: + * @arg PWR_SLEEPENTRY_WFI: enter STOP mode with WFI instruction + * @arg PWR_SLEEPENTRY_WFE: enter STOP mode with WFE instruction + * @retval None + */ +HAL_StatusTypeDef HAL_PWREx_EnterUnderDriveSTOPMode(uint32_t Regulator, uint8_t STOPEntry) +{ + uint32_t tempreg = 0; + uint32_t tickstart = 0; + + /* Check the parameters */ + assert_param(IS_PWR_REGULATOR_UNDERDRIVE(Regulator)); + assert_param(IS_PWR_STOP_ENTRY(STOPEntry)); + + /* Enable Power ctrl clock */ + __HAL_RCC_PWR_CLK_ENABLE(); + /* Enable the Under-drive Mode ---------------------------------------------*/ + /* Clear Under-drive flag */ + __HAL_PWR_CLEAR_ODRUDR_FLAG(); + + /* Enable the Under-drive */ + __HAL_PWR_UNDERDRIVE_ENABLE(); + + /* Get tick */ + tickstart = HAL_GetTick(); + + /* Wait for UnderDrive mode is ready */ + while(__HAL_PWR_GET_FLAG(PWR_FLAG_UDRDY)) + { + if((HAL_GetTick() - tickstart ) > PWR_UDERDRIVE_TIMEOUT_VALUE) + { + return HAL_TIMEOUT; + } + } + + /* Select the regulator state in STOP mode ---------------------------------*/ + tempreg = PWR->CR1; + /* Clear PDDS, LPDS, MRLUDS and LPLUDS bits */ + tempreg &= (uint32_t)~(PWR_CR1_PDDS | PWR_CR1_LPDS | PWR_CR1_LPUDS | PWR_CR1_MRUDS); + + /* Set LPDS, MRLUDS and LPLUDS bits according to PWR_Regulator value */ + tempreg |= Regulator; + + /* Store the new value */ + PWR->CR1 = tempreg; + + /* Set SLEEPDEEP bit of Cortex System Control Register */ + SCB->SCR |= SCB_SCR_SLEEPDEEP_Msk; + + /* Select STOP mode entry --------------------------------------------------*/ + if(STOPEntry == PWR_SLEEPENTRY_WFI) + { + /* Request Wait For Interrupt */ + __WFI(); + } + else + { + /* Request Wait For Event */ + __WFE(); + } + /* Reset SLEEPDEEP bit of Cortex System Control Register */ + SCB->SCR &= (uint32_t)~((uint32_t)SCB_SCR_SLEEPDEEP_Msk); + + return HAL_OK; +} + +/** + * @brief Returns Voltage Scaling Range. + * @retval VOS bit field (PWR_REGULATOR_VOLTAGE_SCALE1, PWR_REGULATOR_VOLTAGE_SCALE2 or + * PWR_REGULATOR_VOLTAGE_SCALE3)PWR_REGULATOR_VOLTAGE_SCALE1 + */ +uint32_t HAL_PWREx_GetVoltageRange(void) +{ + return (PWR->CR1 & PWR_CR1_VOS); +} + +/** + * @brief Configures the main internal regulator output voltage. + * @param VoltageScaling specifies the regulator output voltage to achieve + * a tradeoff between performance and power consumption. 
+ * This parameter can be one of the following values: + * @arg PWR_REGULATOR_VOLTAGE_SCALE1: Regulator voltage output range 1 mode, + * typical output voltage at 1.4 V, + * system frequency up to 216 MHz. + * @arg PWR_REGULATOR_VOLTAGE_SCALE2: Regulator voltage output range 2 mode, + * typical output voltage at 1.2 V, + * system frequency up to 180 MHz. + * @arg PWR_REGULATOR_VOLTAGE_SCALE3: Regulator voltage output range 2 mode, + * typical output voltage at 1.00 V, + * system frequency up to 151 MHz. + * @note To update the system clock frequency(SYSCLK): + * - Set the HSI or HSE as system clock frequency using the HAL_RCC_ClockConfig(). + * - Call the HAL_RCC_OscConfig() to configure the PLL. + * - Call HAL_PWREx_ConfigVoltageScaling() API to adjust the voltage scale. + * - Set the new system clock frequency using the HAL_RCC_ClockConfig(). + * @note The scale can be modified only when the HSI or HSE clock source is selected + * as system clock source, otherwise the API returns HAL_ERROR. + * @note When the PLL is OFF, the voltage scale 3 is automatically selected and the VOS bits + * value in the PWR_CR1 register are not taken in account. + * @note This API forces the PLL state ON to allow the possibility to configure the voltage scale 1 or 2. + * @note The new voltage scale is active only when the PLL is ON. + * @retval HAL Status + */ +HAL_StatusTypeDef HAL_PWREx_ControlVoltageScaling(uint32_t VoltageScaling) +{ + uint32_t tickstart = 0; + + assert_param(IS_PWR_REGULATOR_VOLTAGE(VoltageScaling)); + + /* Enable Power ctrl clock */ + __HAL_RCC_PWR_CLK_ENABLE(); + + /* Check if the PLL is used as system clock or not */ + if(__HAL_RCC_GET_SYSCLK_SOURCE() != RCC_CFGR_SWS_PLL) + { + /* Disable the main PLL */ + __HAL_RCC_PLL_DISABLE(); + + /* Get Start Tick */ + tickstart = HAL_GetTick(); + /* Wait till PLL is disabled */ + while(__HAL_RCC_GET_FLAG(RCC_FLAG_PLLRDY) != RESET) + { + if((HAL_GetTick() - tickstart ) > PLL_TIMEOUT_VALUE) + { + return HAL_TIMEOUT; + } + } + + /* Set Range */ + __HAL_PWR_VOLTAGESCALING_CONFIG(VoltageScaling); + + /* Enable the main PLL */ + __HAL_RCC_PLL_ENABLE(); + + /* Get Start Tick */ + tickstart = HAL_GetTick(); + /* Wait till PLL is ready */ + while(__HAL_RCC_GET_FLAG(RCC_FLAG_PLLRDY) == RESET) + { + if((HAL_GetTick() - tickstart ) > PLL_TIMEOUT_VALUE) + { + return HAL_TIMEOUT; + } + } + + /* Get Start Tick */ + tickstart = HAL_GetTick(); + while((__HAL_PWR_GET_FLAG(PWR_FLAG_VOSRDY) == RESET)) + { + if((HAL_GetTick() - tickstart ) > PWR_VOSRDY_TIMEOUT_VALUE) + { + return HAL_TIMEOUT; + } + } + } + else + { + return HAL_ERROR; + } + return HAL_OK; +} + +/** + * @} + */ + +/** + * @} + */ + +#endif /* HAL_PWR_MODULE_ENABLED */ +/** + * @} + */ + +/** + * @} + */ + diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_rcc.c b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_rcc.c new file mode 100644 index 0000000..6c030ce --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_rcc.c @@ -0,0 +1,1239 @@ +/** + ****************************************************************************** + * @file stm32f7xx_hal_rcc.c + * @author MCD Application Team + * @brief RCC HAL module driver. 
+ * This file provides firmware functions to manage the following + * functionalities of the Reset and Clock Control (RCC) peripheral: + * + Initialization and de-initialization functions + * + Peripheral Control functions + * + @verbatim + ============================================================================== + ##### RCC specific features ##### + ============================================================================== + [..] + After reset the device is running from Internal High Speed oscillator + (HSI 16MHz) with Flash 0 wait state, Flash prefetch buffer, D-Cache + and I-Cache are disabled, and all peripherals are off except internal + SRAM, Flash and JTAG. + (+) There is no prescaler on High speed (AHB) and Low speed (APB) buses; + all peripherals mapped on these buses are running at HSI speed. + (+) The clock for all peripherals is switched off, except the SRAM and FLASH. + (+) All GPIOs are in input floating state, except the JTAG pins which + are assigned to be used for debug purpose. + + [..] + Once the device started from reset, the user application has to: + (+) Configure the clock source to be used to drive the System clock + (if the application needs higher frequency/performance) + (+) Configure the System clock frequency and Flash settings + (+) Configure the AHB and APB buses prescalers + (+) Enable the clock for the peripheral(s) to be used + (+) Configure the clock source(s) for peripherals which clocks are not + derived from the System clock (I2S, RTC, ADC, USB OTG FS/SDIO/RNG) + + ##### RCC Limitations ##### + ============================================================================== + [..] + A delay between an RCC peripheral clock enable and the effective peripheral + enabling should be taken into account in order to manage the peripheral read/write + from/to registers. + (+) This delay depends on the peripheral mapping. + (+) If peripheral is mapped on AHB: the delay is 2 AHB clock cycle + after the clock enable bit is set on the hardware register + (+) If peripheral is mapped on APB: the delay is 2 APB clock cycle + after the clock enable bit is set on the hardware register + + [..] + Implemented Workaround: + (+) For AHB & APB peripherals, a dummy read to the peripheral register has been + inserted in each __HAL_RCC_PPP_CLK_ENABLE() macro. + + @endverbatim + ****************************************************************************** + * @attention + * + * Copyright (c) 2017 STMicroelectronics. + * All rights reserved. + * + * This software is licensed under terms that can be found in the LICENSE file in + * the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. 
+ ****************************************************************************** + */ + +/* Includes ------------------------------------------------------------------*/ +#include "stm32f7xx_hal.h" + +/** @addtogroup STM32F7xx_HAL_Driver + * @{ + */ + +/** @defgroup RCC RCC + * @brief RCC HAL module driver + * @{ + */ + +#ifdef HAL_RCC_MODULE_ENABLED + +/* Private typedef -----------------------------------------------------------*/ +/* Private define ------------------------------------------------------------*/ +/* Private macro -------------------------------------------------------------*/ +/** @defgroup RCC_Private_Macros RCC Private Macros + * @{ + */ + +#define MCO1_CLK_ENABLE() __HAL_RCC_GPIOA_CLK_ENABLE() +#define MCO1_GPIO_PORT GPIOA +#define MCO1_PIN GPIO_PIN_8 + +#define MCO2_CLK_ENABLE() __HAL_RCC_GPIOC_CLK_ENABLE() +#define MCO2_GPIO_PORT GPIOC +#define MCO2_PIN GPIO_PIN_9 + +/** + * @} + */ +/* Private variables ---------------------------------------------------------*/ +/** @defgroup RCC_Private_Variables RCC Private Variables + * @{ + */ + +/** + * @} + */ + +/* Private function prototypes -----------------------------------------------*/ +/* Exported functions ---------------------------------------------------------*/ + +/** @defgroup RCC_Exported_Functions RCC Exported Functions + * @{ + */ + +/** @defgroup RCC_Exported_Functions_Group1 Initialization and de-initialization functions + * @brief Initialization and Configuration functions + * + @verbatim + =============================================================================== +##### Initialization and de-initialization functions ##### + =============================================================================== + [..] + This section provides functions allowing to configure the internal/external oscillators + (HSE, HSI, LSE, LSI, PLL, CSS and MCO) and the System buses clocks (SYSCLK, AHB, APB1 + and APB2). + + [..] Internal/external clock and PLL configuration + (#) HSI (high-speed internal), 16 MHz factory-trimmed RC used directly or through + the PLL as System clock source. + + (#) LSI (low-speed internal), 32 KHz low consumption RC used as IWDG and/or RTC + clock source. + + (#) HSE (high-speed external), 4 to 26 MHz crystal oscillator used directly or + through the PLL as System clock source. Can be used also as RTC clock source. + + (#) LSE (low-speed external), 32 KHz oscillator used as RTC clock source. + + (#) PLL (clocked by HSI or HSE), featuring two different output clocks: + (++) The first output is used to generate the high speed system clock (up to 216 MHz) + (++) The second output is used to generate the clock for the USB OTG FS (48 MHz), + the random analog generator (<=48 MHz) and the SDIO (<= 48 MHz). + + (#) CSS (Clock security system), once enable using the function HAL_RCC_EnableCSS() + and if a HSE clock failure occurs(HSE used directly or through PLL as System + clock source), the System clock is automatically switched to HSI and an interrupt + is generated if enabled. The interrupt is linked to the Cortex-M7 NMI + (Non-Maskable Interrupt) exception vector. + + (#) MCO1 (microcontroller clock output), used to output HSI, LSE, HSE or PLL + clock (through a configurable prescaler) on PA8 pin. + + (#) MCO2 (microcontroller clock output), used to output HSE, PLL, SYSCLK or PLLI2S + clock (through a configurable prescaler) on PC9 pin. + + [..] System, AHB and APB buses clocks configuration + (#) Several clock sources can be used to drive the System clock (SYSCLK): HSI, + HSE and PLL. 
+ The AHB clock (HCLK) is derived from System clock through configurable + prescaler and used to clock the CPU, memory and peripherals mapped + on AHB bus (DMA, GPIO...). APB1 (PCLK1) and APB2 (PCLK2) clocks are derived + from AHB clock through configurable prescalers and used to clock + the peripherals mapped on these buses. You can use + "HAL_RCC_GetSysClockFreq()" function to retrieve the frequencies of these clocks. + + -@- All the peripheral clocks are derived from the System clock (SYSCLK) except: + (+@) I2S: the I2S clock can be derived either from a specific PLL (PLLI2S) or + from an external clock mapped on the I2S_CKIN pin. + You have to use __HAL_RCC_PLLI2S_CONFIG() macro to configure this clock. + (+@) SAI: the SAI clock can be derived either from a specific PLL (PLLI2S) or (PLLSAI) or + from an external clock mapped on the I2S_CKIN pin. + You have to use __HAL_RCC_PLLI2S_CONFIG() macro to configure this clock. + (+@) RTC: the RTC clock can be derived either from the LSI, LSE or HSE clock + divided by 2 to 31. You have to use __HAL_RCC_RTC_CONFIG() and __HAL_RCC_RTC_ENABLE() + macros to configure this clock. + (+@) USB OTG FS, SDIO and RTC: USB OTG FS require a frequency equal to 48 MHz + to work correctly, while the SDIO require a frequency equal or lower than + to 48. This clock is derived of the main PLL through PLLQ divider. + (+@) IWDG clock which is always the LSI clock. +@endverbatim + * @{ + */ + +/** + * @brief Resets the RCC clock configuration to the default reset state. + * @note The default reset state of the clock configuration is given below: + * - HSI ON and used as system clock source + * - HSE, PLL, PLLI2S and PLLSAI OFF + * - AHB, APB1 and APB2 prescaler set to 1. + * - CSS, MCO1 and MCO2 OFF + * - All interrupts disabled + * @note This function doesn't modify the configuration of the + * - Peripheral clocks + * - LSI, LSE and RTC clocks + * @retval None + */ +HAL_StatusTypeDef HAL_RCC_DeInit(void) +{ + uint32_t tickstart; + + /* Get Start Tick */ + tickstart = HAL_GetTick(); + + /* Set HSION bit to the reset value */ + SET_BIT(RCC->CR, RCC_CR_HSION); + + /* Wait till HSI is ready */ + while (READ_BIT(RCC->CR, RCC_CR_HSIRDY) == RESET) + { + if ((HAL_GetTick() - tickstart) > HSI_TIMEOUT_VALUE) + { + return HAL_TIMEOUT; + } + } + + /* Set HSITRIM[4:0] bits to the reset value */ + SET_BIT(RCC->CR, RCC_CR_HSITRIM_4); + + /* Get Start Tick */ + tickstart = HAL_GetTick(); + + /* Reset CFGR register */ + CLEAR_REG(RCC->CFGR); + + /* Wait till clock switch is ready */ + while (READ_BIT(RCC->CFGR, RCC_CFGR_SWS) != RESET) + { + if ((HAL_GetTick() - tickstart) > CLOCKSWITCH_TIMEOUT_VALUE) + { + return HAL_TIMEOUT; + } + } + + /* Get Start Tick */ + tickstart = HAL_GetTick(); + + /* Clear HSEON, HSEBYP and CSSON bits */ + CLEAR_BIT(RCC->CR, RCC_CR_HSEON | RCC_CR_HSEBYP | RCC_CR_CSSON); + + /* Wait till HSE is disabled */ + while (READ_BIT(RCC->CR, RCC_CR_HSERDY) != RESET) + { + if ((HAL_GetTick() - tickstart) > HSE_TIMEOUT_VALUE) + { + return HAL_TIMEOUT; + } + } + + /* Get Start Tick */ + tickstart = HAL_GetTick(); + + /* Clear PLLON bit */ + CLEAR_BIT(RCC->CR, RCC_CR_PLLON); + + /* Wait till PLL is disabled */ + while (READ_BIT(RCC->CR, RCC_CR_PLLRDY) != RESET) + { + if ((HAL_GetTick() - tickstart) > PLL_TIMEOUT_VALUE) + { + return HAL_TIMEOUT; + } + } + + /* Get Start Tick */ + tickstart = HAL_GetTick(); + + /* Reset PLLI2SON bit */ + CLEAR_BIT(RCC->CR, RCC_CR_PLLI2SON); + + /* Wait till PLLI2S is disabled */ + while (READ_BIT(RCC->CR, RCC_CR_PLLI2SRDY) != RESET) + { 
+ if ((HAL_GetTick() - tickstart) > PLLI2S_TIMEOUT_VALUE) + { + return HAL_TIMEOUT; + } + } + + /* Get Start Tick */ + tickstart = HAL_GetTick(); + + /* Reset PLLSAI bit */ + CLEAR_BIT(RCC->CR, RCC_CR_PLLSAION); + + /* Wait till PLLSAI is disabled */ + while (READ_BIT(RCC->CR, RCC_CR_PLLSAIRDY) != RESET) + { + if ((HAL_GetTick() - tickstart) > PLLSAI_TIMEOUT_VALUE) + { + return HAL_TIMEOUT; + } + } + + /* Once PLL, PLLI2S and PLLSAI are OFF, reset PLLCFGR register to default value */ + RCC->PLLCFGR = RCC_PLLCFGR_PLLM_4 | RCC_PLLCFGR_PLLN_6 | RCC_PLLCFGR_PLLN_7 | RCC_PLLCFGR_PLLQ_2 | 0x20000000U; + + /* Reset PLLI2SCFGR register to default value */ + RCC->PLLI2SCFGR = RCC_PLLI2SCFGR_PLLI2SN_6 | RCC_PLLI2SCFGR_PLLI2SN_7 | RCC_PLLI2SCFGR_PLLI2SQ_2 | RCC_PLLI2SCFGR_PLLI2SR_1; + + /* Reset PLLSAICFGR register to default value */ + RCC->PLLSAICFGR = RCC_PLLSAICFGR_PLLSAIN_6 | RCC_PLLSAICFGR_PLLSAIN_7 | RCC_PLLSAICFGR_PLLSAIQ_2 | 0x20000000U; + + /* Disable all interrupts */ + CLEAR_BIT(RCC->CIR, RCC_CIR_LSIRDYIE | RCC_CIR_LSERDYIE | RCC_CIR_HSIRDYIE | RCC_CIR_HSERDYIE | RCC_CIR_PLLRDYIE | RCC_CIR_PLLI2SRDYIE | RCC_CIR_PLLSAIRDYIE); + + /* Clear all interrupt flags */ + SET_BIT(RCC->CIR, RCC_CIR_LSIRDYC | RCC_CIR_LSERDYC | RCC_CIR_HSIRDYC | RCC_CIR_HSERDYC | RCC_CIR_PLLRDYC | RCC_CIR_PLLI2SRDYC | RCC_CIR_PLLSAIRDYC | RCC_CIR_CSSC); + + /* Clear LSION bit */ + CLEAR_BIT(RCC->CSR, RCC_CSR_LSION); + + /* Reset all CSR flags */ + SET_BIT(RCC->CSR, RCC_CSR_RMVF); + + /* Update the SystemCoreClock global variable */ + SystemCoreClock = HSI_VALUE; + + /* Adapt Systick interrupt period */ + if (HAL_InitTick(uwTickPrio) != HAL_OK) + { + return HAL_ERROR; + } + else + { + return HAL_OK; + } +} + +/** + * @brief Initializes the RCC Oscillators according to the specified parameters in the + * RCC_OscInitTypeDef. + * @param RCC_OscInitStruct pointer to an RCC_OscInitTypeDef structure that + * contains the configuration information for the RCC Oscillators. + * @note The PLL is not disabled when used as system clock. + * @note Transitions LSE Bypass to LSE On and LSE On to LSE Bypass are not + * supported by this function. User should request a transition to LSE Off + * first and then LSE On or LSE Bypass. + * @note Transition HSE Bypass to HSE On and HSE On to HSE Bypass are not + * supported by this function. User should request a transition to HSE Off + * first and then HSE On or HSE Bypass. 
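A hedged usage sketch for HAL_RCC_OscConfig() below, using the 216 MHz HSE-bypass configuration common on NUCLEO-F767ZI boards (8 MHz from the ST-LINK MCO); the exact divider values are assumptions for illustration:

#include "stm32f7xx_hal.h"

static void osc_config_216mhz_example(void)
{
  RCC_OscInitTypeDef osc = {0};

  osc.OscillatorType = RCC_OSCILLATORTYPE_HSE;
  osc.HSEState       = RCC_HSE_BYPASS;        // 8 MHz fed by the ST-LINK MCO (assumed board)
  osc.PLL.PLLState   = RCC_PLL_ON;
  osc.PLL.PLLSource  = RCC_PLLSOURCE_HSE;
  osc.PLL.PLLM       = 8;                     // 8 MHz / 8   = 1 MHz PLL input
  osc.PLL.PLLN       = 432;                   // 1 MHz * 432 = 432 MHz VCO
  osc.PLL.PLLP       = RCC_PLLP_DIV2;         // 432 / 2     = 216 MHz SYSCLK candidate
  osc.PLL.PLLQ       = 9;                     // 432 / 9     = 48 MHz for USB OTG FS
  if (HAL_RCC_OscConfig(&osc) != HAL_OK)
  {
    // timeout or invalid request: handle the error in the application
  }
}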
+ * @retval HAL status + */ +HAL_StatusTypeDef HAL_RCC_OscConfig(RCC_OscInitTypeDef *RCC_OscInitStruct) +{ + uint32_t tickstart; + uint32_t pll_config; + FlagStatus pwrclkchanged = RESET; + + /* Check Null pointer */ + if (RCC_OscInitStruct == NULL) + { + return HAL_ERROR; + } + + /* Check the parameters */ + assert_param(IS_RCC_OSCILLATORTYPE(RCC_OscInitStruct->OscillatorType)); + + /*------------------------------- HSE Configuration ------------------------*/ + if (((RCC_OscInitStruct->OscillatorType) & RCC_OSCILLATORTYPE_HSE) == RCC_OSCILLATORTYPE_HSE) + { + /* Check the parameters */ + assert_param(IS_RCC_HSE(RCC_OscInitStruct->HSEState)); + /* When the HSE is used as system clock or clock source for PLL, It can not be disabled */ + if ((__HAL_RCC_GET_SYSCLK_SOURCE() == RCC_SYSCLKSOURCE_STATUS_HSE) + || ((__HAL_RCC_GET_SYSCLK_SOURCE() == RCC_SYSCLKSOURCE_STATUS_PLLCLK) && ((RCC->PLLCFGR & RCC_PLLCFGR_PLLSRC) == RCC_PLLCFGR_PLLSRC_HSE))) + { + if ((__HAL_RCC_GET_FLAG(RCC_FLAG_HSERDY) != RESET) && (RCC_OscInitStruct->HSEState == RCC_HSE_OFF)) + { + return HAL_ERROR; + } + } + else + { + /* Set the new HSE configuration ---------------------------------------*/ + __HAL_RCC_HSE_CONFIG(RCC_OscInitStruct->HSEState); + + /* Check the HSE State */ + if (RCC_OscInitStruct->HSEState != RCC_HSE_OFF) + { + /* Get Start Tick*/ + tickstart = HAL_GetTick(); + + /* Wait till HSE is ready */ + while (__HAL_RCC_GET_FLAG(RCC_FLAG_HSERDY) == RESET) + { + if ((HAL_GetTick() - tickstart) > HSE_TIMEOUT_VALUE) + { + return HAL_TIMEOUT; + } + } + } + else + { + /* Get Start Tick*/ + tickstart = HAL_GetTick(); + + /* Wait till HSE is bypassed or disabled */ + while (__HAL_RCC_GET_FLAG(RCC_FLAG_HSERDY) != RESET) + { + if ((HAL_GetTick() - tickstart) > HSE_TIMEOUT_VALUE) + { + return HAL_TIMEOUT; + } + } + } + } + } + /*----------------------------- HSI Configuration --------------------------*/ + if (((RCC_OscInitStruct->OscillatorType) & RCC_OSCILLATORTYPE_HSI) == RCC_OSCILLATORTYPE_HSI) + { + /* Check the parameters */ + assert_param(IS_RCC_HSI(RCC_OscInitStruct->HSIState)); + assert_param(IS_RCC_CALIBRATION_VALUE(RCC_OscInitStruct->HSICalibrationValue)); + + /* Check if HSI is used as system clock or as PLL source when PLL is selected as system clock */ + if ((__HAL_RCC_GET_SYSCLK_SOURCE() == RCC_SYSCLKSOURCE_STATUS_HSI) + || ((__HAL_RCC_GET_SYSCLK_SOURCE() == RCC_SYSCLKSOURCE_STATUS_PLLCLK) && ((RCC->PLLCFGR & RCC_PLLCFGR_PLLSRC) == RCC_PLLCFGR_PLLSRC_HSI))) + { + /* When HSI is used as system clock it will not disabled */ + if ((__HAL_RCC_GET_FLAG(RCC_FLAG_HSIRDY) != RESET) && (RCC_OscInitStruct->HSIState != RCC_HSI_ON)) + { + return HAL_ERROR; + } + /* Otherwise, just the calibration is allowed */ + else + { + /* Adjusts the Internal High Speed oscillator (HSI) calibration value.*/ + __HAL_RCC_HSI_CALIBRATIONVALUE_ADJUST(RCC_OscInitStruct->HSICalibrationValue); + } + } + else + { + /* Check the HSI State */ + if ((RCC_OscInitStruct->HSIState) != RCC_HSI_OFF) + { + /* Enable the Internal High Speed oscillator (HSI). */ + __HAL_RCC_HSI_ENABLE(); + + /* Get Start Tick*/ + tickstart = HAL_GetTick(); + + /* Wait till HSI is ready */ + while (__HAL_RCC_GET_FLAG(RCC_FLAG_HSIRDY) == RESET) + { + if ((HAL_GetTick() - tickstart) > HSI_TIMEOUT_VALUE) + { + return HAL_TIMEOUT; + } + } + + /* Adjusts the Internal High Speed oscillator (HSI) calibration value.*/ + __HAL_RCC_HSI_CALIBRATIONVALUE_ADJUST(RCC_OscInitStruct->HSICalibrationValue); + } + else + { + /* Disable the Internal High Speed oscillator (HSI). 
*/ + __HAL_RCC_HSI_DISABLE(); + + /* Get Start Tick*/ + tickstart = HAL_GetTick(); + + /* Wait till HSI is ready */ + while (__HAL_RCC_GET_FLAG(RCC_FLAG_HSIRDY) != RESET) + { + if ((HAL_GetTick() - tickstart) > HSI_TIMEOUT_VALUE) + { + return HAL_TIMEOUT; + } + } + } + } + } + /*------------------------------ LSI Configuration -------------------------*/ + if (((RCC_OscInitStruct->OscillatorType) & RCC_OSCILLATORTYPE_LSI) == RCC_OSCILLATORTYPE_LSI) + { + /* Check the parameters */ + assert_param(IS_RCC_LSI(RCC_OscInitStruct->LSIState)); + + /* Check the LSI State */ + if ((RCC_OscInitStruct->LSIState) != RCC_LSI_OFF) + { + /* Enable the Internal Low Speed oscillator (LSI). */ + __HAL_RCC_LSI_ENABLE(); + + /* Get Start Tick*/ + tickstart = HAL_GetTick(); + + /* Wait till LSI is ready */ + while (__HAL_RCC_GET_FLAG(RCC_FLAG_LSIRDY) == RESET) + { + if ((HAL_GetTick() - tickstart) > LSI_TIMEOUT_VALUE) + { + return HAL_TIMEOUT; + } + } + } + else + { + /* Disable the Internal Low Speed oscillator (LSI). */ + __HAL_RCC_LSI_DISABLE(); + + /* Get Start Tick*/ + tickstart = HAL_GetTick(); + + /* Wait till LSI is ready */ + while (__HAL_RCC_GET_FLAG(RCC_FLAG_LSIRDY) != RESET) + { + if ((HAL_GetTick() - tickstart) > LSI_TIMEOUT_VALUE) + { + return HAL_TIMEOUT; + } + } + } + } + /*------------------------------ LSE Configuration -------------------------*/ + if (((RCC_OscInitStruct->OscillatorType) & RCC_OSCILLATORTYPE_LSE) == RCC_OSCILLATORTYPE_LSE) + { + /* Check the parameters */ + assert_param(IS_RCC_LSE(RCC_OscInitStruct->LSEState)); + + /* Update LSE configuration in Backup Domain control register */ + /* Requires to enable write access to Backup Domain of necessary */ + if (__HAL_RCC_PWR_IS_CLK_DISABLED()) + { + /* Enable Power Clock*/ + __HAL_RCC_PWR_CLK_ENABLE(); + pwrclkchanged = SET; + } + + if (HAL_IS_BIT_CLR(PWR->CR1, PWR_CR1_DBP)) + { + /* Enable write access to Backup domain */ + PWR->CR1 |= PWR_CR1_DBP; + + /* Wait for Backup domain Write protection disable */ + tickstart = HAL_GetTick(); + + while (HAL_IS_BIT_CLR(PWR->CR1, PWR_CR1_DBP)) + { + if ((HAL_GetTick() - tickstart) > RCC_DBP_TIMEOUT_VALUE) + { + return HAL_TIMEOUT; + } + } + } + + /* Set the new LSE configuration -----------------------------------------*/ + __HAL_RCC_LSE_CONFIG(RCC_OscInitStruct->LSEState); + /* Check the LSE State */ + if ((RCC_OscInitStruct->LSEState) != RCC_LSE_OFF) + { + /* Get Start Tick*/ + tickstart = HAL_GetTick(); + + /* Wait till LSE is ready */ + while (__HAL_RCC_GET_FLAG(RCC_FLAG_LSERDY) == RESET) + { + if ((HAL_GetTick() - tickstart) > RCC_LSE_TIMEOUT_VALUE) + { + return HAL_TIMEOUT; + } + } + } + else + { + /* Get Start Tick*/ + tickstart = HAL_GetTick(); + + /* Wait till LSE is ready */ + while (__HAL_RCC_GET_FLAG(RCC_FLAG_LSERDY) != RESET) + { + if ((HAL_GetTick() - tickstart) > RCC_LSE_TIMEOUT_VALUE) + { + return HAL_TIMEOUT; + } + } + } + + /* Restore clock configuration if changed */ + if (pwrclkchanged == SET) + { + __HAL_RCC_PWR_CLK_DISABLE(); + } + } + /*-------------------------------- PLL Configuration -----------------------*/ + /* Check the parameters */ + assert_param(IS_RCC_PLL(RCC_OscInitStruct->PLL.PLLState)); + if ((RCC_OscInitStruct->PLL.PLLState) != RCC_PLL_NONE) + { + /* Check if the PLL is used as system clock or not */ + if (__HAL_RCC_GET_SYSCLK_SOURCE() != RCC_SYSCLKSOURCE_STATUS_PLLCLK) + { + if ((RCC_OscInitStruct->PLL.PLLState) == RCC_PLL_ON) + { + /* Check the parameters */ + assert_param(IS_RCC_PLLSOURCE(RCC_OscInitStruct->PLL.PLLSource)); + 
assert_param(IS_RCC_PLLM_VALUE(RCC_OscInitStruct->PLL.PLLM)); + assert_param(IS_RCC_PLLN_VALUE(RCC_OscInitStruct->PLL.PLLN)); + assert_param(IS_RCC_PLLP_VALUE(RCC_OscInitStruct->PLL.PLLP)); + assert_param(IS_RCC_PLLQ_VALUE(RCC_OscInitStruct->PLL.PLLQ)); +#if defined (RCC_PLLCFGR_PLLR) + assert_param(IS_RCC_PLLR_VALUE(RCC_OscInitStruct->PLL.PLLR)); +#endif + + /* Disable the main PLL. */ + __HAL_RCC_PLL_DISABLE(); + + /* Get Start Tick*/ + tickstart = HAL_GetTick(); + + /* Wait till PLL is ready */ + while (__HAL_RCC_GET_FLAG(RCC_FLAG_PLLRDY) != RESET) + { + if ((HAL_GetTick() - tickstart) > PLL_TIMEOUT_VALUE) + { + return HAL_TIMEOUT; + } + } + + /* Configure the main PLL clock source, multiplication and division factors. */ +#if defined (RCC_PLLCFGR_PLLR) + __HAL_RCC_PLL_CONFIG(RCC_OscInitStruct->PLL.PLLSource, + RCC_OscInitStruct->PLL.PLLM, + RCC_OscInitStruct->PLL.PLLN, + RCC_OscInitStruct->PLL.PLLP, + RCC_OscInitStruct->PLL.PLLQ, + RCC_OscInitStruct->PLL.PLLR); +#else + __HAL_RCC_PLL_CONFIG(RCC_OscInitStruct->PLL.PLLSource, + RCC_OscInitStruct->PLL.PLLM, + RCC_OscInitStruct->PLL.PLLN, + RCC_OscInitStruct->PLL.PLLP, + RCC_OscInitStruct->PLL.PLLQ); +#endif + + /* Enable the main PLL. */ + __HAL_RCC_PLL_ENABLE(); + + /* Get Start Tick*/ + tickstart = HAL_GetTick(); + + /* Wait till PLL is ready */ + while (__HAL_RCC_GET_FLAG(RCC_FLAG_PLLRDY) == RESET) + { + if ((HAL_GetTick() - tickstart) > PLL_TIMEOUT_VALUE) + { + return HAL_TIMEOUT; + } + } + } + else + { + /* Disable the main PLL. */ + __HAL_RCC_PLL_DISABLE(); + + /* Get Start Tick*/ + tickstart = HAL_GetTick(); + + /* Wait till PLL is ready */ + while (__HAL_RCC_GET_FLAG(RCC_FLAG_PLLRDY) != RESET) + { + if ((HAL_GetTick() - tickstart) > PLL_TIMEOUT_VALUE) + { + return HAL_TIMEOUT; + } + } + } + } + else + { + /* Do not return HAL_ERROR if request repeats the current configuration */ + pll_config = RCC->PLLCFGR; +#if defined (RCC_PLLCFGR_PLLR) + if (((RCC_OscInitStruct->PLL.PLLState) == RCC_PLL_OFF) || + (READ_BIT(pll_config, RCC_PLLCFGR_PLLSRC) != RCC_OscInitStruct->PLL.PLLSource) || + (READ_BIT(pll_config, RCC_PLLCFGR_PLLM) != RCC_OscInitStruct->PLL.PLLM) || + (READ_BIT(pll_config, RCC_PLLCFGR_PLLN) != (RCC_OscInitStruct->PLL.PLLN << RCC_PLLCFGR_PLLN_Pos)) || + (READ_BIT(pll_config, RCC_PLLCFGR_PLLP) != ((((RCC_OscInitStruct->PLL.PLLP) >> 1U) - 1U) << RCC_PLLCFGR_PLLP_Pos)) || + (READ_BIT(pll_config, RCC_PLLCFGR_PLLQ) != (RCC_OscInitStruct->PLL.PLLQ << RCC_PLLCFGR_PLLQ_Pos)) || + (READ_BIT(pll_config, RCC_PLLCFGR_PLLR) != (RCC_OscInitStruct->PLL.PLLR << RCC_PLLCFGR_PLLR_Pos))) +#else + if (((RCC_OscInitStruct->PLL.PLLState) == RCC_PLL_OFF) || + (READ_BIT(pll_config, RCC_PLLCFGR_PLLSRC) != RCC_OscInitStruct->PLL.PLLSource) || + (READ_BIT(pll_config, RCC_PLLCFGR_PLLM) != RCC_OscInitStruct->PLL.PLLM) || + (READ_BIT(pll_config, RCC_PLLCFGR_PLLN) != (RCC_OscInitStruct->PLL.PLLN << RCC_PLLCFGR_PLLN_Pos)) || + (READ_BIT(pll_config, RCC_PLLCFGR_PLLP) != ((((RCC_OscInitStruct->PLL.PLLP) >> 1U) - 1U) << RCC_PLLCFGR_PLLP_Pos)) || + (READ_BIT(pll_config, RCC_PLLCFGR_PLLQ) != (RCC_OscInitStruct->PLL.PLLQ << RCC_PLLCFGR_PLLQ_Pos))) +#endif + { + return HAL_ERROR; + } + } + } + return HAL_OK; +} + +/** + * @brief Initializes the CPU, AHB and APB buses clocks according to the specified + * parameters in the RCC_ClkInitStruct. + * @param RCC_ClkInitStruct pointer to an RCC_OscInitTypeDef structure that + * contains the configuration information for the RCC peripheral. 
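+ *
+ * @note  (Editor's illustrative sketch, not part of the original ST HAL source.)
+ *        A typical call selecting the PLL as SYSCLK source is shown below; the bus
+ *        dividers and FLASH_LATENCY_7 are assumptions that must be chosen from the
+ *        device datasheet for the actual frequencies and supply voltage, and
+ *        Error_Handler() is assumed to be defined by the application.
+ * @code
+ *   RCC_ClkInitTypeDef clk = {0};
+ *   clk.ClockType      = RCC_CLOCKTYPE_SYSCLK | RCC_CLOCKTYPE_HCLK |
+ *                        RCC_CLOCKTYPE_PCLK1  | RCC_CLOCKTYPE_PCLK2;
+ *   clk.SYSCLKSource   = RCC_SYSCLKSOURCE_PLLCLK;
+ *   clk.AHBCLKDivider  = RCC_SYSCLK_DIV1;
+ *   clk.APB1CLKDivider = RCC_HCLK_DIV4;
+ *   clk.APB2CLKDivider = RCC_HCLK_DIV2;
+ *   if (HAL_RCC_ClockConfig(&clk, FLASH_LATENCY_7) != HAL_OK)
+ *   {
+ *     Error_Handler();
+ *   }
+ * @endcode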
+ * @param FLatency FLASH Latency, this parameter depend on device selected + * + * @note The SystemCoreClock CMSIS variable is used to store System Clock Frequency + * and updated by HAL_RCC_GetHCLKFreq() function called within this function + * + * @note The HSI is used (enabled by hardware) as system clock source after + * startup from Reset, wake-up from STOP and STANDBY mode, or in case + * of failure of the HSE used directly or indirectly as system clock + * (if the Clock Security System CSS is enabled). + * + * @note A switch from one clock source to another occurs only if the target + * clock source is ready (clock stable after startup delay or PLL locked). + * If a clock source which is not yet ready is selected, the switch will + * occur when the clock source will be ready. + * You can use HAL_RCC_GetClockConfig() function to know which clock is + * currently used as system clock source. + * @note Depending on the device voltage range, the software has to set correctly + * HPRE[3:0] bits to ensure that HCLK not exceed the maximum allowed frequency + * (for more details refer to section above "Initialization/de-initialization functions") + * @retval None + */ +HAL_StatusTypeDef HAL_RCC_ClockConfig(RCC_ClkInitTypeDef *RCC_ClkInitStruct, uint32_t FLatency) +{ + uint32_t tickstart = 0; + + /* Check Null pointer */ + if (RCC_ClkInitStruct == NULL) + { + return HAL_ERROR; + } + + /* Check the parameters */ + assert_param(IS_RCC_CLOCKTYPE(RCC_ClkInitStruct->ClockType)); + assert_param(IS_FLASH_LATENCY(FLatency)); + + /* To correctly read data from FLASH memory, the number of wait states (LATENCY) + must be correctly programmed according to the frequency of the CPU clock + (HCLK) and the supply voltage of the device. */ + + /* Increasing the CPU frequency */ + if (FLatency > __HAL_FLASH_GET_LATENCY()) + { + /* Program the new number of wait states to the LATENCY bits in the FLASH_ACR register */ + __HAL_FLASH_SET_LATENCY(FLatency); + + /* Check that the new number of wait states is taken into account to access the Flash + memory by reading the FLASH_ACR register */ + if (__HAL_FLASH_GET_LATENCY() != FLatency) + { + return HAL_ERROR; + } + } + + /*-------------------------- HCLK Configuration --------------------------*/ + if (((RCC_ClkInitStruct->ClockType) & RCC_CLOCKTYPE_HCLK) == RCC_CLOCKTYPE_HCLK) + { + /* Set the highest APBx dividers in order to ensure that we do not go through + a non-spec phase whatever we decrease or increase HCLK. 
*/ + if (((RCC_ClkInitStruct->ClockType) & RCC_CLOCKTYPE_PCLK1) == RCC_CLOCKTYPE_PCLK1) + { + MODIFY_REG(RCC->CFGR, RCC_CFGR_PPRE1, RCC_HCLK_DIV16); + } + + if (((RCC_ClkInitStruct->ClockType) & RCC_CLOCKTYPE_PCLK2) == RCC_CLOCKTYPE_PCLK2) + { + MODIFY_REG(RCC->CFGR, RCC_CFGR_PPRE2, (RCC_HCLK_DIV16 << 3)); + } + + /* Set the new HCLK clock divider */ + assert_param(IS_RCC_HCLK(RCC_ClkInitStruct->AHBCLKDivider)); + MODIFY_REG(RCC->CFGR, RCC_CFGR_HPRE, RCC_ClkInitStruct->AHBCLKDivider); + } + + /*------------------------- SYSCLK Configuration ---------------------------*/ + if (((RCC_ClkInitStruct->ClockType) & RCC_CLOCKTYPE_SYSCLK) == RCC_CLOCKTYPE_SYSCLK) + { + assert_param(IS_RCC_SYSCLKSOURCE(RCC_ClkInitStruct->SYSCLKSource)); + + /* HSE is selected as System Clock Source */ + if (RCC_ClkInitStruct->SYSCLKSource == RCC_SYSCLKSOURCE_HSE) + { + /* Check the HSE ready flag */ + if (__HAL_RCC_GET_FLAG(RCC_FLAG_HSERDY) == RESET) + { + return HAL_ERROR; + } + } + /* PLL is selected as System Clock Source */ + else if (RCC_ClkInitStruct->SYSCLKSource == RCC_SYSCLKSOURCE_PLLCLK) + { + /* Check the PLL ready flag */ + if (__HAL_RCC_GET_FLAG(RCC_FLAG_PLLRDY) == RESET) + { + return HAL_ERROR; + } + } + /* HSI is selected as System Clock Source */ + else + { + /* Check the HSI ready flag */ + if (__HAL_RCC_GET_FLAG(RCC_FLAG_HSIRDY) == RESET) + { + return HAL_ERROR; + } + } + + __HAL_RCC_SYSCLK_CONFIG(RCC_ClkInitStruct->SYSCLKSource); + + /* Get Start Tick*/ + tickstart = HAL_GetTick(); + + while (__HAL_RCC_GET_SYSCLK_SOURCE() != (RCC_ClkInitStruct->SYSCLKSource << RCC_CFGR_SWS_Pos)) + { + if ((HAL_GetTick() - tickstart) > CLOCKSWITCH_TIMEOUT_VALUE) + { + return HAL_TIMEOUT; + } + } + } + + /* Decreasing the number of wait states because of lower CPU frequency */ + if (FLatency < __HAL_FLASH_GET_LATENCY()) + { + /* Program the new number of wait states to the LATENCY bits in the FLASH_ACR register */ + __HAL_FLASH_SET_LATENCY(FLatency); + + /* Check that the new number of wait states is taken into account to access the Flash + memory by reading the FLASH_ACR register */ + if (__HAL_FLASH_GET_LATENCY() != FLatency) + { + return HAL_ERROR; + } + } + + /*-------------------------- PCLK1 Configuration ---------------------------*/ + if (((RCC_ClkInitStruct->ClockType) & RCC_CLOCKTYPE_PCLK1) == RCC_CLOCKTYPE_PCLK1) + { + assert_param(IS_RCC_PCLK(RCC_ClkInitStruct->APB1CLKDivider)); + MODIFY_REG(RCC->CFGR, RCC_CFGR_PPRE1, RCC_ClkInitStruct->APB1CLKDivider); + } + + /*-------------------------- PCLK2 Configuration ---------------------------*/ + if (((RCC_ClkInitStruct->ClockType) & RCC_CLOCKTYPE_PCLK2) == RCC_CLOCKTYPE_PCLK2) + { + assert_param(IS_RCC_PCLK(RCC_ClkInitStruct->APB2CLKDivider)); + MODIFY_REG(RCC->CFGR, RCC_CFGR_PPRE2, ((RCC_ClkInitStruct->APB2CLKDivider) << 3)); + } + + /* Update the SystemCoreClock global variable */ + SystemCoreClock = HAL_RCC_GetSysClockFreq() >> AHBPrescTable[(RCC->CFGR & RCC_CFGR_HPRE) >> RCC_CFGR_HPRE_Pos]; + + /* Configure the source of time base considering new system clocks settings*/ + HAL_InitTick(uwTickPrio); + + return HAL_OK; +} + +/** + * @} + */ + +/** @defgroup RCC_Exported_Functions_Group2 Peripheral Control functions + * @brief RCC clocks control functions + * + @verbatim + =============================================================================== + ##### Peripheral Control functions ##### + =============================================================================== + [..] 
+ This subsection provides a set of functions allowing to control the RCC Clocks + frequencies. + +@endverbatim + * @{ + */ + +/** + * @brief Selects the clock source to output on MCO1 pin(PA8) or on MCO2 pin(PC9). + * @note PA8/PC9 should be configured in alternate function mode. + * @param RCC_MCOx specifies the output direction for the clock source. + * This parameter can be one of the following values: + * @arg RCC_MCO1: Clock source to output on MCO1 pin(PA8). + * @arg RCC_MCO2: Clock source to output on MCO2 pin(PC9). + * @param RCC_MCOSource specifies the clock source to output. + * This parameter can be one of the following values: + * @arg RCC_MCO1SOURCE_HSI: HSI clock selected as MCO1 source + * @arg RCC_MCO1SOURCE_LSE: LSE clock selected as MCO1 source + * @arg RCC_MCO1SOURCE_HSE: HSE clock selected as MCO1 source + * @arg RCC_MCO1SOURCE_PLLCLK: main PLL clock selected as MCO1 source + * @arg RCC_MCO2SOURCE_SYSCLK: System clock (SYSCLK) selected as MCO2 source + * @arg RCC_MCO2SOURCE_PLLI2SCLK: PLLI2S clock selected as MCO2 source + * @arg RCC_MCO2SOURCE_HSE: HSE clock selected as MCO2 source + * @arg RCC_MCO2SOURCE_PLLCLK: main PLL clock selected as MCO2 source + * @param RCC_MCODiv specifies the MCOx prescaler. + * This parameter can be one of the following values: + * @arg RCC_MCODIV_1: no division applied to MCOx clock + * @arg RCC_MCODIV_2: division by 2 applied to MCOx clock + * @arg RCC_MCODIV_3: division by 3 applied to MCOx clock + * @arg RCC_MCODIV_4: division by 4 applied to MCOx clock + * @arg RCC_MCODIV_5: division by 5 applied to MCOx clock + * @retval None + */ +void HAL_RCC_MCOConfig(uint32_t RCC_MCOx, uint32_t RCC_MCOSource, uint32_t RCC_MCODiv) +{ + GPIO_InitTypeDef GPIO_InitStruct; + /* Check the parameters */ + assert_param(IS_RCC_MCO(RCC_MCOx)); + assert_param(IS_RCC_MCODIV(RCC_MCODiv)); + /* RCC_MCO1 */ + if (RCC_MCOx == RCC_MCO1) + { + assert_param(IS_RCC_MCO1SOURCE(RCC_MCOSource)); + + /* MCO1 Clock Enable */ + MCO1_CLK_ENABLE(); + + /* Configure the MCO1 pin in alternate function mode */ + GPIO_InitStruct.Pin = MCO1_PIN; + GPIO_InitStruct.Mode = GPIO_MODE_AF_PP; + GPIO_InitStruct.Speed = GPIO_SPEED_HIGH; + GPIO_InitStruct.Pull = GPIO_NOPULL; + GPIO_InitStruct.Alternate = GPIO_AF0_MCO; + HAL_GPIO_Init(MCO1_GPIO_PORT, &GPIO_InitStruct); + + /* Mask MCO1 and MCO1PRE[2:0] bits then Select MCO1 clock source and prescaler */ + MODIFY_REG(RCC->CFGR, (RCC_CFGR_MCO1 | RCC_CFGR_MCO1PRE), (RCC_MCOSource | RCC_MCODiv)); + } + else + { + assert_param(IS_RCC_MCO2SOURCE(RCC_MCOSource)); + + /* MCO2 Clock Enable */ + MCO2_CLK_ENABLE(); + + /* Configure the MCO2 pin in alternate function mode */ + GPIO_InitStruct.Pin = MCO2_PIN; + GPIO_InitStruct.Mode = GPIO_MODE_AF_PP; + GPIO_InitStruct.Speed = GPIO_SPEED_HIGH; + GPIO_InitStruct.Pull = GPIO_NOPULL; + GPIO_InitStruct.Alternate = GPIO_AF0_MCO; + HAL_GPIO_Init(MCO2_GPIO_PORT, &GPIO_InitStruct); + + /* Mask MCO2 and MCO2PRE[2:0] bits then Select MCO2 clock source and prescaler */ + MODIFY_REG(RCC->CFGR, (RCC_CFGR_MCO2 | RCC_CFGR_MCO2PRE), (RCC_MCOSource | (RCC_MCODiv << 3))); + } +} + +/** + * @brief Enables the Clock Security System. + * @note If a failure is detected on the HSE oscillator clock, this oscillator + * is automatically disabled and an interrupt is generated to inform the + * software about the failure (Clock Security System Interrupt, CSSI), + * allowing the MCU to perform rescue operations. The CSSI is linked to + * the Cortex-M7 NMI (Non-Maskable Interrupt) exception vector. 
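+ *
+ * @note  (Editor's illustrative sketch, not part of the original ST HAL source.)
+ *        After calling HAL_RCC_EnableCSS(), the application is expected to forward
+ *        the NMI to HAL_RCC_NMI_IRQHandler() defined in this file and may override
+ *        the weak HAL_RCC_CSSCallback(); the file name stm32f7xx_it.c is an assumption.
+ * @code
+ *   // in stm32f7xx_it.c
+ *   void NMI_Handler(void)
+ *   {
+ *     HAL_RCC_NMI_IRQHandler();
+ *   }
+ *
+ *   // user override of the weak callback, e.g. fall back to HSI and flag the fault
+ *   void HAL_RCC_CSSCallback(void)
+ *   {
+ *     // application-specific recovery
+ *   }
+ * @endcode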
+ * @retval None + */ +void HAL_RCC_EnableCSS(void) +{ + SET_BIT(RCC->CR, RCC_CR_CSSON); +} + +/** + * @brief Disables the Clock Security System. + * @retval None + */ +void HAL_RCC_DisableCSS(void) +{ + CLEAR_BIT(RCC->CR, RCC_CR_CSSON); +} + +/** + * @brief Returns the SYSCLK frequency + * + * @note The system frequency computed by this function is not the real + * frequency in the chip. It is calculated based on the predefined + * constant and the selected clock source: + * @note If SYSCLK source is HSI, function returns values based on HSI_VALUE(*) + * @note If SYSCLK source is HSE, function returns values based on HSE_VALUE(**) + * @note If SYSCLK source is PLL, function returns values based on HSE_VALUE(**) + * or HSI_VALUE(*) multiplied/divided by the PLL factors. + * @note (*) HSI_VALUE is a constant defined in stm32f7xx_hal_conf.h file (default value + * 16 MHz) but the real value may vary depending on the variations + * in voltage and temperature. + * @note (**) HSE_VALUE is a constant defined in stm32f7xx_hal_conf.h file (default value + * 25 MHz), user has to ensure that HSE_VALUE is same as the real + * frequency of the crystal used. Otherwise, this function may + * have wrong result. + * + * @note The result of this function could be not correct when using fractional + * value for HSE crystal. + * + * @note This function can be used by the user application to compute the + * baudrate for the communication peripherals or configure other parameters. + * + * @note Each time SYSCLK changes, this function must be called to update the + * right SYSCLK value. Otherwise, any configuration based on this function will be incorrect. + * + * + * @retval SYSCLK frequency + */ +uint32_t HAL_RCC_GetSysClockFreq(void) +{ + uint32_t pllm = 0, pllvco = 0, pllp = 0; + uint32_t sysclockfreq = 0; + + /* Get SYSCLK source -------------------------------------------------------*/ + switch (RCC->CFGR & RCC_CFGR_SWS) + { + case RCC_SYSCLKSOURCE_STATUS_HSI: /* HSI used as system clock source */ + { + sysclockfreq = HSI_VALUE; + break; + } + case RCC_SYSCLKSOURCE_STATUS_HSE: /* HSE used as system clock source */ + { + sysclockfreq = HSE_VALUE; + break; + } + case RCC_SYSCLKSOURCE_STATUS_PLLCLK: /* PLL used as system clock source */ + { + /* PLL_VCO = (HSE_VALUE or HSI_VALUE / PLLM) * PLLN + SYSCLK = PLL_VCO / PLLP */ + pllm = RCC->PLLCFGR & RCC_PLLCFGR_PLLM; + if (__HAL_RCC_GET_PLL_OSCSOURCE() != RCC_PLLCFGR_PLLSRC_HSI) + { + /* HSE used as PLL clock source */ + pllvco = (uint32_t)((((uint64_t) HSE_VALUE * ((uint64_t)((RCC->PLLCFGR & RCC_PLLCFGR_PLLN) >> RCC_PLLCFGR_PLLN_Pos)))) / (uint64_t)pllm); + } + else + { + /* HSI used as PLL clock source */ + pllvco = (uint32_t)((((uint64_t) HSI_VALUE * ((uint64_t)((RCC->PLLCFGR & RCC_PLLCFGR_PLLN) >> RCC_PLLCFGR_PLLN_Pos)))) / (uint64_t)pllm); + } + pllp = ((((RCC->PLLCFGR & RCC_PLLCFGR_PLLP) >> RCC_PLLCFGR_PLLP_Pos) + 1) * 2); + + sysclockfreq = pllvco / pllp; + break; + } + default: + { + sysclockfreq = HSI_VALUE; + break; + } + } + return sysclockfreq; +} + +/** + * @brief Returns the HCLK frequency + * @note Each time HCLK changes, this function must be called to update the + * right HCLK value. Otherwise, any configuration based on this function will be incorrect. + * @note The SystemCoreClock CMSIS variable is used to store System Clock Frequency. 
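+ *
+ * @note  (Editor's illustrative sketch, not part of the original ST HAL source.)
+ *        The bus frequencies derived from SYSCLK can be read back together with the
+ *        related getters defined below:
+ * @code
+ *   uint32_t sysclk = HAL_RCC_GetSysClockFreq();
+ *   uint32_t hclk   = HAL_RCC_GetHCLKFreq();
+ *   uint32_t pclk1  = HAL_RCC_GetPCLK1Freq();
+ *   uint32_t pclk2  = HAL_RCC_GetPCLK2Freq();
+ * @endcode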
+ * @retval HCLK frequency + */ +uint32_t HAL_RCC_GetHCLKFreq(void) +{ + return SystemCoreClock; +} + +/** + * @brief Returns the PCLK1 frequency + * @note Each time PCLK1 changes, this function must be called to update the + * right PCLK1 value. Otherwise, any configuration based on this function will be incorrect. + * @retval PCLK1 frequency + */ +uint32_t HAL_RCC_GetPCLK1Freq(void) +{ + /* Get HCLK source and Compute PCLK1 frequency ---------------------------*/ + return (HAL_RCC_GetHCLKFreq() >> APBPrescTable[(RCC->CFGR & RCC_CFGR_PPRE1) >> RCC_CFGR_PPRE1_Pos]); +} + +/** + * @brief Returns the PCLK2 frequency + * @note Each time PCLK2 changes, this function must be called to update the + * right PCLK2 value. Otherwise, any configuration based on this function will be incorrect. + * @retval PCLK2 frequency + */ +uint32_t HAL_RCC_GetPCLK2Freq(void) +{ + /* Get HCLK source and Compute PCLK2 frequency ---------------------------*/ + return (HAL_RCC_GetHCLKFreq() >> APBPrescTable[(RCC->CFGR & RCC_CFGR_PPRE2) >> RCC_CFGR_PPRE2_Pos]); +} + +/** + * @brief Configures the RCC_OscInitStruct according to the internal + * RCC configuration registers. + * @param RCC_OscInitStruct pointer to an RCC_OscInitTypeDef structure that + * will be configured. + * @retval None + */ +void HAL_RCC_GetOscConfig(RCC_OscInitTypeDef *RCC_OscInitStruct) +{ + /* Set all possible values for the Oscillator type parameter ---------------*/ + RCC_OscInitStruct->OscillatorType = RCC_OSCILLATORTYPE_HSE | RCC_OSCILLATORTYPE_HSI | RCC_OSCILLATORTYPE_LSE | RCC_OSCILLATORTYPE_LSI; + + /* Get the HSE configuration -----------------------------------------------*/ + if ((RCC->CR & RCC_CR_HSEBYP) == RCC_CR_HSEBYP) + { + RCC_OscInitStruct->HSEState = RCC_HSE_BYPASS; + } + else if ((RCC->CR & RCC_CR_HSEON) == RCC_CR_HSEON) + { + RCC_OscInitStruct->HSEState = RCC_HSE_ON; + } + else + { + RCC_OscInitStruct->HSEState = RCC_HSE_OFF; + } + + /* Get the HSI configuration -----------------------------------------------*/ + if ((RCC->CR & RCC_CR_HSION) == RCC_CR_HSION) + { + RCC_OscInitStruct->HSIState = RCC_HSI_ON; + } + else + { + RCC_OscInitStruct->HSIState = RCC_HSI_OFF; + } + + RCC_OscInitStruct->HSICalibrationValue = (uint32_t)((RCC->CR & RCC_CR_HSITRIM) >> RCC_CR_HSITRIM_Pos); + + /* Get the LSE configuration -----------------------------------------------*/ + if ((RCC->BDCR & RCC_BDCR_LSEBYP) == RCC_BDCR_LSEBYP) + { + RCC_OscInitStruct->LSEState = RCC_LSE_BYPASS; + } + else if ((RCC->BDCR & RCC_BDCR_LSEON) == RCC_BDCR_LSEON) + { + RCC_OscInitStruct->LSEState = RCC_LSE_ON; + } + else + { + RCC_OscInitStruct->LSEState = RCC_LSE_OFF; + } + + /* Get the LSI configuration -----------------------------------------------*/ + if ((RCC->CSR & RCC_CSR_LSION) == RCC_CSR_LSION) + { + RCC_OscInitStruct->LSIState = RCC_LSI_ON; + } + else + { + RCC_OscInitStruct->LSIState = RCC_LSI_OFF; + } + + /* Get the PLL configuration -----------------------------------------------*/ + if ((RCC->CR & RCC_CR_PLLON) == RCC_CR_PLLON) + { + RCC_OscInitStruct->PLL.PLLState = RCC_PLL_ON; + } + else + { + RCC_OscInitStruct->PLL.PLLState = RCC_PLL_OFF; + } + RCC_OscInitStruct->PLL.PLLSource = (uint32_t)(RCC->PLLCFGR & RCC_PLLCFGR_PLLSRC); + RCC_OscInitStruct->PLL.PLLM = (uint32_t)(RCC->PLLCFGR & RCC_PLLCFGR_PLLM); + RCC_OscInitStruct->PLL.PLLN = (uint32_t)((RCC->PLLCFGR & RCC_PLLCFGR_PLLN) >> RCC_PLLCFGR_PLLN_Pos); + RCC_OscInitStruct->PLL.PLLP = (uint32_t)((((RCC->PLLCFGR & RCC_PLLCFGR_PLLP) + RCC_PLLCFGR_PLLP_0) << 1) >> RCC_PLLCFGR_PLLP_Pos); + 
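+  /* Editor's note (illustrative): the 2-bit PLLP field stores the dividers 2/4/6/8
+     as the values 0..3, so ((PLLP + RCC_PLLCFGR_PLLP_0) << 1) >> RCC_PLLCFGR_PLLP_Pos
+     above recovers the actual division factor (0 -> 2, ..., 3 -> 8). */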
RCC_OscInitStruct->PLL.PLLQ = (uint32_t)((RCC->PLLCFGR & RCC_PLLCFGR_PLLQ) >> RCC_PLLCFGR_PLLQ_Pos); +#if defined (RCC_PLLCFGR_PLLR) + RCC_OscInitStruct->PLL.PLLR = (uint32_t)((RCC->PLLCFGR & RCC_PLLCFGR_PLLR) >> POSITION_VAL(RCC_PLLCFGR_PLLR)); +#endif +} + +/** + * @brief Configures the RCC_ClkInitStruct according to the internal + * RCC configuration registers. + * @param RCC_ClkInitStruct pointer to an RCC_ClkInitTypeDef structure that + * will be configured. + * @param pFLatency Pointer on the Flash Latency. + * @retval None + */ +void HAL_RCC_GetClockConfig(RCC_ClkInitTypeDef *RCC_ClkInitStruct, uint32_t *pFLatency) +{ + /* Set all possible values for the Clock type parameter --------------------*/ + RCC_ClkInitStruct->ClockType = RCC_CLOCKTYPE_SYSCLK | RCC_CLOCKTYPE_HCLK | RCC_CLOCKTYPE_PCLK1 | RCC_CLOCKTYPE_PCLK2; + + /* Get the SYSCLK configuration --------------------------------------------*/ + RCC_ClkInitStruct->SYSCLKSource = (uint32_t)(RCC->CFGR & RCC_CFGR_SW); + + /* Get the HCLK configuration ----------------------------------------------*/ + RCC_ClkInitStruct->AHBCLKDivider = (uint32_t)(RCC->CFGR & RCC_CFGR_HPRE); + + /* Get the APB1 configuration ----------------------------------------------*/ + RCC_ClkInitStruct->APB1CLKDivider = (uint32_t)(RCC->CFGR & RCC_CFGR_PPRE1); + + /* Get the APB2 configuration ----------------------------------------------*/ + RCC_ClkInitStruct->APB2CLKDivider = (uint32_t)((RCC->CFGR & RCC_CFGR_PPRE2) >> 3); + + /* Get the Flash Wait State (Latency) configuration ------------------------*/ + *pFLatency = (uint32_t)(FLASH->ACR & FLASH_ACR_LATENCY); +} + +/** + * @brief This function handles the RCC CSS interrupt request. + * @note This API should be called under the NMI_Handler(). + * @retval None + */ +void HAL_RCC_NMI_IRQHandler(void) +{ + /* Check RCC CSSF flag */ + if (__HAL_RCC_GET_IT(RCC_IT_CSS)) + { + /* RCC Clock Security System interrupt user callback */ + HAL_RCC_CSSCallback(); + + /* Clear RCC CSS pending bit */ + __HAL_RCC_CLEAR_IT(RCC_IT_CSS); + } +} + +/** + * @brief RCC Clock Security System interrupt callback + * @retval None + */ +__weak void HAL_RCC_CSSCallback(void) +{ + /* NOTE : This function Should not be modified, when the callback is needed, + the HAL_RCC_CSSCallback could be implemented in the user file + */ +} + +/** + * @} + */ + +/** + * @} + */ + +#endif /* HAL_RCC_MODULE_ENABLED */ +/** + * @} + */ + +/** + * @} + */ + diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_rcc_ex.c b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_rcc_ex.c new file mode 100644 index 0000000..60b0a07 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_rcc_ex.c @@ -0,0 +1,1773 @@ +/** + ****************************************************************************** + * @file stm32f7xx_hal_rcc_ex.c + * @author MCD Application Team + * @brief Extension RCC HAL module driver. + * This file provides firmware functions to manage the following + * functionalities RCC extension peripheral: + * + Extended Peripheral Control functions + * + ****************************************************************************** + * @attention + * + * Copyright (c) 2017 STMicroelectronics. + * All rights reserved. + * + * This software is licensed under terms that can be found in the LICENSE file in + * the root directory of this software component. 
+ * If no LICENSE file comes with this software, it is provided AS-IS. + ****************************************************************************** + */ + +/* Includes ------------------------------------------------------------------*/ +#include "stm32f7xx_hal.h" + +/** @addtogroup STM32F7xx_HAL_Driver + * @{ + */ + +/** @defgroup RCCEx RCCEx + * @brief RCCEx HAL module driver + * @{ + */ + +#ifdef HAL_RCC_MODULE_ENABLED + +/* Private typedef -----------------------------------------------------------*/ +/* Private define ------------------------------------------------------------*/ +/** @defgroup RCCEx_Private_Defines RCCEx Private Defines + * @{ + */ +/** + * @} + */ +/* Private macro -------------------------------------------------------------*/ +/** @defgroup RCCEx_Private_Macros RCCEx Private Macros + * @{ + */ +/** + * @} + */ + +/** @defgroup RCCEx_Private_Macros RCCEx Private Macros + * @{ + */ + +/** + * @} + */ + + +/* Private variables ---------------------------------------------------------*/ +/* Private function prototypes -----------------------------------------------*/ +/* Private functions ---------------------------------------------------------*/ + +/** @defgroup RCCEx_Exported_Functions RCCEx Exported Functions + * @{ + */ + +/** @defgroup RCCEx_Exported_Functions_Group1 Extended Peripheral Control functions + * @brief Extended Peripheral Control functions + * +@verbatim + =============================================================================== + ##### Extended Peripheral Control functions ##### + =============================================================================== + [..] + This subsection provides a set of functions allowing to control the RCC Clocks + frequencies. + [..] + (@) Important note: Care must be taken when HAL_RCCEx_PeriphCLKConfig() is used to + select the RTC clock source; in this case the Backup domain will be reset in + order to modify the RTC Clock source, as consequence RTC registers (including + the backup registers) and RCC_BDCR register will be set to their reset values. + +@endverbatim + * @{ + */ +#if defined (STM32F745xx) || defined (STM32F746xx) || defined (STM32F756xx) || defined (STM32F765xx) || \ + defined (STM32F767xx) || defined (STM32F769xx) || defined (STM32F777xx) || defined (STM32F779xx) || \ + defined (STM32F750xx) +/** + * @brief Initializes the RCC extended peripherals clocks according to the specified + * parameters in the RCC_PeriphCLKInitTypeDef. + * @param PeriphClkInit pointer to an RCC_PeriphCLKInitTypeDef structure that + * contains the configuration information for the Extended Peripherals + * clocks(I2S, SAI, LTDC, RTC, TIM, UARTs, USARTs, LTPIM, SDMMC...). + * + * @note Care must be taken when HAL_RCCEx_PeriphCLKConfig() is used to select + * the RTC clock source; in this case the Backup domain will be reset in + * order to modify the RTC Clock source, as consequence RTC registers (including + * the backup registers) are set to their reset values. 
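+ *
+ * @note  (Editor's illustrative sketch, not part of the original ST HAL source.)
+ *        A minimal call routing USART3 and I2C1 to their APB1 kernel clock is shown
+ *        below; the selected peripherals and sources are assumptions and
+ *        Error_Handler() is assumed to be defined by the application.
+ * @code
+ *   RCC_PeriphCLKInitTypeDef pclk = {0};
+ *   pclk.PeriphClockSelection = RCC_PERIPHCLK_USART3 | RCC_PERIPHCLK_I2C1;
+ *   pclk.Usart3ClockSelection = RCC_USART3CLKSOURCE_PCLK1;
+ *   pclk.I2c1ClockSelection   = RCC_I2C1CLKSOURCE_PCLK1;
+ *   if (HAL_RCCEx_PeriphCLKConfig(&pclk) != HAL_OK)
+ *   {
+ *     Error_Handler();
+ *   }
+ * @endcode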
+ * + * @retval HAL status + */ +HAL_StatusTypeDef HAL_RCCEx_PeriphCLKConfig(RCC_PeriphCLKInitTypeDef *PeriphClkInit) +{ + uint32_t tickstart = 0; + uint32_t tmpreg0 = 0; + uint32_t tmpreg1 = 0; + uint32_t plli2sused = 0; + uint32_t pllsaiused = 0; + + /* Check the parameters */ + assert_param(IS_RCC_PERIPHCLOCK(PeriphClkInit->PeriphClockSelection)); + + /*----------------------------------- I2S configuration ----------------------------------*/ + if(((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_I2S) == (RCC_PERIPHCLK_I2S)) + { + /* Check the parameters */ + assert_param(IS_RCC_I2SCLKSOURCE(PeriphClkInit->I2sClockSelection)); + + /* Configure I2S Clock source */ + __HAL_RCC_I2S_CONFIG(PeriphClkInit->I2sClockSelection); + + /* Enable the PLLI2S when it's used as clock source for I2S */ + if(PeriphClkInit->I2sClockSelection == RCC_I2SCLKSOURCE_PLLI2S) + { + plli2sused = 1; + } + } + + /*------------------------------------ SAI1 configuration --------------------------------------*/ + if(((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_SAI1) == (RCC_PERIPHCLK_SAI1)) + { + /* Check the parameters */ + assert_param(IS_RCC_SAI1CLKSOURCE(PeriphClkInit->Sai1ClockSelection)); + + /* Configure SAI1 Clock source */ + __HAL_RCC_SAI1_CONFIG(PeriphClkInit->Sai1ClockSelection); + /* Enable the PLLI2S when it's used as clock source for SAI */ + if(PeriphClkInit->Sai1ClockSelection == RCC_SAI1CLKSOURCE_PLLI2S) + { + plli2sused = 1; + } + /* Enable the PLLSAI when it's used as clock source for SAI */ + if(PeriphClkInit->Sai1ClockSelection == RCC_SAI1CLKSOURCE_PLLSAI) + { + pllsaiused = 1; + } + } + + /*------------------------------------ SAI2 configuration --------------------------------------*/ + if(((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_SAI2) == (RCC_PERIPHCLK_SAI2)) + { + /* Check the parameters */ + assert_param(IS_RCC_SAI2CLKSOURCE(PeriphClkInit->Sai2ClockSelection)); + + /* Configure SAI2 Clock source */ + __HAL_RCC_SAI2_CONFIG(PeriphClkInit->Sai2ClockSelection); + + /* Enable the PLLI2S when it's used as clock source for SAI */ + if(PeriphClkInit->Sai2ClockSelection == RCC_SAI2CLKSOURCE_PLLI2S) + { + plli2sused = 1; + } + /* Enable the PLLSAI when it's used as clock source for SAI */ + if(PeriphClkInit->Sai2ClockSelection == RCC_SAI2CLKSOURCE_PLLSAI) + { + pllsaiused = 1; + } + } + + /*-------------------------------------- SPDIF-RX Configuration -----------------------------------*/ + if(((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_SPDIFRX) == RCC_PERIPHCLK_SPDIFRX) + { + plli2sused = 1; + } + + /*------------------------------------ RTC configuration --------------------------------------*/ + if(((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_RTC) == (RCC_PERIPHCLK_RTC)) + { + /* Check for RTC Parameters used to output RTCCLK */ + assert_param(IS_RCC_RTCCLKSOURCE(PeriphClkInit->RTCClockSelection)); + + /* Enable Power Clock*/ + __HAL_RCC_PWR_CLK_ENABLE(); + + /* Enable write access to Backup domain */ + PWR->CR1 |= PWR_CR1_DBP; + + /* Get Start Tick*/ + tickstart = HAL_GetTick(); + + /* Wait for Backup domain Write protection disable */ + while((PWR->CR1 & PWR_CR1_DBP) == RESET) + { + if((HAL_GetTick() - tickstart) > RCC_DBP_TIMEOUT_VALUE) + { + return HAL_TIMEOUT; + } + } + + /* Reset the Backup domain only if the RTC Clock source selection is modified */ + tmpreg0 = (RCC->BDCR & RCC_BDCR_RTCSEL); + + if((tmpreg0 != 0x00000000U) && (tmpreg0 != (PeriphClkInit->RTCClockSelection & RCC_BDCR_RTCSEL))) + { + /* Store the content of BDCR register before 
the reset of Backup Domain */ + tmpreg0 = (RCC->BDCR & ~(RCC_BDCR_RTCSEL)); + + /* RTC Clock selection can be changed only if the Backup Domain is reset */ + __HAL_RCC_BACKUPRESET_FORCE(); + __HAL_RCC_BACKUPRESET_RELEASE(); + + /* Restore the Content of BDCR register */ + RCC->BDCR = tmpreg0; + + /* Wait for LSE reactivation if LSE was enable prior to Backup Domain reset */ + if (HAL_IS_BIT_SET(RCC->BDCR, RCC_BDCR_LSEON)) + { + /* Get Start Tick*/ + tickstart = HAL_GetTick(); + + /* Wait till LSE is ready */ + while(__HAL_RCC_GET_FLAG(RCC_FLAG_LSERDY) == RESET) + { + if((HAL_GetTick() - tickstart ) > RCC_LSE_TIMEOUT_VALUE) + { + return HAL_TIMEOUT; + } + } + } + } + __HAL_RCC_RTC_CONFIG(PeriphClkInit->RTCClockSelection); + } + + /*------------------------------------ TIM configuration --------------------------------------*/ + if(((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_TIM) == (RCC_PERIPHCLK_TIM)) + { + /* Check the parameters */ + assert_param(IS_RCC_TIMPRES(PeriphClkInit->TIMPresSelection)); + + /* Configure Timer Prescaler */ + __HAL_RCC_TIMCLKPRESCALER(PeriphClkInit->TIMPresSelection); + } + + /*-------------------------------------- I2C1 Configuration -----------------------------------*/ + if(((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_I2C1) == RCC_PERIPHCLK_I2C1) + { + /* Check the parameters */ + assert_param(IS_RCC_I2C1CLKSOURCE(PeriphClkInit->I2c1ClockSelection)); + + /* Configure the I2C1 clock source */ + __HAL_RCC_I2C1_CONFIG(PeriphClkInit->I2c1ClockSelection); + } + + /*-------------------------------------- I2C2 Configuration -----------------------------------*/ + if(((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_I2C2) == RCC_PERIPHCLK_I2C2) + { + /* Check the parameters */ + assert_param(IS_RCC_I2C2CLKSOURCE(PeriphClkInit->I2c2ClockSelection)); + + /* Configure the I2C2 clock source */ + __HAL_RCC_I2C2_CONFIG(PeriphClkInit->I2c2ClockSelection); + } + + /*-------------------------------------- I2C3 Configuration -----------------------------------*/ + if(((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_I2C3) == RCC_PERIPHCLK_I2C3) + { + /* Check the parameters */ + assert_param(IS_RCC_I2C3CLKSOURCE(PeriphClkInit->I2c3ClockSelection)); + + /* Configure the I2C3 clock source */ + __HAL_RCC_I2C3_CONFIG(PeriphClkInit->I2c3ClockSelection); + } + + /*-------------------------------------- I2C4 Configuration -----------------------------------*/ + if(((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_I2C4) == RCC_PERIPHCLK_I2C4) + { + /* Check the parameters */ + assert_param(IS_RCC_I2C4CLKSOURCE(PeriphClkInit->I2c4ClockSelection)); + + /* Configure the I2C4 clock source */ + __HAL_RCC_I2C4_CONFIG(PeriphClkInit->I2c4ClockSelection); + } + + /*-------------------------------------- USART1 Configuration -----------------------------------*/ + if(((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_USART1) == RCC_PERIPHCLK_USART1) + { + /* Check the parameters */ + assert_param(IS_RCC_USART1CLKSOURCE(PeriphClkInit->Usart1ClockSelection)); + + /* Configure the USART1 clock source */ + __HAL_RCC_USART1_CONFIG(PeriphClkInit->Usart1ClockSelection); + } + + /*-------------------------------------- USART2 Configuration -----------------------------------*/ + if(((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_USART2) == RCC_PERIPHCLK_USART2) + { + /* Check the parameters */ + assert_param(IS_RCC_USART2CLKSOURCE(PeriphClkInit->Usart2ClockSelection)); + + /* Configure the USART2 clock source */ + 
__HAL_RCC_USART2_CONFIG(PeriphClkInit->Usart2ClockSelection); + } + + /*-------------------------------------- USART3 Configuration -----------------------------------*/ + if(((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_USART3) == RCC_PERIPHCLK_USART3) + { + /* Check the parameters */ + assert_param(IS_RCC_USART3CLKSOURCE(PeriphClkInit->Usart3ClockSelection)); + + /* Configure the USART3 clock source */ + __HAL_RCC_USART3_CONFIG(PeriphClkInit->Usart3ClockSelection); + } + + /*-------------------------------------- UART4 Configuration -----------------------------------*/ + if(((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_UART4) == RCC_PERIPHCLK_UART4) + { + /* Check the parameters */ + assert_param(IS_RCC_UART4CLKSOURCE(PeriphClkInit->Uart4ClockSelection)); + + /* Configure the UART4 clock source */ + __HAL_RCC_UART4_CONFIG(PeriphClkInit->Uart4ClockSelection); + } + + /*-------------------------------------- UART5 Configuration -----------------------------------*/ + if(((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_UART5) == RCC_PERIPHCLK_UART5) + { + /* Check the parameters */ + assert_param(IS_RCC_UART5CLKSOURCE(PeriphClkInit->Uart5ClockSelection)); + + /* Configure the UART5 clock source */ + __HAL_RCC_UART5_CONFIG(PeriphClkInit->Uart5ClockSelection); + } + + /*-------------------------------------- USART6 Configuration -----------------------------------*/ + if(((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_USART6) == RCC_PERIPHCLK_USART6) + { + /* Check the parameters */ + assert_param(IS_RCC_USART6CLKSOURCE(PeriphClkInit->Usart6ClockSelection)); + + /* Configure the USART6 clock source */ + __HAL_RCC_USART6_CONFIG(PeriphClkInit->Usart6ClockSelection); + } + + /*-------------------------------------- UART7 Configuration -----------------------------------*/ + if(((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_UART7) == RCC_PERIPHCLK_UART7) + { + /* Check the parameters */ + assert_param(IS_RCC_UART7CLKSOURCE(PeriphClkInit->Uart7ClockSelection)); + + /* Configure the UART7 clock source */ + __HAL_RCC_UART7_CONFIG(PeriphClkInit->Uart7ClockSelection); + } + + /*-------------------------------------- UART8 Configuration -----------------------------------*/ + if(((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_UART8) == RCC_PERIPHCLK_UART8) + { + /* Check the parameters */ + assert_param(IS_RCC_UART8CLKSOURCE(PeriphClkInit->Uart8ClockSelection)); + + /* Configure the UART8 clock source */ + __HAL_RCC_UART8_CONFIG(PeriphClkInit->Uart8ClockSelection); + } + + /*--------------------------------------- CEC Configuration -----------------------------------*/ + if(((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_CEC) == RCC_PERIPHCLK_CEC) + { + /* Check the parameters */ + assert_param(IS_RCC_CECCLKSOURCE(PeriphClkInit->CecClockSelection)); + + /* Configure the CEC clock source */ + __HAL_RCC_CEC_CONFIG(PeriphClkInit->CecClockSelection); + } + + /*-------------------------------------- CK48 Configuration -----------------------------------*/ + if(((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_CLK48) == RCC_PERIPHCLK_CLK48) + { + /* Check the parameters */ + assert_param(IS_RCC_CLK48SOURCE(PeriphClkInit->Clk48ClockSelection)); + + /* Configure the CLK48 source */ + __HAL_RCC_CLK48_CONFIG(PeriphClkInit->Clk48ClockSelection); + + /* Enable the PLLSAI when it's used as clock source for CK48 */ + if(PeriphClkInit->Clk48ClockSelection == RCC_CLK48SOURCE_PLLSAIP) + { + pllsaiused = 1; + } + } + + /*-------------------------------------- 
LTDC Configuration -----------------------------------*/ +#if defined(STM32F746xx) || defined(STM32F756xx) || defined (STM32F767xx) || defined (STM32F769xx) || defined (STM32F777xx) || defined (STM32F779xx) || defined (STM32F750xx) + if(((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_LTDC) == RCC_PERIPHCLK_LTDC) + { + pllsaiused = 1; + } +#endif /* STM32F746xx || STM32F756xx || STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx || STM32F750xx */ + + /*-------------------------------------- LPTIM1 Configuration -----------------------------------*/ + if(((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_LPTIM1) == RCC_PERIPHCLK_LPTIM1) + { + /* Check the parameters */ + assert_param(IS_RCC_LPTIM1CLK(PeriphClkInit->Lptim1ClockSelection)); + + /* Configure the LTPIM1 clock source */ + __HAL_RCC_LPTIM1_CONFIG(PeriphClkInit->Lptim1ClockSelection); + } + + /*------------------------------------- SDMMC1 Configuration ------------------------------------*/ + if(((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_SDMMC1) == RCC_PERIPHCLK_SDMMC1) + { + /* Check the parameters */ + assert_param(IS_RCC_SDMMC1CLKSOURCE(PeriphClkInit->Sdmmc1ClockSelection)); + + /* Configure the SDMMC1 clock source */ + __HAL_RCC_SDMMC1_CONFIG(PeriphClkInit->Sdmmc1ClockSelection); + } + +#if defined (STM32F765xx) || defined (STM32F767xx) || defined (STM32F769xx) || defined (STM32F777xx) || defined (STM32F779xx) + /*------------------------------------- SDMMC2 Configuration ------------------------------------*/ + if(((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_SDMMC2) == RCC_PERIPHCLK_SDMMC2) + { + /* Check the parameters */ + assert_param(IS_RCC_SDMMC2CLKSOURCE(PeriphClkInit->Sdmmc2ClockSelection)); + + /* Configure the SDMMC2 clock source */ + __HAL_RCC_SDMMC2_CONFIG(PeriphClkInit->Sdmmc2ClockSelection); + } + + /*------------------------------------- DFSDM1 Configuration -------------------------------------*/ + if(((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_DFSDM1) == RCC_PERIPHCLK_DFSDM1) + { + /* Check the parameters */ + assert_param(IS_RCC_DFSDM1CLKSOURCE(PeriphClkInit->Dfsdm1ClockSelection)); + + /* Configure the DFSDM1 interface clock source */ + __HAL_RCC_DFSDM1_CONFIG(PeriphClkInit->Dfsdm1ClockSelection); + } + + /*------------------------------------- DFSDM AUDIO Configuration -------------------------------------*/ + if(((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_DFSDM1_AUDIO) == RCC_PERIPHCLK_DFSDM1_AUDIO) + { + /* Check the parameters */ + assert_param(IS_RCC_DFSDM1AUDIOCLKSOURCE(PeriphClkInit->Dfsdm1AudioClockSelection)); + + /* Configure the DFSDM interface clock source */ + __HAL_RCC_DFSDM1AUDIO_CONFIG(PeriphClkInit->Dfsdm1AudioClockSelection); + } +#endif /* STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx */ + + /*-------------------------------------- PLLI2S Configuration ---------------------------------*/ + /* PLLI2S is configured when a peripheral will use it as source clock : SAI1, SAI2, I2S or SPDIF-RX */ + if((plli2sused == 1) || ((PeriphClkInit->PeriphClockSelection & RCC_PERIPHCLK_PLLI2S) == RCC_PERIPHCLK_PLLI2S)) + { + /* Disable the PLLI2S */ + __HAL_RCC_PLLI2S_DISABLE(); + + /* Get Start Tick*/ + tickstart = HAL_GetTick(); + + /* Wait till PLLI2S is disabled */ + while(__HAL_RCC_GET_FLAG(RCC_FLAG_PLLI2SRDY) != RESET) + { + if((HAL_GetTick() - tickstart) > PLLI2S_TIMEOUT_VALUE) + { + /* return in case of Timeout detected */ + return HAL_TIMEOUT; + } + } + + /* check for common PLLI2S Parameters */ + 
assert_param(IS_RCC_PLLI2SN_VALUE(PeriphClkInit->PLLI2S.PLLI2SN)); + + /*----------------- In Case of PLLI2S is selected as source clock for I2S -------------------*/ + if(((((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_I2S) == RCC_PERIPHCLK_I2S) && (PeriphClkInit->I2sClockSelection == RCC_I2SCLKSOURCE_PLLI2S))) + { + /* check for Parameters */ + assert_param(IS_RCC_PLLI2SR_VALUE(PeriphClkInit->PLLI2S.PLLI2SR)); + + /* Read PLLI2SP and PLLI2SQ value from PLLI2SCFGR register (this value is not needed for I2S configuration) */ + tmpreg0 = ((RCC->PLLI2SCFGR & RCC_PLLI2SCFGR_PLLI2SP) >> RCC_PLLI2SCFGR_PLLI2SP_Pos); + tmpreg1 = ((RCC->PLLI2SCFGR & RCC_PLLI2SCFGR_PLLI2SQ) >> RCC_PLLI2SCFGR_PLLI2SQ_Pos); + /* Configure the PLLI2S division factors */ + /* PLLI2S_VCO = f(VCO clock) = f(PLLI2S clock input) x (PLLI2SN/PLLM) */ + /* I2SCLK = f(PLLI2S clock output) = f(VCO clock) / PLLI2SR */ + __HAL_RCC_PLLI2S_CONFIG(PeriphClkInit->PLLI2S.PLLI2SN , tmpreg0, tmpreg1, PeriphClkInit->PLLI2S.PLLI2SR); + } + + /*----------------- In Case of PLLI2S is selected as source clock for SAI -------------------*/ + if(((((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_SAI1) == RCC_PERIPHCLK_SAI1) && (PeriphClkInit->Sai1ClockSelection == RCC_SAI1CLKSOURCE_PLLI2S)) || + ((((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_SAI2) == RCC_PERIPHCLK_SAI2) && (PeriphClkInit->Sai2ClockSelection == RCC_SAI2CLKSOURCE_PLLI2S))) + { + /* Check for PLLI2S Parameters */ + assert_param(IS_RCC_PLLI2SQ_VALUE(PeriphClkInit->PLLI2S.PLLI2SQ)); + /* Check for PLLI2S/DIVQ parameters */ + assert_param(IS_RCC_PLLI2S_DIVQ_VALUE(PeriphClkInit->PLLI2SDivQ)); + + /* Read PLLI2SP and PLLI2SR values from PLLI2SCFGR register (this value is not needed for SAI configuration) */ + tmpreg0 = ((RCC->PLLI2SCFGR & RCC_PLLI2SCFGR_PLLI2SP) >> RCC_PLLI2SCFGR_PLLI2SP_Pos); + tmpreg1 = ((RCC->PLLI2SCFGR & RCC_PLLI2SCFGR_PLLI2SR) >> RCC_PLLI2SCFGR_PLLI2SR_Pos); + /* Configure the PLLI2S division factors */ + /* PLLI2S_VCO Input = PLL_SOURCE/PLLM */ + /* PLLI2S_VCO Output = PLLI2S_VCO Input * PLLI2SN */ + /* SAI_CLK(first level) = PLLI2S_VCO Output/PLLI2SQ */ + __HAL_RCC_PLLI2S_CONFIG(PeriphClkInit->PLLI2S.PLLI2SN, tmpreg0, PeriphClkInit->PLLI2S.PLLI2SQ, tmpreg1); + + /* SAI_CLK_x = SAI_CLK(first level)/PLLI2SDIVQ */ + __HAL_RCC_PLLI2S_PLLSAICLKDIVQ_CONFIG(PeriphClkInit->PLLI2SDivQ); + } + + /*----------------- In Case of PLLI2S is selected as source clock for SPDIF-RX -------------------*/ + if(((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_SPDIFRX) == RCC_PERIPHCLK_SPDIFRX) + { + /* check for Parameters */ + assert_param(IS_RCC_PLLI2SP_VALUE(PeriphClkInit->PLLI2S.PLLI2SP)); + + /* Read PLLI2SR value from PLLI2SCFGR register (this value is not needed for SPDIF-RX configuration) */ + tmpreg0 = ((RCC->PLLI2SCFGR & RCC_PLLI2SCFGR_PLLI2SQ) >> RCC_PLLI2SCFGR_PLLI2SQ_Pos); + tmpreg1 = ((RCC->PLLI2SCFGR & RCC_PLLI2SCFGR_PLLI2SR) >> RCC_PLLI2SCFGR_PLLI2SR_Pos); + /* Configure the PLLI2S division factors */ + /* PLLI2S_VCO = f(VCO clock) = f(PLLI2S clock input) x (PLLI2SN/PLLM) */ + /* SPDIFCLK = f(PLLI2S clock output) = f(VCO clock) / PLLI2SP */ + __HAL_RCC_PLLI2S_CONFIG(PeriphClkInit->PLLI2S.PLLI2SN , PeriphClkInit->PLLI2S.PLLI2SP, tmpreg0, tmpreg1); + } + + /*----------------- In Case of PLLI2S is just selected -----------------*/ + if((PeriphClkInit->PeriphClockSelection & RCC_PERIPHCLK_PLLI2S) == RCC_PERIPHCLK_PLLI2S) + { + /* Check for Parameters */ + assert_param(IS_RCC_PLLI2SP_VALUE(PeriphClkInit->PLLI2S.PLLI2SP)); + 
assert_param(IS_RCC_PLLI2SR_VALUE(PeriphClkInit->PLLI2S.PLLI2SR)); + assert_param(IS_RCC_PLLI2SQ_VALUE(PeriphClkInit->PLLI2S.PLLI2SQ)); + + /* Configure the PLLI2S division factors */ + /* PLLI2S_VCO = f(VCO clock) = f(PLLI2S clock input) x (PLLI2SN/PLLI2SM) */ + /* SPDIFRXCLK = f(PLLI2S clock output) = f(VCO clock) / PLLI2SP */ + __HAL_RCC_PLLI2S_CONFIG(PeriphClkInit->PLLI2S.PLLI2SN , PeriphClkInit->PLLI2S.PLLI2SP, PeriphClkInit->PLLI2S.PLLI2SQ, PeriphClkInit->PLLI2S.PLLI2SR); + } + + /* Enable the PLLI2S */ + __HAL_RCC_PLLI2S_ENABLE(); + + /* Get Start Tick*/ + tickstart = HAL_GetTick(); + + /* Wait till PLLI2S is ready */ + while(__HAL_RCC_GET_FLAG(RCC_FLAG_PLLI2SRDY) == RESET) + { + if((HAL_GetTick() - tickstart) > PLLI2S_TIMEOUT_VALUE) + { + /* return in case of Timeout detected */ + return HAL_TIMEOUT; + } + } + } + + /*-------------------------------------- PLLSAI Configuration ---------------------------------*/ + /* PLLSAI is configured when a peripheral will use it as source clock : SAI1, SAI2, LTDC or CK48 */ + if(pllsaiused == 1) + { + /* Disable PLLSAI Clock */ + __HAL_RCC_PLLSAI_DISABLE(); + + /* Get Start Tick*/ + tickstart = HAL_GetTick(); + + /* Wait till PLLSAI is disabled */ + while(__HAL_RCC_PLLSAI_GET_FLAG() != RESET) + { + if((HAL_GetTick() - tickstart) > PLLSAI_TIMEOUT_VALUE) + { + /* return in case of Timeout detected */ + return HAL_TIMEOUT; + } + } + + /* Check the PLLSAI division factors */ + assert_param(IS_RCC_PLLSAIN_VALUE(PeriphClkInit->PLLSAI.PLLSAIN)); + + /*----------------- In Case of PLLSAI is selected as source clock for SAI -------------------*/ + if(((((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_SAI1) == RCC_PERIPHCLK_SAI1) && (PeriphClkInit->Sai1ClockSelection == RCC_SAI1CLKSOURCE_PLLSAI)) ||\ + ((((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_SAI2) == RCC_PERIPHCLK_SAI2) && (PeriphClkInit->Sai2ClockSelection == RCC_SAI2CLKSOURCE_PLLSAI))) + { + /* check for PLLSAIQ Parameter */ + assert_param(IS_RCC_PLLSAIQ_VALUE(PeriphClkInit->PLLSAI.PLLSAIQ)); + /* check for PLLSAI/DIVQ Parameter */ + assert_param(IS_RCC_PLLSAI_DIVQ_VALUE(PeriphClkInit->PLLSAIDivQ)); + + /* Read PLLSAIP value from PLLSAICFGR register (this value is not needed for SAI configuration) */ + tmpreg0 = ((RCC->PLLSAICFGR & RCC_PLLSAICFGR_PLLSAIP) >> RCC_PLLSAICFGR_PLLSAIP_Pos); + tmpreg1 = ((RCC->PLLSAICFGR & RCC_PLLI2SCFGR_PLLI2SR) >> RCC_PLLSAICFGR_PLLSAIR_Pos); + /* PLLSAI_VCO Input = PLL_SOURCE/PLLM */ + /* PLLSAI_VCO Output = PLLSAI_VCO Input * PLLSAIN */ + /* SAI_CLK(first level) = PLLSAI_VCO Output/PLLSAIQ */ + __HAL_RCC_PLLSAI_CONFIG(PeriphClkInit->PLLSAI.PLLSAIN , tmpreg0, PeriphClkInit->PLLSAI.PLLSAIQ, tmpreg1); + + /* SAI_CLK_x = SAI_CLK(first level)/PLLSAIDIVQ */ + __HAL_RCC_PLLSAI_PLLSAICLKDIVQ_CONFIG(PeriphClkInit->PLLSAIDivQ); + } + + /*----------------- In Case of PLLSAI is selected as source clock for CLK48 -------------------*/ + /* In Case of PLLI2S is selected as source clock for CK48 */ + if((((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_CLK48) == RCC_PERIPHCLK_CLK48) && (PeriphClkInit->Clk48ClockSelection == RCC_CLK48SOURCE_PLLSAIP)) + { + /* check for Parameters */ + assert_param(IS_RCC_PLLSAIP_VALUE(PeriphClkInit->PLLSAI.PLLSAIP)); + /* Read PLLSAIQ and PLLSAIR value from PLLSAICFGR register (this value is not needed for CK48 configuration) */ + tmpreg0 = ((RCC->PLLSAICFGR & RCC_PLLSAICFGR_PLLSAIQ) >> RCC_PLLSAICFGR_PLLSAIQ_Pos); + tmpreg1 = ((RCC->PLLSAICFGR & RCC_PLLSAICFGR_PLLSAIR) >> RCC_PLLSAICFGR_PLLSAIR_Pos); + + /* Configure the 
PLLSAI division factors */ + /* PLLSAI_VCO = f(VCO clock) = f(PLLSAI clock input) x (PLLI2SN/PLLM) */ + /* 48CLK = f(PLLSAI clock output) = f(VCO clock) / PLLSAIP */ + __HAL_RCC_PLLSAI_CONFIG(PeriphClkInit->PLLSAI.PLLSAIN , PeriphClkInit->PLLSAI.PLLSAIP, tmpreg0, tmpreg1); + } + +#if defined(STM32F746xx) || defined(STM32F756xx) || defined (STM32F767xx) || defined (STM32F769xx) || defined (STM32F777xx) || defined (STM32F779xx) || defined (STM32F750xx) + /*---------------------------- LTDC configuration -------------------------------*/ + if(((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_LTDC) == (RCC_PERIPHCLK_LTDC)) + { + assert_param(IS_RCC_PLLSAIR_VALUE(PeriphClkInit->PLLSAI.PLLSAIR)); + assert_param(IS_RCC_PLLSAI_DIVR_VALUE(PeriphClkInit->PLLSAIDivR)); + + /* Read PLLSAIP and PLLSAIQ value from PLLSAICFGR register (these value are not needed for LTDC configuration) */ + tmpreg0 = ((RCC->PLLSAICFGR & RCC_PLLSAICFGR_PLLSAIQ) >> RCC_PLLSAICFGR_PLLSAIQ_Pos); + tmpreg1 = ((RCC->PLLSAICFGR & RCC_PLLSAICFGR_PLLSAIP) >> RCC_PLLSAICFGR_PLLSAIP_Pos); + + /* PLLSAI_VCO Input = PLL_SOURCE/PLLM */ + /* PLLSAI_VCO Output = PLLSAI_VCO Input * PLLSAIN */ + /* LTDC_CLK(first level) = PLLSAI_VCO Output/PLLSAIR */ + __HAL_RCC_PLLSAI_CONFIG(PeriphClkInit->PLLSAI.PLLSAIN , tmpreg1, tmpreg0, PeriphClkInit->PLLSAI.PLLSAIR); + + /* LTDC_CLK = LTDC_CLK(first level)/PLLSAIDIVR */ + __HAL_RCC_PLLSAI_PLLSAICLKDIVR_CONFIG(PeriphClkInit->PLLSAIDivR); + } +#endif /* STM32F746xx || STM32F756xx || STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx || STM32F750xx */ + + /* Enable PLLSAI Clock */ + __HAL_RCC_PLLSAI_ENABLE(); + + /* Get Start Tick*/ + tickstart = HAL_GetTick(); + + /* Wait till PLLSAI is ready */ + while(__HAL_RCC_PLLSAI_GET_FLAG() == RESET) + { + if((HAL_GetTick() - tickstart) > PLLSAI_TIMEOUT_VALUE) + { + /* return in case of Timeout detected */ + return HAL_TIMEOUT; + } + } + } + return HAL_OK; +} + +/** + * @brief Get the RCC_PeriphCLKInitTypeDef according to the internal + * RCC configuration registers. 
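+ * @note  (Editor's illustrative sketch, not part of the original ST HAL source.)
+ *        Reading the currently programmed selections back is a single call:
+ * @code
+ *   RCC_PeriphCLKInitTypeDef current;
+ *   HAL_RCCEx_GetPeriphCLKConfig(&current);
+ *   // current.Usart3ClockSelection, current.RTCClockSelection, ... now mirror the registers
+ * @endcode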
+ * @param PeriphClkInit pointer to the configured RCC_PeriphCLKInitTypeDef structure + * @retval None + */ +void HAL_RCCEx_GetPeriphCLKConfig(RCC_PeriphCLKInitTypeDef *PeriphClkInit) +{ + uint32_t tempreg = 0; + + /* Set all possible values for the extended clock type parameter------------*/ +#if defined (STM32F765xx) || defined (STM32F767xx) || defined (STM32F769xx) || defined (STM32F777xx) || defined (STM32F779xx) + PeriphClkInit->PeriphClockSelection = RCC_PERIPHCLK_I2S | RCC_PERIPHCLK_LPTIM1 |\ + RCC_PERIPHCLK_SAI1 | RCC_PERIPHCLK_SAI2 |\ + RCC_PERIPHCLK_TIM | RCC_PERIPHCLK_RTC |\ + RCC_PERIPHCLK_CEC | RCC_PERIPHCLK_I2C4 |\ + RCC_PERIPHCLK_I2C1 | RCC_PERIPHCLK_I2C2 |\ + RCC_PERIPHCLK_I2C3 | RCC_PERIPHCLK_USART1 |\ + RCC_PERIPHCLK_USART2 | RCC_PERIPHCLK_USART3 |\ + RCC_PERIPHCLK_UART4 | RCC_PERIPHCLK_UART5 |\ + RCC_PERIPHCLK_USART6 | RCC_PERIPHCLK_UART7 |\ + RCC_PERIPHCLK_UART8 | RCC_PERIPHCLK_SDMMC1 |\ + RCC_PERIPHCLK_CLK48 | RCC_PERIPHCLK_SDMMC2 |\ + RCC_PERIPHCLK_DFSDM1 | RCC_PERIPHCLK_DFSDM1_AUDIO; +#else + PeriphClkInit->PeriphClockSelection = RCC_PERIPHCLK_I2S | RCC_PERIPHCLK_LPTIM1 |\ + RCC_PERIPHCLK_SAI1 | RCC_PERIPHCLK_SAI2 |\ + RCC_PERIPHCLK_TIM | RCC_PERIPHCLK_RTC |\ + RCC_PERIPHCLK_CEC | RCC_PERIPHCLK_I2C4 |\ + RCC_PERIPHCLK_I2C1 | RCC_PERIPHCLK_I2C2 |\ + RCC_PERIPHCLK_I2C3 | RCC_PERIPHCLK_USART1 |\ + RCC_PERIPHCLK_USART2 | RCC_PERIPHCLK_USART3 |\ + RCC_PERIPHCLK_UART4 | RCC_PERIPHCLK_UART5 |\ + RCC_PERIPHCLK_USART6 | RCC_PERIPHCLK_UART7 |\ + RCC_PERIPHCLK_UART8 | RCC_PERIPHCLK_SDMMC1 |\ + RCC_PERIPHCLK_CLK48; +#endif /* STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx */ + + /* Get the PLLI2S Clock configuration -----------------------------------------------*/ + PeriphClkInit->PLLI2S.PLLI2SN = (uint32_t)((RCC->PLLI2SCFGR & RCC_PLLI2SCFGR_PLLI2SN) >> RCC_PLLI2SCFGR_PLLI2SN_Pos); + PeriphClkInit->PLLI2S.PLLI2SP = (uint32_t)((RCC->PLLI2SCFGR & RCC_PLLI2SCFGR_PLLI2SP) >> RCC_PLLI2SCFGR_PLLI2SP_Pos); + PeriphClkInit->PLLI2S.PLLI2SQ = (uint32_t)((RCC->PLLI2SCFGR & RCC_PLLI2SCFGR_PLLI2SQ) >> RCC_PLLI2SCFGR_PLLI2SQ_Pos); + PeriphClkInit->PLLI2S.PLLI2SR = (uint32_t)((RCC->PLLI2SCFGR & RCC_PLLI2SCFGR_PLLI2SR) >> RCC_PLLI2SCFGR_PLLI2SR_Pos); + + /* Get the PLLSAI Clock configuration -----------------------------------------------*/ + PeriphClkInit->PLLSAI.PLLSAIN = (uint32_t)((RCC->PLLSAICFGR & RCC_PLLSAICFGR_PLLSAIN) >> RCC_PLLSAICFGR_PLLSAIN_Pos); + PeriphClkInit->PLLSAI.PLLSAIP = (uint32_t)((RCC->PLLSAICFGR & RCC_PLLSAICFGR_PLLSAIP) >> RCC_PLLSAICFGR_PLLSAIP_Pos); + PeriphClkInit->PLLSAI.PLLSAIQ = (uint32_t)((RCC->PLLSAICFGR & RCC_PLLSAICFGR_PLLSAIQ) >> RCC_PLLSAICFGR_PLLSAIQ_Pos); + PeriphClkInit->PLLSAI.PLLSAIR = (uint32_t)((RCC->PLLSAICFGR & RCC_PLLSAICFGR_PLLSAIR) >> RCC_PLLSAICFGR_PLLSAIR_Pos); + + /* Get the PLLSAI/PLLI2S division factors -------------------------------------------*/ + PeriphClkInit->PLLI2SDivQ = (uint32_t)((RCC->DCKCFGR1 & RCC_DCKCFGR1_PLLI2SDIVQ) >> RCC_DCKCFGR1_PLLI2SDIVQ_Pos); + PeriphClkInit->PLLSAIDivQ = (uint32_t)((RCC->DCKCFGR1 & RCC_DCKCFGR1_PLLSAIDIVQ) >> RCC_DCKCFGR1_PLLSAIDIVQ_Pos); + PeriphClkInit->PLLSAIDivR = (uint32_t)((RCC->DCKCFGR1 & RCC_DCKCFGR1_PLLSAIDIVR) >> RCC_DCKCFGR1_PLLSAIDIVR_Pos); + + /* Get the SAI1 clock configuration ----------------------------------------------*/ + PeriphClkInit->Sai1ClockSelection = __HAL_RCC_GET_SAI1_SOURCE(); + + /* Get the SAI2 clock configuration ----------------------------------------------*/ + PeriphClkInit->Sai2ClockSelection = __HAL_RCC_GET_SAI2_SOURCE(); + + /* Get the I2S clock 
configuration ------------------------------------------*/ + PeriphClkInit->I2sClockSelection = __HAL_RCC_GET_I2SCLKSOURCE(); + + /* Get the I2C1 clock configuration ------------------------------------------*/ + PeriphClkInit->I2c1ClockSelection = __HAL_RCC_GET_I2C1_SOURCE(); + + /* Get the I2C2 clock configuration ------------------------------------------*/ + PeriphClkInit->I2c2ClockSelection = __HAL_RCC_GET_I2C2_SOURCE(); + + /* Get the I2C3 clock configuration ------------------------------------------*/ + PeriphClkInit->I2c3ClockSelection = __HAL_RCC_GET_I2C3_SOURCE(); + + /* Get the I2C4 clock configuration ------------------------------------------*/ + PeriphClkInit->I2c4ClockSelection = __HAL_RCC_GET_I2C4_SOURCE(); + + /* Get the USART1 clock configuration ------------------------------------------*/ + PeriphClkInit->Usart1ClockSelection = __HAL_RCC_GET_USART1_SOURCE(); + + /* Get the USART2 clock configuration ------------------------------------------*/ + PeriphClkInit->Usart2ClockSelection = __HAL_RCC_GET_USART2_SOURCE(); + + /* Get the USART3 clock configuration ------------------------------------------*/ + PeriphClkInit->Usart3ClockSelection = __HAL_RCC_GET_USART3_SOURCE(); + + /* Get the UART4 clock configuration ------------------------------------------*/ + PeriphClkInit->Uart4ClockSelection = __HAL_RCC_GET_UART4_SOURCE(); + + /* Get the UART5 clock configuration ------------------------------------------*/ + PeriphClkInit->Uart5ClockSelection = __HAL_RCC_GET_UART5_SOURCE(); + + /* Get the USART6 clock configuration ------------------------------------------*/ + PeriphClkInit->Usart6ClockSelection = __HAL_RCC_GET_USART6_SOURCE(); + + /* Get the UART7 clock configuration ------------------------------------------*/ + PeriphClkInit->Uart7ClockSelection = __HAL_RCC_GET_UART7_SOURCE(); + + /* Get the UART8 clock configuration ------------------------------------------*/ + PeriphClkInit->Uart8ClockSelection = __HAL_RCC_GET_UART8_SOURCE(); + + /* Get the LPTIM1 clock configuration ------------------------------------------*/ + PeriphClkInit->Lptim1ClockSelection = __HAL_RCC_GET_LPTIM1_SOURCE(); + + /* Get the CEC clock configuration -----------------------------------------------*/ + PeriphClkInit->CecClockSelection = __HAL_RCC_GET_CEC_SOURCE(); + + /* Get the CK48 clock configuration -----------------------------------------------*/ + PeriphClkInit->Clk48ClockSelection = __HAL_RCC_GET_CLK48_SOURCE(); + + /* Get the SDMMC1 clock configuration -----------------------------------------------*/ + PeriphClkInit->Sdmmc1ClockSelection = __HAL_RCC_GET_SDMMC1_SOURCE(); + +#if defined (STM32F765xx) || defined (STM32F767xx) || defined (STM32F769xx) || defined (STM32F777xx) || defined (STM32F779xx) + /* Get the SDMMC2 clock configuration -----------------------------------------------*/ + PeriphClkInit->Sdmmc2ClockSelection = __HAL_RCC_GET_SDMMC2_SOURCE(); + + /* Get the DFSDM clock configuration -----------------------------------------------*/ + PeriphClkInit->Dfsdm1ClockSelection = __HAL_RCC_GET_DFSDM1_SOURCE(); + + /* Get the DFSDM AUDIO clock configuration -----------------------------------------------*/ + PeriphClkInit->Dfsdm1AudioClockSelection = __HAL_RCC_GET_DFSDM1AUDIO_SOURCE(); +#endif /* STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx */ + + /* Get the RTC Clock configuration -----------------------------------------------*/ + tempreg = (RCC->CFGR & RCC_CFGR_RTCPRE); + PeriphClkInit->RTCClockSelection = (uint32_t)((tempreg) | (RCC->BDCR & RCC_BDCR_RTCSEL)); + + /* 
Get the TIM Prescaler configuration --------------------------------------------*/ + if ((RCC->DCKCFGR1 & RCC_DCKCFGR1_TIMPRE) == RESET) + { + PeriphClkInit->TIMPresSelection = RCC_TIMPRES_DESACTIVATED; + } + else + { + PeriphClkInit->TIMPresSelection = RCC_TIMPRES_ACTIVATED; + } +} +#endif /* STM32F745xx || STM32F746xx || STM32F756xx || STM32F765xx || STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx || STM32F750xx */ + +#if defined (STM32F722xx) || defined (STM32F723xx) || defined (STM32F732xx) || defined (STM32F733xx) || defined (STM32F730xx) +/** + * @brief Initializes the RCC extended peripherals clocks according to the specified + * parameters in the RCC_PeriphCLKInitTypeDef. + * @param PeriphClkInit pointer to an RCC_PeriphCLKInitTypeDef structure that + * contains the configuration information for the Extended Peripherals + * clocks(I2S, SAI, RTC, TIM, UARTs, USARTs, LTPIM, SDMMC...). + * + * @note Care must be taken when HAL_RCCEx_PeriphCLKConfig() is used to select + * the RTC clock source; in this case the Backup domain will be reset in + * order to modify the RTC Clock source, as consequence RTC registers (including + * the backup registers) are set to their reset values. + * + * @retval HAL status + */ +HAL_StatusTypeDef HAL_RCCEx_PeriphCLKConfig(RCC_PeriphCLKInitTypeDef *PeriphClkInit) +{ + uint32_t tickstart = 0; + uint32_t tmpreg0 = 0; + uint32_t plli2sused = 0; + uint32_t pllsaiused = 0; + + /* Check the parameters */ + assert_param(IS_RCC_PERIPHCLOCK(PeriphClkInit->PeriphClockSelection)); + + /*----------------------------------- I2S configuration ----------------------------------*/ + if(((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_I2S) == (RCC_PERIPHCLK_I2S)) + { + /* Check the parameters */ + assert_param(IS_RCC_I2SCLKSOURCE(PeriphClkInit->I2sClockSelection)); + + /* Configure I2S Clock source */ + __HAL_RCC_I2S_CONFIG(PeriphClkInit->I2sClockSelection); + + /* Enable the PLLI2S when it's used as clock source for I2S */ + if(PeriphClkInit->I2sClockSelection == RCC_I2SCLKSOURCE_PLLI2S) + { + plli2sused = 1; + } + } + + /*------------------------------------ SAI1 configuration --------------------------------------*/ + if(((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_SAI1) == (RCC_PERIPHCLK_SAI1)) + { + /* Check the parameters */ + assert_param(IS_RCC_SAI1CLKSOURCE(PeriphClkInit->Sai1ClockSelection)); + + /* Configure SAI1 Clock source */ + __HAL_RCC_SAI1_CONFIG(PeriphClkInit->Sai1ClockSelection); + /* Enable the PLLI2S when it's used as clock source for SAI */ + if(PeriphClkInit->Sai1ClockSelection == RCC_SAI1CLKSOURCE_PLLI2S) + { + plli2sused = 1; + } + /* Enable the PLLSAI when it's used as clock source for SAI */ + if(PeriphClkInit->Sai1ClockSelection == RCC_SAI1CLKSOURCE_PLLSAI) + { + pllsaiused = 1; + } + } + + /*------------------------------------ SAI2 configuration --------------------------------------*/ + if(((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_SAI2) == (RCC_PERIPHCLK_SAI2)) + { + /* Check the parameters */ + assert_param(IS_RCC_SAI2CLKSOURCE(PeriphClkInit->Sai2ClockSelection)); + + /* Configure SAI2 Clock source */ + __HAL_RCC_SAI2_CONFIG(PeriphClkInit->Sai2ClockSelection); + + /* Enable the PLLI2S when it's used as clock source for SAI */ + if(PeriphClkInit->Sai2ClockSelection == RCC_SAI2CLKSOURCE_PLLI2S) + { + plli2sused = 1; + } + /* Enable the PLLSAI when it's used as clock source for SAI */ + if(PeriphClkInit->Sai2ClockSelection == RCC_SAI2CLKSOURCE_PLLSAI) + { + pllsaiused = 1; + } + } + + 
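/* ---------------------------------------------------------------------------
   Editor's note (illustrative sketch, not part of the original ST HAL file):
   typical application-side usage of HAL_RCCEx_PeriphCLKConfig(). It assumes
   the system clock is already configured, that RCC_USART3CLKSOURCE_PCLK1 is
   available in the device's RCC extension header, and that Error_Handler()
   is the usual application-defined error hook from an STM32CubeMX project.

     RCC_PeriphCLKInitTypeDef PeriphClkInitStruct = {0};

     PeriphClkInitStruct.PeriphClockSelection = RCC_PERIPHCLK_USART3;
     PeriphClkInitStruct.Usart3ClockSelection = RCC_USART3CLKSOURCE_PCLK1;

     if (HAL_RCCEx_PeriphCLKConfig(&PeriphClkInitStruct) != HAL_OK)
     {
       Error_Handler();
     }

   Selecting RCC_PERIPHCLK_RTC in PeriphClockSelection triggers the
   Backup-domain handling implemented just below, so the RTC note in the
   function header applies.
   ------------------------------------------------------------------------- */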
/*------------------------------------ RTC configuration --------------------------------------*/ + if(((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_RTC) == (RCC_PERIPHCLK_RTC)) + { + /* Check for RTC Parameters used to output RTCCLK */ + assert_param(IS_RCC_RTCCLKSOURCE(PeriphClkInit->RTCClockSelection)); + + /* Enable Power Clock*/ + __HAL_RCC_PWR_CLK_ENABLE(); + + /* Enable write access to Backup domain */ + PWR->CR1 |= PWR_CR1_DBP; + + /* Get Start Tick*/ + tickstart = HAL_GetTick(); + + /* Wait for Backup domain Write protection disable */ + while((PWR->CR1 & PWR_CR1_DBP) == RESET) + { + if((HAL_GetTick() - tickstart) > RCC_DBP_TIMEOUT_VALUE) + { + return HAL_TIMEOUT; + } + } + + /* Reset the Backup domain only if the RTC Clock source selection is modified */ + tmpreg0 = (RCC->BDCR & RCC_BDCR_RTCSEL); + + if((tmpreg0 != 0x00000000U) && (tmpreg0 != (PeriphClkInit->RTCClockSelection & RCC_BDCR_RTCSEL))) + { + /* Store the content of BDCR register before the reset of Backup Domain */ + tmpreg0 = (RCC->BDCR & ~(RCC_BDCR_RTCSEL)); + + /* RTC Clock selection can be changed only if the Backup Domain is reset */ + __HAL_RCC_BACKUPRESET_FORCE(); + __HAL_RCC_BACKUPRESET_RELEASE(); + + /* Restore the Content of BDCR register */ + RCC->BDCR = tmpreg0; + + /* Wait for LSE reactivation if LSE was enable prior to Backup Domain reset */ + if (HAL_IS_BIT_SET(RCC->BDCR, RCC_BDCR_LSEON)) + { + /* Get Start Tick*/ + tickstart = HAL_GetTick(); + + /* Wait till LSE is ready */ + while(__HAL_RCC_GET_FLAG(RCC_FLAG_LSERDY) == RESET) + { + if((HAL_GetTick() - tickstart ) > RCC_LSE_TIMEOUT_VALUE) + { + return HAL_TIMEOUT; + } + } + } + } + __HAL_RCC_RTC_CONFIG(PeriphClkInit->RTCClockSelection); + } + + /*------------------------------------ TIM configuration --------------------------------------*/ + if(((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_TIM) == (RCC_PERIPHCLK_TIM)) + { + /* Check the parameters */ + assert_param(IS_RCC_TIMPRES(PeriphClkInit->TIMPresSelection)); + + /* Configure Timer Prescaler */ + __HAL_RCC_TIMCLKPRESCALER(PeriphClkInit->TIMPresSelection); + } + + /*-------------------------------------- I2C1 Configuration -----------------------------------*/ + if(((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_I2C1) == RCC_PERIPHCLK_I2C1) + { + /* Check the parameters */ + assert_param(IS_RCC_I2C1CLKSOURCE(PeriphClkInit->I2c1ClockSelection)); + + /* Configure the I2C1 clock source */ + __HAL_RCC_I2C1_CONFIG(PeriphClkInit->I2c1ClockSelection); + } + + /*-------------------------------------- I2C2 Configuration -----------------------------------*/ + if(((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_I2C2) == RCC_PERIPHCLK_I2C2) + { + /* Check the parameters */ + assert_param(IS_RCC_I2C2CLKSOURCE(PeriphClkInit->I2c2ClockSelection)); + + /* Configure the I2C2 clock source */ + __HAL_RCC_I2C2_CONFIG(PeriphClkInit->I2c2ClockSelection); + } + + /*-------------------------------------- I2C3 Configuration -----------------------------------*/ + if(((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_I2C3) == RCC_PERIPHCLK_I2C3) + { + /* Check the parameters */ + assert_param(IS_RCC_I2C3CLKSOURCE(PeriphClkInit->I2c3ClockSelection)); + + /* Configure the I2C3 clock source */ + __HAL_RCC_I2C3_CONFIG(PeriphClkInit->I2c3ClockSelection); + } + + /*-------------------------------------- USART1 Configuration -----------------------------------*/ + if(((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_USART1) == RCC_PERIPHCLK_USART1) + { + /* Check the parameters */ + 
assert_param(IS_RCC_USART1CLKSOURCE(PeriphClkInit->Usart1ClockSelection)); + + /* Configure the USART1 clock source */ + __HAL_RCC_USART1_CONFIG(PeriphClkInit->Usart1ClockSelection); + } + + /*-------------------------------------- USART2 Configuration -----------------------------------*/ + if(((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_USART2) == RCC_PERIPHCLK_USART2) + { + /* Check the parameters */ + assert_param(IS_RCC_USART2CLKSOURCE(PeriphClkInit->Usart2ClockSelection)); + + /* Configure the USART2 clock source */ + __HAL_RCC_USART2_CONFIG(PeriphClkInit->Usart2ClockSelection); + } + + /*-------------------------------------- USART3 Configuration -----------------------------------*/ + if(((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_USART3) == RCC_PERIPHCLK_USART3) + { + /* Check the parameters */ + assert_param(IS_RCC_USART3CLKSOURCE(PeriphClkInit->Usart3ClockSelection)); + + /* Configure the USART3 clock source */ + __HAL_RCC_USART3_CONFIG(PeriphClkInit->Usart3ClockSelection); + } + + /*-------------------------------------- UART4 Configuration -----------------------------------*/ + if(((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_UART4) == RCC_PERIPHCLK_UART4) + { + /* Check the parameters */ + assert_param(IS_RCC_UART4CLKSOURCE(PeriphClkInit->Uart4ClockSelection)); + + /* Configure the UART4 clock source */ + __HAL_RCC_UART4_CONFIG(PeriphClkInit->Uart4ClockSelection); + } + + /*-------------------------------------- UART5 Configuration -----------------------------------*/ + if(((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_UART5) == RCC_PERIPHCLK_UART5) + { + /* Check the parameters */ + assert_param(IS_RCC_UART5CLKSOURCE(PeriphClkInit->Uart5ClockSelection)); + + /* Configure the UART5 clock source */ + __HAL_RCC_UART5_CONFIG(PeriphClkInit->Uart5ClockSelection); + } + + /*-------------------------------------- USART6 Configuration -----------------------------------*/ + if(((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_USART6) == RCC_PERIPHCLK_USART6) + { + /* Check the parameters */ + assert_param(IS_RCC_USART6CLKSOURCE(PeriphClkInit->Usart6ClockSelection)); + + /* Configure the USART6 clock source */ + __HAL_RCC_USART6_CONFIG(PeriphClkInit->Usart6ClockSelection); + } + + /*-------------------------------------- UART7 Configuration -----------------------------------*/ + if(((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_UART7) == RCC_PERIPHCLK_UART7) + { + /* Check the parameters */ + assert_param(IS_RCC_UART7CLKSOURCE(PeriphClkInit->Uart7ClockSelection)); + + /* Configure the UART7 clock source */ + __HAL_RCC_UART7_CONFIG(PeriphClkInit->Uart7ClockSelection); + } + + /*-------------------------------------- UART8 Configuration -----------------------------------*/ + if(((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_UART8) == RCC_PERIPHCLK_UART8) + { + /* Check the parameters */ + assert_param(IS_RCC_UART8CLKSOURCE(PeriphClkInit->Uart8ClockSelection)); + + /* Configure the UART8 clock source */ + __HAL_RCC_UART8_CONFIG(PeriphClkInit->Uart8ClockSelection); + } + + /*-------------------------------------- CK48 Configuration -----------------------------------*/ + if(((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_CLK48) == RCC_PERIPHCLK_CLK48) + { + /* Check the parameters */ + assert_param(IS_RCC_CLK48SOURCE(PeriphClkInit->Clk48ClockSelection)); + + /* Configure the CLK48 source */ + __HAL_RCC_CLK48_CONFIG(PeriphClkInit->Clk48ClockSelection); + + /* Enable the PLLSAI when it's used as clock source for CK48 */ + 
if(PeriphClkInit->Clk48ClockSelection == RCC_CLK48SOURCE_PLLSAIP) + { + pllsaiused = 1; + } + } + + /*-------------------------------------- LPTIM1 Configuration -----------------------------------*/ + if(((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_LPTIM1) == RCC_PERIPHCLK_LPTIM1) + { + /* Check the parameters */ + assert_param(IS_RCC_LPTIM1CLK(PeriphClkInit->Lptim1ClockSelection)); + + /* Configure the LTPIM1 clock source */ + __HAL_RCC_LPTIM1_CONFIG(PeriphClkInit->Lptim1ClockSelection); + } + + /*------------------------------------- SDMMC1 Configuration ------------------------------------*/ + if(((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_SDMMC1) == RCC_PERIPHCLK_SDMMC1) + { + /* Check the parameters */ + assert_param(IS_RCC_SDMMC1CLKSOURCE(PeriphClkInit->Sdmmc1ClockSelection)); + + /* Configure the SDMMC1 clock source */ + __HAL_RCC_SDMMC1_CONFIG(PeriphClkInit->Sdmmc1ClockSelection); + } + + /*------------------------------------- SDMMC2 Configuration ------------------------------------*/ + if(((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_SDMMC2) == RCC_PERIPHCLK_SDMMC2) + { + /* Check the parameters */ + assert_param(IS_RCC_SDMMC2CLKSOURCE(PeriphClkInit->Sdmmc2ClockSelection)); + + /* Configure the SDMMC2 clock source */ + __HAL_RCC_SDMMC2_CONFIG(PeriphClkInit->Sdmmc2ClockSelection); + } + + /*-------------------------------------- PLLI2S Configuration ---------------------------------*/ + /* PLLI2S is configured when a peripheral will use it as source clock : SAI1, SAI2 or I2S */ + if((plli2sused == 1) || ((PeriphClkInit->PeriphClockSelection & RCC_PERIPHCLK_PLLI2S) == RCC_PERIPHCLK_PLLI2S)) + { + /* Disable the PLLI2S */ + __HAL_RCC_PLLI2S_DISABLE(); + + /* Get Start Tick*/ + tickstart = HAL_GetTick(); + + /* Wait till PLLI2S is disabled */ + while(__HAL_RCC_GET_FLAG(RCC_FLAG_PLLI2SRDY) != RESET) + { + if((HAL_GetTick() - tickstart) > PLLI2S_TIMEOUT_VALUE) + { + /* return in case of Timeout detected */ + return HAL_TIMEOUT; + } + } + + /* check for common PLLI2S Parameters */ + assert_param(IS_RCC_PLLI2SN_VALUE(PeriphClkInit->PLLI2S.PLLI2SN)); + + /*----------------- In Case of PLLI2S is selected as source clock for I2S -------------------*/ + if(((((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_I2S) == RCC_PERIPHCLK_I2S) && (PeriphClkInit->I2sClockSelection == RCC_I2SCLKSOURCE_PLLI2S))) + { + /* check for Parameters */ + assert_param(IS_RCC_PLLI2SR_VALUE(PeriphClkInit->PLLI2S.PLLI2SR)); + + /* Read PLLI2SQ value from PLLI2SCFGR register (this value is not needed for I2S configuration) */ + tmpreg0 = ((RCC->PLLI2SCFGR & RCC_PLLI2SCFGR_PLLI2SQ) >> RCC_PLLI2SCFGR_PLLI2SQ_Pos); + /* Configure the PLLI2S division factors */ + /* PLLI2S_VCO = f(VCO clock) = f(PLLI2S clock input) x (PLLI2SN/PLLM) */ + /* I2SCLK = f(PLLI2S clock output) = f(VCO clock) / PLLI2SR */ + __HAL_RCC_PLLI2S_CONFIG(PeriphClkInit->PLLI2S.PLLI2SN , tmpreg0, PeriphClkInit->PLLI2S.PLLI2SR); + } + + /*----------------- In Case of PLLI2S is selected as source clock for SAI -------------------*/ + if(((((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_SAI1) == RCC_PERIPHCLK_SAI1) && (PeriphClkInit->Sai1ClockSelection == RCC_SAI1CLKSOURCE_PLLI2S)) || + ((((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_SAI2) == RCC_PERIPHCLK_SAI2) && (PeriphClkInit->Sai2ClockSelection == RCC_SAI2CLKSOURCE_PLLI2S))) + { + /* Check for PLLI2S Parameters */ + assert_param(IS_RCC_PLLI2SQ_VALUE(PeriphClkInit->PLLI2S.PLLI2SQ)); + /* Check for PLLI2S/DIVQ parameters */ + 
assert_param(IS_RCC_PLLI2S_DIVQ_VALUE(PeriphClkInit->PLLI2SDivQ)); + + /* Read PLLI2SP and PLLI2SR values from PLLI2SCFGR register (this value is not needed for SAI configuration) */ + tmpreg0 = ((RCC->PLLI2SCFGR & RCC_PLLI2SCFGR_PLLI2SR) >> RCC_PLLI2SCFGR_PLLI2SR_Pos); + /* Configure the PLLI2S division factors */ + /* PLLI2S_VCO Input = PLL_SOURCE/PLLM */ + /* PLLI2S_VCO Output = PLLI2S_VCO Input * PLLI2SN */ + /* SAI_CLK(first level) = PLLI2S_VCO Output/PLLI2SQ */ + __HAL_RCC_PLLI2S_CONFIG(PeriphClkInit->PLLI2S.PLLI2SN, PeriphClkInit->PLLI2S.PLLI2SQ, tmpreg0); + + /* SAI_CLK_x = SAI_CLK(first level)/PLLI2SDIVQ */ + __HAL_RCC_PLLI2S_PLLSAICLKDIVQ_CONFIG(PeriphClkInit->PLLI2SDivQ); + } + + /*----------------- In Case of PLLI2S is just selected -----------------*/ + if((PeriphClkInit->PeriphClockSelection & RCC_PERIPHCLK_PLLI2S) == RCC_PERIPHCLK_PLLI2S) + { + /* Check for Parameters */ + assert_param(IS_RCC_PLLI2SR_VALUE(PeriphClkInit->PLLI2S.PLLI2SR)); + assert_param(IS_RCC_PLLI2SQ_VALUE(PeriphClkInit->PLLI2S.PLLI2SQ)); + + /* Configure the PLLI2S division factors */ + /* PLLI2S_VCO = f(VCO clock) = f(PLLI2S clock input) x (PLLI2SN/PLLI2SM) */ + __HAL_RCC_PLLI2S_CONFIG(PeriphClkInit->PLLI2S.PLLI2SN , PeriphClkInit->PLLI2S.PLLI2SQ, PeriphClkInit->PLLI2S.PLLI2SR); + } + + /* Enable the PLLI2S */ + __HAL_RCC_PLLI2S_ENABLE(); + + /* Get Start Tick*/ + tickstart = HAL_GetTick(); + + /* Wait till PLLI2S is ready */ + while(__HAL_RCC_GET_FLAG(RCC_FLAG_PLLI2SRDY) == RESET) + { + if((HAL_GetTick() - tickstart) > PLLI2S_TIMEOUT_VALUE) + { + /* return in case of Timeout detected */ + return HAL_TIMEOUT; + } + } + } + + /*-------------------------------------- PLLSAI Configuration ---------------------------------*/ + /* PLLSAI is configured when a peripheral will use it as source clock : SAI1, SAI2, LTDC or CK48 */ + if(pllsaiused == 1) + { + /* Disable PLLSAI Clock */ + __HAL_RCC_PLLSAI_DISABLE(); + + /* Get Start Tick*/ + tickstart = HAL_GetTick(); + + /* Wait till PLLSAI is disabled */ + while(__HAL_RCC_PLLSAI_GET_FLAG() != RESET) + { + if((HAL_GetTick() - tickstart) > PLLSAI_TIMEOUT_VALUE) + { + /* return in case of Timeout detected */ + return HAL_TIMEOUT; + } + } + + /* Check the PLLSAI division factors */ + assert_param(IS_RCC_PLLSAIN_VALUE(PeriphClkInit->PLLSAI.PLLSAIN)); + + /*----------------- In Case of PLLSAI is selected as source clock for SAI -------------------*/ + if(((((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_SAI1) == RCC_PERIPHCLK_SAI1) && (PeriphClkInit->Sai1ClockSelection == RCC_SAI1CLKSOURCE_PLLSAI)) ||\ + ((((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_SAI2) == RCC_PERIPHCLK_SAI2) && (PeriphClkInit->Sai2ClockSelection == RCC_SAI2CLKSOURCE_PLLSAI))) + { + /* check for PLLSAIQ Parameter */ + assert_param(IS_RCC_PLLSAIQ_VALUE(PeriphClkInit->PLLSAI.PLLSAIQ)); + /* check for PLLSAI/DIVQ Parameter */ + assert_param(IS_RCC_PLLSAI_DIVQ_VALUE(PeriphClkInit->PLLSAIDivQ)); + + /* Read PLLSAIP value from PLLSAICFGR register (this value is not needed for SAI configuration) */ + tmpreg0 = ((RCC->PLLSAICFGR & RCC_PLLSAICFGR_PLLSAIP) >> RCC_PLLSAICFGR_PLLSAIP_Pos); + /* PLLSAI_VCO Input = PLL_SOURCE/PLLM */ + /* PLLSAI_VCO Output = PLLSAI_VCO Input * PLLSAIN */ + /* SAI_CLK(first level) = PLLSAI_VCO Output/PLLSAIQ */ + __HAL_RCC_PLLSAI_CONFIG(PeriphClkInit->PLLSAI.PLLSAIN , tmpreg0, PeriphClkInit->PLLSAI.PLLSAIQ); + + /* SAI_CLK_x = SAI_CLK(first level)/PLLSAIDIVQ */ + __HAL_RCC_PLLSAI_PLLSAICLKDIVQ_CONFIG(PeriphClkInit->PLLSAIDivQ); + } + + /*----------------- In Case of 
PLLSAI is selected as source clock for CLK48 -------------------*/ + /* In Case of PLLI2S is selected as source clock for CK48 */ + if((((PeriphClkInit->PeriphClockSelection) & RCC_PERIPHCLK_CLK48) == RCC_PERIPHCLK_CLK48) && (PeriphClkInit->Clk48ClockSelection == RCC_CLK48SOURCE_PLLSAIP)) + { + /* check for Parameters */ + assert_param(IS_RCC_PLLSAIP_VALUE(PeriphClkInit->PLLSAI.PLLSAIP)); + /* Read PLLSAIQ and PLLSAIR value from PLLSAICFGR register (this value is not needed for CK48 configuration) */ + tmpreg0 = ((RCC->PLLSAICFGR & RCC_PLLSAICFGR_PLLSAIQ) >> RCC_PLLSAICFGR_PLLSAIQ_Pos); + + /* Configure the PLLSAI division factors */ + /* PLLSAI_VCO = f(VCO clock) = f(PLLSAI clock input) x (PLLI2SN/PLLM) */ + /* 48CLK = f(PLLSAI clock output) = f(VCO clock) / PLLSAIP */ + __HAL_RCC_PLLSAI_CONFIG(PeriphClkInit->PLLSAI.PLLSAIN , PeriphClkInit->PLLSAI.PLLSAIP, tmpreg0); + } + + /* Enable PLLSAI Clock */ + __HAL_RCC_PLLSAI_ENABLE(); + + /* Get Start Tick*/ + tickstart = HAL_GetTick(); + + /* Wait till PLLSAI is ready */ + while(__HAL_RCC_PLLSAI_GET_FLAG() == RESET) + { + if((HAL_GetTick() - tickstart) > PLLSAI_TIMEOUT_VALUE) + { + /* return in case of Timeout detected */ + return HAL_TIMEOUT; + } + } + } + return HAL_OK; +} + +/** + * @brief Get the RCC_PeriphCLKInitTypeDef according to the internal + * RCC configuration registers. + * @param PeriphClkInit pointer to the configured RCC_PeriphCLKInitTypeDef structure + * @retval None + */ +void HAL_RCCEx_GetPeriphCLKConfig(RCC_PeriphCLKInitTypeDef *PeriphClkInit) +{ + uint32_t tempreg = 0; + + /* Set all possible values for the extended clock type parameter------------*/ + PeriphClkInit->PeriphClockSelection = RCC_PERIPHCLK_I2S | RCC_PERIPHCLK_LPTIM1 |\ + RCC_PERIPHCLK_SAI1 | RCC_PERIPHCLK_SAI2 |\ + RCC_PERIPHCLK_TIM | RCC_PERIPHCLK_RTC |\ + RCC_PERIPHCLK_I2C1 | RCC_PERIPHCLK_I2C2 |\ + RCC_PERIPHCLK_I2C3 | RCC_PERIPHCLK_USART1 |\ + RCC_PERIPHCLK_USART2 | RCC_PERIPHCLK_USART3 |\ + RCC_PERIPHCLK_UART4 | RCC_PERIPHCLK_UART5 |\ + RCC_PERIPHCLK_USART6 | RCC_PERIPHCLK_UART7 |\ + RCC_PERIPHCLK_UART8 | RCC_PERIPHCLK_SDMMC1 |\ + RCC_PERIPHCLK_CLK48 | RCC_PERIPHCLK_SDMMC2; + + /* Get the PLLI2S Clock configuration -----------------------------------------------*/ + PeriphClkInit->PLLI2S.PLLI2SN = (uint32_t)((RCC->PLLI2SCFGR & RCC_PLLI2SCFGR_PLLI2SN) >> RCC_PLLI2SCFGR_PLLI2SN_Pos); + PeriphClkInit->PLLI2S.PLLI2SQ = (uint32_t)((RCC->PLLI2SCFGR & RCC_PLLI2SCFGR_PLLI2SQ) >> RCC_PLLI2SCFGR_PLLI2SQ_Pos); + PeriphClkInit->PLLI2S.PLLI2SR = (uint32_t)((RCC->PLLI2SCFGR & RCC_PLLI2SCFGR_PLLI2SR) >> RCC_PLLI2SCFGR_PLLI2SR_Pos); + + /* Get the PLLSAI Clock configuration -----------------------------------------------*/ + PeriphClkInit->PLLSAI.PLLSAIN = (uint32_t)((RCC->PLLSAICFGR & RCC_PLLSAICFGR_PLLSAIN) >> RCC_PLLSAICFGR_PLLSAIN_Pos); + PeriphClkInit->PLLSAI.PLLSAIP = (uint32_t)((RCC->PLLSAICFGR & RCC_PLLSAICFGR_PLLSAIP) >> RCC_PLLSAICFGR_PLLSAIP_Pos); + PeriphClkInit->PLLSAI.PLLSAIQ = (uint32_t)((RCC->PLLSAICFGR & RCC_PLLSAICFGR_PLLSAIQ) >> RCC_PLLSAICFGR_PLLSAIQ_Pos); + + /* Get the PLLSAI/PLLI2S division factors -------------------------------------------*/ + PeriphClkInit->PLLI2SDivQ = (uint32_t)((RCC->DCKCFGR1 & RCC_DCKCFGR1_PLLI2SDIVQ) >> RCC_DCKCFGR1_PLLI2SDIVQ_Pos); + PeriphClkInit->PLLSAIDivQ = (uint32_t)((RCC->DCKCFGR1 & RCC_DCKCFGR1_PLLSAIDIVQ) >> RCC_DCKCFGR1_PLLSAIDIVQ_Pos); + + /* Get the SAI1 clock configuration ----------------------------------------------*/ + PeriphClkInit->Sai1ClockSelection = __HAL_RCC_GET_SAI1_SOURCE(); + + /* Get the SAI2 
clock configuration ----------------------------------------------*/ + PeriphClkInit->Sai2ClockSelection = __HAL_RCC_GET_SAI2_SOURCE(); + + /* Get the I2S clock configuration ------------------------------------------*/ + PeriphClkInit->I2sClockSelection = __HAL_RCC_GET_I2SCLKSOURCE(); + + /* Get the I2C1 clock configuration ------------------------------------------*/ + PeriphClkInit->I2c1ClockSelection = __HAL_RCC_GET_I2C1_SOURCE(); + + /* Get the I2C2 clock configuration ------------------------------------------*/ + PeriphClkInit->I2c2ClockSelection = __HAL_RCC_GET_I2C2_SOURCE(); + + /* Get the I2C3 clock configuration ------------------------------------------*/ + PeriphClkInit->I2c3ClockSelection = __HAL_RCC_GET_I2C3_SOURCE(); + + /* Get the USART1 clock configuration ------------------------------------------*/ + PeriphClkInit->Usart1ClockSelection = __HAL_RCC_GET_USART1_SOURCE(); + + /* Get the USART2 clock configuration ------------------------------------------*/ + PeriphClkInit->Usart2ClockSelection = __HAL_RCC_GET_USART2_SOURCE(); + + /* Get the USART3 clock configuration ------------------------------------------*/ + PeriphClkInit->Usart3ClockSelection = __HAL_RCC_GET_USART3_SOURCE(); + + /* Get the UART4 clock configuration ------------------------------------------*/ + PeriphClkInit->Uart4ClockSelection = __HAL_RCC_GET_UART4_SOURCE(); + + /* Get the UART5 clock configuration ------------------------------------------*/ + PeriphClkInit->Uart5ClockSelection = __HAL_RCC_GET_UART5_SOURCE(); + + /* Get the USART6 clock configuration ------------------------------------------*/ + PeriphClkInit->Usart6ClockSelection = __HAL_RCC_GET_USART6_SOURCE(); + + /* Get the UART7 clock configuration ------------------------------------------*/ + PeriphClkInit->Uart7ClockSelection = __HAL_RCC_GET_UART7_SOURCE(); + + /* Get the UART8 clock configuration ------------------------------------------*/ + PeriphClkInit->Uart8ClockSelection = __HAL_RCC_GET_UART8_SOURCE(); + + /* Get the LPTIM1 clock configuration ------------------------------------------*/ + PeriphClkInit->Lptim1ClockSelection = __HAL_RCC_GET_LPTIM1_SOURCE(); + + /* Get the CK48 clock configuration -----------------------------------------------*/ + PeriphClkInit->Clk48ClockSelection = __HAL_RCC_GET_CLK48_SOURCE(); + + /* Get the SDMMC1 clock configuration -----------------------------------------------*/ + PeriphClkInit->Sdmmc1ClockSelection = __HAL_RCC_GET_SDMMC1_SOURCE(); + + /* Get the SDMMC2 clock configuration -----------------------------------------------*/ + PeriphClkInit->Sdmmc2ClockSelection = __HAL_RCC_GET_SDMMC2_SOURCE(); + + /* Get the RTC Clock configuration -----------------------------------------------*/ + tempreg = (RCC->CFGR & RCC_CFGR_RTCPRE); + PeriphClkInit->RTCClockSelection = (uint32_t)((tempreg) | (RCC->BDCR & RCC_BDCR_RTCSEL)); + + /* Get the TIM Prescaler configuration --------------------------------------------*/ + if ((RCC->DCKCFGR1 & RCC_DCKCFGR1_TIMPRE) == RESET) + { + PeriphClkInit->TIMPresSelection = RCC_TIMPRES_DESACTIVATED; + } + else + { + PeriphClkInit->TIMPresSelection = RCC_TIMPRES_ACTIVATED; + } +} +#endif /* STM32F722xx || STM32F723xx || STM32F732xx || STM32F733xx || STM32F730xx */ + +/** + * @brief Return the peripheral clock frequency for a given peripheral(SAI..) 
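 *         Editor's illustrative usage (not part of the original header
 *         comment); the implementation below computes the value in Hz:
 *           uint32_t sai1_clk = HAL_RCCEx_GetPeriphCLKFreq(RCC_PERIPHCLK_SAI1);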
+ * @note Return 0 if peripheral clock identifier not managed by this API + * @param PeriphClk Peripheral clock identifier + * This parameter can be one of the following values: + * @arg RCC_PERIPHCLK_SAI1: SAI1 peripheral clock + * @arg RCC_PERIPHCLK_SAI2: SAI2 peripheral clock + * @retval Frequency in KHz + */ +uint32_t HAL_RCCEx_GetPeriphCLKFreq(uint32_t PeriphClk) +{ + uint32_t tmpreg = 0; + /* This variable is used to store the SAI clock frequency (value in Hz) */ + uint32_t frequency = 0; + /* This variable is used to store the VCO Input (value in Hz) */ + uint32_t vcoinput = 0; + /* This variable is used to store the SAI clock source */ + uint32_t saiclocksource = 0; + + if (PeriphClk == RCC_PERIPHCLK_SAI1) + { + saiclocksource = RCC->DCKCFGR1; + saiclocksource &= RCC_DCKCFGR1_SAI1SEL; + switch (saiclocksource) + { + case 0: /* PLLSAI is the clock source for SAI1 */ + { + /* Configure the PLLSAI division factor */ + /* PLLSAI_VCO Input = PLL_SOURCE/PLLM */ + if((RCC->PLLCFGR & RCC_PLLCFGR_PLLSRC) == RCC_PLLSOURCE_HSI) + { + /* In Case the PLL Source is HSI (Internal Clock) */ + vcoinput = (HSI_VALUE / (uint32_t)(RCC->PLLCFGR & RCC_PLLCFGR_PLLM)); + } + else + { + /* In Case the PLL Source is HSE (External Clock) */ + vcoinput = ((HSE_VALUE / (uint32_t)(RCC->PLLCFGR & RCC_PLLCFGR_PLLM))); + } + /* PLLSAI_VCO Output = PLLSAI_VCO Input * PLLSAIN */ + /* SAI_CLK(first level) = PLLSAI_VCO Output/PLLSAIQ */ + tmpreg = (RCC->PLLSAICFGR & RCC_PLLSAICFGR_PLLSAIQ) >> 24; + frequency = (vcoinput * ((RCC->PLLSAICFGR & RCC_PLLSAICFGR_PLLSAIN) >> 6))/(tmpreg); + + /* SAI_CLK_x = SAI_CLK(first level)/PLLSAIDIVQ */ + tmpreg = (((RCC->DCKCFGR1 & RCC_DCKCFGR1_PLLSAIDIVQ) >> 8) + 1); + frequency = frequency/(tmpreg); + break; + } + case RCC_DCKCFGR1_SAI1SEL_0: /* PLLI2S is the clock source for SAI1 */ + { + /* Configure the PLLI2S division factor */ + /* PLLI2S_VCO Input = PLL_SOURCE/PLLM */ + if((RCC->PLLCFGR & RCC_PLLCFGR_PLLSRC) == RCC_PLLSOURCE_HSI) + { + /* In Case the PLL Source is HSI (Internal Clock) */ + vcoinput = (HSI_VALUE / (uint32_t)(RCC->PLLCFGR & RCC_PLLCFGR_PLLM)); + } + else + { + /* In Case the PLL Source is HSE (External Clock) */ + vcoinput = ((HSE_VALUE / (uint32_t)(RCC->PLLCFGR & RCC_PLLCFGR_PLLM))); + } + + /* PLLI2S_VCO Output = PLLI2S_VCO Input * PLLI2SN */ + /* SAI_CLK(first level) = PLLI2S_VCO Output/PLLI2SQ */ + tmpreg = (RCC->PLLI2SCFGR & RCC_PLLI2SCFGR_PLLI2SQ) >> 24; + frequency = (vcoinput * ((RCC->PLLI2SCFGR & RCC_PLLI2SCFGR_PLLI2SN) >> 6))/(tmpreg); + + /* SAI_CLK_x = SAI_CLK(first level)/PLLI2SDIVQ */ + tmpreg = ((RCC->DCKCFGR1 & RCC_DCKCFGR1_PLLI2SDIVQ) + 1); + frequency = frequency/(tmpreg); + break; + } + case RCC_DCKCFGR1_SAI1SEL_1: /* External clock is the clock source for SAI1 */ + { + frequency = EXTERNAL_CLOCK_VALUE; + break; + } +#if defined (STM32F765xx) || defined (STM32F767xx) || defined (STM32F769xx) || defined (STM32F777xx) || defined (STM32F779xx) + case RCC_DCKCFGR1_SAI1SEL: /* HSI or HSE is the clock source for SAI*/ + { + if((RCC->PLLCFGR & RCC_PLLCFGR_PLLSRC) == RCC_PLLSOURCE_HSI) + { + /* In Case the main PLL Source is HSI */ + frequency = HSI_VALUE; + } + else + { + /* In Case the main PLL Source is HSE */ + frequency = HSE_VALUE; + } + break; + } +#endif /* STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx */ + default : + { + break; + } + } + } + + if (PeriphClk == RCC_PERIPHCLK_SAI2) + { + saiclocksource = RCC->DCKCFGR1; + saiclocksource &= RCC_DCKCFGR1_SAI2SEL; + switch (saiclocksource) + { + case 0: /* PLLSAI is the clock source for 
SAI*/ + { + /* Configure the PLLSAI division factor */ + /* PLLSAI_VCO Input = PLL_SOURCE/PLLM */ + if((RCC->PLLCFGR & RCC_PLLCFGR_PLLSRC) == RCC_PLLSOURCE_HSI) + { + /* In Case the PLL Source is HSI (Internal Clock) */ + vcoinput = (HSI_VALUE / (uint32_t)(RCC->PLLCFGR & RCC_PLLCFGR_PLLM)); + } + else + { + /* In Case the PLL Source is HSE (External Clock) */ + vcoinput = ((HSE_VALUE / (uint32_t)(RCC->PLLCFGR & RCC_PLLCFGR_PLLM))); + } + /* PLLSAI_VCO Output = PLLSAI_VCO Input * PLLSAIN */ + /* SAI_CLK(first level) = PLLSAI_VCO Output/PLLSAIQ */ + tmpreg = (RCC->PLLSAICFGR & RCC_PLLSAICFGR_PLLSAIQ) >> 24; + frequency = (vcoinput * ((RCC->PLLSAICFGR & RCC_PLLSAICFGR_PLLSAIN) >> 6))/(tmpreg); + + /* SAI_CLK_x = SAI_CLK(first level)/PLLSAIDIVQ */ + tmpreg = (((RCC->DCKCFGR1 & RCC_DCKCFGR1_PLLSAIDIVQ) >> 8) + 1); + frequency = frequency/(tmpreg); + break; + } + case RCC_DCKCFGR1_SAI2SEL_0: /* PLLI2S is the clock source for SAI2 */ + { + /* Configure the PLLI2S division factor */ + /* PLLI2S_VCO Input = PLL_SOURCE/PLLM */ + if((RCC->PLLCFGR & RCC_PLLCFGR_PLLSRC) == RCC_PLLSOURCE_HSI) + { + /* In Case the PLL Source is HSI (Internal Clock) */ + vcoinput = (HSI_VALUE / (uint32_t)(RCC->PLLCFGR & RCC_PLLCFGR_PLLM)); + } + else + { + /* In Case the PLL Source is HSE (External Clock) */ + vcoinput = ((HSE_VALUE / (uint32_t)(RCC->PLLCFGR & RCC_PLLCFGR_PLLM))); + } + + /* PLLI2S_VCO Output = PLLI2S_VCO Input * PLLI2SN */ + /* SAI_CLK(first level) = PLLI2S_VCO Output/PLLI2SQ */ + tmpreg = (RCC->PLLI2SCFGR & RCC_PLLI2SCFGR_PLLI2SQ) >> 24; + frequency = (vcoinput * ((RCC->PLLI2SCFGR & RCC_PLLI2SCFGR_PLLI2SN) >> 6))/(tmpreg); + + /* SAI_CLK_x = SAI_CLK(first level)/PLLI2SDIVQ */ + tmpreg = ((RCC->DCKCFGR1 & RCC_DCKCFGR1_PLLI2SDIVQ) + 1); + frequency = frequency/(tmpreg); + break; + } + case RCC_DCKCFGR1_SAI2SEL_1: /* External clock is the clock source for SAI2 */ + { + frequency = EXTERNAL_CLOCK_VALUE; + break; + } +#if defined (STM32F765xx) || defined (STM32F767xx) || defined (STM32F769xx) || defined (STM32F777xx) || defined (STM32F779xx) + case RCC_DCKCFGR1_SAI2SEL: /* HSI or HSE is the clock source for SAI2 */ + { + if((RCC->PLLCFGR & RCC_PLLCFGR_PLLSRC) == RCC_PLLSOURCE_HSI) + { + /* In Case the main PLL Source is HSI */ + frequency = HSI_VALUE; + } + else + { + /* In Case the main PLL Source is HSE */ + frequency = HSE_VALUE; + } + break; + } +#endif /* STM32F767xx || STM32F769xx || STM32F777xx || STM32F779xx */ + default : + { + break; + } + } + } + + return frequency; +} + +/** + * @} + */ + +/** @defgroup RCCEx_Exported_Functions_Group2 Extended Clock management functions + * @brief Extended Clock management functions + * +@verbatim + =============================================================================== + ##### Extended clock management functions ##### + =============================================================================== + [..] + This subsection provides a set of functions allowing to control the + activation or deactivation of PLLI2S, PLLSAI. +@endverbatim + * @{ + */ + +/** + * @brief Enable PLLI2S. 
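 *         Editor's illustrative sketch (values are placeholders; on devices
 *         that expose a PLLI2SP divider that field must be set as well, and
 *         Error_Handler() is an assumed application hook):
 *           RCC_PLLI2SInitTypeDef plli2s = {0};
 *           plli2s.PLLI2SN = 192;
 *           plli2s.PLLI2SQ = 2;
 *           plli2s.PLLI2SR = 2;
 *           if (HAL_RCCEx_EnablePLLI2S(&plli2s) != HAL_OK) { Error_Handler(); }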
+ * @param PLLI2SInit pointer to an RCC_PLLI2SInitTypeDef structure that + * contains the configuration information for the PLLI2S + * @retval HAL status + */ +HAL_StatusTypeDef HAL_RCCEx_EnablePLLI2S(RCC_PLLI2SInitTypeDef *PLLI2SInit) +{ + uint32_t tickstart; + + /* Check for parameters */ + assert_param(IS_RCC_PLLI2SN_VALUE(PLLI2SInit->PLLI2SN)); + assert_param(IS_RCC_PLLI2SR_VALUE(PLLI2SInit->PLLI2SR)); + assert_param(IS_RCC_PLLI2SQ_VALUE(PLLI2SInit->PLLI2SQ)); +#if defined(RCC_PLLI2SCFGR_PLLI2SP) + assert_param(IS_RCC_PLLI2SP_VALUE(PLLI2SInit->PLLI2SP)); +#endif /* RCC_PLLI2SCFGR_PLLI2SP */ + + /* Disable the PLLI2S */ + __HAL_RCC_PLLI2S_DISABLE(); + + /* Wait till PLLI2S is disabled */ + tickstart = HAL_GetTick(); + while(__HAL_RCC_GET_FLAG(RCC_FLAG_PLLI2SRDY) != RESET) + { + if((HAL_GetTick() - tickstart ) > PLLI2S_TIMEOUT_VALUE) + { + /* return in case of Timeout detected */ + return HAL_TIMEOUT; + } + } + + /* Configure the PLLI2S division factors */ +#if defined (STM32F722xx) || defined (STM32F723xx) || defined (STM32F732xx) || defined (STM32F733xx) || defined (STM32F730xx) + /* PLLI2S_VCO = f(VCO clock) = f(PLLI2S clock input) * PLLI2SN */ + /* I2SQCLK = PLLI2S_VCO / PLLI2SQ */ + /* I2SRCLK = PLLI2S_VCO / PLLI2SR */ + __HAL_RCC_PLLI2S_CONFIG(PLLI2SInit->PLLI2SN, PLLI2SInit->PLLI2SQ, PLLI2SInit->PLLI2SR); +#else + /* PLLI2S_VCO = f(VCO clock) = f(PLLI2S clock input) * PLLI2SN */ + /* I2SPCLK = PLLI2S_VCO / PLLI2SP */ + /* I2SQCLK = PLLI2S_VCO / PLLI2SQ */ + /* I2SRCLK = PLLI2S_VCO / PLLI2SR */ + __HAL_RCC_PLLI2S_CONFIG(PLLI2SInit->PLLI2SN, PLLI2SInit->PLLI2SP, PLLI2SInit->PLLI2SQ, PLLI2SInit->PLLI2SR); +#endif /* STM32F722xx || STM32F723xx || STM32F732xx || STM32F733xx || STM32F730xx */ + + /* Enable the PLLI2S */ + __HAL_RCC_PLLI2S_ENABLE(); + + /* Wait till PLLI2S is ready */ + tickstart = HAL_GetTick(); + while(__HAL_RCC_GET_FLAG(RCC_FLAG_PLLI2SRDY) == RESET) + { + if((HAL_GetTick() - tickstart ) > PLLI2S_TIMEOUT_VALUE) + { + /* return in case of Timeout detected */ + return HAL_TIMEOUT; + } + } + + return HAL_OK; +} + +/** + * @brief Disable PLLI2S. + * @retval HAL status + */ +HAL_StatusTypeDef HAL_RCCEx_DisablePLLI2S(void) +{ + uint32_t tickstart; + + /* Disable the PLLI2S */ + __HAL_RCC_PLLI2S_DISABLE(); + + /* Wait till PLLI2S is disabled */ + tickstart = HAL_GetTick(); + while(READ_BIT(RCC->CR, RCC_CR_PLLI2SRDY) != RESET) + { + if((HAL_GetTick() - tickstart) > PLLI2S_TIMEOUT_VALUE) + { + /* return in case of Timeout detected */ + return HAL_TIMEOUT; + } + } + + return HAL_OK; +} + +/** + * @brief Enable PLLSAI. 
+ * @param PLLSAIInit pointer to an RCC_PLLSAIInitTypeDef structure that + * contains the configuration information for the PLLSAI + * @retval HAL status + */ +HAL_StatusTypeDef HAL_RCCEx_EnablePLLSAI(RCC_PLLSAIInitTypeDef *PLLSAIInit) +{ + uint32_t tickstart; + + /* Check for parameters */ + assert_param(IS_RCC_PLLSAIN_VALUE(PLLSAIInit->PLLSAIN)); + assert_param(IS_RCC_PLLSAIQ_VALUE(PLLSAIInit->PLLSAIQ)); + assert_param(IS_RCC_PLLSAIP_VALUE(PLLSAIInit->PLLSAIP)); +#if defined(RCC_PLLSAICFGR_PLLSAIR) + assert_param(IS_RCC_PLLSAIR_VALUE(PLLSAIInit->PLLSAIR)); +#endif /* RCC_PLLSAICFGR_PLLSAIR */ + + /* Disable the PLLSAI */ + __HAL_RCC_PLLSAI_DISABLE(); + + /* Wait till PLLSAI is disabled */ + tickstart = HAL_GetTick(); + while(__HAL_RCC_PLLSAI_GET_FLAG() != RESET) + { + if((HAL_GetTick() - tickstart ) > PLLSAI_TIMEOUT_VALUE) + { + /* return in case of Timeout detected */ + return HAL_TIMEOUT; + } + } + + /* Configure the PLLSAI division factors */ +#if defined (STM32F722xx) || defined (STM32F723xx) || defined (STM32F732xx) || defined (STM32F733xx) || defined (STM32F730xx) + /* PLLSAI_VCO = f(VCO clock) = f(PLLSAI clock input) * PLLSAIN */ + /* SAIPCLK = PLLSAI_VCO / PLLSAIP */ + /* SAIQCLK = PLLSAI_VCO / PLLSAIQ */ + __HAL_RCC_PLLSAI_CONFIG(PLLSAIInit->PLLSAIN, PLLSAIInit->PLLSAIP, PLLSAIInit->PLLSAIQ); +#else + /* PLLSAI_VCO = f(VCO clock) = f(PLLSAI clock input) * PLLSAIN */ + /* SAIPCLK = PLLSAI_VCO / PLLSAIP */ + /* SAIQCLK = PLLSAI_VCO / PLLSAIQ */ + /* SAIRCLK = PLLSAI_VCO / PLLSAIR */ + __HAL_RCC_PLLSAI_CONFIG(PLLSAIInit->PLLSAIN, PLLSAIInit->PLLSAIP, \ + PLLSAIInit->PLLSAIQ, PLLSAIInit->PLLSAIR); +#endif /* STM32F722xx || STM32F723xx || STM32F732xx || STM32F733xx || STM32F730xx */ + + /* Enable the PLLSAI */ + __HAL_RCC_PLLSAI_ENABLE(); + + /* Wait till PLLSAI is ready */ + tickstart = HAL_GetTick(); + while(__HAL_RCC_PLLSAI_GET_FLAG() == RESET) + { + if((HAL_GetTick() - tickstart ) > PLLSAI_TIMEOUT_VALUE) + { + /* return in case of Timeout detected */ + return HAL_TIMEOUT; + } + } + + return HAL_OK; +} + +/** + * @brief Disable PLLSAI. + * @retval HAL status + */ +HAL_StatusTypeDef HAL_RCCEx_DisablePLLSAI(void) +{ + uint32_t tickstart; + + /* Disable the PLLSAI */ + __HAL_RCC_PLLSAI_DISABLE(); + + /* Wait till PLLSAI is disabled */ + tickstart = HAL_GetTick(); + while(__HAL_RCC_PLLSAI_GET_FLAG() != RESET) + { + if((HAL_GetTick() - tickstart) > PLLSAI_TIMEOUT_VALUE) + { + /* return in case of Timeout detected */ + return HAL_TIMEOUT; + } + } + + return HAL_OK; +} + +/** + * @} + */ + +/** + * @} + */ + +#endif /* HAL_RCC_MODULE_ENABLED */ +/** + * @} + */ + +/** + * @} + */ + diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_tim.c b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_tim.c new file mode 100644 index 0000000..5e683ca --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_tim.c @@ -0,0 +1,7883 @@ +/** + ****************************************************************************** + * @file stm32f7xx_hal_tim.c + * @author MCD Application Team + * @brief TIM HAL module driver. 
+ * This file provides firmware functions to manage the following + * functionalities of the Timer (TIM) peripheral: + * + TIM Time Base Initialization + * + TIM Time Base Start + * + TIM Time Base Start Interruption + * + TIM Time Base Start DMA + * + TIM Output Compare/PWM Initialization + * + TIM Output Compare/PWM Channel Configuration + * + TIM Output Compare/PWM Start + * + TIM Output Compare/PWM Start Interruption + * + TIM Output Compare/PWM Start DMA + * + TIM Input Capture Initialization + * + TIM Input Capture Channel Configuration + * + TIM Input Capture Start + * + TIM Input Capture Start Interruption + * + TIM Input Capture Start DMA + * + TIM One Pulse Initialization + * + TIM One Pulse Channel Configuration + * + TIM One Pulse Start + * + TIM Encoder Interface Initialization + * + TIM Encoder Interface Start + * + TIM Encoder Interface Start Interruption + * + TIM Encoder Interface Start DMA + * + Commutation Event configuration with Interruption and DMA + * + TIM OCRef clear configuration + * + TIM External Clock configuration + ****************************************************************************** + * @attention + * + * Copyright (c) 2017 STMicroelectronics. + * All rights reserved. + * + * This software is licensed under terms that can be found in the LICENSE file + * in the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. + * + ****************************************************************************** + @verbatim + ============================================================================== + ##### TIMER Generic features ##### + ============================================================================== + [..] The Timer features include: + (#) 16-bit up, down, up/down auto-reload counter. + (#) 16-bit programmable prescaler allowing dividing (also on the fly) the + counter clock frequency either by any factor between 1 and 65536. + (#) Up to 4 independent channels for: + (++) Input Capture + (++) Output Compare + (++) PWM generation (Edge and Center-aligned Mode) + (++) One-pulse mode output + (#) Synchronization circuit to control the timer with external signals and to interconnect + several timers together. + (#) Supports incremental encoder for positioning purposes + + ##### How to use this driver ##### + ============================================================================== + [..] + (#) Initialize the TIM low level resources by implementing the following functions + depending on the selected feature: + (++) Time Base : HAL_TIM_Base_MspInit() + (++) Input Capture : HAL_TIM_IC_MspInit() + (++) Output Compare : HAL_TIM_OC_MspInit() + (++) PWM generation : HAL_TIM_PWM_MspInit() + (++) One-pulse mode output : HAL_TIM_OnePulse_MspInit() + (++) Encoder mode output : HAL_TIM_Encoder_MspInit() + + (#) Initialize the TIM low level resources : + (##) Enable the TIM interface clock using __HAL_RCC_TIMx_CLK_ENABLE(); + (##) TIM pins configuration + (+++) Enable the clock for the TIM GPIOs using the following function: + __HAL_RCC_GPIOx_CLK_ENABLE(); + (+++) Configure these TIM pins in Alternate function mode using HAL_GPIO_Init(); + + (#) The external Clock can be configured, if needed (the default clock is the + internal clock from the APBx), using the following function: + HAL_TIM_ConfigClockSource, the clock configuration should be done before + any start function. 
+ + (#) Configure the TIM in the desired functioning mode using one of the + Initialization function of this driver: + (++) HAL_TIM_Base_Init: to use the Timer to generate a simple time base + (++) HAL_TIM_OC_Init and HAL_TIM_OC_ConfigChannel: to use the Timer to generate an + Output Compare signal. + (++) HAL_TIM_PWM_Init and HAL_TIM_PWM_ConfigChannel: to use the Timer to generate a + PWM signal. + (++) HAL_TIM_IC_Init and HAL_TIM_IC_ConfigChannel: to use the Timer to measure an + external signal. + (++) HAL_TIM_OnePulse_Init and HAL_TIM_OnePulse_ConfigChannel: to use the Timer + in One Pulse Mode. + (++) HAL_TIM_Encoder_Init: to use the Timer Encoder Interface. + + (#) Activate the TIM peripheral using one of the start functions depending from the feature used: + (++) Time Base : HAL_TIM_Base_Start(), HAL_TIM_Base_Start_DMA(), HAL_TIM_Base_Start_IT() + (++) Input Capture : HAL_TIM_IC_Start(), HAL_TIM_IC_Start_DMA(), HAL_TIM_IC_Start_IT() + (++) Output Compare : HAL_TIM_OC_Start(), HAL_TIM_OC_Start_DMA(), HAL_TIM_OC_Start_IT() + (++) PWM generation : HAL_TIM_PWM_Start(), HAL_TIM_PWM_Start_DMA(), HAL_TIM_PWM_Start_IT() + (++) One-pulse mode output : HAL_TIM_OnePulse_Start(), HAL_TIM_OnePulse_Start_IT() + (++) Encoder mode output : HAL_TIM_Encoder_Start(), HAL_TIM_Encoder_Start_DMA(), HAL_TIM_Encoder_Start_IT(). + + (#) The DMA Burst is managed with the two following functions: + HAL_TIM_DMABurst_WriteStart() + HAL_TIM_DMABurst_ReadStart() + + *** Callback registration *** + ============================================= + + [..] + The compilation define USE_HAL_TIM_REGISTER_CALLBACKS when set to 1 + allows the user to configure dynamically the driver callbacks. + + [..] + Use Function HAL_TIM_RegisterCallback() to register a callback. + HAL_TIM_RegisterCallback() takes as parameters the HAL peripheral handle, + the Callback ID and a pointer to the user callback function. + + [..] + Use function HAL_TIM_UnRegisterCallback() to reset a callback to the default + weak function. + HAL_TIM_UnRegisterCallback takes as parameters the HAL peripheral handle, + and the Callback ID. + + [..] + These functions allow to register/unregister following callbacks: + (+) Base_MspInitCallback : TIM Base Msp Init Callback. + (+) Base_MspDeInitCallback : TIM Base Msp DeInit Callback. + (+) IC_MspInitCallback : TIM IC Msp Init Callback. + (+) IC_MspDeInitCallback : TIM IC Msp DeInit Callback. + (+) OC_MspInitCallback : TIM OC Msp Init Callback. + (+) OC_MspDeInitCallback : TIM OC Msp DeInit Callback. + (+) PWM_MspInitCallback : TIM PWM Msp Init Callback. + (+) PWM_MspDeInitCallback : TIM PWM Msp DeInit Callback. + (+) OnePulse_MspInitCallback : TIM One Pulse Msp Init Callback. + (+) OnePulse_MspDeInitCallback : TIM One Pulse Msp DeInit Callback. + (+) Encoder_MspInitCallback : TIM Encoder Msp Init Callback. + (+) Encoder_MspDeInitCallback : TIM Encoder Msp DeInit Callback. + (+) HallSensor_MspInitCallback : TIM Hall Sensor Msp Init Callback. + (+) HallSensor_MspDeInitCallback : TIM Hall Sensor Msp DeInit Callback. + (+) PeriodElapsedCallback : TIM Period Elapsed Callback. + (+) PeriodElapsedHalfCpltCallback : TIM Period Elapsed half complete Callback. + (+) TriggerCallback : TIM Trigger Callback. + (+) TriggerHalfCpltCallback : TIM Trigger half complete Callback. + (+) IC_CaptureCallback : TIM Input Capture Callback. + (+) IC_CaptureHalfCpltCallback : TIM Input Capture half complete Callback. + (+) OC_DelayElapsedCallback : TIM Output Compare Delay Elapsed Callback. 
+ (+) PWM_PulseFinishedCallback : TIM PWM Pulse Finished Callback. + (+) PWM_PulseFinishedHalfCpltCallback : TIM PWM Pulse Finished half complete Callback. + (+) ErrorCallback : TIM Error Callback. + (+) CommutationCallback : TIM Commutation Callback. + (+) CommutationHalfCpltCallback : TIM Commutation half complete Callback. + (+) BreakCallback : TIM Break Callback. + (+) Break2Callback : TIM Break2 Callback. + + [..] +By default, after the Init and when the state is HAL_TIM_STATE_RESET +all interrupt callbacks are set to the corresponding weak functions: + examples HAL_TIM_TriggerCallback(), HAL_TIM_ErrorCallback(). + + [..] + Exception done for MspInit and MspDeInit functions that are reset to the legacy weak + functionalities in the Init / DeInit only when these callbacks are null + (not registered beforehand). If not, MspInit or MspDeInit are not null, the Init / DeInit + keep and use the user MspInit / MspDeInit callbacks(registered beforehand) + + [..] + Callbacks can be registered / unregistered in HAL_TIM_STATE_READY state only. + Exception done MspInit / MspDeInit that can be registered / unregistered + in HAL_TIM_STATE_READY or HAL_TIM_STATE_RESET state, + thus registered(user) MspInit / DeInit callbacks can be used during the Init / DeInit. + In that case first register the MspInit/MspDeInit user callbacks + using HAL_TIM_RegisterCallback() before calling DeInit or Init function. + + [..] + When The compilation define USE_HAL_TIM_REGISTER_CALLBACKS is set to 0 or + not defined, the callback registration feature is not available and all callbacks + are set to the corresponding weak functions. + + @endverbatim + ****************************************************************************** + */ + +/* Includes ------------------------------------------------------------------*/ +#include "stm32f7xx_hal.h" + +/** @addtogroup STM32F7xx_HAL_Driver + * @{ + */ + +/** @defgroup TIM TIM + * @brief TIM HAL module driver + * @{ + */ + +#ifdef HAL_TIM_MODULE_ENABLED + +/* Private typedef -----------------------------------------------------------*/ +/* Private define ------------------------------------------------------------*/ +/* Private macros ------------------------------------------------------------*/ +/* Private variables ---------------------------------------------------------*/ +/* Private function prototypes -----------------------------------------------*/ +/** @addtogroup TIM_Private_Functions + * @{ + */ +static void TIM_OC1_SetConfig(TIM_TypeDef *TIMx, TIM_OC_InitTypeDef *OC_Config); +static void TIM_OC3_SetConfig(TIM_TypeDef *TIMx, TIM_OC_InitTypeDef *OC_Config); +static void TIM_OC4_SetConfig(TIM_TypeDef *TIMx, TIM_OC_InitTypeDef *OC_Config); +static void TIM_OC5_SetConfig(TIM_TypeDef *TIMx, TIM_OC_InitTypeDef *OC_Config); +static void TIM_OC6_SetConfig(TIM_TypeDef *TIMx, TIM_OC_InitTypeDef *OC_Config); +static void TIM_TI1_ConfigInputStage(TIM_TypeDef *TIMx, uint32_t TIM_ICPolarity, uint32_t TIM_ICFilter); +static void TIM_TI2_SetConfig(TIM_TypeDef *TIMx, uint32_t TIM_ICPolarity, uint32_t TIM_ICSelection, + uint32_t TIM_ICFilter); +static void TIM_TI2_ConfigInputStage(TIM_TypeDef *TIMx, uint32_t TIM_ICPolarity, uint32_t TIM_ICFilter); +static void TIM_TI3_SetConfig(TIM_TypeDef *TIMx, uint32_t TIM_ICPolarity, uint32_t TIM_ICSelection, + uint32_t TIM_ICFilter); +static void TIM_TI4_SetConfig(TIM_TypeDef *TIMx, uint32_t TIM_ICPolarity, uint32_t TIM_ICSelection, + uint32_t TIM_ICFilter); +static void TIM_ITRx_SetConfig(TIM_TypeDef *TIMx, uint32_t InputTriggerSource); 
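/* ---------------------------------------------------------------------------
   Editor's note: a minimal, illustrative time-base sketch following the
   "How to use this driver" steps in the header above. It is not part of the
   original ST source; TIM3, the prescaler/period values and Error_Handler()
   are assumptions taken from a typical STM32CubeMX project.

     TIM_HandleTypeDef htim3;

     void HAL_TIM_Base_MspInit(TIM_HandleTypeDef *htim)
     {
       if (htim->Instance == TIM3)
       {
         __HAL_RCC_TIM3_CLK_ENABLE();            // low level resources: TIM clock
         HAL_NVIC_SetPriority(TIM3_IRQn, 5, 0);  // needed for the _IT start mode
         HAL_NVIC_EnableIRQ(TIM3_IRQn);
       }
     }

     void app_timebase_start(void)
     {
       htim3.Instance               = TIM3;
       htim3.Init.Prescaler         = 10799;     // placeholder values
       htim3.Init.Period            = 9999;
       htim3.Init.CounterMode       = TIM_COUNTERMODE_UP;
       htim3.Init.ClockDivision     = TIM_CLOCKDIVISION_DIV1;
       htim3.Init.AutoReloadPreload = TIM_AUTORELOAD_PRELOAD_DISABLE;

       if (HAL_TIM_Base_Init(&htim3) != HAL_OK)       // time base configuration
       {
         Error_Handler();
       }
       if (HAL_TIM_Base_Start_IT(&htim3) != HAL_OK)   // start with update interrupt
       {
         Error_Handler();
       }
     }

     void TIM3_IRQHandler(void)                       // forwards to the HAL
     {
       HAL_TIM_IRQHandler(&htim3);
     }

     void HAL_TIM_PeriodElapsedCallback(TIM_HandleTypeDef *htim)
     {
       // called by HAL_TIM_IRQHandler() on each update event
       (void)htim;
     }

   ------------------------------------------------------------------------- */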
+static void TIM_DMAPeriodElapsedCplt(DMA_HandleTypeDef *hdma); +static void TIM_DMAPeriodElapsedHalfCplt(DMA_HandleTypeDef *hdma); +static void TIM_DMADelayPulseCplt(DMA_HandleTypeDef *hdma); +static void TIM_DMATriggerCplt(DMA_HandleTypeDef *hdma); +static void TIM_DMATriggerHalfCplt(DMA_HandleTypeDef *hdma); +static HAL_StatusTypeDef TIM_SlaveTimer_SetConfig(TIM_HandleTypeDef *htim, + TIM_SlaveConfigTypeDef *sSlaveConfig); +/** + * @} + */ +/* Exported functions --------------------------------------------------------*/ + +/** @defgroup TIM_Exported_Functions TIM Exported Functions + * @{ + */ + +/** @defgroup TIM_Exported_Functions_Group1 TIM Time Base functions + * @brief Time Base functions + * +@verbatim + ============================================================================== + ##### Time Base functions ##### + ============================================================================== + [..] + This section provides functions allowing to: + (+) Initialize and configure the TIM base. + (+) De-initialize the TIM base. + (+) Start the Time Base. + (+) Stop the Time Base. + (+) Start the Time Base and enable interrupt. + (+) Stop the Time Base and disable interrupt. + (+) Start the Time Base and enable DMA transfer. + (+) Stop the Time Base and disable DMA transfer. + +@endverbatim + * @{ + */ +/** + * @brief Initializes the TIM Time base Unit according to the specified + * parameters in the TIM_HandleTypeDef and initialize the associated handle. + * @note Switching from Center Aligned counter mode to Edge counter mode (or reverse) + * requires a timer reset to avoid unexpected direction + * due to DIR bit readonly in center aligned mode. + * Ex: call @ref HAL_TIM_Base_DeInit() before HAL_TIM_Base_Init() + * @param htim TIM Base handle + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIM_Base_Init(TIM_HandleTypeDef *htim) +{ + /* Check the TIM handle allocation */ + if (htim == NULL) + { + return HAL_ERROR; + } + + /* Check the parameters */ + assert_param(IS_TIM_INSTANCE(htim->Instance)); + assert_param(IS_TIM_COUNTER_MODE(htim->Init.CounterMode)); + assert_param(IS_TIM_CLOCKDIVISION_DIV(htim->Init.ClockDivision)); + assert_param(IS_TIM_AUTORELOAD_PRELOAD(htim->Init.AutoReloadPreload)); + + if (htim->State == HAL_TIM_STATE_RESET) + { + /* Allocate lock resource and initialize it */ + htim->Lock = HAL_UNLOCKED; + +#if (USE_HAL_TIM_REGISTER_CALLBACKS == 1) + /* Reset interrupt callbacks to legacy weak callbacks */ + TIM_ResetCallback(htim); + + if (htim->Base_MspInitCallback == NULL) + { + htim->Base_MspInitCallback = HAL_TIM_Base_MspInit; + } + /* Init the low level hardware : GPIO, CLOCK, NVIC */ + htim->Base_MspInitCallback(htim); +#else + /* Init the low level hardware : GPIO, CLOCK, NVIC */ + HAL_TIM_Base_MspInit(htim); +#endif /* USE_HAL_TIM_REGISTER_CALLBACKS */ + } + + /* Set the TIM state */ + htim->State = HAL_TIM_STATE_BUSY; + + /* Set the Time Base configuration */ + TIM_Base_SetConfig(htim->Instance, &htim->Init); + + /* Initialize the DMA burst operation state */ + htim->DMABurstState = HAL_DMA_BURST_STATE_READY; + + /* Initialize the TIM channels state */ + TIM_CHANNEL_STATE_SET_ALL(htim, HAL_TIM_CHANNEL_STATE_READY); + TIM_CHANNEL_N_STATE_SET_ALL(htim, HAL_TIM_CHANNEL_STATE_READY); + + /* Initialize the TIM state*/ + htim->State = HAL_TIM_STATE_READY; + + return HAL_OK; +} + +/** + * @brief DeInitializes the TIM Base peripheral + * @param htim TIM Base handle + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIM_Base_DeInit(TIM_HandleTypeDef *htim) +{ + /* 
Check the parameters */ + assert_param(IS_TIM_INSTANCE(htim->Instance)); + + htim->State = HAL_TIM_STATE_BUSY; + + /* Disable the TIM Peripheral Clock */ + __HAL_TIM_DISABLE(htim); + +#if (USE_HAL_TIM_REGISTER_CALLBACKS == 1) + if (htim->Base_MspDeInitCallback == NULL) + { + htim->Base_MspDeInitCallback = HAL_TIM_Base_MspDeInit; + } + /* DeInit the low level hardware */ + htim->Base_MspDeInitCallback(htim); +#else + /* DeInit the low level hardware: GPIO, CLOCK, NVIC */ + HAL_TIM_Base_MspDeInit(htim); +#endif /* USE_HAL_TIM_REGISTER_CALLBACKS */ + + /* Change the DMA burst operation state */ + htim->DMABurstState = HAL_DMA_BURST_STATE_RESET; + + /* Change the TIM channels state */ + TIM_CHANNEL_STATE_SET_ALL(htim, HAL_TIM_CHANNEL_STATE_RESET); + TIM_CHANNEL_N_STATE_SET_ALL(htim, HAL_TIM_CHANNEL_STATE_RESET); + + /* Change TIM state */ + htim->State = HAL_TIM_STATE_RESET; + + /* Release Lock */ + __HAL_UNLOCK(htim); + + return HAL_OK; +} + +/** + * @brief Initializes the TIM Base MSP. + * @param htim TIM Base handle + * @retval None + */ +__weak void HAL_TIM_Base_MspInit(TIM_HandleTypeDef *htim) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(htim); + + /* NOTE : This function should not be modified, when the callback is needed, + the HAL_TIM_Base_MspInit could be implemented in the user file + */ +} + +/** + * @brief DeInitializes TIM Base MSP. + * @param htim TIM Base handle + * @retval None + */ +__weak void HAL_TIM_Base_MspDeInit(TIM_HandleTypeDef *htim) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(htim); + + /* NOTE : This function should not be modified, when the callback is needed, + the HAL_TIM_Base_MspDeInit could be implemented in the user file + */ +} + + +/** + * @brief Starts the TIM Base generation. + * @param htim TIM Base handle + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIM_Base_Start(TIM_HandleTypeDef *htim) +{ + uint32_t tmpsmcr; + + /* Check the parameters */ + assert_param(IS_TIM_INSTANCE(htim->Instance)); + + /* Check the TIM state */ + if (htim->State != HAL_TIM_STATE_READY) + { + return HAL_ERROR; + } + + /* Set the TIM state */ + htim->State = HAL_TIM_STATE_BUSY; + + /* Enable the Peripheral, except in trigger mode where enable is automatically done with trigger */ + if (IS_TIM_SLAVE_INSTANCE(htim->Instance)) + { + tmpsmcr = htim->Instance->SMCR & TIM_SMCR_SMS; + if (!IS_TIM_SLAVEMODE_TRIGGER_ENABLED(tmpsmcr)) + { + __HAL_TIM_ENABLE(htim); + } + } + else + { + __HAL_TIM_ENABLE(htim); + } + + /* Return function status */ + return HAL_OK; +} + +/** + * @brief Stops the TIM Base generation. + * @param htim TIM Base handle + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIM_Base_Stop(TIM_HandleTypeDef *htim) +{ + /* Check the parameters */ + assert_param(IS_TIM_INSTANCE(htim->Instance)); + + /* Disable the Peripheral */ + __HAL_TIM_DISABLE(htim); + + /* Set the TIM state */ + htim->State = HAL_TIM_STATE_READY; + + /* Return function status */ + return HAL_OK; +} + +/** + * @brief Starts the TIM Base generation in interrupt mode. 
+ * @param htim TIM Base handle + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIM_Base_Start_IT(TIM_HandleTypeDef *htim) +{ + uint32_t tmpsmcr; + + /* Check the parameters */ + assert_param(IS_TIM_INSTANCE(htim->Instance)); + + /* Check the TIM state */ + if (htim->State != HAL_TIM_STATE_READY) + { + return HAL_ERROR; + } + + /* Set the TIM state */ + htim->State = HAL_TIM_STATE_BUSY; + + /* Enable the TIM Update interrupt */ + __HAL_TIM_ENABLE_IT(htim, TIM_IT_UPDATE); + + /* Enable the Peripheral, except in trigger mode where enable is automatically done with trigger */ + if (IS_TIM_SLAVE_INSTANCE(htim->Instance)) + { + tmpsmcr = htim->Instance->SMCR & TIM_SMCR_SMS; + if (!IS_TIM_SLAVEMODE_TRIGGER_ENABLED(tmpsmcr)) + { + __HAL_TIM_ENABLE(htim); + } + } + else + { + __HAL_TIM_ENABLE(htim); + } + + /* Return function status */ + return HAL_OK; +} + +/** + * @brief Stops the TIM Base generation in interrupt mode. + * @param htim TIM Base handle + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIM_Base_Stop_IT(TIM_HandleTypeDef *htim) +{ + /* Check the parameters */ + assert_param(IS_TIM_INSTANCE(htim->Instance)); + + /* Disable the TIM Update interrupt */ + __HAL_TIM_DISABLE_IT(htim, TIM_IT_UPDATE); + + /* Disable the Peripheral */ + __HAL_TIM_DISABLE(htim); + + /* Set the TIM state */ + htim->State = HAL_TIM_STATE_READY; + + /* Return function status */ + return HAL_OK; +} + +/** + * @brief Starts the TIM Base generation in DMA mode. + * @param htim TIM Base handle + * @param pData The source Buffer address. + * @param Length The length of data to be transferred from memory to peripheral. + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIM_Base_Start_DMA(TIM_HandleTypeDef *htim, uint32_t *pData, uint16_t Length) +{ + uint32_t tmpsmcr; + + /* Check the parameters */ + assert_param(IS_TIM_DMA_INSTANCE(htim->Instance)); + + /* Set the TIM state */ + if (htim->State == HAL_TIM_STATE_BUSY) + { + return HAL_BUSY; + } + else if (htim->State == HAL_TIM_STATE_READY) + { + if ((pData == NULL) && (Length > 0U)) + { + return HAL_ERROR; + } + else + { + htim->State = HAL_TIM_STATE_BUSY; + } + } + else + { + return HAL_ERROR; + } + + /* Set the DMA Period elapsed callbacks */ + htim->hdma[TIM_DMA_ID_UPDATE]->XferCpltCallback = TIM_DMAPeriodElapsedCplt; + htim->hdma[TIM_DMA_ID_UPDATE]->XferHalfCpltCallback = TIM_DMAPeriodElapsedHalfCplt; + + /* Set the DMA error callback */ + htim->hdma[TIM_DMA_ID_UPDATE]->XferErrorCallback = TIM_DMAError ; + + /* Enable the DMA stream */ + if (HAL_DMA_Start_IT(htim->hdma[TIM_DMA_ID_UPDATE], (uint32_t)pData, (uint32_t)&htim->Instance->ARR, + Length) != HAL_OK) + { + /* Return error status */ + return HAL_ERROR; + } + + /* Enable the TIM Update DMA request */ + __HAL_TIM_ENABLE_DMA(htim, TIM_DMA_UPDATE); + + /* Enable the Peripheral, except in trigger mode where enable is automatically done with trigger */ + if (IS_TIM_SLAVE_INSTANCE(htim->Instance)) + { + tmpsmcr = htim->Instance->SMCR & TIM_SMCR_SMS; + if (!IS_TIM_SLAVEMODE_TRIGGER_ENABLED(tmpsmcr)) + { + __HAL_TIM_ENABLE(htim); + } + } + else + { + __HAL_TIM_ENABLE(htim); + } + + /* Return function status */ + return HAL_OK; +} + +/** + * @brief Stops the TIM Base generation in DMA mode. 
+ * @param htim TIM Base handle + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIM_Base_Stop_DMA(TIM_HandleTypeDef *htim) +{ + /* Check the parameters */ + assert_param(IS_TIM_DMA_INSTANCE(htim->Instance)); + + /* Disable the TIM Update DMA request */ + __HAL_TIM_DISABLE_DMA(htim, TIM_DMA_UPDATE); + + (void)HAL_DMA_Abort_IT(htim->hdma[TIM_DMA_ID_UPDATE]); + + /* Disable the Peripheral */ + __HAL_TIM_DISABLE(htim); + + /* Set the TIM state */ + htim->State = HAL_TIM_STATE_READY; + + /* Return function status */ + return HAL_OK; +} + +/** + * @} + */ + +/** @defgroup TIM_Exported_Functions_Group2 TIM Output Compare functions + * @brief TIM Output Compare functions + * +@verbatim + ============================================================================== + ##### TIM Output Compare functions ##### + ============================================================================== + [..] + This section provides functions allowing to: + (+) Initialize and configure the TIM Output Compare. + (+) De-initialize the TIM Output Compare. + (+) Start the TIM Output Compare. + (+) Stop the TIM Output Compare. + (+) Start the TIM Output Compare and enable interrupt. + (+) Stop the TIM Output Compare and disable interrupt. + (+) Start the TIM Output Compare and enable DMA transfer. + (+) Stop the TIM Output Compare and disable DMA transfer. + +@endverbatim + * @{ + */ +/** + * @brief Initializes the TIM Output Compare according to the specified + * parameters in the TIM_HandleTypeDef and initializes the associated handle. + * @note Switching from Center Aligned counter mode to Edge counter mode (or reverse) + * requires a timer reset to avoid unexpected direction + * due to DIR bit readonly in center aligned mode. + * Ex: call @ref HAL_TIM_OC_DeInit() before HAL_TIM_OC_Init() + * @param htim TIM Output Compare handle + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIM_OC_Init(TIM_HandleTypeDef *htim) +{ + /* Check the TIM handle allocation */ + if (htim == NULL) + { + return HAL_ERROR; + } + + /* Check the parameters */ + assert_param(IS_TIM_INSTANCE(htim->Instance)); + assert_param(IS_TIM_COUNTER_MODE(htim->Init.CounterMode)); + assert_param(IS_TIM_CLOCKDIVISION_DIV(htim->Init.ClockDivision)); + assert_param(IS_TIM_AUTORELOAD_PRELOAD(htim->Init.AutoReloadPreload)); + + if (htim->State == HAL_TIM_STATE_RESET) + { + /* Allocate lock resource and initialize it */ + htim->Lock = HAL_UNLOCKED; + +#if (USE_HAL_TIM_REGISTER_CALLBACKS == 1) + /* Reset interrupt callbacks to legacy weak callbacks */ + TIM_ResetCallback(htim); + + if (htim->OC_MspInitCallback == NULL) + { + htim->OC_MspInitCallback = HAL_TIM_OC_MspInit; + } + /* Init the low level hardware : GPIO, CLOCK, NVIC */ + htim->OC_MspInitCallback(htim); +#else + /* Init the low level hardware : GPIO, CLOCK, NVIC and DMA */ + HAL_TIM_OC_MspInit(htim); +#endif /* USE_HAL_TIM_REGISTER_CALLBACKS */ + } + + /* Set the TIM state */ + htim->State = HAL_TIM_STATE_BUSY; + + /* Init the base time for the Output Compare */ + TIM_Base_SetConfig(htim->Instance, &htim->Init); + + /* Initialize the DMA burst operation state */ + htim->DMABurstState = HAL_DMA_BURST_STATE_READY; + + /* Initialize the TIM channels state */ + TIM_CHANNEL_STATE_SET_ALL(htim, HAL_TIM_CHANNEL_STATE_READY); + TIM_CHANNEL_N_STATE_SET_ALL(htim, HAL_TIM_CHANNEL_STATE_READY); + + /* Initialize the TIM state*/ + htim->State = HAL_TIM_STATE_READY; + + return HAL_OK; +} + +/** + * @brief DeInitializes the TIM peripheral + * @param htim TIM Output Compare handle + * @retval HAL status + */ 
+HAL_StatusTypeDef HAL_TIM_OC_DeInit(TIM_HandleTypeDef *htim) +{ + /* Check the parameters */ + assert_param(IS_TIM_INSTANCE(htim->Instance)); + + htim->State = HAL_TIM_STATE_BUSY; + + /* Disable the TIM Peripheral Clock */ + __HAL_TIM_DISABLE(htim); + +#if (USE_HAL_TIM_REGISTER_CALLBACKS == 1) + if (htim->OC_MspDeInitCallback == NULL) + { + htim->OC_MspDeInitCallback = HAL_TIM_OC_MspDeInit; + } + /* DeInit the low level hardware */ + htim->OC_MspDeInitCallback(htim); +#else + /* DeInit the low level hardware: GPIO, CLOCK, NVIC and DMA */ + HAL_TIM_OC_MspDeInit(htim); +#endif /* USE_HAL_TIM_REGISTER_CALLBACKS */ + + /* Change the DMA burst operation state */ + htim->DMABurstState = HAL_DMA_BURST_STATE_RESET; + + /* Change the TIM channels state */ + TIM_CHANNEL_STATE_SET_ALL(htim, HAL_TIM_CHANNEL_STATE_RESET); + TIM_CHANNEL_N_STATE_SET_ALL(htim, HAL_TIM_CHANNEL_STATE_RESET); + + /* Change TIM state */ + htim->State = HAL_TIM_STATE_RESET; + + /* Release Lock */ + __HAL_UNLOCK(htim); + + return HAL_OK; +} + +/** + * @brief Initializes the TIM Output Compare MSP. + * @param htim TIM Output Compare handle + * @retval None + */ +__weak void HAL_TIM_OC_MspInit(TIM_HandleTypeDef *htim) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(htim); + + /* NOTE : This function should not be modified, when the callback is needed, + the HAL_TIM_OC_MspInit could be implemented in the user file + */ +} + +/** + * @brief DeInitializes TIM Output Compare MSP. + * @param htim TIM Output Compare handle + * @retval None + */ +__weak void HAL_TIM_OC_MspDeInit(TIM_HandleTypeDef *htim) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(htim); + + /* NOTE : This function should not be modified, when the callback is needed, + the HAL_TIM_OC_MspDeInit could be implemented in the user file + */ +} + +/** + * @brief Starts the TIM Output Compare signal generation. + * @param htim TIM Output Compare handle + * @param Channel TIM Channel to be enabled + * This parameter can be one of the following values: + * @arg TIM_CHANNEL_1: TIM Channel 1 selected + * @arg TIM_CHANNEL_2: TIM Channel 2 selected + * @arg TIM_CHANNEL_3: TIM Channel 3 selected + * @arg TIM_CHANNEL_4: TIM Channel 4 selected + * @arg TIM_CHANNEL_5: TIM Channel 5 selected + * @arg TIM_CHANNEL_6: TIM Channel 6 selected + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIM_OC_Start(TIM_HandleTypeDef *htim, uint32_t Channel) +{ + uint32_t tmpsmcr; + + /* Check the parameters */ + assert_param(IS_TIM_CCX_INSTANCE(htim->Instance, Channel)); + + /* Check the TIM channel state */ + if (TIM_CHANNEL_STATE_GET(htim, Channel) != HAL_TIM_CHANNEL_STATE_READY) + { + return HAL_ERROR; + } + + /* Set the TIM channel state */ + TIM_CHANNEL_STATE_SET(htim, Channel, HAL_TIM_CHANNEL_STATE_BUSY); + + /* Enable the Output compare channel */ + TIM_CCxChannelCmd(htim->Instance, Channel, TIM_CCx_ENABLE); + + if (IS_TIM_BREAK_INSTANCE(htim->Instance) != RESET) + { + /* Enable the main output */ + __HAL_TIM_MOE_ENABLE(htim); + } + + /* Enable the Peripheral, except in trigger mode where enable is automatically done with trigger */ + if (IS_TIM_SLAVE_INSTANCE(htim->Instance)) + { + tmpsmcr = htim->Instance->SMCR & TIM_SMCR_SMS; + if (!IS_TIM_SLAVEMODE_TRIGGER_ENABLED(tmpsmcr)) + { + __HAL_TIM_ENABLE(htim); + } + } + else + { + __HAL_TIM_ENABLE(htim); + } + + /* Return function status */ + return HAL_OK; +} + +/** + * @brief Stops the TIM Output Compare signal generation. 
+ * @param htim TIM Output Compare handle + * @param Channel TIM Channel to be disabled + * This parameter can be one of the following values: + * @arg TIM_CHANNEL_1: TIM Channel 1 selected + * @arg TIM_CHANNEL_2: TIM Channel 2 selected + * @arg TIM_CHANNEL_3: TIM Channel 3 selected + * @arg TIM_CHANNEL_4: TIM Channel 4 selected + * @arg TIM_CHANNEL_5: TIM Channel 5 selected + * @arg TIM_CHANNEL_6: TIM Channel 6 selected + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIM_OC_Stop(TIM_HandleTypeDef *htim, uint32_t Channel) +{ + /* Check the parameters */ + assert_param(IS_TIM_CCX_INSTANCE(htim->Instance, Channel)); + + /* Disable the Output compare channel */ + TIM_CCxChannelCmd(htim->Instance, Channel, TIM_CCx_DISABLE); + + if (IS_TIM_BREAK_INSTANCE(htim->Instance) != RESET) + { + /* Disable the Main Output */ + __HAL_TIM_MOE_DISABLE(htim); + } + + /* Disable the Peripheral */ + __HAL_TIM_DISABLE(htim); + + /* Set the TIM channel state */ + TIM_CHANNEL_STATE_SET(htim, Channel, HAL_TIM_CHANNEL_STATE_READY); + + /* Return function status */ + return HAL_OK; +} + +/** + * @brief Starts the TIM Output Compare signal generation in interrupt mode. + * @param htim TIM Output Compare handle + * @param Channel TIM Channel to be enabled + * This parameter can be one of the following values: + * @arg TIM_CHANNEL_1: TIM Channel 1 selected + * @arg TIM_CHANNEL_2: TIM Channel 2 selected + * @arg TIM_CHANNEL_3: TIM Channel 3 selected + * @arg TIM_CHANNEL_4: TIM Channel 4 selected + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIM_OC_Start_IT(TIM_HandleTypeDef *htim, uint32_t Channel) +{ + HAL_StatusTypeDef status = HAL_OK; + uint32_t tmpsmcr; + + /* Check the parameters */ + assert_param(IS_TIM_CCX_INSTANCE(htim->Instance, Channel)); + + /* Check the TIM channel state */ + if (TIM_CHANNEL_STATE_GET(htim, Channel) != HAL_TIM_CHANNEL_STATE_READY) + { + return HAL_ERROR; + } + + /* Set the TIM channel state */ + TIM_CHANNEL_STATE_SET(htim, Channel, HAL_TIM_CHANNEL_STATE_BUSY); + + switch (Channel) + { + case TIM_CHANNEL_1: + { + /* Enable the TIM Capture/Compare 1 interrupt */ + __HAL_TIM_ENABLE_IT(htim, TIM_IT_CC1); + break; + } + + case TIM_CHANNEL_2: + { + /* Enable the TIM Capture/Compare 2 interrupt */ + __HAL_TIM_ENABLE_IT(htim, TIM_IT_CC2); + break; + } + + case TIM_CHANNEL_3: + { + /* Enable the TIM Capture/Compare 3 interrupt */ + __HAL_TIM_ENABLE_IT(htim, TIM_IT_CC3); + break; + } + + case TIM_CHANNEL_4: + { + /* Enable the TIM Capture/Compare 4 interrupt */ + __HAL_TIM_ENABLE_IT(htim, TIM_IT_CC4); + break; + } + + default: + status = HAL_ERROR; + break; + } + + if (status == HAL_OK) + { + /* Enable the Output compare channel */ + TIM_CCxChannelCmd(htim->Instance, Channel, TIM_CCx_ENABLE); + + if (IS_TIM_BREAK_INSTANCE(htim->Instance) != RESET) + { + /* Enable the main output */ + __HAL_TIM_MOE_ENABLE(htim); + } + + /* Enable the Peripheral, except in trigger mode where enable is automatically done with trigger */ + if (IS_TIM_SLAVE_INSTANCE(htim->Instance)) + { + tmpsmcr = htim->Instance->SMCR & TIM_SMCR_SMS; + if (!IS_TIM_SLAVEMODE_TRIGGER_ENABLED(tmpsmcr)) + { + __HAL_TIM_ENABLE(htim); + } + } + else + { + __HAL_TIM_ENABLE(htim); + } + } + + /* Return function status */ + return status; +} + +/** + * @brief Stops the TIM Output Compare signal generation in interrupt mode. 
+ * @param htim TIM Output Compare handle + * @param Channel TIM Channel to be disabled + * This parameter can be one of the following values: + * @arg TIM_CHANNEL_1: TIM Channel 1 selected + * @arg TIM_CHANNEL_2: TIM Channel 2 selected + * @arg TIM_CHANNEL_3: TIM Channel 3 selected + * @arg TIM_CHANNEL_4: TIM Channel 4 selected + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIM_OC_Stop_IT(TIM_HandleTypeDef *htim, uint32_t Channel) +{ + HAL_StatusTypeDef status = HAL_OK; + + /* Check the parameters */ + assert_param(IS_TIM_CCX_INSTANCE(htim->Instance, Channel)); + + switch (Channel) + { + case TIM_CHANNEL_1: + { + /* Disable the TIM Capture/Compare 1 interrupt */ + __HAL_TIM_DISABLE_IT(htim, TIM_IT_CC1); + break; + } + + case TIM_CHANNEL_2: + { + /* Disable the TIM Capture/Compare 2 interrupt */ + __HAL_TIM_DISABLE_IT(htim, TIM_IT_CC2); + break; + } + + case TIM_CHANNEL_3: + { + /* Disable the TIM Capture/Compare 3 interrupt */ + __HAL_TIM_DISABLE_IT(htim, TIM_IT_CC3); + break; + } + + case TIM_CHANNEL_4: + { + /* Disable the TIM Capture/Compare 4 interrupt */ + __HAL_TIM_DISABLE_IT(htim, TIM_IT_CC4); + break; + } + + default: + status = HAL_ERROR; + break; + } + + if (status == HAL_OK) + { + /* Disable the Output compare channel */ + TIM_CCxChannelCmd(htim->Instance, Channel, TIM_CCx_DISABLE); + + if (IS_TIM_BREAK_INSTANCE(htim->Instance) != RESET) + { + /* Disable the Main Output */ + __HAL_TIM_MOE_DISABLE(htim); + } + + /* Disable the Peripheral */ + __HAL_TIM_DISABLE(htim); + + /* Set the TIM channel state */ + TIM_CHANNEL_STATE_SET(htim, Channel, HAL_TIM_CHANNEL_STATE_READY); + } + + /* Return function status */ + return status; +} + +/** + * @brief Starts the TIM Output Compare signal generation in DMA mode. + * @param htim TIM Output Compare handle + * @param Channel TIM Channel to be enabled + * This parameter can be one of the following values: + * @arg TIM_CHANNEL_1: TIM Channel 1 selected + * @arg TIM_CHANNEL_2: TIM Channel 2 selected + * @arg TIM_CHANNEL_3: TIM Channel 3 selected + * @arg TIM_CHANNEL_4: TIM Channel 4 selected + * @param pData The source Buffer address. 
+ * @param Length The length of data to be transferred from memory to TIM peripheral + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIM_OC_Start_DMA(TIM_HandleTypeDef *htim, uint32_t Channel, uint32_t *pData, uint16_t Length) +{ + HAL_StatusTypeDef status = HAL_OK; + uint32_t tmpsmcr; + + /* Check the parameters */ + assert_param(IS_TIM_CCX_INSTANCE(htim->Instance, Channel)); + + /* Set the TIM channel state */ + if (TIM_CHANNEL_STATE_GET(htim, Channel) == HAL_TIM_CHANNEL_STATE_BUSY) + { + return HAL_BUSY; + } + else if (TIM_CHANNEL_STATE_GET(htim, Channel) == HAL_TIM_CHANNEL_STATE_READY) + { + if ((pData == NULL) && (Length > 0U)) + { + return HAL_ERROR; + } + else + { + TIM_CHANNEL_STATE_SET(htim, Channel, HAL_TIM_CHANNEL_STATE_BUSY); + } + } + else + { + return HAL_ERROR; + } + + switch (Channel) + { + case TIM_CHANNEL_1: + { + /* Set the DMA compare callbacks */ + htim->hdma[TIM_DMA_ID_CC1]->XferCpltCallback = TIM_DMADelayPulseCplt; + htim->hdma[TIM_DMA_ID_CC1]->XferHalfCpltCallback = TIM_DMADelayPulseHalfCplt; + + /* Set the DMA error callback */ + htim->hdma[TIM_DMA_ID_CC1]->XferErrorCallback = TIM_DMAError ; + + /* Enable the DMA stream */ + if (HAL_DMA_Start_IT(htim->hdma[TIM_DMA_ID_CC1], (uint32_t)pData, (uint32_t)&htim->Instance->CCR1, + Length) != HAL_OK) + { + /* Return error status */ + return HAL_ERROR; + } + + /* Enable the TIM Capture/Compare 1 DMA request */ + __HAL_TIM_ENABLE_DMA(htim, TIM_DMA_CC1); + break; + } + + case TIM_CHANNEL_2: + { + /* Set the DMA compare callbacks */ + htim->hdma[TIM_DMA_ID_CC2]->XferCpltCallback = TIM_DMADelayPulseCplt; + htim->hdma[TIM_DMA_ID_CC2]->XferHalfCpltCallback = TIM_DMADelayPulseHalfCplt; + + /* Set the DMA error callback */ + htim->hdma[TIM_DMA_ID_CC2]->XferErrorCallback = TIM_DMAError ; + + /* Enable the DMA stream */ + if (HAL_DMA_Start_IT(htim->hdma[TIM_DMA_ID_CC2], (uint32_t)pData, (uint32_t)&htim->Instance->CCR2, + Length) != HAL_OK) + { + /* Return error status */ + return HAL_ERROR; + } + + /* Enable the TIM Capture/Compare 2 DMA request */ + __HAL_TIM_ENABLE_DMA(htim, TIM_DMA_CC2); + break; + } + + case TIM_CHANNEL_3: + { + /* Set the DMA compare callbacks */ + htim->hdma[TIM_DMA_ID_CC3]->XferCpltCallback = TIM_DMADelayPulseCplt; + htim->hdma[TIM_DMA_ID_CC3]->XferHalfCpltCallback = TIM_DMADelayPulseHalfCplt; + + /* Set the DMA error callback */ + htim->hdma[TIM_DMA_ID_CC3]->XferErrorCallback = TIM_DMAError ; + + /* Enable the DMA stream */ + if (HAL_DMA_Start_IT(htim->hdma[TIM_DMA_ID_CC3], (uint32_t)pData, (uint32_t)&htim->Instance->CCR3, + Length) != HAL_OK) + { + /* Return error status */ + return HAL_ERROR; + } + /* Enable the TIM Capture/Compare 3 DMA request */ + __HAL_TIM_ENABLE_DMA(htim, TIM_DMA_CC3); + break; + } + + case TIM_CHANNEL_4: + { + /* Set the DMA compare callbacks */ + htim->hdma[TIM_DMA_ID_CC4]->XferCpltCallback = TIM_DMADelayPulseCplt; + htim->hdma[TIM_DMA_ID_CC4]->XferHalfCpltCallback = TIM_DMADelayPulseHalfCplt; + + /* Set the DMA error callback */ + htim->hdma[TIM_DMA_ID_CC4]->XferErrorCallback = TIM_DMAError ; + + /* Enable the DMA stream */ + if (HAL_DMA_Start_IT(htim->hdma[TIM_DMA_ID_CC4], (uint32_t)pData, (uint32_t)&htim->Instance->CCR4, + Length) != HAL_OK) + { + /* Return error status */ + return HAL_ERROR; + } + /* Enable the TIM Capture/Compare 4 DMA request */ + __HAL_TIM_ENABLE_DMA(htim, TIM_DMA_CC4); + break; + } + + default: + status = HAL_ERROR; + break; + } + + if (status == HAL_OK) + { + /* Enable the Output compare channel */ + TIM_CCxChannelCmd(htim->Instance, Channel, 
TIM_CCx_ENABLE); + + if (IS_TIM_BREAK_INSTANCE(htim->Instance) != RESET) + { + /* Enable the main output */ + __HAL_TIM_MOE_ENABLE(htim); + } + + /* Enable the Peripheral, except in trigger mode where enable is automatically done with trigger */ + if (IS_TIM_SLAVE_INSTANCE(htim->Instance)) + { + tmpsmcr = htim->Instance->SMCR & TIM_SMCR_SMS; + if (!IS_TIM_SLAVEMODE_TRIGGER_ENABLED(tmpsmcr)) + { + __HAL_TIM_ENABLE(htim); + } + } + else + { + __HAL_TIM_ENABLE(htim); + } + } + + /* Return function status */ + return status; +} + +/** + * @brief Stops the TIM Output Compare signal generation in DMA mode. + * @param htim TIM Output Compare handle + * @param Channel TIM Channel to be disabled + * This parameter can be one of the following values: + * @arg TIM_CHANNEL_1: TIM Channel 1 selected + * @arg TIM_CHANNEL_2: TIM Channel 2 selected + * @arg TIM_CHANNEL_3: TIM Channel 3 selected + * @arg TIM_CHANNEL_4: TIM Channel 4 selected + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIM_OC_Stop_DMA(TIM_HandleTypeDef *htim, uint32_t Channel) +{ + HAL_StatusTypeDef status = HAL_OK; + + /* Check the parameters */ + assert_param(IS_TIM_CCX_INSTANCE(htim->Instance, Channel)); + + switch (Channel) + { + case TIM_CHANNEL_1: + { + /* Disable the TIM Capture/Compare 1 DMA request */ + __HAL_TIM_DISABLE_DMA(htim, TIM_DMA_CC1); + (void)HAL_DMA_Abort_IT(htim->hdma[TIM_DMA_ID_CC1]); + break; + } + + case TIM_CHANNEL_2: + { + /* Disable the TIM Capture/Compare 2 DMA request */ + __HAL_TIM_DISABLE_DMA(htim, TIM_DMA_CC2); + (void)HAL_DMA_Abort_IT(htim->hdma[TIM_DMA_ID_CC2]); + break; + } + + case TIM_CHANNEL_3: + { + /* Disable the TIM Capture/Compare 3 DMA request */ + __HAL_TIM_DISABLE_DMA(htim, TIM_DMA_CC3); + (void)HAL_DMA_Abort_IT(htim->hdma[TIM_DMA_ID_CC3]); + break; + } + + case TIM_CHANNEL_4: + { + /* Disable the TIM Capture/Compare 4 interrupt */ + __HAL_TIM_DISABLE_DMA(htim, TIM_DMA_CC4); + (void)HAL_DMA_Abort_IT(htim->hdma[TIM_DMA_ID_CC4]); + break; + } + + default: + status = HAL_ERROR; + break; + } + + if (status == HAL_OK) + { + /* Disable the Output compare channel */ + TIM_CCxChannelCmd(htim->Instance, Channel, TIM_CCx_DISABLE); + + if (IS_TIM_BREAK_INSTANCE(htim->Instance) != RESET) + { + /* Disable the Main Output */ + __HAL_TIM_MOE_DISABLE(htim); + } + + /* Disable the Peripheral */ + __HAL_TIM_DISABLE(htim); + + /* Set the TIM channel state */ + TIM_CHANNEL_STATE_SET(htim, Channel, HAL_TIM_CHANNEL_STATE_READY); + } + + /* Return function status */ + return status; +} + +/** + * @} + */ + +/** @defgroup TIM_Exported_Functions_Group3 TIM PWM functions + * @brief TIM PWM functions + * +@verbatim + ============================================================================== + ##### TIM PWM functions ##### + ============================================================================== + [..] + This section provides functions allowing to: + (+) Initialize and configure the TIM PWM. + (+) De-initialize the TIM PWM. + (+) Start the TIM PWM. + (+) Stop the TIM PWM. + (+) Start the TIM PWM and enable interrupt. + (+) Stop the TIM PWM and disable interrupt. + (+) Start the TIM PWM and enable DMA transfer. + (+) Stop the TIM PWM and disable DMA transfer. + +@endverbatim + * @{ + */ +/** + * @brief Initializes the TIM PWM Time Base according to the specified + * parameters in the TIM_HandleTypeDef and initializes the associated handle. 
+ * @note Switching from Center Aligned counter mode to Edge counter mode (or reverse) + * requires a timer reset to avoid unexpected direction + * due to DIR bit readonly in center aligned mode. + * Ex: call @ref HAL_TIM_PWM_DeInit() before HAL_TIM_PWM_Init() + * @param htim TIM PWM handle + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIM_PWM_Init(TIM_HandleTypeDef *htim) +{ + /* Check the TIM handle allocation */ + if (htim == NULL) + { + return HAL_ERROR; + } + + /* Check the parameters */ + assert_param(IS_TIM_INSTANCE(htim->Instance)); + assert_param(IS_TIM_COUNTER_MODE(htim->Init.CounterMode)); + assert_param(IS_TIM_CLOCKDIVISION_DIV(htim->Init.ClockDivision)); + assert_param(IS_TIM_AUTORELOAD_PRELOAD(htim->Init.AutoReloadPreload)); + + if (htim->State == HAL_TIM_STATE_RESET) + { + /* Allocate lock resource and initialize it */ + htim->Lock = HAL_UNLOCKED; + +#if (USE_HAL_TIM_REGISTER_CALLBACKS == 1) + /* Reset interrupt callbacks to legacy weak callbacks */ + TIM_ResetCallback(htim); + + if (htim->PWM_MspInitCallback == NULL) + { + htim->PWM_MspInitCallback = HAL_TIM_PWM_MspInit; + } + /* Init the low level hardware : GPIO, CLOCK, NVIC */ + htim->PWM_MspInitCallback(htim); +#else + /* Init the low level hardware : GPIO, CLOCK, NVIC and DMA */ + HAL_TIM_PWM_MspInit(htim); +#endif /* USE_HAL_TIM_REGISTER_CALLBACKS */ + } + + /* Set the TIM state */ + htim->State = HAL_TIM_STATE_BUSY; + + /* Init the base time for the PWM */ + TIM_Base_SetConfig(htim->Instance, &htim->Init); + + /* Initialize the DMA burst operation state */ + htim->DMABurstState = HAL_DMA_BURST_STATE_READY; + + /* Initialize the TIM channels state */ + TIM_CHANNEL_STATE_SET_ALL(htim, HAL_TIM_CHANNEL_STATE_READY); + TIM_CHANNEL_N_STATE_SET_ALL(htim, HAL_TIM_CHANNEL_STATE_READY); + + /* Initialize the TIM state*/ + htim->State = HAL_TIM_STATE_READY; + + return HAL_OK; +} + +/** + * @brief DeInitializes the TIM peripheral + * @param htim TIM PWM handle + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIM_PWM_DeInit(TIM_HandleTypeDef *htim) +{ + /* Check the parameters */ + assert_param(IS_TIM_INSTANCE(htim->Instance)); + + htim->State = HAL_TIM_STATE_BUSY; + + /* Disable the TIM Peripheral Clock */ + __HAL_TIM_DISABLE(htim); + +#if (USE_HAL_TIM_REGISTER_CALLBACKS == 1) + if (htim->PWM_MspDeInitCallback == NULL) + { + htim->PWM_MspDeInitCallback = HAL_TIM_PWM_MspDeInit; + } + /* DeInit the low level hardware */ + htim->PWM_MspDeInitCallback(htim); +#else + /* DeInit the low level hardware: GPIO, CLOCK, NVIC and DMA */ + HAL_TIM_PWM_MspDeInit(htim); +#endif /* USE_HAL_TIM_REGISTER_CALLBACKS */ + + /* Change the DMA burst operation state */ + htim->DMABurstState = HAL_DMA_BURST_STATE_RESET; + + /* Change the TIM channels state */ + TIM_CHANNEL_STATE_SET_ALL(htim, HAL_TIM_CHANNEL_STATE_RESET); + TIM_CHANNEL_N_STATE_SET_ALL(htim, HAL_TIM_CHANNEL_STATE_RESET); + + /* Change TIM state */ + htim->State = HAL_TIM_STATE_RESET; + + /* Release Lock */ + __HAL_UNLOCK(htim); + + return HAL_OK; +} + +/** + * @brief Initializes the TIM PWM MSP. + * @param htim TIM PWM handle + * @retval None + */ +__weak void HAL_TIM_PWM_MspInit(TIM_HandleTypeDef *htim) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(htim); + + /* NOTE : This function should not be modified, when the callback is needed, + the HAL_TIM_PWM_MspInit could be implemented in the user file + */ +} + +/** + * @brief DeInitializes TIM PWM MSP. 
+ * @param htim TIM PWM handle + * @retval None + */ +__weak void HAL_TIM_PWM_MspDeInit(TIM_HandleTypeDef *htim) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(htim); + + /* NOTE : This function should not be modified, when the callback is needed, + the HAL_TIM_PWM_MspDeInit could be implemented in the user file + */ +} + +/** + * @brief Starts the PWM signal generation. + * @param htim TIM handle + * @param Channel TIM Channels to be enabled + * This parameter can be one of the following values: + * @arg TIM_CHANNEL_1: TIM Channel 1 selected + * @arg TIM_CHANNEL_2: TIM Channel 2 selected + * @arg TIM_CHANNEL_3: TIM Channel 3 selected + * @arg TIM_CHANNEL_4: TIM Channel 4 selected + * @arg TIM_CHANNEL_5: TIM Channel 5 selected + * @arg TIM_CHANNEL_6: TIM Channel 6 selected + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIM_PWM_Start(TIM_HandleTypeDef *htim, uint32_t Channel) +{ + uint32_t tmpsmcr; + + /* Check the parameters */ + assert_param(IS_TIM_CCX_INSTANCE(htim->Instance, Channel)); + + /* Check the TIM channel state */ + if (TIM_CHANNEL_STATE_GET(htim, Channel) != HAL_TIM_CHANNEL_STATE_READY) + { + return HAL_ERROR; + } + + /* Set the TIM channel state */ + TIM_CHANNEL_STATE_SET(htim, Channel, HAL_TIM_CHANNEL_STATE_BUSY); + + /* Enable the Capture compare channel */ + TIM_CCxChannelCmd(htim->Instance, Channel, TIM_CCx_ENABLE); + + if (IS_TIM_BREAK_INSTANCE(htim->Instance) != RESET) + { + /* Enable the main output */ + __HAL_TIM_MOE_ENABLE(htim); + } + + /* Enable the Peripheral, except in trigger mode where enable is automatically done with trigger */ + if (IS_TIM_SLAVE_INSTANCE(htim->Instance)) + { + tmpsmcr = htim->Instance->SMCR & TIM_SMCR_SMS; + if (!IS_TIM_SLAVEMODE_TRIGGER_ENABLED(tmpsmcr)) + { + __HAL_TIM_ENABLE(htim); + } + } + else + { + __HAL_TIM_ENABLE(htim); + } + + /* Return function status */ + return HAL_OK; +} + +/** + * @brief Stops the PWM signal generation. + * @param htim TIM PWM handle + * @param Channel TIM Channels to be disabled + * This parameter can be one of the following values: + * @arg TIM_CHANNEL_1: TIM Channel 1 selected + * @arg TIM_CHANNEL_2: TIM Channel 2 selected + * @arg TIM_CHANNEL_3: TIM Channel 3 selected + * @arg TIM_CHANNEL_4: TIM Channel 4 selected + * @arg TIM_CHANNEL_5: TIM Channel 5 selected + * @arg TIM_CHANNEL_6: TIM Channel 6 selected + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIM_PWM_Stop(TIM_HandleTypeDef *htim, uint32_t Channel) +{ + /* Check the parameters */ + assert_param(IS_TIM_CCX_INSTANCE(htim->Instance, Channel)); + + /* Disable the Capture compare channel */ + TIM_CCxChannelCmd(htim->Instance, Channel, TIM_CCx_DISABLE); + + if (IS_TIM_BREAK_INSTANCE(htim->Instance) != RESET) + { + /* Disable the Main Output */ + __HAL_TIM_MOE_DISABLE(htim); + } + + /* Disable the Peripheral */ + __HAL_TIM_DISABLE(htim); + + /* Set the TIM channel state */ + TIM_CHANNEL_STATE_SET(htim, Channel, HAL_TIM_CHANNEL_STATE_READY); + + /* Return function status */ + return HAL_OK; +} + +/** + * @brief Starts the PWM signal generation in interrupt mode. 
+ * @param htim TIM PWM handle + * @param Channel TIM Channel to be enabled + * This parameter can be one of the following values: + * @arg TIM_CHANNEL_1: TIM Channel 1 selected + * @arg TIM_CHANNEL_2: TIM Channel 2 selected + * @arg TIM_CHANNEL_3: TIM Channel 3 selected + * @arg TIM_CHANNEL_4: TIM Channel 4 selected + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIM_PWM_Start_IT(TIM_HandleTypeDef *htim, uint32_t Channel) +{ + HAL_StatusTypeDef status = HAL_OK; + uint32_t tmpsmcr; + + /* Check the parameters */ + assert_param(IS_TIM_CCX_INSTANCE(htim->Instance, Channel)); + + /* Check the TIM channel state */ + if (TIM_CHANNEL_STATE_GET(htim, Channel) != HAL_TIM_CHANNEL_STATE_READY) + { + return HAL_ERROR; + } + + /* Set the TIM channel state */ + TIM_CHANNEL_STATE_SET(htim, Channel, HAL_TIM_CHANNEL_STATE_BUSY); + + switch (Channel) + { + case TIM_CHANNEL_1: + { + /* Enable the TIM Capture/Compare 1 interrupt */ + __HAL_TIM_ENABLE_IT(htim, TIM_IT_CC1); + break; + } + + case TIM_CHANNEL_2: + { + /* Enable the TIM Capture/Compare 2 interrupt */ + __HAL_TIM_ENABLE_IT(htim, TIM_IT_CC2); + break; + } + + case TIM_CHANNEL_3: + { + /* Enable the TIM Capture/Compare 3 interrupt */ + __HAL_TIM_ENABLE_IT(htim, TIM_IT_CC3); + break; + } + + case TIM_CHANNEL_4: + { + /* Enable the TIM Capture/Compare 4 interrupt */ + __HAL_TIM_ENABLE_IT(htim, TIM_IT_CC4); + break; + } + + default: + status = HAL_ERROR; + break; + } + + if (status == HAL_OK) + { + /* Enable the Capture compare channel */ + TIM_CCxChannelCmd(htim->Instance, Channel, TIM_CCx_ENABLE); + + if (IS_TIM_BREAK_INSTANCE(htim->Instance) != RESET) + { + /* Enable the main output */ + __HAL_TIM_MOE_ENABLE(htim); + } + + /* Enable the Peripheral, except in trigger mode where enable is automatically done with trigger */ + if (IS_TIM_SLAVE_INSTANCE(htim->Instance)) + { + tmpsmcr = htim->Instance->SMCR & TIM_SMCR_SMS; + if (!IS_TIM_SLAVEMODE_TRIGGER_ENABLED(tmpsmcr)) + { + __HAL_TIM_ENABLE(htim); + } + } + else + { + __HAL_TIM_ENABLE(htim); + } + } + + /* Return function status */ + return status; +} + +/** + * @brief Stops the PWM signal generation in interrupt mode. 
+ * @param htim TIM PWM handle + * @param Channel TIM Channels to be disabled + * This parameter can be one of the following values: + * @arg TIM_CHANNEL_1: TIM Channel 1 selected + * @arg TIM_CHANNEL_2: TIM Channel 2 selected + * @arg TIM_CHANNEL_3: TIM Channel 3 selected + * @arg TIM_CHANNEL_4: TIM Channel 4 selected + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIM_PWM_Stop_IT(TIM_HandleTypeDef *htim, uint32_t Channel) +{ + HAL_StatusTypeDef status = HAL_OK; + + /* Check the parameters */ + assert_param(IS_TIM_CCX_INSTANCE(htim->Instance, Channel)); + + switch (Channel) + { + case TIM_CHANNEL_1: + { + /* Disable the TIM Capture/Compare 1 interrupt */ + __HAL_TIM_DISABLE_IT(htim, TIM_IT_CC1); + break; + } + + case TIM_CHANNEL_2: + { + /* Disable the TIM Capture/Compare 2 interrupt */ + __HAL_TIM_DISABLE_IT(htim, TIM_IT_CC2); + break; + } + + case TIM_CHANNEL_3: + { + /* Disable the TIM Capture/Compare 3 interrupt */ + __HAL_TIM_DISABLE_IT(htim, TIM_IT_CC3); + break; + } + + case TIM_CHANNEL_4: + { + /* Disable the TIM Capture/Compare 4 interrupt */ + __HAL_TIM_DISABLE_IT(htim, TIM_IT_CC4); + break; + } + + default: + status = HAL_ERROR; + break; + } + + if (status == HAL_OK) + { + /* Disable the Capture compare channel */ + TIM_CCxChannelCmd(htim->Instance, Channel, TIM_CCx_DISABLE); + + if (IS_TIM_BREAK_INSTANCE(htim->Instance) != RESET) + { + /* Disable the Main Output */ + __HAL_TIM_MOE_DISABLE(htim); + } + + /* Disable the Peripheral */ + __HAL_TIM_DISABLE(htim); + + /* Set the TIM channel state */ + TIM_CHANNEL_STATE_SET(htim, Channel, HAL_TIM_CHANNEL_STATE_READY); + } + + /* Return function status */ + return status; +} + +/** + * @brief Starts the TIM PWM signal generation in DMA mode. + * @param htim TIM PWM handle + * @param Channel TIM Channels to be enabled + * This parameter can be one of the following values: + * @arg TIM_CHANNEL_1: TIM Channel 1 selected + * @arg TIM_CHANNEL_2: TIM Channel 2 selected + * @arg TIM_CHANNEL_3: TIM Channel 3 selected + * @arg TIM_CHANNEL_4: TIM Channel 4 selected + * @param pData The source Buffer address. 
+ * @param Length The length of data to be transferred from memory to TIM peripheral + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIM_PWM_Start_DMA(TIM_HandleTypeDef *htim, uint32_t Channel, uint32_t *pData, uint16_t Length) +{ + HAL_StatusTypeDef status = HAL_OK; + uint32_t tmpsmcr; + + /* Check the parameters */ + assert_param(IS_TIM_CCX_INSTANCE(htim->Instance, Channel)); + + /* Set the TIM channel state */ + if (TIM_CHANNEL_STATE_GET(htim, Channel) == HAL_TIM_CHANNEL_STATE_BUSY) + { + return HAL_BUSY; + } + else if (TIM_CHANNEL_STATE_GET(htim, Channel) == HAL_TIM_CHANNEL_STATE_READY) + { + if ((pData == NULL) && (Length > 0U)) + { + return HAL_ERROR; + } + else + { + TIM_CHANNEL_STATE_SET(htim, Channel, HAL_TIM_CHANNEL_STATE_BUSY); + } + } + else + { + return HAL_ERROR; + } + + switch (Channel) + { + case TIM_CHANNEL_1: + { + /* Set the DMA compare callbacks */ + htim->hdma[TIM_DMA_ID_CC1]->XferCpltCallback = TIM_DMADelayPulseCplt; + htim->hdma[TIM_DMA_ID_CC1]->XferHalfCpltCallback = TIM_DMADelayPulseHalfCplt; + + /* Set the DMA error callback */ + htim->hdma[TIM_DMA_ID_CC1]->XferErrorCallback = TIM_DMAError ; + + /* Enable the DMA stream */ + if (HAL_DMA_Start_IT(htim->hdma[TIM_DMA_ID_CC1], (uint32_t)pData, (uint32_t)&htim->Instance->CCR1, + Length) != HAL_OK) + { + /* Return error status */ + return HAL_ERROR; + } + + /* Enable the TIM Capture/Compare 1 DMA request */ + __HAL_TIM_ENABLE_DMA(htim, TIM_DMA_CC1); + break; + } + + case TIM_CHANNEL_2: + { + /* Set the DMA compare callbacks */ + htim->hdma[TIM_DMA_ID_CC2]->XferCpltCallback = TIM_DMADelayPulseCplt; + htim->hdma[TIM_DMA_ID_CC2]->XferHalfCpltCallback = TIM_DMADelayPulseHalfCplt; + + /* Set the DMA error callback */ + htim->hdma[TIM_DMA_ID_CC2]->XferErrorCallback = TIM_DMAError ; + + /* Enable the DMA stream */ + if (HAL_DMA_Start_IT(htim->hdma[TIM_DMA_ID_CC2], (uint32_t)pData, (uint32_t)&htim->Instance->CCR2, + Length) != HAL_OK) + { + /* Return error status */ + return HAL_ERROR; + } + /* Enable the TIM Capture/Compare 2 DMA request */ + __HAL_TIM_ENABLE_DMA(htim, TIM_DMA_CC2); + break; + } + + case TIM_CHANNEL_3: + { + /* Set the DMA compare callbacks */ + htim->hdma[TIM_DMA_ID_CC3]->XferCpltCallback = TIM_DMADelayPulseCplt; + htim->hdma[TIM_DMA_ID_CC3]->XferHalfCpltCallback = TIM_DMADelayPulseHalfCplt; + + /* Set the DMA error callback */ + htim->hdma[TIM_DMA_ID_CC3]->XferErrorCallback = TIM_DMAError ; + + /* Enable the DMA stream */ + if (HAL_DMA_Start_IT(htim->hdma[TIM_DMA_ID_CC3], (uint32_t)pData, (uint32_t)&htim->Instance->CCR3, + Length) != HAL_OK) + { + /* Return error status */ + return HAL_ERROR; + } + /* Enable the TIM Output Capture/Compare 3 request */ + __HAL_TIM_ENABLE_DMA(htim, TIM_DMA_CC3); + break; + } + + case TIM_CHANNEL_4: + { + /* Set the DMA compare callbacks */ + htim->hdma[TIM_DMA_ID_CC4]->XferCpltCallback = TIM_DMADelayPulseCplt; + htim->hdma[TIM_DMA_ID_CC4]->XferHalfCpltCallback = TIM_DMADelayPulseHalfCplt; + + /* Set the DMA error callback */ + htim->hdma[TIM_DMA_ID_CC4]->XferErrorCallback = TIM_DMAError ; + + /* Enable the DMA stream */ + if (HAL_DMA_Start_IT(htim->hdma[TIM_DMA_ID_CC4], (uint32_t)pData, (uint32_t)&htim->Instance->CCR4, + Length) != HAL_OK) + { + /* Return error status */ + return HAL_ERROR; + } + /* Enable the TIM Capture/Compare 4 DMA request */ + __HAL_TIM_ENABLE_DMA(htim, TIM_DMA_CC4); + break; + } + + default: + status = HAL_ERROR; + break; + } + + if (status == HAL_OK) + { + /* Enable the Capture compare channel */ + TIM_CCxChannelCmd(htim->Instance, Channel, 
TIM_CCx_ENABLE); + + if (IS_TIM_BREAK_INSTANCE(htim->Instance) != RESET) + { + /* Enable the main output */ + __HAL_TIM_MOE_ENABLE(htim); + } + + /* Enable the Peripheral, except in trigger mode where enable is automatically done with trigger */ + if (IS_TIM_SLAVE_INSTANCE(htim->Instance)) + { + tmpsmcr = htim->Instance->SMCR & TIM_SMCR_SMS; + if (!IS_TIM_SLAVEMODE_TRIGGER_ENABLED(tmpsmcr)) + { + __HAL_TIM_ENABLE(htim); + } + } + else + { + __HAL_TIM_ENABLE(htim); + } + } + + /* Return function status */ + return status; +} + +/** + * @brief Stops the TIM PWM signal generation in DMA mode. + * @param htim TIM PWM handle + * @param Channel TIM Channels to be disabled + * This parameter can be one of the following values: + * @arg TIM_CHANNEL_1: TIM Channel 1 selected + * @arg TIM_CHANNEL_2: TIM Channel 2 selected + * @arg TIM_CHANNEL_3: TIM Channel 3 selected + * @arg TIM_CHANNEL_4: TIM Channel 4 selected + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIM_PWM_Stop_DMA(TIM_HandleTypeDef *htim, uint32_t Channel) +{ + HAL_StatusTypeDef status = HAL_OK; + + /* Check the parameters */ + assert_param(IS_TIM_CCX_INSTANCE(htim->Instance, Channel)); + + switch (Channel) + { + case TIM_CHANNEL_1: + { + /* Disable the TIM Capture/Compare 1 DMA request */ + __HAL_TIM_DISABLE_DMA(htim, TIM_DMA_CC1); + (void)HAL_DMA_Abort_IT(htim->hdma[TIM_DMA_ID_CC1]); + break; + } + + case TIM_CHANNEL_2: + { + /* Disable the TIM Capture/Compare 2 DMA request */ + __HAL_TIM_DISABLE_DMA(htim, TIM_DMA_CC2); + (void)HAL_DMA_Abort_IT(htim->hdma[TIM_DMA_ID_CC2]); + break; + } + + case TIM_CHANNEL_3: + { + /* Disable the TIM Capture/Compare 3 DMA request */ + __HAL_TIM_DISABLE_DMA(htim, TIM_DMA_CC3); + (void)HAL_DMA_Abort_IT(htim->hdma[TIM_DMA_ID_CC3]); + break; + } + + case TIM_CHANNEL_4: + { + /* Disable the TIM Capture/Compare 4 interrupt */ + __HAL_TIM_DISABLE_DMA(htim, TIM_DMA_CC4); + (void)HAL_DMA_Abort_IT(htim->hdma[TIM_DMA_ID_CC4]); + break; + } + + default: + status = HAL_ERROR; + break; + } + + if (status == HAL_OK) + { + /* Disable the Capture compare channel */ + TIM_CCxChannelCmd(htim->Instance, Channel, TIM_CCx_DISABLE); + + if (IS_TIM_BREAK_INSTANCE(htim->Instance) != RESET) + { + /* Disable the Main Output */ + __HAL_TIM_MOE_DISABLE(htim); + } + + /* Disable the Peripheral */ + __HAL_TIM_DISABLE(htim); + + /* Set the TIM channel state */ + TIM_CHANNEL_STATE_SET(htim, Channel, HAL_TIM_CHANNEL_STATE_READY); + } + + /* Return function status */ + return status; +} + +/** + * @} + */ + +/** @defgroup TIM_Exported_Functions_Group4 TIM Input Capture functions + * @brief TIM Input Capture functions + * +@verbatim + ============================================================================== + ##### TIM Input Capture functions ##### + ============================================================================== + [..] + This section provides functions allowing to: + (+) Initialize and configure the TIM Input Capture. + (+) De-initialize the TIM Input Capture. + (+) Start the TIM Input Capture. + (+) Stop the TIM Input Capture. + (+) Start the TIM Input Capture and enable interrupt. + (+) Stop the TIM Input Capture and disable interrupt. + (+) Start the TIM Input Capture and enable DMA transfer. + (+) Stop the TIM Input Capture and disable DMA transfer. + +@endverbatim + * @{ + */ +/** + * @brief Initializes the TIM Input Capture Time base according to the specified + * parameters in the TIM_HandleTypeDef and initializes the associated handle. 
+ * @note Switching from Center Aligned counter mode to Edge counter mode (or reverse) + * requires a timer reset to avoid unexpected direction + * due to DIR bit readonly in center aligned mode. + * Ex: call @ref HAL_TIM_IC_DeInit() before HAL_TIM_IC_Init() + * @param htim TIM Input Capture handle + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIM_IC_Init(TIM_HandleTypeDef *htim) +{ + /* Check the TIM handle allocation */ + if (htim == NULL) + { + return HAL_ERROR; + } + + /* Check the parameters */ + assert_param(IS_TIM_INSTANCE(htim->Instance)); + assert_param(IS_TIM_COUNTER_MODE(htim->Init.CounterMode)); + assert_param(IS_TIM_CLOCKDIVISION_DIV(htim->Init.ClockDivision)); + assert_param(IS_TIM_AUTORELOAD_PRELOAD(htim->Init.AutoReloadPreload)); + + if (htim->State == HAL_TIM_STATE_RESET) + { + /* Allocate lock resource and initialize it */ + htim->Lock = HAL_UNLOCKED; + +#if (USE_HAL_TIM_REGISTER_CALLBACKS == 1) + /* Reset interrupt callbacks to legacy weak callbacks */ + TIM_ResetCallback(htim); + + if (htim->IC_MspInitCallback == NULL) + { + htim->IC_MspInitCallback = HAL_TIM_IC_MspInit; + } + /* Init the low level hardware : GPIO, CLOCK, NVIC */ + htim->IC_MspInitCallback(htim); +#else + /* Init the low level hardware : GPIO, CLOCK, NVIC and DMA */ + HAL_TIM_IC_MspInit(htim); +#endif /* USE_HAL_TIM_REGISTER_CALLBACKS */ + } + + /* Set the TIM state */ + htim->State = HAL_TIM_STATE_BUSY; + + /* Init the base time for the input capture */ + TIM_Base_SetConfig(htim->Instance, &htim->Init); + + /* Initialize the DMA burst operation state */ + htim->DMABurstState = HAL_DMA_BURST_STATE_READY; + + /* Initialize the TIM channels state */ + TIM_CHANNEL_STATE_SET_ALL(htim, HAL_TIM_CHANNEL_STATE_READY); + TIM_CHANNEL_N_STATE_SET_ALL(htim, HAL_TIM_CHANNEL_STATE_READY); + + /* Initialize the TIM state*/ + htim->State = HAL_TIM_STATE_READY; + + return HAL_OK; +} + +/** + * @brief DeInitializes the TIM peripheral + * @param htim TIM Input Capture handle + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIM_IC_DeInit(TIM_HandleTypeDef *htim) +{ + /* Check the parameters */ + assert_param(IS_TIM_INSTANCE(htim->Instance)); + + htim->State = HAL_TIM_STATE_BUSY; + + /* Disable the TIM Peripheral Clock */ + __HAL_TIM_DISABLE(htim); + +#if (USE_HAL_TIM_REGISTER_CALLBACKS == 1) + if (htim->IC_MspDeInitCallback == NULL) + { + htim->IC_MspDeInitCallback = HAL_TIM_IC_MspDeInit; + } + /* DeInit the low level hardware */ + htim->IC_MspDeInitCallback(htim); +#else + /* DeInit the low level hardware: GPIO, CLOCK, NVIC and DMA */ + HAL_TIM_IC_MspDeInit(htim); +#endif /* USE_HAL_TIM_REGISTER_CALLBACKS */ + + /* Change the DMA burst operation state */ + htim->DMABurstState = HAL_DMA_BURST_STATE_RESET; + + /* Change the TIM channels state */ + TIM_CHANNEL_STATE_SET_ALL(htim, HAL_TIM_CHANNEL_STATE_RESET); + TIM_CHANNEL_N_STATE_SET_ALL(htim, HAL_TIM_CHANNEL_STATE_RESET); + + /* Change TIM state */ + htim->State = HAL_TIM_STATE_RESET; + + /* Release Lock */ + __HAL_UNLOCK(htim); + + return HAL_OK; +} + +/** + * @brief Initializes the TIM Input Capture MSP. + * @param htim TIM Input Capture handle + * @retval None + */ +__weak void HAL_TIM_IC_MspInit(TIM_HandleTypeDef *htim) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(htim); + + /* NOTE : This function should not be modified, when the callback is needed, + the HAL_TIM_IC_MspInit could be implemented in the user file + */ +} + +/** + * @brief DeInitializes TIM Input Capture MSP. 
+ * @param htim TIM handle + * @retval None + */ +__weak void HAL_TIM_IC_MspDeInit(TIM_HandleTypeDef *htim) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(htim); + + /* NOTE : This function should not be modified, when the callback is needed, + the HAL_TIM_IC_MspDeInit could be implemented in the user file + */ +} + +/** + * @brief Starts the TIM Input Capture measurement. + * @param htim TIM Input Capture handle + * @param Channel TIM Channels to be enabled + * This parameter can be one of the following values: + * @arg TIM_CHANNEL_1: TIM Channel 1 selected + * @arg TIM_CHANNEL_2: TIM Channel 2 selected + * @arg TIM_CHANNEL_3: TIM Channel 3 selected + * @arg TIM_CHANNEL_4: TIM Channel 4 selected + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIM_IC_Start(TIM_HandleTypeDef *htim, uint32_t Channel) +{ + uint32_t tmpsmcr; + HAL_TIM_ChannelStateTypeDef channel_state = TIM_CHANNEL_STATE_GET(htim, Channel); + HAL_TIM_ChannelStateTypeDef complementary_channel_state = TIM_CHANNEL_N_STATE_GET(htim, Channel); + + /* Check the parameters */ + assert_param(IS_TIM_CCX_INSTANCE(htim->Instance, Channel)); + + /* Check the TIM channel state */ + if ((channel_state != HAL_TIM_CHANNEL_STATE_READY) + || (complementary_channel_state != HAL_TIM_CHANNEL_STATE_READY)) + { + return HAL_ERROR; + } + + /* Set the TIM channel state */ + TIM_CHANNEL_STATE_SET(htim, Channel, HAL_TIM_CHANNEL_STATE_BUSY); + TIM_CHANNEL_N_STATE_SET(htim, Channel, HAL_TIM_CHANNEL_STATE_BUSY); + + /* Enable the Input Capture channel */ + TIM_CCxChannelCmd(htim->Instance, Channel, TIM_CCx_ENABLE); + + /* Enable the Peripheral, except in trigger mode where enable is automatically done with trigger */ + if (IS_TIM_SLAVE_INSTANCE(htim->Instance)) + { + tmpsmcr = htim->Instance->SMCR & TIM_SMCR_SMS; + if (!IS_TIM_SLAVEMODE_TRIGGER_ENABLED(tmpsmcr)) + { + __HAL_TIM_ENABLE(htim); + } + } + else + { + __HAL_TIM_ENABLE(htim); + } + + /* Return function status */ + return HAL_OK; +} + +/** + * @brief Stops the TIM Input Capture measurement. + * @param htim TIM Input Capture handle + * @param Channel TIM Channels to be disabled + * This parameter can be one of the following values: + * @arg TIM_CHANNEL_1: TIM Channel 1 selected + * @arg TIM_CHANNEL_2: TIM Channel 2 selected + * @arg TIM_CHANNEL_3: TIM Channel 3 selected + * @arg TIM_CHANNEL_4: TIM Channel 4 selected + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIM_IC_Stop(TIM_HandleTypeDef *htim, uint32_t Channel) +{ + /* Check the parameters */ + assert_param(IS_TIM_CCX_INSTANCE(htim->Instance, Channel)); + + /* Disable the Input Capture channel */ + TIM_CCxChannelCmd(htim->Instance, Channel, TIM_CCx_DISABLE); + + /* Disable the Peripheral */ + __HAL_TIM_DISABLE(htim); + + /* Set the TIM channel state */ + TIM_CHANNEL_STATE_SET(htim, Channel, HAL_TIM_CHANNEL_STATE_READY); + TIM_CHANNEL_N_STATE_SET(htim, Channel, HAL_TIM_CHANNEL_STATE_READY); + + /* Return function status */ + return HAL_OK; +} + +/** + * @brief Starts the TIM Input Capture measurement in interrupt mode. 
+ * @param htim TIM Input Capture handle + * @param Channel TIM Channels to be enabled + * This parameter can be one of the following values: + * @arg TIM_CHANNEL_1: TIM Channel 1 selected + * @arg TIM_CHANNEL_2: TIM Channel 2 selected + * @arg TIM_CHANNEL_3: TIM Channel 3 selected + * @arg TIM_CHANNEL_4: TIM Channel 4 selected + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIM_IC_Start_IT(TIM_HandleTypeDef *htim, uint32_t Channel) +{ + HAL_StatusTypeDef status = HAL_OK; + uint32_t tmpsmcr; + + HAL_TIM_ChannelStateTypeDef channel_state = TIM_CHANNEL_STATE_GET(htim, Channel); + HAL_TIM_ChannelStateTypeDef complementary_channel_state = TIM_CHANNEL_N_STATE_GET(htim, Channel); + + /* Check the parameters */ + assert_param(IS_TIM_CCX_INSTANCE(htim->Instance, Channel)); + + /* Check the TIM channel state */ + if ((channel_state != HAL_TIM_CHANNEL_STATE_READY) + || (complementary_channel_state != HAL_TIM_CHANNEL_STATE_READY)) + { + return HAL_ERROR; + } + + /* Set the TIM channel state */ + TIM_CHANNEL_STATE_SET(htim, Channel, HAL_TIM_CHANNEL_STATE_BUSY); + TIM_CHANNEL_N_STATE_SET(htim, Channel, HAL_TIM_CHANNEL_STATE_BUSY); + + switch (Channel) + { + case TIM_CHANNEL_1: + { + /* Enable the TIM Capture/Compare 1 interrupt */ + __HAL_TIM_ENABLE_IT(htim, TIM_IT_CC1); + break; + } + + case TIM_CHANNEL_2: + { + /* Enable the TIM Capture/Compare 2 interrupt */ + __HAL_TIM_ENABLE_IT(htim, TIM_IT_CC2); + break; + } + + case TIM_CHANNEL_3: + { + /* Enable the TIM Capture/Compare 3 interrupt */ + __HAL_TIM_ENABLE_IT(htim, TIM_IT_CC3); + break; + } + + case TIM_CHANNEL_4: + { + /* Enable the TIM Capture/Compare 4 interrupt */ + __HAL_TIM_ENABLE_IT(htim, TIM_IT_CC4); + break; + } + + default: + status = HAL_ERROR; + break; + } + + if (status == HAL_OK) + { + /* Enable the Input Capture channel */ + TIM_CCxChannelCmd(htim->Instance, Channel, TIM_CCx_ENABLE); + + /* Enable the Peripheral, except in trigger mode where enable is automatically done with trigger */ + if (IS_TIM_SLAVE_INSTANCE(htim->Instance)) + { + tmpsmcr = htim->Instance->SMCR & TIM_SMCR_SMS; + if (!IS_TIM_SLAVEMODE_TRIGGER_ENABLED(tmpsmcr)) + { + __HAL_TIM_ENABLE(htim); + } + } + else + { + __HAL_TIM_ENABLE(htim); + } + } + + /* Return function status */ + return status; +} + +/** + * @brief Stops the TIM Input Capture measurement in interrupt mode. 
+ * @param htim TIM Input Capture handle + * @param Channel TIM Channels to be disabled + * This parameter can be one of the following values: + * @arg TIM_CHANNEL_1: TIM Channel 1 selected + * @arg TIM_CHANNEL_2: TIM Channel 2 selected + * @arg TIM_CHANNEL_3: TIM Channel 3 selected + * @arg TIM_CHANNEL_4: TIM Channel 4 selected + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIM_IC_Stop_IT(TIM_HandleTypeDef *htim, uint32_t Channel) +{ + HAL_StatusTypeDef status = HAL_OK; + + /* Check the parameters */ + assert_param(IS_TIM_CCX_INSTANCE(htim->Instance, Channel)); + + switch (Channel) + { + case TIM_CHANNEL_1: + { + /* Disable the TIM Capture/Compare 1 interrupt */ + __HAL_TIM_DISABLE_IT(htim, TIM_IT_CC1); + break; + } + + case TIM_CHANNEL_2: + { + /* Disable the TIM Capture/Compare 2 interrupt */ + __HAL_TIM_DISABLE_IT(htim, TIM_IT_CC2); + break; + } + + case TIM_CHANNEL_3: + { + /* Disable the TIM Capture/Compare 3 interrupt */ + __HAL_TIM_DISABLE_IT(htim, TIM_IT_CC3); + break; + } + + case TIM_CHANNEL_4: + { + /* Disable the TIM Capture/Compare 4 interrupt */ + __HAL_TIM_DISABLE_IT(htim, TIM_IT_CC4); + break; + } + + default: + status = HAL_ERROR; + break; + } + + if (status == HAL_OK) + { + /* Disable the Input Capture channel */ + TIM_CCxChannelCmd(htim->Instance, Channel, TIM_CCx_DISABLE); + + /* Disable the Peripheral */ + __HAL_TIM_DISABLE(htim); + + /* Set the TIM channel state */ + TIM_CHANNEL_STATE_SET(htim, Channel, HAL_TIM_CHANNEL_STATE_READY); + TIM_CHANNEL_N_STATE_SET(htim, Channel, HAL_TIM_CHANNEL_STATE_READY); + } + + /* Return function status */ + return status; +} + +/** + * @brief Starts the TIM Input Capture measurement in DMA mode. + * @param htim TIM Input Capture handle + * @param Channel TIM Channels to be enabled + * This parameter can be one of the following values: + * @arg TIM_CHANNEL_1: TIM Channel 1 selected + * @arg TIM_CHANNEL_2: TIM Channel 2 selected + * @arg TIM_CHANNEL_3: TIM Channel 3 selected + * @arg TIM_CHANNEL_4: TIM Channel 4 selected + * @param pData The destination Buffer address. + * @param Length The length of data to be transferred from TIM peripheral to memory. 
+ * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIM_IC_Start_DMA(TIM_HandleTypeDef *htim, uint32_t Channel, uint32_t *pData, uint16_t Length) +{ + HAL_StatusTypeDef status = HAL_OK; + uint32_t tmpsmcr; + + HAL_TIM_ChannelStateTypeDef channel_state = TIM_CHANNEL_STATE_GET(htim, Channel); + HAL_TIM_ChannelStateTypeDef complementary_channel_state = TIM_CHANNEL_N_STATE_GET(htim, Channel); + + /* Check the parameters */ + assert_param(IS_TIM_CCX_INSTANCE(htim->Instance, Channel)); + assert_param(IS_TIM_DMA_CC_INSTANCE(htim->Instance)); + + /* Set the TIM channel state */ + if ((channel_state == HAL_TIM_CHANNEL_STATE_BUSY) + || (complementary_channel_state == HAL_TIM_CHANNEL_STATE_BUSY)) + { + return HAL_BUSY; + } + else if ((channel_state == HAL_TIM_CHANNEL_STATE_READY) + && (complementary_channel_state == HAL_TIM_CHANNEL_STATE_READY)) + { + if ((pData == NULL) && (Length > 0U)) + { + return HAL_ERROR; + } + else + { + TIM_CHANNEL_STATE_SET(htim, Channel, HAL_TIM_CHANNEL_STATE_BUSY); + TIM_CHANNEL_N_STATE_SET(htim, Channel, HAL_TIM_CHANNEL_STATE_BUSY); + } + } + else + { + return HAL_ERROR; + } + + /* Enable the Input Capture channel */ + TIM_CCxChannelCmd(htim->Instance, Channel, TIM_CCx_ENABLE); + + switch (Channel) + { + case TIM_CHANNEL_1: + { + /* Set the DMA capture callbacks */ + htim->hdma[TIM_DMA_ID_CC1]->XferCpltCallback = TIM_DMACaptureCplt; + htim->hdma[TIM_DMA_ID_CC1]->XferHalfCpltCallback = TIM_DMACaptureHalfCplt; + + /* Set the DMA error callback */ + htim->hdma[TIM_DMA_ID_CC1]->XferErrorCallback = TIM_DMAError ; + + /* Enable the DMA stream */ + if (HAL_DMA_Start_IT(htim->hdma[TIM_DMA_ID_CC1], (uint32_t)&htim->Instance->CCR1, (uint32_t)pData, + Length) != HAL_OK) + { + /* Return error status */ + return HAL_ERROR; + } + /* Enable the TIM Capture/Compare 1 DMA request */ + __HAL_TIM_ENABLE_DMA(htim, TIM_DMA_CC1); + break; + } + + case TIM_CHANNEL_2: + { + /* Set the DMA capture callbacks */ + htim->hdma[TIM_DMA_ID_CC2]->XferCpltCallback = TIM_DMACaptureCplt; + htim->hdma[TIM_DMA_ID_CC2]->XferHalfCpltCallback = TIM_DMACaptureHalfCplt; + + /* Set the DMA error callback */ + htim->hdma[TIM_DMA_ID_CC2]->XferErrorCallback = TIM_DMAError ; + + /* Enable the DMA stream */ + if (HAL_DMA_Start_IT(htim->hdma[TIM_DMA_ID_CC2], (uint32_t)&htim->Instance->CCR2, (uint32_t)pData, + Length) != HAL_OK) + { + /* Return error status */ + return HAL_ERROR; + } + /* Enable the TIM Capture/Compare 2 DMA request */ + __HAL_TIM_ENABLE_DMA(htim, TIM_DMA_CC2); + break; + } + + case TIM_CHANNEL_3: + { + /* Set the DMA capture callbacks */ + htim->hdma[TIM_DMA_ID_CC3]->XferCpltCallback = TIM_DMACaptureCplt; + htim->hdma[TIM_DMA_ID_CC3]->XferHalfCpltCallback = TIM_DMACaptureHalfCplt; + + /* Set the DMA error callback */ + htim->hdma[TIM_DMA_ID_CC3]->XferErrorCallback = TIM_DMAError ; + + /* Enable the DMA stream */ + if (HAL_DMA_Start_IT(htim->hdma[TIM_DMA_ID_CC3], (uint32_t)&htim->Instance->CCR3, (uint32_t)pData, + Length) != HAL_OK) + { + /* Return error status */ + return HAL_ERROR; + } + /* Enable the TIM Capture/Compare 3 DMA request */ + __HAL_TIM_ENABLE_DMA(htim, TIM_DMA_CC3); + break; + } + + case TIM_CHANNEL_4: + { + /* Set the DMA capture callbacks */ + htim->hdma[TIM_DMA_ID_CC4]->XferCpltCallback = TIM_DMACaptureCplt; + htim->hdma[TIM_DMA_ID_CC4]->XferHalfCpltCallback = TIM_DMACaptureHalfCplt; + + /* Set the DMA error callback */ + htim->hdma[TIM_DMA_ID_CC4]->XferErrorCallback = TIM_DMAError ; + + /* Enable the DMA stream */ + if (HAL_DMA_Start_IT(htim->hdma[TIM_DMA_ID_CC4], 
(uint32_t)&htim->Instance->CCR4, (uint32_t)pData, + Length) != HAL_OK) + { + /* Return error status */ + return HAL_ERROR; + } + /* Enable the TIM Capture/Compare 4 DMA request */ + __HAL_TIM_ENABLE_DMA(htim, TIM_DMA_CC4); + break; + } + + default: + status = HAL_ERROR; + break; + } + + /* Enable the Peripheral, except in trigger mode where enable is automatically done with trigger */ + if (IS_TIM_SLAVE_INSTANCE(htim->Instance)) + { + tmpsmcr = htim->Instance->SMCR & TIM_SMCR_SMS; + if (!IS_TIM_SLAVEMODE_TRIGGER_ENABLED(tmpsmcr)) + { + __HAL_TIM_ENABLE(htim); + } + } + else + { + __HAL_TIM_ENABLE(htim); + } + + /* Return function status */ + return status; +} + +/** + * @brief Stops the TIM Input Capture measurement in DMA mode. + * @param htim TIM Input Capture handle + * @param Channel TIM Channels to be disabled + * This parameter can be one of the following values: + * @arg TIM_CHANNEL_1: TIM Channel 1 selected + * @arg TIM_CHANNEL_2: TIM Channel 2 selected + * @arg TIM_CHANNEL_3: TIM Channel 3 selected + * @arg TIM_CHANNEL_4: TIM Channel 4 selected + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIM_IC_Stop_DMA(TIM_HandleTypeDef *htim, uint32_t Channel) +{ + HAL_StatusTypeDef status = HAL_OK; + + /* Check the parameters */ + assert_param(IS_TIM_CCX_INSTANCE(htim->Instance, Channel)); + assert_param(IS_TIM_DMA_CC_INSTANCE(htim->Instance)); + + /* Disable the Input Capture channel */ + TIM_CCxChannelCmd(htim->Instance, Channel, TIM_CCx_DISABLE); + + switch (Channel) + { + case TIM_CHANNEL_1: + { + /* Disable the TIM Capture/Compare 1 DMA request */ + __HAL_TIM_DISABLE_DMA(htim, TIM_DMA_CC1); + (void)HAL_DMA_Abort_IT(htim->hdma[TIM_DMA_ID_CC1]); + break; + } + + case TIM_CHANNEL_2: + { + /* Disable the TIM Capture/Compare 2 DMA request */ + __HAL_TIM_DISABLE_DMA(htim, TIM_DMA_CC2); + (void)HAL_DMA_Abort_IT(htim->hdma[TIM_DMA_ID_CC2]); + break; + } + + case TIM_CHANNEL_3: + { + /* Disable the TIM Capture/Compare 3 DMA request */ + __HAL_TIM_DISABLE_DMA(htim, TIM_DMA_CC3); + (void)HAL_DMA_Abort_IT(htim->hdma[TIM_DMA_ID_CC3]); + break; + } + + case TIM_CHANNEL_4: + { + /* Disable the TIM Capture/Compare 4 DMA request */ + __HAL_TIM_DISABLE_DMA(htim, TIM_DMA_CC4); + (void)HAL_DMA_Abort_IT(htim->hdma[TIM_DMA_ID_CC4]); + break; + } + + default: + status = HAL_ERROR; + break; + } + + if (status == HAL_OK) + { + /* Disable the Peripheral */ + __HAL_TIM_DISABLE(htim); + + /* Set the TIM channel state */ + TIM_CHANNEL_STATE_SET(htim, Channel, HAL_TIM_CHANNEL_STATE_READY); + TIM_CHANNEL_N_STATE_SET(htim, Channel, HAL_TIM_CHANNEL_STATE_READY); + } + + /* Return function status */ + return status; +} +/** + * @} + */ + +/** @defgroup TIM_Exported_Functions_Group5 TIM One Pulse functions + * @brief TIM One Pulse functions + * +@verbatim + ============================================================================== + ##### TIM One Pulse functions ##### + ============================================================================== + [..] + This section provides functions allowing to: + (+) Initialize and configure the TIM One Pulse. + (+) De-initialize the TIM One Pulse. + (+) Start the TIM One Pulse. + (+) Stop the TIM One Pulse. + (+) Start the TIM One Pulse and enable interrupt. + (+) Stop the TIM One Pulse and disable interrupt. + (+) Start the TIM One Pulse and enable DMA transfer. + (+) Stop the TIM One Pulse and disable DMA transfer. 
+ +@endverbatim + * @{ + */ +/** + * @brief Initializes the TIM One Pulse Time Base according to the specified + * parameters in the TIM_HandleTypeDef and initializes the associated handle. + * @note Switching from Center Aligned counter mode to Edge counter mode (or reverse) + * requires a timer reset to avoid unexpected direction + * due to DIR bit readonly in center aligned mode. + * Ex: call @ref HAL_TIM_OnePulse_DeInit() before HAL_TIM_OnePulse_Init() + * @note When the timer instance is initialized in One Pulse mode, timer + * channels 1 and channel 2 are reserved and cannot be used for other + * purpose. + * @param htim TIM One Pulse handle + * @param OnePulseMode Select the One pulse mode. + * This parameter can be one of the following values: + * @arg TIM_OPMODE_SINGLE: Only one pulse will be generated. + * @arg TIM_OPMODE_REPETITIVE: Repetitive pulses will be generated. + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIM_OnePulse_Init(TIM_HandleTypeDef *htim, uint32_t OnePulseMode) +{ + /* Check the TIM handle allocation */ + if (htim == NULL) + { + return HAL_ERROR; + } + + /* Check the parameters */ + assert_param(IS_TIM_INSTANCE(htim->Instance)); + assert_param(IS_TIM_COUNTER_MODE(htim->Init.CounterMode)); + assert_param(IS_TIM_CLOCKDIVISION_DIV(htim->Init.ClockDivision)); + assert_param(IS_TIM_OPM_MODE(OnePulseMode)); + assert_param(IS_TIM_AUTORELOAD_PRELOAD(htim->Init.AutoReloadPreload)); + + if (htim->State == HAL_TIM_STATE_RESET) + { + /* Allocate lock resource and initialize it */ + htim->Lock = HAL_UNLOCKED; + +#if (USE_HAL_TIM_REGISTER_CALLBACKS == 1) + /* Reset interrupt callbacks to legacy weak callbacks */ + TIM_ResetCallback(htim); + + if (htim->OnePulse_MspInitCallback == NULL) + { + htim->OnePulse_MspInitCallback = HAL_TIM_OnePulse_MspInit; + } + /* Init the low level hardware : GPIO, CLOCK, NVIC */ + htim->OnePulse_MspInitCallback(htim); +#else + /* Init the low level hardware : GPIO, CLOCK, NVIC and DMA */ + HAL_TIM_OnePulse_MspInit(htim); +#endif /* USE_HAL_TIM_REGISTER_CALLBACKS */ + } + + /* Set the TIM state */ + htim->State = HAL_TIM_STATE_BUSY; + + /* Configure the Time base in the One Pulse Mode */ + TIM_Base_SetConfig(htim->Instance, &htim->Init); + + /* Reset the OPM Bit */ + htim->Instance->CR1 &= ~TIM_CR1_OPM; + + /* Configure the OPM Mode */ + htim->Instance->CR1 |= OnePulseMode; + + /* Initialize the DMA burst operation state */ + htim->DMABurstState = HAL_DMA_BURST_STATE_READY; + + /* Initialize the TIM channels state */ + TIM_CHANNEL_STATE_SET(htim, TIM_CHANNEL_1, HAL_TIM_CHANNEL_STATE_READY); + TIM_CHANNEL_STATE_SET(htim, TIM_CHANNEL_2, HAL_TIM_CHANNEL_STATE_READY); + TIM_CHANNEL_N_STATE_SET(htim, TIM_CHANNEL_1, HAL_TIM_CHANNEL_STATE_READY); + TIM_CHANNEL_N_STATE_SET(htim, TIM_CHANNEL_2, HAL_TIM_CHANNEL_STATE_READY); + + /* Initialize the TIM state*/ + htim->State = HAL_TIM_STATE_READY; + + return HAL_OK; +} + +/** + * @brief DeInitializes the TIM One Pulse + * @param htim TIM One Pulse handle + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIM_OnePulse_DeInit(TIM_HandleTypeDef *htim) +{ + /* Check the parameters */ + assert_param(IS_TIM_INSTANCE(htim->Instance)); + + htim->State = HAL_TIM_STATE_BUSY; + + /* Disable the TIM Peripheral Clock */ + __HAL_TIM_DISABLE(htim); + +#if (USE_HAL_TIM_REGISTER_CALLBACKS == 1) + if (htim->OnePulse_MspDeInitCallback == NULL) + { + htim->OnePulse_MspDeInitCallback = HAL_TIM_OnePulse_MspDeInit; + } + /* DeInit the low level hardware */ + htim->OnePulse_MspDeInitCallback(htim); +#else + /* DeInit the low 
level hardware: GPIO, CLOCK, NVIC */ + HAL_TIM_OnePulse_MspDeInit(htim); +#endif /* USE_HAL_TIM_REGISTER_CALLBACKS */ + + /* Change the DMA burst operation state */ + htim->DMABurstState = HAL_DMA_BURST_STATE_RESET; + + /* Set the TIM channel state */ + TIM_CHANNEL_STATE_SET(htim, TIM_CHANNEL_1, HAL_TIM_CHANNEL_STATE_RESET); + TIM_CHANNEL_STATE_SET(htim, TIM_CHANNEL_2, HAL_TIM_CHANNEL_STATE_RESET); + TIM_CHANNEL_N_STATE_SET(htim, TIM_CHANNEL_1, HAL_TIM_CHANNEL_STATE_RESET); + TIM_CHANNEL_N_STATE_SET(htim, TIM_CHANNEL_2, HAL_TIM_CHANNEL_STATE_RESET); + + /* Change TIM state */ + htim->State = HAL_TIM_STATE_RESET; + + /* Release Lock */ + __HAL_UNLOCK(htim); + + return HAL_OK; +} + +/** + * @brief Initializes the TIM One Pulse MSP. + * @param htim TIM One Pulse handle + * @retval None + */ +__weak void HAL_TIM_OnePulse_MspInit(TIM_HandleTypeDef *htim) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(htim); + + /* NOTE : This function should not be modified, when the callback is needed, + the HAL_TIM_OnePulse_MspInit could be implemented in the user file + */ +} + +/** + * @brief DeInitializes TIM One Pulse MSP. + * @param htim TIM One Pulse handle + * @retval None + */ +__weak void HAL_TIM_OnePulse_MspDeInit(TIM_HandleTypeDef *htim) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(htim); + + /* NOTE : This function should not be modified, when the callback is needed, + the HAL_TIM_OnePulse_MspDeInit could be implemented in the user file + */ +} + +/** + * @brief Starts the TIM One Pulse signal generation. + * @note Though OutputChannel parameter is deprecated and ignored by the function + * it has been kept to avoid HAL_TIM API compatibility break. + * @note The pulse output channel is determined when calling + * @ref HAL_TIM_OnePulse_ConfigChannel(). 
+ * @param htim TIM One Pulse handle + * @param OutputChannel See note above + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIM_OnePulse_Start(TIM_HandleTypeDef *htim, uint32_t OutputChannel) +{ + HAL_TIM_ChannelStateTypeDef channel_1_state = TIM_CHANNEL_STATE_GET(htim, TIM_CHANNEL_1); + HAL_TIM_ChannelStateTypeDef channel_2_state = TIM_CHANNEL_STATE_GET(htim, TIM_CHANNEL_2); + HAL_TIM_ChannelStateTypeDef complementary_channel_1_state = TIM_CHANNEL_N_STATE_GET(htim, TIM_CHANNEL_1); + HAL_TIM_ChannelStateTypeDef complementary_channel_2_state = TIM_CHANNEL_N_STATE_GET(htim, TIM_CHANNEL_2); + + /* Prevent unused argument(s) compilation warning */ + UNUSED(OutputChannel); + + /* Check the TIM channels state */ + if ((channel_1_state != HAL_TIM_CHANNEL_STATE_READY) + || (channel_2_state != HAL_TIM_CHANNEL_STATE_READY) + || (complementary_channel_1_state != HAL_TIM_CHANNEL_STATE_READY) + || (complementary_channel_2_state != HAL_TIM_CHANNEL_STATE_READY)) + { + return HAL_ERROR; + } + + /* Set the TIM channels state */ + TIM_CHANNEL_STATE_SET(htim, TIM_CHANNEL_1, HAL_TIM_CHANNEL_STATE_BUSY); + TIM_CHANNEL_STATE_SET(htim, TIM_CHANNEL_2, HAL_TIM_CHANNEL_STATE_BUSY); + TIM_CHANNEL_N_STATE_SET(htim, TIM_CHANNEL_1, HAL_TIM_CHANNEL_STATE_BUSY); + TIM_CHANNEL_N_STATE_SET(htim, TIM_CHANNEL_2, HAL_TIM_CHANNEL_STATE_BUSY); + + /* Enable the Capture compare and the Input Capture channels + (in the OPM Mode the two possible channels that can be used are TIM_CHANNEL_1 and TIM_CHANNEL_2) + if TIM_CHANNEL_1 is used as output, the TIM_CHANNEL_2 will be used as input and + if TIM_CHANNEL_1 is used as input, the TIM_CHANNEL_2 will be used as output + whatever the combination, the TIM_CHANNEL_1 and TIM_CHANNEL_2 should be enabled together + + No need to enable the counter, it's enabled automatically by hardware + (the counter starts in response to a stimulus and generate a pulse */ + + TIM_CCxChannelCmd(htim->Instance, TIM_CHANNEL_1, TIM_CCx_ENABLE); + TIM_CCxChannelCmd(htim->Instance, TIM_CHANNEL_2, TIM_CCx_ENABLE); + + if (IS_TIM_BREAK_INSTANCE(htim->Instance) != RESET) + { + /* Enable the main output */ + __HAL_TIM_MOE_ENABLE(htim); + } + + /* Return function status */ + return HAL_OK; +} + +/** + * @brief Stops the TIM One Pulse signal generation. + * @note Though OutputChannel parameter is deprecated and ignored by the function + * it has been kept to avoid HAL_TIM API compatibility break. + * @note The pulse output channel is determined when calling + * @ref HAL_TIM_OnePulse_ConfigChannel(). 
+ * @param htim TIM One Pulse handle + * @param OutputChannel See note above + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIM_OnePulse_Stop(TIM_HandleTypeDef *htim, uint32_t OutputChannel) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(OutputChannel); + + /* Disable the Capture compare and the Input Capture channels + (in the OPM Mode the two possible channels that can be used are TIM_CHANNEL_1 and TIM_CHANNEL_2) + if TIM_CHANNEL_1 is used as output, the TIM_CHANNEL_2 will be used as input and + if TIM_CHANNEL_1 is used as input, the TIM_CHANNEL_2 will be used as output + whatever the combination, the TIM_CHANNEL_1 and TIM_CHANNEL_2 should be disabled together */ + + TIM_CCxChannelCmd(htim->Instance, TIM_CHANNEL_1, TIM_CCx_DISABLE); + TIM_CCxChannelCmd(htim->Instance, TIM_CHANNEL_2, TIM_CCx_DISABLE); + + if (IS_TIM_BREAK_INSTANCE(htim->Instance) != RESET) + { + /* Disable the Main Output */ + __HAL_TIM_MOE_DISABLE(htim); + } + + /* Disable the Peripheral */ + __HAL_TIM_DISABLE(htim); + + /* Set the TIM channels state */ + TIM_CHANNEL_STATE_SET(htim, TIM_CHANNEL_1, HAL_TIM_CHANNEL_STATE_READY); + TIM_CHANNEL_STATE_SET(htim, TIM_CHANNEL_2, HAL_TIM_CHANNEL_STATE_READY); + TIM_CHANNEL_N_STATE_SET(htim, TIM_CHANNEL_1, HAL_TIM_CHANNEL_STATE_READY); + TIM_CHANNEL_N_STATE_SET(htim, TIM_CHANNEL_2, HAL_TIM_CHANNEL_STATE_READY); + + /* Return function status */ + return HAL_OK; +} + +/** + * @brief Starts the TIM One Pulse signal generation in interrupt mode. + * @note Though OutputChannel parameter is deprecated and ignored by the function + * it has been kept to avoid HAL_TIM API compatibility break. + * @note The pulse output channel is determined when calling + * @ref HAL_TIM_OnePulse_ConfigChannel(). + * @param htim TIM One Pulse handle + * @param OutputChannel See note above + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIM_OnePulse_Start_IT(TIM_HandleTypeDef *htim, uint32_t OutputChannel) +{ + HAL_TIM_ChannelStateTypeDef channel_1_state = TIM_CHANNEL_STATE_GET(htim, TIM_CHANNEL_1); + HAL_TIM_ChannelStateTypeDef channel_2_state = TIM_CHANNEL_STATE_GET(htim, TIM_CHANNEL_2); + HAL_TIM_ChannelStateTypeDef complementary_channel_1_state = TIM_CHANNEL_N_STATE_GET(htim, TIM_CHANNEL_1); + HAL_TIM_ChannelStateTypeDef complementary_channel_2_state = TIM_CHANNEL_N_STATE_GET(htim, TIM_CHANNEL_2); + + /* Prevent unused argument(s) compilation warning */ + UNUSED(OutputChannel); + + /* Check the TIM channels state */ + if ((channel_1_state != HAL_TIM_CHANNEL_STATE_READY) + || (channel_2_state != HAL_TIM_CHANNEL_STATE_READY) + || (complementary_channel_1_state != HAL_TIM_CHANNEL_STATE_READY) + || (complementary_channel_2_state != HAL_TIM_CHANNEL_STATE_READY)) + { + return HAL_ERROR; + } + + /* Set the TIM channels state */ + TIM_CHANNEL_STATE_SET(htim, TIM_CHANNEL_1, HAL_TIM_CHANNEL_STATE_BUSY); + TIM_CHANNEL_STATE_SET(htim, TIM_CHANNEL_2, HAL_TIM_CHANNEL_STATE_BUSY); + TIM_CHANNEL_N_STATE_SET(htim, TIM_CHANNEL_1, HAL_TIM_CHANNEL_STATE_BUSY); + TIM_CHANNEL_N_STATE_SET(htim, TIM_CHANNEL_2, HAL_TIM_CHANNEL_STATE_BUSY); + + /* Enable the Capture compare and the Input Capture channels + (in the OPM Mode the two possible channels that can be used are TIM_CHANNEL_1 and TIM_CHANNEL_2) + if TIM_CHANNEL_1 is used as output, the TIM_CHANNEL_2 will be used as input and + if TIM_CHANNEL_1 is used as input, the TIM_CHANNEL_2 will be used as output + whatever the combination, the TIM_CHANNEL_1 and TIM_CHANNEL_2 should be enabled together + + No need to enable the counter, it's enabled 
automatically by hardware + (the counter starts in response to a stimulus and generate a pulse */ + + /* Enable the TIM Capture/Compare 1 interrupt */ + __HAL_TIM_ENABLE_IT(htim, TIM_IT_CC1); + + /* Enable the TIM Capture/Compare 2 interrupt */ + __HAL_TIM_ENABLE_IT(htim, TIM_IT_CC2); + + TIM_CCxChannelCmd(htim->Instance, TIM_CHANNEL_1, TIM_CCx_ENABLE); + TIM_CCxChannelCmd(htim->Instance, TIM_CHANNEL_2, TIM_CCx_ENABLE); + + if (IS_TIM_BREAK_INSTANCE(htim->Instance) != RESET) + { + /* Enable the main output */ + __HAL_TIM_MOE_ENABLE(htim); + } + + /* Return function status */ + return HAL_OK; +} + +/** + * @brief Stops the TIM One Pulse signal generation in interrupt mode. + * @note Though OutputChannel parameter is deprecated and ignored by the function + * it has been kept to avoid HAL_TIM API compatibility break. + * @note The pulse output channel is determined when calling + * @ref HAL_TIM_OnePulse_ConfigChannel(). + * @param htim TIM One Pulse handle + * @param OutputChannel See note above + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIM_OnePulse_Stop_IT(TIM_HandleTypeDef *htim, uint32_t OutputChannel) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(OutputChannel); + + /* Disable the TIM Capture/Compare 1 interrupt */ + __HAL_TIM_DISABLE_IT(htim, TIM_IT_CC1); + + /* Disable the TIM Capture/Compare 2 interrupt */ + __HAL_TIM_DISABLE_IT(htim, TIM_IT_CC2); + + /* Disable the Capture compare and the Input Capture channels + (in the OPM Mode the two possible channels that can be used are TIM_CHANNEL_1 and TIM_CHANNEL_2) + if TIM_CHANNEL_1 is used as output, the TIM_CHANNEL_2 will be used as input and + if TIM_CHANNEL_1 is used as input, the TIM_CHANNEL_2 will be used as output + whatever the combination, the TIM_CHANNEL_1 and TIM_CHANNEL_2 should be disabled together */ + TIM_CCxChannelCmd(htim->Instance, TIM_CHANNEL_1, TIM_CCx_DISABLE); + TIM_CCxChannelCmd(htim->Instance, TIM_CHANNEL_2, TIM_CCx_DISABLE); + + if (IS_TIM_BREAK_INSTANCE(htim->Instance) != RESET) + { + /* Disable the Main Output */ + __HAL_TIM_MOE_DISABLE(htim); + } + + /* Disable the Peripheral */ + __HAL_TIM_DISABLE(htim); + + /* Set the TIM channels state */ + TIM_CHANNEL_STATE_SET(htim, TIM_CHANNEL_1, HAL_TIM_CHANNEL_STATE_READY); + TIM_CHANNEL_STATE_SET(htim, TIM_CHANNEL_2, HAL_TIM_CHANNEL_STATE_READY); + TIM_CHANNEL_N_STATE_SET(htim, TIM_CHANNEL_1, HAL_TIM_CHANNEL_STATE_READY); + TIM_CHANNEL_N_STATE_SET(htim, TIM_CHANNEL_2, HAL_TIM_CHANNEL_STATE_READY); + + /* Return function status */ + return HAL_OK; +} + +/** + * @} + */ + +/** @defgroup TIM_Exported_Functions_Group6 TIM Encoder functions + * @brief TIM Encoder functions + * +@verbatim + ============================================================================== + ##### TIM Encoder functions ##### + ============================================================================== + [..] + This section provides functions allowing to: + (+) Initialize and configure the TIM Encoder. + (+) De-initialize the TIM Encoder. + (+) Start the TIM Encoder. + (+) Stop the TIM Encoder. + (+) Start the TIM Encoder and enable interrupt. + (+) Stop the TIM Encoder and disable interrupt. + (+) Start the TIM Encoder and enable DMA transfer. + (+) Stop the TIM Encoder and disable DMA transfer. + +@endverbatim + * @{ + */ +/** + * @brief Initializes the TIM Encoder Interface and initialize the associated handle. 
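+ * @note  Illustrative configuration sketch (htim3, the quadrature pin mapping and
+ *        the exact field values are application-side assumptions, not requirements
+ *        of this driver):
+ *          TIM_Encoder_InitTypeDef sEncoderConfig = {0};
+ *          sEncoderConfig.EncoderMode  = TIM_ENCODERMODE_TI12;
+ *          sEncoderConfig.IC1Selection = TIM_ICSELECTION_DIRECTTI;
+ *          sEncoderConfig.IC2Selection = TIM_ICSELECTION_DIRECTTI;
+ *          sEncoderConfig.IC1Prescaler = TIM_ICPSC_DIV1;
+ *          sEncoderConfig.IC2Prescaler = TIM_ICPSC_DIV1;
+ *          HAL_TIM_Encoder_Init(&htim3, &sEncoderConfig);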
+ * @note Switching from Center Aligned counter mode to Edge counter mode (or reverse) + * requires a timer reset to avoid unexpected direction + * due to DIR bit readonly in center aligned mode. + * Ex: call @ref HAL_TIM_Encoder_DeInit() before HAL_TIM_Encoder_Init() + * @note Encoder mode and External clock mode 2 are not compatible and must not be selected together + * Ex: A call for @ref HAL_TIM_Encoder_Init will erase the settings of @ref HAL_TIM_ConfigClockSource + * using TIM_CLOCKSOURCE_ETRMODE2 and vice versa + * @note When the timer instance is initialized in Encoder mode, timer + * channels 1 and channel 2 are reserved and cannot be used for other + * purpose. + * @param htim TIM Encoder Interface handle + * @param sConfig TIM Encoder Interface configuration structure + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIM_Encoder_Init(TIM_HandleTypeDef *htim, TIM_Encoder_InitTypeDef *sConfig) +{ + uint32_t tmpsmcr; + uint32_t tmpccmr1; + uint32_t tmpccer; + + /* Check the TIM handle allocation */ + if (htim == NULL) + { + return HAL_ERROR; + } + + /* Check the parameters */ + assert_param(IS_TIM_ENCODER_INTERFACE_INSTANCE(htim->Instance)); + assert_param(IS_TIM_COUNTER_MODE(htim->Init.CounterMode)); + assert_param(IS_TIM_CLOCKDIVISION_DIV(htim->Init.ClockDivision)); + assert_param(IS_TIM_AUTORELOAD_PRELOAD(htim->Init.AutoReloadPreload)); + assert_param(IS_TIM_ENCODER_MODE(sConfig->EncoderMode)); + assert_param(IS_TIM_IC_SELECTION(sConfig->IC1Selection)); + assert_param(IS_TIM_IC_SELECTION(sConfig->IC2Selection)); + assert_param(IS_TIM_ENCODERINPUT_POLARITY(sConfig->IC1Polarity)); + assert_param(IS_TIM_ENCODERINPUT_POLARITY(sConfig->IC2Polarity)); + assert_param(IS_TIM_IC_PRESCALER(sConfig->IC1Prescaler)); + assert_param(IS_TIM_IC_PRESCALER(sConfig->IC2Prescaler)); + assert_param(IS_TIM_IC_FILTER(sConfig->IC1Filter)); + assert_param(IS_TIM_IC_FILTER(sConfig->IC2Filter)); + + if (htim->State == HAL_TIM_STATE_RESET) + { + /* Allocate lock resource and initialize it */ + htim->Lock = HAL_UNLOCKED; + +#if (USE_HAL_TIM_REGISTER_CALLBACKS == 1) + /* Reset interrupt callbacks to legacy weak callbacks */ + TIM_ResetCallback(htim); + + if (htim->Encoder_MspInitCallback == NULL) + { + htim->Encoder_MspInitCallback = HAL_TIM_Encoder_MspInit; + } + /* Init the low level hardware : GPIO, CLOCK, NVIC */ + htim->Encoder_MspInitCallback(htim); +#else + /* Init the low level hardware : GPIO, CLOCK, NVIC and DMA */ + HAL_TIM_Encoder_MspInit(htim); +#endif /* USE_HAL_TIM_REGISTER_CALLBACKS */ + } + + /* Set the TIM state */ + htim->State = HAL_TIM_STATE_BUSY; + + /* Reset the SMS and ECE bits */ + htim->Instance->SMCR &= ~(TIM_SMCR_SMS | TIM_SMCR_ECE); + + /* Configure the Time base in the Encoder Mode */ + TIM_Base_SetConfig(htim->Instance, &htim->Init); + + /* Get the TIMx SMCR register value */ + tmpsmcr = htim->Instance->SMCR; + + /* Get the TIMx CCMR1 register value */ + tmpccmr1 = htim->Instance->CCMR1; + + /* Get the TIMx CCER register value */ + tmpccer = htim->Instance->CCER; + + /* Set the encoder Mode */ + tmpsmcr |= sConfig->EncoderMode; + + /* Select the Capture Compare 1 and the Capture Compare 2 as input */ + tmpccmr1 &= ~(TIM_CCMR1_CC1S | TIM_CCMR1_CC2S); + tmpccmr1 |= (sConfig->IC1Selection | (sConfig->IC2Selection << 8U)); + + /* Set the Capture Compare 1 and the Capture Compare 2 prescalers and filters */ + tmpccmr1 &= ~(TIM_CCMR1_IC1PSC | TIM_CCMR1_IC2PSC); + tmpccmr1 &= ~(TIM_CCMR1_IC1F | TIM_CCMR1_IC2F); + tmpccmr1 |= sConfig->IC1Prescaler | (sConfig->IC2Prescaler << 8U); + 
tmpccmr1 |= (sConfig->IC1Filter << 4U) | (sConfig->IC2Filter << 12U); + + /* Set the TI1 and the TI2 Polarities */ + tmpccer &= ~(TIM_CCER_CC1P | TIM_CCER_CC2P); + tmpccer &= ~(TIM_CCER_CC1NP | TIM_CCER_CC2NP); + tmpccer |= sConfig->IC1Polarity | (sConfig->IC2Polarity << 4U); + + /* Write to TIMx SMCR */ + htim->Instance->SMCR = tmpsmcr; + + /* Write to TIMx CCMR1 */ + htim->Instance->CCMR1 = tmpccmr1; + + /* Write to TIMx CCER */ + htim->Instance->CCER = tmpccer; + + /* Initialize the DMA burst operation state */ + htim->DMABurstState = HAL_DMA_BURST_STATE_READY; + + /* Set the TIM channels state */ + TIM_CHANNEL_STATE_SET(htim, TIM_CHANNEL_1, HAL_TIM_CHANNEL_STATE_READY); + TIM_CHANNEL_STATE_SET(htim, TIM_CHANNEL_2, HAL_TIM_CHANNEL_STATE_READY); + TIM_CHANNEL_N_STATE_SET(htim, TIM_CHANNEL_1, HAL_TIM_CHANNEL_STATE_READY); + TIM_CHANNEL_N_STATE_SET(htim, TIM_CHANNEL_2, HAL_TIM_CHANNEL_STATE_READY); + + /* Initialize the TIM state*/ + htim->State = HAL_TIM_STATE_READY; + + return HAL_OK; +} + + +/** + * @brief DeInitializes the TIM Encoder interface + * @param htim TIM Encoder Interface handle + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIM_Encoder_DeInit(TIM_HandleTypeDef *htim) +{ + /* Check the parameters */ + assert_param(IS_TIM_INSTANCE(htim->Instance)); + + htim->State = HAL_TIM_STATE_BUSY; + + /* Disable the TIM Peripheral Clock */ + __HAL_TIM_DISABLE(htim); + +#if (USE_HAL_TIM_REGISTER_CALLBACKS == 1) + if (htim->Encoder_MspDeInitCallback == NULL) + { + htim->Encoder_MspDeInitCallback = HAL_TIM_Encoder_MspDeInit; + } + /* DeInit the low level hardware */ + htim->Encoder_MspDeInitCallback(htim); +#else + /* DeInit the low level hardware: GPIO, CLOCK, NVIC */ + HAL_TIM_Encoder_MspDeInit(htim); +#endif /* USE_HAL_TIM_REGISTER_CALLBACKS */ + + /* Change the DMA burst operation state */ + htim->DMABurstState = HAL_DMA_BURST_STATE_RESET; + + /* Set the TIM channels state */ + TIM_CHANNEL_STATE_SET(htim, TIM_CHANNEL_1, HAL_TIM_CHANNEL_STATE_RESET); + TIM_CHANNEL_STATE_SET(htim, TIM_CHANNEL_2, HAL_TIM_CHANNEL_STATE_RESET); + TIM_CHANNEL_N_STATE_SET(htim, TIM_CHANNEL_1, HAL_TIM_CHANNEL_STATE_RESET); + TIM_CHANNEL_N_STATE_SET(htim, TIM_CHANNEL_2, HAL_TIM_CHANNEL_STATE_RESET); + + /* Change TIM state */ + htim->State = HAL_TIM_STATE_RESET; + + /* Release Lock */ + __HAL_UNLOCK(htim); + + return HAL_OK; +} + +/** + * @brief Initializes the TIM Encoder Interface MSP. + * @param htim TIM Encoder Interface handle + * @retval None + */ +__weak void HAL_TIM_Encoder_MspInit(TIM_HandleTypeDef *htim) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(htim); + + /* NOTE : This function should not be modified, when the callback is needed, + the HAL_TIM_Encoder_MspInit could be implemented in the user file + */ +} + +/** + * @brief DeInitializes TIM Encoder Interface MSP. + * @param htim TIM Encoder Interface handle + * @retval None + */ +__weak void HAL_TIM_Encoder_MspDeInit(TIM_HandleTypeDef *htim) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(htim); + + /* NOTE : This function should not be modified, when the callback is needed, + the HAL_TIM_Encoder_MspDeInit could be implemented in the user file + */ +} + +/** + * @brief Starts the TIM Encoder Interface. 
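+ * @note  Illustrative usage (htim3 is an application-side assumption): after
+ *          HAL_TIM_Encoder_Start(&htim3, TIM_CHANNEL_ALL);
+ *        the current position can be read with __HAL_TIM_GET_COUNTER(&htim3) and
+ *        the rotation direction with __HAL_TIM_IS_TIM_COUNTING_DOWN(&htim3).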
+ * @param htim TIM Encoder Interface handle + * @param Channel TIM Channels to be enabled + * This parameter can be one of the following values: + * @arg TIM_CHANNEL_1: TIM Channel 1 selected + * @arg TIM_CHANNEL_2: TIM Channel 2 selected + * @arg TIM_CHANNEL_ALL: TIM Channel 1 and TIM Channel 2 are selected + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIM_Encoder_Start(TIM_HandleTypeDef *htim, uint32_t Channel) +{ + HAL_TIM_ChannelStateTypeDef channel_1_state = TIM_CHANNEL_STATE_GET(htim, TIM_CHANNEL_1); + HAL_TIM_ChannelStateTypeDef channel_2_state = TIM_CHANNEL_STATE_GET(htim, TIM_CHANNEL_2); + HAL_TIM_ChannelStateTypeDef complementary_channel_1_state = TIM_CHANNEL_N_STATE_GET(htim, TIM_CHANNEL_1); + HAL_TIM_ChannelStateTypeDef complementary_channel_2_state = TIM_CHANNEL_N_STATE_GET(htim, TIM_CHANNEL_2); + + /* Check the parameters */ + assert_param(IS_TIM_ENCODER_INTERFACE_INSTANCE(htim->Instance)); + + /* Set the TIM channel(s) state */ + if (Channel == TIM_CHANNEL_1) + { + if ((channel_1_state != HAL_TIM_CHANNEL_STATE_READY) + || (complementary_channel_1_state != HAL_TIM_CHANNEL_STATE_READY)) + { + return HAL_ERROR; + } + else + { + TIM_CHANNEL_STATE_SET(htim, TIM_CHANNEL_1, HAL_TIM_CHANNEL_STATE_BUSY); + TIM_CHANNEL_N_STATE_SET(htim, TIM_CHANNEL_1, HAL_TIM_CHANNEL_STATE_BUSY); + } + } + else if (Channel == TIM_CHANNEL_2) + { + if ((channel_2_state != HAL_TIM_CHANNEL_STATE_READY) + || (complementary_channel_2_state != HAL_TIM_CHANNEL_STATE_READY)) + { + return HAL_ERROR; + } + else + { + TIM_CHANNEL_STATE_SET(htim, TIM_CHANNEL_2, HAL_TIM_CHANNEL_STATE_BUSY); + TIM_CHANNEL_N_STATE_SET(htim, TIM_CHANNEL_2, HAL_TIM_CHANNEL_STATE_BUSY); + } + } + else + { + if ((channel_1_state != HAL_TIM_CHANNEL_STATE_READY) + || (channel_2_state != HAL_TIM_CHANNEL_STATE_READY) + || (complementary_channel_1_state != HAL_TIM_CHANNEL_STATE_READY) + || (complementary_channel_2_state != HAL_TIM_CHANNEL_STATE_READY)) + { + return HAL_ERROR; + } + else + { + TIM_CHANNEL_STATE_SET(htim, TIM_CHANNEL_1, HAL_TIM_CHANNEL_STATE_BUSY); + TIM_CHANNEL_STATE_SET(htim, TIM_CHANNEL_2, HAL_TIM_CHANNEL_STATE_BUSY); + TIM_CHANNEL_N_STATE_SET(htim, TIM_CHANNEL_1, HAL_TIM_CHANNEL_STATE_BUSY); + TIM_CHANNEL_N_STATE_SET(htim, TIM_CHANNEL_2, HAL_TIM_CHANNEL_STATE_BUSY); + } + } + + /* Enable the encoder interface channels */ + switch (Channel) + { + case TIM_CHANNEL_1: + { + TIM_CCxChannelCmd(htim->Instance, TIM_CHANNEL_1, TIM_CCx_ENABLE); + break; + } + + case TIM_CHANNEL_2: + { + TIM_CCxChannelCmd(htim->Instance, TIM_CHANNEL_2, TIM_CCx_ENABLE); + break; + } + + default : + { + TIM_CCxChannelCmd(htim->Instance, TIM_CHANNEL_1, TIM_CCx_ENABLE); + TIM_CCxChannelCmd(htim->Instance, TIM_CHANNEL_2, TIM_CCx_ENABLE); + break; + } + } + /* Enable the Peripheral */ + __HAL_TIM_ENABLE(htim); + + /* Return function status */ + return HAL_OK; +} + +/** + * @brief Stops the TIM Encoder Interface. 
+ * @param htim TIM Encoder Interface handle + * @param Channel TIM Channels to be disabled + * This parameter can be one of the following values: + * @arg TIM_CHANNEL_1: TIM Channel 1 selected + * @arg TIM_CHANNEL_2: TIM Channel 2 selected + * @arg TIM_CHANNEL_ALL: TIM Channel 1 and TIM Channel 2 are selected + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIM_Encoder_Stop(TIM_HandleTypeDef *htim, uint32_t Channel) +{ + /* Check the parameters */ + assert_param(IS_TIM_ENCODER_INTERFACE_INSTANCE(htim->Instance)); + + /* Disable the Input Capture channels 1 and 2 + (in the EncoderInterface the two possible channels that can be used are TIM_CHANNEL_1 and TIM_CHANNEL_2) */ + switch (Channel) + { + case TIM_CHANNEL_1: + { + TIM_CCxChannelCmd(htim->Instance, TIM_CHANNEL_1, TIM_CCx_DISABLE); + break; + } + + case TIM_CHANNEL_2: + { + TIM_CCxChannelCmd(htim->Instance, TIM_CHANNEL_2, TIM_CCx_DISABLE); + break; + } + + default : + { + TIM_CCxChannelCmd(htim->Instance, TIM_CHANNEL_1, TIM_CCx_DISABLE); + TIM_CCxChannelCmd(htim->Instance, TIM_CHANNEL_2, TIM_CCx_DISABLE); + break; + } + } + + /* Disable the Peripheral */ + __HAL_TIM_DISABLE(htim); + + /* Set the TIM channel(s) state */ + if ((Channel == TIM_CHANNEL_1) || (Channel == TIM_CHANNEL_2)) + { + TIM_CHANNEL_STATE_SET(htim, Channel, HAL_TIM_CHANNEL_STATE_READY); + TIM_CHANNEL_N_STATE_SET(htim, Channel, HAL_TIM_CHANNEL_STATE_READY); + } + else + { + TIM_CHANNEL_STATE_SET(htim, TIM_CHANNEL_1, HAL_TIM_CHANNEL_STATE_READY); + TIM_CHANNEL_STATE_SET(htim, TIM_CHANNEL_2, HAL_TIM_CHANNEL_STATE_READY); + TIM_CHANNEL_N_STATE_SET(htim, TIM_CHANNEL_1, HAL_TIM_CHANNEL_STATE_READY); + TIM_CHANNEL_N_STATE_SET(htim, TIM_CHANNEL_2, HAL_TIM_CHANNEL_STATE_READY); + } + + /* Return function status */ + return HAL_OK; +} + +/** + * @brief Starts the TIM Encoder Interface in interrupt mode. 
+ * @param htim TIM Encoder Interface handle + * @param Channel TIM Channels to be enabled + * This parameter can be one of the following values: + * @arg TIM_CHANNEL_1: TIM Channel 1 selected + * @arg TIM_CHANNEL_2: TIM Channel 2 selected + * @arg TIM_CHANNEL_ALL: TIM Channel 1 and TIM Channel 2 are selected + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIM_Encoder_Start_IT(TIM_HandleTypeDef *htim, uint32_t Channel) +{ + HAL_TIM_ChannelStateTypeDef channel_1_state = TIM_CHANNEL_STATE_GET(htim, TIM_CHANNEL_1); + HAL_TIM_ChannelStateTypeDef channel_2_state = TIM_CHANNEL_STATE_GET(htim, TIM_CHANNEL_2); + HAL_TIM_ChannelStateTypeDef complementary_channel_1_state = TIM_CHANNEL_N_STATE_GET(htim, TIM_CHANNEL_1); + HAL_TIM_ChannelStateTypeDef complementary_channel_2_state = TIM_CHANNEL_N_STATE_GET(htim, TIM_CHANNEL_2); + + /* Check the parameters */ + assert_param(IS_TIM_ENCODER_INTERFACE_INSTANCE(htim->Instance)); + + /* Set the TIM channel(s) state */ + if (Channel == TIM_CHANNEL_1) + { + if ((channel_1_state != HAL_TIM_CHANNEL_STATE_READY) + || (complementary_channel_1_state != HAL_TIM_CHANNEL_STATE_READY)) + { + return HAL_ERROR; + } + else + { + TIM_CHANNEL_STATE_SET(htim, TIM_CHANNEL_1, HAL_TIM_CHANNEL_STATE_BUSY); + TIM_CHANNEL_N_STATE_SET(htim, TIM_CHANNEL_1, HAL_TIM_CHANNEL_STATE_BUSY); + } + } + else if (Channel == TIM_CHANNEL_2) + { + if ((channel_2_state != HAL_TIM_CHANNEL_STATE_READY) + || (complementary_channel_2_state != HAL_TIM_CHANNEL_STATE_READY)) + { + return HAL_ERROR; + } + else + { + TIM_CHANNEL_STATE_SET(htim, TIM_CHANNEL_2, HAL_TIM_CHANNEL_STATE_BUSY); + TIM_CHANNEL_N_STATE_SET(htim, TIM_CHANNEL_2, HAL_TIM_CHANNEL_STATE_BUSY); + } + } + else + { + if ((channel_1_state != HAL_TIM_CHANNEL_STATE_READY) + || (channel_2_state != HAL_TIM_CHANNEL_STATE_READY) + || (complementary_channel_1_state != HAL_TIM_CHANNEL_STATE_READY) + || (complementary_channel_2_state != HAL_TIM_CHANNEL_STATE_READY)) + { + return HAL_ERROR; + } + else + { + TIM_CHANNEL_STATE_SET(htim, TIM_CHANNEL_1, HAL_TIM_CHANNEL_STATE_BUSY); + TIM_CHANNEL_STATE_SET(htim, TIM_CHANNEL_2, HAL_TIM_CHANNEL_STATE_BUSY); + TIM_CHANNEL_N_STATE_SET(htim, TIM_CHANNEL_1, HAL_TIM_CHANNEL_STATE_BUSY); + TIM_CHANNEL_N_STATE_SET(htim, TIM_CHANNEL_2, HAL_TIM_CHANNEL_STATE_BUSY); + } + } + + /* Enable the encoder interface channels */ + /* Enable the capture compare Interrupts 1 and/or 2 */ + switch (Channel) + { + case TIM_CHANNEL_1: + { + TIM_CCxChannelCmd(htim->Instance, TIM_CHANNEL_1, TIM_CCx_ENABLE); + __HAL_TIM_ENABLE_IT(htim, TIM_IT_CC1); + break; + } + + case TIM_CHANNEL_2: + { + TIM_CCxChannelCmd(htim->Instance, TIM_CHANNEL_2, TIM_CCx_ENABLE); + __HAL_TIM_ENABLE_IT(htim, TIM_IT_CC2); + break; + } + + default : + { + TIM_CCxChannelCmd(htim->Instance, TIM_CHANNEL_1, TIM_CCx_ENABLE); + TIM_CCxChannelCmd(htim->Instance, TIM_CHANNEL_2, TIM_CCx_ENABLE); + __HAL_TIM_ENABLE_IT(htim, TIM_IT_CC1); + __HAL_TIM_ENABLE_IT(htim, TIM_IT_CC2); + break; + } + } + + /* Enable the Peripheral */ + __HAL_TIM_ENABLE(htim); + + /* Return function status */ + return HAL_OK; +} + +/** + * @brief Stops the TIM Encoder Interface in interrupt mode. 
+ * @param htim TIM Encoder Interface handle + * @param Channel TIM Channels to be disabled + * This parameter can be one of the following values: + * @arg TIM_CHANNEL_1: TIM Channel 1 selected + * @arg TIM_CHANNEL_2: TIM Channel 2 selected + * @arg TIM_CHANNEL_ALL: TIM Channel 1 and TIM Channel 2 are selected + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIM_Encoder_Stop_IT(TIM_HandleTypeDef *htim, uint32_t Channel) +{ + /* Check the parameters */ + assert_param(IS_TIM_ENCODER_INTERFACE_INSTANCE(htim->Instance)); + + /* Disable the Input Capture channels 1 and 2 + (in the EncoderInterface the two possible channels that can be used are TIM_CHANNEL_1 and TIM_CHANNEL_2) */ + if (Channel == TIM_CHANNEL_1) + { + TIM_CCxChannelCmd(htim->Instance, TIM_CHANNEL_1, TIM_CCx_DISABLE); + + /* Disable the capture compare Interrupts 1 */ + __HAL_TIM_DISABLE_IT(htim, TIM_IT_CC1); + } + else if (Channel == TIM_CHANNEL_2) + { + TIM_CCxChannelCmd(htim->Instance, TIM_CHANNEL_2, TIM_CCx_DISABLE); + + /* Disable the capture compare Interrupts 2 */ + __HAL_TIM_DISABLE_IT(htim, TIM_IT_CC2); + } + else + { + TIM_CCxChannelCmd(htim->Instance, TIM_CHANNEL_1, TIM_CCx_DISABLE); + TIM_CCxChannelCmd(htim->Instance, TIM_CHANNEL_2, TIM_CCx_DISABLE); + + /* Disable the capture compare Interrupts 1 and 2 */ + __HAL_TIM_DISABLE_IT(htim, TIM_IT_CC1); + __HAL_TIM_DISABLE_IT(htim, TIM_IT_CC2); + } + + /* Disable the Peripheral */ + __HAL_TIM_DISABLE(htim); + + /* Set the TIM channel(s) state */ + if ((Channel == TIM_CHANNEL_1) || (Channel == TIM_CHANNEL_2)) + { + TIM_CHANNEL_STATE_SET(htim, Channel, HAL_TIM_CHANNEL_STATE_READY); + TIM_CHANNEL_N_STATE_SET(htim, Channel, HAL_TIM_CHANNEL_STATE_READY); + } + else + { + TIM_CHANNEL_STATE_SET(htim, TIM_CHANNEL_1, HAL_TIM_CHANNEL_STATE_READY); + TIM_CHANNEL_STATE_SET(htim, TIM_CHANNEL_2, HAL_TIM_CHANNEL_STATE_READY); + TIM_CHANNEL_N_STATE_SET(htim, TIM_CHANNEL_1, HAL_TIM_CHANNEL_STATE_READY); + TIM_CHANNEL_N_STATE_SET(htim, TIM_CHANNEL_2, HAL_TIM_CHANNEL_STATE_READY); + } + + /* Return function status */ + return HAL_OK; +} + +/** + * @brief Starts the TIM Encoder Interface in DMA mode. + * @param htim TIM Encoder Interface handle + * @param Channel TIM Channels to be enabled + * This parameter can be one of the following values: + * @arg TIM_CHANNEL_1: TIM Channel 1 selected + * @arg TIM_CHANNEL_2: TIM Channel 2 selected + * @arg TIM_CHANNEL_ALL: TIM Channel 1 and TIM Channel 2 are selected + * @param pData1 The destination Buffer address for IC1. + * @param pData2 The destination Buffer address for IC2. + * @param Length The length of data to be transferred from TIM peripheral to memory. 
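+ * @note  Illustrative call (htim3 and the capture buffers are application-side
+ *        assumptions):
+ *          HAL_TIM_Encoder_Start_DMA(&htim3, TIM_CHANNEL_ALL, aIC1Buf, aIC2Buf, 32U);
+ *        fills aIC1Buf and aIC2Buf with 32 values captured from CCR1 and CCR2.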
+ * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIM_Encoder_Start_DMA(TIM_HandleTypeDef *htim, uint32_t Channel, uint32_t *pData1, + uint32_t *pData2, uint16_t Length) +{ + HAL_TIM_ChannelStateTypeDef channel_1_state = TIM_CHANNEL_STATE_GET(htim, TIM_CHANNEL_1); + HAL_TIM_ChannelStateTypeDef channel_2_state = TIM_CHANNEL_STATE_GET(htim, TIM_CHANNEL_2); + HAL_TIM_ChannelStateTypeDef complementary_channel_1_state = TIM_CHANNEL_N_STATE_GET(htim, TIM_CHANNEL_1); + HAL_TIM_ChannelStateTypeDef complementary_channel_2_state = TIM_CHANNEL_N_STATE_GET(htim, TIM_CHANNEL_2); + + /* Check the parameters */ + assert_param(IS_TIM_ENCODER_INTERFACE_INSTANCE(htim->Instance)); + + /* Set the TIM channel(s) state */ + if (Channel == TIM_CHANNEL_1) + { + if ((channel_1_state == HAL_TIM_CHANNEL_STATE_BUSY) + || (complementary_channel_1_state == HAL_TIM_CHANNEL_STATE_BUSY)) + { + return HAL_BUSY; + } + else if ((channel_1_state == HAL_TIM_CHANNEL_STATE_READY) + && (complementary_channel_1_state == HAL_TIM_CHANNEL_STATE_READY)) + { + if ((pData1 == NULL) && (Length > 0U)) + { + return HAL_ERROR; + } + else + { + TIM_CHANNEL_STATE_SET(htim, TIM_CHANNEL_1, HAL_TIM_CHANNEL_STATE_BUSY); + TIM_CHANNEL_N_STATE_SET(htim, TIM_CHANNEL_1, HAL_TIM_CHANNEL_STATE_BUSY); + } + } + else + { + return HAL_ERROR; + } + } + else if (Channel == TIM_CHANNEL_2) + { + if ((channel_2_state == HAL_TIM_CHANNEL_STATE_BUSY) + || (complementary_channel_2_state == HAL_TIM_CHANNEL_STATE_BUSY)) + { + return HAL_BUSY; + } + else if ((channel_2_state == HAL_TIM_CHANNEL_STATE_READY) + && (complementary_channel_2_state == HAL_TIM_CHANNEL_STATE_READY)) + { + if ((pData2 == NULL) && (Length > 0U)) + { + return HAL_ERROR; + } + else + { + TIM_CHANNEL_STATE_SET(htim, TIM_CHANNEL_2, HAL_TIM_CHANNEL_STATE_BUSY); + TIM_CHANNEL_N_STATE_SET(htim, TIM_CHANNEL_2, HAL_TIM_CHANNEL_STATE_BUSY); + } + } + else + { + return HAL_ERROR; + } + } + else + { + if ((channel_1_state == HAL_TIM_CHANNEL_STATE_BUSY) + || (channel_2_state == HAL_TIM_CHANNEL_STATE_BUSY) + || (complementary_channel_1_state == HAL_TIM_CHANNEL_STATE_BUSY) + || (complementary_channel_2_state == HAL_TIM_CHANNEL_STATE_BUSY)) + { + return HAL_BUSY; + } + else if ((channel_1_state == HAL_TIM_CHANNEL_STATE_READY) + && (channel_2_state == HAL_TIM_CHANNEL_STATE_READY) + && (complementary_channel_1_state == HAL_TIM_CHANNEL_STATE_READY) + && (complementary_channel_2_state == HAL_TIM_CHANNEL_STATE_READY)) + { + if ((((pData1 == NULL) || (pData2 == NULL))) && (Length > 0U)) + { + return HAL_ERROR; + } + else + { + TIM_CHANNEL_STATE_SET(htim, TIM_CHANNEL_1, HAL_TIM_CHANNEL_STATE_BUSY); + TIM_CHANNEL_STATE_SET(htim, TIM_CHANNEL_2, HAL_TIM_CHANNEL_STATE_BUSY); + TIM_CHANNEL_N_STATE_SET(htim, TIM_CHANNEL_1, HAL_TIM_CHANNEL_STATE_BUSY); + TIM_CHANNEL_N_STATE_SET(htim, TIM_CHANNEL_2, HAL_TIM_CHANNEL_STATE_BUSY); + } + } + else + { + return HAL_ERROR; + } + } + + switch (Channel) + { + case TIM_CHANNEL_1: + { + /* Set the DMA capture callbacks */ + htim->hdma[TIM_DMA_ID_CC1]->XferCpltCallback = TIM_DMACaptureCplt; + htim->hdma[TIM_DMA_ID_CC1]->XferHalfCpltCallback = TIM_DMACaptureHalfCplt; + + /* Set the DMA error callback */ + htim->hdma[TIM_DMA_ID_CC1]->XferErrorCallback = TIM_DMAError ; + + /* Enable the DMA stream */ + if (HAL_DMA_Start_IT(htim->hdma[TIM_DMA_ID_CC1], (uint32_t)&htim->Instance->CCR1, (uint32_t)pData1, + Length) != HAL_OK) + { + /* Return error status */ + return HAL_ERROR; + } + /* Enable the TIM Input Capture DMA request */ + __HAL_TIM_ENABLE_DMA(htim, TIM_DMA_CC1); + + /* Enable the 
Capture compare channel */ + TIM_CCxChannelCmd(htim->Instance, TIM_CHANNEL_1, TIM_CCx_ENABLE); + + /* Enable the Peripheral */ + __HAL_TIM_ENABLE(htim); + + break; + } + + case TIM_CHANNEL_2: + { + /* Set the DMA capture callbacks */ + htim->hdma[TIM_DMA_ID_CC2]->XferCpltCallback = TIM_DMACaptureCplt; + htim->hdma[TIM_DMA_ID_CC2]->XferHalfCpltCallback = TIM_DMACaptureHalfCplt; + + /* Set the DMA error callback */ + htim->hdma[TIM_DMA_ID_CC2]->XferErrorCallback = TIM_DMAError; + /* Enable the DMA stream */ + if (HAL_DMA_Start_IT(htim->hdma[TIM_DMA_ID_CC2], (uint32_t)&htim->Instance->CCR2, (uint32_t)pData2, + Length) != HAL_OK) + { + /* Return error status */ + return HAL_ERROR; + } + /* Enable the TIM Input Capture DMA request */ + __HAL_TIM_ENABLE_DMA(htim, TIM_DMA_CC2); + + /* Enable the Capture compare channel */ + TIM_CCxChannelCmd(htim->Instance, TIM_CHANNEL_2, TIM_CCx_ENABLE); + + /* Enable the Peripheral */ + __HAL_TIM_ENABLE(htim); + + break; + } + + default: + { + /* Set the DMA capture callbacks */ + htim->hdma[TIM_DMA_ID_CC1]->XferCpltCallback = TIM_DMACaptureCplt; + htim->hdma[TIM_DMA_ID_CC1]->XferHalfCpltCallback = TIM_DMACaptureHalfCplt; + + /* Set the DMA error callback */ + htim->hdma[TIM_DMA_ID_CC1]->XferErrorCallback = TIM_DMAError ; + + /* Enable the DMA stream */ + if (HAL_DMA_Start_IT(htim->hdma[TIM_DMA_ID_CC1], (uint32_t)&htim->Instance->CCR1, (uint32_t)pData1, + Length) != HAL_OK) + { + /* Return error status */ + return HAL_ERROR; + } + + /* Set the DMA capture callbacks */ + htim->hdma[TIM_DMA_ID_CC2]->XferCpltCallback = TIM_DMACaptureCplt; + htim->hdma[TIM_DMA_ID_CC2]->XferHalfCpltCallback = TIM_DMACaptureHalfCplt; + + /* Set the DMA error callback */ + htim->hdma[TIM_DMA_ID_CC2]->XferErrorCallback = TIM_DMAError ; + + /* Enable the DMA stream */ + if (HAL_DMA_Start_IT(htim->hdma[TIM_DMA_ID_CC2], (uint32_t)&htim->Instance->CCR2, (uint32_t)pData2, + Length) != HAL_OK) + { + /* Return error status */ + return HAL_ERROR; + } + + /* Enable the TIM Input Capture DMA request */ + __HAL_TIM_ENABLE_DMA(htim, TIM_DMA_CC1); + /* Enable the TIM Input Capture DMA request */ + __HAL_TIM_ENABLE_DMA(htim, TIM_DMA_CC2); + + /* Enable the Capture compare channel */ + TIM_CCxChannelCmd(htim->Instance, TIM_CHANNEL_1, TIM_CCx_ENABLE); + TIM_CCxChannelCmd(htim->Instance, TIM_CHANNEL_2, TIM_CCx_ENABLE); + + /* Enable the Peripheral */ + __HAL_TIM_ENABLE(htim); + + break; + } + } + + /* Return function status */ + return HAL_OK; +} + +/** + * @brief Stops the TIM Encoder Interface in DMA mode. 
+ * @param htim TIM Encoder Interface handle + * @param Channel TIM Channels to be enabled + * This parameter can be one of the following values: + * @arg TIM_CHANNEL_1: TIM Channel 1 selected + * @arg TIM_CHANNEL_2: TIM Channel 2 selected + * @arg TIM_CHANNEL_ALL: TIM Channel 1 and TIM Channel 2 are selected + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIM_Encoder_Stop_DMA(TIM_HandleTypeDef *htim, uint32_t Channel) +{ + /* Check the parameters */ + assert_param(IS_TIM_ENCODER_INTERFACE_INSTANCE(htim->Instance)); + + /* Disable the Input Capture channels 1 and 2 + (in the EncoderInterface the two possible channels that can be used are TIM_CHANNEL_1 and TIM_CHANNEL_2) */ + if (Channel == TIM_CHANNEL_1) + { + TIM_CCxChannelCmd(htim->Instance, TIM_CHANNEL_1, TIM_CCx_DISABLE); + + /* Disable the capture compare DMA Request 1 */ + __HAL_TIM_DISABLE_DMA(htim, TIM_DMA_CC1); + (void)HAL_DMA_Abort_IT(htim->hdma[TIM_DMA_ID_CC1]); + } + else if (Channel == TIM_CHANNEL_2) + { + TIM_CCxChannelCmd(htim->Instance, TIM_CHANNEL_2, TIM_CCx_DISABLE); + + /* Disable the capture compare DMA Request 2 */ + __HAL_TIM_DISABLE_DMA(htim, TIM_DMA_CC2); + (void)HAL_DMA_Abort_IT(htim->hdma[TIM_DMA_ID_CC2]); + } + else + { + TIM_CCxChannelCmd(htim->Instance, TIM_CHANNEL_1, TIM_CCx_DISABLE); + TIM_CCxChannelCmd(htim->Instance, TIM_CHANNEL_2, TIM_CCx_DISABLE); + + /* Disable the capture compare DMA Request 1 and 2 */ + __HAL_TIM_DISABLE_DMA(htim, TIM_DMA_CC1); + __HAL_TIM_DISABLE_DMA(htim, TIM_DMA_CC2); + (void)HAL_DMA_Abort_IT(htim->hdma[TIM_DMA_ID_CC1]); + (void)HAL_DMA_Abort_IT(htim->hdma[TIM_DMA_ID_CC2]); + } + + /* Disable the Peripheral */ + __HAL_TIM_DISABLE(htim); + + /* Set the TIM channel(s) state */ + if ((Channel == TIM_CHANNEL_1) || (Channel == TIM_CHANNEL_2)) + { + TIM_CHANNEL_STATE_SET(htim, Channel, HAL_TIM_CHANNEL_STATE_READY); + TIM_CHANNEL_N_STATE_SET(htim, Channel, HAL_TIM_CHANNEL_STATE_READY); + } + else + { + TIM_CHANNEL_STATE_SET(htim, TIM_CHANNEL_1, HAL_TIM_CHANNEL_STATE_READY); + TIM_CHANNEL_STATE_SET(htim, TIM_CHANNEL_2, HAL_TIM_CHANNEL_STATE_READY); + TIM_CHANNEL_N_STATE_SET(htim, TIM_CHANNEL_1, HAL_TIM_CHANNEL_STATE_READY); + TIM_CHANNEL_N_STATE_SET(htim, TIM_CHANNEL_2, HAL_TIM_CHANNEL_STATE_READY); + } + + /* Return function status */ + return HAL_OK; +} + +/** + * @} + */ +/** @defgroup TIM_Exported_Functions_Group7 TIM IRQ handler management + * @brief TIM IRQ handler management + * +@verbatim + ============================================================================== + ##### IRQ handler management ##### + ============================================================================== + [..] + This section provides Timer IRQ handler function. + +@endverbatim + * @{ + */ +/** + * @brief This function handles TIM interrupts requests. 
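+ * @note  Typically called from the peripheral interrupt vector, e.g. (illustrative;
+ *        the TIM2 instance and the htim2 handle name are application-side
+ *        assumptions):
+ *          void TIM2_IRQHandler(void) { HAL_TIM_IRQHandler(&htim2); }
+ *        Events are then reported through the HAL_TIM_xxxCallback() functions,
+ *        for example HAL_TIM_PeriodElapsedCallback() for the update event.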
+ * @param htim TIM handle + * @retval None + */ +void HAL_TIM_IRQHandler(TIM_HandleTypeDef *htim) +{ + /* Capture compare 1 event */ + if (__HAL_TIM_GET_FLAG(htim, TIM_FLAG_CC1) != RESET) + { + if (__HAL_TIM_GET_IT_SOURCE(htim, TIM_IT_CC1) != RESET) + { + { + __HAL_TIM_CLEAR_IT(htim, TIM_IT_CC1); + htim->Channel = HAL_TIM_ACTIVE_CHANNEL_1; + + /* Input capture event */ + if ((htim->Instance->CCMR1 & TIM_CCMR1_CC1S) != 0x00U) + { +#if (USE_HAL_TIM_REGISTER_CALLBACKS == 1) + htim->IC_CaptureCallback(htim); +#else + HAL_TIM_IC_CaptureCallback(htim); +#endif /* USE_HAL_TIM_REGISTER_CALLBACKS */ + } + /* Output compare event */ + else + { +#if (USE_HAL_TIM_REGISTER_CALLBACKS == 1) + htim->OC_DelayElapsedCallback(htim); + htim->PWM_PulseFinishedCallback(htim); +#else + HAL_TIM_OC_DelayElapsedCallback(htim); + HAL_TIM_PWM_PulseFinishedCallback(htim); +#endif /* USE_HAL_TIM_REGISTER_CALLBACKS */ + } + htim->Channel = HAL_TIM_ACTIVE_CHANNEL_CLEARED; + } + } + } + /* Capture compare 2 event */ + if (__HAL_TIM_GET_FLAG(htim, TIM_FLAG_CC2) != RESET) + { + if (__HAL_TIM_GET_IT_SOURCE(htim, TIM_IT_CC2) != RESET) + { + __HAL_TIM_CLEAR_IT(htim, TIM_IT_CC2); + htim->Channel = HAL_TIM_ACTIVE_CHANNEL_2; + /* Input capture event */ + if ((htim->Instance->CCMR1 & TIM_CCMR1_CC2S) != 0x00U) + { +#if (USE_HAL_TIM_REGISTER_CALLBACKS == 1) + htim->IC_CaptureCallback(htim); +#else + HAL_TIM_IC_CaptureCallback(htim); +#endif /* USE_HAL_TIM_REGISTER_CALLBACKS */ + } + /* Output compare event */ + else + { +#if (USE_HAL_TIM_REGISTER_CALLBACKS == 1) + htim->OC_DelayElapsedCallback(htim); + htim->PWM_PulseFinishedCallback(htim); +#else + HAL_TIM_OC_DelayElapsedCallback(htim); + HAL_TIM_PWM_PulseFinishedCallback(htim); +#endif /* USE_HAL_TIM_REGISTER_CALLBACKS */ + } + htim->Channel = HAL_TIM_ACTIVE_CHANNEL_CLEARED; + } + } + /* Capture compare 3 event */ + if (__HAL_TIM_GET_FLAG(htim, TIM_FLAG_CC3) != RESET) + { + if (__HAL_TIM_GET_IT_SOURCE(htim, TIM_IT_CC3) != RESET) + { + __HAL_TIM_CLEAR_IT(htim, TIM_IT_CC3); + htim->Channel = HAL_TIM_ACTIVE_CHANNEL_3; + /* Input capture event */ + if ((htim->Instance->CCMR2 & TIM_CCMR2_CC3S) != 0x00U) + { +#if (USE_HAL_TIM_REGISTER_CALLBACKS == 1) + htim->IC_CaptureCallback(htim); +#else + HAL_TIM_IC_CaptureCallback(htim); +#endif /* USE_HAL_TIM_REGISTER_CALLBACKS */ + } + /* Output compare event */ + else + { +#if (USE_HAL_TIM_REGISTER_CALLBACKS == 1) + htim->OC_DelayElapsedCallback(htim); + htim->PWM_PulseFinishedCallback(htim); +#else + HAL_TIM_OC_DelayElapsedCallback(htim); + HAL_TIM_PWM_PulseFinishedCallback(htim); +#endif /* USE_HAL_TIM_REGISTER_CALLBACKS */ + } + htim->Channel = HAL_TIM_ACTIVE_CHANNEL_CLEARED; + } + } + /* Capture compare 4 event */ + if (__HAL_TIM_GET_FLAG(htim, TIM_FLAG_CC4) != RESET) + { + if (__HAL_TIM_GET_IT_SOURCE(htim, TIM_IT_CC4) != RESET) + { + __HAL_TIM_CLEAR_IT(htim, TIM_IT_CC4); + htim->Channel = HAL_TIM_ACTIVE_CHANNEL_4; + /* Input capture event */ + if ((htim->Instance->CCMR2 & TIM_CCMR2_CC4S) != 0x00U) + { +#if (USE_HAL_TIM_REGISTER_CALLBACKS == 1) + htim->IC_CaptureCallback(htim); +#else + HAL_TIM_IC_CaptureCallback(htim); +#endif /* USE_HAL_TIM_REGISTER_CALLBACKS */ + } + /* Output compare event */ + else + { +#if (USE_HAL_TIM_REGISTER_CALLBACKS == 1) + htim->OC_DelayElapsedCallback(htim); + htim->PWM_PulseFinishedCallback(htim); +#else + HAL_TIM_OC_DelayElapsedCallback(htim); + HAL_TIM_PWM_PulseFinishedCallback(htim); +#endif /* USE_HAL_TIM_REGISTER_CALLBACKS */ + } + htim->Channel = HAL_TIM_ACTIVE_CHANNEL_CLEARED; + } + } + /* TIM Update 
event */ + if (__HAL_TIM_GET_FLAG(htim, TIM_FLAG_UPDATE) != RESET) + { + if (__HAL_TIM_GET_IT_SOURCE(htim, TIM_IT_UPDATE) != RESET) + { + __HAL_TIM_CLEAR_IT(htim, TIM_IT_UPDATE); +#if (USE_HAL_TIM_REGISTER_CALLBACKS == 1) + htim->PeriodElapsedCallback(htim); +#else + HAL_TIM_PeriodElapsedCallback(htim); +#endif /* USE_HAL_TIM_REGISTER_CALLBACKS */ + } + } + /* TIM Break input event */ + if (__HAL_TIM_GET_FLAG(htim, TIM_FLAG_BREAK) != RESET) + { + if (__HAL_TIM_GET_IT_SOURCE(htim, TIM_IT_BREAK) != RESET) + { + __HAL_TIM_CLEAR_IT(htim, TIM_IT_BREAK); +#if (USE_HAL_TIM_REGISTER_CALLBACKS == 1) + htim->BreakCallback(htim); +#else + HAL_TIMEx_BreakCallback(htim); +#endif /* USE_HAL_TIM_REGISTER_CALLBACKS */ + } + } + /* TIM Break2 input event */ + if (__HAL_TIM_GET_FLAG(htim, TIM_FLAG_BREAK2) != RESET) + { + if (__HAL_TIM_GET_IT_SOURCE(htim, TIM_IT_BREAK) != RESET) + { + __HAL_TIM_CLEAR_FLAG(htim, TIM_FLAG_BREAK2); +#if (USE_HAL_TIM_REGISTER_CALLBACKS == 1) + htim->Break2Callback(htim); +#else + HAL_TIMEx_Break2Callback(htim); +#endif /* USE_HAL_TIM_REGISTER_CALLBACKS */ + } + } + /* TIM Trigger detection event */ + if (__HAL_TIM_GET_FLAG(htim, TIM_FLAG_TRIGGER) != RESET) + { + if (__HAL_TIM_GET_IT_SOURCE(htim, TIM_IT_TRIGGER) != RESET) + { + __HAL_TIM_CLEAR_IT(htim, TIM_IT_TRIGGER); +#if (USE_HAL_TIM_REGISTER_CALLBACKS == 1) + htim->TriggerCallback(htim); +#else + HAL_TIM_TriggerCallback(htim); +#endif /* USE_HAL_TIM_REGISTER_CALLBACKS */ + } + } + /* TIM commutation event */ + if (__HAL_TIM_GET_FLAG(htim, TIM_FLAG_COM) != RESET) + { + if (__HAL_TIM_GET_IT_SOURCE(htim, TIM_IT_COM) != RESET) + { + __HAL_TIM_CLEAR_IT(htim, TIM_FLAG_COM); +#if (USE_HAL_TIM_REGISTER_CALLBACKS == 1) + htim->CommutationCallback(htim); +#else + HAL_TIMEx_CommutCallback(htim); +#endif /* USE_HAL_TIM_REGISTER_CALLBACKS */ + } + } +} + +/** + * @} + */ + +/** @defgroup TIM_Exported_Functions_Group8 TIM Peripheral Control functions + * @brief TIM Peripheral Control functions + * +@verbatim + ============================================================================== + ##### Peripheral Control functions ##### + ============================================================================== + [..] + This section provides functions allowing to: + (+) Configure The Input Output channels for OC, PWM, IC or One Pulse mode. + (+) Configure External Clock source. + (+) Configure Complementary channels, break features and dead time. + (+) Configure Master and the Slave synchronization. + (+) Configure the DMA Burst Mode. + +@endverbatim + * @{ + */ + +/** + * @brief Initializes the TIM Output Compare Channels according to the specified + * parameters in the TIM_OC_InitTypeDef. 
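+ * @note  Illustrative sketch (htim2, the channel choice and the compare value are
+ *        application-side assumptions):
+ *          TIM_OC_InitTypeDef sOCConfig = {0};
+ *          sOCConfig.OCMode     = TIM_OCMODE_TOGGLE;
+ *          sOCConfig.Pulse      = 1000U;
+ *          sOCConfig.OCPolarity = TIM_OCPOLARITY_HIGH;
+ *          HAL_TIM_OC_ConfigChannel(&htim2, &sOCConfig, TIM_CHANNEL_1);
+ *          HAL_TIM_OC_Start(&htim2, TIM_CHANNEL_1);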
+ * @param htim TIM Output Compare handle + * @param sConfig TIM Output Compare configuration structure + * @param Channel TIM Channels to configure + * This parameter can be one of the following values: + * @arg TIM_CHANNEL_1: TIM Channel 1 selected + * @arg TIM_CHANNEL_2: TIM Channel 2 selected + * @arg TIM_CHANNEL_3: TIM Channel 3 selected + * @arg TIM_CHANNEL_4: TIM Channel 4 selected + * @arg TIM_CHANNEL_5: TIM Channel 5 selected + * @arg TIM_CHANNEL_6: TIM Channel 6 selected + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIM_OC_ConfigChannel(TIM_HandleTypeDef *htim, + TIM_OC_InitTypeDef *sConfig, + uint32_t Channel) +{ + HAL_StatusTypeDef status = HAL_OK; + + /* Check the parameters */ + assert_param(IS_TIM_CHANNELS(Channel)); + assert_param(IS_TIM_OC_MODE(sConfig->OCMode)); + assert_param(IS_TIM_OC_POLARITY(sConfig->OCPolarity)); + + /* Process Locked */ + __HAL_LOCK(htim); + + switch (Channel) + { + case TIM_CHANNEL_1: + { + /* Check the parameters */ + assert_param(IS_TIM_CC1_INSTANCE(htim->Instance)); + + /* Configure the TIM Channel 1 in Output Compare */ + TIM_OC1_SetConfig(htim->Instance, sConfig); + break; + } + + case TIM_CHANNEL_2: + { + /* Check the parameters */ + assert_param(IS_TIM_CC2_INSTANCE(htim->Instance)); + + /* Configure the TIM Channel 2 in Output Compare */ + TIM_OC2_SetConfig(htim->Instance, sConfig); + break; + } + + case TIM_CHANNEL_3: + { + /* Check the parameters */ + assert_param(IS_TIM_CC3_INSTANCE(htim->Instance)); + + /* Configure the TIM Channel 3 in Output Compare */ + TIM_OC3_SetConfig(htim->Instance, sConfig); + break; + } + + case TIM_CHANNEL_4: + { + /* Check the parameters */ + assert_param(IS_TIM_CC4_INSTANCE(htim->Instance)); + + /* Configure the TIM Channel 4 in Output Compare */ + TIM_OC4_SetConfig(htim->Instance, sConfig); + break; + } + + case TIM_CHANNEL_5: + { + /* Check the parameters */ + assert_param(IS_TIM_CC5_INSTANCE(htim->Instance)); + + /* Configure the TIM Channel 5 in Output Compare */ + TIM_OC5_SetConfig(htim->Instance, sConfig); + break; + } + + case TIM_CHANNEL_6: + { + /* Check the parameters */ + assert_param(IS_TIM_CC6_INSTANCE(htim->Instance)); + + /* Configure the TIM Channel 6 in Output Compare */ + TIM_OC6_SetConfig(htim->Instance, sConfig); + break; + } + + default: + status = HAL_ERROR; + break; + } + + __HAL_UNLOCK(htim); + + return status; +} + +/** + * @brief Initializes the TIM Input Capture Channels according to the specified + * parameters in the TIM_IC_InitTypeDef. 
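+ * @note  Illustrative sketch (htim2 and the channel choice are application-side
+ *        assumptions):
+ *          TIM_IC_InitTypeDef sICConfig = {0};
+ *          sICConfig.ICPolarity  = TIM_ICPOLARITY_RISING;
+ *          sICConfig.ICSelection = TIM_ICSELECTION_DIRECTTI;
+ *          sICConfig.ICPrescaler = TIM_ICPSC_DIV1;
+ *          sICConfig.ICFilter    = 0U;
+ *          HAL_TIM_IC_ConfigChannel(&htim2, &sICConfig, TIM_CHANNEL_1);
+ *          HAL_TIM_IC_Start_IT(&htim2, TIM_CHANNEL_1);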
+ * @param htim TIM IC handle + * @param sConfig TIM Input Capture configuration structure + * @param Channel TIM Channel to configure + * This parameter can be one of the following values: + * @arg TIM_CHANNEL_1: TIM Channel 1 selected + * @arg TIM_CHANNEL_2: TIM Channel 2 selected + * @arg TIM_CHANNEL_3: TIM Channel 3 selected + * @arg TIM_CHANNEL_4: TIM Channel 4 selected + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIM_IC_ConfigChannel(TIM_HandleTypeDef *htim, TIM_IC_InitTypeDef *sConfig, uint32_t Channel) +{ + HAL_StatusTypeDef status = HAL_OK; + + /* Check the parameters */ + assert_param(IS_TIM_CC1_INSTANCE(htim->Instance)); + assert_param(IS_TIM_IC_POLARITY(sConfig->ICPolarity)); + assert_param(IS_TIM_IC_SELECTION(sConfig->ICSelection)); + assert_param(IS_TIM_IC_PRESCALER(sConfig->ICPrescaler)); + assert_param(IS_TIM_IC_FILTER(sConfig->ICFilter)); + + /* Process Locked */ + __HAL_LOCK(htim); + + if (Channel == TIM_CHANNEL_1) + { + /* TI1 Configuration */ + TIM_TI1_SetConfig(htim->Instance, + sConfig->ICPolarity, + sConfig->ICSelection, + sConfig->ICFilter); + + /* Reset the IC1PSC Bits */ + htim->Instance->CCMR1 &= ~TIM_CCMR1_IC1PSC; + + /* Set the IC1PSC value */ + htim->Instance->CCMR1 |= sConfig->ICPrescaler; + } + else if (Channel == TIM_CHANNEL_2) + { + /* TI2 Configuration */ + assert_param(IS_TIM_CC2_INSTANCE(htim->Instance)); + + TIM_TI2_SetConfig(htim->Instance, + sConfig->ICPolarity, + sConfig->ICSelection, + sConfig->ICFilter); + + /* Reset the IC2PSC Bits */ + htim->Instance->CCMR1 &= ~TIM_CCMR1_IC2PSC; + + /* Set the IC2PSC value */ + htim->Instance->CCMR1 |= (sConfig->ICPrescaler << 8U); + } + else if (Channel == TIM_CHANNEL_3) + { + /* TI3 Configuration */ + assert_param(IS_TIM_CC3_INSTANCE(htim->Instance)); + + TIM_TI3_SetConfig(htim->Instance, + sConfig->ICPolarity, + sConfig->ICSelection, + sConfig->ICFilter); + + /* Reset the IC3PSC Bits */ + htim->Instance->CCMR2 &= ~TIM_CCMR2_IC3PSC; + + /* Set the IC3PSC value */ + htim->Instance->CCMR2 |= sConfig->ICPrescaler; + } + else if (Channel == TIM_CHANNEL_4) + { + /* TI4 Configuration */ + assert_param(IS_TIM_CC4_INSTANCE(htim->Instance)); + + TIM_TI4_SetConfig(htim->Instance, + sConfig->ICPolarity, + sConfig->ICSelection, + sConfig->ICFilter); + + /* Reset the IC4PSC Bits */ + htim->Instance->CCMR2 &= ~TIM_CCMR2_IC4PSC; + + /* Set the IC4PSC value */ + htim->Instance->CCMR2 |= (sConfig->ICPrescaler << 8U); + } + else + { + status = HAL_ERROR; + } + + __HAL_UNLOCK(htim); + + return status; +} + +/** + * @brief Initializes the TIM PWM channels according to the specified + * parameters in the TIM_OC_InitTypeDef. 
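+ * @note  Illustrative sketch for a roughly 50% duty cycle (htim1 and the channel
+ *        choice are application-side assumptions):
+ *          TIM_OC_InitTypeDef sPWMConfig = {0};
+ *          sPWMConfig.OCMode     = TIM_OCMODE_PWM1;
+ *          sPWMConfig.Pulse      = (htim1.Init.Period + 1U) / 2U;
+ *          sPWMConfig.OCPolarity = TIM_OCPOLARITY_HIGH;
+ *          sPWMConfig.OCFastMode = TIM_OCFAST_DISABLE;
+ *          HAL_TIM_PWM_ConfigChannel(&htim1, &sPWMConfig, TIM_CHANNEL_1);
+ *          HAL_TIM_PWM_Start(&htim1, TIM_CHANNEL_1);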
+ * @param htim TIM PWM handle + * @param sConfig TIM PWM configuration structure + * @param Channel TIM Channels to be configured + * This parameter can be one of the following values: + * @arg TIM_CHANNEL_1: TIM Channel 1 selected + * @arg TIM_CHANNEL_2: TIM Channel 2 selected + * @arg TIM_CHANNEL_3: TIM Channel 3 selected + * @arg TIM_CHANNEL_4: TIM Channel 4 selected + * @arg TIM_CHANNEL_5: TIM Channel 5 selected + * @arg TIM_CHANNEL_6: TIM Channel 6 selected + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIM_PWM_ConfigChannel(TIM_HandleTypeDef *htim, + TIM_OC_InitTypeDef *sConfig, + uint32_t Channel) +{ + HAL_StatusTypeDef status = HAL_OK; + + /* Check the parameters */ + assert_param(IS_TIM_CHANNELS(Channel)); + assert_param(IS_TIM_PWM_MODE(sConfig->OCMode)); + assert_param(IS_TIM_OC_POLARITY(sConfig->OCPolarity)); + assert_param(IS_TIM_FAST_STATE(sConfig->OCFastMode)); + + /* Process Locked */ + __HAL_LOCK(htim); + + switch (Channel) + { + case TIM_CHANNEL_1: + { + /* Check the parameters */ + assert_param(IS_TIM_CC1_INSTANCE(htim->Instance)); + + /* Configure the Channel 1 in PWM mode */ + TIM_OC1_SetConfig(htim->Instance, sConfig); + + /* Set the Preload enable bit for channel1 */ + htim->Instance->CCMR1 |= TIM_CCMR1_OC1PE; + + /* Configure the Output Fast mode */ + htim->Instance->CCMR1 &= ~TIM_CCMR1_OC1FE; + htim->Instance->CCMR1 |= sConfig->OCFastMode; + break; + } + + case TIM_CHANNEL_2: + { + /* Check the parameters */ + assert_param(IS_TIM_CC2_INSTANCE(htim->Instance)); + + /* Configure the Channel 2 in PWM mode */ + TIM_OC2_SetConfig(htim->Instance, sConfig); + + /* Set the Preload enable bit for channel2 */ + htim->Instance->CCMR1 |= TIM_CCMR1_OC2PE; + + /* Configure the Output Fast mode */ + htim->Instance->CCMR1 &= ~TIM_CCMR1_OC2FE; + htim->Instance->CCMR1 |= sConfig->OCFastMode << 8U; + break; + } + + case TIM_CHANNEL_3: + { + /* Check the parameters */ + assert_param(IS_TIM_CC3_INSTANCE(htim->Instance)); + + /* Configure the Channel 3 in PWM mode */ + TIM_OC3_SetConfig(htim->Instance, sConfig); + + /* Set the Preload enable bit for channel3 */ + htim->Instance->CCMR2 |= TIM_CCMR2_OC3PE; + + /* Configure the Output Fast mode */ + htim->Instance->CCMR2 &= ~TIM_CCMR2_OC3FE; + htim->Instance->CCMR2 |= sConfig->OCFastMode; + break; + } + + case TIM_CHANNEL_4: + { + /* Check the parameters */ + assert_param(IS_TIM_CC4_INSTANCE(htim->Instance)); + + /* Configure the Channel 4 in PWM mode */ + TIM_OC4_SetConfig(htim->Instance, sConfig); + + /* Set the Preload enable bit for channel4 */ + htim->Instance->CCMR2 |= TIM_CCMR2_OC4PE; + + /* Configure the Output Fast mode */ + htim->Instance->CCMR2 &= ~TIM_CCMR2_OC4FE; + htim->Instance->CCMR2 |= sConfig->OCFastMode << 8U; + break; + } + + case TIM_CHANNEL_5: + { + /* Check the parameters */ + assert_param(IS_TIM_CC5_INSTANCE(htim->Instance)); + + /* Configure the Channel 5 in PWM mode */ + TIM_OC5_SetConfig(htim->Instance, sConfig); + + /* Set the Preload enable bit for channel5*/ + htim->Instance->CCMR3 |= TIM_CCMR3_OC5PE; + + /* Configure the Output Fast mode */ + htim->Instance->CCMR3 &= ~TIM_CCMR3_OC5FE; + htim->Instance->CCMR3 |= sConfig->OCFastMode; + break; + } + + case TIM_CHANNEL_6: + { + /* Check the parameters */ + assert_param(IS_TIM_CC6_INSTANCE(htim->Instance)); + + /* Configure the Channel 6 in PWM mode */ + TIM_OC6_SetConfig(htim->Instance, sConfig); + + /* Set the Preload enable bit for channel6 */ + htim->Instance->CCMR3 |= TIM_CCMR3_OC6PE; + + /* Configure the Output Fast mode */ + htim->Instance->CCMR3 &= 
~TIM_CCMR3_OC6FE; + htim->Instance->CCMR3 |= sConfig->OCFastMode << 8U; + break; + } + + default: + status = HAL_ERROR; + break; + } + + __HAL_UNLOCK(htim); + + return status; +} + +/** + * @brief Initializes the TIM One Pulse Channels according to the specified + * parameters in the TIM_OnePulse_InitTypeDef. + * @param htim TIM One Pulse handle + * @param sConfig TIM One Pulse configuration structure + * @param OutputChannel TIM output channel to configure + * This parameter can be one of the following values: + * @arg TIM_CHANNEL_1: TIM Channel 1 selected + * @arg TIM_CHANNEL_2: TIM Channel 2 selected + * @param InputChannel TIM input Channel to configure + * This parameter can be one of the following values: + * @arg TIM_CHANNEL_1: TIM Channel 1 selected + * @arg TIM_CHANNEL_2: TIM Channel 2 selected + * @note To output a waveform with a minimum delay user can enable the fast + * mode by calling the @ref __HAL_TIM_ENABLE_OCxFAST macro. Then CCx + * output is forced in response to the edge detection on TIx input, + * without taking in account the comparison. + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIM_OnePulse_ConfigChannel(TIM_HandleTypeDef *htim, TIM_OnePulse_InitTypeDef *sConfig, + uint32_t OutputChannel, uint32_t InputChannel) +{ + HAL_StatusTypeDef status = HAL_OK; + TIM_OC_InitTypeDef temp1; + + /* Check the parameters */ + assert_param(IS_TIM_OPM_CHANNELS(OutputChannel)); + assert_param(IS_TIM_OPM_CHANNELS(InputChannel)); + + if (OutputChannel != InputChannel) + { + /* Process Locked */ + __HAL_LOCK(htim); + + htim->State = HAL_TIM_STATE_BUSY; + + /* Extract the Output compare configuration from sConfig structure */ + temp1.OCMode = sConfig->OCMode; + temp1.Pulse = sConfig->Pulse; + temp1.OCPolarity = sConfig->OCPolarity; + temp1.OCNPolarity = sConfig->OCNPolarity; + temp1.OCIdleState = sConfig->OCIdleState; + temp1.OCNIdleState = sConfig->OCNIdleState; + + switch (OutputChannel) + { + case TIM_CHANNEL_1: + { + assert_param(IS_TIM_CC1_INSTANCE(htim->Instance)); + + TIM_OC1_SetConfig(htim->Instance, &temp1); + break; + } + + case TIM_CHANNEL_2: + { + assert_param(IS_TIM_CC2_INSTANCE(htim->Instance)); + + TIM_OC2_SetConfig(htim->Instance, &temp1); + break; + } + + default: + status = HAL_ERROR; + break; + } + + if (status == HAL_OK) + { + switch (InputChannel) + { + case TIM_CHANNEL_1: + { + assert_param(IS_TIM_CC1_INSTANCE(htim->Instance)); + + TIM_TI1_SetConfig(htim->Instance, sConfig->ICPolarity, + sConfig->ICSelection, sConfig->ICFilter); + + /* Reset the IC1PSC Bits */ + htim->Instance->CCMR1 &= ~TIM_CCMR1_IC1PSC; + + /* Select the Trigger source */ + htim->Instance->SMCR &= ~TIM_SMCR_TS; + htim->Instance->SMCR |= TIM_TS_TI1FP1; + + /* Select the Slave Mode */ + htim->Instance->SMCR &= ~TIM_SMCR_SMS; + htim->Instance->SMCR |= TIM_SLAVEMODE_TRIGGER; + break; + } + + case TIM_CHANNEL_2: + { + assert_param(IS_TIM_CC2_INSTANCE(htim->Instance)); + + TIM_TI2_SetConfig(htim->Instance, sConfig->ICPolarity, + sConfig->ICSelection, sConfig->ICFilter); + + /* Reset the IC2PSC Bits */ + htim->Instance->CCMR1 &= ~TIM_CCMR1_IC2PSC; + + /* Select the Trigger source */ + htim->Instance->SMCR &= ~TIM_SMCR_TS; + htim->Instance->SMCR |= TIM_TS_TI2FP2; + + /* Select the Slave Mode */ + htim->Instance->SMCR &= ~TIM_SMCR_SMS; + htim->Instance->SMCR |= TIM_SLAVEMODE_TRIGGER; + break; + } + + default: + status = HAL_ERROR; + break; + } + } + + htim->State = HAL_TIM_STATE_READY; + + __HAL_UNLOCK(htim); + + return status; + } + else + { + return HAL_ERROR; + } +} + +/** + * @brief Configure 
the DMA Burst to transfer Data from the memory to the TIM peripheral + * @param htim TIM handle + * @param BurstBaseAddress TIM Base address from where the DMA will start the Data write + * This parameter can be one of the following values: + * @arg TIM_DMABASE_CR1 + * @arg TIM_DMABASE_CR2 + * @arg TIM_DMABASE_SMCR + * @arg TIM_DMABASE_DIER + * @arg TIM_DMABASE_SR + * @arg TIM_DMABASE_EGR + * @arg TIM_DMABASE_CCMR1 + * @arg TIM_DMABASE_CCMR2 + * @arg TIM_DMABASE_CCER + * @arg TIM_DMABASE_CNT + * @arg TIM_DMABASE_PSC + * @arg TIM_DMABASE_ARR + * @arg TIM_DMABASE_RCR + * @arg TIM_DMABASE_CCR1 + * @arg TIM_DMABASE_CCR2 + * @arg TIM_DMABASE_CCR3 + * @arg TIM_DMABASE_CCR4 + * @arg TIM_DMABASE_BDTR + * @arg TIM_DMABASE_OR + * @arg TIM_DMABASE_CCMR3 + * @arg TIM_DMABASE_CCR5 + * @arg TIM_DMABASE_CCR6 + * @arg TIM_DMABASE_AF1 (*) + * @arg TIM_DMABASE_AF2 (*) + * (*) value not defined in all devices + * @param BurstRequestSrc TIM DMA Request sources + * This parameter can be one of the following values: + * @arg TIM_DMA_UPDATE: TIM update Interrupt source + * @arg TIM_DMA_CC1: TIM Capture Compare 1 DMA source + * @arg TIM_DMA_CC2: TIM Capture Compare 2 DMA source + * @arg TIM_DMA_CC3: TIM Capture Compare 3 DMA source + * @arg TIM_DMA_CC4: TIM Capture Compare 4 DMA source + * @arg TIM_DMA_COM: TIM Commutation DMA source + * @arg TIM_DMA_TRIGGER: TIM Trigger DMA source + * @param BurstBuffer The Buffer address. + * @param BurstLength DMA Burst length. This parameter can be one value + * between: TIM_DMABURSTLENGTH_1TRANSFER and TIM_DMABURSTLENGTH_18TRANSFERS. + * @note This function should be used only when BurstLength is equal to DMA data transfer length. + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIM_DMABurst_WriteStart(TIM_HandleTypeDef *htim, uint32_t BurstBaseAddress, + uint32_t BurstRequestSrc, uint32_t *BurstBuffer, uint32_t BurstLength) +{ + HAL_StatusTypeDef status; + + status = HAL_TIM_DMABurst_MultiWriteStart(htim, BurstBaseAddress, BurstRequestSrc, BurstBuffer, BurstLength, + ((BurstLength) >> 8U) + 1U); + + + + return status; +} + +/** + * @brief Configure the DMA Burst to transfer multiple Data from the memory to the TIM peripheral + * @param htim TIM handle + * @param BurstBaseAddress TIM Base address from where the DMA will start the Data write + * This parameter can be one of the following values: + * @arg TIM_DMABASE_CR1 + * @arg TIM_DMABASE_CR2 + * @arg TIM_DMABASE_SMCR + * @arg TIM_DMABASE_DIER + * @arg TIM_DMABASE_SR + * @arg TIM_DMABASE_EGR + * @arg TIM_DMABASE_CCMR1 + * @arg TIM_DMABASE_CCMR2 + * @arg TIM_DMABASE_CCER + * @arg TIM_DMABASE_CNT + * @arg TIM_DMABASE_PSC + * @arg TIM_DMABASE_ARR + * @arg TIM_DMABASE_RCR + * @arg TIM_DMABASE_CCR1 + * @arg TIM_DMABASE_CCR2 + * @arg TIM_DMABASE_CCR3 + * @arg TIM_DMABASE_CCR4 + * @arg TIM_DMABASE_BDTR + * @arg TIM_DMABASE_OR + * @arg TIM_DMABASE_CCMR3 + * @arg TIM_DMABASE_CCR5 + * @arg TIM_DMABASE_CCR6 + * @arg TIM_DMABASE_AF1 (*) + * @arg TIM_DMABASE_AF2 (*) + * (*) value not defined in all devices + * @param BurstRequestSrc TIM DMA Request sources + * This parameter can be one of the following values: + * @arg TIM_DMA_UPDATE: TIM update Interrupt source + * @arg TIM_DMA_CC1: TIM Capture Compare 1 DMA source + * @arg TIM_DMA_CC2: TIM Capture Compare 2 DMA source + * @arg TIM_DMA_CC3: TIM Capture Compare 3 DMA source + * @arg TIM_DMA_CC4: TIM Capture Compare 4 DMA source + * @arg TIM_DMA_COM: TIM Commutation DMA source + * @arg TIM_DMA_TRIGGER: TIM Trigger DMA source + * @param BurstBuffer The Buffer address. 
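+ *        (For illustration: with BurstBaseAddress = TIM_DMABASE_ARR and
+ *        BurstLength = TIM_DMABURSTLENGTH_3TRANSFERS, the buffer is expected to be
+ *        laid out as {ARR, RCR, CCR1, ARR, RCR, CCR1, ...}; a call such as
+ *          HAL_TIM_DMABurst_MultiWriteStart(&htim1, TIM_DMABASE_ARR, TIM_DMA_UPDATE,
+ *                                           aBurstBuffer, TIM_DMABURSTLENGTH_3TRANSFERS, 6U);
+ *        then streams two updates' worth of register values; htim1 and aBurstBuffer
+ *        are application-side assumptions.)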
+ * @param BurstLength DMA Burst length. This parameter can be one value + * between: TIM_DMABURSTLENGTH_1TRANSFER and TIM_DMABURSTLENGTH_18TRANSFERS. + * @param DataLength Data length. This parameter can be one value + * between 1 and 0xFFFF. + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIM_DMABurst_MultiWriteStart(TIM_HandleTypeDef *htim, uint32_t BurstBaseAddress, + uint32_t BurstRequestSrc, uint32_t *BurstBuffer, + uint32_t BurstLength, uint32_t DataLength) +{ + HAL_StatusTypeDef status = HAL_OK; + + /* Check the parameters */ + assert_param(IS_TIM_DMABURST_INSTANCE(htim->Instance)); + assert_param(IS_TIM_DMA_BASE(BurstBaseAddress)); + assert_param(IS_TIM_DMA_SOURCE(BurstRequestSrc)); + assert_param(IS_TIM_DMA_LENGTH(BurstLength)); + assert_param(IS_TIM_DMA_DATA_LENGTH(DataLength)); + + if (htim->DMABurstState == HAL_DMA_BURST_STATE_BUSY) + { + return HAL_BUSY; + } + else if (htim->DMABurstState == HAL_DMA_BURST_STATE_READY) + { + if ((BurstBuffer == NULL) && (BurstLength > 0U)) + { + return HAL_ERROR; + } + else + { + htim->DMABurstState = HAL_DMA_BURST_STATE_BUSY; + } + } + else + { + /* nothing to do */ + } + + switch (BurstRequestSrc) + { + case TIM_DMA_UPDATE: + { + /* Set the DMA Period elapsed callbacks */ + htim->hdma[TIM_DMA_ID_UPDATE]->XferCpltCallback = TIM_DMAPeriodElapsedCplt; + htim->hdma[TIM_DMA_ID_UPDATE]->XferHalfCpltCallback = TIM_DMAPeriodElapsedHalfCplt; + + /* Set the DMA error callback */ + htim->hdma[TIM_DMA_ID_UPDATE]->XferErrorCallback = TIM_DMAError ; + + /* Enable the DMA stream */ + if (HAL_DMA_Start_IT(htim->hdma[TIM_DMA_ID_UPDATE], (uint32_t)BurstBuffer, + (uint32_t)&htim->Instance->DMAR, DataLength) != HAL_OK) + { + /* Return error status */ + return HAL_ERROR; + } + break; + } + case TIM_DMA_CC1: + { + /* Set the DMA compare callbacks */ + htim->hdma[TIM_DMA_ID_CC1]->XferCpltCallback = TIM_DMADelayPulseCplt; + htim->hdma[TIM_DMA_ID_CC1]->XferHalfCpltCallback = TIM_DMADelayPulseHalfCplt; + + /* Set the DMA error callback */ + htim->hdma[TIM_DMA_ID_CC1]->XferErrorCallback = TIM_DMAError ; + + /* Enable the DMA stream */ + if (HAL_DMA_Start_IT(htim->hdma[TIM_DMA_ID_CC1], (uint32_t)BurstBuffer, + (uint32_t)&htim->Instance->DMAR, DataLength) != HAL_OK) + { + /* Return error status */ + return HAL_ERROR; + } + break; + } + case TIM_DMA_CC2: + { + /* Set the DMA compare callbacks */ + htim->hdma[TIM_DMA_ID_CC2]->XferCpltCallback = TIM_DMADelayPulseCplt; + htim->hdma[TIM_DMA_ID_CC2]->XferHalfCpltCallback = TIM_DMADelayPulseHalfCplt; + + /* Set the DMA error callback */ + htim->hdma[TIM_DMA_ID_CC2]->XferErrorCallback = TIM_DMAError ; + + /* Enable the DMA stream */ + if (HAL_DMA_Start_IT(htim->hdma[TIM_DMA_ID_CC2], (uint32_t)BurstBuffer, + (uint32_t)&htim->Instance->DMAR, DataLength) != HAL_OK) + { + /* Return error status */ + return HAL_ERROR; + } + break; + } + case TIM_DMA_CC3: + { + /* Set the DMA compare callbacks */ + htim->hdma[TIM_DMA_ID_CC3]->XferCpltCallback = TIM_DMADelayPulseCplt; + htim->hdma[TIM_DMA_ID_CC3]->XferHalfCpltCallback = TIM_DMADelayPulseHalfCplt; + + /* Set the DMA error callback */ + htim->hdma[TIM_DMA_ID_CC3]->XferErrorCallback = TIM_DMAError ; + + /* Enable the DMA stream */ + if (HAL_DMA_Start_IT(htim->hdma[TIM_DMA_ID_CC3], (uint32_t)BurstBuffer, + (uint32_t)&htim->Instance->DMAR, DataLength) != HAL_OK) + { + /* Return error status */ + return HAL_ERROR; + } + break; + } + case TIM_DMA_CC4: + { + /* Set the DMA compare callbacks */ + htim->hdma[TIM_DMA_ID_CC4]->XferCpltCallback = TIM_DMADelayPulseCplt; + 
htim->hdma[TIM_DMA_ID_CC4]->XferHalfCpltCallback = TIM_DMADelayPulseHalfCplt; + + /* Set the DMA error callback */ + htim->hdma[TIM_DMA_ID_CC4]->XferErrorCallback = TIM_DMAError ; + + /* Enable the DMA stream */ + if (HAL_DMA_Start_IT(htim->hdma[TIM_DMA_ID_CC4], (uint32_t)BurstBuffer, + (uint32_t)&htim->Instance->DMAR, DataLength) != HAL_OK) + { + /* Return error status */ + return HAL_ERROR; + } + break; + } + case TIM_DMA_COM: + { + /* Set the DMA commutation callbacks */ + htim->hdma[TIM_DMA_ID_COMMUTATION]->XferCpltCallback = TIMEx_DMACommutationCplt; + htim->hdma[TIM_DMA_ID_COMMUTATION]->XferHalfCpltCallback = TIMEx_DMACommutationHalfCplt; + + /* Set the DMA error callback */ + htim->hdma[TIM_DMA_ID_COMMUTATION]->XferErrorCallback = TIM_DMAError ; + + /* Enable the DMA stream */ + if (HAL_DMA_Start_IT(htim->hdma[TIM_DMA_ID_COMMUTATION], (uint32_t)BurstBuffer, + (uint32_t)&htim->Instance->DMAR, DataLength) != HAL_OK) + { + /* Return error status */ + return HAL_ERROR; + } + break; + } + case TIM_DMA_TRIGGER: + { + /* Set the DMA trigger callbacks */ + htim->hdma[TIM_DMA_ID_TRIGGER]->XferCpltCallback = TIM_DMATriggerCplt; + htim->hdma[TIM_DMA_ID_TRIGGER]->XferHalfCpltCallback = TIM_DMATriggerHalfCplt; + + /* Set the DMA error callback */ + htim->hdma[TIM_DMA_ID_TRIGGER]->XferErrorCallback = TIM_DMAError ; + + /* Enable the DMA stream */ + if (HAL_DMA_Start_IT(htim->hdma[TIM_DMA_ID_TRIGGER], (uint32_t)BurstBuffer, + (uint32_t)&htim->Instance->DMAR, DataLength) != HAL_OK) + { + /* Return error status */ + return HAL_ERROR; + } + break; + } + default: + status = HAL_ERROR; + break; + } + + if (status == HAL_OK) + { + /* Configure the DMA Burst Mode */ + htim->Instance->DCR = (BurstBaseAddress | BurstLength); + /* Enable the TIM DMA Request */ + __HAL_TIM_ENABLE_DMA(htim, BurstRequestSrc); + } + + /* Return function status */ + return status; +} + +/** + * @brief Stops the TIM DMA Burst mode + * @param htim TIM handle + * @param BurstRequestSrc TIM DMA Request sources to disable + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIM_DMABurst_WriteStop(TIM_HandleTypeDef *htim, uint32_t BurstRequestSrc) +{ + HAL_StatusTypeDef status = HAL_OK; + + /* Check the parameters */ + assert_param(IS_TIM_DMA_SOURCE(BurstRequestSrc)); + + /* Abort the DMA transfer (at least disable the DMA stream) */ + switch (BurstRequestSrc) + { + case TIM_DMA_UPDATE: + { + (void)HAL_DMA_Abort_IT(htim->hdma[TIM_DMA_ID_UPDATE]); + break; + } + case TIM_DMA_CC1: + { + (void)HAL_DMA_Abort_IT(htim->hdma[TIM_DMA_ID_CC1]); + break; + } + case TIM_DMA_CC2: + { + (void)HAL_DMA_Abort_IT(htim->hdma[TIM_DMA_ID_CC2]); + break; + } + case TIM_DMA_CC3: + { + (void)HAL_DMA_Abort_IT(htim->hdma[TIM_DMA_ID_CC3]); + break; + } + case TIM_DMA_CC4: + { + (void)HAL_DMA_Abort_IT(htim->hdma[TIM_DMA_ID_CC4]); + break; + } + case TIM_DMA_COM: + { + (void)HAL_DMA_Abort_IT(htim->hdma[TIM_DMA_ID_COMMUTATION]); + break; + } + case TIM_DMA_TRIGGER: + { + (void)HAL_DMA_Abort_IT(htim->hdma[TIM_DMA_ID_TRIGGER]); + break; + } + default: + status = HAL_ERROR; + break; + } + + if (status == HAL_OK) + { + /* Disable the TIM Update DMA request */ + __HAL_TIM_DISABLE_DMA(htim, BurstRequestSrc); + + /* Change the DMA burst operation state */ + htim->DMABurstState = HAL_DMA_BURST_STATE_READY; + } + + /* Return function status */ + return status; +} + +/** + * @brief Configure the DMA Burst to transfer Data from the TIM peripheral to the memory + * @param htim TIM handle + * @param BurstBaseAddress TIM Base address from where the DMA will start the Data read + 
* This parameter can be one of the following values: + * @arg TIM_DMABASE_CR1 + * @arg TIM_DMABASE_CR2 + * @arg TIM_DMABASE_SMCR + * @arg TIM_DMABASE_DIER + * @arg TIM_DMABASE_SR + * @arg TIM_DMABASE_EGR + * @arg TIM_DMABASE_CCMR1 + * @arg TIM_DMABASE_CCMR2 + * @arg TIM_DMABASE_CCER + * @arg TIM_DMABASE_CNT + * @arg TIM_DMABASE_PSC + * @arg TIM_DMABASE_ARR + * @arg TIM_DMABASE_RCR + * @arg TIM_DMABASE_CCR1 + * @arg TIM_DMABASE_CCR2 + * @arg TIM_DMABASE_CCR3 + * @arg TIM_DMABASE_CCR4 + * @arg TIM_DMABASE_BDTR + * @arg TIM_DMABASE_OR + * @arg TIM_DMABASE_CCMR3 + * @arg TIM_DMABASE_CCR5 + * @arg TIM_DMABASE_CCR6 + * @arg TIM_DMABASE_AF1 (*) + * @arg TIM_DMABASE_AF2 (*) + * (*) value not defined in all devices + * @param BurstRequestSrc TIM DMA Request sources + * This parameter can be one of the following values: + * @arg TIM_DMA_UPDATE: TIM update Interrupt source + * @arg TIM_DMA_CC1: TIM Capture Compare 1 DMA source + * @arg TIM_DMA_CC2: TIM Capture Compare 2 DMA source + * @arg TIM_DMA_CC3: TIM Capture Compare 3 DMA source + * @arg TIM_DMA_CC4: TIM Capture Compare 4 DMA source + * @arg TIM_DMA_COM: TIM Commutation DMA source + * @arg TIM_DMA_TRIGGER: TIM Trigger DMA source + * @param BurstBuffer The Buffer address. + * @param BurstLength DMA Burst length. This parameter can be one value + * between: TIM_DMABURSTLENGTH_1TRANSFER and TIM_DMABURSTLENGTH_18TRANSFERS. + * @note This function should be used only when BurstLength is equal to DMA data transfer length. + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIM_DMABurst_ReadStart(TIM_HandleTypeDef *htim, uint32_t BurstBaseAddress, + uint32_t BurstRequestSrc, uint32_t *BurstBuffer, uint32_t BurstLength) +{ + HAL_StatusTypeDef status; + + status = HAL_TIM_DMABurst_MultiReadStart(htim, BurstBaseAddress, BurstRequestSrc, BurstBuffer, BurstLength, + ((BurstLength) >> 8U) + 1U); + + + return status; +} + +/** + * @brief Configure the DMA Burst to transfer Data from the TIM peripheral to the memory + * @param htim TIM handle + * @param BurstBaseAddress TIM Base address from where the DMA will start the Data read + * This parameter can be one of the following values: + * @arg TIM_DMABASE_CR1 + * @arg TIM_DMABASE_CR2 + * @arg TIM_DMABASE_SMCR + * @arg TIM_DMABASE_DIER + * @arg TIM_DMABASE_SR + * @arg TIM_DMABASE_EGR + * @arg TIM_DMABASE_CCMR1 + * @arg TIM_DMABASE_CCMR2 + * @arg TIM_DMABASE_CCER + * @arg TIM_DMABASE_CNT + * @arg TIM_DMABASE_PSC + * @arg TIM_DMABASE_ARR + * @arg TIM_DMABASE_RCR + * @arg TIM_DMABASE_CCR1 + * @arg TIM_DMABASE_CCR2 + * @arg TIM_DMABASE_CCR3 + * @arg TIM_DMABASE_CCR4 + * @arg TIM_DMABASE_BDTR + * @arg TIM_DMABASE_OR + * @arg TIM_DMABASE_CCMR3 + * @arg TIM_DMABASE_CCR5 + * @arg TIM_DMABASE_CCR6 + * @arg TIM_DMABASE_AF1 (*) + * @arg TIM_DMABASE_AF2 (*) + * (*) value not defined in all devices + * @param BurstRequestSrc TIM DMA Request sources + * This parameter can be one of the following values: + * @arg TIM_DMA_UPDATE: TIM update Interrupt source + * @arg TIM_DMA_CC1: TIM Capture Compare 1 DMA source + * @arg TIM_DMA_CC2: TIM Capture Compare 2 DMA source + * @arg TIM_DMA_CC3: TIM Capture Compare 3 DMA source + * @arg TIM_DMA_CC4: TIM Capture Compare 4 DMA source + * @arg TIM_DMA_COM: TIM Commutation DMA source + * @arg TIM_DMA_TRIGGER: TIM Trigger DMA source + * @param BurstBuffer The Buffer address. + * @param BurstLength DMA Burst length. This parameter can be one value + * between: TIM_DMABURSTLENGTH_1TRANSFER and TIM_DMABURSTLENGTH_18TRANSFERS. + * @param DataLength Data length. 
This parameter can be one value + * between 1 and 0xFFFF. + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIM_DMABurst_MultiReadStart(TIM_HandleTypeDef *htim, uint32_t BurstBaseAddress, + uint32_t BurstRequestSrc, uint32_t *BurstBuffer, + uint32_t BurstLength, uint32_t DataLength) +{ + HAL_StatusTypeDef status = HAL_OK; + + /* Check the parameters */ + assert_param(IS_TIM_DMABURST_INSTANCE(htim->Instance)); + assert_param(IS_TIM_DMA_BASE(BurstBaseAddress)); + assert_param(IS_TIM_DMA_SOURCE(BurstRequestSrc)); + assert_param(IS_TIM_DMA_LENGTH(BurstLength)); + assert_param(IS_TIM_DMA_DATA_LENGTH(DataLength)); + + if (htim->DMABurstState == HAL_DMA_BURST_STATE_BUSY) + { + return HAL_BUSY; + } + else if (htim->DMABurstState == HAL_DMA_BURST_STATE_READY) + { + if ((BurstBuffer == NULL) && (BurstLength > 0U)) + { + return HAL_ERROR; + } + else + { + htim->DMABurstState = HAL_DMA_BURST_STATE_BUSY; + } + } + else + { + /* nothing to do */ + } + switch (BurstRequestSrc) + { + case TIM_DMA_UPDATE: + { + /* Set the DMA Period elapsed callbacks */ + htim->hdma[TIM_DMA_ID_UPDATE]->XferCpltCallback = TIM_DMAPeriodElapsedCplt; + htim->hdma[TIM_DMA_ID_UPDATE]->XferHalfCpltCallback = TIM_DMAPeriodElapsedHalfCplt; + + /* Set the DMA error callback */ + htim->hdma[TIM_DMA_ID_UPDATE]->XferErrorCallback = TIM_DMAError ; + + /* Enable the DMA stream */ + if (HAL_DMA_Start_IT(htim->hdma[TIM_DMA_ID_UPDATE], (uint32_t)&htim->Instance->DMAR, (uint32_t)BurstBuffer, + DataLength) != HAL_OK) + { + /* Return error status */ + return HAL_ERROR; + } + break; + } + case TIM_DMA_CC1: + { + /* Set the DMA capture callbacks */ + htim->hdma[TIM_DMA_ID_CC1]->XferCpltCallback = TIM_DMACaptureCplt; + htim->hdma[TIM_DMA_ID_CC1]->XferHalfCpltCallback = TIM_DMACaptureHalfCplt; + + /* Set the DMA error callback */ + htim->hdma[TIM_DMA_ID_CC1]->XferErrorCallback = TIM_DMAError ; + + /* Enable the DMA stream */ + if (HAL_DMA_Start_IT(htim->hdma[TIM_DMA_ID_CC1], (uint32_t)&htim->Instance->DMAR, (uint32_t)BurstBuffer, + DataLength) != HAL_OK) + { + /* Return error status */ + return HAL_ERROR; + } + break; + } + case TIM_DMA_CC2: + { + /* Set the DMA capture callbacks */ + htim->hdma[TIM_DMA_ID_CC2]->XferCpltCallback = TIM_DMACaptureCplt; + htim->hdma[TIM_DMA_ID_CC2]->XferHalfCpltCallback = TIM_DMACaptureHalfCplt; + + /* Set the DMA error callback */ + htim->hdma[TIM_DMA_ID_CC2]->XferErrorCallback = TIM_DMAError ; + + /* Enable the DMA stream */ + if (HAL_DMA_Start_IT(htim->hdma[TIM_DMA_ID_CC2], (uint32_t)&htim->Instance->DMAR, (uint32_t)BurstBuffer, + DataLength) != HAL_OK) + { + /* Return error status */ + return HAL_ERROR; + } + break; + } + case TIM_DMA_CC3: + { + /* Set the DMA capture callbacks */ + htim->hdma[TIM_DMA_ID_CC3]->XferCpltCallback = TIM_DMACaptureCplt; + htim->hdma[TIM_DMA_ID_CC3]->XferHalfCpltCallback = TIM_DMACaptureHalfCplt; + + /* Set the DMA error callback */ + htim->hdma[TIM_DMA_ID_CC3]->XferErrorCallback = TIM_DMAError ; + + /* Enable the DMA stream */ + if (HAL_DMA_Start_IT(htim->hdma[TIM_DMA_ID_CC3], (uint32_t)&htim->Instance->DMAR, (uint32_t)BurstBuffer, + DataLength) != HAL_OK) + { + /* Return error status */ + return HAL_ERROR; + } + break; + } + case TIM_DMA_CC4: + { + /* Set the DMA capture callbacks */ + htim->hdma[TIM_DMA_ID_CC4]->XferCpltCallback = TIM_DMACaptureCplt; + htim->hdma[TIM_DMA_ID_CC4]->XferHalfCpltCallback = TIM_DMACaptureHalfCplt; + + /* Set the DMA error callback */ + htim->hdma[TIM_DMA_ID_CC4]->XferErrorCallback = TIM_DMAError ; + + /* Enable the DMA stream */ + if 
(HAL_DMA_Start_IT(htim->hdma[TIM_DMA_ID_CC4], (uint32_t)&htim->Instance->DMAR, (uint32_t)BurstBuffer, + DataLength) != HAL_OK) + { + /* Return error status */ + return HAL_ERROR; + } + break; + } + case TIM_DMA_COM: + { + /* Set the DMA commutation callbacks */ + htim->hdma[TIM_DMA_ID_COMMUTATION]->XferCpltCallback = TIMEx_DMACommutationCplt; + htim->hdma[TIM_DMA_ID_COMMUTATION]->XferHalfCpltCallback = TIMEx_DMACommutationHalfCplt; + + /* Set the DMA error callback */ + htim->hdma[TIM_DMA_ID_COMMUTATION]->XferErrorCallback = TIM_DMAError ; + + /* Enable the DMA stream */ + if (HAL_DMA_Start_IT(htim->hdma[TIM_DMA_ID_COMMUTATION], (uint32_t)&htim->Instance->DMAR, (uint32_t)BurstBuffer, + DataLength) != HAL_OK) + { + /* Return error status */ + return HAL_ERROR; + } + break; + } + case TIM_DMA_TRIGGER: + { + /* Set the DMA trigger callbacks */ + htim->hdma[TIM_DMA_ID_TRIGGER]->XferCpltCallback = TIM_DMATriggerCplt; + htim->hdma[TIM_DMA_ID_TRIGGER]->XferHalfCpltCallback = TIM_DMATriggerHalfCplt; + + /* Set the DMA error callback */ + htim->hdma[TIM_DMA_ID_TRIGGER]->XferErrorCallback = TIM_DMAError ; + + /* Enable the DMA stream */ + if (HAL_DMA_Start_IT(htim->hdma[TIM_DMA_ID_TRIGGER], (uint32_t)&htim->Instance->DMAR, (uint32_t)BurstBuffer, + DataLength) != HAL_OK) + { + /* Return error status */ + return HAL_ERROR; + } + break; + } + default: + status = HAL_ERROR; + break; + } + + if (status == HAL_OK) + { + /* Configure the DMA Burst Mode */ + htim->Instance->DCR = (BurstBaseAddress | BurstLength); + + /* Enable the TIM DMA Request */ + __HAL_TIM_ENABLE_DMA(htim, BurstRequestSrc); + } + + /* Return function status */ + return status; +} + +/** + * @brief Stop the DMA burst reading + * @param htim TIM handle + * @param BurstRequestSrc TIM DMA Request sources to disable. + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIM_DMABurst_ReadStop(TIM_HandleTypeDef *htim, uint32_t BurstRequestSrc) +{ + HAL_StatusTypeDef status = HAL_OK; + + /* Check the parameters */ + assert_param(IS_TIM_DMA_SOURCE(BurstRequestSrc)); + + /* Abort the DMA transfer (at least disable the DMA stream) */ + switch (BurstRequestSrc) + { + case TIM_DMA_UPDATE: + { + (void)HAL_DMA_Abort_IT(htim->hdma[TIM_DMA_ID_UPDATE]); + break; + } + case TIM_DMA_CC1: + { + (void)HAL_DMA_Abort_IT(htim->hdma[TIM_DMA_ID_CC1]); + break; + } + case TIM_DMA_CC2: + { + (void)HAL_DMA_Abort_IT(htim->hdma[TIM_DMA_ID_CC2]); + break; + } + case TIM_DMA_CC3: + { + (void)HAL_DMA_Abort_IT(htim->hdma[TIM_DMA_ID_CC3]); + break; + } + case TIM_DMA_CC4: + { + (void)HAL_DMA_Abort_IT(htim->hdma[TIM_DMA_ID_CC4]); + break; + } + case TIM_DMA_COM: + { + (void)HAL_DMA_Abort_IT(htim->hdma[TIM_DMA_ID_COMMUTATION]); + break; + } + case TIM_DMA_TRIGGER: + { + (void)HAL_DMA_Abort_IT(htim->hdma[TIM_DMA_ID_TRIGGER]); + break; + } + default: + status = HAL_ERROR; + break; + } + + if (status == HAL_OK) + { + /* Disable the TIM Update DMA request */ + __HAL_TIM_DISABLE_DMA(htim, BurstRequestSrc); + + /* Change the DMA burst operation state */ + htim->DMABurstState = HAL_DMA_BURST_STATE_READY; + } + + /* Return function status */ + return status; +} + +/** + * @brief Generate a software event + * @param htim TIM handle + * @param EventSource specifies the event source. 
+ * This parameter can be one of the following values: + * @arg TIM_EVENTSOURCE_UPDATE: Timer update Event source + * @arg TIM_EVENTSOURCE_CC1: Timer Capture Compare 1 Event source + * @arg TIM_EVENTSOURCE_CC2: Timer Capture Compare 2 Event source + * @arg TIM_EVENTSOURCE_CC3: Timer Capture Compare 3 Event source + * @arg TIM_EVENTSOURCE_CC4: Timer Capture Compare 4 Event source + * @arg TIM_EVENTSOURCE_COM: Timer COM event source + * @arg TIM_EVENTSOURCE_TRIGGER: Timer Trigger Event source + * @arg TIM_EVENTSOURCE_BREAK: Timer Break event source + * @arg TIM_EVENTSOURCE_BREAK2: Timer Break2 event source + * @note Basic timers can only generate an update event. + * @note TIM_EVENTSOURCE_COM is relevant only with advanced timer instances. + * @note TIM_EVENTSOURCE_BREAK and TIM_EVENTSOURCE_BREAK2 are relevant + * only for timer instances supporting break input(s). + * @retval HAL status + */ + +HAL_StatusTypeDef HAL_TIM_GenerateEvent(TIM_HandleTypeDef *htim, uint32_t EventSource) +{ + /* Check the parameters */ + assert_param(IS_TIM_INSTANCE(htim->Instance)); + assert_param(IS_TIM_EVENT_SOURCE(EventSource)); + + /* Process Locked */ + __HAL_LOCK(htim); + + /* Change the TIM state */ + htim->State = HAL_TIM_STATE_BUSY; + + /* Set the event sources */ + htim->Instance->EGR = EventSource; + + /* Change the TIM state */ + htim->State = HAL_TIM_STATE_READY; + + __HAL_UNLOCK(htim); + + /* Return function status */ + return HAL_OK; +} + +/** + * @brief Configures the OCRef clear feature + * @param htim TIM handle + * @param sClearInputConfig pointer to a TIM_ClearInputConfigTypeDef structure that + * contains the OCREF clear feature and parameters for the TIM peripheral. + * @param Channel specifies the TIM Channel + * This parameter can be one of the following values: + * @arg TIM_CHANNEL_1: TIM Channel 1 + * @arg TIM_CHANNEL_2: TIM Channel 2 + * @arg TIM_CHANNEL_3: TIM Channel 3 + * @arg TIM_CHANNEL_4: TIM Channel 4 + * @arg TIM_CHANNEL_5: TIM Channel 5 + * @arg TIM_CHANNEL_6: TIM Channel 6 + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIM_ConfigOCrefClear(TIM_HandleTypeDef *htim, + TIM_ClearInputConfigTypeDef *sClearInputConfig, + uint32_t Channel) +{ + HAL_StatusTypeDef status = HAL_OK; + + /* Check the parameters */ + assert_param(IS_TIM_OCXREF_CLEAR_INSTANCE(htim->Instance)); + assert_param(IS_TIM_CLEARINPUT_SOURCE(sClearInputConfig->ClearInputSource)); + + /* Process Locked */ + __HAL_LOCK(htim); + + htim->State = HAL_TIM_STATE_BUSY; + + switch (sClearInputConfig->ClearInputSource) + { + case TIM_CLEARINPUTSOURCE_NONE: + { + /* Clear the OCREF clear selection bit and the the ETR Bits */ + CLEAR_BIT(htim->Instance->SMCR, (TIM_SMCR_ETF | TIM_SMCR_ETPS | TIM_SMCR_ECE | TIM_SMCR_ETP)); + break; + } + + case TIM_CLEARINPUTSOURCE_ETR: + { + /* Check the parameters */ + assert_param(IS_TIM_CLEARINPUT_POLARITY(sClearInputConfig->ClearInputPolarity)); + assert_param(IS_TIM_CLEARINPUT_PRESCALER(sClearInputConfig->ClearInputPrescaler)); + assert_param(IS_TIM_CLEARINPUT_FILTER(sClearInputConfig->ClearInputFilter)); + + /* When OCRef clear feature is used with ETR source, ETR prescaler must be off */ + if (sClearInputConfig->ClearInputPrescaler != TIM_CLEARINPUTPRESCALER_DIV1) + { + htim->State = HAL_TIM_STATE_READY; + __HAL_UNLOCK(htim); + return HAL_ERROR; + } + + TIM_ETR_SetConfig(htim->Instance, + sClearInputConfig->ClearInputPrescaler, + sClearInputConfig->ClearInputPolarity, + sClearInputConfig->ClearInputFilter); + break; + } + + default: + status = HAL_ERROR; + break; + } + + if (status == 
HAL_OK) + { + switch (Channel) + { + case TIM_CHANNEL_1: + { + if (sClearInputConfig->ClearInputState != (uint32_t)DISABLE) + { + /* Enable the OCREF clear feature for Channel 1 */ + SET_BIT(htim->Instance->CCMR1, TIM_CCMR1_OC1CE); + } + else + { + /* Disable the OCREF clear feature for Channel 1 */ + CLEAR_BIT(htim->Instance->CCMR1, TIM_CCMR1_OC1CE); + } + break; + } + case TIM_CHANNEL_2: + { + if (sClearInputConfig->ClearInputState != (uint32_t)DISABLE) + { + /* Enable the OCREF clear feature for Channel 2 */ + SET_BIT(htim->Instance->CCMR1, TIM_CCMR1_OC2CE); + } + else + { + /* Disable the OCREF clear feature for Channel 2 */ + CLEAR_BIT(htim->Instance->CCMR1, TIM_CCMR1_OC2CE); + } + break; + } + case TIM_CHANNEL_3: + { + if (sClearInputConfig->ClearInputState != (uint32_t)DISABLE) + { + /* Enable the OCREF clear feature for Channel 3 */ + SET_BIT(htim->Instance->CCMR2, TIM_CCMR2_OC3CE); + } + else + { + /* Disable the OCREF clear feature for Channel 3 */ + CLEAR_BIT(htim->Instance->CCMR2, TIM_CCMR2_OC3CE); + } + break; + } + case TIM_CHANNEL_4: + { + if (sClearInputConfig->ClearInputState != (uint32_t)DISABLE) + { + /* Enable the OCREF clear feature for Channel 4 */ + SET_BIT(htim->Instance->CCMR2, TIM_CCMR2_OC4CE); + } + else + { + /* Disable the OCREF clear feature for Channel 4 */ + CLEAR_BIT(htim->Instance->CCMR2, TIM_CCMR2_OC4CE); + } + break; + } + case TIM_CHANNEL_5: + { + if (sClearInputConfig->ClearInputState != (uint32_t)DISABLE) + { + /* Enable the OCREF clear feature for Channel 5 */ + SET_BIT(htim->Instance->CCMR3, TIM_CCMR3_OC5CE); + } + else + { + /* Disable the OCREF clear feature for Channel 5 */ + CLEAR_BIT(htim->Instance->CCMR3, TIM_CCMR3_OC5CE); + } + break; + } + case TIM_CHANNEL_6: + { + if (sClearInputConfig->ClearInputState != (uint32_t)DISABLE) + { + /* Enable the OCREF clear feature for Channel 6 */ + SET_BIT(htim->Instance->CCMR3, TIM_CCMR3_OC6CE); + } + else + { + /* Disable the OCREF clear feature for Channel 6 */ + CLEAR_BIT(htim->Instance->CCMR3, TIM_CCMR3_OC6CE); + } + break; + } + default: + break; + } + } + + htim->State = HAL_TIM_STATE_READY; + + __HAL_UNLOCK(htim); + + return status; +} + +/** + * @brief Configures the clock source to be used + * @param htim TIM handle + * @param sClockSourceConfig pointer to a TIM_ClockConfigTypeDef structure that + * contains the clock source information for the TIM peripheral. 
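  *
  * @note  Editor's illustrative sketch, not part of the original ST driver:
  *        a minimal call sequence for selecting external clock mode 2 on the
  *        ETR pin, assuming a handle named htim2 that was initialised
  *        elsewhere; TIM_CLOCKPOLARITY_NONINVERTED, TIM_CLOCKPRESCALER_DIV1
  *        and Error_Handler() are assumed names and should be checked against
  *        the device's TIM HAL header and application code.
  * @code
  *   TIM_ClockConfigTypeDef clk_cfg = {0};
  *   clk_cfg.ClockSource    = TIM_CLOCKSOURCE_ETRMODE2;      // count ETR edges
  *   clk_cfg.ClockPolarity  = TIM_CLOCKPOLARITY_NONINVERTED; // assumed macro name
  *   clk_cfg.ClockPrescaler = TIM_CLOCKPRESCALER_DIV1;       // assumed macro name
  *   clk_cfg.ClockFilter    = 0U;                            // no input filtering
  *   if (HAL_TIM_ConfigClockSource(&htim2, &clk_cfg) != HAL_OK)
  *   {
  *     Error_Handler();                                      // hypothetical error hook
  *   }
  * @endcode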
+ * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIM_ConfigClockSource(TIM_HandleTypeDef *htim, TIM_ClockConfigTypeDef *sClockSourceConfig) +{ + HAL_StatusTypeDef status = HAL_OK; + uint32_t tmpsmcr; + + /* Process Locked */ + __HAL_LOCK(htim); + + htim->State = HAL_TIM_STATE_BUSY; + + /* Check the parameters */ + assert_param(IS_TIM_CLOCKSOURCE(sClockSourceConfig->ClockSource)); + + /* Reset the SMS, TS, ECE, ETPS and ETRF bits */ + tmpsmcr = htim->Instance->SMCR; + tmpsmcr &= ~(TIM_SMCR_SMS | TIM_SMCR_TS); + tmpsmcr &= ~(TIM_SMCR_ETF | TIM_SMCR_ETPS | TIM_SMCR_ECE | TIM_SMCR_ETP); + htim->Instance->SMCR = tmpsmcr; + + switch (sClockSourceConfig->ClockSource) + { + case TIM_CLOCKSOURCE_INTERNAL: + { + assert_param(IS_TIM_INSTANCE(htim->Instance)); + break; + } + + case TIM_CLOCKSOURCE_ETRMODE1: + { + /* Check whether or not the timer instance supports external trigger input mode 1 (ETRF)*/ + assert_param(IS_TIM_CLOCKSOURCE_ETRMODE1_INSTANCE(htim->Instance)); + + /* Check ETR input conditioning related parameters */ + assert_param(IS_TIM_CLOCKPRESCALER(sClockSourceConfig->ClockPrescaler)); + assert_param(IS_TIM_CLOCKPOLARITY(sClockSourceConfig->ClockPolarity)); + assert_param(IS_TIM_CLOCKFILTER(sClockSourceConfig->ClockFilter)); + + /* Configure the ETR Clock source */ + TIM_ETR_SetConfig(htim->Instance, + sClockSourceConfig->ClockPrescaler, + sClockSourceConfig->ClockPolarity, + sClockSourceConfig->ClockFilter); + + /* Select the External clock mode1 and the ETRF trigger */ + tmpsmcr = htim->Instance->SMCR; + tmpsmcr |= (TIM_SLAVEMODE_EXTERNAL1 | TIM_CLOCKSOURCE_ETRMODE1); + /* Write to TIMx SMCR */ + htim->Instance->SMCR = tmpsmcr; + break; + } + + case TIM_CLOCKSOURCE_ETRMODE2: + { + /* Check whether or not the timer instance supports external trigger input mode 2 (ETRF)*/ + assert_param(IS_TIM_CLOCKSOURCE_ETRMODE2_INSTANCE(htim->Instance)); + + /* Check ETR input conditioning related parameters */ + assert_param(IS_TIM_CLOCKPRESCALER(sClockSourceConfig->ClockPrescaler)); + assert_param(IS_TIM_CLOCKPOLARITY(sClockSourceConfig->ClockPolarity)); + assert_param(IS_TIM_CLOCKFILTER(sClockSourceConfig->ClockFilter)); + + /* Configure the ETR Clock source */ + TIM_ETR_SetConfig(htim->Instance, + sClockSourceConfig->ClockPrescaler, + sClockSourceConfig->ClockPolarity, + sClockSourceConfig->ClockFilter); + /* Enable the External clock mode2 */ + htim->Instance->SMCR |= TIM_SMCR_ECE; + break; + } + + case TIM_CLOCKSOURCE_TI1: + { + /* Check whether or not the timer instance supports external clock mode 1 */ + assert_param(IS_TIM_CLOCKSOURCE_TIX_INSTANCE(htim->Instance)); + + /* Check TI1 input conditioning related parameters */ + assert_param(IS_TIM_CLOCKPOLARITY(sClockSourceConfig->ClockPolarity)); + assert_param(IS_TIM_CLOCKFILTER(sClockSourceConfig->ClockFilter)); + + TIM_TI1_ConfigInputStage(htim->Instance, + sClockSourceConfig->ClockPolarity, + sClockSourceConfig->ClockFilter); + TIM_ITRx_SetConfig(htim->Instance, TIM_CLOCKSOURCE_TI1); + break; + } + + case TIM_CLOCKSOURCE_TI2: + { + /* Check whether or not the timer instance supports external clock mode 1 (ETRF)*/ + assert_param(IS_TIM_CLOCKSOURCE_TIX_INSTANCE(htim->Instance)); + + /* Check TI2 input conditioning related parameters */ + assert_param(IS_TIM_CLOCKPOLARITY(sClockSourceConfig->ClockPolarity)); + assert_param(IS_TIM_CLOCKFILTER(sClockSourceConfig->ClockFilter)); + + TIM_TI2_ConfigInputStage(htim->Instance, + sClockSourceConfig->ClockPolarity, + sClockSourceConfig->ClockFilter); + TIM_ITRx_SetConfig(htim->Instance, 
TIM_CLOCKSOURCE_TI2); + break; + } + + case TIM_CLOCKSOURCE_TI1ED: + { + /* Check whether or not the timer instance supports external clock mode 1 */ + assert_param(IS_TIM_CLOCKSOURCE_TIX_INSTANCE(htim->Instance)); + + /* Check TI1 input conditioning related parameters */ + assert_param(IS_TIM_CLOCKPOLARITY(sClockSourceConfig->ClockPolarity)); + assert_param(IS_TIM_CLOCKFILTER(sClockSourceConfig->ClockFilter)); + + TIM_TI1_ConfigInputStage(htim->Instance, + sClockSourceConfig->ClockPolarity, + sClockSourceConfig->ClockFilter); + TIM_ITRx_SetConfig(htim->Instance, TIM_CLOCKSOURCE_TI1ED); + break; + } + + case TIM_CLOCKSOURCE_ITR0: + case TIM_CLOCKSOURCE_ITR1: + case TIM_CLOCKSOURCE_ITR2: + case TIM_CLOCKSOURCE_ITR3: + { + /* Check whether or not the timer instance supports internal trigger input */ + assert_param(IS_TIM_CLOCKSOURCE_ITRX_INSTANCE(htim->Instance)); + + TIM_ITRx_SetConfig(htim->Instance, sClockSourceConfig->ClockSource); + break; + } + + default: + status = HAL_ERROR; + break; + } + htim->State = HAL_TIM_STATE_READY; + + __HAL_UNLOCK(htim); + + return status; +} + +/** + * @brief Selects the signal connected to the TI1 input: direct from CH1_input + * or a XOR combination between CH1_input, CH2_input & CH3_input + * @param htim TIM handle. + * @param TI1_Selection Indicate whether or not channel 1 is connected to the + * output of a XOR gate. + * This parameter can be one of the following values: + * @arg TIM_TI1SELECTION_CH1: The TIMx_CH1 pin is connected to TI1 input + * @arg TIM_TI1SELECTION_XORCOMBINATION: The TIMx_CH1, CH2 and CH3 + * pins are connected to the TI1 input (XOR combination) + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIM_ConfigTI1Input(TIM_HandleTypeDef *htim, uint32_t TI1_Selection) +{ + uint32_t tmpcr2; + + /* Check the parameters */ + assert_param(IS_TIM_XOR_INSTANCE(htim->Instance)); + assert_param(IS_TIM_TI1SELECTION(TI1_Selection)); + + /* Get the TIMx CR2 register value */ + tmpcr2 = htim->Instance->CR2; + + /* Reset the TI1 selection */ + tmpcr2 &= ~TIM_CR2_TI1S; + + /* Set the TI1 selection */ + tmpcr2 |= TI1_Selection; + + /* Write to TIMxCR2 */ + htim->Instance->CR2 = tmpcr2; + + return HAL_OK; +} + +/** + * @brief Configures the TIM in Slave mode + * @param htim TIM handle. + * @param sSlaveConfig pointer to a TIM_SlaveConfigTypeDef structure that + * contains the selected trigger (internal trigger input, filtered + * timer input or external trigger input) and the Slave mode + * (Disable, Reset, Gated, Trigger, External clock mode 1). + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIM_SlaveConfigSynchro(TIM_HandleTypeDef *htim, TIM_SlaveConfigTypeDef *sSlaveConfig) +{ + /* Check the parameters */ + assert_param(IS_TIM_SLAVE_INSTANCE(htim->Instance)); + assert_param(IS_TIM_SLAVE_MODE(sSlaveConfig->SlaveMode)); + assert_param(IS_TIM_TRIGGER_SELECTION(sSlaveConfig->InputTrigger)); + + __HAL_LOCK(htim); + + htim->State = HAL_TIM_STATE_BUSY; + + if (TIM_SlaveTimer_SetConfig(htim, sSlaveConfig) != HAL_OK) + { + htim->State = HAL_TIM_STATE_READY; + __HAL_UNLOCK(htim); + return HAL_ERROR; + } + + /* Disable Trigger Interrupt */ + __HAL_TIM_DISABLE_IT(htim, TIM_IT_TRIGGER); + + /* Disable Trigger DMA request */ + __HAL_TIM_DISABLE_DMA(htim, TIM_DMA_TRIGGER); + + htim->State = HAL_TIM_STATE_READY; + + __HAL_UNLOCK(htim); + + return HAL_OK; +} + +/** + * @brief Configures the TIM in Slave mode in interrupt mode + * @param htim TIM handle. 
+ * @param sSlaveConfig pointer to a TIM_SlaveConfigTypeDef structure that + * contains the selected trigger (internal trigger input, filtered + * timer input or external trigger input) and the Slave mode + * (Disable, Reset, Gated, Trigger, External clock mode 1). + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIM_SlaveConfigSynchro_IT(TIM_HandleTypeDef *htim, + TIM_SlaveConfigTypeDef *sSlaveConfig) +{ + /* Check the parameters */ + assert_param(IS_TIM_SLAVE_INSTANCE(htim->Instance)); + assert_param(IS_TIM_SLAVE_MODE(sSlaveConfig->SlaveMode)); + assert_param(IS_TIM_TRIGGER_SELECTION(sSlaveConfig->InputTrigger)); + + __HAL_LOCK(htim); + + htim->State = HAL_TIM_STATE_BUSY; + + if (TIM_SlaveTimer_SetConfig(htim, sSlaveConfig) != HAL_OK) + { + htim->State = HAL_TIM_STATE_READY; + __HAL_UNLOCK(htim); + return HAL_ERROR; + } + + /* Enable Trigger Interrupt */ + __HAL_TIM_ENABLE_IT(htim, TIM_IT_TRIGGER); + + /* Disable Trigger DMA request */ + __HAL_TIM_DISABLE_DMA(htim, TIM_DMA_TRIGGER); + + htim->State = HAL_TIM_STATE_READY; + + __HAL_UNLOCK(htim); + + return HAL_OK; +} + +/** + * @brief Read the captured value from Capture Compare unit + * @param htim TIM handle. + * @param Channel TIM Channels to be enabled + * This parameter can be one of the following values: + * @arg TIM_CHANNEL_1: TIM Channel 1 selected + * @arg TIM_CHANNEL_2: TIM Channel 2 selected + * @arg TIM_CHANNEL_3: TIM Channel 3 selected + * @arg TIM_CHANNEL_4: TIM Channel 4 selected + * @retval Captured value + */ +uint32_t HAL_TIM_ReadCapturedValue(TIM_HandleTypeDef *htim, uint32_t Channel) +{ + uint32_t tmpreg = 0U; + + switch (Channel) + { + case TIM_CHANNEL_1: + { + /* Check the parameters */ + assert_param(IS_TIM_CC1_INSTANCE(htim->Instance)); + + /* Return the capture 1 value */ + tmpreg = htim->Instance->CCR1; + + break; + } + case TIM_CHANNEL_2: + { + /* Check the parameters */ + assert_param(IS_TIM_CC2_INSTANCE(htim->Instance)); + + /* Return the capture 2 value */ + tmpreg = htim->Instance->CCR2; + + break; + } + + case TIM_CHANNEL_3: + { + /* Check the parameters */ + assert_param(IS_TIM_CC3_INSTANCE(htim->Instance)); + + /* Return the capture 3 value */ + tmpreg = htim->Instance->CCR3; + + break; + } + + case TIM_CHANNEL_4: + { + /* Check the parameters */ + assert_param(IS_TIM_CC4_INSTANCE(htim->Instance)); + + /* Return the capture 4 value */ + tmpreg = htim->Instance->CCR4; + + break; + } + + default: + break; + } + + return tmpreg; +} + +/** + * @} + */ + +/** @defgroup TIM_Exported_Functions_Group9 TIM Callbacks functions + * @brief TIM Callbacks functions + * +@verbatim + ============================================================================== + ##### TIM Callbacks functions ##### + ============================================================================== + [..] 
+ This section provides TIM callback functions: + (+) TIM Period elapsed callback + (+) TIM Output Compare callback + (+) TIM Input capture callback + (+) TIM Trigger callback + (+) TIM Error callback + +@endverbatim + * @{ + */ + +/** + * @brief Period elapsed callback in non-blocking mode + * @param htim TIM handle + * @retval None + */ +__weak void HAL_TIM_PeriodElapsedCallback(TIM_HandleTypeDef *htim) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(htim); + + /* NOTE : This function should not be modified, when the callback is needed, + the HAL_TIM_PeriodElapsedCallback could be implemented in the user file + */ +} + +/** + * @brief Period elapsed half complete callback in non-blocking mode + * @param htim TIM handle + * @retval None + */ +__weak void HAL_TIM_PeriodElapsedHalfCpltCallback(TIM_HandleTypeDef *htim) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(htim); + + /* NOTE : This function should not be modified, when the callback is needed, + the HAL_TIM_PeriodElapsedHalfCpltCallback could be implemented in the user file + */ +} + +/** + * @brief Output Compare callback in non-blocking mode + * @param htim TIM OC handle + * @retval None + */ +__weak void HAL_TIM_OC_DelayElapsedCallback(TIM_HandleTypeDef *htim) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(htim); + + /* NOTE : This function should not be modified, when the callback is needed, + the HAL_TIM_OC_DelayElapsedCallback could be implemented in the user file + */ +} + +/** + * @brief Input Capture callback in non-blocking mode + * @param htim TIM IC handle + * @retval None + */ +__weak void HAL_TIM_IC_CaptureCallback(TIM_HandleTypeDef *htim) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(htim); + + /* NOTE : This function should not be modified, when the callback is needed, + the HAL_TIM_IC_CaptureCallback could be implemented in the user file + */ +} + +/** + * @brief Input Capture half complete callback in non-blocking mode + * @param htim TIM IC handle + * @retval None + */ +__weak void HAL_TIM_IC_CaptureHalfCpltCallback(TIM_HandleTypeDef *htim) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(htim); + + /* NOTE : This function should not be modified, when the callback is needed, + the HAL_TIM_IC_CaptureHalfCpltCallback could be implemented in the user file + */ +} + +/** + * @brief PWM Pulse finished callback in non-blocking mode + * @param htim TIM handle + * @retval None + */ +__weak void HAL_TIM_PWM_PulseFinishedCallback(TIM_HandleTypeDef *htim) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(htim); + + /* NOTE : This function should not be modified, when the callback is needed, + the HAL_TIM_PWM_PulseFinishedCallback could be implemented in the user file + */ +} + +/** + * @brief PWM Pulse finished half complete callback in non-blocking mode + * @param htim TIM handle + * @retval None + */ +__weak void HAL_TIM_PWM_PulseFinishedHalfCpltCallback(TIM_HandleTypeDef *htim) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(htim); + + /* NOTE : This function should not be modified, when the callback is needed, + the HAL_TIM_PWM_PulseFinishedHalfCpltCallback could be implemented in the user file + */ +} + +/** + * @brief Hall Trigger detection callback in non-blocking mode + * @param htim TIM handle + * @retval None + */ +__weak void HAL_TIM_TriggerCallback(TIM_HandleTypeDef *htim) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(htim); + + /* NOTE : This function should 
not be modified, when the callback is needed, + the HAL_TIM_TriggerCallback could be implemented in the user file + */ +} + +/** + * @brief Hall Trigger detection half complete callback in non-blocking mode + * @param htim TIM handle + * @retval None + */ +__weak void HAL_TIM_TriggerHalfCpltCallback(TIM_HandleTypeDef *htim) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(htim); + + /* NOTE : This function should not be modified, when the callback is needed, + the HAL_TIM_TriggerHalfCpltCallback could be implemented in the user file + */ +} + +/** + * @brief Timer error callback in non-blocking mode + * @param htim TIM handle + * @retval None + */ +__weak void HAL_TIM_ErrorCallback(TIM_HandleTypeDef *htim) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(htim); + + /* NOTE : This function should not be modified, when the callback is needed, + the HAL_TIM_ErrorCallback could be implemented in the user file + */ +} + +#if (USE_HAL_TIM_REGISTER_CALLBACKS == 1) +/** + * @brief Register a User TIM callback to be used instead of the weak predefined callback + * @param htim tim handle + * @param CallbackID ID of the callback to be registered + * This parameter can be one of the following values: + * @arg @ref HAL_TIM_BASE_MSPINIT_CB_ID Base MspInit Callback ID + * @arg @ref HAL_TIM_BASE_MSPDEINIT_CB_ID Base MspDeInit Callback ID + * @arg @ref HAL_TIM_IC_MSPINIT_CB_ID IC MspInit Callback ID + * @arg @ref HAL_TIM_IC_MSPDEINIT_CB_ID IC MspDeInit Callback ID + * @arg @ref HAL_TIM_OC_MSPINIT_CB_ID OC MspInit Callback ID + * @arg @ref HAL_TIM_OC_MSPDEINIT_CB_ID OC MspDeInit Callback ID + * @arg @ref HAL_TIM_PWM_MSPINIT_CB_ID PWM MspInit Callback ID + * @arg @ref HAL_TIM_PWM_MSPDEINIT_CB_ID PWM MspDeInit Callback ID + * @arg @ref HAL_TIM_ONE_PULSE_MSPINIT_CB_ID One Pulse MspInit Callback ID + * @arg @ref HAL_TIM_ONE_PULSE_MSPDEINIT_CB_ID One Pulse MspDeInit Callback ID + * @arg @ref HAL_TIM_ENCODER_MSPINIT_CB_ID Encoder MspInit Callback ID + * @arg @ref HAL_TIM_ENCODER_MSPDEINIT_CB_ID Encoder MspDeInit Callback ID + * @arg @ref HAL_TIM_HALL_SENSOR_MSPINIT_CB_ID Hall Sensor MspInit Callback ID + * @arg @ref HAL_TIM_HALL_SENSOR_MSPDEINIT_CB_ID Hall Sensor MspDeInit Callback ID + * @arg @ref HAL_TIM_PERIOD_ELAPSED_CB_ID Period Elapsed Callback ID + * @arg @ref HAL_TIM_PERIOD_ELAPSED_HALF_CB_ID Period Elapsed half complete Callback ID + * @arg @ref HAL_TIM_TRIGGER_CB_ID Trigger Callback ID + * @arg @ref HAL_TIM_TRIGGER_HALF_CB_ID Trigger half complete Callback ID + * @arg @ref HAL_TIM_IC_CAPTURE_CB_ID Input Capture Callback ID + * @arg @ref HAL_TIM_IC_CAPTURE_HALF_CB_ID Input Capture half complete Callback ID + * @arg @ref HAL_TIM_OC_DELAY_ELAPSED_CB_ID Output Compare Delay Elapsed Callback ID + * @arg @ref HAL_TIM_PWM_PULSE_FINISHED_CB_ID PWM Pulse Finished Callback ID + * @arg @ref HAL_TIM_PWM_PULSE_FINISHED_HALF_CB_ID PWM Pulse Finished half complete Callback ID + * @arg @ref HAL_TIM_ERROR_CB_ID Error Callback ID + * @arg @ref HAL_TIM_COMMUTATION_CB_ID Commutation Callback ID + * @arg @ref HAL_TIM_COMMUTATION_HALF_CB_ID Commutation half complete Callback ID + * @arg @ref HAL_TIM_BREAK_CB_ID Break Callback ID + * @arg @ref HAL_TIM_BREAK2_CB_ID Break2 Callback ID + * @param pCallback pointer to the callback function + * @retval status + */ +HAL_StatusTypeDef HAL_TIM_RegisterCallback(TIM_HandleTypeDef *htim, HAL_TIM_CallbackIDTypeDef CallbackID, + pTIM_CallbackTypeDef pCallback) +{ + HAL_StatusTypeDef status = HAL_OK; + + if (pCallback == NULL) + { + return HAL_ERROR; + } 
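  /* Editor's note -- illustrative usage sketch, not part of the original ST driver.
     With USE_HAL_TIM_REGISTER_CALLBACKS set to 1, an application would typically
     register its handler while the handle is in the READY or RESET state, e.g.
     (htim2 and my_period_elapsed are hypothetical application names):

       void my_period_elapsed(TIM_HandleTypeDef *htim)
       {
         UNUSED(htim);                       // application work goes here
       }

       if (HAL_TIM_RegisterCallback(&htim2, HAL_TIM_PERIOD_ELAPSED_CB_ID,
                                    my_period_elapsed) != HAL_OK)
       {
         Error_Handler();                    // hypothetical error hook
       }

     Registering replaces the weak HAL_TIM_PeriodElapsedCallback for this handle;
     HAL_TIM_UnRegisterCallback restores the weak default. */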
+ /* Process locked */ + __HAL_LOCK(htim); + + if (htim->State == HAL_TIM_STATE_READY) + { + switch (CallbackID) + { + case HAL_TIM_BASE_MSPINIT_CB_ID : + htim->Base_MspInitCallback = pCallback; + break; + + case HAL_TIM_BASE_MSPDEINIT_CB_ID : + htim->Base_MspDeInitCallback = pCallback; + break; + + case HAL_TIM_IC_MSPINIT_CB_ID : + htim->IC_MspInitCallback = pCallback; + break; + + case HAL_TIM_IC_MSPDEINIT_CB_ID : + htim->IC_MspDeInitCallback = pCallback; + break; + + case HAL_TIM_OC_MSPINIT_CB_ID : + htim->OC_MspInitCallback = pCallback; + break; + + case HAL_TIM_OC_MSPDEINIT_CB_ID : + htim->OC_MspDeInitCallback = pCallback; + break; + + case HAL_TIM_PWM_MSPINIT_CB_ID : + htim->PWM_MspInitCallback = pCallback; + break; + + case HAL_TIM_PWM_MSPDEINIT_CB_ID : + htim->PWM_MspDeInitCallback = pCallback; + break; + + case HAL_TIM_ONE_PULSE_MSPINIT_CB_ID : + htim->OnePulse_MspInitCallback = pCallback; + break; + + case HAL_TIM_ONE_PULSE_MSPDEINIT_CB_ID : + htim->OnePulse_MspDeInitCallback = pCallback; + break; + + case HAL_TIM_ENCODER_MSPINIT_CB_ID : + htim->Encoder_MspInitCallback = pCallback; + break; + + case HAL_TIM_ENCODER_MSPDEINIT_CB_ID : + htim->Encoder_MspDeInitCallback = pCallback; + break; + + case HAL_TIM_HALL_SENSOR_MSPINIT_CB_ID : + htim->HallSensor_MspInitCallback = pCallback; + break; + + case HAL_TIM_HALL_SENSOR_MSPDEINIT_CB_ID : + htim->HallSensor_MspDeInitCallback = pCallback; + break; + + case HAL_TIM_PERIOD_ELAPSED_CB_ID : + htim->PeriodElapsedCallback = pCallback; + break; + + case HAL_TIM_PERIOD_ELAPSED_HALF_CB_ID : + htim->PeriodElapsedHalfCpltCallback = pCallback; + break; + + case HAL_TIM_TRIGGER_CB_ID : + htim->TriggerCallback = pCallback; + break; + + case HAL_TIM_TRIGGER_HALF_CB_ID : + htim->TriggerHalfCpltCallback = pCallback; + break; + + case HAL_TIM_IC_CAPTURE_CB_ID : + htim->IC_CaptureCallback = pCallback; + break; + + case HAL_TIM_IC_CAPTURE_HALF_CB_ID : + htim->IC_CaptureHalfCpltCallback = pCallback; + break; + + case HAL_TIM_OC_DELAY_ELAPSED_CB_ID : + htim->OC_DelayElapsedCallback = pCallback; + break; + + case HAL_TIM_PWM_PULSE_FINISHED_CB_ID : + htim->PWM_PulseFinishedCallback = pCallback; + break; + + case HAL_TIM_PWM_PULSE_FINISHED_HALF_CB_ID : + htim->PWM_PulseFinishedHalfCpltCallback = pCallback; + break; + + case HAL_TIM_ERROR_CB_ID : + htim->ErrorCallback = pCallback; + break; + + case HAL_TIM_COMMUTATION_CB_ID : + htim->CommutationCallback = pCallback; + break; + + case HAL_TIM_COMMUTATION_HALF_CB_ID : + htim->CommutationHalfCpltCallback = pCallback; + break; + + case HAL_TIM_BREAK_CB_ID : + htim->BreakCallback = pCallback; + break; + + case HAL_TIM_BREAK2_CB_ID : + htim->Break2Callback = pCallback; + break; + + default : + /* Return error status */ + status = HAL_ERROR; + break; + } + } + else if (htim->State == HAL_TIM_STATE_RESET) + { + switch (CallbackID) + { + case HAL_TIM_BASE_MSPINIT_CB_ID : + htim->Base_MspInitCallback = pCallback; + break; + + case HAL_TIM_BASE_MSPDEINIT_CB_ID : + htim->Base_MspDeInitCallback = pCallback; + break; + + case HAL_TIM_IC_MSPINIT_CB_ID : + htim->IC_MspInitCallback = pCallback; + break; + + case HAL_TIM_IC_MSPDEINIT_CB_ID : + htim->IC_MspDeInitCallback = pCallback; + break; + + case HAL_TIM_OC_MSPINIT_CB_ID : + htim->OC_MspInitCallback = pCallback; + break; + + case HAL_TIM_OC_MSPDEINIT_CB_ID : + htim->OC_MspDeInitCallback = pCallback; + break; + + case HAL_TIM_PWM_MSPINIT_CB_ID : + htim->PWM_MspInitCallback = pCallback; + break; + + case HAL_TIM_PWM_MSPDEINIT_CB_ID : + htim->PWM_MspDeInitCallback = pCallback; 
+ break; + + case HAL_TIM_ONE_PULSE_MSPINIT_CB_ID : + htim->OnePulse_MspInitCallback = pCallback; + break; + + case HAL_TIM_ONE_PULSE_MSPDEINIT_CB_ID : + htim->OnePulse_MspDeInitCallback = pCallback; + break; + + case HAL_TIM_ENCODER_MSPINIT_CB_ID : + htim->Encoder_MspInitCallback = pCallback; + break; + + case HAL_TIM_ENCODER_MSPDEINIT_CB_ID : + htim->Encoder_MspDeInitCallback = pCallback; + break; + + case HAL_TIM_HALL_SENSOR_MSPINIT_CB_ID : + htim->HallSensor_MspInitCallback = pCallback; + break; + + case HAL_TIM_HALL_SENSOR_MSPDEINIT_CB_ID : + htim->HallSensor_MspDeInitCallback = pCallback; + break; + + default : + /* Return error status */ + status = HAL_ERROR; + break; + } + } + else + { + /* Return error status */ + status = HAL_ERROR; + } + + /* Release Lock */ + __HAL_UNLOCK(htim); + + return status; +} + +/** + * @brief Unregister a TIM callback + * TIM callback is redirected to the weak predefined callback + * @param htim tim handle + * @param CallbackID ID of the callback to be unregistered + * This parameter can be one of the following values: + * @arg @ref HAL_TIM_BASE_MSPINIT_CB_ID Base MspInit Callback ID + * @arg @ref HAL_TIM_BASE_MSPDEINIT_CB_ID Base MspDeInit Callback ID + * @arg @ref HAL_TIM_IC_MSPINIT_CB_ID IC MspInit Callback ID + * @arg @ref HAL_TIM_IC_MSPDEINIT_CB_ID IC MspDeInit Callback ID + * @arg @ref HAL_TIM_OC_MSPINIT_CB_ID OC MspInit Callback ID + * @arg @ref HAL_TIM_OC_MSPDEINIT_CB_ID OC MspDeInit Callback ID + * @arg @ref HAL_TIM_PWM_MSPINIT_CB_ID PWM MspInit Callback ID + * @arg @ref HAL_TIM_PWM_MSPDEINIT_CB_ID PWM MspDeInit Callback ID + * @arg @ref HAL_TIM_ONE_PULSE_MSPINIT_CB_ID One Pulse MspInit Callback ID + * @arg @ref HAL_TIM_ONE_PULSE_MSPDEINIT_CB_ID One Pulse MspDeInit Callback ID + * @arg @ref HAL_TIM_ENCODER_MSPINIT_CB_ID Encoder MspInit Callback ID + * @arg @ref HAL_TIM_ENCODER_MSPDEINIT_CB_ID Encoder MspDeInit Callback ID + * @arg @ref HAL_TIM_HALL_SENSOR_MSPINIT_CB_ID Hall Sensor MspInit Callback ID + * @arg @ref HAL_TIM_HALL_SENSOR_MSPDEINIT_CB_ID Hall Sensor MspDeInit Callback ID + * @arg @ref HAL_TIM_PERIOD_ELAPSED_CB_ID Period Elapsed Callback ID + * @arg @ref HAL_TIM_PERIOD_ELAPSED_HALF_CB_ID Period Elapsed half complete Callback ID + * @arg @ref HAL_TIM_TRIGGER_CB_ID Trigger Callback ID + * @arg @ref HAL_TIM_TRIGGER_HALF_CB_ID Trigger half complete Callback ID + * @arg @ref HAL_TIM_IC_CAPTURE_CB_ID Input Capture Callback ID + * @arg @ref HAL_TIM_IC_CAPTURE_HALF_CB_ID Input Capture half complete Callback ID + * @arg @ref HAL_TIM_OC_DELAY_ELAPSED_CB_ID Output Compare Delay Elapsed Callback ID + * @arg @ref HAL_TIM_PWM_PULSE_FINISHED_CB_ID PWM Pulse Finished Callback ID + * @arg @ref HAL_TIM_PWM_PULSE_FINISHED_HALF_CB_ID PWM Pulse Finished half complete Callback ID + * @arg @ref HAL_TIM_ERROR_CB_ID Error Callback ID + * @arg @ref HAL_TIM_COMMUTATION_CB_ID Commutation Callback ID + * @arg @ref HAL_TIM_COMMUTATION_HALF_CB_ID Commutation half complete Callback ID + * @arg @ref HAL_TIM_BREAK_CB_ID Break Callback ID + * @arg @ref HAL_TIM_BREAK2_CB_ID Break2 Callback ID + * @retval status + */ +HAL_StatusTypeDef HAL_TIM_UnRegisterCallback(TIM_HandleTypeDef *htim, HAL_TIM_CallbackIDTypeDef CallbackID) +{ + HAL_StatusTypeDef status = HAL_OK; + + /* Process locked */ + __HAL_LOCK(htim); + + if (htim->State == HAL_TIM_STATE_READY) + { + switch (CallbackID) + { + case HAL_TIM_BASE_MSPINIT_CB_ID : + /* Legacy weak Base MspInit Callback */ + htim->Base_MspInitCallback = HAL_TIM_Base_MspInit; + break; + + case HAL_TIM_BASE_MSPDEINIT_CB_ID : + /* Legacy 
weak Base Msp DeInit Callback */ + htim->Base_MspDeInitCallback = HAL_TIM_Base_MspDeInit; + break; + + case HAL_TIM_IC_MSPINIT_CB_ID : + /* Legacy weak IC Msp Init Callback */ + htim->IC_MspInitCallback = HAL_TIM_IC_MspInit; + break; + + case HAL_TIM_IC_MSPDEINIT_CB_ID : + /* Legacy weak IC Msp DeInit Callback */ + htim->IC_MspDeInitCallback = HAL_TIM_IC_MspDeInit; + break; + + case HAL_TIM_OC_MSPINIT_CB_ID : + /* Legacy weak OC Msp Init Callback */ + htim->OC_MspInitCallback = HAL_TIM_OC_MspInit; + break; + + case HAL_TIM_OC_MSPDEINIT_CB_ID : + /* Legacy weak OC Msp DeInit Callback */ + htim->OC_MspDeInitCallback = HAL_TIM_OC_MspDeInit; + break; + + case HAL_TIM_PWM_MSPINIT_CB_ID : + /* Legacy weak PWM Msp Init Callback */ + htim->PWM_MspInitCallback = HAL_TIM_PWM_MspInit; + break; + + case HAL_TIM_PWM_MSPDEINIT_CB_ID : + /* Legacy weak PWM Msp DeInit Callback */ + htim->PWM_MspDeInitCallback = HAL_TIM_PWM_MspDeInit; + break; + + case HAL_TIM_ONE_PULSE_MSPINIT_CB_ID : + /* Legacy weak One Pulse Msp Init Callback */ + htim->OnePulse_MspInitCallback = HAL_TIM_OnePulse_MspInit; + break; + + case HAL_TIM_ONE_PULSE_MSPDEINIT_CB_ID : + /* Legacy weak One Pulse Msp DeInit Callback */ + htim->OnePulse_MspDeInitCallback = HAL_TIM_OnePulse_MspDeInit; + break; + + case HAL_TIM_ENCODER_MSPINIT_CB_ID : + /* Legacy weak Encoder Msp Init Callback */ + htim->Encoder_MspInitCallback = HAL_TIM_Encoder_MspInit; + break; + + case HAL_TIM_ENCODER_MSPDEINIT_CB_ID : + /* Legacy weak Encoder Msp DeInit Callback */ + htim->Encoder_MspDeInitCallback = HAL_TIM_Encoder_MspDeInit; + break; + + case HAL_TIM_HALL_SENSOR_MSPINIT_CB_ID : + /* Legacy weak Hall Sensor Msp Init Callback */ + htim->HallSensor_MspInitCallback = HAL_TIMEx_HallSensor_MspInit; + break; + + case HAL_TIM_HALL_SENSOR_MSPDEINIT_CB_ID : + /* Legacy weak Hall Sensor Msp DeInit Callback */ + htim->HallSensor_MspDeInitCallback = HAL_TIMEx_HallSensor_MspDeInit; + break; + + case HAL_TIM_PERIOD_ELAPSED_CB_ID : + /* Legacy weak Period Elapsed Callback */ + htim->PeriodElapsedCallback = HAL_TIM_PeriodElapsedCallback; + break; + + case HAL_TIM_PERIOD_ELAPSED_HALF_CB_ID : + /* Legacy weak Period Elapsed half complete Callback */ + htim->PeriodElapsedHalfCpltCallback = HAL_TIM_PeriodElapsedHalfCpltCallback; + break; + + case HAL_TIM_TRIGGER_CB_ID : + /* Legacy weak Trigger Callback */ + htim->TriggerCallback = HAL_TIM_TriggerCallback; + break; + + case HAL_TIM_TRIGGER_HALF_CB_ID : + /* Legacy weak Trigger half complete Callback */ + htim->TriggerHalfCpltCallback = HAL_TIM_TriggerHalfCpltCallback; + break; + + case HAL_TIM_IC_CAPTURE_CB_ID : + /* Legacy weak IC Capture Callback */ + htim->IC_CaptureCallback = HAL_TIM_IC_CaptureCallback; + break; + + case HAL_TIM_IC_CAPTURE_HALF_CB_ID : + /* Legacy weak IC Capture half complete Callback */ + htim->IC_CaptureHalfCpltCallback = HAL_TIM_IC_CaptureHalfCpltCallback; + break; + + case HAL_TIM_OC_DELAY_ELAPSED_CB_ID : + /* Legacy weak OC Delay Elapsed Callback */ + htim->OC_DelayElapsedCallback = HAL_TIM_OC_DelayElapsedCallback; + break; + + case HAL_TIM_PWM_PULSE_FINISHED_CB_ID : + /* Legacy weak PWM Pulse Finished Callback */ + htim->PWM_PulseFinishedCallback = HAL_TIM_PWM_PulseFinishedCallback; + break; + + case HAL_TIM_PWM_PULSE_FINISHED_HALF_CB_ID : + /* Legacy weak PWM Pulse Finished half complete Callback */ + htim->PWM_PulseFinishedHalfCpltCallback = HAL_TIM_PWM_PulseFinishedHalfCpltCallback; + break; + + case HAL_TIM_ERROR_CB_ID : + /* Legacy weak Error Callback */ + htim->ErrorCallback = 
HAL_TIM_ErrorCallback; + break; + + case HAL_TIM_COMMUTATION_CB_ID : + /* Legacy weak Commutation Callback */ + htim->CommutationCallback = HAL_TIMEx_CommutCallback; + break; + + case HAL_TIM_COMMUTATION_HALF_CB_ID : + /* Legacy weak Commutation half complete Callback */ + htim->CommutationHalfCpltCallback = HAL_TIMEx_CommutHalfCpltCallback; + break; + + case HAL_TIM_BREAK_CB_ID : + /* Legacy weak Break Callback */ + htim->BreakCallback = HAL_TIMEx_BreakCallback; + break; + + case HAL_TIM_BREAK2_CB_ID : + /* Legacy weak Break2 Callback */ + htim->Break2Callback = HAL_TIMEx_Break2Callback; + break; + + default : + /* Return error status */ + status = HAL_ERROR; + break; + } + } + else if (htim->State == HAL_TIM_STATE_RESET) + { + switch (CallbackID) + { + case HAL_TIM_BASE_MSPINIT_CB_ID : + /* Legacy weak Base MspInit Callback */ + htim->Base_MspInitCallback = HAL_TIM_Base_MspInit; + break; + + case HAL_TIM_BASE_MSPDEINIT_CB_ID : + /* Legacy weak Base Msp DeInit Callback */ + htim->Base_MspDeInitCallback = HAL_TIM_Base_MspDeInit; + break; + + case HAL_TIM_IC_MSPINIT_CB_ID : + /* Legacy weak IC Msp Init Callback */ + htim->IC_MspInitCallback = HAL_TIM_IC_MspInit; + break; + + case HAL_TIM_IC_MSPDEINIT_CB_ID : + /* Legacy weak IC Msp DeInit Callback */ + htim->IC_MspDeInitCallback = HAL_TIM_IC_MspDeInit; + break; + + case HAL_TIM_OC_MSPINIT_CB_ID : + /* Legacy weak OC Msp Init Callback */ + htim->OC_MspInitCallback = HAL_TIM_OC_MspInit; + break; + + case HAL_TIM_OC_MSPDEINIT_CB_ID : + /* Legacy weak OC Msp DeInit Callback */ + htim->OC_MspDeInitCallback = HAL_TIM_OC_MspDeInit; + break; + + case HAL_TIM_PWM_MSPINIT_CB_ID : + /* Legacy weak PWM Msp Init Callback */ + htim->PWM_MspInitCallback = HAL_TIM_PWM_MspInit; + break; + + case HAL_TIM_PWM_MSPDEINIT_CB_ID : + /* Legacy weak PWM Msp DeInit Callback */ + htim->PWM_MspDeInitCallback = HAL_TIM_PWM_MspDeInit; + break; + + case HAL_TIM_ONE_PULSE_MSPINIT_CB_ID : + /* Legacy weak One Pulse Msp Init Callback */ + htim->OnePulse_MspInitCallback = HAL_TIM_OnePulse_MspInit; + break; + + case HAL_TIM_ONE_PULSE_MSPDEINIT_CB_ID : + /* Legacy weak One Pulse Msp DeInit Callback */ + htim->OnePulse_MspDeInitCallback = HAL_TIM_OnePulse_MspDeInit; + break; + + case HAL_TIM_ENCODER_MSPINIT_CB_ID : + /* Legacy weak Encoder Msp Init Callback */ + htim->Encoder_MspInitCallback = HAL_TIM_Encoder_MspInit; + break; + + case HAL_TIM_ENCODER_MSPDEINIT_CB_ID : + /* Legacy weak Encoder Msp DeInit Callback */ + htim->Encoder_MspDeInitCallback = HAL_TIM_Encoder_MspDeInit; + break; + + case HAL_TIM_HALL_SENSOR_MSPINIT_CB_ID : + /* Legacy weak Hall Sensor Msp Init Callback */ + htim->HallSensor_MspInitCallback = HAL_TIMEx_HallSensor_MspInit; + break; + + case HAL_TIM_HALL_SENSOR_MSPDEINIT_CB_ID : + /* Legacy weak Hall Sensor Msp DeInit Callback */ + htim->HallSensor_MspDeInitCallback = HAL_TIMEx_HallSensor_MspDeInit; + break; + + default : + /* Return error status */ + status = HAL_ERROR; + break; + } + } + else + { + /* Return error status */ + status = HAL_ERROR; + } + + /* Release Lock */ + __HAL_UNLOCK(htim); + + return status; +} +#endif /* USE_HAL_TIM_REGISTER_CALLBACKS */ + +/** + * @} + */ + +/** @defgroup TIM_Exported_Functions_Group10 TIM Peripheral State functions + * @brief TIM Peripheral State functions + * +@verbatim + ============================================================================== + ##### Peripheral State functions ##### + ============================================================================== + [..] 
+ This subsection permits to get in run-time the status of the peripheral + and the data flow. + +@endverbatim + * @{ + */ + +/** + * @brief Return the TIM Base handle state. + * @param htim TIM Base handle + * @retval HAL state + */ +HAL_TIM_StateTypeDef HAL_TIM_Base_GetState(TIM_HandleTypeDef *htim) +{ + return htim->State; +} + +/** + * @brief Return the TIM OC handle state. + * @param htim TIM Output Compare handle + * @retval HAL state + */ +HAL_TIM_StateTypeDef HAL_TIM_OC_GetState(TIM_HandleTypeDef *htim) +{ + return htim->State; +} + +/** + * @brief Return the TIM PWM handle state. + * @param htim TIM handle + * @retval HAL state + */ +HAL_TIM_StateTypeDef HAL_TIM_PWM_GetState(TIM_HandleTypeDef *htim) +{ + return htim->State; +} + +/** + * @brief Return the TIM Input Capture handle state. + * @param htim TIM IC handle + * @retval HAL state + */ +HAL_TIM_StateTypeDef HAL_TIM_IC_GetState(TIM_HandleTypeDef *htim) +{ + return htim->State; +} + +/** + * @brief Return the TIM One Pulse Mode handle state. + * @param htim TIM OPM handle + * @retval HAL state + */ +HAL_TIM_StateTypeDef HAL_TIM_OnePulse_GetState(TIM_HandleTypeDef *htim) +{ + return htim->State; +} + +/** + * @brief Return the TIM Encoder Mode handle state. + * @param htim TIM Encoder Interface handle + * @retval HAL state + */ +HAL_TIM_StateTypeDef HAL_TIM_Encoder_GetState(TIM_HandleTypeDef *htim) +{ + return htim->State; +} + +/** + * @brief Return the TIM Encoder Mode handle state. + * @param htim TIM handle + * @retval Active channel + */ +HAL_TIM_ActiveChannel HAL_TIM_GetActiveChannel(TIM_HandleTypeDef *htim) +{ + return htim->Channel; +} + +/** + * @brief Return actual state of the TIM channel. + * @param htim TIM handle + * @param Channel TIM Channel + * This parameter can be one of the following values: + * @arg TIM_CHANNEL_1: TIM Channel 1 + * @arg TIM_CHANNEL_2: TIM Channel 2 + * @arg TIM_CHANNEL_3: TIM Channel 3 + * @arg TIM_CHANNEL_4: TIM Channel 4 + * @arg TIM_CHANNEL_5: TIM Channel 5 + * @arg TIM_CHANNEL_6: TIM Channel 6 + * @retval TIM Channel state + */ +HAL_TIM_ChannelStateTypeDef HAL_TIM_GetChannelState(TIM_HandleTypeDef *htim, uint32_t Channel) +{ + HAL_TIM_ChannelStateTypeDef channel_state; + + /* Check the parameters */ + assert_param(IS_TIM_CCX_INSTANCE(htim->Instance, Channel)); + + channel_state = TIM_CHANNEL_STATE_GET(htim, Channel); + + return channel_state; +} + +/** + * @brief Return actual state of a DMA burst operation. + * @param htim TIM handle + * @retval DMA burst state + */ +HAL_TIM_DMABurstStateTypeDef HAL_TIM_DMABurstState(TIM_HandleTypeDef *htim) +{ + /* Check the parameters */ + assert_param(IS_TIM_DMABURST_INSTANCE(htim->Instance)); + + return htim->DMABurstState; +} + +/** + * @} + */ + +/** + * @} + */ + +/** @defgroup TIM_Private_Functions TIM Private Functions + * @{ + */ + +/** + * @brief TIM DMA error callback + * @param hdma pointer to DMA handle. 
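  * @note  Editor's summary of the behaviour below: on a capture/compare DMA
  *        error the corresponding channel state is returned to READY (for
  *        other request sources the whole handle state is), the user error
  *        callback is then invoked, and the active-channel field is cleared.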
+ * @retval None + */ +void TIM_DMAError(DMA_HandleTypeDef *hdma) +{ + TIM_HandleTypeDef *htim = (TIM_HandleTypeDef *)((DMA_HandleTypeDef *)hdma)->Parent; + + if (hdma == htim->hdma[TIM_DMA_ID_CC1]) + { + htim->Channel = HAL_TIM_ACTIVE_CHANNEL_1; + TIM_CHANNEL_STATE_SET(htim, TIM_CHANNEL_1, HAL_TIM_CHANNEL_STATE_READY); + } + else if (hdma == htim->hdma[TIM_DMA_ID_CC2]) + { + htim->Channel = HAL_TIM_ACTIVE_CHANNEL_2; + TIM_CHANNEL_STATE_SET(htim, TIM_CHANNEL_2, HAL_TIM_CHANNEL_STATE_READY); + } + else if (hdma == htim->hdma[TIM_DMA_ID_CC3]) + { + htim->Channel = HAL_TIM_ACTIVE_CHANNEL_3; + TIM_CHANNEL_STATE_SET(htim, TIM_CHANNEL_3, HAL_TIM_CHANNEL_STATE_READY); + } + else if (hdma == htim->hdma[TIM_DMA_ID_CC4]) + { + htim->Channel = HAL_TIM_ACTIVE_CHANNEL_4; + TIM_CHANNEL_STATE_SET(htim, TIM_CHANNEL_4, HAL_TIM_CHANNEL_STATE_READY); + } + else + { + htim->State = HAL_TIM_STATE_READY; + } + +#if (USE_HAL_TIM_REGISTER_CALLBACKS == 1) + htim->ErrorCallback(htim); +#else + HAL_TIM_ErrorCallback(htim); +#endif /* USE_HAL_TIM_REGISTER_CALLBACKS */ + + htim->Channel = HAL_TIM_ACTIVE_CHANNEL_CLEARED; +} + +/** + * @brief TIM DMA Delay Pulse complete callback. + * @param hdma pointer to DMA handle. + * @retval None + */ +static void TIM_DMADelayPulseCplt(DMA_HandleTypeDef *hdma) +{ + TIM_HandleTypeDef *htim = (TIM_HandleTypeDef *)((DMA_HandleTypeDef *)hdma)->Parent; + + if (hdma == htim->hdma[TIM_DMA_ID_CC1]) + { + htim->Channel = HAL_TIM_ACTIVE_CHANNEL_1; + + if (hdma->Init.Mode == DMA_NORMAL) + { + TIM_CHANNEL_STATE_SET(htim, TIM_CHANNEL_1, HAL_TIM_CHANNEL_STATE_READY); + } + } + else if (hdma == htim->hdma[TIM_DMA_ID_CC2]) + { + htim->Channel = HAL_TIM_ACTIVE_CHANNEL_2; + + if (hdma->Init.Mode == DMA_NORMAL) + { + TIM_CHANNEL_STATE_SET(htim, TIM_CHANNEL_2, HAL_TIM_CHANNEL_STATE_READY); + } + } + else if (hdma == htim->hdma[TIM_DMA_ID_CC3]) + { + htim->Channel = HAL_TIM_ACTIVE_CHANNEL_3; + + if (hdma->Init.Mode == DMA_NORMAL) + { + TIM_CHANNEL_STATE_SET(htim, TIM_CHANNEL_3, HAL_TIM_CHANNEL_STATE_READY); + } + } + else if (hdma == htim->hdma[TIM_DMA_ID_CC4]) + { + htim->Channel = HAL_TIM_ACTIVE_CHANNEL_4; + + if (hdma->Init.Mode == DMA_NORMAL) + { + TIM_CHANNEL_STATE_SET(htim, TIM_CHANNEL_4, HAL_TIM_CHANNEL_STATE_READY); + } + } + else + { + /* nothing to do */ + } + +#if (USE_HAL_TIM_REGISTER_CALLBACKS == 1) + htim->PWM_PulseFinishedCallback(htim); +#else + HAL_TIM_PWM_PulseFinishedCallback(htim); +#endif /* USE_HAL_TIM_REGISTER_CALLBACKS */ + + htim->Channel = HAL_TIM_ACTIVE_CHANNEL_CLEARED; +} + +/** + * @brief TIM DMA Delay Pulse half complete callback. + * @param hdma pointer to DMA handle. + * @retval None + */ +void TIM_DMADelayPulseHalfCplt(DMA_HandleTypeDef *hdma) +{ + TIM_HandleTypeDef *htim = (TIM_HandleTypeDef *)((DMA_HandleTypeDef *)hdma)->Parent; + + if (hdma == htim->hdma[TIM_DMA_ID_CC1]) + { + htim->Channel = HAL_TIM_ACTIVE_CHANNEL_1; + } + else if (hdma == htim->hdma[TIM_DMA_ID_CC2]) + { + htim->Channel = HAL_TIM_ACTIVE_CHANNEL_2; + } + else if (hdma == htim->hdma[TIM_DMA_ID_CC3]) + { + htim->Channel = HAL_TIM_ACTIVE_CHANNEL_3; + } + else if (hdma == htim->hdma[TIM_DMA_ID_CC4]) + { + htim->Channel = HAL_TIM_ACTIVE_CHANNEL_4; + } + else + { + /* nothing to do */ + } + +#if (USE_HAL_TIM_REGISTER_CALLBACKS == 1) + htim->PWM_PulseFinishedHalfCpltCallback(htim); +#else + HAL_TIM_PWM_PulseFinishedHalfCpltCallback(htim); +#endif /* USE_HAL_TIM_REGISTER_CALLBACKS */ + + htim->Channel = HAL_TIM_ACTIVE_CHANNEL_CLEARED; +} + +/** + * @brief TIM DMA Capture complete callback. 
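+  * @note  When the linked DMA stream runs in DMA_NORMAL mode the corresponding channel
+  *        state is returned to READY here; in circular mode the channel stays BUSY and
+  *        this callback is invoked again on every buffer completion.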
+ * @param hdma pointer to DMA handle. + * @retval None + */ +void TIM_DMACaptureCplt(DMA_HandleTypeDef *hdma) +{ + TIM_HandleTypeDef *htim = (TIM_HandleTypeDef *)((DMA_HandleTypeDef *)hdma)->Parent; + + if (hdma == htim->hdma[TIM_DMA_ID_CC1]) + { + htim->Channel = HAL_TIM_ACTIVE_CHANNEL_1; + + if (hdma->Init.Mode == DMA_NORMAL) + { + TIM_CHANNEL_STATE_SET(htim, TIM_CHANNEL_1, HAL_TIM_CHANNEL_STATE_READY); + TIM_CHANNEL_N_STATE_SET(htim, TIM_CHANNEL_1, HAL_TIM_CHANNEL_STATE_READY); + } + } + else if (hdma == htim->hdma[TIM_DMA_ID_CC2]) + { + htim->Channel = HAL_TIM_ACTIVE_CHANNEL_2; + + if (hdma->Init.Mode == DMA_NORMAL) + { + TIM_CHANNEL_STATE_SET(htim, TIM_CHANNEL_2, HAL_TIM_CHANNEL_STATE_READY); + TIM_CHANNEL_N_STATE_SET(htim, TIM_CHANNEL_2, HAL_TIM_CHANNEL_STATE_READY); + } + } + else if (hdma == htim->hdma[TIM_DMA_ID_CC3]) + { + htim->Channel = HAL_TIM_ACTIVE_CHANNEL_3; + + if (hdma->Init.Mode == DMA_NORMAL) + { + TIM_CHANNEL_STATE_SET(htim, TIM_CHANNEL_3, HAL_TIM_CHANNEL_STATE_READY); + TIM_CHANNEL_N_STATE_SET(htim, TIM_CHANNEL_3, HAL_TIM_CHANNEL_STATE_READY); + } + } + else if (hdma == htim->hdma[TIM_DMA_ID_CC4]) + { + htim->Channel = HAL_TIM_ACTIVE_CHANNEL_4; + + if (hdma->Init.Mode == DMA_NORMAL) + { + TIM_CHANNEL_STATE_SET(htim, TIM_CHANNEL_4, HAL_TIM_CHANNEL_STATE_READY); + TIM_CHANNEL_N_STATE_SET(htim, TIM_CHANNEL_4, HAL_TIM_CHANNEL_STATE_READY); + } + } + else + { + /* nothing to do */ + } + +#if (USE_HAL_TIM_REGISTER_CALLBACKS == 1) + htim->IC_CaptureCallback(htim); +#else + HAL_TIM_IC_CaptureCallback(htim); +#endif /* USE_HAL_TIM_REGISTER_CALLBACKS */ + + htim->Channel = HAL_TIM_ACTIVE_CHANNEL_CLEARED; +} + +/** + * @brief TIM DMA Capture half complete callback. + * @param hdma pointer to DMA handle. + * @retval None + */ +void TIM_DMACaptureHalfCplt(DMA_HandleTypeDef *hdma) +{ + TIM_HandleTypeDef *htim = (TIM_HandleTypeDef *)((DMA_HandleTypeDef *)hdma)->Parent; + + if (hdma == htim->hdma[TIM_DMA_ID_CC1]) + { + htim->Channel = HAL_TIM_ACTIVE_CHANNEL_1; + } + else if (hdma == htim->hdma[TIM_DMA_ID_CC2]) + { + htim->Channel = HAL_TIM_ACTIVE_CHANNEL_2; + } + else if (hdma == htim->hdma[TIM_DMA_ID_CC3]) + { + htim->Channel = HAL_TIM_ACTIVE_CHANNEL_3; + } + else if (hdma == htim->hdma[TIM_DMA_ID_CC4]) + { + htim->Channel = HAL_TIM_ACTIVE_CHANNEL_4; + } + else + { + /* nothing to do */ + } + +#if (USE_HAL_TIM_REGISTER_CALLBACKS == 1) + htim->IC_CaptureHalfCpltCallback(htim); +#else + HAL_TIM_IC_CaptureHalfCpltCallback(htim); +#endif /* USE_HAL_TIM_REGISTER_CALLBACKS */ + + htim->Channel = HAL_TIM_ACTIVE_CHANNEL_CLEARED; +} + +/** + * @brief TIM DMA Period Elapse complete callback. + * @param hdma pointer to DMA handle. + * @retval None + */ +static void TIM_DMAPeriodElapsedCplt(DMA_HandleTypeDef *hdma) +{ + TIM_HandleTypeDef *htim = (TIM_HandleTypeDef *)((DMA_HandleTypeDef *)hdma)->Parent; + + if (htim->hdma[TIM_DMA_ID_UPDATE]->Init.Mode == DMA_NORMAL) + { + htim->State = HAL_TIM_STATE_READY; + } + +#if (USE_HAL_TIM_REGISTER_CALLBACKS == 1) + htim->PeriodElapsedCallback(htim); +#else + HAL_TIM_PeriodElapsedCallback(htim); +#endif /* USE_HAL_TIM_REGISTER_CALLBACKS */ +} + +/** + * @brief TIM DMA Period Elapse half complete callback. + * @param hdma pointer to DMA handle. 
+ * @retval None + */ +static void TIM_DMAPeriodElapsedHalfCplt(DMA_HandleTypeDef *hdma) +{ + TIM_HandleTypeDef *htim = (TIM_HandleTypeDef *)((DMA_HandleTypeDef *)hdma)->Parent; + +#if (USE_HAL_TIM_REGISTER_CALLBACKS == 1) + htim->PeriodElapsedHalfCpltCallback(htim); +#else + HAL_TIM_PeriodElapsedHalfCpltCallback(htim); +#endif /* USE_HAL_TIM_REGISTER_CALLBACKS */ +} + +/** + * @brief TIM DMA Trigger callback. + * @param hdma pointer to DMA handle. + * @retval None + */ +static void TIM_DMATriggerCplt(DMA_HandleTypeDef *hdma) +{ + TIM_HandleTypeDef *htim = (TIM_HandleTypeDef *)((DMA_HandleTypeDef *)hdma)->Parent; + + if (htim->hdma[TIM_DMA_ID_TRIGGER]->Init.Mode == DMA_NORMAL) + { + htim->State = HAL_TIM_STATE_READY; + } + +#if (USE_HAL_TIM_REGISTER_CALLBACKS == 1) + htim->TriggerCallback(htim); +#else + HAL_TIM_TriggerCallback(htim); +#endif /* USE_HAL_TIM_REGISTER_CALLBACKS */ +} + +/** + * @brief TIM DMA Trigger half complete callback. + * @param hdma pointer to DMA handle. + * @retval None + */ +static void TIM_DMATriggerHalfCplt(DMA_HandleTypeDef *hdma) +{ + TIM_HandleTypeDef *htim = (TIM_HandleTypeDef *)((DMA_HandleTypeDef *)hdma)->Parent; + +#if (USE_HAL_TIM_REGISTER_CALLBACKS == 1) + htim->TriggerHalfCpltCallback(htim); +#else + HAL_TIM_TriggerHalfCpltCallback(htim); +#endif /* USE_HAL_TIM_REGISTER_CALLBACKS */ +} + +/** + * @brief Time Base configuration + * @param TIMx TIM peripheral + * @param Structure TIM Base configuration structure + * @retval None + */ +void TIM_Base_SetConfig(TIM_TypeDef *TIMx, TIM_Base_InitTypeDef *Structure) +{ + uint32_t tmpcr1; + tmpcr1 = TIMx->CR1; + + /* Set TIM Time Base Unit parameters ---------------------------------------*/ + if (IS_TIM_COUNTER_MODE_SELECT_INSTANCE(TIMx)) + { + /* Select the Counter Mode */ + tmpcr1 &= ~(TIM_CR1_DIR | TIM_CR1_CMS); + tmpcr1 |= Structure->CounterMode; + } + + if (IS_TIM_CLOCK_DIVISION_INSTANCE(TIMx)) + { + /* Set the clock division */ + tmpcr1 &= ~TIM_CR1_CKD; + tmpcr1 |= (uint32_t)Structure->ClockDivision; + } + + /* Set the auto-reload preload */ + MODIFY_REG(tmpcr1, TIM_CR1_ARPE, Structure->AutoReloadPreload); + + TIMx->CR1 = tmpcr1; + + /* Set the Autoreload value */ + TIMx->ARR = (uint32_t)Structure->Period ; + + /* Set the Prescaler value */ + TIMx->PSC = Structure->Prescaler; + + if (IS_TIM_REPETITION_COUNTER_INSTANCE(TIMx)) + { + /* Set the Repetition Counter value */ + TIMx->RCR = Structure->RepetitionCounter; + } + + /* Generate an update event to reload the Prescaler + and the repetition counter (only for advanced timer) value immediately */ + TIMx->EGR = TIM_EGR_UG; +} + +/** + * @brief Timer Output Compare 1 configuration + * @param TIMx to select the TIM peripheral + * @param OC_Config The output configuration structure + * @retval None + */ +static void TIM_OC1_SetConfig(TIM_TypeDef *TIMx, TIM_OC_InitTypeDef *OC_Config) +{ + uint32_t tmpccmrx; + uint32_t tmpccer; + uint32_t tmpcr2; + + /* Disable the Channel 1: Reset the CC1E Bit */ + TIMx->CCER &= ~TIM_CCER_CC1E; + + /* Get the TIMx CCER register value */ + tmpccer = TIMx->CCER; + /* Get the TIMx CR2 register value */ + tmpcr2 = TIMx->CR2; + + /* Get the TIMx CCMR1 register value */ + tmpccmrx = TIMx->CCMR1; + + /* Reset the Output Compare Mode Bits */ + tmpccmrx &= ~TIM_CCMR1_OC1M; + tmpccmrx &= ~TIM_CCMR1_CC1S; + /* Select the Output Compare Mode */ + tmpccmrx |= OC_Config->OCMode; + + /* Reset the Output Polarity level */ + tmpccer &= ~TIM_CCER_CC1P; + /* Set the Output Compare Polarity */ + tmpccer |= OC_Config->OCPolarity; + + if 
(IS_TIM_CCXN_INSTANCE(TIMx, TIM_CHANNEL_1)) + { + /* Check parameters */ + assert_param(IS_TIM_OCN_POLARITY(OC_Config->OCNPolarity)); + + /* Reset the Output N Polarity level */ + tmpccer &= ~TIM_CCER_CC1NP; + /* Set the Output N Polarity */ + tmpccer |= OC_Config->OCNPolarity; + /* Reset the Output N State */ + tmpccer &= ~TIM_CCER_CC1NE; + } + + if (IS_TIM_BREAK_INSTANCE(TIMx)) + { + /* Check parameters */ + assert_param(IS_TIM_OCNIDLE_STATE(OC_Config->OCNIdleState)); + assert_param(IS_TIM_OCIDLE_STATE(OC_Config->OCIdleState)); + + /* Reset the Output Compare and Output Compare N IDLE State */ + tmpcr2 &= ~TIM_CR2_OIS1; + tmpcr2 &= ~TIM_CR2_OIS1N; + /* Set the Output Idle state */ + tmpcr2 |= OC_Config->OCIdleState; + /* Set the Output N Idle state */ + tmpcr2 |= OC_Config->OCNIdleState; + } + + /* Write to TIMx CR2 */ + TIMx->CR2 = tmpcr2; + + /* Write to TIMx CCMR1 */ + TIMx->CCMR1 = tmpccmrx; + + /* Set the Capture Compare Register value */ + TIMx->CCR1 = OC_Config->Pulse; + + /* Write to TIMx CCER */ + TIMx->CCER = tmpccer; +} + +/** + * @brief Timer Output Compare 2 configuration + * @param TIMx to select the TIM peripheral + * @param OC_Config The output configuration structure + * @retval None + */ +void TIM_OC2_SetConfig(TIM_TypeDef *TIMx, TIM_OC_InitTypeDef *OC_Config) +{ + uint32_t tmpccmrx; + uint32_t tmpccer; + uint32_t tmpcr2; + + /* Disable the Channel 2: Reset the CC2E Bit */ + TIMx->CCER &= ~TIM_CCER_CC2E; + + /* Get the TIMx CCER register value */ + tmpccer = TIMx->CCER; + /* Get the TIMx CR2 register value */ + tmpcr2 = TIMx->CR2; + + /* Get the TIMx CCMR1 register value */ + tmpccmrx = TIMx->CCMR1; + + /* Reset the Output Compare mode and Capture/Compare selection Bits */ + tmpccmrx &= ~TIM_CCMR1_OC2M; + tmpccmrx &= ~TIM_CCMR1_CC2S; + + /* Select the Output Compare Mode */ + tmpccmrx |= (OC_Config->OCMode << 8U); + + /* Reset the Output Polarity level */ + tmpccer &= ~TIM_CCER_CC2P; + /* Set the Output Compare Polarity */ + tmpccer |= (OC_Config->OCPolarity << 4U); + + if (IS_TIM_CCXN_INSTANCE(TIMx, TIM_CHANNEL_2)) + { + assert_param(IS_TIM_OCN_POLARITY(OC_Config->OCNPolarity)); + + /* Reset the Output N Polarity level */ + tmpccer &= ~TIM_CCER_CC2NP; + /* Set the Output N Polarity */ + tmpccer |= (OC_Config->OCNPolarity << 4U); + /* Reset the Output N State */ + tmpccer &= ~TIM_CCER_CC2NE; + + } + + if (IS_TIM_BREAK_INSTANCE(TIMx)) + { + /* Check parameters */ + assert_param(IS_TIM_OCNIDLE_STATE(OC_Config->OCNIdleState)); + assert_param(IS_TIM_OCIDLE_STATE(OC_Config->OCIdleState)); + + /* Reset the Output Compare and Output Compare N IDLE State */ + tmpcr2 &= ~TIM_CR2_OIS2; + tmpcr2 &= ~TIM_CR2_OIS2N; + /* Set the Output Idle state */ + tmpcr2 |= (OC_Config->OCIdleState << 2U); + /* Set the Output N Idle state */ + tmpcr2 |= (OC_Config->OCNIdleState << 2U); + } + + /* Write to TIMx CR2 */ + TIMx->CR2 = tmpcr2; + + /* Write to TIMx CCMR1 */ + TIMx->CCMR1 = tmpccmrx; + + /* Set the Capture Compare Register value */ + TIMx->CCR2 = OC_Config->Pulse; + + /* Write to TIMx CCER */ + TIMx->CCER = tmpccer; +} + +/** + * @brief Timer Output Compare 3 configuration + * @param TIMx to select the TIM peripheral + * @param OC_Config The output configuration structure + * @retval None + */ +static void TIM_OC3_SetConfig(TIM_TypeDef *TIMx, TIM_OC_InitTypeDef *OC_Config) +{ + uint32_t tmpccmrx; + uint32_t tmpccer; + uint32_t tmpcr2; + + /* Disable the Channel 3: Reset the CC2E Bit */ + TIMx->CCER &= ~TIM_CCER_CC3E; + + /* Get the TIMx CCER register value */ + tmpccer = TIMx->CCER; + 
/* Get the TIMx CR2 register value */ + tmpcr2 = TIMx->CR2; + + /* Get the TIMx CCMR2 register value */ + tmpccmrx = TIMx->CCMR2; + + /* Reset the Output Compare mode and Capture/Compare selection Bits */ + tmpccmrx &= ~TIM_CCMR2_OC3M; + tmpccmrx &= ~TIM_CCMR2_CC3S; + /* Select the Output Compare Mode */ + tmpccmrx |= OC_Config->OCMode; + + /* Reset the Output Polarity level */ + tmpccer &= ~TIM_CCER_CC3P; + /* Set the Output Compare Polarity */ + tmpccer |= (OC_Config->OCPolarity << 8U); + + if (IS_TIM_CCXN_INSTANCE(TIMx, TIM_CHANNEL_3)) + { + assert_param(IS_TIM_OCN_POLARITY(OC_Config->OCNPolarity)); + + /* Reset the Output N Polarity level */ + tmpccer &= ~TIM_CCER_CC3NP; + /* Set the Output N Polarity */ + tmpccer |= (OC_Config->OCNPolarity << 8U); + /* Reset the Output N State */ + tmpccer &= ~TIM_CCER_CC3NE; + } + + if (IS_TIM_BREAK_INSTANCE(TIMx)) + { + /* Check parameters */ + assert_param(IS_TIM_OCNIDLE_STATE(OC_Config->OCNIdleState)); + assert_param(IS_TIM_OCIDLE_STATE(OC_Config->OCIdleState)); + + /* Reset the Output Compare and Output Compare N IDLE State */ + tmpcr2 &= ~TIM_CR2_OIS3; + tmpcr2 &= ~TIM_CR2_OIS3N; + /* Set the Output Idle state */ + tmpcr2 |= (OC_Config->OCIdleState << 4U); + /* Set the Output N Idle state */ + tmpcr2 |= (OC_Config->OCNIdleState << 4U); + } + + /* Write to TIMx CR2 */ + TIMx->CR2 = tmpcr2; + + /* Write to TIMx CCMR2 */ + TIMx->CCMR2 = tmpccmrx; + + /* Set the Capture Compare Register value */ + TIMx->CCR3 = OC_Config->Pulse; + + /* Write to TIMx CCER */ + TIMx->CCER = tmpccer; +} + +/** + * @brief Timer Output Compare 4 configuration + * @param TIMx to select the TIM peripheral + * @param OC_Config The output configuration structure + * @retval None + */ +static void TIM_OC4_SetConfig(TIM_TypeDef *TIMx, TIM_OC_InitTypeDef *OC_Config) +{ + uint32_t tmpccmrx; + uint32_t tmpccer; + uint32_t tmpcr2; + + /* Disable the Channel 4: Reset the CC4E Bit */ + TIMx->CCER &= ~TIM_CCER_CC4E; + + /* Get the TIMx CCER register value */ + tmpccer = TIMx->CCER; + /* Get the TIMx CR2 register value */ + tmpcr2 = TIMx->CR2; + + /* Get the TIMx CCMR2 register value */ + tmpccmrx = TIMx->CCMR2; + + /* Reset the Output Compare mode and Capture/Compare selection Bits */ + tmpccmrx &= ~TIM_CCMR2_OC4M; + tmpccmrx &= ~TIM_CCMR2_CC4S; + + /* Select the Output Compare Mode */ + tmpccmrx |= (OC_Config->OCMode << 8U); + + /* Reset the Output Polarity level */ + tmpccer &= ~TIM_CCER_CC4P; + /* Set the Output Compare Polarity */ + tmpccer |= (OC_Config->OCPolarity << 12U); + + if (IS_TIM_BREAK_INSTANCE(TIMx)) + { + /* Check parameters */ + assert_param(IS_TIM_OCIDLE_STATE(OC_Config->OCIdleState)); + + /* Reset the Output Compare IDLE State */ + tmpcr2 &= ~TIM_CR2_OIS4; + + /* Set the Output Idle state */ + tmpcr2 |= (OC_Config->OCIdleState << 6U); + } + + /* Write to TIMx CR2 */ + TIMx->CR2 = tmpcr2; + + /* Write to TIMx CCMR2 */ + TIMx->CCMR2 = tmpccmrx; + + /* Set the Capture Compare Register value */ + TIMx->CCR4 = OC_Config->Pulse; + + /* Write to TIMx CCER */ + TIMx->CCER = tmpccer; +} + +/** + * @brief Timer Output Compare 5 configuration + * @param TIMx to select the TIM peripheral + * @param OC_Config The output configuration structure + * @retval None + */ +static void TIM_OC5_SetConfig(TIM_TypeDef *TIMx, + TIM_OC_InitTypeDef *OC_Config) +{ + uint32_t tmpccmrx; + uint32_t tmpccer; + uint32_t tmpcr2; + + /* Disable the output: Reset the CCxE Bit */ + TIMx->CCER &= ~TIM_CCER_CC5E; + + /* Get the TIMx CCER register value */ + tmpccer = TIMx->CCER; + /* Get the TIMx CR2 
register value */ + tmpcr2 = TIMx->CR2; + /* Get the TIMx CCMR1 register value */ + tmpccmrx = TIMx->CCMR3; + + /* Reset the Output Compare Mode Bits */ + tmpccmrx &= ~(TIM_CCMR3_OC5M); + /* Select the Output Compare Mode */ + tmpccmrx |= OC_Config->OCMode; + + /* Reset the Output Polarity level */ + tmpccer &= ~TIM_CCER_CC5P; + /* Set the Output Compare Polarity */ + tmpccer |= (OC_Config->OCPolarity << 16U); + + if (IS_TIM_BREAK_INSTANCE(TIMx)) + { + /* Reset the Output Compare IDLE State */ + tmpcr2 &= ~TIM_CR2_OIS5; + /* Set the Output Idle state */ + tmpcr2 |= (OC_Config->OCIdleState << 8U); + } + /* Write to TIMx CR2 */ + TIMx->CR2 = tmpcr2; + + /* Write to TIMx CCMR3 */ + TIMx->CCMR3 = tmpccmrx; + + /* Set the Capture Compare Register value */ + TIMx->CCR5 = OC_Config->Pulse; + + /* Write to TIMx CCER */ + TIMx->CCER = tmpccer; +} + +/** + * @brief Timer Output Compare 6 configuration + * @param TIMx to select the TIM peripheral + * @param OC_Config The output configuration structure + * @retval None + */ +static void TIM_OC6_SetConfig(TIM_TypeDef *TIMx, + TIM_OC_InitTypeDef *OC_Config) +{ + uint32_t tmpccmrx; + uint32_t tmpccer; + uint32_t tmpcr2; + + /* Disable the output: Reset the CCxE Bit */ + TIMx->CCER &= ~TIM_CCER_CC6E; + + /* Get the TIMx CCER register value */ + tmpccer = TIMx->CCER; + /* Get the TIMx CR2 register value */ + tmpcr2 = TIMx->CR2; + /* Get the TIMx CCMR1 register value */ + tmpccmrx = TIMx->CCMR3; + + /* Reset the Output Compare Mode Bits */ + tmpccmrx &= ~(TIM_CCMR3_OC6M); + /* Select the Output Compare Mode */ + tmpccmrx |= (OC_Config->OCMode << 8U); + + /* Reset the Output Polarity level */ + tmpccer &= (uint32_t)~TIM_CCER_CC6P; + /* Set the Output Compare Polarity */ + tmpccer |= (OC_Config->OCPolarity << 20U); + + if (IS_TIM_BREAK_INSTANCE(TIMx)) + { + /* Reset the Output Compare IDLE State */ + tmpcr2 &= ~TIM_CR2_OIS6; + /* Set the Output Idle state */ + tmpcr2 |= (OC_Config->OCIdleState << 10U); + } + + /* Write to TIMx CR2 */ + TIMx->CR2 = tmpcr2; + + /* Write to TIMx CCMR3 */ + TIMx->CCMR3 = tmpccmrx; + + /* Set the Capture Compare Register value */ + TIMx->CCR6 = OC_Config->Pulse; + + /* Write to TIMx CCER */ + TIMx->CCER = tmpccer; +} + +/** + * @brief Slave Timer configuration function + * @param htim TIM handle + * @param sSlaveConfig Slave timer configuration + * @retval None + */ +static HAL_StatusTypeDef TIM_SlaveTimer_SetConfig(TIM_HandleTypeDef *htim, + TIM_SlaveConfigTypeDef *sSlaveConfig) +{ + HAL_StatusTypeDef status = HAL_OK; + uint32_t tmpsmcr; + uint32_t tmpccmr1; + uint32_t tmpccer; + + /* Get the TIMx SMCR register value */ + tmpsmcr = htim->Instance->SMCR; + + /* Reset the Trigger Selection Bits */ + tmpsmcr &= ~TIM_SMCR_TS; + /* Set the Input Trigger source */ + tmpsmcr |= sSlaveConfig->InputTrigger; + + /* Reset the slave mode Bits */ + tmpsmcr &= ~TIM_SMCR_SMS; + /* Set the slave mode */ + tmpsmcr |= sSlaveConfig->SlaveMode; + + /* Write to TIMx SMCR */ + htim->Instance->SMCR = tmpsmcr; + + /* Configure the trigger prescaler, filter, and polarity */ + switch (sSlaveConfig->InputTrigger) + { + case TIM_TS_ETRF: + { + /* Check the parameters */ + assert_param(IS_TIM_CLOCKSOURCE_ETRMODE1_INSTANCE(htim->Instance)); + assert_param(IS_TIM_TRIGGERPRESCALER(sSlaveConfig->TriggerPrescaler)); + assert_param(IS_TIM_TRIGGERPOLARITY(sSlaveConfig->TriggerPolarity)); + assert_param(IS_TIM_TRIGGERFILTER(sSlaveConfig->TriggerFilter)); + /* Configure the ETR Trigger source */ + TIM_ETR_SetConfig(htim->Instance, + sSlaveConfig->TriggerPrescaler, + 
sSlaveConfig->TriggerPolarity, + sSlaveConfig->TriggerFilter); + break; + } + + case TIM_TS_TI1F_ED: + { + /* Check the parameters */ + assert_param(IS_TIM_CC1_INSTANCE(htim->Instance)); + assert_param(IS_TIM_TRIGGERFILTER(sSlaveConfig->TriggerFilter)); + + if (sSlaveConfig->SlaveMode == TIM_SLAVEMODE_GATED) + { + return HAL_ERROR; + } + + /* Disable the Channel 1: Reset the CC1E Bit */ + tmpccer = htim->Instance->CCER; + htim->Instance->CCER &= ~TIM_CCER_CC1E; + tmpccmr1 = htim->Instance->CCMR1; + + /* Set the filter */ + tmpccmr1 &= ~TIM_CCMR1_IC1F; + tmpccmr1 |= ((sSlaveConfig->TriggerFilter) << 4U); + + /* Write to TIMx CCMR1 and CCER registers */ + htim->Instance->CCMR1 = tmpccmr1; + htim->Instance->CCER = tmpccer; + break; + } + + case TIM_TS_TI1FP1: + { + /* Check the parameters */ + assert_param(IS_TIM_CC1_INSTANCE(htim->Instance)); + assert_param(IS_TIM_TRIGGERPOLARITY(sSlaveConfig->TriggerPolarity)); + assert_param(IS_TIM_TRIGGERFILTER(sSlaveConfig->TriggerFilter)); + + /* Configure TI1 Filter and Polarity */ + TIM_TI1_ConfigInputStage(htim->Instance, + sSlaveConfig->TriggerPolarity, + sSlaveConfig->TriggerFilter); + break; + } + + case TIM_TS_TI2FP2: + { + /* Check the parameters */ + assert_param(IS_TIM_CC2_INSTANCE(htim->Instance)); + assert_param(IS_TIM_TRIGGERPOLARITY(sSlaveConfig->TriggerPolarity)); + assert_param(IS_TIM_TRIGGERFILTER(sSlaveConfig->TriggerFilter)); + + /* Configure TI2 Filter and Polarity */ + TIM_TI2_ConfigInputStage(htim->Instance, + sSlaveConfig->TriggerPolarity, + sSlaveConfig->TriggerFilter); + break; + } + + case TIM_TS_ITR0: + case TIM_TS_ITR1: + case TIM_TS_ITR2: + case TIM_TS_ITR3: + { + /* Check the parameter */ + assert_param(IS_TIM_CC2_INSTANCE(htim->Instance)); + break; + } + + default: + status = HAL_ERROR; + break; + } + + return status; +} + +/** + * @brief Configure the TI1 as Input. + * @param TIMx to select the TIM peripheral. + * @param TIM_ICPolarity The Input Polarity. + * This parameter can be one of the following values: + * @arg TIM_ICPOLARITY_RISING + * @arg TIM_ICPOLARITY_FALLING + * @arg TIM_ICPOLARITY_BOTHEDGE + * @param TIM_ICSelection specifies the input to be used. + * This parameter can be one of the following values: + * @arg TIM_ICSELECTION_DIRECTTI: TIM Input 1 is selected to be connected to IC1. + * @arg TIM_ICSELECTION_INDIRECTTI: TIM Input 1 is selected to be connected to IC2. + * @arg TIM_ICSELECTION_TRC: TIM Input 1 is selected to be connected to TRC. + * @param TIM_ICFilter Specifies the Input Capture Filter. + * This parameter must be a value between 0x00 and 0x0F. + * @retval None + * @note TIM_ICFilter and TIM_ICPolarity are not used in INDIRECT mode as TI2FP1 + * (on channel2 path) is used as the input signal. Therefore CCMR1 must be + * protected against un-initialized filter and polarity values. 
+ */ +void TIM_TI1_SetConfig(TIM_TypeDef *TIMx, uint32_t TIM_ICPolarity, uint32_t TIM_ICSelection, + uint32_t TIM_ICFilter) +{ + uint32_t tmpccmr1; + uint32_t tmpccer; + + /* Disable the Channel 1: Reset the CC1E Bit */ + TIMx->CCER &= ~TIM_CCER_CC1E; + tmpccmr1 = TIMx->CCMR1; + tmpccer = TIMx->CCER; + + /* Select the Input */ + if (IS_TIM_CC2_INSTANCE(TIMx) != RESET) + { + tmpccmr1 &= ~TIM_CCMR1_CC1S; + tmpccmr1 |= TIM_ICSelection; + } + else + { + tmpccmr1 |= TIM_CCMR1_CC1S_0; + } + + /* Set the filter */ + tmpccmr1 &= ~TIM_CCMR1_IC1F; + tmpccmr1 |= ((TIM_ICFilter << 4U) & TIM_CCMR1_IC1F); + + /* Select the Polarity and set the CC1E Bit */ + tmpccer &= ~(TIM_CCER_CC1P | TIM_CCER_CC1NP); + tmpccer |= (TIM_ICPolarity & (TIM_CCER_CC1P | TIM_CCER_CC1NP)); + + /* Write to TIMx CCMR1 and CCER registers */ + TIMx->CCMR1 = tmpccmr1; + TIMx->CCER = tmpccer; +} + +/** + * @brief Configure the Polarity and Filter for TI1. + * @param TIMx to select the TIM peripheral. + * @param TIM_ICPolarity The Input Polarity. + * This parameter can be one of the following values: + * @arg TIM_ICPOLARITY_RISING + * @arg TIM_ICPOLARITY_FALLING + * @arg TIM_ICPOLARITY_BOTHEDGE + * @param TIM_ICFilter Specifies the Input Capture Filter. + * This parameter must be a value between 0x00 and 0x0F. + * @retval None + */ +static void TIM_TI1_ConfigInputStage(TIM_TypeDef *TIMx, uint32_t TIM_ICPolarity, uint32_t TIM_ICFilter) +{ + uint32_t tmpccmr1; + uint32_t tmpccer; + + /* Disable the Channel 1: Reset the CC1E Bit */ + tmpccer = TIMx->CCER; + TIMx->CCER &= ~TIM_CCER_CC1E; + tmpccmr1 = TIMx->CCMR1; + + /* Set the filter */ + tmpccmr1 &= ~TIM_CCMR1_IC1F; + tmpccmr1 |= (TIM_ICFilter << 4U); + + /* Select the Polarity and set the CC1E Bit */ + tmpccer &= ~(TIM_CCER_CC1P | TIM_CCER_CC1NP); + tmpccer |= TIM_ICPolarity; + + /* Write to TIMx CCMR1 and CCER registers */ + TIMx->CCMR1 = tmpccmr1; + TIMx->CCER = tmpccer; +} + +/** + * @brief Configure the TI2 as Input. + * @param TIMx to select the TIM peripheral + * @param TIM_ICPolarity The Input Polarity. + * This parameter can be one of the following values: + * @arg TIM_ICPOLARITY_RISING + * @arg TIM_ICPOLARITY_FALLING + * @arg TIM_ICPOLARITY_BOTHEDGE + * @param TIM_ICSelection specifies the input to be used. + * This parameter can be one of the following values: + * @arg TIM_ICSELECTION_DIRECTTI: TIM Input 2 is selected to be connected to IC2. + * @arg TIM_ICSELECTION_INDIRECTTI: TIM Input 2 is selected to be connected to IC1. + * @arg TIM_ICSELECTION_TRC: TIM Input 2 is selected to be connected to TRC. + * @param TIM_ICFilter Specifies the Input Capture Filter. + * This parameter must be a value between 0x00 and 0x0F. + * @retval None + * @note TIM_ICFilter and TIM_ICPolarity are not used in INDIRECT mode as TI1FP2 + * (on channel1 path) is used as the input signal. Therefore CCMR1 must be + * protected against un-initialized filter and polarity values. 
+ */ +static void TIM_TI2_SetConfig(TIM_TypeDef *TIMx, uint32_t TIM_ICPolarity, uint32_t TIM_ICSelection, + uint32_t TIM_ICFilter) +{ + uint32_t tmpccmr1; + uint32_t tmpccer; + + /* Disable the Channel 2: Reset the CC2E Bit */ + TIMx->CCER &= ~TIM_CCER_CC2E; + tmpccmr1 = TIMx->CCMR1; + tmpccer = TIMx->CCER; + + /* Select the Input */ + tmpccmr1 &= ~TIM_CCMR1_CC2S; + tmpccmr1 |= (TIM_ICSelection << 8U); + + /* Set the filter */ + tmpccmr1 &= ~TIM_CCMR1_IC2F; + tmpccmr1 |= ((TIM_ICFilter << 12U) & TIM_CCMR1_IC2F); + + /* Select the Polarity and set the CC2E Bit */ + tmpccer &= ~(TIM_CCER_CC2P | TIM_CCER_CC2NP); + tmpccer |= ((TIM_ICPolarity << 4U) & (TIM_CCER_CC2P | TIM_CCER_CC2NP)); + + /* Write to TIMx CCMR1 and CCER registers */ + TIMx->CCMR1 = tmpccmr1 ; + TIMx->CCER = tmpccer; +} + +/** + * @brief Configure the Polarity and Filter for TI2. + * @param TIMx to select the TIM peripheral. + * @param TIM_ICPolarity The Input Polarity. + * This parameter can be one of the following values: + * @arg TIM_ICPOLARITY_RISING + * @arg TIM_ICPOLARITY_FALLING + * @arg TIM_ICPOLARITY_BOTHEDGE + * @param TIM_ICFilter Specifies the Input Capture Filter. + * This parameter must be a value between 0x00 and 0x0F. + * @retval None + */ +static void TIM_TI2_ConfigInputStage(TIM_TypeDef *TIMx, uint32_t TIM_ICPolarity, uint32_t TIM_ICFilter) +{ + uint32_t tmpccmr1; + uint32_t tmpccer; + + /* Disable the Channel 2: Reset the CC2E Bit */ + TIMx->CCER &= ~TIM_CCER_CC2E; + tmpccmr1 = TIMx->CCMR1; + tmpccer = TIMx->CCER; + + /* Set the filter */ + tmpccmr1 &= ~TIM_CCMR1_IC2F; + tmpccmr1 |= (TIM_ICFilter << 12U); + + /* Select the Polarity and set the CC2E Bit */ + tmpccer &= ~(TIM_CCER_CC2P | TIM_CCER_CC2NP); + tmpccer |= (TIM_ICPolarity << 4U); + + /* Write to TIMx CCMR1 and CCER registers */ + TIMx->CCMR1 = tmpccmr1 ; + TIMx->CCER = tmpccer; +} + +/** + * @brief Configure the TI3 as Input. + * @param TIMx to select the TIM peripheral + * @param TIM_ICPolarity The Input Polarity. + * This parameter can be one of the following values: + * @arg TIM_ICPOLARITY_RISING + * @arg TIM_ICPOLARITY_FALLING + * @arg TIM_ICPOLARITY_BOTHEDGE + * @param TIM_ICSelection specifies the input to be used. + * This parameter can be one of the following values: + * @arg TIM_ICSELECTION_DIRECTTI: TIM Input 3 is selected to be connected to IC3. + * @arg TIM_ICSELECTION_INDIRECTTI: TIM Input 3 is selected to be connected to IC4. + * @arg TIM_ICSELECTION_TRC: TIM Input 3 is selected to be connected to TRC. + * @param TIM_ICFilter Specifies the Input Capture Filter. + * This parameter must be a value between 0x00 and 0x0F. + * @retval None + * @note TIM_ICFilter and TIM_ICPolarity are not used in INDIRECT mode as TI3FP4 + * (on channel1 path) is used as the input signal. Therefore CCMR2 must be + * protected against un-initialized filter and polarity values. 
+ */ +static void TIM_TI3_SetConfig(TIM_TypeDef *TIMx, uint32_t TIM_ICPolarity, uint32_t TIM_ICSelection, + uint32_t TIM_ICFilter) +{ + uint32_t tmpccmr2; + uint32_t tmpccer; + + /* Disable the Channel 3: Reset the CC3E Bit */ + TIMx->CCER &= ~TIM_CCER_CC3E; + tmpccmr2 = TIMx->CCMR2; + tmpccer = TIMx->CCER; + + /* Select the Input */ + tmpccmr2 &= ~TIM_CCMR2_CC3S; + tmpccmr2 |= TIM_ICSelection; + + /* Set the filter */ + tmpccmr2 &= ~TIM_CCMR2_IC3F; + tmpccmr2 |= ((TIM_ICFilter << 4U) & TIM_CCMR2_IC3F); + + /* Select the Polarity and set the CC3E Bit */ + tmpccer &= ~(TIM_CCER_CC3P | TIM_CCER_CC3NP); + tmpccer |= ((TIM_ICPolarity << 8U) & (TIM_CCER_CC3P | TIM_CCER_CC3NP)); + + /* Write to TIMx CCMR2 and CCER registers */ + TIMx->CCMR2 = tmpccmr2; + TIMx->CCER = tmpccer; +} + +/** + * @brief Configure the TI4 as Input. + * @param TIMx to select the TIM peripheral + * @param TIM_ICPolarity The Input Polarity. + * This parameter can be one of the following values: + * @arg TIM_ICPOLARITY_RISING + * @arg TIM_ICPOLARITY_FALLING + * @arg TIM_ICPOLARITY_BOTHEDGE + * @param TIM_ICSelection specifies the input to be used. + * This parameter can be one of the following values: + * @arg TIM_ICSELECTION_DIRECTTI: TIM Input 4 is selected to be connected to IC4. + * @arg TIM_ICSELECTION_INDIRECTTI: TIM Input 4 is selected to be connected to IC3. + * @arg TIM_ICSELECTION_TRC: TIM Input 4 is selected to be connected to TRC. + * @param TIM_ICFilter Specifies the Input Capture Filter. + * This parameter must be a value between 0x00 and 0x0F. + * @note TIM_ICFilter and TIM_ICPolarity are not used in INDIRECT mode as TI4FP3 + * (on channel1 path) is used as the input signal. Therefore CCMR2 must be + * protected against un-initialized filter and polarity values. + * @retval None + */ +static void TIM_TI4_SetConfig(TIM_TypeDef *TIMx, uint32_t TIM_ICPolarity, uint32_t TIM_ICSelection, + uint32_t TIM_ICFilter) +{ + uint32_t tmpccmr2; + uint32_t tmpccer; + + /* Disable the Channel 4: Reset the CC4E Bit */ + TIMx->CCER &= ~TIM_CCER_CC4E; + tmpccmr2 = TIMx->CCMR2; + tmpccer = TIMx->CCER; + + /* Select the Input */ + tmpccmr2 &= ~TIM_CCMR2_CC4S; + tmpccmr2 |= (TIM_ICSelection << 8U); + + /* Set the filter */ + tmpccmr2 &= ~TIM_CCMR2_IC4F; + tmpccmr2 |= ((TIM_ICFilter << 12U) & TIM_CCMR2_IC4F); + + /* Select the Polarity and set the CC4E Bit */ + tmpccer &= ~(TIM_CCER_CC4P | TIM_CCER_CC4NP); + tmpccer |= ((TIM_ICPolarity << 12U) & (TIM_CCER_CC4P | TIM_CCER_CC4NP)); + + /* Write to TIMx CCMR2 and CCER registers */ + TIMx->CCMR2 = tmpccmr2; + TIMx->CCER = tmpccer ; +} + +/** + * @brief Selects the Input Trigger source + * @param TIMx to select the TIM peripheral + * @param InputTriggerSource The Input Trigger source. 
+ * This parameter can be one of the following values: + * @arg TIM_TS_ITR0: Internal Trigger 0 + * @arg TIM_TS_ITR1: Internal Trigger 1 + * @arg TIM_TS_ITR2: Internal Trigger 2 + * @arg TIM_TS_ITR3: Internal Trigger 3 + * @arg TIM_TS_TI1F_ED: TI1 Edge Detector + * @arg TIM_TS_TI1FP1: Filtered Timer Input 1 + * @arg TIM_TS_TI2FP2: Filtered Timer Input 2 + * @arg TIM_TS_ETRF: External Trigger input + * @retval None + */ +static void TIM_ITRx_SetConfig(TIM_TypeDef *TIMx, uint32_t InputTriggerSource) +{ + uint32_t tmpsmcr; + + /* Get the TIMx SMCR register value */ + tmpsmcr = TIMx->SMCR; + /* Reset the TS Bits */ + tmpsmcr &= ~TIM_SMCR_TS; + /* Set the Input Trigger source and the slave mode*/ + tmpsmcr |= (InputTriggerSource | TIM_SLAVEMODE_EXTERNAL1); + /* Write to TIMx SMCR */ + TIMx->SMCR = tmpsmcr; +} +/** + * @brief Configures the TIMx External Trigger (ETR). + * @param TIMx to select the TIM peripheral + * @param TIM_ExtTRGPrescaler The external Trigger Prescaler. + * This parameter can be one of the following values: + * @arg TIM_ETRPRESCALER_DIV1: ETRP Prescaler OFF. + * @arg TIM_ETRPRESCALER_DIV2: ETRP frequency divided by 2. + * @arg TIM_ETRPRESCALER_DIV4: ETRP frequency divided by 4. + * @arg TIM_ETRPRESCALER_DIV8: ETRP frequency divided by 8. + * @param TIM_ExtTRGPolarity The external Trigger Polarity. + * This parameter can be one of the following values: + * @arg TIM_ETRPOLARITY_INVERTED: active low or falling edge active. + * @arg TIM_ETRPOLARITY_NONINVERTED: active high or rising edge active. + * @param ExtTRGFilter External Trigger Filter. + * This parameter must be a value between 0x00 and 0x0F + * @retval None + */ +void TIM_ETR_SetConfig(TIM_TypeDef *TIMx, uint32_t TIM_ExtTRGPrescaler, + uint32_t TIM_ExtTRGPolarity, uint32_t ExtTRGFilter) +{ + uint32_t tmpsmcr; + + tmpsmcr = TIMx->SMCR; + + /* Reset the ETR Bits */ + tmpsmcr &= ~(TIM_SMCR_ETF | TIM_SMCR_ETPS | TIM_SMCR_ECE | TIM_SMCR_ETP); + + /* Set the Prescaler, the Filter value and the Polarity */ + tmpsmcr |= (uint32_t)(TIM_ExtTRGPrescaler | (TIM_ExtTRGPolarity | (ExtTRGFilter << 8U))); + + /* Write to TIMx SMCR */ + TIMx->SMCR = tmpsmcr; +} + +/** + * @brief Enables or disables the TIM Capture Compare Channel x. + * @param TIMx to select the TIM peripheral + * @param Channel specifies the TIM Channel + * This parameter can be one of the following values: + * @arg TIM_CHANNEL_1: TIM Channel 1 + * @arg TIM_CHANNEL_2: TIM Channel 2 + * @arg TIM_CHANNEL_3: TIM Channel 3 + * @arg TIM_CHANNEL_4: TIM Channel 4 + * @arg TIM_CHANNEL_5: TIM Channel 5 selected + * @arg TIM_CHANNEL_6: TIM Channel 6 selected + * @param ChannelState specifies the TIM Channel CCxE bit new state. + * This parameter can be: TIM_CCx_ENABLE or TIM_CCx_DISABLE. + * @retval None + */ +void TIM_CCxChannelCmd(TIM_TypeDef *TIMx, uint32_t Channel, uint32_t ChannelState) +{ + uint32_t tmp; + + /* Check the parameters */ + assert_param(IS_TIM_CC1_INSTANCE(TIMx)); + assert_param(IS_TIM_CHANNELS(Channel)); + + tmp = TIM_CCER_CC1E << (Channel & 0x1FU); /* 0x1FU = 31 bits max shift */ + + /* Reset the CCxE Bit */ + TIMx->CCER &= ~tmp; + + /* Set or reset the CCxE Bit */ + TIMx->CCER |= (uint32_t)(ChannelState << (Channel & 0x1FU)); /* 0x1FU = 31 bits max shift */ +} + +#if (USE_HAL_TIM_REGISTER_CALLBACKS == 1) +/** + * @brief Reset interrupt callbacks to the legacy weak callbacks. + * @param htim pointer to a TIM_HandleTypeDef structure that contains + * the configuration information for TIM module. 
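+  * @note  This helper is invoked from the HAL_TIM(Ex)_..._Init() functions while the handle
+  *        is still in HAL_TIM_STATE_RESET, so every callback pointer starts out at its
+  *        legacy weak implementation before HAL_TIM_RegisterCallback() overrides selected
+  *        ones. Illustrative sequence only (htim6 and MyPeriodElapsedCallback are
+  *        placeholder application names):
+  * @code
+  *        HAL_TIM_Base_Init(&htim6);
+  *        HAL_TIM_RegisterCallback(&htim6, HAL_TIM_PERIOD_ELAPSED_CB_ID, MyPeriodElapsedCallback);
+  * @endcode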
+ * @retval None + */ +void TIM_ResetCallback(TIM_HandleTypeDef *htim) +{ + /* Reset the TIM callback to the legacy weak callbacks */ + htim->PeriodElapsedCallback = HAL_TIM_PeriodElapsedCallback; + htim->PeriodElapsedHalfCpltCallback = HAL_TIM_PeriodElapsedHalfCpltCallback; + htim->TriggerCallback = HAL_TIM_TriggerCallback; + htim->TriggerHalfCpltCallback = HAL_TIM_TriggerHalfCpltCallback; + htim->IC_CaptureCallback = HAL_TIM_IC_CaptureCallback; + htim->IC_CaptureHalfCpltCallback = HAL_TIM_IC_CaptureHalfCpltCallback; + htim->OC_DelayElapsedCallback = HAL_TIM_OC_DelayElapsedCallback; + htim->PWM_PulseFinishedCallback = HAL_TIM_PWM_PulseFinishedCallback; + htim->PWM_PulseFinishedHalfCpltCallback = HAL_TIM_PWM_PulseFinishedHalfCpltCallback; + htim->ErrorCallback = HAL_TIM_ErrorCallback; + htim->CommutationCallback = HAL_TIMEx_CommutCallback; + htim->CommutationHalfCpltCallback = HAL_TIMEx_CommutHalfCpltCallback; + htim->BreakCallback = HAL_TIMEx_BreakCallback; + htim->Break2Callback = HAL_TIMEx_Break2Callback; +} +#endif /* USE_HAL_TIM_REGISTER_CALLBACKS */ + +/** + * @} + */ + +#endif /* HAL_TIM_MODULE_ENABLED */ +/** + * @} + */ + +/** + * @} + */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_tim_ex.c b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_tim_ex.c new file mode 100644 index 0000000..cd13ddc --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_tim_ex.c @@ -0,0 +1,2607 @@ +/** + ****************************************************************************** + * @file stm32f7xx_hal_tim_ex.c + * @author MCD Application Team + * @brief TIM HAL module driver. + * This file provides firmware functions to manage the following + * functionalities of the Timer Extended peripheral: + * + Time Hall Sensor Interface Initialization + * + Time Hall Sensor Interface Start + * + Time Complementary signal break and dead time configuration + * + Time Master and Slave synchronization configuration + * + Time Output Compare/PWM Channel Configuration (for channels 5 and 6) + * + Timer remapping capabilities configuration + ****************************************************************************** + * @attention + * + * Copyright (c) 2017 STMicroelectronics. + * All rights reserved. + * + * This software is licensed under terms that can be found in the LICENSE file + * in the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. + * + ****************************************************************************** + @verbatim + ============================================================================== + ##### TIMER Extended features ##### + ============================================================================== + [..] + The Timer Extended features include: + (#) Complementary outputs with programmable dead-time for : + (++) Output Compare + (++) PWM generation (Edge and Center-aligned Mode) + (++) One-pulse mode output + (#) Synchronization circuit to control the timer with external signals and to + interconnect several timers together. + (#) Break input to put the timer output signals in reset state or in a known state. + (#) Supports incremental (quadrature) encoder and hall-sensor circuitry for + positioning purposes + + ##### How to use this driver ##### + ============================================================================== + [..] 
+ (#) Initialize the TIM low level resources by implementing the following functions + depending on the selected feature: + (++) Hall Sensor output : HAL_TIMEx_HallSensor_MspInit() + + (#) Initialize the TIM low level resources : + (##) Enable the TIM interface clock using __HAL_RCC_TIMx_CLK_ENABLE(); + (##) TIM pins configuration + (+++) Enable the clock for the TIM GPIOs using the following function: + __HAL_RCC_GPIOx_CLK_ENABLE(); + (+++) Configure these TIM pins in Alternate function mode using HAL_GPIO_Init(); + + (#) The external Clock can be configured, if needed (the default clock is the + internal clock from the APBx), using the following function: + HAL_TIM_ConfigClockSource, the clock configuration should be done before + any start function. + + (#) Configure the TIM in the desired functioning mode using one of the + initialization function of this driver: + (++) HAL_TIMEx_HallSensor_Init() and HAL_TIMEx_ConfigCommutEvent(): to use the + Timer Hall Sensor Interface and the commutation event with the corresponding + Interrupt and DMA request if needed (Note that One Timer is used to interface + with the Hall sensor Interface and another Timer should be used to use + the commutation event). + + (#) Activate the TIM peripheral using one of the start functions: + (++) Complementary Output Compare : HAL_TIMEx_OCN_Start(), HAL_TIMEx_OCN_Start_DMA(), + HAL_TIMEx_OCN_Start_IT() + (++) Complementary PWM generation : HAL_TIMEx_PWMN_Start(), HAL_TIMEx_PWMN_Start_DMA(), + HAL_TIMEx_PWMN_Start_IT() + (++) Complementary One-pulse mode output : HAL_TIMEx_OnePulseN_Start(), HAL_TIMEx_OnePulseN_Start_IT() + (++) Hall Sensor output : HAL_TIMEx_HallSensor_Start(), HAL_TIMEx_HallSensor_Start_DMA(), + HAL_TIMEx_HallSensor_Start_IT(). + + @endverbatim + ****************************************************************************** + */ + +/* Includes ------------------------------------------------------------------*/ +#include "stm32f7xx_hal.h" + +/** @addtogroup STM32F7xx_HAL_Driver + * @{ + */ + +/** @defgroup TIMEx TIMEx + * @brief TIM Extended HAL module driver + * @{ + */ + +#ifdef HAL_TIM_MODULE_ENABLED + +/* Private typedef -----------------------------------------------------------*/ +/* Private define ------------------------------------------------------------*/ +/* Private macros ------------------------------------------------------------*/ +/* Private variables ---------------------------------------------------------*/ +/* Private function prototypes -----------------------------------------------*/ +static void TIM_DMADelayPulseNCplt(DMA_HandleTypeDef *hdma); +static void TIM_DMAErrorCCxN(DMA_HandleTypeDef *hdma); +static void TIM_CCxNChannelCmd(TIM_TypeDef *TIMx, uint32_t Channel, uint32_t ChannelNState); + +/* Exported functions --------------------------------------------------------*/ +/** @defgroup TIMEx_Exported_Functions TIM Extended Exported Functions + * @{ + */ + +/** @defgroup TIMEx_Exported_Functions_Group1 Extended Timer Hall Sensor functions + * @brief Timer Hall Sensor functions + * +@verbatim + ============================================================================== + ##### Timer Hall Sensor functions ##### + ============================================================================== + [..] + This section provides functions allowing to: + (+) Initialize and configure TIM HAL Sensor. + (+) De-initialize TIM HAL Sensor. + (+) Start the Hall Sensor Interface. + (+) Stop the Hall Sensor Interface. + (+) Start the Hall Sensor Interface and enable interrupts. 
+ (+) Stop the Hall Sensor Interface and disable interrupts. + (+) Start the Hall Sensor Interface and enable DMA transfers. + (+) Stop the Hall Sensor Interface and disable DMA transfers. + +@endverbatim + * @{ + */ +/** + * @brief Initializes the TIM Hall Sensor Interface and initialize the associated handle. + * @note When the timer instance is initialized in Hall Sensor Interface mode, + * timer channels 1 and channel 2 are reserved and cannot be used for + * other purpose. + * @param htim TIM Hall Sensor Interface handle + * @param sConfig TIM Hall Sensor configuration structure + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIMEx_HallSensor_Init(TIM_HandleTypeDef *htim, TIM_HallSensor_InitTypeDef *sConfig) +{ + TIM_OC_InitTypeDef OC_Config; + + /* Check the TIM handle allocation */ + if (htim == NULL) + { + return HAL_ERROR; + } + + /* Check the parameters */ + assert_param(IS_TIM_HALL_SENSOR_INTERFACE_INSTANCE(htim->Instance)); + assert_param(IS_TIM_COUNTER_MODE(htim->Init.CounterMode)); + assert_param(IS_TIM_CLOCKDIVISION_DIV(htim->Init.ClockDivision)); + assert_param(IS_TIM_AUTORELOAD_PRELOAD(htim->Init.AutoReloadPreload)); + assert_param(IS_TIM_IC_POLARITY(sConfig->IC1Polarity)); + assert_param(IS_TIM_IC_PRESCALER(sConfig->IC1Prescaler)); + assert_param(IS_TIM_IC_FILTER(sConfig->IC1Filter)); + + if (htim->State == HAL_TIM_STATE_RESET) + { + /* Allocate lock resource and initialize it */ + htim->Lock = HAL_UNLOCKED; + +#if (USE_HAL_TIM_REGISTER_CALLBACKS == 1) + /* Reset interrupt callbacks to legacy week callbacks */ + TIM_ResetCallback(htim); + + if (htim->HallSensor_MspInitCallback == NULL) + { + htim->HallSensor_MspInitCallback = HAL_TIMEx_HallSensor_MspInit; + } + /* Init the low level hardware : GPIO, CLOCK, NVIC */ + htim->HallSensor_MspInitCallback(htim); +#else + /* Init the low level hardware : GPIO, CLOCK, NVIC and DMA */ + HAL_TIMEx_HallSensor_MspInit(htim); +#endif /* USE_HAL_TIM_REGISTER_CALLBACKS */ + } + + /* Set the TIM state */ + htim->State = HAL_TIM_STATE_BUSY; + + /* Configure the Time base in the Encoder Mode */ + TIM_Base_SetConfig(htim->Instance, &htim->Init); + + /* Configure the Channel 1 as Input Channel to interface with the three Outputs of the Hall sensor */ + TIM_TI1_SetConfig(htim->Instance, sConfig->IC1Polarity, TIM_ICSELECTION_TRC, sConfig->IC1Filter); + + /* Reset the IC1PSC Bits */ + htim->Instance->CCMR1 &= ~TIM_CCMR1_IC1PSC; + /* Set the IC1PSC value */ + htim->Instance->CCMR1 |= sConfig->IC1Prescaler; + + /* Enable the Hall sensor interface (XOR function of the three inputs) */ + htim->Instance->CR2 |= TIM_CR2_TI1S; + + /* Select the TIM_TS_TI1F_ED signal as Input trigger for the TIM */ + htim->Instance->SMCR &= ~TIM_SMCR_TS; + htim->Instance->SMCR |= TIM_TS_TI1F_ED; + + /* Use the TIM_TS_TI1F_ED signal to reset the TIM counter each edge detection */ + htim->Instance->SMCR &= ~TIM_SMCR_SMS; + htim->Instance->SMCR |= TIM_SLAVEMODE_RESET; + + /* Program channel 2 in PWM 2 mode with the desired Commutation_Delay*/ + OC_Config.OCFastMode = TIM_OCFAST_DISABLE; + OC_Config.OCIdleState = TIM_OCIDLESTATE_RESET; + OC_Config.OCMode = TIM_OCMODE_PWM2; + OC_Config.OCNIdleState = TIM_OCNIDLESTATE_RESET; + OC_Config.OCNPolarity = TIM_OCNPOLARITY_HIGH; + OC_Config.OCPolarity = TIM_OCPOLARITY_HIGH; + OC_Config.Pulse = sConfig->Commutation_Delay; + + TIM_OC2_SetConfig(htim->Instance, &OC_Config); + + /* Select OC2REF as trigger output on TRGO: write the MMS bits in the TIMx_CR2 + register to 101 */ + htim->Instance->CR2 &= ~TIM_CR2_MMS; + 
htim->Instance->CR2 |= TIM_TRGO_OC2REF; + + /* Initialize the DMA burst operation state */ + htim->DMABurstState = HAL_DMA_BURST_STATE_READY; + + /* Initialize the TIM channels state */ + TIM_CHANNEL_STATE_SET(htim, TIM_CHANNEL_1, HAL_TIM_CHANNEL_STATE_READY); + TIM_CHANNEL_STATE_SET(htim, TIM_CHANNEL_2, HAL_TIM_CHANNEL_STATE_READY); + TIM_CHANNEL_N_STATE_SET(htim, TIM_CHANNEL_1, HAL_TIM_CHANNEL_STATE_READY); + TIM_CHANNEL_N_STATE_SET(htim, TIM_CHANNEL_2, HAL_TIM_CHANNEL_STATE_READY); + + /* Initialize the TIM state*/ + htim->State = HAL_TIM_STATE_READY; + + return HAL_OK; +} + +/** + * @brief DeInitializes the TIM Hall Sensor interface + * @param htim TIM Hall Sensor Interface handle + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIMEx_HallSensor_DeInit(TIM_HandleTypeDef *htim) +{ + /* Check the parameters */ + assert_param(IS_TIM_INSTANCE(htim->Instance)); + + htim->State = HAL_TIM_STATE_BUSY; + + /* Disable the TIM Peripheral Clock */ + __HAL_TIM_DISABLE(htim); + +#if (USE_HAL_TIM_REGISTER_CALLBACKS == 1) + if (htim->HallSensor_MspDeInitCallback == NULL) + { + htim->HallSensor_MspDeInitCallback = HAL_TIMEx_HallSensor_MspDeInit; + } + /* DeInit the low level hardware */ + htim->HallSensor_MspDeInitCallback(htim); +#else + /* DeInit the low level hardware: GPIO, CLOCK, NVIC */ + HAL_TIMEx_HallSensor_MspDeInit(htim); +#endif /* USE_HAL_TIM_REGISTER_CALLBACKS */ + + /* Change the DMA burst operation state */ + htim->DMABurstState = HAL_DMA_BURST_STATE_RESET; + + /* Change the TIM channels state */ + TIM_CHANNEL_STATE_SET(htim, TIM_CHANNEL_1, HAL_TIM_CHANNEL_STATE_RESET); + TIM_CHANNEL_STATE_SET(htim, TIM_CHANNEL_2, HAL_TIM_CHANNEL_STATE_RESET); + TIM_CHANNEL_N_STATE_SET(htim, TIM_CHANNEL_1, HAL_TIM_CHANNEL_STATE_RESET); + TIM_CHANNEL_N_STATE_SET(htim, TIM_CHANNEL_2, HAL_TIM_CHANNEL_STATE_RESET); + + /* Change TIM state */ + htim->State = HAL_TIM_STATE_RESET; + + /* Release Lock */ + __HAL_UNLOCK(htim); + + return HAL_OK; +} + +/** + * @brief Initializes the TIM Hall Sensor MSP. + * @param htim TIM Hall Sensor Interface handle + * @retval None + */ +__weak void HAL_TIMEx_HallSensor_MspInit(TIM_HandleTypeDef *htim) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(htim); + + /* NOTE : This function should not be modified, when the callback is needed, + the HAL_TIMEx_HallSensor_MspInit could be implemented in the user file + */ +} + +/** + * @brief DeInitializes TIM Hall Sensor MSP. + * @param htim TIM Hall Sensor Interface handle + * @retval None + */ +__weak void HAL_TIMEx_HallSensor_MspDeInit(TIM_HandleTypeDef *htim) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(htim); + + /* NOTE : This function should not be modified, when the callback is needed, + the HAL_TIMEx_HallSensor_MspDeInit could be implemented in the user file + */ +} + +/** + * @brief Starts the TIM Hall Sensor Interface. 
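+  * @note  Minimal start-up sketch, assuming a user handle named htimHall whose Instance and
+  *        Init structure have already been filled for a Hall-capable timer (all names here
+  *        are placeholders, not part of this driver):
+  * @code
+  *        TIM_HallSensor_InitTypeDef sHallConfig;
+  *        sHallConfig.IC1Polarity       = TIM_ICPOLARITY_RISING;
+  *        sHallConfig.IC1Prescaler      = TIM_ICPSC_DIV1;
+  *        sHallConfig.IC1Filter         = 0x0FU;
+  *        sHallConfig.Commutation_Delay = 0U;
+  *        if (HAL_TIMEx_HallSensor_Init(&htimHall, &sHallConfig) == HAL_OK)
+  *        {
+  *          HAL_TIMEx_HallSensor_Start(&htimHall);
+  *        }
+  * @endcode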
+ * @param htim TIM Hall Sensor Interface handle + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIMEx_HallSensor_Start(TIM_HandleTypeDef *htim) +{ + uint32_t tmpsmcr; + HAL_TIM_ChannelStateTypeDef channel_1_state = TIM_CHANNEL_STATE_GET(htim, TIM_CHANNEL_1); + HAL_TIM_ChannelStateTypeDef channel_2_state = TIM_CHANNEL_STATE_GET(htim, TIM_CHANNEL_2); + HAL_TIM_ChannelStateTypeDef complementary_channel_1_state = TIM_CHANNEL_N_STATE_GET(htim, TIM_CHANNEL_1); + HAL_TIM_ChannelStateTypeDef complementary_channel_2_state = TIM_CHANNEL_N_STATE_GET(htim, TIM_CHANNEL_2); + + /* Check the parameters */ + assert_param(IS_TIM_HALL_SENSOR_INTERFACE_INSTANCE(htim->Instance)); + + /* Check the TIM channels state */ + if ((channel_1_state != HAL_TIM_CHANNEL_STATE_READY) + || (channel_2_state != HAL_TIM_CHANNEL_STATE_READY) + || (complementary_channel_1_state != HAL_TIM_CHANNEL_STATE_READY) + || (complementary_channel_2_state != HAL_TIM_CHANNEL_STATE_READY)) + { + return HAL_ERROR; + } + + /* Set the TIM channels state */ + TIM_CHANNEL_STATE_SET(htim, TIM_CHANNEL_1, HAL_TIM_CHANNEL_STATE_BUSY); + TIM_CHANNEL_STATE_SET(htim, TIM_CHANNEL_2, HAL_TIM_CHANNEL_STATE_BUSY); + TIM_CHANNEL_N_STATE_SET(htim, TIM_CHANNEL_1, HAL_TIM_CHANNEL_STATE_BUSY); + TIM_CHANNEL_N_STATE_SET(htim, TIM_CHANNEL_2, HAL_TIM_CHANNEL_STATE_BUSY); + + /* Enable the Input Capture channel 1 + (in the Hall Sensor Interface the three possible channels that can be used are TIM_CHANNEL_1, + TIM_CHANNEL_2 and TIM_CHANNEL_3) */ + TIM_CCxChannelCmd(htim->Instance, TIM_CHANNEL_1, TIM_CCx_ENABLE); + + /* Enable the Peripheral, except in trigger mode where enable is automatically done with trigger */ + if (IS_TIM_SLAVE_INSTANCE(htim->Instance)) + { + tmpsmcr = htim->Instance->SMCR & TIM_SMCR_SMS; + if (!IS_TIM_SLAVEMODE_TRIGGER_ENABLED(tmpsmcr)) + { + __HAL_TIM_ENABLE(htim); + } + } + else + { + __HAL_TIM_ENABLE(htim); + } + + /* Return function status */ + return HAL_OK; +} + +/** + * @brief Stops the TIM Hall sensor Interface. + * @param htim TIM Hall Sensor Interface handle + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIMEx_HallSensor_Stop(TIM_HandleTypeDef *htim) +{ + /* Check the parameters */ + assert_param(IS_TIM_HALL_SENSOR_INTERFACE_INSTANCE(htim->Instance)); + + /* Disable the Input Capture channels 1, 2 and 3 + (in the Hall Sensor Interface the three possible channels that can be used are TIM_CHANNEL_1, + TIM_CHANNEL_2 and TIM_CHANNEL_3) */ + TIM_CCxChannelCmd(htim->Instance, TIM_CHANNEL_1, TIM_CCx_DISABLE); + + /* Disable the Peripheral */ + __HAL_TIM_DISABLE(htim); + + /* Set the TIM channels state */ + TIM_CHANNEL_STATE_SET(htim, TIM_CHANNEL_1, HAL_TIM_CHANNEL_STATE_READY); + TIM_CHANNEL_STATE_SET(htim, TIM_CHANNEL_2, HAL_TIM_CHANNEL_STATE_READY); + TIM_CHANNEL_N_STATE_SET(htim, TIM_CHANNEL_1, HAL_TIM_CHANNEL_STATE_READY); + TIM_CHANNEL_N_STATE_SET(htim, TIM_CHANNEL_2, HAL_TIM_CHANNEL_STATE_READY); + + /* Return function status */ + return HAL_OK; +} + +/** + * @brief Starts the TIM Hall Sensor Interface in interrupt mode. 
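+  * @note  In interrupt mode each selected edge of the XOR-ed Hall inputs produces a
+  *        capture/compare 1 event, and HAL_TIM_IRQHandler() then invokes the registered or
+  *        legacy weak HAL_TIM_IC_CaptureCallback(). Sketch of a typical override
+  *        (placeholder body, assuming the timer interrupt is enabled in the NVIC by the
+  *        MSP init):
+  * @code
+  *        void HAL_TIM_IC_CaptureCallback(TIM_HandleTypeDef *htim)
+  *        {
+  *          uint32_t interval = HAL_TIM_ReadCapturedValue(htim, TIM_CHANNEL_1);
+  *          UNUSED(interval);
+  *        }
+  * @endcode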
+ * @param htim TIM Hall Sensor Interface handle + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIMEx_HallSensor_Start_IT(TIM_HandleTypeDef *htim) +{ + uint32_t tmpsmcr; + HAL_TIM_ChannelStateTypeDef channel_1_state = TIM_CHANNEL_STATE_GET(htim, TIM_CHANNEL_1); + HAL_TIM_ChannelStateTypeDef channel_2_state = TIM_CHANNEL_STATE_GET(htim, TIM_CHANNEL_2); + HAL_TIM_ChannelStateTypeDef complementary_channel_1_state = TIM_CHANNEL_N_STATE_GET(htim, TIM_CHANNEL_1); + HAL_TIM_ChannelStateTypeDef complementary_channel_2_state = TIM_CHANNEL_N_STATE_GET(htim, TIM_CHANNEL_2); + + /* Check the parameters */ + assert_param(IS_TIM_HALL_SENSOR_INTERFACE_INSTANCE(htim->Instance)); + + /* Check the TIM channels state */ + if ((channel_1_state != HAL_TIM_CHANNEL_STATE_READY) + || (channel_2_state != HAL_TIM_CHANNEL_STATE_READY) + || (complementary_channel_1_state != HAL_TIM_CHANNEL_STATE_READY) + || (complementary_channel_2_state != HAL_TIM_CHANNEL_STATE_READY)) + { + return HAL_ERROR; + } + + /* Set the TIM channels state */ + TIM_CHANNEL_STATE_SET(htim, TIM_CHANNEL_1, HAL_TIM_CHANNEL_STATE_BUSY); + TIM_CHANNEL_STATE_SET(htim, TIM_CHANNEL_2, HAL_TIM_CHANNEL_STATE_BUSY); + TIM_CHANNEL_N_STATE_SET(htim, TIM_CHANNEL_1, HAL_TIM_CHANNEL_STATE_BUSY); + TIM_CHANNEL_N_STATE_SET(htim, TIM_CHANNEL_2, HAL_TIM_CHANNEL_STATE_BUSY); + + /* Enable the capture compare Interrupts 1 event */ + __HAL_TIM_ENABLE_IT(htim, TIM_IT_CC1); + + /* Enable the Input Capture channel 1 + (in the Hall Sensor Interface the three possible channels that can be used are TIM_CHANNEL_1, + TIM_CHANNEL_2 and TIM_CHANNEL_3) */ + TIM_CCxChannelCmd(htim->Instance, TIM_CHANNEL_1, TIM_CCx_ENABLE); + + /* Enable the Peripheral, except in trigger mode where enable is automatically done with trigger */ + if (IS_TIM_SLAVE_INSTANCE(htim->Instance)) + { + tmpsmcr = htim->Instance->SMCR & TIM_SMCR_SMS; + if (!IS_TIM_SLAVEMODE_TRIGGER_ENABLED(tmpsmcr)) + { + __HAL_TIM_ENABLE(htim); + } + } + else + { + __HAL_TIM_ENABLE(htim); + } + + /* Return function status */ + return HAL_OK; +} + +/** + * @brief Stops the TIM Hall Sensor Interface in interrupt mode. + * @param htim TIM Hall Sensor Interface handle + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIMEx_HallSensor_Stop_IT(TIM_HandleTypeDef *htim) +{ + /* Check the parameters */ + assert_param(IS_TIM_HALL_SENSOR_INTERFACE_INSTANCE(htim->Instance)); + + /* Disable the Input Capture channel 1 + (in the Hall Sensor Interface the three possible channels that can be used are TIM_CHANNEL_1, + TIM_CHANNEL_2 and TIM_CHANNEL_3) */ + TIM_CCxChannelCmd(htim->Instance, TIM_CHANNEL_1, TIM_CCx_DISABLE); + + /* Disable the capture compare Interrupts event */ + __HAL_TIM_DISABLE_IT(htim, TIM_IT_CC1); + + /* Disable the Peripheral */ + __HAL_TIM_DISABLE(htim); + + /* Set the TIM channels state */ + TIM_CHANNEL_STATE_SET(htim, TIM_CHANNEL_1, HAL_TIM_CHANNEL_STATE_READY); + TIM_CHANNEL_STATE_SET(htim, TIM_CHANNEL_2, HAL_TIM_CHANNEL_STATE_READY); + TIM_CHANNEL_N_STATE_SET(htim, TIM_CHANNEL_1, HAL_TIM_CHANNEL_STATE_READY); + TIM_CHANNEL_N_STATE_SET(htim, TIM_CHANNEL_2, HAL_TIM_CHANNEL_STATE_READY); + + /* Return function status */ + return HAL_OK; +} + +/** + * @brief Starts the TIM Hall Sensor Interface in DMA mode. + * @param htim TIM Hall Sensor Interface handle + * @param pData The destination Buffer address. + * @param Length The length of data to be transferred from TIM peripheral to memory. 
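+  * @note  Sketch only (htimHall and hall_capture are placeholder application names): the
+  *        DMA handle linked to htim->hdma[TIM_DMA_ID_CC1] must have been configured
+  *        beforehand, typically in HAL_TIMEx_HallSensor_MspInit(), before starting the
+  *        transfer:
+  * @code
+  *        static uint32_t hall_capture[32];
+  *        HAL_TIMEx_HallSensor_Start_DMA(&htimHall, hall_capture, 32U);
+  * @endcode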
+ * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIMEx_HallSensor_Start_DMA(TIM_HandleTypeDef *htim, uint32_t *pData, uint16_t Length) +{ + uint32_t tmpsmcr; + HAL_TIM_ChannelStateTypeDef channel_1_state = TIM_CHANNEL_STATE_GET(htim, TIM_CHANNEL_1); + HAL_TIM_ChannelStateTypeDef complementary_channel_1_state = TIM_CHANNEL_N_STATE_GET(htim, TIM_CHANNEL_1); + + /* Check the parameters */ + assert_param(IS_TIM_HALL_SENSOR_INTERFACE_INSTANCE(htim->Instance)); + + /* Set the TIM channel state */ + if ((channel_1_state == HAL_TIM_CHANNEL_STATE_BUSY) + || (complementary_channel_1_state == HAL_TIM_CHANNEL_STATE_BUSY)) + { + return HAL_BUSY; + } + else if ((channel_1_state == HAL_TIM_CHANNEL_STATE_READY) + && (complementary_channel_1_state == HAL_TIM_CHANNEL_STATE_READY)) + { + if ((pData == NULL) && (Length > 0U)) + { + return HAL_ERROR; + } + else + { + TIM_CHANNEL_STATE_SET(htim, TIM_CHANNEL_1, HAL_TIM_CHANNEL_STATE_BUSY); + TIM_CHANNEL_N_STATE_SET(htim, TIM_CHANNEL_1, HAL_TIM_CHANNEL_STATE_BUSY); + } + } + else + { + return HAL_ERROR; + } + + /* Enable the Input Capture channel 1 + (in the Hall Sensor Interface the three possible channels that can be used are TIM_CHANNEL_1, + TIM_CHANNEL_2 and TIM_CHANNEL_3) */ + TIM_CCxChannelCmd(htim->Instance, TIM_CHANNEL_1, TIM_CCx_ENABLE); + + /* Set the DMA Input Capture 1 Callbacks */ + htim->hdma[TIM_DMA_ID_CC1]->XferCpltCallback = TIM_DMACaptureCplt; + htim->hdma[TIM_DMA_ID_CC1]->XferHalfCpltCallback = TIM_DMACaptureHalfCplt; + /* Set the DMA error callback */ + htim->hdma[TIM_DMA_ID_CC1]->XferErrorCallback = TIM_DMAError ; + + /* Enable the DMA stream for Capture 1*/ + if (HAL_DMA_Start_IT(htim->hdma[TIM_DMA_ID_CC1], (uint32_t)&htim->Instance->CCR1, (uint32_t)pData, Length) != HAL_OK) + { + /* Return error status */ + return HAL_ERROR; + } + /* Enable the capture compare 1 Interrupt */ + __HAL_TIM_ENABLE_DMA(htim, TIM_DMA_CC1); + + /* Enable the Peripheral, except in trigger mode where enable is automatically done with trigger */ + if (IS_TIM_SLAVE_INSTANCE(htim->Instance)) + { + tmpsmcr = htim->Instance->SMCR & TIM_SMCR_SMS; + if (!IS_TIM_SLAVEMODE_TRIGGER_ENABLED(tmpsmcr)) + { + __HAL_TIM_ENABLE(htim); + } + } + else + { + __HAL_TIM_ENABLE(htim); + } + + /* Return function status */ + return HAL_OK; +} + +/** + * @brief Stops the TIM Hall Sensor Interface in DMA mode. 
+ * @param htim TIM Hall Sensor Interface handle + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIMEx_HallSensor_Stop_DMA(TIM_HandleTypeDef *htim) +{ + /* Check the parameters */ + assert_param(IS_TIM_HALL_SENSOR_INTERFACE_INSTANCE(htim->Instance)); + + /* Disable the Input Capture channel 1 + (in the Hall Sensor Interface the three possible channels that can be used are TIM_CHANNEL_1, + TIM_CHANNEL_2 and TIM_CHANNEL_3) */ + TIM_CCxChannelCmd(htim->Instance, TIM_CHANNEL_1, TIM_CCx_DISABLE); + + + /* Disable the capture compare Interrupts 1 event */ + __HAL_TIM_DISABLE_DMA(htim, TIM_DMA_CC1); + + (void)HAL_DMA_Abort_IT(htim->hdma[TIM_DMA_ID_CC1]); + + /* Disable the Peripheral */ + __HAL_TIM_DISABLE(htim); + + /* Set the TIM channel state */ + TIM_CHANNEL_STATE_SET(htim, TIM_CHANNEL_1, HAL_TIM_CHANNEL_STATE_READY); + TIM_CHANNEL_N_STATE_SET(htim, TIM_CHANNEL_1, HAL_TIM_CHANNEL_STATE_READY); + + /* Return function status */ + return HAL_OK; +} + +/** + * @} + */ + +/** @defgroup TIMEx_Exported_Functions_Group2 Extended Timer Complementary Output Compare functions + * @brief Timer Complementary Output Compare functions + * +@verbatim + ============================================================================== + ##### Timer Complementary Output Compare functions ##### + ============================================================================== + [..] + This section provides functions allowing to: + (+) Start the Complementary Output Compare/PWM. + (+) Stop the Complementary Output Compare/PWM. + (+) Start the Complementary Output Compare/PWM and enable interrupts. + (+) Stop the Complementary Output Compare/PWM and disable interrupts. + (+) Start the Complementary Output Compare/PWM and enable DMA transfers. + (+) Stop the Complementary Output Compare/PWM and disable DMA transfers. + +@endverbatim + * @{ + */ + +/** + * @brief Starts the TIM Output Compare signal generation on the complementary + * output. + * @param htim TIM Output Compare handle + * @param Channel TIM Channel to be enabled + * This parameter can be one of the following values: + * @arg TIM_CHANNEL_1: TIM Channel 1 selected + * @arg TIM_CHANNEL_2: TIM Channel 2 selected + * @arg TIM_CHANNEL_3: TIM Channel 3 selected + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIMEx_OCN_Start(TIM_HandleTypeDef *htim, uint32_t Channel) +{ + uint32_t tmpsmcr; + + /* Check the parameters */ + assert_param(IS_TIM_CCXN_INSTANCE(htim->Instance, Channel)); + + /* Check the TIM complementary channel state */ + if (TIM_CHANNEL_N_STATE_GET(htim, Channel) != HAL_TIM_CHANNEL_STATE_READY) + { + return HAL_ERROR; + } + + /* Set the TIM complementary channel state */ + TIM_CHANNEL_N_STATE_SET(htim, Channel, HAL_TIM_CHANNEL_STATE_BUSY); + + /* Enable the Capture compare channel N */ + TIM_CCxNChannelCmd(htim->Instance, Channel, TIM_CCxN_ENABLE); + + /* Enable the Main Output */ + __HAL_TIM_MOE_ENABLE(htim); + + /* Enable the Peripheral, except in trigger mode where enable is automatically done with trigger */ + if (IS_TIM_SLAVE_INSTANCE(htim->Instance)) + { + tmpsmcr = htim->Instance->SMCR & TIM_SMCR_SMS; + if (!IS_TIM_SLAVEMODE_TRIGGER_ENABLED(tmpsmcr)) + { + __HAL_TIM_ENABLE(htim); + } + } + else + { + __HAL_TIM_ENABLE(htim); + } + + /* Return function status */ + return HAL_OK; +} + +/** + * @brief Stops the TIM Output Compare signal generation on the complementary + * output. 
+ * @param htim TIM handle + * @param Channel TIM Channel to be disabled + * This parameter can be one of the following values: + * @arg TIM_CHANNEL_1: TIM Channel 1 selected + * @arg TIM_CHANNEL_2: TIM Channel 2 selected + * @arg TIM_CHANNEL_3: TIM Channel 3 selected + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIMEx_OCN_Stop(TIM_HandleTypeDef *htim, uint32_t Channel) +{ + /* Check the parameters */ + assert_param(IS_TIM_CCXN_INSTANCE(htim->Instance, Channel)); + + /* Disable the Capture compare channel N */ + TIM_CCxNChannelCmd(htim->Instance, Channel, TIM_CCxN_DISABLE); + + /* Disable the Main Output */ + __HAL_TIM_MOE_DISABLE(htim); + + /* Disable the Peripheral */ + __HAL_TIM_DISABLE(htim); + + /* Set the TIM complementary channel state */ + TIM_CHANNEL_N_STATE_SET(htim, Channel, HAL_TIM_CHANNEL_STATE_READY); + + /* Return function status */ + return HAL_OK; +} + +/** + * @brief Starts the TIM Output Compare signal generation in interrupt mode + * on the complementary output. + * @param htim TIM OC handle + * @param Channel TIM Channel to be enabled + * This parameter can be one of the following values: + * @arg TIM_CHANNEL_1: TIM Channel 1 selected + * @arg TIM_CHANNEL_2: TIM Channel 2 selected + * @arg TIM_CHANNEL_3: TIM Channel 3 selected + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIMEx_OCN_Start_IT(TIM_HandleTypeDef *htim, uint32_t Channel) +{ + HAL_StatusTypeDef status = HAL_OK; + uint32_t tmpsmcr; + + /* Check the parameters */ + assert_param(IS_TIM_CCXN_INSTANCE(htim->Instance, Channel)); + + /* Check the TIM complementary channel state */ + if (TIM_CHANNEL_N_STATE_GET(htim, Channel) != HAL_TIM_CHANNEL_STATE_READY) + { + return HAL_ERROR; + } + + /* Set the TIM complementary channel state */ + TIM_CHANNEL_N_STATE_SET(htim, Channel, HAL_TIM_CHANNEL_STATE_BUSY); + + switch (Channel) + { + case TIM_CHANNEL_1: + { + /* Enable the TIM Output Compare interrupt */ + __HAL_TIM_ENABLE_IT(htim, TIM_IT_CC1); + break; + } + + case TIM_CHANNEL_2: + { + /* Enable the TIM Output Compare interrupt */ + __HAL_TIM_ENABLE_IT(htim, TIM_IT_CC2); + break; + } + + case TIM_CHANNEL_3: + { + /* Enable the TIM Output Compare interrupt */ + __HAL_TIM_ENABLE_IT(htim, TIM_IT_CC3); + break; + } + + + default: + status = HAL_ERROR; + break; + } + + if (status == HAL_OK) + { + /* Enable the TIM Break interrupt */ + __HAL_TIM_ENABLE_IT(htim, TIM_IT_BREAK); + + /* Enable the Capture compare channel N */ + TIM_CCxNChannelCmd(htim->Instance, Channel, TIM_CCxN_ENABLE); + + /* Enable the Main Output */ + __HAL_TIM_MOE_ENABLE(htim); + + /* Enable the Peripheral, except in trigger mode where enable is automatically done with trigger */ + if (IS_TIM_SLAVE_INSTANCE(htim->Instance)) + { + tmpsmcr = htim->Instance->SMCR & TIM_SMCR_SMS; + if (!IS_TIM_SLAVEMODE_TRIGGER_ENABLED(tmpsmcr)) + { + __HAL_TIM_ENABLE(htim); + } + } + else + { + __HAL_TIM_ENABLE(htim); + } + } + + /* Return function status */ + return status; +} + +/** + * @brief Stops the TIM Output Compare signal generation in interrupt mode + * on the complementary output. 
+ * @param htim TIM Output Compare handle + * @param Channel TIM Channel to be disabled + * This parameter can be one of the following values: + * @arg TIM_CHANNEL_1: TIM Channel 1 selected + * @arg TIM_CHANNEL_2: TIM Channel 2 selected + * @arg TIM_CHANNEL_3: TIM Channel 3 selected + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIMEx_OCN_Stop_IT(TIM_HandleTypeDef *htim, uint32_t Channel) +{ + HAL_StatusTypeDef status = HAL_OK; + uint32_t tmpccer; + + /* Check the parameters */ + assert_param(IS_TIM_CCXN_INSTANCE(htim->Instance, Channel)); + + switch (Channel) + { + case TIM_CHANNEL_1: + { + /* Disable the TIM Output Compare interrupt */ + __HAL_TIM_DISABLE_IT(htim, TIM_IT_CC1); + break; + } + + case TIM_CHANNEL_2: + { + /* Disable the TIM Output Compare interrupt */ + __HAL_TIM_DISABLE_IT(htim, TIM_IT_CC2); + break; + } + + case TIM_CHANNEL_3: + { + /* Disable the TIM Output Compare interrupt */ + __HAL_TIM_DISABLE_IT(htim, TIM_IT_CC3); + break; + } + + default: + status = HAL_ERROR; + break; + } + + if (status == HAL_OK) + { + /* Disable the Capture compare channel N */ + TIM_CCxNChannelCmd(htim->Instance, Channel, TIM_CCxN_DISABLE); + + /* Disable the TIM Break interrupt (only if no more channel is active) */ + tmpccer = htim->Instance->CCER; + if ((tmpccer & (TIM_CCER_CC1NE | TIM_CCER_CC2NE | TIM_CCER_CC3NE)) == (uint32_t)RESET) + { + __HAL_TIM_DISABLE_IT(htim, TIM_IT_BREAK); + } + + /* Disable the Main Output */ + __HAL_TIM_MOE_DISABLE(htim); + + /* Disable the Peripheral */ + __HAL_TIM_DISABLE(htim); + + /* Set the TIM complementary channel state */ + TIM_CHANNEL_N_STATE_SET(htim, Channel, HAL_TIM_CHANNEL_STATE_READY); + } + + /* Return function status */ + return status; +} + +/** + * @brief Starts the TIM Output Compare signal generation in DMA mode + * on the complementary output. + * @param htim TIM Output Compare handle + * @param Channel TIM Channel to be enabled + * This parameter can be one of the following values: + * @arg TIM_CHANNEL_1: TIM Channel 1 selected + * @arg TIM_CHANNEL_2: TIM Channel 2 selected + * @arg TIM_CHANNEL_3: TIM Channel 3 selected + * @param pData The source Buffer address. 
+ * @param Length The length of data to be transferred from memory to TIM peripheral + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIMEx_OCN_Start_DMA(TIM_HandleTypeDef *htim, uint32_t Channel, uint32_t *pData, uint16_t Length) +{ + HAL_StatusTypeDef status = HAL_OK; + uint32_t tmpsmcr; + + /* Check the parameters */ + assert_param(IS_TIM_CCXN_INSTANCE(htim->Instance, Channel)); + + /* Set the TIM complementary channel state */ + if (TIM_CHANNEL_N_STATE_GET(htim, Channel) == HAL_TIM_CHANNEL_STATE_BUSY) + { + return HAL_BUSY; + } + else if (TIM_CHANNEL_N_STATE_GET(htim, Channel) == HAL_TIM_CHANNEL_STATE_READY) + { + if ((pData == NULL) && (Length > 0U)) + { + return HAL_ERROR; + } + else + { + TIM_CHANNEL_N_STATE_SET(htim, Channel, HAL_TIM_CHANNEL_STATE_BUSY); + } + } + else + { + return HAL_ERROR; + } + + switch (Channel) + { + case TIM_CHANNEL_1: + { + /* Set the DMA compare callbacks */ + htim->hdma[TIM_DMA_ID_CC1]->XferCpltCallback = TIM_DMADelayPulseNCplt; + htim->hdma[TIM_DMA_ID_CC1]->XferHalfCpltCallback = TIM_DMADelayPulseHalfCplt; + + /* Set the DMA error callback */ + htim->hdma[TIM_DMA_ID_CC1]->XferErrorCallback = TIM_DMAErrorCCxN ; + + /* Enable the DMA stream */ + if (HAL_DMA_Start_IT(htim->hdma[TIM_DMA_ID_CC1], (uint32_t)pData, (uint32_t)&htim->Instance->CCR1, + Length) != HAL_OK) + { + /* Return error status */ + return HAL_ERROR; + } + /* Enable the TIM Output Compare DMA request */ + __HAL_TIM_ENABLE_DMA(htim, TIM_DMA_CC1); + break; + } + + case TIM_CHANNEL_2: + { + /* Set the DMA compare callbacks */ + htim->hdma[TIM_DMA_ID_CC2]->XferCpltCallback = TIM_DMADelayPulseNCplt; + htim->hdma[TIM_DMA_ID_CC2]->XferHalfCpltCallback = TIM_DMADelayPulseHalfCplt; + + /* Set the DMA error callback */ + htim->hdma[TIM_DMA_ID_CC2]->XferErrorCallback = TIM_DMAErrorCCxN ; + + /* Enable the DMA stream */ + if (HAL_DMA_Start_IT(htim->hdma[TIM_DMA_ID_CC2], (uint32_t)pData, (uint32_t)&htim->Instance->CCR2, + Length) != HAL_OK) + { + /* Return error status */ + return HAL_ERROR; + } + /* Enable the TIM Output Compare DMA request */ + __HAL_TIM_ENABLE_DMA(htim, TIM_DMA_CC2); + break; + } + + case TIM_CHANNEL_3: + { + /* Set the DMA compare callbacks */ + htim->hdma[TIM_DMA_ID_CC3]->XferCpltCallback = TIM_DMADelayPulseNCplt; + htim->hdma[TIM_DMA_ID_CC3]->XferHalfCpltCallback = TIM_DMADelayPulseHalfCplt; + + /* Set the DMA error callback */ + htim->hdma[TIM_DMA_ID_CC3]->XferErrorCallback = TIM_DMAErrorCCxN ; + + /* Enable the DMA stream */ + if (HAL_DMA_Start_IT(htim->hdma[TIM_DMA_ID_CC3], (uint32_t)pData, (uint32_t)&htim->Instance->CCR3, + Length) != HAL_OK) + { + /* Return error status */ + return HAL_ERROR; + } + /* Enable the TIM Output Compare DMA request */ + __HAL_TIM_ENABLE_DMA(htim, TIM_DMA_CC3); + break; + } + + default: + status = HAL_ERROR; + break; + } + + if (status == HAL_OK) + { + /* Enable the Capture compare channel N */ + TIM_CCxNChannelCmd(htim->Instance, Channel, TIM_CCxN_ENABLE); + + /* Enable the Main Output */ + __HAL_TIM_MOE_ENABLE(htim); + + /* Enable the Peripheral, except in trigger mode where enable is automatically done with trigger */ + if (IS_TIM_SLAVE_INSTANCE(htim->Instance)) + { + tmpsmcr = htim->Instance->SMCR & TIM_SMCR_SMS; + if (!IS_TIM_SLAVEMODE_TRIGGER_ENABLED(tmpsmcr)) + { + __HAL_TIM_ENABLE(htim); + } + } + else + { + __HAL_TIM_ENABLE(htim); + } + } + + /* Return function status */ + return status; +} + +/** + * @brief Stops the TIM Output Compare signal generation in DMA mode + * on the complementary output. 
+ * @param htim TIM Output Compare handle + * @param Channel TIM Channel to be disabled + * This parameter can be one of the following values: + * @arg TIM_CHANNEL_1: TIM Channel 1 selected + * @arg TIM_CHANNEL_2: TIM Channel 2 selected + * @arg TIM_CHANNEL_3: TIM Channel 3 selected + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIMEx_OCN_Stop_DMA(TIM_HandleTypeDef *htim, uint32_t Channel) +{ + HAL_StatusTypeDef status = HAL_OK; + + /* Check the parameters */ + assert_param(IS_TIM_CCXN_INSTANCE(htim->Instance, Channel)); + + switch (Channel) + { + case TIM_CHANNEL_1: + { + /* Disable the TIM Output Compare DMA request */ + __HAL_TIM_DISABLE_DMA(htim, TIM_DMA_CC1); + (void)HAL_DMA_Abort_IT(htim->hdma[TIM_DMA_ID_CC1]); + break; + } + + case TIM_CHANNEL_2: + { + /* Disable the TIM Output Compare DMA request */ + __HAL_TIM_DISABLE_DMA(htim, TIM_DMA_CC2); + (void)HAL_DMA_Abort_IT(htim->hdma[TIM_DMA_ID_CC2]); + break; + } + + case TIM_CHANNEL_3: + { + /* Disable the TIM Output Compare DMA request */ + __HAL_TIM_DISABLE_DMA(htim, TIM_DMA_CC3); + (void)HAL_DMA_Abort_IT(htim->hdma[TIM_DMA_ID_CC3]); + break; + } + + default: + status = HAL_ERROR; + break; + } + + if (status == HAL_OK) + { + /* Disable the Capture compare channel N */ + TIM_CCxNChannelCmd(htim->Instance, Channel, TIM_CCxN_DISABLE); + + /* Disable the Main Output */ + __HAL_TIM_MOE_DISABLE(htim); + + /* Disable the Peripheral */ + __HAL_TIM_DISABLE(htim); + + /* Set the TIM complementary channel state */ + TIM_CHANNEL_N_STATE_SET(htim, Channel, HAL_TIM_CHANNEL_STATE_READY); + } + + /* Return function status */ + return status; +} + +/** + * @} + */ + +/** @defgroup TIMEx_Exported_Functions_Group3 Extended Timer Complementary PWM functions + * @brief Timer Complementary PWM functions + * +@verbatim + ============================================================================== + ##### Timer Complementary PWM functions ##### + ============================================================================== + [..] + This section provides functions allowing to: + (+) Start the Complementary PWM. + (+) Stop the Complementary PWM. + (+) Start the Complementary PWM and enable interrupts. + (+) Stop the Complementary PWM and disable interrupts. + (+) Start the Complementary PWM and enable DMA transfers. + (+) Stop the Complementary PWM and disable DMA transfers. + (+) Start the Complementary Input Capture measurement. + (+) Stop the Complementary Input Capture. + (+) Start the Complementary Input Capture and enable interrupts. + (+) Stop the Complementary Input Capture and disable interrupts. + (+) Start the Complementary Input Capture and enable DMA transfers. + (+) Stop the Complementary Input Capture and disable DMA transfers. + (+) Start the Complementary One Pulse generation. + (+) Stop the Complementary One Pulse. + (+) Start the Complementary One Pulse and enable interrupts. + (+) Stop the Complementary One Pulse and disable interrupts. + +@endverbatim + * @{ + */ + +/** + * @brief Starts the PWM signal generation on the complementary output. 
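+ * @note  Typically paired with HAL_TIM_PWM_Start() when both the CHx and CHxN
+ *        outputs must be driven. A minimal sketch, assuming a hypothetical
+ *        htim1 handle already configured with HAL_TIM_PWM_Init() and
+ *        HAL_TIM_PWM_ConfigChannel():
+ * @code
+ *   HAL_TIM_PWM_Start(&htim1, TIM_CHANNEL_1);    // CH1  output
+ *   HAL_TIMEx_PWMN_Start(&htim1, TIM_CHANNEL_1); // CH1N complementary output
+ * @endcode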
+ * @param htim TIM handle + * @param Channel TIM Channel to be enabled + * This parameter can be one of the following values: + * @arg TIM_CHANNEL_1: TIM Channel 1 selected + * @arg TIM_CHANNEL_2: TIM Channel 2 selected + * @arg TIM_CHANNEL_3: TIM Channel 3 selected + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIMEx_PWMN_Start(TIM_HandleTypeDef *htim, uint32_t Channel) +{ + uint32_t tmpsmcr; + + /* Check the parameters */ + assert_param(IS_TIM_CCXN_INSTANCE(htim->Instance, Channel)); + + /* Check the TIM complementary channel state */ + if (TIM_CHANNEL_N_STATE_GET(htim, Channel) != HAL_TIM_CHANNEL_STATE_READY) + { + return HAL_ERROR; + } + + /* Set the TIM complementary channel state */ + TIM_CHANNEL_N_STATE_SET(htim, Channel, HAL_TIM_CHANNEL_STATE_BUSY); + + /* Enable the complementary PWM output */ + TIM_CCxNChannelCmd(htim->Instance, Channel, TIM_CCxN_ENABLE); + + /* Enable the Main Output */ + __HAL_TIM_MOE_ENABLE(htim); + + /* Enable the Peripheral, except in trigger mode where enable is automatically done with trigger */ + if (IS_TIM_SLAVE_INSTANCE(htim->Instance)) + { + tmpsmcr = htim->Instance->SMCR & TIM_SMCR_SMS; + if (!IS_TIM_SLAVEMODE_TRIGGER_ENABLED(tmpsmcr)) + { + __HAL_TIM_ENABLE(htim); + } + } + else + { + __HAL_TIM_ENABLE(htim); + } + + /* Return function status */ + return HAL_OK; +} + +/** + * @brief Stops the PWM signal generation on the complementary output. + * @param htim TIM handle + * @param Channel TIM Channel to be disabled + * This parameter can be one of the following values: + * @arg TIM_CHANNEL_1: TIM Channel 1 selected + * @arg TIM_CHANNEL_2: TIM Channel 2 selected + * @arg TIM_CHANNEL_3: TIM Channel 3 selected + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIMEx_PWMN_Stop(TIM_HandleTypeDef *htim, uint32_t Channel) +{ + /* Check the parameters */ + assert_param(IS_TIM_CCXN_INSTANCE(htim->Instance, Channel)); + + /* Disable the complementary PWM output */ + TIM_CCxNChannelCmd(htim->Instance, Channel, TIM_CCxN_DISABLE); + + /* Disable the Main Output */ + __HAL_TIM_MOE_DISABLE(htim); + + /* Disable the Peripheral */ + __HAL_TIM_DISABLE(htim); + + /* Set the TIM complementary channel state */ + TIM_CHANNEL_N_STATE_SET(htim, Channel, HAL_TIM_CHANNEL_STATE_READY); + + /* Return function status */ + return HAL_OK; +} + +/** + * @brief Starts the PWM signal generation in interrupt mode on the + * complementary output. 
+ * @param htim TIM handle + * @param Channel TIM Channel to be disabled + * This parameter can be one of the following values: + * @arg TIM_CHANNEL_1: TIM Channel 1 selected + * @arg TIM_CHANNEL_2: TIM Channel 2 selected + * @arg TIM_CHANNEL_3: TIM Channel 3 selected + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIMEx_PWMN_Start_IT(TIM_HandleTypeDef *htim, uint32_t Channel) +{ + HAL_StatusTypeDef status = HAL_OK; + uint32_t tmpsmcr; + + /* Check the parameters */ + assert_param(IS_TIM_CCXN_INSTANCE(htim->Instance, Channel)); + + /* Check the TIM complementary channel state */ + if (TIM_CHANNEL_N_STATE_GET(htim, Channel) != HAL_TIM_CHANNEL_STATE_READY) + { + return HAL_ERROR; + } + + /* Set the TIM complementary channel state */ + TIM_CHANNEL_N_STATE_SET(htim, Channel, HAL_TIM_CHANNEL_STATE_BUSY); + + switch (Channel) + { + case TIM_CHANNEL_1: + { + /* Enable the TIM Capture/Compare 1 interrupt */ + __HAL_TIM_ENABLE_IT(htim, TIM_IT_CC1); + break; + } + + case TIM_CHANNEL_2: + { + /* Enable the TIM Capture/Compare 2 interrupt */ + __HAL_TIM_ENABLE_IT(htim, TIM_IT_CC2); + break; + } + + case TIM_CHANNEL_3: + { + /* Enable the TIM Capture/Compare 3 interrupt */ + __HAL_TIM_ENABLE_IT(htim, TIM_IT_CC3); + break; + } + + default: + status = HAL_ERROR; + break; + } + + if (status == HAL_OK) + { + /* Enable the TIM Break interrupt */ + __HAL_TIM_ENABLE_IT(htim, TIM_IT_BREAK); + + /* Enable the complementary PWM output */ + TIM_CCxNChannelCmd(htim->Instance, Channel, TIM_CCxN_ENABLE); + + /* Enable the Main Output */ + __HAL_TIM_MOE_ENABLE(htim); + + /* Enable the Peripheral, except in trigger mode where enable is automatically done with trigger */ + if (IS_TIM_SLAVE_INSTANCE(htim->Instance)) + { + tmpsmcr = htim->Instance->SMCR & TIM_SMCR_SMS; + if (!IS_TIM_SLAVEMODE_TRIGGER_ENABLED(tmpsmcr)) + { + __HAL_TIM_ENABLE(htim); + } + } + else + { + __HAL_TIM_ENABLE(htim); + } + } + + /* Return function status */ + return status; +} + +/** + * @brief Stops the PWM signal generation in interrupt mode on the + * complementary output. 
+ * @param htim TIM handle + * @param Channel TIM Channel to be disabled + * This parameter can be one of the following values: + * @arg TIM_CHANNEL_1: TIM Channel 1 selected + * @arg TIM_CHANNEL_2: TIM Channel 2 selected + * @arg TIM_CHANNEL_3: TIM Channel 3 selected + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIMEx_PWMN_Stop_IT(TIM_HandleTypeDef *htim, uint32_t Channel) +{ + HAL_StatusTypeDef status = HAL_OK; + uint32_t tmpccer; + + /* Check the parameters */ + assert_param(IS_TIM_CCXN_INSTANCE(htim->Instance, Channel)); + + switch (Channel) + { + case TIM_CHANNEL_1: + { + /* Disable the TIM Capture/Compare 1 interrupt */ + __HAL_TIM_DISABLE_IT(htim, TIM_IT_CC1); + break; + } + + case TIM_CHANNEL_2: + { + /* Disable the TIM Capture/Compare 2 interrupt */ + __HAL_TIM_DISABLE_IT(htim, TIM_IT_CC2); + break; + } + + case TIM_CHANNEL_3: + { + /* Disable the TIM Capture/Compare 3 interrupt */ + __HAL_TIM_DISABLE_IT(htim, TIM_IT_CC3); + break; + } + + default: + status = HAL_ERROR; + break; + } + + if (status == HAL_OK) + { + /* Disable the complementary PWM output */ + TIM_CCxNChannelCmd(htim->Instance, Channel, TIM_CCxN_DISABLE); + + /* Disable the TIM Break interrupt (only if no more channel is active) */ + tmpccer = htim->Instance->CCER; + if ((tmpccer & (TIM_CCER_CC1NE | TIM_CCER_CC2NE | TIM_CCER_CC3NE)) == (uint32_t)RESET) + { + __HAL_TIM_DISABLE_IT(htim, TIM_IT_BREAK); + } + + /* Disable the Main Output */ + __HAL_TIM_MOE_DISABLE(htim); + + /* Disable the Peripheral */ + __HAL_TIM_DISABLE(htim); + + /* Set the TIM complementary channel state */ + TIM_CHANNEL_N_STATE_SET(htim, Channel, HAL_TIM_CHANNEL_STATE_READY); + } + + /* Return function status */ + return status; +} + +/** + * @brief Starts the TIM PWM signal generation in DMA mode on the + * complementary output + * @param htim TIM handle + * @param Channel TIM Channel to be enabled + * This parameter can be one of the following values: + * @arg TIM_CHANNEL_1: TIM Channel 1 selected + * @arg TIM_CHANNEL_2: TIM Channel 2 selected + * @arg TIM_CHANNEL_3: TIM Channel 3 selected + * @param pData The source Buffer address. 
+ * @param Length The length of data to be transferred from memory to TIM peripheral + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIMEx_PWMN_Start_DMA(TIM_HandleTypeDef *htim, uint32_t Channel, uint32_t *pData, uint16_t Length) +{ + HAL_StatusTypeDef status = HAL_OK; + uint32_t tmpsmcr; + + /* Check the parameters */ + assert_param(IS_TIM_CCXN_INSTANCE(htim->Instance, Channel)); + + /* Set the TIM complementary channel state */ + if (TIM_CHANNEL_N_STATE_GET(htim, Channel) == HAL_TIM_CHANNEL_STATE_BUSY) + { + return HAL_BUSY; + } + else if (TIM_CHANNEL_N_STATE_GET(htim, Channel) == HAL_TIM_CHANNEL_STATE_READY) + { + if ((pData == NULL) && (Length > 0U)) + { + return HAL_ERROR; + } + else + { + TIM_CHANNEL_N_STATE_SET(htim, Channel, HAL_TIM_CHANNEL_STATE_BUSY); + } + } + else + { + return HAL_ERROR; + } + + switch (Channel) + { + case TIM_CHANNEL_1: + { + /* Set the DMA compare callbacks */ + htim->hdma[TIM_DMA_ID_CC1]->XferCpltCallback = TIM_DMADelayPulseNCplt; + htim->hdma[TIM_DMA_ID_CC1]->XferHalfCpltCallback = TIM_DMADelayPulseHalfCplt; + + /* Set the DMA error callback */ + htim->hdma[TIM_DMA_ID_CC1]->XferErrorCallback = TIM_DMAErrorCCxN ; + + /* Enable the DMA stream */ + if (HAL_DMA_Start_IT(htim->hdma[TIM_DMA_ID_CC1], (uint32_t)pData, (uint32_t)&htim->Instance->CCR1, + Length) != HAL_OK) + { + /* Return error status */ + return HAL_ERROR; + } + /* Enable the TIM Capture/Compare 1 DMA request */ + __HAL_TIM_ENABLE_DMA(htim, TIM_DMA_CC1); + break; + } + + case TIM_CHANNEL_2: + { + /* Set the DMA compare callbacks */ + htim->hdma[TIM_DMA_ID_CC2]->XferCpltCallback = TIM_DMADelayPulseNCplt; + htim->hdma[TIM_DMA_ID_CC2]->XferHalfCpltCallback = TIM_DMADelayPulseHalfCplt; + + /* Set the DMA error callback */ + htim->hdma[TIM_DMA_ID_CC2]->XferErrorCallback = TIM_DMAErrorCCxN ; + + /* Enable the DMA stream */ + if (HAL_DMA_Start_IT(htim->hdma[TIM_DMA_ID_CC2], (uint32_t)pData, (uint32_t)&htim->Instance->CCR2, + Length) != HAL_OK) + { + /* Return error status */ + return HAL_ERROR; + } + /* Enable the TIM Capture/Compare 2 DMA request */ + __HAL_TIM_ENABLE_DMA(htim, TIM_DMA_CC2); + break; + } + + case TIM_CHANNEL_3: + { + /* Set the DMA compare callbacks */ + htim->hdma[TIM_DMA_ID_CC3]->XferCpltCallback = TIM_DMADelayPulseNCplt; + htim->hdma[TIM_DMA_ID_CC3]->XferHalfCpltCallback = TIM_DMADelayPulseHalfCplt; + + /* Set the DMA error callback */ + htim->hdma[TIM_DMA_ID_CC3]->XferErrorCallback = TIM_DMAErrorCCxN ; + + /* Enable the DMA stream */ + if (HAL_DMA_Start_IT(htim->hdma[TIM_DMA_ID_CC3], (uint32_t)pData, (uint32_t)&htim->Instance->CCR3, + Length) != HAL_OK) + { + /* Return error status */ + return HAL_ERROR; + } + /* Enable the TIM Capture/Compare 3 DMA request */ + __HAL_TIM_ENABLE_DMA(htim, TIM_DMA_CC3); + break; + } + + default: + status = HAL_ERROR; + break; + } + + if (status == HAL_OK) + { + /* Enable the complementary PWM output */ + TIM_CCxNChannelCmd(htim->Instance, Channel, TIM_CCxN_ENABLE); + + /* Enable the Main Output */ + __HAL_TIM_MOE_ENABLE(htim); + + /* Enable the Peripheral, except in trigger mode where enable is automatically done with trigger */ + if (IS_TIM_SLAVE_INSTANCE(htim->Instance)) + { + tmpsmcr = htim->Instance->SMCR & TIM_SMCR_SMS; + if (!IS_TIM_SLAVEMODE_TRIGGER_ENABLED(tmpsmcr)) + { + __HAL_TIM_ENABLE(htim); + } + } + else + { + __HAL_TIM_ENABLE(htim); + } + } + + /* Return function status */ + return status; +} + +/** + * @brief Stops the TIM PWM signal generation in DMA mode on the complementary + * output + * @param htim TIM handle + * @param Channel 
TIM Channel to be disabled + * This parameter can be one of the following values: + * @arg TIM_CHANNEL_1: TIM Channel 1 selected + * @arg TIM_CHANNEL_2: TIM Channel 2 selected + * @arg TIM_CHANNEL_3: TIM Channel 3 selected + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIMEx_PWMN_Stop_DMA(TIM_HandleTypeDef *htim, uint32_t Channel) +{ + HAL_StatusTypeDef status = HAL_OK; + + /* Check the parameters */ + assert_param(IS_TIM_CCXN_INSTANCE(htim->Instance, Channel)); + + switch (Channel) + { + case TIM_CHANNEL_1: + { + /* Disable the TIM Capture/Compare 1 DMA request */ + __HAL_TIM_DISABLE_DMA(htim, TIM_DMA_CC1); + (void)HAL_DMA_Abort_IT(htim->hdma[TIM_DMA_ID_CC1]); + break; + } + + case TIM_CHANNEL_2: + { + /* Disable the TIM Capture/Compare 2 DMA request */ + __HAL_TIM_DISABLE_DMA(htim, TIM_DMA_CC2); + (void)HAL_DMA_Abort_IT(htim->hdma[TIM_DMA_ID_CC2]); + break; + } + + case TIM_CHANNEL_3: + { + /* Disable the TIM Capture/Compare 3 DMA request */ + __HAL_TIM_DISABLE_DMA(htim, TIM_DMA_CC3); + (void)HAL_DMA_Abort_IT(htim->hdma[TIM_DMA_ID_CC3]); + break; + } + + default: + status = HAL_ERROR; + break; + } + + if (status == HAL_OK) + { + /* Disable the complementary PWM output */ + TIM_CCxNChannelCmd(htim->Instance, Channel, TIM_CCxN_DISABLE); + + /* Disable the Main Output */ + __HAL_TIM_MOE_DISABLE(htim); + + /* Disable the Peripheral */ + __HAL_TIM_DISABLE(htim); + + /* Set the TIM complementary channel state */ + TIM_CHANNEL_N_STATE_SET(htim, Channel, HAL_TIM_CHANNEL_STATE_READY); + } + + /* Return function status */ + return status; +} + +/** + * @} + */ + +/** @defgroup TIMEx_Exported_Functions_Group4 Extended Timer Complementary One Pulse functions + * @brief Timer Complementary One Pulse functions + * +@verbatim + ============================================================================== + ##### Timer Complementary One Pulse functions ##### + ============================================================================== + [..] + This section provides functions allowing to: + (+) Start the Complementary One Pulse generation. + (+) Stop the Complementary One Pulse. + (+) Start the Complementary One Pulse and enable interrupts. + (+) Stop the Complementary One Pulse and disable interrupts. + +@endverbatim + * @{ + */ + +/** + * @brief Starts the TIM One Pulse signal generation on the complementary + * output. + * @note OutputChannel must match the pulse output channel chosen when calling + * @ref HAL_TIM_OnePulse_ConfigChannel(). + * @param htim TIM One Pulse handle + * @param OutputChannel pulse output channel to enable + * This parameter can be one of the following values: + * @arg TIM_CHANNEL_1: TIM Channel 1 selected + * @arg TIM_CHANNEL_2: TIM Channel 2 selected + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIMEx_OnePulseN_Start(TIM_HandleTypeDef *htim, uint32_t OutputChannel) +{ + uint32_t input_channel = (OutputChannel == TIM_CHANNEL_1) ? 
TIM_CHANNEL_2 : TIM_CHANNEL_1; + HAL_TIM_ChannelStateTypeDef channel_1_state = TIM_CHANNEL_STATE_GET(htim, TIM_CHANNEL_1); + HAL_TIM_ChannelStateTypeDef channel_2_state = TIM_CHANNEL_STATE_GET(htim, TIM_CHANNEL_2); + HAL_TIM_ChannelStateTypeDef complementary_channel_1_state = TIM_CHANNEL_N_STATE_GET(htim, TIM_CHANNEL_1); + HAL_TIM_ChannelStateTypeDef complementary_channel_2_state = TIM_CHANNEL_N_STATE_GET(htim, TIM_CHANNEL_2); + + /* Check the parameters */ + assert_param(IS_TIM_CCXN_INSTANCE(htim->Instance, OutputChannel)); + + /* Check the TIM channels state */ + if ((channel_1_state != HAL_TIM_CHANNEL_STATE_READY) + || (channel_2_state != HAL_TIM_CHANNEL_STATE_READY) + || (complementary_channel_1_state != HAL_TIM_CHANNEL_STATE_READY) + || (complementary_channel_2_state != HAL_TIM_CHANNEL_STATE_READY)) + { + return HAL_ERROR; + } + + /* Set the TIM channels state */ + TIM_CHANNEL_STATE_SET(htim, TIM_CHANNEL_1, HAL_TIM_CHANNEL_STATE_BUSY); + TIM_CHANNEL_STATE_SET(htim, TIM_CHANNEL_2, HAL_TIM_CHANNEL_STATE_BUSY); + TIM_CHANNEL_N_STATE_SET(htim, TIM_CHANNEL_1, HAL_TIM_CHANNEL_STATE_BUSY); + TIM_CHANNEL_N_STATE_SET(htim, TIM_CHANNEL_2, HAL_TIM_CHANNEL_STATE_BUSY); + + /* Enable the complementary One Pulse output channel and the Input Capture channel */ + TIM_CCxNChannelCmd(htim->Instance, OutputChannel, TIM_CCxN_ENABLE); + TIM_CCxChannelCmd(htim->Instance, input_channel, TIM_CCx_ENABLE); + + /* Enable the Main Output */ + __HAL_TIM_MOE_ENABLE(htim); + + /* Return function status */ + return HAL_OK; +} + +/** + * @brief Stops the TIM One Pulse signal generation on the complementary + * output. + * @note OutputChannel must match the pulse output channel chosen when calling + * @ref HAL_TIM_OnePulse_ConfigChannel(). + * @param htim TIM One Pulse handle + * @param OutputChannel pulse output channel to disable + * This parameter can be one of the following values: + * @arg TIM_CHANNEL_1: TIM Channel 1 selected + * @arg TIM_CHANNEL_2: TIM Channel 2 selected + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIMEx_OnePulseN_Stop(TIM_HandleTypeDef *htim, uint32_t OutputChannel) +{ + uint32_t input_channel = (OutputChannel == TIM_CHANNEL_1) ? TIM_CHANNEL_2 : TIM_CHANNEL_1; + + /* Check the parameters */ + assert_param(IS_TIM_CCXN_INSTANCE(htim->Instance, OutputChannel)); + + /* Disable the complementary One Pulse output channel and the Input Capture channel */ + TIM_CCxNChannelCmd(htim->Instance, OutputChannel, TIM_CCxN_DISABLE); + TIM_CCxChannelCmd(htim->Instance, input_channel, TIM_CCx_DISABLE); + + /* Disable the Main Output */ + __HAL_TIM_MOE_DISABLE(htim); + + /* Disable the Peripheral */ + __HAL_TIM_DISABLE(htim); + + /* Set the TIM channels state */ + TIM_CHANNEL_STATE_SET(htim, TIM_CHANNEL_1, HAL_TIM_CHANNEL_STATE_READY); + TIM_CHANNEL_STATE_SET(htim, TIM_CHANNEL_2, HAL_TIM_CHANNEL_STATE_READY); + TIM_CHANNEL_N_STATE_SET(htim, TIM_CHANNEL_1, HAL_TIM_CHANNEL_STATE_READY); + TIM_CHANNEL_N_STATE_SET(htim, TIM_CHANNEL_2, HAL_TIM_CHANNEL_STATE_READY); + + /* Return function status */ + return HAL_OK; +} + +/** + * @brief Starts the TIM One Pulse signal generation in interrupt mode on the + * complementary channel. + * @note OutputChannel must match the pulse output channel chosen when calling + * @ref HAL_TIM_OnePulse_ConfigChannel(). 
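+ * @note  A minimal sketch, assuming a hypothetical htim1 handle whose pulse is
+ *        generated on channel 1 with channel 2 as the trigger input, and an
+ *        illustrative sConfig (TIM_OnePulse_InitTypeDef) filled elsewhere:
+ * @code
+ *   HAL_TIM_OnePulse_ConfigChannel(&htim1, &sConfig, TIM_CHANNEL_1, TIM_CHANNEL_2);
+ *   HAL_TIMEx_OnePulseN_Start_IT(&htim1, TIM_CHANNEL_1);
+ * @endcode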
+ * @param htim TIM One Pulse handle + * @param OutputChannel pulse output channel to enable + * This parameter can be one of the following values: + * @arg TIM_CHANNEL_1: TIM Channel 1 selected + * @arg TIM_CHANNEL_2: TIM Channel 2 selected + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIMEx_OnePulseN_Start_IT(TIM_HandleTypeDef *htim, uint32_t OutputChannel) +{ + uint32_t input_channel = (OutputChannel == TIM_CHANNEL_1) ? TIM_CHANNEL_2 : TIM_CHANNEL_1; + HAL_TIM_ChannelStateTypeDef channel_1_state = TIM_CHANNEL_STATE_GET(htim, TIM_CHANNEL_1); + HAL_TIM_ChannelStateTypeDef channel_2_state = TIM_CHANNEL_STATE_GET(htim, TIM_CHANNEL_2); + HAL_TIM_ChannelStateTypeDef complementary_channel_1_state = TIM_CHANNEL_N_STATE_GET(htim, TIM_CHANNEL_1); + HAL_TIM_ChannelStateTypeDef complementary_channel_2_state = TIM_CHANNEL_N_STATE_GET(htim, TIM_CHANNEL_2); + + /* Check the parameters */ + assert_param(IS_TIM_CCXN_INSTANCE(htim->Instance, OutputChannel)); + + /* Check the TIM channels state */ + if ((channel_1_state != HAL_TIM_CHANNEL_STATE_READY) + || (channel_2_state != HAL_TIM_CHANNEL_STATE_READY) + || (complementary_channel_1_state != HAL_TIM_CHANNEL_STATE_READY) + || (complementary_channel_2_state != HAL_TIM_CHANNEL_STATE_READY)) + { + return HAL_ERROR; + } + + /* Set the TIM channels state */ + TIM_CHANNEL_STATE_SET(htim, TIM_CHANNEL_1, HAL_TIM_CHANNEL_STATE_BUSY); + TIM_CHANNEL_STATE_SET(htim, TIM_CHANNEL_2, HAL_TIM_CHANNEL_STATE_BUSY); + TIM_CHANNEL_N_STATE_SET(htim, TIM_CHANNEL_1, HAL_TIM_CHANNEL_STATE_BUSY); + TIM_CHANNEL_N_STATE_SET(htim, TIM_CHANNEL_2, HAL_TIM_CHANNEL_STATE_BUSY); + + /* Enable the TIM Capture/Compare 1 interrupt */ + __HAL_TIM_ENABLE_IT(htim, TIM_IT_CC1); + + /* Enable the TIM Capture/Compare 2 interrupt */ + __HAL_TIM_ENABLE_IT(htim, TIM_IT_CC2); + + /* Enable the complementary One Pulse output channel and the Input Capture channel */ + TIM_CCxNChannelCmd(htim->Instance, OutputChannel, TIM_CCxN_ENABLE); + TIM_CCxChannelCmd(htim->Instance, input_channel, TIM_CCx_ENABLE); + + /* Enable the Main Output */ + __HAL_TIM_MOE_ENABLE(htim); + + /* Return function status */ + return HAL_OK; +} + +/** + * @brief Stops the TIM One Pulse signal generation in interrupt mode on the + * complementary channel. + * @note OutputChannel must match the pulse output channel chosen when calling + * @ref HAL_TIM_OnePulse_ConfigChannel(). + * @param htim TIM One Pulse handle + * @param OutputChannel pulse output channel to disable + * This parameter can be one of the following values: + * @arg TIM_CHANNEL_1: TIM Channel 1 selected + * @arg TIM_CHANNEL_2: TIM Channel 2 selected + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIMEx_OnePulseN_Stop_IT(TIM_HandleTypeDef *htim, uint32_t OutputChannel) +{ + uint32_t input_channel = (OutputChannel == TIM_CHANNEL_1) ? 
TIM_CHANNEL_2 : TIM_CHANNEL_1; + + /* Check the parameters */ + assert_param(IS_TIM_CCXN_INSTANCE(htim->Instance, OutputChannel)); + + /* Disable the TIM Capture/Compare 1 interrupt */ + __HAL_TIM_DISABLE_IT(htim, TIM_IT_CC1); + + /* Disable the TIM Capture/Compare 2 interrupt */ + __HAL_TIM_DISABLE_IT(htim, TIM_IT_CC2); + + /* Disable the complementary One Pulse output channel and the Input Capture channel */ + TIM_CCxNChannelCmd(htim->Instance, OutputChannel, TIM_CCxN_DISABLE); + TIM_CCxChannelCmd(htim->Instance, input_channel, TIM_CCx_DISABLE); + + /* Disable the Main Output */ + __HAL_TIM_MOE_DISABLE(htim); + + /* Disable the Peripheral */ + __HAL_TIM_DISABLE(htim); + + /* Set the TIM channels state */ + TIM_CHANNEL_STATE_SET(htim, TIM_CHANNEL_1, HAL_TIM_CHANNEL_STATE_READY); + TIM_CHANNEL_STATE_SET(htim, TIM_CHANNEL_2, HAL_TIM_CHANNEL_STATE_READY); + TIM_CHANNEL_N_STATE_SET(htim, TIM_CHANNEL_1, HAL_TIM_CHANNEL_STATE_READY); + TIM_CHANNEL_N_STATE_SET(htim, TIM_CHANNEL_2, HAL_TIM_CHANNEL_STATE_READY); + + /* Return function status */ + return HAL_OK; +} + +/** + * @} + */ + +/** @defgroup TIMEx_Exported_Functions_Group5 Extended Peripheral Control functions + * @brief Peripheral Control functions + * +@verbatim + ============================================================================== + ##### Peripheral Control functions ##### + ============================================================================== + [..] + This section provides functions allowing to: + (+) Configure the commutation event in case of use of the Hall sensor interface. + (+) Configure Output channels for OC and PWM mode. + + (+) Configure Complementary channels, break features and dead time. + (+) Configure Master synchronization. + (+) Configure timer remapping capabilities. + (+) Enable or disable channel grouping. + +@endverbatim + * @{ + */ + +/** + * @brief Configure the TIM commutation event sequence. + * @note This function is mandatory to use the commutation event in order to + * update the configuration at each commutation detection on the TRGI input of the Timer, + * the typical use of this feature is with the use of another Timer(interface Timer) + * configured in Hall sensor interface, this interface Timer will generate the + * commutation at its TRGO output (connected to Timer used in this function) each time + * the TI1 of the Interface Timer detect a commutation at its input TI1. 
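+ * @note  A minimal sketch, assuming a hypothetical setup in which TIM4 runs the
+ *        Hall sensor interface and its TRGO is routed to the ITR3 input of the
+ *        TIM1 instance driving the outputs (handle names are illustrative):
+ * @code
+ *   HAL_TIMEx_ConfigCommutEvent(&htim1, TIM_TS_ITR3, TIM_COMMUTATION_TRGI);
+ * @endcode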
+ * @param htim TIM handle + * @param InputTrigger the Internal trigger corresponding to the Timer Interfacing with the Hall sensor + * This parameter can be one of the following values: + * @arg TIM_TS_ITR0: Internal trigger 0 selected + * @arg TIM_TS_ITR1: Internal trigger 1 selected + * @arg TIM_TS_ITR2: Internal trigger 2 selected + * @arg TIM_TS_ITR3: Internal trigger 3 selected + * @arg TIM_TS_NONE: No trigger is needed + * @param CommutationSource the Commutation Event source + * This parameter can be one of the following values: + * @arg TIM_COMMUTATION_TRGI: Commutation source is the TRGI of the Interface Timer + * @arg TIM_COMMUTATION_SOFTWARE: Commutation source is set by software using the COMG bit + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIMEx_ConfigCommutEvent(TIM_HandleTypeDef *htim, uint32_t InputTrigger, + uint32_t CommutationSource) +{ + /* Check the parameters */ + assert_param(IS_TIM_COMMUTATION_EVENT_INSTANCE(htim->Instance)); + assert_param(IS_TIM_INTERNAL_TRIGGEREVENT_SELECTION(InputTrigger)); + + __HAL_LOCK(htim); + + if ((InputTrigger == TIM_TS_ITR0) || (InputTrigger == TIM_TS_ITR1) || + (InputTrigger == TIM_TS_ITR2) || (InputTrigger == TIM_TS_ITR3)) + { + /* Select the Input trigger */ + htim->Instance->SMCR &= ~TIM_SMCR_TS; + htim->Instance->SMCR |= InputTrigger; + } + + /* Select the Capture Compare preload feature */ + htim->Instance->CR2 |= TIM_CR2_CCPC; + /* Select the Commutation event source */ + htim->Instance->CR2 &= ~TIM_CR2_CCUS; + htim->Instance->CR2 |= CommutationSource; + + /* Disable Commutation Interrupt */ + __HAL_TIM_DISABLE_IT(htim, TIM_IT_COM); + + /* Disable Commutation DMA request */ + __HAL_TIM_DISABLE_DMA(htim, TIM_DMA_COM); + + __HAL_UNLOCK(htim); + + return HAL_OK; +} + +/** + * @brief Configure the TIM commutation event sequence with interrupt. + * @note This function is mandatory to use the commutation event in order to + * update the configuration at each commutation detection on the TRGI input of the Timer, + * the typical use of this feature is with the use of another Timer(interface Timer) + * configured in Hall sensor interface, this interface Timer will generate the + * commutation at its TRGO output (connected to Timer used in this function) each time + * the TI1 of the Interface Timer detect a commutation at its input TI1. 
+ * @param htim TIM handle + * @param InputTrigger the Internal trigger corresponding to the Timer Interfacing with the Hall sensor + * This parameter can be one of the following values: + * @arg TIM_TS_ITR0: Internal trigger 0 selected + * @arg TIM_TS_ITR1: Internal trigger 1 selected + * @arg TIM_TS_ITR2: Internal trigger 2 selected + * @arg TIM_TS_ITR3: Internal trigger 3 selected + * @arg TIM_TS_NONE: No trigger is needed + * @param CommutationSource the Commutation Event source + * This parameter can be one of the following values: + * @arg TIM_COMMUTATION_TRGI: Commutation source is the TRGI of the Interface Timer + * @arg TIM_COMMUTATION_SOFTWARE: Commutation source is set by software using the COMG bit + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIMEx_ConfigCommutEvent_IT(TIM_HandleTypeDef *htim, uint32_t InputTrigger, + uint32_t CommutationSource) +{ + /* Check the parameters */ + assert_param(IS_TIM_COMMUTATION_EVENT_INSTANCE(htim->Instance)); + assert_param(IS_TIM_INTERNAL_TRIGGEREVENT_SELECTION(InputTrigger)); + + __HAL_LOCK(htim); + + if ((InputTrigger == TIM_TS_ITR0) || (InputTrigger == TIM_TS_ITR1) || + (InputTrigger == TIM_TS_ITR2) || (InputTrigger == TIM_TS_ITR3)) + { + /* Select the Input trigger */ + htim->Instance->SMCR &= ~TIM_SMCR_TS; + htim->Instance->SMCR |= InputTrigger; + } + + /* Select the Capture Compare preload feature */ + htim->Instance->CR2 |= TIM_CR2_CCPC; + /* Select the Commutation event source */ + htim->Instance->CR2 &= ~TIM_CR2_CCUS; + htim->Instance->CR2 |= CommutationSource; + + /* Disable Commutation DMA request */ + __HAL_TIM_DISABLE_DMA(htim, TIM_DMA_COM); + + /* Enable the Commutation Interrupt */ + __HAL_TIM_ENABLE_IT(htim, TIM_IT_COM); + + __HAL_UNLOCK(htim); + + return HAL_OK; +} + +/** + * @brief Configure the TIM commutation event sequence with DMA. + * @note This function is mandatory to use the commutation event in order to + * update the configuration at each commutation detection on the TRGI input of the Timer, + * the typical use of this feature is with the use of another Timer(interface Timer) + * configured in Hall sensor interface, this interface Timer will generate the + * commutation at its TRGO output (connected to Timer used in this function) each time + * the TI1 of the Interface Timer detect a commutation at its input TI1. 
+ * @note The DMA channel itself must be configured in the user application; this function only sets the COMDE bit + * @param htim TIM handle + * @param InputTrigger the Internal trigger corresponding to the Timer Interfacing with the Hall sensor + * This parameter can be one of the following values: + * @arg TIM_TS_ITR0: Internal trigger 0 selected + * @arg TIM_TS_ITR1: Internal trigger 1 selected + * @arg TIM_TS_ITR2: Internal trigger 2 selected + * @arg TIM_TS_ITR3: Internal trigger 3 selected + * @arg TIM_TS_NONE: No trigger is needed + * @param CommutationSource the Commutation Event source + * This parameter can be one of the following values: + * @arg TIM_COMMUTATION_TRGI: Commutation source is the TRGI of the Interface Timer + * @arg TIM_COMMUTATION_SOFTWARE: Commutation source is set by software using the COMG bit + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIMEx_ConfigCommutEvent_DMA(TIM_HandleTypeDef *htim, uint32_t InputTrigger, + uint32_t CommutationSource) +{ + /* Check the parameters */ + assert_param(IS_TIM_COMMUTATION_EVENT_INSTANCE(htim->Instance)); + assert_param(IS_TIM_INTERNAL_TRIGGEREVENT_SELECTION(InputTrigger)); + + __HAL_LOCK(htim); + + if ((InputTrigger == TIM_TS_ITR0) || (InputTrigger == TIM_TS_ITR1) || + (InputTrigger == TIM_TS_ITR2) || (InputTrigger == TIM_TS_ITR3)) + { + /* Select the Input trigger */ + htim->Instance->SMCR &= ~TIM_SMCR_TS; + htim->Instance->SMCR |= InputTrigger; + } + + /* Select the Capture Compare preload feature */ + htim->Instance->CR2 |= TIM_CR2_CCPC; + /* Select the Commutation event source */ + htim->Instance->CR2 &= ~TIM_CR2_CCUS; + htim->Instance->CR2 |= CommutationSource; + + /* Set the DMA Commutation Callbacks */ + htim->hdma[TIM_DMA_ID_COMMUTATION]->XferCpltCallback = TIMEx_DMACommutationCplt; + htim->hdma[TIM_DMA_ID_COMMUTATION]->XferHalfCpltCallback = TIMEx_DMACommutationHalfCplt; + /* Set the DMA error callback */ + htim->hdma[TIM_DMA_ID_COMMUTATION]->XferErrorCallback = TIM_DMAError; + + /* Disable Commutation Interrupt */ + __HAL_TIM_DISABLE_IT(htim, TIM_IT_COM); + + /* Enable the Commutation DMA Request */ + __HAL_TIM_ENABLE_DMA(htim, TIM_DMA_COM); + + __HAL_UNLOCK(htim); + + return HAL_OK; +} + +/** + * @brief Configures the TIM in master mode. + * @param htim TIM handle. + * @param sMasterConfig pointer to a TIM_MasterConfigTypeDef structure that + * contains the selected trigger output (TRGO) and the Master/Slave + * mode.
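+ * @note  A minimal sketch, assuming a hypothetical htim2 handle whose update
+ *        event is used as a trigger for another peripheral (the values shown
+ *        are one possible configuration, not a recommendation):
+ * @code
+ *   TIM_MasterConfigTypeDef sMasterConfig = {0};
+ *   sMasterConfig.MasterOutputTrigger = TIM_TRGO_UPDATE;
+ *   sMasterConfig.MasterSlaveMode     = TIM_MASTERSLAVEMODE_DISABLE;
+ *   if (HAL_TIMEx_MasterConfigSynchronization(&htim2, &sMasterConfig) != HAL_OK)
+ *   {
+ *     Error_Handler(); // hypothetical application error hook
+ *   }
+ * @endcode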
+ * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIMEx_MasterConfigSynchronization(TIM_HandleTypeDef *htim, + TIM_MasterConfigTypeDef *sMasterConfig) +{ + uint32_t tmpcr2; + uint32_t tmpsmcr; + + /* Check the parameters */ + assert_param(IS_TIM_MASTER_INSTANCE(htim->Instance)); + assert_param(IS_TIM_TRGO_SOURCE(sMasterConfig->MasterOutputTrigger)); + assert_param(IS_TIM_MSM_STATE(sMasterConfig->MasterSlaveMode)); + + /* Check input state */ + __HAL_LOCK(htim); + + /* Change the handler state */ + htim->State = HAL_TIM_STATE_BUSY; + + /* Get the TIMx CR2 register value */ + tmpcr2 = htim->Instance->CR2; + + /* Get the TIMx SMCR register value */ + tmpsmcr = htim->Instance->SMCR; + + /* If the timer supports ADC synchronization through TRGO2, set the master mode selection 2 */ + if (IS_TIM_TRGO2_INSTANCE(htim->Instance)) + { + /* Check the parameters */ + assert_param(IS_TIM_TRGO2_SOURCE(sMasterConfig->MasterOutputTrigger2)); + + /* Clear the MMS2 bits */ + tmpcr2 &= ~TIM_CR2_MMS2; + /* Select the TRGO2 source*/ + tmpcr2 |= sMasterConfig->MasterOutputTrigger2; + } + + /* Reset the MMS Bits */ + tmpcr2 &= ~TIM_CR2_MMS; + /* Select the TRGO source */ + tmpcr2 |= sMasterConfig->MasterOutputTrigger; + + /* Update TIMx CR2 */ + htim->Instance->CR2 = tmpcr2; + + if (IS_TIM_SLAVE_INSTANCE(htim->Instance)) + { + /* Reset the MSM Bit */ + tmpsmcr &= ~TIM_SMCR_MSM; + /* Set master mode */ + tmpsmcr |= sMasterConfig->MasterSlaveMode; + + /* Update TIMx SMCR */ + htim->Instance->SMCR = tmpsmcr; + } + + /* Change the htim state */ + htim->State = HAL_TIM_STATE_READY; + + __HAL_UNLOCK(htim); + + return HAL_OK; +} + +/** + * @brief Configures the Break feature, dead time, Lock level, OSSI/OSSR State + * and the AOE(automatic output enable). + * @param htim TIM handle + * @param sBreakDeadTimeConfig pointer to a TIM_ConfigBreakDeadConfigTypeDef structure that + * contains the BDTR Register configuration information for the TIM peripheral. + * @note Interrupts can be generated when an active level is detected on the + * break input, the break 2 input or the system break input. Break + * interrupt can be enabled by calling the @ref __HAL_TIM_ENABLE_IT macro. 
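+ * @note  A minimal sketch, assuming a hypothetical htim1 handle on a timer
+ *        instance with break/dead-time support; the field values are
+ *        placeholders to be adapted to the application's gate-driver needs:
+ * @code
+ *   TIM_BreakDeadTimeConfigTypeDef sBreakDeadTimeConfig = {0};
+ *   sBreakDeadTimeConfig.OffStateRunMode  = TIM_OSSR_DISABLE;
+ *   sBreakDeadTimeConfig.OffStateIDLEMode = TIM_OSSI_DISABLE;
+ *   sBreakDeadTimeConfig.LockLevel        = TIM_LOCKLEVEL_OFF;
+ *   sBreakDeadTimeConfig.DeadTime         = 100U;  // raw DTG bit-field value
+ *   sBreakDeadTimeConfig.BreakState       = TIM_BREAK_ENABLE;
+ *   sBreakDeadTimeConfig.BreakPolarity    = TIM_BREAKPOLARITY_HIGH;
+ *   sBreakDeadTimeConfig.BreakFilter      = 0U;
+ *   sBreakDeadTimeConfig.AutomaticOutput  = TIM_AUTOMATICOUTPUT_DISABLE;
+ *   if (HAL_TIMEx_ConfigBreakDeadTime(&htim1, &sBreakDeadTimeConfig) != HAL_OK)
+ *   {
+ *     Error_Handler(); // hypothetical application error hook
+ *   }
+ * @endcode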
+ * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIMEx_ConfigBreakDeadTime(TIM_HandleTypeDef *htim, + TIM_BreakDeadTimeConfigTypeDef *sBreakDeadTimeConfig) +{ + /* Keep this variable initialized to 0 as it is used to configure BDTR register */ + uint32_t tmpbdtr = 0U; + + /* Check the parameters */ + assert_param(IS_TIM_BREAK_INSTANCE(htim->Instance)); + assert_param(IS_TIM_OSSR_STATE(sBreakDeadTimeConfig->OffStateRunMode)); + assert_param(IS_TIM_OSSI_STATE(sBreakDeadTimeConfig->OffStateIDLEMode)); + assert_param(IS_TIM_LOCK_LEVEL(sBreakDeadTimeConfig->LockLevel)); + assert_param(IS_TIM_DEADTIME(sBreakDeadTimeConfig->DeadTime)); + assert_param(IS_TIM_BREAK_STATE(sBreakDeadTimeConfig->BreakState)); + assert_param(IS_TIM_BREAK_POLARITY(sBreakDeadTimeConfig->BreakPolarity)); + assert_param(IS_TIM_BREAK_FILTER(sBreakDeadTimeConfig->BreakFilter)); + assert_param(IS_TIM_AUTOMATIC_OUTPUT_STATE(sBreakDeadTimeConfig->AutomaticOutput)); + + /* Check input state */ + __HAL_LOCK(htim); + + /* Set the Lock level, the Break enable Bit and the Polarity, the OSSR State, + the OSSI State, the dead time value and the Automatic Output Enable Bit */ + + /* Set the BDTR bits */ + MODIFY_REG(tmpbdtr, TIM_BDTR_DTG, sBreakDeadTimeConfig->DeadTime); + MODIFY_REG(tmpbdtr, TIM_BDTR_LOCK, sBreakDeadTimeConfig->LockLevel); + MODIFY_REG(tmpbdtr, TIM_BDTR_OSSI, sBreakDeadTimeConfig->OffStateIDLEMode); + MODIFY_REG(tmpbdtr, TIM_BDTR_OSSR, sBreakDeadTimeConfig->OffStateRunMode); + MODIFY_REG(tmpbdtr, TIM_BDTR_BKE, sBreakDeadTimeConfig->BreakState); + MODIFY_REG(tmpbdtr, TIM_BDTR_BKP, sBreakDeadTimeConfig->BreakPolarity); + MODIFY_REG(tmpbdtr, TIM_BDTR_AOE, sBreakDeadTimeConfig->AutomaticOutput); + MODIFY_REG(tmpbdtr, TIM_BDTR_BKF, (sBreakDeadTimeConfig->BreakFilter << TIM_BDTR_BKF_Pos)); + + if (IS_TIM_BKIN2_INSTANCE(htim->Instance)) + { + /* Check the parameters */ + assert_param(IS_TIM_BREAK2_STATE(sBreakDeadTimeConfig->Break2State)); + assert_param(IS_TIM_BREAK2_POLARITY(sBreakDeadTimeConfig->Break2Polarity)); + assert_param(IS_TIM_BREAK_FILTER(sBreakDeadTimeConfig->Break2Filter)); + + /* Set the BREAK2 input related BDTR bits */ + MODIFY_REG(tmpbdtr, TIM_BDTR_BK2F, (sBreakDeadTimeConfig->Break2Filter << TIM_BDTR_BK2F_Pos)); + MODIFY_REG(tmpbdtr, TIM_BDTR_BK2E, sBreakDeadTimeConfig->Break2State); + MODIFY_REG(tmpbdtr, TIM_BDTR_BK2P, sBreakDeadTimeConfig->Break2Polarity); + } + + /* Set TIMx_BDTR */ + htim->Instance->BDTR = tmpbdtr; + + __HAL_UNLOCK(htim); + + return HAL_OK; +} +#if defined(TIM_BREAK_INPUT_SUPPORT) + +/** + * @brief Configures the break input source. + * @param htim TIM handle. 
+ * @param BreakInput Break input to configure + * This parameter can be one of the following values: + * @arg TIM_BREAKINPUT_BRK: Timer break input + * @arg TIM_BREAKINPUT_BRK2: Timer break 2 input + * @param sBreakInputConfig Break input source configuration + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIMEx_ConfigBreakInput(TIM_HandleTypeDef *htim, + uint32_t BreakInput, + TIMEx_BreakInputConfigTypeDef *sBreakInputConfig) + +{ + HAL_StatusTypeDef status = HAL_OK; + uint32_t tmporx; + uint32_t bkin_enable_mask; + uint32_t bkin_polarity_mask; + uint32_t bkin_enable_bitpos; + uint32_t bkin_polarity_bitpos; + + /* Check the parameters */ + assert_param(IS_TIM_BREAK_INSTANCE(htim->Instance)); + assert_param(IS_TIM_BREAKINPUT(BreakInput)); + assert_param(IS_TIM_BREAKINPUTSOURCE(sBreakInputConfig->Source)); + assert_param(IS_TIM_BREAKINPUTSOURCE_STATE(sBreakInputConfig->Enable)); +#if defined(DFSDM1_Channel0) + if (sBreakInputConfig->Source != TIM_BREAKINPUTSOURCE_DFSDM1) + { + assert_param(IS_TIM_BREAKINPUTSOURCE_POLARITY(sBreakInputConfig->Polarity)); + } +#else + assert_param(IS_TIM_BREAKINPUTSOURCE_POLARITY(sBreakInputConfig->Polarity)); +#endif /* DFSDM1_Channel0 */ + + /* Check input state */ + __HAL_LOCK(htim); + + switch (sBreakInputConfig->Source) + { + case TIM_BREAKINPUTSOURCE_BKIN: + { + bkin_enable_mask = TIM1_AF1_BKINE; + bkin_enable_bitpos = TIM1_AF1_BKINE_Pos; + bkin_polarity_mask = TIM1_AF1_BKINP; + bkin_polarity_bitpos = TIM1_AF1_BKINP_Pos; + break; + } +#if defined(DFSDM1_Channel0) + case TIM_BREAKINPUTSOURCE_DFSDM1: + { + bkin_enable_mask = TIM1_AF1_BKDF1BKE; + bkin_enable_bitpos = TIM1_AF1_BKDF1BKE_Pos; + bkin_polarity_mask = 0U; + bkin_polarity_bitpos = 0U; + break; + } +#endif /* DFSDM1_Channel0 */ + + default: + { + bkin_enable_mask = 0U; + bkin_polarity_mask = 0U; + bkin_enable_bitpos = 0U; + bkin_polarity_bitpos = 0U; + break; + } + } + + switch (BreakInput) + { + case TIM_BREAKINPUT_BRK: + { + /* Get the TIMx_AF1 register value */ + tmporx = htim->Instance->AF1; + + /* Enable the break input */ + tmporx &= ~bkin_enable_mask; + tmporx |= (sBreakInputConfig->Enable << bkin_enable_bitpos) & bkin_enable_mask; + + /* Set the break input polarity */ +#if defined(DFSDM1_Channel0) + if (sBreakInputConfig->Source != TIM_BREAKINPUTSOURCE_DFSDM1) +#endif /* DFSDM1_Channel0 */ + { + tmporx &= ~bkin_polarity_mask; + tmporx |= (sBreakInputConfig->Polarity << bkin_polarity_bitpos) & bkin_polarity_mask; + } + + /* Set TIMx_AF1 */ + htim->Instance->AF1 = tmporx; + break; + } + case TIM_BREAKINPUT_BRK2: + { + /* Get the TIMx_AF2 register value */ + tmporx = htim->Instance->AF2; + + /* Enable the break input */ + tmporx &= ~bkin_enable_mask; + tmporx |= (sBreakInputConfig->Enable << bkin_enable_bitpos) & bkin_enable_mask; + + /* Set the break input polarity */ +#if defined(DFSDM1_Channel0) + if (sBreakInputConfig->Source != TIM_BREAKINPUTSOURCE_DFSDM1) +#endif /* DFSDM1_Channel0 */ + { + tmporx &= ~bkin_polarity_mask; + tmporx |= (sBreakInputConfig->Polarity << bkin_polarity_bitpos) & bkin_polarity_mask; + } + + /* Set TIMx_AF2 */ + htim->Instance->AF2 = tmporx; + break; + } + default: + status = HAL_ERROR; + break; + } + + __HAL_UNLOCK(htim); + + return status; +} +#endif /*TIM_BREAK_INPUT_SUPPORT */ + +/** + * @brief Configures the TIMx Remapping input capabilities. + * @param htim TIM handle. + * @param Remap specifies the TIM remapping source. 
+ * This parameter can be one of the following values: + * @arg TIM_TIM2_TIM8_TRGO: TIM2 ITR1 input is connected to TIM8 Trigger output(default) + * @arg TIM_TIM2_ETH_PTP: TIM2 ITR1 input is connected to ETH PTP trigger output. + * @arg TIM_TIM2_USBFS_SOF: TIM2 ITR1 input is connected to USB FS SOF. + * @arg TIM_TIM2_USBHS_SOF: TIM2 ITR1 input is connected to USB HS SOF. + * @arg TIM_TIM5_GPIO: TIM5 CH4 input is connected to dedicated Timer pin(default) + * @arg TIM_TIM5_LSI: TIM5 CH4 input is connected to LSI clock. + * @arg TIM_TIM5_LSE: TIM5 CH4 input is connected to LSE clock. + * @arg TIM_TIM5_RTC: TIM5 CH4 input is connected to RTC Output event. + * @arg TIM_TIM11_GPIO: TIM11 CH4 input is connected to dedicated Timer pin(default) + * @arg TIM_TIM11_SPDIF: SPDIF Frame synchronous + * @arg TIM_TIM11_HSE: TIM11 CH4 input is connected to HSE_RTC clock + * (HSE divided by a programmable prescaler) + * @arg TIM_TIM11_MCO1: TIM11 CH1 input is connected to MCO1 + * + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIMEx_RemapConfig(TIM_HandleTypeDef *htim, uint32_t Remap) +{ + + /* Check parameters */ + assert_param(IS_TIM_REMAP_INSTANCE(htim->Instance)); + assert_param(IS_TIM_REMAP(Remap)); + + __HAL_LOCK(htim); + + /* Set the Timer remapping configuration */ + htim->Instance->OR = Remap; + + htim->State = HAL_TIM_STATE_READY; + + __HAL_UNLOCK(htim); + + return HAL_OK; +} + +/** + * @brief Group channel 5 and channel 1, 2 or 3 + * @param htim TIM handle. + * @param Channels specifies the reference signal(s) the OC5REF is combined with. + * This parameter can be any combination of the following values: + * TIM_GROUPCH5_NONE: No effect of OC5REF on OC1REFC, OC2REFC and OC3REFC + * TIM_GROUPCH5_OC1REFC: OC1REFC is the logical AND of OC1REFC and OC5REF + * TIM_GROUPCH5_OC2REFC: OC2REFC is the logical AND of OC2REFC and OC5REF + * TIM_GROUPCH5_OC3REFC: OC3REFC is the logical AND of OC3REFC and OC5REF + * @retval HAL status + */ +HAL_StatusTypeDef HAL_TIMEx_GroupChannel5(TIM_HandleTypeDef *htim, uint32_t Channels) +{ + /* Check parameters */ + assert_param(IS_TIM_COMBINED3PHASEPWM_INSTANCE(htim->Instance)); + assert_param(IS_TIM_GROUPCH5(Channels)); + + /* Process Locked */ + __HAL_LOCK(htim); + + htim->State = HAL_TIM_STATE_BUSY; + + /* Clear GC5Cx bit fields */ + htim->Instance->CCR5 &= ~(TIM_CCR5_GC5C3 | TIM_CCR5_GC5C2 | TIM_CCR5_GC5C1); + + /* Set GC5Cx bit fields */ + htim->Instance->CCR5 |= Channels; + + /* Change the htim state */ + htim->State = HAL_TIM_STATE_READY; + + __HAL_UNLOCK(htim); + + return HAL_OK; +} + +/** + * @} + */ + +/** @defgroup TIMEx_Exported_Functions_Group6 Extended Callbacks functions + * @brief Extended Callbacks functions + * +@verbatim + ============================================================================== + ##### Extended Callbacks functions ##### + ============================================================================== + [..] 
+ This section provides Extended TIM callback functions: + (+) Timer Commutation callback + (+) Timer Break callback + +@endverbatim + * @{ + */ + +/** + * @brief Hall commutation changed callback in non-blocking mode + * @param htim TIM handle + * @retval None + */ +__weak void HAL_TIMEx_CommutCallback(TIM_HandleTypeDef *htim) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(htim); + + /* NOTE : This function should not be modified, when the callback is needed, + the HAL_TIMEx_CommutCallback could be implemented in the user file + */ +} +/** + * @brief Hall commutation changed half complete callback in non-blocking mode + * @param htim TIM handle + * @retval None + */ +__weak void HAL_TIMEx_CommutHalfCpltCallback(TIM_HandleTypeDef *htim) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(htim); + + /* NOTE : This function should not be modified, when the callback is needed, + the HAL_TIMEx_CommutHalfCpltCallback could be implemented in the user file + */ +} + +/** + * @brief Hall Break detection callback in non-blocking mode + * @param htim TIM handle + * @retval None + */ +__weak void HAL_TIMEx_BreakCallback(TIM_HandleTypeDef *htim) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(htim); + + /* NOTE : This function should not be modified, when the callback is needed, + the HAL_TIMEx_BreakCallback could be implemented in the user file + */ +} + +/** + * @brief Hall Break2 detection callback in non blocking mode + * @param htim: TIM handle + * @retval None + */ +__weak void HAL_TIMEx_Break2Callback(TIM_HandleTypeDef *htim) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(htim); + + /* NOTE : This function Should not be modified, when the callback is needed, + the HAL_TIMEx_Break2Callback could be implemented in the user file + */ +} +/** + * @} + */ + +/** @defgroup TIMEx_Exported_Functions_Group7 Extended Peripheral State functions + * @brief Extended Peripheral State functions + * +@verbatim + ============================================================================== + ##### Extended Peripheral State functions ##### + ============================================================================== + [..] + This subsection permits to get in run-time the status of the peripheral + and the data flow. + +@endverbatim + * @{ + */ + +/** + * @brief Return the TIM Hall Sensor interface handle state. + * @param htim TIM Hall Sensor handle + * @retval HAL state + */ +HAL_TIM_StateTypeDef HAL_TIMEx_HallSensor_GetState(TIM_HandleTypeDef *htim) +{ + return htim->State; +} + +/** + * @brief Return actual state of the TIM complementary channel. + * @param htim TIM handle + * @param ChannelN TIM Complementary channel + * This parameter can be one of the following values: + * @arg TIM_CHANNEL_1: TIM Channel 1 + * @arg TIM_CHANNEL_2: TIM Channel 2 + * @arg TIM_CHANNEL_3: TIM Channel 3 + * @retval TIM Complementary channel state + */ +HAL_TIM_ChannelStateTypeDef HAL_TIMEx_GetChannelNState(TIM_HandleTypeDef *htim, uint32_t ChannelN) +{ + HAL_TIM_ChannelStateTypeDef channel_state; + + /* Check the parameters */ + assert_param(IS_TIM_CCXN_INSTANCE(htim->Instance, ChannelN)); + + channel_state = TIM_CHANNEL_N_STATE_GET(htim, ChannelN); + + return channel_state; +} +/** + * @} + */ + +/** + * @} + */ + +/* Private functions ---------------------------------------------------------*/ +/** @defgroup TIMEx_Private_Functions TIM Extended Private Functions + * @{ + */ + +/** + * @brief TIM DMA Commutation callback. 
+ * @param hdma pointer to DMA handle. + * @retval None + */ +void TIMEx_DMACommutationCplt(DMA_HandleTypeDef *hdma) +{ + TIM_HandleTypeDef *htim = (TIM_HandleTypeDef *)((DMA_HandleTypeDef *)hdma)->Parent; + + /* Change the htim state */ + htim->State = HAL_TIM_STATE_READY; + +#if (USE_HAL_TIM_REGISTER_CALLBACKS == 1) + htim->CommutationCallback(htim); +#else + HAL_TIMEx_CommutCallback(htim); +#endif /* USE_HAL_TIM_REGISTER_CALLBACKS */ +} + +/** + * @brief TIM DMA Commutation half complete callback. + * @param hdma pointer to DMA handle. + * @retval None + */ +void TIMEx_DMACommutationHalfCplt(DMA_HandleTypeDef *hdma) +{ + TIM_HandleTypeDef *htim = (TIM_HandleTypeDef *)((DMA_HandleTypeDef *)hdma)->Parent; + + /* Change the htim state */ + htim->State = HAL_TIM_STATE_READY; + +#if (USE_HAL_TIM_REGISTER_CALLBACKS == 1) + htim->CommutationHalfCpltCallback(htim); +#else + HAL_TIMEx_CommutHalfCpltCallback(htim); +#endif /* USE_HAL_TIM_REGISTER_CALLBACKS */ +} + + +/** + * @brief TIM DMA Delay Pulse complete callback (complementary channel). + * @param hdma pointer to DMA handle. + * @retval None + */ +static void TIM_DMADelayPulseNCplt(DMA_HandleTypeDef *hdma) +{ + TIM_HandleTypeDef *htim = (TIM_HandleTypeDef *)((DMA_HandleTypeDef *)hdma)->Parent; + + if (hdma == htim->hdma[TIM_DMA_ID_CC1]) + { + htim->Channel = HAL_TIM_ACTIVE_CHANNEL_1; + + if (hdma->Init.Mode == DMA_NORMAL) + { + TIM_CHANNEL_N_STATE_SET(htim, TIM_CHANNEL_1, HAL_TIM_CHANNEL_STATE_READY); + } + } + else if (hdma == htim->hdma[TIM_DMA_ID_CC2]) + { + htim->Channel = HAL_TIM_ACTIVE_CHANNEL_2; + + if (hdma->Init.Mode == DMA_NORMAL) + { + TIM_CHANNEL_N_STATE_SET(htim, TIM_CHANNEL_2, HAL_TIM_CHANNEL_STATE_READY); + } + } + else if (hdma == htim->hdma[TIM_DMA_ID_CC3]) + { + htim->Channel = HAL_TIM_ACTIVE_CHANNEL_3; + + if (hdma->Init.Mode == DMA_NORMAL) + { + TIM_CHANNEL_N_STATE_SET(htim, TIM_CHANNEL_3, HAL_TIM_CHANNEL_STATE_READY); + } + } + else if (hdma == htim->hdma[TIM_DMA_ID_CC4]) + { + htim->Channel = HAL_TIM_ACTIVE_CHANNEL_4; + + if (hdma->Init.Mode == DMA_NORMAL) + { + TIM_CHANNEL_N_STATE_SET(htim, TIM_CHANNEL_4, HAL_TIM_CHANNEL_STATE_READY); + } + } + else + { + /* nothing to do */ + } + +#if (USE_HAL_TIM_REGISTER_CALLBACKS == 1) + htim->PWM_PulseFinishedCallback(htim); +#else + HAL_TIM_PWM_PulseFinishedCallback(htim); +#endif /* USE_HAL_TIM_REGISTER_CALLBACKS */ + + htim->Channel = HAL_TIM_ACTIVE_CHANNEL_CLEARED; +} + +/** + * @brief TIM DMA error callback (complementary channel) + * @param hdma pointer to DMA handle. + * @retval None + */ +static void TIM_DMAErrorCCxN(DMA_HandleTypeDef *hdma) +{ + TIM_HandleTypeDef *htim = (TIM_HandleTypeDef *)((DMA_HandleTypeDef *)hdma)->Parent; + + if (hdma == htim->hdma[TIM_DMA_ID_CC1]) + { + htim->Channel = HAL_TIM_ACTIVE_CHANNEL_1; + TIM_CHANNEL_N_STATE_SET(htim, TIM_CHANNEL_1, HAL_TIM_CHANNEL_STATE_READY); + } + else if (hdma == htim->hdma[TIM_DMA_ID_CC2]) + { + htim->Channel = HAL_TIM_ACTIVE_CHANNEL_2; + TIM_CHANNEL_N_STATE_SET(htim, TIM_CHANNEL_2, HAL_TIM_CHANNEL_STATE_READY); + } + else if (hdma == htim->hdma[TIM_DMA_ID_CC3]) + { + htim->Channel = HAL_TIM_ACTIVE_CHANNEL_3; + TIM_CHANNEL_N_STATE_SET(htim, TIM_CHANNEL_3, HAL_TIM_CHANNEL_STATE_READY); + } + else + { + /* nothing to do */ + } + +#if (USE_HAL_TIM_REGISTER_CALLBACKS == 1) + htim->ErrorCallback(htim); +#else + HAL_TIM_ErrorCallback(htim); +#endif /* USE_HAL_TIM_REGISTER_CALLBACKS */ + + htim->Channel = HAL_TIM_ACTIVE_CHANNEL_CLEARED; +} + +/** + * @brief Enables or disables the TIM Capture Compare Channel xN. 
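+  * @note  Worked example (informative, not from the original source): for
+  *        Channel == TIM_CHANNEL_2 (0x00000004U), TIM_CCER_CC1NE (bit 2 of CCER)
+  *        shifted left by 4 selects TIM_CCER_CC2NE (bit 6), so it is the CC2NE
+  *        output-enable bit that gets cleared and then rewritten below.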
+ * @param TIMx to select the TIM peripheral + * @param Channel specifies the TIM Channel + * This parameter can be one of the following values: + * @arg TIM_CHANNEL_1: TIM Channel 1 + * @arg TIM_CHANNEL_2: TIM Channel 2 + * @arg TIM_CHANNEL_3: TIM Channel 3 + * @param ChannelNState specifies the TIM Channel CCxNE bit new state. + * This parameter can be: TIM_CCxN_ENABLE or TIM_CCxN_Disable. + * @retval None + */ +static void TIM_CCxNChannelCmd(TIM_TypeDef *TIMx, uint32_t Channel, uint32_t ChannelNState) +{ + uint32_t tmp; + + tmp = TIM_CCER_CC1NE << (Channel & 0x1FU); /* 0x1FU = 31 bits max shift */ + + /* Reset the CCxNE Bit */ + TIMx->CCER &= ~tmp; + + /* Set or reset the CCxNE Bit */ + TIMx->CCER |= (uint32_t)(ChannelNState << (Channel & 0x1FU)); /* 0x1FU = 31 bits max shift */ +} +/** + * @} + */ + +#endif /* HAL_TIM_MODULE_ENABLED */ +/** + * @} + */ + +/** + * @} + */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_uart.c b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_uart.c new file mode 100644 index 0000000..ab80845 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_uart.c @@ -0,0 +1,4021 @@ +/** + ****************************************************************************** + * @file stm32f7xx_hal_uart.c + * @author MCD Application Team + * @brief UART HAL module driver. + * This file provides firmware functions to manage the following + * functionalities of the Universal Asynchronous Receiver Transmitter Peripheral (UART). + * + Initialization and de-initialization functions + * + IO operation functions + * + Peripheral Control functions + * + * + ****************************************************************************** + * @attention + * + * Copyright (c) 2017 STMicroelectronics. + * All rights reserved. + * + * This software is licensed under terms that can be found in the LICENSE file + * in the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. + * + ****************************************************************************** + @verbatim + =============================================================================== + ##### How to use this driver ##### + =============================================================================== + [..] + The UART HAL driver can be used as follows: + + (#) Declare a UART_HandleTypeDef handle structure (eg. UART_HandleTypeDef huart). + (#) Initialize the UART low level resources by implementing the HAL_UART_MspInit() API: + (++) Enable the USARTx interface clock. + (++) UART pins configuration: + (+++) Enable the clock for the UART GPIOs. + (+++) Configure these UART pins as alternate function pull-up. + (++) NVIC configuration if you need to use interrupt process (HAL_UART_Transmit_IT() + and HAL_UART_Receive_IT() APIs): + (+++) Configure the USARTx interrupt priority. + (+++) Enable the NVIC USART IRQ handle. + (++) UART interrupts handling: + -@@- The specific UART interrupts (Transmission complete interrupt, + RXNE interrupt, RX/TX FIFOs related interrupts and Error Interrupts) + are managed using the macros __HAL_UART_ENABLE_IT() and __HAL_UART_DISABLE_IT() + inside the transmit and receive processes. + (++) DMA Configuration if you need to use DMA process (HAL_UART_Transmit_DMA() + and HAL_UART_Receive_DMA() APIs): + (+++) Declare a DMA handle structure for the Tx/Rx channel. 
+ (+++) Enable the DMAx interface clock. + (+++) Configure the declared DMA handle structure with the required Tx/Rx parameters. + (+++) Configure the DMA Tx/Rx channel. + (+++) Associate the initialized DMA handle to the UART DMA Tx/Rx handle. + (+++) Configure the priority and enable the NVIC for the transfer complete + interrupt on the DMA Tx/Rx channel. + + (#) Program the Baud Rate, Word Length, Stop Bit, Parity, Hardware + flow control and Mode (Receiver/Transmitter) in the huart handle Init structure. + + (#) If required, program UART advanced features (TX/RX pins swap, auto Baud rate detection,...) + in the huart handle AdvancedInit structure. + + (#) For the UART asynchronous mode, initialize the UART registers by calling + the HAL_UART_Init() API. + + (#) For the UART Half duplex mode, initialize the UART registers by calling + the HAL_HalfDuplex_Init() API. + + (#) For the UART LIN (Local Interconnection Network) mode, initialize the UART registers + by calling the HAL_LIN_Init() API. + + (#) For the UART Multiprocessor mode, initialize the UART registers + by calling the HAL_MultiProcessor_Init() API. + + (#) For the UART RS485 Driver Enabled mode, initialize the UART registers + by calling the HAL_RS485Ex_Init() API. + + [..] + (@) These API's (HAL_UART_Init(), HAL_HalfDuplex_Init(), HAL_LIN_Init(), HAL_MultiProcessor_Init(), + also configure the low level Hardware GPIO, CLOCK, CORTEX...etc) by + calling the customized HAL_UART_MspInit() API. + + ##### Callback registration ##### + ================================== + + [..] + The compilation define USE_HAL_UART_REGISTER_CALLBACKS when set to 1 + allows the user to configure dynamically the driver callbacks. + + [..] + Use Function HAL_UART_RegisterCallback() to register a user callback. + Function HAL_UART_RegisterCallback() allows to register following callbacks: + (+) TxHalfCpltCallback : Tx Half Complete Callback. + (+) TxCpltCallback : Tx Complete Callback. + (+) RxHalfCpltCallback : Rx Half Complete Callback. + (+) RxCpltCallback : Rx Complete Callback. + (+) ErrorCallback : Error Callback. + (+) AbortCpltCallback : Abort Complete Callback. + (+) AbortTransmitCpltCallback : Abort Transmit Complete Callback. + (+) AbortReceiveCpltCallback : Abort Receive Complete Callback. + (+) WakeupCallback : Wakeup Callback. + (+) MspInitCallback : UART MspInit. + (+) MspDeInitCallback : UART MspDeInit. + This function takes as parameters the HAL peripheral handle, the Callback ID + and a pointer to the user callback function. + + [..] + Use function HAL_UART_UnRegisterCallback() to reset a callback to the default + weak (surcharged) function. + HAL_UART_UnRegisterCallback() takes as parameters the HAL peripheral handle, + and the Callback ID. + This function allows to reset following callbacks: + (+) TxHalfCpltCallback : Tx Half Complete Callback. + (+) TxCpltCallback : Tx Complete Callback. + (+) RxHalfCpltCallback : Rx Half Complete Callback. + (+) RxCpltCallback : Rx Complete Callback. + (+) ErrorCallback : Error Callback. + (+) AbortCpltCallback : Abort Complete Callback. + (+) AbortTransmitCpltCallback : Abort Transmit Complete Callback. + (+) AbortReceiveCpltCallback : Abort Receive Complete Callback. + (+) WakeupCallback : Wakeup Callback. + (+) MspInitCallback : UART MspInit. + (+) MspDeInitCallback : UART MspDeInit. + + [..] + For specific callback RxEventCallback, use dedicated registration/reset functions: + respectively HAL_UART_RegisterRxEventCallback() , HAL_UART_UnRegisterRxEventCallback(). + + [..] 
+ By default, after the HAL_UART_Init() and when the state is HAL_UART_STATE_RESET + all callbacks are set to the corresponding weak (surcharged) functions: + examples HAL_UART_TxCpltCallback(), HAL_UART_RxHalfCpltCallback(). + Exception done for MspInit and MspDeInit functions that are respectively + reset to the legacy weak (surcharged) functions in the HAL_UART_Init() + and HAL_UART_DeInit() only when these callbacks are null (not registered beforehand). + If not, MspInit or MspDeInit are not null, the HAL_UART_Init() and HAL_UART_DeInit() + keep and use the user MspInit/MspDeInit callbacks (registered beforehand). + + [..] + Callbacks can be registered/unregistered in HAL_UART_STATE_READY state only. + Exception done MspInit/MspDeInit that can be registered/unregistered + in HAL_UART_STATE_READY or HAL_UART_STATE_RESET state, thus registered (user) + MspInit/DeInit callbacks can be used during the Init/DeInit. + In that case first register the MspInit/MspDeInit user callbacks + using HAL_UART_RegisterCallback() before calling HAL_UART_DeInit() + or HAL_UART_Init() function. + + [..] + When The compilation define USE_HAL_UART_REGISTER_CALLBACKS is set to 0 or + not defined, the callback registration feature is not available + and weak (surcharged) callbacks are used. + + + @endverbatim + ****************************************************************************** + */ + +/* Includes ------------------------------------------------------------------*/ +#include "stm32f7xx_hal.h" + +/** @addtogroup STM32F7xx_HAL_Driver + * @{ + */ + +/** @defgroup UART UART + * @brief HAL UART module driver + * @{ + */ + +#ifdef HAL_UART_MODULE_ENABLED + +/* Private typedef -----------------------------------------------------------*/ +/* Private define ------------------------------------------------------------*/ +/** @defgroup UART_Private_Constants UART Private Constants + * @{ + */ +#define USART_CR1_FIELDS ((uint32_t)(USART_CR1_M | USART_CR1_PCE | USART_CR1_PS | USART_CR1_TE | USART_CR1_RE | \ + USART_CR1_OVER8)) /*!< UART or USART CR1 fields of parameters set by UART_SetConfig API */ + +#define USART_CR3_FIELDS ((uint32_t)(USART_CR3_RTSE | USART_CR3_CTSE |\ + USART_CR3_ONEBIT)) /*!< UART or USART CR3 fields of parameters set by UART_SetConfig API */ + + +#define UART_BRR_MIN 0x10U /* UART BRR minimum authorized value */ +#define UART_BRR_MAX 0x0000FFFFU /* UART BRR maximum authorized value */ +/** + * @} + */ + +/* Private macros ------------------------------------------------------------*/ +/* Private function prototypes -----------------------------------------------*/ +/** @addtogroup UART_Private_Functions + * @{ + */ +static void UART_EndTxTransfer(UART_HandleTypeDef *huart); +static void UART_EndRxTransfer(UART_HandleTypeDef *huart); +static void UART_DMATransmitCplt(DMA_HandleTypeDef *hdma); +static void UART_DMAReceiveCplt(DMA_HandleTypeDef *hdma); +static void UART_DMARxHalfCplt(DMA_HandleTypeDef *hdma); +static void UART_DMATxHalfCplt(DMA_HandleTypeDef *hdma); +static void UART_DMAError(DMA_HandleTypeDef *hdma); +static void UART_DMAAbortOnError(DMA_HandleTypeDef *hdma); +static void UART_DMATxAbortCallback(DMA_HandleTypeDef *hdma); +static void UART_DMARxAbortCallback(DMA_HandleTypeDef *hdma); +static void UART_DMATxOnlyAbortCallback(DMA_HandleTypeDef *hdma); +static void UART_DMARxOnlyAbortCallback(DMA_HandleTypeDef *hdma); +static void UART_TxISR_8BIT(UART_HandleTypeDef *huart); +static void UART_TxISR_16BIT(UART_HandleTypeDef *huart); +static void 
UART_EndTransmit_IT(UART_HandleTypeDef *huart); +static void UART_RxISR_8BIT(UART_HandleTypeDef *huart); +static void UART_RxISR_16BIT(UART_HandleTypeDef *huart); +/** + * @} + */ + +/* Private variables ---------------------------------------------------------*/ +/* Exported Constants --------------------------------------------------------*/ +/* Exported functions --------------------------------------------------------*/ + +/** @defgroup UART_Exported_Functions UART Exported Functions + * @{ + */ + +/** @defgroup UART_Exported_Functions_Group1 Initialization and de-initialization functions + * @brief Initialization and Configuration functions + * +@verbatim +=============================================================================== + ##### Initialization and Configuration functions ##### + =============================================================================== + [..] + This subsection provides a set of functions allowing to initialize the USARTx or the UARTy + in asynchronous mode. + (+) For the asynchronous mode the parameters below can be configured: + (++) Baud Rate + (++) Word Length + (++) Stop Bit + (++) Parity: If the parity is enabled, then the MSB bit of the data written + in the data register is transmitted but is changed by the parity bit. + (++) Hardware flow control + (++) Receiver/transmitter modes + (++) Over Sampling Method + (++) One-Bit Sampling Method + (+) For the asynchronous mode, the following advanced features can be configured as well: + (++) TX and/or RX pin level inversion + (++) data logical level inversion + (++) RX and TX pins swap + (++) RX overrun detection disabling + (++) DMA disabling on RX error + (++) MSB first on communication line + (++) auto Baud rate detection + [..] + The HAL_UART_Init(), HAL_HalfDuplex_Init(), HAL_LIN_Init()and HAL_MultiProcessor_Init()API + follow respectively the UART asynchronous, UART Half duplex, UART LIN mode + and UART multiprocessor mode configuration procedures (details for the procedures + are available in reference manual). + +@endverbatim + + Depending on the frame length defined by the M1 and M0 bits (7-bit, + 8-bit or 9-bit), the possible UART formats are listed in the + following table. + + Table 1. UART frame format. + +-----------------------------------------------------------------------+ + | M1 bit | M0 bit | PCE bit | UART frame | + |---------|---------|-----------|---------------------------------------| + | 0 | 0 | 0 | | SB | 8 bit data | STB | | + |---------|---------|-----------|---------------------------------------| + | 0 | 0 | 1 | | SB | 7 bit data | PB | STB | | + |---------|---------|-----------|---------------------------------------| + | 0 | 1 | 0 | | SB | 9 bit data | STB | | + |---------|---------|-----------|---------------------------------------| + | 0 | 1 | 1 | | SB | 8 bit data | PB | STB | | + |---------|---------|-----------|---------------------------------------| + | 1 | 0 | 0 | | SB | 7 bit data | STB | | + |---------|---------|-----------|---------------------------------------| + | 1 | 0 | 1 | | SB | 6 bit data | PB | STB | | + +-----------------------------------------------------------------------+ + + * @{ + */ + +/** + * @brief Initialize the UART mode according to the specified + * parameters in the UART_InitTypeDef and initialize the associated handle. + * @param huart UART handle. 
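+  * @note  A minimal asynchronous-mode configuration sketch (illustrative only,
+  *        not part of the original driver; the handle name "huart3", the
+  *        115200 8N1 settings and Error_Handler() as the user error hook are
+  *        assumptions):
+  * @code
+  *   static UART_HandleTypeDef huart3;   // zero-initialized => gState == HAL_UART_STATE_RESET
+  *   huart3.Instance                    = USART3;
+  *   huart3.Init.BaudRate               = 115200U;
+  *   huart3.Init.WordLength             = UART_WORDLENGTH_8B;
+  *   huart3.Init.StopBits               = UART_STOPBITS_1;
+  *   huart3.Init.Parity                 = UART_PARITY_NONE;
+  *   huart3.Init.Mode                   = UART_MODE_TX_RX;
+  *   huart3.Init.HwFlowCtl              = UART_HWCONTROL_NONE;
+  *   huart3.Init.OverSampling           = UART_OVERSAMPLING_16;
+  *   huart3.AdvancedInit.AdvFeatureInit = UART_ADVFEATURE_NO_INIT;
+  *   if (HAL_UART_Init(&huart3) != HAL_OK)
+  *   {
+  *     Error_Handler();
+  *   }
+  * @endcode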
+ * @retval HAL status + */ +HAL_StatusTypeDef HAL_UART_Init(UART_HandleTypeDef *huart) +{ + /* Check the UART handle allocation */ + if (huart == NULL) + { + return HAL_ERROR; + } + + if (huart->Init.HwFlowCtl != UART_HWCONTROL_NONE) + { + /* Check the parameters */ + assert_param(IS_UART_HWFLOW_INSTANCE(huart->Instance)); + } + else + { + /* Check the parameters */ + assert_param(IS_UART_INSTANCE(huart->Instance)); + } + + if (huart->gState == HAL_UART_STATE_RESET) + { + /* Allocate lock resource and initialize it */ + huart->Lock = HAL_UNLOCKED; + +#if (USE_HAL_UART_REGISTER_CALLBACKS == 1) + UART_InitCallbacksToDefault(huart); + + if (huart->MspInitCallback == NULL) + { + huart->MspInitCallback = HAL_UART_MspInit; + } + + /* Init the low level hardware */ + huart->MspInitCallback(huart); +#else + /* Init the low level hardware : GPIO, CLOCK */ + HAL_UART_MspInit(huart); +#endif /* (USE_HAL_UART_REGISTER_CALLBACKS) */ + } + + huart->gState = HAL_UART_STATE_BUSY; + + __HAL_UART_DISABLE(huart); + + /* Set the UART Communication parameters */ + if (UART_SetConfig(huart) == HAL_ERROR) + { + return HAL_ERROR; + } + + if (huart->AdvancedInit.AdvFeatureInit != UART_ADVFEATURE_NO_INIT) + { + UART_AdvFeatureConfig(huart); + } + + /* In asynchronous mode, the following bits must be kept cleared: + - LINEN and CLKEN bits in the USART_CR2 register, + - SCEN, HDSEL and IREN bits in the USART_CR3 register.*/ + CLEAR_BIT(huart->Instance->CR2, (USART_CR2_LINEN | USART_CR2_CLKEN)); + CLEAR_BIT(huart->Instance->CR3, (USART_CR3_SCEN | USART_CR3_HDSEL | USART_CR3_IREN)); + + __HAL_UART_ENABLE(huart); + + /* TEACK and/or REACK to check before moving huart->gState and huart->RxState to Ready */ + return (UART_CheckIdleState(huart)); +} + +/** + * @brief Initialize the half-duplex mode according to the specified + * parameters in the UART_InitTypeDef and creates the associated handle. + * @param huart UART handle. 
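+  * @note  Illustrative call (names are assumptions): fill huart->Init exactly as
+  *        for HAL_UART_Init(), then
+  * @code
+  *   if (HAL_HalfDuplex_Init(&huart3) != HAL_OK)
+  *   {
+  *     Error_Handler();
+  *   }
+  * @endcode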
+ * @retval HAL status + */ +HAL_StatusTypeDef HAL_HalfDuplex_Init(UART_HandleTypeDef *huart) +{ + /* Check the UART handle allocation */ + if (huart == NULL) + { + return HAL_ERROR; + } + + /* Check UART instance */ + assert_param(IS_UART_HALFDUPLEX_INSTANCE(huart->Instance)); + + if (huart->gState == HAL_UART_STATE_RESET) + { + /* Allocate lock resource and initialize it */ + huart->Lock = HAL_UNLOCKED; + +#if (USE_HAL_UART_REGISTER_CALLBACKS == 1) + UART_InitCallbacksToDefault(huart); + + if (huart->MspInitCallback == NULL) + { + huart->MspInitCallback = HAL_UART_MspInit; + } + + /* Init the low level hardware */ + huart->MspInitCallback(huart); +#else + /* Init the low level hardware : GPIO, CLOCK */ + HAL_UART_MspInit(huart); +#endif /* (USE_HAL_UART_REGISTER_CALLBACKS) */ + } + + huart->gState = HAL_UART_STATE_BUSY; + + __HAL_UART_DISABLE(huart); + + /* Set the UART Communication parameters */ + if (UART_SetConfig(huart) == HAL_ERROR) + { + return HAL_ERROR; + } + + if (huart->AdvancedInit.AdvFeatureInit != UART_ADVFEATURE_NO_INIT) + { + UART_AdvFeatureConfig(huart); + } + + /* In half-duplex mode, the following bits must be kept cleared: + - LINEN and CLKEN bits in the USART_CR2 register, + - SCEN and IREN bits in the USART_CR3 register.*/ + CLEAR_BIT(huart->Instance->CR2, (USART_CR2_LINEN | USART_CR2_CLKEN)); + CLEAR_BIT(huart->Instance->CR3, (USART_CR3_IREN | USART_CR3_SCEN)); + + /* Enable the Half-Duplex mode by setting the HDSEL bit in the CR3 register */ + SET_BIT(huart->Instance->CR3, USART_CR3_HDSEL); + + __HAL_UART_ENABLE(huart); + + /* TEACK and/or REACK to check before moving huart->gState and huart->RxState to Ready */ + return (UART_CheckIdleState(huart)); +} + + +/** + * @brief Initialize the LIN mode according to the specified + * parameters in the UART_InitTypeDef and creates the associated handle. + * @param huart UART handle. + * @param BreakDetectLength Specifies the LIN break detection length. 
+ * This parameter can be one of the following values: + * @arg @ref UART_LINBREAKDETECTLENGTH_10B 10-bit break detection + * @arg @ref UART_LINBREAKDETECTLENGTH_11B 11-bit break detection + * @retval HAL status + */ +HAL_StatusTypeDef HAL_LIN_Init(UART_HandleTypeDef *huart, uint32_t BreakDetectLength) +{ + /* Check the UART handle allocation */ + if (huart == NULL) + { + return HAL_ERROR; + } + + /* Check the LIN UART instance */ + assert_param(IS_UART_LIN_INSTANCE(huart->Instance)); + /* Check the Break detection length parameter */ + assert_param(IS_UART_LIN_BREAK_DETECT_LENGTH(BreakDetectLength)); + + /* LIN mode limited to 16-bit oversampling only */ + if (huart->Init.OverSampling == UART_OVERSAMPLING_8) + { + return HAL_ERROR; + } + /* LIN mode limited to 8-bit data length */ + if (huart->Init.WordLength != UART_WORDLENGTH_8B) + { + return HAL_ERROR; + } + + if (huart->gState == HAL_UART_STATE_RESET) + { + /* Allocate lock resource and initialize it */ + huart->Lock = HAL_UNLOCKED; + +#if (USE_HAL_UART_REGISTER_CALLBACKS == 1) + UART_InitCallbacksToDefault(huart); + + if (huart->MspInitCallback == NULL) + { + huart->MspInitCallback = HAL_UART_MspInit; + } + + /* Init the low level hardware */ + huart->MspInitCallback(huart); +#else + /* Init the low level hardware : GPIO, CLOCK */ + HAL_UART_MspInit(huart); +#endif /* (USE_HAL_UART_REGISTER_CALLBACKS) */ + } + + huart->gState = HAL_UART_STATE_BUSY; + + __HAL_UART_DISABLE(huart); + + /* Set the UART Communication parameters */ + if (UART_SetConfig(huart) == HAL_ERROR) + { + return HAL_ERROR; + } + + if (huart->AdvancedInit.AdvFeatureInit != UART_ADVFEATURE_NO_INIT) + { + UART_AdvFeatureConfig(huart); + } + + /* In LIN mode, the following bits must be kept cleared: + - LINEN and CLKEN bits in the USART_CR2 register, + - SCEN and IREN bits in the USART_CR3 register.*/ + CLEAR_BIT(huart->Instance->CR2, USART_CR2_CLKEN); + CLEAR_BIT(huart->Instance->CR3, (USART_CR3_HDSEL | USART_CR3_IREN | USART_CR3_SCEN)); + + /* Enable the LIN mode by setting the LINEN bit in the CR2 register */ + SET_BIT(huart->Instance->CR2, USART_CR2_LINEN); + + /* Set the USART LIN Break detection length. */ + MODIFY_REG(huart->Instance->CR2, USART_CR2_LBDL, BreakDetectLength); + + __HAL_UART_ENABLE(huart); + + /* TEACK and/or REACK to check before moving huart->gState and huart->RxState to Ready */ + return (UART_CheckIdleState(huart)); +} + + +/** + * @brief Initialize the multiprocessor mode according to the specified + * parameters in the UART_InitTypeDef and initialize the associated handle. + * @param huart UART handle. + * @param Address UART node address (4-, 6-, 7- or 8-bit long). + * @param WakeUpMethod Specifies the UART wakeup method. + * This parameter can be one of the following values: + * @arg @ref UART_WAKEUPMETHOD_IDLELINE WakeUp by an idle line detection + * @arg @ref UART_WAKEUPMETHOD_ADDRESSMARK WakeUp by an address mark + * @note If the user resorts to idle line detection wake up, the Address parameter + * is useless and ignored by the initialization function. + * @note If the user resorts to address mark wake up, the address length detection + * is configured by default to 4 bits only. For the UART to be able to + * manage 6-, 7- or 8-bit long addresses detection, the API + * HAL_MultiProcessorEx_AddressLength_Set() must be called after + * HAL_MultiProcessor_Init(). 
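+  * @note  Illustrative call (the handle "huart3" and node address 0x05 are
+  *        assumptions): configure address-mark wakeup with the default 4-bit
+  *        address length:
+  * @code
+  *   if (HAL_MultiProcessor_Init(&huart3, 0x05U, UART_WAKEUPMETHOD_ADDRESSMARK) != HAL_OK)
+  *   {
+  *     Error_Handler();
+  *   }
+  * @endcode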
+ * @retval HAL status + */ +HAL_StatusTypeDef HAL_MultiProcessor_Init(UART_HandleTypeDef *huart, uint8_t Address, uint32_t WakeUpMethod) +{ + /* Check the UART handle allocation */ + if (huart == NULL) + { + return HAL_ERROR; + } + + /* Check the wake up method parameter */ + assert_param(IS_UART_WAKEUPMETHOD(WakeUpMethod)); + + if (huart->gState == HAL_UART_STATE_RESET) + { + /* Allocate lock resource and initialize it */ + huart->Lock = HAL_UNLOCKED; + +#if (USE_HAL_UART_REGISTER_CALLBACKS == 1) + UART_InitCallbacksToDefault(huart); + + if (huart->MspInitCallback == NULL) + { + huart->MspInitCallback = HAL_UART_MspInit; + } + + /* Init the low level hardware */ + huart->MspInitCallback(huart); +#else + /* Init the low level hardware : GPIO, CLOCK */ + HAL_UART_MspInit(huart); +#endif /* (USE_HAL_UART_REGISTER_CALLBACKS) */ + } + + huart->gState = HAL_UART_STATE_BUSY; + + __HAL_UART_DISABLE(huart); + + /* Set the UART Communication parameters */ + if (UART_SetConfig(huart) == HAL_ERROR) + { + return HAL_ERROR; + } + + if (huart->AdvancedInit.AdvFeatureInit != UART_ADVFEATURE_NO_INIT) + { + UART_AdvFeatureConfig(huart); + } + + /* In multiprocessor mode, the following bits must be kept cleared: + - LINEN and CLKEN bits in the USART_CR2 register, + - SCEN, HDSEL and IREN bits in the USART_CR3 register. */ + CLEAR_BIT(huart->Instance->CR2, (USART_CR2_LINEN | USART_CR2_CLKEN)); + CLEAR_BIT(huart->Instance->CR3, (USART_CR3_SCEN | USART_CR3_HDSEL | USART_CR3_IREN)); + + if (WakeUpMethod == UART_WAKEUPMETHOD_ADDRESSMARK) + { + /* If address mark wake up method is chosen, set the USART address node */ + MODIFY_REG(huart->Instance->CR2, USART_CR2_ADD, ((uint32_t)Address << UART_CR2_ADDRESS_LSB_POS)); + } + + /* Set the wake up method by setting the WAKE bit in the CR1 register */ + MODIFY_REG(huart->Instance->CR1, USART_CR1_WAKE, WakeUpMethod); + + __HAL_UART_ENABLE(huart); + + /* TEACK and/or REACK to check before moving huart->gState and huart->RxState to Ready */ + return (UART_CheckIdleState(huart)); +} + + +/** + * @brief DeInitialize the UART peripheral. + * @param huart UART handle. + * @retval HAL status + */ +HAL_StatusTypeDef HAL_UART_DeInit(UART_HandleTypeDef *huart) +{ + /* Check the UART handle allocation */ + if (huart == NULL) + { + return HAL_ERROR; + } + + /* Check the parameters */ + assert_param(IS_UART_INSTANCE(huart->Instance)); + + huart->gState = HAL_UART_STATE_BUSY; + + __HAL_UART_DISABLE(huart); + + huart->Instance->CR1 = 0x0U; + huart->Instance->CR2 = 0x0U; + huart->Instance->CR3 = 0x0U; + +#if (USE_HAL_UART_REGISTER_CALLBACKS == 1) + if (huart->MspDeInitCallback == NULL) + { + huart->MspDeInitCallback = HAL_UART_MspDeInit; + } + /* DeInit the low level hardware */ + huart->MspDeInitCallback(huart); +#else + /* DeInit the low level hardware */ + HAL_UART_MspDeInit(huart); +#endif /* (USE_HAL_UART_REGISTER_CALLBACKS) */ + + huart->ErrorCode = HAL_UART_ERROR_NONE; + huart->gState = HAL_UART_STATE_RESET; + huart->RxState = HAL_UART_STATE_RESET; + huart->ReceptionType = HAL_UART_RECEPTION_STANDARD; + + __HAL_UNLOCK(huart); + + return HAL_OK; +} + +/** + * @brief Initialize the UART MSP. + * @param huart UART handle. + * @retval None + */ +__weak void HAL_UART_MspInit(UART_HandleTypeDef *huart) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(huart); + + /* NOTE : This function should not be modified, when the callback is needed, + the HAL_UART_MspInit can be implemented in the user file + */ +} + +/** + * @brief DeInitialize the UART MSP. 
+ * @param huart UART handle. + * @retval None + */ +__weak void HAL_UART_MspDeInit(UART_HandleTypeDef *huart) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(huart); + + /* NOTE : This function should not be modified, when the callback is needed, + the HAL_UART_MspDeInit can be implemented in the user file + */ +} + +#if (USE_HAL_UART_REGISTER_CALLBACKS == 1) +/** + * @brief Register a User UART Callback + * To be used instead of the weak predefined callback + * @param huart uart handle + * @param CallbackID ID of the callback to be registered + * This parameter can be one of the following values: + * @arg @ref HAL_UART_TX_HALFCOMPLETE_CB_ID Tx Half Complete Callback ID + * @arg @ref HAL_UART_TX_COMPLETE_CB_ID Tx Complete Callback ID + * @arg @ref HAL_UART_RX_HALFCOMPLETE_CB_ID Rx Half Complete Callback ID + * @arg @ref HAL_UART_RX_COMPLETE_CB_ID Rx Complete Callback ID + * @arg @ref HAL_UART_ERROR_CB_ID Error Callback ID + * @arg @ref HAL_UART_ABORT_COMPLETE_CB_ID Abort Complete Callback ID + * @arg @ref HAL_UART_ABORT_TRANSMIT_COMPLETE_CB_ID Abort Transmit Complete Callback ID + * @arg @ref HAL_UART_ABORT_RECEIVE_COMPLETE_CB_ID Abort Receive Complete Callback ID + * @arg @ref HAL_UART_WAKEUP_CB_ID Wakeup Callback ID + * @arg @ref HAL_UART_MSPINIT_CB_ID MspInit Callback ID + * @arg @ref HAL_UART_MSPDEINIT_CB_ID MspDeInit Callback ID + * @param pCallback pointer to the Callback function + * @retval HAL status + */ +HAL_StatusTypeDef HAL_UART_RegisterCallback(UART_HandleTypeDef *huart, HAL_UART_CallbackIDTypeDef CallbackID, + pUART_CallbackTypeDef pCallback) +{ + HAL_StatusTypeDef status = HAL_OK; + + if (pCallback == NULL) + { + huart->ErrorCode |= HAL_UART_ERROR_INVALID_CALLBACK; + + return HAL_ERROR; + } + + __HAL_LOCK(huart); + + if (huart->gState == HAL_UART_STATE_READY) + { + switch (CallbackID) + { + case HAL_UART_TX_HALFCOMPLETE_CB_ID : + huart->TxHalfCpltCallback = pCallback; + break; + + case HAL_UART_TX_COMPLETE_CB_ID : + huart->TxCpltCallback = pCallback; + break; + + case HAL_UART_RX_HALFCOMPLETE_CB_ID : + huart->RxHalfCpltCallback = pCallback; + break; + + case HAL_UART_RX_COMPLETE_CB_ID : + huart->RxCpltCallback = pCallback; + break; + + case HAL_UART_ERROR_CB_ID : + huart->ErrorCallback = pCallback; + break; + + case HAL_UART_ABORT_COMPLETE_CB_ID : + huart->AbortCpltCallback = pCallback; + break; + + case HAL_UART_ABORT_TRANSMIT_COMPLETE_CB_ID : + huart->AbortTransmitCpltCallback = pCallback; + break; + + case HAL_UART_ABORT_RECEIVE_COMPLETE_CB_ID : + huart->AbortReceiveCpltCallback = pCallback; + break; + +#if defined(USART_CR1_UESM) +#if defined(USART_CR3_WUFIE) + case HAL_UART_WAKEUP_CB_ID : + huart->WakeupCallback = pCallback; + break; + +#endif /* USART_CR3_WUFIE */ +#endif /* USART_CR1_UESM */ + + case HAL_UART_MSPINIT_CB_ID : + huart->MspInitCallback = pCallback; + break; + + case HAL_UART_MSPDEINIT_CB_ID : + huart->MspDeInitCallback = pCallback; + break; + + default : + huart->ErrorCode |= HAL_UART_ERROR_INVALID_CALLBACK; + + status = HAL_ERROR; + break; + } + } + else if (huart->gState == HAL_UART_STATE_RESET) + { + switch (CallbackID) + { + case HAL_UART_MSPINIT_CB_ID : + huart->MspInitCallback = pCallback; + break; + + case HAL_UART_MSPDEINIT_CB_ID : + huart->MspDeInitCallback = pCallback; + break; + + default : + huart->ErrorCode |= HAL_UART_ERROR_INVALID_CALLBACK; + + status = HAL_ERROR; + break; + } + } + else + { + huart->ErrorCode |= HAL_UART_ERROR_INVALID_CALLBACK; + + status = HAL_ERROR; + } + + __HAL_UNLOCK(huart); + + return status; +} 
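+/* Illustrative registration sketch (not part of the original driver): with
+   USE_HAL_UART_REGISTER_CALLBACKS set to 1, an application can hook its own
+   Tx-complete handler once the driver is in READY state. The names "huart3"
+   and "APP_UART_TxCpltCallback" are assumptions.
+
+     void APP_UART_TxCpltCallback(UART_HandleTypeDef *huart)
+     {
+       (void)huart;                       // e.g. release a semaphore here
+     }
+
+     // after HAL_UART_Init(&huart3):
+     if (HAL_UART_RegisterCallback(&huart3, HAL_UART_TX_COMPLETE_CB_ID,
+                                   APP_UART_TxCpltCallback) != HAL_OK)
+     {
+       Error_Handler();                   // rejected: wrong state or NULL pointer
+     }
+*/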
+ +/** + * @brief Unregister an UART Callback + * UART callaback is redirected to the weak predefined callback + * @param huart uart handle + * @param CallbackID ID of the callback to be unregistered + * This parameter can be one of the following values: + * @arg @ref HAL_UART_TX_HALFCOMPLETE_CB_ID Tx Half Complete Callback ID + * @arg @ref HAL_UART_TX_COMPLETE_CB_ID Tx Complete Callback ID + * @arg @ref HAL_UART_RX_HALFCOMPLETE_CB_ID Rx Half Complete Callback ID + * @arg @ref HAL_UART_RX_COMPLETE_CB_ID Rx Complete Callback ID + * @arg @ref HAL_UART_ERROR_CB_ID Error Callback ID + * @arg @ref HAL_UART_ABORT_COMPLETE_CB_ID Abort Complete Callback ID + * @arg @ref HAL_UART_ABORT_TRANSMIT_COMPLETE_CB_ID Abort Transmit Complete Callback ID + * @arg @ref HAL_UART_ABORT_RECEIVE_COMPLETE_CB_ID Abort Receive Complete Callback ID + * @arg @ref HAL_UART_WAKEUP_CB_ID Wakeup Callback ID + * @arg @ref HAL_UART_MSPINIT_CB_ID MspInit Callback ID + * @arg @ref HAL_UART_MSPDEINIT_CB_ID MspDeInit Callback ID + * @retval HAL status + */ +HAL_StatusTypeDef HAL_UART_UnRegisterCallback(UART_HandleTypeDef *huart, HAL_UART_CallbackIDTypeDef CallbackID) +{ + HAL_StatusTypeDef status = HAL_OK; + + __HAL_LOCK(huart); + + if (HAL_UART_STATE_READY == huart->gState) + { + switch (CallbackID) + { + case HAL_UART_TX_HALFCOMPLETE_CB_ID : + huart->TxHalfCpltCallback = HAL_UART_TxHalfCpltCallback; /* Legacy weak TxHalfCpltCallback */ + break; + + case HAL_UART_TX_COMPLETE_CB_ID : + huart->TxCpltCallback = HAL_UART_TxCpltCallback; /* Legacy weak TxCpltCallback */ + break; + + case HAL_UART_RX_HALFCOMPLETE_CB_ID : + huart->RxHalfCpltCallback = HAL_UART_RxHalfCpltCallback; /* Legacy weak RxHalfCpltCallback */ + break; + + case HAL_UART_RX_COMPLETE_CB_ID : + huart->RxCpltCallback = HAL_UART_RxCpltCallback; /* Legacy weak RxCpltCallback */ + break; + + case HAL_UART_ERROR_CB_ID : + huart->ErrorCallback = HAL_UART_ErrorCallback; /* Legacy weak ErrorCallback */ + break; + + case HAL_UART_ABORT_COMPLETE_CB_ID : + huart->AbortCpltCallback = HAL_UART_AbortCpltCallback; /* Legacy weak AbortCpltCallback */ + break; + + case HAL_UART_ABORT_TRANSMIT_COMPLETE_CB_ID : + huart->AbortTransmitCpltCallback = HAL_UART_AbortTransmitCpltCallback; /* Legacy weak + AbortTransmitCpltCallback */ + break; + + case HAL_UART_ABORT_RECEIVE_COMPLETE_CB_ID : + huart->AbortReceiveCpltCallback = HAL_UART_AbortReceiveCpltCallback; /* Legacy weak + AbortReceiveCpltCallback */ + break; + +#if defined(USART_CR1_UESM) +#if defined(USART_CR3_WUFIE) + case HAL_UART_WAKEUP_CB_ID : + huart->WakeupCallback = HAL_UARTEx_WakeupCallback; /* Legacy weak WakeupCallback */ + break; + +#endif /* USART_CR3_WUFIE */ +#endif /* USART_CR1_UESM */ + case HAL_UART_MSPINIT_CB_ID : + huart->MspInitCallback = HAL_UART_MspInit; /* Legacy weak MspInitCallback */ + break; + + case HAL_UART_MSPDEINIT_CB_ID : + huart->MspDeInitCallback = HAL_UART_MspDeInit; /* Legacy weak MspDeInitCallback */ + break; + + default : + huart->ErrorCode |= HAL_UART_ERROR_INVALID_CALLBACK; + + status = HAL_ERROR; + break; + } + } + else if (HAL_UART_STATE_RESET == huart->gState) + { + switch (CallbackID) + { + case HAL_UART_MSPINIT_CB_ID : + huart->MspInitCallback = HAL_UART_MspInit; + break; + + case HAL_UART_MSPDEINIT_CB_ID : + huart->MspDeInitCallback = HAL_UART_MspDeInit; + break; + + default : + huart->ErrorCode |= HAL_UART_ERROR_INVALID_CALLBACK; + + status = HAL_ERROR; + break; + } + } + else + { + huart->ErrorCode |= HAL_UART_ERROR_INVALID_CALLBACK; + + status = HAL_ERROR; + } + + __HAL_UNLOCK(huart); 
+ + return status; +} + +/** + * @brief Register a User UART Rx Event Callback + * To be used instead of the weak predefined callback + * @param huart Uart handle + * @param pCallback Pointer to the Rx Event Callback function + * @retval HAL status + */ +HAL_StatusTypeDef HAL_UART_RegisterRxEventCallback(UART_HandleTypeDef *huart, pUART_RxEventCallbackTypeDef pCallback) +{ + HAL_StatusTypeDef status = HAL_OK; + + if (pCallback == NULL) + { + huart->ErrorCode |= HAL_UART_ERROR_INVALID_CALLBACK; + + return HAL_ERROR; + } + + /* Process locked */ + __HAL_LOCK(huart); + + if (huart->gState == HAL_UART_STATE_READY) + { + huart->RxEventCallback = pCallback; + } + else + { + huart->ErrorCode |= HAL_UART_ERROR_INVALID_CALLBACK; + + status = HAL_ERROR; + } + + /* Release Lock */ + __HAL_UNLOCK(huart); + + return status; +} + +/** + * @brief UnRegister the UART Rx Event Callback + * UART Rx Event Callback is redirected to the weak HAL_UARTEx_RxEventCallback() predefined callback + * @param huart Uart handle + * @retval HAL status + */ +HAL_StatusTypeDef HAL_UART_UnRegisterRxEventCallback(UART_HandleTypeDef *huart) +{ + HAL_StatusTypeDef status = HAL_OK; + + /* Process locked */ + __HAL_LOCK(huart); + + if (huart->gState == HAL_UART_STATE_READY) + { + huart->RxEventCallback = HAL_UARTEx_RxEventCallback; /* Legacy weak UART Rx Event Callback */ + } + else + { + huart->ErrorCode |= HAL_UART_ERROR_INVALID_CALLBACK; + + status = HAL_ERROR; + } + + /* Release Lock */ + __HAL_UNLOCK(huart); + return status; +} + +#endif /* USE_HAL_UART_REGISTER_CALLBACKS */ + +/** + * @} + */ + +/** @defgroup UART_Exported_Functions_Group2 IO operation functions + * @brief UART Transmit/Receive functions + * +@verbatim + =============================================================================== + ##### IO operation functions ##### + =============================================================================== + This subsection provides a set of functions allowing to manage the UART asynchronous + and Half duplex data transfers. + + (#) There are two mode of transfer: + (+) Blocking mode: The communication is performed in polling mode. + The HAL status of all data processing is returned by the same function + after finishing transfer. + (+) Non-Blocking mode: The communication is performed using Interrupts + or DMA, These API's return the HAL status. + The end of the data processing will be indicated through the + dedicated UART IRQ when using Interrupt mode or the DMA IRQ when + using DMA mode. 
+ The HAL_UART_TxCpltCallback(), HAL_UART_RxCpltCallback() user callbacks + will be executed respectively at the end of the transmit or Receive process + The HAL_UART_ErrorCallback()user callback will be executed when a communication error is detected + + (#) Blocking mode API's are : + (+) HAL_UART_Transmit() + (+) HAL_UART_Receive() + + (#) Non-Blocking mode API's with Interrupt are : + (+) HAL_UART_Transmit_IT() + (+) HAL_UART_Receive_IT() + (+) HAL_UART_IRQHandler() + + (#) Non-Blocking mode API's with DMA are : + (+) HAL_UART_Transmit_DMA() + (+) HAL_UART_Receive_DMA() + (+) HAL_UART_DMAPause() + (+) HAL_UART_DMAResume() + (+) HAL_UART_DMAStop() + + (#) A set of Transfer Complete Callbacks are provided in Non_Blocking mode: + (+) HAL_UART_TxHalfCpltCallback() + (+) HAL_UART_TxCpltCallback() + (+) HAL_UART_RxHalfCpltCallback() + (+) HAL_UART_RxCpltCallback() + (+) HAL_UART_ErrorCallback() + + (#) Non-Blocking mode transfers could be aborted using Abort API's : + (+) HAL_UART_Abort() + (+) HAL_UART_AbortTransmit() + (+) HAL_UART_AbortReceive() + (+) HAL_UART_Abort_IT() + (+) HAL_UART_AbortTransmit_IT() + (+) HAL_UART_AbortReceive_IT() + + (#) For Abort services based on interrupts (HAL_UART_Abortxxx_IT), a set of Abort Complete Callbacks are provided: + (+) HAL_UART_AbortCpltCallback() + (+) HAL_UART_AbortTransmitCpltCallback() + (+) HAL_UART_AbortReceiveCpltCallback() + + (#) A Rx Event Reception Callback (Rx event notification) is available for Non_Blocking modes of enhanced + reception services: + (+) HAL_UARTEx_RxEventCallback() +#if defined(USART_CR1_UESM) + + (#) Wakeup from Stop mode Callback: + (+) HAL_UARTEx_WakeupCallback() +#endif + + (#) In Non-Blocking mode transfers, possible errors are split into 2 categories. + Errors are handled as follows : + (+) Error is considered as Recoverable and non blocking : Transfer could go till end, but error severity is + to be evaluated by user : this concerns Frame Error, Parity Error or Noise Error + in Interrupt mode reception . + Received character is then retrieved and stored in Rx buffer, Error code is set to allow user + to identify error type, and HAL_UART_ErrorCallback() user callback is executed. + Transfer is kept ongoing on UART side. + If user wants to abort it, Abort services should be called by user. + (+) Error is considered as Blocking : Transfer could not be completed properly and is aborted. + This concerns Overrun Error In Interrupt mode reception and all errors in DMA mode. + Error code is set to allow user to identify error type, and HAL_UART_ErrorCallback() + user callback is executed. + + -@- In the Half duplex communication, it is forbidden to run the transmit + and receive process in parallel, the UART state HAL_UART_STATE_BUSY_TX_RX can't be useful. + +@endverbatim + * @{ + */ + +/** + * @brief Send an amount of data in blocking mode. + * @note When UART parity is not enabled (PCE = 0), and Word Length is configured to 9 bits (M1-M0 = 01), + * the sent data is handled as a set of u16. In this case, Size must indicate the number + * of u16 provided through pData. + * @param huart UART handle. + * @param pData Pointer to data buffer (u8 or u16 data elements). + * @param Size Amount of data elements (u8 or u16) to be sent. + * @param Timeout Timeout duration. 
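+  * @note  A minimal blocking-transmit sketch (illustrative; "huart3", the message
+  *        buffer and the 100 ms timeout are assumptions):
+  * @code
+  *   uint8_t msg[] = "hello\r\n";
+  *   if (HAL_UART_Transmit(&huart3, msg, (uint16_t)(sizeof(msg) - 1U), 100U) != HAL_OK)
+  *   {
+  *     Error_Handler();                  // HAL_BUSY, HAL_ERROR or HAL_TIMEOUT
+  *   }
+  * @endcode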
+ * @retval HAL status + */ +HAL_StatusTypeDef HAL_UART_Transmit(UART_HandleTypeDef *huart, const uint8_t *pData, uint16_t Size, uint32_t Timeout) +{ + const uint8_t *pdata8bits; + const uint16_t *pdata16bits; + uint32_t tickstart; + + /* Check that a Tx process is not already ongoing */ + if (huart->gState == HAL_UART_STATE_READY) + { + if ((pData == NULL) || (Size == 0U)) + { + return HAL_ERROR; + } + + __HAL_LOCK(huart); + + huart->ErrorCode = HAL_UART_ERROR_NONE; + huart->gState = HAL_UART_STATE_BUSY_TX; + + /* Init tickstart for timeout management */ + tickstart = HAL_GetTick(); + + huart->TxXferSize = Size; + huart->TxXferCount = Size; + + /* In case of 9bits/No Parity transfer, pData needs to be handled as a uint16_t pointer */ + if ((huart->Init.WordLength == UART_WORDLENGTH_9B) && (huart->Init.Parity == UART_PARITY_NONE)) + { + pdata8bits = NULL; + pdata16bits = (const uint16_t *) pData; + } + else + { + pdata8bits = pData; + pdata16bits = NULL; + } + + __HAL_UNLOCK(huart); + + while (huart->TxXferCount > 0U) + { + if (UART_WaitOnFlagUntilTimeout(huart, UART_FLAG_TXE, RESET, tickstart, Timeout) != HAL_OK) + { + return HAL_TIMEOUT; + } + if (pdata8bits == NULL) + { + huart->Instance->TDR = (uint16_t)(*pdata16bits & 0x01FFU); + pdata16bits++; + } + else + { + huart->Instance->TDR = (uint8_t)(*pdata8bits & 0xFFU); + pdata8bits++; + } + huart->TxXferCount--; + } + + if (UART_WaitOnFlagUntilTimeout(huart, UART_FLAG_TC, RESET, tickstart, Timeout) != HAL_OK) + { + return HAL_TIMEOUT; + } + + /* At end of Tx process, restore huart->gState to Ready */ + huart->gState = HAL_UART_STATE_READY; + + return HAL_OK; + } + else + { + return HAL_BUSY; + } +} + +/** + * @brief Receive an amount of data in blocking mode. + * @note When UART parity is not enabled (PCE = 0), and Word Length is configured to 9 bits (M1-M0 = 01), + * the received data is handled as a set of u16. In this case, Size must indicate the number + * of u16 available through pData. + * @param huart UART handle. + * @param pData Pointer to data buffer (u8 or u16 data elements). + * @param Size Amount of data elements (u8 or u16) to be received. + * @param Timeout Timeout duration. 
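+  * @note  A matching blocking-receive sketch (illustrative; buffer size, handle
+  *        name and timeout are assumptions):
+  * @code
+  *   uint8_t rx[16];
+  *   if (HAL_UART_Receive(&huart3, rx, (uint16_t)sizeof(rx), 1000U) == HAL_OK)
+  *   {
+  *     // all 16 bytes arrived within 1 s
+  *   }
+  * @endcode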
+ * @retval HAL status + */ +HAL_StatusTypeDef HAL_UART_Receive(UART_HandleTypeDef *huart, uint8_t *pData, uint16_t Size, uint32_t Timeout) +{ + uint8_t *pdata8bits; + uint16_t *pdata16bits; + uint16_t uhMask; + uint32_t tickstart; + + /* Check that a Rx process is not already ongoing */ + if (huart->RxState == HAL_UART_STATE_READY) + { + if ((pData == NULL) || (Size == 0U)) + { + return HAL_ERROR; + } + + __HAL_LOCK(huart); + + huart->ErrorCode = HAL_UART_ERROR_NONE; + huart->RxState = HAL_UART_STATE_BUSY_RX; + huart->ReceptionType = HAL_UART_RECEPTION_STANDARD; + + /* Init tickstart for timeout management */ + tickstart = HAL_GetTick(); + + huart->RxXferSize = Size; + huart->RxXferCount = Size; + + /* Computation of UART mask to apply to RDR register */ + UART_MASK_COMPUTATION(huart); + uhMask = huart->Mask; + + /* In case of 9bits/No Parity transfer, pRxData needs to be handled as a uint16_t pointer */ + if ((huart->Init.WordLength == UART_WORDLENGTH_9B) && (huart->Init.Parity == UART_PARITY_NONE)) + { + pdata8bits = NULL; + pdata16bits = (uint16_t *) pData; + } + else + { + pdata8bits = pData; + pdata16bits = NULL; + } + + __HAL_UNLOCK(huart); + + /* as long as data have to be received */ + while (huart->RxXferCount > 0U) + { + if (UART_WaitOnFlagUntilTimeout(huart, UART_FLAG_RXNE, RESET, tickstart, Timeout) != HAL_OK) + { + return HAL_TIMEOUT; + } + if (pdata8bits == NULL) + { + *pdata16bits = (uint16_t)(huart->Instance->RDR & uhMask); + pdata16bits++; + } + else + { + *pdata8bits = (uint8_t)(huart->Instance->RDR & (uint8_t)uhMask); + pdata8bits++; + } + huart->RxXferCount--; + } + + /* At end of Rx process, restore huart->RxState to Ready */ + huart->RxState = HAL_UART_STATE_READY; + + return HAL_OK; + } + else + { + return HAL_BUSY; + } +} + +/** + * @brief Send an amount of data in interrupt mode. + * @note When UART parity is not enabled (PCE = 0), and Word Length is configured to 9 bits (M1-M0 = 01), + * the sent data is handled as a set of u16. In this case, Size must indicate the number + * of u16 provided through pData. + * @param huart UART handle. + * @param pData Pointer to data buffer (u8 or u16 data elements). + * @param Size Amount of data elements (u8 or u16) to be sent. + * @retval HAL status + */ +HAL_StatusTypeDef HAL_UART_Transmit_IT(UART_HandleTypeDef *huart, const uint8_t *pData, uint16_t Size) +{ + /* Check that a Tx process is not already ongoing */ + if (huart->gState == HAL_UART_STATE_READY) + { + if ((pData == NULL) || (Size == 0U)) + { + return HAL_ERROR; + } + + __HAL_LOCK(huart); + + huart->pTxBuffPtr = pData; + huart->TxXferSize = Size; + huart->TxXferCount = Size; + huart->TxISR = NULL; + + huart->ErrorCode = HAL_UART_ERROR_NONE; + huart->gState = HAL_UART_STATE_BUSY_TX; + + /* Set the Tx ISR function pointer according to the data word length */ + if ((huart->Init.WordLength == UART_WORDLENGTH_9B) && (huart->Init.Parity == UART_PARITY_NONE)) + { + huart->TxISR = UART_TxISR_16BIT; + } + else + { + huart->TxISR = UART_TxISR_8BIT; + } + + __HAL_UNLOCK(huart); + + /* Enable the Transmit Data Register Empty interrupt */ + ATOMIC_SET_BIT(huart->Instance->CR1, USART_CR1_TXEIE); + + return HAL_OK; + } + else + { + return HAL_BUSY; + } +} + +/** + * @brief Receive an amount of data in interrupt mode. + * @note When UART parity is not enabled (PCE = 0), and Word Length is configured to 9 bits (M1-M0 = 01), + * the received data is handled as a set of u16. In this case, Size must indicate the number + * of u16 available through pData. + * @param huart UART handle. 
+ * @param pData Pointer to data buffer (u8 or u16 data elements). + * @param Size Amount of data elements (u8 or u16) to be received. + * @retval HAL status + */ +HAL_StatusTypeDef HAL_UART_Receive_IT(UART_HandleTypeDef *huart, uint8_t *pData, uint16_t Size) +{ + /* Check that a Rx process is not already ongoing */ + if (huart->RxState == HAL_UART_STATE_READY) + { + if ((pData == NULL) || (Size == 0U)) + { + return HAL_ERROR; + } + + __HAL_LOCK(huart); + + /* Set Reception type to Standard reception */ + huart->ReceptionType = HAL_UART_RECEPTION_STANDARD; + + /* Check that USART RTOEN bit is set */ + if (READ_BIT(huart->Instance->CR2, USART_CR2_RTOEN) != 0U) + { + /* Enable the UART Receiver Timeout Interrupt */ + ATOMIC_SET_BIT(huart->Instance->CR1, USART_CR1_RTOIE); + } + + return (UART_Start_Receive_IT(huart, pData, Size)); + } + else + { + return HAL_BUSY; + } +} + +/** + * @brief Send an amount of data in DMA mode. + * @note When UART parity is not enabled (PCE = 0), and Word Length is configured to 9 bits (M1-M0 = 01), + * the sent data is handled as a set of u16. In this case, Size must indicate the number + * of u16 provided through pData. + * @param huart UART handle. + * @param pData Pointer to data buffer (u8 or u16 data elements). + * @param Size Amount of data elements (u8 or u16) to be sent. + * @retval HAL status + */ +HAL_StatusTypeDef HAL_UART_Transmit_DMA(UART_HandleTypeDef *huart, const uint8_t *pData, uint16_t Size) +{ + /* Check that a Tx process is not already ongoing */ + if (huart->gState == HAL_UART_STATE_READY) + { + if ((pData == NULL) || (Size == 0U)) + { + return HAL_ERROR; + } + + __HAL_LOCK(huart); + + huart->pTxBuffPtr = pData; + huart->TxXferSize = Size; + huart->TxXferCount = Size; + + huart->ErrorCode = HAL_UART_ERROR_NONE; + huart->gState = HAL_UART_STATE_BUSY_TX; + + if (huart->hdmatx != NULL) + { + /* Set the UART DMA transfer complete callback */ + huart->hdmatx->XferCpltCallback = UART_DMATransmitCplt; + + /* Set the UART DMA Half transfer complete callback */ + huart->hdmatx->XferHalfCpltCallback = UART_DMATxHalfCplt; + + /* Set the DMA error callback */ + huart->hdmatx->XferErrorCallback = UART_DMAError; + + /* Set the DMA abort callback */ + huart->hdmatx->XferAbortCallback = NULL; + + /* Enable the UART transmit DMA channel */ + if (HAL_DMA_Start_IT(huart->hdmatx, (uint32_t)huart->pTxBuffPtr, (uint32_t)&huart->Instance->TDR, Size) != HAL_OK) + { + /* Set error code to DMA */ + huart->ErrorCode = HAL_UART_ERROR_DMA; + + __HAL_UNLOCK(huart); + + /* Restore huart->gState to ready */ + huart->gState = HAL_UART_STATE_READY; + + return HAL_ERROR; + } + } + /* Clear the TC flag in the ICR register */ + __HAL_UART_CLEAR_FLAG(huart, UART_CLEAR_TCF); + + __HAL_UNLOCK(huart); + + /* Enable the DMA transfer for transmit request by setting the DMAT bit + in the UART CR3 register */ + ATOMIC_SET_BIT(huart->Instance->CR3, USART_CR3_DMAT); + + return HAL_OK; + } + else + { + return HAL_BUSY; + } +} + +/** + * @brief Receive an amount of data in DMA mode. + * @note When the UART parity is enabled (PCE = 1), the received data contain + * the parity bit (MSB position). + * @note When UART parity is not enabled (PCE = 0), and Word Length is configured to 9 bits (M1-M0 = 01), + * the received data is handled as a set of u16. In this case, Size must indicate the number + * of u16 available through pData. + * @param huart UART handle. + * @param pData Pointer to data buffer (u8 or u16 data elements). + * @param Size Amount of data elements (u8 or u16) to be received. 
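+  * @note  A DMA reception sketch (illustrative; the buffer size and handle name
+  *        are assumptions, and a DMA stream must already have been linked to
+  *        huart->hdmarx in HAL_UART_MspInit()):
+  * @code
+  *   static uint8_t dma_rx[64];
+  *   if (HAL_UART_Receive_DMA(&huart3, dma_rx, (uint16_t)sizeof(dma_rx)) != HAL_OK)
+  *   {
+  *     Error_Handler();
+  *   }
+  *   // HAL_UART_RxHalfCpltCallback() and HAL_UART_RxCpltCallback() report progress
+  * @endcode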
+ * @retval HAL status + */ +HAL_StatusTypeDef HAL_UART_Receive_DMA(UART_HandleTypeDef *huart, uint8_t *pData, uint16_t Size) +{ + /* Check that a Rx process is not already ongoing */ + if (huart->RxState == HAL_UART_STATE_READY) + { + if ((pData == NULL) || (Size == 0U)) + { + return HAL_ERROR; + } + + __HAL_LOCK(huart); + + /* Set Reception type to Standard reception */ + huart->ReceptionType = HAL_UART_RECEPTION_STANDARD; + + /* Check that USART RTOEN bit is set */ + if (READ_BIT(huart->Instance->CR2, USART_CR2_RTOEN) != 0U) + { + /* Enable the UART Receiver Timeout Interrupt */ + ATOMIC_SET_BIT(huart->Instance->CR1, USART_CR1_RTOIE); + } + + return (UART_Start_Receive_DMA(huart, pData, Size)); + } + else + { + return HAL_BUSY; + } +} + +/** + * @brief Pause the DMA Transfer. + * @param huart UART handle. + * @retval HAL status + */ +HAL_StatusTypeDef HAL_UART_DMAPause(UART_HandleTypeDef *huart) +{ + const HAL_UART_StateTypeDef gstate = huart->gState; + const HAL_UART_StateTypeDef rxstate = huart->RxState; + + __HAL_LOCK(huart); + + if ((HAL_IS_BIT_SET(huart->Instance->CR3, USART_CR3_DMAT)) && + (gstate == HAL_UART_STATE_BUSY_TX)) + { + /* Disable the UART DMA Tx request */ + ATOMIC_CLEAR_BIT(huart->Instance->CR3, USART_CR3_DMAT); + } + if ((HAL_IS_BIT_SET(huart->Instance->CR3, USART_CR3_DMAR)) && + (rxstate == HAL_UART_STATE_BUSY_RX)) + { + /* Disable PE and ERR (Frame error, noise error, overrun error) interrupts */ + ATOMIC_CLEAR_BIT(huart->Instance->CR1, USART_CR1_PEIE); + ATOMIC_CLEAR_BIT(huart->Instance->CR3, USART_CR3_EIE); + + /* Disable the UART DMA Rx request */ + ATOMIC_CLEAR_BIT(huart->Instance->CR3, USART_CR3_DMAR); + } + + __HAL_UNLOCK(huart); + + return HAL_OK; +} + +/** + * @brief Resume the DMA Transfer. + * @param huart UART handle. + * @retval HAL status + */ +HAL_StatusTypeDef HAL_UART_DMAResume(UART_HandleTypeDef *huart) +{ + __HAL_LOCK(huart); + + if (huart->gState == HAL_UART_STATE_BUSY_TX) + { + /* Enable the UART DMA Tx request */ + ATOMIC_SET_BIT(huart->Instance->CR3, USART_CR3_DMAT); + } + if (huart->RxState == HAL_UART_STATE_BUSY_RX) + { + /* Clear the Overrun flag before resuming the Rx transfer */ + __HAL_UART_CLEAR_FLAG(huart, UART_CLEAR_OREF); + + /* Re-enable PE and ERR (Frame error, noise error, overrun error) interrupts */ + if (huart->Init.Parity != UART_PARITY_NONE) + { + ATOMIC_SET_BIT(huart->Instance->CR1, USART_CR1_PEIE); + } + ATOMIC_SET_BIT(huart->Instance->CR3, USART_CR3_EIE); + + /* Enable the UART DMA Rx request */ + ATOMIC_SET_BIT(huart->Instance->CR3, USART_CR3_DMAR); + } + + __HAL_UNLOCK(huart); + + return HAL_OK; +} + +/** + * @brief Stop the DMA Transfer. + * @param huart UART handle. + * @retval HAL status + */ +HAL_StatusTypeDef HAL_UART_DMAStop(UART_HandleTypeDef *huart) +{ + /* The Lock is not implemented on this API to allow the user application + to call the HAL UART API under callbacks HAL_UART_TxCpltCallback() / HAL_UART_RxCpltCallback() / + HAL_UART_TxHalfCpltCallback / HAL_UART_RxHalfCpltCallback: + indeed, when HAL_DMA_Abort() API is called, the DMA TX/RX Transfer or Half Transfer complete + interrupt is generated if the DMA transfer interruption occurs at the middle or at the end of + the stream and the corresponding call back is executed. 
*/ + + const HAL_UART_StateTypeDef gstate = huart->gState; + const HAL_UART_StateTypeDef rxstate = huart->RxState; + + /* Stop UART DMA Tx request if ongoing */ + if ((HAL_IS_BIT_SET(huart->Instance->CR3, USART_CR3_DMAT)) && + (gstate == HAL_UART_STATE_BUSY_TX)) + { + ATOMIC_CLEAR_BIT(huart->Instance->CR3, USART_CR3_DMAT); + + /* Abort the UART DMA Tx channel */ + if (huart->hdmatx != NULL) + { + if (HAL_DMA_Abort(huart->hdmatx) != HAL_OK) + { + if (HAL_DMA_GetError(huart->hdmatx) == HAL_DMA_ERROR_TIMEOUT) + { + /* Set error code to DMA */ + huart->ErrorCode = HAL_UART_ERROR_DMA; + + return HAL_TIMEOUT; + } + } + } + + UART_EndTxTransfer(huart); + } + + /* Stop UART DMA Rx request if ongoing */ + if ((HAL_IS_BIT_SET(huart->Instance->CR3, USART_CR3_DMAR)) && + (rxstate == HAL_UART_STATE_BUSY_RX)) + { + ATOMIC_CLEAR_BIT(huart->Instance->CR3, USART_CR3_DMAR); + + /* Abort the UART DMA Rx channel */ + if (huart->hdmarx != NULL) + { + if (HAL_DMA_Abort(huart->hdmarx) != HAL_OK) + { + if (HAL_DMA_GetError(huart->hdmarx) == HAL_DMA_ERROR_TIMEOUT) + { + /* Set error code to DMA */ + huart->ErrorCode = HAL_UART_ERROR_DMA; + + return HAL_TIMEOUT; + } + } + } + + UART_EndRxTransfer(huart); + } + + return HAL_OK; +} + +/** + * @brief Abort ongoing transfers (blocking mode). + * @param huart UART handle. + * @note This procedure could be used for aborting any ongoing transfer started in Interrupt or DMA mode. + * This procedure performs following operations : + * - Disable UART Interrupts (Tx and Rx) + * - Disable the DMA transfer in the peripheral register (if enabled) + * - Abort DMA transfer by calling HAL_DMA_Abort (in case of transfer in DMA mode) + * - Set handle State to READY + * @note This procedure is executed in blocking mode : when exiting function, Abort is considered as completed. + * @retval HAL status + */ +HAL_StatusTypeDef HAL_UART_Abort(UART_HandleTypeDef *huart) +{ + /* Disable TXEIE, TCIE, RXNE, PE and ERR (Frame error, noise error, overrun error) interrupts */ + ATOMIC_CLEAR_BIT(huart->Instance->CR1, (USART_CR1_RXNEIE | USART_CR1_PEIE | USART_CR1_TXEIE | USART_CR1_TCIE)); + ATOMIC_CLEAR_BIT(huart->Instance->CR3, USART_CR3_EIE); + + /* If Reception till IDLE event was ongoing, disable IDLEIE interrupt */ + if (huart->ReceptionType == HAL_UART_RECEPTION_TOIDLE) + { + ATOMIC_CLEAR_BIT(huart->Instance->CR1, (USART_CR1_IDLEIE)); + } + + /* Abort the UART DMA Tx channel if enabled */ + if (HAL_IS_BIT_SET(huart->Instance->CR3, USART_CR3_DMAT)) + { + /* Disable the UART DMA Tx request if enabled */ + ATOMIC_CLEAR_BIT(huart->Instance->CR3, USART_CR3_DMAT); + + /* Abort the UART DMA Tx channel : use blocking DMA Abort API (no callback) */ + if (huart->hdmatx != NULL) + { + /* Set the UART DMA Abort callback to Null. + No call back execution at end of DMA abort procedure */ + huart->hdmatx->XferAbortCallback = NULL; + + if (HAL_DMA_Abort(huart->hdmatx) != HAL_OK) + { + if (HAL_DMA_GetError(huart->hdmatx) == HAL_DMA_ERROR_TIMEOUT) + { + /* Set error code to DMA */ + huart->ErrorCode = HAL_UART_ERROR_DMA; + + return HAL_TIMEOUT; + } + } + } + } + + /* Abort the UART DMA Rx channel if enabled */ + if (HAL_IS_BIT_SET(huart->Instance->CR3, USART_CR3_DMAR)) + { + /* Disable the UART DMA Rx request if enabled */ + ATOMIC_CLEAR_BIT(huart->Instance->CR3, USART_CR3_DMAR); + + /* Abort the UART DMA Rx channel : use blocking DMA Abort API (no callback) */ + if (huart->hdmarx != NULL) + { + /* Set the UART DMA Abort callback to Null. 
+ No call back execution at end of DMA abort procedure */ + huart->hdmarx->XferAbortCallback = NULL; + + if (HAL_DMA_Abort(huart->hdmarx) != HAL_OK) + { + if (HAL_DMA_GetError(huart->hdmarx) == HAL_DMA_ERROR_TIMEOUT) + { + /* Set error code to DMA */ + huart->ErrorCode = HAL_UART_ERROR_DMA; + + return HAL_TIMEOUT; + } + } + } + } + + /* Reset Tx and Rx transfer counters */ + huart->TxXferCount = 0U; + huart->RxXferCount = 0U; + + /* Clear the Error flags in the ICR register */ + __HAL_UART_CLEAR_FLAG(huart, UART_CLEAR_OREF | UART_CLEAR_NEF | UART_CLEAR_PEF | UART_CLEAR_FEF); + + + /* Discard the received data */ + __HAL_UART_SEND_REQ(huart, UART_RXDATA_FLUSH_REQUEST); + + /* Restore huart->gState and huart->RxState to Ready */ + huart->gState = HAL_UART_STATE_READY; + huart->RxState = HAL_UART_STATE_READY; + huart->ReceptionType = HAL_UART_RECEPTION_STANDARD; + + huart->ErrorCode = HAL_UART_ERROR_NONE; + + return HAL_OK; +} + +/** + * @brief Abort ongoing Transmit transfer (blocking mode). + * @param huart UART handle. + * @note This procedure could be used for aborting any ongoing Tx transfer started in Interrupt or DMA mode. + * This procedure performs following operations : + * - Disable UART Interrupts (Tx) + * - Disable the DMA transfer in the peripheral register (if enabled) + * - Abort DMA transfer by calling HAL_DMA_Abort (in case of transfer in DMA mode) + * - Set handle State to READY + * @note This procedure is executed in blocking mode : when exiting function, Abort is considered as completed. + * @retval HAL status + */ +HAL_StatusTypeDef HAL_UART_AbortTransmit(UART_HandleTypeDef *huart) +{ + /* Disable TXEIE and TCIE interrupts */ + ATOMIC_CLEAR_BIT(huart->Instance->CR1, (USART_CR1_TXEIE | USART_CR1_TCIE)); + + /* Abort the UART DMA Tx channel if enabled */ + if (HAL_IS_BIT_SET(huart->Instance->CR3, USART_CR3_DMAT)) + { + /* Disable the UART DMA Tx request if enabled */ + ATOMIC_CLEAR_BIT(huart->Instance->CR3, USART_CR3_DMAT); + + /* Abort the UART DMA Tx channel : use blocking DMA Abort API (no callback) */ + if (huart->hdmatx != NULL) + { + /* Set the UART DMA Abort callback to Null. + No call back execution at end of DMA abort procedure */ + huart->hdmatx->XferAbortCallback = NULL; + + if (HAL_DMA_Abort(huart->hdmatx) != HAL_OK) + { + if (HAL_DMA_GetError(huart->hdmatx) == HAL_DMA_ERROR_TIMEOUT) + { + /* Set error code to DMA */ + huart->ErrorCode = HAL_UART_ERROR_DMA; + + return HAL_TIMEOUT; + } + } + } + } + + /* Reset Tx transfer counter */ + huart->TxXferCount = 0U; + + + /* Restore huart->gState to Ready */ + huart->gState = HAL_UART_STATE_READY; + + return HAL_OK; +} + +/** + * @brief Abort ongoing Receive transfer (blocking mode). + * @param huart UART handle. + * @note This procedure could be used for aborting any ongoing Rx transfer started in Interrupt or DMA mode. + * This procedure performs following operations : + * - Disable UART Interrupts (Rx) + * - Disable the DMA transfer in the peripheral register (if enabled) + * - Abort DMA transfer by calling HAL_DMA_Abort (in case of transfer in DMA mode) + * - Set handle State to READY + * @note This procedure is executed in blocking mode : when exiting function, Abort is considered as completed. 
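+ * @note  Illustrative usage (a sketch; the handle name huart2 is hypothetical):
+ * @code
+ *   if (HAL_UART_AbortReceive(&huart2) == HAL_OK)
+ *   {
+ *     // RxState is back to READY, a new reception can be started
+ *   }
+ * @endcode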
+ * @retval HAL status + */ +HAL_StatusTypeDef HAL_UART_AbortReceive(UART_HandleTypeDef *huart) +{ + /* Disable RXNE, PE and ERR (Frame error, noise error, overrun error) interrupts */ + ATOMIC_CLEAR_BIT(huart->Instance->CR1, (USART_CR1_RXNEIE | USART_CR1_PEIE)); + ATOMIC_CLEAR_BIT(huart->Instance->CR3, USART_CR3_EIE); + + /* If Reception till IDLE event was ongoing, disable IDLEIE interrupt */ + if (huart->ReceptionType == HAL_UART_RECEPTION_TOIDLE) + { + ATOMIC_CLEAR_BIT(huart->Instance->CR1, (USART_CR1_IDLEIE)); + } + + /* Abort the UART DMA Rx channel if enabled */ + if (HAL_IS_BIT_SET(huart->Instance->CR3, USART_CR3_DMAR)) + { + /* Disable the UART DMA Rx request if enabled */ + ATOMIC_CLEAR_BIT(huart->Instance->CR3, USART_CR3_DMAR); + + /* Abort the UART DMA Rx channel : use blocking DMA Abort API (no callback) */ + if (huart->hdmarx != NULL) + { + /* Set the UART DMA Abort callback to Null. + No call back execution at end of DMA abort procedure */ + huart->hdmarx->XferAbortCallback = NULL; + + if (HAL_DMA_Abort(huart->hdmarx) != HAL_OK) + { + if (HAL_DMA_GetError(huart->hdmarx) == HAL_DMA_ERROR_TIMEOUT) + { + /* Set error code to DMA */ + huart->ErrorCode = HAL_UART_ERROR_DMA; + + return HAL_TIMEOUT; + } + } + } + } + + /* Reset Rx transfer counter */ + huart->RxXferCount = 0U; + + /* Clear the Error flags in the ICR register */ + __HAL_UART_CLEAR_FLAG(huart, UART_CLEAR_OREF | UART_CLEAR_NEF | UART_CLEAR_PEF | UART_CLEAR_FEF); + + /* Discard the received data */ + __HAL_UART_SEND_REQ(huart, UART_RXDATA_FLUSH_REQUEST); + + /* Restore huart->RxState to Ready */ + huart->RxState = HAL_UART_STATE_READY; + huart->ReceptionType = HAL_UART_RECEPTION_STANDARD; + + return HAL_OK; +} + +/** + * @brief Abort ongoing transfers (Interrupt mode). + * @param huart UART handle. + * @note This procedure could be used for aborting any ongoing transfer started in Interrupt or DMA mode. + * This procedure performs following operations : + * - Disable UART Interrupts (Tx and Rx) + * - Disable the DMA transfer in the peripheral register (if enabled) + * - Abort DMA transfer by calling HAL_DMA_Abort_IT (in case of transfer in DMA mode) + * - Set handle State to READY + * - At abort completion, call user abort complete callback + * @note This procedure is executed in Interrupt mode, meaning that abort procedure could be + * considered as completed only when user abort complete callback is executed (not when exiting function). + * @retval HAL status + */ +HAL_StatusTypeDef HAL_UART_Abort_IT(UART_HandleTypeDef *huart) +{ + uint32_t abortcplt = 1U; + + /* Disable interrupts */ + ATOMIC_CLEAR_BIT(huart->Instance->CR1, (USART_CR1_RXNEIE | USART_CR1_PEIE | USART_CR1_TXEIE | USART_CR1_TCIE)); + ATOMIC_CLEAR_BIT(huart->Instance->CR3, USART_CR3_EIE); + + /* If Reception till IDLE event was ongoing, disable IDLEIE interrupt */ + if (huart->ReceptionType == HAL_UART_RECEPTION_TOIDLE) + { + ATOMIC_CLEAR_BIT(huart->Instance->CR1, (USART_CR1_IDLEIE)); + } + + /* If DMA Tx and/or DMA Rx Handles are associated to UART Handle, DMA Abort complete callbacks should be initialised + before any call to DMA Abort functions */ + /* DMA Tx Handle is valid */ + if (huart->hdmatx != NULL) + { + /* Set DMA Abort Complete callback if UART DMA Tx request if enabled. 
+ Otherwise, set it to NULL */ + if (HAL_IS_BIT_SET(huart->Instance->CR3, USART_CR3_DMAT)) + { + huart->hdmatx->XferAbortCallback = UART_DMATxAbortCallback; + } + else + { + huart->hdmatx->XferAbortCallback = NULL; + } + } + /* DMA Rx Handle is valid */ + if (huart->hdmarx != NULL) + { + /* Set DMA Abort Complete callback if UART DMA Rx request if enabled. + Otherwise, set it to NULL */ + if (HAL_IS_BIT_SET(huart->Instance->CR3, USART_CR3_DMAR)) + { + huart->hdmarx->XferAbortCallback = UART_DMARxAbortCallback; + } + else + { + huart->hdmarx->XferAbortCallback = NULL; + } + } + + /* Abort the UART DMA Tx channel if enabled */ + if (HAL_IS_BIT_SET(huart->Instance->CR3, USART_CR3_DMAT)) + { + /* Disable DMA Tx at UART level */ + ATOMIC_CLEAR_BIT(huart->Instance->CR3, USART_CR3_DMAT); + + /* Abort the UART DMA Tx channel : use non blocking DMA Abort API (callback) */ + if (huart->hdmatx != NULL) + { + /* UART Tx DMA Abort callback has already been initialised : + will lead to call HAL_UART_AbortCpltCallback() at end of DMA abort procedure */ + + /* Abort DMA TX */ + if (HAL_DMA_Abort_IT(huart->hdmatx) != HAL_OK) + { + huart->hdmatx->XferAbortCallback = NULL; + } + else + { + abortcplt = 0U; + } + } + } + + /* Abort the UART DMA Rx channel if enabled */ + if (HAL_IS_BIT_SET(huart->Instance->CR3, USART_CR3_DMAR)) + { + /* Disable the UART DMA Rx request if enabled */ + ATOMIC_CLEAR_BIT(huart->Instance->CR3, USART_CR3_DMAR); + + /* Abort the UART DMA Rx channel : use non blocking DMA Abort API (callback) */ + if (huart->hdmarx != NULL) + { + /* UART Rx DMA Abort callback has already been initialised : + will lead to call HAL_UART_AbortCpltCallback() at end of DMA abort procedure */ + + /* Abort DMA RX */ + if (HAL_DMA_Abort_IT(huart->hdmarx) != HAL_OK) + { + huart->hdmarx->XferAbortCallback = NULL; + abortcplt = 1U; + } + else + { + abortcplt = 0U; + } + } + } + + /* if no DMA abort complete callback execution is required => call user Abort Complete callback */ + if (abortcplt == 1U) + { + /* Reset Tx and Rx transfer counters */ + huart->TxXferCount = 0U; + huart->RxXferCount = 0U; + + /* Clear ISR function pointers */ + huart->RxISR = NULL; + huart->TxISR = NULL; + + /* Reset errorCode */ + huart->ErrorCode = HAL_UART_ERROR_NONE; + + /* Clear the Error flags in the ICR register */ + __HAL_UART_CLEAR_FLAG(huart, UART_CLEAR_OREF | UART_CLEAR_NEF | UART_CLEAR_PEF | UART_CLEAR_FEF); + + + /* Discard the received data */ + __HAL_UART_SEND_REQ(huart, UART_RXDATA_FLUSH_REQUEST); + + /* Restore huart->gState and huart->RxState to Ready */ + huart->gState = HAL_UART_STATE_READY; + huart->RxState = HAL_UART_STATE_READY; + huart->ReceptionType = HAL_UART_RECEPTION_STANDARD; + + /* As no DMA to be aborted, call directly user Abort complete callback */ +#if (USE_HAL_UART_REGISTER_CALLBACKS == 1) + /* Call registered Abort complete callback */ + huart->AbortCpltCallback(huart); +#else + /* Call legacy weak Abort complete callback */ + HAL_UART_AbortCpltCallback(huart); +#endif /* USE_HAL_UART_REGISTER_CALLBACKS */ + } + + return HAL_OK; +} + +/** + * @brief Abort ongoing Transmit transfer (Interrupt mode). + * @param huart UART handle. + * @note This procedure could be used for aborting any ongoing Tx transfer started in Interrupt or DMA mode. 
+ * This procedure performs following operations : + * - Disable UART Interrupts (Tx) + * - Disable the DMA transfer in the peripheral register (if enabled) + * - Abort DMA transfer by calling HAL_DMA_Abort_IT (in case of transfer in DMA mode) + * - Set handle State to READY + * - At abort completion, call user abort complete callback + * @note This procedure is executed in Interrupt mode, meaning that abort procedure could be + * considered as completed only when user abort complete callback is executed (not when exiting function). + * @retval HAL status + */ +HAL_StatusTypeDef HAL_UART_AbortTransmit_IT(UART_HandleTypeDef *huart) +{ + /* Disable interrupts */ + ATOMIC_CLEAR_BIT(huart->Instance->CR1, (USART_CR1_TXEIE | USART_CR1_TCIE)); + + /* Abort the UART DMA Tx channel if enabled */ + if (HAL_IS_BIT_SET(huart->Instance->CR3, USART_CR3_DMAT)) + { + /* Disable the UART DMA Tx request if enabled */ + ATOMIC_CLEAR_BIT(huart->Instance->CR3, USART_CR3_DMAT); + + /* Abort the UART DMA Tx channel : use non blocking DMA Abort API (callback) */ + if (huart->hdmatx != NULL) + { + /* Set the UART DMA Abort callback : + will lead to call HAL_UART_AbortCpltCallback() at end of DMA abort procedure */ + huart->hdmatx->XferAbortCallback = UART_DMATxOnlyAbortCallback; + + /* Abort DMA TX */ + if (HAL_DMA_Abort_IT(huart->hdmatx) != HAL_OK) + { + /* Call Directly huart->hdmatx->XferAbortCallback function in case of error */ + huart->hdmatx->XferAbortCallback(huart->hdmatx); + } + } + else + { + /* Reset Tx transfer counter */ + huart->TxXferCount = 0U; + + /* Clear TxISR function pointers */ + huart->TxISR = NULL; + + /* Restore huart->gState to Ready */ + huart->gState = HAL_UART_STATE_READY; + + /* As no DMA to be aborted, call directly user Abort complete callback */ +#if (USE_HAL_UART_REGISTER_CALLBACKS == 1) + /* Call registered Abort Transmit Complete Callback */ + huart->AbortTransmitCpltCallback(huart); +#else + /* Call legacy weak Abort Transmit Complete Callback */ + HAL_UART_AbortTransmitCpltCallback(huart); +#endif /* USE_HAL_UART_REGISTER_CALLBACKS */ + } + } + else + { + /* Reset Tx transfer counter */ + huart->TxXferCount = 0U; + + /* Clear TxISR function pointers */ + huart->TxISR = NULL; + + + /* Restore huart->gState to Ready */ + huart->gState = HAL_UART_STATE_READY; + + /* As no DMA to be aborted, call directly user Abort complete callback */ +#if (USE_HAL_UART_REGISTER_CALLBACKS == 1) + /* Call registered Abort Transmit Complete Callback */ + huart->AbortTransmitCpltCallback(huart); +#else + /* Call legacy weak Abort Transmit Complete Callback */ + HAL_UART_AbortTransmitCpltCallback(huart); +#endif /* USE_HAL_UART_REGISTER_CALLBACKS */ + } + + return HAL_OK; +} + +/** + * @brief Abort ongoing Receive transfer (Interrupt mode). + * @param huart UART handle. + * @note This procedure could be used for aborting any ongoing Rx transfer started in Interrupt or DMA mode. + * This procedure performs following operations : + * - Disable UART Interrupts (Rx) + * - Disable the DMA transfer in the peripheral register (if enabled) + * - Abort DMA transfer by calling HAL_DMA_Abort_IT (in case of transfer in DMA mode) + * - Set handle State to READY + * - At abort completion, call user abort complete callback + * @note This procedure is executed in Interrupt mode, meaning that abort procedure could be + * considered as completed only when user abort complete callback is executed (not when exiting function). 
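+ * @note  Illustrative usage (a sketch; the handle name huart2 is hypothetical):
+ * @code
+ *   (void)HAL_UART_AbortReceive_IT(&huart2);   // returns immediately
+ *
+ *   // completion is reported asynchronously in the application:
+ *   void HAL_UART_AbortReceiveCpltCallback(UART_HandleTypeDef *huart)
+ *   {
+ *     // abort finished, RxState is READY again
+ *   }
+ * @endcode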
+ * @retval HAL status + */ +HAL_StatusTypeDef HAL_UART_AbortReceive_IT(UART_HandleTypeDef *huart) +{ + /* Disable RXNE, PE and ERR (Frame error, noise error, overrun error) interrupts */ + ATOMIC_CLEAR_BIT(huart->Instance->CR1, (USART_CR1_RXNEIE | USART_CR1_PEIE)); + ATOMIC_CLEAR_BIT(huart->Instance->CR3, USART_CR3_EIE); + + /* If Reception till IDLE event was ongoing, disable IDLEIE interrupt */ + if (huart->ReceptionType == HAL_UART_RECEPTION_TOIDLE) + { + ATOMIC_CLEAR_BIT(huart->Instance->CR1, (USART_CR1_IDLEIE)); + } + + /* Abort the UART DMA Rx channel if enabled */ + if (HAL_IS_BIT_SET(huart->Instance->CR3, USART_CR3_DMAR)) + { + /* Disable the UART DMA Rx request if enabled */ + ATOMIC_CLEAR_BIT(huart->Instance->CR3, USART_CR3_DMAR); + + /* Abort the UART DMA Rx channel : use non blocking DMA Abort API (callback) */ + if (huart->hdmarx != NULL) + { + /* Set the UART DMA Abort callback : + will lead to call HAL_UART_AbortCpltCallback() at end of DMA abort procedure */ + huart->hdmarx->XferAbortCallback = UART_DMARxOnlyAbortCallback; + + /* Abort DMA RX */ + if (HAL_DMA_Abort_IT(huart->hdmarx) != HAL_OK) + { + /* Call Directly huart->hdmarx->XferAbortCallback function in case of error */ + huart->hdmarx->XferAbortCallback(huart->hdmarx); + } + } + else + { + /* Reset Rx transfer counter */ + huart->RxXferCount = 0U; + + /* Clear RxISR function pointer */ + huart->pRxBuffPtr = NULL; + + /* Clear the Error flags in the ICR register */ + __HAL_UART_CLEAR_FLAG(huart, UART_CLEAR_OREF | UART_CLEAR_NEF | UART_CLEAR_PEF | UART_CLEAR_FEF); + + /* Discard the received data */ + __HAL_UART_SEND_REQ(huart, UART_RXDATA_FLUSH_REQUEST); + + /* Restore huart->RxState to Ready */ + huart->RxState = HAL_UART_STATE_READY; + huart->ReceptionType = HAL_UART_RECEPTION_STANDARD; + + /* As no DMA to be aborted, call directly user Abort complete callback */ +#if (USE_HAL_UART_REGISTER_CALLBACKS == 1) + /* Call registered Abort Receive Complete Callback */ + huart->AbortReceiveCpltCallback(huart); +#else + /* Call legacy weak Abort Receive Complete Callback */ + HAL_UART_AbortReceiveCpltCallback(huart); +#endif /* USE_HAL_UART_REGISTER_CALLBACKS */ + } + } + else + { + /* Reset Rx transfer counter */ + huart->RxXferCount = 0U; + + /* Clear RxISR function pointer */ + huart->pRxBuffPtr = NULL; + + /* Clear the Error flags in the ICR register */ + __HAL_UART_CLEAR_FLAG(huart, UART_CLEAR_OREF | UART_CLEAR_NEF | UART_CLEAR_PEF | UART_CLEAR_FEF); + + /* Restore huart->RxState to Ready */ + huart->RxState = HAL_UART_STATE_READY; + huart->ReceptionType = HAL_UART_RECEPTION_STANDARD; + + /* As no DMA to be aborted, call directly user Abort complete callback */ +#if (USE_HAL_UART_REGISTER_CALLBACKS == 1) + /* Call registered Abort Receive Complete Callback */ + huart->AbortReceiveCpltCallback(huart); +#else + /* Call legacy weak Abort Receive Complete Callback */ + HAL_UART_AbortReceiveCpltCallback(huart); +#endif /* USE_HAL_UART_REGISTER_CALLBACKS */ + } + + return HAL_OK; +} + +/** + * @brief Handle UART interrupt request. + * @param huart UART handle. 
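+ * @note  This handler is typically called from the corresponding USARTx interrupt vector.
+ *        Illustrative sketch (instance and handle names are hypothetical):
+ * @code
+ *   void USART3_IRQHandler(void)
+ *   {
+ *     HAL_UART_IRQHandler(&huart3);
+ *   }
+ * @endcode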
+ * @retval None + */ +void HAL_UART_IRQHandler(UART_HandleTypeDef *huart) +{ + uint32_t isrflags = READ_REG(huart->Instance->ISR); + uint32_t cr1its = READ_REG(huart->Instance->CR1); + uint32_t cr3its = READ_REG(huart->Instance->CR3); + + uint32_t errorflags; + uint32_t errorcode; + + /* If no error occurs */ + errorflags = (isrflags & (uint32_t)(USART_ISR_PE | USART_ISR_FE | USART_ISR_ORE | USART_ISR_NE | USART_ISR_RTOF)); + if (errorflags == 0U) + { + /* UART in mode Receiver ---------------------------------------------------*/ + if (((isrflags & USART_ISR_RXNE) != 0U) + && ((cr1its & USART_CR1_RXNEIE) != 0U)) + { + if (huart->RxISR != NULL) + { + huart->RxISR(huart); + } + return; + } + } + + /* If some errors occur */ + if ((errorflags != 0U) + && (((cr3its & USART_CR3_EIE) != 0U) + || ((cr1its & (USART_CR1_RXNEIE | USART_CR1_PEIE | USART_CR1_RTOIE)) != 0U))) + { + /* UART parity error interrupt occurred -------------------------------------*/ + if (((isrflags & USART_ISR_PE) != 0U) && ((cr1its & USART_CR1_PEIE) != 0U)) + { + __HAL_UART_CLEAR_FLAG(huart, UART_CLEAR_PEF); + + huart->ErrorCode |= HAL_UART_ERROR_PE; + } + + /* UART frame error interrupt occurred --------------------------------------*/ + if (((isrflags & USART_ISR_FE) != 0U) && ((cr3its & USART_CR3_EIE) != 0U)) + { + __HAL_UART_CLEAR_FLAG(huart, UART_CLEAR_FEF); + + huart->ErrorCode |= HAL_UART_ERROR_FE; + } + + /* UART noise error interrupt occurred --------------------------------------*/ + if (((isrflags & USART_ISR_NE) != 0U) && ((cr3its & USART_CR3_EIE) != 0U)) + { + __HAL_UART_CLEAR_FLAG(huart, UART_CLEAR_NEF); + + huart->ErrorCode |= HAL_UART_ERROR_NE; + } + + /* UART Over-Run interrupt occurred -----------------------------------------*/ + if (((isrflags & USART_ISR_ORE) != 0U) + && (((cr1its & USART_CR1_RXNEIE) != 0U) || + ((cr3its & USART_CR3_EIE) != 0U))) + { + __HAL_UART_CLEAR_FLAG(huart, UART_CLEAR_OREF); + + huart->ErrorCode |= HAL_UART_ERROR_ORE; + } + + /* UART Receiver Timeout interrupt occurred ---------------------------------*/ + if (((isrflags & USART_ISR_RTOF) != 0U) && ((cr1its & USART_CR1_RTOIE) != 0U)) + { + __HAL_UART_CLEAR_FLAG(huart, UART_CLEAR_RTOF); + + huart->ErrorCode |= HAL_UART_ERROR_RTO; + } + + /* Call UART Error Call back function if need be ----------------------------*/ + if (huart->ErrorCode != HAL_UART_ERROR_NONE) + { + /* UART in mode Receiver --------------------------------------------------*/ + if (((isrflags & USART_ISR_RXNE) != 0U) + && ((cr1its & USART_CR1_RXNEIE) != 0U)) + { + if (huart->RxISR != NULL) + { + huart->RxISR(huart); + } + } + + /* If Error is to be considered as blocking : + - Receiver Timeout error in Reception + - Overrun error in Reception + - any error occurs in DMA mode reception + */ + errorcode = huart->ErrorCode; + if ((HAL_IS_BIT_SET(huart->Instance->CR3, USART_CR3_DMAR)) || + ((errorcode & (HAL_UART_ERROR_RTO | HAL_UART_ERROR_ORE)) != 0U)) + { + /* Blocking error : transfer is aborted + Set the UART state ready to be able to start again the process, + Disable Rx Interrupts, and disable Rx DMA request, if ongoing */ + UART_EndRxTransfer(huart); + + /* Abort the UART DMA Rx channel if enabled */ + if (HAL_IS_BIT_SET(huart->Instance->CR3, USART_CR3_DMAR)) + { + /* Disable the UART DMA Rx request if enabled */ + ATOMIC_CLEAR_BIT(huart->Instance->CR3, USART_CR3_DMAR); + + /* Abort the UART DMA Rx channel */ + if (huart->hdmarx != NULL) + { + /* Set the UART DMA Abort callback : + will lead to call HAL_UART_ErrorCallback() at end of DMA abort procedure */ 
+ huart->hdmarx->XferAbortCallback = UART_DMAAbortOnError; + + /* Abort DMA RX */ + if (HAL_DMA_Abort_IT(huart->hdmarx) != HAL_OK) + { + /* Call Directly huart->hdmarx->XferAbortCallback function in case of error */ + huart->hdmarx->XferAbortCallback(huart->hdmarx); + } + } + else + { + /* Call user error callback */ +#if (USE_HAL_UART_REGISTER_CALLBACKS == 1) + /*Call registered error callback*/ + huart->ErrorCallback(huart); +#else + /*Call legacy weak error callback*/ + HAL_UART_ErrorCallback(huart); +#endif /* USE_HAL_UART_REGISTER_CALLBACKS */ + + } + } + else + { + /* Call user error callback */ +#if (USE_HAL_UART_REGISTER_CALLBACKS == 1) + /*Call registered error callback*/ + huart->ErrorCallback(huart); +#else + /*Call legacy weak error callback*/ + HAL_UART_ErrorCallback(huart); +#endif /* USE_HAL_UART_REGISTER_CALLBACKS */ + } + } + else + { + /* Non Blocking error : transfer could go on. + Error is notified to user through user error callback */ +#if (USE_HAL_UART_REGISTER_CALLBACKS == 1) + /*Call registered error callback*/ + huart->ErrorCallback(huart); +#else + /*Call legacy weak error callback*/ + HAL_UART_ErrorCallback(huart); +#endif /* USE_HAL_UART_REGISTER_CALLBACKS */ + huart->ErrorCode = HAL_UART_ERROR_NONE; + } + } + return; + + } /* End if some error occurs */ + + /* Check current reception Mode : + If Reception till IDLE event has been selected : */ + if ((huart->ReceptionType == HAL_UART_RECEPTION_TOIDLE) + && ((isrflags & USART_ISR_IDLE) != 0U) + && ((cr1its & USART_ISR_IDLE) != 0U)) + { + __HAL_UART_CLEAR_FLAG(huart, UART_CLEAR_IDLEF); + + /* Check if DMA mode is enabled in UART */ + if (HAL_IS_BIT_SET(huart->Instance->CR3, USART_CR3_DMAR)) + { + /* DMA mode enabled */ + /* Check received length : If all expected data are received, do nothing, + (DMA cplt callback will be called). + Otherwise, if at least one data has already been received, IDLE event is to be notified to user */ + uint16_t nb_remaining_rx_data = (uint16_t) __HAL_DMA_GET_COUNTER(huart->hdmarx); + if ((nb_remaining_rx_data > 0U) + && (nb_remaining_rx_data < huart->RxXferSize)) + { + /* Reception is not complete */ + huart->RxXferCount = nb_remaining_rx_data; + + /* In Normal mode, end DMA xfer and HAL UART Rx process*/ + if (huart->hdmarx->Init.Mode != DMA_CIRCULAR) + { + /* Disable PE and ERR (Frame error, noise error, overrun error) interrupts */ + ATOMIC_CLEAR_BIT(huart->Instance->CR1, USART_CR1_PEIE); + ATOMIC_CLEAR_BIT(huart->Instance->CR3, USART_CR3_EIE); + + /* Disable the DMA transfer for the receiver request by resetting the DMAR bit + in the UART CR3 register */ + ATOMIC_CLEAR_BIT(huart->Instance->CR3, USART_CR3_DMAR); + + /* At end of Rx process, restore huart->RxState to Ready */ + huart->RxState = HAL_UART_STATE_READY; + huart->ReceptionType = HAL_UART_RECEPTION_STANDARD; + + ATOMIC_CLEAR_BIT(huart->Instance->CR1, USART_CR1_IDLEIE); + + /* Last bytes received, so no need as the abort is immediate */ + (void)HAL_DMA_Abort(huart->hdmarx); + } +#if (USE_HAL_UART_REGISTER_CALLBACKS == 1) + /*Call registered Rx Event callback*/ + huart->RxEventCallback(huart, (huart->RxXferSize - huart->RxXferCount)); +#else + /*Call legacy weak Rx Event callback*/ + HAL_UARTEx_RxEventCallback(huart, (huart->RxXferSize - huart->RxXferCount)); +#endif /* (USE_HAL_UART_REGISTER_CALLBACKS) */ + } + return; + } + else + { + /* DMA mode not enabled */ + /* Check received length : If all expected data are received, do nothing. 
+ Otherwise, if at least one data has already been received, IDLE event is to be notified to user */ + uint16_t nb_rx_data = huart->RxXferSize - huart->RxXferCount; + if ((huart->RxXferCount > 0U) + && (nb_rx_data > 0U)) + { + /* Disable the UART Parity Error Interrupt and RXNE interrupts */ + ATOMIC_CLEAR_BIT(huart->Instance->CR1, (USART_CR1_RXNEIE | USART_CR1_PEIE)); + + /* Disable the UART Error Interrupt: (Frame error, noise error, overrun error) */ + ATOMIC_CLEAR_BIT(huart->Instance->CR3, USART_CR3_EIE); + + /* Rx process is completed, restore huart->RxState to Ready */ + huart->RxState = HAL_UART_STATE_READY; + huart->ReceptionType = HAL_UART_RECEPTION_STANDARD; + + /* Clear RxISR function pointer */ + huart->RxISR = NULL; + + ATOMIC_CLEAR_BIT(huart->Instance->CR1, USART_CR1_IDLEIE); +#if (USE_HAL_UART_REGISTER_CALLBACKS == 1) + /*Call registered Rx complete callback*/ + huart->RxEventCallback(huart, nb_rx_data); +#else + /*Call legacy weak Rx Event callback*/ + HAL_UARTEx_RxEventCallback(huart, nb_rx_data); +#endif /* (USE_HAL_UART_REGISTER_CALLBACKS) */ + } + return; + } + } +#if defined(USART_CR1_UESM) +#if defined(USART_CR3_WUFIE) + + /* UART wakeup from Stop mode interrupt occurred ---------------------------*/ + if (((isrflags & USART_ISR_WUF) != 0U) && ((cr3its & USART_CR3_WUFIE) != 0U)) + { + __HAL_UART_CLEAR_FLAG(huart, UART_CLEAR_WUF); + + /* UART Rx state is not reset as a reception process might be ongoing. + If UART handle state fields need to be reset to READY, this could be done in Wakeup callback */ + +#if (USE_HAL_UART_REGISTER_CALLBACKS == 1) + /* Call registered Wakeup Callback */ + huart->WakeupCallback(huart); +#else + /* Call legacy weak Wakeup Callback */ + HAL_UARTEx_WakeupCallback(huart); +#endif /* USE_HAL_UART_REGISTER_CALLBACKS */ + return; + } +#endif /* USART_CR3_WUFIE */ +#endif /* USART_CR1_UESM */ + + /* UART in mode Transmitter ------------------------------------------------*/ + if (((isrflags & USART_ISR_TXE) != 0U) + && ((cr1its & USART_CR1_TXEIE) != 0U)) + { + if (huart->TxISR != NULL) + { + huart->TxISR(huart); + } + return; + } + + /* UART in mode Transmitter (transmission end) -----------------------------*/ + if (((isrflags & USART_ISR_TC) != 0U) && ((cr1its & USART_CR1_TCIE) != 0U)) + { + UART_EndTransmit_IT(huart); + return; + } + +} + +/** + * @brief Tx Transfer completed callback. + * @param huart UART handle. + * @retval None + */ +__weak void HAL_UART_TxCpltCallback(UART_HandleTypeDef *huart) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(huart); + + /* NOTE : This function should not be modified, when the callback is needed, + the HAL_UART_TxCpltCallback can be implemented in the user file. + */ +} + +/** + * @brief Tx Half Transfer completed callback. + * @param huart UART handle. + * @retval None + */ +__weak void HAL_UART_TxHalfCpltCallback(UART_HandleTypeDef *huart) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(huart); + + /* NOTE: This function should not be modified, when the callback is needed, + the HAL_UART_TxHalfCpltCallback can be implemented in the user file. + */ +} + +/** + * @brief Rx Transfer completed callback. + * @param huart UART handle. + * @retval None + */ +__weak void HAL_UART_RxCpltCallback(UART_HandleTypeDef *huart) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(huart); + + /* NOTE : This function should not be modified, when the callback is needed, + the HAL_UART_RxCpltCallback can be implemented in the user file. 
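+
+            An illustrative application-side override (a sketch, defined in user code,
+            not in this file) could look like:
+
+              void HAL_UART_RxCpltCallback(UART_HandleTypeDef *huart)
+              {
+                // process the buffer given to HAL_UART_Receive_IT()/HAL_UART_Receive_DMA(),
+                // then re-arm the next reception if a continuous stream is expected
+              }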
+ */ +} + +/** + * @brief Rx Half Transfer completed callback. + * @param huart UART handle. + * @retval None + */ +__weak void HAL_UART_RxHalfCpltCallback(UART_HandleTypeDef *huart) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(huart); + + /* NOTE: This function should not be modified, when the callback is needed, + the HAL_UART_RxHalfCpltCallback can be implemented in the user file. + */ +} + +/** + * @brief UART error callback. + * @param huart UART handle. + * @retval None + */ +__weak void HAL_UART_ErrorCallback(UART_HandleTypeDef *huart) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(huart); + + /* NOTE : This function should not be modified, when the callback is needed, + the HAL_UART_ErrorCallback can be implemented in the user file. + */ +} + +/** + * @brief UART Abort Complete callback. + * @param huart UART handle. + * @retval None + */ +__weak void HAL_UART_AbortCpltCallback(UART_HandleTypeDef *huart) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(huart); + + /* NOTE : This function should not be modified, when the callback is needed, + the HAL_UART_AbortCpltCallback can be implemented in the user file. + */ +} + +/** + * @brief UART Abort Complete callback. + * @param huart UART handle. + * @retval None + */ +__weak void HAL_UART_AbortTransmitCpltCallback(UART_HandleTypeDef *huart) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(huart); + + /* NOTE : This function should not be modified, when the callback is needed, + the HAL_UART_AbortTransmitCpltCallback can be implemented in the user file. + */ +} + +/** + * @brief UART Abort Receive Complete callback. + * @param huart UART handle. + * @retval None + */ +__weak void HAL_UART_AbortReceiveCpltCallback(UART_HandleTypeDef *huart) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(huart); + + /* NOTE : This function should not be modified, when the callback is needed, + the HAL_UART_AbortReceiveCpltCallback can be implemented in the user file. + */ +} + +/** + * @brief Reception Event Callback (Rx event notification called after use of advanced reception service). + * @param huart UART handle + * @param Size Number of data available in application reception buffer (indicates a position in + * reception buffer until which, data are available) + * @retval None + */ +__weak void HAL_UARTEx_RxEventCallback(UART_HandleTypeDef *huart, uint16_t Size) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(huart); + UNUSED(Size); + + /* NOTE : This function should not be modified, when the callback is needed, + the HAL_UARTEx_RxEventCallback can be implemented in the user file. + */ +} + +#if defined(USART_CR1_UESM) +/** + * @brief UART wakeup from Stop mode callback. + * @param huart UART handle. + * @retval None + */ +__weak void HAL_UARTEx_WakeupCallback(UART_HandleTypeDef *huart) +{ + /* Prevent unused argument(s) compilation warning */ + UNUSED(huart); + + /* NOTE : This function should not be modified, when the callback is needed, + the HAL_UARTEx_WakeupCallback can be implemented in the user file. + */ +} + +#endif /* USART_CR1_UESM */ +/** + * @} + */ + +/** @defgroup UART_Exported_Functions_Group3 Peripheral Control functions + * @brief UART control functions + * +@verbatim + =============================================================================== + ##### Peripheral Control functions ##### + =============================================================================== + [..] 
+ This subsection provides a set of functions allowing to control the UART. + (+) HAL_UART_ReceiverTimeout_Config() API allows to configure the receiver timeout value on the fly + (+) HAL_UART_EnableReceiverTimeout() API enables the receiver timeout feature + (+) HAL_UART_DisableReceiverTimeout() API disables the receiver timeout feature + (+) HAL_MultiProcessor_EnableMuteMode() API enables mute mode + (+) HAL_MultiProcessor_DisableMuteMode() API disables mute mode + (+) HAL_MultiProcessor_EnterMuteMode() API enters mute mode + (+) UART_SetConfig() API configures the UART peripheral + (+) UART_AdvFeatureConfig() API optionally configures the UART advanced features + (+) UART_CheckIdleState() API ensures that TEACK and/or REACK are set after initialization + (+) HAL_HalfDuplex_EnableTransmitter() API disables receiver and enables transmitter + (+) HAL_HalfDuplex_EnableReceiver() API disables transmitter and enables receiver + (+) HAL_LIN_SendBreak() API transmits the break characters +@endverbatim + * @{ + */ + +/** + * @brief Update on the fly the receiver timeout value in RTOR register. + * @param huart Pointer to a UART_HandleTypeDef structure that contains + * the configuration information for the specified UART module. + * @param TimeoutValue receiver timeout value in number of baud blocks. The timeout + * value must be less or equal to 0x0FFFFFFFF. + * @retval None + */ +void HAL_UART_ReceiverTimeout_Config(UART_HandleTypeDef *huart, uint32_t TimeoutValue) +{ + assert_param(IS_UART_RECEIVER_TIMEOUT_VALUE(TimeoutValue)); + MODIFY_REG(huart->Instance->RTOR, USART_RTOR_RTO, TimeoutValue); +} + +/** + * @brief Enable the UART receiver timeout feature. + * @param huart Pointer to a UART_HandleTypeDef structure that contains + * the configuration information for the specified UART module. + * @retval HAL status + */ +HAL_StatusTypeDef HAL_UART_EnableReceiverTimeout(UART_HandleTypeDef *huart) +{ + if (huart->gState == HAL_UART_STATE_READY) + { + /* Process Locked */ + __HAL_LOCK(huart); + + huart->gState = HAL_UART_STATE_BUSY; + + /* Set the USART RTOEN bit */ + SET_BIT(huart->Instance->CR2, USART_CR2_RTOEN); + + huart->gState = HAL_UART_STATE_READY; + + /* Process Unlocked */ + __HAL_UNLOCK(huart); + + return HAL_OK; + } + else + { + return HAL_BUSY; + } +} + +/** + * @brief Disable the UART receiver timeout feature. + * @param huart Pointer to a UART_HandleTypeDef structure that contains + * the configuration information for the specified UART module. + * @retval HAL status + */ +HAL_StatusTypeDef HAL_UART_DisableReceiverTimeout(UART_HandleTypeDef *huart) +{ + if (huart->gState == HAL_UART_STATE_READY) + { + /* Process Locked */ + __HAL_LOCK(huart); + + huart->gState = HAL_UART_STATE_BUSY; + + /* Clear the USART RTOEN bit */ + CLEAR_BIT(huart->Instance->CR2, USART_CR2_RTOEN); + + huart->gState = HAL_UART_STATE_READY; + + /* Process Unlocked */ + __HAL_UNLOCK(huart); + + return HAL_OK; + } + else + { + return HAL_BUSY; + } +} + +/** + * @brief Enable UART in mute mode (does not mean UART enters mute mode; + * to enter mute mode, HAL_MultiProcessor_EnterMuteMode() API must be called). + * @param huart UART handle. 
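+ * @note  Illustrative sequence (a sketch; the handle name huart2 is hypothetical):
+ * @code
+ *   (void)HAL_MultiProcessor_EnableMuteMode(&huart2);   // set MME, mute mode allowed
+ *   HAL_MultiProcessor_EnterMuteMode(&huart2);          // request actual entry in mute mode
+ * @endcode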
+ * @retval HAL status + */ +HAL_StatusTypeDef HAL_MultiProcessor_EnableMuteMode(UART_HandleTypeDef *huart) +{ + __HAL_LOCK(huart); + + huart->gState = HAL_UART_STATE_BUSY; + + /* Enable USART mute mode by setting the MME bit in the CR1 register */ + ATOMIC_SET_BIT(huart->Instance->CR1, USART_CR1_MME); + + huart->gState = HAL_UART_STATE_READY; + + return (UART_CheckIdleState(huart)); +} + +/** + * @brief Disable UART mute mode (does not mean the UART actually exits mute mode + * as it may not have been in mute mode at this very moment). + * @param huart UART handle. + * @retval HAL status + */ +HAL_StatusTypeDef HAL_MultiProcessor_DisableMuteMode(UART_HandleTypeDef *huart) +{ + __HAL_LOCK(huart); + + huart->gState = HAL_UART_STATE_BUSY; + + /* Disable USART mute mode by clearing the MME bit in the CR1 register */ + ATOMIC_CLEAR_BIT(huart->Instance->CR1, USART_CR1_MME); + + huart->gState = HAL_UART_STATE_READY; + + return (UART_CheckIdleState(huart)); +} + +/** + * @brief Enter UART mute mode (means UART actually enters mute mode). + * @note To exit from mute mode, HAL_MultiProcessor_DisableMuteMode() API must be called. + * @param huart UART handle. + * @retval None + */ +void HAL_MultiProcessor_EnterMuteMode(UART_HandleTypeDef *huart) +{ + __HAL_UART_SEND_REQ(huart, UART_MUTE_MODE_REQUEST); +} + +/** + * @brief Enable the UART transmitter and disable the UART receiver. + * @param huart UART handle. + * @retval HAL status + */ +HAL_StatusTypeDef HAL_HalfDuplex_EnableTransmitter(UART_HandleTypeDef *huart) +{ + __HAL_LOCK(huart); + huart->gState = HAL_UART_STATE_BUSY; + + /* Clear TE and RE bits */ + ATOMIC_CLEAR_BIT(huart->Instance->CR1, (USART_CR1_TE | USART_CR1_RE)); + + /* Enable the USART's transmit interface by setting the TE bit in the USART CR1 register */ + ATOMIC_SET_BIT(huart->Instance->CR1, USART_CR1_TE); + + huart->gState = HAL_UART_STATE_READY; + + __HAL_UNLOCK(huart); + + return HAL_OK; +} + +/** + * @brief Enable the UART receiver and disable the UART transmitter. + * @param huart UART handle. + * @retval HAL status. + */ +HAL_StatusTypeDef HAL_HalfDuplex_EnableReceiver(UART_HandleTypeDef *huart) +{ + __HAL_LOCK(huart); + huart->gState = HAL_UART_STATE_BUSY; + + /* Clear TE and RE bits */ + ATOMIC_CLEAR_BIT(huart->Instance->CR1, (USART_CR1_TE | USART_CR1_RE)); + + /* Enable the USART's receive interface by setting the RE bit in the USART CR1 register */ + ATOMIC_SET_BIT(huart->Instance->CR1, USART_CR1_RE); + + huart->gState = HAL_UART_STATE_READY; + + __HAL_UNLOCK(huart); + + return HAL_OK; +} + + +/** + * @brief Transmit break characters. + * @param huart UART handle. + * @retval HAL status + */ +HAL_StatusTypeDef HAL_LIN_SendBreak(UART_HandleTypeDef *huart) +{ + /* Check the parameters */ + assert_param(IS_UART_LIN_INSTANCE(huart->Instance)); + + __HAL_LOCK(huart); + + huart->gState = HAL_UART_STATE_BUSY; + + /* Send break characters */ + __HAL_UART_SEND_REQ(huart, UART_SENDBREAK_REQUEST); + + huart->gState = HAL_UART_STATE_READY; + + __HAL_UNLOCK(huart); + + return HAL_OK; +} + +/** + * @} + */ + +/** @defgroup UART_Exported_Functions_Group4 Peripheral State and Error functions + * @brief UART Peripheral State functions + * +@verbatim + ============================================================================== + ##### Peripheral State and Error functions ##### + ============================================================================== + [..] + This subsection provides functions allowing to : + (+) Return the UART handle state. 
+ (+) Return the UART handle error code + +@endverbatim + * @{ + */ + +/** + * @brief Return the UART handle state. + * @param huart Pointer to a UART_HandleTypeDef structure that contains + * the configuration information for the specified UART. + * @retval HAL state + */ +HAL_UART_StateTypeDef HAL_UART_GetState(UART_HandleTypeDef *huart) +{ + uint32_t temp1; + uint32_t temp2; + temp1 = huart->gState; + temp2 = huart->RxState; + + return (HAL_UART_StateTypeDef)(temp1 | temp2); +} + +/** + * @brief Return the UART handle error code. + * @param huart Pointer to a UART_HandleTypeDef structure that contains + * the configuration information for the specified UART. + * @retval UART Error Code + */ +uint32_t HAL_UART_GetError(UART_HandleTypeDef *huart) +{ + return huart->ErrorCode; +} +/** + * @} + */ + +/** + * @} + */ + +/** @defgroup UART_Private_Functions UART Private Functions + * @{ + */ + +/** + * @brief Initialize the callbacks to their default values. + * @param huart UART handle. + * @retval none + */ +#if (USE_HAL_UART_REGISTER_CALLBACKS == 1) +void UART_InitCallbacksToDefault(UART_HandleTypeDef *huart) +{ + /* Init the UART Callback settings */ + huart->TxHalfCpltCallback = HAL_UART_TxHalfCpltCallback; /* Legacy weak TxHalfCpltCallback */ + huart->TxCpltCallback = HAL_UART_TxCpltCallback; /* Legacy weak TxCpltCallback */ + huart->RxHalfCpltCallback = HAL_UART_RxHalfCpltCallback; /* Legacy weak RxHalfCpltCallback */ + huart->RxCpltCallback = HAL_UART_RxCpltCallback; /* Legacy weak RxCpltCallback */ + huart->ErrorCallback = HAL_UART_ErrorCallback; /* Legacy weak ErrorCallback */ + huart->AbortCpltCallback = HAL_UART_AbortCpltCallback; /* Legacy weak AbortCpltCallback */ + huart->AbortTransmitCpltCallback = HAL_UART_AbortTransmitCpltCallback; /* Legacy weak AbortTransmitCpltCallback */ + huart->AbortReceiveCpltCallback = HAL_UART_AbortReceiveCpltCallback; /* Legacy weak AbortReceiveCpltCallback */ +#if defined(USART_CR1_UESM) +#if defined(USART_CR3_WUFIE) + huart->WakeupCallback = HAL_UARTEx_WakeupCallback; /* Legacy weak WakeupCallback */ +#endif /* USART_CR3_WUFIE */ +#endif /* USART_CR1_UESM */ + huart->RxEventCallback = HAL_UARTEx_RxEventCallback; /* Legacy weak RxEventCallback */ + +} +#endif /* USE_HAL_UART_REGISTER_CALLBACKS */ + +/** + * @brief Configure the UART peripheral. + * @param huart UART handle. 
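+ * @note  Worked example for the baud rate register computation performed below
+ *        (the clock value is an assumption for illustration): with oversampling by 16,
+ *        a 72 MHz UART kernel clock and a requested baud rate of 115200,
+ *        USARTDIV = 72000000 / 115200 = 625 = 0x271, which is the value written to BRR.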
+ * @retval HAL status + */ +HAL_StatusTypeDef UART_SetConfig(UART_HandleTypeDef *huart) +{ + uint32_t tmpreg; + uint16_t brrtemp; + UART_ClockSourceTypeDef clocksource; + uint32_t usartdiv; + HAL_StatusTypeDef ret = HAL_OK; + uint32_t pclk; + + /* Check the parameters */ + assert_param(IS_UART_BAUDRATE(huart->Init.BaudRate)); + assert_param(IS_UART_WORD_LENGTH(huart->Init.WordLength)); + assert_param(IS_UART_STOPBITS(huart->Init.StopBits)); + assert_param(IS_UART_ONE_BIT_SAMPLE(huart->Init.OneBitSampling)); + + assert_param(IS_UART_PARITY(huart->Init.Parity)); + assert_param(IS_UART_MODE(huart->Init.Mode)); + assert_param(IS_UART_HARDWARE_FLOW_CONTROL(huart->Init.HwFlowCtl)); + assert_param(IS_UART_OVERSAMPLING(huart->Init.OverSampling)); + + /*-------------------------- USART CR1 Configuration -----------------------*/ + /* Clear M, PCE, PS, TE, RE and OVER8 bits and configure + * the UART Word Length, Parity, Mode and oversampling: + * set the M bits according to huart->Init.WordLength value + * set PCE and PS bits according to huart->Init.Parity value + * set TE and RE bits according to huart->Init.Mode value + * set OVER8 bit according to huart->Init.OverSampling value */ + tmpreg = (uint32_t)huart->Init.WordLength | huart->Init.Parity | huart->Init.Mode | huart->Init.OverSampling ; + MODIFY_REG(huart->Instance->CR1, USART_CR1_FIELDS, tmpreg); + + /*-------------------------- USART CR2 Configuration -----------------------*/ + /* Configure the UART Stop Bits: Set STOP[13:12] bits according + * to huart->Init.StopBits value */ + MODIFY_REG(huart->Instance->CR2, USART_CR2_STOP, huart->Init.StopBits); + + /*-------------------------- USART CR3 Configuration -----------------------*/ + /* Configure + * - UART HardWare Flow Control: set CTSE and RTSE bits according + * to huart->Init.HwFlowCtl value + * - one-bit sampling method versus three samples' majority rule according + * to huart->Init.OneBitSampling (not applicable to LPUART) */ + tmpreg = (uint32_t)huart->Init.HwFlowCtl; + + tmpreg |= huart->Init.OneBitSampling; + MODIFY_REG(huart->Instance->CR3, USART_CR3_FIELDS, tmpreg); + + + /*-------------------------- USART BRR Configuration -----------------------*/ + UART_GETCLOCKSOURCE(huart, clocksource); + + if (huart->Init.OverSampling == UART_OVERSAMPLING_8) + { + switch (clocksource) + { + case UART_CLOCKSOURCE_PCLK1: + pclk = HAL_RCC_GetPCLK1Freq(); + break; + case UART_CLOCKSOURCE_PCLK2: + pclk = HAL_RCC_GetPCLK2Freq(); + break; + case UART_CLOCKSOURCE_HSI: + pclk = (uint32_t) HSI_VALUE; + break; + case UART_CLOCKSOURCE_SYSCLK: + pclk = HAL_RCC_GetSysClockFreq(); + break; + case UART_CLOCKSOURCE_LSE: + pclk = (uint32_t) LSE_VALUE; + break; + default: + pclk = 0U; + ret = HAL_ERROR; + break; + } + + /* USARTDIV must be greater than or equal to 0d16 */ + if (pclk != 0U) + { + usartdiv = (uint32_t)(UART_DIV_SAMPLING8(pclk, huart->Init.BaudRate)); + if ((usartdiv >= UART_BRR_MIN) && (usartdiv <= UART_BRR_MAX)) + { + brrtemp = (uint16_t)(usartdiv & 0xFFF0U); + brrtemp |= (uint16_t)((usartdiv & (uint16_t)0x000FU) >> 1U); + huart->Instance->BRR = brrtemp; + } + else + { + ret = HAL_ERROR; + } + } + } + else + { + switch (clocksource) + { + case UART_CLOCKSOURCE_PCLK1: + pclk = HAL_RCC_GetPCLK1Freq(); + break; + case UART_CLOCKSOURCE_PCLK2: + pclk = HAL_RCC_GetPCLK2Freq(); + break; + case UART_CLOCKSOURCE_HSI: + pclk = (uint32_t) HSI_VALUE; + break; + case UART_CLOCKSOURCE_SYSCLK: + pclk = HAL_RCC_GetSysClockFreq(); + break; + case UART_CLOCKSOURCE_LSE: + pclk = (uint32_t) LSE_VALUE; + 
break; + default: + pclk = 0U; + ret = HAL_ERROR; + break; + } + + if (pclk != 0U) + { + /* USARTDIV must be greater than or equal to 0d16 */ + usartdiv = (uint32_t)(UART_DIV_SAMPLING16(pclk, huart->Init.BaudRate)); + if ((usartdiv >= UART_BRR_MIN) && (usartdiv <= UART_BRR_MAX)) + { + huart->Instance->BRR = (uint16_t)usartdiv; + } + else + { + ret = HAL_ERROR; + } + } + } + + + /* Clear ISR function pointers */ + huart->RxISR = NULL; + huart->TxISR = NULL; + + return ret; +} + +/** + * @brief Configure the UART peripheral advanced features. + * @param huart UART handle. + * @retval None + */ +void UART_AdvFeatureConfig(UART_HandleTypeDef *huart) +{ + /* Check whether the set of advanced features to configure is properly set */ + assert_param(IS_UART_ADVFEATURE_INIT(huart->AdvancedInit.AdvFeatureInit)); + + /* if required, configure TX pin active level inversion */ + if (HAL_IS_BIT_SET(huart->AdvancedInit.AdvFeatureInit, UART_ADVFEATURE_TXINVERT_INIT)) + { + assert_param(IS_UART_ADVFEATURE_TXINV(huart->AdvancedInit.TxPinLevelInvert)); + MODIFY_REG(huart->Instance->CR2, USART_CR2_TXINV, huart->AdvancedInit.TxPinLevelInvert); + } + + /* if required, configure RX pin active level inversion */ + if (HAL_IS_BIT_SET(huart->AdvancedInit.AdvFeatureInit, UART_ADVFEATURE_RXINVERT_INIT)) + { + assert_param(IS_UART_ADVFEATURE_RXINV(huart->AdvancedInit.RxPinLevelInvert)); + MODIFY_REG(huart->Instance->CR2, USART_CR2_RXINV, huart->AdvancedInit.RxPinLevelInvert); + } + + /* if required, configure data inversion */ + if (HAL_IS_BIT_SET(huart->AdvancedInit.AdvFeatureInit, UART_ADVFEATURE_DATAINVERT_INIT)) + { + assert_param(IS_UART_ADVFEATURE_DATAINV(huart->AdvancedInit.DataInvert)); + MODIFY_REG(huart->Instance->CR2, USART_CR2_DATAINV, huart->AdvancedInit.DataInvert); + } + + /* if required, configure RX/TX pins swap */ + if (HAL_IS_BIT_SET(huart->AdvancedInit.AdvFeatureInit, UART_ADVFEATURE_SWAP_INIT)) + { + assert_param(IS_UART_ADVFEATURE_SWAP(huart->AdvancedInit.Swap)); + MODIFY_REG(huart->Instance->CR2, USART_CR2_SWAP, huart->AdvancedInit.Swap); + } + + /* if required, configure RX overrun detection disabling */ + if (HAL_IS_BIT_SET(huart->AdvancedInit.AdvFeatureInit, UART_ADVFEATURE_RXOVERRUNDISABLE_INIT)) + { + assert_param(IS_UART_OVERRUN(huart->AdvancedInit.OverrunDisable)); + MODIFY_REG(huart->Instance->CR3, USART_CR3_OVRDIS, huart->AdvancedInit.OverrunDisable); + } + + /* if required, configure DMA disabling on reception error */ + if (HAL_IS_BIT_SET(huart->AdvancedInit.AdvFeatureInit, UART_ADVFEATURE_DMADISABLEONERROR_INIT)) + { + assert_param(IS_UART_ADVFEATURE_DMAONRXERROR(huart->AdvancedInit.DMADisableonRxError)); + MODIFY_REG(huart->Instance->CR3, USART_CR3_DDRE, huart->AdvancedInit.DMADisableonRxError); + } + + /* if required, configure auto Baud rate detection scheme */ + if (HAL_IS_BIT_SET(huart->AdvancedInit.AdvFeatureInit, UART_ADVFEATURE_AUTOBAUDRATE_INIT)) + { + assert_param(IS_USART_AUTOBAUDRATE_DETECTION_INSTANCE(huart->Instance)); + assert_param(IS_UART_ADVFEATURE_AUTOBAUDRATE(huart->AdvancedInit.AutoBaudRateEnable)); + MODIFY_REG(huart->Instance->CR2, USART_CR2_ABREN, huart->AdvancedInit.AutoBaudRateEnable); + /* set auto Baudrate detection parameters if detection is enabled */ + if (huart->AdvancedInit.AutoBaudRateEnable == UART_ADVFEATURE_AUTOBAUDRATE_ENABLE) + { + assert_param(IS_UART_ADVFEATURE_AUTOBAUDRATEMODE(huart->AdvancedInit.AutoBaudRateMode)); + MODIFY_REG(huart->Instance->CR2, USART_CR2_ABRMODE, huart->AdvancedInit.AutoBaudRateMode); + } + } + + /* if required, 
configure MSB first on communication line */ + if (HAL_IS_BIT_SET(huart->AdvancedInit.AdvFeatureInit, UART_ADVFEATURE_MSBFIRST_INIT)) + { + assert_param(IS_UART_ADVFEATURE_MSBFIRST(huart->AdvancedInit.MSBFirst)); + MODIFY_REG(huart->Instance->CR2, USART_CR2_MSBFIRST, huart->AdvancedInit.MSBFirst); + } +} + +/** + * @brief Check the UART Idle State. + * @param huart UART handle. + * @retval HAL status + */ +HAL_StatusTypeDef UART_CheckIdleState(UART_HandleTypeDef *huart) +{ + uint32_t tickstart; + + /* Initialize the UART ErrorCode */ + huart->ErrorCode = HAL_UART_ERROR_NONE; + + /* Init tickstart for timeout management */ + tickstart = HAL_GetTick(); + + /* Check if the Transmitter is enabled */ + if ((huart->Instance->CR1 & USART_CR1_TE) == USART_CR1_TE) + { + /* Wait until TEACK flag is set */ + if (UART_WaitOnFlagUntilTimeout(huart, USART_ISR_TEACK, RESET, tickstart, HAL_UART_TIMEOUT_VALUE) != HAL_OK) + { + /* Timeout occurred */ + return HAL_TIMEOUT; + } + } +#if defined(USART_ISR_REACK) + + /* Check if the Receiver is enabled */ + if ((huart->Instance->CR1 & USART_CR1_RE) == USART_CR1_RE) + { + /* Wait until REACK flag is set */ + if (UART_WaitOnFlagUntilTimeout(huart, USART_ISR_REACK, RESET, tickstart, HAL_UART_TIMEOUT_VALUE) != HAL_OK) + { + /* Timeout occurred */ + return HAL_TIMEOUT; + } + } +#endif + + /* Initialize the UART State */ + huart->gState = HAL_UART_STATE_READY; + huart->RxState = HAL_UART_STATE_READY; + huart->ReceptionType = HAL_UART_RECEPTION_STANDARD; + + __HAL_UNLOCK(huart); + + return HAL_OK; +} + +/** + * @brief This function handles UART Communication Timeout. It waits + * until a flag is no longer in the specified status. + * @param huart UART handle. + * @param Flag Specifies the UART flag to check + * @param Status The actual Flag status (SET or RESET) + * @param Tickstart Tick start value + * @param Timeout Timeout duration + * @retval HAL status + */ +HAL_StatusTypeDef UART_WaitOnFlagUntilTimeout(UART_HandleTypeDef *huart, uint32_t Flag, FlagStatus Status, + uint32_t Tickstart, uint32_t Timeout) +{ + /* Wait until flag is set */ + while ((__HAL_UART_GET_FLAG(huart, Flag) ? SET : RESET) == Status) + { + /* Check for the Timeout */ + if (Timeout != HAL_MAX_DELAY) + { + if (((HAL_GetTick() - Tickstart) > Timeout) || (Timeout == 0U)) + { + /* Disable TXE, RXNE, PE and ERR (Frame error, noise error, overrun error) + interrupts for the interrupt process */ + ATOMIC_CLEAR_BIT(huart->Instance->CR1, (USART_CR1_RXNEIE | USART_CR1_PEIE | USART_CR1_TXEIE)); + ATOMIC_CLEAR_BIT(huart->Instance->CR3, USART_CR3_EIE); + + huart->gState = HAL_UART_STATE_READY; + huart->RxState = HAL_UART_STATE_READY; + + __HAL_UNLOCK(huart); + + return HAL_TIMEOUT; + } + + if (READ_BIT(huart->Instance->CR1, USART_CR1_RE) != 0U) + { + if (__HAL_UART_GET_FLAG(huart, UART_FLAG_RTOF) == SET) + { + /* Clear Receiver Timeout flag*/ + __HAL_UART_CLEAR_FLAG(huart, UART_CLEAR_RTOF); + + /* Disable TXE, RXNE, PE and ERR (Frame error, noise error, overrun error) + interrupts for the interrupt process */ + ATOMIC_CLEAR_BIT(huart->Instance->CR1, (USART_CR1_RXNEIE | USART_CR1_PEIE | USART_CR1_TXEIE)); + ATOMIC_CLEAR_BIT(huart->Instance->CR3, USART_CR3_EIE); + + huart->gState = HAL_UART_STATE_READY; + huart->RxState = HAL_UART_STATE_READY; + huart->ErrorCode = HAL_UART_ERROR_RTO; + + /* Process Unlocked */ + __HAL_UNLOCK(huart); + + return HAL_TIMEOUT; + } + } + } + } + return HAL_OK; +} + +/** + * @brief Start Receive operation in interrupt mode. 
+ * @note This function could be called by all HAL UART API providing reception in Interrupt mode. + * @note When calling this function, parameters validity is considered as already checked, + * i.e. Rx State, buffer address, ... + * UART Handle is assumed as Locked. + * @param huart UART handle. + * @param pData Pointer to data buffer (u8 or u16 data elements). + * @param Size Amount of data elements (u8 or u16) to be received. + * @retval HAL status + */ +HAL_StatusTypeDef UART_Start_Receive_IT(UART_HandleTypeDef *huart, uint8_t *pData, uint16_t Size) +{ + huart->pRxBuffPtr = pData; + huart->RxXferSize = Size; + huart->RxXferCount = Size; + huart->RxISR = NULL; + + /* Computation of UART mask to apply to RDR register */ + UART_MASK_COMPUTATION(huart); + + huart->ErrorCode = HAL_UART_ERROR_NONE; + huart->RxState = HAL_UART_STATE_BUSY_RX; + + /* Enable the UART Error Interrupt: (Frame error, noise error, overrun error) */ + ATOMIC_SET_BIT(huart->Instance->CR3, USART_CR3_EIE); + + /* Set the Rx ISR function pointer according to the data word length */ + if ((huart->Init.WordLength == UART_WORDLENGTH_9B) && (huart->Init.Parity == UART_PARITY_NONE)) + { + huart->RxISR = UART_RxISR_16BIT; + } + else + { + huart->RxISR = UART_RxISR_8BIT; + } + + __HAL_UNLOCK(huart); + + /* Enable the UART Parity Error interrupt and Data Register Not Empty interrupt */ + if (huart->Init.Parity != UART_PARITY_NONE) + { + ATOMIC_SET_BIT(huart->Instance->CR1, USART_CR1_PEIE | USART_CR1_RXNEIE); + } + else + { + ATOMIC_SET_BIT(huart->Instance->CR1, USART_CR1_RXNEIE); + } + return HAL_OK; +} + +/** + * @brief Start Receive operation in DMA mode. + * @note This function could be called by all HAL UART API providing reception in DMA mode. + * @note When calling this function, parameters validity is considered as already checked, + * i.e. Rx State, buffer address, ... + * UART Handle is assumed as Locked. + * @param huart UART handle. + * @param pData Pointer to data buffer (u8 or u16 data elements). + * @param Size Amount of data elements (u8 or u16) to be received. 
+ * @retval HAL status + */ +HAL_StatusTypeDef UART_Start_Receive_DMA(UART_HandleTypeDef *huart, uint8_t *pData, uint16_t Size) +{ + huart->pRxBuffPtr = pData; + huart->RxXferSize = Size; + + huart->ErrorCode = HAL_UART_ERROR_NONE; + huart->RxState = HAL_UART_STATE_BUSY_RX; + + if (huart->hdmarx != NULL) + { + /* Set the UART DMA transfer complete callback */ + huart->hdmarx->XferCpltCallback = UART_DMAReceiveCplt; + + /* Set the UART DMA Half transfer complete callback */ + huart->hdmarx->XferHalfCpltCallback = UART_DMARxHalfCplt; + + /* Set the DMA error callback */ + huart->hdmarx->XferErrorCallback = UART_DMAError; + + /* Set the DMA abort callback */ + huart->hdmarx->XferAbortCallback = NULL; + + /* Enable the DMA channel */ + if (HAL_DMA_Start_IT(huart->hdmarx, (uint32_t)&huart->Instance->RDR, (uint32_t)huart->pRxBuffPtr, Size) != HAL_OK) + { + /* Set error code to DMA */ + huart->ErrorCode = HAL_UART_ERROR_DMA; + + __HAL_UNLOCK(huart); + + /* Restore huart->RxState to ready */ + huart->RxState = HAL_UART_STATE_READY; + + return HAL_ERROR; + } + } + __HAL_UNLOCK(huart); + + /* Enable the UART Parity Error Interrupt */ + if (huart->Init.Parity != UART_PARITY_NONE) + { + ATOMIC_SET_BIT(huart->Instance->CR1, USART_CR1_PEIE); + } + + /* Enable the UART Error Interrupt: (Frame error, noise error, overrun error) */ + ATOMIC_SET_BIT(huart->Instance->CR3, USART_CR3_EIE); + + /* Enable the DMA transfer for the receiver request by setting the DMAR bit + in the UART CR3 register */ + ATOMIC_SET_BIT(huart->Instance->CR3, USART_CR3_DMAR); + + return HAL_OK; +} + + +/** + * @brief End ongoing Tx transfer on UART peripheral (following error detection or Transmit completion). + * @param huart UART handle. + * @retval None + */ +static void UART_EndTxTransfer(UART_HandleTypeDef *huart) +{ + /* Disable TXEIE and TCIE interrupts */ + ATOMIC_CLEAR_BIT(huart->Instance->CR1, (USART_CR1_TXEIE | USART_CR1_TCIE)); + + /* At end of Tx process, restore huart->gState to Ready */ + huart->gState = HAL_UART_STATE_READY; +} + + +/** + * @brief End ongoing Rx transfer on UART peripheral (following error detection or Reception completion). + * @param huart UART handle. + * @retval None + */ +static void UART_EndRxTransfer(UART_HandleTypeDef *huart) +{ + /* Disable RXNE, PE and ERR (Frame error, noise error, overrun error) interrupts */ + ATOMIC_CLEAR_BIT(huart->Instance->CR1, (USART_CR1_RXNEIE | USART_CR1_PEIE)); + ATOMIC_CLEAR_BIT(huart->Instance->CR3, USART_CR3_EIE); + + /* In case of reception waiting for IDLE event, disable also the IDLE IE interrupt source */ + if (huart->ReceptionType == HAL_UART_RECEPTION_TOIDLE) + { + ATOMIC_CLEAR_BIT(huart->Instance->CR1, USART_CR1_IDLEIE); + } + + /* At end of Rx process, restore huart->RxState to Ready */ + huart->RxState = HAL_UART_STATE_READY; + huart->ReceptionType = HAL_UART_RECEPTION_STANDARD; + + /* Reset RxIsr function pointer */ + huart->RxISR = NULL; +} + + +/** + * @brief DMA UART transmit process complete callback. + * @param hdma DMA handle. 
+ * @retval None + */ +static void UART_DMATransmitCplt(DMA_HandleTypeDef *hdma) +{ + UART_HandleTypeDef *huart = (UART_HandleTypeDef *)(hdma->Parent); + + /* DMA Normal mode */ + if (hdma->Init.Mode != DMA_CIRCULAR) + { + huart->TxXferCount = 0U; + + /* Disable the DMA transfer for transmit request by resetting the DMAT bit + in the UART CR3 register */ + ATOMIC_CLEAR_BIT(huart->Instance->CR3, USART_CR3_DMAT); + + /* Enable the UART Transmit Complete Interrupt */ + ATOMIC_SET_BIT(huart->Instance->CR1, USART_CR1_TCIE); + } + /* DMA Circular mode */ + else + { +#if (USE_HAL_UART_REGISTER_CALLBACKS == 1) + /*Call registered Tx complete callback*/ + huart->TxCpltCallback(huart); +#else + /*Call legacy weak Tx complete callback*/ + HAL_UART_TxCpltCallback(huart); +#endif /* USE_HAL_UART_REGISTER_CALLBACKS */ + } +} + +/** + * @brief DMA UART transmit process half complete callback. + * @param hdma DMA handle. + * @retval None + */ +static void UART_DMATxHalfCplt(DMA_HandleTypeDef *hdma) +{ + UART_HandleTypeDef *huart = (UART_HandleTypeDef *)(hdma->Parent); + +#if (USE_HAL_UART_REGISTER_CALLBACKS == 1) + /*Call registered Tx Half complete callback*/ + huart->TxHalfCpltCallback(huart); +#else + /*Call legacy weak Tx Half complete callback*/ + HAL_UART_TxHalfCpltCallback(huart); +#endif /* USE_HAL_UART_REGISTER_CALLBACKS */ +} + +/** + * @brief DMA UART receive process complete callback. + * @param hdma DMA handle. + * @retval None + */ +static void UART_DMAReceiveCplt(DMA_HandleTypeDef *hdma) +{ + UART_HandleTypeDef *huart = (UART_HandleTypeDef *)(hdma->Parent); + + /* DMA Normal mode */ + if (hdma->Init.Mode != DMA_CIRCULAR) + { + huart->RxXferCount = 0U; + + /* Disable PE and ERR (Frame error, noise error, overrun error) interrupts */ + ATOMIC_CLEAR_BIT(huart->Instance->CR1, USART_CR1_PEIE); + ATOMIC_CLEAR_BIT(huart->Instance->CR3, USART_CR3_EIE); + + /* Disable the DMA transfer for the receiver request by resetting the DMAR bit + in the UART CR3 register */ + ATOMIC_CLEAR_BIT(huart->Instance->CR3, USART_CR3_DMAR); + + /* At end of Rx process, restore huart->RxState to Ready */ + huart->RxState = HAL_UART_STATE_READY; + + /* If Reception till IDLE event has been selected, Disable IDLE Interrupt */ + if (huart->ReceptionType == HAL_UART_RECEPTION_TOIDLE) + { + ATOMIC_CLEAR_BIT(huart->Instance->CR1, USART_CR1_IDLEIE); + } + } + + /* Check current reception Mode : + If Reception till IDLE event has been selected : use Rx Event callback */ + if (huart->ReceptionType == HAL_UART_RECEPTION_TOIDLE) + { +#if (USE_HAL_UART_REGISTER_CALLBACKS == 1) + /*Call registered Rx Event callback*/ + huart->RxEventCallback(huart, huart->RxXferSize); +#else + /*Call legacy weak Rx Event callback*/ + HAL_UARTEx_RxEventCallback(huart, huart->RxXferSize); +#endif /* USE_HAL_UART_REGISTER_CALLBACKS */ + } + else + { + /* In other cases : use Rx Complete callback */ +#if (USE_HAL_UART_REGISTER_CALLBACKS == 1) + /*Call registered Rx complete callback*/ + huart->RxCpltCallback(huart); +#else + /*Call legacy weak Rx complete callback*/ + HAL_UART_RxCpltCallback(huart); +#endif /* USE_HAL_UART_REGISTER_CALLBACKS */ + } +} + +/** + * @brief DMA UART receive process half complete callback. + * @param hdma DMA handle. 
+ * @retval None + */ +static void UART_DMARxHalfCplt(DMA_HandleTypeDef *hdma) +{ + UART_HandleTypeDef *huart = (UART_HandleTypeDef *)(hdma->Parent); + + /* Check current reception Mode : + If Reception till IDLE event has been selected : use Rx Event callback */ + if (huart->ReceptionType == HAL_UART_RECEPTION_TOIDLE) + { +#if (USE_HAL_UART_REGISTER_CALLBACKS == 1) + /*Call registered Rx Event callback*/ + huart->RxEventCallback(huart, huart->RxXferSize / 2U); +#else + /*Call legacy weak Rx Event callback*/ + HAL_UARTEx_RxEventCallback(huart, huart->RxXferSize / 2U); +#endif /* USE_HAL_UART_REGISTER_CALLBACKS */ + } + else + { + /* In other cases : use Rx Half Complete callback */ +#if (USE_HAL_UART_REGISTER_CALLBACKS == 1) + /*Call registered Rx Half complete callback*/ + huart->RxHalfCpltCallback(huart); +#else + /*Call legacy weak Rx Half complete callback*/ + HAL_UART_RxHalfCpltCallback(huart); +#endif /* USE_HAL_UART_REGISTER_CALLBACKS */ + } +} + +/** + * @brief DMA UART communication error callback. + * @param hdma DMA handle. + * @retval None + */ +static void UART_DMAError(DMA_HandleTypeDef *hdma) +{ + UART_HandleTypeDef *huart = (UART_HandleTypeDef *)(hdma->Parent); + + const HAL_UART_StateTypeDef gstate = huart->gState; + const HAL_UART_StateTypeDef rxstate = huart->RxState; + + /* Stop UART DMA Tx request if ongoing */ + if ((HAL_IS_BIT_SET(huart->Instance->CR3, USART_CR3_DMAT)) && + (gstate == HAL_UART_STATE_BUSY_TX)) + { + huart->TxXferCount = 0U; + UART_EndTxTransfer(huart); + } + + /* Stop UART DMA Rx request if ongoing */ + if ((HAL_IS_BIT_SET(huart->Instance->CR3, USART_CR3_DMAR)) && + (rxstate == HAL_UART_STATE_BUSY_RX)) + { + huart->RxXferCount = 0U; + UART_EndRxTransfer(huart); + } + + huart->ErrorCode |= HAL_UART_ERROR_DMA; + +#if (USE_HAL_UART_REGISTER_CALLBACKS == 1) + /*Call registered error callback*/ + huart->ErrorCallback(huart); +#else + /*Call legacy weak error callback*/ + HAL_UART_ErrorCallback(huart); +#endif /* USE_HAL_UART_REGISTER_CALLBACKS */ +} + +/** + * @brief DMA UART communication abort callback, when initiated by HAL services on Error + * (To be called at end of DMA Abort procedure following error occurrence). + * @param hdma DMA handle. + * @retval None + */ +static void UART_DMAAbortOnError(DMA_HandleTypeDef *hdma) +{ + UART_HandleTypeDef *huart = (UART_HandleTypeDef *)(hdma->Parent); + huart->RxXferCount = 0U; + huart->TxXferCount = 0U; + +#if (USE_HAL_UART_REGISTER_CALLBACKS == 1) + /*Call registered error callback*/ + huart->ErrorCallback(huart); +#else + /*Call legacy weak error callback*/ + HAL_UART_ErrorCallback(huart); +#endif /* USE_HAL_UART_REGISTER_CALLBACKS */ +} + +/** + * @brief DMA UART Tx communication abort callback, when initiated by user + * (To be called at end of DMA Tx Abort procedure following user abort request). + * @note When this callback is executed, User Abort complete call back is called only if no + * Abort still ongoing for Rx DMA Handle. + * @param hdma DMA handle. 
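+  * @note  Typical trigger is a user call to HAL_UART_Abort_IT(); the sketch below uses an
+  *        illustrative handle name. The user Abort complete callback only runs once the Tx and
+  *        Rx DMA aborts (when both are active) have finished.
+  * @code
+  *   extern UART_HandleTypeDef huart3;
+  *
+  *   void cancel_all_transfers(void)
+  *   {
+  *     (void)HAL_UART_Abort_IT(&huart3);   // non-blocking abort of both directions
+  *   }
+  *
+  *   void HAL_UART_AbortCpltCallback(UART_HandleTypeDef *huart)
+  *   {
+  *     // both directions are idle again; safe to restart transfers
+  *   }
+  * @endcode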
+ * @retval None + */ +static void UART_DMATxAbortCallback(DMA_HandleTypeDef *hdma) +{ + UART_HandleTypeDef *huart = (UART_HandleTypeDef *)(hdma->Parent); + + huart->hdmatx->XferAbortCallback = NULL; + + /* Check if an Abort process is still ongoing */ + if (huart->hdmarx != NULL) + { + if (huart->hdmarx->XferAbortCallback != NULL) + { + return; + } + } + + /* No Abort process still ongoing : All DMA channels are aborted, call user Abort Complete callback */ + huart->TxXferCount = 0U; + huart->RxXferCount = 0U; + + /* Reset errorCode */ + huart->ErrorCode = HAL_UART_ERROR_NONE; + + /* Clear the Error flags in the ICR register */ + __HAL_UART_CLEAR_FLAG(huart, UART_CLEAR_OREF | UART_CLEAR_NEF | UART_CLEAR_PEF | UART_CLEAR_FEF); + + + /* Restore huart->gState and huart->RxState to Ready */ + huart->gState = HAL_UART_STATE_READY; + huart->RxState = HAL_UART_STATE_READY; + huart->ReceptionType = HAL_UART_RECEPTION_STANDARD; + + /* Call user Abort complete callback */ +#if (USE_HAL_UART_REGISTER_CALLBACKS == 1) + /* Call registered Abort complete callback */ + huart->AbortCpltCallback(huart); +#else + /* Call legacy weak Abort complete callback */ + HAL_UART_AbortCpltCallback(huart); +#endif /* USE_HAL_UART_REGISTER_CALLBACKS */ +} + + +/** + * @brief DMA UART Rx communication abort callback, when initiated by user + * (To be called at end of DMA Rx Abort procedure following user abort request). + * @note When this callback is executed, User Abort complete call back is called only if no + * Abort still ongoing for Tx DMA Handle. + * @param hdma DMA handle. + * @retval None + */ +static void UART_DMARxAbortCallback(DMA_HandleTypeDef *hdma) +{ + UART_HandleTypeDef *huart = (UART_HandleTypeDef *)(hdma->Parent); + + huart->hdmarx->XferAbortCallback = NULL; + + /* Check if an Abort process is still ongoing */ + if (huart->hdmatx != NULL) + { + if (huart->hdmatx->XferAbortCallback != NULL) + { + return; + } + } + + /* No Abort process still ongoing : All DMA channels are aborted, call user Abort Complete callback */ + huart->TxXferCount = 0U; + huart->RxXferCount = 0U; + + /* Reset errorCode */ + huart->ErrorCode = HAL_UART_ERROR_NONE; + + /* Clear the Error flags in the ICR register */ + __HAL_UART_CLEAR_FLAG(huart, UART_CLEAR_OREF | UART_CLEAR_NEF | UART_CLEAR_PEF | UART_CLEAR_FEF); + + /* Discard the received data */ + __HAL_UART_SEND_REQ(huart, UART_RXDATA_FLUSH_REQUEST); + + /* Restore huart->gState and huart->RxState to Ready */ + huart->gState = HAL_UART_STATE_READY; + huart->RxState = HAL_UART_STATE_READY; + huart->ReceptionType = HAL_UART_RECEPTION_STANDARD; + + /* Call user Abort complete callback */ +#if (USE_HAL_UART_REGISTER_CALLBACKS == 1) + /* Call registered Abort complete callback */ + huart->AbortCpltCallback(huart); +#else + /* Call legacy weak Abort complete callback */ + HAL_UART_AbortCpltCallback(huart); +#endif /* USE_HAL_UART_REGISTER_CALLBACKS */ +} + + +/** + * @brief DMA UART Tx communication abort callback, when initiated by user by a call to + * HAL_UART_AbortTransmit_IT API (Abort only Tx transfer) + * (This callback is executed at end of DMA Tx Abort procedure following user abort request, + * and leads to user Tx Abort Complete callback execution). + * @param hdma DMA handle. 
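+  * @note  Reached via HAL_UART_AbortTransmit_IT(); any ongoing reception is left untouched.
+  *        Sketch with an illustrative handle name:
+  * @code
+  *   extern UART_HandleTypeDef huart3;
+  *
+  *   void cancel_tx_only(void)
+  *   {
+  *     (void)HAL_UART_AbortTransmit_IT(&huart3);
+  *   }
+  *
+  *   void HAL_UART_AbortTransmitCpltCallback(UART_HandleTypeDef *huart)
+  *   {
+  *     // Tx aborted; Rx (if any) still running
+  *   }
+  * @endcode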
+ * @retval None + */ +static void UART_DMATxOnlyAbortCallback(DMA_HandleTypeDef *hdma) +{ + UART_HandleTypeDef *huart = (UART_HandleTypeDef *)(hdma->Parent); + + huart->TxXferCount = 0U; + + + /* Restore huart->gState to Ready */ + huart->gState = HAL_UART_STATE_READY; + + /* Call user Abort complete callback */ +#if (USE_HAL_UART_REGISTER_CALLBACKS == 1) + /* Call registered Abort Transmit Complete Callback */ + huart->AbortTransmitCpltCallback(huart); +#else + /* Call legacy weak Abort Transmit Complete Callback */ + HAL_UART_AbortTransmitCpltCallback(huart); +#endif /* USE_HAL_UART_REGISTER_CALLBACKS */ +} + +/** + * @brief DMA UART Rx communication abort callback, when initiated by user by a call to + * HAL_UART_AbortReceive_IT API (Abort only Rx transfer) + * (This callback is executed at end of DMA Rx Abort procedure following user abort request, + * and leads to user Rx Abort Complete callback execution). + * @param hdma DMA handle. + * @retval None + */ +static void UART_DMARxOnlyAbortCallback(DMA_HandleTypeDef *hdma) +{ + UART_HandleTypeDef *huart = (UART_HandleTypeDef *)((DMA_HandleTypeDef *)hdma)->Parent; + + huart->RxXferCount = 0U; + + /* Clear the Error flags in the ICR register */ + __HAL_UART_CLEAR_FLAG(huart, UART_CLEAR_OREF | UART_CLEAR_NEF | UART_CLEAR_PEF | UART_CLEAR_FEF); + + /* Discard the received data */ + __HAL_UART_SEND_REQ(huart, UART_RXDATA_FLUSH_REQUEST); + + /* Restore huart->RxState to Ready */ + huart->RxState = HAL_UART_STATE_READY; + huart->ReceptionType = HAL_UART_RECEPTION_STANDARD; + + /* Call user Abort complete callback */ +#if (USE_HAL_UART_REGISTER_CALLBACKS == 1) + /* Call registered Abort Receive Complete Callback */ + huart->AbortReceiveCpltCallback(huart); +#else + /* Call legacy weak Abort Receive Complete Callback */ + HAL_UART_AbortReceiveCpltCallback(huart); +#endif /* USE_HAL_UART_REGISTER_CALLBACKS */ +} + +/** + * @brief TX interrupt handler for 7 or 8 bits data word length . + * @note Function is called under interruption only, once + * interruptions have been enabled by HAL_UART_Transmit_IT(). + * @param huart UART handle. + * @retval None + */ +static void UART_TxISR_8BIT(UART_HandleTypeDef *huart) +{ + /* Check that a Tx process is ongoing */ + if (huart->gState == HAL_UART_STATE_BUSY_TX) + { + if (huart->TxXferCount == 0U) + { + /* Disable the UART Transmit Data Register Empty Interrupt */ + ATOMIC_CLEAR_BIT(huart->Instance->CR1, USART_CR1_TXEIE); + + /* Enable the UART Transmit Complete Interrupt */ + ATOMIC_SET_BIT(huart->Instance->CR1, USART_CR1_TCIE); + } + else + { + huart->Instance->TDR = (uint8_t)(*huart->pTxBuffPtr & (uint8_t)0xFF); + huart->pTxBuffPtr++; + huart->TxXferCount--; + } + } +} + +/** + * @brief TX interrupt handler for 9 bits data word length. + * @note Function is called under interruption only, once + * interruptions have been enabled by HAL_UART_Transmit_IT(). + * @param huart UART handle. 
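+  * @note  This handler is used when the word length is 9 bits with parity disabled: the user
+  *        buffer is then consumed as uint16_t elements and Size counts those elements.
+  *        Illustrative sketch (huart3 and its 9-bit/no-parity init are assumed to exist):
+  * @code
+  *   extern UART_HandleTypeDef huart3;   // Init.WordLength = UART_WORDLENGTH_9B, Parity = NONE
+  *   static uint16_t frame9[4] = { 0x155U, 0x0AAU, 0x1FFU, 0x000U };
+  *
+  *   void send_9bit_frame(void)
+  *   {
+  *     (void)HAL_UART_Transmit_IT(&huart3, (uint8_t *)frame9, 4U);   // Size = 4 uint16_t words
+  *   }
+  * @endcode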
+ * @retval None + */ +static void UART_TxISR_16BIT(UART_HandleTypeDef *huart) +{ + const uint16_t *tmp; + + /* Check that a Tx process is ongoing */ + if (huart->gState == HAL_UART_STATE_BUSY_TX) + { + if (huart->TxXferCount == 0U) + { + /* Disable the UART Transmit Data Register Empty Interrupt */ + ATOMIC_CLEAR_BIT(huart->Instance->CR1, USART_CR1_TXEIE); + + /* Enable the UART Transmit Complete Interrupt */ + ATOMIC_SET_BIT(huart->Instance->CR1, USART_CR1_TCIE); + } + else + { + tmp = (const uint16_t *) huart->pTxBuffPtr; + huart->Instance->TDR = (((uint32_t)(*tmp)) & 0x01FFUL); + huart->pTxBuffPtr += 2U; + huart->TxXferCount--; + } + } +} + + +/** + * @brief Wrap up transmission in non-blocking mode. + * @param huart pointer to a UART_HandleTypeDef structure that contains + * the configuration information for the specified UART module. + * @retval None + */ +static void UART_EndTransmit_IT(UART_HandleTypeDef *huart) +{ + /* Disable the UART Transmit Complete Interrupt */ + ATOMIC_CLEAR_BIT(huart->Instance->CR1, USART_CR1_TCIE); + + /* Tx process is ended, restore huart->gState to Ready */ + huart->gState = HAL_UART_STATE_READY; + + /* Cleat TxISR function pointer */ + huart->TxISR = NULL; + +#if (USE_HAL_UART_REGISTER_CALLBACKS == 1) + /*Call registered Tx complete callback*/ + huart->TxCpltCallback(huart); +#else + /*Call legacy weak Tx complete callback*/ + HAL_UART_TxCpltCallback(huart); +#endif /* USE_HAL_UART_REGISTER_CALLBACKS */ +} + +/** + * @brief RX interrupt handler for 7 or 8 bits data word length . + * @param huart UART handle. + * @retval None + */ +static void UART_RxISR_8BIT(UART_HandleTypeDef *huart) +{ + uint16_t uhMask = huart->Mask; + uint16_t uhdata; + + /* Check that a Rx process is ongoing */ + if (huart->RxState == HAL_UART_STATE_BUSY_RX) + { + uhdata = (uint16_t) READ_REG(huart->Instance->RDR); + *huart->pRxBuffPtr = (uint8_t)(uhdata & (uint8_t)uhMask); + huart->pRxBuffPtr++; + huart->RxXferCount--; + + if (huart->RxXferCount == 0U) + { + /* Disable the UART Parity Error Interrupt and RXNE interrupts */ + ATOMIC_CLEAR_BIT(huart->Instance->CR1, (USART_CR1_RXNEIE | USART_CR1_PEIE)); + + /* Disable the UART Error Interrupt: (Frame error, noise error, overrun error) */ + ATOMIC_CLEAR_BIT(huart->Instance->CR3, USART_CR3_EIE); + + /* Rx process is completed, restore huart->RxState to Ready */ + huart->RxState = HAL_UART_STATE_READY; + + /* Clear RxISR function pointer */ + huart->RxISR = NULL; + + /* Check current reception Mode : + If Reception till IDLE event has been selected : */ + if (huart->ReceptionType == HAL_UART_RECEPTION_TOIDLE) + { + /* Set reception type to Standard */ + huart->ReceptionType = HAL_UART_RECEPTION_STANDARD; + + /* Disable IDLE interrupt */ + ATOMIC_CLEAR_BIT(huart->Instance->CR1, USART_CR1_IDLEIE); + + if (__HAL_UART_GET_FLAG(huart, UART_FLAG_IDLE) == SET) + { + /* Clear IDLE Flag */ + __HAL_UART_CLEAR_FLAG(huart, UART_CLEAR_IDLEF); + } +#if (USE_HAL_UART_REGISTER_CALLBACKS == 1) + /*Call registered Rx Event callback*/ + huart->RxEventCallback(huart, huart->RxXferSize); +#else + /*Call legacy weak Rx Event callback*/ + HAL_UARTEx_RxEventCallback(huart, huart->RxXferSize); +#endif /* (USE_HAL_UART_REGISTER_CALLBACKS) */ + } + else + { + /* Standard reception API called */ +#if (USE_HAL_UART_REGISTER_CALLBACKS == 1) + /*Call registered Rx complete callback*/ + huart->RxCpltCallback(huart); +#else + /*Call legacy weak Rx complete callback*/ + HAL_UART_RxCpltCallback(huart); +#endif /* USE_HAL_UART_REGISTER_CALLBACKS */ + } + } + } + else + { 
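+    /* Data received while no reception is in progress (RxState != HAL_UART_STATE_BUSY_RX):
+       there is no buffer to store it, so the byte is discarded via the RX data flush request
+       issued below. */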
+ /* Clear RXNE interrupt flag */ + __HAL_UART_SEND_REQ(huart, UART_RXDATA_FLUSH_REQUEST); + } +} + +/** + * @brief RX interrupt handler for 9 bits data word length . + * @note Function is called under interruption only, once + * interruptions have been enabled by HAL_UART_Receive_IT() + * @param huart UART handle. + * @retval None + */ +static void UART_RxISR_16BIT(UART_HandleTypeDef *huart) +{ + uint16_t *tmp; + uint16_t uhMask = huart->Mask; + uint16_t uhdata; + + /* Check that a Rx process is ongoing */ + if (huart->RxState == HAL_UART_STATE_BUSY_RX) + { + uhdata = (uint16_t) READ_REG(huart->Instance->RDR); + tmp = (uint16_t *) huart->pRxBuffPtr ; + *tmp = (uint16_t)(uhdata & uhMask); + huart->pRxBuffPtr += 2U; + huart->RxXferCount--; + + if (huart->RxXferCount == 0U) + { + /* Disable the UART Parity Error Interrupt and RXNE interrupt*/ + ATOMIC_CLEAR_BIT(huart->Instance->CR1, (USART_CR1_RXNEIE | USART_CR1_PEIE)); + + /* Disable the UART Error Interrupt: (Frame error, noise error, overrun error) */ + ATOMIC_CLEAR_BIT(huart->Instance->CR3, USART_CR3_EIE); + + /* Rx process is completed, restore huart->RxState to Ready */ + huart->RxState = HAL_UART_STATE_READY; + + /* Clear RxISR function pointer */ + huart->RxISR = NULL; + + /* Check current reception Mode : + If Reception till IDLE event has been selected : */ + if (huart->ReceptionType == HAL_UART_RECEPTION_TOIDLE) + { + /* Set reception type to Standard */ + huart->ReceptionType = HAL_UART_RECEPTION_STANDARD; + + /* Disable IDLE interrupt */ + ATOMIC_CLEAR_BIT(huart->Instance->CR1, USART_CR1_IDLEIE); + + if (__HAL_UART_GET_FLAG(huart, UART_FLAG_IDLE) == SET) + { + /* Clear IDLE Flag */ + __HAL_UART_CLEAR_FLAG(huart, UART_CLEAR_IDLEF); + } +#if (USE_HAL_UART_REGISTER_CALLBACKS == 1) + /*Call registered Rx Event callback*/ + huart->RxEventCallback(huart, huart->RxXferSize); +#else + /*Call legacy weak Rx Event callback*/ + HAL_UARTEx_RxEventCallback(huart, huart->RxXferSize); +#endif /* (USE_HAL_UART_REGISTER_CALLBACKS) */ + } + else + { + /* Standard reception API called */ +#if (USE_HAL_UART_REGISTER_CALLBACKS == 1) + /*Call registered Rx complete callback*/ + huart->RxCpltCallback(huart); +#else + /*Call legacy weak Rx complete callback*/ + HAL_UART_RxCpltCallback(huart); +#endif /* USE_HAL_UART_REGISTER_CALLBACKS */ + } + } + } + else + { + /* Clear RXNE interrupt flag */ + __HAL_UART_SEND_REQ(huart, UART_RXDATA_FLUSH_REQUEST); + } +} + + +/** + * @} + */ + +#endif /* HAL_UART_MODULE_ENABLED */ +/** + * @} + */ + +/** + * @} + */ + diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_uart_ex.c b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_uart_ex.c new file mode 100644 index 0000000..0efc77b --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_uart_ex.c @@ -0,0 +1,770 @@ +/** + ****************************************************************************** + * @file stm32f7xx_hal_uart_ex.c + * @author MCD Application Team + * @brief Extended UART HAL module driver. + * This file provides firmware functions to manage the following extended + * functionalities of the Universal Asynchronous Receiver Transmitter Peripheral (UART). 
+ * + Initialization and de-initialization functions + * + Peripheral Control functions + * + * + ****************************************************************************** + * @attention + * + * Copyright (c) 2017 STMicroelectronics. + * All rights reserved. + * + * This software is licensed under terms that can be found in the LICENSE file + * in the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. + * + ****************************************************************************** + @verbatim + ============================================================================== + ##### UART peripheral extended features ##### + ============================================================================== + + (#) Declare a UART_HandleTypeDef handle structure. + + (#) For the UART RS485 Driver Enable mode, initialize the UART registers + by calling the HAL_RS485Ex_Init() API. + + @endverbatim + ****************************************************************************** + */ + +/* Includes ------------------------------------------------------------------*/ +#include "stm32f7xx_hal.h" + +/** @addtogroup STM32F7xx_HAL_Driver + * @{ + */ + +/** @defgroup UARTEx UARTEx + * @brief UART Extended HAL module driver + * @{ + */ + +#ifdef HAL_UART_MODULE_ENABLED + +/* Private typedef -----------------------------------------------------------*/ +/* Private define ------------------------------------------------------------*/ + +/* Private macros ------------------------------------------------------------*/ +/* Private variables ---------------------------------------------------------*/ +/* Private function prototypes -----------------------------------------------*/ +/** @defgroup UARTEx_Private_Functions UARTEx Private Functions + * @{ + */ +#if defined(USART_CR1_UESM) +static void UARTEx_Wakeup_AddressConfig(UART_HandleTypeDef *huart, UART_WakeUpTypeDef WakeUpSelection); +#endif /* USART_CR1_UESM */ +/** + * @} + */ + +/* Exported functions --------------------------------------------------------*/ + +/** @defgroup UARTEx_Exported_Functions UARTEx Exported Functions + * @{ + */ + +/** @defgroup UARTEx_Exported_Functions_Group1 Initialization and de-initialization functions + * @brief Extended Initialization and Configuration Functions + * +@verbatim +=============================================================================== + ##### Initialization and Configuration functions ##### + =============================================================================== + [..] + This subsection provides a set of functions allowing to initialize the USARTx or the UARTy + in asynchronous mode. + (+) For the asynchronous mode the parameters below can be configured: + (++) Baud Rate + (++) Word Length + (++) Stop Bit + (++) Parity: If the parity is enabled, then the MSB bit of the data written + in the data register is transmitted but is changed by the parity bit. + (++) Hardware flow control + (++) Receiver/transmitter modes + (++) Over Sampling Method + (++) One-Bit Sampling Method + (+) For the asynchronous mode, the following advanced features can be configured as well: + (++) TX and/or RX pin level inversion + (++) data logical level inversion + (++) RX and TX pins swap + (++) RX overrun detection disabling + (++) DMA disabling on RX error + (++) MSB first on communication line + (++) auto Baud rate detection + [..] 
+ The HAL_RS485Ex_Init() API follows the UART RS485 mode configuration + procedures (details for the procedures are available in reference manual). + +@endverbatim + + Depending on the frame length defined by the M1 and M0 bits (7-bit, + 8-bit or 9-bit), the possible UART formats are listed in the + following table. + + Table 1. UART frame format. + +-----------------------------------------------------------------------+ + | M1 bit | M0 bit | PCE bit | UART frame | + |---------|---------|-----------|---------------------------------------| + | 0 | 0 | 0 | | SB | 8 bit data | STB | | + |---------|---------|-----------|---------------------------------------| + | 0 | 0 | 1 | | SB | 7 bit data | PB | STB | | + |---------|---------|-----------|---------------------------------------| + | 0 | 1 | 0 | | SB | 9 bit data | STB | | + |---------|---------|-----------|---------------------------------------| + | 0 | 1 | 1 | | SB | 8 bit data | PB | STB | | + |---------|---------|-----------|---------------------------------------| + | 1 | 0 | 0 | | SB | 7 bit data | STB | | + |---------|---------|-----------|---------------------------------------| + | 1 | 0 | 1 | | SB | 6 bit data | PB | STB | | + +-----------------------------------------------------------------------+ + + * @{ + */ + +/** + * @brief Initialize the RS485 Driver enable feature according to the specified + * parameters in the UART_InitTypeDef and creates the associated handle. + * @param huart UART handle. + * @param Polarity Select the driver enable polarity. + * This parameter can be one of the following values: + * @arg @ref UART_DE_POLARITY_HIGH DE signal is active high + * @arg @ref UART_DE_POLARITY_LOW DE signal is active low + * @param AssertionTime Driver Enable assertion time: + * 5-bit value defining the time between the activation of the DE (Driver Enable) + * signal and the beginning of the start bit. It is expressed in sample time + * units (1/8 or 1/16 bit time, depending on the oversampling rate) + * @param DeassertionTime Driver Enable deassertion time: + * 5-bit value defining the time between the end of the last stop bit, in a + * transmitted message, and the de-activation of the DE (Driver Enable) signal. + * It is expressed in sample time units (1/8 or 1/16 bit time, depending on the + * oversampling rate). 
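+  * @note  Illustrative sketch only: huart3, the USART3 pin/clock setup in HAL_UART_MspInit()
+  *        and the 8-sample DE times are assumptions, not values taken from this driver.
+  * @code
+  *   extern UART_HandleTypeDef huart3;
+  *
+  *   void rs485_init(void)
+  *   {
+  *     huart3.Instance          = USART3;
+  *     huart3.Init.BaudRate     = 115200U;
+  *     huart3.Init.WordLength   = UART_WORDLENGTH_8B;
+  *     huart3.Init.StopBits     = UART_STOPBITS_1;
+  *     huart3.Init.Parity       = UART_PARITY_NONE;
+  *     huart3.Init.Mode         = UART_MODE_TX_RX;
+  *     huart3.Init.HwFlowCtl    = UART_HWCONTROL_NONE;
+  *     huart3.Init.OverSampling = UART_OVERSAMPLING_16;
+  *     // DE active high, 8 sample-time units of assertion/deassertion delay
+  *     (void)HAL_RS485Ex_Init(&huart3, UART_DE_POLARITY_HIGH, 8U, 8U);
+  *   }
+  * @endcode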
+ * @retval HAL status + */ +HAL_StatusTypeDef HAL_RS485Ex_Init(UART_HandleTypeDef *huart, uint32_t Polarity, uint32_t AssertionTime, + uint32_t DeassertionTime) +{ + uint32_t temp; + + /* Check the UART handle allocation */ + if (huart == NULL) + { + return HAL_ERROR; + } + /* Check the Driver Enable UART instance */ + assert_param(IS_UART_DRIVER_ENABLE_INSTANCE(huart->Instance)); + + /* Check the Driver Enable polarity */ + assert_param(IS_UART_DE_POLARITY(Polarity)); + + /* Check the Driver Enable assertion time */ + assert_param(IS_UART_ASSERTIONTIME(AssertionTime)); + + /* Check the Driver Enable deassertion time */ + assert_param(IS_UART_DEASSERTIONTIME(DeassertionTime)); + + if (huart->gState == HAL_UART_STATE_RESET) + { + /* Allocate lock resource and initialize it */ + huart->Lock = HAL_UNLOCKED; + +#if (USE_HAL_UART_REGISTER_CALLBACKS == 1) + UART_InitCallbacksToDefault(huart); + + if (huart->MspInitCallback == NULL) + { + huart->MspInitCallback = HAL_UART_MspInit; + } + + /* Init the low level hardware */ + huart->MspInitCallback(huart); +#else + /* Init the low level hardware : GPIO, CLOCK, CORTEX */ + HAL_UART_MspInit(huart); +#endif /* (USE_HAL_UART_REGISTER_CALLBACKS) */ + } + + huart->gState = HAL_UART_STATE_BUSY; + + /* Disable the Peripheral */ + __HAL_UART_DISABLE(huart); + + /* Set the UART Communication parameters */ + if (UART_SetConfig(huart) == HAL_ERROR) + { + return HAL_ERROR; + } + + if (huart->AdvancedInit.AdvFeatureInit != UART_ADVFEATURE_NO_INIT) + { + UART_AdvFeatureConfig(huart); + } + + /* Enable the Driver Enable mode by setting the DEM bit in the CR3 register */ + SET_BIT(huart->Instance->CR3, USART_CR3_DEM); + + /* Set the Driver Enable polarity */ + MODIFY_REG(huart->Instance->CR3, USART_CR3_DEP, Polarity); + + /* Set the Driver Enable assertion and deassertion times */ + temp = (AssertionTime << UART_CR1_DEAT_ADDRESS_LSB_POS); + temp |= (DeassertionTime << UART_CR1_DEDT_ADDRESS_LSB_POS); + MODIFY_REG(huart->Instance->CR1, (USART_CR1_DEDT | USART_CR1_DEAT), temp); + + /* Enable the Peripheral */ + __HAL_UART_ENABLE(huart); + + /* TEACK and/or REACK to check before moving huart->gState and huart->RxState to Ready */ + return (UART_CheckIdleState(huart)); +} + +/** + * @} + */ + + +/** @defgroup UARTEx_Exported_Functions_Group3 Peripheral Control functions + * @brief Extended Peripheral Control functions + * +@verbatim + =============================================================================== + ##### Peripheral Control functions ##### + =============================================================================== + [..] This section provides the following functions: + (+) HAL_UARTEx_EnableClockStopMode() API enables the UART clock (HSI or LSE only) during stop mode + (+) HAL_UARTEx_DisableClockStopMode() API disables the above functionality + (+) HAL_MultiProcessorEx_AddressLength_Set() API optionally sets the UART node address + detection length to more than 4 bits for multiprocessor address mark wake up. +#if defined(USART_CR1_UESM) + (+) HAL_UARTEx_StopModeWakeUpSourceConfig() API defines the wake-up from stop mode + trigger: address match, Start Bit detection or RXNE bit status. + (+) HAL_UARTEx_EnableStopMode() API enables the UART to wake up the MCU from stop mode + (+) HAL_UARTEx_DisableStopMode() API disables the above functionality +#endif + + [..] This subsection also provides a set of additional functions providing enhanced reception + services to user. 
(For example, these functions allow application to handle use cases + where number of data to be received is unknown). + + (#) Compared to standard reception services which only consider number of received + data elements as reception completion criteria, these functions also consider additional events + as triggers for updating reception status to caller : + (+) Detection of inactivity period (RX line has not been active for a given period). + (++) RX inactivity detected by IDLE event, i.e. RX line has been in idle state (normally high state) + for 1 frame time, after last received byte. + (++) RX inactivity detected by RTO, i.e. line has been in idle state + for a programmable time, after last received byte. + (+) Detection that a specific character has been received. + + (#) There are two mode of transfer: + (+) Blocking mode: The reception is performed in polling mode, until either expected number of data is received, + or till IDLE event occurs. Reception is handled only during function execution. + When function exits, no data reception could occur. HAL status and number of actually received data elements, + are returned by function after finishing transfer. + (+) Non-Blocking mode: The reception is performed using Interrupts or DMA. + These API's return the HAL status. + The end of the data processing will be indicated through the + dedicated UART IRQ when using Interrupt mode or the DMA IRQ when using DMA mode. + The HAL_UARTEx_RxEventCallback() user callback will be executed during Receive process + The HAL_UART_ErrorCallback()user callback will be executed when a reception error is detected. + + (#) Blocking mode API: + (+) HAL_UARTEx_ReceiveToIdle() + + (#) Non-Blocking mode API with Interrupt: + (+) HAL_UARTEx_ReceiveToIdle_IT() + + (#) Non-Blocking mode API with DMA: + (+) HAL_UARTEx_ReceiveToIdle_DMA() + +@endverbatim + * @{ + */ + +#if defined(USART_CR3_UCESM) +/** + * @brief Keep UART Clock enabled when in Stop Mode. + * @note When the USART clock source is configured to be LSE or HSI, it is possible to keep enabled + * this clock during STOP mode by setting the UCESM bit in USART_CR3 control register. + * @note When LPUART is used to wakeup from stop with LSE is selected as LPUART clock source, + * and desired baud rate is 9600 baud, the bit UCESM bit in LPUART_CR3 control register must be set. + * @param huart UART handle. + * @retval HAL status + */ +HAL_StatusTypeDef HAL_UARTEx_EnableClockStopMode(UART_HandleTypeDef *huart) +{ + /* Process Locked */ + __HAL_LOCK(huart); + + /* Set UCESM bit */ + ATOMIC_SET_BIT(huart->Instance->CR3, USART_CR3_UCESM); + + /* Process Unlocked */ + __HAL_UNLOCK(huart); + + return HAL_OK; +} + +/** + * @brief Disable UART Clock when in Stop Mode. + * @param huart UART handle. + * @retval HAL status + */ +HAL_StatusTypeDef HAL_UARTEx_DisableClockStopMode(UART_HandleTypeDef *huart) +{ + /* Process Locked */ + __HAL_LOCK(huart); + + /* Clear UCESM bit */ + ATOMIC_CLEAR_BIT(huart->Instance->CR3, USART_CR3_UCESM); + + /* Process Unlocked */ + __HAL_UNLOCK(huart); + + return HAL_OK; +} + +#endif /* USART_CR3_UCESM */ +/** + * @brief By default in multiprocessor mode, when the wake up method is set + * to address mark, the UART handles only 4-bit long addresses detection; + * this API allows to enable longer addresses detection (6-, 7- or 8-bit + * long). + * @note Addresses detection lengths are: 6-bit address detection in 7-bit data mode, + * 7-bit address detection in 8-bit data mode, 8-bit address detection in 9-bit data mode. 
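+  * @note  Illustrative sketch (the handle name and 0x25 node address are arbitrary, and huart3
+  *        is assumed to have its Init fields already filled): configure multiprocessor mode
+  *        first, then widen address detection beyond 4 bits.
+  * @code
+  *   extern UART_HandleTypeDef huart3;
+  *
+  *   void enable_long_address_wakeup(void)
+  *   {
+  *     (void)HAL_MultiProcessor_Init(&huart3, 0x25U, UART_WAKEUPMETHOD_ADDRESSMARK);
+  *     (void)HAL_MultiProcessorEx_AddressLength_Set(&huart3, UART_ADDRESS_DETECT_7B);
+  *     (void)HAL_MultiProcessor_EnableMuteMode(&huart3);
+  *     HAL_MultiProcessor_EnterMuteMode(&huart3);   // stay mute until the node address matches
+  *   }
+  * @endcode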
+ * @param huart UART handle. + * @param AddressLength This parameter can be one of the following values: + * @arg @ref UART_ADDRESS_DETECT_4B 4-bit long address + * @arg @ref UART_ADDRESS_DETECT_7B 6-, 7- or 8-bit long address + * @retval HAL status + */ +HAL_StatusTypeDef HAL_MultiProcessorEx_AddressLength_Set(UART_HandleTypeDef *huart, uint32_t AddressLength) +{ + /* Check the UART handle allocation */ + if (huart == NULL) + { + return HAL_ERROR; + } + + /* Check the address length parameter */ + assert_param(IS_UART_ADDRESSLENGTH_DETECT(AddressLength)); + + huart->gState = HAL_UART_STATE_BUSY; + + /* Disable the Peripheral */ + __HAL_UART_DISABLE(huart); + + /* Set the address length */ + MODIFY_REG(huart->Instance->CR2, USART_CR2_ADDM7, AddressLength); + + /* Enable the Peripheral */ + __HAL_UART_ENABLE(huart); + + /* TEACK and/or REACK to check before moving huart->gState to Ready */ + return (UART_CheckIdleState(huart)); +} + +#if defined(USART_CR1_UESM) +/** + * @brief Set Wakeup from Stop mode interrupt flag selection. + * @note It is the application responsibility to enable the interrupt used as + * usart_wkup interrupt source before entering low-power mode. + * @param huart UART handle. + * @param WakeUpSelection Address match, Start Bit detection or RXNE/RXFNE bit status. + * This parameter can be one of the following values: + * @arg @ref UART_WAKEUP_ON_ADDRESS + * @arg @ref UART_WAKEUP_ON_STARTBIT + * @arg @ref UART_WAKEUP_ON_READDATA_NONEMPTY + * @retval HAL status + */ +HAL_StatusTypeDef HAL_UARTEx_StopModeWakeUpSourceConfig(UART_HandleTypeDef *huart, UART_WakeUpTypeDef WakeUpSelection) +{ + HAL_StatusTypeDef status = HAL_OK; + uint32_t tickstart; + + /* check the wake-up from stop mode UART instance */ + assert_param(IS_UART_WAKEUP_FROMSTOP_INSTANCE(huart->Instance)); + /* check the wake-up selection parameter */ + assert_param(IS_UART_WAKEUP_SELECTION(WakeUpSelection.WakeUpEvent)); + + /* Process Locked */ + __HAL_LOCK(huart); + + huart->gState = HAL_UART_STATE_BUSY; + + /* Disable the Peripheral */ + __HAL_UART_DISABLE(huart); + +#if defined(USART_CR3_WUS) + /* Set the wake-up selection scheme */ + MODIFY_REG(huart->Instance->CR3, USART_CR3_WUS, WakeUpSelection.WakeUpEvent); +#endif /* USART_CR3_WUS */ + + if (WakeUpSelection.WakeUpEvent == UART_WAKEUP_ON_ADDRESS) + { + UARTEx_Wakeup_AddressConfig(huart, WakeUpSelection); + } + + /* Enable the Peripheral */ + __HAL_UART_ENABLE(huart); + + /* Init tickstart for timeout management */ + tickstart = HAL_GetTick(); + + /* Wait until REACK flag is set */ + if (UART_WaitOnFlagUntilTimeout(huart, USART_ISR_REACK, RESET, tickstart, HAL_UART_TIMEOUT_VALUE) != HAL_OK) + { + status = HAL_TIMEOUT; + } + else + { + /* Initialize the UART State */ + huart->gState = HAL_UART_STATE_READY; + } + + /* Process Unlocked */ + __HAL_UNLOCK(huart); + + return status; +} + +/** + * @brief Enable UART Stop Mode. + * @note The UART is able to wake up the MCU from Stop 1 mode as long as UART clock is HSI or LSE. + * @param huart UART handle. + * @retval HAL status + */ +HAL_StatusTypeDef HAL_UARTEx_EnableStopMode(UART_HandleTypeDef *huart) +{ + /* Process Locked */ + __HAL_LOCK(huart); + + /* Set UESM bit */ + ATOMIC_SET_BIT(huart->Instance->CR1, USART_CR1_UESM); + + /* Process Unlocked */ + __HAL_UNLOCK(huart); + + return HAL_OK; +} + +/** + * @brief Disable UART Stop Mode. + * @param huart UART handle. 
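+  * @note  Wake-up from Stop is only available on UART instances where USART_CR1_UESM exists
+  *        (see the surrounding preprocessor guard). Illustrative sequence; the handle name and
+  *        the HSI/LSE kernel-clock selection are assumptions:
+  * @code
+  *   extern UART_HandleTypeDef huart3;        // kernel clock = HSI or LSE for Stop-mode wake-up
+  *
+  *   void sleep_until_uart_start_bit(void)
+  *   {
+  *     UART_WakeUpTypeDef wake;
+  *     wake.WakeUpEvent = UART_WAKEUP_ON_STARTBIT;
+  *     (void)HAL_UARTEx_StopModeWakeUpSourceConfig(&huart3, wake);
+  *     __HAL_UART_ENABLE_IT(&huart3, UART_IT_WUF);          // interrupt used as wake-up source
+  *     (void)HAL_UARTEx_EnableStopMode(&huart3);
+  *     HAL_PWR_EnterSTOPMode(PWR_LOWPOWERREGULATOR_ON, PWR_STOPENTRY_WFI);
+  *     (void)HAL_UARTEx_DisableStopMode(&huart3);           // resumed: back to normal operation
+  *   }
+  * @endcode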
+ * @retval HAL status + */ +HAL_StatusTypeDef HAL_UARTEx_DisableStopMode(UART_HandleTypeDef *huart) +{ + /* Process Locked */ + __HAL_LOCK(huart); + + /* Clear UESM bit */ + ATOMIC_CLEAR_BIT(huart->Instance->CR1, USART_CR1_UESM); + + /* Process Unlocked */ + __HAL_UNLOCK(huart); + + return HAL_OK; +} + +#endif /* USART_CR1_UESM */ +/** + * @brief Receive an amount of data in blocking mode till either the expected number of data + * is received or an IDLE event occurs. + * @note HAL_OK is returned if reception is completed (expected number of data has been received) + * or if reception is stopped after IDLE event (less than the expected number of data has been received) + * In this case, RxLen output parameter indicates number of data available in reception buffer. + * @note When UART parity is not enabled (PCE = 0), and Word Length is configured to 9 bits (M1-M0 = 01), + * the received data is handled as a set of uint16_t. In this case, Size must indicate the number + * of uint16_t available through pData. + * @param huart UART handle. + * @param pData Pointer to data buffer (uint8_t or uint16_t data elements). + * @param Size Amount of data elements (uint8_t or uint16_t) to be received. + * @param RxLen Number of data elements finally received + * (could be lower than Size, in case reception ends on IDLE event) + * @param Timeout Timeout duration expressed in ms (covers the whole reception sequence). + * @retval HAL status + */ +HAL_StatusTypeDef HAL_UARTEx_ReceiveToIdle(UART_HandleTypeDef *huart, uint8_t *pData, uint16_t Size, uint16_t *RxLen, + uint32_t Timeout) +{ + uint8_t *pdata8bits; + uint16_t *pdata16bits; + uint16_t uhMask; + uint32_t tickstart; + + /* Check that a Rx process is not already ongoing */ + if (huart->RxState == HAL_UART_STATE_READY) + { + if ((pData == NULL) || (Size == 0U)) + { + return HAL_ERROR; + } + + __HAL_LOCK(huart); + + huart->ErrorCode = HAL_UART_ERROR_NONE; + huart->RxState = HAL_UART_STATE_BUSY_RX; + huart->ReceptionType = HAL_UART_RECEPTION_TOIDLE; + + /* Init tickstart for timeout management */ + tickstart = HAL_GetTick(); + + huart->RxXferSize = Size; + huart->RxXferCount = Size; + + /* Computation of UART mask to apply to RDR register */ + UART_MASK_COMPUTATION(huart); + uhMask = huart->Mask; + + /* In case of 9bits/No Parity transfer, pRxData needs to be handled as a uint16_t pointer */ + if ((huart->Init.WordLength == UART_WORDLENGTH_9B) && (huart->Init.Parity == UART_PARITY_NONE)) + { + pdata8bits = NULL; + pdata16bits = (uint16_t *) pData; + } + else + { + pdata8bits = pData; + pdata16bits = NULL; + } + + __HAL_UNLOCK(huart); + + /* Initialize output number of received elements */ + *RxLen = 0U; + + /* as long as data have to be received */ + while (huart->RxXferCount > 0U) + { + /* Check if IDLE flag is set */ + if (__HAL_UART_GET_FLAG(huart, UART_FLAG_IDLE)) + { + /* Clear IDLE flag in ISR */ + __HAL_UART_CLEAR_FLAG(huart, UART_CLEAR_IDLEF); + + /* If Set, but no data ever received, clear flag without exiting loop */ + /* If Set, and data has already been received, this means Idle Event is valid : End reception */ + if (*RxLen > 0U) + { + huart->RxState = HAL_UART_STATE_READY; + + return HAL_OK; + } + } + + /* Check if RXNE flag is set */ + if (__HAL_UART_GET_FLAG(huart, UART_FLAG_RXNE)) + { + if (pdata8bits == NULL) + { + *pdata16bits = (uint16_t)(huart->Instance->RDR & uhMask); + pdata16bits++; + } + else + { + *pdata8bits = (uint8_t)(huart->Instance->RDR & (uint8_t)uhMask); + pdata8bits++; + } + /* Increment number of received elements */ + 
*RxLen += 1U; + huart->RxXferCount--; + } + + /* Check for the Timeout */ + if (Timeout != HAL_MAX_DELAY) + { + if (((HAL_GetTick() - tickstart) > Timeout) || (Timeout == 0U)) + { + huart->RxState = HAL_UART_STATE_READY; + + return HAL_TIMEOUT; + } + } + } + + /* Set number of received elements in output parameter : RxLen */ + *RxLen = huart->RxXferSize - huart->RxXferCount; + /* At end of Rx process, restore huart->RxState to Ready */ + huart->RxState = HAL_UART_STATE_READY; + + return HAL_OK; + } + else + { + return HAL_BUSY; + } +} + +/** + * @brief Receive an amount of data in interrupt mode till either the expected number of data + * is received or an IDLE event occurs. + * @note Reception is initiated by this function call. Further progress of reception is achieved thanks + * to UART interrupts raised by RXNE and IDLE events. Callback is called at end of reception indicating + * number of received data elements. + * @note When UART parity is not enabled (PCE = 0), and Word Length is configured to 9 bits (M1-M0 = 01), + * the received data is handled as a set of uint16_t. In this case, Size must indicate the number + * of uint16_t available through pData. + * @param huart UART handle. + * @param pData Pointer to data buffer (uint8_t or uint16_t data elements). + * @param Size Amount of data elements (uint8_t or uint16_t) to be received. + * @retval HAL status + */ +HAL_StatusTypeDef HAL_UARTEx_ReceiveToIdle_IT(UART_HandleTypeDef *huart, uint8_t *pData, uint16_t Size) +{ + HAL_StatusTypeDef status; + + /* Check that a Rx process is not already ongoing */ + if (huart->RxState == HAL_UART_STATE_READY) + { + if ((pData == NULL) || (Size == 0U)) + { + return HAL_ERROR; + } + + __HAL_LOCK(huart); + + /* Set Reception type to reception till IDLE Event*/ + huart->ReceptionType = HAL_UART_RECEPTION_TOIDLE; + + status = UART_Start_Receive_IT(huart, pData, Size); + + /* Check Rx process has been successfully started */ + if (status == HAL_OK) + { + if (huart->ReceptionType == HAL_UART_RECEPTION_TOIDLE) + { + __HAL_UART_CLEAR_FLAG(huart, UART_CLEAR_IDLEF); + ATOMIC_SET_BIT(huart->Instance->CR1, USART_CR1_IDLEIE); + } + else + { + /* In case of errors already pending when reception is started, + Interrupts may have already been raised and lead to reception abortion. + (Overrun error for instance). + In such case Reception Type has been reset to HAL_UART_RECEPTION_STANDARD. */ + status = HAL_ERROR; + } + } + + return status; + } + else + { + return HAL_BUSY; + } +} + +/** + * @brief Receive an amount of data in DMA mode till either the expected number + * of data is received or an IDLE event occurs. + * @note Reception is initiated by this function call. Further progress of reception is achieved thanks + * to DMA services, transferring automatically received data elements in user reception buffer and + * calling registered callbacks at half/end of reception. UART IDLE events are also used to consider + * reception phase as ended. In all cases, callback execution will indicate number of received data elements. + * @note When the UART parity is enabled (PCE = 1), the received data contain + * the parity bit (MSB position). + * @note When UART parity is not enabled (PCE = 0), and Word Length is configured to 9 bits (M1-M0 = 01), + * the received data is handled as a set of uint16_t. In this case, Size must indicate the number + * of uint16_t available through pData. + * @param huart UART handle. + * @param pData Pointer to data buffer (uint8_t or uint16_t data elements). 
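+  * @note  Typical idle-framed reception sketch; a circular Rx DMA configuration (e.g. from
+  *        CubeMX) is assumed, and huart3/rx_buf are illustrative names:
+  * @code
+  *   extern UART_HandleTypeDef huart3;
+  *   static uint8_t rx_buf[128];
+  *
+  *   void start_idle_framed_rx(void)
+  *   {
+  *     (void)HAL_UARTEx_ReceiveToIdle_DMA(&huart3, rx_buf, sizeof(rx_buf));
+  *   }
+  *
+  *   void HAL_UARTEx_RxEventCallback(UART_HandleTypeDef *huart, uint16_t Size)
+  *   {
+  *     // Size = number of bytes already placed in rx_buf when the half, complete
+  *     // or IDLE event fired
+  *   }
+  * @endcode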
+ * @param Size Amount of data elements (uint8_t or uint16_t) to be received. + * @retval HAL status + */ +HAL_StatusTypeDef HAL_UARTEx_ReceiveToIdle_DMA(UART_HandleTypeDef *huart, uint8_t *pData, uint16_t Size) +{ + HAL_StatusTypeDef status; + + /* Check that a Rx process is not already ongoing */ + if (huart->RxState == HAL_UART_STATE_READY) + { + if ((pData == NULL) || (Size == 0U)) + { + return HAL_ERROR; + } + + __HAL_LOCK(huart); + + /* Set Reception type to reception till IDLE Event*/ + huart->ReceptionType = HAL_UART_RECEPTION_TOIDLE; + + status = UART_Start_Receive_DMA(huart, pData, Size); + + /* Check Rx process has been successfully started */ + if (status == HAL_OK) + { + if (huart->ReceptionType == HAL_UART_RECEPTION_TOIDLE) + { + __HAL_UART_CLEAR_FLAG(huart, UART_CLEAR_IDLEF); + ATOMIC_SET_BIT(huart->Instance->CR1, USART_CR1_IDLEIE); + } + else + { + /* In case of errors already pending when reception is started, + Interrupts may have already been raised and lead to reception abortion. + (Overrun error for instance). + In such case Reception Type has been reset to HAL_UART_RECEPTION_STANDARD. */ + status = HAL_ERROR; + } + } + + return status; + } + else + { + return HAL_BUSY; + } +} + +/** + * @} + */ + +/** + * @} + */ + +/** @addtogroup UARTEx_Private_Functions + * @{ + */ +#if defined(USART_CR1_UESM) + +/** + * @brief Initialize the UART wake-up from stop mode parameters when triggered by address detection. + * @param huart UART handle. + * @param WakeUpSelection UART wake up from stop mode parameters. + * @retval None + */ +static void UARTEx_Wakeup_AddressConfig(UART_HandleTypeDef *huart, UART_WakeUpTypeDef WakeUpSelection) +{ + assert_param(IS_UART_ADDRESSLENGTH_DETECT(WakeUpSelection.AddressLength)); + + /* Set the USART address length */ + MODIFY_REG(huart->Instance->CR2, USART_CR2_ADDM7, WakeUpSelection.AddressLength); + + /* Set the USART address node */ + MODIFY_REG(huart->Instance->CR2, USART_CR2_ADD, ((uint32_t)WakeUpSelection.Address << UART_CR2_ADDRESS_LSB_POS)); +} +#endif /* USART_CR1_UESM */ + +/** + * @} + */ + +#endif /* HAL_UART_MODULE_ENABLED */ + +/** + * @} + */ + +/** + * @} + */ + diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_ll_usb.c b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_ll_usb.c new file mode 100644 index 0000000..dec364f --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_ll_usb.c @@ -0,0 +1,2225 @@ +/** + ****************************************************************************** + * @file stm32f7xx_ll_usb.c + * @author MCD Application Team + * @brief USB Low Layer HAL module driver. + * + * This file provides firmware functions to manage the following + * functionalities of the USB Peripheral Controller: + * + Initialization/de-initialization functions + * + I/O operation functions + * + Peripheral Control functions + * + Peripheral State functions + * + ****************************************************************************** + * @attention + * + * Copyright (c) 2017 STMicroelectronics. + * All rights reserved. + * + * This software is licensed under terms that can be found in the LICENSE file + * in the root directory of this software component. + * If no LICENSE file comes with this software, it is provided AS-IS. 
+ * + ****************************************************************************** + @verbatim + ============================================================================== + ##### How to use this driver ##### + ============================================================================== + [..] + (#) Fill parameters of Init structure in USB_OTG_CfgTypeDef structure. + + (#) Call USB_CoreInit() API to initialize the USB Core peripheral. + + (#) The upper HAL HCD/PCD driver will call the right routines for its internal processes. + + @endverbatim + + ****************************************************************************** + */ + +/* Includes ------------------------------------------------------------------*/ +#include "stm32f7xx_hal.h" + +/** @addtogroup STM32F7xx_LL_USB_DRIVER + * @{ + */ + +#if defined (HAL_PCD_MODULE_ENABLED) || defined (HAL_HCD_MODULE_ENABLED) +#if defined (USB_OTG_FS) || defined (USB_OTG_HS) +/* Private typedef -----------------------------------------------------------*/ +/* Private define ------------------------------------------------------------*/ +/* Private macro -------------------------------------------------------------*/ +/* Private variables ---------------------------------------------------------*/ +/* Private function prototypes -----------------------------------------------*/ +/* Private functions ---------------------------------------------------------*/ +#if defined (USB_OTG_FS) || defined (USB_OTG_HS) +static HAL_StatusTypeDef USB_CoreReset(USB_OTG_GlobalTypeDef *USBx); +#ifdef USBPHYC +static HAL_StatusTypeDef USB_HS_PHYCInit(USB_OTG_GlobalTypeDef *USBx); +#endif /* USBPHYC */ + +/* Exported functions --------------------------------------------------------*/ +/** @defgroup USB_LL_Exported_Functions USB Low Layer Exported Functions + * @{ + */ + +/** @defgroup USB_LL_Exported_Functions_Group1 Initialization/de-initialization functions + * @brief Initialization and Configuration functions + * +@verbatim + =============================================================================== + ##### Initialization/de-initialization functions ##### + =============================================================================== + +@endverbatim + * @{ + */ + +/** + * @brief Initializes the USB Core + * @param USBx USB Instance + * @param cfg pointer to a USB_OTG_CfgTypeDef structure that contains + * the configuration information for the specified USBx peripheral. 
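+  * @note  Applications normally do not call this API directly: HAL_PCD_Init()/HAL_HCD_Init()
+  *        invoke USB_CoreInit() with the cfg taken from their Init structure. Illustrative
+  *        PCD-level sketch; the field values are typical CubeMX-style defaults, not values
+  *        mandated by this driver:
+  * @code
+  *   PCD_HandleTypeDef hpcd_fs;
+  *
+  *   void usb_device_ll_init(void)
+  *   {
+  *     hpcd_fs.Instance                 = USB_OTG_FS;
+  *     hpcd_fs.Init.dev_endpoints       = 6U;
+  *     hpcd_fs.Init.speed               = PCD_SPEED_FULL;
+  *     hpcd_fs.Init.phy_itface          = PCD_PHY_EMBEDDED;
+  *     hpcd_fs.Init.dma_enable          = DISABLE;
+  *     hpcd_fs.Init.Sof_enable          = DISABLE;
+  *     hpcd_fs.Init.vbus_sensing_enable = DISABLE;
+  *     (void)HAL_PCD_Init(&hpcd_fs);    // internally runs USB_CoreInit() and USB_DevInit()
+  *   }
+  * @endcode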
+ * @retval HAL status + */ +HAL_StatusTypeDef USB_CoreInit(USB_OTG_GlobalTypeDef *USBx, USB_OTG_CfgTypeDef cfg) +{ + HAL_StatusTypeDef ret; + + if (cfg.phy_itface == USB_OTG_ULPI_PHY) + { + USBx->GCCFG &= ~(USB_OTG_GCCFG_PWRDWN); + + /* Init The ULPI Interface */ + USBx->GUSBCFG &= ~(USB_OTG_GUSBCFG_TSDPS | USB_OTG_GUSBCFG_ULPIFSLS | USB_OTG_GUSBCFG_PHYSEL); + +#if defined(STM32F722xx) || defined(STM32F723xx) || defined(STM32F730xx) || defined(STM32F732xx) || defined(STM32F733xx) + /* Select ULPI Interface */ + USBx->GUSBCFG |= USB_OTG_GUSBCFG_ULPI_UTMI_SEL; +#endif /* defined(STM32F722xx) || defined(STM32F723xx) || defined(STM32F730xx) || defined(STM32F732xx) || defined(STM32F733xx) */ + + /* Select vbus source */ + USBx->GUSBCFG &= ~(USB_OTG_GUSBCFG_ULPIEVBUSD | USB_OTG_GUSBCFG_ULPIEVBUSI); + if (cfg.use_external_vbus == 1U) + { + USBx->GUSBCFG |= USB_OTG_GUSBCFG_ULPIEVBUSD; + } + + /* Reset after a PHY select */ + ret = USB_CoreReset(USBx); + } +#if defined(STM32F722xx) || defined(STM32F723xx) || defined(STM32F730xx) || defined(STM32F732xx) || defined(STM32F733xx) + else if (cfg.phy_itface == USB_OTG_HS_EMBEDDED_PHY) + { + USBx->GCCFG &= ~(USB_OTG_GCCFG_PWRDWN); + + /* Init The UTMI Interface */ + USBx->GUSBCFG &= ~(USB_OTG_GUSBCFG_TSDPS | USB_OTG_GUSBCFG_ULPIFSLS | USB_OTG_GUSBCFG_PHYSEL); + + /* Select vbus source */ + USBx->GUSBCFG &= ~(USB_OTG_GUSBCFG_ULPIEVBUSD | USB_OTG_GUSBCFG_ULPIEVBUSI); + + /* Select UTMI Interface */ + USBx->GUSBCFG &= ~USB_OTG_GUSBCFG_ULPI_UTMI_SEL; +#ifdef USBPHYC + /* Enable USB HS PHY */ + USBx->GCCFG |= USB_OTG_GCCFG_PHYHSEN; + + /* Enables control of a High Speed USB PHY */ + if (USB_HS_PHYCInit(USBx) != HAL_OK) + { + return HAL_ERROR; + } +#endif /* USBPHYC */ + + if (cfg.use_external_vbus == 1U) + { + USBx->GUSBCFG |= USB_OTG_GUSBCFG_ULPIEVBUSD; + } + + /* Reset after a PHY select */ + ret = USB_CoreReset(USBx); + } +#endif /* defined(STM32F722xx) || defined(STM32F723xx) || defined(STM32F730xx) || defined(STM32F732xx) || defined(STM32F733xx) */ + else /* FS interface (embedded Phy) */ + { + /* Select FS Embedded PHY */ + USBx->GUSBCFG |= USB_OTG_GUSBCFG_PHYSEL; + + /* Reset after a PHY select */ + ret = USB_CoreReset(USBx); + + /* Activate the USB Transceiver */ + USBx->GCCFG |= USB_OTG_GCCFG_PWRDWN; + } + + if (cfg.dma_enable == 1U) + { + USBx->GAHBCFG |= USB_OTG_GAHBCFG_HBSTLEN_2; + USBx->GAHBCFG |= USB_OTG_GAHBCFG_DMAEN; + } + + return ret; +} + + +/** + * @brief Set the USB turnaround time + * @param USBx USB Instance + * @param hclk: AHB clock frequency + * @retval USB turnaround time In PHY Clocks number + */ +HAL_StatusTypeDef USB_SetTurnaroundTime(USB_OTG_GlobalTypeDef *USBx, + uint32_t hclk, uint8_t speed) +{ + uint32_t UsbTrd; + + /* The USBTRD is configured according to the tables below, depending on AHB frequency + used by application. 
In the low AHB frequency range it is used to stretch enough the USB response + time to IN tokens, the USB turnaround time, so to compensate for the longer AHB read access + latency to the Data FIFO */ + if (speed == USBD_FS_SPEED) + { + if ((hclk >= 14200000U) && (hclk < 15000000U)) + { + /* hclk Clock Range between 14.2-15 MHz */ + UsbTrd = 0xFU; + } + else if ((hclk >= 15000000U) && (hclk < 16000000U)) + { + /* hclk Clock Range between 15-16 MHz */ + UsbTrd = 0xEU; + } + else if ((hclk >= 16000000U) && (hclk < 17200000U)) + { + /* hclk Clock Range between 16-17.2 MHz */ + UsbTrd = 0xDU; + } + else if ((hclk >= 17200000U) && (hclk < 18500000U)) + { + /* hclk Clock Range between 17.2-18.5 MHz */ + UsbTrd = 0xCU; + } + else if ((hclk >= 18500000U) && (hclk < 20000000U)) + { + /* hclk Clock Range between 18.5-20 MHz */ + UsbTrd = 0xBU; + } + else if ((hclk >= 20000000U) && (hclk < 21800000U)) + { + /* hclk Clock Range between 20-21.8 MHz */ + UsbTrd = 0xAU; + } + else if ((hclk >= 21800000U) && (hclk < 24000000U)) + { + /* hclk Clock Range between 21.8-24 MHz */ + UsbTrd = 0x9U; + } + else if ((hclk >= 24000000U) && (hclk < 27700000U)) + { + /* hclk Clock Range between 24-27.7 MHz */ + UsbTrd = 0x8U; + } + else if ((hclk >= 27700000U) && (hclk < 32000000U)) + { + /* hclk Clock Range between 27.7-32 MHz */ + UsbTrd = 0x7U; + } + else /* if(hclk >= 32000000) */ + { + /* hclk Clock Range between 32-200 MHz */ + UsbTrd = 0x6U; + } + } + else if (speed == USBD_HS_SPEED) + { + UsbTrd = USBD_HS_TRDT_VALUE; + } + else + { + UsbTrd = USBD_DEFAULT_TRDT_VALUE; + } + + USBx->GUSBCFG &= ~USB_OTG_GUSBCFG_TRDT; + USBx->GUSBCFG |= (uint32_t)((UsbTrd << 10) & USB_OTG_GUSBCFG_TRDT); + + return HAL_OK; +} + +/** + * @brief USB_EnableGlobalInt + * Enables the controller's Global Int in the AHB Config reg + * @param USBx Selected device + * @retval HAL status + */ +HAL_StatusTypeDef USB_EnableGlobalInt(USB_OTG_GlobalTypeDef *USBx) +{ + USBx->GAHBCFG |= USB_OTG_GAHBCFG_GINT; + return HAL_OK; +} + +/** + * @brief USB_DisableGlobalInt + * Disable the controller's Global Int in the AHB Config reg + * @param USBx Selected device + * @retval HAL status + */ +HAL_StatusTypeDef USB_DisableGlobalInt(USB_OTG_GlobalTypeDef *USBx) +{ + USBx->GAHBCFG &= ~USB_OTG_GAHBCFG_GINT; + return HAL_OK; +} + +/** + * @brief USB_SetCurrentMode Set functional mode + * @param USBx Selected device + * @param mode current core mode + * This parameter can be one of these values: + * @arg USB_DEVICE_MODE Peripheral mode + * @arg USB_HOST_MODE Host mode + * @retval HAL status + */ +HAL_StatusTypeDef USB_SetCurrentMode(USB_OTG_GlobalTypeDef *USBx, USB_OTG_ModeTypeDef mode) +{ + uint32_t ms = 0U; + + USBx->GUSBCFG &= ~(USB_OTG_GUSBCFG_FHMOD | USB_OTG_GUSBCFG_FDMOD); + + if (mode == USB_HOST_MODE) + { + USBx->GUSBCFG |= USB_OTG_GUSBCFG_FHMOD; + + do + { + HAL_Delay(1U); + ms++; + } while ((USB_GetMode(USBx) != (uint32_t)USB_HOST_MODE) && (ms < 50U)); + } + else if (mode == USB_DEVICE_MODE) + { + USBx->GUSBCFG |= USB_OTG_GUSBCFG_FDMOD; + + do + { + HAL_Delay(1U); + ms++; + } while ((USB_GetMode(USBx) != (uint32_t)USB_DEVICE_MODE) && (ms < 50U)); + } + else + { + return HAL_ERROR; + } + + if (ms == 50U) + { + return HAL_ERROR; + } + + return HAL_OK; +} + +/** + * @brief USB_DevInit Initializes the USB_OTG controller registers + * for device mode + * @param USBx Selected device + * @param cfg pointer to a USB_OTG_CfgTypeDef structure that contains + * the configuration information for the specified USBx peripheral. 
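+  * @note  Typically reached from HAL_PCD_Init(), after USB_CoreInit() and
+  *        USB_SetCurrentMode(USBx, USB_DEVICE_MODE): it flushes the FIFOs, clears the endpoint
+  *        and device interrupt masks, then unmasks only the device-mode interrupt sources used
+  *        by the PCD driver.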
+ * @retval HAL status + */ +HAL_StatusTypeDef USB_DevInit(USB_OTG_GlobalTypeDef *USBx, USB_OTG_CfgTypeDef cfg) +{ + HAL_StatusTypeDef ret = HAL_OK; + uint32_t USBx_BASE = (uint32_t)USBx; + uint32_t i; + + for (i = 0U; i < 15U; i++) + { + USBx->DIEPTXF[i] = 0U; + } + + /* VBUS Sensing setup */ + if (cfg.vbus_sensing_enable == 0U) + { + USBx_DEVICE->DCTL |= USB_OTG_DCTL_SDIS; + + /* Deactivate VBUS Sensing B */ + USBx->GCCFG &= ~USB_OTG_GCCFG_VBDEN; + + /* B-peripheral session valid override enable */ + USBx->GOTGCTL |= USB_OTG_GOTGCTL_BVALOEN; + USBx->GOTGCTL |= USB_OTG_GOTGCTL_BVALOVAL; + } + else + { + /* Enable HW VBUS sensing */ + USBx->GCCFG |= USB_OTG_GCCFG_VBDEN; + } + + /* Restart the Phy Clock */ + USBx_PCGCCTL = 0U; + + /* Device mode configuration */ + USBx_DEVICE->DCFG |= DCFG_FRAME_INTERVAL_80; + + if (cfg.phy_itface == USB_OTG_ULPI_PHY) + { + if (cfg.speed == USBD_HS_SPEED) + { + /* Set Core speed to High speed mode */ + (void)USB_SetDevSpeed(USBx, USB_OTG_SPEED_HIGH); + } + else + { + /* Set Core speed to Full speed mode */ + (void)USB_SetDevSpeed(USBx, USB_OTG_SPEED_HIGH_IN_FULL); + } + } +#if defined(STM32F722xx) || defined(STM32F723xx) || defined(STM32F730xx) || defined(STM32F732xx) || defined(STM32F733xx) + else if (cfg.phy_itface == USB_OTG_HS_EMBEDDED_PHY) + { + if (cfg.speed == USBD_HS_SPEED) + { + /* Set Core speed to High speed mode */ + (void)USB_SetDevSpeed(USBx, USB_OTG_SPEED_HIGH); + } + else + { + /* Set Core speed to Full speed mode */ + (void)USB_SetDevSpeed(USBx, USB_OTG_SPEED_HIGH_IN_FULL); + } + } +#endif /* defined(STM32F722xx) || defined(STM32F723xx) || defined(STM32F730xx) || defined(STM32F732xx) || defined(STM32F733xx) */ + else + { + /* Set Core speed to Full speed mode */ + (void)USB_SetDevSpeed(USBx, USB_OTG_SPEED_FULL); + } + + /* Flush the FIFOs */ + if (USB_FlushTxFifo(USBx, 0x10U) != HAL_OK) /* all Tx FIFOs */ + { + ret = HAL_ERROR; + } + + if (USB_FlushRxFifo(USBx) != HAL_OK) + { + ret = HAL_ERROR; + } + + /* Clear all pending Device Interrupts */ + USBx_DEVICE->DIEPMSK = 0U; + USBx_DEVICE->DOEPMSK = 0U; + USBx_DEVICE->DAINTMSK = 0U; + + for (i = 0U; i < cfg.dev_endpoints; i++) + { + if ((USBx_INEP(i)->DIEPCTL & USB_OTG_DIEPCTL_EPENA) == USB_OTG_DIEPCTL_EPENA) + { + if (i == 0U) + { + USBx_INEP(i)->DIEPCTL = USB_OTG_DIEPCTL_SNAK; + } + else + { + USBx_INEP(i)->DIEPCTL = USB_OTG_DIEPCTL_EPDIS | USB_OTG_DIEPCTL_SNAK; + } + } + else + { + USBx_INEP(i)->DIEPCTL = 0U; + } + + USBx_INEP(i)->DIEPTSIZ = 0U; + USBx_INEP(i)->DIEPINT = 0xFB7FU; + } + + for (i = 0U; i < cfg.dev_endpoints; i++) + { + if ((USBx_OUTEP(i)->DOEPCTL & USB_OTG_DOEPCTL_EPENA) == USB_OTG_DOEPCTL_EPENA) + { + if (i == 0U) + { + USBx_OUTEP(i)->DOEPCTL = USB_OTG_DOEPCTL_SNAK; + } + else + { + USBx_OUTEP(i)->DOEPCTL = USB_OTG_DOEPCTL_EPDIS | USB_OTG_DOEPCTL_SNAK; + } + } + else + { + USBx_OUTEP(i)->DOEPCTL = 0U; + } + + USBx_OUTEP(i)->DOEPTSIZ = 0U; + USBx_OUTEP(i)->DOEPINT = 0xFB7FU; + } + + USBx_DEVICE->DIEPMSK &= ~(USB_OTG_DIEPMSK_TXFURM); + + /* Disable all interrupts. 
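+     They are selectively re-enabled just below: RXFLVL only when DMA is not used, plus the
+     device-mode sources (suspend, reset, enumeration done, IN/OUT endpoint, incomplete ISO
+     transfers, wake-up), and optionally the SOF and VBUS-sensing interrupts.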
*/ + USBx->GINTMSK = 0U; + + /* Clear any pending interrupts */ + USBx->GINTSTS = 0xBFFFFFFFU; + + /* Enable the common interrupts */ + if (cfg.dma_enable == 0U) + { + USBx->GINTMSK |= USB_OTG_GINTMSK_RXFLVLM; + } + + /* Enable interrupts matching to the Device mode ONLY */ + USBx->GINTMSK |= USB_OTG_GINTMSK_USBSUSPM | USB_OTG_GINTMSK_USBRST | + USB_OTG_GINTMSK_ENUMDNEM | USB_OTG_GINTMSK_IEPINT | + USB_OTG_GINTMSK_OEPINT | USB_OTG_GINTMSK_IISOIXFRM | + USB_OTG_GINTMSK_PXFRM_IISOOXFRM | USB_OTG_GINTMSK_WUIM; + + if (cfg.Sof_enable != 0U) + { + USBx->GINTMSK |= USB_OTG_GINTMSK_SOFM; + } + + if (cfg.vbus_sensing_enable == 1U) + { + USBx->GINTMSK |= (USB_OTG_GINTMSK_SRQIM | USB_OTG_GINTMSK_OTGINT); + } + + return ret; +} + +/** + * @brief USB_FlushTxFifo Flush a Tx FIFO + * @param USBx Selected device + * @param num FIFO number + * This parameter can be a value from 1 to 15 + 15 means Flush all Tx FIFOs + * @retval HAL status + */ +HAL_StatusTypeDef USB_FlushTxFifo(USB_OTG_GlobalTypeDef *USBx, uint32_t num) +{ + __IO uint32_t count = 0U; + + /* Wait for AHB master IDLE state. */ + do + { + if (++count > 200000U) + { + return HAL_TIMEOUT; + } + } while ((USBx->GRSTCTL & USB_OTG_GRSTCTL_AHBIDL) == 0U); + + /* Flush TX Fifo */ + count = 0U; + USBx->GRSTCTL = (USB_OTG_GRSTCTL_TXFFLSH | (num << 6)); + + do + { + if (++count > 200000U) + { + return HAL_TIMEOUT; + } + } while ((USBx->GRSTCTL & USB_OTG_GRSTCTL_TXFFLSH) == USB_OTG_GRSTCTL_TXFFLSH); + + return HAL_OK; +} + +/** + * @brief USB_FlushRxFifo Flush Rx FIFO + * @param USBx Selected device + * @retval HAL status + */ +HAL_StatusTypeDef USB_FlushRxFifo(USB_OTG_GlobalTypeDef *USBx) +{ + __IO uint32_t count = 0U; + + /* Wait for AHB master IDLE state. */ + do + { + if (++count > 200000U) + { + return HAL_TIMEOUT; + } + } while ((USBx->GRSTCTL & USB_OTG_GRSTCTL_AHBIDL) == 0U); + + /* Flush RX Fifo */ + count = 0U; + USBx->GRSTCTL = USB_OTG_GRSTCTL_RXFFLSH; + + do + { + if (++count > 200000U) + { + return HAL_TIMEOUT; + } + } while ((USBx->GRSTCTL & USB_OTG_GRSTCTL_RXFFLSH) == USB_OTG_GRSTCTL_RXFFLSH); + + return HAL_OK; +} + +/** + * @brief USB_SetDevSpeed Initializes the DevSpd field of DCFG register + * depending the PHY type and the enumeration speed of the device. 
+ * @param USBx Selected device + * @param speed device speed + * This parameter can be one of these values: + * @arg USB_OTG_SPEED_HIGH: High speed mode + * @arg USB_OTG_SPEED_HIGH_IN_FULL: High speed core in Full Speed mode + * @arg USB_OTG_SPEED_FULL: Full speed mode + * @retval Hal status + */ +HAL_StatusTypeDef USB_SetDevSpeed(USB_OTG_GlobalTypeDef *USBx, uint8_t speed) +{ + uint32_t USBx_BASE = (uint32_t)USBx; + + USBx_DEVICE->DCFG |= speed; + return HAL_OK; +} + +/** + * @brief USB_GetDevSpeed Return the Dev Speed + * @param USBx Selected device + * @retval speed device speed + * This parameter can be one of these values: + * @arg USBD_HS_SPEED: High speed mode + * @arg USBD_FS_SPEED: Full speed mode + */ +uint8_t USB_GetDevSpeed(USB_OTG_GlobalTypeDef *USBx) +{ + uint32_t USBx_BASE = (uint32_t)USBx; + uint8_t speed; + uint32_t DevEnumSpeed = USBx_DEVICE->DSTS & USB_OTG_DSTS_ENUMSPD; + + if (DevEnumSpeed == DSTS_ENUMSPD_HS_PHY_30MHZ_OR_60MHZ) + { + speed = USBD_HS_SPEED; + } + else if ((DevEnumSpeed == DSTS_ENUMSPD_FS_PHY_30MHZ_OR_60MHZ) || + (DevEnumSpeed == DSTS_ENUMSPD_FS_PHY_48MHZ)) + { + speed = USBD_FS_SPEED; + } + else + { + speed = 0xFU; + } + + return speed; +} + +/** + * @brief Activate and configure an endpoint + * @param USBx Selected device + * @param ep pointer to endpoint structure + * @retval HAL status + */ +HAL_StatusTypeDef USB_ActivateEndpoint(USB_OTG_GlobalTypeDef *USBx, USB_OTG_EPTypeDef *ep) +{ + uint32_t USBx_BASE = (uint32_t)USBx; + uint32_t epnum = (uint32_t)ep->num; + + if (ep->is_in == 1U) + { + USBx_DEVICE->DAINTMSK |= USB_OTG_DAINTMSK_IEPM & (uint32_t)(1UL << (ep->num & EP_ADDR_MSK)); + + if ((USBx_INEP(epnum)->DIEPCTL & USB_OTG_DIEPCTL_USBAEP) == 0U) + { + USBx_INEP(epnum)->DIEPCTL |= (ep->maxpacket & USB_OTG_DIEPCTL_MPSIZ) | + ((uint32_t)ep->type << 18) | (epnum << 22) | + USB_OTG_DIEPCTL_SD0PID_SEVNFRM | + USB_OTG_DIEPCTL_USBAEP; + } + } + else + { + USBx_DEVICE->DAINTMSK |= USB_OTG_DAINTMSK_OEPM & ((uint32_t)(1UL << (ep->num & EP_ADDR_MSK)) << 16); + + if (((USBx_OUTEP(epnum)->DOEPCTL) & USB_OTG_DOEPCTL_USBAEP) == 0U) + { + USBx_OUTEP(epnum)->DOEPCTL |= (ep->maxpacket & USB_OTG_DOEPCTL_MPSIZ) | + ((uint32_t)ep->type << 18) | + USB_OTG_DIEPCTL_SD0PID_SEVNFRM | + USB_OTG_DOEPCTL_USBAEP; + } + } + return HAL_OK; +} + +/** + * @brief Activate and configure a dedicated endpoint + * @param USBx Selected device + * @param ep pointer to endpoint structure + * @retval HAL status + */ +HAL_StatusTypeDef USB_ActivateDedicatedEndpoint(USB_OTG_GlobalTypeDef *USBx, USB_OTG_EPTypeDef *ep) +{ + uint32_t USBx_BASE = (uint32_t)USBx; + uint32_t epnum = (uint32_t)ep->num; + + /* Read DEPCTLn register */ + if (ep->is_in == 1U) + { + if (((USBx_INEP(epnum)->DIEPCTL) & USB_OTG_DIEPCTL_USBAEP) == 0U) + { + USBx_INEP(epnum)->DIEPCTL |= (ep->maxpacket & USB_OTG_DIEPCTL_MPSIZ) | + ((uint32_t)ep->type << 18) | (epnum << 22) | + USB_OTG_DIEPCTL_SD0PID_SEVNFRM | + USB_OTG_DIEPCTL_USBAEP; + } + + USBx_DEVICE->DEACHMSK |= USB_OTG_DAINTMSK_IEPM & (uint32_t)(1UL << (ep->num & EP_ADDR_MSK)); + } + else + { + if (((USBx_OUTEP(epnum)->DOEPCTL) & USB_OTG_DOEPCTL_USBAEP) == 0U) + { + USBx_OUTEP(epnum)->DOEPCTL |= (ep->maxpacket & USB_OTG_DOEPCTL_MPSIZ) | + ((uint32_t)ep->type << 18) | (epnum << 22) | + USB_OTG_DOEPCTL_USBAEP; + } + + USBx_DEVICE->DEACHMSK |= USB_OTG_DAINTMSK_OEPM & ((uint32_t)(1UL << (ep->num & EP_ADDR_MSK)) << 16); + } + + return HAL_OK; +} + +/** + * @brief De-activate and de-initialize an endpoint + * @param USBx Selected device + * @param ep pointer to endpoint 
structure + * @retval HAL status + */ +HAL_StatusTypeDef USB_DeactivateEndpoint(USB_OTG_GlobalTypeDef *USBx, USB_OTG_EPTypeDef *ep) +{ + uint32_t USBx_BASE = (uint32_t)USBx; + uint32_t epnum = (uint32_t)ep->num; + + /* Read DEPCTLn register */ + if (ep->is_in == 1U) + { + if ((USBx_INEP(epnum)->DIEPCTL & USB_OTG_DIEPCTL_EPENA) == USB_OTG_DIEPCTL_EPENA) + { + USBx_INEP(epnum)->DIEPCTL |= USB_OTG_DIEPCTL_SNAK; + USBx_INEP(epnum)->DIEPCTL |= USB_OTG_DIEPCTL_EPDIS; + } + + USBx_DEVICE->DEACHMSK &= ~(USB_OTG_DAINTMSK_IEPM & (uint32_t)(1UL << (ep->num & EP_ADDR_MSK))); + USBx_DEVICE->DAINTMSK &= ~(USB_OTG_DAINTMSK_IEPM & (uint32_t)(1UL << (ep->num & EP_ADDR_MSK))); + USBx_INEP(epnum)->DIEPCTL &= ~(USB_OTG_DIEPCTL_USBAEP | + USB_OTG_DIEPCTL_MPSIZ | + USB_OTG_DIEPCTL_TXFNUM | + USB_OTG_DIEPCTL_SD0PID_SEVNFRM | + USB_OTG_DIEPCTL_EPTYP); + } + else + { + if ((USBx_OUTEP(epnum)->DOEPCTL & USB_OTG_DOEPCTL_EPENA) == USB_OTG_DOEPCTL_EPENA) + { + USBx_OUTEP(epnum)->DOEPCTL |= USB_OTG_DOEPCTL_SNAK; + USBx_OUTEP(epnum)->DOEPCTL |= USB_OTG_DOEPCTL_EPDIS; + } + + USBx_DEVICE->DEACHMSK &= ~(USB_OTG_DAINTMSK_OEPM & ((uint32_t)(1UL << (ep->num & EP_ADDR_MSK)) << 16)); + USBx_DEVICE->DAINTMSK &= ~(USB_OTG_DAINTMSK_OEPM & ((uint32_t)(1UL << (ep->num & EP_ADDR_MSK)) << 16)); + USBx_OUTEP(epnum)->DOEPCTL &= ~(USB_OTG_DOEPCTL_USBAEP | + USB_OTG_DOEPCTL_MPSIZ | + USB_OTG_DOEPCTL_SD0PID_SEVNFRM | + USB_OTG_DOEPCTL_EPTYP); + } + + return HAL_OK; +} + +/** + * @brief De-activate and de-initialize a dedicated endpoint + * @param USBx Selected device + * @param ep pointer to endpoint structure + * @retval HAL status + */ +HAL_StatusTypeDef USB_DeactivateDedicatedEndpoint(USB_OTG_GlobalTypeDef *USBx, USB_OTG_EPTypeDef *ep) +{ + uint32_t USBx_BASE = (uint32_t)USBx; + uint32_t epnum = (uint32_t)ep->num; + + /* Read DEPCTLn register */ + if (ep->is_in == 1U) + { + if ((USBx_INEP(epnum)->DIEPCTL & USB_OTG_DIEPCTL_EPENA) == USB_OTG_DIEPCTL_EPENA) + { + USBx_INEP(epnum)->DIEPCTL |= USB_OTG_DIEPCTL_SNAK; + USBx_INEP(epnum)->DIEPCTL |= USB_OTG_DIEPCTL_EPDIS; + } + + USBx_INEP(epnum)->DIEPCTL &= ~ USB_OTG_DIEPCTL_USBAEP; + USBx_DEVICE->DAINTMSK &= ~(USB_OTG_DAINTMSK_IEPM & (uint32_t)(1UL << (ep->num & EP_ADDR_MSK))); + } + else + { + if ((USBx_OUTEP(epnum)->DOEPCTL & USB_OTG_DOEPCTL_EPENA) == USB_OTG_DOEPCTL_EPENA) + { + USBx_OUTEP(epnum)->DOEPCTL |= USB_OTG_DOEPCTL_SNAK; + USBx_OUTEP(epnum)->DOEPCTL |= USB_OTG_DOEPCTL_EPDIS; + } + + USBx_OUTEP(epnum)->DOEPCTL &= ~USB_OTG_DOEPCTL_USBAEP; + USBx_DEVICE->DAINTMSK &= ~(USB_OTG_DAINTMSK_OEPM & ((uint32_t)(1UL << (ep->num & EP_ADDR_MSK)) << 16)); + } + + return HAL_OK; +} + +/** + * @brief USB_EPStartXfer : setup and starts a transfer over an EP + * @param USBx Selected device + * @param ep pointer to endpoint structure + * @param dma USB dma enabled or disabled + * This parameter can be one of these values: + * 0 : DMA feature not used + * 1 : DMA feature used + * @retval HAL status + */ +HAL_StatusTypeDef USB_EPStartXfer(USB_OTG_GlobalTypeDef *USBx, USB_OTG_EPTypeDef *ep, uint8_t dma) +{ + uint32_t USBx_BASE = (uint32_t)USBx; + uint32_t epnum = (uint32_t)ep->num; + uint16_t pktcnt; + + /* IN endpoint */ + if (ep->is_in == 1U) + { + /* Zero Length Packet? 
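+          (a zero-length IN packet is programmed below as PKTCNT = 1 with XFRSIZ = 0)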
*/ + if (ep->xfer_len == 0U) + { + USBx_INEP(epnum)->DIEPTSIZ &= ~(USB_OTG_DIEPTSIZ_PKTCNT); + USBx_INEP(epnum)->DIEPTSIZ |= (USB_OTG_DIEPTSIZ_PKTCNT & (1U << 19)); + USBx_INEP(epnum)->DIEPTSIZ &= ~(USB_OTG_DIEPTSIZ_XFRSIZ); + } + else + { + /* Program the transfer size and packet count + * as follows: xfersize = N * maxpacket + + * short_packet pktcnt = N + (short_packet + * exist ? 1 : 0) + */ + USBx_INEP(epnum)->DIEPTSIZ &= ~(USB_OTG_DIEPTSIZ_XFRSIZ); + USBx_INEP(epnum)->DIEPTSIZ &= ~(USB_OTG_DIEPTSIZ_PKTCNT); + USBx_INEP(epnum)->DIEPTSIZ |= (USB_OTG_DIEPTSIZ_PKTCNT & + (((ep->xfer_len + ep->maxpacket - 1U) / ep->maxpacket) << 19)); + + USBx_INEP(epnum)->DIEPTSIZ |= (USB_OTG_DIEPTSIZ_XFRSIZ & ep->xfer_len); + + if (ep->type == EP_TYPE_ISOC) + { + USBx_INEP(epnum)->DIEPTSIZ &= ~(USB_OTG_DIEPTSIZ_MULCNT); + USBx_INEP(epnum)->DIEPTSIZ |= (USB_OTG_DIEPTSIZ_MULCNT & (1U << 29)); + } + } + + if (dma == 1U) + { + if ((uint32_t)ep->dma_addr != 0U) + { + USBx_INEP(epnum)->DIEPDMA = (uint32_t)(ep->dma_addr); + } + + if (ep->type == EP_TYPE_ISOC) + { + if ((USBx_DEVICE->DSTS & (1U << 8)) == 0U) + { + USBx_INEP(epnum)->DIEPCTL |= USB_OTG_DIEPCTL_SODDFRM; + } + else + { + USBx_INEP(epnum)->DIEPCTL |= USB_OTG_DIEPCTL_SD0PID_SEVNFRM; + } + } + + /* EP enable, IN data in FIFO */ + USBx_INEP(epnum)->DIEPCTL |= (USB_OTG_DIEPCTL_CNAK | USB_OTG_DIEPCTL_EPENA); + } + else + { + /* EP enable, IN data in FIFO */ + USBx_INEP(epnum)->DIEPCTL |= (USB_OTG_DIEPCTL_CNAK | USB_OTG_DIEPCTL_EPENA); + + if (ep->type != EP_TYPE_ISOC) + { + /* Enable the Tx FIFO Empty Interrupt for this EP */ + if (ep->xfer_len > 0U) + { + USBx_DEVICE->DIEPEMPMSK |= 1UL << (ep->num & EP_ADDR_MSK); + } + } + else + { + if ((USBx_DEVICE->DSTS & (1U << 8)) == 0U) + { + USBx_INEP(epnum)->DIEPCTL |= USB_OTG_DIEPCTL_SODDFRM; + } + else + { + USBx_INEP(epnum)->DIEPCTL |= USB_OTG_DIEPCTL_SD0PID_SEVNFRM; + } + + (void)USB_WritePacket(USBx, ep->xfer_buff, ep->num, (uint16_t)ep->xfer_len, dma); + } + } + } + else /* OUT endpoint */ + { + /* Program the transfer size and packet count as follows: + * pktcnt = N + * xfersize = N * maxpacket + */ + USBx_OUTEP(epnum)->DOEPTSIZ &= ~(USB_OTG_DOEPTSIZ_XFRSIZ); + USBx_OUTEP(epnum)->DOEPTSIZ &= ~(USB_OTG_DOEPTSIZ_PKTCNT); + + if (ep->xfer_len == 0U) + { + USBx_OUTEP(epnum)->DOEPTSIZ |= (USB_OTG_DOEPTSIZ_XFRSIZ & ep->maxpacket); + USBx_OUTEP(epnum)->DOEPTSIZ |= (USB_OTG_DOEPTSIZ_PKTCNT & (1U << 19)); + } + else + { + pktcnt = (uint16_t)((ep->xfer_len + ep->maxpacket - 1U) / ep->maxpacket); + USBx_OUTEP(epnum)->DOEPTSIZ |= USB_OTG_DOEPTSIZ_PKTCNT & ((uint32_t)pktcnt << 19); + USBx_OUTEP(epnum)->DOEPTSIZ |= USB_OTG_DOEPTSIZ_XFRSIZ & (ep->maxpacket * pktcnt); + } + + if (dma == 1U) + { + if ((uint32_t)ep->xfer_buff != 0U) + { + USBx_OUTEP(epnum)->DOEPDMA = (uint32_t)(ep->xfer_buff); + } + } + + if (ep->type == EP_TYPE_ISOC) + { + if ((USBx_DEVICE->DSTS & (1U << 8)) == 0U) + { + USBx_OUTEP(epnum)->DOEPCTL |= USB_OTG_DOEPCTL_SODDFRM; + } + else + { + USBx_OUTEP(epnum)->DOEPCTL |= USB_OTG_DOEPCTL_SD0PID_SEVNFRM; + } + } + /* EP enable */ + USBx_OUTEP(epnum)->DOEPCTL |= (USB_OTG_DOEPCTL_CNAK | USB_OTG_DOEPCTL_EPENA); + } + + return HAL_OK; +} + +/** + * @brief USB_EP0StartXfer : setup and starts a transfer over the EP 0 + * @param USBx Selected device + * @param ep pointer to endpoint structure + * @param dma USB dma enabled or disabled + * This parameter can be one of these values: + * 0 : DMA feature not used + * 1 : DMA feature used + * @retval HAL status + */ +HAL_StatusTypeDef 
USB_EP0StartXfer(USB_OTG_GlobalTypeDef *USBx, USB_OTG_EPTypeDef *ep, uint8_t dma) +{ + uint32_t USBx_BASE = (uint32_t)USBx; + uint32_t epnum = (uint32_t)ep->num; + + /* IN endpoint */ + if (ep->is_in == 1U) + { + /* Zero Length Packet? */ + if (ep->xfer_len == 0U) + { + USBx_INEP(epnum)->DIEPTSIZ &= ~(USB_OTG_DIEPTSIZ_PKTCNT); + USBx_INEP(epnum)->DIEPTSIZ |= (USB_OTG_DIEPTSIZ_PKTCNT & (1U << 19)); + USBx_INEP(epnum)->DIEPTSIZ &= ~(USB_OTG_DIEPTSIZ_XFRSIZ); + } + else + { + /* Program the transfer size and packet count + * as follows: xfersize = N * maxpacket + + * short_packet pktcnt = N + (short_packet + * exist ? 1 : 0) + */ + USBx_INEP(epnum)->DIEPTSIZ &= ~(USB_OTG_DIEPTSIZ_XFRSIZ); + USBx_INEP(epnum)->DIEPTSIZ &= ~(USB_OTG_DIEPTSIZ_PKTCNT); + + if (ep->xfer_len > ep->maxpacket) + { + ep->xfer_len = ep->maxpacket; + } + USBx_INEP(epnum)->DIEPTSIZ |= (USB_OTG_DIEPTSIZ_PKTCNT & (1U << 19)); + USBx_INEP(epnum)->DIEPTSIZ |= (USB_OTG_DIEPTSIZ_XFRSIZ & ep->xfer_len); + } + + if (dma == 1U) + { + if ((uint32_t)ep->dma_addr != 0U) + { + USBx_INEP(epnum)->DIEPDMA = (uint32_t)(ep->dma_addr); + } + + /* EP enable, IN data in FIFO */ + USBx_INEP(epnum)->DIEPCTL |= (USB_OTG_DIEPCTL_CNAK | USB_OTG_DIEPCTL_EPENA); + } + else + { + /* EP enable, IN data in FIFO */ + USBx_INEP(epnum)->DIEPCTL |= (USB_OTG_DIEPCTL_CNAK | USB_OTG_DIEPCTL_EPENA); + + /* Enable the Tx FIFO Empty Interrupt for this EP */ + if (ep->xfer_len > 0U) + { + USBx_DEVICE->DIEPEMPMSK |= 1UL << (ep->num & EP_ADDR_MSK); + } + } + } + else /* OUT endpoint */ + { + /* Program the transfer size and packet count as follows: + * pktcnt = N + * xfersize = N * maxpacket + */ + USBx_OUTEP(epnum)->DOEPTSIZ &= ~(USB_OTG_DOEPTSIZ_XFRSIZ); + USBx_OUTEP(epnum)->DOEPTSIZ &= ~(USB_OTG_DOEPTSIZ_PKTCNT); + + if (ep->xfer_len > 0U) + { + ep->xfer_len = ep->maxpacket; + } + + USBx_OUTEP(epnum)->DOEPTSIZ |= (USB_OTG_DOEPTSIZ_PKTCNT & (1U << 19)); + USBx_OUTEP(epnum)->DOEPTSIZ |= (USB_OTG_DOEPTSIZ_XFRSIZ & (ep->maxpacket)); + + if (dma == 1U) + { + if ((uint32_t)ep->xfer_buff != 0U) + { + USBx_OUTEP(epnum)->DOEPDMA = (uint32_t)(ep->xfer_buff); + } + } + + /* EP enable */ + USBx_OUTEP(epnum)->DOEPCTL |= (USB_OTG_DOEPCTL_CNAK | USB_OTG_DOEPCTL_EPENA); + } + + return HAL_OK; +} + +/** + * @brief USB_WritePacket : Writes a packet into the Tx FIFO associated + * with the EP/channel + * @param USBx Selected device + * @param src pointer to source buffer + * @param ch_ep_num endpoint or host channel number + * @param len Number of bytes to write + * @param dma USB dma enabled or disabled + * This parameter can be one of these values: + * 0 : DMA feature not used + * 1 : DMA feature used + * @retval HAL status + */ +HAL_StatusTypeDef USB_WritePacket(USB_OTG_GlobalTypeDef *USBx, uint8_t *src, + uint8_t ch_ep_num, uint16_t len, uint8_t dma) +{ + uint32_t USBx_BASE = (uint32_t)USBx; + uint8_t *pSrc = src; + uint32_t count32b; + uint32_t i; + + if (dma == 0U) + { + count32b = ((uint32_t)len + 3U) / 4U; + for (i = 0U; i < count32b; i++) + { + USBx_DFIFO((uint32_t)ch_ep_num) = __UNALIGNED_UINT32_READ(pSrc); + pSrc++; + pSrc++; + pSrc++; + pSrc++; + } + } + + return HAL_OK; +} + +/** + * @brief USB_ReadPacket : read a packet from the RX FIFO + * @param USBx Selected device + * @param dest source pointer + * @param len Number of bytes to read + * @retval pointer to destination buffer + */ +void *USB_ReadPacket(USB_OTG_GlobalTypeDef *USBx, uint8_t *dest, uint16_t len) +{ + uint32_t USBx_BASE = (uint32_t)USBx; + uint8_t *pDest = dest; + uint32_t pData; + uint32_t i; + 
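+  /* count32b whole 32-bit words are popped from the RX FIFO below; any
+     1..3 trailing bytes are then extracted from one extra word. */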
uint32_t count32b = (uint32_t)len >> 2U; + uint16_t remaining_bytes = len % 4U; + + for (i = 0U; i < count32b; i++) + { + __UNALIGNED_UINT32_WRITE(pDest, USBx_DFIFO(0U)); + pDest++; + pDest++; + pDest++; + pDest++; + } + + /* When Number of data is not word aligned, read the remaining byte */ + if (remaining_bytes != 0U) + { + i = 0U; + __UNALIGNED_UINT32_WRITE(&pData, USBx_DFIFO(0U)); + + do + { + *(uint8_t *)pDest = (uint8_t)(pData >> (8U * (uint8_t)(i))); + i++; + pDest++; + remaining_bytes--; + } while (remaining_bytes != 0U); + } + + return ((void *)pDest); +} + +/** + * @brief USB_EPSetStall : set a stall condition over an EP + * @param USBx Selected device + * @param ep pointer to endpoint structure + * @retval HAL status + */ +HAL_StatusTypeDef USB_EPSetStall(USB_OTG_GlobalTypeDef *USBx, USB_OTG_EPTypeDef *ep) +{ + uint32_t USBx_BASE = (uint32_t)USBx; + uint32_t epnum = (uint32_t)ep->num; + + if (ep->is_in == 1U) + { + if (((USBx_INEP(epnum)->DIEPCTL & USB_OTG_DIEPCTL_EPENA) == 0U) && (epnum != 0U)) + { + USBx_INEP(epnum)->DIEPCTL &= ~(USB_OTG_DIEPCTL_EPDIS); + } + USBx_INEP(epnum)->DIEPCTL |= USB_OTG_DIEPCTL_STALL; + } + else + { + if (((USBx_OUTEP(epnum)->DOEPCTL & USB_OTG_DOEPCTL_EPENA) == 0U) && (epnum != 0U)) + { + USBx_OUTEP(epnum)->DOEPCTL &= ~(USB_OTG_DOEPCTL_EPDIS); + } + USBx_OUTEP(epnum)->DOEPCTL |= USB_OTG_DOEPCTL_STALL; + } + + return HAL_OK; +} + +/** + * @brief USB_EPClearStall : Clear a stall condition over an EP + * @param USBx Selected device + * @param ep pointer to endpoint structure + * @retval HAL status + */ +HAL_StatusTypeDef USB_EPClearStall(USB_OTG_GlobalTypeDef *USBx, USB_OTG_EPTypeDef *ep) +{ + uint32_t USBx_BASE = (uint32_t)USBx; + uint32_t epnum = (uint32_t)ep->num; + + if (ep->is_in == 1U) + { + USBx_INEP(epnum)->DIEPCTL &= ~USB_OTG_DIEPCTL_STALL; + if ((ep->type == EP_TYPE_INTR) || (ep->type == EP_TYPE_BULK)) + { + USBx_INEP(epnum)->DIEPCTL |= USB_OTG_DIEPCTL_SD0PID_SEVNFRM; /* DATA0 */ + } + } + else + { + USBx_OUTEP(epnum)->DOEPCTL &= ~USB_OTG_DOEPCTL_STALL; + if ((ep->type == EP_TYPE_INTR) || (ep->type == EP_TYPE_BULK)) + { + USBx_OUTEP(epnum)->DOEPCTL |= USB_OTG_DOEPCTL_SD0PID_SEVNFRM; /* DATA0 */ + } + } + return HAL_OK; +} + +/** + * @brief USB_StopDevice : Stop the usb device mode + * @param USBx Selected device + * @retval HAL status + */ +HAL_StatusTypeDef USB_StopDevice(USB_OTG_GlobalTypeDef *USBx) +{ + HAL_StatusTypeDef ret; + uint32_t USBx_BASE = (uint32_t)USBx; + uint32_t i; + + /* Clear Pending interrupt */ + for (i = 0U; i < 15U; i++) + { + USBx_INEP(i)->DIEPINT = 0xFB7FU; + USBx_OUTEP(i)->DOEPINT = 0xFB7FU; + } + + /* Clear interrupt masks */ + USBx_DEVICE->DIEPMSK = 0U; + USBx_DEVICE->DOEPMSK = 0U; + USBx_DEVICE->DAINTMSK = 0U; + + /* Flush the FIFO */ + ret = USB_FlushRxFifo(USBx); + if (ret != HAL_OK) + { + return ret; + } + + ret = USB_FlushTxFifo(USBx, 0x10U); + if (ret != HAL_OK) + { + return ret; + } + + return ret; +} + +/** + * @brief USB_SetDevAddress : Stop the usb device mode + * @param USBx Selected device + * @param address new device address to be assigned + * This parameter can be a value from 0 to 255 + * @retval HAL status + */ +HAL_StatusTypeDef USB_SetDevAddress(USB_OTG_GlobalTypeDef *USBx, uint8_t address) +{ + uint32_t USBx_BASE = (uint32_t)USBx; + + USBx_DEVICE->DCFG &= ~(USB_OTG_DCFG_DAD); + USBx_DEVICE->DCFG |= ((uint32_t)address << 4) & USB_OTG_DCFG_DAD; + + return HAL_OK; +} + +/** + * @brief USB_DevConnect : Connect the USB device by enabling Rpu + * @param USBx Selected device + * @retval HAL status + */ 
+HAL_StatusTypeDef USB_DevConnect(USB_OTG_GlobalTypeDef *USBx) +{ + uint32_t USBx_BASE = (uint32_t)USBx; + + /* In case phy is stopped, ensure to ungate and restore the phy CLK */ + USBx_PCGCCTL &= ~(USB_OTG_PCGCCTL_STOPCLK | USB_OTG_PCGCCTL_GATECLK); + + USBx_DEVICE->DCTL &= ~USB_OTG_DCTL_SDIS; + + return HAL_OK; +} + +/** + * @brief USB_DevDisconnect : Disconnect the USB device by disabling Rpu + * @param USBx Selected device + * @retval HAL status + */ +HAL_StatusTypeDef USB_DevDisconnect(USB_OTG_GlobalTypeDef *USBx) +{ + uint32_t USBx_BASE = (uint32_t)USBx; + + /* In case phy is stopped, ensure to ungate and restore the phy CLK */ + USBx_PCGCCTL &= ~(USB_OTG_PCGCCTL_STOPCLK | USB_OTG_PCGCCTL_GATECLK); + + USBx_DEVICE->DCTL |= USB_OTG_DCTL_SDIS; + + return HAL_OK; +} + +/** + * @brief USB_ReadInterrupts: return the global USB interrupt status + * @param USBx Selected device + * @retval HAL status + */ +uint32_t USB_ReadInterrupts(USB_OTG_GlobalTypeDef *USBx) +{ + uint32_t tmpreg; + + tmpreg = USBx->GINTSTS; + tmpreg &= USBx->GINTMSK; + + return tmpreg; +} + +/** + * @brief USB_ReadDevAllOutEpInterrupt: return the USB device OUT endpoints interrupt status + * @param USBx Selected device + * @retval HAL status + */ +uint32_t USB_ReadDevAllOutEpInterrupt(USB_OTG_GlobalTypeDef *USBx) +{ + uint32_t USBx_BASE = (uint32_t)USBx; + uint32_t tmpreg; + + tmpreg = USBx_DEVICE->DAINT; + tmpreg &= USBx_DEVICE->DAINTMSK; + + return ((tmpreg & 0xffff0000U) >> 16); +} + +/** + * @brief USB_ReadDevAllInEpInterrupt: return the USB device IN endpoints interrupt status + * @param USBx Selected device + * @retval HAL status + */ +uint32_t USB_ReadDevAllInEpInterrupt(USB_OTG_GlobalTypeDef *USBx) +{ + uint32_t USBx_BASE = (uint32_t)USBx; + uint32_t tmpreg; + + tmpreg = USBx_DEVICE->DAINT; + tmpreg &= USBx_DEVICE->DAINTMSK; + + return ((tmpreg & 0xFFFFU)); +} + +/** + * @brief Returns Device OUT EP Interrupt register + * @param USBx Selected device + * @param epnum endpoint number + * This parameter can be a value from 0 to 15 + * @retval Device OUT EP Interrupt register + */ +uint32_t USB_ReadDevOutEPInterrupt(USB_OTG_GlobalTypeDef *USBx, uint8_t epnum) +{ + uint32_t USBx_BASE = (uint32_t)USBx; + uint32_t tmpreg; + + tmpreg = USBx_OUTEP((uint32_t)epnum)->DOEPINT; + tmpreg &= USBx_DEVICE->DOEPMSK; + + return tmpreg; +} + +/** + * @brief Returns Device IN EP Interrupt register + * @param USBx Selected device + * @param epnum endpoint number + * This parameter can be a value from 0 to 15 + * @retval Device IN EP Interrupt register + */ +uint32_t USB_ReadDevInEPInterrupt(USB_OTG_GlobalTypeDef *USBx, uint8_t epnum) +{ + uint32_t USBx_BASE = (uint32_t)USBx; + uint32_t tmpreg; + uint32_t msk; + uint32_t emp; + + msk = USBx_DEVICE->DIEPMSK; + emp = USBx_DEVICE->DIEPEMPMSK; + msk |= ((emp >> (epnum & EP_ADDR_MSK)) & 0x1U) << 7; + tmpreg = USBx_INEP((uint32_t)epnum)->DIEPINT & msk; + + return tmpreg; +} + +/** + * @brief USB_ClearInterrupts: clear a USB interrupt + * @param USBx Selected device + * @param interrupt flag + * @retval None + */ +void USB_ClearInterrupts(USB_OTG_GlobalTypeDef *USBx, uint32_t interrupt) +{ + USBx->GINTSTS |= interrupt; +} + +/** + * @brief Returns USB core mode + * @param USBx Selected device + * @retval return core mode : Host or Device + * This parameter can be one of these values: + * 0 : Host + * 1 : Device + */ +uint32_t USB_GetMode(USB_OTG_GlobalTypeDef *USBx) +{ + return ((USBx->GINTSTS) & 0x1U); +} + +/** + * @brief Activate EP0 for Setup transactions + * @param USBx Selected device 
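+ * @note   Clears the IN EP0 MPSIZ field (00: 64 bytes) and sets DCTL.CGINAK
+ *         so that endpoint 0 can accept the first control transaction.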
+ * @retval HAL status + */ +HAL_StatusTypeDef USB_ActivateSetup(USB_OTG_GlobalTypeDef *USBx) +{ + uint32_t USBx_BASE = (uint32_t)USBx; + + /* Set the MPS of the IN EP0 to 64 bytes */ + USBx_INEP(0U)->DIEPCTL &= ~USB_OTG_DIEPCTL_MPSIZ; + + USBx_DEVICE->DCTL |= USB_OTG_DCTL_CGINAK; + + return HAL_OK; +} + +/** + * @brief Prepare the EP0 to start the first control setup + * @param USBx Selected device + * @param dma USB dma enabled or disabled + * This parameter can be one of these values: + * 0 : DMA feature not used + * 1 : DMA feature used + * @param psetup pointer to setup packet + * @retval HAL status + */ +HAL_StatusTypeDef USB_EP0_OutStart(USB_OTG_GlobalTypeDef *USBx, uint8_t dma, uint8_t *psetup) +{ + uint32_t USBx_BASE = (uint32_t)USBx; + uint32_t gSNPSiD = *(__IO uint32_t *)(&USBx->CID + 0x1U); + + if (gSNPSiD > USB_OTG_CORE_ID_300A) + { + if ((USBx_OUTEP(0U)->DOEPCTL & USB_OTG_DOEPCTL_EPENA) == USB_OTG_DOEPCTL_EPENA) + { + return HAL_OK; + } + } + + USBx_OUTEP(0U)->DOEPTSIZ = 0U; + USBx_OUTEP(0U)->DOEPTSIZ |= (USB_OTG_DOEPTSIZ_PKTCNT & (1U << 19)); + USBx_OUTEP(0U)->DOEPTSIZ |= (3U * 8U); + USBx_OUTEP(0U)->DOEPTSIZ |= USB_OTG_DOEPTSIZ_STUPCNT; + + if (dma == 1U) + { + USBx_OUTEP(0U)->DOEPDMA = (uint32_t)psetup; + /* EP enable */ + USBx_OUTEP(0U)->DOEPCTL |= USB_OTG_DOEPCTL_EPENA | USB_OTG_DOEPCTL_USBAEP; + } + + return HAL_OK; +} + +/** + * @brief Reset the USB Core (needed after USB clock settings change) + * @param USBx Selected device + * @retval HAL status + */ +static HAL_StatusTypeDef USB_CoreReset(USB_OTG_GlobalTypeDef *USBx) +{ + __IO uint32_t count = 0U; + + /* Wait for AHB master IDLE state. */ + do + { + if (++count > 200000U) + { + return HAL_TIMEOUT; + } + } while ((USBx->GRSTCTL & USB_OTG_GRSTCTL_AHBIDL) == 0U); + + /* Core Soft Reset */ + count = 0U; + USBx->GRSTCTL |= USB_OTG_GRSTCTL_CSRST; + + do + { + if (++count > 200000U) + { + return HAL_TIMEOUT; + } + } while ((USBx->GRSTCTL & USB_OTG_GRSTCTL_CSRST) == USB_OTG_GRSTCTL_CSRST); + + return HAL_OK; +} + +#ifdef USBPHYC +/** + * @brief Enables control of a High Speed USB PHY + * Init the low level hardware : GPIO, CLOCK, NVIC... + * @param USBx Selected device + * @retval HAL status + */ +static HAL_StatusTypeDef USB_HS_PHYCInit(USB_OTG_GlobalTypeDef *USBx) +{ + UNUSED(USBx); + __IO uint32_t count = 0U; + + /* Enable LDO */ + USB_HS_PHYC->USB_HS_PHYC_LDO |= USB_HS_PHYC_LDO_ENABLE; + + /* wait for LDO Ready */ + while ((USB_HS_PHYC->USB_HS_PHYC_LDO & USB_HS_PHYC_LDO_STATUS) == 0U) + { + if (++count > 200000U) + { + return HAL_TIMEOUT; + } + } + + /* Controls PHY frequency operation selection */ + if (HSE_VALUE == 12000000U) /* HSE = 12MHz */ + { + USB_HS_PHYC->USB_HS_PHYC_PLL = (0x0U << 1); + } + else if (HSE_VALUE == 12500000U) /* HSE = 12.5MHz */ + { + USB_HS_PHYC->USB_HS_PHYC_PLL = (0x2U << 1); + } + else if (HSE_VALUE == 16000000U) /* HSE = 16MHz */ + { + USB_HS_PHYC->USB_HS_PHYC_PLL = (0x3U << 1); + } + else if (HSE_VALUE == 24000000U) /* HSE = 24MHz */ + { + USB_HS_PHYC->USB_HS_PHYC_PLL = (0x4U << 1); + } + else if (HSE_VALUE == 25000000U) /* HSE = 25MHz */ + { + USB_HS_PHYC->USB_HS_PHYC_PLL = (0x5U << 1); + } + else if (HSE_VALUE == 32000000U) /* HSE = 32MHz */ + { + USB_HS_PHYC->USB_HS_PHYC_PLL = (0x7U << 1); + } + else + { + /* ... 
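+        (other HSE values are not handled here; USB_HS_PHYC_PLL is left unchanged)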
*/ + } + + /* Control the tuning interface of the High Speed PHY */ + USB_HS_PHYC->USB_HS_PHYC_TUNE |= USB_HS_PHYC_TUNE_VALUE; + + /* Enable PLL internal PHY */ + USB_HS_PHYC->USB_HS_PHYC_PLL |= USB_HS_PHYC_PLL_PLLEN; + + + /* 2ms Delay required to get internal phy clock stable */ + HAL_Delay(2U); + + return HAL_OK; +} + +#endif /* USBPHYC */ +/** + * @brief USB_HostInit : Initializes the USB OTG controller registers + * for Host mode + * @param USBx Selected device + * @param cfg pointer to a USB_OTG_CfgTypeDef structure that contains + * the configuration information for the specified USBx peripheral. + * @retval HAL status + */ +HAL_StatusTypeDef USB_HostInit(USB_OTG_GlobalTypeDef *USBx, USB_OTG_CfgTypeDef cfg) +{ + HAL_StatusTypeDef ret = HAL_OK; + uint32_t USBx_BASE = (uint32_t)USBx; + uint32_t i; + + /* Restart the Phy Clock */ + USBx_PCGCCTL = 0U; + + /* Disable VBUS sensing */ + USBx->GCCFG &= ~(USB_OTG_GCCFG_VBDEN); + + if ((USBx->CID & (0x1U << 8)) != 0U) + { + if (cfg.speed == USBH_FSLS_SPEED) + { + /* Force Device Enumeration to FS/LS mode only */ + USBx_HOST->HCFG |= USB_OTG_HCFG_FSLSS; + } + else + { + /* Set default Max speed support */ + USBx_HOST->HCFG &= ~(USB_OTG_HCFG_FSLSS); + } + } + else + { + /* Set default Max speed support */ + USBx_HOST->HCFG &= ~(USB_OTG_HCFG_FSLSS); + } + + /* Make sure the FIFOs are flushed. */ + if (USB_FlushTxFifo(USBx, 0x10U) != HAL_OK) /* all Tx FIFOs */ + { + ret = HAL_ERROR; + } + + if (USB_FlushRxFifo(USBx) != HAL_OK) + { + ret = HAL_ERROR; + } + + /* Clear all pending HC Interrupts */ + for (i = 0U; i < cfg.Host_channels; i++) + { + USBx_HC(i)->HCINT = 0xFFFFFFFFU; + USBx_HC(i)->HCINTMSK = 0U; + } + + /* Disable all interrupts. */ + USBx->GINTMSK = 0U; + + /* Clear any pending interrupts */ + USBx->GINTSTS = 0xFFFFFFFFU; + + if ((USBx->CID & (0x1U << 8)) != 0U) + { + /* set Rx FIFO size */ + USBx->GRXFSIZ = 0x200U; + USBx->DIEPTXF0_HNPTXFSIZ = (uint32_t)(((0x100U << 16) & USB_OTG_NPTXFD) | 0x200U); + USBx->HPTXFSIZ = (uint32_t)(((0xE0U << 16) & USB_OTG_HPTXFSIZ_PTXFD) | 0x300U); + } + else + { + /* set Rx FIFO size */ + USBx->GRXFSIZ = 0x80U; + USBx->DIEPTXF0_HNPTXFSIZ = (uint32_t)(((0x60U << 16) & USB_OTG_NPTXFD) | 0x80U); + USBx->HPTXFSIZ = (uint32_t)(((0x40U << 16)& USB_OTG_HPTXFSIZ_PTXFD) | 0xE0U); + } + + /* Enable the common interrupts */ + if (cfg.dma_enable == 0U) + { + USBx->GINTMSK |= USB_OTG_GINTMSK_RXFLVLM; + } + + /* Enable interrupts matching to the Host mode ONLY */ + USBx->GINTMSK |= (USB_OTG_GINTMSK_PRTIM | USB_OTG_GINTMSK_HCIM | \ + USB_OTG_GINTMSK_SOFM | USB_OTG_GINTSTS_DISCINT | \ + USB_OTG_GINTMSK_PXFRM_IISOOXFRM | USB_OTG_GINTMSK_WUIM); + + return ret; +} + +/** + * @brief USB_InitFSLSPClkSel : Initializes the FSLSPClkSel field of the + * HCFG register on the PHY type and set the right frame interval + * @param USBx Selected device + * @param freq clock frequency + * This parameter can be one of these values: + * HCFG_48_MHZ : Full Speed 48 MHz Clock + * HCFG_6_MHZ : Low Speed 6 MHz Clock + * @retval HAL status + */ +HAL_StatusTypeDef USB_InitFSLSPClkSel(USB_OTG_GlobalTypeDef *USBx, uint8_t freq) +{ + uint32_t USBx_BASE = (uint32_t)USBx; + + USBx_HOST->HCFG &= ~(USB_OTG_HCFG_FSLSPCS); + USBx_HOST->HCFG |= (uint32_t)freq & USB_OTG_HCFG_FSLSPCS; + + if (freq == HCFG_48_MHZ) + { + USBx_HOST->HFIR = 48000U; + } + else if (freq == HCFG_6_MHZ) + { + USBx_HOST->HFIR = 6000U; + } + else + { + /* ... 
*/ + } + + return HAL_OK; +} + +/** + * @brief USB_OTG_ResetPort : Reset Host Port + * @param USBx Selected device + * @retval HAL status + * @note (1)The application must wait at least 10 ms + * before clearing the reset bit. + */ +HAL_StatusTypeDef USB_ResetPort(USB_OTG_GlobalTypeDef *USBx) +{ + uint32_t USBx_BASE = (uint32_t)USBx; + + __IO uint32_t hprt0 = 0U; + + hprt0 = USBx_HPRT0; + + hprt0 &= ~(USB_OTG_HPRT_PENA | USB_OTG_HPRT_PCDET | + USB_OTG_HPRT_PENCHNG | USB_OTG_HPRT_POCCHNG); + + USBx_HPRT0 = (USB_OTG_HPRT_PRST | hprt0); + HAL_Delay(100U); /* See Note #1 */ + USBx_HPRT0 = ((~USB_OTG_HPRT_PRST) & hprt0); + HAL_Delay(10U); + + return HAL_OK; +} + +/** + * @brief USB_DriveVbus : activate or de-activate vbus + * @param state VBUS state + * This parameter can be one of these values: + * 0 : Deactivate VBUS + * 1 : Activate VBUS + * @retval HAL status + */ +HAL_StatusTypeDef USB_DriveVbus(USB_OTG_GlobalTypeDef *USBx, uint8_t state) +{ + uint32_t USBx_BASE = (uint32_t)USBx; + __IO uint32_t hprt0 = 0U; + + hprt0 = USBx_HPRT0; + + hprt0 &= ~(USB_OTG_HPRT_PENA | USB_OTG_HPRT_PCDET | + USB_OTG_HPRT_PENCHNG | USB_OTG_HPRT_POCCHNG); + + if (((hprt0 & USB_OTG_HPRT_PPWR) == 0U) && (state == 1U)) + { + USBx_HPRT0 = (USB_OTG_HPRT_PPWR | hprt0); + } + if (((hprt0 & USB_OTG_HPRT_PPWR) == USB_OTG_HPRT_PPWR) && (state == 0U)) + { + USBx_HPRT0 = ((~USB_OTG_HPRT_PPWR) & hprt0); + } + return HAL_OK; +} + +/** + * @brief Return Host Core speed + * @param USBx Selected device + * @retval speed : Host speed + * This parameter can be one of these values: + * @arg HCD_SPEED_HIGH: High speed mode + * @arg HCD_SPEED_FULL: Full speed mode + * @arg HCD_SPEED_LOW: Low speed mode + */ +uint32_t USB_GetHostSpeed(USB_OTG_GlobalTypeDef *USBx) +{ + uint32_t USBx_BASE = (uint32_t)USBx; + __IO uint32_t hprt0 = 0U; + + hprt0 = USBx_HPRT0; + return ((hprt0 & USB_OTG_HPRT_PSPD) >> 17); +} + +/** + * @brief Return Host Current Frame number + * @param USBx Selected device + * @retval current frame number + */ +uint32_t USB_GetCurrentFrame(USB_OTG_GlobalTypeDef *USBx) +{ + uint32_t USBx_BASE = (uint32_t)USBx; + + return (USBx_HOST->HFNUM & USB_OTG_HFNUM_FRNUM); +} + +/** + * @brief Initialize a host channel + * @param USBx Selected device + * @param ch_num Channel number + * This parameter can be a value from 1 to 15 + * @param epnum Endpoint number + * This parameter can be a value from 1 to 15 + * @param dev_address Current device address + * This parameter can be a value from 0 to 255 + * @param speed Current device speed + * This parameter can be one of these values: + * @arg USB_OTG_SPEED_HIGH: High speed mode + * @arg USB_OTG_SPEED_FULL: Full speed mode + * @arg USB_OTG_SPEED_LOW: Low speed mode + * @param ep_type Endpoint Type + * This parameter can be one of these values: + * @arg EP_TYPE_CTRL: Control type + * @arg EP_TYPE_ISOC: Isochronous type + * @arg EP_TYPE_BULK: Bulk type + * @arg EP_TYPE_INTR: Interrupt type + * @param mps Max Packet Size + * This parameter can be a value from 0 to 32K + * @retval HAL state + */ +HAL_StatusTypeDef USB_HC_Init(USB_OTG_GlobalTypeDef *USBx, uint8_t ch_num, + uint8_t epnum, uint8_t dev_address, uint8_t speed, + uint8_t ep_type, uint16_t mps) +{ + HAL_StatusTypeDef ret = HAL_OK; + uint32_t USBx_BASE = (uint32_t)USBx; + uint32_t HCcharEpDir; + uint32_t HCcharLowSpeed; + uint32_t HostCoreSpeed; + + /* Clear old interrupt conditions for this host channel. */ + USBx_HC((uint32_t)ch_num)->HCINT = 0xFFFFFFFFU; + + /* Enable channel interrupts required for this transfer. 
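+     (the mask programmed in the switch below depends on the endpoint type:
+      control/bulk, interrupt or isochronous, with BBERR added for IN endpoints)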
*/ + switch (ep_type) + { + case EP_TYPE_CTRL: + case EP_TYPE_BULK: + USBx_HC((uint32_t)ch_num)->HCINTMSK = USB_OTG_HCINTMSK_XFRCM | + USB_OTG_HCINTMSK_STALLM | + USB_OTG_HCINTMSK_TXERRM | + USB_OTG_HCINTMSK_DTERRM | + USB_OTG_HCINTMSK_AHBERR | + USB_OTG_HCINTMSK_NAKM; + + if ((epnum & 0x80U) == 0x80U) + { + USBx_HC((uint32_t)ch_num)->HCINTMSK |= USB_OTG_HCINTMSK_BBERRM; + } + else + { + if ((USBx->CID & (0x1U << 8)) != 0U) + { + USBx_HC((uint32_t)ch_num)->HCINTMSK |= USB_OTG_HCINTMSK_NYET | + USB_OTG_HCINTMSK_ACKM; + } + } + break; + + case EP_TYPE_INTR: + USBx_HC((uint32_t)ch_num)->HCINTMSK = USB_OTG_HCINTMSK_XFRCM | + USB_OTG_HCINTMSK_STALLM | + USB_OTG_HCINTMSK_TXERRM | + USB_OTG_HCINTMSK_DTERRM | + USB_OTG_HCINTMSK_NAKM | + USB_OTG_HCINTMSK_AHBERR | + USB_OTG_HCINTMSK_FRMORM; + + if ((epnum & 0x80U) == 0x80U) + { + USBx_HC((uint32_t)ch_num)->HCINTMSK |= USB_OTG_HCINTMSK_BBERRM; + } + + break; + + case EP_TYPE_ISOC: + USBx_HC((uint32_t)ch_num)->HCINTMSK = USB_OTG_HCINTMSK_XFRCM | + USB_OTG_HCINTMSK_ACKM | + USB_OTG_HCINTMSK_AHBERR | + USB_OTG_HCINTMSK_FRMORM; + + if ((epnum & 0x80U) == 0x80U) + { + USBx_HC((uint32_t)ch_num)->HCINTMSK |= (USB_OTG_HCINTMSK_TXERRM | USB_OTG_HCINTMSK_BBERRM); + } + break; + + default: + ret = HAL_ERROR; + break; + } + + /* Enable host channel Halt interrupt */ + USBx_HC((uint32_t)ch_num)->HCINTMSK |= USB_OTG_HCINTMSK_CHHM; + + /* Enable the top level host channel interrupt. */ + USBx_HOST->HAINTMSK |= 1UL << (ch_num & 0xFU); + + /* Make sure host channel interrupts are enabled. */ + USBx->GINTMSK |= USB_OTG_GINTMSK_HCIM; + + /* Program the HCCHAR register */ + if ((epnum & 0x80U) == 0x80U) + { + HCcharEpDir = (0x1U << 15) & USB_OTG_HCCHAR_EPDIR; + } + else + { + HCcharEpDir = 0U; + } + + HostCoreSpeed = USB_GetHostSpeed(USBx); + + /* LS device plugged to HUB */ + if ((speed == HPRT0_PRTSPD_LOW_SPEED) && (HostCoreSpeed != HPRT0_PRTSPD_LOW_SPEED)) + { + HCcharLowSpeed = (0x1U << 17) & USB_OTG_HCCHAR_LSDEV; + } + else + { + HCcharLowSpeed = 0U; + } + + USBx_HC((uint32_t)ch_num)->HCCHAR = (((uint32_t)dev_address << 22) & USB_OTG_HCCHAR_DAD) | + ((((uint32_t)epnum & 0x7FU) << 11) & USB_OTG_HCCHAR_EPNUM) | + (((uint32_t)ep_type << 18) & USB_OTG_HCCHAR_EPTYP) | + ((uint32_t)mps & USB_OTG_HCCHAR_MPSIZ) | HCcharEpDir | HCcharLowSpeed; + + if (ep_type == EP_TYPE_INTR) + { + USBx_HC((uint32_t)ch_num)->HCCHAR |= USB_OTG_HCCHAR_ODDFRM ; + } + + return ret; +} + +/** + * @brief Start a transfer over a host channel + * @param USBx Selected device + * @param hc pointer to host channel structure + * @param dma USB dma enabled or disabled + * This parameter can be one of these values: + * 0 : DMA feature not used + * 1 : DMA feature used + * @retval HAL state + */ +HAL_StatusTypeDef USB_HC_StartXfer(USB_OTG_GlobalTypeDef *USBx, USB_OTG_HCTypeDef *hc, uint8_t dma) +{ + uint32_t USBx_BASE = (uint32_t)USBx; + uint32_t ch_num = (uint32_t)hc->ch_num; + __IO uint32_t tmpreg; + uint8_t is_oddframe; + uint16_t len_words; + uint16_t num_packets; + uint16_t max_hc_pkt_count = 256U; + + if (((USBx->CID & (0x1U << 8)) != 0U) && (hc->speed == USBH_HS_SPEED)) + { + /* in DMA mode host Core automatically issues ping in case of NYET/NAK */ + if ((dma == 1U) && ((hc->ep_type == EP_TYPE_CTRL) || (hc->ep_type == EP_TYPE_BULK))) + { + USBx_HC((uint32_t)ch_num)->HCINTMSK &= ~(USB_OTG_HCINTMSK_NYET | + USB_OTG_HCINTMSK_ACKM | + USB_OTG_HCINTMSK_NAKM); + } + + if ((dma == 0U) && (hc->do_ping == 1U)) + { + (void)USB_DoPing(USBx, hc->ch_num); + return HAL_OK; + } + + } + + /* Compute the expected 
number of packets associated to the transfer */ + if (hc->xfer_len > 0U) + { + num_packets = (uint16_t)((hc->xfer_len + hc->max_packet - 1U) / hc->max_packet); + + if (num_packets > max_hc_pkt_count) + { + num_packets = max_hc_pkt_count; + hc->XferSize = (uint32_t)num_packets * hc->max_packet; + } + } + else + { + num_packets = 1U; + } + + /* + * For IN channel HCTSIZ.XferSize is expected to be an integer multiple of + * max_packet size. + */ + if (hc->ep_is_in != 0U) + { + hc->XferSize = (uint32_t)num_packets * hc->max_packet; + } + else + { + hc->XferSize = hc->xfer_len; + } + + /* Initialize the HCTSIZn register */ + USBx_HC(ch_num)->HCTSIZ = (hc->XferSize & USB_OTG_HCTSIZ_XFRSIZ) | + (((uint32_t)num_packets << 19) & USB_OTG_HCTSIZ_PKTCNT) | + (((uint32_t)hc->data_pid << 29) & USB_OTG_HCTSIZ_DPID); + + if (dma != 0U) + { + /* xfer_buff MUST be 32-bits aligned */ + USBx_HC(ch_num)->HCDMA = (uint32_t)hc->xfer_buff; + } + + is_oddframe = (((uint32_t)USBx_HOST->HFNUM & 0x01U) != 0U) ? 0U : 1U; + USBx_HC(ch_num)->HCCHAR &= ~USB_OTG_HCCHAR_ODDFRM; + USBx_HC(ch_num)->HCCHAR |= (uint32_t)is_oddframe << 29; + + /* Set host channel enable */ + tmpreg = USBx_HC(ch_num)->HCCHAR; + tmpreg &= ~USB_OTG_HCCHAR_CHDIS; + + /* make sure to set the correct ep direction */ + if (hc->ep_is_in != 0U) + { + tmpreg |= USB_OTG_HCCHAR_EPDIR; + } + else + { + tmpreg &= ~USB_OTG_HCCHAR_EPDIR; + } + tmpreg |= USB_OTG_HCCHAR_CHENA; + USBx_HC(ch_num)->HCCHAR = tmpreg; + + if (dma != 0U) /* dma mode */ + { + return HAL_OK; + } + + if ((hc->ep_is_in == 0U) && (hc->xfer_len > 0U)) + { + switch (hc->ep_type) + { + /* Non periodic transfer */ + case EP_TYPE_CTRL: + case EP_TYPE_BULK: + + len_words = (uint16_t)((hc->xfer_len + 3U) / 4U); + + /* check if there is enough space in FIFO space */ + if (len_words > (USBx->HNPTXSTS & 0xFFFFU)) + { + /* need to process data in nptxfempty interrupt */ + USBx->GINTMSK |= USB_OTG_GINTMSK_NPTXFEM; + } + break; + + /* Periodic transfer */ + case EP_TYPE_INTR: + case EP_TYPE_ISOC: + len_words = (uint16_t)((hc->xfer_len + 3U) / 4U); + /* check if there is enough space in FIFO space */ + if (len_words > (USBx_HOST->HPTXSTS & 0xFFFFU)) /* split the transfer */ + { + /* need to process data in ptxfempty interrupt */ + USBx->GINTMSK |= USB_OTG_GINTMSK_PTXFEM; + } + break; + + default: + break; + } + + /* Write packet into the Tx FIFO. */ + (void)USB_WritePacket(USBx, hc->xfer_buff, hc->ch_num, (uint16_t)hc->xfer_len, 0); + } + + return HAL_OK; +} + +/** + * @brief Read all host channel interrupts status + * @param USBx Selected device + * @retval HAL state + */ +uint32_t USB_HC_ReadInterrupt(USB_OTG_GlobalTypeDef *USBx) +{ + uint32_t USBx_BASE = (uint32_t)USBx; + + return ((USBx_HOST->HAINT) & 0xFFFFU); +} + +/** + * @brief Halt a host channel + * @param USBx Selected device + * @param hc_num Host Channel number + * This parameter can be a value from 1 to 15 + * @retval HAL state + */ +HAL_StatusTypeDef USB_HC_Halt(USB_OTG_GlobalTypeDef *USBx, uint8_t hc_num) +{ + uint32_t USBx_BASE = (uint32_t)USBx; + uint32_t hcnum = (uint32_t)hc_num; + __IO uint32_t count = 0U; + uint32_t HcEpType = (USBx_HC(hcnum)->HCCHAR & USB_OTG_HCCHAR_EPTYP) >> 18; + uint32_t ChannelEna = (USBx_HC(hcnum)->HCCHAR & USB_OTG_HCCHAR_CHENA) >> 31; + + if (((USBx->GAHBCFG & USB_OTG_GAHBCFG_DMAEN) == USB_OTG_GAHBCFG_DMAEN) && + (ChannelEna == 0U)) + { + return HAL_OK; + } + + /* Check for space in the request queue to issue the halt. 
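+     (control/bulk channels check the non-periodic queue via HNPTXSTS,
+      interrupt/isochronous channels check the periodic queue via HPTXSTS)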
*/ + if ((HcEpType == HCCHAR_CTRL) || (HcEpType == HCCHAR_BULK)) + { + USBx_HC(hcnum)->HCCHAR |= USB_OTG_HCCHAR_CHDIS; + + if ((USBx->GAHBCFG & USB_OTG_GAHBCFG_DMAEN) == 0U) + { + if ((USBx->HNPTXSTS & (0xFFU << 16)) == 0U) + { + USBx_HC(hcnum)->HCCHAR &= ~USB_OTG_HCCHAR_CHENA; + USBx_HC(hcnum)->HCCHAR |= USB_OTG_HCCHAR_CHENA; + do + { + if (++count > 1000U) + { + break; + } + } while ((USBx_HC(hcnum)->HCCHAR & USB_OTG_HCCHAR_CHENA) == USB_OTG_HCCHAR_CHENA); + } + else + { + USBx_HC(hcnum)->HCCHAR |= USB_OTG_HCCHAR_CHENA; + } + } + } + else + { + USBx_HC(hcnum)->HCCHAR |= USB_OTG_HCCHAR_CHDIS; + + if ((USBx_HOST->HPTXSTS & (0xFFU << 16)) == 0U) + { + USBx_HC(hcnum)->HCCHAR &= ~USB_OTG_HCCHAR_CHENA; + USBx_HC(hcnum)->HCCHAR |= USB_OTG_HCCHAR_CHENA; + do + { + if (++count > 1000U) + { + break; + } + } while ((USBx_HC(hcnum)->HCCHAR & USB_OTG_HCCHAR_CHENA) == USB_OTG_HCCHAR_CHENA); + } + else + { + USBx_HC(hcnum)->HCCHAR |= USB_OTG_HCCHAR_CHENA; + } + } + + return HAL_OK; +} + +/** + * @brief Initiate Do Ping protocol + * @param USBx Selected device + * @param hc_num Host Channel number + * This parameter can be a value from 1 to 15 + * @retval HAL state + */ +HAL_StatusTypeDef USB_DoPing(USB_OTG_GlobalTypeDef *USBx, uint8_t ch_num) +{ + uint32_t USBx_BASE = (uint32_t)USBx; + uint32_t chnum = (uint32_t)ch_num; + uint32_t num_packets = 1U; + uint32_t tmpreg; + + USBx_HC(chnum)->HCTSIZ = ((num_packets << 19) & USB_OTG_HCTSIZ_PKTCNT) | + USB_OTG_HCTSIZ_DOPING; + + /* Set host channel enable */ + tmpreg = USBx_HC(chnum)->HCCHAR; + tmpreg &= ~USB_OTG_HCCHAR_CHDIS; + tmpreg |= USB_OTG_HCCHAR_CHENA; + USBx_HC(chnum)->HCCHAR = tmpreg; + + return HAL_OK; +} + +/** + * @brief Stop Host Core + * @param USBx Selected device + * @retval HAL state + */ +HAL_StatusTypeDef USB_StopHost(USB_OTG_GlobalTypeDef *USBx) +{ + HAL_StatusTypeDef ret = HAL_OK; + uint32_t USBx_BASE = (uint32_t)USBx; + __IO uint32_t count = 0U; + uint32_t value; + uint32_t i; + + (void)USB_DisableGlobalInt(USBx); + + /* Flush USB FIFO */ + if (USB_FlushTxFifo(USBx, 0x10U) != HAL_OK) /* all Tx FIFOs */ + { + ret = HAL_ERROR; + } + + if (USB_FlushRxFifo(USBx) != HAL_OK) + { + ret = HAL_ERROR; + } + + /* Flush out any leftover queued requests. */ + for (i = 0U; i <= 15U; i++) + { + value = USBx_HC(i)->HCCHAR; + value |= USB_OTG_HCCHAR_CHDIS; + value &= ~USB_OTG_HCCHAR_CHENA; + value &= ~USB_OTG_HCCHAR_EPDIR; + USBx_HC(i)->HCCHAR = value; + } + + /* Halt all channels to put them into a known state. 
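+      (CHDIS and CHENA are set for every channel, then CHENA is polled until it
+       clears, bounded by a 1000-iteration guard)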
*/ + for (i = 0U; i <= 15U; i++) + { + value = USBx_HC(i)->HCCHAR; + value |= USB_OTG_HCCHAR_CHDIS; + value |= USB_OTG_HCCHAR_CHENA; + value &= ~USB_OTG_HCCHAR_EPDIR; + USBx_HC(i)->HCCHAR = value; + + do + { + if (++count > 1000U) + { + break; + } + } while ((USBx_HC(i)->HCCHAR & USB_OTG_HCCHAR_CHENA) == USB_OTG_HCCHAR_CHENA); + } + + /* Clear any pending Host interrupts */ + USBx_HOST->HAINT = 0xFFFFFFFFU; + USBx->GINTSTS = 0xFFFFFFFFU; + + (void)USB_EnableGlobalInt(USBx); + + return ret; +} + +/** + * @brief USB_ActivateRemoteWakeup active remote wakeup signalling + * @param USBx Selected device + * @retval HAL status + */ +HAL_StatusTypeDef USB_ActivateRemoteWakeup(USB_OTG_GlobalTypeDef *USBx) +{ + uint32_t USBx_BASE = (uint32_t)USBx; + + if ((USBx_DEVICE->DSTS & USB_OTG_DSTS_SUSPSTS) == USB_OTG_DSTS_SUSPSTS) + { + /* active Remote wakeup signalling */ + USBx_DEVICE->DCTL |= USB_OTG_DCTL_RWUSIG; + } + + return HAL_OK; +} + +/** + * @brief USB_DeActivateRemoteWakeup de-active remote wakeup signalling + * @param USBx Selected device + * @retval HAL status + */ +HAL_StatusTypeDef USB_DeActivateRemoteWakeup(USB_OTG_GlobalTypeDef *USBx) +{ + uint32_t USBx_BASE = (uint32_t)USBx; + + /* active Remote wakeup signalling */ + USBx_DEVICE->DCTL &= ~(USB_OTG_DCTL_RWUSIG); + + return HAL_OK; +} +#endif /* defined (USB_OTG_FS) || defined (USB_OTG_HS) */ + + +/** + * @} + */ + +/** + * @} + */ +#endif /* defined (USB_OTG_FS) || defined (USB_OTG_HS) */ +#endif /* defined (HAL_PCD_MODULE_ENABLED) || defined (HAL_HCD_MODULE_ENABLED) */ + +/** + * @} + */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/LWIP/App/lwip.c b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/LWIP/App/lwip.c new file mode 100644 index 0000000..771ad19 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/LWIP/App/lwip.c @@ -0,0 +1,211 @@ +/** + ****************************************************************************** + * File Name : LWIP.c + * Description : This file provides initialization code for LWIP + * middleWare. + ****************************************************************************** + * @attention + * + *

<h2><center>&copy; Copyright (c) 2022 STMicroelectronics. + * All rights reserved.</center></h2>
+ * + * This software component is licensed by ST under Ultimate Liberty license + * SLA0044, the "License"; You may not use this file except in compliance with + * the License. You may obtain a copy of the License at: + * www.st.com/SLA0044 + * + ****************************************************************************** + */ + +/* Includes ------------------------------------------------------------------*/ +#include "lwip.h" +#include "lwip/init.h" +#include "lwip/netif.h" +#if defined ( __CC_ARM ) /* MDK ARM Compiler */ +#include "lwip/sio.h" +#endif /* MDK ARM Compiler */ +#include "ethernetif.h" +#include + +/* USER CODE BEGIN 0 */ + +/* USER CODE END 0 */ +/* Private function prototypes -----------------------------------------------*/ +/* ETH Variables initialization ----------------------------------------------*/ +void Error_Handler(void); + +/* USER CODE BEGIN 1 */ + +/* USER CODE END 1 */ +/* Semaphore to signal Ethernet Link state update */ +osSemaphoreId Netif_LinkSemaphore = NULL; +/* Ethernet link thread Argument */ +struct link_str link_arg; + +/* Variables Initialization */ +struct netif gnetif; +ip4_addr_t ipaddr; +ip4_addr_t netmask; +ip4_addr_t gw; +uint8_t IP_ADDRESS[4]; +uint8_t NETMASK_ADDRESS[4]; +uint8_t GATEWAY_ADDRESS[4]; +/* USER CODE BEGIN OS_THREAD_ATTR_CMSIS_RTOS_V2 */ +#define INTERFACE_THREAD_STACK_SIZE ( 1024 ) +osThreadAttr_t attributes; +/* USER CODE END OS_THREAD_ATTR_CMSIS_RTOS_V2 */ + +/* USER CODE BEGIN 2 */ + +/* USER CODE END 2 */ + +/** + * LwIP initialization function + */ +void MX_LWIP_Init(void) +{ + /* IP addresses initialization */ + IP_ADDRESS[0] = 192; + IP_ADDRESS[1] = 168; + IP_ADDRESS[2] = 11; + IP_ADDRESS[3] = 2; + NETMASK_ADDRESS[0] = 255; + NETMASK_ADDRESS[1] = 255; + NETMASK_ADDRESS[2] = 255; + NETMASK_ADDRESS[3] = 0; + GATEWAY_ADDRESS[0] = 192; + GATEWAY_ADDRESS[1] = 168; + GATEWAY_ADDRESS[2] = 11; + GATEWAY_ADDRESS[3] = 1; + +/* USER CODE BEGIN IP_ADDRESSES */ +/* USER CODE END IP_ADDRESSES */ + + /* Initilialize the LwIP stack with RTOS */ + tcpip_init( NULL, NULL ); + + /* IP addresses initialization without DHCP (IPv4) */ + IP4_ADDR(&ipaddr, IP_ADDRESS[0], IP_ADDRESS[1], IP_ADDRESS[2], IP_ADDRESS[3]); + IP4_ADDR(&netmask, NETMASK_ADDRESS[0], NETMASK_ADDRESS[1] , NETMASK_ADDRESS[2], NETMASK_ADDRESS[3]); + IP4_ADDR(&gw, GATEWAY_ADDRESS[0], GATEWAY_ADDRESS[1], GATEWAY_ADDRESS[2], GATEWAY_ADDRESS[3]); + + /* add the network interface (IPv4/IPv6) with RTOS */ + netif_add(&gnetif, &ipaddr, &netmask, &gw, NULL, ðernetif_init, &tcpip_input); + + /* Registers the default network interface */ + netif_set_default(&gnetif); + + if (netif_is_link_up(&gnetif)) + { + /* When the netif is fully configured this function must be called */ + netif_set_up(&gnetif); + } + else + { + /* When the netif link is down this function must be called */ + netif_set_down(&gnetif); + } + + /* Set the link callback function, this function is called on change of link status*/ + netif_set_link_callback(&gnetif, ethernetif_update_config); + + /* create a binary semaphore used for informing ethernetif of frame reception */ + Netif_LinkSemaphore = osSemaphoreNew(1, 1, NULL); + + link_arg.netif = &gnetif; + link_arg.semaphore = Netif_LinkSemaphore; + /* Create the Ethernet link handler thread */ +/* USER CODE BEGIN OS_THREAD_NEW_CMSIS_RTOS_V2 */ + memset(&attributes, 0x0, sizeof(osThreadAttr_t)); + attributes.name = "LinkThr"; + attributes.stack_size = INTERFACE_THREAD_STACK_SIZE; + attributes.priority = osPriorityBelowNormal; + osThreadNew(ethernetif_set_link, 
&link_arg, &attributes); +/* USER CODE END OS_THREAD_NEW_CMSIS_RTOS_V2 */ + +/* USER CODE BEGIN 3 */ + +/* USER CODE END 3 */ +} + +#ifdef USE_OBSOLETE_USER_CODE_SECTION_4 +/* Kept to help code migration. (See new 4_1, 4_2... sections) */ +/* Avoid to use this user section which will become obsolete. */ +/* USER CODE BEGIN 4 */ +/* USER CODE END 4 */ +#endif + +#if defined ( __CC_ARM ) /* MDK ARM Compiler */ +/** + * Opens a serial device for communication. + * + * @param devnum device number + * @return handle to serial device if successful, NULL otherwise + */ +sio_fd_t sio_open(u8_t devnum) +{ + sio_fd_t sd; + +/* USER CODE BEGIN 7 */ + sd = 0; // dummy code +/* USER CODE END 7 */ + + return sd; +} + +/** + * Sends a single character to the serial device. + * + * @param c character to send + * @param fd serial device handle + * + * @note This function will block until the character can be sent. + */ +void sio_send(u8_t c, sio_fd_t fd) +{ +/* USER CODE BEGIN 8 */ +/* USER CODE END 8 */ +} + +/** + * Reads from the serial device. + * + * @param fd serial device handle + * @param data pointer to data buffer for receiving + * @param len maximum length (in bytes) of data to receive + * @return number of bytes actually received - may be 0 if aborted by sio_read_abort + * + * @note This function will block until data can be received. The blocking + * can be cancelled by calling sio_read_abort(). + */ +u32_t sio_read(sio_fd_t fd, u8_t *data, u32_t len) +{ + u32_t recved_bytes; + +/* USER CODE BEGIN 9 */ + recved_bytes = 0; // dummy code +/* USER CODE END 9 */ + return recved_bytes; +} + +/** + * Tries to read from the serial device. Same as sio_read but returns + * immediately if no data is available and never blocks. + * + * @param fd serial device handle + * @param data pointer to data buffer for receiving + * @param len maximum length (in bytes) of data to receive + * @return number of bytes actually received + */ +u32_t sio_tryread(sio_fd_t fd, u8_t *data, u32_t len) +{ + u32_t recved_bytes; + +/* USER CODE BEGIN 10 */ + recved_bytes = 0; // dummy code +/* USER CODE END 10 */ + return recved_bytes; +} +#endif /* MDK ARM Compiler */ + +/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/LWIP/App/lwip.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/LWIP/App/lwip.h new file mode 100644 index 0000000..b3b4cfb --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/LWIP/App/lwip.h @@ -0,0 +1,77 @@ +/** + ****************************************************************************** + * File Name : LWIP.h + * Description : This file provides code for the configuration + * of the LWIP. + ****************************************************************************** + * @attention + * + *

<h2><center>&copy; Copyright (c) 2022 STMicroelectronics. + * All rights reserved.</center></h2>
+ * + * This software component is licensed by ST under Ultimate Liberty license + * SLA0044, the "License"; You may not use this file except in compliance with + * the License. You may obtain a copy of the License at: + * www.st.com/SLA0044 + * + ************************************************************************* + + */ +/* Define to prevent recursive inclusion -------------------------------------*/ +#ifndef __mx_lwip_H +#define __mx_lwip_H +#ifdef __cplusplus + extern "C" { +#endif + +/* Includes ------------------------------------------------------------------*/ +#include "lwip/opt.h" +#include "lwip/mem.h" +#include "lwip/memp.h" +#include "netif/etharp.h" +#include "lwip/dhcp.h" +#include "lwip/netif.h" +#include "lwip/timeouts.h" +#include "ethernetif.h" + +/* Includes for RTOS ---------------------------------------------------------*/ +#if WITH_RTOS +#include "lwip/tcpip.h" +#endif /* WITH_RTOS */ + +/* USER CODE BEGIN 0 */ + +/* USER CODE END 0 */ + +/* Global Variables ----------------------------------------------------------*/ +extern ETH_HandleTypeDef heth; + +/* LWIP init function */ +void MX_LWIP_Init(void); + +#if !WITH_RTOS +/* USER CODE BEGIN 1 */ +/* Function defined in lwip.c to: + * - Read a received packet from the Ethernet buffers + * - Send it to the lwIP stack for handling + * - Handle timeouts if NO_SYS_NO_TIMERS not set + */ +void MX_LWIP_Process(void); + +/* USER CODE END 1 */ +#endif /* WITH_RTOS */ + +#ifdef __cplusplus +} +#endif +#endif /*__ mx_lwip_H */ + +/** + * @} + */ + +/** + * @} + */ + +/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/LWIP/Target/ethernetif.c b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/LWIP/Target/ethernetif.c new file mode 100644 index 0000000..f4b25a8 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/LWIP/Target/ethernetif.c @@ -0,0 +1,781 @@ +/** + ****************************************************************************** + * File Name : ethernetif.c + * Description : This file provides code for the configuration + * of the Target/ethernetif.c MiddleWare. + ****************************************************************************** + * @attention + * + *

<h2><center>&copy; Copyright (c) 2022 STMicroelectronics. + * All rights reserved.</center></h2>
+ * + * This software component is licensed by ST under Ultimate Liberty license + * SLA0044, the "License"; You may not use this file except in compliance with + * the License. You may obtain a copy of the License at: + * www.st.com/SLA0044 + * + ****************************************************************************** + */ + +/* Includes ------------------------------------------------------------------*/ +#include "main.h" +#include "lwip/opt.h" + +#include "lwip/timeouts.h" +#include "netif/ethernet.h" +#include "netif/etharp.h" +#include "lwip/ethip6.h" +#include "ethernetif.h" +#include +#include "cmsis_os.h" +#include "lwip/tcpip.h" +/* Within 'USER CODE' section, code will be kept by default at each generation */ +/* USER CODE BEGIN 0 */ + +/* USER CODE END 0 */ + +/* Private define ------------------------------------------------------------*/ +/* The time to block waiting for input. */ +#define TIME_WAITING_FOR_INPUT ( portMAX_DELAY ) +/* USER CODE BEGIN OS_THREAD_STACK_SIZE_WITH_RTOS */ +/* Stack size of the interface thread */ +#define INTERFACE_THREAD_STACK_SIZE ( 350 ) +/* USER CODE END OS_THREAD_STACK_SIZE_WITH_RTOS */ +/* Network interface name */ +#define IFNAME0 's' +#define IFNAME1 't' + +/* USER CODE BEGIN 1 */ + +/* USER CODE END 1 */ + +/* Private variables ---------------------------------------------------------*/ +#if defined ( __ICCARM__ ) /*!< IAR Compiler */ + #pragma data_alignment=4 +#endif +__ALIGN_BEGIN ETH_DMADescTypeDef DMARxDscrTab[ETH_RXBUFNB] __ALIGN_END;/* Ethernet Rx MA Descriptor */ + +#if defined ( __ICCARM__ ) /*!< IAR Compiler */ + #pragma data_alignment=4 +#endif +__ALIGN_BEGIN ETH_DMADescTypeDef DMATxDscrTab[ETH_TXBUFNB] __ALIGN_END;/* Ethernet Tx DMA Descriptor */ + +#if defined ( __ICCARM__ ) /*!< IAR Compiler */ + #pragma data_alignment=4 +#endif +__ALIGN_BEGIN uint8_t Rx_Buff[ETH_RXBUFNB][ETH_RX_BUF_SIZE] __ALIGN_END; /* Ethernet Receive Buffer */ + +#if defined ( __ICCARM__ ) /*!< IAR Compiler */ + #pragma data_alignment=4 +#endif +__ALIGN_BEGIN uint8_t Tx_Buff[ETH_TXBUFNB][ETH_TX_BUF_SIZE] __ALIGN_END; /* Ethernet Transmit Buffer */ + +/* USER CODE BEGIN 2 */ + +/* USER CODE END 2 */ + +/* Semaphore to signal incoming packets */ +osSemaphoreId s_xSemaphore = NULL; +/* Global Ethernet handle */ +ETH_HandleTypeDef heth; + +/* USER CODE BEGIN 3 */ + +/* USER CODE END 3 */ + +/* Private functions ---------------------------------------------------------*/ + +void HAL_ETH_MspInit(ETH_HandleTypeDef* ethHandle) +{ + GPIO_InitTypeDef GPIO_InitStruct = {0}; + if(ethHandle->Instance==ETH) + { + /* USER CODE BEGIN ETH_MspInit 0 */ + + /* USER CODE END ETH_MspInit 0 */ + /* Enable Peripheral clock */ + __HAL_RCC_ETH_CLK_ENABLE(); + + __HAL_RCC_GPIOC_CLK_ENABLE(); + __HAL_RCC_GPIOA_CLK_ENABLE(); + __HAL_RCC_GPIOB_CLK_ENABLE(); + __HAL_RCC_GPIOG_CLK_ENABLE(); + /**ETH GPIO Configuration + PC1 ------> ETH_MDC + PA1 ------> ETH_REF_CLK + PA2 ------> ETH_MDIO + PA7 ------> ETH_CRS_DV + PC4 ------> ETH_RXD0 + PC5 ------> ETH_RXD1 + PB13 ------> ETH_TXD1 + PG11 ------> ETH_TX_EN + PG13 ------> ETH_TXD0 + */ + GPIO_InitStruct.Pin = RMII_MDC_Pin|RMII_RXD0_Pin|RMII_RXD1_Pin; + GPIO_InitStruct.Mode = GPIO_MODE_AF_PP; + GPIO_InitStruct.Pull = GPIO_NOPULL; + GPIO_InitStruct.Speed = GPIO_SPEED_FREQ_VERY_HIGH; + GPIO_InitStruct.Alternate = GPIO_AF11_ETH; + HAL_GPIO_Init(GPIOC, &GPIO_InitStruct); + + GPIO_InitStruct.Pin = RMII_REF_CLK_Pin|RMII_MDIO_Pin|RMII_CRS_DV_Pin; + GPIO_InitStruct.Mode = GPIO_MODE_AF_PP; + GPIO_InitStruct.Pull = GPIO_NOPULL; + 
GPIO_InitStruct.Speed = GPIO_SPEED_FREQ_VERY_HIGH; + GPIO_InitStruct.Alternate = GPIO_AF11_ETH; + HAL_GPIO_Init(GPIOA, &GPIO_InitStruct); + + GPIO_InitStruct.Pin = RMII_TXD1_Pin; + GPIO_InitStruct.Mode = GPIO_MODE_AF_PP; + GPIO_InitStruct.Pull = GPIO_NOPULL; + GPIO_InitStruct.Speed = GPIO_SPEED_FREQ_VERY_HIGH; + GPIO_InitStruct.Alternate = GPIO_AF11_ETH; + HAL_GPIO_Init(RMII_TXD1_GPIO_Port, &GPIO_InitStruct); + + GPIO_InitStruct.Pin = RMII_TX_EN_Pin|RMII_TXD0_Pin; + GPIO_InitStruct.Mode = GPIO_MODE_AF_PP; + GPIO_InitStruct.Pull = GPIO_NOPULL; + GPIO_InitStruct.Speed = GPIO_SPEED_FREQ_VERY_HIGH; + GPIO_InitStruct.Alternate = GPIO_AF11_ETH; + HAL_GPIO_Init(GPIOG, &GPIO_InitStruct); + + /* Peripheral interrupt init */ + HAL_NVIC_SetPriority(ETH_IRQn, 5, 0); + HAL_NVIC_EnableIRQ(ETH_IRQn); + /* USER CODE BEGIN ETH_MspInit 1 */ + + /* USER CODE END ETH_MspInit 1 */ + } +} + +void HAL_ETH_MspDeInit(ETH_HandleTypeDef* ethHandle) +{ + if(ethHandle->Instance==ETH) + { + /* USER CODE BEGIN ETH_MspDeInit 0 */ + + /* USER CODE END ETH_MspDeInit 0 */ + /* Peripheral clock disable */ + __HAL_RCC_ETH_CLK_DISABLE(); + + /**ETH GPIO Configuration + PC1 ------> ETH_MDC + PA1 ------> ETH_REF_CLK + PA2 ------> ETH_MDIO + PA7 ------> ETH_CRS_DV + PC4 ------> ETH_RXD0 + PC5 ------> ETH_RXD1 + PB13 ------> ETH_TXD1 + PG11 ------> ETH_TX_EN + PG13 ------> ETH_TXD0 + */ + HAL_GPIO_DeInit(GPIOC, RMII_MDC_Pin|RMII_RXD0_Pin|RMII_RXD1_Pin); + + HAL_GPIO_DeInit(GPIOA, RMII_REF_CLK_Pin|RMII_MDIO_Pin|RMII_CRS_DV_Pin); + + HAL_GPIO_DeInit(RMII_TXD1_GPIO_Port, RMII_TXD1_Pin); + + HAL_GPIO_DeInit(GPIOG, RMII_TX_EN_Pin|RMII_TXD0_Pin); + + /* Peripheral interrupt Deinit*/ + HAL_NVIC_DisableIRQ(ETH_IRQn); + + /* USER CODE BEGIN ETH_MspDeInit 1 */ + + /* USER CODE END ETH_MspDeInit 1 */ + } +} + +/** + * @brief Ethernet Rx Transfer completed callback + * @param heth: ETH handle + * @retval None + */ +void HAL_ETH_RxCpltCallback(ETH_HandleTypeDef *heth) +{ + osSemaphoreRelease(s_xSemaphore); +} + +/* USER CODE BEGIN 4 */ + +/* USER CODE END 4 */ + +/******************************************************************************* + LL Driver Interface ( LwIP stack --> ETH) +*******************************************************************************/ +/** + * In this function, the hardware should be initialized. + * Called from ethernetif_init(). 
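+ * Here it configures the ETH handle for RMII with auto-negotiation and the
+ * MAC address 00:80:E1:00:00:00, initializes the Tx/Rx DMA descriptor lists
+ * in chain mode, creates the "EthIf" input thread and starts the MAC/DMA.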
+ * + * @param netif the already initialized lwip network interface structure + * for this ethernetif + */ +static void low_level_init(struct netif *netif) +{ + uint32_t regvalue = 0; + HAL_StatusTypeDef hal_eth_init_status; +/* USER CODE BEGIN OS_THREAD_ATTR_CMSIS_RTOS_V2 */ + osThreadAttr_t attributes; +/* USER CODE END OS_THREAD_ATTR_CMSIS_RTOS_V2 */ + +/* Init ETH */ + + uint8_t MACAddr[6] ; + heth.Instance = ETH; + heth.Init.AutoNegotiation = ETH_AUTONEGOTIATION_ENABLE; + heth.Init.Speed = ETH_SPEED_100M; + heth.Init.DuplexMode = ETH_MODE_FULLDUPLEX; + heth.Init.PhyAddress = LAN8742A_PHY_ADDRESS; + MACAddr[0] = 0x00; + MACAddr[1] = 0x80; + MACAddr[2] = 0xE1; + MACAddr[3] = 0x00; + MACAddr[4] = 0x00; + MACAddr[5] = 0x00; + heth.Init.MACAddr = &MACAddr[0]; + heth.Init.RxMode = ETH_RXINTERRUPT_MODE; + heth.Init.ChecksumMode = ETH_CHECKSUM_BY_HARDWARE; + heth.Init.MediaInterface = ETH_MEDIA_INTERFACE_RMII; + + /* USER CODE BEGIN MACADDRESS */ + netif->flags |= NETIF_FLAG_IGMP; + /* USER CODE END MACADDRESS */ + + hal_eth_init_status = HAL_ETH_Init(&heth); + + if (hal_eth_init_status == HAL_OK) + { + /* Set netif link flag */ + netif->flags |= NETIF_FLAG_LINK_UP; + } + /* Initialize Tx Descriptors list: Chain Mode */ + HAL_ETH_DMATxDescListInit(&heth, DMATxDscrTab, &Tx_Buff[0][0], ETH_TXBUFNB); + + /* Initialize Rx Descriptors list: Chain Mode */ + HAL_ETH_DMARxDescListInit(&heth, DMARxDscrTab, &Rx_Buff[0][0], ETH_RXBUFNB); + +#if LWIP_ARP || LWIP_ETHERNET + + /* set MAC hardware address length */ + netif->hwaddr_len = ETH_HWADDR_LEN; + + /* set MAC hardware address */ + netif->hwaddr[0] = heth.Init.MACAddr[0]; + netif->hwaddr[1] = heth.Init.MACAddr[1]; + netif->hwaddr[2] = heth.Init.MACAddr[2]; + netif->hwaddr[3] = heth.Init.MACAddr[3]; + netif->hwaddr[4] = heth.Init.MACAddr[4]; + netif->hwaddr[5] = heth.Init.MACAddr[5]; + + /* maximum transfer unit */ + netif->mtu = 1500; + + /* Accept broadcast address and ARP traffic */ + /* don't set NETIF_FLAG_ETHARP if this device is not an ethernet one */ + #if LWIP_ARP + netif->flags |= NETIF_FLAG_BROADCAST | NETIF_FLAG_ETHARP; + #else + netif->flags |= NETIF_FLAG_BROADCAST; + #endif /* LWIP_ARP */ + +/* create a binary semaphore used for informing ethernetif of frame reception */ + s_xSemaphore = osSemaphoreNew(1, 1, NULL); + +/* create the task that handles the ETH_MAC */ +/* USER CODE BEGIN OS_THREAD_NEW_CMSIS_RTOS_V2 */ + memset(&attributes, 0x0, sizeof(osThreadAttr_t)); + attributes.name = "EthIf"; + attributes.stack_size = INTERFACE_THREAD_STACK_SIZE; + attributes.priority = osPriorityRealtime; + osThreadNew(ethernetif_input, netif, &attributes); +/* USER CODE END OS_THREAD_NEW_CMSIS_RTOS_V2 */ + /* Enable MAC and DMA transmission and reception */ + HAL_ETH_Start(&heth); + +/* USER CODE BEGIN PHY_PRE_CONFIG */ + +/* USER CODE END PHY_PRE_CONFIG */ + + /* Read Register Configuration */ + HAL_ETH_ReadPHYRegister(&heth, PHY_ISFR, ®value); + regvalue |= (PHY_ISFR_INT4); + + /* Enable Interrupt on change of link status */ + HAL_ETH_WritePHYRegister(&heth, PHY_ISFR , regvalue ); + + /* Read Register Configuration */ + HAL_ETH_ReadPHYRegister(&heth, PHY_ISFR , ®value); + +/* USER CODE BEGIN PHY_POST_CONFIG */ + +/* USER CODE END PHY_POST_CONFIG */ + +#endif /* LWIP_ARP || LWIP_ETHERNET */ + +/* USER CODE BEGIN LOW_LEVEL_INIT */ + +/* USER CODE END LOW_LEVEL_INIT */ +} + +/** + * This function should do the actual transmission of the packet. The packet is + * contained in the pbuf that is passed to the function. This pbuf + * might be chained. 
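+ * Here each pbuf of the chain is copied into the current ETH DMA Tx buffer,
+ * advancing to the next descriptor whenever a buffer fills, and the assembled
+ * frame is handed to the MAC with HAL_ETH_TransmitFrame().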
+ * + * @param netif the lwip network interface structure for this ethernetif + * @param p the MAC packet to send (e.g. IP packet including MAC addresses and type) + * @return ERR_OK if the packet could be sent + * an err_t value if the packet couldn't be sent + * + * @note Returning ERR_MEM here if a DMA queue of your MAC is full can lead to + * strange results. You might consider waiting for space in the DMA queue + * to become available since the stack doesn't retry to send a packet + * dropped because of memory failure (except for the TCP timers). + */ + +static err_t low_level_output(struct netif *netif, struct pbuf *p) +{ + err_t errval; + struct pbuf *q; + uint8_t *buffer = (uint8_t *)(heth.TxDesc->Buffer1Addr); + __IO ETH_DMADescTypeDef *DmaTxDesc; + uint32_t framelength = 0; + uint32_t bufferoffset = 0; + uint32_t byteslefttocopy = 0; + uint32_t payloadoffset = 0; + DmaTxDesc = heth.TxDesc; + bufferoffset = 0; + + /* copy frame from pbufs to driver buffers */ + for(q = p; q != NULL; q = q->next) + { + /* Is this buffer available? If not, goto error */ + if((DmaTxDesc->Status & ETH_DMATXDESC_OWN) != (uint32_t)RESET) + { + errval = ERR_USE; + goto error; + } + + /* Get bytes in current lwIP buffer */ + byteslefttocopy = q->len; + payloadoffset = 0; + + /* Check if the length of data to copy is bigger than Tx buffer size*/ + while( (byteslefttocopy + bufferoffset) > ETH_TX_BUF_SIZE ) + { + /* Copy data to Tx buffer*/ + memcpy( (uint8_t*)((uint8_t*)buffer + bufferoffset), (uint8_t*)((uint8_t*)q->payload + payloadoffset), (ETH_TX_BUF_SIZE - bufferoffset) ); + + /* Point to next descriptor */ + DmaTxDesc = (ETH_DMADescTypeDef *)(DmaTxDesc->Buffer2NextDescAddr); + + /* Check if the buffer is available */ + if((DmaTxDesc->Status & ETH_DMATXDESC_OWN) != (uint32_t)RESET) + { + errval = ERR_USE; + goto error; + } + + buffer = (uint8_t *)(DmaTxDesc->Buffer1Addr); + + byteslefttocopy = byteslefttocopy - (ETH_TX_BUF_SIZE - bufferoffset); + payloadoffset = payloadoffset + (ETH_TX_BUF_SIZE - bufferoffset); + framelength = framelength + (ETH_TX_BUF_SIZE - bufferoffset); + bufferoffset = 0; + } + + /* Copy the remaining bytes */ + memcpy( (uint8_t*)((uint8_t*)buffer + bufferoffset), (uint8_t*)((uint8_t*)q->payload + payloadoffset), byteslefttocopy ); + bufferoffset = bufferoffset + byteslefttocopy; + framelength = framelength + byteslefttocopy; + } + + /* Prepare transmit descriptors to give to DMA */ + HAL_ETH_TransmitFrame(&heth, framelength); + + errval = ERR_OK; + +error: + + /* When Transmit Underflow flag is set, clear it and issue a Transmit Poll Demand to resume transmission */ + if ((heth.Instance->DMASR & ETH_DMASR_TUS) != (uint32_t)RESET) + { + /* Clear TUS ETHERNET DMA flag */ + heth.Instance->DMASR = ETH_DMASR_TUS; + + /* Resume DMA transmission*/ + heth.Instance->DMATPDR = 0; + } + return errval; +} + +/** + * Should allocate a pbuf and transfer the bytes of the incoming + * packet from the interface into the pbuf. 
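+ *
+ * Allocation follows the usual lwIP pool pattern, sketched here for clarity
+ * (illustrative only):
+ *
+ *   struct pbuf *p = pbuf_alloc(PBUF_RAW, len, PBUF_POOL);
+ *   if (p != NULL) {
+ *     // copy the received frame into the (possibly chained) pbuf,
+ *     // then hand it to netif->input()
+ *   }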
+ * + * @param netif the lwip network interface structure for this ethernetif + * @return a pbuf filled with the received packet (including MAC header) + * NULL on memory error + */ +static struct pbuf * low_level_input(struct netif *netif) +{ + struct pbuf *p = NULL; + struct pbuf *q = NULL; + uint16_t len = 0; + uint8_t *buffer; + __IO ETH_DMADescTypeDef *dmarxdesc; + uint32_t bufferoffset = 0; + uint32_t payloadoffset = 0; + uint32_t byteslefttocopy = 0; + uint32_t i=0; + + /* get received frame */ + if (HAL_ETH_GetReceivedFrame_IT(&heth) != HAL_OK) + + return NULL; + + /* Obtain the size of the packet and put it into the "len" variable. */ + len = heth.RxFrameInfos.length; + buffer = (uint8_t *)heth.RxFrameInfos.buffer; + + if (len > 0) + { + /* We allocate a pbuf chain of pbufs from the Lwip buffer pool */ + p = pbuf_alloc(PBUF_RAW, len, PBUF_POOL); + } + + if (p != NULL) + { + dmarxdesc = heth.RxFrameInfos.FSRxDesc; + bufferoffset = 0; + for(q = p; q != NULL; q = q->next) + { + byteslefttocopy = q->len; + payloadoffset = 0; + + /* Check if the length of bytes to copy in current pbuf is bigger than Rx buffer size*/ + while( (byteslefttocopy + bufferoffset) > ETH_RX_BUF_SIZE ) + { + /* Copy data to pbuf */ + memcpy( (uint8_t*)((uint8_t*)q->payload + payloadoffset), (uint8_t*)((uint8_t*)buffer + bufferoffset), (ETH_RX_BUF_SIZE - bufferoffset)); + + /* Point to next descriptor */ + dmarxdesc = (ETH_DMADescTypeDef *)(dmarxdesc->Buffer2NextDescAddr); + buffer = (uint8_t *)(dmarxdesc->Buffer1Addr); + + byteslefttocopy = byteslefttocopy - (ETH_RX_BUF_SIZE - bufferoffset); + payloadoffset = payloadoffset + (ETH_RX_BUF_SIZE - bufferoffset); + bufferoffset = 0; + } + /* Copy remaining data in pbuf */ + memcpy( (uint8_t*)((uint8_t*)q->payload + payloadoffset), (uint8_t*)((uint8_t*)buffer + bufferoffset), byteslefttocopy); + bufferoffset = bufferoffset + byteslefttocopy; + } + } + + /* Release descriptors to DMA */ + /* Point to first descriptor */ + dmarxdesc = heth.RxFrameInfos.FSRxDesc; + /* Set Own bit in Rx descriptors: gives the buffers back to DMA */ + for (i=0; i< heth.RxFrameInfos.SegCount; i++) + { + dmarxdesc->Status |= ETH_DMARXDESC_OWN; + dmarxdesc = (ETH_DMADescTypeDef *)(dmarxdesc->Buffer2NextDescAddr); + } + + /* Clear Segment_Count */ + heth.RxFrameInfos.SegCount =0; + + /* When Rx Buffer unavailable flag is set: clear it and resume reception */ + if ((heth.Instance->DMASR & ETH_DMASR_RBUS) != (uint32_t)RESET) + { + /* Clear RBUS ETHERNET DMA flag */ + heth.Instance->DMASR = ETH_DMASR_RBUS; + /* Resume DMA reception */ + heth.Instance->DMARPDR = 0; + } + return p; +} + +/** + * This function should be called when a packet is ready to be read + * from the interface. It uses the function low_level_input() that + * should handle the actual reception of bytes from the network + * interface. Then the type of the received packet is determined and + * the appropriate input function is called. + * + * @param netif the lwip network interface structure for this ethernetif + */ +void ethernetif_input(void* argument) +{ + struct pbuf *p; + struct netif *netif = (struct netif *) argument; + + for( ;; ) + { + if (osSemaphoreAcquire(s_xSemaphore, TIME_WAITING_FOR_INPUT) == osOK) + { + do + { + LOCK_TCPIP_CORE(); + p = low_level_input( netif ); + if (p != NULL) + { + if (netif->input( p, netif) != ERR_OK ) + { + pbuf_free(p); + } + } + UNLOCK_TCPIP_CORE(); + } while(p!=NULL); + } + } +} + +#if !LWIP_ARP +/** + * This function has to be completed by user in case of ARP OFF. 
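+ *
+ * One possible completion (illustrative only, assuming the application
+ * resolves the destination MAC address itself and prepends the Ethernet
+ * header to q before sending):
+ *
+ *   // build the Ethernet header for ipaddr by application-specific means,
+ *   // then put the frame on the wire:
+ *   return low_level_output(netif, q);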
+ * + * @param netif the lwip network interface structure for this ethernetif + * @return ERR_OK if ... + */ +static err_t low_level_output_arp_off(struct netif *netif, struct pbuf *q, const ip4_addr_t *ipaddr) +{ + err_t errval; + errval = ERR_OK; + +/* USER CODE BEGIN 5 */ + +/* USER CODE END 5 */ + + return errval; + +} +#endif /* LWIP_ARP */ + +/** + * Should be called at the beginning of the program to set up the + * network interface. It calls the function low_level_init() to do the + * actual setup of the hardware. + * + * This function should be passed as a parameter to netif_add(). + * + * @param netif the lwip network interface structure for this ethernetif + * @return ERR_OK if the loopif is initialized + * ERR_MEM if private data couldn't be allocated + * any other err_t on error + */ +err_t ethernetif_init(struct netif *netif) +{ + LWIP_ASSERT("netif != NULL", (netif != NULL)); + +#if LWIP_NETIF_HOSTNAME + /* Initialize interface hostname */ + netif->hostname = "lwip"; +#endif /* LWIP_NETIF_HOSTNAME */ + + netif->name[0] = IFNAME0; + netif->name[1] = IFNAME1; + /* We directly use etharp_output() here to save a function call. + * You can instead declare your own function an call etharp_output() + * from it if you have to do some checks before sending (e.g. if link + * is available...) */ + +#if LWIP_IPV4 +#if LWIP_ARP || LWIP_ETHERNET +#if LWIP_ARP + netif->output = etharp_output; +#else + /* The user should write its own code in low_level_output_arp_off function */ + netif->output = low_level_output_arp_off; +#endif /* LWIP_ARP */ +#endif /* LWIP_ARP || LWIP_ETHERNET */ +#endif /* LWIP_IPV4 */ + +#if LWIP_IPV6 + netif->output_ip6 = ethip6_output; +#endif /* LWIP_IPV6 */ + + netif->linkoutput = low_level_output; + + /* initialize the hardware */ + low_level_init(netif); + + return ERR_OK; +} + +/* USER CODE BEGIN 6 */ + +/** +* @brief Returns the current time in milliseconds +* when LWIP_TIMERS == 1 and NO_SYS == 1 +* @param None +* @retval Time +*/ +u32_t sys_jiffies(void) +{ + return HAL_GetTick(); +} + +/** +* @brief Returns the current time in milliseconds +* when LWIP_TIMERS == 1 and NO_SYS == 1 +* @param None +* @retval Time +*/ +u32_t sys_now(void) +{ + return HAL_GetTick(); +} + +/* USER CODE END 6 */ + +/** + * @brief This function sets the netif link status. + * @param netif: the network interface + * @retval None + */ +void ethernetif_set_link(void* argument) + +{ + uint32_t regvalue = 0; + struct link_str *link_arg = (struct link_str *)argument; + + for(;;) + { + /* Read PHY_BSR*/ + HAL_ETH_ReadPHYRegister(&heth, PHY_BSR, ®value); + + regvalue &= PHY_LINKED_STATUS; + + /* Check whether the netif link down and the PHY link is up */ + if(!netif_is_link_up(link_arg->netif) && (regvalue)) + { + /* network cable is connected */ + netif_set_link_up(link_arg->netif); + } + else if(netif_is_link_up(link_arg->netif) && (!regvalue)) + { + /* network cable is dis-connected */ + netif_set_link_down(link_arg->netif); + } + + /* Suspend thread for 200 ms */ + osDelay(200); + } +} + +/* USER CODE BEGIN 7 */ + +/* USER CODE END 7 */ + +#if LWIP_NETIF_LINK_CALLBACK +/** + * @brief Link callback function, this function is called on change of link status + * to update low level driver configuration. 
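+ *
+ * The callback is typically registered once after netif_add(), e.g.
+ * (illustrative; gnetif stands for the application's netif instance):
+ *
+ *   netif_set_link_callback(&gnetif, ethernetif_update_config);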
+* @param netif: The network interface + * @retval None + */ +void ethernetif_update_config(struct netif *netif) +{ + __IO uint32_t tickstart = 0; + uint32_t regvalue = 0; + + if(netif_is_link_up(netif)) + { + /* Restart the auto-negotiation */ + if(heth.Init.AutoNegotiation != ETH_AUTONEGOTIATION_DISABLE) + { + /* Enable Auto-Negotiation */ + HAL_ETH_WritePHYRegister(&heth, PHY_BCR, PHY_AUTONEGOTIATION); + + /* Get tick */ + tickstart = HAL_GetTick(); + + /* Wait until the auto-negotiation will be completed */ + do + { + HAL_ETH_ReadPHYRegister(&heth, PHY_BSR, ®value); + + /* Check for the Timeout ( 1s ) */ + if((HAL_GetTick() - tickstart ) > 1000) + { + /* In case of timeout */ + goto error; + } + } while (((regvalue & PHY_AUTONEGO_COMPLETE) != PHY_AUTONEGO_COMPLETE)); + + /* Read the result of the auto-negotiation */ + HAL_ETH_ReadPHYRegister(&heth, PHY_SR, ®value); + + /* Configure the MAC with the Duplex Mode fixed by the auto-negotiation process */ + if((regvalue & PHY_DUPLEX_STATUS) != (uint32_t)RESET) + { + /* Set Ethernet duplex mode to Full-duplex following the auto-negotiation */ + heth.Init.DuplexMode = ETH_MODE_FULLDUPLEX; + } + else + { + /* Set Ethernet duplex mode to Half-duplex following the auto-negotiation */ + heth.Init.DuplexMode = ETH_MODE_HALFDUPLEX; + } + /* Configure the MAC with the speed fixed by the auto-negotiation process */ + if(regvalue & PHY_SPEED_STATUS) + { + /* Set Ethernet speed to 10M following the auto-negotiation */ + heth.Init.Speed = ETH_SPEED_10M; + } + else + { + /* Set Ethernet speed to 100M following the auto-negotiation */ + heth.Init.Speed = ETH_SPEED_100M; + } + } + else /* AutoNegotiation Disable */ + { + error : + /* Check parameters */ + assert_param(IS_ETH_SPEED(heth.Init.Speed)); + assert_param(IS_ETH_DUPLEX_MODE(heth.Init.DuplexMode)); + + /* Set MAC Speed and Duplex Mode to PHY */ + HAL_ETH_WritePHYRegister(&heth, PHY_BCR, ((uint16_t)(heth.Init.DuplexMode >> 3) | + (uint16_t)(heth.Init.Speed >> 1))); + } + + /* ETHERNET MAC Re-Configuration */ + HAL_ETH_ConfigMAC(&heth, (ETH_MACInitTypeDef *) NULL); + + /* Restart MAC interface */ + HAL_ETH_Start(&heth); + } + else + { + /* Stop MAC interface */ + HAL_ETH_Stop(&heth); + } + + ethernetif_notify_conn_changed(netif); +} + +/* USER CODE BEGIN 8 */ +/** + * @brief This function notify user about link status changement. + * @param netif: the network interface + * @retval None + */ +__weak void ethernetif_notify_conn_changed(struct netif *netif) +{ + /* NOTE : This is function could be implemented in user file + when the callback is needed, + */ + +} +/* USER CODE END 8 */ +#endif /* LWIP_NETIF_LINK_CALLBACK */ + +/* USER CODE BEGIN 9 */ + +/* USER CODE END 9 */ +/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ + diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/LWIP/Target/ethernetif.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/LWIP/Target/ethernetif.h new file mode 100644 index 0000000..f81fac6 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/LWIP/Target/ethernetif.h @@ -0,0 +1,55 @@ +/** + ****************************************************************************** + * File Name : ethernetif.h + * Description : This file provides initialization code for LWIP + * middleWare. + ****************************************************************************** + * @attention + * + *

+ * © Copyright (c) 2022 STMicroelectronics.
+ * All rights reserved.
+ * + * This software component is licensed by ST under Ultimate Liberty license + * SLA0044, the "License"; You may not use this file except in compliance with + * the License. You may obtain a copy of the License at: + * www.st.com/SLA0044 + * + ****************************************************************************** + */ + +#ifndef __ETHERNETIF_H__ +#define __ETHERNETIF_H__ + +#include "lwip/err.h" +#include "lwip/netif.h" +#include "cmsis_os.h" + +/* Exported types ------------------------------------------------------------*/ +/* Structure that include link thread parameters */ +struct link_str { + struct netif *netif; + osSemaphoreId semaphore; +}; + +/* Within 'USER CODE' section, code will be kept by default at each generation */ +/* USER CODE BEGIN 0 */ + +/* USER CODE END 0 */ + +/* Exported functions ------------------------------------------------------- */ +err_t ethernetif_init(struct netif *netif); + +void ethernetif_input(void* argument); +void ethernetif_set_link(void* argument); +void ethernetif_update_config(struct netif *netif); +void ethernetif_notify_conn_changed(struct netif *netif); + +u32_t sys_jiffies(void); +u32_t sys_now(void); + +/* USER CODE BEGIN 1 */ + +/* USER CODE END 1 */ +#endif + +/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/LWIP/Target/lwipopts.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/LWIP/Target/lwipopts.h new file mode 100644 index 0000000..1917cd8 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/LWIP/Target/lwipopts.h @@ -0,0 +1,128 @@ + +/** + ****************************************************************************** + * File Name : Target/lwipopts.h + * Description : This file overrides LwIP stack default configuration + * done in opt.h file. + ****************************************************************************** + * @attention + * + *

+ * © Copyright (c) 2022 STMicroelectronics.
+ * All rights reserved.
+ * + * This software component is licensed by ST under Ultimate Liberty license + * SLA0044, the "License"; You may not use this file except in compliance with + * the License. You may obtain a copy of the License at: + * www.st.com/SLA0044 + * + ****************************************************************************** + */ + +/* Define to prevent recursive inclusion --------------------------------------*/ +#ifndef __LWIPOPTS__H__ +#define __LWIPOPTS__H__ + +#include "main.h" + +/*-----------------------------------------------------------------------------*/ +/* Current version of LwIP supported by CubeMx: 2.1.2 -*/ +/*-----------------------------------------------------------------------------*/ + +/* Within 'USER CODE' section, code will be kept by default at each generation */ +/* USER CODE BEGIN 0 */ + +/* USER CODE END 0 */ + +#ifdef __cplusplus + extern "C" { +#endif + +/* STM32CubeMX Specific Parameters (not defined in opt.h) ---------------------*/ +/* Parameters set in STM32CubeMX LwIP Configuration GUI -*/ +/*----- WITH_RTOS enabled (Since FREERTOS is set) -----*/ +#define WITH_RTOS 1 +/*----- CHECKSUM_BY_HARDWARE enabled -----*/ +#define CHECKSUM_BY_HARDWARE 1 +/*-----------------------------------------------------------------------------*/ + +/* LwIP Stack Parameters (modified compared to initialization value in opt.h) -*/ +/* Parameters set in STM32CubeMX LwIP Configuration GUI -*/ +/*----- Default Value for LWIP_IGMP: 0 ---*/ +#define LWIP_IGMP 1 +/*----- Default Value for MEMP_NUM_UDP_PCB: 4 ---*/ +#define MEMP_NUM_UDP_PCB 15 +/*----- Value in opt.h for MEM_ALIGNMENT: 1 -----*/ +#define MEM_ALIGNMENT 4 +/*----- Default Value for MEM_SIZE: 1600 ---*/ +#define MEM_SIZE 30000 +/*----- Default Value for PBUF_POOL_SIZE: 16 ---*/ +#define PBUF_POOL_SIZE 20 +/*----- Value in opt.h for LWIP_ETHERNET: LWIP_ARP || PPPOE_SUPPORT -*/ +#define LWIP_ETHERNET 1 +/*----- Value in opt.h for LWIP_DNS_SECURE: (LWIP_DNS_SECURE_RAND_XID | LWIP_DNS_SECURE_NO_MULTIPLE_OUTSTANDING | LWIP_DNS_SECURE_RAND_SRC_PORT) -*/ +#define LWIP_DNS_SECURE 7 +/*----- Value in opt.h for TCP_SND_QUEUELEN: (4*TCP_SND_BUF + (TCP_MSS - 1))/TCP_MSS -----*/ +#define TCP_SND_QUEUELEN 9 +/*----- Value in opt.h for TCP_SNDLOWAT: LWIP_MIN(LWIP_MAX(((TCP_SND_BUF)/2), (2 * TCP_MSS) + 1), (TCP_SND_BUF) - 1) -*/ +#define TCP_SNDLOWAT 1071 +/*----- Value in opt.h for TCP_SNDQUEUELOWAT: LWIP_MAX(TCP_SND_QUEUELEN)/2, 5) -*/ +#define TCP_SNDQUEUELOWAT 5 +/*----- Value in opt.h for TCP_WND_UPDATE_THRESHOLD: LWIP_MIN(TCP_WND/4, TCP_MSS*4) -----*/ +#define TCP_WND_UPDATE_THRESHOLD 536 +/*----- Value in opt.h for LWIP_NETIF_LINK_CALLBACK: 0 -----*/ +#define LWIP_NETIF_LINK_CALLBACK 1 +/*----- Value in opt.h for TCPIP_THREAD_STACKSIZE: 0 -----*/ +#define TCPIP_THREAD_STACKSIZE 10000 +/*----- Value in opt.h for TCPIP_THREAD_PRIO: 1 -----*/ +#define TCPIP_THREAD_PRIO 20 +/*----- Value in opt.h for TCPIP_MBOX_SIZE: 0 -----*/ +#define TCPIP_MBOX_SIZE 6 +/*----- Value in opt.h for SLIPIF_THREAD_STACKSIZE: 0 -----*/ +#define SLIPIF_THREAD_STACKSIZE 1024 +/*----- Value in opt.h for SLIPIF_THREAD_PRIO: 1 -----*/ +#define SLIPIF_THREAD_PRIO 3 +/*----- Value in opt.h for DEFAULT_THREAD_STACKSIZE: 0 -----*/ +#define DEFAULT_THREAD_STACKSIZE 1024 +/*----- Value in opt.h for DEFAULT_THREAD_PRIO: 1 -----*/ +#define DEFAULT_THREAD_PRIO 3 +/*----- Value in opt.h for DEFAULT_UDP_RECVMBOX_SIZE: 0 -----*/ +#define DEFAULT_UDP_RECVMBOX_SIZE 6 +/*----- Value in opt.h for DEFAULT_TCP_RECVMBOX_SIZE: 0 -----*/ +#define DEFAULT_TCP_RECVMBOX_SIZE 6 
+/*----- Value in opt.h for DEFAULT_ACCEPTMBOX_SIZE: 0 -----*/ +#define DEFAULT_ACCEPTMBOX_SIZE 6 +/*----- Value in opt.h for RECV_BUFSIZE_DEFAULT: INT_MAX -----*/ +#define RECV_BUFSIZE_DEFAULT 2000000000 +/*----- Value in opt.h for LWIP_STATS: 1 -----*/ +#define LWIP_STATS 0 +/*----- Value in opt.h for CHECKSUM_GEN_IP: 1 -----*/ +#define CHECKSUM_GEN_IP 0 +/*----- Value in opt.h for CHECKSUM_GEN_UDP: 1 -----*/ +#define CHECKSUM_GEN_UDP 0 +/*----- Value in opt.h for CHECKSUM_GEN_TCP: 1 -----*/ +#define CHECKSUM_GEN_TCP 0 +/*----- Value in opt.h for CHECKSUM_GEN_ICMP: 1 -----*/ +#define CHECKSUM_GEN_ICMP 0 +/*----- Value in opt.h for CHECKSUM_GEN_ICMP6: 1 -----*/ +#define CHECKSUM_GEN_ICMP6 0 +/*----- Value in opt.h for CHECKSUM_CHECK_IP: 1 -----*/ +#define CHECKSUM_CHECK_IP 0 +/*----- Value in opt.h for CHECKSUM_CHECK_UDP: 1 -----*/ +#define CHECKSUM_CHECK_UDP 0 +/*----- Value in opt.h for CHECKSUM_CHECK_TCP: 1 -----*/ +#define CHECKSUM_CHECK_TCP 0 +/*----- Value in opt.h for CHECKSUM_CHECK_ICMP: 1 -----*/ +#define CHECKSUM_CHECK_ICMP 0 +/*----- Value in opt.h for CHECKSUM_CHECK_ICMP6: 1 -----*/ +#define CHECKSUM_CHECK_ICMP6 0 +/*-----------------------------------------------------------------------------*/ +/* USER CODE BEGIN 1 */ + +/* USER CODE END 1 */ + +#ifdef __cplusplus +} +#endif +#endif /*__LWIPOPTS__H__ */ + +/************************* (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/FreeRTOS/Source/CMSIS_RTOS_V2/cmsis_os.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/FreeRTOS/Source/CMSIS_RTOS_V2/cmsis_os.h new file mode 100644 index 0000000..6375df1 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/FreeRTOS/Source/CMSIS_RTOS_V2/cmsis_os.h @@ -0,0 +1,846 @@ +/* + * Copyright (c) 2013-2019 ARM Limited. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * ---------------------------------------------------------------------- + * + * $Date: 10. 
January 2017 + * $Revision: V2.1.0 + * + * Project: CMSIS-RTOS API + * Title: cmsis_os.h FreeRTOS header file + * + * Version 0.02 + * Initial Proposal Phase + * Version 0.03 + * osKernelStart added, optional feature: main started as thread + * osSemaphores have standard behavior + * osTimerCreate does not start the timer, added osTimerStart + * osThreadPass is renamed to osThreadYield + * Version 1.01 + * Support for C++ interface + * - const attribute removed from the osXxxxDef_t typedefs + * - const attribute added to the osXxxxDef macros + * Added: osTimerDelete, osMutexDelete, osSemaphoreDelete + * Added: osKernelInitialize + * Version 1.02 + * Control functions for short timeouts in microsecond resolution: + * Added: osKernelSysTick, osKernelSysTickFrequency, osKernelSysTickMicroSec + * Removed: osSignalGet + * Version 2.0.0 + * OS objects creation without macros (dynamic creation and resource allocation): + * - added: osXxxxNew functions which replace osXxxxCreate + * - added: osXxxxAttr_t structures + * - deprecated: osXxxxCreate functions, osXxxxDef_t structures + * - deprecated: osXxxxDef and osXxxx macros + * osStatus codes simplified and renamed to osStatus_t + * osEvent return structure deprecated + * Kernel: + * - added: osKernelInfo_t and osKernelGetInfo + * - added: osKernelState_t and osKernelGetState (replaces osKernelRunning) + * - added: osKernelLock, osKernelUnlock + * - added: osKernelSuspend, osKernelResume + * - added: osKernelGetTickCount, osKernelGetTickFreq + * - renamed osKernelSysTick to osKernelGetSysTimerCount + * - replaced osKernelSysTickFrequency with osKernelGetSysTimerFreq + * - deprecated osKernelSysTickMicroSec + * Thread: + * - extended number of thread priorities + * - renamed osPrioriry to osPrioriry_t + * - replaced osThreadCreate with osThreadNew + * - added: osThreadGetName + * - added: osThreadState_t and osThreadGetState + * - added: osThreadGetStackSize, osThreadGetStackSpace + * - added: osThreadSuspend, osThreadResume + * - added: osThreadJoin, osThreadDetach, osThreadExit + * - added: osThreadGetCount, osThreadEnumerate + * - added: Thread Flags (moved from Signals) + * Signals: + * - renamed osSignals to osThreadFlags (moved to Thread Flags) + * - changed return value of Set/Clear/Wait functions + * - Clear function limited to current running thread + * - extended Wait function (options) + * - added: osThreadFlagsGet + * Event Flags: + * - added new independent object for handling Event Flags + * Delay and Wait functions: + * - added: osDelayUntil + * - deprecated: osWait + * Timer: + * - replaced osTimerCreate with osTimerNew + * - added: osTimerGetName, osTimerIsRunning + * Mutex: + * - extended: attributes (Recursive, Priority Inherit, Robust) + * - replaced osMutexCreate with osMutexNew + * - renamed osMutexWait to osMutexAcquire + * - added: osMutexGetName, osMutexGetOwner + * Semaphore: + * - extended: maximum and initial token count + * - replaced osSemaphoreCreate with osSemaphoreNew + * - renamed osSemaphoreWait to osSemaphoreAcquire (changed return value) + * - added: osSemaphoreGetName, osSemaphoreGetCount + * Memory Pool: + * - using osMemoryPool prefix instead of osPool + * - replaced osPoolCreate with osMemoryPoolNew + * - extended osMemoryPoolAlloc (timeout) + * - added: osMemoryPoolGetName + * - added: osMemoryPoolGetCapacity, osMemoryPoolGetBlockSize + * - added: osMemoryPoolGetCount, osMemoryPoolGetSpace + * - added: osMemoryPoolDelete + * - deprecated: osPoolCAlloc + * Message Queue: + * - extended: fixed size message 
instead of a single 32-bit value + * - using osMessageQueue prefix instead of osMessage + * - replaced osMessageCreate with osMessageQueueNew + * - updated: osMessageQueuePut, osMessageQueueGet + * - added: osMessageQueueGetName + * - added: osMessageQueueGetCapacity, osMessageQueueGetMsgSize + * - added: osMessageQueueGetCount, osMessageQueueGetSpace + * - added: osMessageQueueReset, osMessageQueueDelete + * Mail Queue: + * - deprecated (superseded by extended Message Queue functionality) + * Version 2.1.0 + * Support for critical and uncritical sections (nesting safe): + * - updated: osKernelLock, osKernelUnlock + * - added: osKernelRestoreLock + * Updated Thread and Event Flags: + * - changed flags parameter and return type from int32_t to uint32_t + *---------------------------------------------------------------------------*/ + +#ifndef CMSIS_OS_H_ +#define CMSIS_OS_H_ + +#include "FreeRTOS.h" +#include "task.h" + +#define RTOS_ID_n ((tskKERNEL_VERSION_MAJOR << 16) | (tskKERNEL_VERSION_MINOR)) +#define RTOS_ID_s ("FreeRTOS " tskKERNEL_VERSION_NUMBER) + +#define osCMSIS 0x20001U ///< API version (main[31:16].sub[15:0]) + +#define osCMSIS_FreeRTOS RTOS_ID_n ///< RTOS identification and version (main[31:16].sub[15:0]) + +#define osKernelSystemId RTOS_ID_s ///< RTOS identification string + +#define osFeature_MainThread 0 ///< main thread 1=main can be thread, 0=not available +#define osFeature_Signals 24U ///< maximum number of Signal Flags available per thread +#define osFeature_Semaphore 65535U ///< maximum count for \ref osSemaphoreCreate function +#define osFeature_Wait 0 ///< osWait function: 1=available, 0=not available +#define osFeature_SysTick 1 ///< osKernelSysTick functions: 1=available, 0=not available +#define osFeature_Pool 0 ///< Memory Pools: 1=available, 0=not available +#define osFeature_MessageQ 1 ///< Message Queues: 1=available, 0=not available +#define osFeature_MailQ 0 ///< Mail Queues: 1=available, 0=not available + +#if defined(__CC_ARM) +#define os_InRegs __value_in_regs +#elif defined(__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050) +#define os_InRegs __attribute__((value_in_regs)) +#else +#define os_InRegs +#endif + +#include "cmsis_os2.h" + +#ifdef __cplusplus +extern "C" +{ +#endif + + +// ==== Enumerations, structures, defines ==== + +/// Priority values. +#if (osCMSIS < 0x20000U) +typedef enum { + osPriorityIdle = -3, ///< Priority: idle (lowest) + osPriorityLow = -2, ///< Priority: low + osPriorityBelowNormal = -1, ///< Priority: below normal + osPriorityNormal = 0, ///< Priority: normal (default) + osPriorityAboveNormal = +1, ///< Priority: above normal + osPriorityHigh = +2, ///< Priority: high + osPriorityRealtime = +3, ///< Priority: realtime (highest) + osPriorityError = 0x84, ///< System cannot determine priority or illegal priority. + osPriorityReserved = 0x7FFFFFFF ///< Prevents enum down-size compiler optimization. +} osPriority; +#else +#define osPriority osPriority_t +#endif + +/// Entry point of a thread. +typedef void (*os_pthread) (void const *argument); + +/// Entry point of a timer call back function. +typedef void (*os_ptimer) (void const *argument); + +/// Timer type. +#if (osCMSIS < 0x20000U) +typedef enum { + osTimerOnce = 0, ///< One-shot timer. + osTimerPeriodic = 1 ///< Repeating timer. +} os_timer_type; +#else +#define os_timer_type osTimerType_t +#endif + +/// Timeout value. +#define osWaitForever 0xFFFFFFFFU ///< Wait forever timeout value. + +/// Status code values returned by CMSIS-RTOS functions. 
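+/// A typical check of a returned status looks like this (illustrative):
+/// \code
+///   if (osDelay(10U) != osOK) {
+///     // handle the RTOS error
+///   }
+/// \endcode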
+#if (osCMSIS < 0x20000U) +typedef enum { + osOK = 0, ///< Function completed; no error or event occurred. + osEventSignal = 0x08, ///< Function completed; signal event occurred. + osEventMessage = 0x10, ///< Function completed; message event occurred. + osEventMail = 0x20, ///< Function completed; mail event occurred. + osEventTimeout = 0x40, ///< Function completed; timeout occurred. + osErrorParameter = 0x80, ///< Parameter error: a mandatory parameter was missing or specified an incorrect object. + osErrorResource = 0x81, ///< Resource not available: a specified resource was not available. + osErrorTimeoutResource = 0xC1, ///< Resource not available within given time: a specified resource was not available within the timeout period. + osErrorISR = 0x82, ///< Not allowed in ISR context: the function cannot be called from interrupt service routines. + osErrorISRRecursive = 0x83, ///< Function called multiple times from ISR with same object. + osErrorPriority = 0x84, ///< System cannot determine priority or thread has illegal priority. + osErrorNoMemory = 0x85, ///< System is out of memory: it was impossible to allocate or reserve memory for the operation. + osErrorValue = 0x86, ///< Value of a parameter is out of range. + osErrorOS = 0xFF, ///< Unspecified RTOS error: run-time error but no other error message fits. + osStatusReserved = 0x7FFFFFFF ///< Prevents enum down-size compiler optimization. +} osStatus; +#else +typedef int32_t osStatus; +#define osEventSignal (0x08) +#define osEventMessage (0x10) +#define osEventMail (0x20) +#define osEventTimeout (0x40) +#define osErrorOS osError +#define osErrorTimeoutResource osErrorTimeout +#define osErrorISRRecursive (-126) +#define osErrorValue (-127) +#define osErrorPriority (-128) +#endif + + +// >>> the following data type definitions may be adapted towards a specific RTOS + +/// Thread ID identifies the thread. +#if (osCMSIS < 0x20000U) +typedef void *osThreadId; +#else +#define osThreadId osThreadId_t +#endif + +/// Timer ID identifies the timer. +#if (osCMSIS < 0x20000U) +typedef void *osTimerId; +#else +#define osTimerId osTimerId_t +#endif + +/// Mutex ID identifies the mutex. +#if (osCMSIS < 0x20000U) +typedef void *osMutexId; +#else +#define osMutexId osMutexId_t +#endif + +/// Semaphore ID identifies the semaphore. +#if (osCMSIS < 0x20000U) +typedef void *osSemaphoreId; +#else +#define osSemaphoreId osSemaphoreId_t +#endif + +/// Pool ID identifies the memory pool. +typedef void *osPoolId; + +/// Message ID identifies the message queue. +typedef void *osMessageQId; + +/// Mail ID identifies the mail queue. +typedef void *osMailQId; + + +/// Thread Definition structure contains startup information of a thread. +#if (osCMSIS < 0x20000U) +typedef struct os_thread_def { + os_pthread pthread; ///< start address of thread function + osPriority tpriority; ///< initial thread priority + uint32_t instances; ///< maximum number of instances of that thread function + uint32_t stacksize; ///< stack size requirements in bytes; 0 is default stack size +} osThreadDef_t; +#else +typedef struct os_thread_def { + os_pthread pthread; ///< start address of thread function + osThreadAttr_t attr; ///< thread attributes +} osThreadDef_t; +#endif + +/// Timer Definition structure contains timer parameters. 
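+/// Typical usage of the timer API declared further below (illustrative):
+/// \code
+///   void blink_cb (void const *argument);                  // user callback
+///   osTimerDef(blink, blink_cb);                           // timer definition
+///   osTimerId tid = osTimerCreate(osTimer(blink), osTimerPeriodic, NULL);
+///   osTimerStart(tid, 500U);                               // period in ms
+/// \endcode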
+#if (osCMSIS < 0x20000U) +typedef struct os_timer_def { + os_ptimer ptimer; ///< start address of a timer function +} osTimerDef_t; +#else +typedef struct os_timer_def { + os_ptimer ptimer; ///< start address of a timer function + osTimerAttr_t attr; ///< timer attributes +} osTimerDef_t; +#endif + +/// Mutex Definition structure contains setup information for a mutex. +#if (osCMSIS < 0x20000U) +typedef struct os_mutex_def { + uint32_t dummy; ///< dummy value +} osMutexDef_t; +#else +#define osMutexDef_t osMutexAttr_t +#endif + +/// Semaphore Definition structure contains setup information for a semaphore. +#if (osCMSIS < 0x20000U) +typedef struct os_semaphore_def { + uint32_t dummy; ///< dummy value +} osSemaphoreDef_t; +#else +#define osSemaphoreDef_t osSemaphoreAttr_t +#endif + +/// Definition structure for memory block allocation. +#if (osCMSIS < 0x20000U) +typedef struct os_pool_def { + uint32_t pool_sz; ///< number of items (elements) in the pool + uint32_t item_sz; ///< size of an item + void *pool; ///< pointer to memory for pool +} osPoolDef_t; +#else +typedef struct os_pool_def { + uint32_t pool_sz; ///< number of items (elements) in the pool + uint32_t item_sz; ///< size of an item + osMemoryPoolAttr_t attr; ///< memory pool attributes +} osPoolDef_t; +#endif + +/// Definition structure for message queue. +#if (osCMSIS < 0x20000U) +typedef struct os_messageQ_def { + uint32_t queue_sz; ///< number of elements in the queue + void *pool; ///< memory array for messages +} osMessageQDef_t; +#else +typedef struct os_messageQ_def { + uint32_t queue_sz; ///< number of elements in the queue + osMessageQueueAttr_t attr; ///< message queue attributes +} osMessageQDef_t; +#endif + +/// Definition structure for mail queue. +#if (osCMSIS < 0x20000U) +typedef struct os_mailQ_def { + uint32_t queue_sz; ///< number of elements in the queue + uint32_t item_sz; ///< size of an item + void *pool; ///< memory array for mail +} osMailQDef_t; +#else +typedef struct os_mailQ_def { + uint32_t queue_sz; ///< number of elements in the queue + uint32_t item_sz; ///< size of an item + void *mail; ///< pointer to mail + osMemoryPoolAttr_t mp_attr; ///< memory pool attributes + osMessageQueueAttr_t mq_attr; ///< message queue attributes +} osMailQDef_t; +#endif + + +/// Event structure contains detailed information about an event. +typedef struct { + osStatus status; ///< status code: event or error information + union { + uint32_t v; ///< message as 32-bit value + void *p; ///< message or mail as void pointer + int32_t signals; ///< signal flags + } value; ///< event value + union { + osMailQId mail_id; ///< mail id obtained by \ref osMailCreate + osMessageQId message_id; ///< message id obtained by \ref osMessageCreate + } def; ///< event definition +} osEvent; + + +// ==== Kernel Management Functions ==== + +/// Initialize the RTOS Kernel for creating objects. +/// \return status code that indicates the execution status of the function. +#if (osCMSIS < 0x20000U) +osStatus osKernelInitialize (void); +#endif + +/// Start the RTOS Kernel scheduler. +/// \return status code that indicates the execution status of the function. +#if (osCMSIS < 0x20000U) +osStatus osKernelStart (void); +#endif + +/// Check if the RTOS kernel is already started. +/// \return 0 RTOS is not started, 1 RTOS is started. +#if (osCMSIS < 0x20000U) +int32_t osKernelRunning(void); +#endif + +#if (defined(osFeature_SysTick) && (osFeature_SysTick != 0)) // System Timer available + +/// Get the RTOS kernel system timer counter. 
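+/// A common use is measuring short delays together with
+/// \ref osKernelSysTickMicroSec (illustrative):
+/// \code
+///   uint32_t start = osKernelSysTick();
+///   while ((osKernelSysTick() - start) < osKernelSysTickMicroSec(100)) { }
+/// \endcode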
+/// \return RTOS kernel system timer as 32-bit value +#if (osCMSIS < 0x20000U) +uint32_t osKernelSysTick (void); +#else +#define osKernelSysTick osKernelGetSysTimerCount +#endif + +/// The RTOS kernel system timer frequency in Hz. +/// \note Reflects the system timer setting and is typically defined in a configuration file. +#if (osCMSIS < 0x20000U) +#define osKernelSysTickFrequency 100000000 +#endif + +/// Convert a microseconds value to a RTOS kernel system timer value. +/// \param microsec time value in microseconds. +/// \return time value normalized to the \ref osKernelSysTickFrequency +#if (osCMSIS < 0x20000U) +#define osKernelSysTickMicroSec(microsec) (((uint64_t)microsec * (osKernelSysTickFrequency)) / 1000000) +#else +#define osKernelSysTickMicroSec(microsec) (((uint64_t)microsec * osKernelGetSysTimerFreq()) / 1000000) +#endif + +#endif // System Timer available + + +// ==== Thread Management Functions ==== + +/// Create a Thread Definition with function, priority, and stack requirements. +/// \param name name of the thread function. +/// \param priority initial priority of the thread function. +/// \param instances number of possible thread instances. +/// \param stacksz stack size (in bytes) requirements for the thread function. +#if defined (osObjectsExternal) // object is external +#define osThreadDef(name, priority, instances, stacksz) \ +extern const osThreadDef_t os_thread_def_##name +#else // define the object +#define osThreadDef(name, priority, instances, stacksz) \ +static uint64_t os_thread_stack##name[(stacksz)?(((stacksz+7)/8)):1]; \ +static StaticTask_t os_thread_cb_##name; \ +const osThreadDef_t os_thread_def_##name = \ +{ (name), \ + { NULL, osThreadDetached, \ + (instances == 1) ? (&os_thread_cb_##name) : NULL,\ + (instances == 1) ? sizeof(StaticTask_t) : 0U, \ + ((stacksz) && (instances == 1)) ? (&os_thread_stack##name) : NULL, \ + 8*((stacksz+7)/8), \ + (priority), 0U, 0U } } +#endif + +/// Access a Thread definition. +/// \param name name of the thread definition object. +#define osThread(name) \ +&os_thread_def_##name + +/// Create a thread and add it to Active Threads and set it to state READY. +/// \param[in] thread_def thread definition referenced with \ref osThread. +/// \param[in] argument pointer that is passed to the thread function as start argument. +/// \return thread ID for reference by other functions or NULL in case of error. +osThreadId osThreadCreate (const osThreadDef_t *thread_def, void *argument); + +/// Return the thread ID of the current running thread. +/// \return thread ID for reference by other functions or NULL in case of error. +#if (osCMSIS < 0x20000U) +osThreadId osThreadGetId (void); +#endif + +/// Change priority of a thread. +/// \param[in] thread_id thread ID obtained by \ref osThreadCreate or \ref osThreadGetId. +/// \param[in] priority new priority value for the thread function. +/// \return status code that indicates the execution status of the function. +#if (osCMSIS < 0x20000U) +osStatus osThreadSetPriority (osThreadId thread_id, osPriority priority); +#endif + +/// Get current priority of a thread. +/// \param[in] thread_id thread ID obtained by \ref osThreadCreate or \ref osThreadGetId. +/// \return current priority value of the specified thread. +#if (osCMSIS < 0x20000U) +osPriority osThreadGetPriority (osThreadId thread_id); +#endif + +/// Pass control to next thread that is in state \b READY. +/// \return status code that indicates the execution status of the function. 
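+/// Illustrative use inside a cooperative worker loop:
+/// \code
+///   for (;;) {
+///     process_one_item();   // hypothetical application function
+///     osThreadYield();      // give other READY threads of equal priority a turn
+///   }
+/// \endcode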
+#if (osCMSIS < 0x20000U) +osStatus osThreadYield (void); +#endif + +/// Terminate execution of a thread. +/// \param[in] thread_id thread ID obtained by \ref osThreadCreate or \ref osThreadGetId. +/// \return status code that indicates the execution status of the function. +#if (osCMSIS < 0x20000U) +osStatus osThreadTerminate (osThreadId thread_id); +#endif + + +// ==== Signal Management ==== + +/// Set the specified Signal Flags of an active thread. +/// \param[in] thread_id thread ID obtained by \ref osThreadCreate or \ref osThreadGetId. +/// \param[in] signals specifies the signal flags of the thread that should be set. +/// \return previous signal flags of the specified thread or 0x80000000 in case of incorrect parameters. +int32_t osSignalSet (osThreadId thread_id, int32_t signals); + +/// Clear the specified Signal Flags of an active thread. +/// \param[in] thread_id thread ID obtained by \ref osThreadCreate or \ref osThreadGetId. +/// \param[in] signals specifies the signal flags of the thread that shall be cleared. +/// \return previous signal flags of the specified thread or 0x80000000 in case of incorrect parameters or call from ISR. +int32_t osSignalClear (osThreadId thread_id, int32_t signals); + +/// Wait for one or more Signal Flags to become signaled for the current \b RUNNING thread. +/// \param[in] signals wait until all specified signal flags set or 0 for any single signal flag. +/// \param[in] millisec \ref CMSIS_RTOS_TimeOutValue or 0 in case of no time-out. +/// \return event flag information or error code. +os_InRegs osEvent osSignalWait (int32_t signals, uint32_t millisec); + + +// ==== Generic Wait Functions ==== + +/// Wait for Timeout (Time Delay). +/// \param[in] millisec \ref CMSIS_RTOS_TimeOutValue "time delay" value +/// \return status code that indicates the execution status of the function. +#if (osCMSIS < 0x20000U) +osStatus osDelay (uint32_t millisec); +#endif + +#if (defined (osFeature_Wait) && (osFeature_Wait != 0)) // Generic Wait available + +/// Wait for Signal, Message, Mail, or Timeout. +/// \param[in] millisec \ref CMSIS_RTOS_TimeOutValue or 0 in case of no time-out +/// \return event that contains signal, message, or mail information or error code. +os_InRegs osEvent osWait (uint32_t millisec); + +#endif // Generic Wait available + + +// ==== Timer Management Functions ==== + +/// Define a Timer object. +/// \param name name of the timer object. +/// \param function name of the timer call back function. +#if defined (osObjectsExternal) // object is external +#define osTimerDef(name, function) \ +extern const osTimerDef_t os_timer_def_##name +#else // define the object +#define osTimerDef(name, function) \ +static StaticTimer_t os_timer_cb_##name; \ +const osTimerDef_t os_timer_def_##name = \ +{ (function), { NULL, 0U, (&os_timer_cb_##name), sizeof(StaticTimer_t) } } +#endif + +/// Access a Timer definition. +/// \param name name of the timer object. +#define osTimer(name) \ +&os_timer_def_##name + +/// Create and Initialize a timer. +/// \param[in] timer_def timer object referenced with \ref osTimer. +/// \param[in] type osTimerOnce for one-shot or osTimerPeriodic for periodic behavior. +/// \param[in] argument argument to the timer call back function. +/// \return timer ID for reference by other functions or NULL in case of error. +osTimerId osTimerCreate (const osTimerDef_t *timer_def, os_timer_type type, void *argument); + +/// Start or restart a timer. +/// \param[in] timer_id timer ID obtained by \ref osTimerCreate. 
+/// \param[in] millisec \ref CMSIS_RTOS_TimeOutValue "time delay" value of the timer. +/// \return status code that indicates the execution status of the function. +#if (osCMSIS < 0x20000U) +osStatus osTimerStart (osTimerId timer_id, uint32_t millisec); +#endif + +/// Stop a timer. +/// \param[in] timer_id timer ID obtained by \ref osTimerCreate. +/// \return status code that indicates the execution status of the function. +#if (osCMSIS < 0x20000U) +osStatus osTimerStop (osTimerId timer_id); +#endif + +/// Delete a timer. +/// \param[in] timer_id timer ID obtained by \ref osTimerCreate. +/// \return status code that indicates the execution status of the function. +#if (osCMSIS < 0x20000U) +osStatus osTimerDelete (osTimerId timer_id); +#endif + + +// ==== Mutex Management Functions ==== + +/// Define a Mutex. +/// \param name name of the mutex object. +#if defined (osObjectsExternal) // object is external +#define osMutexDef(name) \ +extern const osMutexDef_t os_mutex_def_##name +#else // define the object +#define osMutexDef(name) \ +static StaticSemaphore_t os_mutex_cb_##name; \ +const osMutexDef_t os_mutex_def_##name = \ +{ NULL, osMutexRecursive | osMutexPrioInherit, (&os_mutex_cb_##name), sizeof(StaticSemaphore_t) } +#endif + +/// Access a Mutex definition. +/// \param name name of the mutex object. +#define osMutex(name) \ +&os_mutex_def_##name + +/// Create and Initialize a Mutex object. +/// \param[in] mutex_def mutex definition referenced with \ref osMutex. +/// \return mutex ID for reference by other functions or NULL in case of error. +osMutexId osMutexCreate (const osMutexDef_t *mutex_def); + +/// Wait until a Mutex becomes available. +/// \param[in] mutex_id mutex ID obtained by \ref osMutexCreate. +/// \param[in] millisec \ref CMSIS_RTOS_TimeOutValue or 0 in case of no time-out. +/// \return status code that indicates the execution status of the function. +#if (osCMSIS < 0x20000U) +osStatus osMutexWait (osMutexId mutex_id, uint32_t millisec); +#else +#define osMutexWait osMutexAcquire +#endif + +/// Release a Mutex that was obtained by \ref osMutexWait. +/// \param[in] mutex_id mutex ID obtained by \ref osMutexCreate. +/// \return status code that indicates the execution status of the function. +#if (osCMSIS < 0x20000U) +osStatus osMutexRelease (osMutexId mutex_id); +#endif + +/// Delete a Mutex object. +/// \param[in] mutex_id mutex ID obtained by \ref osMutexCreate. +/// \return status code that indicates the execution status of the function. +#if (osCMSIS < 0x20000U) +osStatus osMutexDelete (osMutexId mutex_id); +#endif + + +// ==== Semaphore Management Functions ==== + +#if (defined (osFeature_Semaphore) && (osFeature_Semaphore != 0U)) // Semaphore available + +/// Define a Semaphore object. +/// \param name name of the semaphore object. +#if defined (osObjectsExternal) // object is external +#define osSemaphoreDef(name) \ +extern const osSemaphoreDef_t os_semaphore_def_##name +#else // define the object +#define osSemaphoreDef(name) \ +static StaticSemaphore_t os_semaphore_cb_##name; \ +const osSemaphoreDef_t os_semaphore_def_##name = \ +{ NULL, 0U, (&os_semaphore_cb_##name), sizeof(StaticSemaphore_t) } +#endif + +/// Access a Semaphore definition. +/// \param name name of the semaphore object. +#define osSemaphore(name) \ +&os_semaphore_def_##name + +/// Create and Initialize a Semaphore object. +/// \param[in] semaphore_def semaphore definition referenced with \ref osSemaphore. +/// \param[in] count maximum and initial number of available tokens. 
+/// \return semaphore ID for reference by other functions or NULL in case of error. +osSemaphoreId osSemaphoreCreate (const osSemaphoreDef_t *semaphore_def, int32_t count); + +/// Wait until a Semaphore token becomes available. +/// \param[in] semaphore_id semaphore object referenced with \ref osSemaphoreCreate. +/// \param[in] millisec \ref CMSIS_RTOS_TimeOutValue or 0 in case of no time-out. +/// \return number of available tokens, or -1 in case of incorrect parameters. +int32_t osSemaphoreWait (osSemaphoreId semaphore_id, uint32_t millisec); + +/// Release a Semaphore token. +/// \param[in] semaphore_id semaphore object referenced with \ref osSemaphoreCreate. +/// \return status code that indicates the execution status of the function. +#if (osCMSIS < 0x20000U) +osStatus osSemaphoreRelease (osSemaphoreId semaphore_id); +#endif + +/// Delete a Semaphore object. +/// \param[in] semaphore_id semaphore object referenced with \ref osSemaphoreCreate. +/// \return status code that indicates the execution status of the function. +#if (osCMSIS < 0x20000U) +osStatus osSemaphoreDelete (osSemaphoreId semaphore_id); +#endif + +#endif // Semaphore available + + +// ==== Memory Pool Management Functions ==== + +#if (defined(osFeature_Pool) && (osFeature_Pool != 0)) // Memory Pool available + +/// \brief Define a Memory Pool. +/// \param name name of the memory pool. +/// \param no maximum number of blocks (objects) in the memory pool. +/// \param type data type of a single block (object). +#if defined (osObjectsExternal) // object is external +#define osPoolDef(name, no, type) \ +extern const osPoolDef_t os_pool_def_##name +#else // define the object +#define osPoolDef(name, no, type) \ +const osPoolDef_t os_pool_def_##name = \ +{ (no), sizeof(type), {NULL} } +#endif + +/// \brief Access a Memory Pool definition. +/// \param name name of the memory pool +#define osPool(name) \ +&os_pool_def_##name + +/// Create and Initialize a Memory Pool object. +/// \param[in] pool_def memory pool definition referenced with \ref osPool. +/// \return memory pool ID for reference by other functions or NULL in case of error. +osPoolId osPoolCreate (const osPoolDef_t *pool_def); + +/// Allocate a memory block from a Memory Pool. +/// \param[in] pool_id memory pool ID obtain referenced with \ref osPoolCreate. +/// \return address of the allocated memory block or NULL in case of no memory available. +void *osPoolAlloc (osPoolId pool_id); + +/// Allocate a memory block from a Memory Pool and set memory block to zero. +/// \param[in] pool_id memory pool ID obtain referenced with \ref osPoolCreate. +/// \return address of the allocated memory block or NULL in case of no memory available. +void *osPoolCAlloc (osPoolId pool_id); + +/// Return an allocated memory block back to a Memory Pool. +/// \param[in] pool_id memory pool ID obtain referenced with \ref osPoolCreate. +/// \param[in] block address of the allocated memory block to be returned to the memory pool. +/// \return status code that indicates the execution status of the function. +osStatus osPoolFree (osPoolId pool_id, void *block); + +#endif // Memory Pool available + + +// ==== Message Queue Management Functions ==== + +#if (defined(osFeature_MessageQ) && (osFeature_MessageQ != 0)) // Message Queue available + +/// \brief Create a Message Queue Definition. +/// \param name name of the queue. +/// \param queue_sz maximum number of messages in the queue. +/// \param type data type of a single message element (for debugger). 
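+/// Illustrative producer/consumer usage of the message queue API below:
+/// \code
+///   osMessageQDef(evq, 16, uint32_t);                          // 16 32-bit messages
+///   osMessageQId qid = osMessageCreate(osMessageQ(evq), NULL);
+///   osMessagePut(qid, 42U, osWaitForever);                     // producer side
+///   osEvent evt = osMessageGet(qid, osWaitForever);            // consumer side
+///   if (evt.status == osEventMessage) { uint32_t v = evt.value.v; (void)v; }
+/// \endcode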
+#if defined (osObjectsExternal) // object is external +#define osMessageQDef(name, queue_sz, type) \ +extern const osMessageQDef_t os_messageQ_def_##name +#else // define the object +#define osMessageQDef(name, queue_sz, type) \ +static StaticQueue_t os_mq_cb_##name; \ +static uint32_t os_mq_data_##name[(queue_sz) * sizeof(type)]; \ +const osMessageQDef_t os_messageQ_def_##name = \ +{ (queue_sz), \ + { NULL, 0U, (&os_mq_cb_##name), sizeof(StaticQueue_t), \ + (&os_mq_data_##name), sizeof(os_mq_data_##name) } } +#endif + +/// \brief Access a Message Queue Definition. +/// \param name name of the queue +#define osMessageQ(name) \ +&os_messageQ_def_##name + +/// Create and Initialize a Message Queue object. +/// \param[in] queue_def message queue definition referenced with \ref osMessageQ. +/// \param[in] thread_id thread ID (obtained by \ref osThreadCreate or \ref osThreadGetId) or NULL. +/// \return message queue ID for reference by other functions or NULL in case of error. +osMessageQId osMessageCreate (const osMessageQDef_t *queue_def, osThreadId thread_id); + +/// Put a Message to a Queue. +/// \param[in] queue_id message queue ID obtained with \ref osMessageCreate. +/// \param[in] info message information. +/// \param[in] millisec \ref CMSIS_RTOS_TimeOutValue or 0 in case of no time-out. +/// \return status code that indicates the execution status of the function. +osStatus osMessagePut (osMessageQId queue_id, uint32_t info, uint32_t millisec); + +/// Get a Message from a Queue or timeout if Queue is empty. +/// \param[in] queue_id message queue ID obtained with \ref osMessageCreate. +/// \param[in] millisec \ref CMSIS_RTOS_TimeOutValue or 0 in case of no time-out. +/// \return event information that includes status code. +os_InRegs osEvent osMessageGet (osMessageQId queue_id, uint32_t millisec); + +#endif // Message Queue available + + +// ==== Mail Queue Management Functions ==== + +#if (defined(osFeature_MailQ) && (osFeature_MailQ != 0)) // Mail Queue available + +/// \brief Create a Mail Queue Definition. +/// \param name name of the queue. +/// \param queue_sz maximum number of mails in the queue. +/// \param type data type of a single mail element. +#if defined (osObjectsExternal) // object is external +#define osMailQDef(name, queue_sz, type) \ +extern const osMailQDef_t os_mailQ_def_##name +#else // define the object +#define osMailQDef(name, queue_sz, type) \ +const osMailQDef_t os_mailQ_def_##name = \ +{ (queue_sz), sizeof(type), NULL } +#endif + +/// \brief Access a Mail Queue Definition. +/// \param name name of the queue +#define osMailQ(name) \ +&os_mailQ_def_##name + +/// Create and Initialize a Mail Queue object. +/// \param[in] queue_def mail queue definition referenced with \ref osMailQ. +/// \param[in] thread_id thread ID (obtained by \ref osThreadCreate or \ref osThreadGetId) or NULL. +/// \return mail queue ID for reference by other functions or NULL in case of error. +osMailQId osMailCreate (const osMailQDef_t *queue_def, osThreadId thread_id); + +/// Allocate a memory block for mail from a mail memory pool. +/// \param[in] queue_id mail queue ID obtained with \ref osMailCreate. +/// \param[in] millisec \ref CMSIS_RTOS_TimeOutValue or 0 in case of no time-out +/// \return pointer to memory block that can be filled with mail or NULL in case of error. +void *osMailAlloc (osMailQId queue_id, uint32_t millisec); + +/// Allocate a memory block for mail from a mail memory pool and set memory block to zero. 
+/// \param[in] queue_id mail queue ID obtained with \ref osMailCreate. +/// \param[in] millisec \ref CMSIS_RTOS_TimeOutValue or 0 in case of no time-out +/// \return pointer to memory block that can be filled with mail or NULL in case of error. +void *osMailCAlloc (osMailQId queue_id, uint32_t millisec); + +/// Put a Mail into a Queue. +/// \param[in] queue_id mail queue ID obtained with \ref osMailCreate. +/// \param[in] mail pointer to memory with mail to put into a queue. +/// \return status code that indicates the execution status of the function. +osStatus osMailPut (osMailQId queue_id, const void *mail); + +/// Get a Mail from a Queue or timeout if Queue is empty. +/// \param[in] queue_id mail queue ID obtained with \ref osMailCreate. +/// \param[in] millisec \ref CMSIS_RTOS_TimeOutValue or 0 in case of no time-out. +/// \return event information that includes status code. +os_InRegs osEvent osMailGet (osMailQId queue_id, uint32_t millisec); + +/// Free a memory block by returning it to a mail memory pool. +/// \param[in] queue_id mail queue ID obtained with \ref osMailCreate. +/// \param[in] mail pointer to memory block that was obtained with \ref osMailGet. +/// \return status code that indicates the execution status of the function. +osStatus osMailFree (osMailQId queue_id, void *mail); + +#endif // Mail Queue available + + +#ifdef __cplusplus +} +#endif + +#endif // CMSIS_OS_H_ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/FreeRTOS/Source/CMSIS_RTOS_V2/cmsis_os2.c b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/FreeRTOS/Source/CMSIS_RTOS_V2/cmsis_os2.c new file mode 100644 index 0000000..26f5fb1 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/FreeRTOS/Source/CMSIS_RTOS_V2/cmsis_os2.c @@ -0,0 +1,1924 @@ +/* -------------------------------------------------------------------------- + * Portions Copyright © 2019 STMicroelectronics International N.V. All rights reserved. + * Copyright (c) 2013-2019 Arm Limited. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * Name: cmsis_os2.c + * Purpose: CMSIS RTOS2 wrapper for FreeRTOS + * + *---------------------------------------------------------------------------*/ + +#include + +#include "cmsis_os2.h" // ::CMSIS:RTOS2 +#include "cmsis_compiler.h" + +#include "FreeRTOS.h" // ARM.FreeRTOS::RTOS:Core +#include "task.h" // ARM.FreeRTOS::RTOS:Core +#include "event_groups.h" // ARM.FreeRTOS::RTOS:Event Groups +#include "semphr.h" // ARM.FreeRTOS::RTOS:Core + +/*---------------------------------------------------------------------------*/ +#ifndef __ARM_ARCH_6M__ + #define __ARM_ARCH_6M__ 0 +#endif +#ifndef __ARM_ARCH_7M__ + #define __ARM_ARCH_7M__ 0 +#endif +#ifndef __ARM_ARCH_7EM__ + #define __ARM_ARCH_7EM__ 0 +#endif +#ifndef __ARM_ARCH_8M_MAIN__ + #define __ARM_ARCH_8M_MAIN__ 0 +#endif +#ifndef __ARM_ARCH_7A__ + #define __ARM_ARCH_7A__ 0 +#endif + +#if ((__ARM_ARCH_7M__ == 1U) || \ + (__ARM_ARCH_7EM__ == 1U) || \ + (__ARM_ARCH_8M_MAIN__ == 1U)) +#define IS_IRQ_MASKED() ((__get_PRIMASK() != 0U) || (__get_BASEPRI() != 0U)) +#elif (__ARM_ARCH_6M__ == 1U) +#define IS_IRQ_MASKED() (__get_PRIMASK() != 0U) +#elif (__ARM_ARCH_7A__ == 1U) +/* CPSR mask bits */ +#define CPSR_MASKBIT_I 0x80U + +#define IS_IRQ_MASKED() ((__get_CPSR() & CPSR_MASKBIT_I) != 0U) +#else +#define IS_IRQ_MASKED() (__get_PRIMASK() != 0U) +#endif + +#if (__ARM_ARCH_7A__ == 1U) +/* CPSR mode bitmasks */ +#define CPSR_MODE_USER 0x10U +#define CPSR_MODE_SYSTEM 0x1FU + +#define IS_IRQ_MODE() ((__get_mode() != CPSR_MODE_USER) && (__get_mode() != CPSR_MODE_SYSTEM)) +#else +#define IS_IRQ_MODE() (__get_IPSR() != 0U) +#endif + +#define IS_IRQ() (IS_IRQ_MODE() || (IS_IRQ_MASKED() && (KernelState == osKernelRunning))) + +/* Limits */ +#define MAX_BITS_TASK_NOTIFY 31U +#define MAX_BITS_EVENT_GROUPS 24U + +#define THREAD_FLAGS_INVALID_BITS (~((1UL << MAX_BITS_TASK_NOTIFY) - 1U)) +#define EVENT_FLAGS_INVALID_BITS (~((1UL << MAX_BITS_EVENT_GROUPS) - 1U)) + +/* Kernel version and identification string definition (major.minor.rev: mmnnnrrrr dec) */ +#define KERNEL_VERSION (((uint32_t)tskKERNEL_VERSION_MAJOR * 10000000UL) | \ + ((uint32_t)tskKERNEL_VERSION_MINOR * 10000UL) | \ + ((uint32_t)tskKERNEL_VERSION_BUILD * 1UL)) + +#define KERNEL_ID ("FreeRTOS " tskKERNEL_VERSION_NUMBER) + +/* Timer callback information structure definition */ +typedef struct { + osTimerFunc_t func; + void *arg; +} TimerCallback_t; + +/* Kernel initialization state */ +static osKernelState_t KernelState = osKernelInactive; + +/* + Heap region definition used by heap_5 variant + + Define configAPPLICATION_ALLOCATED_HEAP as nonzero value in FreeRTOSConfig.h if + heap regions are already defined and vPortDefineHeapRegions is called in application. + + Otherwise vPortDefineHeapRegions will be called by osKernelInitialize using + definition configHEAP_5_REGIONS as parameter. Overriding configHEAP_5_REGIONS + is possible by defining it globally or in FreeRTOSConfig.h. +*/ +#if defined(USE_FREERTOS_HEAP_5) +#if (configAPPLICATION_ALLOCATED_HEAP == 0) + /* + FreeRTOS heap is not defined by the application. + Single region of size configTOTAL_HEAP_SIZE (defined in FreeRTOSConfig.h) + is provided by default. Define configHEAP_5_REGIONS to provide custom + HeapRegion_t array. 
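+
+    A custom layout could look like this (illustrative only; start addresses
+    and sizes are placeholders for a real memory map):
+
+      static HeapRegion_t xAppHeapRegions[] = {
+        { (uint8_t *)0x20020000, 0x10000 },   // first RAM region
+        { (uint8_t *)0x24000000, 0x20000 },   // second RAM region
+        { NULL, 0 }                           // terminator entry
+      };
+      #define configHEAP_5_REGIONS xAppHeapRegions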
+ */ + #define HEAP_5_REGION_SETUP 1 + + #ifndef configHEAP_5_REGIONS + #define configHEAP_5_REGIONS xHeapRegions + + static uint8_t ucHeap[configTOTAL_HEAP_SIZE]; + + static HeapRegion_t xHeapRegions[] = { + { ucHeap, configTOTAL_HEAP_SIZE }, + { NULL, 0 } + }; + #else + /* Global definition is provided to override default heap array */ + extern HeapRegion_t configHEAP_5_REGIONS[]; + #endif +#else + /* + The application already defined the array used for the FreeRTOS heap and + called vPortDefineHeapRegions to initialize heap. + */ + #define HEAP_5_REGION_SETUP 0 +#endif /* configAPPLICATION_ALLOCATED_HEAP */ +#endif /* USE_FREERTOS_HEAP_5 */ + +#if defined(SysTick) +#undef SysTick_Handler + +/* CMSIS SysTick interrupt handler prototype */ +extern void SysTick_Handler (void); +/* FreeRTOS tick timer interrupt handler prototype */ +extern void xPortSysTickHandler (void); + +/* + SysTick handler implementation that also clears overflow flag. +*/ +void SysTick_Handler (void) { + /* Clear overflow flag */ + SysTick->CTRL; + + if (xTaskGetSchedulerState() != taskSCHEDULER_NOT_STARTED) { + /* Call tick handler */ + xPortSysTickHandler(); + } +} +#endif /* SysTick */ + +/* + Setup SVC to reset value. +*/ +__STATIC_INLINE void SVC_Setup (void) { +#if (__ARM_ARCH_7A__ == 0U) + /* Service Call interrupt might be configured before kernel start */ + /* and when its priority is lower or equal to BASEPRI, svc intruction */ + /* causes a Hard Fault. */ + + /* + * the call below has introduced a regression compared to revious release + * The issue was logged under:https://github.com/ARM-software/CMSIS-FreeRTOS/issues/35 + * until it is correctly fixed, the code below is commented + */ +/* NVIC_SetPriority (SVCall_IRQn, 0U); */ +#endif +} + +/*---------------------------------------------------------------------------*/ + +osStatus_t osKernelInitialize (void) { + osStatus_t stat; + + if (IS_IRQ()) { + stat = osErrorISR; + } + else { + if (KernelState == osKernelInactive) { + #if defined(USE_FREERTOS_HEAP_5) && (HEAP_5_REGION_SETUP == 1) + vPortDefineHeapRegions (configHEAP_5_REGIONS); + #endif + KernelState = osKernelReady; + stat = osOK; + } else { + stat = osError; + } + } + + return (stat); +} + +osStatus_t osKernelGetInfo (osVersion_t *version, char *id_buf, uint32_t id_size) { + + if (version != NULL) { + /* Version encoding is major.minor.rev: mmnnnrrrr dec */ + version->api = KERNEL_VERSION; + version->kernel = KERNEL_VERSION; + } + + if ((id_buf != NULL) && (id_size != 0U)) { + if (id_size > sizeof(KERNEL_ID)) { + id_size = sizeof(KERNEL_ID); + } + memcpy(id_buf, KERNEL_ID, id_size); + } + + return (osOK); +} + +osKernelState_t osKernelGetState (void) { + osKernelState_t state; + + switch (xTaskGetSchedulerState()) { + case taskSCHEDULER_RUNNING: + state = osKernelRunning; + break; + + case taskSCHEDULER_SUSPENDED: + state = osKernelLocked; + break; + + case taskSCHEDULER_NOT_STARTED: + default: + if (KernelState == osKernelReady) { + state = osKernelReady; + } else { + state = osKernelInactive; + } + break; + } + + return (state); +} + +osStatus_t osKernelStart (void) { + osStatus_t stat; + + if (IS_IRQ()) { + stat = osErrorISR; + } + else { + if (KernelState == osKernelReady) { + /* Ensure SVC priority is at the reset value */ + SVC_Setup(); + /* Change state to enable IRQ masking check */ + KernelState = osKernelRunning; + /* Start the kernel scheduler */ + vTaskStartScheduler(); + stat = osOK; + } else { + stat = osError; + } + } + + return (stat); +} + +int32_t osKernelLock (void) { + int32_t 
lock; + + if (IS_IRQ()) { + lock = (int32_t)osErrorISR; + } + else { + switch (xTaskGetSchedulerState()) { + case taskSCHEDULER_SUSPENDED: + lock = 1; + break; + + case taskSCHEDULER_RUNNING: + vTaskSuspendAll(); + lock = 0; + break; + + case taskSCHEDULER_NOT_STARTED: + default: + lock = (int32_t)osError; + break; + } + } + + return (lock); +} + +int32_t osKernelUnlock (void) { + int32_t lock; + + if (IS_IRQ()) { + lock = (int32_t)osErrorISR; + } + else { + switch (xTaskGetSchedulerState()) { + case taskSCHEDULER_SUSPENDED: + lock = 1; + + if (xTaskResumeAll() != pdTRUE) { + if (xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED) { + lock = (int32_t)osError; + } + } + break; + + case taskSCHEDULER_RUNNING: + lock = 0; + break; + + case taskSCHEDULER_NOT_STARTED: + default: + lock = (int32_t)osError; + break; + } + } + + return (lock); +} + +int32_t osKernelRestoreLock (int32_t lock) { + + if (IS_IRQ()) { + lock = (int32_t)osErrorISR; + } + else { + switch (xTaskGetSchedulerState()) { + case taskSCHEDULER_SUSPENDED: + case taskSCHEDULER_RUNNING: + if (lock == 1) { + vTaskSuspendAll(); + } + else { + if (lock != 0) { + lock = (int32_t)osError; + } + else { + if (xTaskResumeAll() != pdTRUE) { + if (xTaskGetSchedulerState() != taskSCHEDULER_RUNNING) { + lock = (int32_t)osError; + } + } + } + } + break; + + case taskSCHEDULER_NOT_STARTED: + default: + lock = (int32_t)osError; + break; + } + } + + return (lock); +} + +uint32_t osKernelGetTickCount (void) { + TickType_t ticks; + + if (IS_IRQ()) { + ticks = xTaskGetTickCountFromISR(); + } else { + ticks = xTaskGetTickCount(); + } + + return (ticks); +} + +uint32_t osKernelGetTickFreq (void) { + return (configTICK_RATE_HZ); +} + +uint32_t osKernelGetSysTimerCount (void) { + uint32_t irqmask = IS_IRQ_MASKED(); + TickType_t ticks; + uint32_t val; + + __disable_irq(); + + ticks = xTaskGetTickCount(); + + val = ticks * ( configCPU_CLOCK_HZ / configTICK_RATE_HZ ); + if (irqmask == 0U) { + __enable_irq(); + } + + return (val); +} + +uint32_t osKernelGetSysTimerFreq (void) { + return (configCPU_CLOCK_HZ); +} + +/*---------------------------------------------------------------------------*/ + +osThreadId_t osThreadNew (osThreadFunc_t func, void *argument, const osThreadAttr_t *attr) { + const char *name; + uint32_t stack; + TaskHandle_t hTask; + UBaseType_t prio; + int32_t mem; + + hTask = NULL; + + if (!IS_IRQ() && (func != NULL)) { + stack = configMINIMAL_STACK_SIZE; + prio = (UBaseType_t)osPriorityNormal; + + name = NULL; + mem = -1; + + if (attr != NULL) { + if (attr->name != NULL) { + name = attr->name; + } + if (attr->priority != osPriorityNone) { + prio = (UBaseType_t)attr->priority; + } + + if ((prio < osPriorityIdle) || (prio > osPriorityISR) || ((attr->attr_bits & osThreadJoinable) == osThreadJoinable)) { + return (NULL); + } + + if (attr->stack_size > 0U) { + /* In FreeRTOS stack is not in bytes, but in sizeof(StackType_t) which is 4 on ARM ports. 
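+           For example (illustrative figure, not taken from this file): an
+           attr->stack_size of 1024 bytes becomes a depth of
+           1024 / sizeof(StackType_t) = 256 words in the division below.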
*/ + /* Stack size should be therefore 4 byte aligned in order to avoid division caused side effects */ + stack = attr->stack_size / sizeof(StackType_t); + } + + if ((attr->cb_mem != NULL) && (attr->cb_size >= sizeof(StaticTask_t)) && + (attr->stack_mem != NULL) && (attr->stack_size > 0U)) { + mem = 1; + } + else { + if ((attr->cb_mem == NULL) && (attr->cb_size == 0U) && (attr->stack_mem == NULL)) { + mem = 0; + } + } + } + else { + mem = 0; + } + + if (mem == 1) { + hTask = xTaskCreateStatic ((TaskFunction_t)func, name, stack, argument, prio, (StackType_t *)attr->stack_mem, + (StaticTask_t *)attr->cb_mem); + } + else { + if (mem == 0) { + if (xTaskCreate ((TaskFunction_t)func, name, (uint16_t)stack, argument, prio, &hTask) != pdPASS) { + hTask = NULL; + } + } + } + } + + return ((osThreadId_t)hTask); +} + +const char *osThreadGetName (osThreadId_t thread_id) { + TaskHandle_t hTask = (TaskHandle_t)thread_id; + const char *name; + + if (IS_IRQ() || (hTask == NULL)) { + name = NULL; + } else { + name = pcTaskGetName (hTask); + } + + return (name); +} + +osThreadId_t osThreadGetId (void) { + osThreadId_t id; + + id = (osThreadId_t)xTaskGetCurrentTaskHandle(); + + return (id); +} + +osThreadState_t osThreadGetState (osThreadId_t thread_id) { + TaskHandle_t hTask = (TaskHandle_t)thread_id; + osThreadState_t state; + + if (IS_IRQ() || (hTask == NULL)) { + state = osThreadError; + } + else { + switch (eTaskGetState (hTask)) { + case eRunning: state = osThreadRunning; break; + case eReady: state = osThreadReady; break; + case eBlocked: + case eSuspended: state = osThreadBlocked; break; + case eDeleted: state = osThreadTerminated; break; + case eInvalid: + default: state = osThreadError; break; + } + } + + return (state); +} + +uint32_t osThreadGetStackSpace (osThreadId_t thread_id) { + TaskHandle_t hTask = (TaskHandle_t)thread_id; + uint32_t sz; + + if (IS_IRQ() || (hTask == NULL)) { + sz = 0U; + } else { + sz = (uint32_t)uxTaskGetStackHighWaterMark (hTask); + } + + return (sz); +} + +osStatus_t osThreadSetPriority (osThreadId_t thread_id, osPriority_t priority) { + TaskHandle_t hTask = (TaskHandle_t)thread_id; + osStatus_t stat; + + if (IS_IRQ()) { + stat = osErrorISR; + } + else if ((hTask == NULL) || (priority < osPriorityIdle) || (priority > osPriorityISR)) { + stat = osErrorParameter; + } + else { + stat = osOK; + vTaskPrioritySet (hTask, (UBaseType_t)priority); + } + + return (stat); +} + +osPriority_t osThreadGetPriority (osThreadId_t thread_id) { + TaskHandle_t hTask = (TaskHandle_t)thread_id; + osPriority_t prio; + + if (IS_IRQ() || (hTask == NULL)) { + prio = osPriorityError; + } else { + prio = (osPriority_t)uxTaskPriorityGet (hTask); + } + + return (prio); +} + +osStatus_t osThreadYield (void) { + osStatus_t stat; + + if (IS_IRQ()) { + stat = osErrorISR; + } else { + stat = osOK; + taskYIELD(); + } + + return (stat); +} + +osStatus_t osThreadSuspend (osThreadId_t thread_id) { + TaskHandle_t hTask = (TaskHandle_t)thread_id; + osStatus_t stat; + + if (IS_IRQ()) { + stat = osErrorISR; + } + else if (hTask == NULL) { + stat = osErrorParameter; + } + else { + stat = osOK; + vTaskSuspend (hTask); + } + + return (stat); +} + +osStatus_t osThreadResume (osThreadId_t thread_id) { + TaskHandle_t hTask = (TaskHandle_t)thread_id; + osStatus_t stat; + + if (IS_IRQ()) { + stat = osErrorISR; + } + else if (hTask == NULL) { + stat = osErrorParameter; + } + else { + stat = osOK; + vTaskResume (hTask); + } + + return (stat); +} + +__NO_RETURN void osThreadExit (void) { +#ifndef USE_FreeRTOS_HEAP_1 + 
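+  /* Delete the calling task. The call is compiled out for heap_1, which cannot
+     free memory; in that configuration the thread simply parks in the loop below. */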
vTaskDelete (NULL); +#endif + for (;;); +} + +osStatus_t osThreadTerminate (osThreadId_t thread_id) { + TaskHandle_t hTask = (TaskHandle_t)thread_id; + osStatus_t stat; +#ifndef USE_FreeRTOS_HEAP_1 + eTaskState tstate; + + if (IS_IRQ()) { + stat = osErrorISR; + } + else if (hTask == NULL) { + stat = osErrorParameter; + } + else { + tstate = eTaskGetState (hTask); + + if (tstate != eDeleted) { + stat = osOK; + vTaskDelete (hTask); + } else { + stat = osErrorResource; + } + } +#else + stat = osError; +#endif + + return (stat); +} + +uint32_t osThreadGetCount (void) { + uint32_t count; + + if (IS_IRQ()) { + count = 0U; + } else { + count = uxTaskGetNumberOfTasks(); + } + + return (count); +} + +uint32_t osThreadEnumerate (osThreadId_t *thread_array, uint32_t array_items) { + uint32_t i, count; + TaskStatus_t *task; + + if (IS_IRQ() || (thread_array == NULL) || (array_items == 0U)) { + count = 0U; + } else { + vTaskSuspendAll(); + + count = uxTaskGetNumberOfTasks(); + task = pvPortMalloc (count * sizeof(TaskStatus_t)); + + if (task != NULL) { + count = uxTaskGetSystemState (task, count, NULL); + + for (i = 0U; (i < count) && (i < array_items); i++) { + thread_array[i] = (osThreadId_t)task[i].xHandle; + } + count = i; + } + (void)xTaskResumeAll(); + + vPortFree (task); + } + + return (count); +} + +uint32_t osThreadFlagsSet (osThreadId_t thread_id, uint32_t flags) { + TaskHandle_t hTask = (TaskHandle_t)thread_id; + uint32_t rflags; + BaseType_t yield; + + if ((hTask == NULL) || ((flags & THREAD_FLAGS_INVALID_BITS) != 0U)) { + rflags = (uint32_t)osErrorParameter; + } + else { + rflags = (uint32_t)osError; + + if (IS_IRQ()) { + yield = pdFALSE; + + (void)xTaskNotifyFromISR (hTask, flags, eSetBits, &yield); + (void)xTaskNotifyAndQueryFromISR (hTask, 0, eNoAction, &rflags, NULL); + + portYIELD_FROM_ISR (yield); + } + else { + (void)xTaskNotify (hTask, flags, eSetBits); + (void)xTaskNotifyAndQuery (hTask, 0, eNoAction, &rflags); + } + } + /* Return flags after setting */ + return (rflags); +} + +uint32_t osThreadFlagsClear (uint32_t flags) { + TaskHandle_t hTask; + uint32_t rflags, cflags; + + if (IS_IRQ()) { + rflags = (uint32_t)osErrorISR; + } + else if ((flags & THREAD_FLAGS_INVALID_BITS) != 0U) { + rflags = (uint32_t)osErrorParameter; + } + else { + hTask = xTaskGetCurrentTaskHandle(); + + if (xTaskNotifyAndQuery (hTask, 0, eNoAction, &cflags) == pdPASS) { + rflags = cflags; + cflags &= ~flags; + + if (xTaskNotify (hTask, cflags, eSetValueWithOverwrite) != pdPASS) { + rflags = (uint32_t)osError; + } + } + else { + rflags = (uint32_t)osError; + } + } + + /* Return flags before clearing */ + return (rflags); +} + +uint32_t osThreadFlagsGet (void) { + TaskHandle_t hTask; + uint32_t rflags; + + if (IS_IRQ()) { + rflags = (uint32_t)osErrorISR; + } + else { + hTask = xTaskGetCurrentTaskHandle(); + + if (xTaskNotifyAndQuery (hTask, 0, eNoAction, &rflags) != pdPASS) { + rflags = (uint32_t)osError; + } + } + + return (rflags); +} + +uint32_t osThreadFlagsWait (uint32_t flags, uint32_t options, uint32_t timeout) { + uint32_t rflags, nval; + uint32_t clear; + TickType_t t0, td, tout; + BaseType_t rval; + + if (IS_IRQ()) { + rflags = (uint32_t)osErrorISR; + } + else if ((flags & THREAD_FLAGS_INVALID_BITS) != 0U) { + rflags = (uint32_t)osErrorParameter; + } + else { + if ((options & osFlagsNoClear) == osFlagsNoClear) { + clear = 0U; + } else { + clear = flags; + } + + rflags = 0U; + tout = timeout; + + t0 = xTaskGetTickCount(); + do { + rval = xTaskNotifyWait (0, clear, &nval, tout); + + if (rval == pdPASS) 
{ + rflags &= flags; + rflags |= nval; + + if ((options & osFlagsWaitAll) == osFlagsWaitAll) { + if ((flags & rflags) == flags) { + break; + } else { + if (timeout == 0U) { + rflags = (uint32_t)osErrorResource; + break; + } + } + } + else { + if ((flags & rflags) != 0) { + break; + } else { + if (timeout == 0U) { + rflags = (uint32_t)osErrorResource; + break; + } + } + } + + /* Update timeout */ + td = xTaskGetTickCount() - t0; + + if (td > tout) { + tout = 0; + } else { + tout -= td; + } + } + else { + if (timeout == 0) { + rflags = (uint32_t)osErrorResource; + } else { + rflags = (uint32_t)osErrorTimeout; + } + } + } + while (rval != pdFAIL); + } + + /* Return flags before clearing */ + return (rflags); +} + +osStatus_t osDelay (uint32_t ticks) { + osStatus_t stat; + + if (IS_IRQ()) { + stat = osErrorISR; + } + else { + stat = osOK; + + if (ticks != 0U) { + vTaskDelay(ticks); + } + } + + return (stat); +} + +osStatus_t osDelayUntil (uint32_t ticks) { + TickType_t tcnt, delay; + osStatus_t stat; + + if (IS_IRQ()) { + stat = osErrorISR; + } + else { + stat = osOK; + tcnt = xTaskGetTickCount(); + + /* Determine remaining number of ticks to delay */ + delay = (TickType_t)ticks - tcnt; + + /* Check if target tick has not expired */ + if((delay != 0U) && (0 == (delay >> (8 * sizeof(TickType_t) - 1)))) { + vTaskDelayUntil (&tcnt, delay); + } + else + { + /* No delay or already expired */ + stat = osErrorParameter; + } + } + + return (stat); +} + +/*---------------------------------------------------------------------------*/ + +static void TimerCallback (TimerHandle_t hTimer) { + TimerCallback_t *callb; + + callb = (TimerCallback_t *)pvTimerGetTimerID (hTimer); + + if (callb != NULL) { + callb->func (callb->arg); + } +} + +osTimerId_t osTimerNew (osTimerFunc_t func, osTimerType_t type, void *argument, const osTimerAttr_t *attr) { + const char *name; + TimerHandle_t hTimer; + TimerCallback_t *callb; + UBaseType_t reload; + int32_t mem; + + hTimer = NULL; + + if (!IS_IRQ() && (func != NULL)) { + /* Allocate memory to store callback function and argument */ + callb = pvPortMalloc (sizeof(TimerCallback_t)); + + if (callb != NULL) { + callb->func = func; + callb->arg = argument; + + if (type == osTimerOnce) { + reload = pdFALSE; + } else { + reload = pdTRUE; + } + + mem = -1; + name = NULL; + + if (attr != NULL) { + if (attr->name != NULL) { + name = attr->name; + } + + if ((attr->cb_mem != NULL) && (attr->cb_size >= sizeof(StaticTimer_t))) { + mem = 1; + } + else { + if ((attr->cb_mem == NULL) && (attr->cb_size == 0U)) { + mem = 0; + } + } + } + else { + mem = 0; + } + + if (mem == 1) { + hTimer = xTimerCreateStatic (name, 1, reload, callb, TimerCallback, (StaticTimer_t *)attr->cb_mem); + } + else { + if (mem == 0) { + hTimer = xTimerCreate (name, 1, reload, callb, TimerCallback); + } + } + } + } + + return ((osTimerId_t)hTimer); +} + +const char *osTimerGetName (osTimerId_t timer_id) { + TimerHandle_t hTimer = (TimerHandle_t)timer_id; + const char *p; + + if (IS_IRQ() || (hTimer == NULL)) { + p = NULL; + } else { + p = pcTimerGetName (hTimer); + } + + return (p); +} + +osStatus_t osTimerStart (osTimerId_t timer_id, uint32_t ticks) { + TimerHandle_t hTimer = (TimerHandle_t)timer_id; + osStatus_t stat; + + if (IS_IRQ()) { + stat = osErrorISR; + } + else if (hTimer == NULL) { + stat = osErrorParameter; + } + else { + if (xTimerChangePeriod (hTimer, ticks, 0) == pdPASS) { + stat = osOK; + } else { + stat = osErrorResource; + } + } + + return (stat); +} + +osStatus_t osTimerStop (osTimerId_t timer_id) 
{ + TimerHandle_t hTimer = (TimerHandle_t)timer_id; + osStatus_t stat; + + if (IS_IRQ()) { + stat = osErrorISR; + } + else if (hTimer == NULL) { + stat = osErrorParameter; + } + else { + if (xTimerIsTimerActive (hTimer) == pdFALSE) { + stat = osErrorResource; + } + else { + if (xTimerStop (hTimer, 0) == pdPASS) { + stat = osOK; + } else { + stat = osError; + } + } + } + + return (stat); +} + +uint32_t osTimerIsRunning (osTimerId_t timer_id) { + TimerHandle_t hTimer = (TimerHandle_t)timer_id; + uint32_t running; + + if (IS_IRQ() || (hTimer == NULL)) { + running = 0U; + } else { + running = (uint32_t)xTimerIsTimerActive (hTimer); + } + + return (running); +} + +osStatus_t osTimerDelete (osTimerId_t timer_id) { + TimerHandle_t hTimer = (TimerHandle_t)timer_id; + osStatus_t stat; +#ifndef USE_FreeRTOS_HEAP_1 + TimerCallback_t *callb; + + if (IS_IRQ()) { + stat = osErrorISR; + } + else if (hTimer == NULL) { + stat = osErrorParameter; + } + else { + callb = (TimerCallback_t *)pvTimerGetTimerID (hTimer); + + if (xTimerDelete (hTimer, 0) == pdPASS) { + vPortFree (callb); + stat = osOK; + } else { + stat = osErrorResource; + } + } +#else + stat = osError; +#endif + + return (stat); +} + +/*---------------------------------------------------------------------------*/ + +osEventFlagsId_t osEventFlagsNew (const osEventFlagsAttr_t *attr) { + EventGroupHandle_t hEventGroup; + int32_t mem; + + hEventGroup = NULL; + + if (!IS_IRQ()) { + mem = -1; + + if (attr != NULL) { + if ((attr->cb_mem != NULL) && (attr->cb_size >= sizeof(StaticEventGroup_t))) { + mem = 1; + } + else { + if ((attr->cb_mem == NULL) && (attr->cb_size == 0U)) { + mem = 0; + } + } + } + else { + mem = 0; + } + + if (mem == 1) { + hEventGroup = xEventGroupCreateStatic (attr->cb_mem); + } + else { + if (mem == 0) { + hEventGroup = xEventGroupCreate(); + } + } + } + + return ((osEventFlagsId_t)hEventGroup); +} + +uint32_t osEventFlagsSet (osEventFlagsId_t ef_id, uint32_t flags) { + EventGroupHandle_t hEventGroup = (EventGroupHandle_t)ef_id; + uint32_t rflags; + BaseType_t yield; + + if ((hEventGroup == NULL) || ((flags & EVENT_FLAGS_INVALID_BITS) != 0U)) { + rflags = (uint32_t)osErrorParameter; + } + else if (IS_IRQ()) { + yield = pdFALSE; + + if (xEventGroupSetBitsFromISR (hEventGroup, (EventBits_t)flags, &yield) == pdFAIL) { + rflags = (uint32_t)osErrorResource; + } else { + rflags = flags; + portYIELD_FROM_ISR (yield); + } + } + else { + rflags = xEventGroupSetBits (hEventGroup, (EventBits_t)flags); + } + + return (rflags); +} + +uint32_t osEventFlagsClear (osEventFlagsId_t ef_id, uint32_t flags) { + EventGroupHandle_t hEventGroup = (EventGroupHandle_t)ef_id; + uint32_t rflags; + + if ((hEventGroup == NULL) || ((flags & EVENT_FLAGS_INVALID_BITS) != 0U)) { + rflags = (uint32_t)osErrorParameter; + } + else if (IS_IRQ()) { + rflags = xEventGroupGetBitsFromISR (hEventGroup); + + if (xEventGroupClearBitsFromISR (hEventGroup, (EventBits_t)flags) == pdFAIL) { + rflags = (uint32_t)osErrorResource; + } + } + else { + rflags = xEventGroupClearBits (hEventGroup, (EventBits_t)flags); + } + + return (rflags); +} + +uint32_t osEventFlagsGet (osEventFlagsId_t ef_id) { + EventGroupHandle_t hEventGroup = (EventGroupHandle_t)ef_id; + uint32_t rflags; + + if (ef_id == NULL) { + rflags = 0U; + } + else if (IS_IRQ()) { + rflags = xEventGroupGetBitsFromISR (hEventGroup); + } + else { + rflags = xEventGroupGetBits (hEventGroup); + } + + return (rflags); +} + +uint32_t osEventFlagsWait (osEventFlagsId_t ef_id, uint32_t flags, uint32_t options, uint32_t timeout) 
{ + EventGroupHandle_t hEventGroup = (EventGroupHandle_t)ef_id; + BaseType_t wait_all; + BaseType_t exit_clr; + uint32_t rflags; + + if ((hEventGroup == NULL) || ((flags & EVENT_FLAGS_INVALID_BITS) != 0U)) { + rflags = (uint32_t)osErrorParameter; + } + else if (IS_IRQ()) { + rflags = (uint32_t)osErrorISR; + } + else { + if (options & osFlagsWaitAll) { + wait_all = pdTRUE; + } else { + wait_all = pdFAIL; + } + + if (options & osFlagsNoClear) { + exit_clr = pdFAIL; + } else { + exit_clr = pdTRUE; + } + + rflags = xEventGroupWaitBits (hEventGroup, (EventBits_t)flags, exit_clr, wait_all, (TickType_t)timeout); + + if (options & osFlagsWaitAll) { + if (flags != rflags) { + if (timeout > 0U) { + rflags = (uint32_t)osErrorTimeout; + } else { + rflags = (uint32_t)osErrorResource; + } + } + } + else { + if ((flags & rflags) == 0U) { + if (timeout > 0U) { + rflags = (uint32_t)osErrorTimeout; + } else { + rflags = (uint32_t)osErrorResource; + } + } + } + } + + return (rflags); +} + +osStatus_t osEventFlagsDelete (osEventFlagsId_t ef_id) { + EventGroupHandle_t hEventGroup = (EventGroupHandle_t)ef_id; + osStatus_t stat; + +#ifndef USE_FreeRTOS_HEAP_1 + if (IS_IRQ()) { + stat = osErrorISR; + } + else if (hEventGroup == NULL) { + stat = osErrorParameter; + } + else { + stat = osOK; + vEventGroupDelete (hEventGroup); + } +#else + stat = osError; +#endif + + return (stat); +} + +/*---------------------------------------------------------------------------*/ + +osMutexId_t osMutexNew (const osMutexAttr_t *attr) { + SemaphoreHandle_t hMutex; + uint32_t type; + uint32_t rmtx; + int32_t mem; + #if (configQUEUE_REGISTRY_SIZE > 0) + const char *name; + #endif + + hMutex = NULL; + + if (!IS_IRQ()) { + if (attr != NULL) { + type = attr->attr_bits; + } else { + type = 0U; + } + + if ((type & osMutexRecursive) == osMutexRecursive) { + rmtx = 1U; + } else { + rmtx = 0U; + } + + if ((type & osMutexRobust) != osMutexRobust) { + mem = -1; + + if (attr != NULL) { + if ((attr->cb_mem != NULL) && (attr->cb_size >= sizeof(StaticSemaphore_t))) { + mem = 1; + } + else { + if ((attr->cb_mem == NULL) && (attr->cb_size == 0U)) { + mem = 0; + } + } + } + else { + mem = 0; + } + + if (mem == 1) { + if (rmtx != 0U) { + hMutex = xSemaphoreCreateRecursiveMutexStatic (attr->cb_mem); + } + else { + hMutex = xSemaphoreCreateMutexStatic (attr->cb_mem); + } + } + else { + if (mem == 0) { + if (rmtx != 0U) { + hMutex = xSemaphoreCreateRecursiveMutex (); + } else { + hMutex = xSemaphoreCreateMutex (); + } + } + } + + #if (configQUEUE_REGISTRY_SIZE > 0) + if (hMutex != NULL) { + if (attr != NULL) { + name = attr->name; + } else { + name = NULL; + } + vQueueAddToRegistry (hMutex, name); + } + #endif + + if ((hMutex != NULL) && (rmtx != 0U)) { + hMutex = (SemaphoreHandle_t)((uint32_t)hMutex | 1U); + } + } + } + + return ((osMutexId_t)hMutex); +} + +osStatus_t osMutexAcquire (osMutexId_t mutex_id, uint32_t timeout) { + SemaphoreHandle_t hMutex; + osStatus_t stat; + uint32_t rmtx; + + hMutex = (SemaphoreHandle_t)((uint32_t)mutex_id & ~1U); + + rmtx = (uint32_t)mutex_id & 1U; + + stat = osOK; + + if (IS_IRQ()) { + stat = osErrorISR; + } + else if (hMutex == NULL) { + stat = osErrorParameter; + } + else { + if (rmtx != 0U) { + if (xSemaphoreTakeRecursive (hMutex, timeout) != pdPASS) { + if (timeout != 0U) { + stat = osErrorTimeout; + } else { + stat = osErrorResource; + } + } + } + else { + if (xSemaphoreTake (hMutex, timeout) != pdPASS) { + if (timeout != 0U) { + stat = osErrorTimeout; + } else { + stat = osErrorResource; + } + } + } + } + + 
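+  /* Note: osMutexNew marks a recursive mutex by setting bit 0 of the returned ID;
+     the FreeRTOS handle and the rmtx flag were recovered from that encoding above. */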
return (stat); +} + +osStatus_t osMutexRelease (osMutexId_t mutex_id) { + SemaphoreHandle_t hMutex; + osStatus_t stat; + uint32_t rmtx; + + hMutex = (SemaphoreHandle_t)((uint32_t)mutex_id & ~1U); + + rmtx = (uint32_t)mutex_id & 1U; + + stat = osOK; + + if (IS_IRQ()) { + stat = osErrorISR; + } + else if (hMutex == NULL) { + stat = osErrorParameter; + } + else { + if (rmtx != 0U) { + if (xSemaphoreGiveRecursive (hMutex) != pdPASS) { + stat = osErrorResource; + } + } + else { + if (xSemaphoreGive (hMutex) != pdPASS) { + stat = osErrorResource; + } + } + } + + return (stat); +} + +osThreadId_t osMutexGetOwner (osMutexId_t mutex_id) { + SemaphoreHandle_t hMutex; + osThreadId_t owner; + + hMutex = (SemaphoreHandle_t)((uint32_t)mutex_id & ~1U); + + if (IS_IRQ() || (hMutex == NULL)) { + owner = NULL; + } else { + owner = (osThreadId_t)xSemaphoreGetMutexHolder (hMutex); + } + + return (owner); +} + +osStatus_t osMutexDelete (osMutexId_t mutex_id) { + osStatus_t stat; +#ifndef USE_FreeRTOS_HEAP_1 + SemaphoreHandle_t hMutex; + + hMutex = (SemaphoreHandle_t)((uint32_t)mutex_id & ~1U); + + if (IS_IRQ()) { + stat = osErrorISR; + } + else if (hMutex == NULL) { + stat = osErrorParameter; + } + else { + #if (configQUEUE_REGISTRY_SIZE > 0) + vQueueUnregisterQueue (hMutex); + #endif + stat = osOK; + vSemaphoreDelete (hMutex); + } +#else + stat = osError; +#endif + + return (stat); +} + +/*---------------------------------------------------------------------------*/ + +osSemaphoreId_t osSemaphoreNew (uint32_t max_count, uint32_t initial_count, const osSemaphoreAttr_t *attr) { + SemaphoreHandle_t hSemaphore; + int32_t mem; + #if (configQUEUE_REGISTRY_SIZE > 0) + const char *name; + #endif + + hSemaphore = NULL; + + if (!IS_IRQ() && (max_count > 0U) && (initial_count <= max_count)) { + mem = -1; + + if (attr != NULL) { + if ((attr->cb_mem != NULL) && (attr->cb_size >= sizeof(StaticSemaphore_t))) { + mem = 1; + } + else { + if ((attr->cb_mem == NULL) && (attr->cb_size == 0U)) { + mem = 0; + } + } + } + else { + mem = 0; + } + + if (mem != -1) { + if (max_count == 1U) { + if (mem == 1) { + hSemaphore = xSemaphoreCreateBinaryStatic ((StaticSemaphore_t *)attr->cb_mem); + } + else { + hSemaphore = xSemaphoreCreateBinary(); + } + + if ((hSemaphore != NULL) && (initial_count != 0U)) { + if (xSemaphoreGive (hSemaphore) != pdPASS) { + vSemaphoreDelete (hSemaphore); + hSemaphore = NULL; + } + } + } + else { + if (mem == 1) { + hSemaphore = xSemaphoreCreateCountingStatic (max_count, initial_count, (StaticSemaphore_t *)attr->cb_mem); + } + else { + hSemaphore = xSemaphoreCreateCounting (max_count, initial_count); + } + } + + #if (configQUEUE_REGISTRY_SIZE > 0) + if (hSemaphore != NULL) { + if (attr != NULL) { + name = attr->name; + } else { + name = NULL; + } + vQueueAddToRegistry (hSemaphore, name); + } + #endif + } + } + + return ((osSemaphoreId_t)hSemaphore); +} + +osStatus_t osSemaphoreAcquire (osSemaphoreId_t semaphore_id, uint32_t timeout) { + SemaphoreHandle_t hSemaphore = (SemaphoreHandle_t)semaphore_id; + osStatus_t stat; + BaseType_t yield; + + stat = osOK; + + if (hSemaphore == NULL) { + stat = osErrorParameter; + } + else if (IS_IRQ()) { + if (timeout != 0U) { + stat = osErrorParameter; + } + else { + yield = pdFALSE; + + if (xSemaphoreTakeFromISR (hSemaphore, &yield) != pdPASS) { + stat = osErrorResource; + } else { + portYIELD_FROM_ISR (yield); + } + } + } + else { + if (xSemaphoreTake (hSemaphore, (TickType_t)timeout) != pdPASS) { + if (timeout != 0U) { + stat = osErrorTimeout; + } else { + stat = 
osErrorResource; + } + } + } + + return (stat); +} + +osStatus_t osSemaphoreRelease (osSemaphoreId_t semaphore_id) { + SemaphoreHandle_t hSemaphore = (SemaphoreHandle_t)semaphore_id; + osStatus_t stat; + BaseType_t yield; + + stat = osOK; + + if (hSemaphore == NULL) { + stat = osErrorParameter; + } + else if (IS_IRQ()) { + yield = pdFALSE; + + if (xSemaphoreGiveFromISR (hSemaphore, &yield) != pdTRUE) { + stat = osErrorResource; + } else { + portYIELD_FROM_ISR (yield); + } + } + else { + if (xSemaphoreGive (hSemaphore) != pdPASS) { + stat = osErrorResource; + } + } + + return (stat); +} + +uint32_t osSemaphoreGetCount (osSemaphoreId_t semaphore_id) { + SemaphoreHandle_t hSemaphore = (SemaphoreHandle_t)semaphore_id; + uint32_t count; + + if (hSemaphore == NULL) { + count = 0U; + } + else if (IS_IRQ()) { + count = uxQueueMessagesWaitingFromISR (hSemaphore); + } else { + count = (uint32_t)uxSemaphoreGetCount (hSemaphore); + } + + return (count); +} + +osStatus_t osSemaphoreDelete (osSemaphoreId_t semaphore_id) { + SemaphoreHandle_t hSemaphore = (SemaphoreHandle_t)semaphore_id; + osStatus_t stat; + +#ifndef USE_FreeRTOS_HEAP_1 + if (IS_IRQ()) { + stat = osErrorISR; + } + else if (hSemaphore == NULL) { + stat = osErrorParameter; + } + else { + #if (configQUEUE_REGISTRY_SIZE > 0) + vQueueUnregisterQueue (hSemaphore); + #endif + + stat = osOK; + vSemaphoreDelete (hSemaphore); + } +#else + stat = osError; +#endif + + return (stat); +} + +/*---------------------------------------------------------------------------*/ + +osMessageQueueId_t osMessageQueueNew (uint32_t msg_count, uint32_t msg_size, const osMessageQueueAttr_t *attr) { + QueueHandle_t hQueue; + int32_t mem; + #if (configQUEUE_REGISTRY_SIZE > 0) + const char *name; + #endif + + hQueue = NULL; + + if (!IS_IRQ() && (msg_count > 0U) && (msg_size > 0U)) { + mem = -1; + + if (attr != NULL) { + if ((attr->cb_mem != NULL) && (attr->cb_size >= sizeof(StaticQueue_t)) && + (attr->mq_mem != NULL) && (attr->mq_size >= (msg_count * msg_size))) { + mem = 1; + } + else { + if ((attr->cb_mem == NULL) && (attr->cb_size == 0U) && + (attr->mq_mem == NULL) && (attr->mq_size == 0U)) { + mem = 0; + } + } + } + else { + mem = 0; + } + + if (mem == 1) { + hQueue = xQueueCreateStatic (msg_count, msg_size, attr->mq_mem, attr->cb_mem); + } + else { + if (mem == 0) { + hQueue = xQueueCreate (msg_count, msg_size); + } + } + + #if (configQUEUE_REGISTRY_SIZE > 0) + if (hQueue != NULL) { + if (attr != NULL) { + name = attr->name; + } else { + name = NULL; + } + vQueueAddToRegistry (hQueue, name); + } + #endif + + } + + return ((osMessageQueueId_t)hQueue); +} + +osStatus_t osMessageQueuePut (osMessageQueueId_t mq_id, const void *msg_ptr, uint8_t msg_prio, uint32_t timeout) { + QueueHandle_t hQueue = (QueueHandle_t)mq_id; + osStatus_t stat; + BaseType_t yield; + + (void)msg_prio; /* Message priority is ignored */ + + stat = osOK; + + if (IS_IRQ()) { + if ((hQueue == NULL) || (msg_ptr == NULL) || (timeout != 0U)) { + stat = osErrorParameter; + } + else { + yield = pdFALSE; + + if (xQueueSendToBackFromISR (hQueue, msg_ptr, &yield) != pdTRUE) { + stat = osErrorResource; + } else { + portYIELD_FROM_ISR (yield); + } + } + } + else { + if ((hQueue == NULL) || (msg_ptr == NULL)) { + stat = osErrorParameter; + } + else { + if (xQueueSendToBack (hQueue, msg_ptr, (TickType_t)timeout) != pdPASS) { + if (timeout != 0U) { + stat = osErrorTimeout; + } else { + stat = osErrorResource; + } + } + } + } + + return (stat); +} + +osStatus_t osMessageQueueGet (osMessageQueueId_t mq_id, void 
*msg_ptr, uint8_t *msg_prio, uint32_t timeout) { + QueueHandle_t hQueue = (QueueHandle_t)mq_id; + osStatus_t stat; + BaseType_t yield; + + (void)msg_prio; /* Message priority is ignored */ + + stat = osOK; + + if (IS_IRQ()) { + if ((hQueue == NULL) || (msg_ptr == NULL) || (timeout != 0U)) { + stat = osErrorParameter; + } + else { + yield = pdFALSE; + + if (xQueueReceiveFromISR (hQueue, msg_ptr, &yield) != pdPASS) { + stat = osErrorResource; + } else { + portYIELD_FROM_ISR (yield); + } + } + } + else { + if ((hQueue == NULL) || (msg_ptr == NULL)) { + stat = osErrorParameter; + } + else { + if (xQueueReceive (hQueue, msg_ptr, (TickType_t)timeout) != pdPASS) { + if (timeout != 0U) { + stat = osErrorTimeout; + } else { + stat = osErrorResource; + } + } + } + } + + return (stat); +} + +uint32_t osMessageQueueGetCapacity (osMessageQueueId_t mq_id) { + StaticQueue_t *mq = (StaticQueue_t *)mq_id; + uint32_t capacity; + + if (mq == NULL) { + capacity = 0U; + } else { + /* capacity = pxQueue->uxLength */ + capacity = mq->uxDummy4[1]; + } + + return (capacity); +} + +uint32_t osMessageQueueGetMsgSize (osMessageQueueId_t mq_id) { + StaticQueue_t *mq = (StaticQueue_t *)mq_id; + uint32_t size; + + if (mq == NULL) { + size = 0U; + } else { + /* size = pxQueue->uxItemSize */ + size = mq->uxDummy4[2]; + } + + return (size); +} + +uint32_t osMessageQueueGetCount (osMessageQueueId_t mq_id) { + QueueHandle_t hQueue = (QueueHandle_t)mq_id; + UBaseType_t count; + + if (hQueue == NULL) { + count = 0U; + } + else if (IS_IRQ()) { + count = uxQueueMessagesWaitingFromISR (hQueue); + } + else { + count = uxQueueMessagesWaiting (hQueue); + } + + return ((uint32_t)count); +} + +uint32_t osMessageQueueGetSpace (osMessageQueueId_t mq_id) { + StaticQueue_t *mq = (StaticQueue_t *)mq_id; + uint32_t space; + uint32_t isrm; + + if (mq == NULL) { + space = 0U; + } + else if (IS_IRQ()) { + isrm = taskENTER_CRITICAL_FROM_ISR(); + + /* space = pxQueue->uxLength - pxQueue->uxMessagesWaiting; */ + space = mq->uxDummy4[1] - mq->uxDummy4[0]; + + taskEXIT_CRITICAL_FROM_ISR(isrm); + } + else { + space = (uint32_t)uxQueueSpacesAvailable ((QueueHandle_t)mq); + } + + return (space); +} + +osStatus_t osMessageQueueReset (osMessageQueueId_t mq_id) { + QueueHandle_t hQueue = (QueueHandle_t)mq_id; + osStatus_t stat; + + if (IS_IRQ()) { + stat = osErrorISR; + } + else if (hQueue == NULL) { + stat = osErrorParameter; + } + else { + stat = osOK; + (void)xQueueReset (hQueue); + } + + return (stat); +} + +osStatus_t osMessageQueueDelete (osMessageQueueId_t mq_id) { + QueueHandle_t hQueue = (QueueHandle_t)mq_id; + osStatus_t stat; + +#ifndef USE_FreeRTOS_HEAP_1 + if (IS_IRQ()) { + stat = osErrorISR; + } + else if (hQueue == NULL) { + stat = osErrorParameter; + } + else { + #if (configQUEUE_REGISTRY_SIZE > 0) + vQueueUnregisterQueue (hQueue); + #endif + + stat = osOK; + vQueueDelete (hQueue); + } +#else + stat = osError; +#endif + + return (stat); +} + +/*---------------------------------------------------------------------------*/ + +/* Callback function prototypes */ +extern void vApplicationIdleHook (void); +extern void vApplicationTickHook (void); +extern void vApplicationMallocFailedHook (void); +extern void vApplicationDaemonTaskStartupHook (void); +extern void vApplicationStackOverflowHook (TaskHandle_t xTask, signed char *pcTaskName); + +/** + Dummy implementation of the callback function vApplicationIdleHook(). 
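+  It is defined __WEAK below, so an implementation provided by the application
+  takes precedence over this empty stub.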
+*/ +#if (configUSE_IDLE_HOOK == 1) +__WEAK void vApplicationIdleHook (void){} +#endif + +/** + Dummy implementation of the callback function vApplicationTickHook(). +*/ +#if (configUSE_TICK_HOOK == 1) + __WEAK void vApplicationTickHook (void){} +#endif + +/** + Dummy implementation of the callback function vApplicationMallocFailedHook(). +*/ +#if (configUSE_MALLOC_FAILED_HOOK == 1) +__WEAK void vApplicationMallocFailedHook (void){} +#endif + +/** + Dummy implementation of the callback function vApplicationDaemonTaskStartupHook(). +*/ +#if (configUSE_DAEMON_TASK_STARTUP_HOOK == 1) +__WEAK void vApplicationDaemonTaskStartupHook (void){} +#endif + +/** + Dummy implementation of the callback function vApplicationStackOverflowHook(). +*/ +#if (configCHECK_FOR_STACK_OVERFLOW > 0) +__WEAK void vApplicationStackOverflowHook (TaskHandle_t xTask, signed char *pcTaskName) { + (void)xTask; + (void)pcTaskName; +} +#endif + +/*---------------------------------------------------------------------------*/ + +/* External Idle and Timer task static memory allocation functions */ +extern void vApplicationGetIdleTaskMemory (StaticTask_t **ppxIdleTaskTCBBuffer, StackType_t **ppxIdleTaskStackBuffer, uint32_t *pulIdleTaskStackSize); +extern void vApplicationGetTimerTaskMemory (StaticTask_t **ppxTimerTaskTCBBuffer, StackType_t **ppxTimerTaskStackBuffer, uint32_t *pulTimerTaskStackSize); + +/* Idle task control block and stack */ +static StaticTask_t Idle_TCB; +static StackType_t Idle_Stack[configMINIMAL_STACK_SIZE]; + +/* Timer task control block and stack */ +static StaticTask_t Timer_TCB; +static StackType_t Timer_Stack[configTIMER_TASK_STACK_DEPTH]; + +/* + vApplicationGetIdleTaskMemory gets called when configSUPPORT_STATIC_ALLOCATION + equals to 1 and is required for static memory allocation support. +*/ +void vApplicationGetIdleTaskMemory (StaticTask_t **ppxIdleTaskTCBBuffer, StackType_t **ppxIdleTaskStackBuffer, uint32_t *pulIdleTaskStackSize) { + *ppxIdleTaskTCBBuffer = &Idle_TCB; + *ppxIdleTaskStackBuffer = &Idle_Stack[0]; + *pulIdleTaskStackSize = (uint32_t)configMINIMAL_STACK_SIZE; +} + +/* + vApplicationGetTimerTaskMemory gets called when configSUPPORT_STATIC_ALLOCATION + equals to 1 and is required for static memory allocation support. +*/ +void vApplicationGetTimerTaskMemory (StaticTask_t **ppxTimerTaskTCBBuffer, StackType_t **ppxTimerTaskStackBuffer, uint32_t *pulTimerTaskStackSize) { + *ppxTimerTaskTCBBuffer = &Timer_TCB; + *ppxTimerTaskStackBuffer = &Timer_Stack[0]; + *pulTimerTaskStackSize = (uint32_t)configTIMER_TASK_STACK_DEPTH; +} diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/FreeRTOS/Source/CMSIS_RTOS_V2/cmsis_os2.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/FreeRTOS/Source/CMSIS_RTOS_V2/cmsis_os2.h new file mode 100644 index 0000000..0d3634d --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/FreeRTOS/Source/CMSIS_RTOS_V2/cmsis_os2.h @@ -0,0 +1,734 @@ +/* -------------------------------------------------------------------------- + * Portions Copyright © 2017 STMicroelectronics International N.V. All rights reserved. + * Portions Copyright (c) 2013-2017 ARM Limited. All rights reserved. + * -------------------------------------------------------------------------- + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Name: cmsis_os2.h + * Purpose: CMSIS RTOS2 wrapper for FreeRTOS + * + *---------------------------------------------------------------------------*/ + +#ifndef CMSIS_OS2_H_ +#define CMSIS_OS2_H_ + +#ifndef __NO_RETURN +#if defined(__CC_ARM) +#define __NO_RETURN __declspec(noreturn) +#elif defined(__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050) +#define __NO_RETURN __attribute__((__noreturn__)) +#elif defined(__GNUC__) +#define __NO_RETURN __attribute__((__noreturn__)) +#elif defined(__ICCARM__) +#define __NO_RETURN __noreturn +#else +#define __NO_RETURN +#endif +#endif + +#include +#include + +#ifdef __cplusplus +extern "C" +{ +#endif + + +// ==== Enumerations, structures, defines ==== + +/// Version information. +typedef struct { + uint32_t api; ///< API version (major.minor.rev: mmnnnrrrr dec). + uint32_t kernel; ///< Kernel version (major.minor.rev: mmnnnrrrr dec). +} osVersion_t; + +/// Kernel state. +typedef enum { + osKernelInactive = 0, ///< Inactive. + osKernelReady = 1, ///< Ready. + osKernelRunning = 2, ///< Running. + osKernelLocked = 3, ///< Locked. + osKernelSuspended = 4, ///< Suspended. + osKernelError = -1, ///< Error. + osKernelReserved = 0x7FFFFFFFU ///< Prevents enum down-size compiler optimization. +} osKernelState_t; + +/// Thread state. +typedef enum { + osThreadInactive = 0, ///< Inactive. + osThreadReady = 1, ///< Ready. + osThreadRunning = 2, ///< Running. + osThreadBlocked = 3, ///< Blocked. + osThreadTerminated = 4, ///< Terminated. + osThreadError = -1, ///< Error. + osThreadReserved = 0x7FFFFFFF ///< Prevents enum down-size compiler optimization. +} osThreadState_t; + +/// Priority values. +typedef enum { + osPriorityNone = 0, ///< No priority (not initialized). + osPriorityIdle = 1, ///< Reserved for Idle thread. 
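+  // Note: the cmsis_os2.c wrapper above passes these numeric values straight
+  // through as FreeRTOS priorities, so configMAX_PRIORITIES should be large
+  // enough for the values actually used (FreeRTOS clamps anything higher to
+  // configMAX_PRIORITIES - 1).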
+ osPriorityLow = 8, ///< Priority: low + osPriorityLow1 = 8+1, ///< Priority: low + 1 + osPriorityLow2 = 8+2, ///< Priority: low + 2 + osPriorityLow3 = 8+3, ///< Priority: low + 3 + osPriorityLow4 = 8+4, ///< Priority: low + 4 + osPriorityLow5 = 8+5, ///< Priority: low + 5 + osPriorityLow6 = 8+6, ///< Priority: low + 6 + osPriorityLow7 = 8+7, ///< Priority: low + 7 + osPriorityBelowNormal = 16, ///< Priority: below normal + osPriorityBelowNormal1 = 16+1, ///< Priority: below normal + 1 + osPriorityBelowNormal2 = 16+2, ///< Priority: below normal + 2 + osPriorityBelowNormal3 = 16+3, ///< Priority: below normal + 3 + osPriorityBelowNormal4 = 16+4, ///< Priority: below normal + 4 + osPriorityBelowNormal5 = 16+5, ///< Priority: below normal + 5 + osPriorityBelowNormal6 = 16+6, ///< Priority: below normal + 6 + osPriorityBelowNormal7 = 16+7, ///< Priority: below normal + 7 + osPriorityNormal = 24, ///< Priority: normal + osPriorityNormal1 = 24+1, ///< Priority: normal + 1 + osPriorityNormal2 = 24+2, ///< Priority: normal + 2 + osPriorityNormal3 = 24+3, ///< Priority: normal + 3 + osPriorityNormal4 = 24+4, ///< Priority: normal + 4 + osPriorityNormal5 = 24+5, ///< Priority: normal + 5 + osPriorityNormal6 = 24+6, ///< Priority: normal + 6 + osPriorityNormal7 = 24+7, ///< Priority: normal + 7 + osPriorityAboveNormal = 32, ///< Priority: above normal + osPriorityAboveNormal1 = 32+1, ///< Priority: above normal + 1 + osPriorityAboveNormal2 = 32+2, ///< Priority: above normal + 2 + osPriorityAboveNormal3 = 32+3, ///< Priority: above normal + 3 + osPriorityAboveNormal4 = 32+4, ///< Priority: above normal + 4 + osPriorityAboveNormal5 = 32+5, ///< Priority: above normal + 5 + osPriorityAboveNormal6 = 32+6, ///< Priority: above normal + 6 + osPriorityAboveNormal7 = 32+7, ///< Priority: above normal + 7 + osPriorityHigh = 40, ///< Priority: high + osPriorityHigh1 = 40+1, ///< Priority: high + 1 + osPriorityHigh2 = 40+2, ///< Priority: high + 2 + osPriorityHigh3 = 40+3, ///< Priority: high + 3 + osPriorityHigh4 = 40+4, ///< Priority: high + 4 + osPriorityHigh5 = 40+5, ///< Priority: high + 5 + osPriorityHigh6 = 40+6, ///< Priority: high + 6 + osPriorityHigh7 = 40+7, ///< Priority: high + 7 + osPriorityRealtime = 48, ///< Priority: realtime + osPriorityRealtime1 = 48+1, ///< Priority: realtime + 1 + osPriorityRealtime2 = 48+2, ///< Priority: realtime + 2 + osPriorityRealtime3 = 48+3, ///< Priority: realtime + 3 + osPriorityRealtime4 = 48+4, ///< Priority: realtime + 4 + osPriorityRealtime5 = 48+5, ///< Priority: realtime + 5 + osPriorityRealtime6 = 48+6, ///< Priority: realtime + 6 + osPriorityRealtime7 = 48+7, ///< Priority: realtime + 7 + osPriorityISR = 56, ///< Reserved for ISR deferred thread. + osPriorityError = -1, ///< System cannot determine priority or illegal priority. + osPriorityReserved = 0x7FFFFFFF ///< Prevents enum down-size compiler optimization. +} osPriority_t; + +/// Entry point of a thread. +typedef void (*osThreadFunc_t) (void *argument); + +/// Timer callback function. +typedef void (*osTimerFunc_t) (void *argument); + +/// Timer type. +typedef enum { + osTimerOnce = 0, ///< One-shot timer. + osTimerPeriodic = 1 ///< Repeating timer. +} osTimerType_t; + +// Timeout value. +#define osWaitForever 0xFFFFFFFFU ///< Wait forever timeout value. + +// Flags options (\ref osThreadFlagsWait and \ref osEventFlagsWait). +#define osFlagsWaitAny 0x00000000U ///< Wait for any flag (default). +#define osFlagsWaitAll 0x00000001U ///< Wait for all flags. 
+#define osFlagsNoClear 0x00000002U ///< Do not clear flags which have been specified to wait for. + +// Flags errors (returned by osThreadFlagsXxxx and osEventFlagsXxxx). +#define osFlagsError 0x80000000U ///< Error indicator. +#define osFlagsErrorUnknown 0xFFFFFFFFU ///< osError (-1). +#define osFlagsErrorTimeout 0xFFFFFFFEU ///< osErrorTimeout (-2). +#define osFlagsErrorResource 0xFFFFFFFDU ///< osErrorResource (-3). +#define osFlagsErrorParameter 0xFFFFFFFCU ///< osErrorParameter (-4). +#define osFlagsErrorISR 0xFFFFFFFAU ///< osErrorISR (-6). + +// Thread attributes (attr_bits in \ref osThreadAttr_t). +#define osThreadDetached 0x00000000U ///< Thread created in detached mode (default) +#define osThreadJoinable 0x00000001U ///< Thread created in joinable mode + +// Mutex attributes (attr_bits in \ref osMutexAttr_t). +#define osMutexRecursive 0x00000001U ///< Recursive mutex. +#define osMutexPrioInherit 0x00000002U ///< Priority inherit protocol. +#define osMutexRobust 0x00000008U ///< Robust mutex. + +/// Status code values returned by CMSIS-RTOS functions. +typedef enum { + osOK = 0, ///< Operation completed successfully. + osError = -1, ///< Unspecified RTOS error: run-time error but no other error message fits. + osErrorTimeout = -2, ///< Operation not completed within the timeout period. + osErrorResource = -3, ///< Resource not available. + osErrorParameter = -4, ///< Parameter error. + osErrorNoMemory = -5, ///< System is out of memory: it was impossible to allocate or reserve memory for the operation. + osErrorISR = -6, ///< Not allowed in ISR context: the function cannot be called from interrupt service routines. + osStatusReserved = 0x7FFFFFFF ///< Prevents enum down-size compiler optimization. +} osStatus_t; + + +/// \details Thread ID identifies the thread. +typedef void *osThreadId_t; + +/// \details Timer ID identifies the timer. +typedef void *osTimerId_t; + +/// \details Event Flags ID identifies the event flags. +typedef void *osEventFlagsId_t; + +/// \details Mutex ID identifies the mutex. +typedef void *osMutexId_t; + +/// \details Semaphore ID identifies the semaphore. +typedef void *osSemaphoreId_t; + +/// \details Memory Pool ID identifies the memory pool. +typedef void *osMemoryPoolId_t; + +/// \details Message Queue ID identifies the message queue. +typedef void *osMessageQueueId_t; + + +#ifndef TZ_MODULEID_T +#define TZ_MODULEID_T +/// \details Data type that identifies secure software modules called by a process. +typedef uint32_t TZ_ModuleId_t; +#endif + + +/// Attributes structure for thread. +typedef struct { + const char *name; ///< name of the thread + uint32_t attr_bits; ///< attribute bits + void *cb_mem; ///< memory for control block + uint32_t cb_size; ///< size of provided memory for control block + void *stack_mem; ///< memory for stack + uint32_t stack_size; ///< size of stack + osPriority_t priority; ///< initial thread priority (default: osPriorityNormal) + TZ_ModuleId_t tz_module; ///< TrustZone module identifier + uint32_t reserved; ///< reserved (must be 0) +} osThreadAttr_t; + +/// Attributes structure for timer. +typedef struct { + const char *name; ///< name of the timer + uint32_t attr_bits; ///< attribute bits + void *cb_mem; ///< memory for control block + uint32_t cb_size; ///< size of provided memory for control block +} osTimerAttr_t; + +/// Attributes structure for event flags. 
+typedef struct { + const char *name; ///< name of the event flags + uint32_t attr_bits; ///< attribute bits + void *cb_mem; ///< memory for control block + uint32_t cb_size; ///< size of provided memory for control block +} osEventFlagsAttr_t; + +/// Attributes structure for mutex. +typedef struct { + const char *name; ///< name of the mutex + uint32_t attr_bits; ///< attribute bits + void *cb_mem; ///< memory for control block + uint32_t cb_size; ///< size of provided memory for control block +} osMutexAttr_t; + +/// Attributes structure for semaphore. +typedef struct { + const char *name; ///< name of the semaphore + uint32_t attr_bits; ///< attribute bits + void *cb_mem; ///< memory for control block + uint32_t cb_size; ///< size of provided memory for control block +} osSemaphoreAttr_t; + +/// Attributes structure for memory pool. +typedef struct { + const char *name; ///< name of the memory pool + uint32_t attr_bits; ///< attribute bits + void *cb_mem; ///< memory for control block + uint32_t cb_size; ///< size of provided memory for control block + void *mp_mem; ///< memory for data storage + uint32_t mp_size; ///< size of provided memory for data storage +} osMemoryPoolAttr_t; + +/// Attributes structure for message queue. +typedef struct { + const char *name; ///< name of the message queue + uint32_t attr_bits; ///< attribute bits + void *cb_mem; ///< memory for control block + uint32_t cb_size; ///< size of provided memory for control block + void *mq_mem; ///< memory for data storage + uint32_t mq_size; ///< size of provided memory for data storage +} osMessageQueueAttr_t; + + +// ==== Kernel Management Functions ==== + +/// Initialize the RTOS Kernel. +/// \return status code that indicates the execution status of the function. +osStatus_t osKernelInitialize (void); + +/// Get RTOS Kernel Information. +/// \param[out] version pointer to buffer for retrieving version information. +/// \param[out] id_buf pointer to buffer for retrieving kernel identification string. +/// \param[in] id_size size of buffer for kernel identification string. +/// \return status code that indicates the execution status of the function. +osStatus_t osKernelGetInfo (osVersion_t *version, char *id_buf, uint32_t id_size); + +/// Get the current RTOS Kernel state. +/// \return current RTOS Kernel state. +osKernelState_t osKernelGetState (void); + +/// Start the RTOS Kernel scheduler. +/// \return status code that indicates the execution status of the function. +osStatus_t osKernelStart (void); + +/// Lock the RTOS Kernel scheduler. +/// \return previous lock state (1 - locked, 0 - not locked, error code if negative). +int32_t osKernelLock (void); + +/// Unlock the RTOS Kernel scheduler. +/// \return previous lock state (1 - locked, 0 - not locked, error code if negative). +int32_t osKernelUnlock (void); + +/// Restore the RTOS Kernel scheduler lock state. +/// \param[in] lock lock state obtained by \ref osKernelLock or \ref osKernelUnlock. +/// \return new lock state (1 - locked, 0 - not locked, error code if negative). +int32_t osKernelRestoreLock (int32_t lock); + +/// Suspend the RTOS Kernel scheduler. +/// \return time in ticks, for how long the system can sleep or power-down. +uint32_t osKernelSuspend (void); + +/// Resume the RTOS Kernel scheduler. +/// \param[in] sleep_ticks time in ticks for how long the system was in sleep or power-down mode. +void osKernelResume (uint32_t sleep_ticks); + +/// Get the RTOS kernel tick count. +/// \return RTOS kernel current tick count. 
+uint32_t osKernelGetTickCount (void); + +/// Get the RTOS kernel tick frequency. +/// \return frequency of the kernel tick in hertz, i.e. kernel ticks per second. +uint32_t osKernelGetTickFreq (void); + +/// Get the RTOS kernel system timer count. +/// \return RTOS kernel current system timer count as 32-bit value. +uint32_t osKernelGetSysTimerCount (void); + +/// Get the RTOS kernel system timer frequency. +/// \return frequency of the system timer in hertz, i.e. timer ticks per second. +uint32_t osKernelGetSysTimerFreq (void); + + +// ==== Thread Management Functions ==== + +/// Create a thread and add it to Active Threads. +/// \param[in] func thread function. +/// \param[in] argument pointer that is passed to the thread function as start argument. +/// \param[in] attr thread attributes; NULL: default values. +/// \return thread ID for reference by other functions or NULL in case of error. +osThreadId_t osThreadNew (osThreadFunc_t func, void *argument, const osThreadAttr_t *attr); + +/// Get name of a thread. +/// \param[in] thread_id thread ID obtained by \ref osThreadNew or \ref osThreadGetId. +/// \return name as NULL terminated string. +const char *osThreadGetName (osThreadId_t thread_id); + +/// Return the thread ID of the current running thread. +/// \return thread ID for reference by other functions or NULL in case of error. +osThreadId_t osThreadGetId (void); + +/// Get current thread state of a thread. +/// \param[in] thread_id thread ID obtained by \ref osThreadNew or \ref osThreadGetId. +/// \return current thread state of the specified thread. +osThreadState_t osThreadGetState (osThreadId_t thread_id); + +/// Get stack size of a thread. +/// \param[in] thread_id thread ID obtained by \ref osThreadNew or \ref osThreadGetId. +/// \return stack size in bytes. +uint32_t osThreadGetStackSize (osThreadId_t thread_id); + +/// Get available stack space of a thread based on stack watermark recording during execution. +/// \param[in] thread_id thread ID obtained by \ref osThreadNew or \ref osThreadGetId. +/// \return remaining stack space in bytes. +uint32_t osThreadGetStackSpace (osThreadId_t thread_id); + +/// Change priority of a thread. +/// \param[in] thread_id thread ID obtained by \ref osThreadNew or \ref osThreadGetId. +/// \param[in] priority new priority value for the thread function. +/// \return status code that indicates the execution status of the function. +osStatus_t osThreadSetPriority (osThreadId_t thread_id, osPriority_t priority); + +/// Get current priority of a thread. +/// \param[in] thread_id thread ID obtained by \ref osThreadNew or \ref osThreadGetId. +/// \return current priority value of the specified thread. +osPriority_t osThreadGetPriority (osThreadId_t thread_id); + +/// Pass control to next thread that is in state \b READY. +/// \return status code that indicates the execution status of the function. +osStatus_t osThreadYield (void); + +/// Suspend execution of a thread. +/// \param[in] thread_id thread ID obtained by \ref osThreadNew or \ref osThreadGetId. +/// \return status code that indicates the execution status of the function. +osStatus_t osThreadSuspend (osThreadId_t thread_id); + +/// Resume execution of a thread. +/// \param[in] thread_id thread ID obtained by \ref osThreadNew or \ref osThreadGetId. +/// \return status code that indicates the execution status of the function. +osStatus_t osThreadResume (osThreadId_t thread_id); + +/// Detach a thread (thread storage can be reclaimed when thread terminates). 
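+/// \note The accompanying cmsis_os2.c rejects osThreadJoinable in osThreadNew,
+///       so threads created through this wrapper are always detached.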
+/// \param[in] thread_id thread ID obtained by \ref osThreadNew or \ref osThreadGetId. +/// \return status code that indicates the execution status of the function. +osStatus_t osThreadDetach (osThreadId_t thread_id); + +/// Wait for specified thread to terminate. +/// \param[in] thread_id thread ID obtained by \ref osThreadNew or \ref osThreadGetId. +/// \return status code that indicates the execution status of the function. +osStatus_t osThreadJoin (osThreadId_t thread_id); + +/// Terminate execution of current running thread. +__NO_RETURN void osThreadExit (void); + +/// Terminate execution of a thread. +/// \param[in] thread_id thread ID obtained by \ref osThreadNew or \ref osThreadGetId. +/// \return status code that indicates the execution status of the function. +osStatus_t osThreadTerminate (osThreadId_t thread_id); + +/// Get number of active threads. +/// \return number of active threads. +uint32_t osThreadGetCount (void); + +/// Enumerate active threads. +/// \param[out] thread_array pointer to array for retrieving thread IDs. +/// \param[in] array_items maximum number of items in array for retrieving thread IDs. +/// \return number of enumerated threads. +uint32_t osThreadEnumerate (osThreadId_t *thread_array, uint32_t array_items); + + +// ==== Thread Flags Functions ==== + +/// Set the specified Thread Flags of a thread. +/// \param[in] thread_id thread ID obtained by \ref osThreadNew or \ref osThreadGetId. +/// \param[in] flags specifies the flags of the thread that shall be set. +/// \return thread flags after setting or error code if highest bit set. +uint32_t osThreadFlagsSet (osThreadId_t thread_id, uint32_t flags); + +/// Clear the specified Thread Flags of current running thread. +/// \param[in] flags specifies the flags of the thread that shall be cleared. +/// \return thread flags before clearing or error code if highest bit set. +uint32_t osThreadFlagsClear (uint32_t flags); + +/// Get the current Thread Flags of current running thread. +/// \return current thread flags. +uint32_t osThreadFlagsGet (void); + +/// Wait for one or more Thread Flags of the current running thread to become signaled. +/// \param[in] flags specifies the flags to wait for. +/// \param[in] options specifies flags options (osFlagsXxxx). +/// \param[in] timeout \ref CMSIS_RTOS_TimeOutValue or 0 in case of no time-out. +/// \return thread flags before clearing or error code if highest bit set. +uint32_t osThreadFlagsWait (uint32_t flags, uint32_t options, uint32_t timeout); + + +// ==== Generic Wait Functions ==== + +/// Wait for Timeout (Time Delay). +/// \param[in] ticks \ref CMSIS_RTOS_TimeOutValue "time ticks" value +/// \return status code that indicates the execution status of the function. +osStatus_t osDelay (uint32_t ticks); + +/// Wait until specified time. +/// \param[in] ticks absolute time in ticks +/// \return status code that indicates the execution status of the function. +osStatus_t osDelayUntil (uint32_t ticks); + + +// ==== Timer Management Functions ==== + +/// Create and Initialize a timer. +/// \param[in] func function pointer to callback function. +/// \param[in] type \ref osTimerOnce for one-shot or \ref osTimerPeriodic for periodic behavior. +/// \param[in] argument argument to the timer callback function. +/// \param[in] attr timer attributes; NULL: default values. +/// \return timer ID for reference by other functions or NULL in case of error. 
+osTimerId_t osTimerNew (osTimerFunc_t func, osTimerType_t type, void *argument, const osTimerAttr_t *attr); + +/// Get name of a timer. +/// \param[in] timer_id timer ID obtained by \ref osTimerNew. +/// \return name as NULL terminated string. +const char *osTimerGetName (osTimerId_t timer_id); + +/// Start or restart a timer. +/// \param[in] timer_id timer ID obtained by \ref osTimerNew. +/// \param[in] ticks \ref CMSIS_RTOS_TimeOutValue "time ticks" value of the timer. +/// \return status code that indicates the execution status of the function. +osStatus_t osTimerStart (osTimerId_t timer_id, uint32_t ticks); + +/// Stop a timer. +/// \param[in] timer_id timer ID obtained by \ref osTimerNew. +/// \return status code that indicates the execution status of the function. +osStatus_t osTimerStop (osTimerId_t timer_id); + +/// Check if a timer is running. +/// \param[in] timer_id timer ID obtained by \ref osTimerNew. +/// \return 0 not running, 1 running. +uint32_t osTimerIsRunning (osTimerId_t timer_id); + +/// Delete a timer. +/// \param[in] timer_id timer ID obtained by \ref osTimerNew. +/// \return status code that indicates the execution status of the function. +osStatus_t osTimerDelete (osTimerId_t timer_id); + + +// ==== Event Flags Management Functions ==== + +/// Create and Initialize an Event Flags object. +/// \param[in] attr event flags attributes; NULL: default values. +/// \return event flags ID for reference by other functions or NULL in case of error. +osEventFlagsId_t osEventFlagsNew (const osEventFlagsAttr_t *attr); + +/// Get name of an Event Flags object. +/// \param[in] ef_id event flags ID obtained by \ref osEventFlagsNew. +/// \return name as NULL terminated string. +const char *osEventFlagsGetName (osEventFlagsId_t ef_id); + +/// Set the specified Event Flags. +/// \param[in] ef_id event flags ID obtained by \ref osEventFlagsNew. +/// \param[in] flags specifies the flags that shall be set. +/// \return event flags after setting or error code if highest bit set. +uint32_t osEventFlagsSet (osEventFlagsId_t ef_id, uint32_t flags); + +/// Clear the specified Event Flags. +/// \param[in] ef_id event flags ID obtained by \ref osEventFlagsNew. +/// \param[in] flags specifies the flags that shall be cleared. +/// \return event flags before clearing or error code if highest bit set. +uint32_t osEventFlagsClear (osEventFlagsId_t ef_id, uint32_t flags); + +/// Get the current Event Flags. +/// \param[in] ef_id event flags ID obtained by \ref osEventFlagsNew. +/// \return current event flags. +uint32_t osEventFlagsGet (osEventFlagsId_t ef_id); + +/// Wait for one or more Event Flags to become signaled. +/// \param[in] ef_id event flags ID obtained by \ref osEventFlagsNew. +/// \param[in] flags specifies the flags to wait for. +/// \param[in] options specifies flags options (osFlagsXxxx). +/// \param[in] timeout \ref CMSIS_RTOS_TimeOutValue or 0 in case of no time-out. +/// \return event flags before clearing or error code if highest bit set. +uint32_t osEventFlagsWait (osEventFlagsId_t ef_id, uint32_t flags, uint32_t options, uint32_t timeout); + +/// Delete an Event Flags object. +/// \param[in] ef_id event flags ID obtained by \ref osEventFlagsNew. +/// \return status code that indicates the execution status of the function. +osStatus_t osEventFlagsDelete (osEventFlagsId_t ef_id); + + +// ==== Mutex Management Functions ==== + +/// Create and Initialize a Mutex object. +/// \param[in] attr mutex attributes; NULL: default values. 
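/*
 * Illustrative usage sketch (not part of this patch) tying the timer and
 * event-flags functions above together: a periodic timer callback sets a flag
 * that a thread waits on. EV_TICK, on_timer() and the period are hypothetical.
 */
#include "cmsis_os2.h"

#define EV_TICK 0x01U

static osEventFlagsId_t ev;
static osTimerId_t tick_timer;

static void on_timer(void *argument)
{
    (void)argument;
    (void)osEventFlagsSet(ev, EV_TICK);     /* runs in timer-service context */
}

static void setup_and_wait(void)
{
    ev         = osEventFlagsNew(NULL);                       /* default attributes */
    tick_timer = osTimerNew(on_timer, osTimerPeriodic, NULL, NULL);

    if ((ev != NULL) && (tick_timer != NULL) &&
        (osTimerStart(tick_timer, 100U) == osOK)) {           /* every 100 ticks */
        /* Block until the callback sets EV_TICK; the flag is cleared on return. */
        (void)osEventFlagsWait(ev, EV_TICK, osFlagsWaitAny, osWaitForever);
    }
}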
+/// \return mutex ID for reference by other functions or NULL in case of error. +osMutexId_t osMutexNew (const osMutexAttr_t *attr); + +/// Get name of a Mutex object. +/// \param[in] mutex_id mutex ID obtained by \ref osMutexNew. +/// \return name as NULL terminated string. +const char *osMutexGetName (osMutexId_t mutex_id); + +/// Acquire a Mutex or timeout if it is locked. +/// \param[in] mutex_id mutex ID obtained by \ref osMutexNew. +/// \param[in] timeout \ref CMSIS_RTOS_TimeOutValue or 0 in case of no time-out. +/// \return status code that indicates the execution status of the function. +osStatus_t osMutexAcquire (osMutexId_t mutex_id, uint32_t timeout); + +/// Release a Mutex that was acquired by \ref osMutexAcquire. +/// \param[in] mutex_id mutex ID obtained by \ref osMutexNew. +/// \return status code that indicates the execution status of the function. +osStatus_t osMutexRelease (osMutexId_t mutex_id); + +/// Get Thread which owns a Mutex object. +/// \param[in] mutex_id mutex ID obtained by \ref osMutexNew. +/// \return thread ID of owner thread or NULL when mutex was not acquired. +osThreadId_t osMutexGetOwner (osMutexId_t mutex_id); + +/// Delete a Mutex object. +/// \param[in] mutex_id mutex ID obtained by \ref osMutexNew. +/// \return status code that indicates the execution status of the function. +osStatus_t osMutexDelete (osMutexId_t mutex_id); + + +// ==== Semaphore Management Functions ==== + +/// Create and Initialize a Semaphore object. +/// \param[in] max_count maximum number of available tokens. +/// \param[in] initial_count initial number of available tokens. +/// \param[in] attr semaphore attributes; NULL: default values. +/// \return semaphore ID for reference by other functions or NULL in case of error. +osSemaphoreId_t osSemaphoreNew (uint32_t max_count, uint32_t initial_count, const osSemaphoreAttr_t *attr); + +/// Get name of a Semaphore object. +/// \param[in] semaphore_id semaphore ID obtained by \ref osSemaphoreNew. +/// \return name as NULL terminated string. +const char *osSemaphoreGetName (osSemaphoreId_t semaphore_id); + +/// Acquire a Semaphore token or timeout if no tokens are available. +/// \param[in] semaphore_id semaphore ID obtained by \ref osSemaphoreNew. +/// \param[in] timeout \ref CMSIS_RTOS_TimeOutValue or 0 in case of no time-out. +/// \return status code that indicates the execution status of the function. +osStatus_t osSemaphoreAcquire (osSemaphoreId_t semaphore_id, uint32_t timeout); + +/// Release a Semaphore token up to the initial maximum count. +/// \param[in] semaphore_id semaphore ID obtained by \ref osSemaphoreNew. +/// \return status code that indicates the execution status of the function. +osStatus_t osSemaphoreRelease (osSemaphoreId_t semaphore_id); + +/// Get current Semaphore token count. +/// \param[in] semaphore_id semaphore ID obtained by \ref osSemaphoreNew. +/// \return number of tokens available. +uint32_t osSemaphoreGetCount (osSemaphoreId_t semaphore_id); + +/// Delete a Semaphore object. +/// \param[in] semaphore_id semaphore ID obtained by \ref osSemaphoreNew. +/// \return status code that indicates the execution status of the function. +osStatus_t osSemaphoreDelete (osSemaphoreId_t semaphore_id); + + +// ==== Memory Pool Management Functions ==== + +/// Create and Initialize a Memory Pool object. +/// \param[in] block_count maximum number of memory blocks in memory pool. +/// \param[in] block_size memory block size in bytes. +/// \param[in] attr memory pool attributes; NULL: default values. 
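/*
 * Illustrative usage sketch (not part of this patch) for the mutex and
 * semaphore functions above: a mutex guards a shared counter and a counting
 * semaphore limits concurrent users of a resource. All names are hypothetical.
 */
#include <stdint.h>
#include "cmsis_os2.h"

static osMutexId_t     counter_mutex;
static osSemaphoreId_t slot_sem;
static uint32_t        shared_counter;

static void sync_init(void)
{
    counter_mutex = osMutexNew(NULL);              /* default (non-recursive) mutex */
    slot_sem      = osSemaphoreNew(4U, 4U, NULL);  /* 4 tokens, all free initially */
}

static void bump_counter(void)
{
    if (osMutexAcquire(counter_mutex, osWaitForever) == osOK) {
        shared_counter++;                          /* protected section */
        (void)osMutexRelease(counter_mutex);
    }
}

static void use_slot(void)
{
    if (osSemaphoreAcquire(slot_sem, 100U) == osOK) {   /* wait up to 100 ticks */
        /* ... use one of the four resource slots ... */
        (void)osSemaphoreRelease(slot_sem);
    }
}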
+/// \return memory pool ID for reference by other functions or NULL in case of error. +osMemoryPoolId_t osMemoryPoolNew (uint32_t block_count, uint32_t block_size, const osMemoryPoolAttr_t *attr); + +/// Get name of a Memory Pool object. +/// \param[in] mp_id memory pool ID obtained by \ref osMemoryPoolNew. +/// \return name as NULL terminated string. +const char *osMemoryPoolGetName (osMemoryPoolId_t mp_id); + +/// Allocate a memory block from a Memory Pool. +/// \param[in] mp_id memory pool ID obtained by \ref osMemoryPoolNew. +/// \param[in] timeout \ref CMSIS_RTOS_TimeOutValue or 0 in case of no time-out. +/// \return address of the allocated memory block or NULL in case of no memory is available. +void *osMemoryPoolAlloc (osMemoryPoolId_t mp_id, uint32_t timeout); + +/// Return an allocated memory block back to a Memory Pool. +/// \param[in] mp_id memory pool ID obtained by \ref osMemoryPoolNew. +/// \param[in] block address of the allocated memory block to be returned to the memory pool. +/// \return status code that indicates the execution status of the function. +osStatus_t osMemoryPoolFree (osMemoryPoolId_t mp_id, void *block); + +/// Get maximum number of memory blocks in a Memory Pool. +/// \param[in] mp_id memory pool ID obtained by \ref osMemoryPoolNew. +/// \return maximum number of memory blocks. +uint32_t osMemoryPoolGetCapacity (osMemoryPoolId_t mp_id); + +/// Get memory block size in a Memory Pool. +/// \param[in] mp_id memory pool ID obtained by \ref osMemoryPoolNew. +/// \return memory block size in bytes. +uint32_t osMemoryPoolGetBlockSize (osMemoryPoolId_t mp_id); + +/// Get number of memory blocks used in a Memory Pool. +/// \param[in] mp_id memory pool ID obtained by \ref osMemoryPoolNew. +/// \return number of memory blocks used. +uint32_t osMemoryPoolGetCount (osMemoryPoolId_t mp_id); + +/// Get number of memory blocks available in a Memory Pool. +/// \param[in] mp_id memory pool ID obtained by \ref osMemoryPoolNew. +/// \return number of memory blocks available. +uint32_t osMemoryPoolGetSpace (osMemoryPoolId_t mp_id); + +/// Delete a Memory Pool object. +/// \param[in] mp_id memory pool ID obtained by \ref osMemoryPoolNew. +/// \return status code that indicates the execution status of the function. +osStatus_t osMemoryPoolDelete (osMemoryPoolId_t mp_id); + + +// ==== Message Queue Management Functions ==== + +/// Create and Initialize a Message Queue object. +/// \param[in] msg_count maximum number of messages in queue. +/// \param[in] msg_size maximum message size in bytes. +/// \param[in] attr message queue attributes; NULL: default values. +/// \return message queue ID for reference by other functions or NULL in case of error. +osMessageQueueId_t osMessageQueueNew (uint32_t msg_count, uint32_t msg_size, const osMessageQueueAttr_t *attr); + +/// Get name of a Message Queue object. +/// \param[in] mq_id message queue ID obtained by \ref osMessageQueueNew. +/// \return name as NULL terminated string. +const char *osMessageQueueGetName (osMessageQueueId_t mq_id); + +/// Put a Message into a Queue or timeout if Queue is full. +/// \param[in] mq_id message queue ID obtained by \ref osMessageQueueNew. +/// \param[in] msg_ptr pointer to buffer with message to put into a queue. +/// \param[in] msg_prio message priority. +/// \param[in] timeout \ref CMSIS_RTOS_TimeOutValue or 0 in case of no time-out. +/// \return status code that indicates the execution status of the function. 
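/*
 * Illustrative usage sketch (not part of this patch) combining the memory-pool
 * functions above with the message-queue functions declared alongside:
 * fixed-size blocks come from a pool and their pointers travel through a
 * queue. The msg_t type and all names are hypothetical.
 */
#include <stdint.h>
#include "cmsis_os2.h"

typedef struct { uint32_t id; uint8_t payload[32]; } msg_t;

static osMemoryPoolId_t   pool;
static osMessageQueueId_t queue;

static void channel_init(void)
{
    pool  = osMemoryPoolNew(8U, sizeof(msg_t), NULL);        /* 8 blocks */
    queue = osMessageQueueNew(8U, sizeof(msg_t *), NULL);    /* queue of pointers */
}

static void send_one(uint32_t id)
{
    msg_t *m = (msg_t *)osMemoryPoolAlloc(pool, 0U);          /* no wait */
    if (m != NULL) {
        m->id = id;
        if (osMessageQueuePut(queue, &m, 0U, 10U) != osOK) {  /* priority 0 */
            (void)osMemoryPoolFree(pool, m);                  /* put failed: return block */
        }
    }
}

static void receive_one(void)
{
    msg_t *m;
    if (osMessageQueueGet(queue, &m, NULL, osWaitForever) == osOK) {
        /* ... consume m->payload ... */
        (void)osMemoryPoolFree(pool, m);
    }
}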
+osStatus_t osMessageQueuePut (osMessageQueueId_t mq_id, const void *msg_ptr, uint8_t msg_prio, uint32_t timeout); + +/// Get a Message from a Queue or timeout if Queue is empty. +/// \param[in] mq_id message queue ID obtained by \ref osMessageQueueNew. +/// \param[out] msg_ptr pointer to buffer for message to get from a queue. +/// \param[out] msg_prio pointer to buffer for message priority or NULL. +/// \param[in] timeout \ref CMSIS_RTOS_TimeOutValue or 0 in case of no time-out. +/// \return status code that indicates the execution status of the function. +osStatus_t osMessageQueueGet (osMessageQueueId_t mq_id, void *msg_ptr, uint8_t *msg_prio, uint32_t timeout); + +/// Get maximum number of messages in a Message Queue. +/// \param[in] mq_id message queue ID obtained by \ref osMessageQueueNew. +/// \return maximum number of messages. +uint32_t osMessageQueueGetCapacity (osMessageQueueId_t mq_id); + +/// Get maximum message size in a Memory Pool. +/// \param[in] mq_id message queue ID obtained by \ref osMessageQueueNew. +/// \return maximum message size in bytes. +uint32_t osMessageQueueGetMsgSize (osMessageQueueId_t mq_id); + +/// Get number of queued messages in a Message Queue. +/// \param[in] mq_id message queue ID obtained by \ref osMessageQueueNew. +/// \return number of queued messages. +uint32_t osMessageQueueGetCount (osMessageQueueId_t mq_id); + +/// Get number of available slots for messages in a Message Queue. +/// \param[in] mq_id message queue ID obtained by \ref osMessageQueueNew. +/// \return number of available slots for messages. +uint32_t osMessageQueueGetSpace (osMessageQueueId_t mq_id); + +/// Reset a Message Queue to initial empty state. +/// \param[in] mq_id message queue ID obtained by \ref osMessageQueueNew. +/// \return status code that indicates the execution status of the function. +osStatus_t osMessageQueueReset (osMessageQueueId_t mq_id); + +/// Delete a Message Queue object. +/// \param[in] mq_id message queue ID obtained by \ref osMessageQueueNew. +/// \return status code that indicates the execution status of the function. +osStatus_t osMessageQueueDelete (osMessageQueueId_t mq_id); + + +#ifdef __cplusplus +} +#endif + +#endif // CMSIS_OS2_H_ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/FreeRTOS/Source/croutine.c b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/FreeRTOS/Source/croutine.c new file mode 100644 index 0000000..565691b --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/FreeRTOS/Source/croutine.c @@ -0,0 +1,353 @@ +/* + * FreeRTOS Kernel V10.2.1 + * Copyright (C) 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * http://www.FreeRTOS.org + * http://aws.amazon.com/freertos + * + * 1 tab == 4 spaces! + */ + +#include "FreeRTOS.h" +#include "task.h" +#include "croutine.h" + +/* Remove the whole file is co-routines are not being used. */ +#if( configUSE_CO_ROUTINES != 0 ) + +/* + * Some kernel aware debuggers require data to be viewed to be global, rather + * than file scope. + */ +#ifdef portREMOVE_STATIC_QUALIFIER + #define static +#endif + + +/* Lists for ready and blocked co-routines. --------------------*/ +static List_t pxReadyCoRoutineLists[ configMAX_CO_ROUTINE_PRIORITIES ]; /*< Prioritised ready co-routines. */ +static List_t xDelayedCoRoutineList1; /*< Delayed co-routines. */ +static List_t xDelayedCoRoutineList2; /*< Delayed co-routines (two lists are used - one for delays that have overflowed the current tick count. */ +static List_t * pxDelayedCoRoutineList; /*< Points to the delayed co-routine list currently being used. */ +static List_t * pxOverflowDelayedCoRoutineList; /*< Points to the delayed co-routine list currently being used to hold co-routines that have overflowed the current tick count. */ +static List_t xPendingReadyCoRoutineList; /*< Holds co-routines that have been readied by an external event. They cannot be added directly to the ready lists as the ready lists cannot be accessed by interrupts. */ + +/* Other file private variables. --------------------------------*/ +CRCB_t * pxCurrentCoRoutine = NULL; +static UBaseType_t uxTopCoRoutineReadyPriority = 0; +static TickType_t xCoRoutineTickCount = 0, xLastTickCount = 0, xPassedTicks = 0; + +/* The initial state of the co-routine when it is created. */ +#define corINITIAL_STATE ( 0 ) + +/* + * Place the co-routine represented by pxCRCB into the appropriate ready queue + * for the priority. It is inserted at the end of the list. + * + * This macro accesses the co-routine ready lists and therefore must not be + * used from within an ISR. + */ +#define prvAddCoRoutineToReadyQueue( pxCRCB ) \ +{ \ + if( pxCRCB->uxPriority > uxTopCoRoutineReadyPriority ) \ + { \ + uxTopCoRoutineReadyPriority = pxCRCB->uxPriority; \ + } \ + vListInsertEnd( ( List_t * ) &( pxReadyCoRoutineLists[ pxCRCB->uxPriority ] ), &( pxCRCB->xGenericListItem ) ); \ +} + +/* + * Utility to ready all the lists used by the scheduler. This is called + * automatically upon the creation of the first co-routine. + */ +static void prvInitialiseCoRoutineLists( void ); + +/* + * Co-routines that are readied by an interrupt cannot be placed directly into + * the ready lists (there is no mutual exclusion). Instead they are placed in + * in the pending ready list in order that they can later be moved to the ready + * list by the co-routine scheduler. + */ +static void prvCheckPendingReadyList( void ); + +/* + * Macro that looks at the list of co-routines that are currently delayed to + * see if any require waking. + * + * Co-routines are stored in the queue in the order of their wake time - + * meaning once one co-routine has been found whose timer has not expired + * we need not look any further down the list. 
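/*
 * Illustrative application-level sketch (not part of this patch) of the
 * co-routine machinery set up above, assuming configUSE_CO_ROUTINES is 1 in
 * FreeRTOSConfig.h. Locals inside a co-routine must be static because the
 * stack is not preserved across crDELAY(). Names are hypothetical.
 */
#include "FreeRTOS.h"
#include "croutine.h"

static void vBlinkCoRoutine( CoRoutineHandle_t xHandle, UBaseType_t uxIndex )
{
    static uint32_t ulCount = 0;      /* must be static, see note above */

    ( void ) uxIndex;

    crSTART( xHandle );               /* co-routines must begin with crSTART() */
    for( ;; )
    {
        ulCount++;
        /* ... toggle an LED, poll a peripheral, etc. ... */
        crDELAY( xHandle, 100 );      /* the CRCB ends up in the delayed lists above */
    }
    crEND();                          /* and finish with crEND() */
}

static void vCreateBlinker( void )
{
    /* Priority 0, index 0; the new CRCB is placed on the ready lists above. */
    ( void ) xCoRoutineCreate( vBlinkCoRoutine, 0, 0 );
}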
+ */ +static void prvCheckDelayedList( void ); + +/*-----------------------------------------------------------*/ + +BaseType_t xCoRoutineCreate( crCOROUTINE_CODE pxCoRoutineCode, UBaseType_t uxPriority, UBaseType_t uxIndex ) +{ +BaseType_t xReturn; +CRCB_t *pxCoRoutine; + + /* Allocate the memory that will store the co-routine control block. */ + pxCoRoutine = ( CRCB_t * ) pvPortMalloc( sizeof( CRCB_t ) ); + if( pxCoRoutine ) + { + /* If pxCurrentCoRoutine is NULL then this is the first co-routine to + be created and the co-routine data structures need initialising. */ + if( pxCurrentCoRoutine == NULL ) + { + pxCurrentCoRoutine = pxCoRoutine; + prvInitialiseCoRoutineLists(); + } + + /* Check the priority is within limits. */ + if( uxPriority >= configMAX_CO_ROUTINE_PRIORITIES ) + { + uxPriority = configMAX_CO_ROUTINE_PRIORITIES - 1; + } + + /* Fill out the co-routine control block from the function parameters. */ + pxCoRoutine->uxState = corINITIAL_STATE; + pxCoRoutine->uxPriority = uxPriority; + pxCoRoutine->uxIndex = uxIndex; + pxCoRoutine->pxCoRoutineFunction = pxCoRoutineCode; + + /* Initialise all the other co-routine control block parameters. */ + vListInitialiseItem( &( pxCoRoutine->xGenericListItem ) ); + vListInitialiseItem( &( pxCoRoutine->xEventListItem ) ); + + /* Set the co-routine control block as a link back from the ListItem_t. + This is so we can get back to the containing CRCB from a generic item + in a list. */ + listSET_LIST_ITEM_OWNER( &( pxCoRoutine->xGenericListItem ), pxCoRoutine ); + listSET_LIST_ITEM_OWNER( &( pxCoRoutine->xEventListItem ), pxCoRoutine ); + + /* Event lists are always in priority order. */ + listSET_LIST_ITEM_VALUE( &( pxCoRoutine->xEventListItem ), ( ( TickType_t ) configMAX_CO_ROUTINE_PRIORITIES - ( TickType_t ) uxPriority ) ); + + /* Now the co-routine has been initialised it can be added to the ready + list at the correct priority. */ + prvAddCoRoutineToReadyQueue( pxCoRoutine ); + + xReturn = pdPASS; + } + else + { + xReturn = errCOULD_NOT_ALLOCATE_REQUIRED_MEMORY; + } + + return xReturn; +} +/*-----------------------------------------------------------*/ + +void vCoRoutineAddToDelayedList( TickType_t xTicksToDelay, List_t *pxEventList ) +{ +TickType_t xTimeToWake; + + /* Calculate the time to wake - this may overflow but this is + not a problem. */ + xTimeToWake = xCoRoutineTickCount + xTicksToDelay; + + /* We must remove ourselves from the ready list before adding + ourselves to the blocked list as the same list item is used for + both lists. */ + ( void ) uxListRemove( ( ListItem_t * ) &( pxCurrentCoRoutine->xGenericListItem ) ); + + /* The list item will be inserted in wake time order. */ + listSET_LIST_ITEM_VALUE( &( pxCurrentCoRoutine->xGenericListItem ), xTimeToWake ); + + if( xTimeToWake < xCoRoutineTickCount ) + { + /* Wake time has overflowed. Place this item in the + overflow list. */ + vListInsert( ( List_t * ) pxOverflowDelayedCoRoutineList, ( ListItem_t * ) &( pxCurrentCoRoutine->xGenericListItem ) ); + } + else + { + /* The wake time has not overflowed, so we can use the + current block list. */ + vListInsert( ( List_t * ) pxDelayedCoRoutineList, ( ListItem_t * ) &( pxCurrentCoRoutine->xGenericListItem ) ); + } + + if( pxEventList ) + { + /* Also add the co-routine to an event list. If this is done then the + function must be called with interrupts disabled. 
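/*
 * Illustrative arithmetic only (not part of this patch), showing why
 * vCoRoutineAddToDelayedList() above can let the wake time overflow and simply
 * pick the overflow list when it wraps. The helper name is hypothetical.
 */
#include <stdint.h>

/* Returns non-zero when the wake time wrapped past the current tick count,
which mirrors the wake-time comparison used above to choose the overflow list. */
static int prvGoesToOverflowList( uint32_t ulNow, uint32_t ulDelay )
{
    uint32_t ulWake = ulNow + ulDelay;     /* unsigned wrap-around is well defined */
    return ( ulWake < ulNow ) ? 1 : 0;
}
/* e.g. prvGoesToOverflowList( 0xFFFFFFF0U, 0x20U ) returns 1: wake wrapped to 0x10. */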
*/ + vListInsert( pxEventList, &( pxCurrentCoRoutine->xEventListItem ) ); + } +} +/*-----------------------------------------------------------*/ + +static void prvCheckPendingReadyList( void ) +{ + /* Are there any co-routines waiting to get moved to the ready list? These + are co-routines that have been readied by an ISR. The ISR cannot access + the ready lists itself. */ + while( listLIST_IS_EMPTY( &xPendingReadyCoRoutineList ) == pdFALSE ) + { + CRCB_t *pxUnblockedCRCB; + + /* The pending ready list can be accessed by an ISR. */ + portDISABLE_INTERRUPTS(); + { + pxUnblockedCRCB = ( CRCB_t * ) listGET_OWNER_OF_HEAD_ENTRY( (&xPendingReadyCoRoutineList) ); + ( void ) uxListRemove( &( pxUnblockedCRCB->xEventListItem ) ); + } + portENABLE_INTERRUPTS(); + + ( void ) uxListRemove( &( pxUnblockedCRCB->xGenericListItem ) ); + prvAddCoRoutineToReadyQueue( pxUnblockedCRCB ); + } +} +/*-----------------------------------------------------------*/ + +static void prvCheckDelayedList( void ) +{ +CRCB_t *pxCRCB; + + xPassedTicks = xTaskGetTickCount() - xLastTickCount; + while( xPassedTicks ) + { + xCoRoutineTickCount++; + xPassedTicks--; + + /* If the tick count has overflowed we need to swap the ready lists. */ + if( xCoRoutineTickCount == 0 ) + { + List_t * pxTemp; + + /* Tick count has overflowed so we need to swap the delay lists. If there are + any items in pxDelayedCoRoutineList here then there is an error! */ + pxTemp = pxDelayedCoRoutineList; + pxDelayedCoRoutineList = pxOverflowDelayedCoRoutineList; + pxOverflowDelayedCoRoutineList = pxTemp; + } + + /* See if this tick has made a timeout expire. */ + while( listLIST_IS_EMPTY( pxDelayedCoRoutineList ) == pdFALSE ) + { + pxCRCB = ( CRCB_t * ) listGET_OWNER_OF_HEAD_ENTRY( pxDelayedCoRoutineList ); + + if( xCoRoutineTickCount < listGET_LIST_ITEM_VALUE( &( pxCRCB->xGenericListItem ) ) ) + { + /* Timeout not yet expired. */ + break; + } + + portDISABLE_INTERRUPTS(); + { + /* The event could have occurred just before this critical + section. If this is the case then the generic list item will + have been moved to the pending ready list and the following + line is still valid. Also the pvContainer parameter will have + been set to NULL so the following lines are also valid. */ + ( void ) uxListRemove( &( pxCRCB->xGenericListItem ) ); + + /* Is the co-routine waiting on an event also? */ + if( pxCRCB->xEventListItem.pxContainer ) + { + ( void ) uxListRemove( &( pxCRCB->xEventListItem ) ); + } + } + portENABLE_INTERRUPTS(); + + prvAddCoRoutineToReadyQueue( pxCRCB ); + } + } + + xLastTickCount = xCoRoutineTickCount; +} +/*-----------------------------------------------------------*/ + +void vCoRoutineSchedule( void ) +{ + /* See if any co-routines readied by events need moving to the ready lists. */ + prvCheckPendingReadyList(); + + /* See if any delayed co-routines have timed out. */ + prvCheckDelayedList(); + + /* Find the highest priority queue that contains ready co-routines. */ + while( listLIST_IS_EMPTY( &( pxReadyCoRoutineLists[ uxTopCoRoutineReadyPriority ] ) ) ) + { + if( uxTopCoRoutineReadyPriority == 0 ) + { + /* No more co-routines to check. */ + return; + } + --uxTopCoRoutineReadyPriority; + } + + /* listGET_OWNER_OF_NEXT_ENTRY walks through the list, so the co-routines + of the same priority get an equal share of the processor time. */ + listGET_OWNER_OF_NEXT_ENTRY( pxCurrentCoRoutine, &( pxReadyCoRoutineLists[ uxTopCoRoutineReadyPriority ] ) ); + + /* Call the co-routine. 
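/*
 * Illustrative sketch (not part of this patch) of how vCoRoutineSchedule()
 * is normally driven: repeatedly from the idle task hook. Assumes
 * configUSE_IDLE_HOOK and configUSE_CO_ROUTINES are both 1.
 */
#include "FreeRTOS.h"
#include "croutine.h"

void vApplicationIdleHook( void )
{
    /* Runs whenever the idle task gets the CPU; picks the highest-priority
    ready co-routine and calls it, as implemented in this file. */
    vCoRoutineSchedule();
}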
*/ + ( pxCurrentCoRoutine->pxCoRoutineFunction )( pxCurrentCoRoutine, pxCurrentCoRoutine->uxIndex ); + + return; +} +/*-----------------------------------------------------------*/ + +static void prvInitialiseCoRoutineLists( void ) +{ +UBaseType_t uxPriority; + + for( uxPriority = 0; uxPriority < configMAX_CO_ROUTINE_PRIORITIES; uxPriority++ ) + { + vListInitialise( ( List_t * ) &( pxReadyCoRoutineLists[ uxPriority ] ) ); + } + + vListInitialise( ( List_t * ) &xDelayedCoRoutineList1 ); + vListInitialise( ( List_t * ) &xDelayedCoRoutineList2 ); + vListInitialise( ( List_t * ) &xPendingReadyCoRoutineList ); + + /* Start with pxDelayedCoRoutineList using list1 and the + pxOverflowDelayedCoRoutineList using list2. */ + pxDelayedCoRoutineList = &xDelayedCoRoutineList1; + pxOverflowDelayedCoRoutineList = &xDelayedCoRoutineList2; +} +/*-----------------------------------------------------------*/ + +BaseType_t xCoRoutineRemoveFromEventList( const List_t *pxEventList ) +{ +CRCB_t *pxUnblockedCRCB; +BaseType_t xReturn; + + /* This function is called from within an interrupt. It can only access + event lists and the pending ready list. This function assumes that a + check has already been made to ensure pxEventList is not empty. */ + pxUnblockedCRCB = ( CRCB_t * ) listGET_OWNER_OF_HEAD_ENTRY( pxEventList ); + ( void ) uxListRemove( &( pxUnblockedCRCB->xEventListItem ) ); + vListInsertEnd( ( List_t * ) &( xPendingReadyCoRoutineList ), &( pxUnblockedCRCB->xEventListItem ) ); + + if( pxUnblockedCRCB->uxPriority >= pxCurrentCoRoutine->uxPriority ) + { + xReturn = pdTRUE; + } + else + { + xReturn = pdFALSE; + } + + return xReturn; +} + +#endif /* configUSE_CO_ROUTINES == 0 */ + diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/FreeRTOS/Source/event_groups.c b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/FreeRTOS/Source/event_groups.c new file mode 100644 index 0000000..81c1965 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/FreeRTOS/Source/event_groups.c @@ -0,0 +1,753 @@ +/* + * FreeRTOS Kernel V10.2.1 + * Copyright (C) 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * http://www.FreeRTOS.org + * http://aws.amazon.com/freertos + * + * 1 tab == 4 spaces! + */ + +/* Standard includes. 
*/ +#include + +/* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE prevents task.h from redefining +all the API functions to use the MPU wrappers. That should only be done when +task.h is included from an application file. */ +#define MPU_WRAPPERS_INCLUDED_FROM_API_FILE + +/* FreeRTOS includes. */ +#include "FreeRTOS.h" +#include "task.h" +#include "timers.h" +#include "event_groups.h" + +/* Lint e961, e750 and e9021 are suppressed as a MISRA exception justified +because the MPU ports require MPU_WRAPPERS_INCLUDED_FROM_API_FILE to be defined +for the header files above, but not in this file, in order to generate the +correct privileged Vs unprivileged linkage and placement. */ +#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE /*lint !e961 !e750 !e9021 See comment above. */ + +/* The following bit fields convey control information in a task's event list +item value. It is important they don't clash with the +taskEVENT_LIST_ITEM_VALUE_IN_USE definition. */ +#if configUSE_16_BIT_TICKS == 1 + #define eventCLEAR_EVENTS_ON_EXIT_BIT 0x0100U + #define eventUNBLOCKED_DUE_TO_BIT_SET 0x0200U + #define eventWAIT_FOR_ALL_BITS 0x0400U + #define eventEVENT_BITS_CONTROL_BYTES 0xff00U +#else + #define eventCLEAR_EVENTS_ON_EXIT_BIT 0x01000000UL + #define eventUNBLOCKED_DUE_TO_BIT_SET 0x02000000UL + #define eventWAIT_FOR_ALL_BITS 0x04000000UL + #define eventEVENT_BITS_CONTROL_BYTES 0xff000000UL +#endif + +typedef struct EventGroupDef_t +{ + EventBits_t uxEventBits; + List_t xTasksWaitingForBits; /*< List of tasks waiting for a bit to be set. */ + + #if( configUSE_TRACE_FACILITY == 1 ) + UBaseType_t uxEventGroupNumber; + #endif + + #if( ( configSUPPORT_STATIC_ALLOCATION == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) ) + uint8_t ucStaticallyAllocated; /*< Set to pdTRUE if the event group is statically allocated to ensure no attempt is made to free the memory. */ + #endif +} EventGroup_t; + +/*-----------------------------------------------------------*/ + +/* + * Test the bits set in uxCurrentEventBits to see if the wait condition is met. + * The wait condition is defined by xWaitForAllBits. If xWaitForAllBits is + * pdTRUE then the wait condition is met if all the bits set in uxBitsToWaitFor + * are also set in uxCurrentEventBits. If xWaitForAllBits is pdFALSE then the + * wait condition is met if any of the bits set in uxBitsToWait for are also set + * in uxCurrentEventBits. + */ +static BaseType_t prvTestWaitCondition( const EventBits_t uxCurrentEventBits, const EventBits_t uxBitsToWaitFor, const BaseType_t xWaitForAllBits ) PRIVILEGED_FUNCTION; + +/*-----------------------------------------------------------*/ + +#if( configSUPPORT_STATIC_ALLOCATION == 1 ) + + EventGroupHandle_t xEventGroupCreateStatic( StaticEventGroup_t *pxEventGroupBuffer ) + { + EventGroup_t *pxEventBits; + + /* A StaticEventGroup_t object must be provided. */ + configASSERT( pxEventGroupBuffer ); + + #if( configASSERT_DEFINED == 1 ) + { + /* Sanity check that the size of the structure used to declare a + variable of type StaticEventGroup_t equals the size of the real + event group structure. */ + volatile size_t xSize = sizeof( StaticEventGroup_t ); + configASSERT( xSize == sizeof( EventGroup_t ) ); + } /*lint !e529 xSize is referenced if configASSERT() is defined. */ + #endif /* configASSERT_DEFINED */ + + /* The user has provided a statically allocated event group - use it. 
*/ + pxEventBits = ( EventGroup_t * ) pxEventGroupBuffer; /*lint !e740 !e9087 EventGroup_t and StaticEventGroup_t are deliberately aliased for data hiding purposes and guaranteed to have the same size and alignment requirement - checked by configASSERT(). */ + + if( pxEventBits != NULL ) + { + pxEventBits->uxEventBits = 0; + vListInitialise( &( pxEventBits->xTasksWaitingForBits ) ); + + #if( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) + { + /* Both static and dynamic allocation can be used, so note that + this event group was created statically in case the event group + is later deleted. */ + pxEventBits->ucStaticallyAllocated = pdTRUE; + } + #endif /* configSUPPORT_DYNAMIC_ALLOCATION */ + + traceEVENT_GROUP_CREATE( pxEventBits ); + } + else + { + /* xEventGroupCreateStatic should only ever be called with + pxEventGroupBuffer pointing to a pre-allocated (compile time + allocated) StaticEventGroup_t variable. */ + traceEVENT_GROUP_CREATE_FAILED(); + } + + return pxEventBits; + } + +#endif /* configSUPPORT_STATIC_ALLOCATION */ +/*-----------------------------------------------------------*/ + +#if( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) + + EventGroupHandle_t xEventGroupCreate( void ) + { + EventGroup_t *pxEventBits; + + /* Allocate the event group. Justification for MISRA deviation as + follows: pvPortMalloc() always ensures returned memory blocks are + aligned per the requirements of the MCU stack. In this case + pvPortMalloc() must return a pointer that is guaranteed to meet the + alignment requirements of the EventGroup_t structure - which (if you + follow it through) is the alignment requirements of the TickType_t type + (EventBits_t being of TickType_t itself). Therefore, whenever the + stack alignment requirements are greater than or equal to the + TickType_t alignment requirements the cast is safe. In other cases, + where the natural word size of the architecture is less than + sizeof( TickType_t ), the TickType_t variables will be accessed in two + or more reads operations, and the alignment requirements is only that + of each individual read. */ + pxEventBits = ( EventGroup_t * ) pvPortMalloc( sizeof( EventGroup_t ) ); /*lint !e9087 !e9079 see comment above. */ + + if( pxEventBits != NULL ) + { + pxEventBits->uxEventBits = 0; + vListInitialise( &( pxEventBits->xTasksWaitingForBits ) ); + + #if( configSUPPORT_STATIC_ALLOCATION == 1 ) + { + /* Both static and dynamic allocation can be used, so note this + event group was allocated statically in case the event group is + later deleted. */ + pxEventBits->ucStaticallyAllocated = pdFALSE; + } + #endif /* configSUPPORT_STATIC_ALLOCATION */ + + traceEVENT_GROUP_CREATE( pxEventBits ); + } + else + { + traceEVENT_GROUP_CREATE_FAILED(); /*lint !e9063 Else branch only exists to allow tracing and does not generate code if trace macros are not defined. 
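/*
 * Illustrative usage sketch (not part of this patch) for the event-group
 * creation paths implemented above and the basic set/wait pattern. Bit values
 * and names are hypothetical; assumes configSUPPORT_STATIC_ALLOCATION is 1 for
 * the static variant shown.
 */
#include "FreeRTOS.h"
#include "event_groups.h"

#define BIT_RX_DONE  ( 1UL << 0 )

static StaticEventGroup_t xGroupStorage;
static EventGroupHandle_t xGroup;

static void vGroupInit( void )
{
    /* Static creation uses caller-provided storage and cannot fail for lack
    of heap; xEventGroupCreate() is the dynamic equivalent. */
    xGroup = xEventGroupCreateStatic( &xGroupStorage );
}

static void vOnRxComplete( void )      /* task context */
{
    ( void ) xEventGroupSetBits( xGroup, BIT_RX_DONE );
}

static void vWaitForRx( void )
{
    /* Block up to 100 ms for BIT_RX_DONE; clear it on exit, wait-any semantics. */
    ( void ) xEventGroupWaitBits( xGroup, BIT_RX_DONE, pdTRUE, pdFALSE,
                                  pdMS_TO_TICKS( 100 ) );
}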
*/ + } + + return pxEventBits; + } + +#endif /* configSUPPORT_DYNAMIC_ALLOCATION */ +/*-----------------------------------------------------------*/ + +EventBits_t xEventGroupSync( EventGroupHandle_t xEventGroup, const EventBits_t uxBitsToSet, const EventBits_t uxBitsToWaitFor, TickType_t xTicksToWait ) +{ +EventBits_t uxOriginalBitValue, uxReturn; +EventGroup_t *pxEventBits = xEventGroup; +BaseType_t xAlreadyYielded; +BaseType_t xTimeoutOccurred = pdFALSE; + + configASSERT( ( uxBitsToWaitFor & eventEVENT_BITS_CONTROL_BYTES ) == 0 ); + configASSERT( uxBitsToWaitFor != 0 ); + #if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) ) + { + configASSERT( !( ( xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED ) && ( xTicksToWait != 0 ) ) ); + } + #endif + + vTaskSuspendAll(); + { + uxOriginalBitValue = pxEventBits->uxEventBits; + + ( void ) xEventGroupSetBits( xEventGroup, uxBitsToSet ); + + if( ( ( uxOriginalBitValue | uxBitsToSet ) & uxBitsToWaitFor ) == uxBitsToWaitFor ) + { + /* All the rendezvous bits are now set - no need to block. */ + uxReturn = ( uxOriginalBitValue | uxBitsToSet ); + + /* Rendezvous always clear the bits. They will have been cleared + already unless this is the only task in the rendezvous. */ + pxEventBits->uxEventBits &= ~uxBitsToWaitFor; + + xTicksToWait = 0; + } + else + { + if( xTicksToWait != ( TickType_t ) 0 ) + { + traceEVENT_GROUP_SYNC_BLOCK( xEventGroup, uxBitsToSet, uxBitsToWaitFor ); + + /* Store the bits that the calling task is waiting for in the + task's event list item so the kernel knows when a match is + found. Then enter the blocked state. */ + vTaskPlaceOnUnorderedEventList( &( pxEventBits->xTasksWaitingForBits ), ( uxBitsToWaitFor | eventCLEAR_EVENTS_ON_EXIT_BIT | eventWAIT_FOR_ALL_BITS ), xTicksToWait ); + + /* This assignment is obsolete as uxReturn will get set after + the task unblocks, but some compilers mistakenly generate a + warning about uxReturn being returned without being set if the + assignment is omitted. */ + uxReturn = 0; + } + else + { + /* The rendezvous bits were not set, but no block time was + specified - just return the current event bit value. */ + uxReturn = pxEventBits->uxEventBits; + xTimeoutOccurred = pdTRUE; + } + } + } + xAlreadyYielded = xTaskResumeAll(); + + if( xTicksToWait != ( TickType_t ) 0 ) + { + if( xAlreadyYielded == pdFALSE ) + { + portYIELD_WITHIN_API(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + /* The task blocked to wait for its required bits to be set - at this + point either the required bits were set or the block time expired. If + the required bits were set they will have been stored in the task's + event list item, and they should now be retrieved then cleared. */ + uxReturn = uxTaskResetEventItemValue(); + + if( ( uxReturn & eventUNBLOCKED_DUE_TO_BIT_SET ) == ( EventBits_t ) 0 ) + { + /* The task timed out, just return the current event bit value. */ + taskENTER_CRITICAL(); + { + uxReturn = pxEventBits->uxEventBits; + + /* Although the task got here because it timed out before the + bits it was waiting for were set, it is possible that since it + unblocked another task has set the bits. If this is the case + then it needs to clear the bits before exiting. */ + if( ( uxReturn & uxBitsToWaitFor ) == uxBitsToWaitFor ) + { + pxEventBits->uxEventBits &= ~uxBitsToWaitFor; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + taskEXIT_CRITICAL(); + + xTimeoutOccurred = pdTRUE; + } + else + { + /* The task unblocked because the bits were set. 
*/ + } + + /* Control bits might be set as the task had blocked should not be + returned. */ + uxReturn &= ~eventEVENT_BITS_CONTROL_BYTES; + } + + traceEVENT_GROUP_SYNC_END( xEventGroup, uxBitsToSet, uxBitsToWaitFor, xTimeoutOccurred ); + + /* Prevent compiler warnings when trace macros are not used. */ + ( void ) xTimeoutOccurred; + + return uxReturn; +} +/*-----------------------------------------------------------*/ + +EventBits_t xEventGroupWaitBits( EventGroupHandle_t xEventGroup, const EventBits_t uxBitsToWaitFor, const BaseType_t xClearOnExit, const BaseType_t xWaitForAllBits, TickType_t xTicksToWait ) +{ +EventGroup_t *pxEventBits = xEventGroup; +EventBits_t uxReturn, uxControlBits = 0; +BaseType_t xWaitConditionMet, xAlreadyYielded; +BaseType_t xTimeoutOccurred = pdFALSE; + + /* Check the user is not attempting to wait on the bits used by the kernel + itself, and that at least one bit is being requested. */ + configASSERT( xEventGroup ); + configASSERT( ( uxBitsToWaitFor & eventEVENT_BITS_CONTROL_BYTES ) == 0 ); + configASSERT( uxBitsToWaitFor != 0 ); + #if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) ) + { + configASSERT( !( ( xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED ) && ( xTicksToWait != 0 ) ) ); + } + #endif + + vTaskSuspendAll(); + { + const EventBits_t uxCurrentEventBits = pxEventBits->uxEventBits; + + /* Check to see if the wait condition is already met or not. */ + xWaitConditionMet = prvTestWaitCondition( uxCurrentEventBits, uxBitsToWaitFor, xWaitForAllBits ); + + if( xWaitConditionMet != pdFALSE ) + { + /* The wait condition has already been met so there is no need to + block. */ + uxReturn = uxCurrentEventBits; + xTicksToWait = ( TickType_t ) 0; + + /* Clear the wait bits if requested to do so. */ + if( xClearOnExit != pdFALSE ) + { + pxEventBits->uxEventBits &= ~uxBitsToWaitFor; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else if( xTicksToWait == ( TickType_t ) 0 ) + { + /* The wait condition has not been met, but no block time was + specified, so just return the current value. */ + uxReturn = uxCurrentEventBits; + xTimeoutOccurred = pdTRUE; + } + else + { + /* The task is going to block to wait for its required bits to be + set. uxControlBits are used to remember the specified behaviour of + this call to xEventGroupWaitBits() - for use when the event bits + unblock the task. */ + if( xClearOnExit != pdFALSE ) + { + uxControlBits |= eventCLEAR_EVENTS_ON_EXIT_BIT; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + if( xWaitForAllBits != pdFALSE ) + { + uxControlBits |= eventWAIT_FOR_ALL_BITS; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + /* Store the bits that the calling task is waiting for in the + task's event list item so the kernel knows when a match is + found. Then enter the blocked state. */ + vTaskPlaceOnUnorderedEventList( &( pxEventBits->xTasksWaitingForBits ), ( uxBitsToWaitFor | uxControlBits ), xTicksToWait ); + + /* This is obsolete as it will get set after the task unblocks, but + some compilers mistakenly generate a warning about the variable + being returned without being set if it is not done. 
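/*
 * Illustrative sketch (not part of this patch) of the rendezvous pattern that
 * xEventGroupSync() above implements: each task sets its own bit and blocks
 * until all three are set. Task and bit names are hypothetical.
 */
#include "FreeRTOS.h"
#include "event_groups.h"

#define BIT_TASK_A   ( 1UL << 0 )
#define BIT_TASK_B   ( 1UL << 1 )
#define BIT_TASK_C   ( 1UL << 2 )
#define ALL_TASKS    ( BIT_TASK_A | BIT_TASK_B | BIT_TASK_C )

static EventGroupHandle_t xSyncGroup;   /* created elsewhere with xEventGroupCreate() */

static void vTaskA( void *pvParameters )
{
    ( void ) pvParameters;
    for( ;; )
    {
        /* ... phase 1 work ... */

        /* Announce completion and block until B and C have done the same.
        The rendezvous bits are cleared automatically for the next round. */
        ( void ) xEventGroupSync( xSyncGroup, BIT_TASK_A, ALL_TASKS,
                                  portMAX_DELAY );

        /* ... phase 2 work, entered by all three tasks together ... */
    }
}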
*/ + uxReturn = 0; + + traceEVENT_GROUP_WAIT_BITS_BLOCK( xEventGroup, uxBitsToWaitFor ); + } + } + xAlreadyYielded = xTaskResumeAll(); + + if( xTicksToWait != ( TickType_t ) 0 ) + { + if( xAlreadyYielded == pdFALSE ) + { + portYIELD_WITHIN_API(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + /* The task blocked to wait for its required bits to be set - at this + point either the required bits were set or the block time expired. If + the required bits were set they will have been stored in the task's + event list item, and they should now be retrieved then cleared. */ + uxReturn = uxTaskResetEventItemValue(); + + if( ( uxReturn & eventUNBLOCKED_DUE_TO_BIT_SET ) == ( EventBits_t ) 0 ) + { + taskENTER_CRITICAL(); + { + /* The task timed out, just return the current event bit value. */ + uxReturn = pxEventBits->uxEventBits; + + /* It is possible that the event bits were updated between this + task leaving the Blocked state and running again. */ + if( prvTestWaitCondition( uxReturn, uxBitsToWaitFor, xWaitForAllBits ) != pdFALSE ) + { + if( xClearOnExit != pdFALSE ) + { + pxEventBits->uxEventBits &= ~uxBitsToWaitFor; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + xTimeoutOccurred = pdTRUE; + } + taskEXIT_CRITICAL(); + } + else + { + /* The task unblocked because the bits were set. */ + } + + /* The task blocked so control bits may have been set. */ + uxReturn &= ~eventEVENT_BITS_CONTROL_BYTES; + } + traceEVENT_GROUP_WAIT_BITS_END( xEventGroup, uxBitsToWaitFor, xTimeoutOccurred ); + + /* Prevent compiler warnings when trace macros are not used. */ + ( void ) xTimeoutOccurred; + + return uxReturn; +} +/*-----------------------------------------------------------*/ + +EventBits_t xEventGroupClearBits( EventGroupHandle_t xEventGroup, const EventBits_t uxBitsToClear ) +{ +EventGroup_t *pxEventBits = xEventGroup; +EventBits_t uxReturn; + + /* Check the user is not attempting to clear the bits used by the kernel + itself. */ + configASSERT( xEventGroup ); + configASSERT( ( uxBitsToClear & eventEVENT_BITS_CONTROL_BYTES ) == 0 ); + + taskENTER_CRITICAL(); + { + traceEVENT_GROUP_CLEAR_BITS( xEventGroup, uxBitsToClear ); + + /* The value returned is the event group value prior to the bits being + cleared. */ + uxReturn = pxEventBits->uxEventBits; + + /* Clear the bits. */ + pxEventBits->uxEventBits &= ~uxBitsToClear; + } + taskEXIT_CRITICAL(); + + return uxReturn; +} +/*-----------------------------------------------------------*/ + +#if ( ( configUSE_TRACE_FACILITY == 1 ) && ( INCLUDE_xTimerPendFunctionCall == 1 ) && ( configUSE_TIMERS == 1 ) ) + + BaseType_t xEventGroupClearBitsFromISR( EventGroupHandle_t xEventGroup, const EventBits_t uxBitsToClear ) + { + BaseType_t xReturn; + + traceEVENT_GROUP_CLEAR_BITS_FROM_ISR( xEventGroup, uxBitsToClear ); + xReturn = xTimerPendFunctionCallFromISR( vEventGroupClearBitsCallback, ( void * ) xEventGroup, ( uint32_t ) uxBitsToClear, NULL ); /*lint !e9087 Can't avoid cast to void* as a generic callback function not specific to this use case. Callback casts back to original type so safe. 
*/ + + return xReturn; + } + +#endif +/*-----------------------------------------------------------*/ + +EventBits_t xEventGroupGetBitsFromISR( EventGroupHandle_t xEventGroup ) +{ +UBaseType_t uxSavedInterruptStatus; +EventGroup_t const * const pxEventBits = xEventGroup; +EventBits_t uxReturn; + + uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR(); + { + uxReturn = pxEventBits->uxEventBits; + } + portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ); + + return uxReturn; +} /*lint !e818 EventGroupHandle_t is a typedef used in other functions to so can't be pointer to const. */ +/*-----------------------------------------------------------*/ + +EventBits_t xEventGroupSetBits( EventGroupHandle_t xEventGroup, const EventBits_t uxBitsToSet ) +{ +ListItem_t *pxListItem, *pxNext; +ListItem_t const *pxListEnd; +List_t const * pxList; +EventBits_t uxBitsToClear = 0, uxBitsWaitedFor, uxControlBits; +EventGroup_t *pxEventBits = xEventGroup; +BaseType_t xMatchFound = pdFALSE; + + /* Check the user is not attempting to set the bits used by the kernel + itself. */ + configASSERT( xEventGroup ); + configASSERT( ( uxBitsToSet & eventEVENT_BITS_CONTROL_BYTES ) == 0 ); + + pxList = &( pxEventBits->xTasksWaitingForBits ); + pxListEnd = listGET_END_MARKER( pxList ); /*lint !e826 !e740 !e9087 The mini list structure is used as the list end to save RAM. This is checked and valid. */ + vTaskSuspendAll(); + { + traceEVENT_GROUP_SET_BITS( xEventGroup, uxBitsToSet ); + + pxListItem = listGET_HEAD_ENTRY( pxList ); + + /* Set the bits. */ + pxEventBits->uxEventBits |= uxBitsToSet; + + /* See if the new bit value should unblock any tasks. */ + while( pxListItem != pxListEnd ) + { + pxNext = listGET_NEXT( pxListItem ); + uxBitsWaitedFor = listGET_LIST_ITEM_VALUE( pxListItem ); + xMatchFound = pdFALSE; + + /* Split the bits waited for from the control bits. */ + uxControlBits = uxBitsWaitedFor & eventEVENT_BITS_CONTROL_BYTES; + uxBitsWaitedFor &= ~eventEVENT_BITS_CONTROL_BYTES; + + if( ( uxControlBits & eventWAIT_FOR_ALL_BITS ) == ( EventBits_t ) 0 ) + { + /* Just looking for single bit being set. */ + if( ( uxBitsWaitedFor & pxEventBits->uxEventBits ) != ( EventBits_t ) 0 ) + { + xMatchFound = pdTRUE; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else if( ( uxBitsWaitedFor & pxEventBits->uxEventBits ) == uxBitsWaitedFor ) + { + /* All bits are set. */ + xMatchFound = pdTRUE; + } + else + { + /* Need all bits to be set, but not all the bits were set. */ + } + + if( xMatchFound != pdFALSE ) + { + /* The bits match. Should the bits be cleared on exit? */ + if( ( uxControlBits & eventCLEAR_EVENTS_ON_EXIT_BIT ) != ( EventBits_t ) 0 ) + { + uxBitsToClear |= uxBitsWaitedFor; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + /* Store the actual event flag value in the task's event list + item before removing the task from the event list. The + eventUNBLOCKED_DUE_TO_BIT_SET bit is set so the task knows + that is was unblocked due to its required bits matching, rather + than because it timed out. */ + vTaskRemoveFromUnorderedEventList( pxListItem, pxEventBits->uxEventBits | eventUNBLOCKED_DUE_TO_BIT_SET ); + } + + /* Move onto the next list item. Note pxListItem->pxNext is not + used here as the list item may have been removed from the event list + and inserted into the ready/pending reading list. */ + pxListItem = pxNext; + } + + /* Clear any bits that matched when the eventCLEAR_EVENTS_ON_EXIT_BIT + bit was set in the control word. 
*/ + pxEventBits->uxEventBits &= ~uxBitsToClear; + } + ( void ) xTaskResumeAll(); + + return pxEventBits->uxEventBits; +} +/*-----------------------------------------------------------*/ + +void vEventGroupDelete( EventGroupHandle_t xEventGroup ) +{ +EventGroup_t *pxEventBits = xEventGroup; +const List_t *pxTasksWaitingForBits = &( pxEventBits->xTasksWaitingForBits ); + + vTaskSuspendAll(); + { + traceEVENT_GROUP_DELETE( xEventGroup ); + + while( listCURRENT_LIST_LENGTH( pxTasksWaitingForBits ) > ( UBaseType_t ) 0 ) + { + /* Unblock the task, returning 0 as the event list is being deleted + and cannot therefore have any bits set. */ + configASSERT( pxTasksWaitingForBits->xListEnd.pxNext != ( const ListItem_t * ) &( pxTasksWaitingForBits->xListEnd ) ); + vTaskRemoveFromUnorderedEventList( pxTasksWaitingForBits->xListEnd.pxNext, eventUNBLOCKED_DUE_TO_BIT_SET ); + } + + #if( ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 0 ) ) + { + /* The event group can only have been allocated dynamically - free + it again. */ + vPortFree( pxEventBits ); + } + #elif( ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) ) + { + /* The event group could have been allocated statically or + dynamically, so check before attempting to free the memory. */ + if( pxEventBits->ucStaticallyAllocated == ( uint8_t ) pdFALSE ) + { + vPortFree( pxEventBits ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + #endif /* configSUPPORT_DYNAMIC_ALLOCATION */ + } + ( void ) xTaskResumeAll(); +} +/*-----------------------------------------------------------*/ + +/* For internal use only - execute a 'set bits' command that was pended from +an interrupt. */ +void vEventGroupSetBitsCallback( void *pvEventGroup, const uint32_t ulBitsToSet ) +{ + ( void ) xEventGroupSetBits( pvEventGroup, ( EventBits_t ) ulBitsToSet ); /*lint !e9079 Can't avoid cast to void* as a generic timer callback prototype. Callback casts back to original type so safe. */ +} +/*-----------------------------------------------------------*/ + +/* For internal use only - execute a 'clear bits' command that was pended from +an interrupt. */ +void vEventGroupClearBitsCallback( void *pvEventGroup, const uint32_t ulBitsToClear ) +{ + ( void ) xEventGroupClearBits( pvEventGroup, ( EventBits_t ) ulBitsToClear ); /*lint !e9079 Can't avoid cast to void* as a generic timer callback prototype. Callback casts back to original type so safe. */ +} +/*-----------------------------------------------------------*/ + +static BaseType_t prvTestWaitCondition( const EventBits_t uxCurrentEventBits, const EventBits_t uxBitsToWaitFor, const BaseType_t xWaitForAllBits ) +{ +BaseType_t xWaitConditionMet = pdFALSE; + + if( xWaitForAllBits == pdFALSE ) + { + /* Task only has to wait for one bit within uxBitsToWaitFor to be + set. Is one already set? */ + if( ( uxCurrentEventBits & uxBitsToWaitFor ) != ( EventBits_t ) 0 ) + { + xWaitConditionMet = pdTRUE; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + /* Task has to wait for all the bits in uxBitsToWaitFor to be set. + Are they set already? 
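/*
 * Illustrative sketch (not part of this patch) of the two wait conditions
 * tested by prvTestWaitCondition() above, as seen from the
 * xEventGroupWaitBits() caller. Bit names are hypothetical.
 */
#include "FreeRTOS.h"
#include "event_groups.h"

#define BIT_UART   ( 1UL << 0 )
#define BIT_SPI    ( 1UL << 1 )

static EventGroupHandle_t xIoGroup;

static void vWaitExamples( void )
{
    /* Wait-any: returns as soon as either bit is set (xWaitForAllBits = pdFALSE). */
    EventBits_t uxBits = xEventGroupWaitBits( xIoGroup, BIT_UART | BIT_SPI,
                                              pdTRUE, pdFALSE, portMAX_DELAY );

    /* Wait-all: returns only once both bits are set (xWaitForAllBits = pdTRUE). */
    uxBits = xEventGroupWaitBits( xIoGroup, BIT_UART | BIT_SPI,
                                  pdTRUE, pdTRUE, portMAX_DELAY );
    ( void ) uxBits;
}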
*/ + if( ( uxCurrentEventBits & uxBitsToWaitFor ) == uxBitsToWaitFor ) + { + xWaitConditionMet = pdTRUE; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + + return xWaitConditionMet; +} +/*-----------------------------------------------------------*/ + +#if ( ( configUSE_TRACE_FACILITY == 1 ) && ( INCLUDE_xTimerPendFunctionCall == 1 ) && ( configUSE_TIMERS == 1 ) ) + + BaseType_t xEventGroupSetBitsFromISR( EventGroupHandle_t xEventGroup, const EventBits_t uxBitsToSet, BaseType_t *pxHigherPriorityTaskWoken ) + { + BaseType_t xReturn; + + traceEVENT_GROUP_SET_BITS_FROM_ISR( xEventGroup, uxBitsToSet ); + xReturn = xTimerPendFunctionCallFromISR( vEventGroupSetBitsCallback, ( void * ) xEventGroup, ( uint32_t ) uxBitsToSet, pxHigherPriorityTaskWoken ); /*lint !e9087 Can't avoid cast to void* as a generic callback function not specific to this use case. Callback casts back to original type so safe. */ + + return xReturn; + } + +#endif +/*-----------------------------------------------------------*/ + +#if (configUSE_TRACE_FACILITY == 1) + + UBaseType_t uxEventGroupGetNumber( void* xEventGroup ) + { + UBaseType_t xReturn; + EventGroup_t const *pxEventBits = ( EventGroup_t * ) xEventGroup; /*lint !e9087 !e9079 EventGroupHandle_t is a pointer to an EventGroup_t, but EventGroupHandle_t is kept opaque outside of this file for data hiding purposes. */ + + if( xEventGroup == NULL ) + { + xReturn = 0; + } + else + { + xReturn = pxEventBits->uxEventGroupNumber; + } + + return xReturn; + } + +#endif /* configUSE_TRACE_FACILITY */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TRACE_FACILITY == 1 ) + + void vEventGroupSetNumber( void * xEventGroup, UBaseType_t uxEventGroupNumber ) + { + ( ( EventGroup_t * ) xEventGroup )->uxEventGroupNumber = uxEventGroupNumber; /*lint !e9087 !e9079 EventGroupHandle_t is a pointer to an EventGroup_t, but EventGroupHandle_t is kept opaque outside of this file for data hiding purposes. */ + } + +#endif /* configUSE_TRACE_FACILITY */ +/*-----------------------------------------------------------*/ + + diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/FreeRTOS/Source/include/FreeRTOS.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/FreeRTOS/Source/include/FreeRTOS.h new file mode 100644 index 0000000..45c778a --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/FreeRTOS/Source/include/FreeRTOS.h @@ -0,0 +1,1278 @@ +/* + * FreeRTOS Kernel V10.2.1 + * Copyright (C) 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
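/*
 * Illustrative sketch (not part of this patch) of the interrupt path above:
 * xEventGroupSetBitsFromISR() defers the actual set to the timer daemon task,
 * so configUSE_TIMERS and INCLUDE_xTimerPendFunctionCall must be 1, and a port
 * providing portYIELD_FROM_ISR() is assumed. The handler name and bit are
 * hypothetical.
 */
#include "FreeRTOS.h"
#include "event_groups.h"

#define BIT_DMA_DONE   ( 1UL << 3 )

static EventGroupHandle_t xIsrGroup;

void DMA_TransferComplete_IRQHandler( void )
{
    BaseType_t xHigherPriorityTaskWoken = pdFALSE;

    /* Pends a 'set bits' call to the timer service task instead of touching
    the event group directly from interrupt context. */
    if( xEventGroupSetBitsFromISR( xIsrGroup, BIT_DMA_DONE,
                                   &xHigherPriorityTaskWoken ) == pdPASS )
    {
        /* Request a context switch if the daemon task was unblocked. */
        portYIELD_FROM_ISR( xHigherPriorityTaskWoken );
    }
}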
IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * http://www.FreeRTOS.org + * http://aws.amazon.com/freertos + * + * 1 tab == 4 spaces! + */ + +#ifndef INC_FREERTOS_H +#define INC_FREERTOS_H + +/* + * Include the generic headers required for the FreeRTOS port being used. + */ +#include + +/* + * If stdint.h cannot be located then: + * + If using GCC ensure the -nostdint options is *not* being used. + * + Ensure the project's include path includes the directory in which your + * compiler stores stdint.h. + * + Set any compiler options necessary for it to support C99, as technically + * stdint.h is only mandatory with C99 (FreeRTOS does not require C99 in any + * other way). + * + The FreeRTOS download includes a simple stdint.h definition that can be + * used in cases where none is provided by the compiler. The files only + * contains the typedefs required to build FreeRTOS. Read the instructions + * in FreeRTOS/source/stdint.readme for more information. + */ +#include /* READ COMMENT ABOVE. */ + +#ifdef __cplusplus +extern "C" { +#endif + +/* Application specific configuration options. */ +#include "FreeRTOSConfig.h" + +/* Basic FreeRTOS definitions. */ +#include "projdefs.h" + +/* Definitions specific to the port being used. */ +#include "portable.h" + +/* Must be defaulted before configUSE_NEWLIB_REENTRANT is used below. */ +#ifndef configUSE_NEWLIB_REENTRANT + #define configUSE_NEWLIB_REENTRANT 0 +#endif + +/* Required if struct _reent is used. */ +#if ( configUSE_NEWLIB_REENTRANT == 1 ) + #include +#endif +/* + * Check all the required application specific macros have been defined. + * These macros are application specific and (as downloaded) are defined + * within FreeRTOSConfig.h. + */ + +#ifndef configMINIMAL_STACK_SIZE + #error Missing definition: configMINIMAL_STACK_SIZE must be defined in FreeRTOSConfig.h. configMINIMAL_STACK_SIZE defines the size (in words) of the stack allocated to the idle task. Refer to the demo project provided for your port for a suitable value. +#endif + +#ifndef configMAX_PRIORITIES + #error Missing definition: configMAX_PRIORITIES must be defined in FreeRTOSConfig.h. See the Configuration section of the FreeRTOS API documentation for details. +#endif + +#if configMAX_PRIORITIES < 1 + #error configMAX_PRIORITIES must be defined to be greater than or equal to 1. +#endif + +#ifndef configUSE_PREEMPTION + #error Missing definition: configUSE_PREEMPTION must be defined in FreeRTOSConfig.h as either 1 or 0. See the Configuration section of the FreeRTOS API documentation for details. +#endif + +#ifndef configUSE_IDLE_HOOK + #error Missing definition: configUSE_IDLE_HOOK must be defined in FreeRTOSConfig.h as either 1 or 0. See the Configuration section of the FreeRTOS API documentation for details. +#endif + +#ifndef configUSE_TICK_HOOK + #error Missing definition: configUSE_TICK_HOOK must be defined in FreeRTOSConfig.h as either 1 or 0. See the Configuration section of the FreeRTOS API documentation for details. +#endif + +#ifndef configUSE_16_BIT_TICKS + #error Missing definition: configUSE_16_BIT_TICKS must be defined in FreeRTOSConfig.h as either 1 or 0. See the Configuration section of the FreeRTOS API documentation for details. 
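/*
 * Illustrative FreeRTOSConfig.h excerpt (not part of this patch) covering the
 * settings that the #error checks above require. The values are placeholders,
 * not this project's actual configuration.
 */
#define configUSE_PREEMPTION        1
#define configUSE_IDLE_HOOK         0
#define configUSE_TICK_HOOK         0
#define configUSE_16_BIT_TICKS      0                       /* 32-bit tick type */
#define configMAX_PRIORITIES        ( 7 )
#define configMINIMAL_STACK_SIZE    ( ( uint16_t ) 128 )    /* in words, for the idle task */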
+#endif + +#ifndef configUSE_CO_ROUTINES + #define configUSE_CO_ROUTINES 0 +#endif + +#ifndef INCLUDE_vTaskPrioritySet + #define INCLUDE_vTaskPrioritySet 0 +#endif + +#ifndef INCLUDE_uxTaskPriorityGet + #define INCLUDE_uxTaskPriorityGet 0 +#endif + +#ifndef INCLUDE_vTaskDelete + #define INCLUDE_vTaskDelete 0 +#endif + +#ifndef INCLUDE_vTaskSuspend + #define INCLUDE_vTaskSuspend 0 +#endif + +#ifndef INCLUDE_vTaskDelayUntil + #define INCLUDE_vTaskDelayUntil 0 +#endif + +#ifndef INCLUDE_vTaskDelay + #define INCLUDE_vTaskDelay 0 +#endif + +#ifndef INCLUDE_xTaskGetIdleTaskHandle + #define INCLUDE_xTaskGetIdleTaskHandle 0 +#endif + +#ifndef INCLUDE_xTaskAbortDelay + #define INCLUDE_xTaskAbortDelay 0 +#endif + +#ifndef INCLUDE_xQueueGetMutexHolder + #define INCLUDE_xQueueGetMutexHolder 0 +#endif + +#ifndef INCLUDE_xSemaphoreGetMutexHolder + #define INCLUDE_xSemaphoreGetMutexHolder INCLUDE_xQueueGetMutexHolder +#endif + +#ifndef INCLUDE_xTaskGetHandle + #define INCLUDE_xTaskGetHandle 0 +#endif + +#ifndef INCLUDE_uxTaskGetStackHighWaterMark + #define INCLUDE_uxTaskGetStackHighWaterMark 0 +#endif + +#ifndef INCLUDE_uxTaskGetStackHighWaterMark2 + #define INCLUDE_uxTaskGetStackHighWaterMark2 0 +#endif + +#ifndef INCLUDE_eTaskGetState + #define INCLUDE_eTaskGetState 0 +#endif + +#ifndef INCLUDE_xTaskResumeFromISR + #define INCLUDE_xTaskResumeFromISR 1 +#endif + +#ifndef INCLUDE_xTimerPendFunctionCall + #define INCLUDE_xTimerPendFunctionCall 0 +#endif + +#ifndef INCLUDE_xTaskGetSchedulerState + #define INCLUDE_xTaskGetSchedulerState 0 +#endif + +#ifndef INCLUDE_xTaskGetCurrentTaskHandle + #define INCLUDE_xTaskGetCurrentTaskHandle 0 +#endif + +#if configUSE_CO_ROUTINES != 0 + #ifndef configMAX_CO_ROUTINE_PRIORITIES + #error configMAX_CO_ROUTINE_PRIORITIES must be greater than or equal to 1. + #endif +#endif + +#ifndef configUSE_DAEMON_TASK_STARTUP_HOOK + #define configUSE_DAEMON_TASK_STARTUP_HOOK 0 +#endif + +#ifndef configUSE_APPLICATION_TASK_TAG + #define configUSE_APPLICATION_TASK_TAG 0 +#endif + +#ifndef configNUM_THREAD_LOCAL_STORAGE_POINTERS + #define configNUM_THREAD_LOCAL_STORAGE_POINTERS 0 +#endif + +#ifndef configUSE_RECURSIVE_MUTEXES + #define configUSE_RECURSIVE_MUTEXES 0 +#endif + +#ifndef configUSE_MUTEXES + #define configUSE_MUTEXES 0 +#endif + +#ifndef configUSE_TIMERS + #define configUSE_TIMERS 0 +#endif + +#ifndef configUSE_COUNTING_SEMAPHORES + #define configUSE_COUNTING_SEMAPHORES 0 +#endif + +#ifndef configUSE_ALTERNATIVE_API + #define configUSE_ALTERNATIVE_API 0 +#endif + +#ifndef portCRITICAL_NESTING_IN_TCB + #define portCRITICAL_NESTING_IN_TCB 0 +#endif + +#ifndef configMAX_TASK_NAME_LEN + #define configMAX_TASK_NAME_LEN 16 +#endif + +#ifndef configIDLE_SHOULD_YIELD + #define configIDLE_SHOULD_YIELD 1 +#endif + +#if configMAX_TASK_NAME_LEN < 1 + #error configMAX_TASK_NAME_LEN must be set to a minimum of 1 in FreeRTOSConfig.h +#endif + +#ifndef configASSERT + #define configASSERT( x ) + #define configASSERT_DEFINED 0 +#else + #define configASSERT_DEFINED 1 +#endif + +#ifndef portMEMORY_BARRIER + #define portMEMORY_BARRIER() +#endif + +/* The timers module relies on xTaskGetSchedulerState(). */ +#if configUSE_TIMERS == 1 + + #ifndef configTIMER_TASK_PRIORITY + #error If configUSE_TIMERS is set to 1 then configTIMER_TASK_PRIORITY must also be defined. + #endif /* configTIMER_TASK_PRIORITY */ + + #ifndef configTIMER_QUEUE_LENGTH + #error If configUSE_TIMERS is set to 1 then configTIMER_QUEUE_LENGTH must also be defined. 
+ #endif /* configTIMER_QUEUE_LENGTH */ + + #ifndef configTIMER_TASK_STACK_DEPTH + #error If configUSE_TIMERS is set to 1 then configTIMER_TASK_STACK_DEPTH must also be defined. + #endif /* configTIMER_TASK_STACK_DEPTH */ + +#endif /* configUSE_TIMERS */ + +#ifndef portSET_INTERRUPT_MASK_FROM_ISR + #define portSET_INTERRUPT_MASK_FROM_ISR() 0 +#endif + +#ifndef portCLEAR_INTERRUPT_MASK_FROM_ISR + #define portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedStatusValue ) ( void ) uxSavedStatusValue +#endif + +#ifndef portCLEAN_UP_TCB + #define portCLEAN_UP_TCB( pxTCB ) ( void ) pxTCB +#endif + +#ifndef portPRE_TASK_DELETE_HOOK + #define portPRE_TASK_DELETE_HOOK( pvTaskToDelete, pxYieldPending ) +#endif + +#ifndef portSETUP_TCB + #define portSETUP_TCB( pxTCB ) ( void ) pxTCB +#endif + +#ifndef configQUEUE_REGISTRY_SIZE + #define configQUEUE_REGISTRY_SIZE 0U +#endif + +#if ( configQUEUE_REGISTRY_SIZE < 1 ) + #define vQueueAddToRegistry( xQueue, pcName ) + #define vQueueUnregisterQueue( xQueue ) + #define pcQueueGetName( xQueue ) +#endif + +#ifndef portPOINTER_SIZE_TYPE + #define portPOINTER_SIZE_TYPE uint32_t +#endif + +/* Remove any unused trace macros. */ +#ifndef traceSTART + /* Used to perform any necessary initialisation - for example, open a file + into which trace is to be written. */ + #define traceSTART() +#endif + +#ifndef traceEND + /* Use to close a trace, for example close a file into which trace has been + written. */ + #define traceEND() +#endif + +#ifndef traceTASK_SWITCHED_IN + /* Called after a task has been selected to run. pxCurrentTCB holds a pointer + to the task control block of the selected task. */ + #define traceTASK_SWITCHED_IN() +#endif + +#ifndef traceINCREASE_TICK_COUNT + /* Called before stepping the tick count after waking from tickless idle + sleep. */ + #define traceINCREASE_TICK_COUNT( x ) +#endif + +#ifndef traceLOW_POWER_IDLE_BEGIN + /* Called immediately before entering tickless idle. */ + #define traceLOW_POWER_IDLE_BEGIN() +#endif + +#ifndef traceLOW_POWER_IDLE_END + /* Called when returning to the Idle task after a tickless idle. */ + #define traceLOW_POWER_IDLE_END() +#endif + +#ifndef traceTASK_SWITCHED_OUT + /* Called before a task has been selected to run. pxCurrentTCB holds a pointer + to the task control block of the task being switched out. */ + #define traceTASK_SWITCHED_OUT() +#endif + +#ifndef traceTASK_PRIORITY_INHERIT + /* Called when a task attempts to take a mutex that is already held by a + lower priority task. pxTCBOfMutexHolder is a pointer to the TCB of the task + that holds the mutex. uxInheritedPriority is the priority the mutex holder + will inherit (the priority of the task that is attempting to obtain the + muted. */ + #define traceTASK_PRIORITY_INHERIT( pxTCBOfMutexHolder, uxInheritedPriority ) +#endif + +#ifndef traceTASK_PRIORITY_DISINHERIT + /* Called when a task releases a mutex, the holding of which had resulted in + the task inheriting the priority of a higher priority task. + pxTCBOfMutexHolder is a pointer to the TCB of the task that is releasing the + mutex. uxOriginalPriority is the task's configured (base) priority. */ + #define traceTASK_PRIORITY_DISINHERIT( pxTCBOfMutexHolder, uxOriginalPriority ) +#endif + +#ifndef traceBLOCKING_ON_QUEUE_RECEIVE + /* Task is about to block because it cannot read from a + queue/mutex/semaphore. pxQueue is a pointer to the queue/mutex/semaphore + upon which the read was attempted. pxCurrentTCB points to the TCB of the + task that attempted the read. 
*/ + #define traceBLOCKING_ON_QUEUE_RECEIVE( pxQueue ) +#endif + +#ifndef traceBLOCKING_ON_QUEUE_PEEK + /* Task is about to block because it cannot read from a + queue/mutex/semaphore. pxQueue is a pointer to the queue/mutex/semaphore + upon which the read was attempted. pxCurrentTCB points to the TCB of the + task that attempted the read. */ + #define traceBLOCKING_ON_QUEUE_PEEK( pxQueue ) +#endif + +#ifndef traceBLOCKING_ON_QUEUE_SEND + /* Task is about to block because it cannot write to a + queue/mutex/semaphore. pxQueue is a pointer to the queue/mutex/semaphore + upon which the write was attempted. pxCurrentTCB points to the TCB of the + task that attempted the write. */ + #define traceBLOCKING_ON_QUEUE_SEND( pxQueue ) +#endif + +#ifndef configCHECK_FOR_STACK_OVERFLOW + #define configCHECK_FOR_STACK_OVERFLOW 0 +#endif + +#ifndef configRECORD_STACK_HIGH_ADDRESS + #define configRECORD_STACK_HIGH_ADDRESS 0 +#endif + +#ifndef configINCLUDE_FREERTOS_TASK_C_ADDITIONS_H + #define configINCLUDE_FREERTOS_TASK_C_ADDITIONS_H 0 +#endif + +/* The following event macros are embedded in the kernel API calls. */ + +#ifndef traceMOVED_TASK_TO_READY_STATE + #define traceMOVED_TASK_TO_READY_STATE( pxTCB ) +#endif + +#ifndef tracePOST_MOVED_TASK_TO_READY_STATE + #define tracePOST_MOVED_TASK_TO_READY_STATE( pxTCB ) +#endif + +#ifndef traceQUEUE_CREATE + #define traceQUEUE_CREATE( pxNewQueue ) +#endif + +#ifndef traceQUEUE_CREATE_FAILED + #define traceQUEUE_CREATE_FAILED( ucQueueType ) +#endif + +#ifndef traceCREATE_MUTEX + #define traceCREATE_MUTEX( pxNewQueue ) +#endif + +#ifndef traceCREATE_MUTEX_FAILED + #define traceCREATE_MUTEX_FAILED() +#endif + +#ifndef traceGIVE_MUTEX_RECURSIVE + #define traceGIVE_MUTEX_RECURSIVE( pxMutex ) +#endif + +#ifndef traceGIVE_MUTEX_RECURSIVE_FAILED + #define traceGIVE_MUTEX_RECURSIVE_FAILED( pxMutex ) +#endif + +#ifndef traceTAKE_MUTEX_RECURSIVE + #define traceTAKE_MUTEX_RECURSIVE( pxMutex ) +#endif + +#ifndef traceTAKE_MUTEX_RECURSIVE_FAILED + #define traceTAKE_MUTEX_RECURSIVE_FAILED( pxMutex ) +#endif + +#ifndef traceCREATE_COUNTING_SEMAPHORE + #define traceCREATE_COUNTING_SEMAPHORE() +#endif + +#ifndef traceCREATE_COUNTING_SEMAPHORE_FAILED + #define traceCREATE_COUNTING_SEMAPHORE_FAILED() +#endif + +#ifndef traceQUEUE_SEND + #define traceQUEUE_SEND( pxQueue ) +#endif + +#ifndef traceQUEUE_SEND_FAILED + #define traceQUEUE_SEND_FAILED( pxQueue ) +#endif + +#ifndef traceQUEUE_RECEIVE + #define traceQUEUE_RECEIVE( pxQueue ) +#endif + +#ifndef traceQUEUE_PEEK + #define traceQUEUE_PEEK( pxQueue ) +#endif + +#ifndef traceQUEUE_PEEK_FAILED + #define traceQUEUE_PEEK_FAILED( pxQueue ) +#endif + +#ifndef traceQUEUE_PEEK_FROM_ISR + #define traceQUEUE_PEEK_FROM_ISR( pxQueue ) +#endif + +#ifndef traceQUEUE_RECEIVE_FAILED + #define traceQUEUE_RECEIVE_FAILED( pxQueue ) +#endif + +#ifndef traceQUEUE_SEND_FROM_ISR + #define traceQUEUE_SEND_FROM_ISR( pxQueue ) +#endif + +#ifndef traceQUEUE_SEND_FROM_ISR_FAILED + #define traceQUEUE_SEND_FROM_ISR_FAILED( pxQueue ) +#endif + +#ifndef traceQUEUE_RECEIVE_FROM_ISR + #define traceQUEUE_RECEIVE_FROM_ISR( pxQueue ) +#endif + +#ifndef traceQUEUE_RECEIVE_FROM_ISR_FAILED + #define traceQUEUE_RECEIVE_FROM_ISR_FAILED( pxQueue ) +#endif + +#ifndef traceQUEUE_PEEK_FROM_ISR_FAILED + #define traceQUEUE_PEEK_FROM_ISR_FAILED( pxQueue ) +#endif + +#ifndef traceQUEUE_DELETE + #define traceQUEUE_DELETE( pxQueue ) +#endif + +#ifndef traceTASK_CREATE + #define traceTASK_CREATE( pxNewTCB ) +#endif + +#ifndef traceTASK_CREATE_FAILED + #define 
traceTASK_CREATE_FAILED() +#endif + +#ifndef traceTASK_DELETE + #define traceTASK_DELETE( pxTaskToDelete ) +#endif + +#ifndef traceTASK_DELAY_UNTIL + #define traceTASK_DELAY_UNTIL( x ) +#endif + +#ifndef traceTASK_DELAY + #define traceTASK_DELAY() +#endif + +#ifndef traceTASK_PRIORITY_SET + #define traceTASK_PRIORITY_SET( pxTask, uxNewPriority ) +#endif + +#ifndef traceTASK_SUSPEND + #define traceTASK_SUSPEND( pxTaskToSuspend ) +#endif + +#ifndef traceTASK_RESUME + #define traceTASK_RESUME( pxTaskToResume ) +#endif + +#ifndef traceTASK_RESUME_FROM_ISR + #define traceTASK_RESUME_FROM_ISR( pxTaskToResume ) +#endif + +#ifndef traceTASK_INCREMENT_TICK + #define traceTASK_INCREMENT_TICK( xTickCount ) +#endif + +#ifndef traceTIMER_CREATE + #define traceTIMER_CREATE( pxNewTimer ) +#endif + +#ifndef traceTIMER_CREATE_FAILED + #define traceTIMER_CREATE_FAILED() +#endif + +#ifndef traceTIMER_COMMAND_SEND + #define traceTIMER_COMMAND_SEND( xTimer, xMessageID, xMessageValueValue, xReturn ) +#endif + +#ifndef traceTIMER_EXPIRED + #define traceTIMER_EXPIRED( pxTimer ) +#endif + +#ifndef traceTIMER_COMMAND_RECEIVED + #define traceTIMER_COMMAND_RECEIVED( pxTimer, xMessageID, xMessageValue ) +#endif + +#ifndef traceMALLOC + #define traceMALLOC( pvAddress, uiSize ) +#endif + +#ifndef traceFREE + #define traceFREE( pvAddress, uiSize ) +#endif + +#ifndef traceEVENT_GROUP_CREATE + #define traceEVENT_GROUP_CREATE( xEventGroup ) +#endif + +#ifndef traceEVENT_GROUP_CREATE_FAILED + #define traceEVENT_GROUP_CREATE_FAILED() +#endif + +#ifndef traceEVENT_GROUP_SYNC_BLOCK + #define traceEVENT_GROUP_SYNC_BLOCK( xEventGroup, uxBitsToSet, uxBitsToWaitFor ) +#endif + +#ifndef traceEVENT_GROUP_SYNC_END + #define traceEVENT_GROUP_SYNC_END( xEventGroup, uxBitsToSet, uxBitsToWaitFor, xTimeoutOccurred ) ( void ) xTimeoutOccurred +#endif + +#ifndef traceEVENT_GROUP_WAIT_BITS_BLOCK + #define traceEVENT_GROUP_WAIT_BITS_BLOCK( xEventGroup, uxBitsToWaitFor ) +#endif + +#ifndef traceEVENT_GROUP_WAIT_BITS_END + #define traceEVENT_GROUP_WAIT_BITS_END( xEventGroup, uxBitsToWaitFor, xTimeoutOccurred ) ( void ) xTimeoutOccurred +#endif + +#ifndef traceEVENT_GROUP_CLEAR_BITS + #define traceEVENT_GROUP_CLEAR_BITS( xEventGroup, uxBitsToClear ) +#endif + +#ifndef traceEVENT_GROUP_CLEAR_BITS_FROM_ISR + #define traceEVENT_GROUP_CLEAR_BITS_FROM_ISR( xEventGroup, uxBitsToClear ) +#endif + +#ifndef traceEVENT_GROUP_SET_BITS + #define traceEVENT_GROUP_SET_BITS( xEventGroup, uxBitsToSet ) +#endif + +#ifndef traceEVENT_GROUP_SET_BITS_FROM_ISR + #define traceEVENT_GROUP_SET_BITS_FROM_ISR( xEventGroup, uxBitsToSet ) +#endif + +#ifndef traceEVENT_GROUP_DELETE + #define traceEVENT_GROUP_DELETE( xEventGroup ) +#endif + +#ifndef tracePEND_FUNC_CALL + #define tracePEND_FUNC_CALL(xFunctionToPend, pvParameter1, ulParameter2, ret) +#endif + +#ifndef tracePEND_FUNC_CALL_FROM_ISR + #define tracePEND_FUNC_CALL_FROM_ISR(xFunctionToPend, pvParameter1, ulParameter2, ret) +#endif + +#ifndef traceQUEUE_REGISTRY_ADD + #define traceQUEUE_REGISTRY_ADD(xQueue, pcQueueName) +#endif + +#ifndef traceTASK_NOTIFY_TAKE_BLOCK + #define traceTASK_NOTIFY_TAKE_BLOCK() +#endif + +#ifndef traceTASK_NOTIFY_TAKE + #define traceTASK_NOTIFY_TAKE() +#endif + +#ifndef traceTASK_NOTIFY_WAIT_BLOCK + #define traceTASK_NOTIFY_WAIT_BLOCK() +#endif + +#ifndef traceTASK_NOTIFY_WAIT + #define traceTASK_NOTIFY_WAIT() +#endif + +#ifndef traceTASK_NOTIFY + #define traceTASK_NOTIFY() +#endif + +#ifndef traceTASK_NOTIFY_FROM_ISR + #define traceTASK_NOTIFY_FROM_ISR() +#endif + +#ifndef 
traceTASK_NOTIFY_GIVE_FROM_ISR + #define traceTASK_NOTIFY_GIVE_FROM_ISR() +#endif + +#ifndef traceSTREAM_BUFFER_CREATE_FAILED + #define traceSTREAM_BUFFER_CREATE_FAILED( xIsMessageBuffer ) +#endif + +#ifndef traceSTREAM_BUFFER_CREATE_STATIC_FAILED + #define traceSTREAM_BUFFER_CREATE_STATIC_FAILED( xReturn, xIsMessageBuffer ) +#endif + +#ifndef traceSTREAM_BUFFER_CREATE + #define traceSTREAM_BUFFER_CREATE( pxStreamBuffer, xIsMessageBuffer ) +#endif + +#ifndef traceSTREAM_BUFFER_DELETE + #define traceSTREAM_BUFFER_DELETE( xStreamBuffer ) +#endif + +#ifndef traceSTREAM_BUFFER_RESET + #define traceSTREAM_BUFFER_RESET( xStreamBuffer ) +#endif + +#ifndef traceBLOCKING_ON_STREAM_BUFFER_SEND + #define traceBLOCKING_ON_STREAM_BUFFER_SEND( xStreamBuffer ) +#endif + +#ifndef traceSTREAM_BUFFER_SEND + #define traceSTREAM_BUFFER_SEND( xStreamBuffer, xBytesSent ) +#endif + +#ifndef traceSTREAM_BUFFER_SEND_FAILED + #define traceSTREAM_BUFFER_SEND_FAILED( xStreamBuffer ) +#endif + +#ifndef traceSTREAM_BUFFER_SEND_FROM_ISR + #define traceSTREAM_BUFFER_SEND_FROM_ISR( xStreamBuffer, xBytesSent ) +#endif + +#ifndef traceBLOCKING_ON_STREAM_BUFFER_RECEIVE + #define traceBLOCKING_ON_STREAM_BUFFER_RECEIVE( xStreamBuffer ) +#endif + +#ifndef traceSTREAM_BUFFER_RECEIVE + #define traceSTREAM_BUFFER_RECEIVE( xStreamBuffer, xReceivedLength ) +#endif + +#ifndef traceSTREAM_BUFFER_RECEIVE_FAILED + #define traceSTREAM_BUFFER_RECEIVE_FAILED( xStreamBuffer ) +#endif + +#ifndef traceSTREAM_BUFFER_RECEIVE_FROM_ISR + #define traceSTREAM_BUFFER_RECEIVE_FROM_ISR( xStreamBuffer, xReceivedLength ) +#endif + +#ifndef configGENERATE_RUN_TIME_STATS + #define configGENERATE_RUN_TIME_STATS 0 +#endif + +#if ( configGENERATE_RUN_TIME_STATS == 1 ) + + #ifndef portCONFIGURE_TIMER_FOR_RUN_TIME_STATS + #error If configGENERATE_RUN_TIME_STATS is defined then portCONFIGURE_TIMER_FOR_RUN_TIME_STATS must also be defined. portCONFIGURE_TIMER_FOR_RUN_TIME_STATS should call a port layer function to setup a peripheral timer/counter that can then be used as the run time counter time base. + #endif /* portCONFIGURE_TIMER_FOR_RUN_TIME_STATS */ + + #ifndef portGET_RUN_TIME_COUNTER_VALUE + #ifndef portALT_GET_RUN_TIME_COUNTER_VALUE + #error If configGENERATE_RUN_TIME_STATS is defined then either portGET_RUN_TIME_COUNTER_VALUE or portALT_GET_RUN_TIME_COUNTER_VALUE must also be defined. See the examples provided and the FreeRTOS web site for more information. 
+ #endif /* portALT_GET_RUN_TIME_COUNTER_VALUE */ + #endif /* portGET_RUN_TIME_COUNTER_VALUE */ + +#endif /* configGENERATE_RUN_TIME_STATS */ + +#ifndef portCONFIGURE_TIMER_FOR_RUN_TIME_STATS + #define portCONFIGURE_TIMER_FOR_RUN_TIME_STATS() +#endif + +#ifndef configUSE_MALLOC_FAILED_HOOK + #define configUSE_MALLOC_FAILED_HOOK 0 +#endif + +#ifndef portPRIVILEGE_BIT + #define portPRIVILEGE_BIT ( ( UBaseType_t ) 0x00 ) +#endif + +#ifndef portYIELD_WITHIN_API + #define portYIELD_WITHIN_API portYIELD +#endif + +#ifndef portSUPPRESS_TICKS_AND_SLEEP + #define portSUPPRESS_TICKS_AND_SLEEP( xExpectedIdleTime ) +#endif + +#ifndef configEXPECTED_IDLE_TIME_BEFORE_SLEEP + #define configEXPECTED_IDLE_TIME_BEFORE_SLEEP 2 +#endif + +#if configEXPECTED_IDLE_TIME_BEFORE_SLEEP < 2 + #error configEXPECTED_IDLE_TIME_BEFORE_SLEEP must not be less than 2 +#endif + +#ifndef configUSE_TICKLESS_IDLE + #define configUSE_TICKLESS_IDLE 0 +#endif + +#ifndef configPRE_SUPPRESS_TICKS_AND_SLEEP_PROCESSING + #define configPRE_SUPPRESS_TICKS_AND_SLEEP_PROCESSING( x ) +#endif + +#ifndef configPRE_SLEEP_PROCESSING + #define configPRE_SLEEP_PROCESSING( x ) +#endif + +#ifndef configPOST_SLEEP_PROCESSING + #define configPOST_SLEEP_PROCESSING( x ) +#endif + +#ifndef configUSE_QUEUE_SETS + #define configUSE_QUEUE_SETS 0 +#endif + +#ifndef portTASK_USES_FLOATING_POINT + #define portTASK_USES_FLOATING_POINT() +#endif + +#ifndef portALLOCATE_SECURE_CONTEXT + #define portALLOCATE_SECURE_CONTEXT( ulSecureStackSize ) +#endif + +#ifndef portDONT_DISCARD + #define portDONT_DISCARD +#endif + +#ifndef configUSE_TIME_SLICING + #define configUSE_TIME_SLICING 1 +#endif + +#ifndef configINCLUDE_APPLICATION_DEFINED_PRIVILEGED_FUNCTIONS + #define configINCLUDE_APPLICATION_DEFINED_PRIVILEGED_FUNCTIONS 0 +#endif + +#ifndef configUSE_STATS_FORMATTING_FUNCTIONS + #define configUSE_STATS_FORMATTING_FUNCTIONS 0 +#endif + +#ifndef portASSERT_IF_INTERRUPT_PRIORITY_INVALID + #define portASSERT_IF_INTERRUPT_PRIORITY_INVALID() +#endif + +#ifndef configUSE_TRACE_FACILITY + #define configUSE_TRACE_FACILITY 0 +#endif + +#ifndef mtCOVERAGE_TEST_MARKER + #define mtCOVERAGE_TEST_MARKER() +#endif + +#ifndef mtCOVERAGE_TEST_DELAY + #define mtCOVERAGE_TEST_DELAY() +#endif + +#ifndef portASSERT_IF_IN_ISR + #define portASSERT_IF_IN_ISR() +#endif + +#ifndef configUSE_PORT_OPTIMISED_TASK_SELECTION + #define configUSE_PORT_OPTIMISED_TASK_SELECTION 0 +#endif + +#ifndef configAPPLICATION_ALLOCATED_HEAP + #define configAPPLICATION_ALLOCATED_HEAP 0 +#endif + +#ifndef configUSE_TASK_NOTIFICATIONS + #define configUSE_TASK_NOTIFICATIONS 1 +#endif + +#ifndef configUSE_POSIX_ERRNO + #define configUSE_POSIX_ERRNO 0 +#endif + +#ifndef portTICK_TYPE_IS_ATOMIC + #define portTICK_TYPE_IS_ATOMIC 0 +#endif + +#ifndef configSUPPORT_STATIC_ALLOCATION + /* Defaults to 0 for backward compatibility. */ + #define configSUPPORT_STATIC_ALLOCATION 0 +#endif + +#ifndef configSUPPORT_DYNAMIC_ALLOCATION + /* Defaults to 1 for backward compatibility. */ + #define configSUPPORT_DYNAMIC_ALLOCATION 1 +#endif + +#ifndef configSTACK_DEPTH_TYPE + /* Defaults to uint16_t for backward compatibility, but can be overridden + in FreeRTOSConfig.h if uint16_t is too restrictive. */ + #define configSTACK_DEPTH_TYPE uint16_t +#endif + +#ifndef configMESSAGE_BUFFER_LENGTH_TYPE + /* Defaults to size_t for backward compatibility, but can be overridden + in FreeRTOSConfig.h if lengths will always be less than the number of bytes + in a size_t. 
*/ + #define configMESSAGE_BUFFER_LENGTH_TYPE size_t +#endif + +/* Sanity check the configuration. */ +#if( configUSE_TICKLESS_IDLE != 0 ) + #if( INCLUDE_vTaskSuspend != 1 ) + #error INCLUDE_vTaskSuspend must be set to 1 if configUSE_TICKLESS_IDLE is not set to 0 + #endif /* INCLUDE_vTaskSuspend */ +#endif /* configUSE_TICKLESS_IDLE */ + +#if( ( configSUPPORT_STATIC_ALLOCATION == 0 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 0 ) ) + #error configSUPPORT_STATIC_ALLOCATION and configSUPPORT_DYNAMIC_ALLOCATION cannot both be 0, but can both be 1. +#endif + +#if( ( configUSE_RECURSIVE_MUTEXES == 1 ) && ( configUSE_MUTEXES != 1 ) ) + #error configUSE_MUTEXES must be set to 1 to use recursive mutexes +#endif + +#ifndef configINITIAL_TICK_COUNT + #define configINITIAL_TICK_COUNT 0 +#endif + +#if( portTICK_TYPE_IS_ATOMIC == 0 ) + /* Either variables of tick type cannot be read atomically, or + portTICK_TYPE_IS_ATOMIC was not set - map the critical sections used when + the tick count is returned to the standard critical section macros. */ + #define portTICK_TYPE_ENTER_CRITICAL() portENTER_CRITICAL() + #define portTICK_TYPE_EXIT_CRITICAL() portEXIT_CRITICAL() + #define portTICK_TYPE_SET_INTERRUPT_MASK_FROM_ISR() portSET_INTERRUPT_MASK_FROM_ISR() + #define portTICK_TYPE_CLEAR_INTERRUPT_MASK_FROM_ISR( x ) portCLEAR_INTERRUPT_MASK_FROM_ISR( ( x ) ) +#else + /* The tick type can be read atomically, so critical sections used when the + tick count is returned can be defined away. */ + #define portTICK_TYPE_ENTER_CRITICAL() + #define portTICK_TYPE_EXIT_CRITICAL() + #define portTICK_TYPE_SET_INTERRUPT_MASK_FROM_ISR() 0 + #define portTICK_TYPE_CLEAR_INTERRUPT_MASK_FROM_ISR( x ) ( void ) x +#endif + +/* Definitions to allow backward compatibility with FreeRTOS versions prior to +V8 if desired. */ +#ifndef configENABLE_BACKWARD_COMPATIBILITY + #define configENABLE_BACKWARD_COMPATIBILITY 1 +#endif + +#ifndef configPRINTF + /* configPRINTF() was not defined, so define it away to nothing. To use + configPRINTF() then define it as follows (where MyPrintFunction() is + provided by the application writer): + + void MyPrintFunction(const char *pcFormat, ... ); + #define configPRINTF( X ) MyPrintFunction X + + Then call like a standard printf() function, but placing brackets around + all parameters so they are passed as a single parameter. For example: + configPRINTF( ("Value = %d", MyVariable) ); */ + #define configPRINTF( X ) +#endif + +#ifndef configMAX + /* The application writer has not provided their own MAX macro, so define + the following generic implementation. */ + #define configMAX( a, b ) ( ( ( a ) > ( b ) ) ? ( a ) : ( b ) ) +#endif + +#ifndef configMIN + /* The application writer has not provided their own MAX macro, so define + the following generic implementation. */ + #define configMIN( a, b ) ( ( ( a ) < ( b ) ) ? 
( a ) : ( b ) ) +#endif + +#if configENABLE_BACKWARD_COMPATIBILITY == 1 + #define eTaskStateGet eTaskGetState + #define portTickType TickType_t + #define xTaskHandle TaskHandle_t + #define xQueueHandle QueueHandle_t + #define xSemaphoreHandle SemaphoreHandle_t + #define xQueueSetHandle QueueSetHandle_t + #define xQueueSetMemberHandle QueueSetMemberHandle_t + #define xTimeOutType TimeOut_t + #define xMemoryRegion MemoryRegion_t + #define xTaskParameters TaskParameters_t + #define xTaskStatusType TaskStatus_t + #define xTimerHandle TimerHandle_t + #define xCoRoutineHandle CoRoutineHandle_t + #define pdTASK_HOOK_CODE TaskHookFunction_t + #define portTICK_RATE_MS portTICK_PERIOD_MS + #define pcTaskGetTaskName pcTaskGetName + #define pcTimerGetTimerName pcTimerGetName + #define pcQueueGetQueueName pcQueueGetName + #define vTaskGetTaskInfo vTaskGetInfo + + /* Backward compatibility within the scheduler code only - these definitions + are not really required but are included for completeness. */ + #define tmrTIMER_CALLBACK TimerCallbackFunction_t + #define pdTASK_CODE TaskFunction_t + #define xListItem ListItem_t + #define xList List_t + + /* For libraries that break the list data hiding, and access list structure + members directly (which is not supposed to be done). */ + #define pxContainer pvContainer +#endif /* configENABLE_BACKWARD_COMPATIBILITY */ + +#if( configUSE_ALTERNATIVE_API != 0 ) + #error The alternative API was deprecated some time ago, and was removed in FreeRTOS V9.0 0 +#endif + +/* Set configUSE_TASK_FPU_SUPPORT to 0 to omit floating point support even +if floating point hardware is otherwise supported by the FreeRTOS port in use. +This constant is not supported by all FreeRTOS ports that include floating +point support. */ +#ifndef configUSE_TASK_FPU_SUPPORT + #define configUSE_TASK_FPU_SUPPORT 1 +#endif + +/* Set configENABLE_MPU to 1 to enable MPU support and 0 to disable it. This is +currently used in ARMv8M ports. */ +#ifndef configENABLE_MPU + #define configENABLE_MPU 0 +#endif + +/* Set configENABLE_FPU to 1 to enable FPU support and 0 to disable it. This is +currently used in ARMv8M ports. */ +#ifndef configENABLE_FPU + #define configENABLE_FPU 1 +#endif + +/* Set configENABLE_TRUSTZONE to 1 enable TrustZone support and 0 to disable it. +This is currently used in ARMv8M ports. */ +#ifndef configENABLE_TRUSTZONE + #define configENABLE_TRUSTZONE 1 +#endif + +/* Set configRUN_FREERTOS_SECURE_ONLY to 1 to run the FreeRTOS ARMv8M port on +the Secure Side only. */ +#ifndef configRUN_FREERTOS_SECURE_ONLY + #define configRUN_FREERTOS_SECURE_ONLY 0 +#endif + +/* Sometimes the FreeRTOSConfig.h settings only allow a task to be created using + * dynamically allocated RAM, in which case when any task is deleted it is known + * that both the task's stack and TCB need to be freed. Sometimes the + * FreeRTOSConfig.h settings only allow a task to be created using statically + * allocated RAM, in which case when any task is deleted it is known that neither + * the task's stack or TCB should be freed. Sometimes the FreeRTOSConfig.h + * settings allow a task to be created using either statically or dynamically + * allocated RAM, in which case a member of the TCB is used to record whether the + * stack and/or TCB were allocated statically or dynamically, so when a task is + * deleted the RAM that was allocated dynamically is freed again and no attempt is + * made to free the RAM that was allocated statically. 
+ * tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE is only true if it is possible for a + * task to be created using either statically or dynamically allocated RAM. Note + * that if portUSING_MPU_WRAPPERS is 1 then a protected task can be created with + * a statically allocated stack and a dynamically allocated TCB. + * + * The following table lists various combinations of portUSING_MPU_WRAPPERS, + * configSUPPORT_DYNAMIC_ALLOCATION and configSUPPORT_STATIC_ALLOCATION and + * when it is possible to have both static and dynamic allocation: + * +-----+---------+--------+-----------------------------+-----------------------------------+------------------+-----------+ + * | MPU | Dynamic | Static | Available Functions | Possible Allocations | Both Dynamic and | Need Free | + * | | | | | | Static Possible | | + * +-----+---------+--------+-----------------------------+-----------------------------------+------------------+-----------+ + * | 0 | 0 | 1 | xTaskCreateStatic | TCB - Static, Stack - Static | No | No | + * +-----|---------|--------|-----------------------------|-----------------------------------|------------------|-----------| + * | 0 | 1 | 0 | xTaskCreate | TCB - Dynamic, Stack - Dynamic | No | Yes | + * +-----|---------|--------|-----------------------------|-----------------------------------|------------------|-----------| + * | 0 | 1 | 1 | xTaskCreate, | 1. TCB - Dynamic, Stack - Dynamic | Yes | Yes | + * | | | | xTaskCreateStatic | 2. TCB - Static, Stack - Static | | | + * +-----|---------|--------|-----------------------------|-----------------------------------|------------------|-----------| + * | 1 | 0 | 1 | xTaskCreateStatic, | TCB - Static, Stack - Static | No | No | + * | | | | xTaskCreateRestrictedStatic | | | | + * +-----|---------|--------|-----------------------------|-----------------------------------|------------------|-----------| + * | 1 | 1 | 0 | xTaskCreate, | 1. TCB - Dynamic, Stack - Dynamic | Yes | Yes | + * | | | | xTaskCreateRestricted | 2. TCB - Dynamic, Stack - Static | | | + * +-----|---------|--------|-----------------------------|-----------------------------------|------------------|-----------| + * | 1 | 1 | 1 | xTaskCreate, | 1. TCB - Dynamic, Stack - Dynamic | Yes | Yes | + * | | | | xTaskCreateStatic, | 2. TCB - Dynamic, Stack - Static | | | + * | | | | xTaskCreateRestricted, | 3. TCB - Static, Stack - Static | | | + * | | | | xTaskCreateRestrictedStatic | | | | + * +-----+---------+--------+-----------------------------+-----------------------------------+------------------+-----------+ + */ +#define tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE ( ( ( portUSING_MPU_WRAPPERS == 0 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) ) || \ + ( ( portUSING_MPU_WRAPPERS == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) ) ) + +/* + * In line with software engineering best practice, FreeRTOS implements a strict + * data hiding policy, so the real structures used by FreeRTOS to maintain the + * state of tasks, queues, semaphores, etc. are not accessible to the application + * code. However, if the application writer wants to statically allocate such + * an object then the size of the object needs to be know. Dummy structures + * that are guaranteed to have the same size and alignment requirements of the + * real objects are used for this purpose. The dummy list and list item + * structures below are used for inclusion in such a dummy structure. 
+ */ +struct xSTATIC_LIST_ITEM +{ + #if( configUSE_LIST_DATA_INTEGRITY_CHECK_BYTES == 1 ) + TickType_t xDummy1; + #endif + TickType_t xDummy2; + void *pvDummy3[ 4 ]; + #if( configUSE_LIST_DATA_INTEGRITY_CHECK_BYTES == 1 ) + TickType_t xDummy4; + #endif +}; +typedef struct xSTATIC_LIST_ITEM StaticListItem_t; + +/* See the comments above the struct xSTATIC_LIST_ITEM definition. */ +struct xSTATIC_MINI_LIST_ITEM +{ + #if( configUSE_LIST_DATA_INTEGRITY_CHECK_BYTES == 1 ) + TickType_t xDummy1; + #endif + TickType_t xDummy2; + void *pvDummy3[ 2 ]; +}; +typedef struct xSTATIC_MINI_LIST_ITEM StaticMiniListItem_t; + +/* See the comments above the struct xSTATIC_LIST_ITEM definition. */ +typedef struct xSTATIC_LIST +{ + #if( configUSE_LIST_DATA_INTEGRITY_CHECK_BYTES == 1 ) + TickType_t xDummy1; + #endif + UBaseType_t uxDummy2; + void *pvDummy3; + StaticMiniListItem_t xDummy4; + #if( configUSE_LIST_DATA_INTEGRITY_CHECK_BYTES == 1 ) + TickType_t xDummy5; + #endif +} StaticList_t; + +/* + * In line with software engineering best practice, especially when supplying a + * library that is likely to change in future versions, FreeRTOS implements a + * strict data hiding policy. This means the Task structure used internally by + * FreeRTOS is not accessible to application code. However, if the application + * writer wants to statically allocate the memory required to create a task then + * the size of the task object needs to be know. The StaticTask_t structure + * below is provided for this purpose. Its sizes and alignment requirements are + * guaranteed to match those of the genuine structure, no matter which + * architecture is being used, and no matter how the values in FreeRTOSConfig.h + * are set. Its contents are somewhat obfuscated in the hope users will + * recognise that it would be unwise to make direct use of the structure members. + */ +typedef struct xSTATIC_TCB +{ + void *pxDummy1; + #if ( portUSING_MPU_WRAPPERS == 1 ) + xMPU_SETTINGS xDummy2; + #endif + StaticListItem_t xDummy3[ 2 ]; + UBaseType_t uxDummy5; + void *pxDummy6; + uint8_t ucDummy7[ configMAX_TASK_NAME_LEN ]; + #if ( ( portSTACK_GROWTH > 0 ) || ( configRECORD_STACK_HIGH_ADDRESS == 1 ) ) + void *pxDummy8; + #endif + #if ( portCRITICAL_NESTING_IN_TCB == 1 ) + UBaseType_t uxDummy9; + #endif + #if ( configUSE_TRACE_FACILITY == 1 ) + UBaseType_t uxDummy10[ 2 ]; + #endif + #if ( configUSE_MUTEXES == 1 ) + UBaseType_t uxDummy12[ 2 ]; + #endif + #if ( configUSE_APPLICATION_TASK_TAG == 1 ) + void *pxDummy14; + #endif + #if( configNUM_THREAD_LOCAL_STORAGE_POINTERS > 0 ) + void *pvDummy15[ configNUM_THREAD_LOCAL_STORAGE_POINTERS ]; + #endif + #if ( configGENERATE_RUN_TIME_STATS == 1 ) + uint32_t ulDummy16; + #endif + #if ( configUSE_NEWLIB_REENTRANT == 1 ) + struct _reent xDummy17; + #endif + #if ( configUSE_TASK_NOTIFICATIONS == 1 ) + uint32_t ulDummy18; + uint8_t ucDummy19; + #endif + #if ( tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE != 0 ) + uint8_t uxDummy20; + #endif + + #if( INCLUDE_xTaskAbortDelay == 1 ) + uint8_t ucDummy21; + #endif + #if ( configUSE_POSIX_ERRNO == 1 ) + int iDummy22; + #endif +} StaticTask_t; + +/* + * In line with software engineering best practice, especially when supplying a + * library that is likely to change in future versions, FreeRTOS implements a + * strict data hiding policy. This means the Queue structure used internally by + * FreeRTOS is not accessible to application code. 
However, if the application + * writer wants to statically allocate the memory required to create a queue + * then the size of the queue object needs to be know. The StaticQueue_t + * structure below is provided for this purpose. Its sizes and alignment + * requirements are guaranteed to match those of the genuine structure, no + * matter which architecture is being used, and no matter how the values in + * FreeRTOSConfig.h are set. Its contents are somewhat obfuscated in the hope + * users will recognise that it would be unwise to make direct use of the + * structure members. + */ +typedef struct xSTATIC_QUEUE +{ + void *pvDummy1[ 3 ]; + + union + { + void *pvDummy2; + UBaseType_t uxDummy2; + } u; + + StaticList_t xDummy3[ 2 ]; + UBaseType_t uxDummy4[ 3 ]; + uint8_t ucDummy5[ 2 ]; + + #if( ( configSUPPORT_STATIC_ALLOCATION == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) ) + uint8_t ucDummy6; + #endif + + #if ( configUSE_QUEUE_SETS == 1 ) + void *pvDummy7; + #endif + + #if ( configUSE_TRACE_FACILITY == 1 ) + UBaseType_t uxDummy8; + uint8_t ucDummy9; + #endif + +} StaticQueue_t; +typedef StaticQueue_t StaticSemaphore_t; + +/* + * In line with software engineering best practice, especially when supplying a + * library that is likely to change in future versions, FreeRTOS implements a + * strict data hiding policy. This means the event group structure used + * internally by FreeRTOS is not accessible to application code. However, if + * the application writer wants to statically allocate the memory required to + * create an event group then the size of the event group object needs to be + * know. The StaticEventGroup_t structure below is provided for this purpose. + * Its sizes and alignment requirements are guaranteed to match those of the + * genuine structure, no matter which architecture is being used, and no matter + * how the values in FreeRTOSConfig.h are set. Its contents are somewhat + * obfuscated in the hope users will recognise that it would be unwise to make + * direct use of the structure members. + */ +typedef struct xSTATIC_EVENT_GROUP +{ + TickType_t xDummy1; + StaticList_t xDummy2; + + #if( configUSE_TRACE_FACILITY == 1 ) + UBaseType_t uxDummy3; + #endif + + #if( ( configSUPPORT_STATIC_ALLOCATION == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) ) + uint8_t ucDummy4; + #endif + +} StaticEventGroup_t; + +/* + * In line with software engineering best practice, especially when supplying a + * library that is likely to change in future versions, FreeRTOS implements a + * strict data hiding policy. This means the software timer structure used + * internally by FreeRTOS is not accessible to application code. However, if + * the application writer wants to statically allocate the memory required to + * create a software timer then the size of the queue object needs to be know. + * The StaticTimer_t structure below is provided for this purpose. Its sizes + * and alignment requirements are guaranteed to match those of the genuine + * structure, no matter which architecture is being used, and no matter how the + * values in FreeRTOSConfig.h are set. Its contents are somewhat obfuscated in + * the hope users will recognise that it would be unwise to make direct use of + * the structure members. 
+ */ +typedef struct xSTATIC_TIMER +{ + void *pvDummy1; + StaticListItem_t xDummy2; + TickType_t xDummy3; + void *pvDummy5; + TaskFunction_t pvDummy6; + #if( configUSE_TRACE_FACILITY == 1 ) + UBaseType_t uxDummy7; + #endif + uint8_t ucDummy8; + +} StaticTimer_t; + +/* +* In line with software engineering best practice, especially when supplying a +* library that is likely to change in future versions, FreeRTOS implements a +* strict data hiding policy. This means the stream buffer structure used +* internally by FreeRTOS is not accessible to application code. However, if +* the application writer wants to statically allocate the memory required to +* create a stream buffer then the size of the stream buffer object needs to be +* know. The StaticStreamBuffer_t structure below is provided for this purpose. +* Its size and alignment requirements are guaranteed to match those of the +* genuine structure, no matter which architecture is being used, and no matter +* how the values in FreeRTOSConfig.h are set. Its contents are somewhat +* obfuscated in the hope users will recognise that it would be unwise to make +* direct use of the structure members. +*/ +typedef struct xSTATIC_STREAM_BUFFER +{ + size_t uxDummy1[ 4 ]; + void * pvDummy2[ 3 ]; + uint8_t ucDummy3; + #if ( configUSE_TRACE_FACILITY == 1 ) + UBaseType_t uxDummy4; + #endif +} StaticStreamBuffer_t; + +/* Message buffers are built on stream buffers. */ +typedef StaticStreamBuffer_t StaticMessageBuffer_t; + +#ifdef __cplusplus +} +#endif + +#endif /* INC_FREERTOS_H */ + diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/FreeRTOS/Source/include/StackMacros.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/FreeRTOS/Source/include/StackMacros.h new file mode 100644 index 0000000..000d866 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/FreeRTOS/Source/include/StackMacros.h @@ -0,0 +1,133 @@ +/* + * FreeRTOS Kernel V10.2.1 + * Copyright (C) 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * http://www.FreeRTOS.org + * http://aws.amazon.com/freertos + * + * 1 tab == 4 spaces! + */ + +#ifndef STACK_MACROS_H +#define STACK_MACROS_H + +#ifndef _MSC_VER /* Visual Studio doesn't support #warning. */ + #warning The name of this file has changed to stack_macros.h. Please update your code accordingly. 
This source file (which has the original name) will be removed in future released. +#endif + +/* + * Call the stack overflow hook function if the stack of the task being swapped + * out is currently overflowed, or looks like it might have overflowed in the + * past. + * + * Setting configCHECK_FOR_STACK_OVERFLOW to 1 will cause the macro to check + * the current stack state only - comparing the current top of stack value to + * the stack limit. Setting configCHECK_FOR_STACK_OVERFLOW to greater than 1 + * will also cause the last few stack bytes to be checked to ensure the value + * to which the bytes were set when the task was created have not been + * overwritten. Note this second test does not guarantee that an overflowed + * stack will always be recognised. + */ + +/*-----------------------------------------------------------*/ + +#if( ( configCHECK_FOR_STACK_OVERFLOW == 1 ) && ( portSTACK_GROWTH < 0 ) ) + + /* Only the current stack state is to be checked. */ + #define taskCHECK_FOR_STACK_OVERFLOW() \ + { \ + /* Is the currently saved stack pointer within the stack limit? */ \ + if( pxCurrentTCB->pxTopOfStack <= pxCurrentTCB->pxStack ) \ + { \ + vApplicationStackOverflowHook( ( TaskHandle_t ) pxCurrentTCB, pxCurrentTCB->pcTaskName ); \ + } \ + } + +#endif /* configCHECK_FOR_STACK_OVERFLOW == 1 */ +/*-----------------------------------------------------------*/ + +#if( ( configCHECK_FOR_STACK_OVERFLOW == 1 ) && ( portSTACK_GROWTH > 0 ) ) + + /* Only the current stack state is to be checked. */ + #define taskCHECK_FOR_STACK_OVERFLOW() \ + { \ + \ + /* Is the currently saved stack pointer within the stack limit? */ \ + if( pxCurrentTCB->pxTopOfStack >= pxCurrentTCB->pxEndOfStack ) \ + { \ + vApplicationStackOverflowHook( ( TaskHandle_t ) pxCurrentTCB, pxCurrentTCB->pcTaskName ); \ + } \ + } + +#endif /* configCHECK_FOR_STACK_OVERFLOW == 1 */ +/*-----------------------------------------------------------*/ + +#if( ( configCHECK_FOR_STACK_OVERFLOW > 1 ) && ( portSTACK_GROWTH < 0 ) ) + + #define taskCHECK_FOR_STACK_OVERFLOW() \ + { \ + const uint32_t * const pulStack = ( uint32_t * ) pxCurrentTCB->pxStack; \ + const uint32_t ulCheckValue = ( uint32_t ) 0xa5a5a5a5; \ + \ + if( ( pulStack[ 0 ] != ulCheckValue ) || \ + ( pulStack[ 1 ] != ulCheckValue ) || \ + ( pulStack[ 2 ] != ulCheckValue ) || \ + ( pulStack[ 3 ] != ulCheckValue ) ) \ + { \ + vApplicationStackOverflowHook( ( TaskHandle_t ) pxCurrentTCB, pxCurrentTCB->pcTaskName ); \ + } \ + } + +#endif /* #if( configCHECK_FOR_STACK_OVERFLOW > 1 ) */ +/*-----------------------------------------------------------*/ + +#if( ( configCHECK_FOR_STACK_OVERFLOW > 1 ) && ( portSTACK_GROWTH > 0 ) ) + + #define taskCHECK_FOR_STACK_OVERFLOW() \ + { \ + int8_t *pcEndOfStack = ( int8_t * ) pxCurrentTCB->pxEndOfStack; \ + static const uint8_t ucExpectedStackBytes[] = { tskSTACK_FILL_BYTE, tskSTACK_FILL_BYTE, tskSTACK_FILL_BYTE, tskSTACK_FILL_BYTE, \ + tskSTACK_FILL_BYTE, tskSTACK_FILL_BYTE, tskSTACK_FILL_BYTE, tskSTACK_FILL_BYTE, \ + tskSTACK_FILL_BYTE, tskSTACK_FILL_BYTE, tskSTACK_FILL_BYTE, tskSTACK_FILL_BYTE, \ + tskSTACK_FILL_BYTE, tskSTACK_FILL_BYTE, tskSTACK_FILL_BYTE, tskSTACK_FILL_BYTE, \ + tskSTACK_FILL_BYTE, tskSTACK_FILL_BYTE, tskSTACK_FILL_BYTE, tskSTACK_FILL_BYTE }; \ + \ + \ + pcEndOfStack -= sizeof( ucExpectedStackBytes ); \ + \ + /* Has the extremity of the task stack ever been written over? 
*/ \ + if( memcmp( ( void * ) pcEndOfStack, ( void * ) ucExpectedStackBytes, sizeof( ucExpectedStackBytes ) ) != 0 ) \ + { \ + vApplicationStackOverflowHook( ( TaskHandle_t ) pxCurrentTCB, pxCurrentTCB->pcTaskName ); \ + } \ + } + +#endif /* #if( configCHECK_FOR_STACK_OVERFLOW > 1 ) */ +/*-----------------------------------------------------------*/ + +/* Remove stack overflow macro if not being used. */ +#ifndef taskCHECK_FOR_STACK_OVERFLOW + #define taskCHECK_FOR_STACK_OVERFLOW() +#endif + + + +#endif /* STACK_MACROS_H */ + diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/FreeRTOS/Source/include/croutine.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/FreeRTOS/Source/include/croutine.h new file mode 100644 index 0000000..a781749 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/FreeRTOS/Source/include/croutine.h @@ -0,0 +1,720 @@ +/* + * FreeRTOS Kernel V10.2.1 + * Copyright (C) 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * http://www.FreeRTOS.org + * http://aws.amazon.com/freertos + * + * 1 tab == 4 spaces! + */ + +#ifndef CO_ROUTINE_H +#define CO_ROUTINE_H + +#ifndef INC_FREERTOS_H + #error "include FreeRTOS.h must appear in source files before include croutine.h" +#endif + +#include "list.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* Used to hide the implementation of the co-routine control block. The +control block structure however has to be included in the header due to +the macro implementation of the co-routine functionality. */ +typedef void * CoRoutineHandle_t; + +/* Defines the prototype to which co-routine functions must conform. */ +typedef void (*crCOROUTINE_CODE)( CoRoutineHandle_t, UBaseType_t ); + +typedef struct corCoRoutineControlBlock +{ + crCOROUTINE_CODE pxCoRoutineFunction; + ListItem_t xGenericListItem; /*< List item used to place the CRCB in ready and blocked queues. */ + ListItem_t xEventListItem; /*< List item used to place the CRCB in event lists. */ + UBaseType_t uxPriority; /*< The priority of the co-routine in relation to other co-routines. */ + UBaseType_t uxIndex; /*< Used to distinguish between co-routines when multiple co-routines use the same co-routine function. */ + uint16_t uxState; /*< Used internally by the co-routine implementation. */ +} CRCB_t; /* Co-routine control block. 
Note must be identical in size down to uxPriority with TCB_t. */ + +/** + * croutine. h + *
+ BaseType_t xCoRoutineCreate(
+                                 crCOROUTINE_CODE pxCoRoutineCode,
+                                 UBaseType_t uxPriority,
+                                 UBaseType_t uxIndex
+                               );
+ * + * Create a new co-routine and add it to the list of co-routines that are + * ready to run. + * + * @param pxCoRoutineCode Pointer to the co-routine function. Co-routine + * functions require special syntax - see the co-routine section of the WEB + * documentation for more information. + * + * @param uxPriority The priority with respect to other co-routines at which + * the co-routine will run. + * + * @param uxIndex Used to distinguish between different co-routines that + * execute the same function. See the example below and the co-routine section + * of the WEB documentation for further information. + * + * @return pdPASS if the co-routine was successfully created and added to a ready + * list, otherwise an error code defined with ProjDefs.h. + * + * Example usage: +
+ // Co-routine to be created.
+ void vFlashCoRoutine( CoRoutineHandle_t xHandle, UBaseType_t uxIndex )
+ {
+ // Variables in co-routines must be declared static if they must maintain value across a blocking call.
+ // This may not be necessary for const variables.
+ static const char cLedToFlash[ 2 ] = { 5, 6 };
+ static const TickType_t uxFlashRates[ 2 ] = { 200, 400 };
+
+     // Must start every co-routine with a call to crSTART();
+     crSTART( xHandle );
+
+     for( ;; )
+     {
+         // This co-routine just delays for a fixed period, then toggles
+         // an LED.  Two co-routines are created using this function, so
+         // the uxIndex parameter is used to tell the co-routine which
+         // LED to flash and how long to delay.
+         vParTestToggleLED( cLedToFlash[ uxIndex ] );
+         crDELAY( xHandle, uxFlashRates[ uxIndex ] );
+     }
+
+     // Must end every co-routine with a call to crEND();
+     crEND();
+ }
+
+ // Function that creates two co-routines.
+ void vOtherFunction( void )
+ {
+ UBaseType_t uxIndex;
+
+     // Create two co-routines at priority 0.  The first is given index 0
+     // so (from the code above) toggles LED 5 every 200 ticks.  The second
+     // is given index 1 so toggles LED 6 every 400 ticks.
+     for( uxIndex = 0; uxIndex < 2; uxIndex++ )
+     {
+         xCoRoutineCreate( vFlashCoRoutine, 0, uxIndex );
+     }
+ }
+   
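+ *
+ * A minimal supplementary sketch (not part of the upstream example): co-routines are
+ * intended to be used with configUSE_CO_ROUTINES set to 1 in FreeRTOSConfig.h (this
+ * header defaults it to 0), and the return value of xCoRoutineCreate() can be checked
+ * in the usual way:
+ *
+ *     if( xCoRoutineCreate( vFlashCoRoutine, 0, uxIndex ) != pdPASS )
+ *     {
+ *         // The co-routine could not be created, for example because the
+ *         // required memory could not be allocated.
+ *     }
+ *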
+ * \defgroup xCoRoutineCreate xCoRoutineCreate + * \ingroup Tasks + */ +BaseType_t xCoRoutineCreate( crCOROUTINE_CODE pxCoRoutineCode, UBaseType_t uxPriority, UBaseType_t uxIndex ); + + +/** + * croutine. h + *
+ void vCoRoutineSchedule( void );
+ * + * Run a co-routine. + * + * vCoRoutineSchedule() executes the highest priority co-routine that is able + * to run. The co-routine will execute until it either blocks, yields or is + * preempted by a task. Co-routines execute cooperatively so one + * co-routine cannot be preempted by another, but can be preempted by a task. + * + * If an application comprises of both tasks and co-routines then + * vCoRoutineSchedule should be called from the idle task (in an idle task + * hook). + * + * Example usage: +
+ // This idle task hook will schedule a co-routine each time it is called.
+ // The rest of the idle task will execute between co-routine calls.
+ void vApplicationIdleHook( void )
+ {
+	vCoRoutineSchedule();
+ }
+
+ // Alternatively, if you do not require any other part of the idle task to
+ // execute, the idle task hook can call vCoRoutineSchedule() within an
+ // infinite loop.
+ void vApplicationIdleHook( void )
+ {
+    for( ;; )
+    {
+        vCoRoutineSchedule();
+    }
+ }
+ 
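+ *
+ * A related configuration note (not part of the upstream text): the
+ * vApplicationIdleHook() shown above is only called by the kernel when
+ * configUSE_IDLE_HOOK is set to 1 in FreeRTOSConfig.h, for example:
+ *
+ *     #define configUSE_IDLE_HOOK    1
+ *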
+ * \defgroup vCoRoutineSchedule vCoRoutineSchedule + * \ingroup Tasks + */ +void vCoRoutineSchedule( void ); + +/** + * croutine. h + *
+ crSTART( CoRoutineHandle_t xHandle );
+ * + * This macro MUST always be called at the start of a co-routine function. + * + * Example usage: +
+ // Co-routine to be created.
+ void vACoRoutine( CoRoutineHandle_t xHandle, UBaseType_t uxIndex )
+ {
+ // Variables in co-routines must be declared static if they must maintain value across a blocking call.
+ static int32_t ulAVariable;
+
+     // Must start every co-routine with a call to crSTART();
+     crSTART( xHandle );
+
+     for( ;; )
+     {
+          // Co-routine functionality goes here.
+     }
+
+     // Must end every co-routine with a call to crEND();
+     crEND();
+ }
+ * \defgroup crSTART crSTART + * \ingroup Tasks + */ +#define crSTART( pxCRCB ) switch( ( ( CRCB_t * )( pxCRCB ) )->uxState ) { case 0: + +/** + * croutine. h + *
+ crEND();
+ * + * This macro MUST always be called at the end of a co-routine function. + * + * Example usage: +
+ // Co-routine to be created.
+ void vACoRoutine( CoRoutineHandle_t xHandle, UBaseType_t uxIndex )
+ {
+ // Variables in co-routines must be declared static if they must maintain value across a blocking call.
+ static int32_t ulAVariable;
+
+     // Must start every co-routine with a call to crSTART();
+     crSTART( xHandle );
+
+     for( ;; )
+     {
+          // Co-routine functionality goes here.
+     }
+
+     // Must end every co-routine with a call to crEND();
+     crEND();
+ }
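+ *
+ * Why the static locals matter (a sketch derived from the crSTART()/crEND()
+ * definitions in this file, not part of the upstream example): the two macros wrap
+ * the co-routine body in a switch on the co-routine's saved uxState, and the blocking
+ * macros return from the function and later re-enter it through a case label, so
+ * automatic (stack) variables do not survive a blocking call.  Roughly:
+ *
+ *     switch( ( ( CRCB_t * ) xHandle )->uxState )
+ *     {
+ *         case 0:
+ *             // co-routine body; each blocking macro adds further case labels
+ *     }
+ *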
+ * \defgroup crSTART crSTART + * \ingroup Tasks + */ +#define crEND() } + +/* + * These macros are intended for internal use by the co-routine implementation + * only. The macros should not be used directly by application writers. + */ +#define crSET_STATE0( xHandle ) ( ( CRCB_t * )( xHandle ) )->uxState = (__LINE__ * 2); return; case (__LINE__ * 2): +#define crSET_STATE1( xHandle ) ( ( CRCB_t * )( xHandle ) )->uxState = ((__LINE__ * 2)+1); return; case ((__LINE__ * 2)+1): + +/** + * croutine. h + *
+ crDELAY( CoRoutineHandle_t xHandle, TickType_t xTicksToDelay );
+ * + * Delay a co-routine for a fixed period of time. + * + * crDELAY can only be called from the co-routine function itself - not + * from within a function called by the co-routine function. This is because + * co-routines do not maintain their own stack. + * + * @param xHandle The handle of the co-routine to delay. This is the xHandle + * parameter of the co-routine function. + * + * @param xTickToDelay The number of ticks that the co-routine should delay + * for. The actual amount of time this equates to is defined by + * configTICK_RATE_HZ (set in FreeRTOSConfig.h). The constant portTICK_PERIOD_MS + * can be used to convert ticks to milliseconds. + * + * Example usage: +
+ // Co-routine to be created.
+ void vACoRoutine( CoRoutineHandle_t xHandle, UBaseType_t uxIndex )
+ {
+ // Variables in co-routines must be declared static if they must maintain value across a blocking call.
+ // This may not be necessary for const variables.
+ // We are to delay for 200ms.
+ static const TickType_t xDelayTime = 200 / portTICK_PERIOD_MS;
+
+     // Must start every co-routine with a call to crSTART();
+     crSTART( xHandle );
+
+     for( ;; )
+     {
+        // Delay for 200ms.
+        crDELAY( xHandle, xDelayTime );
+
+        // Do something here.
+     }
+
+     // Must end every co-routine with a call to crEND();
+     crEND();
+ }
+ * \defgroup crDELAY crDELAY + * \ingroup Tasks + */ +#define crDELAY( xHandle, xTicksToDelay ) \ + if( ( xTicksToDelay ) > 0 ) \ + { \ + vCoRoutineAddToDelayedList( ( xTicksToDelay ), NULL ); \ + } \ + crSET_STATE0( ( xHandle ) ); + +/** + *
+ crQUEUE_SEND(
+                  CoRoutineHandle_t xHandle,
+                  QueueHandle_t pxQueue,
+                  void *pvItemToQueue,
+                  TickType_t xTicksToWait,
+                  BaseType_t *pxResult
+             )
+ * + * The macro's crQUEUE_SEND() and crQUEUE_RECEIVE() are the co-routine + * equivalent to the xQueueSend() and xQueueReceive() functions used by tasks. + * + * crQUEUE_SEND and crQUEUE_RECEIVE can only be used from a co-routine whereas + * xQueueSend() and xQueueReceive() can only be used from tasks. + * + * crQUEUE_SEND can only be called from the co-routine function itself - not + * from within a function called by the co-routine function. This is because + * co-routines do not maintain their own stack. + * + * See the co-routine section of the WEB documentation for information on + * passing data between tasks and co-routines and between ISR's and + * co-routines. + * + * @param xHandle The handle of the calling co-routine. This is the xHandle + * parameter of the co-routine function. + * + * @param pxQueue The handle of the queue on which the data will be posted. + * The handle is obtained as the return value when the queue is created using + * the xQueueCreate() API function. + * + * @param pvItemToQueue A pointer to the data being posted onto the queue. + * The number of bytes of each queued item is specified when the queue is + * created. This number of bytes is copied from pvItemToQueue into the queue + * itself. + * + * @param xTickToDelay The number of ticks that the co-routine should block + * to wait for space to become available on the queue, should space not be + * available immediately. The actual amount of time this equates to is defined + * by configTICK_RATE_HZ (set in FreeRTOSConfig.h). The constant + * portTICK_PERIOD_MS can be used to convert ticks to milliseconds (see example + * below). + * + * @param pxResult The variable pointed to by pxResult will be set to pdPASS if + * data was successfully posted onto the queue, otherwise it will be set to an + * error defined within ProjDefs.h. + * + * Example usage: +
+ // Co-routine function that blocks for a fixed period then posts a number onto
+ // a queue.
+ static void prvCoRoutineFlashTask( CoRoutineHandle_t xHandle, UBaseType_t uxIndex )
+ {
+ // Variables in co-routines must be declared static if they must maintain value across a blocking call.
+ static BaseType_t xNumberToPost = 0;
+ static BaseType_t xResult;
+
+    // Co-routines must begin with a call to crSTART().
+    crSTART( xHandle );
+
+    for( ;; )
+    {
+        // This assumes the queue has already been created.
+        crQUEUE_SEND( xHandle, xCoRoutineQueue, &xNumberToPost, NO_DELAY, &xResult );
+
+        if( xResult != pdPASS )
+        {
+            // The message was not posted!
+        }
+
+        // Increment the number to be posted onto the queue.
+        xNumberToPost++;
+
+        // Delay for 100 ticks.
+        crDELAY( xHandle, 100 );
+    }
+
+    // Co-routines must end with a call to crEND().
+    crEND();
+ }
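+
+ // Illustrative addition: NO_DELAY is not a FreeRTOS constant, it is assumed
+ // to be defined by the application, for example:
+ //     #define NO_DELAY    ( ( TickType_t ) 0 )
+ // A block time of 0 means crQUEUE_SEND() returns immediately, reporting an
+ // error in xResult, if the queue is already full.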
+ * \defgroup crQUEUE_SEND crQUEUE_SEND + * \ingroup Tasks + */ +#define crQUEUE_SEND( xHandle, pxQueue, pvItemToQueue, xTicksToWait, pxResult ) \ +{ \ + *( pxResult ) = xQueueCRSend( ( pxQueue) , ( pvItemToQueue) , ( xTicksToWait ) ); \ + if( *( pxResult ) == errQUEUE_BLOCKED ) \ + { \ + crSET_STATE0( ( xHandle ) ); \ + *pxResult = xQueueCRSend( ( pxQueue ), ( pvItemToQueue ), 0 ); \ + } \ + if( *pxResult == errQUEUE_YIELD ) \ + { \ + crSET_STATE1( ( xHandle ) ); \ + *pxResult = pdPASS; \ + } \ +} + +/** + * croutine. h + *
+  crQUEUE_RECEIVE(
+                     CoRoutineHandle_t xHandle,
+                     QueueHandle_t pxQueue,
+                     void *pvBuffer,
+                     TickType_t xTicksToWait,
+                     BaseType_t *pxResult
+                 )
+ * + * The macro's crQUEUE_SEND() and crQUEUE_RECEIVE() are the co-routine + * equivalent to the xQueueSend() and xQueueReceive() functions used by tasks. + * + * crQUEUE_SEND and crQUEUE_RECEIVE can only be used from a co-routine whereas + * xQueueSend() and xQueueReceive() can only be used from tasks. + * + * crQUEUE_RECEIVE can only be called from the co-routine function itself - not + * from within a function called by the co-routine function. This is because + * co-routines do not maintain their own stack. + * + * See the co-routine section of the WEB documentation for information on + * passing data between tasks and co-routines and between ISR's and + * co-routines. + * + * @param xHandle The handle of the calling co-routine. This is the xHandle + * parameter of the co-routine function. + * + * @param pxQueue The handle of the queue from which the data will be received. + * The handle is obtained as the return value when the queue is created using + * the xQueueCreate() API function. + * + * @param pvBuffer The buffer into which the received item is to be copied. + * The number of bytes of each queued item is specified when the queue is + * created. This number of bytes is copied into pvBuffer. + * + * @param xTickToDelay The number of ticks that the co-routine should block + * to wait for data to become available from the queue, should data not be + * available immediately. The actual amount of time this equates to is defined + * by configTICK_RATE_HZ (set in FreeRTOSConfig.h). The constant + * portTICK_PERIOD_MS can be used to convert ticks to milliseconds (see the + * crQUEUE_SEND example). + * + * @param pxResult The variable pointed to by pxResult will be set to pdPASS if + * data was successfully retrieved from the queue, otherwise it will be set to + * an error code as defined within ProjDefs.h. + * + * Example usage: +
+ // A co-routine receives the number of an LED to flash from a queue.  It
+ // blocks on the queue until the number is received.
+ static void prvCoRoutineFlashWorkTask( CoRoutineHandle_t xHandle, UBaseType_t uxIndex )
+ {
+ // Variables in co-routines must be declared static if they must maintain value across a blocking call.
+ static BaseType_t xResult;
+ static UBaseType_t uxLEDToFlash;
+
+    // All co-routines must start with a call to crSTART().
+    crSTART( xHandle );
+
+    for( ;; )
+    {
+        // Wait for data to become available on the queue.
+        crQUEUE_RECEIVE( xHandle, xCoRoutineQueue, &uxLEDToFlash, portMAX_DELAY, &xResult );
+
+        if( xResult == pdPASS )
+        {
+            // We received the LED to flash - flash it!
+            vParTestToggleLED( uxLEDToFlash );
+        }
+    }
+
+    crEND();
+ }
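+
+ // Illustrative addition: a sketch of how the queue shared by the two flash
+ // co-routines above might be created before the co-routines run.  The queue
+ // length of 2 is an arbitrary choice for this sketch.
+ QueueHandle_t xCoRoutineQueue;
+
+ void vCreateFlashQueue( void )
+ {
+    // Each item on the queue holds the number of an LED to flash.
+    xCoRoutineQueue = xQueueCreate( 2, sizeof( UBaseType_t ) );
+ }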
+ * \defgroup crQUEUE_RECEIVE crQUEUE_RECEIVE + * \ingroup Tasks + */ +#define crQUEUE_RECEIVE( xHandle, pxQueue, pvBuffer, xTicksToWait, pxResult ) \ +{ \ + *( pxResult ) = xQueueCRReceive( ( pxQueue) , ( pvBuffer ), ( xTicksToWait ) ); \ + if( *( pxResult ) == errQUEUE_BLOCKED ) \ + { \ + crSET_STATE0( ( xHandle ) ); \ + *( pxResult ) = xQueueCRReceive( ( pxQueue) , ( pvBuffer ), 0 ); \ + } \ + if( *( pxResult ) == errQUEUE_YIELD ) \ + { \ + crSET_STATE1( ( xHandle ) ); \ + *( pxResult ) = pdPASS; \ + } \ +} + +/** + * croutine. h + *
+  crQUEUE_SEND_FROM_ISR(
+                            QueueHandle_t pxQueue,
+                            void *pvItemToQueue,
+                            BaseType_t xCoRoutinePreviouslyWoken
+                       )
+ * + * The macro's crQUEUE_SEND_FROM_ISR() and crQUEUE_RECEIVE_FROM_ISR() are the + * co-routine equivalent to the xQueueSendFromISR() and xQueueReceiveFromISR() + * functions used by tasks. + * + * crQUEUE_SEND_FROM_ISR() and crQUEUE_RECEIVE_FROM_ISR() can only be used to + * pass data between a co-routine and and ISR, whereas xQueueSendFromISR() and + * xQueueReceiveFromISR() can only be used to pass data between a task and and + * ISR. + * + * crQUEUE_SEND_FROM_ISR can only be called from an ISR to send data to a queue + * that is being used from within a co-routine. + * + * See the co-routine section of the WEB documentation for information on + * passing data between tasks and co-routines and between ISR's and + * co-routines. + * + * @param xQueue The handle to the queue on which the item is to be posted. + * + * @param pvItemToQueue A pointer to the item that is to be placed on the + * queue. The size of the items the queue will hold was defined when the + * queue was created, so this many bytes will be copied from pvItemToQueue + * into the queue storage area. + * + * @param xCoRoutinePreviouslyWoken This is included so an ISR can post onto + * the same queue multiple times from a single interrupt. The first call + * should always pass in pdFALSE. Subsequent calls should pass in + * the value returned from the previous call. + * + * @return pdTRUE if a co-routine was woken by posting onto the queue. This is + * used by the ISR to determine if a context switch may be required following + * the ISR. + * + * Example usage: +
+ // A co-routine that blocks on a queue waiting for characters to be received.
+ static void vReceivingCoRoutine( CoRoutineHandle_t xHandle, UBaseType_t uxIndex )
+ {
+ char cRxedChar;
+ BaseType_t xResult;
+
+     // All co-routines must start with a call to crSTART().
+     crSTART( xHandle );
+
+     for( ;; )
+     {
+         // Wait for data to become available on the queue.  This assumes the
+         // queue xCommsRxQueue has already been created!
+         crQUEUE_RECEIVE( xHandle, xCommsRxQueue, &cRxedChar, portMAX_DELAY, &xResult );
+
+         // Was a character received?
+         if( xResult == pdPASS )
+         {
+             // Process the character here.
+         }
+     }
+
+     // All co-routines must end with a call to crEND().
+     crEND();
+ }
+
+ // An ISR that uses a queue to send characters received on a serial port to
+ // a co-routine.
+ void vUART_ISR( void )
+ {
+ char cRxedChar;
+ BaseType_t xCRWokenByPost = pdFALSE;
+
+     // We loop around reading characters until there are none left in the UART.
+     while( UART_RX_REG_NOT_EMPTY() )
+     {
+         // Obtain the character from the UART.
+         cRxedChar = UART_RX_REG;
+
+         // Post the character onto a queue.  xCRWokenByPost will be pdFALSE
+         // the first time around the loop.  If the post causes a co-routine
+         // to be woken (unblocked) then xCRWokenByPost will be set to pdTRUE.
+         // In this manner we can ensure that if more than one co-routine is
+         // blocked on the queue only one is woken by this ISR no matter how
+         // many characters are posted to the queue.
+         xCRWokenByPost = crQUEUE_SEND_FROM_ISR( xCommsRxQueue, &cRxedChar, xCRWokenByPost );
+     }
+ }
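+
+ // Illustrative addition: xCommsRxQueue is assumed to have been created
+ // elsewhere, for example (the queue length of 32 is an arbitrary choice):
+ //     xCommsRxQueue = xQueueCreate( 32, sizeof( char ) );
+ // Note that no portYIELD_FROM_ISR() style call is needed here - a co-routine
+ // woken by the post simply runs the next time vCoRoutineSchedule() is called.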
+ * \defgroup crQUEUE_SEND_FROM_ISR crQUEUE_SEND_FROM_ISR + * \ingroup Tasks + */ +#define crQUEUE_SEND_FROM_ISR( pxQueue, pvItemToQueue, xCoRoutinePreviouslyWoken ) xQueueCRSendFromISR( ( pxQueue ), ( pvItemToQueue ), ( xCoRoutinePreviouslyWoken ) ) + + +/** + * croutine. h + *
+  crQUEUE_RECEIVE_FROM_ISR(
+                            QueueHandle_t pxQueue,
+                            void *pvBuffer,
+                            BaseType_t * pxCoRoutineWoken
+                       )
+ * + * The macro's crQUEUE_SEND_FROM_ISR() and crQUEUE_RECEIVE_FROM_ISR() are the + * co-routine equivalent to the xQueueSendFromISR() and xQueueReceiveFromISR() + * functions used by tasks. + * + * crQUEUE_SEND_FROM_ISR() and crQUEUE_RECEIVE_FROM_ISR() can only be used to + * pass data between a co-routine and and ISR, whereas xQueueSendFromISR() and + * xQueueReceiveFromISR() can only be used to pass data between a task and and + * ISR. + * + * crQUEUE_RECEIVE_FROM_ISR can only be called from an ISR to receive data + * from a queue that is being used from within a co-routine (a co-routine + * posted to the queue). + * + * See the co-routine section of the WEB documentation for information on + * passing data between tasks and co-routines and between ISR's and + * co-routines. + * + * @param xQueue The handle to the queue on which the item is to be posted. + * + * @param pvBuffer A pointer to a buffer into which the received item will be + * placed. The size of the items the queue will hold was defined when the + * queue was created, so this many bytes will be copied from the queue into + * pvBuffer. + * + * @param pxCoRoutineWoken A co-routine may be blocked waiting for space to become + * available on the queue. If crQUEUE_RECEIVE_FROM_ISR causes such a + * co-routine to unblock *pxCoRoutineWoken will get set to pdTRUE, otherwise + * *pxCoRoutineWoken will remain unchanged. + * + * @return pdTRUE an item was successfully received from the queue, otherwise + * pdFALSE. + * + * Example usage: +
+ // A co-routine that posts a character to a queue then blocks for a fixed
+ // period.  The character is incremented each time.
+ static void vSendingCoRoutine( CoRoutineHandle_t xHandle, UBaseType_t uxIndex )
+ {
+ // cCharToTx holds its value while this co-routine is blocked and must therefore
+ // be declared static.
+ static char cCharToTx = 'a';
+ BaseType_t xResult;
+
+     // All co-routines must start with a call to crSTART().
+     crSTART( xHandle );
+
+     for( ;; )
+     {
+         // Send the next character to the queue.
+         crQUEUE_SEND( xHandle, xCoRoutineQueue, &cCharToTx, NO_DELAY, &xResult );
+
+         if( xResult == pdPASS )
+         {
+             // The character was successfully posted to the queue.
+         }
+		 else
+		 {
+			// Could not post the character to the queue.
+		 }
+
+         // Enable the UART Tx interrupt to cause an interrupt in this
+		 // hypothetical UART.  The interrupt will obtain the character
+		 // from the queue and send it.
+		 ENABLE_TX_INTERRUPT();
+
+		 // Increment to the next character then block for a fixed period.
+		 // cCharToTx will maintain its value across the delay as it is
+		 // declared static.
+		 cCharToTx++;
+		 if( cCharToTx > 'x' )
+		 {
+			cCharToTx = 'a';
+		 }
+		 crDELAY( xHandle, 100 );
+     }
+
+     // All co-routines must end with a call to crEND().
+     crEND();
+ }
+
+ // An ISR that uses a queue to receive characters to send on a UART.
+ void vUART_ISR( void )
+ {
+ char cCharToTx;
+ BaseType_t xCRWokenByPost = pdFALSE;
+
+     while( UART_TX_REG_EMPTY() )
+     {
+         // Are there any characters in the queue waiting to be sent?
+		 // xCRWokenByPost will automatically be set to pdTRUE if a co-routine
+		 // is woken by the post - ensuring that only a single co-routine is
+		 // woken no matter how many times we go around this loop.
+         if( crQUEUE_RECEIVE_FROM_ISR( xCoRoutineQueue, &cCharToTx, &xCRWokenByPost ) )
+		 {
+			 SEND_CHARACTER( cCharToTx );
+		 }
+     }
+ }
+ * \defgroup crQUEUE_RECEIVE_FROM_ISR crQUEUE_RECEIVE_FROM_ISR + * \ingroup Tasks + */ +#define crQUEUE_RECEIVE_FROM_ISR( pxQueue, pvBuffer, pxCoRoutineWoken ) xQueueCRReceiveFromISR( ( pxQueue ), ( pvBuffer ), ( pxCoRoutineWoken ) ) + +/* + * This function is intended for internal use by the co-routine macros only. + * The macro nature of the co-routine implementation requires that the + * prototype appears here. The function should not be used by application + * writers. + * + * Removes the current co-routine from its ready list and places it in the + * appropriate delayed list. + */ +void vCoRoutineAddToDelayedList( TickType_t xTicksToDelay, List_t *pxEventList ); + +/* + * This function is intended for internal use by the queue implementation only. + * The function should not be used by application writers. + * + * Removes the highest priority co-routine from the event list and places it in + * the pending ready list. + */ +BaseType_t xCoRoutineRemoveFromEventList( const List_t *pxEventList ); + +#ifdef __cplusplus +} +#endif + +#endif /* CO_ROUTINE_H */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/FreeRTOS/Source/include/deprecated_definitions.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/FreeRTOS/Source/include/deprecated_definitions.h new file mode 100644 index 0000000..4d0ce01 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/FreeRTOS/Source/include/deprecated_definitions.h @@ -0,0 +1,279 @@ +/* + * FreeRTOS Kernel V10.2.1 + * Copyright (C) 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * http://www.FreeRTOS.org + * http://aws.amazon.com/freertos + * + * 1 tab == 4 spaces! + */ + +#ifndef DEPRECATED_DEFINITIONS_H +#define DEPRECATED_DEFINITIONS_H + + +/* Each FreeRTOS port has a unique portmacro.h header file. Originally a +pre-processor definition was used to ensure the pre-processor found the correct +portmacro.h file for the port being used. That scheme was deprecated in favour +of setting the compiler's include path such that it found the correct +portmacro.h file - removing the need for the constant and allowing the +portmacro.h file to be located anywhere in relation to the port being used. The +definitions below remain in the code for backward compatibility only. New +projects should not use them. 
*/ + +#ifdef OPEN_WATCOM_INDUSTRIAL_PC_PORT + #include "..\..\Source\portable\owatcom\16bitdos\pc\portmacro.h" + typedef void ( __interrupt __far *pxISR )(); +#endif + +#ifdef OPEN_WATCOM_FLASH_LITE_186_PORT + #include "..\..\Source\portable\owatcom\16bitdos\flsh186\portmacro.h" + typedef void ( __interrupt __far *pxISR )(); +#endif + +#ifdef GCC_MEGA_AVR + #include "../portable/GCC/ATMega323/portmacro.h" +#endif + +#ifdef IAR_MEGA_AVR + #include "../portable/IAR/ATMega323/portmacro.h" +#endif + +#ifdef MPLAB_PIC24_PORT + #include "../../Source/portable/MPLAB/PIC24_dsPIC/portmacro.h" +#endif + +#ifdef MPLAB_DSPIC_PORT + #include "../../Source/portable/MPLAB/PIC24_dsPIC/portmacro.h" +#endif + +#ifdef MPLAB_PIC18F_PORT + #include "../../Source/portable/MPLAB/PIC18F/portmacro.h" +#endif + +#ifdef MPLAB_PIC32MX_PORT + #include "../../Source/portable/MPLAB/PIC32MX/portmacro.h" +#endif + +#ifdef _FEDPICC + #include "libFreeRTOS/Include/portmacro.h" +#endif + +#ifdef SDCC_CYGNAL + #include "../../Source/portable/SDCC/Cygnal/portmacro.h" +#endif + +#ifdef GCC_ARM7 + #include "../../Source/portable/GCC/ARM7_LPC2000/portmacro.h" +#endif + +#ifdef GCC_ARM7_ECLIPSE + #include "portmacro.h" +#endif + +#ifdef ROWLEY_LPC23xx + #include "../../Source/portable/GCC/ARM7_LPC23xx/portmacro.h" +#endif + +#ifdef IAR_MSP430 + #include "..\..\Source\portable\IAR\MSP430\portmacro.h" +#endif + +#ifdef GCC_MSP430 + #include "../../Source/portable/GCC/MSP430F449/portmacro.h" +#endif + +#ifdef ROWLEY_MSP430 + #include "../../Source/portable/Rowley/MSP430F449/portmacro.h" +#endif + +#ifdef ARM7_LPC21xx_KEIL_RVDS + #include "..\..\Source\portable\RVDS\ARM7_LPC21xx\portmacro.h" +#endif + +#ifdef SAM7_GCC + #include "../../Source/portable/GCC/ARM7_AT91SAM7S/portmacro.h" +#endif + +#ifdef SAM7_IAR + #include "..\..\Source\portable\IAR\AtmelSAM7S64\portmacro.h" +#endif + +#ifdef SAM9XE_IAR + #include "..\..\Source\portable\IAR\AtmelSAM9XE\portmacro.h" +#endif + +#ifdef LPC2000_IAR + #include "..\..\Source\portable\IAR\LPC2000\portmacro.h" +#endif + +#ifdef STR71X_IAR + #include "..\..\Source\portable\IAR\STR71x\portmacro.h" +#endif + +#ifdef STR75X_IAR + #include "..\..\Source\portable\IAR\STR75x\portmacro.h" +#endif + +#ifdef STR75X_GCC + #include "..\..\Source\portable\GCC\STR75x\portmacro.h" +#endif + +#ifdef STR91X_IAR + #include "..\..\Source\portable\IAR\STR91x\portmacro.h" +#endif + +#ifdef GCC_H8S + #include "../../Source/portable/GCC/H8S2329/portmacro.h" +#endif + +#ifdef GCC_AT91FR40008 + #include "../../Source/portable/GCC/ARM7_AT91FR40008/portmacro.h" +#endif + +#ifdef RVDS_ARMCM3_LM3S102 + #include "../../Source/portable/RVDS/ARM_CM3/portmacro.h" +#endif + +#ifdef GCC_ARMCM3_LM3S102 + #include "../../Source/portable/GCC/ARM_CM3/portmacro.h" +#endif + +#ifdef GCC_ARMCM3 + #include "../../Source/portable/GCC/ARM_CM3/portmacro.h" +#endif + +#ifdef IAR_ARM_CM3 + #include "../../Source/portable/IAR/ARM_CM3/portmacro.h" +#endif + +#ifdef IAR_ARMCM3_LM + #include "../../Source/portable/IAR/ARM_CM3/portmacro.h" +#endif + +#ifdef HCS12_CODE_WARRIOR + #include "../../Source/portable/CodeWarrior/HCS12/portmacro.h" +#endif + +#ifdef MICROBLAZE_GCC + #include "../../Source/portable/GCC/MicroBlaze/portmacro.h" +#endif + +#ifdef TERN_EE + #include "..\..\Source\portable\Paradigm\Tern_EE\small\portmacro.h" +#endif + +#ifdef GCC_HCS12 + #include "../../Source/portable/GCC/HCS12/portmacro.h" +#endif + +#ifdef GCC_MCF5235 + #include "../../Source/portable/GCC/MCF5235/portmacro.h" +#endif + +#ifdef COLDFIRE_V2_GCC + #include 
"../../../Source/portable/GCC/ColdFire_V2/portmacro.h" +#endif + +#ifdef COLDFIRE_V2_CODEWARRIOR + #include "../../Source/portable/CodeWarrior/ColdFire_V2/portmacro.h" +#endif + +#ifdef GCC_PPC405 + #include "../../Source/portable/GCC/PPC405_Xilinx/portmacro.h" +#endif + +#ifdef GCC_PPC440 + #include "../../Source/portable/GCC/PPC440_Xilinx/portmacro.h" +#endif + +#ifdef _16FX_SOFTUNE + #include "..\..\Source\portable\Softune\MB96340\portmacro.h" +#endif + +#ifdef BCC_INDUSTRIAL_PC_PORT + /* A short file name has to be used in place of the normal + FreeRTOSConfig.h when using the Borland compiler. */ + #include "frconfig.h" + #include "..\portable\BCC\16BitDOS\PC\prtmacro.h" + typedef void ( __interrupt __far *pxISR )(); +#endif + +#ifdef BCC_FLASH_LITE_186_PORT + /* A short file name has to be used in place of the normal + FreeRTOSConfig.h when using the Borland compiler. */ + #include "frconfig.h" + #include "..\portable\BCC\16BitDOS\flsh186\prtmacro.h" + typedef void ( __interrupt __far *pxISR )(); +#endif + +#ifdef __GNUC__ + #ifdef __AVR32_AVR32A__ + #include "portmacro.h" + #endif +#endif + +#ifdef __ICCAVR32__ + #ifdef __CORE__ + #if __CORE__ == __AVR32A__ + #include "portmacro.h" + #endif + #endif +#endif + +#ifdef __91467D + #include "portmacro.h" +#endif + +#ifdef __96340 + #include "portmacro.h" +#endif + + +#ifdef __IAR_V850ES_Fx3__ + #include "../../Source/portable/IAR/V850ES/portmacro.h" +#endif + +#ifdef __IAR_V850ES_Jx3__ + #include "../../Source/portable/IAR/V850ES/portmacro.h" +#endif + +#ifdef __IAR_V850ES_Jx3_L__ + #include "../../Source/portable/IAR/V850ES/portmacro.h" +#endif + +#ifdef __IAR_V850ES_Jx2__ + #include "../../Source/portable/IAR/V850ES/portmacro.h" +#endif + +#ifdef __IAR_V850ES_Hx2__ + #include "../../Source/portable/IAR/V850ES/portmacro.h" +#endif + +#ifdef __IAR_78K0R_Kx3__ + #include "../../Source/portable/IAR/78K0R/portmacro.h" +#endif + +#ifdef __IAR_78K0R_Kx3L__ + #include "../../Source/portable/IAR/78K0R/portmacro.h" +#endif + +#endif /* DEPRECATED_DEFINITIONS_H */ + diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/FreeRTOS/Source/include/event_groups.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/FreeRTOS/Source/include/event_groups.h new file mode 100644 index 0000000..522b0b6 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/FreeRTOS/Source/include/event_groups.h @@ -0,0 +1,757 @@ +/* + * FreeRTOS Kernel V10.2.1 + * Copyright (C) 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * http://www.FreeRTOS.org + * http://aws.amazon.com/freertos + * + * 1 tab == 4 spaces! + */ + +#ifndef EVENT_GROUPS_H +#define EVENT_GROUPS_H + +#ifndef INC_FREERTOS_H + #error "include FreeRTOS.h" must appear in source files before "include event_groups.h" +#endif + +/* FreeRTOS includes. */ +#include "timers.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * An event group is a collection of bits to which an application can assign a + * meaning. For example, an application may create an event group to convey + * the status of various CAN bus related events in which bit 0 might mean "A CAN + * message has been received and is ready for processing", bit 1 might mean "The + * application has queued a message that is ready for sending onto the CAN + * network", and bit 2 might mean "It is time to send a SYNC message onto the + * CAN network" etc. A task can then test the bit values to see which events + * are active, and optionally enter the Blocked state to wait for a specified + * bit or a group of specified bits to be active. To continue the CAN bus + * example, a CAN controlling task can enter the Blocked state (and therefore + * not consume any processing time) until either bit 0, bit 1 or bit 2 are + * active, at which time the bit that was actually active would inform the task + * which action it had to take (process a received message, send a message, or + * send a SYNC). + * + * The event groups implementation contains intelligence to avoid race + * conditions that would otherwise occur were an application to use a simple + * variable for the same purpose. This is particularly important with respect + * to when a bit within an event group is to be cleared, and when bits have to + * be set and then tested atomically - as is the case where event groups are + * used to create a synchronisation point between multiple tasks (a + * 'rendezvous'). + * + * \defgroup EventGroup + */ + + + +/** + * event_groups.h + * + * Type by which event groups are referenced. For example, a call to + * xEventGroupCreate() returns an EventGroupHandle_t variable that can then + * be used as a parameter to other event group functions. + * + * \defgroup EventGroupHandle_t EventGroupHandle_t + * \ingroup EventGroup + */ +struct EventGroupDef_t; +typedef struct EventGroupDef_t * EventGroupHandle_t; + +/* + * The type that holds event bits always matches TickType_t - therefore the + * number of bits it holds is set by configUSE_16_BIT_TICKS (16 bits if set to 1, + * 32 bits if set to 0. + * + * \defgroup EventBits_t EventBits_t + * \ingroup EventGroup + */ +typedef TickType_t EventBits_t; + +/** + * event_groups.h + *
+ EventGroupHandle_t xEventGroupCreate( void );
+ 
+ * + * Create a new event group. + * + * Internally, within the FreeRTOS implementation, event groups use a [small] + * block of memory, in which the event group's structure is stored. If an event + * groups is created using xEventGropuCreate() then the required memory is + * automatically dynamically allocated inside the xEventGroupCreate() function. + * (see http://www.freertos.org/a00111.html). If an event group is created + * using xEventGropuCreateStatic() then the application writer must instead + * provide the memory that will get used by the event group. + * xEventGroupCreateStatic() therefore allows an event group to be created + * without using any dynamic memory allocation. + * + * Although event groups are not related to ticks, for internal implementation + * reasons the number of bits available for use in an event group is dependent + * on the configUSE_16_BIT_TICKS setting in FreeRTOSConfig.h. If + * configUSE_16_BIT_TICKS is 1 then each event group contains 8 usable bits (bit + * 0 to bit 7). If configUSE_16_BIT_TICKS is set to 0 then each event group has + * 24 usable bits (bit 0 to bit 23). The EventBits_t type is used to store + * event bits within an event group. + * + * @return If the event group was created then a handle to the event group is + * returned. If there was insufficient FreeRTOS heap available to create the + * event group then NULL is returned. See http://www.freertos.org/a00111.html + * + * Example usage: +
+	// Declare a variable to hold the created event group.
+	EventGroupHandle_t xCreatedEventGroup;
+
+	// Attempt to create the event group.
+	xCreatedEventGroup = xEventGroupCreate();
+
+	// Was the event group created successfully?
+	if( xCreatedEventGroup == NULL )
+	{
+		// The event group was not created because there was insufficient
+		// FreeRTOS heap available.
+	}
+	else
+	{
+		// The event group was created.
+	}
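+
+	// Illustrative addition: if the event group is later no longer required it
+	// can be deleted, returning its memory to the FreeRTOS heap:
+	//     vEventGroupDelete( xCreatedEventGroup );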
+   
+ * \defgroup xEventGroupCreate xEventGroupCreate + * \ingroup EventGroup + */ +#if( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) + EventGroupHandle_t xEventGroupCreate( void ) PRIVILEGED_FUNCTION; +#endif + +/** + * event_groups.h + *
+ EventGroupHandle_t xEventGroupCreateStatic( StaticEventGroup_t * pxEventGroupBuffer );
+ 
+ * + * Create a new event group. + * + * Internally, within the FreeRTOS implementation, event groups use a [small] + * block of memory, in which the event group's structure is stored. If an event + * groups is created using xEventGropuCreate() then the required memory is + * automatically dynamically allocated inside the xEventGroupCreate() function. + * (see http://www.freertos.org/a00111.html). If an event group is created + * using xEventGropuCreateStatic() then the application writer must instead + * provide the memory that will get used by the event group. + * xEventGroupCreateStatic() therefore allows an event group to be created + * without using any dynamic memory allocation. + * + * Although event groups are not related to ticks, for internal implementation + * reasons the number of bits available for use in an event group is dependent + * on the configUSE_16_BIT_TICKS setting in FreeRTOSConfig.h. If + * configUSE_16_BIT_TICKS is 1 then each event group contains 8 usable bits (bit + * 0 to bit 7). If configUSE_16_BIT_TICKS is set to 0 then each event group has + * 24 usable bits (bit 0 to bit 23). The EventBits_t type is used to store + * event bits within an event group. + * + * @param pxEventGroupBuffer pxEventGroupBuffer must point to a variable of type + * StaticEventGroup_t, which will be then be used to hold the event group's data + * structures, removing the need for the memory to be allocated dynamically. + * + * @return If the event group was created then a handle to the event group is + * returned. If pxEventGroupBuffer was NULL then NULL is returned. + * + * Example usage: +
+	// StaticEventGroup_t is a publicly accessible structure that has the same
+	// size and alignment requirements as the real event group structure.  It is
+	// provided as a mechanism for applications to know the size of the event
+	// group (which is dependent on the architecture and configuration file
+	// settings) without breaking the strict data hiding policy by exposing the
+	// real event group internals.  This StaticEventGroup_t variable is passed
+	// into the xSemaphoreCreateEventGroupStatic() function and is used to store
+	// the event group's data structures
+	StaticEventGroup_t xEventGroupBuffer;
+
+	// Create the event group without dynamically allocating any memory.
+	xEventGroup = xEventGroupCreateStatic( &xEventGroupBuffer );
+   
+ */ +#if( configSUPPORT_STATIC_ALLOCATION == 1 ) + EventGroupHandle_t xEventGroupCreateStatic( StaticEventGroup_t *pxEventGroupBuffer ) PRIVILEGED_FUNCTION; +#endif + +/** + * event_groups.h + *
+	EventBits_t xEventGroupWaitBits( 	EventGroupHandle_t xEventGroup,
+										const EventBits_t uxBitsToWaitFor,
+										const BaseType_t xClearOnExit,
+										const BaseType_t xWaitForAllBits,
+										const TickType_t xTicksToWait );
+ 
+ * + * [Potentially] block to wait for one or more bits to be set within a + * previously created event group. + * + * This function cannot be called from an interrupt. + * + * @param xEventGroup The event group in which the bits are being tested. The + * event group must have previously been created using a call to + * xEventGroupCreate(). + * + * @param uxBitsToWaitFor A bitwise value that indicates the bit or bits to test + * inside the event group. For example, to wait for bit 0 and/or bit 2 set + * uxBitsToWaitFor to 0x05. To wait for bits 0 and/or bit 1 and/or bit 2 set + * uxBitsToWaitFor to 0x07. Etc. + * + * @param xClearOnExit If xClearOnExit is set to pdTRUE then any bits within + * uxBitsToWaitFor that are set within the event group will be cleared before + * xEventGroupWaitBits() returns if the wait condition was met (if the function + * returns for a reason other than a timeout). If xClearOnExit is set to + * pdFALSE then the bits set in the event group are not altered when the call to + * xEventGroupWaitBits() returns. + * + * @param xWaitForAllBits If xWaitForAllBits is set to pdTRUE then + * xEventGroupWaitBits() will return when either all the bits in uxBitsToWaitFor + * are set or the specified block time expires. If xWaitForAllBits is set to + * pdFALSE then xEventGroupWaitBits() will return when any one of the bits set + * in uxBitsToWaitFor is set or the specified block time expires. The block + * time is specified by the xTicksToWait parameter. + * + * @param xTicksToWait The maximum amount of time (specified in 'ticks') to wait + * for one/all (depending on the xWaitForAllBits value) of the bits specified by + * uxBitsToWaitFor to become set. + * + * @return The value of the event group at the time either the bits being waited + * for became set, or the block time expired. Test the return value to know + * which bits were set. If xEventGroupWaitBits() returned because its timeout + * expired then not all the bits being waited for will be set. If + * xEventGroupWaitBits() returned because the bits it was waiting for were set + * then the returned value is the event group value before any bits were + * automatically cleared in the case that xClearOnExit parameter was set to + * pdTRUE. + * + * Example usage: +
+   #define BIT_0	( 1 << 0 )
+   #define BIT_4	( 1 << 4 )
+
+   void aFunction( EventGroupHandle_t xEventGroup )
+   {
+   EventBits_t uxBits;
+   const TickType_t xTicksToWait = 100 / portTICK_PERIOD_MS;
+
+		// Wait a maximum of 100ms for either bit 0 or bit 4 to be set within
+		// the event group.  Clear the bits before exiting.
+		uxBits = xEventGroupWaitBits(
+					xEventGroup,	// The event group being tested.
+					BIT_0 | BIT_4,	// The bits within the event group to wait for.
+					pdTRUE,			// BIT_0 and BIT_4 should be cleared before returning.
+					pdFALSE,		// Don't wait for both bits, either bit will do.
+					xTicksToWait );	// Wait a maximum of 100ms for either bit to be set.
+
+		if( ( uxBits & ( BIT_0 | BIT_4 ) ) == ( BIT_0 | BIT_4 ) )
+		{
+			// xEventGroupWaitBits() returned because both bits were set.
+		}
+		else if( ( uxBits & BIT_0 ) != 0 )
+		{
+			// xEventGroupWaitBits() returned because just BIT_0 was set.
+		}
+		else if( ( uxBits & BIT_4 ) != 0 )
+		{
+			// xEventGroupWaitBits() returned because just BIT_4 was set.
+		}
+		else
+		{
+			// xEventGroupWaitBits() returned because xTicksToWait ticks passed
+			// without either BIT_0 or BIT_4 becoming set.
+		}
+   }
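+
+   // Illustrative addition: to read the current bit values without blocking,
+   // the xEventGroupGetBits() macro defined later in this file can be used:
+   //     uxBits = xEventGroupGetBits( xEventGroup );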
+   
+ * \defgroup xEventGroupWaitBits xEventGroupWaitBits + * \ingroup EventGroup + */ +EventBits_t xEventGroupWaitBits( EventGroupHandle_t xEventGroup, const EventBits_t uxBitsToWaitFor, const BaseType_t xClearOnExit, const BaseType_t xWaitForAllBits, TickType_t xTicksToWait ) PRIVILEGED_FUNCTION; + +/** + * event_groups.h + *
+	EventBits_t xEventGroupClearBits( EventGroupHandle_t xEventGroup, const EventBits_t uxBitsToClear );
+ 
+ * + * Clear bits within an event group. This function cannot be called from an + * interrupt. + * + * @param xEventGroup The event group in which the bits are to be cleared. + * + * @param uxBitsToClear A bitwise value that indicates the bit or bits to clear + * in the event group. For example, to clear bit 3 only, set uxBitsToClear to + * 0x08. To clear bit 3 and bit 0 set uxBitsToClear to 0x09. + * + * @return The value of the event group before the specified bits were cleared. + * + * Example usage: +
+   #define BIT_0	( 1 << 0 )
+   #define BIT_4	( 1 << 4 )
+
+   void aFunction( EventGroupHandle_t xEventGroup )
+   {
+   EventBits_t uxBits;
+
+		// Clear bit 0 and bit 4 in xEventGroup.
+		uxBits = xEventGroupClearBits(
+								xEventGroup,	// The event group being updated.
+								BIT_0 | BIT_4 );// The bits being cleared.
+
+		if( ( uxBits & ( BIT_0 | BIT_4 ) ) == ( BIT_0 | BIT_4 ) )
+		{
+			// Both bit 0 and bit 4 were set before xEventGroupClearBits() was
+			// called.  Both will now be clear (not set).
+		}
+		else if( ( uxBits & BIT_0 ) != 0 )
+		{
+			// Bit 0 was set before xEventGroupClearBits() was called.  It will
+			// now be clear.
+		}
+		else if( ( uxBits & BIT_4 ) != 0 )
+		{
+			// Bit 4 was set before xEventGroupClearBits() was called.  It will
+			// now be clear.
+		}
+		else
+		{
+			// Neither bit 0 nor bit 4 were set in the first place.
+		}
+   }
+   
+ * \defgroup xEventGroupClearBits xEventGroupClearBits + * \ingroup EventGroup + */ +EventBits_t xEventGroupClearBits( EventGroupHandle_t xEventGroup, const EventBits_t uxBitsToClear ) PRIVILEGED_FUNCTION; + +/** + * event_groups.h + *
+	BaseType_t xEventGroupClearBitsFromISR( EventGroupHandle_t xEventGroup, const EventBits_t uxBitsToClear );
+ 
+ * + * A version of xEventGroupClearBits() that can be called from an interrupt. + * + * Setting bits in an event group is not a deterministic operation because there + * are an unknown number of tasks that may be waiting for the bit or bits being + * set. FreeRTOS does not allow nondeterministic operations to be performed + * while interrupts are disabled, so protects event groups that are accessed + * from tasks by suspending the scheduler rather than disabling interrupts. As + * a result event groups cannot be accessed directly from an interrupt service + * routine. Therefore xEventGroupClearBitsFromISR() sends a message to the + * timer task to have the clear operation performed in the context of the timer + * task. + * + * @param xEventGroup The event group in which the bits are to be cleared. + * + * @param uxBitsToClear A bitwise value that indicates the bit or bits to clear. + * For example, to clear bit 3 only, set uxBitsToClear to 0x08. To clear bit 3 + * and bit 0 set uxBitsToClear to 0x09. + * + * @return If the request to execute the function was posted successfully then + * pdPASS is returned, otherwise pdFALSE is returned. pdFALSE will be returned + * if the timer service queue was full. + * + * Example usage: +
+   #define BIT_0	( 1 << 0 )
+   #define BIT_4	( 1 << 4 )
+
+   // An event group which it is assumed has already been created by a call to
+   // xEventGroupCreate().
+   EventGroupHandle_t xEventGroup;
+
+   void anInterruptHandler( void )
+   {
+   BaseType_t xResult;
+
+		// Clear bit 0 and bit 4 in xEventGroup.
+		xResult = xEventGroupClearBitsFromISR(
+							xEventGroup,	 // The event group being updated.
+							BIT_0 | BIT_4 ); // The bits being cleared.
+
+		if( xResult == pdPASS )
+		{
+			// The message was posted successfully.
+		}
+  }
+   
+ * \defgroup xEventGroupClearBitsFromISR xEventGroupClearBitsFromISR + * \ingroup EventGroup + */ +#if( configUSE_TRACE_FACILITY == 1 ) + BaseType_t xEventGroupClearBitsFromISR( EventGroupHandle_t xEventGroup, const EventBits_t uxBitsToClear ) PRIVILEGED_FUNCTION; +#else + #define xEventGroupClearBitsFromISR( xEventGroup, uxBitsToClear ) xTimerPendFunctionCallFromISR( vEventGroupClearBitsCallback, ( void * ) xEventGroup, ( uint32_t ) uxBitsToClear, NULL ) +#endif + +/** + * event_groups.h + *
+	EventBits_t xEventGroupSetBits( EventGroupHandle_t xEventGroup, const EventBits_t uxBitsToSet );
+ 
+ * + * Set bits within an event group. + * This function cannot be called from an interrupt. xEventGroupSetBitsFromISR() + * is a version that can be called from an interrupt. + * + * Setting bits in an event group will automatically unblock tasks that are + * blocked waiting for the bits. + * + * @param xEventGroup The event group in which the bits are to be set. + * + * @param uxBitsToSet A bitwise value that indicates the bit or bits to set. + * For example, to set bit 3 only, set uxBitsToSet to 0x08. To set bit 3 + * and bit 0 set uxBitsToSet to 0x09. + * + * @return The value of the event group at the time the call to + * xEventGroupSetBits() returns. There are two reasons why the returned value + * might have the bits specified by the uxBitsToSet parameter cleared. First, + * if setting a bit results in a task that was waiting for the bit leaving the + * blocked state then it is possible the bit will be cleared automatically + * (see the xClearBitOnExit parameter of xEventGroupWaitBits()). Second, any + * unblocked (or otherwise Ready state) task that has a priority above that of + * the task that called xEventGroupSetBits() will execute and may change the + * event group value before the call to xEventGroupSetBits() returns. + * + * Example usage: +
+   #define BIT_0	( 1 << 0 )
+   #define BIT_4	( 1 << 4 )
+
+   void aFunction( EventGroupHandle_t xEventGroup )
+   {
+   EventBits_t uxBits;
+
+		// Set bit 0 and bit 4 in xEventGroup.
+		uxBits = xEventGroupSetBits(
+							xEventGroup,	// The event group being updated.
+							BIT_0 | BIT_4 );// The bits being set.
+
+		if( ( uxBits & ( BIT_0 | BIT_4 ) ) == ( BIT_0 | BIT_4 ) )
+		{
+			// Both bit 0 and bit 4 remained set when the function returned.
+		}
+		else if( ( uxBits & BIT_0 ) != 0 )
+		{
+			// Bit 0 remained set when the function returned, but bit 4 was
+			// cleared.  It might be that bit 4 was cleared automatically as a
+			// task that was waiting for bit 4 was removed from the Blocked
+			// state.
+		}
+		else if( ( uxBits & BIT_4 ) != 0 )
+		{
+			// Bit 4 remained set when the function returned, but bit 0 was
+			// cleared.  It might be that bit 0 was cleared automatically as a
+			// task that was waiting for bit 0 was removed from the Blocked
+			// state.
+		}
+		else
+		{
+			// Neither bit 0 nor bit 4 remained set.  It might be that a task
+			// was waiting for both of the bits to be set, and the bits were
+			// cleared as the task left the Blocked state.
+		}
+   }
+   
+ * \defgroup xEventGroupSetBits xEventGroupSetBits + * \ingroup EventGroup + */ +EventBits_t xEventGroupSetBits( EventGroupHandle_t xEventGroup, const EventBits_t uxBitsToSet ) PRIVILEGED_FUNCTION; + +/** + * event_groups.h + *
+	BaseType_t xEventGroupSetBitsFromISR( EventGroupHandle_t xEventGroup, const EventBits_t uxBitsToSet, BaseType_t *pxHigherPriorityTaskWoken );
+ 
+ * + * A version of xEventGroupSetBits() that can be called from an interrupt. + * + * Setting bits in an event group is not a deterministic operation because there + * are an unknown number of tasks that may be waiting for the bit or bits being + * set. FreeRTOS does not allow nondeterministic operations to be performed in + * interrupts or from critical sections. Therefore xEventGroupSetBitsFromISR() + * sends a message to the timer task to have the set operation performed in the + * context of the timer task - where a scheduler lock is used in place of a + * critical section. + * + * @param xEventGroup The event group in which the bits are to be set. + * + * @param uxBitsToSet A bitwise value that indicates the bit or bits to set. + * For example, to set bit 3 only, set uxBitsToSet to 0x08. To set bit 3 + * and bit 0 set uxBitsToSet to 0x09. + * + * @param pxHigherPriorityTaskWoken As mentioned above, calling this function + * will result in a message being sent to the timer daemon task. If the + * priority of the timer daemon task is higher than the priority of the + * currently running task (the task the interrupt interrupted) then + * *pxHigherPriorityTaskWoken will be set to pdTRUE by + * xEventGroupSetBitsFromISR(), indicating that a context switch should be + * requested before the interrupt exits. For that reason + * *pxHigherPriorityTaskWoken must be initialised to pdFALSE. See the + * example code below. + * + * @return If the request to execute the function was posted successfully then + * pdPASS is returned, otherwise pdFALSE is returned. pdFALSE will be returned + * if the timer service queue was full. + * + * Example usage: +
+   #define BIT_0	( 1 << 0 )
+   #define BIT_4	( 1 << 4 )
+
+   // An event group which it is assumed has already been created by a call to
+   // xEventGroupCreate().
+   EventGroupHandle_t xEventGroup;
+
+   void anInterruptHandler( void )
+   {
+   BaseType_t xHigherPriorityTaskWoken, xResult;
+
+		// xHigherPriorityTaskWoken must be initialised to pdFALSE.
+		xHigherPriorityTaskWoken = pdFALSE;
+
+		// Set bit 0 and bit 4 in xEventGroup.
+		xResult = xEventGroupSetBitsFromISR(
+							xEventGroup,	// The event group being updated.
+							BIT_0 | BIT_4,	// The bits being set.
+							&xHigherPriorityTaskWoken );
+
+		// Was the message posted successfully?
+		if( xResult == pdPASS )
+		{
+			// If xHigherPriorityTaskWoken is now set to pdTRUE then a context
+			// switch should be requested.  The macro used is port specific and
+			// will be either portYIELD_FROM_ISR() or portEND_SWITCHING_ISR() -
+			// refer to the documentation page for the port being used.
+			portYIELD_FROM_ISR( xHigherPriorityTaskWoken );
+		}
+  }
+   
+ * \defgroup xEventGroupSetBitsFromISR xEventGroupSetBitsFromISR + * \ingroup EventGroup + */ +#if( configUSE_TRACE_FACILITY == 1 ) + BaseType_t xEventGroupSetBitsFromISR( EventGroupHandle_t xEventGroup, const EventBits_t uxBitsToSet, BaseType_t *pxHigherPriorityTaskWoken ) PRIVILEGED_FUNCTION; +#else + #define xEventGroupSetBitsFromISR( xEventGroup, uxBitsToSet, pxHigherPriorityTaskWoken ) xTimerPendFunctionCallFromISR( vEventGroupSetBitsCallback, ( void * ) xEventGroup, ( uint32_t ) uxBitsToSet, pxHigherPriorityTaskWoken ) +#endif + +/** + * event_groups.h + *
+	EventBits_t xEventGroupSync(	EventGroupHandle_t xEventGroup,
+									const EventBits_t uxBitsToSet,
+									const EventBits_t uxBitsToWaitFor,
+									TickType_t xTicksToWait );
+ 
+ * + * Atomically set bits within an event group, then wait for a combination of + * bits to be set within the same event group. This functionality is typically + * used to synchronise multiple tasks, where each task has to wait for the other + * tasks to reach a synchronisation point before proceeding. + * + * This function cannot be used from an interrupt. + * + * The function will return before its block time expires if the bits specified + * by the uxBitsToWait parameter are set, or become set within that time. In + * this case all the bits specified by uxBitsToWait will be automatically + * cleared before the function returns. + * + * @param xEventGroup The event group in which the bits are being tested. The + * event group must have previously been created using a call to + * xEventGroupCreate(). + * + * @param uxBitsToSet The bits to set in the event group before determining + * if, and possibly waiting for, all the bits specified by the uxBitsToWait + * parameter are set. + * + * @param uxBitsToWaitFor A bitwise value that indicates the bit or bits to test + * inside the event group. For example, to wait for bit 0 and bit 2 set + * uxBitsToWaitFor to 0x05. To wait for bits 0 and bit 1 and bit 2 set + * uxBitsToWaitFor to 0x07. Etc. + * + * @param xTicksToWait The maximum amount of time (specified in 'ticks') to wait + * for all of the bits specified by uxBitsToWaitFor to become set. + * + * @return The value of the event group at the time either the bits being waited + * for became set, or the block time expired. Test the return value to know + * which bits were set. If xEventGroupSync() returned because its timeout + * expired then not all the bits being waited for will be set. If + * xEventGroupSync() returned because all the bits it was waiting for were + * set then the returned value is the event group value before any bits were + * automatically cleared. + * + * Example usage: +
+ // Bits used by the three tasks.
+ #define TASK_0_BIT		( 1 << 0 )
+ #define TASK_1_BIT		( 1 << 1 )
+ #define TASK_2_BIT		( 1 << 2 )
+
+ #define ALL_SYNC_BITS ( TASK_0_BIT | TASK_1_BIT | TASK_2_BIT )
+
+ // Use an event group to synchronise three tasks.  It is assumed this event
+ // group has already been created elsewhere.
+ EventGroupHandle_t xEventBits;
+
+ void vTask0( void *pvParameters )
+ {
+ EventBits_t uxReturn;
+ TickType_t xTicksToWait = 100 / portTICK_PERIOD_MS;
+
+	 for( ;; )
+	 {
+		// Perform task functionality here.
+
+		// Set bit 0 in the event flag to note this task has reached the
+		// sync point.  The other two tasks will set the other two bits defined
+		// by ALL_SYNC_BITS.  All three tasks have reached the synchronisation
+		// point when all the ALL_SYNC_BITS are set.  Wait a maximum of 100ms
+		// for this to happen.
+		uxReturn = xEventGroupSync( xEventBits, TASK_0_BIT, ALL_SYNC_BITS, xTicksToWait );
+
+		if( ( uxReturn & ALL_SYNC_BITS ) == ALL_SYNC_BITS )
+		{
+			// All three tasks reached the synchronisation point before the call
+			// to xEventGroupSync() timed out.
+		}
+	}
+ }
+
+ void vTask1( void *pvParameters )
+ {
+	 for( ;; )
+	 {
+		// Perform task functionality here.
+
+		// Set bit 1 in the event flag to note this task has reached the
+		// synchronisation point.  The other two tasks will set the other two
+		// bits defined by ALL_SYNC_BITS.  All three tasks have reached the
+		// synchronisation point when all the ALL_SYNC_BITS are set.  Wait
+		// indefinitely for this to happen.
+		xEventGroupSync( xEventBits, TASK_1_BIT, ALL_SYNC_BITS, portMAX_DELAY );
+
+		// xEventGroupSync() was called with an indefinite block time, so
+		// this task will only reach here when the synchronisation has been completed by all
+		// three tasks, so there is no need to test the return value.
+	 }
+ }
+
+ void vTask2( void *pvParameters )
+ {
+	 for( ;; )
+	 {
+		// Perform task functionality here.
+
+		// Set bit 2 in the event flag to note this task has reached the
+		// synchronisation point.  The other two tasks will set the other two
+		// bits defined by ALL_SYNC_BITS.  All three tasks have reached the
+		// synchronisation point when all the ALL_SYNC_BITS are set.  Wait
+		// indefinitely for this to happen.
+		xEventGroupSync( xEventBits, TASK_2_BIT, ALL_SYNC_BITS, portMAX_DELAY );
+
+		// xEventGroupSync() was called with an indefinite block time, so
+		// this task will only reach here when the synchronisation has been completed by all
+		// three tasks, so there is no need to test the return value.
+	}
+ }
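+
+ // Illustrative addition: a sketch of how the event group used above might be
+ // created (elsewhere) before the three tasks are started.
+ void vCreateSyncEventGroup( void )
+ {
+	 // In a real application the return value would be checked against NULL
+	 // before the tasks that use the event group are created.
+	 xEventBits = xEventGroupCreate();
+ }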
+
+ 
+ * \defgroup xEventGroupSync xEventGroupSync + * \ingroup EventGroup + */ +EventBits_t xEventGroupSync( EventGroupHandle_t xEventGroup, const EventBits_t uxBitsToSet, const EventBits_t uxBitsToWaitFor, TickType_t xTicksToWait ) PRIVILEGED_FUNCTION; + + +/** + * event_groups.h + *
+	EventBits_t xEventGroupGetBits( EventGroupHandle_t xEventGroup );
+ 
+ * + * Returns the current value of the bits in an event group. This function + * cannot be used from an interrupt. + * + * @param xEventGroup The event group being queried. + * + * @return The event group bits at the time xEventGroupGetBits() was called. + * + * \defgroup xEventGroupGetBits xEventGroupGetBits + * \ingroup EventGroup + */ +#define xEventGroupGetBits( xEventGroup ) xEventGroupClearBits( xEventGroup, 0 ) + +/** + * event_groups.h + *
+	EventBits_t xEventGroupGetBitsFromISR( EventGroupHandle_t xEventGroup );
+ 
+ * + * A version of xEventGroupGetBits() that can be called from an ISR. + * + * @param xEventGroup The event group being queried. + * + * @return The event group bits at the time xEventGroupGetBitsFromISR() was called. + * + * \defgroup xEventGroupGetBitsFromISR xEventGroupGetBitsFromISR + * \ingroup EventGroup + */ +EventBits_t xEventGroupGetBitsFromISR( EventGroupHandle_t xEventGroup ) PRIVILEGED_FUNCTION; + +/** + * event_groups.h + *
+	void vEventGroupDelete( EventGroupHandle_t xEventGroup );
+ 
+ * + * Delete an event group that was previously created by a call to + * xEventGroupCreate(). Tasks that are blocked on the event group will be + * unblocked and obtain 0 as the event group's value. + * + * @param xEventGroup The event group being deleted. + */ +void vEventGroupDelete( EventGroupHandle_t xEventGroup ) PRIVILEGED_FUNCTION; + +/* For internal use only. */ +void vEventGroupSetBitsCallback( void *pvEventGroup, const uint32_t ulBitsToSet ) PRIVILEGED_FUNCTION; +void vEventGroupClearBitsCallback( void *pvEventGroup, const uint32_t ulBitsToClear ) PRIVILEGED_FUNCTION; + + +#if (configUSE_TRACE_FACILITY == 1) + UBaseType_t uxEventGroupGetNumber( void* xEventGroup ) PRIVILEGED_FUNCTION; + void vEventGroupSetNumber( void* xEventGroup, UBaseType_t uxEventGroupNumber ) PRIVILEGED_FUNCTION; +#endif + +#ifdef __cplusplus +} +#endif + +#endif /* EVENT_GROUPS_H */ + + diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/FreeRTOS/Source/include/list.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/FreeRTOS/Source/include/list.h new file mode 100644 index 0000000..b2bb46d --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/FreeRTOS/Source/include/list.h @@ -0,0 +1,412 @@ +/* + * FreeRTOS Kernel V10.2.1 + * Copyright (C) 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * http://www.FreeRTOS.org + * http://aws.amazon.com/freertos + * + * 1 tab == 4 spaces! + */ + +/* + * This is the list implementation used by the scheduler. While it is tailored + * heavily for the schedulers needs, it is also available for use by + * application code. + * + * list_ts can only store pointers to list_item_ts. Each ListItem_t contains a + * numeric value (xItemValue). Most of the time the lists are sorted in + * descending item value order. + * + * Lists are created already containing one list item. The value of this + * item is the maximum possible that can be stored, it is therefore always at + * the end of the list and acts as a marker. The list member pxHead always + * points to this marker - even though it is at the tail of the list. This + * is because the tail contains a wrap back pointer to the true head of + * the list. 
+ * + * In addition to it's value, each list item contains a pointer to the next + * item in the list (pxNext), a pointer to the list it is in (pxContainer) + * and a pointer to back to the object that contains it. These later two + * pointers are included for efficiency of list manipulation. There is + * effectively a two way link between the object containing the list item and + * the list item itself. + * + * + * \page ListIntroduction List Implementation + * \ingroup FreeRTOSIntro + */ + +#ifndef INC_FREERTOS_H + #error FreeRTOS.h must be included before list.h +#endif + +#ifndef LIST_H +#define LIST_H + +/* + * The list structure members are modified from within interrupts, and therefore + * by rights should be declared volatile. However, they are only modified in a + * functionally atomic way (within critical sections of with the scheduler + * suspended) and are either passed by reference into a function or indexed via + * a volatile variable. Therefore, in all use cases tested so far, the volatile + * qualifier can be omitted in order to provide a moderate performance + * improvement without adversely affecting functional behaviour. The assembly + * instructions generated by the IAR, ARM and GCC compilers when the respective + * compiler's options were set for maximum optimisation has been inspected and + * deemed to be as intended. That said, as compiler technology advances, and + * especially if aggressive cross module optimisation is used (a use case that + * has not been exercised to any great extend) then it is feasible that the + * volatile qualifier will be needed for correct optimisation. It is expected + * that a compiler removing essential code because, without the volatile + * qualifier on the list structure members and with aggressive cross module + * optimisation, the compiler deemed the code unnecessary will result in + * complete and obvious failure of the scheduler. If this is ever experienced + * then the volatile qualifier can be inserted in the relevant places within the + * list structures by simply defining configLIST_VOLATILE to volatile in + * FreeRTOSConfig.h (as per the example at the bottom of this comment block). + * If configLIST_VOLATILE is not defined then the preprocessor directives below + * will simply #define configLIST_VOLATILE away completely. + * + * To use volatile list structure members then add the following line to + * FreeRTOSConfig.h (without the quotes): + * "#define configLIST_VOLATILE volatile" + */ +#ifndef configLIST_VOLATILE + #define configLIST_VOLATILE +#endif /* configSUPPORT_CROSS_MODULE_OPTIMISATION */ + +#ifdef __cplusplus +extern "C" { +#endif + +/* Macros that can be used to place known values within the list structures, +then check that the known values do not get corrupted during the execution of +the application. These may catch the list data structures being overwritten in +memory. They will not catch data errors caused by incorrect configuration or +use of FreeRTOS.*/ +#if( configUSE_LIST_DATA_INTEGRITY_CHECK_BYTES == 0 ) + /* Define the macros to do nothing. 
*/ + #define listFIRST_LIST_ITEM_INTEGRITY_CHECK_VALUE + #define listSECOND_LIST_ITEM_INTEGRITY_CHECK_VALUE + #define listFIRST_LIST_INTEGRITY_CHECK_VALUE + #define listSECOND_LIST_INTEGRITY_CHECK_VALUE + #define listSET_FIRST_LIST_ITEM_INTEGRITY_CHECK_VALUE( pxItem ) + #define listSET_SECOND_LIST_ITEM_INTEGRITY_CHECK_VALUE( pxItem ) + #define listSET_LIST_INTEGRITY_CHECK_1_VALUE( pxList ) + #define listSET_LIST_INTEGRITY_CHECK_2_VALUE( pxList ) + #define listTEST_LIST_ITEM_INTEGRITY( pxItem ) + #define listTEST_LIST_INTEGRITY( pxList ) +#else + /* Define macros that add new members into the list structures. */ + #define listFIRST_LIST_ITEM_INTEGRITY_CHECK_VALUE TickType_t xListItemIntegrityValue1; + #define listSECOND_LIST_ITEM_INTEGRITY_CHECK_VALUE TickType_t xListItemIntegrityValue2; + #define listFIRST_LIST_INTEGRITY_CHECK_VALUE TickType_t xListIntegrityValue1; + #define listSECOND_LIST_INTEGRITY_CHECK_VALUE TickType_t xListIntegrityValue2; + + /* Define macros that set the new structure members to known values. */ + #define listSET_FIRST_LIST_ITEM_INTEGRITY_CHECK_VALUE( pxItem ) ( pxItem )->xListItemIntegrityValue1 = pdINTEGRITY_CHECK_VALUE + #define listSET_SECOND_LIST_ITEM_INTEGRITY_CHECK_VALUE( pxItem ) ( pxItem )->xListItemIntegrityValue2 = pdINTEGRITY_CHECK_VALUE + #define listSET_LIST_INTEGRITY_CHECK_1_VALUE( pxList ) ( pxList )->xListIntegrityValue1 = pdINTEGRITY_CHECK_VALUE + #define listSET_LIST_INTEGRITY_CHECK_2_VALUE( pxList ) ( pxList )->xListIntegrityValue2 = pdINTEGRITY_CHECK_VALUE + + /* Define macros that will assert if one of the structure members does not + contain its expected value. */ + #define listTEST_LIST_ITEM_INTEGRITY( pxItem ) configASSERT( ( ( pxItem )->xListItemIntegrityValue1 == pdINTEGRITY_CHECK_VALUE ) && ( ( pxItem )->xListItemIntegrityValue2 == pdINTEGRITY_CHECK_VALUE ) ) + #define listTEST_LIST_INTEGRITY( pxList ) configASSERT( ( ( pxList )->xListIntegrityValue1 == pdINTEGRITY_CHECK_VALUE ) && ( ( pxList )->xListIntegrityValue2 == pdINTEGRITY_CHECK_VALUE ) ) +#endif /* configUSE_LIST_DATA_INTEGRITY_CHECK_BYTES */ + + +/* + * Definition of the only type of object that a list can contain. + */ +struct xLIST; +struct xLIST_ITEM +{ + listFIRST_LIST_ITEM_INTEGRITY_CHECK_VALUE /*< Set to a known value if configUSE_LIST_DATA_INTEGRITY_CHECK_BYTES is set to 1. */ + configLIST_VOLATILE TickType_t xItemValue; /*< The value being listed. In most cases this is used to sort the list in descending order. */ + struct xLIST_ITEM * configLIST_VOLATILE pxNext; /*< Pointer to the next ListItem_t in the list. */ + struct xLIST_ITEM * configLIST_VOLATILE pxPrevious; /*< Pointer to the previous ListItem_t in the list. */ + void * pvOwner; /*< Pointer to the object (normally a TCB) that contains the list item. There is therefore a two way link between the object containing the list item and the list item itself. */ + struct xLIST * configLIST_VOLATILE pxContainer; /*< Pointer to the list in which this list item is placed (if any). */ + listSECOND_LIST_ITEM_INTEGRITY_CHECK_VALUE /*< Set to a known value if configUSE_LIST_DATA_INTEGRITY_CHECK_BYTES is set to 1. */ +}; +typedef struct xLIST_ITEM ListItem_t; /* For some reason lint wants this as two separate definitions. */ + +struct xMINI_LIST_ITEM +{ + listFIRST_LIST_ITEM_INTEGRITY_CHECK_VALUE /*< Set to a known value if configUSE_LIST_DATA_INTEGRITY_CHECK_BYTES is set to 1. 
*/ + configLIST_VOLATILE TickType_t xItemValue; + struct xLIST_ITEM * configLIST_VOLATILE pxNext; + struct xLIST_ITEM * configLIST_VOLATILE pxPrevious; +}; +typedef struct xMINI_LIST_ITEM MiniListItem_t; + +/* + * Definition of the type of queue used by the scheduler. + */ +typedef struct xLIST +{ + listFIRST_LIST_INTEGRITY_CHECK_VALUE /*< Set to a known value if configUSE_LIST_DATA_INTEGRITY_CHECK_BYTES is set to 1. */ + volatile UBaseType_t uxNumberOfItems; + ListItem_t * configLIST_VOLATILE pxIndex; /*< Used to walk through the list. Points to the last item returned by a call to listGET_OWNER_OF_NEXT_ENTRY (). */ + MiniListItem_t xListEnd; /*< List item that contains the maximum possible item value meaning it is always at the end of the list and is therefore used as a marker. */ + listSECOND_LIST_INTEGRITY_CHECK_VALUE /*< Set to a known value if configUSE_LIST_DATA_INTEGRITY_CHECK_BYTES is set to 1. */ +} List_t; + +/* + * Access macro to set the owner of a list item. The owner of a list item + * is the object (usually a TCB) that contains the list item. + * + * \page listSET_LIST_ITEM_OWNER listSET_LIST_ITEM_OWNER + * \ingroup LinkedList + */ +#define listSET_LIST_ITEM_OWNER( pxListItem, pxOwner ) ( ( pxListItem )->pvOwner = ( void * ) ( pxOwner ) ) + +/* + * Access macro to get the owner of a list item. The owner of a list item + * is the object (usually a TCB) that contains the list item. + * + * \page listSET_LIST_ITEM_OWNER listSET_LIST_ITEM_OWNER + * \ingroup LinkedList + */ +#define listGET_LIST_ITEM_OWNER( pxListItem ) ( ( pxListItem )->pvOwner ) + +/* + * Access macro to set the value of the list item. In most cases the value is + * used to sort the list in descending order. + * + * \page listSET_LIST_ITEM_VALUE listSET_LIST_ITEM_VALUE + * \ingroup LinkedList + */ +#define listSET_LIST_ITEM_VALUE( pxListItem, xValue ) ( ( pxListItem )->xItemValue = ( xValue ) ) + +/* + * Access macro to retrieve the value of the list item. The value can + * represent anything - for example the priority of a task, or the time at + * which a task should be unblocked. + * + * \page listGET_LIST_ITEM_VALUE listGET_LIST_ITEM_VALUE + * \ingroup LinkedList + */ +#define listGET_LIST_ITEM_VALUE( pxListItem ) ( ( pxListItem )->xItemValue ) + +/* + * Access macro to retrieve the value of the list item at the head of a given + * list. + * + * \page listGET_LIST_ITEM_VALUE listGET_LIST_ITEM_VALUE + * \ingroup LinkedList + */ +#define listGET_ITEM_VALUE_OF_HEAD_ENTRY( pxList ) ( ( ( pxList )->xListEnd ).pxNext->xItemValue ) + +/* + * Return the list item at the head of the list. + * + * \page listGET_HEAD_ENTRY listGET_HEAD_ENTRY + * \ingroup LinkedList + */ +#define listGET_HEAD_ENTRY( pxList ) ( ( ( pxList )->xListEnd ).pxNext ) + +/* + * Return the list item at the head of the list. + * + * \page listGET_NEXT listGET_NEXT + * \ingroup LinkedList + */ +#define listGET_NEXT( pxListItem ) ( ( pxListItem )->pxNext ) + +/* + * Return the list item that marks the end of the list + * + * \page listGET_END_MARKER listGET_END_MARKER + * \ingroup LinkedList + */ +#define listGET_END_MARKER( pxList ) ( ( ListItem_t const * ) ( &( ( pxList )->xListEnd ) ) ) + +/* + * Access macro to determine if a list contains any items. The macro will + * only have the value true if the list is empty. + * + * \page listLIST_IS_EMPTY listLIST_IS_EMPTY + * \ingroup LinkedList + */ +#define listLIST_IS_EMPTY( pxList ) ( ( ( pxList )->uxNumberOfItems == ( UBaseType_t ) 0 ) ? 
pdTRUE : pdFALSE ) + +/* + * Access macro to return the number of items in the list. + */ +#define listCURRENT_LIST_LENGTH( pxList ) ( ( pxList )->uxNumberOfItems ) + +/* + * Access function to obtain the owner of the next entry in a list. + * + * The list member pxIndex is used to walk through a list. Calling + * listGET_OWNER_OF_NEXT_ENTRY increments pxIndex to the next item in the list + * and returns that entry's pxOwner parameter. Using multiple calls to this + * function it is therefore possible to move through every item contained in + * a list. + * + * The pxOwner parameter of a list item is a pointer to the object that owns + * the list item. In the scheduler this is normally a task control block. + * The pxOwner parameter effectively creates a two way link between the list + * item and its owner. + * + * @param pxTCB pxTCB is set to the address of the owner of the next list item. + * @param pxList The list from which the next item owner is to be returned. + * + * \page listGET_OWNER_OF_NEXT_ENTRY listGET_OWNER_OF_NEXT_ENTRY + * \ingroup LinkedList + */ +#define listGET_OWNER_OF_NEXT_ENTRY( pxTCB, pxList ) \ +{ \ +List_t * const pxConstList = ( pxList ); \ + /* Increment the index to the next item and return the item, ensuring */ \ + /* we don't return the marker used at the end of the list. */ \ + ( pxConstList )->pxIndex = ( pxConstList )->pxIndex->pxNext; \ + if( ( void * ) ( pxConstList )->pxIndex == ( void * ) &( ( pxConstList )->xListEnd ) ) \ + { \ + ( pxConstList )->pxIndex = ( pxConstList )->pxIndex->pxNext; \ + } \ + ( pxTCB ) = ( pxConstList )->pxIndex->pvOwner; \ +} + + +/* + * Access function to obtain the owner of the first entry in a list. Lists + * are normally sorted in ascending item value order. + * + * This function returns the pxOwner member of the first item in the list. + * The pxOwner parameter of a list item is a pointer to the object that owns + * the list item. In the scheduler this is normally a task control block. + * The pxOwner parameter effectively creates a two way link between the list + * item and its owner. + * + * @param pxList The list from which the owner of the head item is to be + * returned. + * + * \page listGET_OWNER_OF_HEAD_ENTRY listGET_OWNER_OF_HEAD_ENTRY + * \ingroup LinkedList + */ +#define listGET_OWNER_OF_HEAD_ENTRY( pxList ) ( (&( ( pxList )->xListEnd ))->pxNext->pvOwner ) + +/* + * Check to see if a list item is within a list. The list item maintains a + * "container" pointer that points to the list it is in. All this macro does + * is check to see if the container and the list match. + * + * @param pxList The list we want to know if the list item is within. + * @param pxListItem The list item we want to know if is in the list. + * @return pdTRUE if the list item is in the list, otherwise pdFALSE. + */ +#define listIS_CONTAINED_WITHIN( pxList, pxListItem ) ( ( ( pxListItem )->pxContainer == ( pxList ) ) ? ( pdTRUE ) : ( pdFALSE ) ) + +/* + * Return the list a list item is contained within (referenced from). + * + * @param pxListItem The list item being queried. + * @return A pointer to the List_t object that references the pxListItem + */ +#define listLIST_ITEM_CONTAINER( pxListItem ) ( ( pxListItem )->pxContainer ) + +/* + * This provides a crude means of knowing if a list has been initialised, as + * pxList->xListEnd.xItemValue is set to portMAX_DELAY by the vListInitialise() + * function. 
+ */ +#define listLIST_IS_INITIALISED( pxList ) ( ( pxList )->xListEnd.xItemValue == portMAX_DELAY ) + +/* + * Must be called before a list is used! This initialises all the members + * of the list structure and inserts the xListEnd item into the list as a + * marker to the back of the list. + * + * @param pxList Pointer to the list being initialised. + * + * \page vListInitialise vListInitialise + * \ingroup LinkedList + */ +void vListInitialise( List_t * const pxList ) PRIVILEGED_FUNCTION; + +/* + * Must be called before a list item is used. This sets the list container to + * null so the item does not think that it is already contained in a list. + * + * @param pxItem Pointer to the list item being initialised. + * + * \page vListInitialiseItem vListInitialiseItem + * \ingroup LinkedList + */ +void vListInitialiseItem( ListItem_t * const pxItem ) PRIVILEGED_FUNCTION; + +/* + * Insert a list item into a list. The item will be inserted into the list in + * a position determined by its item value (descending item value order). + * + * @param pxList The list into which the item is to be inserted. + * + * @param pxNewListItem The item that is to be placed in the list. + * + * \page vListInsert vListInsert + * \ingroup LinkedList + */ +void vListInsert( List_t * const pxList, ListItem_t * const pxNewListItem ) PRIVILEGED_FUNCTION; + +/* + * Insert a list item into a list. The item will be inserted in a position + * such that it will be the last item within the list returned by multiple + * calls to listGET_OWNER_OF_NEXT_ENTRY. + * + * The list member pxIndex is used to walk through a list. Calling + * listGET_OWNER_OF_NEXT_ENTRY increments pxIndex to the next item in the list. + * Placing an item in a list using vListInsertEnd effectively places the item + * in the list position pointed to by pxIndex. This means that every other + * item within the list will be returned by listGET_OWNER_OF_NEXT_ENTRY before + * the pxIndex parameter again points to the item being inserted. + * + * @param pxList The list into which the item is to be inserted. + * + * @param pxNewListItem The list item to be inserted into the list. + * + * \page vListInsertEnd vListInsertEnd + * \ingroup LinkedList + */ +void vListInsertEnd( List_t * const pxList, ListItem_t * const pxNewListItem ) PRIVILEGED_FUNCTION; + +/* + * Remove an item from a list. The list item has a pointer to the list that + * it is in, so only the list item need be passed into the function. + * + * @param uxListRemove The item to be removed. The item will remove itself from + * the list pointed to by it's pxContainer parameter. + * + * @return The number of items that remain in the list after the list item has + * been removed. + * + * \page uxListRemove uxListRemove + * \ingroup LinkedList + */ +UBaseType_t uxListRemove( ListItem_t * const pxItemToRemove ) PRIVILEGED_FUNCTION; + +#ifdef __cplusplus +} +#endif + +#endif + diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/FreeRTOS/Source/include/message_buffer.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/FreeRTOS/Source/include/message_buffer.h new file mode 100644 index 0000000..9ee3f4d --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/FreeRTOS/Source/include/message_buffer.h @@ -0,0 +1,799 @@ +/* + * FreeRTOS Kernel V10.2.1 + * Copyright (C) 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * http://www.FreeRTOS.org + * http://aws.amazon.com/freertos + * + * 1 tab == 4 spaces! + */ + + +/* + * Message buffers build functionality on top of FreeRTOS stream buffers. + * Whereas stream buffers are used to send a continuous stream of data from one + * task or interrupt to another, message buffers are used to send variable + * length discrete messages from one task or interrupt to another. Their + * implementation is light weight, making them particularly suited for interrupt + * to task and core to core communication scenarios. + * + * ***NOTE***: Uniquely among FreeRTOS objects, the stream buffer + * implementation (so also the message buffer implementation, as message buffers + * are built on top of stream buffers) assumes there is only one task or + * interrupt that will write to the buffer (the writer), and only one task or + * interrupt that will read from the buffer (the reader). It is safe for the + * writer and reader to be different tasks or interrupts, but, unlike other + * FreeRTOS objects, it is not safe to have multiple different writers or + * multiple different readers. If there are to be multiple different writers + * then the application writer must place each call to a writing API function + * (such as xMessageBufferSend()) inside a critical section and set the send + * block time to 0. Likewise, if there are to be multiple different readers + * then the application writer must place each call to a reading API function + * (such as xMessageBufferRead()) inside a critical section and set the receive + * timeout to 0. + * + * Message buffers hold variable length messages. To enable that, when a + * message is written to the message buffer an additional sizeof( size_t ) bytes + * are also written to store the message's length (that happens internally, with + * the API function). sizeof( size_t ) is typically 4 bytes on a 32-bit + * architecture, so writing a 10 byte message to a message buffer on a 32-bit + * architecture will actually reduce the available space in the message buffer + * by 14 bytes (10 byte are used by the message, and 4 bytes to hold the length + * of the message). + */ + +#ifndef FREERTOS_MESSAGE_BUFFER_H +#define FREERTOS_MESSAGE_BUFFER_H + +/* Message buffers are built onto of stream buffers. */ +#include "stream_buffer.h" + +#if defined( __cplusplus ) +extern "C" { +#endif + +/** + * Type by which message buffers are referenced. 
For example, a call to + * xMessageBufferCreate() returns an MessageBufferHandle_t variable that can + * then be used as a parameter to xMessageBufferSend(), xMessageBufferReceive(), + * etc. + */ +typedef void * MessageBufferHandle_t; + +/*-----------------------------------------------------------*/ + +/** + * message_buffer.h + * +
+MessageBufferHandle_t xMessageBufferCreate( size_t xBufferSizeBytes );
+
+ * + * Creates a new message buffer using dynamically allocated memory. See + * xMessageBufferCreateStatic() for a version that uses statically allocated + * memory (memory that is allocated at compile time). + * + * configSUPPORT_DYNAMIC_ALLOCATION must be set to 1 or left undefined in + * FreeRTOSConfig.h for xMessageBufferCreate() to be available. + * + * @param xBufferSizeBytes The total number of bytes (not messages) the message + * buffer will be able to hold at any one time. When a message is written to + * the message buffer an additional sizeof( size_t ) bytes are also written to + * store the message's length. sizeof( size_t ) is typically 4 bytes on a + * 32-bit architecture, so on most 32-bit architectures a 10 byte message will + * take up 14 bytes of message buffer space. + * + * @return If NULL is returned, then the message buffer cannot be created + * because there is insufficient heap memory available for FreeRTOS to allocate + * the message buffer data structures and storage area. A non-NULL value being + * returned indicates that the message buffer has been created successfully - + * the returned value should be stored as the handle to the created message + * buffer. + * + * Example use: +
+
+void vAFunction( void )
+{
+MessageBufferHandle_t xMessageBuffer;
+const size_t xMessageBufferSizeBytes = 100;
+
+    // Create a message buffer that can hold 100 bytes.  The memory used to hold
+    // both the message buffer structure and the messages themselves is allocated
+    // dynamically.  Each message added to the buffer consumes an additional 4
+    // bytes which are used to hold the length of the message.
+    xMessageBuffer = xMessageBufferCreate( xMessageBufferSizeBytes );
+
+    if( xMessageBuffer == NULL )
+    {
+        // There was not enough heap memory space available to create the
+        // message buffer.
+    }
+    else
+    {
+        // The message buffer was created successfully and can now be used.
+    }
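+
+    // A possible follow-on, shown here as a sketch rather than as part of the
+    // original example: once the message buffer is no longer required it can
+    // be deleted again with vMessageBufferDelete(), after which the handle
+    // must not be used.
+    if( xMessageBuffer != NULL )
+    {
+        vMessageBufferDelete( xMessageBuffer );
+    }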
+}
+
+ * \defgroup xMessageBufferCreate xMessageBufferCreate + * \ingroup MessageBufferManagement + */ +#define xMessageBufferCreate( xBufferSizeBytes ) ( MessageBufferHandle_t ) xStreamBufferGenericCreate( xBufferSizeBytes, ( size_t ) 0, pdTRUE ) + +/** + * message_buffer.h + * +
+MessageBufferHandle_t xMessageBufferCreateStatic( size_t xBufferSizeBytes,
+                                                  uint8_t *pucMessageBufferStorageArea,
+                                                  StaticMessageBuffer_t *pxStaticMessageBuffer );
+
+ * Creates a new message buffer using statically allocated memory. See + * xMessageBufferCreate() for a version that uses dynamically allocated memory. + * + * @param xBufferSizeBytes The size, in bytes, of the buffer pointed to by the + * pucMessageBufferStorageArea parameter. When a message is written to the + * message buffer an additional sizeof( size_t ) bytes are also written to store + * the message's length. sizeof( size_t ) is typically 4 bytes on a 32-bit + * architecture, so on most 32-bit architecture a 10 byte message will take up + * 14 bytes of message buffer space. The maximum number of bytes that can be + * stored in the message buffer is actually (xBufferSizeBytes - 1). + * + * @param pucMessageBufferStorageArea Must point to a uint8_t array that is at + * least xBufferSizeBytes + 1 big. This is the array to which messages are + * copied when they are written to the message buffer. + * + * @param pxStaticMessageBuffer Must point to a variable of type + * StaticMessageBuffer_t, which will be used to hold the message buffer's data + * structure. + * + * @return If the message buffer is created successfully then a handle to the + * created message buffer is returned. If either pucMessageBufferStorageArea or + * pxStaticmessageBuffer are NULL then NULL is returned. + * + * Example use: +
+
+// Used to dimension the array used to hold the messages.  The available space
+// will actually be one less than this, so 999.
+#define STORAGE_SIZE_BYTES 1000
+
+// Defines the memory that will actually hold the messages within the message
+// buffer.
+static uint8_t ucStorageBuffer[ STORAGE_SIZE_BYTES ];
+
+// The variable used to hold the message buffer structure.
+StaticMessageBuffer_t xMessageBufferStruct;
+
+void MyFunction( void )
+{
+MessageBufferHandle_t xMessageBuffer;
+
+    xMessageBuffer = xMessageBufferCreateStatic( sizeof( ucStorageBuffer ),
+                                                 ucStorageBuffer,
+                                                 &xMessageBufferStruct );
+
+    // As neither the pucMessageBufferStorageArea or pxStaticMessageBuffer
+    // parameters were NULL, xMessageBuffer will not be NULL, and can be used to
+    // reference the created message buffer in other message buffer API calls.
+
+    // Other code that uses the message buffer can go here.
+}
+
+
+ * \defgroup xMessageBufferCreateStatic xMessageBufferCreateStatic + * \ingroup MessageBufferManagement + */ +#define xMessageBufferCreateStatic( xBufferSizeBytes, pucMessageBufferStorageArea, pxStaticMessageBuffer ) ( MessageBufferHandle_t ) xStreamBufferGenericCreateStatic( xBufferSizeBytes, 0, pdTRUE, pucMessageBufferStorageArea, pxStaticMessageBuffer ) + +/** + * message_buffer.h + * +
+size_t xMessageBufferSend( MessageBufferHandle_t xMessageBuffer,
+                           const void *pvTxData,
+                           size_t xDataLengthBytes,
+                           TickType_t xTicksToWait );
+
+ *
+ * Sends a discrete message to the message buffer.  The message can be any
+ * length that fits within the buffer's free space, and is copied into the
+ * buffer.
+ *
+ * ***NOTE***:  Uniquely among FreeRTOS objects, the stream buffer
+ * implementation (so also the message buffer implementation, as message buffers
+ * are built on top of stream buffers) assumes there is only one task or
+ * interrupt that will write to the buffer (the writer), and only one task or
+ * interrupt that will read from the buffer (the reader).  It is safe for the
+ * writer and reader to be different tasks or interrupts, but, unlike other
+ * FreeRTOS objects, it is not safe to have multiple different writers or
+ * multiple different readers.  If there are to be multiple different writers
+ * then the application writer must place each call to a writing API function
+ * (such as xMessageBufferSend()) inside a critical section and set the send
+ * block time to 0.  Likewise, if there are to be multiple different readers
+ * then the application writer must place each call to a reading API function
+ * (such as xMessageBufferRead()) inside a critical section and set the receive
+ * block time to 0.
+ *
+ * Use xMessageBufferSend() to write to a message buffer from a task.  Use
+ * xMessageBufferSendFromISR() to write to a message buffer from an interrupt
+ * service routine (ISR).
+ *
+ * @param xMessageBuffer The handle of the message buffer to which a message is
+ * being sent.
+ *
+ * @param pvTxData A pointer to the message that is to be copied into the
+ * message buffer.
+ *
+ * @param xDataLengthBytes The length of the message.  That is, the number of
+ * bytes to copy from pvTxData into the message buffer.  When a message is
+ * written to the message buffer an additional sizeof( size_t ) bytes are also
+ * written to store the message's length.  sizeof( size_t ) is typically 4 bytes
+ * on a 32-bit architecture, so on most 32-bit architectures setting
+ * xDataLengthBytes to 20 will reduce the free space in the message buffer by 24
+ * bytes (20 bytes of message data and 4 bytes to hold the message length).
+ *
+ * @param xTicksToWait The maximum amount of time the calling task should remain
+ * in the Blocked state to wait for enough space to become available in the
+ * message buffer, should the message buffer have insufficient space when
+ * xMessageBufferSend() is called.  The calling task will never block if
+ * xTicksToWait is zero.  The block time is specified in tick periods, so the
+ * absolute time it represents is dependent on the tick frequency.  The macro
+ * pdMS_TO_TICKS() can be used to convert a time specified in milliseconds into
+ * a time specified in ticks.  Setting xTicksToWait to portMAX_DELAY will cause
+ * the task to wait indefinitely (without timing out), provided
+ * INCLUDE_vTaskSuspend is set to 1 in FreeRTOSConfig.h.  Tasks do not use any
+ * CPU time when they are in the Blocked state.
+ *
+ * @return The number of bytes written to the message buffer.  If the call to
+ * xMessageBufferSend() times out before there was enough space to write the
+ * message into the message buffer then zero is returned.  If the call did not
+ * time out then xDataLengthBytes is returned.
+ *
+ * Example use:
+
+void vAFunction( MessageBufferHandle_t xMessageBuffer )
+{
+size_t xBytesSent;
+uint8_t ucArrayToSend[] = { 0, 1, 2, 3 };
+char *pcStringToSend = "String to send";
+const TickType_t x100ms = pdMS_TO_TICKS( 100 );
+
+    // Send an array to the message buffer, blocking for a maximum of 100ms to
+    // wait for enough space to be available in the message buffer.
+    xBytesSent = xMessageBufferSend( xMessageBuffer, ( void * ) ucArrayToSend, sizeof( ucArrayToSend ), x100ms );
+
+    if( xBytesSent != sizeof( ucArrayToSend ) )
+    {
+        // The call to xMessageBufferSend() timed out before there was enough
+        // space in the buffer for the data to be written.
+    }
+
+    // Send the string to the message buffer.  Return immediately if there is
+    // not enough space in the buffer.
+    xBytesSent = xMessageBufferSend( xMessageBuffer, ( void * ) pcStringToSend, strlen( pcStringToSend ), 0 );
+
+    if( xBytesSent != strlen( pcStringToSend ) )
+    {
+        // The string could not be added to the message buffer because there was
+        // not enough free space in the buffer.
+    }
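+
+    // A sketch of an indefinitely blocking send:  as described above, setting
+    // the block time to portMAX_DELAY waits without timing out, provided
+    // INCLUDE_vTaskSuspend is set to 1 in FreeRTOSConfig.h.
+    xBytesSent = xMessageBufferSend( xMessageBuffer, ( void * ) ucArrayToSend, sizeof( ucArrayToSend ), portMAX_DELAY );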
+}
+
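+// A sketch of the multiple-writer pattern described in the note above (the
+// function name is illustrative):  each writing task wraps its call to
+// xMessageBufferSend() in a critical section using the standard
+// taskENTER_CRITICAL()/taskEXIT_CRITICAL() macros, and uses a send block
+// time of 0.
+void vSendFromMultipleTasks( MessageBufferHandle_t xMessageBuffer,
+                             const void *pvData,
+                             size_t xLength )
+{
+size_t xBytesSent;
+
+    taskENTER_CRITICAL();
+    {
+        xBytesSent = xMessageBufferSend( xMessageBuffer, pvData, xLength, 0 );
+    }
+    taskEXIT_CRITICAL();
+
+    if( xBytesSent != xLength )
+    {
+        // The message could not be written because there was not enough free
+        // space in the buffer.
+    }
+}
+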
+ * \defgroup xMessageBufferSend xMessageBufferSend + * \ingroup MessageBufferManagement + */ +#define xMessageBufferSend( xMessageBuffer, pvTxData, xDataLengthBytes, xTicksToWait ) xStreamBufferSend( ( StreamBufferHandle_t ) xMessageBuffer, pvTxData, xDataLengthBytes, xTicksToWait ) + +/** + * message_buffer.h + * +
+size_t xMessageBufferSendFromISR( MessageBufferHandle_t xMessageBuffer,
+                                  const void *pvTxData,
+                                  size_t xDataLengthBytes,
+                                  BaseType_t *pxHigherPriorityTaskWoken );
+
+ *
+ * Interrupt safe version of the API function that sends a discrete message to
+ * the message buffer.  The message can be any length that fits within the
+ * buffer's free space, and is copied into the buffer.
+ *
+ * ***NOTE***:  Uniquely among FreeRTOS objects, the stream buffer
+ * implementation (so also the message buffer implementation, as message buffers
+ * are built on top of stream buffers) assumes there is only one task or
+ * interrupt that will write to the buffer (the writer), and only one task or
+ * interrupt that will read from the buffer (the reader).  It is safe for the
+ * writer and reader to be different tasks or interrupts, but, unlike other
+ * FreeRTOS objects, it is not safe to have multiple different writers or
+ * multiple different readers.  If there are to be multiple different writers
+ * then the application writer must place each call to a writing API function
+ * (such as xMessageBufferSend()) inside a critical section and set the send
+ * block time to 0.  Likewise, if there are to be multiple different readers
+ * then the application writer must place each call to a reading API function
+ * (such as xMessageBufferRead()) inside a critical section and set the receive
+ * block time to 0.
+ *
+ * Use xMessageBufferSend() to write to a message buffer from a task.  Use
+ * xMessageBufferSendFromISR() to write to a message buffer from an interrupt
+ * service routine (ISR).
+ *
+ * @param xMessageBuffer The handle of the message buffer to which a message is
+ * being sent.
+ *
+ * @param pvTxData A pointer to the message that is to be copied into the
+ * message buffer.
+ *
+ * @param xDataLengthBytes The length of the message.  That is, the number of
+ * bytes to copy from pvTxData into the message buffer.  When a message is
+ * written to the message buffer an additional sizeof( size_t ) bytes are also
+ * written to store the message's length.  sizeof( size_t ) is typically 4 bytes
+ * on a 32-bit architecture, so on most 32-bit architectures setting
+ * xDataLengthBytes to 20 will reduce the free space in the message buffer by 24
+ * bytes (20 bytes of message data and 4 bytes to hold the message length).
+ *
+ * @param pxHigherPriorityTaskWoken  It is possible that a message buffer will
+ * have a task blocked on it waiting for data.  Calling
+ * xMessageBufferSendFromISR() can make data available, and so cause a task that
+ * was waiting for data to leave the Blocked state.  If calling
+ * xMessageBufferSendFromISR() causes a task to leave the Blocked state, and the
+ * unblocked task has a priority higher than the currently executing task (the
+ * task that was interrupted), then, internally, xMessageBufferSendFromISR()
+ * will set *pxHigherPriorityTaskWoken to pdTRUE.  If
+ * xMessageBufferSendFromISR() sets this value to pdTRUE, then normally a
+ * context switch should be performed before the interrupt is exited.  This will
+ * ensure that the interrupt returns directly to the highest priority Ready
+ * state task.  *pxHigherPriorityTaskWoken should be set to pdFALSE before it
+ * is passed into the function.  See the code example below for an example.
+ *
+ * @return The number of bytes actually written to the message buffer.  If the
+ * message buffer didn't have enough free space for the message to be stored
+ * then 0 is returned, otherwise xDataLengthBytes is returned.
+ *
+ * Example use:
+
+// A message buffer that has already been created.
+MessageBufferHandle_t xMessageBuffer;
+
+void vAnInterruptServiceRoutine( void )
+{
+size_t xBytesSent;
+char *pcStringToSend = "String to send";
+BaseType_t xHigherPriorityTaskWoken = pdFALSE; // Initialised to pdFALSE.
+
+    // Attempt to send the string to the message buffer.
+    xBytesSent = xMessageBufferSendFromISR( xMessageBuffer,
+                                            ( void * ) pcStringToSend,
+                                            strlen( pcStringToSend ),
+                                            &xHigherPriorityTaskWoken );
+
+    if( xBytesSent != strlen( pcStringToSend ) )
+    {
+        // The string could not be added to the message buffer because there was
+        // not enough free space in the buffer.
+    }
+
+    // If xHigherPriorityTaskWoken was set to pdTRUE inside
+    // xMessageBufferSendFromISR() then a task that has a priority above the
+    // priority of the currently executing task was unblocked and a context
+    // switch should be performed to ensure the ISR returns to the unblocked
+    // task.  In most FreeRTOS ports this is done by simply passing
+    // xHigherPriorityTaskWoken into taskYIELD_FROM_ISR(), which will test the
+    // variable's value, and perform the context switch if necessary.  Check the
+    // documentation for the port in use for port specific instructions.
+    taskYIELD_FROM_ISR( xHigherPriorityTaskWoken );
+}
+
+ * \defgroup xMessageBufferSendFromISR xMessageBufferSendFromISR + * \ingroup MessageBufferManagement + */ +#define xMessageBufferSendFromISR( xMessageBuffer, pvTxData, xDataLengthBytes, pxHigherPriorityTaskWoken ) xStreamBufferSendFromISR( ( StreamBufferHandle_t ) xMessageBuffer, pvTxData, xDataLengthBytes, pxHigherPriorityTaskWoken ) + +/** + * message_buffer.h + * +
+size_t xMessageBufferReceive( MessageBufferHandle_t xMessageBuffer,
+                              void *pvRxData,
+                              size_t xBufferLengthBytes,
+                              TickType_t xTicksToWait );
+
+ * + * Receives a discrete message from a message buffer. Messages can be of + * variable length and are copied out of the buffer. + * + * ***NOTE***: Uniquely among FreeRTOS objects, the stream buffer + * implementation (so also the message buffer implementation, as message buffers + * are built on top of stream buffers) assumes there is only one task or + * interrupt that will write to the buffer (the writer), and only one task or + * interrupt that will read from the buffer (the reader). It is safe for the + * writer and reader to be different tasks or interrupts, but, unlike other + * FreeRTOS objects, it is not safe to have multiple different writers or + * multiple different readers. If there are to be multiple different writers + * then the application writer must place each call to a writing API function + * (such as xMessageBufferSend()) inside a critical section and set the send + * block time to 0. Likewise, if there are to be multiple different readers + * then the application writer must place each call to a reading API function + * (such as xMessageBufferRead()) inside a critical section and set the receive + * block time to 0. + * + * Use xMessageBufferReceive() to read from a message buffer from a task. Use + * xMessageBufferReceiveFromISR() to read from a message buffer from an + * interrupt service routine (ISR). + * + * @param xMessageBuffer The handle of the message buffer from which a message + * is being received. + * + * @param pvRxData A pointer to the buffer into which the received message is + * to be copied. + * + * @param xBufferLengthBytes The length of the buffer pointed to by the pvRxData + * parameter. This sets the maximum length of the message that can be received. + * If xBufferLengthBytes is too small to hold the next message then the message + * will be left in the message buffer and 0 will be returned. + * + * @param xTicksToWait The maximum amount of time the task should remain in the + * Blocked state to wait for a message, should the message buffer be empty. + * xMessageBufferReceive() will return immediately if xTicksToWait is zero and + * the message buffer is empty. The block time is specified in tick periods, so + * the absolute time it represents is dependent on the tick frequency. The + * macro pdMS_TO_TICKS() can be used to convert a time specified in milliseconds + * into a time specified in ticks. Setting xTicksToWait to portMAX_DELAY will + * cause the task to wait indefinitely (without timing out), provided + * INCLUDE_vTaskSuspend is set to 1 in FreeRTOSConfig.h. Tasks do not use any + * CPU time when they are in the Blocked state. + * + * @return The length, in bytes, of the message read from the message buffer, if + * any. If xMessageBufferReceive() times out before a message became available + * then zero is returned. If the length of the message is greater than + * xBufferLengthBytes then the message will be left in the message buffer and + * zero is returned. + * + * Example use: +
+void vAFunction( MessageBufferHandle_t xMessageBuffer )
+{
+uint8_t ucRxData[ 20 ];
+size_t xReceivedBytes;
+const TickType_t xBlockTime = pdMS_TO_TICKS( 20 );
+
+    // Receive the next message from the message buffer.  Wait in the Blocked
+    // state (so not using any CPU processing time) for a maximum of 20ms for
+    // a message to become available.
+    xReceivedBytes = xMessageBufferReceive( xMessageBuffer,
+                                            ( void * ) ucRxData,
+                                            sizeof( ucRxData ),
+                                            xBlockTime );
+
+    if( xReceivedBytes > 0 )
+    {
+        // ucRxData contains a message that is xReceivedBytes long.  Process
+        // the message here....
+    }
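+    else
+    {
+        // Nothing was copied out.  A sketch of one way to diagnose this (not
+        // part of the original example):  if the next message was larger than
+        // ucRxData it remains in the buffer, and xMessageBufferNextLengthBytes()
+        // reports how large a receive buffer would need to be.
+        size_t xNextMessageLength = xMessageBufferNextLengthBytes( xMessageBuffer );
+        ( void ) xNextMessageLength;
+    }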
+}
+
+ * \defgroup xMessageBufferReceive xMessageBufferReceive + * \ingroup MessageBufferManagement + */ +#define xMessageBufferReceive( xMessageBuffer, pvRxData, xBufferLengthBytes, xTicksToWait ) xStreamBufferReceive( ( StreamBufferHandle_t ) xMessageBuffer, pvRxData, xBufferLengthBytes, xTicksToWait ) + + +/** + * message_buffer.h + * +
+size_t xMessageBufferReceiveFromISR( MessageBufferHandle_t xMessageBuffer,
+                                     void *pvRxData,
+                                     size_t xBufferLengthBytes,
+                                     BaseType_t *pxHigherPriorityTaskWoken );
+
+ * + * An interrupt safe version of the API function that receives a discrete + * message from a message buffer. Messages can be of variable length and are + * copied out of the buffer. + * + * ***NOTE***: Uniquely among FreeRTOS objects, the stream buffer + * implementation (so also the message buffer implementation, as message buffers + * are built on top of stream buffers) assumes there is only one task or + * interrupt that will write to the buffer (the writer), and only one task or + * interrupt that will read from the buffer (the reader). It is safe for the + * writer and reader to be different tasks or interrupts, but, unlike other + * FreeRTOS objects, it is not safe to have multiple different writers or + * multiple different readers. If there are to be multiple different writers + * then the application writer must place each call to a writing API function + * (such as xMessageBufferSend()) inside a critical section and set the send + * block time to 0. Likewise, if there are to be multiple different readers + * then the application writer must place each call to a reading API function + * (such as xMessageBufferRead()) inside a critical section and set the receive + * block time to 0. + * + * Use xMessageBufferReceive() to read from a message buffer from a task. Use + * xMessageBufferReceiveFromISR() to read from a message buffer from an + * interrupt service routine (ISR). + * + * @param xMessageBuffer The handle of the message buffer from which a message + * is being received. + * + * @param pvRxData A pointer to the buffer into which the received message is + * to be copied. + * + * @param xBufferLengthBytes The length of the buffer pointed to by the pvRxData + * parameter. This sets the maximum length of the message that can be received. + * If xBufferLengthBytes is too small to hold the next message then the message + * will be left in the message buffer and 0 will be returned. + * + * @param pxHigherPriorityTaskWoken It is possible that a message buffer will + * have a task blocked on it waiting for space to become available. Calling + * xMessageBufferReceiveFromISR() can make space available, and so cause a task + * that is waiting for space to leave the Blocked state. If calling + * xMessageBufferReceiveFromISR() causes a task to leave the Blocked state, and + * the unblocked task has a priority higher than the currently executing task + * (the task that was interrupted), then, internally, + * xMessageBufferReceiveFromISR() will set *pxHigherPriorityTaskWoken to pdTRUE. + * If xMessageBufferReceiveFromISR() sets this value to pdTRUE, then normally a + * context switch should be performed before the interrupt is exited. That will + * ensure the interrupt returns directly to the highest priority Ready state + * task. *pxHigherPriorityTaskWoken should be set to pdFALSE before it is + * passed into the function. See the code example below for an example. + * + * @return The length, in bytes, of the message read from the message buffer, if + * any. + * + * Example use: +
+// A message buffer that has already been created.
+MessageBufferHandle_t xMessageBuffer;
+
+void vAnInterruptServiceRoutine( void )
+{
+uint8_t ucRxData[ 20 ];
+size_t xReceivedBytes;
+BaseType_t xHigherPriorityTaskWoken = pdFALSE;  // Initialised to pdFALSE.
+
+    // Receive the next message from the message buffer.
+    xReceivedBytes = xMessageBufferReceiveFromISR( xMessageBuffer,
+                                                  ( void * ) ucRxData,
+                                                  sizeof( ucRxData ),
+                                                  &xHigherPriorityTaskWoken );
+
+    if( xReceivedBytes > 0 )
+    {
+        // ucRxData contains a message that is xReceivedBytes long.  Process
+        // the message here....
+    }
+
+    // If xHigherPriorityTaskWoken was set to pdTRUE inside
+    // xMessageBufferReceiveFromISR() then a task that has a priority above the
+    // priority of the currently executing task was unblocked and a context
+    // switch should be performed to ensure the ISR returns to the unblocked
+    // task.  In most FreeRTOS ports this is done by simply passing
+    // xHigherPriorityTaskWoken into taskYIELD_FROM_ISR(), which will test the
+    // variable's value, and perform the context switch if necessary.  Check the
+    // documentation for the port in use for port specific instructions.
+    taskYIELD_FROM_ISR( xHigherPriorityTaskWoken );
+}
+
+ * \defgroup xMessageBufferReceiveFromISR xMessageBufferReceiveFromISR + * \ingroup MessageBufferManagement + */ +#define xMessageBufferReceiveFromISR( xMessageBuffer, pvRxData, xBufferLengthBytes, pxHigherPriorityTaskWoken ) xStreamBufferReceiveFromISR( ( StreamBufferHandle_t ) xMessageBuffer, pvRxData, xBufferLengthBytes, pxHigherPriorityTaskWoken ) + +/** + * message_buffer.h + * +
+void vMessageBufferDelete( MessageBufferHandle_t xMessageBuffer );
+
+ * + * Deletes a message buffer that was previously created using a call to + * xMessageBufferCreate() or xMessageBufferCreateStatic(). If the message + * buffer was created using dynamic memory (that is, by xMessageBufferCreate()), + * then the allocated memory is freed. + * + * A message buffer handle must not be used after the message buffer has been + * deleted. + * + * @param xMessageBuffer The handle of the message buffer to be deleted. + * + */ +#define vMessageBufferDelete( xMessageBuffer ) vStreamBufferDelete( ( StreamBufferHandle_t ) xMessageBuffer ) + +/** + * message_buffer.h +
+BaseType_t xMessageBufferIsFull( MessageBufferHandle_t xMessageBuffer );
+
+ * + * Tests to see if a message buffer is full. A message buffer is full if it + * cannot accept any more messages, of any size, until space is made available + * by a message being removed from the message buffer. + * + * @param xMessageBuffer The handle of the message buffer being queried. + * + * @return If the message buffer referenced by xMessageBuffer is full then + * pdTRUE is returned. Otherwise pdFALSE is returned. + */ +#define xMessageBufferIsFull( xMessageBuffer ) xStreamBufferIsFull( ( StreamBufferHandle_t ) xMessageBuffer ) + +/** + * message_buffer.h +
+BaseType_t xMessageBufferIsEmpty( MessageBufferHandle_t xMessageBuffer );
+
+ * + * Tests to see if a message buffer is empty (does not contain any messages). + * + * @param xMessageBuffer The handle of the message buffer being queried. + * + * @return If the message buffer referenced by xMessageBuffer is empty then + * pdTRUE is returned. Otherwise pdFALSE is returned. + * + */ +#define xMessageBufferIsEmpty( xMessageBuffer ) xStreamBufferIsEmpty( ( StreamBufferHandle_t ) xMessageBuffer ) + +/** + * message_buffer.h +
+BaseType_t xMessageBufferReset( MessageBufferHandle_t xMessageBuffer );
+
+ * + * Resets a message buffer to its initial empty state, discarding any message it + * contained. + * + * A message buffer can only be reset if there are no tasks blocked on it. + * + * @param xMessageBuffer The handle of the message buffer being reset. + * + * @return If the message buffer was reset then pdPASS is returned. If the + * message buffer could not be reset because either there was a task blocked on + * the message queue to wait for space to become available, or to wait for a + * a message to be available, then pdFAIL is returned. + * + * \defgroup xMessageBufferReset xMessageBufferReset + * \ingroup MessageBufferManagement + */ +#define xMessageBufferReset( xMessageBuffer ) xStreamBufferReset( ( StreamBufferHandle_t ) xMessageBuffer ) + + +/** + * message_buffer.h +
+size_t xMessageBufferSpaceAvailable( MessageBufferHandle_t xMessageBuffer );
+
+ * Returns the number of bytes of free space in the message buffer. + * + * @param xMessageBuffer The handle of the message buffer being queried. + * + * @return The number of bytes that can be written to the message buffer before + * the message buffer would be full. When a message is written to the message + * buffer an additional sizeof( size_t ) bytes are also written to store the + * message's length. sizeof( size_t ) is typically 4 bytes on a 32-bit + * architecture, so if xMessageBufferSpacesAvailable() returns 10, then the size + * of the largest message that can be written to the message buffer is 6 bytes. + * + * \defgroup xMessageBufferSpaceAvailable xMessageBufferSpaceAvailable + * \ingroup MessageBufferManagement + */ +#define xMessageBufferSpaceAvailable( xMessageBuffer ) xStreamBufferSpacesAvailable( ( StreamBufferHandle_t ) xMessageBuffer ) +#define xMessageBufferSpacesAvailable( xMessageBuffer ) xStreamBufferSpacesAvailable( ( StreamBufferHandle_t ) xMessageBuffer ) /* Corrects typo in original macro name. */ + +/** + * message_buffer.h +
+size_t xMessageBufferNextLengthBytes( MessageBufferHandle_t xMessageBuffer );
+ 
+ * Returns the length (in bytes) of the next message in a message buffer. + * Useful if xMessageBufferReceive() returned 0 because the size of the buffer + * passed into xMessageBufferReceive() was too small to hold the next message. + * + * @param xMessageBuffer The handle of the message buffer being queried. + * + * @return The length (in bytes) of the next message in the message buffer, or 0 + * if the message buffer is empty. + * + * \defgroup xMessageBufferNextLengthBytes xMessageBufferNextLengthBytes + * \ingroup MessageBufferManagement + */ +#define xMessageBufferNextLengthBytes( xMessageBuffer ) xStreamBufferNextMessageLengthBytes( ( StreamBufferHandle_t ) xMessageBuffer ) PRIVILEGED_FUNCTION; + +/** + * message_buffer.h + * +
+BaseType_t xMessageBufferSendCompletedFromISR( MessageBufferHandle_t xStreamBuffer, BaseType_t *pxHigherPriorityTaskWoken );
+
+ * + * For advanced users only. + * + * The sbSEND_COMPLETED() macro is called from within the FreeRTOS APIs when + * data is sent to a message buffer or stream buffer. If there was a task that + * was blocked on the message or stream buffer waiting for data to arrive then + * the sbSEND_COMPLETED() macro sends a notification to the task to remove it + * from the Blocked state. xMessageBufferSendCompletedFromISR() does the same + * thing. It is provided to enable application writers to implement their own + * version of sbSEND_COMPLETED(), and MUST NOT BE USED AT ANY OTHER TIME. + * + * See the example implemented in FreeRTOS/Demo/Minimal/MessageBufferAMP.c for + * additional information. + * + * @param xStreamBuffer The handle of the stream buffer to which data was + * written. + * + * @param pxHigherPriorityTaskWoken *pxHigherPriorityTaskWoken should be + * initialised to pdFALSE before it is passed into + * xMessageBufferSendCompletedFromISR(). If calling + * xMessageBufferSendCompletedFromISR() removes a task from the Blocked state, + * and the task has a priority above the priority of the currently running task, + * then *pxHigherPriorityTaskWoken will get set to pdTRUE indicating that a + * context switch should be performed before exiting the ISR. + * + * @return If a task was removed from the Blocked state then pdTRUE is returned. + * Otherwise pdFALSE is returned. + * + * \defgroup xMessageBufferSendCompletedFromISR xMessageBufferSendCompletedFromISR + * \ingroup StreamBufferManagement + */ +#define xMessageBufferSendCompletedFromISR( xMessageBuffer, pxHigherPriorityTaskWoken ) xStreamBufferSendCompletedFromISR( ( StreamBufferHandle_t ) xMessageBuffer, pxHigherPriorityTaskWoken ) + +/** + * message_buffer.h + * +
+BaseType_t xMessageBufferReceiveCompletedFromISR( MessageBufferHandle_t xStreamBuffer, BaseType_t *pxHigherPriorityTaskWoken );
+
+ * + * For advanced users only. + * + * The sbRECEIVE_COMPLETED() macro is called from within the FreeRTOS APIs when + * data is read out of a message buffer or stream buffer. If there was a task + * that was blocked on the message or stream buffer waiting for data to arrive + * then the sbRECEIVE_COMPLETED() macro sends a notification to the task to + * remove it from the Blocked state. xMessageBufferReceiveCompletedFromISR() + * does the same thing. It is provided to enable application writers to + * implement their own version of sbRECEIVE_COMPLETED(), and MUST NOT BE USED AT + * ANY OTHER TIME. + * + * See the example implemented in FreeRTOS/Demo/Minimal/MessageBufferAMP.c for + * additional information. + * + * @param xStreamBuffer The handle of the stream buffer from which data was + * read. + * + * @param pxHigherPriorityTaskWoken *pxHigherPriorityTaskWoken should be + * initialised to pdFALSE before it is passed into + * xMessageBufferReceiveCompletedFromISR(). If calling + * xMessageBufferReceiveCompletedFromISR() removes a task from the Blocked state, + * and the task has a priority above the priority of the currently running task, + * then *pxHigherPriorityTaskWoken will get set to pdTRUE indicating that a + * context switch should be performed before exiting the ISR. + * + * @return If a task was removed from the Blocked state then pdTRUE is returned. + * Otherwise pdFALSE is returned. + * + * \defgroup xMessageBufferReceiveCompletedFromISR xMessageBufferReceiveCompletedFromISR + * \ingroup StreamBufferManagement + */ +#define xMessageBufferReceiveCompletedFromISR( xMessageBuffer, pxHigherPriorityTaskWoken ) xStreamBufferReceiveCompletedFromISR( ( StreamBufferHandle_t ) xMessageBuffer, pxHigherPriorityTaskWoken ) + +#if defined( __cplusplus ) +} /* extern "C" */ +#endif + +#endif /* !defined( FREERTOS_MESSAGE_BUFFER_H ) */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/FreeRTOS/Source/include/mpu_prototypes.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/FreeRTOS/Source/include/mpu_prototypes.h new file mode 100644 index 0000000..f0b3337 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/FreeRTOS/Source/include/mpu_prototypes.h @@ -0,0 +1,157 @@ +/* + * FreeRTOS Kernel V10.2.1 + * Copyright (C) 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ * + * http://www.FreeRTOS.org + * http://aws.amazon.com/freertos + * + * 1 tab == 4 spaces! + */ + +/* + * When the MPU is used the standard (non MPU) API functions are mapped to + * equivalents that start "MPU_", the prototypes for which are defined in this + * header files. This will cause the application code to call the MPU_ version + * which wraps the non-MPU version with privilege promoting then demoting code, + * so the kernel code always runs will full privileges. + */ + + +#ifndef MPU_PROTOTYPES_H +#define MPU_PROTOTYPES_H + +/* MPU versions of tasks.h API functions. */ +BaseType_t MPU_xTaskCreate( TaskFunction_t pxTaskCode, const char * const pcName, const uint16_t usStackDepth, void * const pvParameters, UBaseType_t uxPriority, TaskHandle_t * const pxCreatedTask ) FREERTOS_SYSTEM_CALL; +TaskHandle_t MPU_xTaskCreateStatic( TaskFunction_t pxTaskCode, const char * const pcName, const uint32_t ulStackDepth, void * const pvParameters, UBaseType_t uxPriority, StackType_t * const puxStackBuffer, StaticTask_t * const pxTaskBuffer ) FREERTOS_SYSTEM_CALL; +BaseType_t MPU_xTaskCreateRestricted( const TaskParameters_t * const pxTaskDefinition, TaskHandle_t *pxCreatedTask ) FREERTOS_SYSTEM_CALL; +BaseType_t MPU_xTaskCreateRestrictedStatic( const TaskParameters_t * const pxTaskDefinition, TaskHandle_t *pxCreatedTask ) FREERTOS_SYSTEM_CALL; +void MPU_vTaskAllocateMPURegions( TaskHandle_t xTask, const MemoryRegion_t * const pxRegions ) FREERTOS_SYSTEM_CALL; +void MPU_vTaskDelete( TaskHandle_t xTaskToDelete ) FREERTOS_SYSTEM_CALL; +void MPU_vTaskDelay( const TickType_t xTicksToDelay ) FREERTOS_SYSTEM_CALL; +void MPU_vTaskDelayUntil( TickType_t * const pxPreviousWakeTime, const TickType_t xTimeIncrement ) FREERTOS_SYSTEM_CALL; +BaseType_t MPU_xTaskAbortDelay( TaskHandle_t xTask ) FREERTOS_SYSTEM_CALL; +UBaseType_t MPU_uxTaskPriorityGet( const TaskHandle_t xTask ) FREERTOS_SYSTEM_CALL; +eTaskState MPU_eTaskGetState( TaskHandle_t xTask ) FREERTOS_SYSTEM_CALL; +void MPU_vTaskGetInfo( TaskHandle_t xTask, TaskStatus_t *pxTaskStatus, BaseType_t xGetFreeStackSpace, eTaskState eState ) FREERTOS_SYSTEM_CALL; +void MPU_vTaskPrioritySet( TaskHandle_t xTask, UBaseType_t uxNewPriority ) FREERTOS_SYSTEM_CALL; +void MPU_vTaskSuspend( TaskHandle_t xTaskToSuspend ) FREERTOS_SYSTEM_CALL; +void MPU_vTaskResume( TaskHandle_t xTaskToResume ) FREERTOS_SYSTEM_CALL; +void MPU_vTaskStartScheduler( void ) FREERTOS_SYSTEM_CALL; +void MPU_vTaskSuspendAll( void ) FREERTOS_SYSTEM_CALL; +BaseType_t MPU_xTaskResumeAll( void ) FREERTOS_SYSTEM_CALL; +TickType_t MPU_xTaskGetTickCount( void ) FREERTOS_SYSTEM_CALL; +UBaseType_t MPU_uxTaskGetNumberOfTasks( void ) FREERTOS_SYSTEM_CALL; +char * MPU_pcTaskGetName( TaskHandle_t xTaskToQuery ) FREERTOS_SYSTEM_CALL; +TaskHandle_t MPU_xTaskGetHandle( const char *pcNameToQuery ) FREERTOS_SYSTEM_CALL; +UBaseType_t MPU_uxTaskGetStackHighWaterMark( TaskHandle_t xTask ) FREERTOS_SYSTEM_CALL; +configSTACK_DEPTH_TYPE MPU_uxTaskGetStackHighWaterMark2( TaskHandle_t xTask ) FREERTOS_SYSTEM_CALL; +void MPU_vTaskSetApplicationTaskTag( TaskHandle_t xTask, TaskHookFunction_t pxHookFunction ) FREERTOS_SYSTEM_CALL; +TaskHookFunction_t MPU_xTaskGetApplicationTaskTag( TaskHandle_t xTask ) FREERTOS_SYSTEM_CALL; +void MPU_vTaskSetThreadLocalStoragePointer( TaskHandle_t xTaskToSet, BaseType_t xIndex, void *pvValue ) FREERTOS_SYSTEM_CALL; +void * MPU_pvTaskGetThreadLocalStoragePointer( TaskHandle_t xTaskToQuery, BaseType_t xIndex ) FREERTOS_SYSTEM_CALL; +BaseType_t MPU_xTaskCallApplicationTaskHook( 
TaskHandle_t xTask, void *pvParameter ) FREERTOS_SYSTEM_CALL; +TaskHandle_t MPU_xTaskGetIdleTaskHandle( void ) FREERTOS_SYSTEM_CALL; +UBaseType_t MPU_uxTaskGetSystemState( TaskStatus_t * const pxTaskStatusArray, const UBaseType_t uxArraySize, uint32_t * const pulTotalRunTime ) FREERTOS_SYSTEM_CALL; +TickType_t MPU_xTaskGetIdleRunTimeCounter( void ) FREERTOS_SYSTEM_CALL; +void MPU_vTaskList( char * pcWriteBuffer ) FREERTOS_SYSTEM_CALL; +void MPU_vTaskGetRunTimeStats( char *pcWriteBuffer ) FREERTOS_SYSTEM_CALL; +BaseType_t MPU_xTaskGenericNotify( TaskHandle_t xTaskToNotify, uint32_t ulValue, eNotifyAction eAction, uint32_t *pulPreviousNotificationValue ) FREERTOS_SYSTEM_CALL; +BaseType_t MPU_xTaskNotifyWait( uint32_t ulBitsToClearOnEntry, uint32_t ulBitsToClearOnExit, uint32_t *pulNotificationValue, TickType_t xTicksToWait ) FREERTOS_SYSTEM_CALL; +uint32_t MPU_ulTaskNotifyTake( BaseType_t xClearCountOnExit, TickType_t xTicksToWait ) FREERTOS_SYSTEM_CALL; +BaseType_t MPU_xTaskNotifyStateClear( TaskHandle_t xTask ) FREERTOS_SYSTEM_CALL; +BaseType_t MPU_xTaskIncrementTick( void ) FREERTOS_SYSTEM_CALL; +TaskHandle_t MPU_xTaskGetCurrentTaskHandle( void ) FREERTOS_SYSTEM_CALL; +void MPU_vTaskSetTimeOutState( TimeOut_t * const pxTimeOut ) FREERTOS_SYSTEM_CALL; +BaseType_t MPU_xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut, TickType_t * const pxTicksToWait ) FREERTOS_SYSTEM_CALL; +void MPU_vTaskMissedYield( void ) FREERTOS_SYSTEM_CALL; +BaseType_t MPU_xTaskGetSchedulerState( void ) FREERTOS_SYSTEM_CALL; + +/* MPU versions of queue.h API functions. */ +BaseType_t MPU_xQueueGenericSend( QueueHandle_t xQueue, const void * const pvItemToQueue, TickType_t xTicksToWait, const BaseType_t xCopyPosition ) FREERTOS_SYSTEM_CALL; +BaseType_t MPU_xQueueReceive( QueueHandle_t xQueue, void * const pvBuffer, TickType_t xTicksToWait ) FREERTOS_SYSTEM_CALL; +BaseType_t MPU_xQueuePeek( QueueHandle_t xQueue, void * const pvBuffer, TickType_t xTicksToWait ) FREERTOS_SYSTEM_CALL; +BaseType_t MPU_xQueueSemaphoreTake( QueueHandle_t xQueue, TickType_t xTicksToWait ) FREERTOS_SYSTEM_CALL; +UBaseType_t MPU_uxQueueMessagesWaiting( const QueueHandle_t xQueue ) FREERTOS_SYSTEM_CALL; +UBaseType_t MPU_uxQueueSpacesAvailable( const QueueHandle_t xQueue ) FREERTOS_SYSTEM_CALL; +void MPU_vQueueDelete( QueueHandle_t xQueue ) FREERTOS_SYSTEM_CALL; +QueueHandle_t MPU_xQueueCreateMutex( const uint8_t ucQueueType ) FREERTOS_SYSTEM_CALL; +QueueHandle_t MPU_xQueueCreateMutexStatic( const uint8_t ucQueueType, StaticQueue_t *pxStaticQueue ) FREERTOS_SYSTEM_CALL; +QueueHandle_t MPU_xQueueCreateCountingSemaphore( const UBaseType_t uxMaxCount, const UBaseType_t uxInitialCount ) FREERTOS_SYSTEM_CALL; +QueueHandle_t MPU_xQueueCreateCountingSemaphoreStatic( const UBaseType_t uxMaxCount, const UBaseType_t uxInitialCount, StaticQueue_t *pxStaticQueue ) FREERTOS_SYSTEM_CALL; +TaskHandle_t MPU_xQueueGetMutexHolder( QueueHandle_t xSemaphore ) FREERTOS_SYSTEM_CALL; +BaseType_t MPU_xQueueTakeMutexRecursive( QueueHandle_t xMutex, TickType_t xTicksToWait ) FREERTOS_SYSTEM_CALL; +BaseType_t MPU_xQueueGiveMutexRecursive( QueueHandle_t pxMutex ) FREERTOS_SYSTEM_CALL; +void MPU_vQueueAddToRegistry( QueueHandle_t xQueue, const char *pcName ) FREERTOS_SYSTEM_CALL; +void MPU_vQueueUnregisterQueue( QueueHandle_t xQueue ) FREERTOS_SYSTEM_CALL; +const char * MPU_pcQueueGetName( QueueHandle_t xQueue ) FREERTOS_SYSTEM_CALL; +QueueHandle_t MPU_xQueueGenericCreate( const UBaseType_t uxQueueLength, const UBaseType_t uxItemSize, const uint8_t ucQueueType ) 
FREERTOS_SYSTEM_CALL; +QueueHandle_t MPU_xQueueGenericCreateStatic( const UBaseType_t uxQueueLength, const UBaseType_t uxItemSize, uint8_t *pucQueueStorage, StaticQueue_t *pxStaticQueue, const uint8_t ucQueueType ) FREERTOS_SYSTEM_CALL; +QueueSetHandle_t MPU_xQueueCreateSet( const UBaseType_t uxEventQueueLength ) FREERTOS_SYSTEM_CALL; +BaseType_t MPU_xQueueAddToSet( QueueSetMemberHandle_t xQueueOrSemaphore, QueueSetHandle_t xQueueSet ) FREERTOS_SYSTEM_CALL; +BaseType_t MPU_xQueueRemoveFromSet( QueueSetMemberHandle_t xQueueOrSemaphore, QueueSetHandle_t xQueueSet ) FREERTOS_SYSTEM_CALL; +QueueSetMemberHandle_t MPU_xQueueSelectFromSet( QueueSetHandle_t xQueueSet, const TickType_t xTicksToWait ) FREERTOS_SYSTEM_CALL; +BaseType_t MPU_xQueueGenericReset( QueueHandle_t xQueue, BaseType_t xNewQueue ) FREERTOS_SYSTEM_CALL; +void MPU_vQueueSetQueueNumber( QueueHandle_t xQueue, UBaseType_t uxQueueNumber ) FREERTOS_SYSTEM_CALL; +UBaseType_t MPU_uxQueueGetQueueNumber( QueueHandle_t xQueue ) FREERTOS_SYSTEM_CALL; +uint8_t MPU_ucQueueGetQueueType( QueueHandle_t xQueue ) FREERTOS_SYSTEM_CALL; + +/* MPU versions of timers.h API functions. */ +TimerHandle_t MPU_xTimerCreate( const char * const pcTimerName, const TickType_t xTimerPeriodInTicks, const UBaseType_t uxAutoReload, void * const pvTimerID, TimerCallbackFunction_t pxCallbackFunction ) FREERTOS_SYSTEM_CALL; +TimerHandle_t MPU_xTimerCreateStatic( const char * const pcTimerName, const TickType_t xTimerPeriodInTicks, const UBaseType_t uxAutoReload, void * const pvTimerID, TimerCallbackFunction_t pxCallbackFunction, StaticTimer_t *pxTimerBuffer ) FREERTOS_SYSTEM_CALL; +void * MPU_pvTimerGetTimerID( const TimerHandle_t xTimer ) FREERTOS_SYSTEM_CALL; +void MPU_vTimerSetTimerID( TimerHandle_t xTimer, void *pvNewID ) FREERTOS_SYSTEM_CALL; +BaseType_t MPU_xTimerIsTimerActive( TimerHandle_t xTimer ) FREERTOS_SYSTEM_CALL; +TaskHandle_t MPU_xTimerGetTimerDaemonTaskHandle( void ) FREERTOS_SYSTEM_CALL; +BaseType_t MPU_xTimerPendFunctionCall( PendedFunction_t xFunctionToPend, void *pvParameter1, uint32_t ulParameter2, TickType_t xTicksToWait ) FREERTOS_SYSTEM_CALL; +const char * MPU_pcTimerGetName( TimerHandle_t xTimer ) FREERTOS_SYSTEM_CALL; +void MPU_vTimerSetReloadMode( TimerHandle_t xTimer, const UBaseType_t uxAutoReload ) FREERTOS_SYSTEM_CALL; +TickType_t MPU_xTimerGetPeriod( TimerHandle_t xTimer ) FREERTOS_SYSTEM_CALL; +TickType_t MPU_xTimerGetExpiryTime( TimerHandle_t xTimer ) FREERTOS_SYSTEM_CALL; +BaseType_t MPU_xTimerCreateTimerTask( void ) FREERTOS_SYSTEM_CALL; +BaseType_t MPU_xTimerGenericCommand( TimerHandle_t xTimer, const BaseType_t xCommandID, const TickType_t xOptionalValue, BaseType_t * const pxHigherPriorityTaskWoken, const TickType_t xTicksToWait ) FREERTOS_SYSTEM_CALL; + +/* MPU versions of event_group.h API functions. 
*/ +EventGroupHandle_t MPU_xEventGroupCreate( void ) FREERTOS_SYSTEM_CALL; +EventGroupHandle_t MPU_xEventGroupCreateStatic( StaticEventGroup_t *pxEventGroupBuffer ) FREERTOS_SYSTEM_CALL; +EventBits_t MPU_xEventGroupWaitBits( EventGroupHandle_t xEventGroup, const EventBits_t uxBitsToWaitFor, const BaseType_t xClearOnExit, const BaseType_t xWaitForAllBits, TickType_t xTicksToWait ) FREERTOS_SYSTEM_CALL; +EventBits_t MPU_xEventGroupClearBits( EventGroupHandle_t xEventGroup, const EventBits_t uxBitsToClear ) FREERTOS_SYSTEM_CALL; +EventBits_t MPU_xEventGroupSetBits( EventGroupHandle_t xEventGroup, const EventBits_t uxBitsToSet ) FREERTOS_SYSTEM_CALL; +EventBits_t MPU_xEventGroupSync( EventGroupHandle_t xEventGroup, const EventBits_t uxBitsToSet, const EventBits_t uxBitsToWaitFor, TickType_t xTicksToWait ) FREERTOS_SYSTEM_CALL; +void MPU_vEventGroupDelete( EventGroupHandle_t xEventGroup ) FREERTOS_SYSTEM_CALL; +UBaseType_t MPU_uxEventGroupGetNumber( void* xEventGroup ) FREERTOS_SYSTEM_CALL; + +/* MPU versions of message/stream_buffer.h API functions. */ +size_t MPU_xStreamBufferSend( StreamBufferHandle_t xStreamBuffer, const void *pvTxData, size_t xDataLengthBytes, TickType_t xTicksToWait ) FREERTOS_SYSTEM_CALL; +size_t MPU_xStreamBufferReceive( StreamBufferHandle_t xStreamBuffer, void *pvRxData, size_t xBufferLengthBytes, TickType_t xTicksToWait ) FREERTOS_SYSTEM_CALL; +size_t MPU_xStreamBufferNextMessageLengthBytes( StreamBufferHandle_t xStreamBuffer ) FREERTOS_SYSTEM_CALL; +void MPU_vStreamBufferDelete( StreamBufferHandle_t xStreamBuffer ) FREERTOS_SYSTEM_CALL; +BaseType_t MPU_xStreamBufferIsFull( StreamBufferHandle_t xStreamBuffer ) FREERTOS_SYSTEM_CALL; +BaseType_t MPU_xStreamBufferIsEmpty( StreamBufferHandle_t xStreamBuffer ) FREERTOS_SYSTEM_CALL; +BaseType_t MPU_xStreamBufferReset( StreamBufferHandle_t xStreamBuffer ) FREERTOS_SYSTEM_CALL; +size_t MPU_xStreamBufferSpacesAvailable( StreamBufferHandle_t xStreamBuffer ) FREERTOS_SYSTEM_CALL; +size_t MPU_xStreamBufferBytesAvailable( StreamBufferHandle_t xStreamBuffer ) FREERTOS_SYSTEM_CALL; +BaseType_t MPU_xStreamBufferSetTriggerLevel( StreamBufferHandle_t xStreamBuffer, size_t xTriggerLevel ) FREERTOS_SYSTEM_CALL; +StreamBufferHandle_t MPU_xStreamBufferGenericCreate( size_t xBufferSizeBytes, size_t xTriggerLevelBytes, BaseType_t xIsMessageBuffer ) FREERTOS_SYSTEM_CALL; +StreamBufferHandle_t MPU_xStreamBufferGenericCreateStatic( size_t xBufferSizeBytes, size_t xTriggerLevelBytes, BaseType_t xIsMessageBuffer, uint8_t * const pucStreamBufferStorageArea, StaticStreamBuffer_t * const pxStaticStreamBuffer ) FREERTOS_SYSTEM_CALL; + + + +#endif /* MPU_PROTOTYPES_H */ + diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/FreeRTOS/Source/include/mpu_wrappers.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/FreeRTOS/Source/include/mpu_wrappers.h new file mode 100644 index 0000000..56d9c7e --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/FreeRTOS/Source/include/mpu_wrappers.h @@ -0,0 +1,186 @@ +/* + * FreeRTOS Kernel V10.2.1 + * Copyright (C) 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * http://www.FreeRTOS.org + * http://aws.amazon.com/freertos + * + * 1 tab == 4 spaces! + */ + +#ifndef MPU_WRAPPERS_H +#define MPU_WRAPPERS_H + +/* This file redefines API functions to be called through a wrapper macro, but +only for ports that are using the MPU. */ +#ifdef portUSING_MPU_WRAPPERS + + /* MPU_WRAPPERS_INCLUDED_FROM_API_FILE will be defined when this file is + included from queue.c or task.c to prevent it from having an effect within + those files. */ + #ifndef MPU_WRAPPERS_INCLUDED_FROM_API_FILE + + /* + * Map standard (non MPU) API functions to equivalents that start + * "MPU_". This will cause the application code to call the MPU_ + * version, which wraps the non-MPU version with privilege promoting + * then demoting code, so the kernel code always runs will full + * privileges. + */ + + /* Map standard tasks.h API functions to the MPU equivalents. 
*/ + #define xTaskCreate MPU_xTaskCreate + #define xTaskCreateStatic MPU_xTaskCreateStatic + #define xTaskCreateRestricted MPU_xTaskCreateRestricted + #define vTaskAllocateMPURegions MPU_vTaskAllocateMPURegions + #define vTaskDelete MPU_vTaskDelete + #define vTaskDelay MPU_vTaskDelay + #define vTaskDelayUntil MPU_vTaskDelayUntil + #define xTaskAbortDelay MPU_xTaskAbortDelay + #define uxTaskPriorityGet MPU_uxTaskPriorityGet + #define eTaskGetState MPU_eTaskGetState + #define vTaskGetInfo MPU_vTaskGetInfo + #define vTaskPrioritySet MPU_vTaskPrioritySet + #define vTaskSuspend MPU_vTaskSuspend + #define vTaskResume MPU_vTaskResume + #define vTaskSuspendAll MPU_vTaskSuspendAll + #define xTaskResumeAll MPU_xTaskResumeAll + #define xTaskGetTickCount MPU_xTaskGetTickCount + #define uxTaskGetNumberOfTasks MPU_uxTaskGetNumberOfTasks + #define pcTaskGetName MPU_pcTaskGetName + #define xTaskGetHandle MPU_xTaskGetHandle + #define uxTaskGetStackHighWaterMark MPU_uxTaskGetStackHighWaterMark + #define uxTaskGetStackHighWaterMark2 MPU_uxTaskGetStackHighWaterMark2 + #define vTaskSetApplicationTaskTag MPU_vTaskSetApplicationTaskTag + #define xTaskGetApplicationTaskTag MPU_xTaskGetApplicationTaskTag + #define vTaskSetThreadLocalStoragePointer MPU_vTaskSetThreadLocalStoragePointer + #define pvTaskGetThreadLocalStoragePointer MPU_pvTaskGetThreadLocalStoragePointer + #define xTaskCallApplicationTaskHook MPU_xTaskCallApplicationTaskHook + #define xTaskGetIdleTaskHandle MPU_xTaskGetIdleTaskHandle + #define uxTaskGetSystemState MPU_uxTaskGetSystemState + #define vTaskList MPU_vTaskList + #define vTaskGetRunTimeStats MPU_vTaskGetRunTimeStats + #define xTaskGetIdleRunTimeCounter MPU_xTaskGetIdleRunTimeCounter + #define xTaskGenericNotify MPU_xTaskGenericNotify + #define xTaskNotifyWait MPU_xTaskNotifyWait + #define ulTaskNotifyTake MPU_ulTaskNotifyTake + #define xTaskNotifyStateClear MPU_xTaskNotifyStateClear + + #define xTaskGetCurrentTaskHandle MPU_xTaskGetCurrentTaskHandle + #define vTaskSetTimeOutState MPU_vTaskSetTimeOutState + #define xTaskCheckForTimeOut MPU_xTaskCheckForTimeOut + #define xTaskGetSchedulerState MPU_xTaskGetSchedulerState + + /* Map standard queue.h API functions to the MPU equivalents. 
*/ + #define xQueueGenericSend MPU_xQueueGenericSend + #define xQueueReceive MPU_xQueueReceive + #define xQueuePeek MPU_xQueuePeek + #define xQueueSemaphoreTake MPU_xQueueSemaphoreTake + #define uxQueueMessagesWaiting MPU_uxQueueMessagesWaiting + #define uxQueueSpacesAvailable MPU_uxQueueSpacesAvailable + #define vQueueDelete MPU_vQueueDelete + #define xQueueCreateMutex MPU_xQueueCreateMutex + #define xQueueCreateMutexStatic MPU_xQueueCreateMutexStatic + #define xQueueCreateCountingSemaphore MPU_xQueueCreateCountingSemaphore + #define xQueueCreateCountingSemaphoreStatic MPU_xQueueCreateCountingSemaphoreStatic + #define xQueueGetMutexHolder MPU_xQueueGetMutexHolder + #define xQueueTakeMutexRecursive MPU_xQueueTakeMutexRecursive + #define xQueueGiveMutexRecursive MPU_xQueueGiveMutexRecursive + #define xQueueGenericCreate MPU_xQueueGenericCreate + #define xQueueGenericCreateStatic MPU_xQueueGenericCreateStatic + #define xQueueCreateSet MPU_xQueueCreateSet + #define xQueueAddToSet MPU_xQueueAddToSet + #define xQueueRemoveFromSet MPU_xQueueRemoveFromSet + #define xQueueSelectFromSet MPU_xQueueSelectFromSet + #define xQueueGenericReset MPU_xQueueGenericReset + + #if( configQUEUE_REGISTRY_SIZE > 0 ) + #define vQueueAddToRegistry MPU_vQueueAddToRegistry + #define vQueueUnregisterQueue MPU_vQueueUnregisterQueue + #define pcQueueGetName MPU_pcQueueGetName + #endif + + /* Map standard timer.h API functions to the MPU equivalents. */ + #define xTimerCreate MPU_xTimerCreate + #define xTimerCreateStatic MPU_xTimerCreateStatic + #define pvTimerGetTimerID MPU_pvTimerGetTimerID + #define vTimerSetTimerID MPU_vTimerSetTimerID + #define xTimerIsTimerActive MPU_xTimerIsTimerActive + #define xTimerGetTimerDaemonTaskHandle MPU_xTimerGetTimerDaemonTaskHandle + #define xTimerPendFunctionCall MPU_xTimerPendFunctionCall + #define pcTimerGetName MPU_pcTimerGetName + #define vTimerSetReloadMode MPU_vTimerSetReloadMode + #define xTimerGetPeriod MPU_xTimerGetPeriod + #define xTimerGetExpiryTime MPU_xTimerGetExpiryTime + #define xTimerGenericCommand MPU_xTimerGenericCommand + + /* Map standard event_group.h API functions to the MPU equivalents. */ + #define xEventGroupCreate MPU_xEventGroupCreate + #define xEventGroupCreateStatic MPU_xEventGroupCreateStatic + #define xEventGroupWaitBits MPU_xEventGroupWaitBits + #define xEventGroupClearBits MPU_xEventGroupClearBits + #define xEventGroupSetBits MPU_xEventGroupSetBits + #define xEventGroupSync MPU_xEventGroupSync + #define vEventGroupDelete MPU_vEventGroupDelete + + /* Map standard message/stream_buffer.h API functions to the MPU + equivalents. 
*/ + #define xStreamBufferSend MPU_xStreamBufferSend + #define xStreamBufferReceive MPU_xStreamBufferReceive + #define xStreamBufferNextMessageLengthBytes MPU_xStreamBufferNextMessageLengthBytes + #define vStreamBufferDelete MPU_vStreamBufferDelete + #define xStreamBufferIsFull MPU_xStreamBufferIsFull + #define xStreamBufferIsEmpty MPU_xStreamBufferIsEmpty + #define xStreamBufferReset MPU_xStreamBufferReset + #define xStreamBufferSpacesAvailable MPU_xStreamBufferSpacesAvailable + #define xStreamBufferBytesAvailable MPU_xStreamBufferBytesAvailable + #define xStreamBufferSetTriggerLevel MPU_xStreamBufferSetTriggerLevel + #define xStreamBufferGenericCreate MPU_xStreamBufferGenericCreate + #define xStreamBufferGenericCreateStatic MPU_xStreamBufferGenericCreateStatic + + + /* Remove the privileged function macro, but keep the PRIVILEGED_DATA + macro so applications can place data in privileged access sections + (useful when using statically allocated objects). */ + #define PRIVILEGED_FUNCTION + #define PRIVILEGED_DATA __attribute__((section("privileged_data"))) + #define FREERTOS_SYSTEM_CALL + + #else /* MPU_WRAPPERS_INCLUDED_FROM_API_FILE */ + + /* Ensure API functions go in the privileged execution section. */ + #define PRIVILEGED_FUNCTION __attribute__((section("privileged_functions"))) + #define PRIVILEGED_DATA __attribute__((section("privileged_data"))) + #define FREERTOS_SYSTEM_CALL __attribute__((section( "freertos_system_calls"))) + + #endif /* MPU_WRAPPERS_INCLUDED_FROM_API_FILE */ + +#else /* portUSING_MPU_WRAPPERS */ + + #define PRIVILEGED_FUNCTION + #define PRIVILEGED_DATA + #define FREERTOS_SYSTEM_CALL + #define portUSING_MPU_WRAPPERS 0 + +#endif /* portUSING_MPU_WRAPPERS */ + + +#endif /* MPU_WRAPPERS_H */ + diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/FreeRTOS/Source/include/portable.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/FreeRTOS/Source/include/portable.h new file mode 100644 index 0000000..a1bb449 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/FreeRTOS/Source/include/portable.h @@ -0,0 +1,181 @@ +/* + * FreeRTOS Kernel V10.2.1 + * Copyright (C) 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * http://www.FreeRTOS.org + * http://aws.amazon.com/freertos + * + * 1 tab == 4 spaces! 
+ */ + +/*----------------------------------------------------------- + * Portable layer API. Each function must be defined for each port. + *----------------------------------------------------------*/ + +#ifndef PORTABLE_H +#define PORTABLE_H + +/* Each FreeRTOS port has a unique portmacro.h header file. Originally a +pre-processor definition was used to ensure the pre-processor found the correct +portmacro.h file for the port being used. That scheme was deprecated in favour +of setting the compiler's include path such that it found the correct +portmacro.h file - removing the need for the constant and allowing the +portmacro.h file to be located anywhere in relation to the port being used. +Purely for reasons of backward compatibility the old method is still valid, but +to make it clear that new projects should not use it, support for the port +specific constants has been moved into the deprecated_definitions.h header +file. */ +#include "deprecated_definitions.h" + +/* If portENTER_CRITICAL is not defined then including deprecated_definitions.h +did not result in a portmacro.h header file being included - and it should be +included here. In this case the path to the correct portmacro.h header file +must be set in the compiler's include path. */ +#ifndef portENTER_CRITICAL + #include "portmacro.h" +#endif + +#if portBYTE_ALIGNMENT == 32 + #define portBYTE_ALIGNMENT_MASK ( 0x001f ) +#endif + +#if portBYTE_ALIGNMENT == 16 + #define portBYTE_ALIGNMENT_MASK ( 0x000f ) +#endif + +#if portBYTE_ALIGNMENT == 8 + #define portBYTE_ALIGNMENT_MASK ( 0x0007 ) +#endif + +#if portBYTE_ALIGNMENT == 4 + #define portBYTE_ALIGNMENT_MASK ( 0x0003 ) +#endif + +#if portBYTE_ALIGNMENT == 2 + #define portBYTE_ALIGNMENT_MASK ( 0x0001 ) +#endif + +#if portBYTE_ALIGNMENT == 1 + #define portBYTE_ALIGNMENT_MASK ( 0x0000 ) +#endif + +#ifndef portBYTE_ALIGNMENT_MASK + #error "Invalid portBYTE_ALIGNMENT definition" +#endif + +#ifndef portNUM_CONFIGURABLE_REGIONS + #define portNUM_CONFIGURABLE_REGIONS 1 +#endif + +#ifndef portHAS_STACK_OVERFLOW_CHECKING + #define portHAS_STACK_OVERFLOW_CHECKING 0 +#endif + +#ifndef portARCH_NAME + #define portARCH_NAME NULL +#endif + +#ifdef __cplusplus +extern "C" { +#endif + +#include "mpu_wrappers.h" + +/* + * Setup the stack of a new task so it is ready to be placed under the + * scheduler control. The registers have to be placed on the stack in + * the order that the port expects to find them. + * + */ +#if( portUSING_MPU_WRAPPERS == 1 ) + #if( portHAS_STACK_OVERFLOW_CHECKING == 1 ) + StackType_t *pxPortInitialiseStack( StackType_t *pxTopOfStack, StackType_t *pxEndOfStack, TaskFunction_t pxCode, void *pvParameters, BaseType_t xRunPrivileged ) PRIVILEGED_FUNCTION; + #else + StackType_t *pxPortInitialiseStack( StackType_t *pxTopOfStack, TaskFunction_t pxCode, void *pvParameters, BaseType_t xRunPrivileged ) PRIVILEGED_FUNCTION; + #endif +#else + #if( portHAS_STACK_OVERFLOW_CHECKING == 1 ) + StackType_t *pxPortInitialiseStack( StackType_t *pxTopOfStack, StackType_t *pxEndOfStack, TaskFunction_t pxCode, void *pvParameters ) PRIVILEGED_FUNCTION; + #else + StackType_t *pxPortInitialiseStack( StackType_t *pxTopOfStack, TaskFunction_t pxCode, void *pvParameters ) PRIVILEGED_FUNCTION; + #endif +#endif + +/* Used by heap_5.c. */ +typedef struct HeapRegion +{ + uint8_t *pucStartAddress; + size_t xSizeInBytes; +} HeapRegion_t; + +/* + * Used to define multiple heap regions for use by heap_5.c. 
This function + * must be called before any calls to pvPortMalloc() - not creating a task, + * queue, semaphore, mutex, software timer, event group, etc. will result in + * pvPortMalloc being called. + * + * pxHeapRegions passes in an array of HeapRegion_t structures - each of which + * defines a region of memory that can be used as the heap. The array is + * terminated by a HeapRegions_t structure that has a size of 0. The region + * with the lowest start address must appear first in the array. + */ +void vPortDefineHeapRegions( const HeapRegion_t * const pxHeapRegions ) PRIVILEGED_FUNCTION; + + +/* + * Map to the memory management routines required for the port. + */ +void *pvPortMalloc( size_t xSize ) PRIVILEGED_FUNCTION; +void vPortFree( void *pv ) PRIVILEGED_FUNCTION; +void vPortInitialiseBlocks( void ) PRIVILEGED_FUNCTION; +size_t xPortGetFreeHeapSize( void ) PRIVILEGED_FUNCTION; +size_t xPortGetMinimumEverFreeHeapSize( void ) PRIVILEGED_FUNCTION; + +/* + * Setup the hardware ready for the scheduler to take control. This generally + * sets up a tick interrupt and sets timers for the correct tick frequency. + */ +BaseType_t xPortStartScheduler( void ) PRIVILEGED_FUNCTION; + +/* + * Undo any hardware/ISR setup that was performed by xPortStartScheduler() so + * the hardware is left in its original condition after the scheduler stops + * executing. + */ +void vPortEndScheduler( void ) PRIVILEGED_FUNCTION; + +/* + * The structures and methods of manipulating the MPU are contained within the + * port layer. + * + * Fills the xMPUSettings structure with the memory region information + * contained in xRegions. + */ +#if( portUSING_MPU_WRAPPERS == 1 ) + struct xMEMORY_REGION; + void vPortStoreTaskMPUSettings( xMPU_SETTINGS *xMPUSettings, const struct xMEMORY_REGION * const xRegions, StackType_t *pxBottomOfStack, uint32_t ulStackDepth ) PRIVILEGED_FUNCTION; +#endif + +#ifdef __cplusplus +} +#endif + +#endif /* PORTABLE_H */ + diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/FreeRTOS/Source/include/projdefs.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/FreeRTOS/Source/include/projdefs.h new file mode 100644 index 0000000..dbcf9bb --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/FreeRTOS/Source/include/projdefs.h @@ -0,0 +1,124 @@ +/* + * FreeRTOS Kernel V10.2.1 + * Copyright (C) 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * http://www.FreeRTOS.org + * http://aws.amazon.com/freertos + * + * 1 tab == 4 spaces! + */ + +#ifndef PROJDEFS_H +#define PROJDEFS_H + +/* + * Defines the prototype to which task functions must conform. Defined in this + * file to ensure the type is known before portable.h is included. + */ +typedef void (*TaskFunction_t)( void * ); + +/* Converts a time in milliseconds to a time in ticks. This macro can be +overridden by a macro of the same name defined in FreeRTOSConfig.h in case the +definition here is not suitable for your application. */ +#ifndef pdMS_TO_TICKS + #define pdMS_TO_TICKS( xTimeInMs ) ( ( TickType_t ) ( ( ( TickType_t ) ( xTimeInMs ) * ( TickType_t ) configTICK_RATE_HZ ) / ( TickType_t ) 1000 ) ) +#endif + +#define pdFALSE ( ( BaseType_t ) 0 ) +#define pdTRUE ( ( BaseType_t ) 1 ) + +#define pdPASS ( pdTRUE ) +#define pdFAIL ( pdFALSE ) +#define errQUEUE_EMPTY ( ( BaseType_t ) 0 ) +#define errQUEUE_FULL ( ( BaseType_t ) 0 ) + +/* FreeRTOS error definitions. */ +#define errCOULD_NOT_ALLOCATE_REQUIRED_MEMORY ( -1 ) +#define errQUEUE_BLOCKED ( -4 ) +#define errQUEUE_YIELD ( -5 ) + +/* Macros used for basic data corruption checks. */ +#ifndef configUSE_LIST_DATA_INTEGRITY_CHECK_BYTES + #define configUSE_LIST_DATA_INTEGRITY_CHECK_BYTES 0 +#endif + +#if( configUSE_16_BIT_TICKS == 1 ) + #define pdINTEGRITY_CHECK_VALUE 0x5a5a +#else + #define pdINTEGRITY_CHECK_VALUE 0x5a5a5a5aUL +#endif + +/* The following errno values are used by FreeRTOS+ components, not FreeRTOS +itself. 
*/ +#define pdFREERTOS_ERRNO_NONE 0 /* No errors */ +#define pdFREERTOS_ERRNO_ENOENT 2 /* No such file or directory */ +#define pdFREERTOS_ERRNO_EINTR 4 /* Interrupted system call */ +#define pdFREERTOS_ERRNO_EIO 5 /* I/O error */ +#define pdFREERTOS_ERRNO_ENXIO 6 /* No such device or address */ +#define pdFREERTOS_ERRNO_EBADF 9 /* Bad file number */ +#define pdFREERTOS_ERRNO_EAGAIN 11 /* No more processes */ +#define pdFREERTOS_ERRNO_EWOULDBLOCK 11 /* Operation would block */ +#define pdFREERTOS_ERRNO_ENOMEM 12 /* Not enough memory */ +#define pdFREERTOS_ERRNO_EACCES 13 /* Permission denied */ +#define pdFREERTOS_ERRNO_EFAULT 14 /* Bad address */ +#define pdFREERTOS_ERRNO_EBUSY 16 /* Mount device busy */ +#define pdFREERTOS_ERRNO_EEXIST 17 /* File exists */ +#define pdFREERTOS_ERRNO_EXDEV 18 /* Cross-device link */ +#define pdFREERTOS_ERRNO_ENODEV 19 /* No such device */ +#define pdFREERTOS_ERRNO_ENOTDIR 20 /* Not a directory */ +#define pdFREERTOS_ERRNO_EISDIR 21 /* Is a directory */ +#define pdFREERTOS_ERRNO_EINVAL 22 /* Invalid argument */ +#define pdFREERTOS_ERRNO_ENOSPC 28 /* No space left on device */ +#define pdFREERTOS_ERRNO_ESPIPE 29 /* Illegal seek */ +#define pdFREERTOS_ERRNO_EROFS 30 /* Read only file system */ +#define pdFREERTOS_ERRNO_EUNATCH 42 /* Protocol driver not attached */ +#define pdFREERTOS_ERRNO_EBADE 50 /* Invalid exchange */ +#define pdFREERTOS_ERRNO_EFTYPE 79 /* Inappropriate file type or format */ +#define pdFREERTOS_ERRNO_ENMFILE 89 /* No more files */ +#define pdFREERTOS_ERRNO_ENOTEMPTY 90 /* Directory not empty */ +#define pdFREERTOS_ERRNO_ENAMETOOLONG 91 /* File or path name too long */ +#define pdFREERTOS_ERRNO_EOPNOTSUPP 95 /* Operation not supported on transport endpoint */ +#define pdFREERTOS_ERRNO_ENOBUFS 105 /* No buffer space available */ +#define pdFREERTOS_ERRNO_ENOPROTOOPT 109 /* Protocol not available */ +#define pdFREERTOS_ERRNO_EADDRINUSE 112 /* Address already in use */ +#define pdFREERTOS_ERRNO_ETIMEDOUT 116 /* Connection timed out */ +#define pdFREERTOS_ERRNO_EINPROGRESS 119 /* Connection already in progress */ +#define pdFREERTOS_ERRNO_EALREADY 120 /* Socket already connected */ +#define pdFREERTOS_ERRNO_EADDRNOTAVAIL 125 /* Address not available */ +#define pdFREERTOS_ERRNO_EISCONN 127 /* Socket is already connected */ +#define pdFREERTOS_ERRNO_ENOTCONN 128 /* Socket is not connected */ +#define pdFREERTOS_ERRNO_ENOMEDIUM 135 /* No medium inserted */ +#define pdFREERTOS_ERRNO_EILSEQ 138 /* An invalid UTF-16 sequence was encountered. */ +#define pdFREERTOS_ERRNO_ECANCELED 140 /* Operation canceled. */ + +/* The following endian values are used by FreeRTOS+ components, not FreeRTOS +itself. */ +#define pdFREERTOS_LITTLE_ENDIAN 0 +#define pdFREERTOS_BIG_ENDIAN 1 + +/* Re-defining endian values for generic naming. */ +#define pdLITTLE_ENDIAN pdFREERTOS_LITTLE_ENDIAN +#define pdBIG_ENDIAN pdFREERTOS_BIG_ENDIAN + + +#endif /* PROJDEFS_H */ + + + diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/FreeRTOS/Source/include/queue.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/FreeRTOS/Source/include/queue.h new file mode 100644 index 0000000..5072c45 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/FreeRTOS/Source/include/queue.h @@ -0,0 +1,1655 @@ +/* + * FreeRTOS Kernel V10.2.1 + * Copyright (C) 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * http://www.FreeRTOS.org + * http://aws.amazon.com/freertos + * + * 1 tab == 4 spaces! + */ + + +#ifndef QUEUE_H +#define QUEUE_H + +#ifndef INC_FREERTOS_H + #error "include FreeRTOS.h" must appear in source files before "include queue.h" +#endif + +#ifdef __cplusplus +extern "C" { +#endif + +#include "task.h" + +/** + * Type by which queues are referenced. For example, a call to xQueueCreate() + * returns an QueueHandle_t variable that can then be used as a parameter to + * xQueueSend(), xQueueReceive(), etc. + */ +struct QueueDefinition; /* Using old naming convention so as not to break kernel aware debuggers. */ +typedef struct QueueDefinition * QueueHandle_t; + +/** + * Type by which queue sets are referenced. For example, a call to + * xQueueCreateSet() returns an xQueueSet variable that can then be used as a + * parameter to xQueueSelectFromSet(), xQueueAddToSet(), etc. + */ +typedef struct QueueDefinition * QueueSetHandle_t; + +/** + * Queue sets can contain both queues and semaphores, so the + * QueueSetMemberHandle_t is defined as a type to be used where a parameter or + * return value can be either an QueueHandle_t or an SemaphoreHandle_t. + */ +typedef struct QueueDefinition * QueueSetMemberHandle_t; + +/* For internal use only. */ +#define queueSEND_TO_BACK ( ( BaseType_t ) 0 ) +#define queueSEND_TO_FRONT ( ( BaseType_t ) 1 ) +#define queueOVERWRITE ( ( BaseType_t ) 2 ) + +/* For internal use only. These definitions *must* match those in queue.c. */ +#define queueQUEUE_TYPE_BASE ( ( uint8_t ) 0U ) +#define queueQUEUE_TYPE_SET ( ( uint8_t ) 0U ) +#define queueQUEUE_TYPE_MUTEX ( ( uint8_t ) 1U ) +#define queueQUEUE_TYPE_COUNTING_SEMAPHORE ( ( uint8_t ) 2U ) +#define queueQUEUE_TYPE_BINARY_SEMAPHORE ( ( uint8_t ) 3U ) +#define queueQUEUE_TYPE_RECURSIVE_MUTEX ( ( uint8_t ) 4U ) + +/** + * queue. h + *
+ QueueHandle_t xQueueCreate(
+							  UBaseType_t uxQueueLength,
+							  UBaseType_t uxItemSize
+						  );
+ * 
+ * + * Creates a new queue instance, and returns a handle by which the new queue + * can be referenced. + * + * Internally, within the FreeRTOS implementation, queues use two blocks of + * memory. The first block is used to hold the queue's data structures. The + * second block is used to hold items placed into the queue. If a queue is + * created using xQueueCreate() then both blocks of memory are automatically + * dynamically allocated inside the xQueueCreate() function. (see + * http://www.freertos.org/a00111.html). If a queue is created using + * xQueueCreateStatic() then the application writer must provide the memory that + * will get used by the queue. xQueueCreateStatic() therefore allows a queue to + * be created without using any dynamic memory allocation. + * + * http://www.FreeRTOS.org/Embedded-RTOS-Queues.html + * + * @param uxQueueLength The maximum number of items that the queue can contain. + * + * @param uxItemSize The number of bytes each item in the queue will require. + * Items are queued by copy, not by reference, so this is the number of bytes + * that will be copied for each posted item. Each item on the queue must be + * the same size. + * + * @return If the queue is successfully create then a handle to the newly + * created queue is returned. If the queue cannot be created then 0 is + * returned. + * + * Example usage: +
+ struct AMessage
+ {
+	char ucMessageID;
+	char ucData[ 20 ];
+ };
+
+ void vATask( void *pvParameters )
+ {
+ QueueHandle_t xQueue1, xQueue2;
+
+	// Create a queue capable of containing 10 uint32_t values.
+	xQueue1 = xQueueCreate( 10, sizeof( uint32_t ) );
+	if( xQueue1 == 0 )
+	{
+		// Queue was not created and must not be used.
+	}
+
+	// Create a queue capable of containing 10 pointers to AMessage structures.
+	// These should be passed by pointer as they contain a lot of data.
+	xQueue2 = xQueueCreate( 10, sizeof( struct AMessage * ) );
+	if( xQueue2 == 0 )
+	{
+		// Queue was not created and must not be used.
+	}
+
+	// ... Rest of task code.
+ }
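+
+ // Illustrative sketch (hypothetical helper, not part of the API above):
+ // because xQueueCreate() allocated both memory blocks dynamically, they can
+ // be returned to the heap with vQueueDelete() once no task uses the queue.
+ void vTearDownQueue( QueueHandle_t xQueueToDelete )
+ {
+	vQueueDelete( xQueueToDelete );
+ }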
+ 
+ * \defgroup xQueueCreate xQueueCreate + * \ingroup QueueManagement + */ +#if( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) + #define xQueueCreate( uxQueueLength, uxItemSize ) xQueueGenericCreate( ( uxQueueLength ), ( uxItemSize ), ( queueQUEUE_TYPE_BASE ) ) +#endif + +/** + * queue. h + *
+ QueueHandle_t xQueueCreateStatic(
+							  UBaseType_t uxQueueLength,
+							  UBaseType_t uxItemSize,
+							  uint8_t *pucQueueStorageBuffer,
+							  StaticQueue_t *pxQueueBuffer
+						  );
+ * 
+ * + * Creates a new queue instance, and returns a handle by which the new queue + * can be referenced. + * + * Internally, within the FreeRTOS implementation, queues use two blocks of + * memory. The first block is used to hold the queue's data structures. The + * second block is used to hold items placed into the queue. If a queue is + * created using xQueueCreate() then both blocks of memory are automatically + * dynamically allocated inside the xQueueCreate() function. (see + * http://www.freertos.org/a00111.html). If a queue is created using + * xQueueCreateStatic() then the application writer must provide the memory that + * will get used by the queue. xQueueCreateStatic() therefore allows a queue to + * be created without using any dynamic memory allocation. + * + * http://www.FreeRTOS.org/Embedded-RTOS-Queues.html + * + * @param uxQueueLength The maximum number of items that the queue can contain. + * + * @param uxItemSize The number of bytes each item in the queue will require. + * Items are queued by copy, not by reference, so this is the number of bytes + * that will be copied for each posted item. Each item on the queue must be + * the same size. + * + * @param pucQueueStorageBuffer If uxItemSize is not zero then + * pucQueueStorageBuffer must point to a uint8_t array that is at least large + * enough to hold the maximum number of items that can be in the queue at any + * one time - which is ( uxQueueLength * uxItemsSize ) bytes. If uxItemSize is + * zero then pucQueueStorageBuffer can be NULL. + * + * @param pxQueueBuffer Must point to a variable of type StaticQueue_t, which + * will be used to hold the queue's data structure. + * + * @return If the queue is created then a handle to the created queue is + * returned. If pxQueueBuffer is NULL then NULL is returned. + * + * Example usage: +
+ struct AMessage
+ {
+	char ucMessageID;
+	char ucData[ 20 ];
+ };
+
+ #define QUEUE_LENGTH 10
+ #define ITEM_SIZE sizeof( uint32_t )
+
+ // xQueueBuffer will hold the queue structure.
+ StaticQueue_t xQueueBuffer;
+
+ // ucQueueStorage will hold the items posted to the queue.  Must be at least
+ // [(queue length) * ( queue item size)] bytes long.
+ uint8_t ucQueueStorage[ QUEUE_LENGTH * ITEM_SIZE ];
+
+ void vATask( void *pvParameters )
+ {
+ QueueHandle_t xQueue1;
+
+	// Create a queue capable of containing 10 uint32_t values.
+	xQueue1 = xQueueCreateStatic( QUEUE_LENGTH, // The number of items the queue can hold.
+							ITEM_SIZE,	  // The size of each item in the queue.
+							&( ucQueueStorage[ 0 ] ), // The buffer that will hold the items in the queue.
+							&xQueueBuffer ); // The buffer that will hold the queue structure.
+
+	// The queue is guaranteed to be created successfully as no dynamic memory
+	// allocation is used.  Therefore xQueue1 is now a handle to a valid queue.
+
+	// ... Rest of task code.
+ }
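+
+ // Hypothetical compile-time guard (sketch, assuming a C11 compiler): the
+ // storage area passed to xQueueCreateStatic() must be at least
+ // ( uxQueueLength * uxItemSize ) bytes, i.e. QUEUE_LENGTH * ITEM_SIZE here.
+ _Static_assert( sizeof( ucQueueStorage ) >= ( QUEUE_LENGTH * ITEM_SIZE ),
+				 "ucQueueStorage is too small for QUEUE_LENGTH items" );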
+ 
+ * \defgroup xQueueCreateStatic xQueueCreateStatic + * \ingroup QueueManagement + */ +#if( configSUPPORT_STATIC_ALLOCATION == 1 ) + #define xQueueCreateStatic( uxQueueLength, uxItemSize, pucQueueStorage, pxQueueBuffer ) xQueueGenericCreateStatic( ( uxQueueLength ), ( uxItemSize ), ( pucQueueStorage ), ( pxQueueBuffer ), ( queueQUEUE_TYPE_BASE ) ) +#endif /* configSUPPORT_STATIC_ALLOCATION */ + +/** + * queue. h + *
+ BaseType_t xQueueSendToFront(
+								   QueueHandle_t	xQueue,
+								   const void		*pvItemToQueue,
+								   TickType_t		xTicksToWait
+							   );
+ * 
+ * + * Post an item to the front of a queue. The item is queued by copy, not by + * reference. This function must not be called from an interrupt service + * routine. See xQueueSendFromISR () for an alternative which may be used + * in an ISR. + * + * @param xQueue The handle to the queue on which the item is to be posted. + * + * @param pvItemToQueue A pointer to the item that is to be placed on the + * queue. The size of the items the queue will hold was defined when the + * queue was created, so this many bytes will be copied from pvItemToQueue + * into the queue storage area. + * + * @param xTicksToWait The maximum amount of time the task should block + * waiting for space to become available on the queue, should it already + * be full. The call will return immediately if this is set to 0 and the + * queue is full. The time is defined in tick periods so the constant + * portTICK_PERIOD_MS should be used to convert to real time if this is required. + * + * @return pdTRUE if the item was successfully posted, otherwise errQUEUE_FULL. + * + * Example usage: +
+ struct AMessage
+ {
+	char ucMessageID;
+	char ucData[ 20 ];
+ } xMessage;
+
+ uint32_t ulVar = 10UL;
+
+ void vATask( void *pvParameters )
+ {
+ QueueHandle_t xQueue1, xQueue2;
+ struct AMessage *pxMessage;
+
+	// Create a queue capable of containing 10 uint32_t values.
+	xQueue1 = xQueueCreate( 10, sizeof( uint32_t ) );
+
+	// Create a queue capable of containing 10 pointers to AMessage structures.
+	// These should be passed by pointer as they contain a lot of data.
+	xQueue2 = xQueueCreate( 10, sizeof( struct AMessage * ) );
+
+	// ...
+
+	if( xQueue1 != 0 )
+	{
+		// Send a uint32_t.  Wait for 10 ticks for space to become
+		// available if necessary.
+		if( xQueueSendToFront( xQueue1, ( void * ) &ulVar, ( TickType_t ) 10 ) != pdPASS )
+		{
+			// Failed to post the message, even after 10 ticks.
+		}
+	}
+
+	if( xQueue2 != 0 )
+	{
+		// Send a pointer to a struct AMessage object.  Don't block if the
+		// queue is already full.
+		pxMessage = & xMessage;
+		xQueueSendToFront( xQueue2, ( void * ) &pxMessage, ( TickType_t ) 0 );
+	}
+
+	// ... Rest of task code.
+ }
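+
+ // Sketch of a hypothetical helper: the same send, but with the block time
+ // expressed in milliseconds and converted to ticks with pdMS_TO_TICKS(), as
+ // recommended for xTicksToWait above.
+ void vSendToFrontWithMsTimeout( QueueHandle_t xQueueToUse )
+ {
+	if( xQueueSendToFront( xQueueToUse, ( void * ) &ulVar, pdMS_TO_TICKS( 10 ) ) != pdPASS )
+	{
+		// Failed to post the message within 10 ms.
+	}
+ }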
+ 
+ * \defgroup xQueueSend xQueueSend + * \ingroup QueueManagement + */ +#define xQueueSendToFront( xQueue, pvItemToQueue, xTicksToWait ) xQueueGenericSend( ( xQueue ), ( pvItemToQueue ), ( xTicksToWait ), queueSEND_TO_FRONT ) + +/** + * queue. h + *
+ BaseType_t xQueueSendToBack(
+								   QueueHandle_t	xQueue,
+								   const void		*pvItemToQueue,
+								   TickType_t		xTicksToWait
+							   );
+ * 
+ * + * This is a macro that calls xQueueGenericSend(). + * + * Post an item to the back of a queue. The item is queued by copy, not by + * reference. This function must not be called from an interrupt service + * routine. See xQueueSendFromISR () for an alternative which may be used + * in an ISR. + * + * @param xQueue The handle to the queue on which the item is to be posted. + * + * @param pvItemToQueue A pointer to the item that is to be placed on the + * queue. The size of the items the queue will hold was defined when the + * queue was created, so this many bytes will be copied from pvItemToQueue + * into the queue storage area. + * + * @param xTicksToWait The maximum amount of time the task should block + * waiting for space to become available on the queue, should it already + * be full. The call will return immediately if this is set to 0 and the queue + * is full. The time is defined in tick periods so the constant + * portTICK_PERIOD_MS should be used to convert to real time if this is required. + * + * @return pdTRUE if the item was successfully posted, otherwise errQUEUE_FULL. + * + * Example usage: +
+ struct AMessage
+ {
+	char ucMessageID;
+	char ucData[ 20 ];
+ } xMessage;
+
+ uint32_t ulVar = 10UL;
+
+ void vATask( void *pvParameters )
+ {
+ QueueHandle_t xQueue1, xQueue2;
+ struct AMessage *pxMessage;
+
+	// Create a queue capable of containing 10 uint32_t values.
+	xQueue1 = xQueueCreate( 10, sizeof( uint32_t ) );
+
+	// Create a queue capable of containing 10 pointers to AMessage structures.
+	// These should be passed by pointer as they contain a lot of data.
+	xQueue2 = xQueueCreate( 10, sizeof( struct AMessage * ) );
+
+	// ...
+
+	if( xQueue1 != 0 )
+	{
+		// Send a uint32_t.  Wait for 10 ticks for space to become
+		// available if necessary.
+		if( xQueueSendToBack( xQueue1, ( void * ) &ulVar, ( TickType_t ) 10 ) != pdPASS )
+		{
+			// Failed to post the message, even after 10 ticks.
+		}
+	}
+
+	if( xQueue2 != 0 )
+	{
+		// Send a pointer to a struct AMessage object.  Don't block if the
+		// queue is already full.
+		pxMessage = & xMessage;
+		xQueueSendToBack( xQueue2, ( void * ) &pxMessage, ( TickType_t ) 0 );
+	}
+
+	// ... Rest of task code.
+ }
+ 
+ * \defgroup xQueueSend xQueueSend + * \ingroup QueueManagement + */ +#define xQueueSendToBack( xQueue, pvItemToQueue, xTicksToWait ) xQueueGenericSend( ( xQueue ), ( pvItemToQueue ), ( xTicksToWait ), queueSEND_TO_BACK ) + +/** + * queue. h + *
+ BaseType_t xQueueSend(
+							  QueueHandle_t xQueue,
+							  const void * pvItemToQueue,
+							  TickType_t xTicksToWait
+						 );
+ * 
+ * + * This is a macro that calls xQueueGenericSend(). It is included for + * backward compatibility with versions of FreeRTOS.org that did not + * include the xQueueSendToFront() and xQueueSendToBack() macros. It is + * equivalent to xQueueSendToBack(). + * + * Post an item on a queue. The item is queued by copy, not by reference. + * This function must not be called from an interrupt service routine. + * See xQueueSendFromISR () for an alternative which may be used in an ISR. + * + * @param xQueue The handle to the queue on which the item is to be posted. + * + * @param pvItemToQueue A pointer to the item that is to be placed on the + * queue. The size of the items the queue will hold was defined when the + * queue was created, so this many bytes will be copied from pvItemToQueue + * into the queue storage area. + * + * @param xTicksToWait The maximum amount of time the task should block + * waiting for space to become available on the queue, should it already + * be full. The call will return immediately if this is set to 0 and the + * queue is full. The time is defined in tick periods so the constant + * portTICK_PERIOD_MS should be used to convert to real time if this is required. + * + * @return pdTRUE if the item was successfully posted, otherwise errQUEUE_FULL. + * + * Example usage: +
+ struct AMessage
+ {
+	char ucMessageID;
+	char ucData[ 20 ];
+ } xMessage;
+
+ uint32_t ulVar = 10UL;
+
+ void vATask( void *pvParameters )
+ {
+ QueueHandle_t xQueue1, xQueue2;
+ struct AMessage *pxMessage;
+
+	// Create a queue capable of containing 10 uint32_t values.
+	xQueue1 = xQueueCreate( 10, sizeof( uint32_t ) );
+
+	// Create a queue capable of containing 10 pointers to AMessage structures.
+	// These should be passed by pointer as they contain a lot of data.
+	xQueue2 = xQueueCreate( 10, sizeof( struct AMessage * ) );
+
+	// ...
+
+	if( xQueue1 != 0 )
+	{
+		// Send a uint32_t.  Wait for 10 ticks for space to become
+		// available if necessary.
+		if( xQueueSend( xQueue1, ( void * ) &ulVar, ( TickType_t ) 10 ) != pdPASS )
+		{
+			// Failed to post the message, even after 10 ticks.
+		}
+	}
+
+	if( xQueue2 != 0 )
+	{
+		// Send a pointer to a struct AMessage object.  Don't block if the
+		// queue is already full.
+		pxMessage = & xMessage;
+		xQueueSend( xQueue2, ( void * ) &pxMessage, ( TickType_t ) 0 );
+	}
+
+	// ... Rest of task code.
+ }
+ 
+ * \defgroup xQueueSend xQueueSend + * \ingroup QueueManagement + */ +#define xQueueSend( xQueue, pvItemToQueue, xTicksToWait ) xQueueGenericSend( ( xQueue ), ( pvItemToQueue ), ( xTicksToWait ), queueSEND_TO_BACK ) + +/** + * queue. h + *
+ BaseType_t xQueueOverwrite(
+							  QueueHandle_t xQueue,
+							  const void * pvItemToQueue
+						 );
+ * 
+ * + * Only for use with queues that have a length of one - so the queue is either + * empty or full. + * + * Post an item on a queue. If the queue is already full then overwrite the + * value held in the queue. The item is queued by copy, not by reference. + * + * This function must not be called from an interrupt service routine. + * See xQueueOverwriteFromISR () for an alternative which may be used in an ISR. + * + * @param xQueue The handle of the queue to which the data is being sent. + * + * @param pvItemToQueue A pointer to the item that is to be placed on the + * queue. The size of the items the queue will hold was defined when the + * queue was created, so this many bytes will be copied from pvItemToQueue + * into the queue storage area. + * + * @return xQueueOverwrite() is a macro that calls xQueueGenericSend(), and + * therefore has the same return values as xQueueSendToFront(). However, pdPASS + * is the only value that can be returned because xQueueOverwrite() will write + * to the queue even when the queue is already full. + * + * Example usage: +
+
+ void vFunction( void *pvParameters )
+ {
+ QueueHandle_t xQueue;
+ uint32_t ulVarToSend, ulValReceived;
+
+	// Create a queue to hold one uint32_t value.  It is strongly
+	// recommended *not* to use xQueueOverwrite() on queues that can
+	// contain more than one value, and doing so will trigger an assertion
+	// if configASSERT() is defined.
+	xQueue = xQueueCreate( 1, sizeof( uint32_t ) );
+
+	// Write the value 10 to the queue using xQueueOverwrite().
+	ulVarToSend = 10;
+	xQueueOverwrite( xQueue, &ulVarToSend );
+
+	// Peeking the queue should now return 10, but leave the value 10 in
+	// the queue.  A block time of zero is used as it is known that the
+	// queue holds a value.
+	ulValReceived = 0;
+	xQueuePeek( xQueue, &ulValReceived, 0 );
+
+	if( ulValReceived != 10 )
+	{
+		// Error unless the item was removed by a different task.
+	}
+
+	// The queue is still full.  Use xQueueOverwrite() to overwrite the
+	// value held in the queue with 100.
+	ulVarToSend = 100;
+	xQueueOverwrite( xQueue, &ulVarToSend );
+
+	// This time read from the queue, leaving the queue empty once more.
+	// A block time of 0 is used again.
+	xQueueReceive( xQueue, &ulValReceived, 0 );
+
+	// The value read should be the last value written, even though the
+	// queue was already full when the value was written.
+	if( ulValReceived != 100 )
+	{
+		// Error!
+	}
+
+	// ...
+}
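+
+ // Sketch only: the ISR-safe counterpart mentioned above.  Assumes the handle
+ // created in vFunction() has also been stored in a file scope variable
+ // (hypothetically xMailbox) so the interrupt handler can reach it.
+ QueueHandle_t xMailbox;
+
+ void vAnInterruptHandler( void )
+ {
+ BaseType_t xHigherPriorityTaskWoken = pdFALSE;
+ uint32_t ulNewValue = 50;
+
+	// Overwrite whatever the length-one queue currently holds.
+	xQueueOverwriteFromISR( xMailbox, &ulNewValue, &xHigherPriorityTaskWoken );
+
+	// Request a context switch before exiting if a higher priority task woke.
+	portYIELD_FROM_ISR( xHigherPriorityTaskWoken );
+ }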
+ 
+ * \defgroup xQueueOverwrite xQueueOverwrite + * \ingroup QueueManagement + */ +#define xQueueOverwrite( xQueue, pvItemToQueue ) xQueueGenericSend( ( xQueue ), ( pvItemToQueue ), 0, queueOVERWRITE ) + + +/** + * queue. h + *
+ BaseType_t xQueueGenericSend(
+									QueueHandle_t xQueue,
+									const void * pvItemToQueue,
+									TickType_t xTicksToWait,
+									BaseType_t xCopyPosition
+								);
+ * 
+ * + * It is preferred that the macros xQueueSend(), xQueueSendToFront() and + * xQueueSendToBack() are used in place of calling this function directly. + * + * Post an item on a queue. The item is queued by copy, not by reference. + * This function must not be called from an interrupt service routine. + * See xQueueSendFromISR () for an alternative which may be used in an ISR. + * + * @param xQueue The handle to the queue on which the item is to be posted. + * + * @param pvItemToQueue A pointer to the item that is to be placed on the + * queue. The size of the items the queue will hold was defined when the + * queue was created, so this many bytes will be copied from pvItemToQueue + * into the queue storage area. + * + * @param xTicksToWait The maximum amount of time the task should block + * waiting for space to become available on the queue, should it already + * be full. The call will return immediately if this is set to 0 and the + * queue is full. The time is defined in tick periods so the constant + * portTICK_PERIOD_MS should be used to convert to real time if this is required. + * + * @param xCopyPosition Can take the value queueSEND_TO_BACK to place the + * item at the back of the queue, or queueSEND_TO_FRONT to place the item + * at the front of the queue (for high priority messages). + * + * @return pdTRUE if the item was successfully posted, otherwise errQUEUE_FULL. + * + * Example usage: +
+ struct AMessage
+ {
+	char ucMessageID;
+	char ucData[ 20 ];
+ } xMessage;
+
+ uint32_t ulVar = 10UL;
+
+ void vATask( void *pvParameters )
+ {
+ QueueHandle_t xQueue1, xQueue2;
+ struct AMessage *pxMessage;
+
+	// Create a queue capable of containing 10 uint32_t values.
+	xQueue1 = xQueueCreate( 10, sizeof( uint32_t ) );
+
+	// Create a queue capable of containing 10 pointers to AMessage structures.
+	// These should be passed by pointer as they contain a lot of data.
+	xQueue2 = xQueueCreate( 10, sizeof( struct AMessage * ) );
+
+	// ...
+
+	if( xQueue1 != 0 )
+	{
+		// Send a uint32_t.  Wait for 10 ticks for space to become
+		// available if necessary.
+		if( xQueueGenericSend( xQueue1, ( void * ) &ulVar, ( TickType_t ) 10, queueSEND_TO_BACK ) != pdPASS )
+		{
+			// Failed to post the message, even after 10 ticks.
+		}
+	}
+
+	if( xQueue2 != 0 )
+	{
+		// Send a pointer to a struct AMessage object.  Don't block if the
+		// queue is already full.
+		pxMessage = & xMessage;
+		xQueueGenericSend( xQueue2, ( void * ) &pxMessage, ( TickType_t ) 0, queueSEND_TO_BACK );
+	}
+
+	// ... Rest of task code.
+ }
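+
+ // Sketch of a hypothetical helper: passing queueSEND_TO_FRONT as the
+ // xCopyPosition argument places the item ahead of anything already queued,
+ // which is the "high priority message" case described above.
+ void vPostUrgentValue( QueueHandle_t xQueueToUse )
+ {
+	( void ) xQueueGenericSend( xQueueToUse, ( void * ) &ulVar, ( TickType_t ) 10, queueSEND_TO_FRONT );
+ }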
+ 
+ * \defgroup xQueueSend xQueueSend + * \ingroup QueueManagement + */ +BaseType_t xQueueGenericSend( QueueHandle_t xQueue, const void * const pvItemToQueue, TickType_t xTicksToWait, const BaseType_t xCopyPosition ) PRIVILEGED_FUNCTION; + +/** + * queue. h + *
+ BaseType_t xQueuePeek(
+							 QueueHandle_t xQueue,
+							 void * const pvBuffer,
+							 TickType_t xTicksToWait
+						 );
+ * + * Receive an item from a queue without removing the item from the queue. + * The item is received by copy so a buffer of adequate size must be + * provided. The number of bytes copied into the buffer was defined when + * the queue was created. + * + * Successfully received items remain on the queue so will be returned again + * by the next call, or a call to xQueueReceive(). + * + * This macro must not be used in an interrupt service routine. See + * xQueuePeekFromISR() for an alternative that can be called from an interrupt + * service routine. + * + * @param xQueue The handle to the queue from which the item is to be + * received. + * + * @param pvBuffer Pointer to the buffer into which the received item will + * be copied. + * + * @param xTicksToWait The maximum amount of time the task should block + * waiting for an item to receive should the queue be empty at the time + * of the call. The time is defined in tick periods so the constant + * portTICK_PERIOD_MS should be used to convert to real time if this is required. + * xQueuePeek() will return immediately if xTicksToWait is 0 and the queue + * is empty. + * + * @return pdTRUE if an item was successfully received from the queue, + * otherwise pdFALSE. + * + * Example usage: +
+ struct AMessage
+ {
+	char ucMessageID;
+	char ucData[ 20 ];
+ } xMessage;
+
+ QueueHandle_t xQueue;
+
+ // Task to create a queue and post a value.
+ void vATask( void *pvParameters )
+ {
+ struct AMessage *pxMessage;
+
+	// Create a queue capable of containing 10 pointers to AMessage structures.
+	// These should be passed by pointer as they contain a lot of data.
+	xQueue = xQueueCreate( 10, sizeof( struct AMessage * ) );
+	if( xQueue == 0 )
+	{
+		// Failed to create the queue.
+	}
+
+	// ...
+
+	// Send a pointer to a struct AMessage object.  Don't block if the
+	// queue is already full.
+	pxMessage = & xMessage;
+	xQueueSend( xQueue, ( void * ) &pxMessage, ( TickType_t ) 0 );
+
+	// ... Rest of task code.
+ }
+
+ // Task to peek the data from the queue.
+ void vADifferentTask( void *pvParameters )
+ {
+ struct AMessage *pxRxedMessage;
+
+	if( xQueue != 0 )
+	{
+		// Peek a message on the created queue.  Block for 10 ticks if a
+		// message is not immediately available.
+		if( xQueuePeek( xQueue, &( pxRxedMessage ), ( TickType_t ) 10 ) )
+		{
+			// pxRxedMessage now points to the struct AMessage variable posted
+			// by vATask, but the item still remains on the queue.
+		}
+	}
+
+	// ... Rest of task code.
+ }
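+
+ // Sketch only (hypothetical task, reusing the file scope xQueue above):
+ // because xQueuePeek() leaves the item on the queue, a message can be
+ // inspected first and only then removed with xQueueReceive().
+ void vInspectThenConsume( void *pvParameters )
+ {
+ struct AMessage *pxPeekedMessage;
+
+	if( xQueuePeek( xQueue, &( pxPeekedMessage ), ( TickType_t ) 10 ) == pdTRUE )
+	{
+		// The item is still queued at this point; receiving it removes it.
+		xQueueReceive( xQueue, &( pxPeekedMessage ), 0 );
+	}
+ }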
+ 
+ * \defgroup xQueuePeek xQueuePeek + * \ingroup QueueManagement + */ +BaseType_t xQueuePeek( QueueHandle_t xQueue, void * const pvBuffer, TickType_t xTicksToWait ) PRIVILEGED_FUNCTION; + +/** + * queue. h + *
+ BaseType_t xQueuePeekFromISR(
+									QueueHandle_t xQueue,
+									void *pvBuffer
+								);
+ * + * A version of xQueuePeek() that can be called from an interrupt service + * routine (ISR). + * + * Receive an item from a queue without removing the item from the queue. + * The item is received by copy so a buffer of adequate size must be + * provided. The number of bytes copied into the buffer was defined when + * the queue was created. + * + * Successfully received items remain on the queue so will be returned again + * by the next call, or a call to xQueueReceive(). + * + * @param xQueue The handle to the queue from which the item is to be + * received. + * + * @param pvBuffer Pointer to the buffer into which the received item will + * be copied. + * + * @return pdTRUE if an item was successfully received from the queue, + * otherwise pdFALSE. + * + * \defgroup xQueuePeekFromISR xQueuePeekFromISR + * \ingroup QueueManagement + */ +BaseType_t xQueuePeekFromISR( QueueHandle_t xQueue, void * const pvBuffer ) PRIVILEGED_FUNCTION; + +/** + * queue. h + *
+ BaseType_t xQueueReceive(
+								 QueueHandle_t xQueue,
+								 void *pvBuffer,
+								 TickType_t xTicksToWait
+							);
+ * + * Receive an item from a queue. The item is received by copy so a buffer of + * adequate size must be provided. The number of bytes copied into the buffer + * was defined when the queue was created. + * + * Successfully received items are removed from the queue. + * + * This function must not be used in an interrupt service routine. See + * xQueueReceiveFromISR for an alternative that can. + * + * @param xQueue The handle to the queue from which the item is to be + * received. + * + * @param pvBuffer Pointer to the buffer into which the received item will + * be copied. + * + * @param xTicksToWait The maximum amount of time the task should block + * waiting for an item to receive should the queue be empty at the time + * of the call. xQueueReceive() will return immediately if xTicksToWait + * is zero and the queue is empty. The time is defined in tick periods so the + * constant portTICK_PERIOD_MS should be used to convert to real time if this is + * required. + * + * @return pdTRUE if an item was successfully received from the queue, + * otherwise pdFALSE. + * + * Example usage: +
+ struct AMessage
+ {
+	char ucMessageID;
+	char ucData[ 20 ];
+ } xMessage;
+
+ QueueHandle_t xQueue;
+
+ // Task to create a queue and post a value.
+ void vATask( void *pvParameters )
+ {
+ struct AMessage *pxMessage;
+
+	// Create a queue capable of containing 10 pointers to AMessage structures.
+	// These should be passed by pointer as they contain a lot of data.
+	xQueue = xQueueCreate( 10, sizeof( struct AMessage * ) );
+	if( xQueue == 0 )
+	{
+		// Failed to create the queue.
+	}
+
+	// ...
+
+	// Send a pointer to a struct AMessage object.  Don't block if the
+	// queue is already full.
+	pxMessage = & xMessage;
+	xQueueSend( xQueue, ( void * ) &pxMessage, ( TickType_t ) 0 );
+
+	// ... Rest of task code.
+ }
+
+ // Task to receive from the queue.
+ void vADifferentTask( void *pvParameters )
+ {
+ struct AMessage *pxRxedMessage;
+
+	if( xQueue != 0 )
+	{
+		// Receive a message on the created queue.  Block for 10 ticks if a
+		// message is not immediately available.
+		if( xQueueReceive( xQueue, &( pxRxedMessage ), ( TickType_t ) 10 ) )
+		{
+			// pxRxedMessage now points to the struct AMessage variable posted
+			// by vATask.
+		}
+	}
+
+	// ... Rest of task code.
+ }
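+
+ // A small sketch of expressing the block time in milliseconds instead of raw
+ // ticks.  It assumes the pdMS_TO_TICKS() helper is available (it is provided
+ // by recent FreeRTOS versions); otherwise portTICK_PERIOD_MS can be used for
+ // the conversion, as described above.  vTimedReceiverTask is a hypothetical
+ // task name used only for illustration.
+ void vTimedReceiverTask( void *pvParameters )
+ {
+ struct AMessage *pxRxedMessage;
+ const TickType_t xBlockTime = pdMS_TO_TICKS( 100 );
+
+	if( xQueueReceive( xQueue, &( pxRxedMessage ), xBlockTime ) == pdTRUE )
+	{
+		// An item arrived within roughly 100 milliseconds.
+	}
+ }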
+ 
+ * \defgroup xQueueReceive xQueueReceive + * \ingroup QueueManagement + */ +BaseType_t xQueueReceive( QueueHandle_t xQueue, void * const pvBuffer, TickType_t xTicksToWait ) PRIVILEGED_FUNCTION; + +/** + * queue. h + *
UBaseType_t uxQueueMessagesWaiting( const QueueHandle_t xQueue );
+ * + * Return the number of messages stored in a queue. + * + * @param xQueue A handle to the queue being queried. + * + * @return The number of messages available in the queue. + * + * \defgroup uxQueueMessagesWaiting uxQueueMessagesWaiting + * \ingroup QueueManagement + */ +UBaseType_t uxQueueMessagesWaiting( const QueueHandle_t xQueue ) PRIVILEGED_FUNCTION; + +/** + * queue. h + *
UBaseType_t uxQueueSpacesAvailable( const QueueHandle_t xQueue );
+ * + * Return the number of free spaces available in a queue. This is equal to the + * number of items that can be sent to the queue before the queue becomes full + * if no items are removed. + * + * @param xQueue A handle to the queue being queried. + * + * @return The number of spaces available in the queue. + * + * \defgroup uxQueueSpacesAvailable uxQueueSpacesAvailable + * \ingroup QueueManagement + */ +UBaseType_t uxQueueSpacesAvailable( const QueueHandle_t xQueue ) PRIVILEGED_FUNCTION; + +/** + * queue. h + *
void vQueueDelete( QueueHandle_t xQueue );
+ * + * Delete a queue - freeing all the memory allocated for storing of items + * placed on the queue. + * + * @param xQueue A handle to the queue to be deleted. + * + * \defgroup vQueueDelete vQueueDelete + * \ingroup QueueManagement + */ +void vQueueDelete( QueueHandle_t xQueue ) PRIVILEGED_FUNCTION; + +/** + * queue. h + *
+ BaseType_t xQueueSendToFrontFromISR(
+										 QueueHandle_t xQueue,
+										 const void *pvItemToQueue,
+										 BaseType_t *pxHigherPriorityTaskWoken
+									  );
+ 
+ * + * This is a macro that calls xQueueGenericSendFromISR(). + * + * Post an item to the front of a queue. It is safe to use this macro from + * within an interrupt service routine. + * + * Items are queued by copy not reference so it is preferable to only + * queue small items, especially when called from an ISR. In most cases + * it would be preferable to store a pointer to the item being queued. + * + * @param xQueue The handle to the queue on which the item is to be posted. + * + * @param pvItemToQueue A pointer to the item that is to be placed on the + * queue. The size of the items the queue will hold was defined when the + * queue was created, so this many bytes will be copied from pvItemToQueue + * into the queue storage area. + * + * @param pxHigherPriorityTaskWoken xQueueSendToFrontFromISR() will set + * *pxHigherPriorityTaskWoken to pdTRUE if sending to the queue caused a task + * to unblock, and the unblocked task has a priority higher than the currently + * running task. If xQueueSendToFrontFromISR() sets this value to pdTRUE then + * a context switch should be requested before the interrupt is exited. + * + * @return pdTRUE if the data was successfully sent to the queue, otherwise + * errQUEUE_FULL. + * + * Example usage for buffered IO (where the ISR can obtain more than one value + * per call): +
+ void vBufferISR( void )
+ {
+ char cIn;
+ BaseType_t xHigherPriorityTaskWoken;
+
+	// We have not woken a task at the start of the ISR.
+	xHigherPriorityTaskWoken = pdFALSE;
+
+	// Loop until the buffer is empty.
+	do
+	{
+		// Obtain a byte from the buffer.
+		cIn = portINPUT_BYTE( RX_REGISTER_ADDRESS );
+
+		// Post the byte.
+		xQueueSendToFrontFromISR( xRxQueue, &cIn, &xHigherPriorityTaskWoken );
+
+	} while( portINPUT_BYTE( BUFFER_COUNT ) );
+
+	// Now the buffer is empty we can switch context if necessary.
+	if( xHigherPriorityTaskWoken )
+	{
+		// Actual macro used here is port specific.
+		portYIELD_FROM_ISR ();
+	}
+ }
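+
+ // On many ports the "yield from ISR" macro accepts the woken flag directly,
+ // so the check above collapses into one call.  The macro name and signature
+ // are port specific, so treat the following as a sketch only (vShortBufferISR
+ // is an illustrative name):
+ void vShortBufferISR( void )
+ {
+ char cIn;
+ BaseType_t xHigherPriorityTaskWoken = pdFALSE;
+
+	// Obtain a byte from the buffer and post it to the front of the queue.
+	cIn = portINPUT_BYTE( RX_REGISTER_ADDRESS );
+	xQueueSendToFrontFromISR( xRxQueue, &cIn, &xHigherPriorityTaskWoken );
+
+	// Request a context switch only if a higher priority task was woken.
+	portYIELD_FROM_ISR( xHigherPriorityTaskWoken );
+ }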
+ 
+ * + * \defgroup xQueueSendFromISR xQueueSendFromISR + * \ingroup QueueManagement + */ +#define xQueueSendToFrontFromISR( xQueue, pvItemToQueue, pxHigherPriorityTaskWoken ) xQueueGenericSendFromISR( ( xQueue ), ( pvItemToQueue ), ( pxHigherPriorityTaskWoken ), queueSEND_TO_FRONT ) + + +/** + * queue. h + *
+ BaseType_t xQueueSendToBackFromISR(
+										 QueueHandle_t xQueue,
+										 const void *pvItemToQueue,
+										 BaseType_t *pxHigherPriorityTaskWoken
+									  );
+ 
+ * + * This is a macro that calls xQueueGenericSendFromISR(). + * + * Post an item to the back of a queue. It is safe to use this macro from + * within an interrupt service routine. + * + * Items are queued by copy not reference so it is preferable to only + * queue small items, especially when called from an ISR. In most cases + * it would be preferable to store a pointer to the item being queued. + * + * @param xQueue The handle to the queue on which the item is to be posted. + * + * @param pvItemToQueue A pointer to the item that is to be placed on the + * queue. The size of the items the queue will hold was defined when the + * queue was created, so this many bytes will be copied from pvItemToQueue + * into the queue storage area. + * + * @param pxHigherPriorityTaskWoken xQueueSendToBackFromISR() will set + * *pxHigherPriorityTaskWoken to pdTRUE if sending to the queue caused a task + * to unblock, and the unblocked task has a priority higher than the currently + * running task. If xQueueSendToBackFromISR() sets this value to pdTRUE then + * a context switch should be requested before the interrupt is exited. + * + * @return pdTRUE if the data was successfully sent to the queue, otherwise + * errQUEUE_FULL. + * + * Example usage for buffered IO (where the ISR can obtain more than one value + * per call): +
+ void vBufferISR( void )
+ {
+ char cIn;
+ BaseType_t xHigherPriorityTaskWoken;
+
+	// We have not woken a task at the start of the ISR.
+	xHigherPriorityTaskWoken = pdFALSE;
+
+	// Loop until the buffer is empty.
+	do
+	{
+		// Obtain a byte from the buffer.
+		cIn = portINPUT_BYTE( RX_REGISTER_ADDRESS );
+
+		// Post the byte.
+		xQueueSendToBackFromISR( xRxQueue, &cIn, &xHigherPriorityTaskWoken );
+
+	} while( portINPUT_BYTE( BUFFER_COUNT ) );
+
+	// Now the buffer is empty we can switch context if necessary.
+	if( xHigherPriorityTaskWoken )
+	{
+		// Actual macro used here is port specific.
+		portYIELD_FROM_ISR ();
+	}
+ }
+ 
+ * + * \defgroup xQueueSendFromISR xQueueSendFromISR + * \ingroup QueueManagement + */ +#define xQueueSendToBackFromISR( xQueue, pvItemToQueue, pxHigherPriorityTaskWoken ) xQueueGenericSendFromISR( ( xQueue ), ( pvItemToQueue ), ( pxHigherPriorityTaskWoken ), queueSEND_TO_BACK ) + +/** + * queue. h + *
+ BaseType_t xQueueOverwriteFromISR(
+							  QueueHandle_t xQueue,
+							  const void * pvItemToQueue,
+							  BaseType_t *pxHigherPriorityTaskWoken
+						 );
+ * 
+ * + * A version of xQueueOverwrite() that can be used in an interrupt service + * routine (ISR). + * + * Only for use with queues that can hold a single item - so the queue is either + * empty or full. + * + * Post an item on a queue. If the queue is already full then overwrite the + * value held in the queue. The item is queued by copy, not by reference. + * + * @param xQueue The handle to the queue on which the item is to be posted. + * + * @param pvItemToQueue A pointer to the item that is to be placed on the + * queue. The size of the items the queue will hold was defined when the + * queue was created, so this many bytes will be copied from pvItemToQueue + * into the queue storage area. + * + * @param pxHigherPriorityTaskWoken xQueueOverwriteFromISR() will set + * *pxHigherPriorityTaskWoken to pdTRUE if sending to the queue caused a task + * to unblock, and the unblocked task has a priority higher than the currently + * running task. If xQueueOverwriteFromISR() sets this value to pdTRUE then + * a context switch should be requested before the interrupt is exited. + * + * @return xQueueOverwriteFromISR() is a macro that calls + * xQueueGenericSendFromISR(), and therefore has the same return values as + * xQueueSendToFrontFromISR(). However, pdPASS is the only value that can be + * returned because xQueueOverwriteFromISR() will write to the queue even when + * the queue is already full. + * + * Example usage: +
+
+ QueueHandle_t xQueue;
+
+ void vFunction( void *pvParameters )
+ {
+ 	// Create a queue to hold one uint32_t value.  It is strongly
+	// recommended *not* to use xQueueOverwriteFromISR() on queues that can
+	// contain more than one value, and doing so will trigger an assertion
+	// if configASSERT() is defined.
+	xQueue = xQueueCreate( 1, sizeof( uint32_t ) );
+}
+
+void vAnInterruptHandler( void )
+{
+// xHigherPriorityTaskWoken must be set to pdFALSE before it is used.
+BaseType_t xHigherPriorityTaskWoken = pdFALSE;
+uint32_t ulVarToSend, ulValReceived;
+
+	// Write the value 10 to the queue using xQueueOverwriteFromISR().
+	ulVarToSend = 10;
+	xQueueOverwriteFromISR( xQueue, &ulVarToSend, &xHigherPriorityTaskWoken );
+
+	// The queue is full, but calling xQueueOverwriteFromISR() again will still
+	// pass because the value held in the queue will be overwritten with the
+	// new value.
+	ulVarToSend = 100;
+	xQueueOverwriteFromISR( xQueue, &ulVarToSend, &xHigherPriorityTaskWoken );
+
+	// Reading from the queue will now return 100.
+
+	// ...
+
+	if( xHigherPriorityTaskWoken == pdTRUE )
+	{
+		// Writing to the queue caused a task to unblock and the unblocked task
+		// has a priority higher than or equal to the priority of the currently
+		// executing task (the task this interrupt interrupted).  Perform a context
+		// switch so this interrupt returns directly to the unblocked task.
+		portYIELD_FROM_ISR(); // or portEND_SWITCHING_ISR() depending on the port.
+	}
+}
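+
+// A sketch of the task side of this single entry 'mailbox' pattern:  because
+// the ISR above always overwrites the value, the task can read the latest
+// value with xQueuePeek() so it also remains available to any other reader.
+// vMailboxReaderTask is a hypothetical task name used only for illustration.
+void vMailboxReaderTask( void *pvParameters )
+{
+uint32_t ulLatestValue;
+
+	for( ;; )
+	{
+		if( xQueuePeek( xQueue, &ulLatestValue, ( TickType_t ) 10 ) == pdTRUE )
+		{
+			// ulLatestValue now holds the most recent value written by the ISR.
+		}
+	}
+}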
+ 
+ * \defgroup xQueueOverwriteFromISR xQueueOverwriteFromISR + * \ingroup QueueManagement + */ +#define xQueueOverwriteFromISR( xQueue, pvItemToQueue, pxHigherPriorityTaskWoken ) xQueueGenericSendFromISR( ( xQueue ), ( pvItemToQueue ), ( pxHigherPriorityTaskWoken ), queueOVERWRITE ) + +/** + * queue. h + *
+ BaseType_t xQueueSendFromISR(
+									 QueueHandle_t xQueue,
+									 const void *pvItemToQueue,
+									 BaseType_t *pxHigherPriorityTaskWoken
+								);
+ 
+ * + * This is a macro that calls xQueueGenericSendFromISR(). It is included + * for backward compatibility with versions of FreeRTOS.org that did not + * include the xQueueSendToBackFromISR() and xQueueSendToFrontFromISR() + * macros. + * + * Post an item to the back of a queue. It is safe to use this function from + * within an interrupt service routine. + * + * Items are queued by copy not reference so it is preferable to only + * queue small items, especially when called from an ISR. In most cases + * it would be preferable to store a pointer to the item being queued. + * + * @param xQueue The handle to the queue on which the item is to be posted. + * + * @param pvItemToQueue A pointer to the item that is to be placed on the + * queue. The size of the items the queue will hold was defined when the + * queue was created, so this many bytes will be copied from pvItemToQueue + * into the queue storage area. + * + * @param pxHigherPriorityTaskWoken xQueueSendFromISR() will set + * *pxHigherPriorityTaskWoken to pdTRUE if sending to the queue caused a task + * to unblock, and the unblocked task has a priority higher than the currently + * running task. If xQueueSendFromISR() sets this value to pdTRUE then + * a context switch should be requested before the interrupt is exited. + * + * @return pdTRUE if the data was successfully sent to the queue, otherwise + * errQUEUE_FULL. + * + * Example usage for buffered IO (where the ISR can obtain more than one value + * per call): +
+ void vBufferISR( void )
+ {
+ char cIn;
+ BaseType_t xHigherPriorityTaskWoken;
+
+	// We have not woken a task at the start of the ISR.
+	xHigherPriorityTaskWoken = pdFALSE;
+
+	// Loop until the buffer is empty.
+	do
+	{
+		// Obtain a byte from the buffer.
+		cIn = portINPUT_BYTE( RX_REGISTER_ADDRESS );
+
+		// Post the byte.
+		xQueueSendFromISR( xRxQueue, &cIn, &xHigherPriorityTaskWoken );
+
+	} while( portINPUT_BYTE( BUFFER_COUNT ) );
+
+	// Now the buffer is empty we can switch context if necessary.
+	if( xHigherPriorityTaskWoken )
+	{
+		// Actual macro used here is port specific.
+		portYIELD_FROM_ISR ();
+	}
+ }
+ 
+ * + * \defgroup xQueueSendFromISR xQueueSendFromISR + * \ingroup QueueManagement + */ +#define xQueueSendFromISR( xQueue, pvItemToQueue, pxHigherPriorityTaskWoken ) xQueueGenericSendFromISR( ( xQueue ), ( pvItemToQueue ), ( pxHigherPriorityTaskWoken ), queueSEND_TO_BACK ) + +/** + * queue. h + *
+ BaseType_t xQueueGenericSendFromISR(
+										   QueueHandle_t		xQueue,
+										   const	void	*pvItemToQueue,
+										   BaseType_t	*pxHigherPriorityTaskWoken,
+										   BaseType_t	xCopyPosition
+									   );
+ 
+ * + * It is preferred that the macros xQueueSendFromISR(), + * xQueueSendToFrontFromISR() and xQueueSendToBackFromISR() be used in place + * of calling this function directly. xQueueGiveFromISR() is an + * equivalent for use by semaphores that don't actually copy any data. + * + * Post an item on a queue. It is safe to use this function from within an + * interrupt service routine. + * + * Items are queued by copy not reference so it is preferable to only + * queue small items, especially when called from an ISR. In most cases + * it would be preferable to store a pointer to the item being queued. + * + * @param xQueue The handle to the queue on which the item is to be posted. + * + * @param pvItemToQueue A pointer to the item that is to be placed on the + * queue. The size of the items the queue will hold was defined when the + * queue was created, so this many bytes will be copied from pvItemToQueue + * into the queue storage area. + * + * @param pxHigherPriorityTaskWoken xQueueGenericSendFromISR() will set + * *pxHigherPriorityTaskWoken to pdTRUE if sending to the queue caused a task + * to unblock, and the unblocked task has a priority higher than the currently + * running task. If xQueueGenericSendFromISR() sets this value to pdTRUE then + * a context switch should be requested before the interrupt is exited. + * + * @param xCopyPosition Can take the value queueSEND_TO_BACK to place the + * item at the back of the queue, or queueSEND_TO_FRONT to place the item + * at the front of the queue (for high priority messages). + * + * @return pdTRUE if the data was successfully sent to the queue, otherwise + * errQUEUE_FULL. + * + * Example usage for buffered IO (where the ISR can obtain more than one value + * per call): +
+ void vBufferISR( void )
+ {
+ char cIn;
+ BaseType_t xHigherPriorityTaskWokenByPost;
+
+	// We have not woken a task at the start of the ISR.
+	xHigherPriorityTaskWokenByPost = pdFALSE;
+
+	// Loop until the buffer is empty.
+	do
+	{
+		// Obtain a byte from the buffer.
+		cIn = portINPUT_BYTE( RX_REGISTER_ADDRESS );
+
+		// Post each byte.
+		xQueueGenericSendFromISR( xRxQueue, &cIn, &xHigherPriorityTaskWokenByPost, queueSEND_TO_BACK );
+
+	} while( portINPUT_BYTE( BUFFER_COUNT ) );
+
+	// Now the buffer is empty we can switch context if necessary.  Note that the
+	// name of the yield function required is port specific.
+	if( xHigherPriorityTaskWokenByPost )
+	{
+		portYIELD_FROM_ISR();
+	}
+ }
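+
+ // For reference, the xQueueGenericSendFromISR() call above is what the
+ // xQueueSendToBackFromISR() convenience macro expands to; passing
+ // queueSEND_TO_FRONT as the final argument instead would match
+ // xQueueSendToFrontFromISR().  For example:
+ //
+ //     xQueueSendToBackFromISR( xRxQueue, &cIn, &xHigherPriorityTaskWokenByPost );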
+ 
+ * + * \defgroup xQueueSendFromISR xQueueSendFromISR + * \ingroup QueueManagement + */ +BaseType_t xQueueGenericSendFromISR( QueueHandle_t xQueue, const void * const pvItemToQueue, BaseType_t * const pxHigherPriorityTaskWoken, const BaseType_t xCopyPosition ) PRIVILEGED_FUNCTION; +BaseType_t xQueueGiveFromISR( QueueHandle_t xQueue, BaseType_t * const pxHigherPriorityTaskWoken ) PRIVILEGED_FUNCTION; + +/** + * queue. h + *
+ BaseType_t xQueueReceiveFromISR(
+									   QueueHandle_t	xQueue,
+									   void	*pvBuffer,
+									   BaseType_t *pxTaskWoken
+								   );
+ * 
+ * + * Receive an item from a queue. It is safe to use this function from within an + * interrupt service routine. + * + * @param xQueue The handle to the queue from which the item is to be + * received. + * + * @param pvBuffer Pointer to the buffer into which the received item will + * be copied. + * + * @param pxTaskWoken A task may be blocked waiting for space to become + * available on the queue. If xQueueReceiveFromISR causes such a task to + * unblock *pxTaskWoken will get set to pdTRUE, otherwise *pxTaskWoken will + * remain unchanged. + * + * @return pdTRUE if an item was successfully received from the queue, + * otherwise pdFALSE. + * + * Example usage: +
+
+ QueueHandle_t xQueue;
+
+ // Function to create a queue and post some values.
+ void vAFunction( void *pvParameters )
+ {
+ char cValueToPost;
+ const TickType_t xTicksToWait = ( TickType_t )0xff;
+
+	// Create a queue capable of containing 10 characters.
+	xQueue = xQueueCreate( 10, sizeof( char ) );
+	if( xQueue == 0 )
+	{
+		// Failed to create the queue.
+	}
+
+	// ...
+
+	// Post some characters that will be used within an ISR.  If the queue
+	// is full then this task will block for xTicksToWait ticks.
+	cValueToPost = 'a';
+	xQueueSend( xQueue, ( void * ) &cValueToPost, xTicksToWait );
+	cValueToPost = 'b';
+	xQueueSend( xQueue, ( void * ) &cValueToPost, xTicksToWait );
+
+	// ... keep posting characters ... this task may block when the queue
+	// becomes full.
+
+	cValueToPost = 'c';
+	xQueueSend( xQueue, ( void * ) &cValueToPost, xTicksToWait );
+ }
+
+ // ISR that outputs all the characters received on the queue.
+ void vISR_Routine( void )
+ {
+ BaseType_t xTaskWokenByReceive = pdFALSE;
+ char cRxedChar;
+
+	while( xQueueReceiveFromISR( xQueue, ( void * ) &cRxedChar, &xTaskWokenByReceive) )
+	{
+		// A character was received.  Output the character now.
+		vOutputCharacter( cRxedChar );
+
+		// If removing the character from the queue woke the task that was
+		// posting onto the queue xTaskWokenByReceive will have been set to
+		// pdTRUE.  No matter how many times this loop iterates only one
+		// task will be woken.
+	}
+
+	if( xTaskWokenByReceive != pdFALSE )
+	{
+		// Actual macro used here is port specific.
+		portYIELD_FROM_ISR ();
+	}
+ }
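+
+ // A variation of the ISR above that only drains the items that were already
+ // queued when the ISR ran, using uxQueueMessagesWaitingFromISR() (declared
+ // later in this file) to bound the loop.  vBoundedISR_Routine is a
+ // hypothetical name used only for illustration.
+ void vBoundedISR_Routine( void )
+ {
+ BaseType_t xTaskWokenByReceive = pdFALSE;
+ UBaseType_t uxPending;
+ char cRxedChar;
+
+	uxPending = uxQueueMessagesWaitingFromISR( xQueue );
+
+	while( ( uxPending > 0 ) && ( xQueueReceiveFromISR( xQueue, ( void * ) &cRxedChar, &xTaskWokenByReceive ) == pdTRUE ) )
+	{
+		vOutputCharacter( cRxedChar );
+		uxPending--;
+	}
+
+	if( xTaskWokenByReceive != pdFALSE )
+	{
+		// Actual macro used here is port specific.
+		portYIELD_FROM_ISR ();
+	}
+ }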
+ 
+ * \defgroup xQueueReceiveFromISR xQueueReceiveFromISR + * \ingroup QueueManagement + */ +BaseType_t xQueueReceiveFromISR( QueueHandle_t xQueue, void * const pvBuffer, BaseType_t * const pxHigherPriorityTaskWoken ) PRIVILEGED_FUNCTION; + +/* + * Utilities to query queues that are safe to use from an ISR. These utilities + * should be used only from witin an ISR, or within a critical section. + */ +BaseType_t xQueueIsQueueEmptyFromISR( const QueueHandle_t xQueue ) PRIVILEGED_FUNCTION; +BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue ) PRIVILEGED_FUNCTION; +UBaseType_t uxQueueMessagesWaitingFromISR( const QueueHandle_t xQueue ) PRIVILEGED_FUNCTION; + +/* + * The functions defined above are for passing data to and from tasks. The + * functions below are the equivalents for passing data to and from + * co-routines. + * + * These functions are called from the co-routine macro implementation and + * should not be called directly from application code. Instead use the macro + * wrappers defined within croutine.h. + */ +BaseType_t xQueueCRSendFromISR( QueueHandle_t xQueue, const void *pvItemToQueue, BaseType_t xCoRoutinePreviouslyWoken ); +BaseType_t xQueueCRReceiveFromISR( QueueHandle_t xQueue, void *pvBuffer, BaseType_t *pxTaskWoken ); +BaseType_t xQueueCRSend( QueueHandle_t xQueue, const void *pvItemToQueue, TickType_t xTicksToWait ); +BaseType_t xQueueCRReceive( QueueHandle_t xQueue, void *pvBuffer, TickType_t xTicksToWait ); + +/* + * For internal use only. Use xSemaphoreCreateMutex(), + * xSemaphoreCreateCounting() or xSemaphoreGetMutexHolder() instead of calling + * these functions directly. + */ +QueueHandle_t xQueueCreateMutex( const uint8_t ucQueueType ) PRIVILEGED_FUNCTION; +QueueHandle_t xQueueCreateMutexStatic( const uint8_t ucQueueType, StaticQueue_t *pxStaticQueue ) PRIVILEGED_FUNCTION; +QueueHandle_t xQueueCreateCountingSemaphore( const UBaseType_t uxMaxCount, const UBaseType_t uxInitialCount ) PRIVILEGED_FUNCTION; +QueueHandle_t xQueueCreateCountingSemaphoreStatic( const UBaseType_t uxMaxCount, const UBaseType_t uxInitialCount, StaticQueue_t *pxStaticQueue ) PRIVILEGED_FUNCTION; +BaseType_t xQueueSemaphoreTake( QueueHandle_t xQueue, TickType_t xTicksToWait ) PRIVILEGED_FUNCTION; +TaskHandle_t xQueueGetMutexHolder( QueueHandle_t xSemaphore ) PRIVILEGED_FUNCTION; +TaskHandle_t xQueueGetMutexHolderFromISR( QueueHandle_t xSemaphore ) PRIVILEGED_FUNCTION; + +/* + * For internal use only. Use xSemaphoreTakeMutexRecursive() or + * xSemaphoreGiveMutexRecursive() instead of calling these functions directly. + */ +BaseType_t xQueueTakeMutexRecursive( QueueHandle_t xMutex, TickType_t xTicksToWait ) PRIVILEGED_FUNCTION; +BaseType_t xQueueGiveMutexRecursive( QueueHandle_t xMutex ) PRIVILEGED_FUNCTION; + +/* + * Reset a queue back to its original empty state. The return value is now + * obsolete and is always set to pdPASS. + */ +#define xQueueReset( xQueue ) xQueueGenericReset( xQueue, pdFALSE ) + +/* + * The registry is provided as a means for kernel aware debuggers to + * locate queues, semaphores and mutexes. Call vQueueAddToRegistry() add + * a queue, semaphore or mutex handle to the registry if you want the handle + * to be available to a kernel aware debugger. If you are not using a kernel + * aware debugger then this function can be ignored. + * + * configQUEUE_REGISTRY_SIZE defines the maximum number of handles the + * registry can hold. configQUEUE_REGISTRY_SIZE must be greater than 0 + * within FreeRTOSConfig.h for the registry to be available. 
Its value + * does not effect the number of queues, semaphores and mutexes that can be + * created - just the number that the registry can hold. + * + * @param xQueue The handle of the queue being added to the registry. This + * is the handle returned by a call to xQueueCreate(). Semaphore and mutex + * handles can also be passed in here. + * + * @param pcName The name to be associated with the handle. This is the + * name that the kernel aware debugger will display. The queue registry only + * stores a pointer to the string - so the string must be persistent (global or + * preferably in ROM/Flash), not on the stack. + */ +#if( configQUEUE_REGISTRY_SIZE > 0 ) + void vQueueAddToRegistry( QueueHandle_t xQueue, const char *pcQueueName ) PRIVILEGED_FUNCTION; /*lint !e971 Unqualified char types are allowed for strings and single characters only. */ +#endif + +/* + * The registry is provided as a means for kernel aware debuggers to + * locate queues, semaphores and mutexes. Call vQueueAddToRegistry() add + * a queue, semaphore or mutex handle to the registry if you want the handle + * to be available to a kernel aware debugger, and vQueueUnregisterQueue() to + * remove the queue, semaphore or mutex from the register. If you are not using + * a kernel aware debugger then this function can be ignored. + * + * @param xQueue The handle of the queue being removed from the registry. + */ +#if( configQUEUE_REGISTRY_SIZE > 0 ) + void vQueueUnregisterQueue( QueueHandle_t xQueue ) PRIVILEGED_FUNCTION; +#endif + +/* + * The queue registry is provided as a means for kernel aware debuggers to + * locate queues, semaphores and mutexes. Call pcQueueGetName() to look + * up and return the name of a queue in the queue registry from the queue's + * handle. + * + * @param xQueue The handle of the queue the name of which will be returned. + * @return If the queue is in the registry then a pointer to the name of the + * queue is returned. If the queue is not in the registry then NULL is + * returned. + */ +#if( configQUEUE_REGISTRY_SIZE > 0 ) + const char *pcQueueGetName( QueueHandle_t xQueue ) PRIVILEGED_FUNCTION; /*lint !e971 Unqualified char types are allowed for strings and single characters only. */ +#endif + +/* + * Generic version of the function used to creaet a queue using dynamic memory + * allocation. This is called by other functions and macros that create other + * RTOS objects that use the queue structure as their base. + */ +#if( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) + QueueHandle_t xQueueGenericCreate( const UBaseType_t uxQueueLength, const UBaseType_t uxItemSize, const uint8_t ucQueueType ) PRIVILEGED_FUNCTION; +#endif + +/* + * Generic version of the function used to creaet a queue using dynamic memory + * allocation. This is called by other functions and macros that create other + * RTOS objects that use the queue structure as their base. + */ +#if( configSUPPORT_STATIC_ALLOCATION == 1 ) + QueueHandle_t xQueueGenericCreateStatic( const UBaseType_t uxQueueLength, const UBaseType_t uxItemSize, uint8_t *pucQueueStorage, StaticQueue_t *pxStaticQueue, const uint8_t ucQueueType ) PRIVILEGED_FUNCTION; +#endif + +/* + * Queue sets provide a mechanism to allow a task to block (pend) on a read + * operation from multiple queues or semaphores simultaneously. + * + * See FreeRTOS/Source/Demo/Common/Minimal/QueueSet.c for an example using this + * function. + * + * A queue set must be explicitly created using a call to xQueueCreateSet() + * before it can be used. 
Once created, standard FreeRTOS queues and semaphores + * can be added to the set using calls to xQueueAddToSet(). + * xQueueSelectFromSet() is then used to determine which, if any, of the queues + * or semaphores contained in the set is in a state where a queue read or + * semaphore take operation would be successful. + * + * Note 1: See the documentation on http://wwwFreeRTOS.org/RTOS-queue-sets.html + * for reasons why queue sets are very rarely needed in practice as there are + * simpler methods of blocking on multiple objects. + * + * Note 2: Blocking on a queue set that contains a mutex will not cause the + * mutex holder to inherit the priority of the blocked task. + * + * Note 3: An additional 4 bytes of RAM is required for each space in a every + * queue added to a queue set. Therefore counting semaphores that have a high + * maximum count value should not be added to a queue set. + * + * Note 4: A receive (in the case of a queue) or take (in the case of a + * semaphore) operation must not be performed on a member of a queue set unless + * a call to xQueueSelectFromSet() has first returned a handle to that set member. + * + * @param uxEventQueueLength Queue sets store events that occur on + * the queues and semaphores contained in the set. uxEventQueueLength specifies + * the maximum number of events that can be queued at once. To be absolutely + * certain that events are not lost uxEventQueueLength should be set to the + * total sum of the length of the queues added to the set, where binary + * semaphores and mutexes have a length of 1, and counting semaphores have a + * length set by their maximum count value. Examples: + * + If a queue set is to hold a queue of length 5, another queue of length 12, + * and a binary semaphore, then uxEventQueueLength should be set to + * (5 + 12 + 1), or 18. + * + If a queue set is to hold three binary semaphores then uxEventQueueLength + * should be set to (1 + 1 + 1 ), or 3. + * + If a queue set is to hold a counting semaphore that has a maximum count of + * 5, and a counting semaphore that has a maximum count of 3, then + * uxEventQueueLength should be set to (5 + 3), or 8. + * + * @return If the queue set is created successfully then a handle to the created + * queue set is returned. Otherwise NULL is returned. + */ +QueueSetHandle_t xQueueCreateSet( const UBaseType_t uxEventQueueLength ) PRIVILEGED_FUNCTION; + +/* + * Adds a queue or semaphore to a queue set that was previously created by a + * call to xQueueCreateSet(). + * + * See FreeRTOS/Source/Demo/Common/Minimal/QueueSet.c for an example using this + * function. + * + * Note 1: A receive (in the case of a queue) or take (in the case of a + * semaphore) operation must not be performed on a member of a queue set unless + * a call to xQueueSelectFromSet() has first returned a handle to that set member. + * + * @param xQueueOrSemaphore The handle of the queue or semaphore being added to + * the queue set (cast to an QueueSetMemberHandle_t type). + * + * @param xQueueSet The handle of the queue set to which the queue or semaphore + * is being added. + * + * @return If the queue or semaphore was successfully added to the queue set + * then pdPASS is returned. If the queue could not be successfully added to the + * queue set because it is already a member of a different queue set then pdFAIL + * is returned. + */ +BaseType_t xQueueAddToSet( QueueSetMemberHandle_t xQueueOrSemaphore, QueueSetHandle_t xQueueSet ) PRIVILEGED_FUNCTION; + +/* + * Removes a queue or semaphore from a queue set. 
A queue or semaphore can only + * be removed from a set if the queue or semaphore is empty. + * + * See FreeRTOS/Source/Demo/Common/Minimal/QueueSet.c for an example using this + * function. + * + * @param xQueueOrSemaphore The handle of the queue or semaphore being removed + * from the queue set (cast to an QueueSetMemberHandle_t type). + * + * @param xQueueSet The handle of the queue set in which the queue or semaphore + * is included. + * + * @return If the queue or semaphore was successfully removed from the queue set + * then pdPASS is returned. If the queue was not in the queue set, or the + * queue (or semaphore) was not empty, then pdFAIL is returned. + */ +BaseType_t xQueueRemoveFromSet( QueueSetMemberHandle_t xQueueOrSemaphore, QueueSetHandle_t xQueueSet ) PRIVILEGED_FUNCTION; + +/* + * xQueueSelectFromSet() selects from the members of a queue set a queue or + * semaphore that either contains data (in the case of a queue) or is available + * to take (in the case of a semaphore). xQueueSelectFromSet() effectively + * allows a task to block (pend) on a read operation on all the queues and + * semaphores in a queue set simultaneously. + * + * See FreeRTOS/Source/Demo/Common/Minimal/QueueSet.c for an example using this + * function. + * + * Note 1: See the documentation on http://wwwFreeRTOS.org/RTOS-queue-sets.html + * for reasons why queue sets are very rarely needed in practice as there are + * simpler methods of blocking on multiple objects. + * + * Note 2: Blocking on a queue set that contains a mutex will not cause the + * mutex holder to inherit the priority of the blocked task. + * + * Note 3: A receive (in the case of a queue) or take (in the case of a + * semaphore) operation must not be performed on a member of a queue set unless + * a call to xQueueSelectFromSet() has first returned a handle to that set member. + * + * @param xQueueSet The queue set on which the task will (potentially) block. + * + * @param xTicksToWait The maximum time, in ticks, that the calling task will + * remain in the Blocked state (with other tasks executing) to wait for a member + * of the queue set to be ready for a successful queue read or semaphore take + * operation. + * + * @return xQueueSelectFromSet() will return the handle of a queue (cast to + * a QueueSetMemberHandle_t type) contained in the queue set that contains data, + * or the handle of a semaphore (cast to a QueueSetMemberHandle_t type) contained + * in the queue set that is available, or NULL if no such queue or semaphore + * exists before before the specified block time expires. + */ +QueueSetMemberHandle_t xQueueSelectFromSet( QueueSetHandle_t xQueueSet, const TickType_t xTicksToWait ) PRIVILEGED_FUNCTION; + +/* + * A version of xQueueSelectFromSet() that can be used from an ISR. + */ +QueueSetMemberHandle_t xQueueSelectFromSetFromISR( QueueSetHandle_t xQueueSet ) PRIVILEGED_FUNCTION; + +/* Not public API functions. 
*/ +void vQueueWaitForMessageRestricted( QueueHandle_t xQueue, TickType_t xTicksToWait, const BaseType_t xWaitIndefinitely ) PRIVILEGED_FUNCTION; +BaseType_t xQueueGenericReset( QueueHandle_t xQueue, BaseType_t xNewQueue ) PRIVILEGED_FUNCTION; +void vQueueSetQueueNumber( QueueHandle_t xQueue, UBaseType_t uxQueueNumber ) PRIVILEGED_FUNCTION; +UBaseType_t uxQueueGetQueueNumber( QueueHandle_t xQueue ) PRIVILEGED_FUNCTION; +uint8_t ucQueueGetQueueType( QueueHandle_t xQueue ) PRIVILEGED_FUNCTION; + + +#ifdef __cplusplus +} +#endif + +#endif /* QUEUE_H */ + diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/FreeRTOS/Source/include/semphr.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/FreeRTOS/Source/include/semphr.h new file mode 100644 index 0000000..32b4e5e --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/FreeRTOS/Source/include/semphr.h @@ -0,0 +1,1140 @@ +/* + * FreeRTOS Kernel V10.2.1 + * Copyright (C) 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * http://www.FreeRTOS.org + * http://aws.amazon.com/freertos + * + * 1 tab == 4 spaces! + */ + +#ifndef SEMAPHORE_H +#define SEMAPHORE_H + +#ifndef INC_FREERTOS_H + #error "include FreeRTOS.h" must appear in source files before "include semphr.h" +#endif + +#include "queue.h" + +typedef QueueHandle_t SemaphoreHandle_t; + +#define semBINARY_SEMAPHORE_QUEUE_LENGTH ( ( uint8_t ) 1U ) +#define semSEMAPHORE_QUEUE_ITEM_LENGTH ( ( uint8_t ) 0U ) +#define semGIVE_BLOCK_TIME ( ( TickType_t ) 0U ) + + +/** + * semphr. h + *
vSemaphoreCreateBinary( SemaphoreHandle_t xSemaphore )
+ * + * In many usage scenarios it is faster and more memory efficient to use a + * direct to task notification in place of a binary semaphore! + * http://www.freertos.org/RTOS-task-notifications.html + * + * This old vSemaphoreCreateBinary() macro is now deprecated in favour of the + * xSemaphoreCreateBinary() function. Note that binary semaphores created using + * the vSemaphoreCreateBinary() macro are created in a state such that the + * first call to 'take' the semaphore would pass, whereas binary semaphores + * created using xSemaphoreCreateBinary() are created in a state such that the + * the semaphore must first be 'given' before it can be 'taken'. + * + * Macro that implements a semaphore by using the existing queue mechanism. + * The queue length is 1 as this is a binary semaphore. The data size is 0 + * as we don't want to actually store any data - we just want to know if the + * queue is empty or full. + * + * This type of semaphore can be used for pure synchronisation between tasks or + * between an interrupt and a task. The semaphore need not be given back once + * obtained, so one task/interrupt can continuously 'give' the semaphore while + * another continuously 'takes' the semaphore. For this reason this type of + * semaphore does not use a priority inheritance mechanism. For an alternative + * that does use priority inheritance see xSemaphoreCreateMutex(). + * + * @param xSemaphore Handle to the created semaphore. Should be of type SemaphoreHandle_t. + * + * Example usage: +
+ SemaphoreHandle_t xSemaphore = NULL;
+
+ void vATask( void * pvParameters )
+ {
+    // Semaphore cannot be used before a call to vSemaphoreCreateBinary ().
+    // This is a macro so pass the variable in directly.
+    vSemaphoreCreateBinary( xSemaphore );
+
+    if( xSemaphore != NULL )
+    {
+        // The semaphore was created successfully.
+        // The semaphore can now be used.
+    }
+ }
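+
+ // As described above, the semaphore created by vSemaphoreCreateBinary() starts
+ // in the 'given' state, so the very first take succeeds without any prior call
+ // to xSemaphoreGive().  A minimal sketch (vFirstTakeTask is an illustrative name):
+ void vFirstTakeTask( void * pvParameters )
+ {
+    if( xSemaphore != NULL )
+    {
+        if( xSemaphoreTake( xSemaphore, ( TickType_t ) 0 ) == pdTRUE )
+        {
+            // Obtained immediately - no give was required first.
+        }
+    }
+ }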
+ 
+ * \defgroup vSemaphoreCreateBinary vSemaphoreCreateBinary + * \ingroup Semaphores + */ +#if( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) + #define vSemaphoreCreateBinary( xSemaphore ) \ + { \ + ( xSemaphore ) = xQueueGenericCreate( ( UBaseType_t ) 1, semSEMAPHORE_QUEUE_ITEM_LENGTH, queueQUEUE_TYPE_BINARY_SEMAPHORE ); \ + if( ( xSemaphore ) != NULL ) \ + { \ + ( void ) xSemaphoreGive( ( xSemaphore ) ); \ + } \ + } +#endif + +/** + * semphr. h + *
SemaphoreHandle_t xSemaphoreCreateBinary( void )
+ * + * Creates a new binary semaphore instance, and returns a handle by which the + * new semaphore can be referenced. + * + * In many usage scenarios it is faster and more memory efficient to use a + * direct to task notification in place of a binary semaphore! + * http://www.freertos.org/RTOS-task-notifications.html + * + * Internally, within the FreeRTOS implementation, binary semaphores use a block + * of memory, in which the semaphore structure is stored. If a binary semaphore + * is created using xSemaphoreCreateBinary() then the required memory is + * automatically dynamically allocated inside the xSemaphoreCreateBinary() + * function. (see http://www.freertos.org/a00111.html). If a binary semaphore + * is created using xSemaphoreCreateBinaryStatic() then the application writer + * must provide the memory. xSemaphoreCreateBinaryStatic() therefore allows a + * binary semaphore to be created without using any dynamic memory allocation. + * + * The old vSemaphoreCreateBinary() macro is now deprecated in favour of this + * xSemaphoreCreateBinary() function. Note that binary semaphores created using + * the vSemaphoreCreateBinary() macro are created in a state such that the + * first call to 'take' the semaphore would pass, whereas binary semaphores + * created using xSemaphoreCreateBinary() are created in a state such that the + * the semaphore must first be 'given' before it can be 'taken'. + * + * This type of semaphore can be used for pure synchronisation between tasks or + * between an interrupt and a task. The semaphore need not be given back once + * obtained, so one task/interrupt can continuously 'give' the semaphore while + * another continuously 'takes' the semaphore. For this reason this type of + * semaphore does not use a priority inheritance mechanism. For an alternative + * that does use priority inheritance see xSemaphoreCreateMutex(). + * + * @return Handle to the created semaphore, or NULL if the memory required to + * hold the semaphore's data structures could not be allocated. + * + * Example usage: +
+ SemaphoreHandle_t xSemaphore = NULL;
+
+ void vATask( void * pvParameters )
+ {
+    // Semaphore cannot be used before a call to xSemaphoreCreateBinary().
+    // The handle of the created semaphore is returned rather than passed in.
+    xSemaphore = xSemaphoreCreateBinary();
+
+    if( xSemaphore != NULL )
+    {
+        // The semaphore was created successfully.
+        // The semaphore can now be used.
+    }
+ }
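+
+ // In contrast to the deprecated macro above, a semaphore returned by
+ // xSemaphoreCreateBinary() starts 'empty', so it must be given before a take
+ // can succeed.  A minimal sketch (vSyncTask is an illustrative name):
+ void vSyncTask( void * pvParameters )
+ {
+    if( xSemaphore != NULL )
+    {
+        // A take at this point fails because nothing has given the
+        // semaphore yet.
+        if( xSemaphoreTake( xSemaphore, ( TickType_t ) 0 ) == pdFALSE )
+        {
+            // Give the semaphore; a subsequent take will now succeed.
+            xSemaphoreGive( xSemaphore );
+        }
+    }
+ }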
+ 
+ * \defgroup xSemaphoreCreateBinary xSemaphoreCreateBinary + * \ingroup Semaphores + */ +#if( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) + #define xSemaphoreCreateBinary() xQueueGenericCreate( ( UBaseType_t ) 1, semSEMAPHORE_QUEUE_ITEM_LENGTH, queueQUEUE_TYPE_BINARY_SEMAPHORE ) +#endif + +/** + * semphr. h + *
SemaphoreHandle_t xSemaphoreCreateBinaryStatic( StaticSemaphore_t *pxSemaphoreBuffer )
+ * + * Creates a new binary semaphore instance, and returns a handle by which the + * new semaphore can be referenced. + * + * NOTE: In many usage scenarios it is faster and more memory efficient to use a + * direct to task notification in place of a binary semaphore! + * http://www.freertos.org/RTOS-task-notifications.html + * + * Internally, within the FreeRTOS implementation, binary semaphores use a block + * of memory, in which the semaphore structure is stored. If a binary semaphore + * is created using xSemaphoreCreateBinary() then the required memory is + * automatically dynamically allocated inside the xSemaphoreCreateBinary() + * function. (see http://www.freertos.org/a00111.html). If a binary semaphore + * is created using xSemaphoreCreateBinaryStatic() then the application writer + * must provide the memory. xSemaphoreCreateBinaryStatic() therefore allows a + * binary semaphore to be created without using any dynamic memory allocation. + * + * This type of semaphore can be used for pure synchronisation between tasks or + * between an interrupt and a task. The semaphore need not be given back once + * obtained, so one task/interrupt can continuously 'give' the semaphore while + * another continuously 'takes' the semaphore. For this reason this type of + * semaphore does not use a priority inheritance mechanism. For an alternative + * that does use priority inheritance see xSemaphoreCreateMutex(). + * + * @param pxSemaphoreBuffer Must point to a variable of type StaticSemaphore_t, + * which will then be used to hold the semaphore's data structure, removing the + * need for the memory to be allocated dynamically. + * + * @return If the semaphore is created then a handle to the created semaphore is + * returned. If pxSemaphoreBuffer is NULL then NULL is returned. + * + * Example usage: +
+ SemaphoreHandle_t xSemaphore = NULL;
+ StaticSemaphore_t xSemaphoreBuffer;
+
+ void vATask( void * pvParameters )
+ {
+    // Semaphore cannot be used before a call to xSemaphoreCreateBinaryStatic().
+    // The semaphore's data structures will be placed in the xSemaphoreBuffer
+    // variable, the address of which is passed into the function.  The
+    // function's parameter is not NULL, so the function will not attempt any
+    // dynamic memory allocation, and therefore the function will not
+    // return NULL.
+    xSemaphore = xSemaphoreCreateBinaryStatic( &xSemaphoreBuffer );
+
+    // Rest of task code goes here.
+ }
+ 
+ * \defgroup xSemaphoreCreateBinaryStatic xSemaphoreCreateBinaryStatic + * \ingroup Semaphores + */ +#if( configSUPPORT_STATIC_ALLOCATION == 1 ) + #define xSemaphoreCreateBinaryStatic( pxStaticSemaphore ) xQueueGenericCreateStatic( ( UBaseType_t ) 1, semSEMAPHORE_QUEUE_ITEM_LENGTH, NULL, pxStaticSemaphore, queueQUEUE_TYPE_BINARY_SEMAPHORE ) +#endif /* configSUPPORT_STATIC_ALLOCATION */ + +/** + * semphr. h + *
xSemaphoreTake(
+ *                   SemaphoreHandle_t xSemaphore,
+ *                   TickType_t xBlockTime
+ *               )
+ * + * Macro to obtain a semaphore. The semaphore must have previously been + * created with a call to xSemaphoreCreateBinary(), xSemaphoreCreateMutex() or + * xSemaphoreCreateCounting(). + * + * @param xSemaphore A handle to the semaphore being taken - obtained when + * the semaphore was created. + * + * @param xBlockTime The time in ticks to wait for the semaphore to become + * available. The macro portTICK_PERIOD_MS can be used to convert this to a + * real time. A block time of zero can be used to poll the semaphore. A block + * time of portMAX_DELAY can be used to block indefinitely (provided + * INCLUDE_vTaskSuspend is set to 1 in FreeRTOSConfig.h). + * + * @return pdTRUE if the semaphore was obtained. pdFALSE + * if xBlockTime expired without the semaphore becoming available. + * + * Example usage: +
+ SemaphoreHandle_t xSemaphore = NULL;
+
+ // A task that creates a semaphore.
+ void vATask( void * pvParameters )
+ {
+    // Create the semaphore to guard a shared resource.
+    xSemaphore = xSemaphoreCreateBinary();
+ }
+
+ // A task that uses the semaphore.
+ void vAnotherTask( void * pvParameters )
+ {
+    // ... Do other things.
+
+    if( xSemaphore != NULL )
+    {
+        // See if we can obtain the semaphore.  If the semaphore is not available
+        // wait 10 ticks to see if it becomes free.
+        if( xSemaphoreTake( xSemaphore, ( TickType_t ) 10 ) == pdTRUE )
+        {
+            // We were able to obtain the semaphore and can now access the
+            // shared resource.
+
+            // ...
+
+            // We have finished accessing the shared resource.  Release the
+            // semaphore.
+            xSemaphoreGive( xSemaphore );
+        }
+        else
+        {
+            // We could not obtain the semaphore and can therefore not access
+            // the shared resource safely.
+        }
+    }
+ }
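+
+ // A sketch of blocking indefinitely rather than polling or timing out.  As
+ // noted above this requires INCLUDE_vTaskSuspend to be set to 1 in
+ // FreeRTOSConfig.h.  vBlockingTask is an illustrative name.
+ void vBlockingTask( void * pvParameters )
+ {
+    for( ;; )
+    {
+        if( xSemaphoreTake( xSemaphore, portMAX_DELAY ) == pdTRUE )
+        {
+            // The semaphore was obtained - access the shared resource, give
+            // the semaphore back, then loop to wait again.
+            xSemaphoreGive( xSemaphore );
+        }
+    }
+ }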
+ 
+ * \defgroup xSemaphoreTake xSemaphoreTake + * \ingroup Semaphores + */ +#define xSemaphoreTake( xSemaphore, xBlockTime ) xQueueSemaphoreTake( ( xSemaphore ), ( xBlockTime ) ) + +/** + * semphr. h + * xSemaphoreTakeRecursive( + * SemaphoreHandle_t xMutex, + * TickType_t xBlockTime + * ) + * + * Macro to recursively obtain, or 'take', a mutex type semaphore. + * The mutex must have previously been created using a call to + * xSemaphoreCreateRecursiveMutex(); + * + * configUSE_RECURSIVE_MUTEXES must be set to 1 in FreeRTOSConfig.h for this + * macro to be available. + * + * This macro must not be used on mutexes created using xSemaphoreCreateMutex(). + * + * A mutex used recursively can be 'taken' repeatedly by the owner. The mutex + * doesn't become available again until the owner has called + * xSemaphoreGiveRecursive() for each successful 'take' request. For example, + * if a task successfully 'takes' the same mutex 5 times then the mutex will + * not be available to any other task until it has also 'given' the mutex back + * exactly five times. + * + * @param xMutex A handle to the mutex being obtained. This is the + * handle returned by xSemaphoreCreateRecursiveMutex(); + * + * @param xBlockTime The time in ticks to wait for the semaphore to become + * available. The macro portTICK_PERIOD_MS can be used to convert this to a + * real time. A block time of zero can be used to poll the semaphore. If + * the task already owns the semaphore then xSemaphoreTakeRecursive() will + * return immediately no matter what the value of xBlockTime. + * + * @return pdTRUE if the semaphore was obtained. pdFALSE if xBlockTime + * expired without the semaphore becoming available. + * + * Example usage: +
+ SemaphoreHandle_t xMutex = NULL;
+
+ // A task that creates a mutex.
+ void vATask( void * pvParameters )
+ {
+    // Create the mutex to guard a shared resource.
+    xMutex = xSemaphoreCreateRecursiveMutex();
+ }
+
+ // A task that uses the mutex.
+ void vAnotherTask( void * pvParameters )
+ {
+    // ... Do other things.
+
+    if( xMutex != NULL )
+    {
+        // See if we can obtain the mutex.  If the mutex is not available
+        // wait 10 ticks to see if it becomes free.
+        if( xSemaphoreTakeRecursive( xMutex, ( TickType_t ) 10 ) == pdTRUE )
+        {
+            // We were able to obtain the mutex and can now access the
+            // shared resource.
+
+            // ...
+            // For some reason due to the nature of the code further calls to
+            // xSemaphoreTakeRecursive() are made on the same mutex.  In real
+            // code these would not be just sequential calls as this would make
+            // no sense.  Instead the calls are likely to be buried inside
+            // a more complex call structure.
+            xSemaphoreTakeRecursive( xMutex, ( TickType_t ) 10 );
+            xSemaphoreTakeRecursive( xMutex, ( TickType_t ) 10 );
+
+            // The mutex has now been 'taken' three times, so will not be
+            // available to another task until it has also been given back
+            // three times.  Again it is unlikely that real code would have
+            // these calls sequentially, but instead buried in a more complex
+            // call structure.  This is just for illustrative purposes.
+            xSemaphoreGiveRecursive( xMutex );
+            xSemaphoreGiveRecursive( xMutex );
+            xSemaphoreGiveRecursive( xMutex );
+
+            // Now the mutex can be taken by other tasks.
+        }
+        else
+        {
+            // We could not obtain the mutex and can therefore not access
+            // the shared resource safely.
+        }
+    }
+ }
+ 
+ * \defgroup xSemaphoreTakeRecursive xSemaphoreTakeRecursive + * \ingroup Semaphores + */ +#if( configUSE_RECURSIVE_MUTEXES == 1 ) + #define xSemaphoreTakeRecursive( xMutex, xBlockTime ) xQueueTakeMutexRecursive( ( xMutex ), ( xBlockTime ) ) +#endif + +/** + * semphr. h + *
xSemaphoreGive( SemaphoreHandle_t xSemaphore )
+ * + * Macro to release a semaphore. The semaphore must have previously been + * created with a call to xSemaphoreCreateBinary(), xSemaphoreCreateMutex() or + * xSemaphoreCreateCounting(), and obtained using xSemaphoreTake(). + * + * This macro must not be used from an ISR. See xSemaphoreGiveFromISR () for + * an alternative which can be used from an ISR. + * + * This macro must also not be used on semaphores created using + * xSemaphoreCreateRecursiveMutex(). + * + * @param xSemaphore A handle to the semaphore being released. This is the + * handle returned when the semaphore was created. + * + * @return pdTRUE if the semaphore was released. pdFALSE if an error occurred. + * Semaphores are implemented using queues. An error can occur if there is + * no space on the queue to post a message - indicating that the + * semaphore was not first obtained correctly. + * + * Example usage: +
+ SemaphoreHandle_t xSemaphore = NULL;
+
+ void vATask( void * pvParameters )
+ {
+    // Create the semaphore to guard a shared resource.
+    vSemaphoreCreateBinary( xSemaphore );
+
+    if( xSemaphore != NULL )
+    {
+        if( xSemaphoreGive( xSemaphore ) != pdTRUE )
+        {
+            // We would expect this call to fail because we cannot give
+            // a semaphore without first "taking" it!
+        }
+
+        // Obtain the semaphore - don't block if the semaphore is not
+        // immediately available.
+        if( xSemaphoreTake( xSemaphore, ( TickType_t ) 0 ) )
+        {
+            // We now have the semaphore and can access the shared resource.
+
+            // ...
+
+            // We have finished accessing the shared resource so can free the
+            // semaphore.
+            if( xSemaphoreGive( xSemaphore ) != pdTRUE )
+            {
+                // We would not expect this call to fail because we must have
+                // obtained the semaphore to get here.
+            }
+        }
+    }
+ }
+ 
+ * \defgroup xSemaphoreGive xSemaphoreGive + * \ingroup Semaphores + */ +#define xSemaphoreGive( xSemaphore ) xQueueGenericSend( ( QueueHandle_t ) ( xSemaphore ), NULL, semGIVE_BLOCK_TIME, queueSEND_TO_BACK ) + +/** + * semphr. h + *
xSemaphoreGiveRecursive( SemaphoreHandle_t xMutex )
+ * + * Macro to recursively release, or 'give', a mutex type semaphore. + * The mutex must have previously been created using a call to + * xSemaphoreCreateRecursiveMutex(); + * + * configUSE_RECURSIVE_MUTEXES must be set to 1 in FreeRTOSConfig.h for this + * macro to be available. + * + * This macro must not be used on mutexes created using xSemaphoreCreateMutex(). + * + * A mutex used recursively can be 'taken' repeatedly by the owner. The mutex + * doesn't become available again until the owner has called + * xSemaphoreGiveRecursive() for each successful 'take' request. For example, + * if a task successfully 'takes' the same mutex 5 times then the mutex will + * not be available to any other task until it has also 'given' the mutex back + * exactly five times. + * + * @param xMutex A handle to the mutex being released, or 'given'. This is the + * handle returned by xSemaphoreCreateMutex(); + * + * @return pdTRUE if the semaphore was given. + * + * Example usage: +
+ SemaphoreHandle_t xMutex = NULL;
+
+ // A task that creates a mutex.
+ void vATask( void * pvParameters )
+ {
+    // Create the mutex to guard a shared resource.
+    xMutex = xSemaphoreCreateRecursiveMutex();
+ }
+
+ // A task that uses the mutex.
+ void vAnotherTask( void * pvParameters )
+ {
+    // ... Do other things.
+
+    if( xMutex != NULL )
+    {
+        // See if we can obtain the mutex.  If the mutex is not available
+        // wait 10 ticks to see if it becomes free.
+        if( xSemaphoreTakeRecursive( xMutex, ( TickType_t ) 10 ) == pdTRUE )
+        {
+            // We were able to obtain the mutex and can now access the
+            // shared resource.
+
+            // ...
+            // For some reason due to the nature of the code further calls to
+            // xSemaphoreTakeRecursive() are made on the same mutex.  In real
+            // code these would not be just sequential calls as this would make
+            // no sense.  Instead the calls are likely to be buried inside
+            // a more complex call structure.
+            xSemaphoreTakeRecursive( xMutex, ( TickType_t ) 10 );
+            xSemaphoreTakeRecursive( xMutex, ( TickType_t ) 10 );
+
+            // The mutex has now been 'taken' three times, so will not be
+            // available to another task until it has also been given back
+            // three times.  Again it is unlikely that real code would have
+            // these calls sequentially, it would be more likely that the calls
+            // to xSemaphoreGiveRecursive() would be called as a call stack
+            // unwound.  This is just for demonstrative purposes.
+            xSemaphoreGiveRecursive( xMutex );
+            xSemaphoreGiveRecursive( xMutex );
+            xSemaphoreGiveRecursive( xMutex );
+
+            // Now the mutex can be taken by other tasks.
+        }
+        else
+        {
+            // We could not obtain the mutex and can therefore not access
+            // the shared resource safely.
+        }
+    }
+ }
+ 
+ * \defgroup xSemaphoreGiveRecursive xSemaphoreGiveRecursive + * \ingroup Semaphores + */ +#if( configUSE_RECURSIVE_MUTEXES == 1 ) + #define xSemaphoreGiveRecursive( xMutex ) xQueueGiveMutexRecursive( ( xMutex ) ) +#endif + +/** + * semphr. h + *
+ xSemaphoreGiveFromISR(
+                          SemaphoreHandle_t xSemaphore,
+                          BaseType_t *pxHigherPriorityTaskWoken
+                      )
+ * + * Macro to release a semaphore. The semaphore must have previously been + * created with a call to xSemaphoreCreateBinary() or xSemaphoreCreateCounting(). + * + * Mutex type semaphores (those created using a call to xSemaphoreCreateMutex()) + * must not be used with this macro. + * + * This macro can be used from an ISR. + * + * @param xSemaphore A handle to the semaphore being released. This is the + * handle returned when the semaphore was created. + * + * @param pxHigherPriorityTaskWoken xSemaphoreGiveFromISR() will set + * *pxHigherPriorityTaskWoken to pdTRUE if giving the semaphore caused a task + * to unblock, and the unblocked task has a priority higher than the currently + * running task. If xSemaphoreGiveFromISR() sets this value to pdTRUE then + * a context switch should be requested before the interrupt is exited. + * + * @return pdTRUE if the semaphore was successfully given, otherwise errQUEUE_FULL. + * + * Example usage: +
+ \#define LONG_TIME 0xffff
+ \#define TICKS_TO_WAIT	10
+ SemaphoreHandle_t xSemaphore = NULL;
+
+ // Repetitive task.
+ void vATask( void * pvParameters )
+ {
+    for( ;; )
+    {
+        // We want this task to run every 10 ticks of a timer.  The semaphore
+        // was created before this task was started.
+
+        // Block waiting for the semaphore to become available.
+        if( xSemaphoreTake( xSemaphore, LONG_TIME ) == pdTRUE )
+        {
+            // It is time to execute.
+
+            // ...
+
+            // We have finished our task.  Return to the top of the loop where
+            // we will block on the semaphore until it is time to execute
+            // again.  Note when using the semaphore for synchronisation with an
+			// ISR in this manner there is no need to 'give' the semaphore back.
+        }
+    }
+ }
+
+ // Timer ISR
+ void vTimerISR( void * pvParameters )
+ {
+ static uint8_t ucLocalTickCount = 0;
+ static BaseType_t xHigherPriorityTaskWoken;
+
+    // A timer tick has occurred.
+
+    // ... Do other time functions.
+
+    // Is it time for vATask () to run?
+	xHigherPriorityTaskWoken = pdFALSE;
+    ucLocalTickCount++;
+    if( ucLocalTickCount >= TICKS_TO_WAIT )
+    {
+        // Unblock the task by releasing the semaphore.
+        xSemaphoreGiveFromISR( xSemaphore, &xHigherPriorityTaskWoken );
+
+        // Reset the count so we release the semaphore again in 10 ticks time.
+        ucLocalTickCount = 0;
+    }
+
+    if( xHigherPriorityTaskWoken != pdFALSE )
+    {
+        // We can force a context switch here.  Context switching from an
+        // ISR uses port specific syntax.  Check the demo task for your port
+        // to find the syntax required.
+    }
+ }
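
The comment above leaves the context-switch step port specific.  As an illustrative sketch only (not from the vendored header), on the ARM Cortex-M ports targeted by this repository that step is normally a single macro call; the ISR name below is hypothetical and the exact macro (portYIELD_FROM_ISR() on most Cortex-M ports, portEND_SWITCHING_ISR() on some others) should be confirmed against the port in use.

 // Sketch: end of a deferred-interrupt ISR on a typical Cortex-M port.
 void vAnotherTimerISR( void )
 {
 BaseType_t xHigherPriorityTaskWoken = pdFALSE;

    // Unblock the handler task, exactly as in the example above.
    xSemaphoreGiveFromISR( xSemaphore, &xHigherPriorityTaskWoken );

    // Request the context switch before leaving the ISR; on Cortex-M this
    // pends the PendSV interrupt.  The macro name is port specific.
    portYIELD_FROM_ISR( xHigherPriorityTaskWoken );
 }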
+ 
+ * \defgroup xSemaphoreGiveFromISR xSemaphoreGiveFromISR
+ * \ingroup Semaphores
+ */
+#define xSemaphoreGiveFromISR( xSemaphore, pxHigherPriorityTaskWoken )    xQueueGiveFromISR( ( QueueHandle_t ) ( xSemaphore ), ( pxHigherPriorityTaskWoken ) )
+
+/**
+ * semphr. h
+ *
+ xSemaphoreTakeFromISR(
+                          SemaphoreHandle_t xSemaphore,
+                          BaseType_t *pxHigherPriorityTaskWoken
+                      )
+ * + * Macro to take a semaphore from an ISR. The semaphore must have + * previously been created with a call to xSemaphoreCreateBinary() or + * xSemaphoreCreateCounting(). + * + * Mutex type semaphores (those created using a call to xSemaphoreCreateMutex()) + * must not be used with this macro. + * + * This macro can be used from an ISR, however taking a semaphore from an ISR + * is not a common operation. It is likely to only be useful when taking a + * counting semaphore when an interrupt is obtaining an object from a resource + * pool (when the semaphore count indicates the number of resources available). + * + * @param xSemaphore A handle to the semaphore being taken. This is the + * handle returned when the semaphore was created. + * + * @param pxHigherPriorityTaskWoken xSemaphoreTakeFromISR() will set + * *pxHigherPriorityTaskWoken to pdTRUE if taking the semaphore caused a task + * to unblock, and the unblocked task has a priority higher than the currently + * running task. If xSemaphoreTakeFromISR() sets this value to pdTRUE then + * a context switch should be requested before the interrupt is exited. + * + * @return pdTRUE if the semaphore was successfully taken, otherwise + * pdFALSE + */ +#define xSemaphoreTakeFromISR( xSemaphore, pxHigherPriorityTaskWoken ) xQueueReceiveFromISR( ( QueueHandle_t ) ( xSemaphore ), NULL, ( pxHigherPriorityTaskWoken ) ) + +/** + * semphr. h + *
SemaphoreHandle_t xSemaphoreCreateMutex( void )
+ * + * Creates a new mutex type semaphore instance, and returns a handle by which + * the new mutex can be referenced. + * + * Internally, within the FreeRTOS implementation, mutex semaphores use a block + * of memory, in which the mutex structure is stored. If a mutex is created + * using xSemaphoreCreateMutex() then the required memory is automatically + * dynamically allocated inside the xSemaphoreCreateMutex() function. (see + * http://www.freertos.org/a00111.html). If a mutex is created using + * xSemaphoreCreateMutexStatic() then the application writer must provided the + * memory. xSemaphoreCreateMutexStatic() therefore allows a mutex to be created + * without using any dynamic memory allocation. + * + * Mutexes created using this function can be accessed using the xSemaphoreTake() + * and xSemaphoreGive() macros. The xSemaphoreTakeRecursive() and + * xSemaphoreGiveRecursive() macros must not be used. + * + * This type of semaphore uses a priority inheritance mechanism so a task + * 'taking' a semaphore MUST ALWAYS 'give' the semaphore back once the + * semaphore it is no longer required. + * + * Mutex type semaphores cannot be used from within interrupt service routines. + * + * See xSemaphoreCreateBinary() for an alternative implementation that can be + * used for pure synchronisation (where one task or interrupt always 'gives' the + * semaphore and another always 'takes' the semaphore) and from within interrupt + * service routines. + * + * @return If the mutex was successfully created then a handle to the created + * semaphore is returned. If there was not enough heap to allocate the mutex + * data structures then NULL is returned. + * + * Example usage: +
+ SemaphoreHandle_t xSemaphore;
+
+ void vATask( void * pvParameters )
+ {
+    // Semaphore cannot be used before a call to xSemaphoreCreateMutex().
+    // This is a macro so pass the variable in directly.
+    xSemaphore = xSemaphoreCreateMutex();
+
+    if( xSemaphore != NULL )
+    {
+        // The semaphore was created successfully.
+        // The semaphore can now be used.
+    }
+ }
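
The example above stops once the mutex has been created.  As a minimal sketch (not part of the header), this is how the created mutex would then typically guard a shared resource; the 10 tick block time is an arbitrary assumption.

 void vAnotherTask( void * pvParameters )
 {
    for( ;; )
    {
        // Obtain the mutex created in vATask(), waiting up to 10 ticks.
        if( xSemaphoreTake( xSemaphore, ( TickType_t ) 10 ) == pdTRUE )
        {
            // Access the resource protected by the mutex here.

            // The holder must always give the mutex back when finished.
            xSemaphoreGive( xSemaphore );
        }
        else
        {
            // The mutex was not obtained within 10 ticks, so the resource
            // must not be touched.
        }
    }
 }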
+ 
+ * \defgroup xSemaphoreCreateMutex xSemaphoreCreateMutex
+ * \ingroup Semaphores
+ */
+#if( configSUPPORT_DYNAMIC_ALLOCATION == 1 )
+    #define xSemaphoreCreateMutex()    xQueueCreateMutex( queueQUEUE_TYPE_MUTEX )
+#endif
+
+/**
+ * semphr. h
+ *
SemaphoreHandle_t xSemaphoreCreateMutexStatic( StaticSemaphore_t *pxMutexBuffer )
+ * + * Creates a new mutex type semaphore instance, and returns a handle by which + * the new mutex can be referenced. + * + * Internally, within the FreeRTOS implementation, mutex semaphores use a block + * of memory, in which the mutex structure is stored. If a mutex is created + * using xSemaphoreCreateMutex() then the required memory is automatically + * dynamically allocated inside the xSemaphoreCreateMutex() function. (see + * http://www.freertos.org/a00111.html). If a mutex is created using + * xSemaphoreCreateMutexStatic() then the application writer must provided the + * memory. xSemaphoreCreateMutexStatic() therefore allows a mutex to be created + * without using any dynamic memory allocation. + * + * Mutexes created using this function can be accessed using the xSemaphoreTake() + * and xSemaphoreGive() macros. The xSemaphoreTakeRecursive() and + * xSemaphoreGiveRecursive() macros must not be used. + * + * This type of semaphore uses a priority inheritance mechanism so a task + * 'taking' a semaphore MUST ALWAYS 'give' the semaphore back once the + * semaphore it is no longer required. + * + * Mutex type semaphores cannot be used from within interrupt service routines. + * + * See xSemaphoreCreateBinary() for an alternative implementation that can be + * used for pure synchronisation (where one task or interrupt always 'gives' the + * semaphore and another always 'takes' the semaphore) and from within interrupt + * service routines. + * + * @param pxMutexBuffer Must point to a variable of type StaticSemaphore_t, + * which will be used to hold the mutex's data structure, removing the need for + * the memory to be allocated dynamically. + * + * @return If the mutex was successfully created then a handle to the created + * mutex is returned. If pxMutexBuffer was NULL then NULL is returned. + * + * Example usage: +
+ SemaphoreHandle_t xSemaphore;
+ StaticSemaphore_t xMutexBuffer;
+
+ void vATask( void * pvParameters )
+ {
+    // A mutex cannot be used before it has been created.  xMutexBuffer is
+    // passed into xSemaphoreCreateMutexStatic() so no dynamic memory allocation is
+    // attempted.
+    xSemaphore = xSemaphoreCreateMutexStatic( &xMutexBuffer );
+
+    // As no dynamic memory allocation was performed, xSemaphore cannot be NULL,
+    // so there is no need to check it.
+ }
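
xSemaphoreGetMutexHolder(), documented further down in this header, has no example of its own; a brief sketch using the statically allocated mutex created above follows.  It assumes task.h is included and that the relevant configuration options (configUSE_MUTEXES, INCLUDE_xTaskGetCurrentTaskHandle) are enabled in FreeRTOSConfig.h.

 void vCheckOwnership( void )
 {
    // Reliable only for the 'is the calling task the holder?' test, as noted
    // in the xSemaphoreGetMutexHolder() description.
    if( xSemaphoreGetMutexHolder( xSemaphore ) == xTaskGetCurrentTaskHandle() )
    {
        // The calling task currently holds the mutex.
    }
 }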
+ 
+ * \defgroup xSemaphoreCreateMutexStatic xSemaphoreCreateMutexStatic
+ * \ingroup Semaphores
+ */
+ #if( configSUPPORT_STATIC_ALLOCATION == 1 )
+    #define xSemaphoreCreateMutexStatic( pxMutexBuffer )    xQueueCreateMutexStatic( queueQUEUE_TYPE_MUTEX, ( pxMutexBuffer ) )
+#endif /* configSUPPORT_STATIC_ALLOCATION */
+
+
+/**
+ * semphr. h
+ *
SemaphoreHandle_t xSemaphoreCreateRecursiveMutex( void )
+ * + * Creates a new recursive mutex type semaphore instance, and returns a handle + * by which the new recursive mutex can be referenced. + * + * Internally, within the FreeRTOS implementation, recursive mutexs use a block + * of memory, in which the mutex structure is stored. If a recursive mutex is + * created using xSemaphoreCreateRecursiveMutex() then the required memory is + * automatically dynamically allocated inside the + * xSemaphoreCreateRecursiveMutex() function. (see + * http://www.freertos.org/a00111.html). If a recursive mutex is created using + * xSemaphoreCreateRecursiveMutexStatic() then the application writer must + * provide the memory that will get used by the mutex. + * xSemaphoreCreateRecursiveMutexStatic() therefore allows a recursive mutex to + * be created without using any dynamic memory allocation. + * + * Mutexes created using this macro can be accessed using the + * xSemaphoreTakeRecursive() and xSemaphoreGiveRecursive() macros. The + * xSemaphoreTake() and xSemaphoreGive() macros must not be used. + * + * A mutex used recursively can be 'taken' repeatedly by the owner. The mutex + * doesn't become available again until the owner has called + * xSemaphoreGiveRecursive() for each successful 'take' request. For example, + * if a task successfully 'takes' the same mutex 5 times then the mutex will + * not be available to any other task until it has also 'given' the mutex back + * exactly five times. + * + * This type of semaphore uses a priority inheritance mechanism so a task + * 'taking' a semaphore MUST ALWAYS 'give' the semaphore back once the + * semaphore it is no longer required. + * + * Mutex type semaphores cannot be used from within interrupt service routines. + * + * See xSemaphoreCreateBinary() for an alternative implementation that can be + * used for pure synchronisation (where one task or interrupt always 'gives' the + * semaphore and another always 'takes' the semaphore) and from within interrupt + * service routines. + * + * @return xSemaphore Handle to the created mutex semaphore. Should be of type + * SemaphoreHandle_t. + * + * Example usage: +
+ SemaphoreHandle_t xSemaphore;
+
+ void vATask( void * pvParameters )
+ {
+    // Semaphore cannot be used before a call to xSemaphoreCreateRecursiveMutex().
+    // This is a macro so pass the variable in directly.
+    xSemaphore = xSemaphoreCreateRecursiveMutex();
+
+    if( xSemaphore != NULL )
+    {
+        // The semaphore was created successfully.
+        // The semaphore can now be used.
+    }
+ }
+ 
+ * \defgroup xSemaphoreCreateRecursiveMutex xSemaphoreCreateRecursiveMutex
+ * \ingroup Semaphores
+ */
+#if( ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) && ( configUSE_RECURSIVE_MUTEXES == 1 ) )
+    #define xSemaphoreCreateRecursiveMutex()    xQueueCreateMutex( queueQUEUE_TYPE_RECURSIVE_MUTEX )
+#endif
+
+/**
+ * semphr. h
+ *
SemaphoreHandle_t xSemaphoreCreateRecursiveMutexStatic( StaticSemaphore_t *pxMutexBuffer )
+ * + * Creates a new recursive mutex type semaphore instance, and returns a handle + * by which the new recursive mutex can be referenced. + * + * Internally, within the FreeRTOS implementation, recursive mutexs use a block + * of memory, in which the mutex structure is stored. If a recursive mutex is + * created using xSemaphoreCreateRecursiveMutex() then the required memory is + * automatically dynamically allocated inside the + * xSemaphoreCreateRecursiveMutex() function. (see + * http://www.freertos.org/a00111.html). If a recursive mutex is created using + * xSemaphoreCreateRecursiveMutexStatic() then the application writer must + * provide the memory that will get used by the mutex. + * xSemaphoreCreateRecursiveMutexStatic() therefore allows a recursive mutex to + * be created without using any dynamic memory allocation. + * + * Mutexes created using this macro can be accessed using the + * xSemaphoreTakeRecursive() and xSemaphoreGiveRecursive() macros. The + * xSemaphoreTake() and xSemaphoreGive() macros must not be used. + * + * A mutex used recursively can be 'taken' repeatedly by the owner. The mutex + * doesn't become available again until the owner has called + * xSemaphoreGiveRecursive() for each successful 'take' request. For example, + * if a task successfully 'takes' the same mutex 5 times then the mutex will + * not be available to any other task until it has also 'given' the mutex back + * exactly five times. + * + * This type of semaphore uses a priority inheritance mechanism so a task + * 'taking' a semaphore MUST ALWAYS 'give' the semaphore back once the + * semaphore it is no longer required. + * + * Mutex type semaphores cannot be used from within interrupt service routines. + * + * See xSemaphoreCreateBinary() for an alternative implementation that can be + * used for pure synchronisation (where one task or interrupt always 'gives' the + * semaphore and another always 'takes' the semaphore) and from within interrupt + * service routines. + * + * @param pxMutexBuffer Must point to a variable of type StaticSemaphore_t, + * which will then be used to hold the recursive mutex's data structure, + * removing the need for the memory to be allocated dynamically. + * + * @return If the recursive mutex was successfully created then a handle to the + * created recursive mutex is returned. If pxMutexBuffer was NULL then NULL is + * returned. + * + * Example usage: +
+ SemaphoreHandle_t xSemaphore;
+ StaticSemaphore_t xMutexBuffer;
+
+ void vATask( void * pvParameters )
+ {
+    // A recursive semaphore cannot be used before it is created.  Here a
+    // recursive mutex is created using xSemaphoreCreateRecursiveMutexStatic().
+    // The address of xMutexBuffer is passed into the function, and will hold
+    // the mutex's data structure - so no dynamic memory allocation will be
+    // attempted.
+    xSemaphore = xSemaphoreCreateRecursiveMutexStatic( &xMutexBuffer );
+
+    // As no dynamic memory allocation was performed, xSemaphore cannot be NULL,
+    // so there is no need to check it.
+ }
+ 
+ * \defgroup xSemaphoreCreateRecursiveMutexStatic xSemaphoreCreateRecursiveMutexStatic
+ * \ingroup Semaphores
+ */
+#if( ( configSUPPORT_STATIC_ALLOCATION == 1 ) && ( configUSE_RECURSIVE_MUTEXES == 1 ) )
+    #define xSemaphoreCreateRecursiveMutexStatic( pxStaticSemaphore )    xQueueCreateMutexStatic( queueQUEUE_TYPE_RECURSIVE_MUTEX, pxStaticSemaphore )
+#endif /* configSUPPORT_STATIC_ALLOCATION */
+
+/**
+ * semphr. h
+ *
SemaphoreHandle_t xSemaphoreCreateCounting( UBaseType_t uxMaxCount, UBaseType_t uxInitialCount )
+ * + * Creates a new counting semaphore instance, and returns a handle by which the + * new counting semaphore can be referenced. + * + * In many usage scenarios it is faster and more memory efficient to use a + * direct to task notification in place of a counting semaphore! + * http://www.freertos.org/RTOS-task-notifications.html + * + * Internally, within the FreeRTOS implementation, counting semaphores use a + * block of memory, in which the counting semaphore structure is stored. If a + * counting semaphore is created using xSemaphoreCreateCounting() then the + * required memory is automatically dynamically allocated inside the + * xSemaphoreCreateCounting() function. (see + * http://www.freertos.org/a00111.html). If a counting semaphore is created + * using xSemaphoreCreateCountingStatic() then the application writer can + * instead optionally provide the memory that will get used by the counting + * semaphore. xSemaphoreCreateCountingStatic() therefore allows a counting + * semaphore to be created without using any dynamic memory allocation. + * + * Counting semaphores are typically used for two things: + * + * 1) Counting events. + * + * In this usage scenario an event handler will 'give' a semaphore each time + * an event occurs (incrementing the semaphore count value), and a handler + * task will 'take' a semaphore each time it processes an event + * (decrementing the semaphore count value). The count value is therefore + * the difference between the number of events that have occurred and the + * number that have been processed. In this case it is desirable for the + * initial count value to be zero. + * + * 2) Resource management. + * + * In this usage scenario the count value indicates the number of resources + * available. To obtain control of a resource a task must first obtain a + * semaphore - decrementing the semaphore count value. When the count value + * reaches zero there are no free resources. When a task finishes with the + * resource it 'gives' the semaphore back - incrementing the semaphore count + * value. In this case it is desirable for the initial count value to be + * equal to the maximum count value, indicating that all resources are free. + * + * @param uxMaxCount The maximum count value that can be reached. When the + * semaphore reaches this value it can no longer be 'given'. + * + * @param uxInitialCount The count value assigned to the semaphore when it is + * created. + * + * @return Handle to the created semaphore. Null if the semaphore could not be + * created. + * + * Example usage: +
+ SemaphoreHandle_t xSemaphore;
+
+ void vATask( void * pvParameters )
+ {
+ SemaphoreHandle_t xSemaphore = NULL;
+
+    // Semaphore cannot be used before a call to xSemaphoreCreateCounting().
+    // The max value to which the semaphore can count should be 10, and the
+    // initial value assigned to the count should be 0.
+    xSemaphore = xSemaphoreCreateCounting( 10, 0 );
+
+    if( xSemaphore != NULL )
+    {
+        // The semaphore was created successfully.
+        // The semaphore can now be used.
+    }
+ }
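
The example above only creates the counting semaphore.  A sketch of the 'counting events' usage described earlier follows; the ISR and task names are illustrative, and portYIELD_FROM_ISR() is a port-specific assumption.

 void vEventISR( void )
 {
 BaseType_t xHigherPriorityTaskWoken = pdFALSE;

    // Record one more pending event (increments the count, up to uxMaxCount).
    xSemaphoreGiveFromISR( xSemaphore, &xHigherPriorityTaskWoken );
    portYIELD_FROM_ISR( xHigherPriorityTaskWoken );
 }

 void vHandlerTask( void * pvParameters )
 {
    for( ;; )
    {
        // Block until at least one event is pending, then process one event
        // per successful take (each take decrements the count).
        if( xSemaphoreTake( xSemaphore, portMAX_DELAY ) == pdTRUE )
        {
            // ... process a single event here ...
        }
    }
 }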
+ 
+ * \defgroup xSemaphoreCreateCounting xSemaphoreCreateCounting
+ * \ingroup Semaphores
+ */
+#if( configSUPPORT_DYNAMIC_ALLOCATION == 1 )
+    #define xSemaphoreCreateCounting( uxMaxCount, uxInitialCount )    xQueueCreateCountingSemaphore( ( uxMaxCount ), ( uxInitialCount ) )
+#endif
+
+/**
+ * semphr. h
+ *
SemaphoreHandle_t xSemaphoreCreateCountingStatic( UBaseType_t uxMaxCount, UBaseType_t uxInitialCount, StaticSemaphore_t *pxSemaphoreBuffer )
+ * + * Creates a new counting semaphore instance, and returns a handle by which the + * new counting semaphore can be referenced. + * + * In many usage scenarios it is faster and more memory efficient to use a + * direct to task notification in place of a counting semaphore! + * http://www.freertos.org/RTOS-task-notifications.html + * + * Internally, within the FreeRTOS implementation, counting semaphores use a + * block of memory, in which the counting semaphore structure is stored. If a + * counting semaphore is created using xSemaphoreCreateCounting() then the + * required memory is automatically dynamically allocated inside the + * xSemaphoreCreateCounting() function. (see + * http://www.freertos.org/a00111.html). If a counting semaphore is created + * using xSemaphoreCreateCountingStatic() then the application writer must + * provide the memory. xSemaphoreCreateCountingStatic() therefore allows a + * counting semaphore to be created without using any dynamic memory allocation. + * + * Counting semaphores are typically used for two things: + * + * 1) Counting events. + * + * In this usage scenario an event handler will 'give' a semaphore each time + * an event occurs (incrementing the semaphore count value), and a handler + * task will 'take' a semaphore each time it processes an event + * (decrementing the semaphore count value). The count value is therefore + * the difference between the number of events that have occurred and the + * number that have been processed. In this case it is desirable for the + * initial count value to be zero. + * + * 2) Resource management. + * + * In this usage scenario the count value indicates the number of resources + * available. To obtain control of a resource a task must first obtain a + * semaphore - decrementing the semaphore count value. When the count value + * reaches zero there are no free resources. When a task finishes with the + * resource it 'gives' the semaphore back - incrementing the semaphore count + * value. In this case it is desirable for the initial count value to be + * equal to the maximum count value, indicating that all resources are free. + * + * @param uxMaxCount The maximum count value that can be reached. When the + * semaphore reaches this value it can no longer be 'given'. + * + * @param uxInitialCount The count value assigned to the semaphore when it is + * created. + * + * @param pxSemaphoreBuffer Must point to a variable of type StaticSemaphore_t, + * which will then be used to hold the semaphore's data structure, removing the + * need for the memory to be allocated dynamically. + * + * @return If the counting semaphore was successfully created then a handle to + * the created counting semaphore is returned. If pxSemaphoreBuffer was NULL + * then NULL is returned. + * + * Example usage: +
+ SemaphoreHandle_t xSemaphore;
+ StaticSemaphore_t xSemaphoreBuffer;
+
+ void vATask( void * pvParameters )
+ {
+ SemaphoreHandle_t xSemaphore = NULL;
+
+    // Counting semaphores cannot be used before they have been created.  Create
+    // a counting semaphore using xSemaphoreCreateCountingStatic().  The max
+    // value to which the semaphore can count is 10, and the initial value
+    // assigned to the count will be 0.  The address of xSemaphoreBuffer is
+    // passed in and will be used to hold the semaphore structure, so no dynamic
+    // memory allocation will be used.
+    xSemaphore = xSemaphoreCreateCountingStatic( 10, 0, &xSemaphoreBuffer );
+
+    // No memory allocation was attempted so xSemaphore cannot be NULL, so there
+    // is no need to check its value.
+ }
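
For the 'resource management' usage described earlier, the semaphore would instead be created with uxInitialCount equal to uxMaxCount so that all resources start out free.  A minimal sketch follows; prvUseResource() is a hypothetical placeholder for whatever work is done with the acquired resource.

 void vWorkerTask( void * pvParameters )
 {
    for( ;; )
    {
        // Acquire a resource: decrements the count, blocking while the count
        // is zero (no resources free).
        if( xSemaphoreTake( xSemaphore, portMAX_DELAY ) == pdTRUE )
        {
            prvUseResource();

            // Release the resource: increments the count again.
            xSemaphoreGive( xSemaphore );
        }
    }
 }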
+ 
+ * \defgroup xSemaphoreCreateCountingStatic xSemaphoreCreateCountingStatic
+ * \ingroup Semaphores
+ */
+#if( configSUPPORT_STATIC_ALLOCATION == 1 )
+    #define xSemaphoreCreateCountingStatic( uxMaxCount, uxInitialCount, pxSemaphoreBuffer )    xQueueCreateCountingSemaphoreStatic( ( uxMaxCount ), ( uxInitialCount ), ( pxSemaphoreBuffer ) )
+#endif /* configSUPPORT_STATIC_ALLOCATION */
+
+/**
+ * semphr. h
+ *
void vSemaphoreDelete( SemaphoreHandle_t xSemaphore );
+ *
+ * Delete a semaphore.  This function must be used with care.  For example,
+ * do not delete a mutex type semaphore if the mutex is held by a task.
+ *
+ * @param xSemaphore A handle to the semaphore to be deleted.
+ *
+ * \defgroup vSemaphoreDelete vSemaphoreDelete
+ * \ingroup Semaphores
+ */
+#define vSemaphoreDelete( xSemaphore ) vQueueDelete( ( QueueHandle_t ) ( xSemaphore ) )
+
+/**
+ * semphr.h
+ *
TaskHandle_t xSemaphoreGetMutexHolder( SemaphoreHandle_t xMutex );
+ *
+ * If xMutex is indeed a mutex type semaphore, return the current mutex holder.
+ * If xMutex is not a mutex type semaphore, or the mutex is available (not held
+ * by a task), return NULL.
+ *
+ * Note: This is a good way of determining if the calling task is the mutex
+ * holder, but not a good way of determining the identity of the mutex holder as
+ * the holder may change between the function exiting and the returned value
+ * being tested.
+ */
+#define xSemaphoreGetMutexHolder( xSemaphore ) xQueueGetMutexHolder( ( xSemaphore ) )
+
+/**
+ * semphr.h
+ *
TaskHandle_t xSemaphoreGetMutexHolderFromISR( SemaphoreHandle_t xMutex );
+ *
+ * If xMutex is indeed a mutex type semaphore, return the current mutex holder.
+ * If xMutex is not a mutex type semaphore, or the mutex is available (not held
+ * by a task), return NULL.
+ *
+ */
+#define xSemaphoreGetMutexHolderFromISR( xSemaphore ) xQueueGetMutexHolderFromISR( ( xSemaphore ) )
+
+/**
+ * semphr.h
+ *
UBaseType_t uxSemaphoreGetCount( SemaphoreHandle_t xSemaphore );
+ * + * If the semaphore is a counting semaphore then uxSemaphoreGetCount() returns + * its current count value. If the semaphore is a binary semaphore then + * uxSemaphoreGetCount() returns 1 if the semaphore is available, and 0 if the + * semaphore is not available. + * + */ +#define uxSemaphoreGetCount( xSemaphore ) uxQueueMessagesWaiting( ( QueueHandle_t ) ( xSemaphore ) ) + +#endif /* SEMAPHORE_H */ + + diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/FreeRTOS/Source/include/stack_macros.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/FreeRTOS/Source/include/stack_macros.h new file mode 100644 index 0000000..fb5ed15 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/FreeRTOS/Source/include/stack_macros.h @@ -0,0 +1,129 @@ +/* + * FreeRTOS Kernel V10.2.1 + * Copyright (C) 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * http://www.FreeRTOS.org + * http://aws.amazon.com/freertos + * + * 1 tab == 4 spaces! + */ + +#ifndef STACK_MACROS_H +#define STACK_MACROS_H + +/* + * Call the stack overflow hook function if the stack of the task being swapped + * out is currently overflowed, or looks like it might have overflowed in the + * past. + * + * Setting configCHECK_FOR_STACK_OVERFLOW to 1 will cause the macro to check + * the current stack state only - comparing the current top of stack value to + * the stack limit. Setting configCHECK_FOR_STACK_OVERFLOW to greater than 1 + * will also cause the last few stack bytes to be checked to ensure the value + * to which the bytes were set when the task was created have not been + * overwritten. Note this second test does not guarantee that an overflowed + * stack will always be recognised. + */ + +/*-----------------------------------------------------------*/ + +#if( ( configCHECK_FOR_STACK_OVERFLOW == 1 ) && ( portSTACK_GROWTH < 0 ) ) + + /* Only the current stack state is to be checked. */ + #define taskCHECK_FOR_STACK_OVERFLOW() \ + { \ + /* Is the currently saved stack pointer within the stack limit? 
*/ \ + if( pxCurrentTCB->pxTopOfStack <= pxCurrentTCB->pxStack ) \ + { \ + vApplicationStackOverflowHook( ( TaskHandle_t ) pxCurrentTCB, pxCurrentTCB->pcTaskName ); \ + } \ + } + +#endif /* configCHECK_FOR_STACK_OVERFLOW == 1 */ +/*-----------------------------------------------------------*/ + +#if( ( configCHECK_FOR_STACK_OVERFLOW == 1 ) && ( portSTACK_GROWTH > 0 ) ) + + /* Only the current stack state is to be checked. */ + #define taskCHECK_FOR_STACK_OVERFLOW() \ + { \ + \ + /* Is the currently saved stack pointer within the stack limit? */ \ + if( pxCurrentTCB->pxTopOfStack >= pxCurrentTCB->pxEndOfStack ) \ + { \ + vApplicationStackOverflowHook( ( TaskHandle_t ) pxCurrentTCB, pxCurrentTCB->pcTaskName ); \ + } \ + } + +#endif /* configCHECK_FOR_STACK_OVERFLOW == 1 */ +/*-----------------------------------------------------------*/ + +#if( ( configCHECK_FOR_STACK_OVERFLOW > 1 ) && ( portSTACK_GROWTH < 0 ) ) + + #define taskCHECK_FOR_STACK_OVERFLOW() \ + { \ + const uint32_t * const pulStack = ( uint32_t * ) pxCurrentTCB->pxStack; \ + const uint32_t ulCheckValue = ( uint32_t ) 0xa5a5a5a5; \ + \ + if( ( pulStack[ 0 ] != ulCheckValue ) || \ + ( pulStack[ 1 ] != ulCheckValue ) || \ + ( pulStack[ 2 ] != ulCheckValue ) || \ + ( pulStack[ 3 ] != ulCheckValue ) ) \ + { \ + vApplicationStackOverflowHook( ( TaskHandle_t ) pxCurrentTCB, pxCurrentTCB->pcTaskName ); \ + } \ + } + +#endif /* #if( configCHECK_FOR_STACK_OVERFLOW > 1 ) */ +/*-----------------------------------------------------------*/ + +#if( ( configCHECK_FOR_STACK_OVERFLOW > 1 ) && ( portSTACK_GROWTH > 0 ) ) + + #define taskCHECK_FOR_STACK_OVERFLOW() \ + { \ + int8_t *pcEndOfStack = ( int8_t * ) pxCurrentTCB->pxEndOfStack; \ + static const uint8_t ucExpectedStackBytes[] = { tskSTACK_FILL_BYTE, tskSTACK_FILL_BYTE, tskSTACK_FILL_BYTE, tskSTACK_FILL_BYTE, \ + tskSTACK_FILL_BYTE, tskSTACK_FILL_BYTE, tskSTACK_FILL_BYTE, tskSTACK_FILL_BYTE, \ + tskSTACK_FILL_BYTE, tskSTACK_FILL_BYTE, tskSTACK_FILL_BYTE, tskSTACK_FILL_BYTE, \ + tskSTACK_FILL_BYTE, tskSTACK_FILL_BYTE, tskSTACK_FILL_BYTE, tskSTACK_FILL_BYTE, \ + tskSTACK_FILL_BYTE, tskSTACK_FILL_BYTE, tskSTACK_FILL_BYTE, tskSTACK_FILL_BYTE }; \ + \ + \ + pcEndOfStack -= sizeof( ucExpectedStackBytes ); \ + \ + /* Has the extremity of the task stack ever been written over? */ \ + if( memcmp( ( void * ) pcEndOfStack, ( void * ) ucExpectedStackBytes, sizeof( ucExpectedStackBytes ) ) != 0 ) \ + { \ + vApplicationStackOverflowHook( ( TaskHandle_t ) pxCurrentTCB, pxCurrentTCB->pcTaskName ); \ + } \ + } + +#endif /* #if( configCHECK_FOR_STACK_OVERFLOW > 1 ) */ +/*-----------------------------------------------------------*/ + +/* Remove stack overflow macro if not being used. */ +#ifndef taskCHECK_FOR_STACK_OVERFLOW + #define taskCHECK_FOR_STACK_OVERFLOW() +#endif + + + +#endif /* STACK_MACROS_H */ + diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/FreeRTOS/Source/include/stream_buffer.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/FreeRTOS/Source/include/stream_buffer.h new file mode 100644 index 0000000..5f9e012 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/FreeRTOS/Source/include/stream_buffer.h @@ -0,0 +1,855 @@ +/* + * FreeRTOS Kernel V10.2.1 + * Copyright (C) 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * http://www.FreeRTOS.org + * http://aws.amazon.com/freertos + * + * 1 tab == 4 spaces! + */ + +/* + * Stream buffers are used to send a continuous stream of data from one task or + * interrupt to another. Their implementation is light weight, making them + * particularly suited for interrupt to task and core to core communication + * scenarios. + * + * ***NOTE***: Uniquely among FreeRTOS objects, the stream buffer + * implementation (so also the message buffer implementation, as message buffers + * are built on top of stream buffers) assumes there is only one task or + * interrupt that will write to the buffer (the writer), and only one task or + * interrupt that will read from the buffer (the reader). It is safe for the + * writer and reader to be different tasks or interrupts, but, unlike other + * FreeRTOS objects, it is not safe to have multiple different writers or + * multiple different readers. If there are to be multiple different writers + * then the application writer must place each call to a writing API function + * (such as xStreamBufferSend()) inside a critical section and set the send + * block time to 0. Likewise, if there are to be multiple different readers + * then the application writer must place each call to a reading API function + * (such as xStreamBufferRead()) inside a critical section section and set the + * receive block time to 0. + * + */ + +#ifndef STREAM_BUFFER_H +#define STREAM_BUFFER_H + +#if defined( __cplusplus ) +extern "C" { +#endif + +/** + * Type by which stream buffers are referenced. For example, a call to + * xStreamBufferCreate() returns an StreamBufferHandle_t variable that can + * then be used as a parameter to xStreamBufferSend(), xStreamBufferReceive(), + * etc. + */ +struct StreamBufferDef_t; +typedef struct StreamBufferDef_t * StreamBufferHandle_t; + + +/** + * message_buffer.h + * +
+StreamBufferHandle_t xStreamBufferCreate( size_t xBufferSizeBytes, size_t xTriggerLevelBytes );
+
+ * + * Creates a new stream buffer using dynamically allocated memory. See + * xStreamBufferCreateStatic() for a version that uses statically allocated + * memory (memory that is allocated at compile time). + * + * configSUPPORT_DYNAMIC_ALLOCATION must be set to 1 or left undefined in + * FreeRTOSConfig.h for xStreamBufferCreate() to be available. + * + * @param xBufferSizeBytes The total number of bytes the stream buffer will be + * able to hold at any one time. + * + * @param xTriggerLevelBytes The number of bytes that must be in the stream + * buffer before a task that is blocked on the stream buffer to wait for data is + * moved out of the blocked state. For example, if a task is blocked on a read + * of an empty stream buffer that has a trigger level of 1 then the task will be + * unblocked when a single byte is written to the buffer or the task's block + * time expires. As another example, if a task is blocked on a read of an empty + * stream buffer that has a trigger level of 10 then the task will not be + * unblocked until the stream buffer contains at least 10 bytes or the task's + * block time expires. If a reading task's block time expires before the + * trigger level is reached then the task will still receive however many bytes + * are actually available. Setting a trigger level of 0 will result in a + * trigger level of 1 being used. It is not valid to specify a trigger level + * that is greater than the buffer size. + * + * @return If NULL is returned, then the stream buffer cannot be created + * because there is insufficient heap memory available for FreeRTOS to allocate + * the stream buffer data structures and storage area. A non-NULL value being + * returned indicates that the stream buffer has been created successfully - + * the returned value should be stored as the handle to the created stream + * buffer. + * + * Example use: +
+
+void vAFunction( void )
+{
+StreamBufferHandle_t xStreamBuffer;
+const size_t xStreamBufferSizeBytes = 100, xTriggerLevel = 10;
+
+    // Create a stream buffer that can hold 100 bytes.  The memory used to hold
+    // both the stream buffer structure and the data in the stream buffer is
+    // allocated dynamically.
+    xStreamBuffer = xStreamBufferCreate( xStreamBufferSizeBytes, xTriggerLevel );
+
+    if( xStreamBuffer == NULL )
+    {
+        // There was not enough heap memory space available to create the
+        // stream buffer.
+    }
+    else
+    {
+        // The stream buffer was created successfully and can now be used.
+    }
+}
+
+ * \defgroup xStreamBufferCreate xStreamBufferCreate
+ * \ingroup StreamBufferManagement
+ */
+#define xStreamBufferCreate( xBufferSizeBytes, xTriggerLevelBytes ) xStreamBufferGenericCreate( xBufferSizeBytes, xTriggerLevelBytes, pdFALSE )
+
+/**
+ * stream_buffer.h
+ *
+
+StreamBufferHandle_t xStreamBufferCreateStatic( size_t xBufferSizeBytes,
+                                                size_t xTriggerLevelBytes,
+                                                uint8_t *pucStreamBufferStorageArea,
+                                                StaticStreamBuffer_t *pxStaticStreamBuffer );
+
+ * Creates a new stream buffer using statically allocated memory. See + * xStreamBufferCreate() for a version that uses dynamically allocated memory. + * + * configSUPPORT_STATIC_ALLOCATION must be set to 1 in FreeRTOSConfig.h for + * xStreamBufferCreateStatic() to be available. + * + * @param xBufferSizeBytes The size, in bytes, of the buffer pointed to by the + * pucStreamBufferStorageArea parameter. + * + * @param xTriggerLevelBytes The number of bytes that must be in the stream + * buffer before a task that is blocked on the stream buffer to wait for data is + * moved out of the blocked state. For example, if a task is blocked on a read + * of an empty stream buffer that has a trigger level of 1 then the task will be + * unblocked when a single byte is written to the buffer or the task's block + * time expires. As another example, if a task is blocked on a read of an empty + * stream buffer that has a trigger level of 10 then the task will not be + * unblocked until the stream buffer contains at least 10 bytes or the task's + * block time expires. If a reading task's block time expires before the + * trigger level is reached then the task will still receive however many bytes + * are actually available. Setting a trigger level of 0 will result in a + * trigger level of 1 being used. It is not valid to specify a trigger level + * that is greater than the buffer size. + * + * @param pucStreamBufferStorageArea Must point to a uint8_t array that is at + * least xBufferSizeBytes + 1 big. This is the array to which streams are + * copied when they are written to the stream buffer. + * + * @param pxStaticStreamBuffer Must point to a variable of type + * StaticStreamBuffer_t, which will be used to hold the stream buffer's data + * structure. + * + * @return If the stream buffer is created successfully then a handle to the + * created stream buffer is returned. If either pucStreamBufferStorageArea or + * pxStaticstreamBuffer are NULL then NULL is returned. + * + * Example use: +
+
+// Used to dimension the array used to hold the streams.  The available space
+// will actually be one less than this, so 999.
+#define STORAGE_SIZE_BYTES 1000
+
+// Defines the memory that will actually hold the streams within the stream
+// buffer.
+static uint8_t ucStorageBuffer[ STORAGE_SIZE_BYTES ];
+
+// The variable used to hold the stream buffer structure.
+StaticStreamBuffer_t xStreamBufferStruct;
+
+void MyFunction( void )
+{
+StreamBufferHandle_t xStreamBuffer;
+const size_t xTriggerLevel = 1;
+
+    xStreamBuffer = xStreamBufferCreateStatic( sizeof( ucStorageBuffer ),
+                                               xTriggerLevel,
+                                               ucStorageBuffer,
+                                               &xStreamBufferStruct );
+
+    // As neither the pucStreamBufferStorageArea nor the pxStaticStreamBuffer
+    // parameters were NULL, xStreamBuffer will not be NULL, and can be used to
+    // reference the created stream buffer in other stream buffer API calls.
+
+    // Other code that uses the stream buffer can go here.
+}
+
+
+ * \defgroup xStreamBufferCreateStatic xStreamBufferCreateStatic
+ * \ingroup StreamBufferManagement
+ */
+#define xStreamBufferCreateStatic( xBufferSizeBytes, xTriggerLevelBytes, pucStreamBufferStorageArea, pxStaticStreamBuffer ) xStreamBufferGenericCreateStatic( xBufferSizeBytes, xTriggerLevelBytes, pdFALSE, pucStreamBufferStorageArea, pxStaticStreamBuffer )
+
+/**
+ * stream_buffer.h
+ *
+
+size_t xStreamBufferSend( StreamBufferHandle_t xStreamBuffer,
+                          const void *pvTxData,
+                          size_t xDataLengthBytes,
+                          TickType_t xTicksToWait );
+
+ * + * Sends bytes to a stream buffer. The bytes are copied into the stream buffer. + * + * ***NOTE***: Uniquely among FreeRTOS objects, the stream buffer + * implementation (so also the message buffer implementation, as message buffers + * are built on top of stream buffers) assumes there is only one task or + * interrupt that will write to the buffer (the writer), and only one task or + * interrupt that will read from the buffer (the reader). It is safe for the + * writer and reader to be different tasks or interrupts, but, unlike other + * FreeRTOS objects, it is not safe to have multiple different writers or + * multiple different readers. If there are to be multiple different writers + * then the application writer must place each call to a writing API function + * (such as xStreamBufferSend()) inside a critical section and set the send + * block time to 0. Likewise, if there are to be multiple different readers + * then the application writer must place each call to a reading API function + * (such as xStreamBufferRead()) inside a critical section and set the receive + * block time to 0. + * + * Use xStreamBufferSend() to write to a stream buffer from a task. Use + * xStreamBufferSendFromISR() to write to a stream buffer from an interrupt + * service routine (ISR). + * + * @param xStreamBuffer The handle of the stream buffer to which a stream is + * being sent. + * + * @param pvTxData A pointer to the buffer that holds the bytes to be copied + * into the stream buffer. + * + * @param xDataLengthBytes The maximum number of bytes to copy from pvTxData + * into the stream buffer. + * + * @param xTicksToWait The maximum amount of time the task should remain in the + * Blocked state to wait for enough space to become available in the stream + * buffer, should the stream buffer contain too little space to hold the + * another xDataLengthBytes bytes. The block time is specified in tick periods, + * so the absolute time it represents is dependent on the tick frequency. The + * macro pdMS_TO_TICKS() can be used to convert a time specified in milliseconds + * into a time specified in ticks. Setting xTicksToWait to portMAX_DELAY will + * cause the task to wait indefinitely (without timing out), provided + * INCLUDE_vTaskSuspend is set to 1 in FreeRTOSConfig.h. If a task times out + * before it can write all xDataLengthBytes into the buffer it will still write + * as many bytes as possible. A task does not use any CPU time when it is in + * the blocked state. + * + * @return The number of bytes written to the stream buffer. If a task times + * out before it can write all xDataLengthBytes into the buffer it will still + * write as many bytes as possible. + * + * Example use: +
+void vAFunction( StreamBufferHandle_t xStreamBuffer )
+{
+size_t xBytesSent;
+uint8_t ucArrayToSend[] = { 0, 1, 2, 3 };
+char *pcStringToSend = "String to send";
+const TickType_t x100ms = pdMS_TO_TICKS( 100 );
+
+    // Send an array to the stream buffer, blocking for a maximum of 100ms to
+    // wait for enough space to be available in the stream buffer.
+    xBytesSent = xStreamBufferSend( xStreamBuffer, ( void * ) ucArrayToSend, sizeof( ucArrayToSend ), x100ms );
+
+    if( xBytesSent != sizeof( ucArrayToSend ) )
+    {
+        // The call to xStreamBufferSend() timed out before there was enough
+        // space in the buffer for the data to be written, but it did
+        // successfully write xBytesSent bytes.
+    }
+
+    // Send the string to the stream buffer.  Return immediately if there is not
+    // enough space in the buffer.
+    xBytesSent = xStreamBufferSend( xStreamBuffer, ( void * ) pcStringToSend, strlen( pcStringToSend ), 0 );
+
+    if( xBytesSent != strlen( pcStringToSend ) )
+    {
+        // The entire string could not be added to the stream buffer because
+        // there was not enough free space in the buffer, but xBytesSent bytes
+        // were sent.  Could try again to send the remaining bytes.
+    }
+}
+
+ * \defgroup xStreamBufferSend xStreamBufferSend
+ * \ingroup StreamBufferManagement
+ */
+size_t xStreamBufferSend( StreamBufferHandle_t xStreamBuffer,
+                          const void *pvTxData,
+                          size_t xDataLengthBytes,
+                          TickType_t xTicksToWait ) PRIVILEGED_FUNCTION;
+
+/**
+ * stream_buffer.h
+ *
+
+size_t xStreamBufferSendFromISR( StreamBufferHandle_t xStreamBuffer,
+                                 const void *pvTxData,
+                                 size_t xDataLengthBytes,
+                                 BaseType_t *pxHigherPriorityTaskWoken );
+
+ * + * Interrupt safe version of the API function that sends a stream of bytes to + * the stream buffer. + * + * ***NOTE***: Uniquely among FreeRTOS objects, the stream buffer + * implementation (so also the message buffer implementation, as message buffers + * are built on top of stream buffers) assumes there is only one task or + * interrupt that will write to the buffer (the writer), and only one task or + * interrupt that will read from the buffer (the reader). It is safe for the + * writer and reader to be different tasks or interrupts, but, unlike other + * FreeRTOS objects, it is not safe to have multiple different writers or + * multiple different readers. If there are to be multiple different writers + * then the application writer must place each call to a writing API function + * (such as xStreamBufferSend()) inside a critical section and set the send + * block time to 0. Likewise, if there are to be multiple different readers + * then the application writer must place each call to a reading API function + * (such as xStreamBufferRead()) inside a critical section and set the receive + * block time to 0. + * + * Use xStreamBufferSend() to write to a stream buffer from a task. Use + * xStreamBufferSendFromISR() to write to a stream buffer from an interrupt + * service routine (ISR). + * + * @param xStreamBuffer The handle of the stream buffer to which a stream is + * being sent. + * + * @param pvTxData A pointer to the data that is to be copied into the stream + * buffer. + * + * @param xDataLengthBytes The maximum number of bytes to copy from pvTxData + * into the stream buffer. + * + * @param pxHigherPriorityTaskWoken It is possible that a stream buffer will + * have a task blocked on it waiting for data. Calling + * xStreamBufferSendFromISR() can make data available, and so cause a task that + * was waiting for data to leave the Blocked state. If calling + * xStreamBufferSendFromISR() causes a task to leave the Blocked state, and the + * unblocked task has a priority higher than the currently executing task (the + * task that was interrupted), then, internally, xStreamBufferSendFromISR() + * will set *pxHigherPriorityTaskWoken to pdTRUE. If + * xStreamBufferSendFromISR() sets this value to pdTRUE, then normally a + * context switch should be performed before the interrupt is exited. This will + * ensure that the interrupt returns directly to the highest priority Ready + * state task. *pxHigherPriorityTaskWoken should be set to pdFALSE before it + * is passed into the function. See the example code below for an example. + * + * @return The number of bytes actually written to the stream buffer, which will + * be less than xDataLengthBytes if the stream buffer didn't have enough free + * space for all the bytes to be written. + * + * Example use: +
+// A stream buffer that has already been created.
+StreamBufferHandle_t xStreamBuffer;
+
+void vAnInterruptServiceRoutine( void )
+{
+size_t xBytesSent;
+char *pcStringToSend = "String to send";
+BaseType_t xHigherPriorityTaskWoken = pdFALSE; // Initialised to pdFALSE.
+
+    // Attempt to send the string to the stream buffer.
+    xBytesSent = xStreamBufferSendFromISR( xStreamBuffer,
+                                           ( void * ) pcStringToSend,
+                                           strlen( pcStringToSend ),
+                                           &xHigherPriorityTaskWoken );
+
+    if( xBytesSent != strlen( pcStringToSend ) )
+    {
+        // There was not enough free space in the stream buffer for the entire
+        // string to be written, but xBytesSent bytes were written.
+    }
+
+    // If xHigherPriorityTaskWoken was set to pdTRUE inside
+    // xStreamBufferSendFromISR() then a task that has a priority above the
+    // priority of the currently executing task was unblocked and a context
+    // switch should be performed to ensure the ISR returns to the unblocked
+    // task.  In most FreeRTOS ports this is done by simply passing
+    // xHigherPriorityTaskWoken into taskYIELD_FROM_ISR(), which will test the
+    // variables value, and perform the context switch if necessary.  Check the
+    // documentation for the port in use for port specific instructions.
+    taskYIELD_FROM_ISR( xHigherPriorityTaskWoken );
+}
+
+ * \defgroup xStreamBufferSendFromISR xStreamBufferSendFromISR
+ * \ingroup StreamBufferManagement
+ */
+size_t xStreamBufferSendFromISR( StreamBufferHandle_t xStreamBuffer,
+                                 const void *pvTxData,
+                                 size_t xDataLengthBytes,
+                                 BaseType_t * const pxHigherPriorityTaskWoken ) PRIVILEGED_FUNCTION;
+
+/**
+ * stream_buffer.h
+ *
+
+size_t xStreamBufferReceive( StreamBufferHandle_t xStreamBuffer,
+                             void *pvRxData,
+                             size_t xBufferLengthBytes,
+                             TickType_t xTicksToWait );
+
+ * + * Receives bytes from a stream buffer. + * + * ***NOTE***: Uniquely among FreeRTOS objects, the stream buffer + * implementation (so also the message buffer implementation, as message buffers + * are built on top of stream buffers) assumes there is only one task or + * interrupt that will write to the buffer (the writer), and only one task or + * interrupt that will read from the buffer (the reader). It is safe for the + * writer and reader to be different tasks or interrupts, but, unlike other + * FreeRTOS objects, it is not safe to have multiple different writers or + * multiple different readers. If there are to be multiple different writers + * then the application writer must place each call to a writing API function + * (such as xStreamBufferSend()) inside a critical section and set the send + * block time to 0. Likewise, if there are to be multiple different readers + * then the application writer must place each call to a reading API function + * (such as xStreamBufferRead()) inside a critical section and set the receive + * block time to 0. + * + * Use xStreamBufferReceive() to read from a stream buffer from a task. Use + * xStreamBufferReceiveFromISR() to read from a stream buffer from an + * interrupt service routine (ISR). + * + * @param xStreamBuffer The handle of the stream buffer from which bytes are to + * be received. + * + * @param pvRxData A pointer to the buffer into which the received bytes will be + * copied. + * + * @param xBufferLengthBytes The length of the buffer pointed to by the + * pvRxData parameter. This sets the maximum number of bytes to receive in one + * call. xStreamBufferReceive will return as many bytes as possible up to a + * maximum set by xBufferLengthBytes. + * + * @param xTicksToWait The maximum amount of time the task should remain in the + * Blocked state to wait for data to become available if the stream buffer is + * empty. xStreamBufferReceive() will return immediately if xTicksToWait is + * zero. The block time is specified in tick periods, so the absolute time it + * represents is dependent on the tick frequency. The macro pdMS_TO_TICKS() can + * be used to convert a time specified in milliseconds into a time specified in + * ticks. Setting xTicksToWait to portMAX_DELAY will cause the task to wait + * indefinitely (without timing out), provided INCLUDE_vTaskSuspend is set to 1 + * in FreeRTOSConfig.h. A task does not use any CPU time when it is in the + * Blocked state. + * + * @return The number of bytes actually read from the stream buffer, which will + * be less than xBufferLengthBytes if the call to xStreamBufferReceive() timed + * out before xBufferLengthBytes were available. + * + * Example use: +
+void vAFunction( StreamBufferHandle_t xStreamBuffer )
+{
+uint8_t ucRxData[ 20 ];
+size_t xReceivedBytes;
+const TickType_t xBlockTime = pdMS_TO_TICKS( 20 );
+
+    // Receive up to another sizeof( ucRxData ) bytes from the stream buffer.
+    // Wait in the Blocked state (so not using any CPU processing time) for a
+    // maximum of 20ms (xBlockTime) for the full sizeof( ucRxData ) bytes to be
+    // available.
+    xReceivedBytes = xStreamBufferReceive( xStreamBuffer,
+                                           ( void * ) ucRxData,
+                                           sizeof( ucRxData ),
+                                           xBlockTime );
+
+    if( xReceivedBytes > 0 )
+    {
+        // ucRxData contains another xReceivedBytes bytes of data, which can
+        // be processed here....
+    }
+}
+
+ * \defgroup xStreamBufferReceive xStreamBufferReceive
+ * \ingroup StreamBufferManagement
+ */
+size_t xStreamBufferReceive( StreamBufferHandle_t xStreamBuffer,
+                             void *pvRxData,
+                             size_t xBufferLengthBytes,
+                             TickType_t xTicksToWait ) PRIVILEGED_FUNCTION;
+
+/**
+ * stream_buffer.h
+ *
+
+size_t xStreamBufferReceiveFromISR( StreamBufferHandle_t xStreamBuffer,
+                                    void *pvRxData,
+                                    size_t xBufferLengthBytes,
+                                    BaseType_t *pxHigherPriorityTaskWoken );
+
+ * + * An interrupt safe version of the API function that receives bytes from a + * stream buffer. + * + * Use xStreamBufferReceive() to read bytes from a stream buffer from a task. + * Use xStreamBufferReceiveFromISR() to read bytes from a stream buffer from an + * interrupt service routine (ISR). + * + * @param xStreamBuffer The handle of the stream buffer from which a stream + * is being received. + * + * @param pvRxData A pointer to the buffer into which the received bytes are + * copied. + * + * @param xBufferLengthBytes The length of the buffer pointed to by the + * pvRxData parameter. This sets the maximum number of bytes to receive in one + * call. xStreamBufferReceive will return as many bytes as possible up to a + * maximum set by xBufferLengthBytes. + * + * @param pxHigherPriorityTaskWoken It is possible that a stream buffer will + * have a task blocked on it waiting for space to become available. Calling + * xStreamBufferReceiveFromISR() can make space available, and so cause a task + * that is waiting for space to leave the Blocked state. If calling + * xStreamBufferReceiveFromISR() causes a task to leave the Blocked state, and + * the unblocked task has a priority higher than the currently executing task + * (the task that was interrupted), then, internally, + * xStreamBufferReceiveFromISR() will set *pxHigherPriorityTaskWoken to pdTRUE. + * If xStreamBufferReceiveFromISR() sets this value to pdTRUE, then normally a + * context switch should be performed before the interrupt is exited. That will + * ensure the interrupt returns directly to the highest priority Ready state + * task. *pxHigherPriorityTaskWoken should be set to pdFALSE before it is + * passed into the function. See the code example below for an example. + * + * @return The number of bytes read from the stream buffer, if any. + * + * Example use: +
+// A stream buffer that has already been created.
+StreamBufferHandle_t xStreamBuffer;
+
+void vAnInterruptServiceRoutine( void )
+{
+uint8_t ucRxData[ 20 ];
+size_t xReceivedBytes;
+BaseType_t xHigherPriorityTaskWoken = pdFALSE;  // Initialised to pdFALSE.
+
+    // Receive the next stream from the stream buffer.
+    xReceivedBytes = xStreamBufferReceiveFromISR( xStreamBuffer,
+                                                  ( void * ) ucRxData,
+                                                  sizeof( ucRxData ),
+                                                  &xHigherPriorityTaskWoken );
+
+    if( xReceivedBytes > 0 )
+    {
+        // ucRxData contains xReceivedBytes read from the stream buffer.
+        // Process the stream here....
+    }
+
+    // If xHigherPriorityTaskWoken was set to pdTRUE inside
+    // xStreamBufferReceiveFromISR() then a task that has a priority above the
+    // priority of the currently executing task was unblocked and a context
+    // switch should be performed to ensure the ISR returns to the unblocked
+    // task.  In most FreeRTOS ports this is done by simply passing
+    // xHigherPriorityTaskWoken into taskYIELD_FROM_ISR(), which will test the
+    // variables value, and perform the context switch if necessary.  Check the
+    // documentation for the port in use for port specific instructions.
+    taskYIELD_FROM_ISR( xHigherPriorityTaskWoken );
+}
+
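
The query functions declared further down (xStreamBufferIsFull(), xStreamBufferIsEmpty(), xStreamBufferSpacesAvailable() and xStreamBufferBytesAvailable()) have no examples of their own.  A short sketch of polling the buffer state without blocking follows; the function name vQueryStreamBuffer is illustrative only.

 void vQueryStreamBuffer( StreamBufferHandle_t xStreamBuffer )
 {
    if( xStreamBufferIsEmpty( xStreamBuffer ) == pdFALSE )
    {
        // xStreamBufferBytesAvailable( xStreamBuffer ) bytes can be read
        // without blocking.
    }

    if( xStreamBufferIsFull( xStreamBuffer ) == pdFALSE )
    {
        // Up to xStreamBufferSpacesAvailable( xStreamBuffer ) bytes can be
        // written without blocking.
    }
 }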
+ * \defgroup xStreamBufferReceiveFromISR xStreamBufferReceiveFromISR
+ * \ingroup StreamBufferManagement
+ */
+size_t xStreamBufferReceiveFromISR( StreamBufferHandle_t xStreamBuffer,
+                                    void *pvRxData,
+                                    size_t xBufferLengthBytes,
+                                    BaseType_t * const pxHigherPriorityTaskWoken ) PRIVILEGED_FUNCTION;
+
+/**
+ * stream_buffer.h
+ *
+
+void vStreamBufferDelete( StreamBufferHandle_t xStreamBuffer );
+
+ *
+ * Deletes a stream buffer that was previously created using a call to
+ * xStreamBufferCreate() or xStreamBufferCreateStatic().  If the stream
+ * buffer was created using dynamic memory (that is, by xStreamBufferCreate()),
+ * then the allocated memory is freed.
+ *
+ * A stream buffer handle must not be used after the stream buffer has been
+ * deleted.
+ *
+ * @param xStreamBuffer The handle of the stream buffer to be deleted.
+ *
+ * \defgroup vStreamBufferDelete vStreamBufferDelete
+ * \ingroup StreamBufferManagement
+ */
+void vStreamBufferDelete( StreamBufferHandle_t xStreamBuffer ) PRIVILEGED_FUNCTION;
+
+/**
+ * stream_buffer.h
+ *
+
+BaseType_t xStreamBufferIsFull( StreamBufferHandle_t xStreamBuffer );
+
+ * + * Queries a stream buffer to see if it is full. A stream buffer is full if it + * does not have any free space, and therefore cannot accept any more data. + * + * @param xStreamBuffer The handle of the stream buffer being queried. + * + * @return If the stream buffer is full then pdTRUE is returned. Otherwise + * pdFALSE is returned. + * + * \defgroup xStreamBufferIsFull xStreamBufferIsFull + * \ingroup StreamBufferManagement + */ +BaseType_t xStreamBufferIsFull( StreamBufferHandle_t xStreamBuffer ) PRIVILEGED_FUNCTION; + +/** + * stream_buffer.h + * +
+BaseType_t xStreamBufferIsEmpty( StreamBufferHandle_t xStreamBuffer );
+
+ * + * Queries a stream buffer to see if it is empty. A stream buffer is empty if + * it does not contain any data. + * + * @param xStreamBuffer The handle of the stream buffer being queried. + * + * @return If the stream buffer is empty then pdTRUE is returned. Otherwise + * pdFALSE is returned. + * + * \defgroup xStreamBufferIsEmpty xStreamBufferIsEmpty + * \ingroup StreamBufferManagement + */ +BaseType_t xStreamBufferIsEmpty( StreamBufferHandle_t xStreamBuffer ) PRIVILEGED_FUNCTION; + +/** + * stream_buffer.h + * +
+BaseType_t xStreamBufferReset( StreamBufferHandle_t xStreamBuffer );
+
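+ * Example use (a minimal sketch; the handle and the vClearBuffer() helper
+ * name are illustrative assumptions):
+
+void vClearBuffer( StreamBufferHandle_t xStreamBuffer )
+{
+    // Attempt to discard any data currently held in the stream buffer.
+    if( xStreamBufferReset( xStreamBuffer ) != pdPASS )
+    {
+        // A task was blocked waiting to send to or receive from the buffer,
+        // so the buffer was left unchanged.
+    }
+}
+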
+ * + * Resets a stream buffer to its initial, empty, state. Any data that was in + * the stream buffer is discarded. A stream buffer can only be reset if there + * are no tasks blocked waiting to either send to or receive from the stream + * buffer. + * + * @param xStreamBuffer The handle of the stream buffer being reset. + * + * @return If the stream buffer is reset then pdPASS is returned. If there was + * a task blocked waiting to send to or read from the stream buffer then the + * stream buffer is not reset and pdFAIL is returned. + * + * \defgroup xStreamBufferReset xStreamBufferReset + * \ingroup StreamBufferManagement + */ +BaseType_t xStreamBufferReset( StreamBufferHandle_t xStreamBuffer ) PRIVILEGED_FUNCTION; + +/** + * stream_buffer.h + * +
+size_t xStreamBufferSpacesAvailable( StreamBufferHandle_t xStreamBuffer );
+
+ * + * Queries a stream buffer to see how much free space it contains, which is + * equal to the amount of data that can be sent to the stream buffer before it + * is full. + * + * @param xStreamBuffer The handle of the stream buffer being queried. + * + * @return The number of bytes that can be written to the stream buffer before + * the stream buffer would be full. + * + * \defgroup xStreamBufferSpacesAvailable xStreamBufferSpacesAvailable + * \ingroup StreamBufferManagement + */ +size_t xStreamBufferSpacesAvailable( StreamBufferHandle_t xStreamBuffer ) PRIVILEGED_FUNCTION; + +/** + * stream_buffer.h + * +
+size_t xStreamBufferBytesAvailable( StreamBufferHandle_t xStreamBuffer );
+
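+ * Example use (a minimal sketch showing this function together with
+ * xStreamBufferSpacesAvailable(); the handle and helper name are illustrative
+ * assumptions):
+
+void vCheckBufferUsage( StreamBufferHandle_t xStreamBuffer )
+{
+size_t xBytesUsed, xBytesFree;
+
+    // Bytes currently stored in the buffer, i.e. readable without blocking.
+    xBytesUsed = xStreamBufferBytesAvailable( xStreamBuffer );
+
+    // Bytes that can still be written before the buffer becomes full.
+    xBytesFree = xStreamBufferSpacesAvailable( xStreamBuffer );
+
+    // Both values are snapshots - they can change as soon as another task or
+    // interrupt sends to or receives from the buffer.  A zero xBytesFree is
+    // equivalent to xStreamBufferIsFull() returning pdTRUE, and a zero
+    // xBytesUsed is equivalent to xStreamBufferIsEmpty() returning pdTRUE.
+    ( void ) xBytesUsed;
+    ( void ) xBytesFree;
+}
+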
+ * + * Queries a stream buffer to see how much data it contains, which is equal to + * the number of bytes that can be read from the stream buffer before the stream + * buffer would be empty. + * + * @param xStreamBuffer The handle of the stream buffer being queried. + * + * @return The number of bytes that can be read from the stream buffer before + * the stream buffer would be empty. + * + * \defgroup xStreamBufferBytesAvailable xStreamBufferBytesAvailable + * \ingroup StreamBufferManagement + */ +size_t xStreamBufferBytesAvailable( StreamBufferHandle_t xStreamBuffer ) PRIVILEGED_FUNCTION; + +/** + * stream_buffer.h + * +
+BaseType_t xStreamBufferSetTriggerLevel( StreamBufferHandle_t xStreamBuffer, size_t xTriggerLevel );
+
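+ * Example use (a minimal sketch; the trigger level of 10 and the helper name
+ * are illustrative assumptions):
+
+void vRaiseTriggerLevel( StreamBufferHandle_t xStreamBuffer )
+{
+    // Only unblock a task that is waiting to receive once at least 10 bytes
+    // are available in the buffer.
+    if( xStreamBufferSetTriggerLevel( xStreamBuffer, 10 ) == pdFALSE )
+    {
+        // The requested trigger level was greater than the stream buffer
+        // size, so the trigger level was not changed.
+    }
+}
+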
+ * + * A stream buffer's trigger level is the number of bytes that must be in the + * stream buffer before a task that is blocked on the stream buffer to + * wait for data is moved out of the blocked state. For example, if a task is + * blocked on a read of an empty stream buffer that has a trigger level of 1 + * then the task will be unblocked when a single byte is written to the buffer + * or the task's block time expires. As another example, if a task is blocked + * on a read of an empty stream buffer that has a trigger level of 10 then the + * task will not be unblocked until the stream buffer contains at least 10 bytes + * or the task's block time expires. If a reading task's block time expires + * before the trigger level is reached then the task will still receive however + * many bytes are actually available. Setting a trigger level of 0 will result + * in a trigger level of 1 being used. It is not valid to specify a trigger + * level that is greater than the buffer size. + * + * A trigger level is set when the stream buffer is created, and can be modified + * using xStreamBufferSetTriggerLevel(). + * + * @param xStreamBuffer The handle of the stream buffer being updated. + * + * @param xTriggerLevel The new trigger level for the stream buffer. + * + * @return If xTriggerLevel was less than or equal to the stream buffer's length + * then the trigger level will be updated and pdTRUE is returned. Otherwise + * pdFALSE is returned. + * + * \defgroup xStreamBufferSetTriggerLevel xStreamBufferSetTriggerLevel + * \ingroup StreamBufferManagement + */ +BaseType_t xStreamBufferSetTriggerLevel( StreamBufferHandle_t xStreamBuffer, size_t xTriggerLevel ) PRIVILEGED_FUNCTION; + +/** + * stream_buffer.h + * +
+BaseType_t xStreamBufferSendCompletedFromISR( StreamBufferHandle_t xStreamBuffer, BaseType_t *pxHigherPriorityTaskWoken );
+
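+ * Example use (a minimal sketch of the dual-core pattern described below;
+ * xSharedStreamBuffer and the interrupt handler name are illustrative
+ * assumptions - see FreeRTOS/Demo/Minimal/MessageBufferAMP.c for the full
+ * pattern):
+
+// ISR executed on the core that reads from the stream buffer, triggered by
+// the writing core after it has placed data into the buffer.
+void vCoreToCoreSendNotifyISR( void )
+{
+BaseType_t xHigherPriorityTaskWoken = pdFALSE;
+
+    // Unblock any task on this core that is waiting for data to arrive in
+    // xSharedStreamBuffer, just as sbSEND_COMPLETED() would have done had the
+    // send been performed on this core.
+    xStreamBufferSendCompletedFromISR( xSharedStreamBuffer, &xHigherPriorityTaskWoken );
+
+    portYIELD_FROM_ISR( xHigherPriorityTaskWoken );
+}
+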
+ * + * For advanced users only. + * + * The sbSEND_COMPLETED() macro is called from within the FreeRTOS APIs when + * data is sent to a message buffer or stream buffer. If there was a task that + * was blocked on the message or stream buffer waiting for data to arrive then + * the sbSEND_COMPLETED() macro sends a notification to the task to remove it + * from the Blocked state. xStreamBufferSendCompletedFromISR() does the same + * thing. It is provided to enable application writers to implement their own + * version of sbSEND_COMPLETED(), and MUST NOT BE USED AT ANY OTHER TIME. + * + * See the example implemented in FreeRTOS/Demo/Minimal/MessageBufferAMP.c for + * additional information. + * + * @param xStreamBuffer The handle of the stream buffer to which data was + * written. + * + * @param pxHigherPriorityTaskWoken *pxHigherPriorityTaskWoken should be + * initialised to pdFALSE before it is passed into + * xStreamBufferSendCompletedFromISR(). If calling + * xStreamBufferSendCompletedFromISR() removes a task from the Blocked state, + * and the task has a priority above the priority of the currently running task, + * then *pxHigherPriorityTaskWoken will get set to pdTRUE indicating that a + * context switch should be performed before exiting the ISR. + * + * @return If a task was removed from the Blocked state then pdTRUE is returned. + * Otherwise pdFALSE is returned. + * + * \defgroup xStreamBufferSendCompletedFromISR xStreamBufferSendCompletedFromISR + * \ingroup StreamBufferManagement + */ +BaseType_t xStreamBufferSendCompletedFromISR( StreamBufferHandle_t xStreamBuffer, BaseType_t *pxHigherPriorityTaskWoken ) PRIVILEGED_FUNCTION; + +/** + * stream_buffer.h + * +
+BaseType_t xStreamBufferReceiveCompletedFromISR( StreamBufferHandle_t xStreamBuffer, BaseType_t *pxHigherPriorityTaskWoken );
+
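+ * Example use (a minimal sketch mirroring the xStreamBufferSendCompletedFromISR()
+ * example above; xSharedStreamBuffer and the handler name are illustrative
+ * assumptions):
+
+// ISR executed on the core that writes to the stream buffer, triggered by the
+// reading core after it has removed data from the buffer.
+void vCoreToCoreReceiveNotifyISR( void )
+{
+BaseType_t xHigherPriorityTaskWoken = pdFALSE;
+
+    // Unblock any task on this core that is waiting for space to become
+    // available in xSharedStreamBuffer, just as sbRECEIVE_COMPLETED() would
+    // have done had the receive been performed on this core.
+    xStreamBufferReceiveCompletedFromISR( xSharedStreamBuffer, &xHigherPriorityTaskWoken );
+
+    portYIELD_FROM_ISR( xHigherPriorityTaskWoken );
+}
+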
+ * + * For advanced users only. + * + * The sbRECEIVE_COMPLETED() macro is called from within the FreeRTOS APIs when + * data is read out of a message buffer or stream buffer. If there was a task + * that was blocked on the message or stream buffer waiting for data to arrive + * then the sbRECEIVE_COMPLETED() macro sends a notification to the task to + * remove it from the Blocked state. xStreamBufferReceiveCompletedFromISR() + * does the same thing. It is provided to enable application writers to + * implement their own version of sbRECEIVE_COMPLETED(), and MUST NOT BE USED AT + * ANY OTHER TIME. + * + * See the example implemented in FreeRTOS/Demo/Minimal/MessageBufferAMP.c for + * additional information. + * + * @param xStreamBuffer The handle of the stream buffer from which data was + * read. + * + * @param pxHigherPriorityTaskWoken *pxHigherPriorityTaskWoken should be + * initialised to pdFALSE before it is passed into + * xStreamBufferReceiveCompletedFromISR(). If calling + * xStreamBufferReceiveCompletedFromISR() removes a task from the Blocked state, + * and the task has a priority above the priority of the currently running task, + * then *pxHigherPriorityTaskWoken will get set to pdTRUE indicating that a + * context switch should be performed before exiting the ISR. + * + * @return If a task was removed from the Blocked state then pdTRUE is returned. + * Otherwise pdFALSE is returned. + * + * \defgroup xStreamBufferReceiveCompletedFromISR xStreamBufferReceiveCompletedFromISR + * \ingroup StreamBufferManagement + */ +BaseType_t xStreamBufferReceiveCompletedFromISR( StreamBufferHandle_t xStreamBuffer, BaseType_t *pxHigherPriorityTaskWoken ) PRIVILEGED_FUNCTION; + +/* Functions below here are not part of the public API. */ +StreamBufferHandle_t xStreamBufferGenericCreate( size_t xBufferSizeBytes, + size_t xTriggerLevelBytes, + BaseType_t xIsMessageBuffer ) PRIVILEGED_FUNCTION; + +StreamBufferHandle_t xStreamBufferGenericCreateStatic( size_t xBufferSizeBytes, + size_t xTriggerLevelBytes, + BaseType_t xIsMessageBuffer, + uint8_t * const pucStreamBufferStorageArea, + StaticStreamBuffer_t * const pxStaticStreamBuffer ) PRIVILEGED_FUNCTION; + +size_t xStreamBufferNextMessageLengthBytes( StreamBufferHandle_t xStreamBuffer ) PRIVILEGED_FUNCTION; + +#if( configUSE_TRACE_FACILITY == 1 ) + void vStreamBufferSetStreamBufferNumber( StreamBufferHandle_t xStreamBuffer, UBaseType_t uxStreamBufferNumber ) PRIVILEGED_FUNCTION; + UBaseType_t uxStreamBufferGetStreamBufferNumber( StreamBufferHandle_t xStreamBuffer ) PRIVILEGED_FUNCTION; + uint8_t ucStreamBufferGetStreamBufferType( StreamBufferHandle_t xStreamBuffer ) PRIVILEGED_FUNCTION; +#endif + +#if defined( __cplusplus ) +} +#endif + +#endif /* !defined( STREAM_BUFFER_H ) */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/FreeRTOS/Source/include/task.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/FreeRTOS/Source/include/task.h new file mode 100644 index 0000000..4041bbf --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/FreeRTOS/Source/include/task.h @@ -0,0 +1,2421 @@ +/* + * FreeRTOS Kernel V10.2.1 + * Copyright (C) 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * http://www.FreeRTOS.org + * http://aws.amazon.com/freertos + * + * 1 tab == 4 spaces! + */ + + +#ifndef INC_TASK_H +#define INC_TASK_H + +#ifndef INC_FREERTOS_H + #error "include FreeRTOS.h must appear in source files before include task.h" +#endif + +#include "list.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/*----------------------------------------------------------- + * MACROS AND DEFINITIONS + *----------------------------------------------------------*/ + +#define tskKERNEL_VERSION_NUMBER "V10.2.0" +#define tskKERNEL_VERSION_MAJOR 10 +#define tskKERNEL_VERSION_MINOR 2 +#define tskKERNEL_VERSION_BUILD 0 + +/* MPU region parameters passed in ulParameters + * of MemoryRegion_t struct. */ +#define tskMPU_REGION_READ_ONLY ( 1UL << 0UL ) +#define tskMPU_REGION_READ_WRITE ( 1UL << 1UL ) +#define tskMPU_REGION_EXECUTE_NEVER ( 1UL << 2UL ) +#define tskMPU_REGION_NORMAL_MEMORY ( 1UL << 3UL ) +#define tskMPU_REGION_DEVICE_MEMORY ( 1UL << 4UL ) + +/** + * task. h + * + * Type by which tasks are referenced. For example, a call to xTaskCreate + * returns (via a pointer parameter) an TaskHandle_t variable that can then + * be used as a parameter to vTaskDelete to delete the task. + * + * \defgroup TaskHandle_t TaskHandle_t + * \ingroup Tasks + */ +struct tskTaskControlBlock; /* The old naming convention is used to prevent breaking kernel aware debuggers. */ +typedef struct tskTaskControlBlock* TaskHandle_t; + +/* + * Defines the prototype to which the application task hook function must + * conform. + */ +typedef BaseType_t (*TaskHookFunction_t)( void * ); + +/* Task states returned by eTaskGetState. */ +typedef enum +{ + eRunning = 0, /* A task is querying the state of itself, so must be running. */ + eReady, /* The task being queried is in a read or pending ready list. */ + eBlocked, /* The task being queried is in the Blocked state. */ + eSuspended, /* The task being queried is in the Suspended state, or is in the Blocked state with an infinite time out. */ + eDeleted, /* The task being queried has been deleted, but its TCB has not yet been freed. */ + eInvalid /* Used as an 'invalid state' value. */ +} eTaskState; + +/* Actions that can be performed when vTaskNotify() is called. */ +typedef enum +{ + eNoAction = 0, /* Notify the task without updating its notify value. */ + eSetBits, /* Set bits in the task's notification value. */ + eIncrement, /* Increment the task's notification value. 
*/ + eSetValueWithOverwrite, /* Set the task's notification value to a specific value even if the previous value has not yet been read by the task. */ + eSetValueWithoutOverwrite /* Set the task's notification value if the previous value has been read by the task. */ +} eNotifyAction; + +/* + * Used internally only. + */ +typedef struct xTIME_OUT +{ + BaseType_t xOverflowCount; + TickType_t xTimeOnEntering; +} TimeOut_t; + +/* + * Defines the memory ranges allocated to the task when an MPU is used. + */ +typedef struct xMEMORY_REGION +{ + void *pvBaseAddress; + uint32_t ulLengthInBytes; + uint32_t ulParameters; +} MemoryRegion_t; + +/* + * Parameters required to create an MPU protected task. + */ +typedef struct xTASK_PARAMETERS +{ + TaskFunction_t pvTaskCode; + const char * const pcName; /*lint !e971 Unqualified char types are allowed for strings and single characters only. */ + configSTACK_DEPTH_TYPE usStackDepth; + void *pvParameters; + UBaseType_t uxPriority; + StackType_t *puxStackBuffer; + MemoryRegion_t xRegions[ portNUM_CONFIGURABLE_REGIONS ]; + #if ( ( portUSING_MPU_WRAPPERS == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) ) + StaticTask_t * const pxTaskBuffer; + #endif +} TaskParameters_t; + +/* Used with the uxTaskGetSystemState() function to return the state of each task +in the system. */ +typedef struct xTASK_STATUS +{ + TaskHandle_t xHandle; /* The handle of the task to which the rest of the information in the structure relates. */ + const char *pcTaskName; /* A pointer to the task's name. This value will be invalid if the task was deleted since the structure was populated! */ /*lint !e971 Unqualified char types are allowed for strings and single characters only. */ + UBaseType_t xTaskNumber; /* A number unique to the task. */ + eTaskState eCurrentState; /* The state in which the task existed when the structure was populated. */ + UBaseType_t uxCurrentPriority; /* The priority at which the task was running (may be inherited) when the structure was populated. */ + UBaseType_t uxBasePriority; /* The priority to which the task will return if the task's current priority has been inherited to avoid unbounded priority inversion when obtaining a mutex. Only valid if configUSE_MUTEXES is defined as 1 in FreeRTOSConfig.h. */ + uint32_t ulRunTimeCounter; /* The total run time allocated to the task so far, as defined by the run time stats clock. See http://www.freertos.org/rtos-run-time-stats.html. Only valid when configGENERATE_RUN_TIME_STATS is defined as 1 in FreeRTOSConfig.h. */ + StackType_t *pxStackBase; /* Points to the lowest address of the task's stack area. */ + configSTACK_DEPTH_TYPE usStackHighWaterMark; /* The minimum amount of stack space that has remained for the task since the task was created. The closer this value is to zero the closer the task has come to overflowing its stack. */ +} TaskStatus_t; + +/* Possible return values for eTaskConfirmSleepModeStatus(). */ +typedef enum +{ + eAbortSleep = 0, /* A task has been made ready or a context switch pended since portSUPPORESS_TICKS_AND_SLEEP() was called - abort entering a sleep mode. */ + eStandardSleep, /* Enter a sleep mode that will not last any longer than the expected idle time. */ + eNoTasksWaitingTimeout /* No tasks are waiting for a timeout so it is safe to enter a sleep mode that can only be exited by an external interrupt. */ +} eSleepModeStatus; + +/** + * Defines the priority used by the idle task. This must not be modified. 
+ * + * \ingroup TaskUtils + */ +#define tskIDLE_PRIORITY ( ( UBaseType_t ) 0U ) + +/** + * task. h + * + * Macro for forcing a context switch. + * + * \defgroup taskYIELD taskYIELD + * \ingroup SchedulerControl + */ +#define taskYIELD() portYIELD() + +/** + * task. h + * + * Macro to mark the start of a critical code region. Preemptive context + * switches cannot occur when in a critical region. + * + * NOTE: This may alter the stack (depending on the portable implementation) + * so must be used with care! + * + * \defgroup taskENTER_CRITICAL taskENTER_CRITICAL + * \ingroup SchedulerControl + */ +#define taskENTER_CRITICAL() portENTER_CRITICAL() +#define taskENTER_CRITICAL_FROM_ISR() portSET_INTERRUPT_MASK_FROM_ISR() + +/** + * task. h + * + * Macro to mark the end of a critical code region. Preemptive context + * switches cannot occur when in a critical region. + * + * NOTE: This may alter the stack (depending on the portable implementation) + * so must be used with care! + * + * \defgroup taskEXIT_CRITICAL taskEXIT_CRITICAL + * \ingroup SchedulerControl + */ +#define taskEXIT_CRITICAL() portEXIT_CRITICAL() +#define taskEXIT_CRITICAL_FROM_ISR( x ) portCLEAR_INTERRUPT_MASK_FROM_ISR( x ) +/** + * task. h + * + * Macro to disable all maskable interrupts. + * + * \defgroup taskDISABLE_INTERRUPTS taskDISABLE_INTERRUPTS + * \ingroup SchedulerControl + */ +#define taskDISABLE_INTERRUPTS() portDISABLE_INTERRUPTS() + +/** + * task. h + * + * Macro to enable microcontroller interrupts. + * + * \defgroup taskENABLE_INTERRUPTS taskENABLE_INTERRUPTS + * \ingroup SchedulerControl + */ +#define taskENABLE_INTERRUPTS() portENABLE_INTERRUPTS() + +/* Definitions returned by xTaskGetSchedulerState(). taskSCHEDULER_SUSPENDED is +0 to generate more optimal code when configASSERT() is defined as the constant +is used in assert() statements. */ +#define taskSCHEDULER_SUSPENDED ( ( BaseType_t ) 0 ) +#define taskSCHEDULER_NOT_STARTED ( ( BaseType_t ) 1 ) +#define taskSCHEDULER_RUNNING ( ( BaseType_t ) 2 ) + + +/*----------------------------------------------------------- + * TASK CREATION API + *----------------------------------------------------------*/ + +/** + * task. h + *
+ BaseType_t xTaskCreate(
+							  TaskFunction_t pvTaskCode,
+							  const char * const pcName,
+							  configSTACK_DEPTH_TYPE usStackDepth,
+							  void *pvParameters,
+							  UBaseType_t uxPriority,
+							  TaskHandle_t *pvCreatedTask
+						  );
+ * + * Create a new task and add it to the list of tasks that are ready to run. + * + * Internally, within the FreeRTOS implementation, tasks use two blocks of + * memory. The first block is used to hold the task's data structures. The + * second block is used by the task as its stack. If a task is created using + * xTaskCreate() then both blocks of memory are automatically dynamically + * allocated inside the xTaskCreate() function. (see + * http://www.freertos.org/a00111.html). If a task is created using + * xTaskCreateStatic() then the application writer must provide the required + * memory. xTaskCreateStatic() therefore allows a task to be created without + * using any dynamic memory allocation. + * + * See xTaskCreateStatic() for a version that does not use any dynamic memory + * allocation. + * + * xTaskCreate() can only be used to create a task that has unrestricted + * access to the entire microcontroller memory map. Systems that include MPU + * support can alternatively create an MPU constrained task using + * xTaskCreateRestricted(). + * + * @param pvTaskCode Pointer to the task entry function. Tasks + * must be implemented to never return (i.e. continuous loop). + * + * @param pcName A descriptive name for the task. This is mainly used to + * facilitate debugging. Max length defined by configMAX_TASK_NAME_LEN - default + * is 16. + * + * @param usStackDepth The size of the task stack specified as the number of + * variables the stack can hold - not the number of bytes. For example, if + * the stack is 16 bits wide and usStackDepth is defined as 100, 200 bytes + * will be allocated for stack storage. + * + * @param pvParameters Pointer that will be used as the parameter for the task + * being created. + * + * @param uxPriority The priority at which the task should run. Systems that + * include MPU support can optionally create tasks in a privileged (system) + * mode by setting bit portPRIVILEGE_BIT of the priority parameter. For + * example, to create a privileged task at priority 2 the uxPriority parameter + * should be set to ( 2 | portPRIVILEGE_BIT ). + * + * @param pvCreatedTask Used to pass back a handle by which the created task + * can be referenced. + * + * @return pdPASS if the task was successfully created and added to a ready + * list, otherwise an error code defined in the file projdefs.h + * + * Example usage: +
+ // Task to be created.
+ void vTaskCode( void * pvParameters )
+ {
+	 for( ;; )
+	 {
+		 // Task code goes here.
+	 }
+ }
+
+ // Function that creates a task.
+ void vOtherFunction( void )
+ {
+ static uint8_t ucParameterToPass;
+ TaskHandle_t xHandle = NULL;
+
+	 // Create the task, storing the handle.  Note that the passed parameter ucParameterToPass
+	 // must exist for the lifetime of the task, so in this case is declared static.  If it were just
+	 // an automatic stack variable it might no longer exist, or at least have been corrupted, by the time
+	 // the new task attempts to access it.
+	 xTaskCreate( vTaskCode, "NAME", STACK_SIZE, &ucParameterToPass, tskIDLE_PRIORITY, &xHandle );
+     configASSERT( xHandle );
+
+	 // Use the handle to delete the task.
+     if( xHandle != NULL )
+     {
+	     vTaskDelete( xHandle );
+     }
+ }
+   
+ * \defgroup xTaskCreate xTaskCreate + * \ingroup Tasks + */ +#if( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) + BaseType_t xTaskCreate( TaskFunction_t pxTaskCode, + const char * const pcName, /*lint !e971 Unqualified char types are allowed for strings and single characters only. */ + const configSTACK_DEPTH_TYPE usStackDepth, + void * const pvParameters, + UBaseType_t uxPriority, + TaskHandle_t * const pxCreatedTask ) PRIVILEGED_FUNCTION; +#endif + +/** + * task. h + *
+ TaskHandle_t xTaskCreateStatic( TaskFunction_t pvTaskCode,
+								 const char * const pcName,
+								 uint32_t ulStackDepth,
+								 void *pvParameters,
+								 UBaseType_t uxPriority,
+								 StackType_t *pxStackBuffer,
+								 StaticTask_t *pxTaskBuffer );
+ * + * Create a new task and add it to the list of tasks that are ready to run. + * + * Internally, within the FreeRTOS implementation, tasks use two blocks of + * memory. The first block is used to hold the task's data structures. The + * second block is used by the task as its stack. If a task is created using + * xTaskCreate() then both blocks of memory are automatically dynamically + * allocated inside the xTaskCreate() function. (see + * http://www.freertos.org/a00111.html). If a task is created using + * xTaskCreateStatic() then the application writer must provide the required + * memory. xTaskCreateStatic() therefore allows a task to be created without + * using any dynamic memory allocation. + * + * @param pvTaskCode Pointer to the task entry function. Tasks + * must be implemented to never return (i.e. continuous loop). + * + * @param pcName A descriptive name for the task. This is mainly used to + * facilitate debugging. The maximum length of the string is defined by + * configMAX_TASK_NAME_LEN in FreeRTOSConfig.h. + * + * @param ulStackDepth The size of the task stack specified as the number of + * variables the stack can hold - not the number of bytes. For example, if + * the stack is 32-bits wide and ulStackDepth is defined as 100 then 400 bytes + * will be allocated for stack storage. + * + * @param pvParameters Pointer that will be used as the parameter for the task + * being created. + * + * @param uxPriority The priority at which the task will run. + * + * @param pxStackBuffer Must point to a StackType_t array that has at least + * ulStackDepth indexes - the array will then be used as the task's stack, + * removing the need for the stack to be allocated dynamically. + * + * @param pxTaskBuffer Must point to a variable of type StaticTask_t, which will + * then be used to hold the task's data structures, removing the need for the + * memory to be allocated dynamically. + * + * @return If neither pxStackBuffer or pxTaskBuffer are NULL, then the task will + * be created and a handle to the created task is returned. If either + * pxStackBuffer or pxTaskBuffer are NULL then the task will not be created and + * NULL is returned. + * + * Example usage: +
+
+    // Dimensions the buffer that the task being created will use as its stack.
+    // NOTE:  This is the number of words the stack will hold, not the number of
+    // bytes.  For example, if each stack item is 32-bits, and this is set to 100,
+    // then 400 bytes (100 * 32-bits) will be allocated.
+    #define STACK_SIZE 200
+
+    // Structure that will hold the TCB of the task being created.
+    StaticTask_t xTaskBuffer;
+
+    // Buffer that the task being created will use as its stack.  Note this is
+    // an array of StackType_t variables.  The size of StackType_t is dependent on
+    // the RTOS port.
+    StackType_t xStack[ STACK_SIZE ];
+
+    // Function that implements the task being created.
+    void vTaskCode( void * pvParameters )
+    {
+        // The parameter value is expected to be 1 as 1 is passed in the
+        // pvParameters value in the call to xTaskCreateStatic().
+        configASSERT( ( uint32_t ) pvParameters == 1UL );
+
+        for( ;; )
+        {
+            // Task code goes here.
+        }
+    }
+
+    // Function that creates a task.
+    void vOtherFunction( void )
+    {
+        TaskHandle_t xHandle = NULL;
+
+        // Create the task without using any dynamic memory allocation.
+        xHandle = xTaskCreateStatic(
+                      vTaskCode,       // Function that implements the task.
+                      "NAME",          // Text name for the task.
+                      STACK_SIZE,      // Stack size in words, not bytes.
+                      ( void * ) 1,    // Parameter passed into the task.
+                      tskIDLE_PRIORITY,// Priority at which the task is created.
+                      xStack,          // Array to use as the task's stack.
+                      &xTaskBuffer );  // Variable to hold the task's data structure.
+
+        // puxStackBuffer and pxTaskBuffer were not NULL, so the task will have
+        // been created, and xHandle will be the task's handle.  Use the handle
+        // to suspend the task.
+        vTaskSuspend( xHandle );
+    }
+   
+ * \defgroup xTaskCreateStatic xTaskCreateStatic + * \ingroup Tasks + */ +#if( configSUPPORT_STATIC_ALLOCATION == 1 ) + TaskHandle_t xTaskCreateStatic( TaskFunction_t pxTaskCode, + const char * const pcName, /*lint !e971 Unqualified char types are allowed for strings and single characters only. */ + const uint32_t ulStackDepth, + void * const pvParameters, + UBaseType_t uxPriority, + StackType_t * const puxStackBuffer, + StaticTask_t * const pxTaskBuffer ) PRIVILEGED_FUNCTION; +#endif /* configSUPPORT_STATIC_ALLOCATION */ + +/** + * task. h + *
+ BaseType_t xTaskCreateRestricted( TaskParameters_t *pxTaskDefinition, TaskHandle_t *pxCreatedTask );
+ * + * Only available when configSUPPORT_DYNAMIC_ALLOCATION is set to 1. + * + * xTaskCreateRestricted() should only be used in systems that include an MPU + * implementation. + * + * Create a new task and add it to the list of tasks that are ready to run. + * The function parameters define the memory regions and associated access + * permissions allocated to the task. + * + * See xTaskCreateRestrictedStatic() for a version that does not use any + * dynamic memory allocation. + * + * @param pxTaskDefinition Pointer to a structure that contains a member + * for each of the normal xTaskCreate() parameters (see the xTaskCreate() API + * documentation) plus an optional stack buffer and the memory region + * definitions. + * + * @param pxCreatedTask Used to pass back a handle by which the created task + * can be referenced. + * + * @return pdPASS if the task was successfully created and added to a ready + * list, otherwise an error code defined in the file projdefs.h + * + * Example usage: +
+// Create a TaskParameters_t structure that defines the task to be created.
+static const TaskParameters_t xCheckTaskParameters =
+{
+	vATask,		// pvTaskCode - the function that implements the task.
+	"ATask",	// pcName - just a text name for the task to assist debugging.
+	100,		// usStackDepth	- the stack size DEFINED IN WORDS.
+	NULL,		// pvParameters - passed into the task function as the function parameters.
+	( 1UL | portPRIVILEGE_BIT ),// uxPriority - task priority, set the portPRIVILEGE_BIT if the task should run in a privileged state.
+	cStackBuffer,// puxStackBuffer - the buffer to be used as the task stack.
+
+	// xRegions - Allocate up to three separate memory regions for access by
+	// the task, with appropriate access permissions.  Different processors have
+	// different memory alignment requirements - refer to the FreeRTOS documentation
+	// for full information.
+	{
+		// Base address					Length	Parameters
+        { cReadWriteArray,				32,		portMPU_REGION_READ_WRITE },
+        { cReadOnlyArray,				32,		portMPU_REGION_READ_ONLY },
+        { cPrivilegedOnlyAccessArray,	128,	portMPU_REGION_PRIVILEGED_READ_WRITE }
+	}
+};
+
+int main( void )
+{
+TaskHandle_t xHandle;
+
+	// Create a task from the const structure defined above.  The task handle
+	// is requested (the second parameter is not NULL) but in this case just for
+	// demonstration purposes as it's not actually used.
+	xTaskCreateRestricted( &xCheckTaskParameters, &xHandle );
+
+	// Start the scheduler.
+	vTaskStartScheduler();
+
+	// Will only get here if there was insufficient memory to create the idle
+	// and/or timer task.
+	for( ;; );
+}
+   
+ * \defgroup xTaskCreateRestricted xTaskCreateRestricted + * \ingroup Tasks + */ +#if( portUSING_MPU_WRAPPERS == 1 ) + BaseType_t xTaskCreateRestricted( const TaskParameters_t * const pxTaskDefinition, TaskHandle_t *pxCreatedTask ) PRIVILEGED_FUNCTION; +#endif + +/** + * task. h + *
+ BaseType_t xTaskCreateRestrictedStatic( TaskParameters_t *pxTaskDefinition, TaskHandle_t *pxCreatedTask );
+ * + * Only available when configSUPPORT_STATIC_ALLOCATION is set to 1. + * + * xTaskCreateRestrictedStatic() should only be used in systems that include an + * MPU implementation. + * + * Internally, within the FreeRTOS implementation, tasks use two blocks of + * memory. The first block is used to hold the task's data structures. The + * second block is used by the task as its stack. If a task is created using + * xTaskCreateRestricted() then the stack is provided by the application writer, + * and the memory used to hold the task's data structure is automatically + * dynamically allocated inside the xTaskCreateRestricted() function. If a task + * is created using xTaskCreateRestrictedStatic() then the application writer + * must provide the memory used to hold the task's data structures too. + * xTaskCreateRestrictedStatic() therefore allows a memory protected task to be + * created without using any dynamic memory allocation. + * + * @param pxTaskDefinition Pointer to a structure that contains a member + * for each of the normal xTaskCreate() parameters (see the xTaskCreate() API + * documentation) plus an optional stack buffer and the memory region + * definitions. If configSUPPORT_STATIC_ALLOCATION is set to 1 the structure + * contains an additional member, which is used to point to a variable of type + * StaticTask_t - which is then used to hold the task's data structure. + * + * @param pxCreatedTask Used to pass back a handle by which the created task + * can be referenced. + * + * @return pdPASS if the task was successfully created and added to a ready + * list, otherwise an error code defined in the file projdefs.h + * + * Example usage: +
+// Create a TaskParameters_t structure that defines the task to be created.
+// The StaticTask_t variable is only included in the structure when
+// configSUPPORT_STATIC_ALLOCATION is set to 1.  The PRIVILEGED_DATA macro can
+// be used to force the variable into the RTOS kernel's privileged data area.
+static PRIVILEGED_DATA StaticTask_t xTaskBuffer;
+static const TaskParameters_t xCheckTaskParameters =
+{
+	vATask,		// pvTaskCode - the function that implements the task.
+	"ATask",	// pcName - just a text name for the task to assist debugging.
+	100,		// usStackDepth	- the stack size DEFINED IN WORDS.
+	NULL,		// pvParameters - passed into the task function as the function parameters.
+	( 1UL | portPRIVILEGE_BIT ),// uxPriority - task priority, set the portPRIVILEGE_BIT if the task should run in a privileged state.
+	cStackBuffer,// puxStackBuffer - the buffer to be used as the task stack.
+
+	// xRegions - Allocate up to three separate memory regions for access by
+	// the task, with appropriate access permissions.  Different processors have
+	// different memory alignment requirements - refer to the FreeRTOS documentation
+	// for full information.
+	{
+		// Base address					Length	Parameters
+        { cReadWriteArray,				32,		portMPU_REGION_READ_WRITE },
+        { cReadOnlyArray,				32,		portMPU_REGION_READ_ONLY },
+        { cPrivilegedOnlyAccessArray,	128,	portMPU_REGION_PRIVILEGED_READ_WRITE }
+	},
+
+	&xTaskBuffer // Holds the task's data structure.
+};
+
+int main( void )
+{
+TaskHandle_t xHandle;
+
+	// Create a task from the const structure defined above.  The task handle
+	// is requested (the second parameter is not NULL) but in this case just for
+	// demonstration purposes as it's not actually used.
+	xTaskCreateRestrictedStatic( &xCheckTaskParameters, &xHandle );
+
+	// Start the scheduler.
+	vTaskStartScheduler();
+
+	// Will only get here if there was insufficient memory to create the idle
+	// and/or timer task.
+	for( ;; );
+}
+   
+ * \defgroup xTaskCreateRestrictedStatic xTaskCreateRestrictedStatic + * \ingroup Tasks + */ +#if( ( portUSING_MPU_WRAPPERS == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) ) + BaseType_t xTaskCreateRestrictedStatic( const TaskParameters_t * const pxTaskDefinition, TaskHandle_t *pxCreatedTask ) PRIVILEGED_FUNCTION; +#endif + +/** + * task. h + *
+ void vTaskAllocateMPURegions( TaskHandle_t xTask, const MemoryRegion_t * const pxRegions );
+ * + * Memory regions are assigned to a restricted task when the task is created by + * a call to xTaskCreateRestricted(). These regions can be redefined using + * vTaskAllocateMPURegions(). + * + * @param xTask The handle of the task being updated. + * + * @param xRegions A pointer to an MemoryRegion_t structure that contains the + * new memory region definitions. + * + * Example usage: +
+// Define an array of MemoryRegion_t structures that configures an MPU region
+// allowing read/write access for 1024 bytes starting at the beginning of the
+// ucOneKByte array.  The other two of the maximum 3 definable regions are
+// unused so set to zero.
+static const MemoryRegion_t xAltRegions[ portNUM_CONFIGURABLE_REGIONS ] =
+{
+	// Base address		Length		Parameters
+	{ ucOneKByte,		1024,		portMPU_REGION_READ_WRITE },
+	{ 0,				0,			0 },
+	{ 0,				0,			0 }
+};
+
+void vATask( void *pvParameters )
+{
+	// This task was created such that it has access to certain regions of
+	// memory as defined by the MPU configuration.  At some point it is
+	// desired that these MPU regions are replaced with that defined in the
+	// xAltRegions const struct above.  Use a call to vTaskAllocateMPURegions()
+	// for this purpose.  NULL is used as the task handle to indicate that this
+	// function should modify the MPU regions of the calling task.
+	vTaskAllocateMPURegions( NULL, xAltRegions );
+
+	// Now the task can continue its function, but from this point on can only
+	// access its stack and the ucOneKByte array (unless any other statically
+	// defined or shared regions have been declared elsewhere).
+}
+   
+ * \defgroup xTaskCreateRestricted xTaskCreateRestricted + * \ingroup Tasks + */ +void vTaskAllocateMPURegions( TaskHandle_t xTask, const MemoryRegion_t * const pxRegions ) PRIVILEGED_FUNCTION; + +/** + * task. h + *
void vTaskDelete( TaskHandle_t xTask );
+ * + * INCLUDE_vTaskDelete must be defined as 1 for this function to be available. + * See the configuration section for more information. + * + * Remove a task from the RTOS real time kernel's management. The task being + * deleted will be removed from all ready, blocked, suspended and event lists. + * + * NOTE: The idle task is responsible for freeing the kernel allocated + * memory from tasks that have been deleted. It is therefore important that + * the idle task is not starved of microcontroller processing time if your + * application makes any calls to vTaskDelete (). Memory allocated by the + * task code is not automatically freed, and should be freed before the task + * is deleted. + * + * See the demo application file death.c for sample code that utilises + * vTaskDelete (). + * + * @param xTask The handle of the task to be deleted. Passing NULL will + * cause the calling task to be deleted. + * + * Example usage: +
+ void vOtherFunction( void )
+ {
+ TaskHandle_t xHandle;
+
+	 // Create the task, storing the handle.
+	 xTaskCreate( vTaskCode, "NAME", STACK_SIZE, NULL, tskIDLE_PRIORITY, &xHandle );
+
+	 // Use the handle to delete the task.
+	 vTaskDelete( xHandle );
+ }
+   
+ * \defgroup vTaskDelete vTaskDelete + * \ingroup Tasks + */ +void vTaskDelete( TaskHandle_t xTaskToDelete ) PRIVILEGED_FUNCTION; + +/*----------------------------------------------------------- + * TASK CONTROL API + *----------------------------------------------------------*/ + +/** + * task. h + *
void vTaskDelay( const TickType_t xTicksToDelay );
+ * + * Delay a task for a given number of ticks. The actual time that the + * task remains blocked depends on the tick rate. The constant + * portTICK_PERIOD_MS can be used to calculate real time from the tick + * rate - with the resolution of one tick period. + * + * INCLUDE_vTaskDelay must be defined as 1 for this function to be available. + * See the configuration section for more information. + * + * + * vTaskDelay() specifies a time at which the task wishes to unblock relative to + * the time at which vTaskDelay() is called. For example, specifying a block + * period of 100 ticks will cause the task to unblock 100 ticks after + * vTaskDelay() is called. vTaskDelay() does not therefore provide a good method + * of controlling the frequency of a periodic task as the path taken through the + * code, as well as other task and interrupt activity, will effect the frequency + * at which vTaskDelay() gets called and therefore the time at which the task + * next executes. See vTaskDelayUntil() for an alternative API function designed + * to facilitate fixed frequency execution. It does this by specifying an + * absolute time (rather than a relative time) at which the calling task should + * unblock. + * + * @param xTicksToDelay The amount of time, in tick periods, that + * the calling task should block. + * + * Example usage: + + void vTaskFunction( void * pvParameters ) + { + // Block for 500ms. + const TickType_t xDelay = 500 / portTICK_PERIOD_MS; + + for( ;; ) + { + // Simply toggle the LED every 500ms, blocking between each toggle. + vToggleLED(); + vTaskDelay( xDelay ); + } + } + + * \defgroup vTaskDelay vTaskDelay + * \ingroup TaskCtrl + */ +void vTaskDelay( const TickType_t xTicksToDelay ) PRIVILEGED_FUNCTION; + +/** + * task. h + *
void vTaskDelayUntil( TickType_t *pxPreviousWakeTime, const TickType_t xTimeIncrement );
+ * + * INCLUDE_vTaskDelayUntil must be defined as 1 for this function to be available. + * See the configuration section for more information. + * + * Delay a task until a specified time. This function can be used by periodic + * tasks to ensure a constant execution frequency. + * + * This function differs from vTaskDelay () in one important aspect: vTaskDelay () will + * cause a task to block for the specified number of ticks from the time vTaskDelay () is + * called. It is therefore difficult to use vTaskDelay () by itself to generate a fixed + * execution frequency as the time between a task starting to execute and that task + * calling vTaskDelay () may not be fixed [the task may take a different path though the + * code between calls, or may get interrupted or preempted a different number of times + * each time it executes]. + * + * Whereas vTaskDelay () specifies a wake time relative to the time at which the function + * is called, vTaskDelayUntil () specifies the absolute (exact) time at which it wishes to + * unblock. + * + * The constant portTICK_PERIOD_MS can be used to calculate real time from the tick + * rate - with the resolution of one tick period. + * + * @param pxPreviousWakeTime Pointer to a variable that holds the time at which the + * task was last unblocked. The variable must be initialised with the current time + * prior to its first use (see the example below). Following this the variable is + * automatically updated within vTaskDelayUntil (). + * + * @param xTimeIncrement The cycle time period. The task will be unblocked at + * time *pxPreviousWakeTime + xTimeIncrement. Calling vTaskDelayUntil with the + * same xTimeIncrement parameter value will cause the task to execute with + * a fixed interface period. + * + * Example usage: +
+ // Perform an action every 10 ticks.
+ void vTaskFunction( void * pvParameters )
+ {
+ TickType_t xLastWakeTime;
+ const TickType_t xFrequency = 10;
+
+	 // Initialise the xLastWakeTime variable with the current time.
+	 xLastWakeTime = xTaskGetTickCount ();
+	 for( ;; )
+	 {
+		 // Wait for the next cycle.
+		 vTaskDelayUntil( &xLastWakeTime, xFrequency );
+
+		 // Perform action here.
+	 }
+ }
+   
+ * \defgroup vTaskDelayUntil vTaskDelayUntil + * \ingroup TaskCtrl + */ +void vTaskDelayUntil( TickType_t * const pxPreviousWakeTime, const TickType_t xTimeIncrement ) PRIVILEGED_FUNCTION; + +/** + * task. h + *
BaseType_t xTaskAbortDelay( TaskHandle_t xTask );
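+ * Example use (a minimal sketch; xDelayedTask is assumed to be the handle of a
+ * task that is currently blocked in a call such as vTaskDelay()):
+
+ void vWakeDelayedTask( TaskHandle_t xDelayedTask )
+ {
+	 // Force the task out of the Blocked state.  pdFAIL is returned if the
+	 // task was not in the Blocked state when this call was made.
+	 if( xTaskAbortDelay( xDelayedTask ) == pdPASS )
+	 {
+		 // The task has left the Blocked state and will return from the API
+		 // call that originally blocked it.
+	 }
+ }
+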
+ * + * INCLUDE_xTaskAbortDelay must be defined as 1 in FreeRTOSConfig.h for this + * function to be available. + * + * A task will enter the Blocked state when it is waiting for an event. The + * event it is waiting for can be a temporal event (waiting for a time), such + * as when vTaskDelay() is called, or an event on an object, such as when + * xQueueReceive() or ulTaskNotifyTake() is called. If the handle of a task + * that is in the Blocked state is used in a call to xTaskAbortDelay() then the + * task will leave the Blocked state, and return from whichever function call + * placed the task into the Blocked state. + * + * @param xTask The handle of the task to remove from the Blocked state. + * + * @return If the task referenced by xTask was not in the Blocked state then + * pdFAIL is returned. Otherwise pdPASS is returned. + * + * \defgroup xTaskAbortDelay xTaskAbortDelay + * \ingroup TaskCtrl + */ +BaseType_t xTaskAbortDelay( TaskHandle_t xTask ) PRIVILEGED_FUNCTION; + +/** + * task. h + *
UBaseType_t uxTaskPriorityGet( const TaskHandle_t xTask );
+ * + * INCLUDE_uxTaskPriorityGet must be defined as 1 for this function to be available. + * See the configuration section for more information. + * + * Obtain the priority of any task. + * + * @param xTask Handle of the task to be queried. Passing a NULL + * handle results in the priority of the calling task being returned. + * + * @return The priority of xTask. + * + * Example usage: +
+ void vAFunction( void )
+ {
+ TaskHandle_t xHandle;
+
+	 // Create a task, storing the handle.
+	 xTaskCreate( vTaskCode, "NAME", STACK_SIZE, NULL, tskIDLE_PRIORITY, &xHandle );
+
+	 // ...
+
+	 // Use the handle to obtain the priority of the created task.
+	 // It was created with tskIDLE_PRIORITY, but may have changed
+	 // it itself.
+	 if( uxTaskPriorityGet( xHandle ) != tskIDLE_PRIORITY )
+	 {
+		 // The task has changed its priority.
+	 }
+
+	 // ...
+
+	 // Is our priority higher than the created task?
+	 if( uxTaskPriorityGet( xHandle ) < uxTaskPriorityGet( NULL ) )
+	 {
+		 // Our priority (obtained using NULL handle) is higher.
+	 }
+ }
+   
+ * \defgroup uxTaskPriorityGet uxTaskPriorityGet + * \ingroup TaskCtrl + */ +UBaseType_t uxTaskPriorityGet( const TaskHandle_t xTask ) PRIVILEGED_FUNCTION; + +/** + * task. h + *
UBaseType_t uxTaskPriorityGetFromISR( const TaskHandle_t xTask );
+ * + * A version of uxTaskPriorityGet() that can be used from an ISR. + */ +UBaseType_t uxTaskPriorityGetFromISR( const TaskHandle_t xTask ) PRIVILEGED_FUNCTION; + +/** + * task. h + *
eTaskState eTaskGetState( TaskHandle_t xTask );
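+ * Example use (a minimal sketch; xHandle is assumed to be a valid task handle
+ * obtained elsewhere, for example from xTaskCreate() or xTaskGetHandle()):
+
+ void vCheckTaskState( TaskHandle_t xHandle )
+ {
+	 // Query the state of the task at this instant.  The state may change
+	 // again before the returned value is even tested.
+	 if( eTaskGetState( xHandle ) == eSuspended )
+	 {
+		 // The task was in the Suspended state when it was queried.
+	 }
+ }
+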
+ * + * INCLUDE_eTaskGetState must be defined as 1 for this function to be available. + * See the configuration section for more information. + * + * Obtain the state of any task. States are encoded by the eTaskState + * enumerated type. + * + * @param xTask Handle of the task to be queried. + * + * @return The state of xTask at the time the function was called. Note the + * state of the task might change between the function being called, and the + * functions return value being tested by the calling task. + */ +eTaskState eTaskGetState( TaskHandle_t xTask ) PRIVILEGED_FUNCTION; + +/** + * task. h + *
void vTaskGetInfo( TaskHandle_t xTask, TaskStatus_t *pxTaskStatus, BaseType_t xGetFreeStackSpace, eTaskState eState );
+ * + * configUSE_TRACE_FACILITY must be defined as 1 for this function to be + * available. See the configuration section for more information. + * + * Populates a TaskStatus_t structure with information about a task. + * + * @param xTask Handle of the task being queried. If xTask is NULL then + * information will be returned about the calling task. + * + * @param pxTaskStatus A pointer to the TaskStatus_t structure that will be + * filled with information about the task referenced by the handle passed using + * the xTask parameter. + * + * @xGetFreeStackSpace The TaskStatus_t structure contains a member to report + * the stack high water mark of the task being queried. Calculating the stack + * high water mark takes a relatively long time, and can make the system + * temporarily unresponsive - so the xGetFreeStackSpace parameter is provided to + * allow the high water mark checking to be skipped. The high watermark value + * will only be written to the TaskStatus_t structure if xGetFreeStackSpace is + * not set to pdFALSE; + * + * @param eState The TaskStatus_t structure contains a member to report the + * state of the task being queried. Obtaining the task state is not as fast as + * a simple assignment - so the eState parameter is provided to allow the state + * information to be omitted from the TaskStatus_t structure. To obtain state + * information then set eState to eInvalid - otherwise the value passed in + * eState will be reported as the task state in the TaskStatus_t structure. + * + * Example usage: +
+ void vAFunction( void )
+ {
+ TaskHandle_t xHandle;
+ TaskStatus_t xTaskDetails;
+
+    // Obtain the handle of a task from its name.
+    xHandle = xTaskGetHandle( "Task_Name" );
+
+    // Check the handle is not NULL.
+    configASSERT( xHandle );
+
+    // Use the handle to obtain further information about the task.
+    vTaskGetInfo( xHandle,
+                  &xTaskDetails,
+                  pdTRUE, // Include the high water mark in xTaskDetails.
+                  eInvalid ); // Include the task state in xTaskDetails.
+ }
+   
+ * \defgroup vTaskGetInfo vTaskGetInfo + * \ingroup TaskCtrl + */ +void vTaskGetInfo( TaskHandle_t xTask, TaskStatus_t *pxTaskStatus, BaseType_t xGetFreeStackSpace, eTaskState eState ) PRIVILEGED_FUNCTION; + +/** + * task. h + *
void vTaskPrioritySet( TaskHandle_t xTask, UBaseType_t uxNewPriority );
+ * + * INCLUDE_vTaskPrioritySet must be defined as 1 for this function to be available. + * See the configuration section for more information. + * + * Set the priority of any task. + * + * A context switch will occur before the function returns if the priority + * being set is higher than the currently executing task. + * + * @param xTask Handle to the task for which the priority is being set. + * Passing a NULL handle results in the priority of the calling task being set. + * + * @param uxNewPriority The priority to which the task will be set. + * + * Example usage: +
+ void vAFunction( void )
+ {
+ TaskHandle_t xHandle;
+
+	 // Create a task, storing the handle.
+	 xTaskCreate( vTaskCode, "NAME", STACK_SIZE, NULL, tskIDLE_PRIORITY, &xHandle );
+
+	 // ...
+
+	 // Use the handle to raise the priority of the created task.
+	 vTaskPrioritySet( xHandle, tskIDLE_PRIORITY + 1 );
+
+	 // ...
+
+	 // Use a NULL handle to raise our priority to the same value.
+	 vTaskPrioritySet( NULL, tskIDLE_PRIORITY + 1 );
+ }
+   
+ * \defgroup vTaskPrioritySet vTaskPrioritySet + * \ingroup TaskCtrl + */ +void vTaskPrioritySet( TaskHandle_t xTask, UBaseType_t uxNewPriority ) PRIVILEGED_FUNCTION; + +/** + * task. h + *
void vTaskSuspend( TaskHandle_t xTaskToSuspend );
+ * + * INCLUDE_vTaskSuspend must be defined as 1 for this function to be available. + * See the configuration section for more information. + * + * Suspend any task. When suspended a task will never get any microcontroller + * processing time, no matter what its priority. + * + * Calls to vTaskSuspend are not accumulative - + * i.e. calling vTaskSuspend () twice on the same task still only requires one + * call to vTaskResume () to ready the suspended task. + * + * @param xTaskToSuspend Handle to the task being suspended. Passing a NULL + * handle will cause the calling task to be suspended. + * + * Example usage: +
+ void vAFunction( void )
+ {
+ TaskHandle_t xHandle;
+
+	 // Create a task, storing the handle.
+	 xTaskCreate( vTaskCode, "NAME", STACK_SIZE, NULL, tskIDLE_PRIORITY, &xHandle );
+
+	 // ...
+
+	 // Use the handle to suspend the created task.
+	 vTaskSuspend( xHandle );
+
+	 // ...
+
+	 // The created task will not run during this period, unless
+	 // another task calls vTaskResume( xHandle ).
+
+	 //...
+
+
+	 // Suspend ourselves.
+	 vTaskSuspend( NULL );
+
+	 // We cannot get here unless another task calls vTaskResume
+	 // with our handle as the parameter.
+ }
+   
+ * \defgroup vTaskSuspend vTaskSuspend + * \ingroup TaskCtrl + */ +void vTaskSuspend( TaskHandle_t xTaskToSuspend ) PRIVILEGED_FUNCTION; + +/** + * task. h + *
void vTaskResume( TaskHandle_t xTaskToResume );
+ * + * INCLUDE_vTaskSuspend must be defined as 1 for this function to be available. + * See the configuration section for more information. + * + * Resumes a suspended task. + * + * A task that has been suspended by one or more calls to vTaskSuspend () + * will be made available for running again by a single call to + * vTaskResume (). + * + * @param xTaskToResume Handle to the task being readied. + * + * Example usage: +
+ void vAFunction( void )
+ {
+ TaskHandle_t xHandle;
+
+	 // Create a task, storing the handle.
+	 xTaskCreate( vTaskCode, "NAME", STACK_SIZE, NULL, tskIDLE_PRIORITY, &xHandle );
+
+	 // ...
+
+	 // Use the handle to suspend the created task.
+	 vTaskSuspend( xHandle );
+
+	 // ...
+
+	 // The created task will not run during this period, unless
+	 // another task calls vTaskResume( xHandle ).
+
+	 //...
+
+
+	 // Resume the suspended task ourselves.
+	 vTaskResume( xHandle );
+
+	 // The created task will once again get microcontroller processing
+	 // time in accordance with its priority within the system.
+ }
+   
+ * \defgroup vTaskResume vTaskResume + * \ingroup TaskCtrl + */ +void vTaskResume( TaskHandle_t xTaskToResume ) PRIVILEGED_FUNCTION; + +/** + * task. h + *
void xTaskResumeFromISR( TaskHandle_t xTaskToResume );
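+ * Example use (a minimal sketch; xHandleOfSuspendedTask is assumed to hold the
+ * handle of a task that previously suspended itself, and portYIELD_FROM_ISR()
+ * is the yield macro provided by most ports):
+
+ void vAnExampleISR( void )
+ {
+ BaseType_t xYieldRequired;
+
+	 // Ready the task that earlier called vTaskSuspend( NULL ).
+	 xYieldRequired = xTaskResumeFromISR( xHandleOfSuspendedTask );
+
+	 // If readying the task should result in a context switch, request it
+	 // before the interrupt exits so the resumed task runs immediately.
+	 portYIELD_FROM_ISR( xYieldRequired );
+ }
+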
+ * + * INCLUDE_xTaskResumeFromISR must be defined as 1 for this function to be + * available. See the configuration section for more information. + * + * An implementation of vTaskResume() that can be called from within an ISR. + * + * A task that has been suspended by one or more calls to vTaskSuspend () + * will be made available for running again by a single call to + * xTaskResumeFromISR (). + * + * xTaskResumeFromISR() should not be used to synchronise a task with an + * interrupt if there is a chance that the interrupt could arrive prior to the + * task being suspended - as this can lead to interrupts being missed. Use of a + * semaphore as a synchronisation mechanism would avoid this eventuality. + * + * @param xTaskToResume Handle to the task being readied. + * + * @return pdTRUE if resuming the task should result in a context switch, + * otherwise pdFALSE. This is used by the ISR to determine if a context switch + * may be required following the ISR. + * + * \defgroup vTaskResumeFromISR vTaskResumeFromISR + * \ingroup TaskCtrl + */ +BaseType_t xTaskResumeFromISR( TaskHandle_t xTaskToResume ) PRIVILEGED_FUNCTION; + +/*----------------------------------------------------------- + * SCHEDULER CONTROL + *----------------------------------------------------------*/ + +/** + * task. h + *
void vTaskStartScheduler( void );
+ * + * Starts the real time kernel tick processing. After calling the kernel + * has control over which tasks are executed and when. + * + * See the demo application file main.c for an example of creating + * tasks and starting the kernel. + * + * Example usage: +
+ void vAFunction( void )
+ {
+	 // Create at least one task before starting the kernel.
+	 xTaskCreate( vTaskCode, "NAME", STACK_SIZE, NULL, tskIDLE_PRIORITY, NULL );
+
+	 // Start the real time kernel with preemption.
+	 vTaskStartScheduler ();
+
+	 // Will not get here unless a task calls vTaskEndScheduler ()
+ }
+   
+ * + * \defgroup vTaskStartScheduler vTaskStartScheduler + * \ingroup SchedulerControl + */ +void vTaskStartScheduler( void ) PRIVILEGED_FUNCTION; + +/** + * task. h + *
void vTaskEndScheduler( void );
+ * + * NOTE: At the time of writing only the x86 real mode port, which runs on a PC + * in place of DOS, implements this function. + * + * Stops the real time kernel tick. All created tasks will be automatically + * deleted and multitasking (either preemptive or cooperative) will + * stop. Execution then resumes from the point where vTaskStartScheduler () + * was called, as if vTaskStartScheduler () had just returned. + * + * See the demo application file main. c in the demo/PC directory for an + * example that uses vTaskEndScheduler (). + * + * vTaskEndScheduler () requires an exit function to be defined within the + * portable layer (see vPortEndScheduler () in port. c for the PC port). This + * performs hardware specific operations such as stopping the kernel tick. + * + * vTaskEndScheduler () will cause all of the resources allocated by the + * kernel to be freed - but will not free resources allocated by application + * tasks. + * + * Example usage: +
+ void vTaskCode( void * pvParameters )
+ {
+	 for( ;; )
+	 {
+		 // Task code goes here.
+
+		 // At some point we want to end the real time kernel processing
+		 // so call ...
+		 vTaskEndScheduler ();
+	 }
+ }
+
+ void vAFunction( void )
+ {
+	 // Create at least one task before starting the kernel.
+	 xTaskCreate( vTaskCode, "NAME", STACK_SIZE, NULL, tskIDLE_PRIORITY, NULL );
+
+	 // Start the real time kernel with preemption.
+	 vTaskStartScheduler ();
+
+	 // Will only get here when the vTaskCode () task has called
+	 // vTaskEndScheduler ().  When we get here we are back to single task
+	 // execution.
+ }
+   
+ * + * \defgroup vTaskEndScheduler vTaskEndScheduler + * \ingroup SchedulerControl + */ +void vTaskEndScheduler( void ) PRIVILEGED_FUNCTION; + +/** + * task. h + *
void vTaskSuspendAll( void );
+ * + * Suspends the scheduler without disabling interrupts. Context switches will + * not occur while the scheduler is suspended. + * + * After calling vTaskSuspendAll () the calling task will continue to execute + * without risk of being swapped out until a call to xTaskResumeAll () has been + * made. + * + * API functions that have the potential to cause a context switch (for example, + * vTaskDelayUntil(), xQueueSend(), etc.) must not be called while the scheduler + * is suspended. + * + * Example usage: +
+ void vTask1( void * pvParameters )
+ {
+	 for( ;; )
+	 {
+		 // Task code goes here.
+
+		 // ...
+
+		 // At some point the task wants to perform a long operation during
+		 // which it does not want to get swapped out.  It cannot use
+		 // taskENTER_CRITICAL ()/taskEXIT_CRITICAL () as the length of the
+		 // operation may cause interrupts to be missed - including the
+		 // ticks.
+
+		 // Prevent the real time kernel swapping out the task.
+		 vTaskSuspendAll ();
+
+		 // Perform the operation here.  There is no need to use critical
+		 // sections as we have all the microcontroller processing time.
+		 // During this time interrupts will still operate and the kernel
+		 // tick count will be maintained.
+
+		 // ...
+
+		 // The operation is complete.  Restart the kernel.
+		 xTaskResumeAll ();
+	 }
+ }
+   
+ * \defgroup vTaskSuspendAll vTaskSuspendAll + * \ingroup SchedulerControl + */ +void vTaskSuspendAll( void ) PRIVILEGED_FUNCTION; + +/** + * task. h + *
BaseType_t xTaskResumeAll( void );
+ * + * Resumes scheduler activity after it was suspended by a call to + * vTaskSuspendAll(). + * + * xTaskResumeAll() only resumes the scheduler. It does not unsuspend tasks + * that were previously suspended by a call to vTaskSuspend(). + * + * @return If resuming the scheduler caused a context switch then pdTRUE is + * returned, otherwise pdFALSE is returned. + * + * Example usage: +
+ void vTask1( void * pvParameters )
+ {
+	 for( ;; )
+	 {
+		 // Task code goes here.
+
+		 // ...
+
+		 // At some point the task wants to perform a long operation during
+		 // which it does not want to get swapped out.  It cannot use
+		 // taskENTER_CRITICAL ()/taskEXIT_CRITICAL () as the length of the
+		 // operation may cause interrupts to be missed - including the
+		 // ticks.
+
+		 // Prevent the real time kernel swapping out the task.
+		 vTaskSuspendAll ();
+
+		 // Perform the operation here.  There is no need to use critical
+		 // sections as we have all the microcontroller processing time.
+		 // During this time interrupts will still operate and the real
+		 // time kernel tick count will be maintained.
+
+		 // ...
+
+		 // The operation is complete.  Restart the kernel.  We want to force
+		 // a context switch - but there is no point if resuming the scheduler
+		 // caused a context switch already.
+		 if( !xTaskResumeAll () )
+		 {
+			  taskYIELD ();
+		 }
+	 }
+ }
+   
+ * \defgroup xTaskResumeAll xTaskResumeAll + * \ingroup SchedulerControl + */ +BaseType_t xTaskResumeAll( void ) PRIVILEGED_FUNCTION; + +/*----------------------------------------------------------- + * TASK UTILITIES + *----------------------------------------------------------*/ + +/** + * task. h + *
TickType_t xTaskGetTickCount( void );
+ * + * @return The count of ticks since vTaskStartScheduler was called. + * + * \defgroup xTaskGetTickCount xTaskGetTickCount + * \ingroup TaskUtils + */ +TickType_t xTaskGetTickCount( void ) PRIVILEGED_FUNCTION; + +/** + * task. h + *
TickType_t xTaskGetTickCountFromISR( void );
+ * + * @return The count of ticks since vTaskStartScheduler was called. + * + * This is a version of xTaskGetTickCount() that is safe to be called from an + * ISR - provided that TickType_t is the natural word size of the + * microcontroller being used or interrupt nesting is either not supported or + * not being used. + * + * \defgroup xTaskGetTickCountFromISR xTaskGetTickCountFromISR + * \ingroup TaskUtils + */ +TickType_t xTaskGetTickCountFromISR( void ) PRIVILEGED_FUNCTION; + +/** + * task. h + *
UBaseType_t uxTaskGetNumberOfTasks( void );
+ * + * @return The number of tasks that the real time kernel is currently managing. + * This includes all ready, blocked and suspended tasks. A task that + * has been deleted but not yet freed by the idle task will also be + * included in the count. + * + * \defgroup uxTaskGetNumberOfTasks uxTaskGetNumberOfTasks + * \ingroup TaskUtils + */ +UBaseType_t uxTaskGetNumberOfTasks( void ) PRIVILEGED_FUNCTION; + +/** + * task. h + *
char *pcTaskGetName( TaskHandle_t xTaskToQuery );
+ * + * @return The text (human readable) name of the task referenced by the handle + * xTaskToQuery. A task can query its own name by either passing in its own + * handle, or by setting xTaskToQuery to NULL. + * + * \defgroup pcTaskGetName pcTaskGetName + * \ingroup TaskUtils + */ +char *pcTaskGetName( TaskHandle_t xTaskToQuery ) PRIVILEGED_FUNCTION; /*lint !e971 Unqualified char types are allowed for strings and single characters only. */ + +/** + * task. h + *
TaskHandle_t xTaskGetHandle( const char *pcNameToQuery );
+ * + * NOTE: This function takes a relatively long time to complete and should be + * used sparingly. + * + * @return The handle of the task that has the human readable name pcNameToQuery. + * NULL is returned if no matching name is found. INCLUDE_xTaskGetHandle + * must be set to 1 in FreeRTOSConfig.h for xTaskGetHandle() to be available. + * + * \defgroup xTaskGetHandle xTaskGetHandle + * \ingroup TaskUtils + */ +TaskHandle_t xTaskGetHandle( const char *pcNameToQuery ) PRIVILEGED_FUNCTION; /*lint !e971 Unqualified char types are allowed for strings and single characters only. */ + +/** + * task.h + *
UBaseType_t uxTaskGetStackHighWaterMark( TaskHandle_t xTask );
+ * + * INCLUDE_uxTaskGetStackHighWaterMark must be set to 1 in FreeRTOSConfig.h for + * this function to be available. + * + * Returns the high water mark of the stack associated with xTask. That is, + * the minimum free stack space there has been (in words, so on a 32 bit machine + * a value of 1 means 4 bytes) since the task started. The smaller the returned + * number the closer the task has come to overflowing its stack. + * + * uxTaskGetStackHighWaterMark() and uxTaskGetStackHighWaterMark2() are the + * same except for their return type. Using configSTACK_DEPTH_TYPE allows the + * user to determine the return type. It gets around the problem of the value + * overflowing on 8-bit types without breaking backward compatibility for + * applications that expect an 8-bit return type. + * + * @param xTask Handle of the task associated with the stack to be checked. + * Set xTask to NULL to check the stack of the calling task. + * + * @return The smallest amount of free stack space there has been (in words, so + * actual spaces on the stack rather than bytes) since the task referenced by + * xTask was created. + */ +UBaseType_t uxTaskGetStackHighWaterMark( TaskHandle_t xTask ) PRIVILEGED_FUNCTION; + +/** + * task.h + *
configSTACK_DEPTH_TYPE uxTaskGetStackHighWaterMark2( TaskHandle_t xTask );
+ * + * INCLUDE_uxTaskGetStackHighWaterMark2 must be set to 1 in FreeRTOSConfig.h for + * this function to be available. + * + * Returns the high water mark of the stack associated with xTask. That is, + * the minimum free stack space there has been (in words, so on a 32 bit machine + * a value of 1 means 4 bytes) since the task started. The smaller the returned + * number the closer the task has come to overflowing its stack. + * + * uxTaskGetStackHighWaterMark() and uxTaskGetStackHighWaterMark2() are the + * same except for their return type. Using configSTACK_DEPTH_TYPE allows the + * user to determine the return type. It gets around the problem of the value + * overflowing on 8-bit types without breaking backward compatibility for + * applications that expect an 8-bit return type. + * + * @param xTask Handle of the task associated with the stack to be checked. + * Set xTask to NULL to check the stack of the calling task. + * + * @return The smallest amount of free stack space there has been (in words, so + * actual spaces on the stack rather than bytes) since the task referenced by + * xTask was created. + */ +configSTACK_DEPTH_TYPE uxTaskGetStackHighWaterMark2( TaskHandle_t xTask ) PRIVILEGED_FUNCTION; + +/* When using trace macros it is sometimes necessary to include task.h before +FreeRTOS.h. When this is done TaskHookFunction_t will not yet have been defined, +so the following two prototypes will cause a compilation error. This can be +fixed by simply guarding against the inclusion of these two prototypes unless +they are explicitly required by the configUSE_APPLICATION_TASK_TAG configuration +constant. */ +#ifdef configUSE_APPLICATION_TASK_TAG + #if configUSE_APPLICATION_TASK_TAG == 1 + /** + * task.h + *
void vTaskSetApplicationTaskTag( TaskHandle_t xTask, TaskHookFunction_t pxHookFunction );
+ * + * Sets pxHookFunction to be the task hook function used by the task xTask. + * Passing xTask as NULL has the effect of setting the calling tasks hook + * function. + */ + void vTaskSetApplicationTaskTag( TaskHandle_t xTask, TaskHookFunction_t pxHookFunction ) PRIVILEGED_FUNCTION; + + /** + * task.h + *
void xTaskGetApplicationTaskTag( TaskHandle_t xTask );
+ * + * Returns the pxHookFunction value assigned to the task xTask. Do not + * call from an interrupt service routine - call + * xTaskGetApplicationTaskTagFromISR() instead. + */ + TaskHookFunction_t xTaskGetApplicationTaskTag( TaskHandle_t xTask ) PRIVILEGED_FUNCTION; + + /** + * task.h + *
void xTaskGetApplicationTaskTagFromISR( TaskHandle_t xTask );
+ * + * Returns the pxHookFunction value assigned to the task xTask. Can + * be called from an interrupt service routine. + */ + TaskHookFunction_t xTaskGetApplicationTaskTagFromISR( TaskHandle_t xTask ) PRIVILEGED_FUNCTION; + #endif /* configUSE_APPLICATION_TASK_TAG ==1 */ +#endif /* ifdef configUSE_APPLICATION_TASK_TAG */ + +#if( configNUM_THREAD_LOCAL_STORAGE_POINTERS > 0 ) + + /* Each task contains an array of pointers that is dimensioned by the + configNUM_THREAD_LOCAL_STORAGE_POINTERS setting in FreeRTOSConfig.h. The + kernel does not use the pointers itself, so the application writer can use + the pointers for any purpose they wish. The following two functions are + used to set and query a pointer respectively. */ + void vTaskSetThreadLocalStoragePointer( TaskHandle_t xTaskToSet, BaseType_t xIndex, void *pvValue ) PRIVILEGED_FUNCTION; + void *pvTaskGetThreadLocalStoragePointer( TaskHandle_t xTaskToQuery, BaseType_t xIndex ) PRIVILEGED_FUNCTION; + +#endif + +/** + * task.h + *
BaseType_t xTaskCallApplicationTaskHook( TaskHandle_t xTask, void *pvParameter );
+ * + * Calls the hook function associated with xTask. Passing xTask as NULL has + * the effect of calling the Running tasks (the calling task) hook function. + * + * pvParameter is passed to the hook function for the task to interpret as it + * wants. The return value is the value returned by the task hook function + * registered by the user. + */ +BaseType_t xTaskCallApplicationTaskHook( TaskHandle_t xTask, void *pvParameter ) PRIVILEGED_FUNCTION; + +/** + * xTaskGetIdleTaskHandle() is only available if + * INCLUDE_xTaskGetIdleTaskHandle is set to 1 in FreeRTOSConfig.h. + * + * Simply returns the handle of the idle task. It is not valid to call + * xTaskGetIdleTaskHandle() before the scheduler has been started. + */ +TaskHandle_t xTaskGetIdleTaskHandle( void ) PRIVILEGED_FUNCTION; + +/** + * configUSE_TRACE_FACILITY must be defined as 1 in FreeRTOSConfig.h for + * uxTaskGetSystemState() to be available. + * + * uxTaskGetSystemState() populates an TaskStatus_t structure for each task in + * the system. TaskStatus_t structures contain, among other things, members + * for the task handle, task name, task priority, task state, and total amount + * of run time consumed by the task. See the TaskStatus_t structure + * definition in this file for the full member list. + * + * NOTE: This function is intended for debugging use only as its use results in + * the scheduler remaining suspended for an extended period. + * + * @param pxTaskStatusArray A pointer to an array of TaskStatus_t structures. + * The array must contain at least one TaskStatus_t structure for each task + * that is under the control of the RTOS. The number of tasks under the control + * of the RTOS can be determined using the uxTaskGetNumberOfTasks() API function. + * + * @param uxArraySize The size of the array pointed to by the pxTaskStatusArray + * parameter. The size is specified as the number of indexes in the array, or + * the number of TaskStatus_t structures contained in the array, not by the + * number of bytes in the array. + * + * @param pulTotalRunTime If configGENERATE_RUN_TIME_STATS is set to 1 in + * FreeRTOSConfig.h then *pulTotalRunTime is set by uxTaskGetSystemState() to the + * total run time (as defined by the run time stats clock, see + * http://www.freertos.org/rtos-run-time-stats.html) since the target booted. + * pulTotalRunTime can be set to NULL to omit the total run time information. + * + * @return The number of TaskStatus_t structures that were populated by + * uxTaskGetSystemState(). This should equal the number returned by the + * uxTaskGetNumberOfTasks() API function, but will be zero if the value passed + * in the uxArraySize parameter was too small. + * + * Example usage: +
+    // This example demonstrates how a human readable table of run time stats
+	// information is generated from raw data provided by uxTaskGetSystemState().
+	// The human readable table is written to pcWriteBuffer
+	void vTaskGetRunTimeStats( char *pcWriteBuffer )
+	{
+	TaskStatus_t *pxTaskStatusArray;
+	volatile UBaseType_t uxArraySize, x;
+	uint32_t ulTotalRunTime, ulStatsAsPercentage;
+
+		// Make sure the write buffer does not contain a string.
+		*pcWriteBuffer = 0x00;
+
+		// Take a snapshot of the number of tasks in case it changes while this
+		// function is executing.
+		uxArraySize = uxTaskGetNumberOfTasks();
+
+		// Allocate a TaskStatus_t structure for each task.  An array could be
+		// allocated statically at compile time.
+		pxTaskStatusArray = pvPortMalloc( uxArraySize * sizeof( TaskStatus_t ) );
+
+		if( pxTaskStatusArray != NULL )
+		{
+			// Generate raw status information about each task.
+			uxArraySize = uxTaskGetSystemState( pxTaskStatusArray, uxArraySize, &ulTotalRunTime );
+
+			// For percentage calculations.
+			ulTotalRunTime /= 100UL;
+
+			// Avoid divide by zero errors.
+			if( ulTotalRunTime > 0 )
+			{
+				// For each populated position in the pxTaskStatusArray array,
+				// format the raw data as human readable ASCII data
+				for( x = 0; x < uxArraySize; x++ )
+				{
+					// What percentage of the total run time has the task used?
+					// This will always be rounded down to the nearest integer.
+					// ulTotalRunTimeDiv100 has already been divided by 100.
+					ulStatsAsPercentage = pxTaskStatusArray[ x ].ulRunTimeCounter / ulTotalRunTime;
+
+					if( ulStatsAsPercentage > 0UL )
+					{
+						sprintf( pcWriteBuffer, "%s\t\t%lu\t\t%lu%%\r\n", pxTaskStatusArray[ x ].pcTaskName, pxTaskStatusArray[ x ].ulRunTimeCounter, ulStatsAsPercentage );
+					}
+					else
+					{
+						// If the percentage is zero here then the task has
+						// consumed less than 1% of the total run time.
+						sprintf( pcWriteBuffer, "%s\t\t%lu\t\t<1%%\r\n", pxTaskStatusArray[ x ].pcTaskName, pxTaskStatusArray[ x ].ulRunTimeCounter );
+					}
+
+					pcWriteBuffer += strlen( ( char * ) pcWriteBuffer );
+				}
+			}
+
+			// The array is no longer needed, free the memory it consumes.
+			vPortFree( pxTaskStatusArray );
+		}
+	}
+	
+ */ +UBaseType_t uxTaskGetSystemState( TaskStatus_t * const pxTaskStatusArray, const UBaseType_t uxArraySize, uint32_t * const pulTotalRunTime ) PRIVILEGED_FUNCTION; + +/** + * task. h + *
void vTaskList( char *pcWriteBuffer );
+ * + * configUSE_TRACE_FACILITY and configUSE_STATS_FORMATTING_FUNCTIONS must + * both be defined as 1 for this function to be available. See the + * configuration section of the FreeRTOS.org website for more information. + * + * NOTE 1: This function will disable interrupts for its duration. It is + * not intended for normal application runtime use but as a debug aid. + * + * Lists all the current tasks, along with their current state and stack + * usage high water mark. + * + * Tasks are reported as blocked ('B'), ready ('R'), deleted ('D') or + * suspended ('S'). + * + * PLEASE NOTE: + * + * This function is provided for convenience only, and is used by many of the + * demo applications. Do not consider it to be part of the scheduler. + * + * vTaskList() calls uxTaskGetSystemState(), then formats part of the + * uxTaskGetSystemState() output into a human readable table that displays task + * names, states and stack usage. + * + * vTaskList() has a dependency on the sprintf() C library function that might + * bloat the code size, use a lot of stack, and provide different results on + * different platforms. An alternative, tiny, third party, and limited + * functionality implementation of sprintf() is provided in many of the + * FreeRTOS/Demo sub-directories in a file called printf-stdarg.c (note + * printf-stdarg.c does not provide a full snprintf() implementation!). + * + * It is recommended that production systems call uxTaskGetSystemState() + * directly to get access to raw stats data, rather than indirectly through a + * call to vTaskList(). + * + * @param pcWriteBuffer A buffer into which the above mentioned details + * will be written, in ASCII form. This buffer is assumed to be large + * enough to contain the generated report. Approximately 40 bytes per + * task should be sufficient. + * + * \defgroup vTaskList vTaskList + * \ingroup TaskUtils + */ +void vTaskList( char * pcWriteBuffer ) PRIVILEGED_FUNCTION; /*lint !e971 Unqualified char types are allowed for strings and single characters only. */ + +/** + * task. h + *
void vTaskGetRunTimeStats( char *pcWriteBuffer );
+ * + * configGENERATE_RUN_TIME_STATS and configUSE_STATS_FORMATTING_FUNCTIONS + * must both be defined as 1 for this function to be available. The application + * must also then provide definitions for + * portCONFIGURE_TIMER_FOR_RUN_TIME_STATS() and portGET_RUN_TIME_COUNTER_VALUE() + * to configure a peripheral timer/counter and return the timers current count + * value respectively. The counter should be at least 10 times the frequency of + * the tick count. + * + * NOTE 1: This function will disable interrupts for its duration. It is + * not intended for normal application runtime use but as a debug aid. + * + * Setting configGENERATE_RUN_TIME_STATS to 1 will result in a total + * accumulated execution time being stored for each task. The resolution + * of the accumulated time value depends on the frequency of the timer + * configured by the portCONFIGURE_TIMER_FOR_RUN_TIME_STATS() macro. + * Calling vTaskGetRunTimeStats() writes the total execution time of each + * task into a buffer, both as an absolute count value and as a percentage + * of the total system execution time. + * + * NOTE 2: + * + * This function is provided for convenience only, and is used by many of the + * demo applications. Do not consider it to be part of the scheduler. + * + * vTaskGetRunTimeStats() calls uxTaskGetSystemState(), then formats part of the + * uxTaskGetSystemState() output into a human readable table that displays the + * amount of time each task has spent in the Running state in both absolute and + * percentage terms. + * + * vTaskGetRunTimeStats() has a dependency on the sprintf() C library function + * that might bloat the code size, use a lot of stack, and provide different + * results on different platforms. An alternative, tiny, third party, and + * limited functionality implementation of sprintf() is provided in many of the + * FreeRTOS/Demo sub-directories in a file called printf-stdarg.c (note + * printf-stdarg.c does not provide a full snprintf() implementation!). + * + * It is recommended that production systems call uxTaskGetSystemState() directly + * to get access to raw stats data, rather than indirectly through a call to + * vTaskGetRunTimeStats(). + * + * @param pcWriteBuffer A buffer into which the execution times will be + * written, in ASCII form. This buffer is assumed to be large enough to + * contain the generated report. Approximately 40 bytes per task should + * be sufficient. + * + * \defgroup vTaskGetRunTimeStats vTaskGetRunTimeStats + * \ingroup TaskUtils + */ +void vTaskGetRunTimeStats( char *pcWriteBuffer ) PRIVILEGED_FUNCTION; /*lint !e971 Unqualified char types are allowed for strings and single characters only. */ + +/** +* task. h +*
TickType_t xTaskGetIdleRunTimeCounter( void );
+* +* configGENERATE_RUN_TIME_STATS and configUSE_STATS_FORMATTING_FUNCTIONS +* must both be defined as 1 for this function to be available. The application +* must also then provide definitions for +* portCONFIGURE_TIMER_FOR_RUN_TIME_STATS() and portGET_RUN_TIME_COUNTER_VALUE() +* to configure a peripheral timer/counter and return the timers current count +* value respectively. The counter should be at least 10 times the frequency of +* the tick count. +* +* Setting configGENERATE_RUN_TIME_STATS to 1 will result in a total +* accumulated execution time being stored for each task. The resolution +* of the accumulated time value depends on the frequency of the timer +* configured by the portCONFIGURE_TIMER_FOR_RUN_TIME_STATS() macro. +* While uxTaskGetSystemState() and vTaskGetRunTimeStats() writes the total +* execution time of each task into a buffer, xTaskGetIdleRunTimeCounter() +* returns the total execution time of just the idle task. +* +* @return The total run time of the idle task. This is the amount of time the +* idle task has actually been executing. The unit of time is dependent on the +* frequency configured using the portCONFIGURE_TIMER_FOR_RUN_TIME_STATS() and +* portGET_RUN_TIME_COUNTER_VALUE() macros. +* +* \defgroup xTaskGetIdleRunTimeCounter xTaskGetIdleRunTimeCounter +* \ingroup TaskUtils +*/ +TickType_t xTaskGetIdleRunTimeCounter( void ) PRIVILEGED_FUNCTION; + +/** + * task. h + *
BaseType_t xTaskNotify( TaskHandle_t xTaskToNotify, uint32_t ulValue, eNotifyAction eAction );
+ * + * configUSE_TASK_NOTIFICATIONS must be undefined or defined as 1 for this + * function to be available. + * + * When configUSE_TASK_NOTIFICATIONS is set to one each task has its own private + * "notification value", which is a 32-bit unsigned integer (uint32_t). + * + * Events can be sent to a task using an intermediary object. Examples of such + * objects are queues, semaphores, mutexes and event groups. Task notifications + * are a method of sending an event directly to a task without the need for such + * an intermediary object. + * + * A notification sent to a task can optionally perform an action, such as + * update, overwrite or increment the task's notification value. In that way + * task notifications can be used to send data to a task, or be used as light + * weight and fast binary or counting semaphores. + * + * A notification sent to a task will remain pending until it is cleared by the + * task calling xTaskNotifyWait() or ulTaskNotifyTake(). If the task was + * already in the Blocked state to wait for a notification when the notification + * arrives then the task will automatically be removed from the Blocked state + * (unblocked) and the notification cleared. + * + * A task can use xTaskNotifyWait() to [optionally] block to wait for a + * notification to be pending, or ulTaskNotifyTake() to [optionally] block + * to wait for its notification value to have a non-zero value. The task does + * not consume any CPU time while it is in the Blocked state. + * + * See http://www.FreeRTOS.org/RTOS-task-notifications.html for details. + * + * @param xTaskToNotify The handle of the task being notified. The handle to a + * task can be returned from the xTaskCreate() API function used to create the + * task, and the handle of the currently running task can be obtained by calling + * xTaskGetCurrentTaskHandle(). + * + * @param ulValue Data that can be sent with the notification. How the data is + * used depends on the value of the eAction parameter. + * + * @param eAction Specifies how the notification updates the task's notification + * value, if at all. Valid values for eAction are as follows: + * + * eSetBits - + * The task's notification value is bitwise ORed with ulValue. xTaskNofify() + * always returns pdPASS in this case. + * + * eIncrement - + * The task's notification value is incremented. ulValue is not used and + * xTaskNotify() always returns pdPASS in this case. + * + * eSetValueWithOverwrite - + * The task's notification value is set to the value of ulValue, even if the + * task being notified had not yet processed the previous notification (the + * task already had a notification pending). xTaskNotify() always returns + * pdPASS in this case. + * + * eSetValueWithoutOverwrite - + * If the task being notified did not already have a notification pending then + * the task's notification value is set to ulValue and xTaskNotify() will + * return pdPASS. If the task being notified already had a notification + * pending then no action is performed and pdFAIL is returned. + * + * eNoAction - + * The task receives a notification without its notification value being + * updated. ulValue is not used and xTaskNotify() always returns pdPASS in + * this case. + * + * pulPreviousNotificationValue - + * Can be used to pass out the subject task's notification value before any + * bits are modified by the notify function. + * + * @return Dependent on the value of eAction. See the description of the + * eAction parameter. 
+ * + * \defgroup xTaskNotify xTaskNotify + * \ingroup TaskNotifications + */ +BaseType_t xTaskGenericNotify( TaskHandle_t xTaskToNotify, uint32_t ulValue, eNotifyAction eAction, uint32_t *pulPreviousNotificationValue ) PRIVILEGED_FUNCTION; +#define xTaskNotify( xTaskToNotify, ulValue, eAction ) xTaskGenericNotify( ( xTaskToNotify ), ( ulValue ), ( eAction ), NULL ) +#define xTaskNotifyAndQuery( xTaskToNotify, ulValue, eAction, pulPreviousNotifyValue ) xTaskGenericNotify( ( xTaskToNotify ), ( ulValue ), ( eAction ), ( pulPreviousNotifyValue ) ) + +/** + * task. h + *
BaseType_t xTaskNotifyFromISR( TaskHandle_t xTaskToNotify, uint32_t ulValue, eNotifyAction eAction, BaseType_t *pxHigherPriorityTaskWoken );
+ * + * configUSE_TASK_NOTIFICATIONS must be undefined or defined as 1 for this + * function to be available. + * + * When configUSE_TASK_NOTIFICATIONS is set to one each task has its own private + * "notification value", which is a 32-bit unsigned integer (uint32_t). + * + * A version of xTaskNotify() that can be used from an interrupt service routine + * (ISR). + * + * Events can be sent to a task using an intermediary object. Examples of such + * objects are queues, semaphores, mutexes and event groups. Task notifications + * are a method of sending an event directly to a task without the need for such + * an intermediary object. + * + * A notification sent to a task can optionally perform an action, such as + * update, overwrite or increment the task's notification value. In that way + * task notifications can be used to send data to a task, or be used as light + * weight and fast binary or counting semaphores. + * + * A notification sent to a task will remain pending until it is cleared by the + * task calling xTaskNotifyWait() or ulTaskNotifyTake(). If the task was + * already in the Blocked state to wait for a notification when the notification + * arrives then the task will automatically be removed from the Blocked state + * (unblocked) and the notification cleared. + * + * A task can use xTaskNotifyWait() to [optionally] block to wait for a + * notification to be pending, or ulTaskNotifyTake() to [optionally] block + * to wait for its notification value to have a non-zero value. The task does + * not consume any CPU time while it is in the Blocked state. + * + * See http://www.FreeRTOS.org/RTOS-task-notifications.html for details. + * + * @param xTaskToNotify The handle of the task being notified. The handle to a + * task can be returned from the xTaskCreate() API function used to create the + * task, and the handle of the currently running task can be obtained by calling + * xTaskGetCurrentTaskHandle(). + * + * @param ulValue Data that can be sent with the notification. How the data is + * used depends on the value of the eAction parameter. + * + * @param eAction Specifies how the notification updates the task's notification + * value, if at all. Valid values for eAction are as follows: + * + * eSetBits - + * The task's notification value is bitwise ORed with ulValue. xTaskNofify() + * always returns pdPASS in this case. + * + * eIncrement - + * The task's notification value is incremented. ulValue is not used and + * xTaskNotify() always returns pdPASS in this case. + * + * eSetValueWithOverwrite - + * The task's notification value is set to the value of ulValue, even if the + * task being notified had not yet processed the previous notification (the + * task already had a notification pending). xTaskNotify() always returns + * pdPASS in this case. + * + * eSetValueWithoutOverwrite - + * If the task being notified did not already have a notification pending then + * the task's notification value is set to ulValue and xTaskNotify() will + * return pdPASS. If the task being notified already had a notification + * pending then no action is performed and pdFAIL is returned. + * + * eNoAction - + * The task receives a notification without its notification value being + * updated. ulValue is not used and xTaskNotify() always returns pdPASS in + * this case. 
+ * + * @param pxHigherPriorityTaskWoken xTaskNotifyFromISR() will set + * *pxHigherPriorityTaskWoken to pdTRUE if sending the notification caused the + * task to which the notification was sent to leave the Blocked state, and the + * unblocked task has a priority higher than the currently running task. If + * xTaskNotifyFromISR() sets this value to pdTRUE then a context switch should + * be requested before the interrupt is exited. How a context switch is + * requested from an ISR is dependent on the port - see the documentation page + * for the port in use. + * + * @return Dependent on the value of eAction. See the description of the + * eAction parameter. + * + * \defgroup xTaskNotify xTaskNotify + * \ingroup TaskNotifications + */ +BaseType_t xTaskGenericNotifyFromISR( TaskHandle_t xTaskToNotify, uint32_t ulValue, eNotifyAction eAction, uint32_t *pulPreviousNotificationValue, BaseType_t *pxHigherPriorityTaskWoken ) PRIVILEGED_FUNCTION; +#define xTaskNotifyFromISR( xTaskToNotify, ulValue, eAction, pxHigherPriorityTaskWoken ) xTaskGenericNotifyFromISR( ( xTaskToNotify ), ( ulValue ), ( eAction ), NULL, ( pxHigherPriorityTaskWoken ) ) +#define xTaskNotifyAndQueryFromISR( xTaskToNotify, ulValue, eAction, pulPreviousNotificationValue, pxHigherPriorityTaskWoken ) xTaskGenericNotifyFromISR( ( xTaskToNotify ), ( ulValue ), ( eAction ), ( pulPreviousNotificationValue ), ( pxHigherPriorityTaskWoken ) ) + +/** + * task. h + *
BaseType_t xTaskNotifyWait( uint32_t ulBitsToClearOnEntry, uint32_t ulBitsToClearOnExit, uint32_t *pulNotificationValue, TickType_t xTicksToWait );
+ * + * configUSE_TASK_NOTIFICATIONS must be undefined or defined as 1 for this + * function to be available. + * + * When configUSE_TASK_NOTIFICATIONS is set to one each task has its own private + * "notification value", which is a 32-bit unsigned integer (uint32_t). + * + * Events can be sent to a task using an intermediary object. Examples of such + * objects are queues, semaphores, mutexes and event groups. Task notifications + * are a method of sending an event directly to a task without the need for such + * an intermediary object. + * + * A notification sent to a task can optionally perform an action, such as + * update, overwrite or increment the task's notification value. In that way + * task notifications can be used to send data to a task, or be used as light + * weight and fast binary or counting semaphores. + * + * A notification sent to a task will remain pending until it is cleared by the + * task calling xTaskNotifyWait() or ulTaskNotifyTake(). If the task was + * already in the Blocked state to wait for a notification when the notification + * arrives then the task will automatically be removed from the Blocked state + * (unblocked) and the notification cleared. + * + * A task can use xTaskNotifyWait() to [optionally] block to wait for a + * notification to be pending, or ulTaskNotifyTake() to [optionally] block + * to wait for its notification value to have a non-zero value. The task does + * not consume any CPU time while it is in the Blocked state. + * + * See http://www.FreeRTOS.org/RTOS-task-notifications.html for details. + * + * @param ulBitsToClearOnEntry Bits that are set in ulBitsToClearOnEntry value + * will be cleared in the calling task's notification value before the task + * checks to see if any notifications are pending, and optionally blocks if no + * notifications are pending. Setting ulBitsToClearOnEntry to ULONG_MAX (if + * limits.h is included) or 0xffffffffUL (if limits.h is not included) will have + * the effect of resetting the task's notification value to 0. Setting + * ulBitsToClearOnEntry to 0 will leave the task's notification value unchanged. + * + * @param ulBitsToClearOnExit If a notification is pending or received before + * the calling task exits the xTaskNotifyWait() function then the task's + * notification value (see the xTaskNotify() API function) is passed out using + * the pulNotificationValue parameter. Then any bits that are set in + * ulBitsToClearOnExit will be cleared in the task's notification value (note + * *pulNotificationValue is set before any bits are cleared). Setting + * ulBitsToClearOnExit to ULONG_MAX (if limits.h is included) or 0xffffffffUL + * (if limits.h is not included) will have the effect of resetting the task's + * notification value to 0 before the function exits. Setting + * ulBitsToClearOnExit to 0 will leave the task's notification value unchanged + * when the function exits (in which case the value passed out in + * pulNotificationValue will match the task's notification value). + * + * @param pulNotificationValue Used to pass the task's notification value out + * of the function. Note the value passed out will not be effected by the + * clearing of any bits caused by ulBitsToClearOnExit being non-zero. + * + * @param xTicksToWait The maximum amount of time that the task should wait in + * the Blocked state for a notification to be received, should a notification + * not already be pending when xTaskNotifyWait() was called. 
The task + * will not consume any processing time while it is in the Blocked state. This + * is specified in kernel ticks, the macro pdMS_TO_TICKS( value_in_ms ) can be + * used to convert a time specified in milliseconds to a time specified in + * ticks. + * + * @return If a notification was received (including notifications that were + * already pending when xTaskNotifyWait() was called) then pdPASS is + * returned. Otherwise pdFAIL is returned. + * + * \defgroup xTaskNotifyWait xTaskNotifyWait + * \ingroup TaskNotifications + */ +BaseType_t xTaskNotifyWait( uint32_t ulBitsToClearOnEntry, uint32_t ulBitsToClearOnExit, uint32_t *pulNotificationValue, TickType_t xTicksToWait ) PRIVILEGED_FUNCTION; + +/** + * task. h + *
BaseType_t xTaskNotifyGive( TaskHandle_t xTaskToNotify );
+ * + * configUSE_TASK_NOTIFICATIONS must be undefined or defined as 1 for this macro + * to be available. + * + * When configUSE_TASK_NOTIFICATIONS is set to one each task has its own private + * "notification value", which is a 32-bit unsigned integer (uint32_t). + * + * Events can be sent to a task using an intermediary object. Examples of such + * objects are queues, semaphores, mutexes and event groups. Task notifications + * are a method of sending an event directly to a task without the need for such + * an intermediary object. + * + * A notification sent to a task can optionally perform an action, such as + * update, overwrite or increment the task's notification value. In that way + * task notifications can be used to send data to a task, or be used as light + * weight and fast binary or counting semaphores. + * + * xTaskNotifyGive() is a helper macro intended for use when task notifications + * are used as light weight and faster binary or counting semaphore equivalents. + * Actual FreeRTOS semaphores are given using the xSemaphoreGive() API function, + * the equivalent action that instead uses a task notification is + * xTaskNotifyGive(). + * + * When task notifications are being used as a binary or counting semaphore + * equivalent then the task being notified should wait for the notification + * using the ulTaskNotificationTake() API function rather than the + * xTaskNotifyWait() API function. + * + * See http://www.FreeRTOS.org/RTOS-task-notifications.html for more details. + * + * @param xTaskToNotify The handle of the task being notified. The handle to a + * task can be returned from the xTaskCreate() API function used to create the + * task, and the handle of the currently running task can be obtained by calling + * xTaskGetCurrentTaskHandle(). + * + * @return xTaskNotifyGive() is a macro that calls xTaskNotify() with the + * eAction parameter set to eIncrement - so pdPASS is always returned. + * + * \defgroup xTaskNotifyGive xTaskNotifyGive + * \ingroup TaskNotifications + */ +#define xTaskNotifyGive( xTaskToNotify ) xTaskGenericNotify( ( xTaskToNotify ), ( 0 ), eIncrement, NULL ) + +/** + * task. h + *
void vTaskNotifyGiveFromISR( TaskHandle_t xTaskToNotify, BaseType_t *pxHigherPriorityTaskWoken );
+ *
+ * configUSE_TASK_NOTIFICATIONS must be undefined or defined as 1 for this macro
+ * to be available.
+ *
+ * When configUSE_TASK_NOTIFICATIONS is set to one each task has its own private
+ * "notification value", which is a 32-bit unsigned integer (uint32_t).
+ *
+ * A version of xTaskNotifyGive() that can be called from an interrupt service
+ * routine (ISR).
+ *
+ * Events can be sent to a task using an intermediary object.  Examples of such
+ * objects are queues, semaphores, mutexes and event groups.  Task notifications
+ * are a method of sending an event directly to a task without the need for such
+ * an intermediary object.
+ *
+ * A notification sent to a task can optionally perform an action, such as
+ * update, overwrite or increment the task's notification value.  In that way
+ * task notifications can be used to send data to a task, or be used as light
+ * weight and fast binary or counting semaphores.
+ *
+ * vTaskNotifyGiveFromISR() is intended for use when task notifications are
+ * used as light weight and faster binary or counting semaphore equivalents.
+ * Actual FreeRTOS semaphores are given from an ISR using the
+ * xSemaphoreGiveFromISR() API function, the equivalent action that instead uses
+ * a task notification is vTaskNotifyGiveFromISR().
+ *
+ * When task notifications are being used as a binary or counting semaphore
+ * equivalent then the task being notified should wait for the notification
+ * using the ulTaskNotifyTake() API function rather than the
+ * xTaskNotifyWait() API function.
+ *
+ * See http://www.FreeRTOS.org/RTOS-task-notifications.html for more details.
+ *
+ * @param xTaskToNotify The handle of the task being notified.  The handle to a
+ * task can be returned from the xTaskCreate() API function used to create the
+ * task, and the handle of the currently running task can be obtained by calling
+ * xTaskGetCurrentTaskHandle().
+ *
+ * @param pxHigherPriorityTaskWoken  vTaskNotifyGiveFromISR() will set
+ * *pxHigherPriorityTaskWoken to pdTRUE if sending the notification caused the
+ * task to which the notification was sent to leave the Blocked state, and the
+ * unblocked task has a priority higher than the currently running task.  If
+ * vTaskNotifyGiveFromISR() sets this value to pdTRUE then a context switch
+ * should be requested before the interrupt is exited.  How a context switch is
+ * requested from an ISR is dependent on the port - see the documentation page
+ * for the port in use.
+ *
+ * \defgroup vTaskNotifyGiveFromISR vTaskNotifyGiveFromISR
+ * \ingroup TaskNotifications
+ */
+void vTaskNotifyGiveFromISR( TaskHandle_t xTaskToNotify, BaseType_t *pxHigherPriorityTaskWoken ) PRIVILEGED_FUNCTION;
+
+/**
+ * task. h
+ * 
uint32_t ulTaskNotifyTake( BaseType_t xClearCountOnExit, TickType_t xTicksToWait );
+ * + * configUSE_TASK_NOTIFICATIONS must be undefined or defined as 1 for this + * function to be available. + * + * When configUSE_TASK_NOTIFICATIONS is set to one each task has its own private + * "notification value", which is a 32-bit unsigned integer (uint32_t). + * + * Events can be sent to a task using an intermediary object. Examples of such + * objects are queues, semaphores, mutexes and event groups. Task notifications + * are a method of sending an event directly to a task without the need for such + * an intermediary object. + * + * A notification sent to a task can optionally perform an action, such as + * update, overwrite or increment the task's notification value. In that way + * task notifications can be used to send data to a task, or be used as light + * weight and fast binary or counting semaphores. + * + * ulTaskNotifyTake() is intended for use when a task notification is used as a + * faster and lighter weight binary or counting semaphore alternative. Actual + * FreeRTOS semaphores are taken using the xSemaphoreTake() API function, the + * equivalent action that instead uses a task notification is + * ulTaskNotifyTake(). + * + * When a task is using its notification value as a binary or counting semaphore + * other tasks should send notifications to it using the xTaskNotifyGive() + * macro, or xTaskNotify() function with the eAction parameter set to + * eIncrement. + * + * ulTaskNotifyTake() can either clear the task's notification value to + * zero on exit, in which case the notification value acts like a binary + * semaphore, or decrement the task's notification value on exit, in which case + * the notification value acts like a counting semaphore. + * + * A task can use ulTaskNotifyTake() to [optionally] block to wait for a + * the task's notification value to be non-zero. The task does not consume any + * CPU time while it is in the Blocked state. + * + * Where as xTaskNotifyWait() will return when a notification is pending, + * ulTaskNotifyTake() will return when the task's notification value is + * not zero. + * + * See http://www.FreeRTOS.org/RTOS-task-notifications.html for details. + * + * @param xClearCountOnExit if xClearCountOnExit is pdFALSE then the task's + * notification value is decremented when the function exits. In this way the + * notification value acts like a counting semaphore. If xClearCountOnExit is + * not pdFALSE then the task's notification value is cleared to zero when the + * function exits. In this way the notification value acts like a binary + * semaphore. + * + * @param xTicksToWait The maximum amount of time that the task should wait in + * the Blocked state for the task's notification value to be greater than zero, + * should the count not already be greater than zero when + * ulTaskNotifyTake() was called. The task will not consume any processing + * time while it is in the Blocked state. This is specified in kernel ticks, + * the macro pdMS_TO_TICSK( value_in_ms ) can be used to convert a time + * specified in milliseconds to a time specified in ticks. + * + * @return The task's notification count before it is either cleared to zero or + * decremented (see the xClearCountOnExit parameter). + * + * \defgroup ulTaskNotifyTake ulTaskNotifyTake + * \ingroup TaskNotifications + */ +uint32_t ulTaskNotifyTake( BaseType_t xClearCountOnExit, TickType_t xTicksToWait ) PRIVILEGED_FUNCTION; + +/** + * task. h + *
BaseType_t xTaskNotifyStateClear( TaskHandle_t xTask );
+ * + * If the notification state of the task referenced by the handle xTask is + * eNotified, then set the task's notification state to eNotWaitingNotification. + * The task's notification value is not altered. Set xTask to NULL to clear the + * notification state of the calling task. + * + * @return pdTRUE if the task's notification state was set to + * eNotWaitingNotification, otherwise pdFALSE. + * \defgroup xTaskNotifyStateClear xTaskNotifyStateClear + * \ingroup TaskNotifications + */ +BaseType_t xTaskNotifyStateClear( TaskHandle_t xTask ); + +/*----------------------------------------------------------- + * SCHEDULER INTERNALS AVAILABLE FOR PORTING PURPOSES + *----------------------------------------------------------*/ + +/* + * THIS FUNCTION MUST NOT BE USED FROM APPLICATION CODE. IT IS ONLY + * INTENDED FOR USE WHEN IMPLEMENTING A PORT OF THE SCHEDULER AND IS + * AN INTERFACE WHICH IS FOR THE EXCLUSIVE USE OF THE SCHEDULER. + * + * Called from the real time kernel tick (either preemptive or cooperative), + * this increments the tick count and checks if any tasks that are blocked + * for a finite period required removing from a blocked list and placing on + * a ready list. If a non-zero value is returned then a context switch is + * required because either: + * + A task was removed from a blocked list because its timeout had expired, + * or + * + Time slicing is in use and there is a task of equal priority to the + * currently running task. + */ +BaseType_t xTaskIncrementTick( void ) PRIVILEGED_FUNCTION; + +/* + * THIS FUNCTION MUST NOT BE USED FROM APPLICATION CODE. IT IS AN + * INTERFACE WHICH IS FOR THE EXCLUSIVE USE OF THE SCHEDULER. + * + * THIS FUNCTION MUST BE CALLED WITH INTERRUPTS DISABLED. + * + * Removes the calling task from the ready list and places it both + * on the list of tasks waiting for a particular event, and the + * list of delayed tasks. The task will be removed from both lists + * and replaced on the ready list should either the event occur (and + * there be no higher priority tasks waiting on the same event) or + * the delay period expires. + * + * The 'unordered' version replaces the event list item value with the + * xItemValue value, and inserts the list item at the end of the list. + * + * The 'ordered' version uses the existing event list item value (which is the + * owning tasks priority) to insert the list item into the event list is task + * priority order. + * + * @param pxEventList The list containing tasks that are blocked waiting + * for the event to occur. + * + * @param xItemValue The item value to use for the event list item when the + * event list is not ordered by task priority. + * + * @param xTicksToWait The maximum amount of time that the task should wait + * for the event to occur. This is specified in kernel ticks,the constant + * portTICK_PERIOD_MS can be used to convert kernel ticks into a real time + * period. + */ +void vTaskPlaceOnEventList( List_t * const pxEventList, const TickType_t xTicksToWait ) PRIVILEGED_FUNCTION; +void vTaskPlaceOnUnorderedEventList( List_t * pxEventList, const TickType_t xItemValue, const TickType_t xTicksToWait ) PRIVILEGED_FUNCTION; + +/* + * THIS FUNCTION MUST NOT BE USED FROM APPLICATION CODE. IT IS AN + * INTERFACE WHICH IS FOR THE EXCLUSIVE USE OF THE SCHEDULER. + * + * THIS FUNCTION MUST BE CALLED WITH INTERRUPTS DISABLED. + * + * This function performs nearly the same function as vTaskPlaceOnEventList(). 
+ * The difference being that this function does not permit tasks to block + * indefinitely, whereas vTaskPlaceOnEventList() does. + * + */ +void vTaskPlaceOnEventListRestricted( List_t * const pxEventList, TickType_t xTicksToWait, const BaseType_t xWaitIndefinitely ) PRIVILEGED_FUNCTION; + +/* + * THIS FUNCTION MUST NOT BE USED FROM APPLICATION CODE. IT IS AN + * INTERFACE WHICH IS FOR THE EXCLUSIVE USE OF THE SCHEDULER. + * + * THIS FUNCTION MUST BE CALLED WITH INTERRUPTS DISABLED. + * + * Removes a task from both the specified event list and the list of blocked + * tasks, and places it on a ready queue. + * + * xTaskRemoveFromEventList()/vTaskRemoveFromUnorderedEventList() will be called + * if either an event occurs to unblock a task, or the block timeout period + * expires. + * + * xTaskRemoveFromEventList() is used when the event list is in task priority + * order. It removes the list item from the head of the event list as that will + * have the highest priority owning task of all the tasks on the event list. + * vTaskRemoveFromUnorderedEventList() is used when the event list is not + * ordered and the event list items hold something other than the owning tasks + * priority. In this case the event list item value is updated to the value + * passed in the xItemValue parameter. + * + * @return pdTRUE if the task being removed has a higher priority than the task + * making the call, otherwise pdFALSE. + */ +BaseType_t xTaskRemoveFromEventList( const List_t * const pxEventList ) PRIVILEGED_FUNCTION; +void vTaskRemoveFromUnorderedEventList( ListItem_t * pxEventListItem, const TickType_t xItemValue ) PRIVILEGED_FUNCTION; + +/* + * THIS FUNCTION MUST NOT BE USED FROM APPLICATION CODE. IT IS ONLY + * INTENDED FOR USE WHEN IMPLEMENTING A PORT OF THE SCHEDULER AND IS + * AN INTERFACE WHICH IS FOR THE EXCLUSIVE USE OF THE SCHEDULER. + * + * Sets the pointer to the current TCB to the TCB of the highest priority task + * that is ready to run. + */ +portDONT_DISCARD void vTaskSwitchContext( void ) PRIVILEGED_FUNCTION; + +/* + * THESE FUNCTIONS MUST NOT BE USED FROM APPLICATION CODE. THEY ARE USED BY + * THE EVENT BITS MODULE. + */ +TickType_t uxTaskResetEventItemValue( void ) PRIVILEGED_FUNCTION; + +/* + * Return the handle of the calling task. + */ +TaskHandle_t xTaskGetCurrentTaskHandle( void ) PRIVILEGED_FUNCTION; + +/* + * Capture the current time status for future reference. + */ +void vTaskSetTimeOutState( TimeOut_t * const pxTimeOut ) PRIVILEGED_FUNCTION; + +/* + * Compare the time status now with that previously captured to see if the + * timeout has expired. + */ +BaseType_t xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut, TickType_t * const pxTicksToWait ) PRIVILEGED_FUNCTION; + +/* + * Shortcut used by the queue implementation to prevent unnecessary call to + * taskYIELD(); + */ +void vTaskMissedYield( void ) PRIVILEGED_FUNCTION; + +/* + * Returns the scheduler state as taskSCHEDULER_RUNNING, + * taskSCHEDULER_NOT_STARTED or taskSCHEDULER_SUSPENDED. + */ +BaseType_t xTaskGetSchedulerState( void ) PRIVILEGED_FUNCTION; + +/* + * Raises the priority of the mutex holder to that of the calling task should + * the mutex holder have a priority less than the calling task. + */ +BaseType_t xTaskPriorityInherit( TaskHandle_t const pxMutexHolder ) PRIVILEGED_FUNCTION; + +/* + * Set the priority of a task back to its proper priority in the case that it + * inherited a higher priority while it was holding a semaphore. 
+ */ +BaseType_t xTaskPriorityDisinherit( TaskHandle_t const pxMutexHolder ) PRIVILEGED_FUNCTION; + +/* + * If a higher priority task attempting to obtain a mutex caused a lower + * priority task to inherit the higher priority task's priority - but the higher + * priority task then timed out without obtaining the mutex, then the lower + * priority task will disinherit the priority again - but only down as far as + * the highest priority task that is still waiting for the mutex (if there were + * more than one task waiting for the mutex). + */ +void vTaskPriorityDisinheritAfterTimeout( TaskHandle_t const pxMutexHolder, UBaseType_t uxHighestPriorityWaitingTask ) PRIVILEGED_FUNCTION; + +/* + * Get the uxTCBNumber assigned to the task referenced by the xTask parameter. + */ +UBaseType_t uxTaskGetTaskNumber( TaskHandle_t xTask ) PRIVILEGED_FUNCTION; + +/* + * Set the uxTaskNumber of the task referenced by the xTask parameter to + * uxHandle. + */ +void vTaskSetTaskNumber( TaskHandle_t xTask, const UBaseType_t uxHandle ) PRIVILEGED_FUNCTION; + +/* + * Only available when configUSE_TICKLESS_IDLE is set to 1. + * If tickless mode is being used, or a low power mode is implemented, then + * the tick interrupt will not execute during idle periods. When this is the + * case, the tick count value maintained by the scheduler needs to be kept up + * to date with the actual execution time by being skipped forward by a time + * equal to the idle period. + */ +void vTaskStepTick( const TickType_t xTicksToJump ) PRIVILEGED_FUNCTION; + +/* + * Only available when configUSE_TICKLESS_IDLE is set to 1. + * Provided for use within portSUPPRESS_TICKS_AND_SLEEP() to allow the port + * specific sleep function to determine if it is ok to proceed with the sleep, + * and if it is ok to proceed, if it is ok to sleep indefinitely. + * + * This function is necessary because portSUPPRESS_TICKS_AND_SLEEP() is only + * called with the scheduler suspended, not from within a critical section. It + * is therefore possible for an interrupt to request a context switch between + * portSUPPRESS_TICKS_AND_SLEEP() and the low power mode actually being + * entered. eTaskConfirmSleepModeStatus() should be called from a short + * critical section between the timer being stopped and the sleep mode being + * entered to ensure it is ok to proceed into the sleep mode. + */ +eSleepModeStatus eTaskConfirmSleepModeStatus( void ) PRIVILEGED_FUNCTION; + +/* + * For internal use only. Increment the mutex held count when a mutex is + * taken and return the handle of the task that has taken the mutex. + */ +TaskHandle_t pvTaskIncrementMutexHeldCount( void ) PRIVILEGED_FUNCTION; + +/* + * For internal use only. Same as vTaskSetTimeOutState(), but without a critial + * section. + */ +void vTaskInternalSetTimeOutState( TimeOut_t * const pxTimeOut ) PRIVILEGED_FUNCTION; + + +#ifdef __cplusplus +} +#endif +#endif /* INC_TASK_H */ + + + diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/FreeRTOS/Source/include/timers.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/FreeRTOS/Source/include/timers.h new file mode 100644 index 0000000..5abb7f1 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/FreeRTOS/Source/include/timers.h @@ -0,0 +1,1295 @@ +/* + * FreeRTOS Kernel V10.2.1 + * Copyright (C) 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * http://www.FreeRTOS.org + * http://aws.amazon.com/freertos + * + * 1 tab == 4 spaces! + */ + + +#ifndef TIMERS_H +#define TIMERS_H + +#ifndef INC_FREERTOS_H + #error "include FreeRTOS.h must appear in source files before include timers.h" +#endif + +/*lint -save -e537 This headers are only multiply included if the application code +happens to also be including task.h. */ +#include "task.h" +/*lint -restore */ + +#ifdef __cplusplus +extern "C" { +#endif + +/*----------------------------------------------------------- + * MACROS AND DEFINITIONS + *----------------------------------------------------------*/ + +/* IDs for commands that can be sent/received on the timer queue. These are to +be used solely through the macros that make up the public software timer API, +as defined below. The commands that are sent from interrupts must use the +highest numbers as tmrFIRST_FROM_ISR_COMMAND is used to determine if the task +or interrupt version of the queue send function should be used. */ +#define tmrCOMMAND_EXECUTE_CALLBACK_FROM_ISR ( ( BaseType_t ) -2 ) +#define tmrCOMMAND_EXECUTE_CALLBACK ( ( BaseType_t ) -1 ) +#define tmrCOMMAND_START_DONT_TRACE ( ( BaseType_t ) 0 ) +#define tmrCOMMAND_START ( ( BaseType_t ) 1 ) +#define tmrCOMMAND_RESET ( ( BaseType_t ) 2 ) +#define tmrCOMMAND_STOP ( ( BaseType_t ) 3 ) +#define tmrCOMMAND_CHANGE_PERIOD ( ( BaseType_t ) 4 ) +#define tmrCOMMAND_DELETE ( ( BaseType_t ) 5 ) + +#define tmrFIRST_FROM_ISR_COMMAND ( ( BaseType_t ) 6 ) +#define tmrCOMMAND_START_FROM_ISR ( ( BaseType_t ) 6 ) +#define tmrCOMMAND_RESET_FROM_ISR ( ( BaseType_t ) 7 ) +#define tmrCOMMAND_STOP_FROM_ISR ( ( BaseType_t ) 8 ) +#define tmrCOMMAND_CHANGE_PERIOD_FROM_ISR ( ( BaseType_t ) 9 ) + + +/** + * Type by which software timers are referenced. For example, a call to + * xTimerCreate() returns an TimerHandle_t variable that can then be used to + * reference the subject timer in calls to other software timer API functions + * (for example, xTimerStart(), xTimerReset(), etc.). + */ +struct tmrTimerControl; /* The old naming convention is used to prevent breaking kernel aware debuggers. */ +typedef struct tmrTimerControl * TimerHandle_t; + +/* + * Defines the prototype to which timer callback functions must conform. + */ +typedef void (*TimerCallbackFunction_t)( TimerHandle_t xTimer ); + +/* + * Defines the prototype to which functions used with the + * xTimerPendFunctionCallFromISR() function must conform. 
+ */ +typedef void (*PendedFunction_t)( void *, uint32_t ); + +/** + * TimerHandle_t xTimerCreate( const char * const pcTimerName, + * TickType_t xTimerPeriodInTicks, + * UBaseType_t uxAutoReload, + * void * pvTimerID, + * TimerCallbackFunction_t pxCallbackFunction ); + * + * Creates a new software timer instance, and returns a handle by which the + * created software timer can be referenced. + * + * Internally, within the FreeRTOS implementation, software timers use a block + * of memory, in which the timer data structure is stored. If a software timer + * is created using xTimerCreate() then the required memory is automatically + * dynamically allocated inside the xTimerCreate() function. (see + * http://www.freertos.org/a00111.html). If a software timer is created using + * xTimerCreateStatic() then the application writer must provide the memory that + * will get used by the software timer. xTimerCreateStatic() therefore allows a + * software timer to be created without using any dynamic memory allocation. + * + * Timers are created in the dormant state. The xTimerStart(), xTimerReset(), + * xTimerStartFromISR(), xTimerResetFromISR(), xTimerChangePeriod() and + * xTimerChangePeriodFromISR() API functions can all be used to transition a + * timer into the active state. + * + * @param pcTimerName A text name that is assigned to the timer. This is done + * purely to assist debugging. The kernel itself only ever references a timer + * by its handle, and never by its name. + * + * @param xTimerPeriodInTicks The timer period. The time is defined in tick + * periods so the constant portTICK_PERIOD_MS can be used to convert a time that + * has been specified in milliseconds. For example, if the timer must expire + * after 100 ticks, then xTimerPeriodInTicks should be set to 100. + * Alternatively, if the timer must expire after 500ms, then xPeriod can be set + * to ( 500 / portTICK_PERIOD_MS ) provided configTICK_RATE_HZ is less than or + * equal to 1000. + * + * @param uxAutoReload If uxAutoReload is set to pdTRUE then the timer will + * expire repeatedly with a frequency set by the xTimerPeriodInTicks parameter. + * If uxAutoReload is set to pdFALSE then the timer will be a one-shot timer and + * enter the dormant state after it expires. + * + * @param pvTimerID An identifier that is assigned to the timer being created. + * Typically this would be used in the timer callback function to identify which + * timer expired when the same callback function is assigned to more than one + * timer. + * + * @param pxCallbackFunction The function to call when the timer expires. + * Callback functions must have the prototype defined by TimerCallbackFunction_t, + * which is "void vCallbackFunction( TimerHandle_t xTimer );". + * + * @return If the timer is successfully created then a handle to the newly + * created timer is returned. If the timer cannot be created (because either + * there is insufficient FreeRTOS heap remaining to allocate the timer + * structures, or the timer period was set to 0) then NULL is returned. + * + * Example usage: + * @verbatim + * #define NUM_TIMERS 5 + * + * // An array to hold handles to the created timers. + * TimerHandle_t xTimers[ NUM_TIMERS ]; + * + * // An array to hold a count of the number of times each timer expires. + * int32_t lExpireCounters[ NUM_TIMERS ] = { 0 }; + * + * // Define a callback function that will be used by multiple timer instances. 
+ * // The callback function does nothing but count the number of times the + * // associated timer expires, and stop the timer once the timer has expired + * // 10 times. + * void vTimerCallback( TimerHandle_t pxTimer ) + * { + * int32_t lArrayIndex; + * const int32_t xMaxExpiryCountBeforeStopping = 10; + * + * // Optionally do something if the pxTimer parameter is NULL. + * configASSERT( pxTimer ); + * + * // Which timer expired? + * lArrayIndex = ( int32_t ) pvTimerGetTimerID( pxTimer ); + * + * // Increment the number of times that pxTimer has expired. + * lExpireCounters[ lArrayIndex ] += 1; + * + * // If the timer has expired 10 times then stop it from running. + * if( lExpireCounters[ lArrayIndex ] == xMaxExpiryCountBeforeStopping ) + * { + * // Do not use a block time if calling a timer API function from a + * // timer callback function, as doing so could cause a deadlock! + * xTimerStop( pxTimer, 0 ); + * } + * } + * + * void main( void ) + * { + * int32_t x; + * + * // Create then start some timers. Starting the timers before the scheduler + * // has been started means the timers will start running immediately that + * // the scheduler starts. + * for( x = 0; x < NUM_TIMERS; x++ ) + * { + * xTimers[ x ] = xTimerCreate( "Timer", // Just a text name, not used by the kernel. + * ( 100 * x ), // The timer period in ticks. + * pdTRUE, // The timers will auto-reload themselves when they expire. + * ( void * ) x, // Assign each timer a unique id equal to its array index. + * vTimerCallback // Each timer calls the same callback when it expires. + * ); + * + * if( xTimers[ x ] == NULL ) + * { + * // The timer was not created. + * } + * else + * { + * // Start the timer. No block time is specified, and even if one was + * // it would be ignored because the scheduler has not yet been + * // started. + * if( xTimerStart( xTimers[ x ], 0 ) != pdPASS ) + * { + * // The timer could not be set into the Active state. + * } + * } + * } + * + * // ... + * // Create tasks here. + * // ... + * + * // Starting the scheduler will start the timers running as they have already + * // been set into the active state. + * vTaskStartScheduler(); + * + * // Should not reach here. + * for( ;; ); + * } + * @endverbatim + */ +#if( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) + TimerHandle_t xTimerCreate( const char * const pcTimerName, /*lint !e971 Unqualified char types are allowed for strings and single characters only. */ + const TickType_t xTimerPeriodInTicks, + const UBaseType_t uxAutoReload, + void * const pvTimerID, + TimerCallbackFunction_t pxCallbackFunction ) PRIVILEGED_FUNCTION; +#endif + +/** + * TimerHandle_t xTimerCreateStatic(const char * const pcTimerName, + * TickType_t xTimerPeriodInTicks, + * UBaseType_t uxAutoReload, + * void * pvTimerID, + * TimerCallbackFunction_t pxCallbackFunction, + * StaticTimer_t *pxTimerBuffer ); + * + * Creates a new software timer instance, and returns a handle by which the + * created software timer can be referenced. + * + * Internally, within the FreeRTOS implementation, software timers use a block + * of memory, in which the timer data structure is stored. If a software timer + * is created using xTimerCreate() then the required memory is automatically + * dynamically allocated inside the xTimerCreate() function. (see + * http://www.freertos.org/a00111.html). If a software timer is created using + * xTimerCreateStatic() then the application writer must provide the memory that + * will get used by the software timer. 
xTimerCreateStatic() therefore allows a + * software timer to be created without using any dynamic memory allocation. + * + * Timers are created in the dormant state. The xTimerStart(), xTimerReset(), + * xTimerStartFromISR(), xTimerResetFromISR(), xTimerChangePeriod() and + * xTimerChangePeriodFromISR() API functions can all be used to transition a + * timer into the active state. + * + * @param pcTimerName A text name that is assigned to the timer. This is done + * purely to assist debugging. The kernel itself only ever references a timer + * by its handle, and never by its name. + * + * @param xTimerPeriodInTicks The timer period. The time is defined in tick + * periods so the constant portTICK_PERIOD_MS can be used to convert a time that + * has been specified in milliseconds. For example, if the timer must expire + * after 100 ticks, then xTimerPeriodInTicks should be set to 100. + * Alternatively, if the timer must expire after 500ms, then xPeriod can be set + * to ( 500 / portTICK_PERIOD_MS ) provided configTICK_RATE_HZ is less than or + * equal to 1000. + * + * @param uxAutoReload If uxAutoReload is set to pdTRUE then the timer will + * expire repeatedly with a frequency set by the xTimerPeriodInTicks parameter. + * If uxAutoReload is set to pdFALSE then the timer will be a one-shot timer and + * enter the dormant state after it expires. + * + * @param pvTimerID An identifier that is assigned to the timer being created. + * Typically this would be used in the timer callback function to identify which + * timer expired when the same callback function is assigned to more than one + * timer. + * + * @param pxCallbackFunction The function to call when the timer expires. + * Callback functions must have the prototype defined by TimerCallbackFunction_t, + * which is "void vCallbackFunction( TimerHandle_t xTimer );". + * + * @param pxTimerBuffer Must point to a variable of type StaticTimer_t, which + * will be then be used to hold the software timer's data structures, removing + * the need for the memory to be allocated dynamically. + * + * @return If the timer is created then a handle to the created timer is + * returned. If pxTimerBuffer was NULL then NULL is returned. + * + * Example usage: + * @verbatim + * + * // The buffer used to hold the software timer's data structure. + * static StaticTimer_t xTimerBuffer; + * + * // A variable that will be incremented by the software timer's callback + * // function. + * UBaseType_t uxVariableToIncrement = 0; + * + * // A software timer callback function that increments a variable passed to + * // it when the software timer was created. After the 5th increment the + * // callback function stops the software timer. + * static void prvTimerCallback( TimerHandle_t xExpiredTimer ) + * { + * UBaseType_t *puxVariableToIncrement; + * BaseType_t xReturned; + * + * // Obtain the address of the variable to increment from the timer ID. + * puxVariableToIncrement = ( UBaseType_t * ) pvTimerGetTimerID( xExpiredTimer ); + * + * // Increment the variable to show the timer callback has executed. + * ( *puxVariableToIncrement )++; + * + * // If this callback has executed the required number of times, stop the + * // timer. + * if( *puxVariableToIncrement == 5 ) + * { + * // This is called from a timer callback so must not block. + * xTimerStop( xExpiredTimer, staticDONT_BLOCK ); + * } + * } + * + * + * void main( void ) + * { + * // Create the software time. xTimerCreateStatic() has an extra parameter + * // than the normal xTimerCreate() API function. 
The parameter is a pointer + * // to the StaticTimer_t structure that will hold the software timer + * // structure. If the parameter is passed as NULL then the structure will be + * // allocated dynamically, just as if xTimerCreate() had been called. + * xTimer = xTimerCreateStatic( "T1", // Text name for the task. Helps debugging only. Not used by FreeRTOS. + * xTimerPeriod, // The period of the timer in ticks. + * pdTRUE, // This is an auto-reload timer. + * ( void * ) &uxVariableToIncrement, // A variable incremented by the software timer's callback function + * prvTimerCallback, // The function to execute when the timer expires. + * &xTimerBuffer ); // The buffer that will hold the software timer structure. + * + * // The scheduler has not started yet so a block time is not used. + * xReturned = xTimerStart( xTimer, 0 ); + * + * // ... + * // Create tasks here. + * // ... + * + * // Starting the scheduler will start the timers running as they have already + * // been set into the active state. + * vTaskStartScheduler(); + * + * // Should not reach here. + * for( ;; ); + * } + * @endverbatim + */ +#if( configSUPPORT_STATIC_ALLOCATION == 1 ) + TimerHandle_t xTimerCreateStatic( const char * const pcTimerName, /*lint !e971 Unqualified char types are allowed for strings and single characters only. */ + const TickType_t xTimerPeriodInTicks, + const UBaseType_t uxAutoReload, + void * const pvTimerID, + TimerCallbackFunction_t pxCallbackFunction, + StaticTimer_t *pxTimerBuffer ) PRIVILEGED_FUNCTION; +#endif /* configSUPPORT_STATIC_ALLOCATION */ + +/** + * void *pvTimerGetTimerID( TimerHandle_t xTimer ); + * + * Returns the ID assigned to the timer. + * + * IDs are assigned to timers using the pvTimerID parameter of the call to + * xTimerCreated() that was used to create the timer, and by calling the + * vTimerSetTimerID() API function. + * + * If the same callback function is assigned to multiple timers then the timer + * ID can be used as time specific (timer local) storage. + * + * @param xTimer The timer being queried. + * + * @return The ID assigned to the timer being queried. + * + * Example usage: + * + * See the xTimerCreate() API function example usage scenario. + */ +void *pvTimerGetTimerID( const TimerHandle_t xTimer ) PRIVILEGED_FUNCTION; + +/** + * void vTimerSetTimerID( TimerHandle_t xTimer, void *pvNewID ); + * + * Sets the ID assigned to the timer. + * + * IDs are assigned to timers using the pvTimerID parameter of the call to + * xTimerCreated() that was used to create the timer. + * + * If the same callback function is assigned to multiple timers then the timer + * ID can be used as time specific (timer local) storage. + * + * @param xTimer The timer being updated. + * + * @param pvNewID The ID to assign to the timer. + * + * Example usage: + * + * See the xTimerCreate() API function example usage scenario. + */ +void vTimerSetTimerID( TimerHandle_t xTimer, void *pvNewID ) PRIVILEGED_FUNCTION; + +/** + * BaseType_t xTimerIsTimerActive( TimerHandle_t xTimer ); + * + * Queries a timer to see if it is active or dormant. + * + * A timer will be dormant if: + * 1) It has been created but not started, or + * 2) It is an expired one-shot timer that has not been restarted. + * + * Timers are created in the dormant state. The xTimerStart(), xTimerReset(), + * xTimerStartFromISR(), xTimerResetFromISR(), xTimerChangePeriod() and + * xTimerChangePeriodFromISR() API functions can all be used to transition a timer into the + * active state. + * + * @param xTimer The timer being queried. 
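/*
 * Editor's sketch (not part of the kernel sources): using the timer ID as
 * per-timer storage when several timers share one callback, as described for
 * pvTimerGetTimerID()/vTimerSetTimerID() above.  The count is kept in the ID
 * itself by casting through uintptr_t, which assumes the value fits in a
 * pointer; a real application might instead store a pointer to a per-timer
 * structure.
 */
#include <stdint.h>
#include "FreeRTOS.h"
#include "timers.h"

static void prvSharedExpiryCallback( TimerHandle_t xTimer )
{
    /* Read the per-timer expiry count out of the timer ID, increment it and
    write it back. */
    uintptr_t uxCount = ( uintptr_t ) pvTimerGetTimerID( xTimer );
    uxCount++;
    vTimerSetTimerID( xTimer, ( void * ) uxCount );

    /* Stop this particular timer after ten expiries.  A block time of 0 is
    used because blocking in a timer callback would stall the timer service
    task. */
    if( uxCount >= 10 )
    {
        xTimerStop( xTimer, 0 );
    }
}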
+ * + * @return pdFALSE will be returned if the timer is dormant. A value other than + * pdFALSE will be returned if the timer is active. + * + * Example usage: + * @verbatim + * // This function assumes xTimer has already been created. + * void vAFunction( TimerHandle_t xTimer ) + * { + * if( xTimerIsTimerActive( xTimer ) != pdFALSE ) // or more simply and equivalently "if( xTimerIsTimerActive( xTimer ) )" + * { + * // xTimer is active, do something. + * } + * else + * { + * // xTimer is not active, do something else. + * } + * } + * @endverbatim + */ +BaseType_t xTimerIsTimerActive( TimerHandle_t xTimer ) PRIVILEGED_FUNCTION; + +/** + * TaskHandle_t xTimerGetTimerDaemonTaskHandle( void ); + * + * Simply returns the handle of the timer service/daemon task. It it not valid + * to call xTimerGetTimerDaemonTaskHandle() before the scheduler has been started. + */ +TaskHandle_t xTimerGetTimerDaemonTaskHandle( void ) PRIVILEGED_FUNCTION; + +/** + * BaseType_t xTimerStart( TimerHandle_t xTimer, TickType_t xTicksToWait ); + * + * Timer functionality is provided by a timer service/daemon task. Many of the + * public FreeRTOS timer API functions send commands to the timer service task + * through a queue called the timer command queue. The timer command queue is + * private to the kernel itself and is not directly accessible to application + * code. The length of the timer command queue is set by the + * configTIMER_QUEUE_LENGTH configuration constant. + * + * xTimerStart() starts a timer that was previously created using the + * xTimerCreate() API function. If the timer had already been started and was + * already in the active state, then xTimerStart() has equivalent functionality + * to the xTimerReset() API function. + * + * Starting a timer ensures the timer is in the active state. If the timer + * is not stopped, deleted, or reset in the mean time, the callback function + * associated with the timer will get called 'n' ticks after xTimerStart() was + * called, where 'n' is the timers defined period. + * + * It is valid to call xTimerStart() before the scheduler has been started, but + * when this is done the timer will not actually start until the scheduler is + * started, and the timers expiry time will be relative to when the scheduler is + * started, not relative to when xTimerStart() was called. + * + * The configUSE_TIMERS configuration constant must be set to 1 for xTimerStart() + * to be available. + * + * @param xTimer The handle of the timer being started/restarted. + * + * @param xTicksToWait Specifies the time, in ticks, that the calling task should + * be held in the Blocked state to wait for the start command to be successfully + * sent to the timer command queue, should the queue already be full when + * xTimerStart() was called. xTicksToWait is ignored if xTimerStart() is called + * before the scheduler is started. + * + * @return pdFAIL will be returned if the start command could not be sent to + * the timer command queue even after xTicksToWait ticks had passed. pdPASS will + * be returned if the command was successfully sent to the timer command queue. + * When the command is actually processed will depend on the priority of the + * timer service/daemon task relative to other tasks in the system, although the + * timers expiry time is relative to when xTimerStart() is actually called. The + * timer service/daemon task priority is set by the configTIMER_TASK_PRIORITY + * configuration constant. 
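/*
 * Editor's sketch (not part of the kernel sources): starting and later
 * stopping a timer from a task once the scheduler is running.  Unlike the
 * start-up examples elsewhere in this header, a non-zero block time is useful
 * here because the timer command queue could momentarily be full.
 * xExampleTimer is assumed to have been created with xTimerCreate()
 * beforehand.
 */
#include "FreeRTOS.h"
#include "timers.h"

extern TimerHandle_t xExampleTimer; /* created elsewhere with xTimerCreate() */

void vStartThenStopExample( void )
{
    /* Wait up to 10ms (converted to ticks) for space on the timer command
    queue. */
    if( xTimerStart( xExampleTimer, pdMS_TO_TICKS( 10 ) ) != pdPASS )
    {
        /* The start command could not be queued in time. */
    }

    /* ... later ... */

    if( xTimerStop( xExampleTimer, pdMS_TO_TICKS( 10 ) ) != pdPASS )
    {
        /* The stop command could not be queued in time. */
    }
}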
+ * + * Example usage: + * + * See the xTimerCreate() API function example usage scenario. + * + */ +#define xTimerStart( xTimer, xTicksToWait ) xTimerGenericCommand( ( xTimer ), tmrCOMMAND_START, ( xTaskGetTickCount() ), NULL, ( xTicksToWait ) ) + +/** + * BaseType_t xTimerStop( TimerHandle_t xTimer, TickType_t xTicksToWait ); + * + * Timer functionality is provided by a timer service/daemon task. Many of the + * public FreeRTOS timer API functions send commands to the timer service task + * through a queue called the timer command queue. The timer command queue is + * private to the kernel itself and is not directly accessible to application + * code. The length of the timer command queue is set by the + * configTIMER_QUEUE_LENGTH configuration constant. + * + * xTimerStop() stops a timer that was previously started using either of the + * The xTimerStart(), xTimerReset(), xTimerStartFromISR(), xTimerResetFromISR(), + * xTimerChangePeriod() or xTimerChangePeriodFromISR() API functions. + * + * Stopping a timer ensures the timer is not in the active state. + * + * The configUSE_TIMERS configuration constant must be set to 1 for xTimerStop() + * to be available. + * + * @param xTimer The handle of the timer being stopped. + * + * @param xTicksToWait Specifies the time, in ticks, that the calling task should + * be held in the Blocked state to wait for the stop command to be successfully + * sent to the timer command queue, should the queue already be full when + * xTimerStop() was called. xTicksToWait is ignored if xTimerStop() is called + * before the scheduler is started. + * + * @return pdFAIL will be returned if the stop command could not be sent to + * the timer command queue even after xTicksToWait ticks had passed. pdPASS will + * be returned if the command was successfully sent to the timer command queue. + * When the command is actually processed will depend on the priority of the + * timer service/daemon task relative to other tasks in the system. The timer + * service/daemon task priority is set by the configTIMER_TASK_PRIORITY + * configuration constant. + * + * Example usage: + * + * See the xTimerCreate() API function example usage scenario. + * + */ +#define xTimerStop( xTimer, xTicksToWait ) xTimerGenericCommand( ( xTimer ), tmrCOMMAND_STOP, 0U, NULL, ( xTicksToWait ) ) + +/** + * BaseType_t xTimerChangePeriod( TimerHandle_t xTimer, + * TickType_t xNewPeriod, + * TickType_t xTicksToWait ); + * + * Timer functionality is provided by a timer service/daemon task. Many of the + * public FreeRTOS timer API functions send commands to the timer service task + * through a queue called the timer command queue. The timer command queue is + * private to the kernel itself and is not directly accessible to application + * code. The length of the timer command queue is set by the + * configTIMER_QUEUE_LENGTH configuration constant. + * + * xTimerChangePeriod() changes the period of a timer that was previously + * created using the xTimerCreate() API function. + * + * xTimerChangePeriod() can be called to change the period of an active or + * dormant state timer. + * + * The configUSE_TIMERS configuration constant must be set to 1 for + * xTimerChangePeriod() to be available. + * + * @param xTimer The handle of the timer that is having its period changed. + * + * @param xNewPeriod The new period for xTimer. Timer periods are specified in + * tick periods, so the constant portTICK_PERIOD_MS can be used to convert a time + * that has been specified in milliseconds. 
For example, if the timer must + * expire after 100 ticks, then xNewPeriod should be set to 100. Alternatively, + * if the timer must expire after 500ms, then xNewPeriod can be set to + * ( 500 / portTICK_PERIOD_MS ) provided configTICK_RATE_HZ is less than + * or equal to 1000. + * + * @param xTicksToWait Specifies the time, in ticks, that the calling task should + * be held in the Blocked state to wait for the change period command to be + * successfully sent to the timer command queue, should the queue already be + * full when xTimerChangePeriod() was called. xTicksToWait is ignored if + * xTimerChangePeriod() is called before the scheduler is started. + * + * @return pdFAIL will be returned if the change period command could not be + * sent to the timer command queue even after xTicksToWait ticks had passed. + * pdPASS will be returned if the command was successfully sent to the timer + * command queue. When the command is actually processed will depend on the + * priority of the timer service/daemon task relative to other tasks in the + * system. The timer service/daemon task priority is set by the + * configTIMER_TASK_PRIORITY configuration constant. + * + * Example usage: + * @verbatim + * // This function assumes xTimer has already been created. If the timer + * // referenced by xTimer is already active when it is called, then the timer + * // is deleted. If the timer referenced by xTimer is not active when it is + * // called, then the period of the timer is set to 500ms and the timer is + * // started. + * void vAFunction( TimerHandle_t xTimer ) + * { + * if( xTimerIsTimerActive( xTimer ) != pdFALSE ) // or more simply and equivalently "if( xTimerIsTimerActive( xTimer ) )" + * { + * // xTimer is already active - delete it. + * xTimerDelete( xTimer ); + * } + * else + * { + * // xTimer is not active, change its period to 500ms. This will also + * // cause the timer to start. Block for a maximum of 100 ticks if the + * // change period command cannot immediately be sent to the timer + * // command queue. + * if( xTimerChangePeriod( xTimer, 500 / portTICK_PERIOD_MS, 100 ) == pdPASS ) + * { + * // The command was successfully sent. + * } + * else + * { + * // The command could not be sent, even after waiting for 100 ticks + * // to pass. Take appropriate action here. + * } + * } + * } + * @endverbatim + */ + #define xTimerChangePeriod( xTimer, xNewPeriod, xTicksToWait ) xTimerGenericCommand( ( xTimer ), tmrCOMMAND_CHANGE_PERIOD, ( xNewPeriod ), NULL, ( xTicksToWait ) ) + +/** + * BaseType_t xTimerDelete( TimerHandle_t xTimer, TickType_t xTicksToWait ); + * + * Timer functionality is provided by a timer service/daemon task. Many of the + * public FreeRTOS timer API functions send commands to the timer service task + * through a queue called the timer command queue. The timer command queue is + * private to the kernel itself and is not directly accessible to application + * code. The length of the timer command queue is set by the + * configTIMER_QUEUE_LENGTH configuration constant. + * + * xTimerDelete() deletes a timer that was previously created using the + * xTimerCreate() API function. + * + * The configUSE_TIMERS configuration constant must be set to 1 for + * xTimerDelete() to be available. + * + * @param xTimer The handle of the timer being deleted. 
+ * + * @param xTicksToWait Specifies the time, in ticks, that the calling task should + * be held in the Blocked state to wait for the delete command to be + * successfully sent to the timer command queue, should the queue already be + * full when xTimerDelete() was called. xTicksToWait is ignored if xTimerDelete() + * is called before the scheduler is started. + * + * @return pdFAIL will be returned if the delete command could not be sent to + * the timer command queue even after xTicksToWait ticks had passed. pdPASS will + * be returned if the command was successfully sent to the timer command queue. + * When the command is actually processed will depend on the priority of the + * timer service/daemon task relative to other tasks in the system. The timer + * service/daemon task priority is set by the configTIMER_TASK_PRIORITY + * configuration constant. + * + * Example usage: + * + * See the xTimerChangePeriod() API function example usage scenario. + */ +#define xTimerDelete( xTimer, xTicksToWait ) xTimerGenericCommand( ( xTimer ), tmrCOMMAND_DELETE, 0U, NULL, ( xTicksToWait ) ) + +/** + * BaseType_t xTimerReset( TimerHandle_t xTimer, TickType_t xTicksToWait ); + * + * Timer functionality is provided by a timer service/daemon task. Many of the + * public FreeRTOS timer API functions send commands to the timer service task + * through a queue called the timer command queue. The timer command queue is + * private to the kernel itself and is not directly accessible to application + * code. The length of the timer command queue is set by the + * configTIMER_QUEUE_LENGTH configuration constant. + * + * xTimerReset() re-starts a timer that was previously created using the + * xTimerCreate() API function. If the timer had already been started and was + * already in the active state, then xTimerReset() will cause the timer to + * re-evaluate its expiry time so that it is relative to when xTimerReset() was + * called. If the timer was in the dormant state then xTimerReset() has + * equivalent functionality to the xTimerStart() API function. + * + * Resetting a timer ensures the timer is in the active state. If the timer + * is not stopped, deleted, or reset in the mean time, the callback function + * associated with the timer will get called 'n' ticks after xTimerReset() was + * called, where 'n' is the timers defined period. + * + * It is valid to call xTimerReset() before the scheduler has been started, but + * when this is done the timer will not actually start until the scheduler is + * started, and the timers expiry time will be relative to when the scheduler is + * started, not relative to when xTimerReset() was called. + * + * The configUSE_TIMERS configuration constant must be set to 1 for xTimerReset() + * to be available. + * + * @param xTimer The handle of the timer being reset/started/restarted. + * + * @param xTicksToWait Specifies the time, in ticks, that the calling task should + * be held in the Blocked state to wait for the reset command to be successfully + * sent to the timer command queue, should the queue already be full when + * xTimerReset() was called. xTicksToWait is ignored if xTimerReset() is called + * before the scheduler is started. + * + * @return pdFAIL will be returned if the reset command could not be sent to + * the timer command queue even after xTicksToWait ticks had passed. pdPASS will + * be returned if the command was successfully sent to the timer command queue. 
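/*
 * Editor's sketch (not part of the kernel sources): deleting a timer.  Note
 * that, as defined just above, xTimerDelete() takes both the timer handle and
 * a block time, even though the xTimerChangePeriod() usage scenario earlier
 * in this header calls it with a single argument for brevity.  xExampleTimer
 * is assumed to have been created with xTimerCreate().
 */
#include "FreeRTOS.h"
#include "timers.h"

extern TimerHandle_t xExampleTimer;

void vDeleteTimerExample( void )
{
    /* Wait up to 100 ticks for the delete command to be accepted by the
    timer command queue. */
    if( xTimerDelete( xExampleTimer, 100 ) == pdPASS )
    {
        /* The command was queued; do not use the handle again. */
        xExampleTimer = NULL;
    }
}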
+ * When the command is actually processed will depend on the priority of the + * timer service/daemon task relative to other tasks in the system, although the + * timers expiry time is relative to when xTimerStart() is actually called. The + * timer service/daemon task priority is set by the configTIMER_TASK_PRIORITY + * configuration constant. + * + * Example usage: + * @verbatim + * // When a key is pressed, an LCD back-light is switched on. If 5 seconds pass + * // without a key being pressed, then the LCD back-light is switched off. In + * // this case, the timer is a one-shot timer. + * + * TimerHandle_t xBacklightTimer = NULL; + * + * // The callback function assigned to the one-shot timer. In this case the + * // parameter is not used. + * void vBacklightTimerCallback( TimerHandle_t pxTimer ) + * { + * // The timer expired, therefore 5 seconds must have passed since a key + * // was pressed. Switch off the LCD back-light. + * vSetBacklightState( BACKLIGHT_OFF ); + * } + * + * // The key press event handler. + * void vKeyPressEventHandler( char cKey ) + * { + * // Ensure the LCD back-light is on, then reset the timer that is + * // responsible for turning the back-light off after 5 seconds of + * // key inactivity. Wait 10 ticks for the command to be successfully sent + * // if it cannot be sent immediately. + * vSetBacklightState( BACKLIGHT_ON ); + * if( xTimerReset( xBacklightTimer, 100 ) != pdPASS ) + * { + * // The reset command was not executed successfully. Take appropriate + * // action here. + * } + * + * // Perform the rest of the key processing here. + * } + * + * void main( void ) + * { + * int32_t x; + * + * // Create then start the one-shot timer that is responsible for turning + * // the back-light off if no keys are pressed within a 5 second period. + * xBacklightTimer = xTimerCreate( "BacklightTimer", // Just a text name, not used by the kernel. + * ( 5000 / portTICK_PERIOD_MS), // The timer period in ticks. + * pdFALSE, // The timer is a one-shot timer. + * 0, // The id is not used by the callback so can take any value. + * vBacklightTimerCallback // The callback function that switches the LCD back-light off. + * ); + * + * if( xBacklightTimer == NULL ) + * { + * // The timer was not created. + * } + * else + * { + * // Start the timer. No block time is specified, and even if one was + * // it would be ignored because the scheduler has not yet been + * // started. + * if( xTimerStart( xBacklightTimer, 0 ) != pdPASS ) + * { + * // The timer could not be set into the Active state. + * } + * } + * + * // ... + * // Create tasks here. + * // ... + * + * // Starting the scheduler will start the timer running as it has already + * // been set into the active state. + * vTaskStartScheduler(); + * + * // Should not reach here. + * for( ;; ); + * } + * @endverbatim + */ +#define xTimerReset( xTimer, xTicksToWait ) xTimerGenericCommand( ( xTimer ), tmrCOMMAND_RESET, ( xTaskGetTickCount() ), NULL, ( xTicksToWait ) ) + +/** + * BaseType_t xTimerStartFromISR( TimerHandle_t xTimer, + * BaseType_t *pxHigherPriorityTaskWoken ); + * + * A version of xTimerStart() that can be called from an interrupt service + * routine. + * + * @param xTimer The handle of the timer being started/restarted. + * + * @param pxHigherPriorityTaskWoken The timer service/daemon task spends most + * of its time in the Blocked state, waiting for messages to arrive on the timer + * command queue. 
Calling xTimerStartFromISR() writes a message to the timer + * command queue, so has the potential to transition the timer service/daemon + * task out of the Blocked state. If calling xTimerStartFromISR() causes the + * timer service/daemon task to leave the Blocked state, and the timer service/ + * daemon task has a priority equal to or greater than the currently executing + * task (the task that was interrupted), then *pxHigherPriorityTaskWoken will + * get set to pdTRUE internally within the xTimerStartFromISR() function. If + * xTimerStartFromISR() sets this value to pdTRUE then a context switch should + * be performed before the interrupt exits. + * + * @return pdFAIL will be returned if the start command could not be sent to + * the timer command queue. pdPASS will be returned if the command was + * successfully sent to the timer command queue. When the command is actually + * processed will depend on the priority of the timer service/daemon task + * relative to other tasks in the system, although the timers expiry time is + * relative to when xTimerStartFromISR() is actually called. The timer + * service/daemon task priority is set by the configTIMER_TASK_PRIORITY + * configuration constant. + * + * Example usage: + * @verbatim + * // This scenario assumes xBacklightTimer has already been created. When a + * // key is pressed, an LCD back-light is switched on. If 5 seconds pass + * // without a key being pressed, then the LCD back-light is switched off. In + * // this case, the timer is a one-shot timer, and unlike the example given for + * // the xTimerReset() function, the key press event handler is an interrupt + * // service routine. + * + * // The callback function assigned to the one-shot timer. In this case the + * // parameter is not used. + * void vBacklightTimerCallback( TimerHandle_t pxTimer ) + * { + * // The timer expired, therefore 5 seconds must have passed since a key + * // was pressed. Switch off the LCD back-light. + * vSetBacklightState( BACKLIGHT_OFF ); + * } + * + * // The key press interrupt service routine. + * void vKeyPressEventInterruptHandler( void ) + * { + * BaseType_t xHigherPriorityTaskWoken = pdFALSE; + * + * // Ensure the LCD back-light is on, then restart the timer that is + * // responsible for turning the back-light off after 5 seconds of + * // key inactivity. This is an interrupt service routine so can only + * // call FreeRTOS API functions that end in "FromISR". + * vSetBacklightState( BACKLIGHT_ON ); + * + * // xTimerStartFromISR() or xTimerResetFromISR() could be called here + * // as both cause the timer to re-calculate its expiry time. + * // xHigherPriorityTaskWoken was initialised to pdFALSE when it was + * // declared (in this function). + * if( xTimerStartFromISR( xBacklightTimer, &xHigherPriorityTaskWoken ) != pdPASS ) + * { + * // The start command was not executed successfully. Take appropriate + * // action here. + * } + * + * // Perform the rest of the key processing here. + * + * // If xHigherPriorityTaskWoken equals pdTRUE, then a context switch + * // should be performed. The syntax required to perform a context switch + * // from inside an ISR varies from port to port, and from compiler to + * // compiler. Inspect the demos for the port you are using to find the + * // actual syntax required. + * if( xHigherPriorityTaskWoken != pdFALSE ) + * { + * // Call the interrupt safe yield function here (actual function + * // depends on the FreeRTOS port being used). 
+ * } + * } + * @endverbatim + */ +#define xTimerStartFromISR( xTimer, pxHigherPriorityTaskWoken ) xTimerGenericCommand( ( xTimer ), tmrCOMMAND_START_FROM_ISR, ( xTaskGetTickCountFromISR() ), ( pxHigherPriorityTaskWoken ), 0U ) + +/** + * BaseType_t xTimerStopFromISR( TimerHandle_t xTimer, + * BaseType_t *pxHigherPriorityTaskWoken ); + * + * A version of xTimerStop() that can be called from an interrupt service + * routine. + * + * @param xTimer The handle of the timer being stopped. + * + * @param pxHigherPriorityTaskWoken The timer service/daemon task spends most + * of its time in the Blocked state, waiting for messages to arrive on the timer + * command queue. Calling xTimerStopFromISR() writes a message to the timer + * command queue, so has the potential to transition the timer service/daemon + * task out of the Blocked state. If calling xTimerStopFromISR() causes the + * timer service/daemon task to leave the Blocked state, and the timer service/ + * daemon task has a priority equal to or greater than the currently executing + * task (the task that was interrupted), then *pxHigherPriorityTaskWoken will + * get set to pdTRUE internally within the xTimerStopFromISR() function. If + * xTimerStopFromISR() sets this value to pdTRUE then a context switch should + * be performed before the interrupt exits. + * + * @return pdFAIL will be returned if the stop command could not be sent to + * the timer command queue. pdPASS will be returned if the command was + * successfully sent to the timer command queue. When the command is actually + * processed will depend on the priority of the timer service/daemon task + * relative to other tasks in the system. The timer service/daemon task + * priority is set by the configTIMER_TASK_PRIORITY configuration constant. + * + * Example usage: + * @verbatim + * // This scenario assumes xTimer has already been created and started. When + * // an interrupt occurs, the timer should be simply stopped. + * + * // The interrupt service routine that stops the timer. + * void vAnExampleInterruptServiceRoutine( void ) + * { + * BaseType_t xHigherPriorityTaskWoken = pdFALSE; + * + * // The interrupt has occurred - simply stop the timer. + * // xHigherPriorityTaskWoken was set to pdFALSE where it was defined + * // (within this function). As this is an interrupt service routine, only + * // FreeRTOS API functions that end in "FromISR" can be used. + * if( xTimerStopFromISR( xTimer, &xHigherPriorityTaskWoken ) != pdPASS ) + * { + * // The stop command was not executed successfully. Take appropriate + * // action here. + * } + * + * // If xHigherPriorityTaskWoken equals pdTRUE, then a context switch + * // should be performed. The syntax required to perform a context switch + * // from inside an ISR varies from port to port, and from compiler to + * // compiler. Inspect the demos for the port you are using to find the + * // actual syntax required. + * if( xHigherPriorityTaskWoken != pdFALSE ) + * { + * // Call the interrupt safe yield function here (actual function + * // depends on the FreeRTOS port being used). + * } + * } + * @endverbatim + */ +#define xTimerStopFromISR( xTimer, pxHigherPriorityTaskWoken ) xTimerGenericCommand( ( xTimer ), tmrCOMMAND_STOP_FROM_ISR, 0, ( pxHigherPriorityTaskWoken ), 0U ) + +/** + * BaseType_t xTimerChangePeriodFromISR( TimerHandle_t xTimer, + * TickType_t xNewPeriod, + * BaseType_t *pxHigherPriorityTaskWoken ); + * + * A version of xTimerChangePeriod() that can be called from an interrupt + * service routine. 
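/*
 * Editor's sketch (not part of the kernel sources): a minimal ISR that
 * changes a timer's period.  As defined further down, the
 * xTimerChangePeriodFromISR() macro takes three arguments - the timer, the
 * new period and the pxHigherPriorityTaskWoken pointer - although the usage
 * scenario below omits the period argument.  xExampleTimer is assumed to
 * exist, and portYIELD_FROM_ISR() is the port-specific yield macro.
 */
#include "FreeRTOS.h"
#include "timers.h"

extern TimerHandle_t xExampleTimer;

void vExampleTimerISR( void )
{
    BaseType_t xHigherPriorityTaskWoken = pdFALSE;

    /* Change the period to 500ms.  A "FromISR" function must be used because
    this runs in interrupt context. */
    if( xTimerChangePeriodFromISR( xExampleTimer,
                                   pdMS_TO_TICKS( 500 ),
                                   &xHigherPriorityTaskWoken ) != pdPASS )
    {
        /* The command could not be queued - the timer command queue was
        full.  Take appropriate action here. */
    }

    /* Request a context switch if queuing the command woke the daemon task. */
    portYIELD_FROM_ISR( xHigherPriorityTaskWoken );
}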
+ * + * @param xTimer The handle of the timer that is having its period changed. + * + * @param xNewPeriod The new period for xTimer. Timer periods are specified in + * tick periods, so the constant portTICK_PERIOD_MS can be used to convert a time + * that has been specified in milliseconds. For example, if the timer must + * expire after 100 ticks, then xNewPeriod should be set to 100. Alternatively, + * if the timer must expire after 500ms, then xNewPeriod can be set to + * ( 500 / portTICK_PERIOD_MS ) provided configTICK_RATE_HZ is less than + * or equal to 1000. + * + * @param pxHigherPriorityTaskWoken The timer service/daemon task spends most + * of its time in the Blocked state, waiting for messages to arrive on the timer + * command queue. Calling xTimerChangePeriodFromISR() writes a message to the + * timer command queue, so has the potential to transition the timer service/ + * daemon task out of the Blocked state. If calling xTimerChangePeriodFromISR() + * causes the timer service/daemon task to leave the Blocked state, and the + * timer service/daemon task has a priority equal to or greater than the + * currently executing task (the task that was interrupted), then + * *pxHigherPriorityTaskWoken will get set to pdTRUE internally within the + * xTimerChangePeriodFromISR() function. If xTimerChangePeriodFromISR() sets + * this value to pdTRUE then a context switch should be performed before the + * interrupt exits. + * + * @return pdFAIL will be returned if the command to change the timers period + * could not be sent to the timer command queue. pdPASS will be returned if the + * command was successfully sent to the timer command queue. When the command + * is actually processed will depend on the priority of the timer service/daemon + * task relative to other tasks in the system. The timer service/daemon task + * priority is set by the configTIMER_TASK_PRIORITY configuration constant. + * + * Example usage: + * @verbatim + * // This scenario assumes xTimer has already been created and started. When + * // an interrupt occurs, the period of xTimer should be changed to 500ms. + * + * // The interrupt service routine that changes the period of xTimer. + * void vAnExampleInterruptServiceRoutine( void ) + * { + * BaseType_t xHigherPriorityTaskWoken = pdFALSE; + * + * // The interrupt has occurred - change the period of xTimer to 500ms. + * // xHigherPriorityTaskWoken was set to pdFALSE where it was defined + * // (within this function). As this is an interrupt service routine, only + * // FreeRTOS API functions that end in "FromISR" can be used. + * if( xTimerChangePeriodFromISR( xTimer, &xHigherPriorityTaskWoken ) != pdPASS ) + * { + * // The command to change the timers period was not executed + * // successfully. Take appropriate action here. + * } + * + * // If xHigherPriorityTaskWoken equals pdTRUE, then a context switch + * // should be performed. The syntax required to perform a context switch + * // from inside an ISR varies from port to port, and from compiler to + * // compiler. Inspect the demos for the port you are using to find the + * // actual syntax required. + * if( xHigherPriorityTaskWoken != pdFALSE ) + * { + * // Call the interrupt safe yield function here (actual function + * // depends on the FreeRTOS port being used). 
+ * } + * } + * @endverbatim + */ +#define xTimerChangePeriodFromISR( xTimer, xNewPeriod, pxHigherPriorityTaskWoken ) xTimerGenericCommand( ( xTimer ), tmrCOMMAND_CHANGE_PERIOD_FROM_ISR, ( xNewPeriod ), ( pxHigherPriorityTaskWoken ), 0U ) + +/** + * BaseType_t xTimerResetFromISR( TimerHandle_t xTimer, + * BaseType_t *pxHigherPriorityTaskWoken ); + * + * A version of xTimerReset() that can be called from an interrupt service + * routine. + * + * @param xTimer The handle of the timer that is to be started, reset, or + * restarted. + * + * @param pxHigherPriorityTaskWoken The timer service/daemon task spends most + * of its time in the Blocked state, waiting for messages to arrive on the timer + * command queue. Calling xTimerResetFromISR() writes a message to the timer + * command queue, so has the potential to transition the timer service/daemon + * task out of the Blocked state. If calling xTimerResetFromISR() causes the + * timer service/daemon task to leave the Blocked state, and the timer service/ + * daemon task has a priority equal to or greater than the currently executing + * task (the task that was interrupted), then *pxHigherPriorityTaskWoken will + * get set to pdTRUE internally within the xTimerResetFromISR() function. If + * xTimerResetFromISR() sets this value to pdTRUE then a context switch should + * be performed before the interrupt exits. + * + * @return pdFAIL will be returned if the reset command could not be sent to + * the timer command queue. pdPASS will be returned if the command was + * successfully sent to the timer command queue. When the command is actually + * processed will depend on the priority of the timer service/daemon task + * relative to other tasks in the system, although the timers expiry time is + * relative to when xTimerResetFromISR() is actually called. The timer service/daemon + * task priority is set by the configTIMER_TASK_PRIORITY configuration constant. + * + * Example usage: + * @verbatim + * // This scenario assumes xBacklightTimer has already been created. When a + * // key is pressed, an LCD back-light is switched on. If 5 seconds pass + * // without a key being pressed, then the LCD back-light is switched off. In + * // this case, the timer is a one-shot timer, and unlike the example given for + * // the xTimerReset() function, the key press event handler is an interrupt + * // service routine. + * + * // The callback function assigned to the one-shot timer. In this case the + * // parameter is not used. + * void vBacklightTimerCallback( TimerHandle_t pxTimer ) + * { + * // The timer expired, therefore 5 seconds must have passed since a key + * // was pressed. Switch off the LCD back-light. + * vSetBacklightState( BACKLIGHT_OFF ); + * } + * + * // The key press interrupt service routine. + * void vKeyPressEventInterruptHandler( void ) + * { + * BaseType_t xHigherPriorityTaskWoken = pdFALSE; + * + * // Ensure the LCD back-light is on, then reset the timer that is + * // responsible for turning the back-light off after 5 seconds of + * // key inactivity. This is an interrupt service routine so can only + * // call FreeRTOS API functions that end in "FromISR". + * vSetBacklightState( BACKLIGHT_ON ); + * + * // xTimerStartFromISR() or xTimerResetFromISR() could be called here + * // as both cause the timer to re-calculate its expiry time. + * // xHigherPriorityTaskWoken was initialised to pdFALSE when it was + * // declared (in this function). 
+ * if( xTimerResetFromISR( xBacklightTimer, &xHigherPriorityTaskWoken ) != pdPASS ) + * { + * // The reset command was not executed successfully. Take appropriate + * // action here. + * } + * + * // Perform the rest of the key processing here. + * + * // If xHigherPriorityTaskWoken equals pdTRUE, then a context switch + * // should be performed. The syntax required to perform a context switch + * // from inside an ISR varies from port to port, and from compiler to + * // compiler. Inspect the demos for the port you are using to find the + * // actual syntax required. + * if( xHigherPriorityTaskWoken != pdFALSE ) + * { + * // Call the interrupt safe yield function here (actual function + * // depends on the FreeRTOS port being used). + * } + * } + * @endverbatim + */ +#define xTimerResetFromISR( xTimer, pxHigherPriorityTaskWoken ) xTimerGenericCommand( ( xTimer ), tmrCOMMAND_RESET_FROM_ISR, ( xTaskGetTickCountFromISR() ), ( pxHigherPriorityTaskWoken ), 0U ) + + +/** + * BaseType_t xTimerPendFunctionCallFromISR( PendedFunction_t xFunctionToPend, + * void *pvParameter1, + * uint32_t ulParameter2, + * BaseType_t *pxHigherPriorityTaskWoken ); + * + * + * Used from application interrupt service routines to defer the execution of a + * function to the RTOS daemon task (the timer service task, hence this function + * is implemented in timers.c and is prefixed with 'Timer'). + * + * Ideally an interrupt service routine (ISR) is kept as short as possible, but + * sometimes an ISR either has a lot of processing to do, or needs to perform + * processing that is not deterministic. In these cases + * xTimerPendFunctionCallFromISR() can be used to defer processing of a function + * to the RTOS daemon task. + * + * A mechanism is provided that allows the interrupt to return directly to the + * task that will subsequently execute the pended callback function. This + * allows the callback function to execute contiguously in time with the + * interrupt - just as if the callback had executed in the interrupt itself. + * + * @param xFunctionToPend The function to execute from the timer service/ + * daemon task. The function must conform to the PendedFunction_t + * prototype. + * + * @param pvParameter1 The value of the callback function's first parameter. + * The parameter has a void * type to allow it to be used to pass any type. + * For example, unsigned longs can be cast to a void *, or the void * can be + * used to point to a structure. + * + * @param ulParameter2 The value of the callback function's second parameter. + * + * @param pxHigherPriorityTaskWoken As mentioned above, calling this function + * will result in a message being sent to the timer daemon task. If the + * priority of the timer daemon task (which is set using + * configTIMER_TASK_PRIORITY in FreeRTOSConfig.h) is higher than the priority of + * the currently running task (the task the interrupt interrupted) then + * *pxHigherPriorityTaskWoken will be set to pdTRUE within + * xTimerPendFunctionCallFromISR(), indicating that a context switch should be + * requested before the interrupt exits. For that reason + * *pxHigherPriorityTaskWoken must be initialised to pdFALSE. See the + * example code below. + * + * @return pdPASS is returned if the message was successfully sent to the + * timer daemon task, otherwise pdFALSE is returned. + * + * Example usage: + * @verbatim + * + * // The callback function that will execute in the context of the daemon task. + * // Note callback functions must all use this same prototype. 
+ * void vProcessInterface( void *pvParameter1, uint32_t ulParameter2 ) + * { + * BaseType_t xInterfaceToService; + * + * // The interface that requires servicing is passed in the second + * // parameter. The first parameter is not used in this case. + * xInterfaceToService = ( BaseType_t ) ulParameter2; + * + * // ...Perform the processing here... + * } + * + * // An ISR that receives data packets from multiple interfaces + * void vAnISR( void ) + * { + * BaseType_t xInterfaceToService, xHigherPriorityTaskWoken; + * + * // Query the hardware to determine which interface needs processing. + * xInterfaceToService = prvCheckInterfaces(); + * + * // The actual processing is to be deferred to a task. Request the + * // vProcessInterface() callback function is executed, passing in the + * // number of the interface that needs processing. The interface to + * // service is passed in the second parameter. The first parameter is + * // not used in this case. + * xHigherPriorityTaskWoken = pdFALSE; + * xTimerPendFunctionCallFromISR( vProcessInterface, NULL, ( uint32_t ) xInterfaceToService, &xHigherPriorityTaskWoken ); + * + * // If xHigherPriorityTaskWoken is now set to pdTRUE then a context + * // switch should be requested. The macro used is port specific and will + * // be either portYIELD_FROM_ISR() or portEND_SWITCHING_ISR() - refer to + * // the documentation page for the port being used. + * portYIELD_FROM_ISR( xHigherPriorityTaskWoken ); + * + * } + * @endverbatim + */ +BaseType_t xTimerPendFunctionCallFromISR( PendedFunction_t xFunctionToPend, void *pvParameter1, uint32_t ulParameter2, BaseType_t *pxHigherPriorityTaskWoken ) PRIVILEGED_FUNCTION; + + /** + * BaseType_t xTimerPendFunctionCall( PendedFunction_t xFunctionToPend, + * void *pvParameter1, + * uint32_t ulParameter2, + * TickType_t xTicksToWait ); + * + * + * Used to defer the execution of a function to the RTOS daemon task (the timer + * service task, hence this function is implemented in timers.c and is prefixed + * with 'Timer'). + * + * @param xFunctionToPend The function to execute from the timer service/ + * daemon task. The function must conform to the PendedFunction_t + * prototype. + * + * @param pvParameter1 The value of the callback function's first parameter. + * The parameter has a void * type to allow it to be used to pass any type. + * For example, unsigned longs can be cast to a void *, or the void * can be + * used to point to a structure. + * + * @param ulParameter2 The value of the callback function's second parameter. + * + * @param xTicksToWait Calling this function will result in a message being + * sent to the timer daemon task on a queue. xTicksToWait is the amount of + * time the calling task should remain in the Blocked state (so not using any + * processing time) for space to become available on the timer queue if the + * queue is found to be full. + * + * @return pdPASS is returned if the message was successfully sent to the + * timer daemon task, otherwise pdFALSE is returned. + * + */ +BaseType_t xTimerPendFunctionCall( PendedFunction_t xFunctionToPend, void *pvParameter1, uint32_t ulParameter2, TickType_t xTicksToWait ) PRIVILEGED_FUNCTION; + +/** + * const char * const pcTimerGetName( TimerHandle_t xTimer ); + * + * Returns the name that was assigned to a timer when the timer was created. + * + * @param xTimer The handle of the timer being queried. + * + * @return The name assigned to the timer specified by the xTimer parameter. 
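/*
 * Editor's sketch (not part of the kernel sources): deferring work to the
 * timer/daemon task from task context with xTimerPendFunctionCall(), which,
 * unlike the FromISR variant above, has no worked example in this header.
 * prvDeferredWork() and the parameter values are illustrative only.
 */
#include <stdint.h>
#include "FreeRTOS.h"
#include "timers.h"

static void prvDeferredWork( void *pvParameter1, uint32_t ulParameter2 )
{
    /* Executes in the context of the timer service/daemon task. */
    ( void ) pvParameter1;
    ( void ) ulParameter2;
}

void vQueueDeferredWork( void )
{
    /* Wait up to 10 ticks for space on the timer command queue. */
    if( xTimerPendFunctionCall( prvDeferredWork, NULL, 0U, 10 ) != pdPASS )
    {
        /* The request could not be queued. */
    }
}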
+ */ +const char * pcTimerGetName( TimerHandle_t xTimer ) PRIVILEGED_FUNCTION; /*lint !e971 Unqualified char types are allowed for strings and single characters only. */ + +/** + * void vTimerSetReloadMode( TimerHandle_t xTimer, const UBaseType_t uxAutoReload ); + * + * Updates a timer to be either an autoreload timer, in which case the timer + * automatically resets itself each time it expires, or a one shot timer, in + * which case the timer will only expire once unless it is manually restarted. + * + * @param xTimer The handle of the timer being updated. + * + * @param uxAutoReload If uxAutoReload is set to pdTRUE then the timer will + * expire repeatedly with a frequency set by the timer's period (see the + * xTimerPeriodInTicks parameter of the xTimerCreate() API function). If + * uxAutoReload is set to pdFALSE then the timer will be a one-shot timer and + * enter the dormant state after it expires. + */ +void vTimerSetReloadMode( TimerHandle_t xTimer, const UBaseType_t uxAutoReload ) PRIVILEGED_FUNCTION; + +/** + * TickType_t xTimerGetPeriod( TimerHandle_t xTimer ); + * + * Returns the period of a timer. + * + * @param xTimer The handle of the timer being queried. + * + * @return The period of the timer in ticks. + */ +TickType_t xTimerGetPeriod( TimerHandle_t xTimer ) PRIVILEGED_FUNCTION; + +/** +* TickType_t xTimerGetExpiryTime( TimerHandle_t xTimer ); +* +* Returns the time in ticks at which the timer will expire. If this is less +* than the current tick count then the expiry time has overflowed from the +* current time. +* +* @param xTimer The handle of the timer being queried. +* +* @return If the timer is running then the time in ticks at which the timer +* will next expire is returned. If the timer is not running then the return +* value is undefined. +*/ +TickType_t xTimerGetExpiryTime( TimerHandle_t xTimer ) PRIVILEGED_FUNCTION; + +/* + * Functions beyond this part are not part of the public API and are intended + * for use by the kernel only. + */ +BaseType_t xTimerCreateTimerTask( void ) PRIVILEGED_FUNCTION; +BaseType_t xTimerGenericCommand( TimerHandle_t xTimer, const BaseType_t xCommandID, const TickType_t xOptionalValue, BaseType_t * const pxHigherPriorityTaskWoken, const TickType_t xTicksToWait ) PRIVILEGED_FUNCTION; + +#if( configUSE_TRACE_FACILITY == 1 ) + void vTimerSetTimerNumber( TimerHandle_t xTimer, UBaseType_t uxTimerNumber ) PRIVILEGED_FUNCTION; + UBaseType_t uxTimerGetTimerNumber( TimerHandle_t xTimer ) PRIVILEGED_FUNCTION; +#endif + +#ifdef __cplusplus +} +#endif +#endif /* TIMERS_H */ + + + diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/FreeRTOS/Source/list.c b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/FreeRTOS/Source/list.c new file mode 100644 index 0000000..9875b90 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/FreeRTOS/Source/list.c @@ -0,0 +1,198 @@ +/* + * FreeRTOS Kernel V10.2.1 + * Copyright (C) 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. 
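/*
 * Editor's sketch (not part of the kernel sources): computing how long a
 * running timer has left using xTimerGetExpiryTime() from timers.h above and
 * the current tick count.  Unsigned tick arithmetic keeps the subtraction
 * correct even when the expiry time has wrapped past the current tick count.
 * xExampleTimer is assumed to be an active timer created elsewhere.
 */
#include "FreeRTOS.h"
#include "task.h"
#include "timers.h"

extern TimerHandle_t xExampleTimer;

TickType_t xTicksUntilExpiry( void )
{
    /* Only meaningful while the timer is actually running. */
    configASSERT( xTimerIsTimerActive( xExampleTimer ) != pdFALSE );

    return xTimerGetExpiryTime( xExampleTimer ) - xTaskGetTickCount();
}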
+ * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * http://www.FreeRTOS.org + * http://aws.amazon.com/freertos + * + * 1 tab == 4 spaces! + */ + + +#include +#include "FreeRTOS.h" +#include "list.h" + +/*----------------------------------------------------------- + * PUBLIC LIST API documented in list.h + *----------------------------------------------------------*/ + +void vListInitialise( List_t * const pxList ) +{ + /* The list structure contains a list item which is used to mark the + end of the list. To initialise the list the list end is inserted + as the only list entry. */ + pxList->pxIndex = ( ListItem_t * ) &( pxList->xListEnd ); /*lint !e826 !e740 !e9087 The mini list structure is used as the list end to save RAM. This is checked and valid. */ + + /* The list end value is the highest possible value in the list to + ensure it remains at the end of the list. */ + pxList->xListEnd.xItemValue = portMAX_DELAY; + + /* The list end next and previous pointers point to itself so we know + when the list is empty. */ + pxList->xListEnd.pxNext = ( ListItem_t * ) &( pxList->xListEnd ); /*lint !e826 !e740 !e9087 The mini list structure is used as the list end to save RAM. This is checked and valid. */ + pxList->xListEnd.pxPrevious = ( ListItem_t * ) &( pxList->xListEnd );/*lint !e826 !e740 !e9087 The mini list structure is used as the list end to save RAM. This is checked and valid. */ + + pxList->uxNumberOfItems = ( UBaseType_t ) 0U; + + /* Write known values into the list if + configUSE_LIST_DATA_INTEGRITY_CHECK_BYTES is set to 1. */ + listSET_LIST_INTEGRITY_CHECK_1_VALUE( pxList ); + listSET_LIST_INTEGRITY_CHECK_2_VALUE( pxList ); +} +/*-----------------------------------------------------------*/ + +void vListInitialiseItem( ListItem_t * const pxItem ) +{ + /* Make sure the list item is not recorded as being on a list. */ + pxItem->pxContainer = NULL; + + /* Write known values into the list item if + configUSE_LIST_DATA_INTEGRITY_CHECK_BYTES is set to 1. */ + listSET_FIRST_LIST_ITEM_INTEGRITY_CHECK_VALUE( pxItem ); + listSET_SECOND_LIST_ITEM_INTEGRITY_CHECK_VALUE( pxItem ); +} +/*-----------------------------------------------------------*/ + +void vListInsertEnd( List_t * const pxList, ListItem_t * const pxNewListItem ) +{ +ListItem_t * const pxIndex = pxList->pxIndex; + + /* Only effective when configASSERT() is also defined, these tests may catch + the list data structures being overwritten in memory. 
They will not catch + data errors caused by incorrect configuration or use of FreeRTOS. */ + listTEST_LIST_INTEGRITY( pxList ); + listTEST_LIST_ITEM_INTEGRITY( pxNewListItem ); + + /* Insert a new list item into pxList, but rather than sort the list, + makes the new list item the last item to be removed by a call to + listGET_OWNER_OF_NEXT_ENTRY(). */ + pxNewListItem->pxNext = pxIndex; + pxNewListItem->pxPrevious = pxIndex->pxPrevious; + + /* Only used during decision coverage testing. */ + mtCOVERAGE_TEST_DELAY(); + + pxIndex->pxPrevious->pxNext = pxNewListItem; + pxIndex->pxPrevious = pxNewListItem; + + /* Remember which list the item is in. */ + pxNewListItem->pxContainer = pxList; + + ( pxList->uxNumberOfItems )++; +} +/*-----------------------------------------------------------*/ + +void vListInsert( List_t * const pxList, ListItem_t * const pxNewListItem ) +{ +ListItem_t *pxIterator; +const TickType_t xValueOfInsertion = pxNewListItem->xItemValue; + + /* Only effective when configASSERT() is also defined, these tests may catch + the list data structures being overwritten in memory. They will not catch + data errors caused by incorrect configuration or use of FreeRTOS. */ + listTEST_LIST_INTEGRITY( pxList ); + listTEST_LIST_ITEM_INTEGRITY( pxNewListItem ); + + /* Insert the new list item into the list, sorted in xItemValue order. + + If the list already contains a list item with the same item value then the + new list item should be placed after it. This ensures that TCBs which are + stored in ready lists (all of which have the same xItemValue value) get a + share of the CPU. However, if the xItemValue is the same as the back marker + the iteration loop below will not end. Therefore the value is checked + first, and the algorithm slightly modified if necessary. */ + if( xValueOfInsertion == portMAX_DELAY ) + { + pxIterator = pxList->xListEnd.pxPrevious; + } + else + { + /* *** NOTE *********************************************************** + If you find your application is crashing here then likely causes are + listed below. In addition see https://www.freertos.org/FAQHelp.html for + more tips, and ensure configASSERT() is defined! + https://www.freertos.org/a00110.html#configASSERT + + 1) Stack overflow - + see https://www.freertos.org/Stacks-and-stack-overflow-checking.html + 2) Incorrect interrupt priority assignment, especially on Cortex-M + parts where numerically high priority values denote low actual + interrupt priorities, which can seem counter intuitive. See + https://www.freertos.org/RTOS-Cortex-M3-M4.html and the definition + of configMAX_SYSCALL_INTERRUPT_PRIORITY on + https://www.freertos.org/a00110.html + 3) Calling an API function from within a critical section or when + the scheduler is suspended, or calling an API function that does + not end in "FromISR" from an interrupt. + 4) Using a queue or semaphore before it has been initialised or + before the scheduler has been started (are interrupts firing + before vTaskStartScheduler() has been called?). + **********************************************************************/ + + for( pxIterator = ( ListItem_t * ) &( pxList->xListEnd ); pxIterator->pxNext->xItemValue <= xValueOfInsertion; pxIterator = pxIterator->pxNext ) /*lint !e826 !e740 !e9087 The mini list structure is used as the list end to save RAM. This is checked and valid. *//*lint !e440 The iterator moves to a different value, not xValueOfInsertion. */ + { + /* There is nothing to do here, just iterating to the wanted + insertion position. 
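/*
 * Minimal sketch of the ordering behaviour described above: vListInsert()
 * keeps items sorted by xItemValue and places equal values after existing
 * entries. The list API is intended for kernel use, so this is purely
 * illustrative; the function name is hypothetical.
 */
#include "FreeRTOS.h"
#include "list.h"

void vListOrderingSketch( void )
{
    static List_t xList;
    static ListItem_t xItemA, xItemB;

    vListInitialise( &xList );
    vListInitialiseItem( &xItemA );
    vListInitialiseItem( &xItemB );

    /* Equal item values: the second insert lands after the first, which is
       what gives equal-priority tasks their share of the CPU. */
    listSET_LIST_ITEM_VALUE( &xItemA, 10 );
    listSET_LIST_ITEM_VALUE( &xItemB, 10 );
    vListInsert( &xList, &xItemA );
    vListInsert( &xList, &xItemB );

    configASSERT( listCURRENT_LIST_LENGTH( &xList ) == 2 );
}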
*/ + } + } + + pxNewListItem->pxNext = pxIterator->pxNext; + pxNewListItem->pxNext->pxPrevious = pxNewListItem; + pxNewListItem->pxPrevious = pxIterator; + pxIterator->pxNext = pxNewListItem; + + /* Remember which list the item is in. This allows fast removal of the + item later. */ + pxNewListItem->pxContainer = pxList; + + ( pxList->uxNumberOfItems )++; +} +/*-----------------------------------------------------------*/ + +UBaseType_t uxListRemove( ListItem_t * const pxItemToRemove ) +{ +/* The list item knows which list it is in. Obtain the list from the list +item. */ +List_t * const pxList = pxItemToRemove->pxContainer; + + pxItemToRemove->pxNext->pxPrevious = pxItemToRemove->pxPrevious; + pxItemToRemove->pxPrevious->pxNext = pxItemToRemove->pxNext; + + /* Only used during decision coverage testing. */ + mtCOVERAGE_TEST_DELAY(); + + /* Make sure the index is left pointing to a valid item. */ + if( pxList->pxIndex == pxItemToRemove ) + { + pxList->pxIndex = pxItemToRemove->pxPrevious; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + pxItemToRemove->pxContainer = NULL; + ( pxList->uxNumberOfItems )--; + + return pxList->uxNumberOfItems; +} +/*-----------------------------------------------------------*/ + diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/FreeRTOS/Source/portable/GCC/ARM_CM7/r0p1/port.c b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/FreeRTOS/Source/portable/GCC/ARM_CM7/r0p1/port.c new file mode 100644 index 0000000..b5f5b65 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/FreeRTOS/Source/portable/GCC/ARM_CM7/r0p1/port.c @@ -0,0 +1,765 @@ +/* + * FreeRTOS Kernel V10.2.1 + * Copyright (C) 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * http://www.FreeRTOS.org + * http://aws.amazon.com/freertos + * + * 1 tab == 4 spaces! + */ + +/*----------------------------------------------------------- + * Implementation of functions defined in portable.h for the ARM CM4F port. + *----------------------------------------------------------*/ + +/* Scheduler includes. */ +#include "FreeRTOS.h" +#include "task.h" + +#ifndef __VFP_FP__ + #error This port can only be used when the project options are configured to enable hardware floating point support. 
+#endif + +#ifndef configSYSTICK_CLOCK_HZ + #define configSYSTICK_CLOCK_HZ configCPU_CLOCK_HZ + /* Ensure the SysTick is clocked at the same frequency as the core. */ + #define portNVIC_SYSTICK_CLK_BIT ( 1UL << 2UL ) +#else + /* The way the SysTick is clocked is not modified in case it is not the same + as the core. */ + #define portNVIC_SYSTICK_CLK_BIT ( 0 ) +#endif + +/* Constants required to manipulate the core. Registers first... */ +#define portNVIC_SYSTICK_CTRL_REG ( * ( ( volatile uint32_t * ) 0xe000e010 ) ) +#define portNVIC_SYSTICK_LOAD_REG ( * ( ( volatile uint32_t * ) 0xe000e014 ) ) +#define portNVIC_SYSTICK_CURRENT_VALUE_REG ( * ( ( volatile uint32_t * ) 0xe000e018 ) ) +#define portNVIC_SYSPRI2_REG ( * ( ( volatile uint32_t * ) 0xe000ed20 ) ) +/* ...then bits in the registers. */ +#define portNVIC_SYSTICK_INT_BIT ( 1UL << 1UL ) +#define portNVIC_SYSTICK_ENABLE_BIT ( 1UL << 0UL ) +#define portNVIC_SYSTICK_COUNT_FLAG_BIT ( 1UL << 16UL ) +#define portNVIC_PENDSVCLEAR_BIT ( 1UL << 27UL ) +#define portNVIC_PEND_SYSTICK_CLEAR_BIT ( 1UL << 25UL ) + +#define portNVIC_PENDSV_PRI ( ( ( uint32_t ) configKERNEL_INTERRUPT_PRIORITY ) << 16UL ) +#define portNVIC_SYSTICK_PRI ( ( ( uint32_t ) configKERNEL_INTERRUPT_PRIORITY ) << 24UL ) + +/* Constants required to check the validity of an interrupt priority. */ +#define portFIRST_USER_INTERRUPT_NUMBER ( 16 ) +#define portNVIC_IP_REGISTERS_OFFSET_16 ( 0xE000E3F0 ) +#define portAIRCR_REG ( * ( ( volatile uint32_t * ) 0xE000ED0C ) ) +#define portMAX_8_BIT_VALUE ( ( uint8_t ) 0xff ) +#define portTOP_BIT_OF_BYTE ( ( uint8_t ) 0x80 ) +#define portMAX_PRIGROUP_BITS ( ( uint8_t ) 7 ) +#define portPRIORITY_GROUP_MASK ( 0x07UL << 8UL ) +#define portPRIGROUP_SHIFT ( 8UL ) + +/* Masks off all bits but the VECTACTIVE bits in the ICSR register. */ +#define portVECTACTIVE_MASK ( 0xFFUL ) + +/* Constants required to manipulate the VFP. */ +#define portFPCCR ( ( volatile uint32_t * ) 0xe000ef34 ) /* Floating point context control register. */ +#define portASPEN_AND_LSPEN_BITS ( 0x3UL << 30UL ) + +/* Constants required to set up the initial stack. */ +#define portINITIAL_XPSR ( 0x01000000 ) +#define portINITIAL_EXC_RETURN ( 0xfffffffd ) + +/* The systick is a 24-bit counter. */ +#define portMAX_24_BIT_NUMBER ( 0xffffffUL ) + +/* For strict compliance with the Cortex-M spec the task start address should +have bit-0 clear, as it is loaded into the PC on exit from an ISR. */ +#define portSTART_ADDRESS_MASK ( ( StackType_t ) 0xfffffffeUL ) + +/* A fiddle factor to estimate the number of SysTick counts that would have +occurred while the SysTick counter is stopped during tickless idle +calculations. */ +#define portMISSED_COUNTS_FACTOR ( 45UL ) + +/* Let the user override the pre-loading of the initial LR with the address of +prvTaskExitError() in case it messes up unwinding of the stack in the +debugger. */ +#ifdef configTASK_RETURN_ADDRESS + #define portTASK_RETURN_ADDRESS configTASK_RETURN_ADDRESS +#else + #define portTASK_RETURN_ADDRESS prvTaskExitError +#endif + +/* + * Setup the timer to generate the tick interrupts. The implementation in this + * file is weak to allow application writers to change the timer used to + * generate the tick interrupt. + */ +void vPortSetupTimerInterrupt( void ); + +/* + * Exception handlers. + */ +void xPortPendSVHandler( void ) __attribute__ (( naked )); +void xPortSysTickHandler( void ); +void vPortSVCHandler( void ) __attribute__ (( naked )); + +/* + * Start first task is a separate function so it can be tested in isolation. 
+ */ +static void prvPortStartFirstTask( void ) __attribute__ (( naked )); + +/* + * Function to enable the VFP. + */ +static void vPortEnableVFP( void ) __attribute__ (( naked )); + +/* + * Used to catch tasks that attempt to return from their implementing function. + */ +static void prvTaskExitError( void ); + +/*-----------------------------------------------------------*/ + +/* Each task maintains its own interrupt status in the critical nesting +variable. */ +static UBaseType_t uxCriticalNesting = 0xaaaaaaaa; + +/* + * The number of SysTick increments that make up one tick period. + */ +#if( configUSE_TICKLESS_IDLE == 1 ) + static uint32_t ulTimerCountsForOneTick = 0; +#endif /* configUSE_TICKLESS_IDLE */ + +/* + * The maximum number of tick periods that can be suppressed is limited by the + * 24 bit resolution of the SysTick timer. + */ +#if( configUSE_TICKLESS_IDLE == 1 ) + static uint32_t xMaximumPossibleSuppressedTicks = 0; +#endif /* configUSE_TICKLESS_IDLE */ + +/* + * Compensate for the CPU cycles that pass while the SysTick is stopped (low + * power functionality only. + */ +#if( configUSE_TICKLESS_IDLE == 1 ) + static uint32_t ulStoppedTimerCompensation = 0; +#endif /* configUSE_TICKLESS_IDLE */ + +/* + * Used by the portASSERT_IF_INTERRUPT_PRIORITY_INVALID() macro to ensure + * FreeRTOS API functions are not called from interrupts that have been assigned + * a priority above configMAX_SYSCALL_INTERRUPT_PRIORITY. + */ +#if( configASSERT_DEFINED == 1 ) + static uint8_t ucMaxSysCallPriority = 0; + static uint32_t ulMaxPRIGROUPValue = 0; + static const volatile uint8_t * const pcInterruptPriorityRegisters = ( const volatile uint8_t * const ) portNVIC_IP_REGISTERS_OFFSET_16; +#endif /* configASSERT_DEFINED */ + +/*-----------------------------------------------------------*/ + +/* + * See header file for description. + */ +StackType_t *pxPortInitialiseStack( StackType_t *pxTopOfStack, TaskFunction_t pxCode, void *pvParameters ) +{ + /* Simulate the stack frame as it would be created by a context switch + interrupt. */ + + /* Offset added to account for the way the MCU uses the stack on entry/exit + of interrupts, and to ensure alignment. */ + pxTopOfStack--; + + *pxTopOfStack = portINITIAL_XPSR; /* xPSR */ + pxTopOfStack--; + *pxTopOfStack = ( ( StackType_t ) pxCode ) & portSTART_ADDRESS_MASK; /* PC */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */ + + /* Save code space by skipping register initialisation. */ + pxTopOfStack -= 5; /* R12, R3, R2 and R1. */ + *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */ + + /* A save method is being used that requires each task to maintain its + own exec return value. */ + pxTopOfStack--; + *pxTopOfStack = portINITIAL_EXC_RETURN; + + pxTopOfStack -= 8; /* R11, R10, R9, R8, R7, R6, R5 and R4. */ + + return pxTopOfStack; +} +/*-----------------------------------------------------------*/ + +static void prvTaskExitError( void ) +{ +volatile uint32_t ulDummy = 0; + + /* A function that implements a task must not exit or attempt to return to + its caller as there is nothing to return to. If a task wants to exit it + should instead call vTaskDelete( NULL ). + + Artificially force an assert() to be triggered if configASSERT() is + defined, then stop here so application writers can catch the error. 
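/*
 * Sketch of the pattern referred to above: a task function must never
 * return; when its work is done it deletes itself so prvTaskExitError() is
 * never reached. The task name is hypothetical.
 */
#include "FreeRTOS.h"
#include "task.h"

void vOneShotTask( void *pvParameters )
{
    ( void ) pvParameters;

    /* ...perform the one-off work here... */

    /* Do not return from the task function; delete the task instead. */
    vTaskDelete( NULL );
}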
*/ + configASSERT( uxCriticalNesting == ~0UL ); + portDISABLE_INTERRUPTS(); + while( ulDummy == 0 ) + { + /* This file calls prvTaskExitError() after the scheduler has been + started to remove a compiler warning about the function being defined + but never called. ulDummy is used purely to quieten other warnings + about code appearing after this function is called - making ulDummy + volatile makes the compiler think the function could return and + therefore not output an 'unreachable code' warning for code that appears + after it. */ + } +} +/*-----------------------------------------------------------*/ + +void vPortSVCHandler( void ) +{ + __asm volatile ( + " ldr r3, pxCurrentTCBConst2 \n" /* Restore the context. */ + " ldr r1, [r3] \n" /* Use pxCurrentTCBConst to get the pxCurrentTCB address. */ + " ldr r0, [r1] \n" /* The first item in pxCurrentTCB is the task top of stack. */ + " ldmia r0!, {r4-r11, r14} \n" /* Pop the registers that are not automatically saved on exception entry and the critical nesting count. */ + " msr psp, r0 \n" /* Restore the task stack pointer. */ + " isb \n" + " mov r0, #0 \n" + " msr basepri, r0 \n" + " bx r14 \n" + " \n" + " .align 4 \n" + "pxCurrentTCBConst2: .word pxCurrentTCB \n" + ); +} +/*-----------------------------------------------------------*/ + +static void prvPortStartFirstTask( void ) +{ + /* Start the first task. This also clears the bit that indicates the FPU is + in use in case the FPU was used before the scheduler was started - which + would otherwise result in the unnecessary leaving of space in the SVC stack + for lazy saving of FPU registers. */ + __asm volatile( + " ldr r0, =0xE000ED08 \n" /* Use the NVIC offset register to locate the stack. */ + " ldr r0, [r0] \n" + " ldr r0, [r0] \n" + " msr msp, r0 \n" /* Set the msp back to the start of the stack. */ + " mov r0, #0 \n" /* Clear the bit that indicates the FPU is in use, see comment above. */ + " msr control, r0 \n" + " cpsie i \n" /* Globally enable interrupts. */ + " cpsie f \n" + " dsb \n" + " isb \n" + " svc 0 \n" /* System call to start first task. */ + " nop \n" + ); +} +/*-----------------------------------------------------------*/ + +/* + * See header file for description. + */ +BaseType_t xPortStartScheduler( void ) +{ + /* configMAX_SYSCALL_INTERRUPT_PRIORITY must not be set to 0. + See http://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ + configASSERT( configMAX_SYSCALL_INTERRUPT_PRIORITY ); + + #if( configASSERT_DEFINED == 1 ) + { + volatile uint32_t ulOriginalPriority; + volatile uint8_t * const pucFirstUserPriorityRegister = ( volatile uint8_t * const ) ( portNVIC_IP_REGISTERS_OFFSET_16 + portFIRST_USER_INTERRUPT_NUMBER ); + volatile uint8_t ucMaxPriorityValue; + + /* Determine the maximum priority from which ISR safe FreeRTOS API + functions can be called. ISR safe functions are those that end in + "FromISR". FreeRTOS maintains separate thread and ISR API functions to + ensure interrupt entry is as fast and simple as possible. + + Save the interrupt priority value that is about to be clobbered. */ + ulOriginalPriority = *pucFirstUserPriorityRegister; + + /* Determine the number of priority bits available. First write to all + possible bits. */ + *pucFirstUserPriorityRegister = portMAX_8_BIT_VALUE; + + /* Read the value back to see how many bits stuck. */ + ucMaxPriorityValue = *pucFirstUserPriorityRegister; + + /* Use the same mask on the maximum system call priority. 
*/ + ucMaxSysCallPriority = configMAX_SYSCALL_INTERRUPT_PRIORITY & ucMaxPriorityValue; + + /* Calculate the maximum acceptable priority group value for the number + of bits read back. */ + ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS; + while( ( ucMaxPriorityValue & portTOP_BIT_OF_BYTE ) == portTOP_BIT_OF_BYTE ) + { + ulMaxPRIGROUPValue--; + ucMaxPriorityValue <<= ( uint8_t ) 0x01; + } + + #ifdef __NVIC_PRIO_BITS + { + /* Check the CMSIS configuration that defines the number of + priority bits matches the number of priority bits actually queried + from the hardware. */ + configASSERT( ( portMAX_PRIGROUP_BITS - ulMaxPRIGROUPValue ) == __NVIC_PRIO_BITS ); + } + #endif + + #ifdef configPRIO_BITS + { + /* Check the FreeRTOS configuration that defines the number of + priority bits matches the number of priority bits actually queried + from the hardware. */ + configASSERT( ( portMAX_PRIGROUP_BITS - ulMaxPRIGROUPValue ) == configPRIO_BITS ); + } + #endif + + /* Shift the priority group value back to its position within the AIRCR + register. */ + ulMaxPRIGROUPValue <<= portPRIGROUP_SHIFT; + ulMaxPRIGROUPValue &= portPRIORITY_GROUP_MASK; + + /* Restore the clobbered interrupt priority register to its original + value. */ + *pucFirstUserPriorityRegister = ulOriginalPriority; + } + #endif /* conifgASSERT_DEFINED */ + + /* Make PendSV and SysTick the lowest priority interrupts. */ + portNVIC_SYSPRI2_REG |= portNVIC_PENDSV_PRI; + portNVIC_SYSPRI2_REG |= portNVIC_SYSTICK_PRI; + + /* Start the timer that generates the tick ISR. Interrupts are disabled + here already. */ + vPortSetupTimerInterrupt(); + + /* Initialise the critical nesting count ready for the first task. */ + uxCriticalNesting = 0; + + /* Ensure the VFP is enabled - it should be anyway. */ + vPortEnableVFP(); + + /* Lazy save always. */ + *( portFPCCR ) |= portASPEN_AND_LSPEN_BITS; + + /* Start the first task. */ + prvPortStartFirstTask(); + + /* Should never get here as the tasks will now be executing! Call the task + exit error function to prevent compiler warnings about a static function + not being called in the case that the application writer overrides this + functionality by defining configTASK_RETURN_ADDRESS. Call + vTaskSwitchContext() so link time optimisation does not remove the + symbol. */ + vTaskSwitchContext(); + prvTaskExitError(); + + /* Should not get here! */ + return 0; +} +/*-----------------------------------------------------------*/ + +void vPortEndScheduler( void ) +{ + /* Not implemented in ports where there is nothing to return to. + Artificially force an assert. */ + configASSERT( uxCriticalNesting == 1000UL ); +} +/*-----------------------------------------------------------*/ + +void vPortEnterCritical( void ) +{ + portDISABLE_INTERRUPTS(); + uxCriticalNesting++; + + /* This is not the interrupt safe version of the enter critical function so + assert() if it is being called from an interrupt context. Only API + functions that end in "FromISR" can be used in an interrupt. Only assert if + the critical nesting count is 1 to protect against recursive calls if the + assert function also uses a critical section. 
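/*
 * Usage sketch for the critical section rules enforced above: task-level
 * code uses taskENTER_CRITICAL()/taskEXIT_CRITICAL(), which resolve to
 * vPortEnterCritical()/vPortExitCritical(), while interrupt handlers must
 * use the FromISR variants. Function and variable names are hypothetical.
 */
#include <stdint.h>
#include "FreeRTOS.h"
#include "task.h"

static volatile uint32_t ulSharedCounter = 0;

void vTaskSideUpdate( void )
{
    taskENTER_CRITICAL();
    {
        ulSharedCounter++;
    }
    taskEXIT_CRITICAL();
}

void vIsrSideUpdate( void ) /* Called from an interrupt handler. */
{
    UBaseType_t uxSavedInterruptStatus = taskENTER_CRITICAL_FROM_ISR();
    {
        ulSharedCounter++;
    }
    taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus );
}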
*/ + if( uxCriticalNesting == 1 ) + { + configASSERT( ( portNVIC_INT_CTRL_REG & portVECTACTIVE_MASK ) == 0 ); + } +} +/*-----------------------------------------------------------*/ + +void vPortExitCritical( void ) +{ + configASSERT( uxCriticalNesting ); + uxCriticalNesting--; + if( uxCriticalNesting == 0 ) + { + portENABLE_INTERRUPTS(); + } +} +/*-----------------------------------------------------------*/ + +void xPortPendSVHandler( void ) +{ + /* This is a naked function. */ + + __asm volatile + ( + " mrs r0, psp \n" + " isb \n" + " \n" + " ldr r3, pxCurrentTCBConst \n" /* Get the location of the current TCB. */ + " ldr r2, [r3] \n" + " \n" + " tst r14, #0x10 \n" /* Is the task using the FPU context? If so, push high vfp registers. */ + " it eq \n" + " vstmdbeq r0!, {s16-s31} \n" + " \n" + " stmdb r0!, {r4-r11, r14} \n" /* Save the core registers. */ + " str r0, [r2] \n" /* Save the new top of stack into the first member of the TCB. */ + " \n" + " stmdb sp!, {r0, r3} \n" + " mov r0, %0 \n" + " cpsid i \n" /* Errata workaround. */ + " msr basepri, r0 \n" + " dsb \n" + " isb \n" + " cpsie i \n" /* Errata workaround. */ + " bl vTaskSwitchContext \n" + " mov r0, #0 \n" + " msr basepri, r0 \n" + " ldmia sp!, {r0, r3} \n" + " \n" + " ldr r1, [r3] \n" /* The first item in pxCurrentTCB is the task top of stack. */ + " ldr r0, [r1] \n" + " \n" + " ldmia r0!, {r4-r11, r14} \n" /* Pop the core registers. */ + " \n" + " tst r14, #0x10 \n" /* Is the task using the FPU context? If so, pop the high vfp registers too. */ + " it eq \n" + " vldmiaeq r0!, {s16-s31} \n" + " \n" + " msr psp, r0 \n" + " isb \n" + " \n" + #ifdef WORKAROUND_PMU_CM001 /* XMC4000 specific errata workaround. */ + #if WORKAROUND_PMU_CM001 == 1 + " push { r14 } \n" + " pop { pc } \n" + #endif + #endif + " \n" + " bx r14 \n" + " \n" + " .align 4 \n" + "pxCurrentTCBConst: .word pxCurrentTCB \n" + ::"i"(configMAX_SYSCALL_INTERRUPT_PRIORITY) + ); +} +/*-----------------------------------------------------------*/ + +void xPortSysTickHandler( void ) +{ + /* The SysTick runs at the lowest interrupt priority, so when this interrupt + executes all interrupts must be unmasked. There is therefore no need to + save and then restore the interrupt mask value as its value is already + known. */ + portDISABLE_INTERRUPTS(); + { + /* Increment the RTOS tick. */ + if( xTaskIncrementTick() != pdFALSE ) + { + /* A context switch is required. Context switching is performed in + the PendSV interrupt. Pend the PendSV interrupt. */ + portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT; + } + } + portENABLE_INTERRUPTS(); +} +/*-----------------------------------------------------------*/ + +#if( configUSE_TICKLESS_IDLE == 1 ) + + __attribute__((weak)) void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime ) + { + uint32_t ulReloadValue, ulCompleteTickPeriods, ulCompletedSysTickDecrements; + TickType_t xModifiableIdleTime; + + /* Make sure the SysTick reload value does not overflow the counter. */ + if( xExpectedIdleTime > xMaximumPossibleSuppressedTicks ) + { + xExpectedIdleTime = xMaximumPossibleSuppressedTicks; + } + + /* Stop the SysTick momentarily. The time the SysTick is stopped for + is accounted for as best it can be, but using the tickless mode will + inevitably result in some tiny drift of the time maintained by the + kernel with respect to calendar time. */ + portNVIC_SYSTICK_CTRL_REG &= ~portNVIC_SYSTICK_ENABLE_BIT; + + /* Calculate the reload value required to wait xExpectedIdleTime + tick periods. 
-1 is used because this code will execute part way + through one of the tick periods. */ + ulReloadValue = portNVIC_SYSTICK_CURRENT_VALUE_REG + ( ulTimerCountsForOneTick * ( xExpectedIdleTime - 1UL ) ); + if( ulReloadValue > ulStoppedTimerCompensation ) + { + ulReloadValue -= ulStoppedTimerCompensation; + } + + /* Enter a critical section but don't use the taskENTER_CRITICAL() + method as that will mask interrupts that should exit sleep mode. */ + __asm volatile( "cpsid i" ::: "memory" ); + __asm volatile( "dsb" ); + __asm volatile( "isb" ); + + /* If a context switch is pending or a task is waiting for the scheduler + to be unsuspended then abandon the low power entry. */ + if( eTaskConfirmSleepModeStatus() == eAbortSleep ) + { + /* Restart from whatever is left in the count register to complete + this tick period. */ + portNVIC_SYSTICK_LOAD_REG = portNVIC_SYSTICK_CURRENT_VALUE_REG; + + /* Restart SysTick. */ + portNVIC_SYSTICK_CTRL_REG |= portNVIC_SYSTICK_ENABLE_BIT; + + /* Reset the reload register to the value required for normal tick + periods. */ + portNVIC_SYSTICK_LOAD_REG = ulTimerCountsForOneTick - 1UL; + + /* Re-enable interrupts - see comments above the cpsid instruction() + above. */ + __asm volatile( "cpsie i" ::: "memory" ); + } + else + { + /* Set the new reload value. */ + portNVIC_SYSTICK_LOAD_REG = ulReloadValue; + + /* Clear the SysTick count flag and set the count value back to + zero. */ + portNVIC_SYSTICK_CURRENT_VALUE_REG = 0UL; + + /* Restart SysTick. */ + portNVIC_SYSTICK_CTRL_REG |= portNVIC_SYSTICK_ENABLE_BIT; + + /* Sleep until something happens. configPRE_SLEEP_PROCESSING() can + set its parameter to 0 to indicate that its implementation contains + its own wait for interrupt or wait for event instruction, and so wfi + should not be executed again. However, the original expected idle + time variable must remain unmodified, so a copy is taken. */ + xModifiableIdleTime = xExpectedIdleTime; + configPRE_SLEEP_PROCESSING( &xModifiableIdleTime ); + if( xModifiableIdleTime > 0 ) + { + __asm volatile( "dsb" ::: "memory" ); + __asm volatile( "wfi" ); + __asm volatile( "isb" ); + } + configPOST_SLEEP_PROCESSING( &xExpectedIdleTime ); + + /* Re-enable interrupts to allow the interrupt that brought the MCU + out of sleep mode to execute immediately. see comments above + __disable_interrupt() call above. */ + __asm volatile( "cpsie i" ::: "memory" ); + __asm volatile( "dsb" ); + __asm volatile( "isb" ); + + /* Disable interrupts again because the clock is about to be stopped + and interrupts that execute while the clock is stopped will increase + any slippage between the time maintained by the RTOS and calendar + time. */ + __asm volatile( "cpsid i" ::: "memory" ); + __asm volatile( "dsb" ); + __asm volatile( "isb" ); + + /* Disable the SysTick clock without reading the + portNVIC_SYSTICK_CTRL_REG register to ensure the + portNVIC_SYSTICK_COUNT_FLAG_BIT is not cleared if it is set. 
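/*
 * Sketch of the application-side hooks referenced above, as they might
 * appear in FreeRTOSConfig.h. In this port the idle time is passed by
 * pointer (see the configPRE_SLEEP_PROCESSING( &xModifiableIdleTime ) call
 * above), so the hook writes 0 through the pointer to signal that it has
 * performed its own sleep. The helper function names are hypothetical.
 */
extern void vApplicationPrepareForSleep( void );
extern void vApplicationRestoreAfterSleep( void );

#define configPRE_SLEEP_PROCESSING( x )                                 \
    do                                                                  \
    {                                                                   \
        vApplicationPrepareForSleep();  /* e.g. gate unused clocks */   \
        *( x ) = 0;                     /* 0: skip the port's wfi */    \
        __asm volatile( "wfi" );        /* sleep here instead */        \
    } while( 0 )

#define configPOST_SLEEP_PROCESSING( x )                                \
    do                                                                  \
    {                                                                   \
        vApplicationRestoreAfterSleep(); /* e.g. restore clock tree */  \
        ( void ) ( x );                                                 \
    } while( 0 )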
Again, + the time the SysTick is stopped for is accounted for as best it can + be, but using the tickless mode will inevitably result in some tiny + drift of the time maintained by the kernel with respect to calendar + time*/ + portNVIC_SYSTICK_CTRL_REG = ( portNVIC_SYSTICK_CLK_BIT | portNVIC_SYSTICK_INT_BIT ); + + /* Determine if the SysTick clock has already counted to zero and + been set back to the current reload value (the reload back being + correct for the entire expected idle time) or if the SysTick is yet + to count to zero (in which case an interrupt other than the SysTick + must have brought the system out of sleep mode). */ + if( ( portNVIC_SYSTICK_CTRL_REG & portNVIC_SYSTICK_COUNT_FLAG_BIT ) != 0 ) + { + uint32_t ulCalculatedLoadValue; + + /* The tick interrupt is already pending, and the SysTick count + reloaded with ulReloadValue. Reset the + portNVIC_SYSTICK_LOAD_REG with whatever remains of this tick + period. */ + ulCalculatedLoadValue = ( ulTimerCountsForOneTick - 1UL ) - ( ulReloadValue - portNVIC_SYSTICK_CURRENT_VALUE_REG ); + + /* Don't allow a tiny value, or values that have somehow + underflowed because the post sleep hook did something + that took too long. */ + if( ( ulCalculatedLoadValue < ulStoppedTimerCompensation ) || ( ulCalculatedLoadValue > ulTimerCountsForOneTick ) ) + { + ulCalculatedLoadValue = ( ulTimerCountsForOneTick - 1UL ); + } + + portNVIC_SYSTICK_LOAD_REG = ulCalculatedLoadValue; + + /* As the pending tick will be processed as soon as this + function exits, the tick value maintained by the tick is stepped + forward by one less than the time spent waiting. */ + ulCompleteTickPeriods = xExpectedIdleTime - 1UL; + } + else + { + /* Something other than the tick interrupt ended the sleep. + Work out how long the sleep lasted rounded to complete tick + periods (not the ulReload value which accounted for part + ticks). */ + ulCompletedSysTickDecrements = ( xExpectedIdleTime * ulTimerCountsForOneTick ) - portNVIC_SYSTICK_CURRENT_VALUE_REG; + + /* How many complete tick periods passed while the processor + was waiting? */ + ulCompleteTickPeriods = ulCompletedSysTickDecrements / ulTimerCountsForOneTick; + + /* The reload value is set to whatever fraction of a single tick + period remains. */ + portNVIC_SYSTICK_LOAD_REG = ( ( ulCompleteTickPeriods + 1UL ) * ulTimerCountsForOneTick ) - ulCompletedSysTickDecrements; + } + + /* Restart SysTick so it runs from portNVIC_SYSTICK_LOAD_REG + again, then set portNVIC_SYSTICK_LOAD_REG back to its standard + value. */ + portNVIC_SYSTICK_CURRENT_VALUE_REG = 0UL; + portNVIC_SYSTICK_CTRL_REG |= portNVIC_SYSTICK_ENABLE_BIT; + vTaskStepTick( ulCompleteTickPeriods ); + portNVIC_SYSTICK_LOAD_REG = ulTimerCountsForOneTick - 1UL; + + /* Exit with interrpts enabled. */ + __asm volatile( "cpsie i" ::: "memory" ); + } + } + +#endif /* #if configUSE_TICKLESS_IDLE */ +/*-----------------------------------------------------------*/ + +/* + * Setup the systick timer to generate the tick interrupts at the required + * frequency. + */ +__attribute__(( weak )) void vPortSetupTimerInterrupt( void ) +{ + /* Calculate the constants required to configure the tick interrupt. 
*/ + #if( configUSE_TICKLESS_IDLE == 1 ) + { + ulTimerCountsForOneTick = ( configSYSTICK_CLOCK_HZ / configTICK_RATE_HZ ); + xMaximumPossibleSuppressedTicks = portMAX_24_BIT_NUMBER / ulTimerCountsForOneTick; + ulStoppedTimerCompensation = portMISSED_COUNTS_FACTOR / ( configCPU_CLOCK_HZ / configSYSTICK_CLOCK_HZ ); + } + #endif /* configUSE_TICKLESS_IDLE */ + + /* Stop and clear the SysTick. */ + portNVIC_SYSTICK_CTRL_REG = 0UL; + portNVIC_SYSTICK_CURRENT_VALUE_REG = 0UL; + + /* Configure SysTick to interrupt at the requested rate. */ + portNVIC_SYSTICK_LOAD_REG = ( configSYSTICK_CLOCK_HZ / configTICK_RATE_HZ ) - 1UL; + portNVIC_SYSTICK_CTRL_REG = ( portNVIC_SYSTICK_CLK_BIT | portNVIC_SYSTICK_INT_BIT | portNVIC_SYSTICK_ENABLE_BIT ); +} +/*-----------------------------------------------------------*/ + +/* This is a naked function. */ +static void vPortEnableVFP( void ) +{ + __asm volatile + ( + " ldr.w r0, =0xE000ED88 \n" /* The FPU enable bits are in the CPACR. */ + " ldr r1, [r0] \n" + " \n" + " orr r1, r1, #( 0xf << 20 ) \n" /* Enable CP10 and CP11 coprocessors, then save back. */ + " str r1, [r0] \n" + " bx r14 " + ); +} +/*-----------------------------------------------------------*/ + +#if( configASSERT_DEFINED == 1 ) + + void vPortValidateInterruptPriority( void ) + { + uint32_t ulCurrentInterrupt; + uint8_t ucCurrentPriority; + + /* Obtain the number of the currently executing interrupt. */ + __asm volatile( "mrs %0, ipsr" : "=r"( ulCurrentInterrupt ) :: "memory" ); + + /* Is the interrupt number a user defined interrupt? */ + if( ulCurrentInterrupt >= portFIRST_USER_INTERRUPT_NUMBER ) + { + /* Look up the interrupt's priority. */ + ucCurrentPriority = pcInterruptPriorityRegisters[ ulCurrentInterrupt ]; + + /* The following assertion will fail if a service routine (ISR) for + an interrupt that has been assigned a priority above + configMAX_SYSCALL_INTERRUPT_PRIORITY calls an ISR safe FreeRTOS API + function. ISR safe FreeRTOS API functions must *only* be called + from interrupts that have been assigned a priority at or below + configMAX_SYSCALL_INTERRUPT_PRIORITY. + + Numerically low interrupt priority numbers represent logically high + interrupt priorities, therefore the priority of the interrupt must + be set to a value equal to or numerically *higher* than + configMAX_SYSCALL_INTERRUPT_PRIORITY. + + Interrupts that use the FreeRTOS API must not be left at their + default priority of zero as that is the highest possible priority, + which is guaranteed to be above configMAX_SYSCALL_INTERRUPT_PRIORITY, + and therefore also guaranteed to be invalid. + + FreeRTOS maintains separate thread and ISR API functions to ensure + interrupt entry is as fast and simple as possible. + + The following links provide detailed information: + http://www.freertos.org/RTOS-Cortex-M3-M4.html + http://www.freertos.org/FAQHelp.html */ + configASSERT( ucCurrentPriority >= ucMaxSysCallPriority ); + } + + /* Priority grouping: The interrupt controller (NVIC) allows the bits + that define each interrupt's priority to be split between bits that + define the interrupt's pre-emption priority bits and bits that define + the interrupt's sub-priority. For simplicity all bits must be defined + to be pre-emption priority bits. The following assertion will fail if + this is not the case (if some bits represent a sub-priority). 
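/*
 * Sketch of STM32/CMSIS-side configuration that satisfies the assertions
 * above: all priority bits are pre-emption bits, and any interrupt that
 * calls ...FromISR() APIs gets a priority numerically at or above
 * configMAX_SYSCALL_INTERRUPT_PRIORITY. The HAL header, the choice of IRQ,
 * and the un-shifted configLIBRARY_MAX_SYSCALL_INTERRUPT_PRIORITY macro are
 * assumptions taken from a typical CubeMX-generated F7 project.
 */
#include "stm32f7xx_hal.h"
#include "FreeRTOS.h"

void vConfigureUserInterruptSketch( void )
{
    /* Equivalent to NVIC_SetPriorityGrouping( 0 ): no sub-priority bits. */
    HAL_NVIC_SetPriorityGrouping( NVIC_PRIORITYGROUP_4 );

    /* Logically at or below configMAX_SYSCALL_INTERRUPT_PRIORITY, so the
       handler may safely use interrupt-safe FreeRTOS API functions. */
    HAL_NVIC_SetPriority( EXTI15_10_IRQn,
                          configLIBRARY_MAX_SYSCALL_INTERRUPT_PRIORITY, 0 );
    HAL_NVIC_EnableIRQ( EXTI15_10_IRQn );
}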
+ + If the application only uses CMSIS libraries for interrupt + configuration then the correct setting can be achieved on all Cortex-M + devices by calling NVIC_SetPriorityGrouping( 0 ); before starting the + scheduler. Note however that some vendor specific peripheral libraries + assume a non-zero priority group setting, in which cases using a value + of zero will result in unpredictable behaviour. */ + configASSERT( ( portAIRCR_REG & portPRIORITY_GROUP_MASK ) <= ulMaxPRIGROUPValue ); + } + +#endif /* configASSERT_DEFINED */ + + diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/FreeRTOS/Source/portable/GCC/ARM_CM7/r0p1/portmacro.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/FreeRTOS/Source/portable/GCC/ARM_CM7/r0p1/portmacro.h new file mode 100644 index 0000000..1d7d653 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/FreeRTOS/Source/portable/GCC/ARM_CM7/r0p1/portmacro.h @@ -0,0 +1,247 @@ +/* + * FreeRTOS Kernel V10.2.1 + * Copyright (C) 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * http://www.FreeRTOS.org + * http://aws.amazon.com/freertos + * + * 1 tab == 4 spaces! + */ + + +#ifndef PORTMACRO_H +#define PORTMACRO_H + +#ifdef __cplusplus +extern "C" { +#endif + +/*----------------------------------------------------------- + * Port specific definitions. + * + * The settings in this file configure FreeRTOS correctly for the + * given hardware and compiler. + * + * These settings should not be altered. + *----------------------------------------------------------- + */ + +/* Type definitions. */ +#define portCHAR char +#define portFLOAT float +#define portDOUBLE double +#define portLONG long +#define portSHORT short +#define portSTACK_TYPE uint32_t +#define portBASE_TYPE long + +typedef portSTACK_TYPE StackType_t; +typedef long BaseType_t; +typedef unsigned long UBaseType_t; + +#if( configUSE_16_BIT_TICKS == 1 ) + typedef uint16_t TickType_t; + #define portMAX_DELAY ( TickType_t ) 0xffff +#else + typedef uint32_t TickType_t; + #define portMAX_DELAY ( TickType_t ) 0xffffffffUL + + /* 32-bit tick type on a 32-bit architecture, so reads of the tick count do + not need to be guarded with a critical section. */ + #define portTICK_TYPE_IS_ATOMIC 1 +#endif +/*-----------------------------------------------------------*/ + +/* Architecture specifics. 
*/ +#define portSTACK_GROWTH ( -1 ) +#define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ ) +#define portBYTE_ALIGNMENT 8 +/*-----------------------------------------------------------*/ + +/* Scheduler utilities. */ +#define portYIELD() \ +{ \ + /* Set a PendSV to request a context switch. */ \ + portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT; \ + \ + /* Barriers are normally not required but do ensure the code is completely \ + within the specified behaviour for the architecture. */ \ + __asm volatile( "dsb" ::: "memory" ); \ + __asm volatile( "isb" ); \ +} + +#define portNVIC_INT_CTRL_REG ( * ( ( volatile uint32_t * ) 0xe000ed04 ) ) +#define portNVIC_PENDSVSET_BIT ( 1UL << 28UL ) +#define portEND_SWITCHING_ISR( xSwitchRequired ) if( xSwitchRequired != pdFALSE ) portYIELD() +#define portYIELD_FROM_ISR( x ) portEND_SWITCHING_ISR( x ) +/*-----------------------------------------------------------*/ + +/* Critical section management. */ +extern void vPortEnterCritical( void ); +extern void vPortExitCritical( void ); +#define portSET_INTERRUPT_MASK_FROM_ISR() ulPortRaiseBASEPRI() +#define portCLEAR_INTERRUPT_MASK_FROM_ISR(x) vPortSetBASEPRI(x) +#define portDISABLE_INTERRUPTS() vPortRaiseBASEPRI() +#define portENABLE_INTERRUPTS() vPortSetBASEPRI(0) +#define portENTER_CRITICAL() vPortEnterCritical() +#define portEXIT_CRITICAL() vPortExitCritical() + +/*-----------------------------------------------------------*/ + +/* Task function macros as described on the FreeRTOS.org WEB site. These are +not necessary for to use this port. They are defined so the common demo files +(which build with all the ports) will build. */ +#define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void *pvParameters ) +#define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void *pvParameters ) +/*-----------------------------------------------------------*/ + +/* Tickless idle/low power functionality. */ +#ifndef portSUPPRESS_TICKS_AND_SLEEP + extern void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime ); + #define portSUPPRESS_TICKS_AND_SLEEP( xExpectedIdleTime ) vPortSuppressTicksAndSleep( xExpectedIdleTime ) +#endif +/*-----------------------------------------------------------*/ + +/* Architecture specific optimisations. */ +#ifndef configUSE_PORT_OPTIMISED_TASK_SELECTION + #define configUSE_PORT_OPTIMISED_TASK_SELECTION 1 +#endif + +#if configUSE_PORT_OPTIMISED_TASK_SELECTION == 1 + + /* Generic helper function. */ + __attribute__( ( always_inline ) ) static inline uint8_t ucPortCountLeadingZeros( uint32_t ulBitmap ) + { + uint8_t ucReturn; + + __asm volatile ( "clz %0, %1" : "=r" ( ucReturn ) : "r" ( ulBitmap ) : "memory" ); + return ucReturn; + } + + /* Check the configuration. */ + #if( configMAX_PRIORITIES > 32 ) + #error configUSE_PORT_OPTIMISED_TASK_SELECTION can only be set to 1 when configMAX_PRIORITIES is less than or equal to 32. It is very rare that a system requires more than 10 to 15 difference priorities as tasks that share a priority will time slice. + #endif + + /* Store/clear the ready priorities in a bit map. 
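/*
 * Usage sketch for portYIELD_FROM_ISR()/portEND_SWITCHING_ISR() defined
 * above: an interrupt wakes a task via a FromISR API call and then requests
 * a context switch on interrupt exit. The semaphore handle and the choice
 * of EXTI15_10 as the example interrupt are assumptions.
 */
#include "FreeRTOS.h"
#include "semphr.h"

extern SemaphoreHandle_t xDataReadySemaphore;

void EXTI15_10_IRQHandler( void )
{
    BaseType_t xHigherPriorityTaskWoken = pdFALSE;

    /* Unblock the task waiting for data. */
    xSemaphoreGiveFromISR( xDataReadySemaphore, &xHigherPriorityTaskWoken );

    /* If the unblocked task outranks the interrupted one, pend a PendSV so
       the switch happens as soon as the ISR exits. */
    portYIELD_FROM_ISR( xHigherPriorityTaskWoken );
}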
*/ + #define portRECORD_READY_PRIORITY( uxPriority, uxReadyPriorities ) ( uxReadyPriorities ) |= ( 1UL << ( uxPriority ) ) + #define portRESET_READY_PRIORITY( uxPriority, uxReadyPriorities ) ( uxReadyPriorities ) &= ~( 1UL << ( uxPriority ) ) + + /*-----------------------------------------------------------*/ + + #define portGET_HIGHEST_PRIORITY( uxTopPriority, uxReadyPriorities ) uxTopPriority = ( 31UL - ( uint32_t ) ucPortCountLeadingZeros( ( uxReadyPriorities ) ) ) + +#endif /* configUSE_PORT_OPTIMISED_TASK_SELECTION */ + +/*-----------------------------------------------------------*/ + +#ifdef configASSERT + void vPortValidateInterruptPriority( void ); + #define portASSERT_IF_INTERRUPT_PRIORITY_INVALID() vPortValidateInterruptPriority() +#endif + +/* portNOP() is not required by this port. */ +#define portNOP() + +#define portINLINE __inline + +#ifndef portFORCE_INLINE + #define portFORCE_INLINE inline __attribute__(( always_inline)) +#endif + +portFORCE_INLINE static BaseType_t xPortIsInsideInterrupt( void ) +{ +uint32_t ulCurrentInterrupt; +BaseType_t xReturn; + + /* Obtain the number of the currently executing interrupt. */ + __asm volatile( "mrs %0, ipsr" : "=r"( ulCurrentInterrupt ) :: "memory" ); + + if( ulCurrentInterrupt == 0 ) + { + xReturn = pdFALSE; + } + else + { + xReturn = pdTRUE; + } + + return xReturn; +} + +/*-----------------------------------------------------------*/ + +portFORCE_INLINE static void vPortRaiseBASEPRI( void ) +{ +uint32_t ulNewBASEPRI; + + __asm volatile + ( + " mov %0, %1 \n" \ + " cpsid i \n" \ + " msr basepri, %0 \n" \ + " isb \n" \ + " dsb \n" \ + " cpsie i \n" \ + :"=r" (ulNewBASEPRI) : "i" ( configMAX_SYSCALL_INTERRUPT_PRIORITY ) : "memory" + ); +} + +/*-----------------------------------------------------------*/ + +portFORCE_INLINE static uint32_t ulPortRaiseBASEPRI( void ) +{ +uint32_t ulOriginalBASEPRI, ulNewBASEPRI; + + __asm volatile + ( + " mrs %0, basepri \n" \ + " mov %1, %2 \n" \ + " cpsid i \n" \ + " msr basepri, %1 \n" \ + " isb \n" \ + " dsb \n" \ + " cpsie i \n" \ + :"=r" (ulOriginalBASEPRI), "=r" (ulNewBASEPRI) : "i" ( configMAX_SYSCALL_INTERRUPT_PRIORITY ) : "memory" + ); + + /* This return will not be reached but is necessary to prevent compiler + warnings. */ + return ulOriginalBASEPRI; +} +/*-----------------------------------------------------------*/ + +portFORCE_INLINE static void vPortSetBASEPRI( uint32_t ulNewMaskValue ) +{ + __asm volatile + ( + " msr basepri, %0 " :: "r" ( ulNewMaskValue ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +#define portMEMORY_BARRIER() __asm volatile( "" ::: "memory" ) + +#ifdef __cplusplus +} +#endif + +#endif /* PORTMACRO_H */ + diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/FreeRTOS/Source/portable/MemMang/heap_4.c b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/FreeRTOS/Source/portable/MemMang/heap_4.c new file mode 100644 index 0000000..23714eb --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/FreeRTOS/Source/portable/MemMang/heap_4.c @@ -0,0 +1,436 @@ +/* + * FreeRTOS Kernel V10.2.1 + * Copyright (C) 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * http://www.FreeRTOS.org + * http://aws.amazon.com/freertos + * + * 1 tab == 4 spaces! + */ + +/* + * A sample implementation of pvPortMalloc() and vPortFree() that combines + * (coalescences) adjacent memory blocks as they are freed, and in so doing + * limits memory fragmentation. + * + * See heap_1.c, heap_2.c and heap_3.c for alternative implementations, and the + * memory management pages of http://www.FreeRTOS.org for more information. + */ +#include + +/* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE prevents task.h from redefining +all the API functions to use the MPU wrappers. That should only be done when +task.h is included from an application file. */ +#define MPU_WRAPPERS_INCLUDED_FROM_API_FILE + +#include "FreeRTOS.h" +#include "task.h" + +#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE + +#if( configSUPPORT_DYNAMIC_ALLOCATION == 0 ) + #error This file must not be used if configSUPPORT_DYNAMIC_ALLOCATION is 0 +#endif + +/* Block sizes must not get too small. */ +#define heapMINIMUM_BLOCK_SIZE ( ( size_t ) ( xHeapStructSize << 1 ) ) + +/* Assumes 8bit bytes! */ +#define heapBITS_PER_BYTE ( ( size_t ) 8 ) + +/* Allocate the memory for the heap. */ +#if( configAPPLICATION_ALLOCATED_HEAP == 1 ) + /* The application writer has already defined the array used for the RTOS + heap - probably so it can be placed in a special segment or address. */ + extern uint8_t ucHeap[ configTOTAL_HEAP_SIZE ]; +#else + static uint8_t ucHeap[ configTOTAL_HEAP_SIZE ]; +#endif /* configAPPLICATION_ALLOCATED_HEAP */ + +/* Define the linked list structure. This is used to link free blocks in order +of their memory address. */ +typedef struct A_BLOCK_LINK +{ + struct A_BLOCK_LINK *pxNextFreeBlock; /*<< The next free block in the list. */ + size_t xBlockSize; /*<< The size of the free block. */ +} BlockLink_t; + +/*-----------------------------------------------------------*/ + +/* + * Inserts a block of memory that is being freed into the correct position in + * the list of free memory blocks. The block being freed will be merged with + * the block in front it and/or the block behind it if the memory blocks are + * adjacent to each other. + */ +static void prvInsertBlockIntoFreeList( BlockLink_t *pxBlockToInsert ); + +/* + * Called automatically to setup the required heap structures the first time + * pvPortMalloc() is called. 
+ */ +static void prvHeapInit( void ); + +/*-----------------------------------------------------------*/ + +/* The size of the structure placed at the beginning of each allocated memory +block must by correctly byte aligned. */ +static const size_t xHeapStructSize = ( sizeof( BlockLink_t ) + ( ( size_t ) ( portBYTE_ALIGNMENT - 1 ) ) ) & ~( ( size_t ) portBYTE_ALIGNMENT_MASK ); + +/* Create a couple of list links to mark the start and end of the list. */ +static BlockLink_t xStart, *pxEnd = NULL; + +/* Keeps track of the number of free bytes remaining, but says nothing about +fragmentation. */ +static size_t xFreeBytesRemaining = 0U; +static size_t xMinimumEverFreeBytesRemaining = 0U; + +/* Gets set to the top bit of an size_t type. When this bit in the xBlockSize +member of an BlockLink_t structure is set then the block belongs to the +application. When the bit is free the block is still part of the free heap +space. */ +static size_t xBlockAllocatedBit = 0; + +/*-----------------------------------------------------------*/ + +void *pvPortMalloc( size_t xWantedSize ) +{ +BlockLink_t *pxBlock, *pxPreviousBlock, *pxNewBlockLink; +void *pvReturn = NULL; + + vTaskSuspendAll(); + { + /* If this is the first call to malloc then the heap will require + initialisation to setup the list of free blocks. */ + if( pxEnd == NULL ) + { + prvHeapInit(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + /* Check the requested block size is not so large that the top bit is + set. The top bit of the block size member of the BlockLink_t structure + is used to determine who owns the block - the application or the + kernel, so it must be free. */ + if( ( xWantedSize & xBlockAllocatedBit ) == 0 ) + { + /* The wanted size is increased so it can contain a BlockLink_t + structure in addition to the requested amount of bytes. */ + if( xWantedSize > 0 ) + { + xWantedSize += xHeapStructSize; + + /* Ensure that blocks are always aligned to the required number + of bytes. */ + if( ( xWantedSize & portBYTE_ALIGNMENT_MASK ) != 0x00 ) + { + /* Byte alignment required. */ + xWantedSize += ( portBYTE_ALIGNMENT - ( xWantedSize & portBYTE_ALIGNMENT_MASK ) ); + configASSERT( ( xWantedSize & portBYTE_ALIGNMENT_MASK ) == 0 ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + if( ( xWantedSize > 0 ) && ( xWantedSize <= xFreeBytesRemaining ) ) + { + /* Traverse the list from the start (lowest address) block until + one of adequate size is found. */ + pxPreviousBlock = &xStart; + pxBlock = xStart.pxNextFreeBlock; + while( ( pxBlock->xBlockSize < xWantedSize ) && ( pxBlock->pxNextFreeBlock != NULL ) ) + { + pxPreviousBlock = pxBlock; + pxBlock = pxBlock->pxNextFreeBlock; + } + + /* If the end marker was reached then a block of adequate size + was not found. */ + if( pxBlock != pxEnd ) + { + /* Return the memory space pointed to - jumping over the + BlockLink_t structure at its start. */ + pvReturn = ( void * ) ( ( ( uint8_t * ) pxPreviousBlock->pxNextFreeBlock ) + xHeapStructSize ); + + /* This block is being returned for use so must be taken out + of the list of free blocks. */ + pxPreviousBlock->pxNextFreeBlock = pxBlock->pxNextFreeBlock; + + /* If the block is larger than required it can be split into + two. */ + if( ( pxBlock->xBlockSize - xWantedSize ) > heapMINIMUM_BLOCK_SIZE ) + { + /* This block is to be split into two. Create a new + block following the number of bytes requested. The void + cast is used to prevent byte alignment warnings from the + compiler. 
*/ + pxNewBlockLink = ( void * ) ( ( ( uint8_t * ) pxBlock ) + xWantedSize ); + configASSERT( ( ( ( size_t ) pxNewBlockLink ) & portBYTE_ALIGNMENT_MASK ) == 0 ); + + /* Calculate the sizes of two blocks split from the + single block. */ + pxNewBlockLink->xBlockSize = pxBlock->xBlockSize - xWantedSize; + pxBlock->xBlockSize = xWantedSize; + + /* Insert the new block into the list of free blocks. */ + prvInsertBlockIntoFreeList( pxNewBlockLink ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + xFreeBytesRemaining -= pxBlock->xBlockSize; + + if( xFreeBytesRemaining < xMinimumEverFreeBytesRemaining ) + { + xMinimumEverFreeBytesRemaining = xFreeBytesRemaining; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + /* The block is being returned - it is allocated and owned + by the application and has no "next" block. */ + pxBlock->xBlockSize |= xBlockAllocatedBit; + pxBlock->pxNextFreeBlock = NULL; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + traceMALLOC( pvReturn, xWantedSize ); + } + ( void ) xTaskResumeAll(); + + #if( configUSE_MALLOC_FAILED_HOOK == 1 ) + { + if( pvReturn == NULL ) + { + extern void vApplicationMallocFailedHook( void ); + vApplicationMallocFailedHook(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + #endif + + configASSERT( ( ( ( size_t ) pvReturn ) & ( size_t ) portBYTE_ALIGNMENT_MASK ) == 0 ); + return pvReturn; +} +/*-----------------------------------------------------------*/ + +void vPortFree( void *pv ) +{ +uint8_t *puc = ( uint8_t * ) pv; +BlockLink_t *pxLink; + + if( pv != NULL ) + { + /* The memory being freed will have an BlockLink_t structure immediately + before it. */ + puc -= xHeapStructSize; + + /* This casting is to keep the compiler from issuing warnings. */ + pxLink = ( void * ) puc; + + /* Check the block is actually allocated. */ + configASSERT( ( pxLink->xBlockSize & xBlockAllocatedBit ) != 0 ); + configASSERT( pxLink->pxNextFreeBlock == NULL ); + + if( ( pxLink->xBlockSize & xBlockAllocatedBit ) != 0 ) + { + if( pxLink->pxNextFreeBlock == NULL ) + { + /* The block is being returned to the heap - it is no longer + allocated. */ + pxLink->xBlockSize &= ~xBlockAllocatedBit; + + vTaskSuspendAll(); + { + /* Add this block to the list of free blocks. */ + xFreeBytesRemaining += pxLink->xBlockSize; + traceFREE( pv, pxLink->xBlockSize ); + prvInsertBlockIntoFreeList( ( ( BlockLink_t * ) pxLink ) ); + } + ( void ) xTaskResumeAll(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } +} +/*-----------------------------------------------------------*/ + +size_t xPortGetFreeHeapSize( void ) +{ + return xFreeBytesRemaining; +} +/*-----------------------------------------------------------*/ + +size_t xPortGetMinimumEverFreeHeapSize( void ) +{ + return xMinimumEverFreeBytesRemaining; +} +/*-----------------------------------------------------------*/ + +void vPortInitialiseBlocks( void ) +{ + /* This just exists to keep the linker quiet. */ +} +/*-----------------------------------------------------------*/ + +static void prvHeapInit( void ) +{ +BlockLink_t *pxFirstFreeBlock; +uint8_t *pucAlignedHeap; +size_t uxAddress; +size_t xTotalHeapSize = configTOTAL_HEAP_SIZE; + + /* Ensure the heap starts on a correctly aligned boundary. 
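/*
 * Usage sketch for the heap_4 allocator above: allocations come from the
 * single ucHeap array, freed blocks are coalesced with their neighbours,
 * and the water-mark query helps tune configTOTAL_HEAP_SIZE. The function
 * name is hypothetical.
 */
#include "FreeRTOS.h"

void vHeapUsageSketch( void )
{
    size_t xFreeNow, xFreeLowWaterMark;
    uint8_t *pucBuffer = pvPortMalloc( 128 );

    if( pucBuffer != NULL )
    {
        /* ...use the buffer... */
        vPortFree( pucBuffer ); /* The returned block is merged with any
                                   adjacent free blocks. */
    }

    xFreeNow = xPortGetFreeHeapSize();
    xFreeLowWaterMark = xPortGetMinimumEverFreeHeapSize();
    ( void ) xFreeNow;
    ( void ) xFreeLowWaterMark;
}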
*/ + uxAddress = ( size_t ) ucHeap; + + if( ( uxAddress & portBYTE_ALIGNMENT_MASK ) != 0 ) + { + uxAddress += ( portBYTE_ALIGNMENT - 1 ); + uxAddress &= ~( ( size_t ) portBYTE_ALIGNMENT_MASK ); + xTotalHeapSize -= uxAddress - ( size_t ) ucHeap; + } + + pucAlignedHeap = ( uint8_t * ) uxAddress; + + /* xStart is used to hold a pointer to the first item in the list of free + blocks. The void cast is used to prevent compiler warnings. */ + xStart.pxNextFreeBlock = ( void * ) pucAlignedHeap; + xStart.xBlockSize = ( size_t ) 0; + + /* pxEnd is used to mark the end of the list of free blocks and is inserted + at the end of the heap space. */ + uxAddress = ( ( size_t ) pucAlignedHeap ) + xTotalHeapSize; + uxAddress -= xHeapStructSize; + uxAddress &= ~( ( size_t ) portBYTE_ALIGNMENT_MASK ); + pxEnd = ( void * ) uxAddress; + pxEnd->xBlockSize = 0; + pxEnd->pxNextFreeBlock = NULL; + + /* To start with there is a single free block that is sized to take up the + entire heap space, minus the space taken by pxEnd. */ + pxFirstFreeBlock = ( void * ) pucAlignedHeap; + pxFirstFreeBlock->xBlockSize = uxAddress - ( size_t ) pxFirstFreeBlock; + pxFirstFreeBlock->pxNextFreeBlock = pxEnd; + + /* Only one block exists - and it covers the entire usable heap space. */ + xMinimumEverFreeBytesRemaining = pxFirstFreeBlock->xBlockSize; + xFreeBytesRemaining = pxFirstFreeBlock->xBlockSize; + + /* Work out the position of the top bit in a size_t variable. */ + xBlockAllocatedBit = ( ( size_t ) 1 ) << ( ( sizeof( size_t ) * heapBITS_PER_BYTE ) - 1 ); +} +/*-----------------------------------------------------------*/ + +static void prvInsertBlockIntoFreeList( BlockLink_t *pxBlockToInsert ) +{ +BlockLink_t *pxIterator; +uint8_t *puc; + + /* Iterate through the list until a block is found that has a higher address + than the block being inserted. */ + for( pxIterator = &xStart; pxIterator->pxNextFreeBlock < pxBlockToInsert; pxIterator = pxIterator->pxNextFreeBlock ) + { + /* Nothing to do here, just iterate to the right position. */ + } + + /* Do the block being inserted, and the block it is being inserted after + make a contiguous block of memory? */ + puc = ( uint8_t * ) pxIterator; + if( ( puc + pxIterator->xBlockSize ) == ( uint8_t * ) pxBlockToInsert ) + { + pxIterator->xBlockSize += pxBlockToInsert->xBlockSize; + pxBlockToInsert = pxIterator; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + /* Do the block being inserted, and the block it is being inserted before + make a contiguous block of memory? */ + puc = ( uint8_t * ) pxBlockToInsert; + if( ( puc + pxBlockToInsert->xBlockSize ) == ( uint8_t * ) pxIterator->pxNextFreeBlock ) + { + if( pxIterator->pxNextFreeBlock != pxEnd ) + { + /* Form one big block from the two blocks. */ + pxBlockToInsert->xBlockSize += pxIterator->pxNextFreeBlock->xBlockSize; + pxBlockToInsert->pxNextFreeBlock = pxIterator->pxNextFreeBlock->pxNextFreeBlock; + } + else + { + pxBlockToInsert->pxNextFreeBlock = pxEnd; + } + } + else + { + pxBlockToInsert->pxNextFreeBlock = pxIterator->pxNextFreeBlock; + } + + /* If the block being inserted plugged a gab, so was merged with the block + before and the block after, then it's pxNextFreeBlock pointer will have + already been set, and should not be set here as that would make it point + to itself. 
*/ + if( pxIterator != pxBlockToInsert ) + { + pxIterator->pxNextFreeBlock = pxBlockToInsert; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } +} + diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/FreeRTOS/Source/queue.c b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/FreeRTOS/Source/queue.c new file mode 100644 index 0000000..6f79c92 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/FreeRTOS/Source/queue.c @@ -0,0 +1,2941 @@ +/* + * FreeRTOS Kernel V10.2.1 + * Copyright (C) 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * http://www.FreeRTOS.org + * http://aws.amazon.com/freertos + * + * 1 tab == 4 spaces! + */ + +#include +#include + +/* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE prevents task.h from redefining +all the API functions to use the MPU wrappers. That should only be done when +task.h is included from an application file. */ +#define MPU_WRAPPERS_INCLUDED_FROM_API_FILE + +#include "FreeRTOS.h" +#include "task.h" +#include "queue.h" + +#if ( configUSE_CO_ROUTINES == 1 ) + #include "croutine.h" +#endif + +/* Lint e9021, e961 and e750 are suppressed as a MISRA exception justified +because the MPU ports require MPU_WRAPPERS_INCLUDED_FROM_API_FILE to be defined +for the header files above, but not in this file, in order to generate the +correct privileged Vs unprivileged linkage and placement. */ +#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE /*lint !e961 !e750 !e9021. */ + + +/* Constants used with the cRxLock and cTxLock structure members. */ +#define queueUNLOCKED ( ( int8_t ) -1 ) +#define queueLOCKED_UNMODIFIED ( ( int8_t ) 0 ) + +/* When the Queue_t structure is used to represent a base queue its pcHead and +pcTail members are used as pointers into the queue storage area. When the +Queue_t structure is used to represent a mutex pcHead and pcTail pointers are +not necessary, and the pcHead pointer is set to NULL to indicate that the +structure instead holds a pointer to the mutex holder (if any). Map alternative +names to the pcHead and structure member to ensure the readability of the code +is maintained. The QueuePointers_t and SemaphoreData_t types are used to form +a union as their usage is mutually exclusive dependent on what the queue is +being used for. 
*/ +#define uxQueueType pcHead +#define queueQUEUE_IS_MUTEX NULL + +typedef struct QueuePointers +{ + int8_t *pcTail; /*< Points to the byte at the end of the queue storage area. Once more byte is allocated than necessary to store the queue items, this is used as a marker. */ + int8_t *pcReadFrom; /*< Points to the last place that a queued item was read from when the structure is used as a queue. */ +} QueuePointers_t; + +typedef struct SemaphoreData +{ + TaskHandle_t xMutexHolder; /*< The handle of the task that holds the mutex. */ + UBaseType_t uxRecursiveCallCount;/*< Maintains a count of the number of times a recursive mutex has been recursively 'taken' when the structure is used as a mutex. */ +} SemaphoreData_t; + +/* Semaphores do not actually store or copy data, so have an item size of +zero. */ +#define queueSEMAPHORE_QUEUE_ITEM_LENGTH ( ( UBaseType_t ) 0 ) +#define queueMUTEX_GIVE_BLOCK_TIME ( ( TickType_t ) 0U ) + +#if( configUSE_PREEMPTION == 0 ) + /* If the cooperative scheduler is being used then a yield should not be + performed just because a higher priority task has been woken. */ + #define queueYIELD_IF_USING_PREEMPTION() +#else + #define queueYIELD_IF_USING_PREEMPTION() portYIELD_WITHIN_API() +#endif + +/* + * Definition of the queue used by the scheduler. + * Items are queued by copy, not reference. See the following link for the + * rationale: https://www.freertos.org/Embedded-RTOS-Queues.html + */ +typedef struct QueueDefinition /* The old naming convention is used to prevent breaking kernel aware debuggers. */ +{ + int8_t *pcHead; /*< Points to the beginning of the queue storage area. */ + int8_t *pcWriteTo; /*< Points to the free next place in the storage area. */ + + union + { + QueuePointers_t xQueue; /*< Data required exclusively when this structure is used as a queue. */ + SemaphoreData_t xSemaphore; /*< Data required exclusively when this structure is used as a semaphore. */ + } u; + + List_t xTasksWaitingToSend; /*< List of tasks that are blocked waiting to post onto this queue. Stored in priority order. */ + List_t xTasksWaitingToReceive; /*< List of tasks that are blocked waiting to read from this queue. Stored in priority order. */ + + volatile UBaseType_t uxMessagesWaiting;/*< The number of items currently in the queue. */ + UBaseType_t uxLength; /*< The length of the queue defined as the number of items it will hold, not the number of bytes. */ + UBaseType_t uxItemSize; /*< The size of each items that the queue will hold. */ + + volatile int8_t cRxLock; /*< Stores the number of items received from the queue (removed from the queue) while the queue was locked. Set to queueUNLOCKED when the queue is not locked. */ + volatile int8_t cTxLock; /*< Stores the number of items transmitted to the queue (added to the queue) while the queue was locked. Set to queueUNLOCKED when the queue is not locked. */ + + #if( ( configSUPPORT_STATIC_ALLOCATION == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) ) + uint8_t ucStaticallyAllocated; /*< Set to pdTRUE if the memory used by the queue was statically allocated to ensure no attempt is made to free the memory. */ + #endif + + #if ( configUSE_QUEUE_SETS == 1 ) + struct QueueDefinition *pxQueueSetContainer; + #endif + + #if ( configUSE_TRACE_FACILITY == 1 ) + UBaseType_t uxQueueNumber; + uint8_t ucQueueType; + #endif + +} xQUEUE; + +/* The old xQUEUE name is maintained above then typedefed to the new Queue_t +name below to enable the use of older kernel aware debuggers. 
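Because items are queued by copy into the storage area behind pcHead, the sender's buffer can be reused or go out of scope as soon as the send call returns. A usage sketch of the structure above; the WheelCommand_t type, queue length and handle name are invented for illustration:

#include <stdint.h>
#include "FreeRTOS.h"
#include "queue.h"

typedef struct
{
    uint32_t ulSequence;
    int16_t  sLeftWheel;
    int16_t  sRightWheel;
} WheelCommand_t;

static QueueHandle_t xCommandQueue;

void vSetupCommandQueue( void )
{
    /* Room for 8 items of sizeof( WheelCommand_t ) bytes each; the storage
    is carved out of the FreeRTOS heap by xQueueGenericCreate(). */
    xCommandQueue = xQueueCreate( 8, sizeof( WheelCommand_t ) );
    configASSERT( xCommandQueue != NULL );
}

void vSendCommand( int16_t sLeft, int16_t sRight )
{
    WheelCommand_t xCmd = { 0, sLeft, sRight };

    /* The item is copied in; block for up to 10 ms if the queue is full. */
    ( void ) xQueueSend( xCommandQueue, &xCmd, pdMS_TO_TICKS( 10 ) );
}
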
*/ +typedef xQUEUE Queue_t; + +/*-----------------------------------------------------------*/ + +/* + * The queue registry is just a means for kernel aware debuggers to locate + * queue structures. It has no other purpose so is an optional component. + */ +#if ( configQUEUE_REGISTRY_SIZE > 0 ) + + /* The type stored within the queue registry array. This allows a name + to be assigned to each queue making kernel aware debugging a little + more user friendly. */ + typedef struct QUEUE_REGISTRY_ITEM + { + const char *pcQueueName; /*lint !e971 Unqualified char types are allowed for strings and single characters only. */ + QueueHandle_t xHandle; + } xQueueRegistryItem; + + /* The old xQueueRegistryItem name is maintained above then typedefed to the + new xQueueRegistryItem name below to enable the use of older kernel aware + debuggers. */ + typedef xQueueRegistryItem QueueRegistryItem_t; + + /* The queue registry is simply an array of QueueRegistryItem_t structures. + The pcQueueName member of a structure being NULL is indicative of the + array position being vacant. */ + PRIVILEGED_DATA QueueRegistryItem_t xQueueRegistry[ configQUEUE_REGISTRY_SIZE ]; + +#endif /* configQUEUE_REGISTRY_SIZE */ + +/* + * Unlocks a queue locked by a call to prvLockQueue. Locking a queue does not + * prevent an ISR from adding or removing items to the queue, but does prevent + * an ISR from removing tasks from the queue event lists. If an ISR finds a + * queue is locked it will instead increment the appropriate queue lock count + * to indicate that a task may require unblocking. When the queue in unlocked + * these lock counts are inspected, and the appropriate action taken. + */ +static void prvUnlockQueue( Queue_t * const pxQueue ) PRIVILEGED_FUNCTION; + +/* + * Uses a critical section to determine if there is any data in a queue. + * + * @return pdTRUE if the queue contains no items, otherwise pdFALSE. + */ +static BaseType_t prvIsQueueEmpty( const Queue_t *pxQueue ) PRIVILEGED_FUNCTION; + +/* + * Uses a critical section to determine if there is any space in a queue. + * + * @return pdTRUE if there is no space, otherwise pdFALSE; + */ +static BaseType_t prvIsQueueFull( const Queue_t *pxQueue ) PRIVILEGED_FUNCTION; + +/* + * Copies an item into the queue, either at the front of the queue or the + * back of the queue. + */ +static BaseType_t prvCopyDataToQueue( Queue_t * const pxQueue, const void *pvItemToQueue, const BaseType_t xPosition ) PRIVILEGED_FUNCTION; + +/* + * Copies an item out of a queue. + */ +static void prvCopyDataFromQueue( Queue_t * const pxQueue, void * const pvBuffer ) PRIVILEGED_FUNCTION; + +#if ( configUSE_QUEUE_SETS == 1 ) + /* + * Checks to see if a queue is a member of a queue set, and if so, notifies + * the queue set that the queue contains data. + */ + static BaseType_t prvNotifyQueueSetContainer( const Queue_t * const pxQueue, const BaseType_t xCopyPosition ) PRIVILEGED_FUNCTION; +#endif + +/* + * Called after a Queue_t structure has been allocated either statically or + * dynamically to fill in the structure's members. + */ +static void prvInitialiseNewQueue( const UBaseType_t uxQueueLength, const UBaseType_t uxItemSize, uint8_t *pucQueueStorage, const uint8_t ucQueueType, Queue_t *pxNewQueue ) PRIVILEGED_FUNCTION; + +/* + * Mutexes are a special type of queue. When a mutex is created, first the + * queue is created, then prvInitialiseMutex() is called to configure the queue + * as a mutex. 
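The registry above is purely a debugging aid: entries exist only when configQUEUE_REGISTRY_SIZE is greater than zero, and the kernel itself never reads them. A hypothetical sketch, with queue names chosen for illustration:

/* Makes the queues visible by name in a kernel-aware debugger. */
void vRegisterQueuesForDebug( QueueHandle_t xRxQueue, QueueHandle_t xTxQueue )
{
    vQueueAddToRegistry( xRxQueue, "rtpsRx" );
    vQueueAddToRegistry( xTxQueue, "rtpsTx" );

    /* vQueueUnregisterQueue( xRxQueue ) would remove an entry again;
    vQueueDelete() later in this file does that automatically. */
}
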
+ */ +#if( configUSE_MUTEXES == 1 ) + static void prvInitialiseMutex( Queue_t *pxNewQueue ) PRIVILEGED_FUNCTION; +#endif + +#if( configUSE_MUTEXES == 1 ) + /* + * If a task waiting for a mutex causes the mutex holder to inherit a + * priority, but the waiting task times out, then the holder should + * disinherit the priority - but only down to the highest priority of any + * other tasks that are waiting for the same mutex. This function returns + * that priority. + */ + static UBaseType_t prvGetDisinheritPriorityAfterTimeout( const Queue_t * const pxQueue ) PRIVILEGED_FUNCTION; +#endif +/*-----------------------------------------------------------*/ + +/* + * Macro to mark a queue as locked. Locking a queue prevents an ISR from + * accessing the queue event lists. + */ +#define prvLockQueue( pxQueue ) \ + taskENTER_CRITICAL(); \ + { \ + if( ( pxQueue )->cRxLock == queueUNLOCKED ) \ + { \ + ( pxQueue )->cRxLock = queueLOCKED_UNMODIFIED; \ + } \ + if( ( pxQueue )->cTxLock == queueUNLOCKED ) \ + { \ + ( pxQueue )->cTxLock = queueLOCKED_UNMODIFIED; \ + } \ + } \ + taskEXIT_CRITICAL() +/*-----------------------------------------------------------*/ + +BaseType_t xQueueGenericReset( QueueHandle_t xQueue, BaseType_t xNewQueue ) +{ +Queue_t * const pxQueue = xQueue; + + configASSERT( pxQueue ); + + taskENTER_CRITICAL(); + { + pxQueue->u.xQueue.pcTail = pxQueue->pcHead + ( pxQueue->uxLength * pxQueue->uxItemSize ); /*lint !e9016 Pointer arithmetic allowed on char types, especially when it assists conveying intent. */ + pxQueue->uxMessagesWaiting = ( UBaseType_t ) 0U; + pxQueue->pcWriteTo = pxQueue->pcHead; + pxQueue->u.xQueue.pcReadFrom = pxQueue->pcHead + ( ( pxQueue->uxLength - 1U ) * pxQueue->uxItemSize ); /*lint !e9016 Pointer arithmetic allowed on char types, especially when it assists conveying intent. */ + pxQueue->cRxLock = queueUNLOCKED; + pxQueue->cTxLock = queueUNLOCKED; + + if( xNewQueue == pdFALSE ) + { + /* If there are tasks blocked waiting to read from the queue, then + the tasks will remain blocked as after this function exits the queue + will still be empty. If there are tasks blocked waiting to write to + the queue, then one should be unblocked as after this function exits + it will be possible to write to it. */ + if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE ) + { + if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE ) + { + queueYIELD_IF_USING_PREEMPTION(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + /* Ensure the event queues start in the correct state. */ + vListInitialise( &( pxQueue->xTasksWaitingToSend ) ); + vListInitialise( &( pxQueue->xTasksWaitingToReceive ) ); + } + } + taskEXIT_CRITICAL(); + + /* A value is returned for calling semantic consistency with previous + versions. */ + return pdPASS; +} +/*-----------------------------------------------------------*/ + +#if( configSUPPORT_STATIC_ALLOCATION == 1 ) + + QueueHandle_t xQueueGenericCreateStatic( const UBaseType_t uxQueueLength, const UBaseType_t uxItemSize, uint8_t *pucQueueStorage, StaticQueue_t *pxStaticQueue, const uint8_t ucQueueType ) + { + Queue_t *pxNewQueue; + + configASSERT( uxQueueLength > ( UBaseType_t ) 0 ); + + /* The StaticQueue_t structure and the queue storage area must be + supplied. */ + configASSERT( pxStaticQueue != NULL ); + + /* A queue storage area should be provided if the item size is not 0, and + should not be provided if the item size is 0. 
*/ + configASSERT( !( ( pucQueueStorage != NULL ) && ( uxItemSize == 0 ) ) ); + configASSERT( !( ( pucQueueStorage == NULL ) && ( uxItemSize != 0 ) ) ); + + #if( configASSERT_DEFINED == 1 ) + { + /* Sanity check that the size of the structure used to declare a + variable of type StaticQueue_t or StaticSemaphore_t equals the size of + the real queue and semaphore structures. */ + volatile size_t xSize = sizeof( StaticQueue_t ); + configASSERT( xSize == sizeof( Queue_t ) ); + ( void ) xSize; /* Keeps lint quiet when configASSERT() is not defined. */ + } + #endif /* configASSERT_DEFINED */ + + /* The address of a statically allocated queue was passed in, use it. + The address of a statically allocated storage area was also passed in + but is already set. */ + pxNewQueue = ( Queue_t * ) pxStaticQueue; /*lint !e740 !e9087 Unusual cast is ok as the structures are designed to have the same alignment, and the size is checked by an assert. */ + + if( pxNewQueue != NULL ) + { + #if( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) + { + /* Queues can be allocated wither statically or dynamically, so + note this queue was allocated statically in case the queue is + later deleted. */ + pxNewQueue->ucStaticallyAllocated = pdTRUE; + } + #endif /* configSUPPORT_DYNAMIC_ALLOCATION */ + + prvInitialiseNewQueue( uxQueueLength, uxItemSize, pucQueueStorage, ucQueueType, pxNewQueue ); + } + else + { + traceQUEUE_CREATE_FAILED( ucQueueType ); + mtCOVERAGE_TEST_MARKER(); + } + + return pxNewQueue; + } + +#endif /* configSUPPORT_STATIC_ALLOCATION */ +/*-----------------------------------------------------------*/ + +#if( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) + + QueueHandle_t xQueueGenericCreate( const UBaseType_t uxQueueLength, const UBaseType_t uxItemSize, const uint8_t ucQueueType ) + { + Queue_t *pxNewQueue; + size_t xQueueSizeInBytes; + uint8_t *pucQueueStorage; + + configASSERT( uxQueueLength > ( UBaseType_t ) 0 ); + + if( uxItemSize == ( UBaseType_t ) 0 ) + { + /* There is not going to be a queue storage area. */ + xQueueSizeInBytes = ( size_t ) 0; + } + else + { + /* Allocate enough space to hold the maximum number of items that + can be in the queue at any time. */ + xQueueSizeInBytes = ( size_t ) ( uxQueueLength * uxItemSize ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */ + } + + /* Allocate the queue and storage area. Justification for MISRA + deviation as follows: pvPortMalloc() always ensures returned memory + blocks are aligned per the requirements of the MCU stack. In this case + pvPortMalloc() must return a pointer that is guaranteed to meet the + alignment requirements of the Queue_t structure - which in this case + is an int8_t *. Therefore, whenever the stack alignment requirements + are greater than or equal to the pointer to char requirements the cast + is safe. In other cases alignment requirements are not strict (one or + two bytes). */ + pxNewQueue = ( Queue_t * ) pvPortMalloc( sizeof( Queue_t ) + xQueueSizeInBytes ); /*lint !e9087 !e9079 see comment above. */ + + if( pxNewQueue != NULL ) + { + /* Jump past the queue structure to find the location of the queue + storage area. */ + pucQueueStorage = ( uint8_t * ) pxNewQueue; + pucQueueStorage += sizeof( Queue_t ); /*lint !e9016 Pointer arithmetic allowed on char types, especially when it assists conveying intent. */ + + #if( configSUPPORT_STATIC_ALLOCATION == 1 ) + { + /* Queues can be created either statically or dynamically, so + note this task was created dynamically in case it is later + deleted. 
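xQueueGenericCreateStatic() above is reached through the xQueueCreateStatic() macro and never touches the heap; the caller supplies both the control block and the item storage. A sketch assuming configSUPPORT_STATIC_ALLOCATION == 1, with lengths and names invented:

#include <stdint.h>
#include "FreeRTOS.h"
#include "queue.h"

#define EVT_QUEUE_LEN   16
#define EVT_ITEM_SIZE   sizeof( uint32_t )

static StaticQueue_t xEventQueueControl;                              /* Holds the Queue_t bookkeeping. */
static uint8_t ucEventQueueStorage[ EVT_QUEUE_LEN * EVT_ITEM_SIZE ];  /* Holds the copied items. */

QueueHandle_t xCreateEventQueue( void )
{
    /* ucStaticallyAllocated is set to pdTRUE internally, so a later
    vQueueDelete() will not call vPortFree() on these buffers. */
    return xQueueCreateStatic( EVT_QUEUE_LEN, EVT_ITEM_SIZE,
                               ucEventQueueStorage, &xEventQueueControl );
}
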
*/ + pxNewQueue->ucStaticallyAllocated = pdFALSE; + } + #endif /* configSUPPORT_STATIC_ALLOCATION */ + + prvInitialiseNewQueue( uxQueueLength, uxItemSize, pucQueueStorage, ucQueueType, pxNewQueue ); + } + else + { + traceQUEUE_CREATE_FAILED( ucQueueType ); + mtCOVERAGE_TEST_MARKER(); + } + + return pxNewQueue; + } + +#endif /* configSUPPORT_STATIC_ALLOCATION */ +/*-----------------------------------------------------------*/ + +static void prvInitialiseNewQueue( const UBaseType_t uxQueueLength, const UBaseType_t uxItemSize, uint8_t *pucQueueStorage, const uint8_t ucQueueType, Queue_t *pxNewQueue ) +{ + /* Remove compiler warnings about unused parameters should + configUSE_TRACE_FACILITY not be set to 1. */ + ( void ) ucQueueType; + + if( uxItemSize == ( UBaseType_t ) 0 ) + { + /* No RAM was allocated for the queue storage area, but PC head cannot + be set to NULL because NULL is used as a key to say the queue is used as + a mutex. Therefore just set pcHead to point to the queue as a benign + value that is known to be within the memory map. */ + pxNewQueue->pcHead = ( int8_t * ) pxNewQueue; + } + else + { + /* Set the head to the start of the queue storage area. */ + pxNewQueue->pcHead = ( int8_t * ) pucQueueStorage; + } + + /* Initialise the queue members as described where the queue type is + defined. */ + pxNewQueue->uxLength = uxQueueLength; + pxNewQueue->uxItemSize = uxItemSize; + ( void ) xQueueGenericReset( pxNewQueue, pdTRUE ); + + #if ( configUSE_TRACE_FACILITY == 1 ) + { + pxNewQueue->ucQueueType = ucQueueType; + } + #endif /* configUSE_TRACE_FACILITY */ + + #if( configUSE_QUEUE_SETS == 1 ) + { + pxNewQueue->pxQueueSetContainer = NULL; + } + #endif /* configUSE_QUEUE_SETS */ + + traceQUEUE_CREATE( pxNewQueue ); +} +/*-----------------------------------------------------------*/ + +#if( configUSE_MUTEXES == 1 ) + + static void prvInitialiseMutex( Queue_t *pxNewQueue ) + { + if( pxNewQueue != NULL ) + { + /* The queue create function will set all the queue structure members + correctly for a generic queue, but this function is creating a + mutex. Overwrite those members that need to be set differently - + in particular the information required for priority inheritance. */ + pxNewQueue->u.xSemaphore.xMutexHolder = NULL; + pxNewQueue->uxQueueType = queueQUEUE_IS_MUTEX; + + /* In case this is a recursive mutex. */ + pxNewQueue->u.xSemaphore.uxRecursiveCallCount = 0; + + traceCREATE_MUTEX( pxNewQueue ); + + /* Start with the semaphore in the expected state. 
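prvInitialiseMutex() finishes by 'giving' the new mutex once, so it is created in the available state. A minimal creation sketch via the semphr.h wrapper around the xQueueCreateMutex() path that follows (configUSE_MUTEXES == 1 assumed; the handle name is an example):

#include "FreeRTOS.h"
#include "semphr.h"

static SemaphoreHandle_t xI2cMutex;

void vCreateI2cMutex( void )
{
    xI2cMutex = xSemaphoreCreateMutex();
    configASSERT( xI2cMutex != NULL );

    /* Because the mutex starts 'given', the first xSemaphoreTake() on it
    succeeds immediately without blocking. */
}
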
*/ + ( void ) xQueueGenericSend( pxNewQueue, NULL, ( TickType_t ) 0U, queueSEND_TO_BACK ); + } + else + { + traceCREATE_MUTEX_FAILED(); + } + } + +#endif /* configUSE_MUTEXES */ +/*-----------------------------------------------------------*/ + +#if( ( configUSE_MUTEXES == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) ) + + QueueHandle_t xQueueCreateMutex( const uint8_t ucQueueType ) + { + QueueHandle_t xNewQueue; + const UBaseType_t uxMutexLength = ( UBaseType_t ) 1, uxMutexSize = ( UBaseType_t ) 0; + + xNewQueue = xQueueGenericCreate( uxMutexLength, uxMutexSize, ucQueueType ); + prvInitialiseMutex( ( Queue_t * ) xNewQueue ); + + return xNewQueue; + } + +#endif /* configUSE_MUTEXES */ +/*-----------------------------------------------------------*/ + +#if( ( configUSE_MUTEXES == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) ) + + QueueHandle_t xQueueCreateMutexStatic( const uint8_t ucQueueType, StaticQueue_t *pxStaticQueue ) + { + QueueHandle_t xNewQueue; + const UBaseType_t uxMutexLength = ( UBaseType_t ) 1, uxMutexSize = ( UBaseType_t ) 0; + + /* Prevent compiler warnings about unused parameters if + configUSE_TRACE_FACILITY does not equal 1. */ + ( void ) ucQueueType; + + xNewQueue = xQueueGenericCreateStatic( uxMutexLength, uxMutexSize, NULL, pxStaticQueue, ucQueueType ); + prvInitialiseMutex( ( Queue_t * ) xNewQueue ); + + return xNewQueue; + } + +#endif /* configUSE_MUTEXES */ +/*-----------------------------------------------------------*/ + +#if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) ) + + TaskHandle_t xQueueGetMutexHolder( QueueHandle_t xSemaphore ) + { + TaskHandle_t pxReturn; + Queue_t * const pxSemaphore = ( Queue_t * ) xSemaphore; + + /* This function is called by xSemaphoreGetMutexHolder(), and should not + be called directly. Note: This is a good way of determining if the + calling task is the mutex holder, but not a good way of determining the + identity of the mutex holder, as the holder may change between the + following critical section exiting and the function returning. */ + taskENTER_CRITICAL(); + { + if( pxSemaphore->uxQueueType == queueQUEUE_IS_MUTEX ) + { + pxReturn = pxSemaphore->u.xSemaphore.xMutexHolder; + } + else + { + pxReturn = NULL; + } + } + taskEXIT_CRITICAL(); + + return pxReturn; + } /*lint !e818 xSemaphore cannot be a pointer to const because it is a typedef. */ + +#endif +/*-----------------------------------------------------------*/ + +#if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) ) + + TaskHandle_t xQueueGetMutexHolderFromISR( QueueHandle_t xSemaphore ) + { + TaskHandle_t pxReturn; + + configASSERT( xSemaphore ); + + /* Mutexes cannot be used in interrupt service routines, so the mutex + holder should not change in an ISR, and therefore a critical section is + not required here. */ + if( ( ( Queue_t * ) xSemaphore )->uxQueueType == queueQUEUE_IS_MUTEX ) + { + pxReturn = ( ( Queue_t * ) xSemaphore )->u.xSemaphore.xMutexHolder; + } + else + { + pxReturn = NULL; + } + + return pxReturn; + } /*lint !e818 xSemaphore cannot be a pointer to const because it is a typedef. */ + +#endif +/*-----------------------------------------------------------*/ + +#if ( configUSE_RECURSIVE_MUTEXES == 1 ) + + BaseType_t xQueueGiveMutexRecursive( QueueHandle_t xMutex ) + { + BaseType_t xReturn; + Queue_t * const pxMutex = ( Queue_t * ) xMutex; + + configASSERT( pxMutex ); + + /* If this is the task that holds the mutex then xMutexHolder will not + change outside of this task. 
If this task does not hold the mutex then + pxMutexHolder can never coincidentally equal the tasks handle, and as + this is the only condition we are interested in it does not matter if + pxMutexHolder is accessed simultaneously by another task. Therefore no + mutual exclusion is required to test the pxMutexHolder variable. */ + if( pxMutex->u.xSemaphore.xMutexHolder == xTaskGetCurrentTaskHandle() ) + { + traceGIVE_MUTEX_RECURSIVE( pxMutex ); + + /* uxRecursiveCallCount cannot be zero if xMutexHolder is equal to + the task handle, therefore no underflow check is required. Also, + uxRecursiveCallCount is only modified by the mutex holder, and as + there can only be one, no mutual exclusion is required to modify the + uxRecursiveCallCount member. */ + ( pxMutex->u.xSemaphore.uxRecursiveCallCount )--; + + /* Has the recursive call count unwound to 0? */ + if( pxMutex->u.xSemaphore.uxRecursiveCallCount == ( UBaseType_t ) 0 ) + { + /* Return the mutex. This will automatically unblock any other + task that might be waiting to access the mutex. */ + ( void ) xQueueGenericSend( pxMutex, NULL, queueMUTEX_GIVE_BLOCK_TIME, queueSEND_TO_BACK ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + xReturn = pdPASS; + } + else + { + /* The mutex cannot be given because the calling task is not the + holder. */ + xReturn = pdFAIL; + + traceGIVE_MUTEX_RECURSIVE_FAILED( pxMutex ); + } + + return xReturn; + } + +#endif /* configUSE_RECURSIVE_MUTEXES */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_RECURSIVE_MUTEXES == 1 ) + + BaseType_t xQueueTakeMutexRecursive( QueueHandle_t xMutex, TickType_t xTicksToWait ) + { + BaseType_t xReturn; + Queue_t * const pxMutex = ( Queue_t * ) xMutex; + + configASSERT( pxMutex ); + + /* Comments regarding mutual exclusion as per those within + xQueueGiveMutexRecursive(). */ + + traceTAKE_MUTEX_RECURSIVE( pxMutex ); + + if( pxMutex->u.xSemaphore.xMutexHolder == xTaskGetCurrentTaskHandle() ) + { + ( pxMutex->u.xSemaphore.uxRecursiveCallCount )++; + xReturn = pdPASS; + } + else + { + xReturn = xQueueSemaphoreTake( pxMutex, xTicksToWait ); + + /* pdPASS will only be returned if the mutex was successfully + obtained. The calling task may have entered the Blocked state + before reaching here. 
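uxRecursiveCallCount above is what lets the holder nest take/give pairs: the mutex is only returned to other tasks once the gives balance the takes. A hypothetical sketch (configUSE_RECURSIVE_MUTEXES == 1; the logging scenario and names are invented, and xLogMutex is assumed to have been created with xSemaphoreCreateRecursiveMutex()):

#include "FreeRTOS.h"
#include "semphr.h"

static SemaphoreHandle_t xLogMutex;

static void prvWriteLine( const char *pcLine )
{
    /* Nested take by the same task: only the call count increments. */
    if( xSemaphoreTakeRecursive( xLogMutex, portMAX_DELAY ) == pdPASS )
    {
        ( void ) pcLine;    /* ... write the line somewhere ... */
        ( void ) xSemaphoreGiveRecursive( xLogMutex );
    }
}

void vWriteTwoLines( void )
{
    if( xSemaphoreTakeRecursive( xLogMutex, portMAX_DELAY ) == pdPASS )
    {
        prvWriteLine( "first" );
        prvWriteLine( "second" );

        /* This final give unwinds the count to zero and releases the mutex. */
        ( void ) xSemaphoreGiveRecursive( xLogMutex );
    }
}
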
*/ + if( xReturn != pdFAIL ) + { + ( pxMutex->u.xSemaphore.uxRecursiveCallCount )++; + } + else + { + traceTAKE_MUTEX_RECURSIVE_FAILED( pxMutex ); + } + } + + return xReturn; + } + +#endif /* configUSE_RECURSIVE_MUTEXES */ +/*-----------------------------------------------------------*/ + +#if( ( configUSE_COUNTING_SEMAPHORES == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) ) + + QueueHandle_t xQueueCreateCountingSemaphoreStatic( const UBaseType_t uxMaxCount, const UBaseType_t uxInitialCount, StaticQueue_t *pxStaticQueue ) + { + QueueHandle_t xHandle; + + configASSERT( uxMaxCount != 0 ); + configASSERT( uxInitialCount <= uxMaxCount ); + + xHandle = xQueueGenericCreateStatic( uxMaxCount, queueSEMAPHORE_QUEUE_ITEM_LENGTH, NULL, pxStaticQueue, queueQUEUE_TYPE_COUNTING_SEMAPHORE ); + + if( xHandle != NULL ) + { + ( ( Queue_t * ) xHandle )->uxMessagesWaiting = uxInitialCount; + + traceCREATE_COUNTING_SEMAPHORE(); + } + else + { + traceCREATE_COUNTING_SEMAPHORE_FAILED(); + } + + return xHandle; + } + +#endif /* ( ( configUSE_COUNTING_SEMAPHORES == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) ) */ +/*-----------------------------------------------------------*/ + +#if( ( configUSE_COUNTING_SEMAPHORES == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) ) + + QueueHandle_t xQueueCreateCountingSemaphore( const UBaseType_t uxMaxCount, const UBaseType_t uxInitialCount ) + { + QueueHandle_t xHandle; + + configASSERT( uxMaxCount != 0 ); + configASSERT( uxInitialCount <= uxMaxCount ); + + xHandle = xQueueGenericCreate( uxMaxCount, queueSEMAPHORE_QUEUE_ITEM_LENGTH, queueQUEUE_TYPE_COUNTING_SEMAPHORE ); + + if( xHandle != NULL ) + { + ( ( Queue_t * ) xHandle )->uxMessagesWaiting = uxInitialCount; + + traceCREATE_COUNTING_SEMAPHORE(); + } + else + { + traceCREATE_COUNTING_SEMAPHORE_FAILED(); + } + + return xHandle; + } + +#endif /* ( ( configUSE_COUNTING_SEMAPHORES == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) ) */ +/*-----------------------------------------------------------*/ + +BaseType_t xQueueGenericSend( QueueHandle_t xQueue, const void * const pvItemToQueue, TickType_t xTicksToWait, const BaseType_t xCopyPosition ) +{ +BaseType_t xEntryTimeSet = pdFALSE, xYieldRequired; +TimeOut_t xTimeOut; +Queue_t * const pxQueue = xQueue; + + configASSERT( pxQueue ); + configASSERT( !( ( pvItemToQueue == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) ); + configASSERT( !( ( xCopyPosition == queueOVERWRITE ) && ( pxQueue->uxLength != 1 ) ) ); + #if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) ) + { + configASSERT( !( ( xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED ) && ( xTicksToWait != 0 ) ) ); + } + #endif + + + /*lint -save -e904 This function relaxes the coding standard somewhat to + allow return statements within the function itself. This is done in the + interest of execution time efficiency. */ + for( ;; ) + { + taskENTER_CRITICAL(); + { + /* Is there room on the queue now? The running task must be the + highest priority task wanting to access the queue. If the head item + in the queue is to be overwritten then it does not matter if the + queue is full. 
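A counting semaphore created above is just a queue whose uxMessagesWaiting starts at uxInitialCount, which makes it a natural resource counter. A sketch with an invented pool of three DMA channels (configUSE_COUNTING_SEMAPHORES == 1 assumed):

#include "FreeRTOS.h"
#include "semphr.h"

#define NUM_DMA_CHANNELS    3

static SemaphoreHandle_t xDmaChannelPool;

void vInitDmaChannelPool( void )
{
    /* Maximum count 3, initially all 3 available. */
    xDmaChannelPool = xSemaphoreCreateCounting( NUM_DMA_CHANNELS, NUM_DMA_CHANNELS );
    configASSERT( xDmaChannelPool != NULL );
}

BaseType_t xClaimDmaChannel( TickType_t xTicksToWait )
{
    return xSemaphoreTake( xDmaChannelPool, xTicksToWait );   /* Decrements the count. */
}

void vReleaseDmaChannel( void )
{
    ( void ) xSemaphoreGive( xDmaChannelPool );               /* Increments the count. */
}
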
*/ + if( ( pxQueue->uxMessagesWaiting < pxQueue->uxLength ) || ( xCopyPosition == queueOVERWRITE ) ) + { + traceQUEUE_SEND( pxQueue ); + + #if ( configUSE_QUEUE_SETS == 1 ) + { + UBaseType_t uxPreviousMessagesWaiting = pxQueue->uxMessagesWaiting; + + xYieldRequired = prvCopyDataToQueue( pxQueue, pvItemToQueue, xCopyPosition ); + + if( pxQueue->pxQueueSetContainer != NULL ) + { + if( ( xCopyPosition == queueOVERWRITE ) && ( uxPreviousMessagesWaiting != ( UBaseType_t ) 0 ) ) + { + /* Do not notify the queue set as an existing item + was overwritten in the queue so the number of items + in the queue has not changed. */ + mtCOVERAGE_TEST_MARKER(); + } + else if( prvNotifyQueueSetContainer( pxQueue, xCopyPosition ) != pdFALSE ) + { + /* The queue is a member of a queue set, and posting + to the queue set caused a higher priority task to + unblock. A context switch is required. */ + queueYIELD_IF_USING_PREEMPTION(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + /* If there was a task waiting for data to arrive on the + queue then unblock it now. */ + if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE ) + { + if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE ) + { + /* The unblocked task has a priority higher than + our own so yield immediately. Yes it is ok to + do this from within the critical section - the + kernel takes care of that. */ + queueYIELD_IF_USING_PREEMPTION(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else if( xYieldRequired != pdFALSE ) + { + /* This path is a special case that will only get + executed if the task was holding multiple mutexes + and the mutexes were given back in an order that is + different to that in which they were taken. */ + queueYIELD_IF_USING_PREEMPTION(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + } + #else /* configUSE_QUEUE_SETS */ + { + xYieldRequired = prvCopyDataToQueue( pxQueue, pvItemToQueue, xCopyPosition ); + + /* If there was a task waiting for data to arrive on the + queue then unblock it now. */ + if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE ) + { + if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE ) + { + /* The unblocked task has a priority higher than + our own so yield immediately. Yes it is ok to do + this from within the critical section - the kernel + takes care of that. */ + queueYIELD_IF_USING_PREEMPTION(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else if( xYieldRequired != pdFALSE ) + { + /* This path is a special case that will only get + executed if the task was holding multiple mutexes and + the mutexes were given back in an order that is + different to that in which they were taken. */ + queueYIELD_IF_USING_PREEMPTION(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + #endif /* configUSE_QUEUE_SETS */ + + taskEXIT_CRITICAL(); + return pdPASS; + } + else + { + if( xTicksToWait == ( TickType_t ) 0 ) + { + /* The queue was full and no block time is specified (or + the block time has expired) so leave now. */ + taskEXIT_CRITICAL(); + + /* Return to the original privilege level before exiting + the function. */ + traceQUEUE_SEND_FAILED( pxQueue ); + return errQUEUE_FULL; + } + else if( xEntryTimeSet == pdFALSE ) + { + /* The queue was full and a block time was specified so + configure the timeout structure. */ + vTaskInternalSetTimeOutState( &xTimeOut ); + xEntryTimeSet = pdTRUE; + } + else + { + /* Entry time was already set. 
*/ + mtCOVERAGE_TEST_MARKER(); + } + } + } + taskEXIT_CRITICAL(); + + /* Interrupts and other tasks can send to and receive from the queue + now the critical section has been exited. */ + + vTaskSuspendAll(); + prvLockQueue( pxQueue ); + + /* Update the timeout state to see if it has expired yet. */ + if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE ) + { + if( prvIsQueueFull( pxQueue ) != pdFALSE ) + { + traceBLOCKING_ON_QUEUE_SEND( pxQueue ); + vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToSend ), xTicksToWait ); + + /* Unlocking the queue means queue events can effect the + event list. It is possible that interrupts occurring now + remove this task from the event list again - but as the + scheduler is suspended the task will go onto the pending + ready last instead of the actual ready list. */ + prvUnlockQueue( pxQueue ); + + /* Resuming the scheduler will move tasks from the pending + ready list into the ready list - so it is feasible that this + task is already in a ready list before it yields - in which + case the yield will not cause a context switch unless there + is also a higher priority task in the pending ready list. */ + if( xTaskResumeAll() == pdFALSE ) + { + portYIELD_WITHIN_API(); + } + } + else + { + /* Try again. */ + prvUnlockQueue( pxQueue ); + ( void ) xTaskResumeAll(); + } + } + else + { + /* The timeout has expired. */ + prvUnlockQueue( pxQueue ); + ( void ) xTaskResumeAll(); + + traceQUEUE_SEND_FAILED( pxQueue ); + return errQUEUE_FULL; + } + } /*lint -restore */ +} +/*-----------------------------------------------------------*/ + +BaseType_t xQueueGenericSendFromISR( QueueHandle_t xQueue, const void * const pvItemToQueue, BaseType_t * const pxHigherPriorityTaskWoken, const BaseType_t xCopyPosition ) +{ +BaseType_t xReturn; +UBaseType_t uxSavedInterruptStatus; +Queue_t * const pxQueue = xQueue; + + configASSERT( pxQueue ); + configASSERT( !( ( pvItemToQueue == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) ); + configASSERT( !( ( xCopyPosition == queueOVERWRITE ) && ( pxQueue->uxLength != 1 ) ) ); + + /* RTOS ports that support interrupt nesting have the concept of a maximum + system call (or maximum API call) interrupt priority. Interrupts that are + above the maximum system call priority are kept permanently enabled, even + when the RTOS kernel is in a critical section, but cannot make any calls to + FreeRTOS API functions. If configASSERT() is defined in FreeRTOSConfig.h + then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion + failure if a FreeRTOS API function is called from an interrupt that has been + assigned a priority above the configured maximum system call priority. + Only FreeRTOS functions that end in FromISR can be called from interrupts + that have been assigned a priority at or (logically) below the maximum + system call interrupt priority. FreeRTOS maintains a separate interrupt + safe API to ensure interrupt entry is as fast and as simple as possible. + More information (albeit Cortex-M specific) is provided on the following + link: http://www.freertos.org/RTOS-Cortex-M3-M4.html */ + portASSERT_IF_INTERRUPT_PRIORITY_INVALID(); + + /* Similar to xQueueGenericSend, except without blocking if there is no room + in the queue. Also don't directly wake a task that was blocked on a queue + read, instead return a flag to say whether a context switch is required or + not (i.e. has a task with a higher priority than us been woken by this + post). 
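From an interrupt the kernel cannot yield directly, so the FromISR variant below reports through pxHigherPriorityTaskWoken instead and leaves the context switch to the handler. A hypothetical Cortex-M style handler (the handler name, byte source and xRxByteQueue handle are all assumptions):

#include <stdint.h>
#include "FreeRTOS.h"
#include "queue.h"

extern QueueHandle_t xRxByteQueue;   /* Assumed: created elsewhere with item size 1. */

void vExampleUartIrqHandler( void )
{
    BaseType_t xHigherPriorityTaskWoken = pdFALSE;
    uint8_t ucByte = 0U;             /* Assume this was read from the data register. */

    ( void ) xQueueSendFromISR( xRxByteQueue, &ucByte, &xHigherPriorityTaskWoken );

    /* Request a context switch on exit only if the post unblocked a task of
    higher priority than the one that was interrupted. */
    portYIELD_FROM_ISR( xHigherPriorityTaskWoken );
}
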
*/ + uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR(); + { + if( ( pxQueue->uxMessagesWaiting < pxQueue->uxLength ) || ( xCopyPosition == queueOVERWRITE ) ) + { + const int8_t cTxLock = pxQueue->cTxLock; + + traceQUEUE_SEND_FROM_ISR( pxQueue ); + + /* Semaphores use xQueueGiveFromISR(), so pxQueue will not be a + semaphore or mutex. That means prvCopyDataToQueue() cannot result + in a task disinheriting a priority and prvCopyDataToQueue() can be + called here even though the disinherit function does not check if + the scheduler is suspended before accessing the ready lists. */ + ( void ) prvCopyDataToQueue( pxQueue, pvItemToQueue, xCopyPosition ); + + /* The event list is not altered if the queue is locked. This will + be done when the queue is unlocked later. */ + if( cTxLock == queueUNLOCKED ) + { + #if ( configUSE_QUEUE_SETS == 1 ) + { + if( pxQueue->pxQueueSetContainer != NULL ) + { + if( prvNotifyQueueSetContainer( pxQueue, xCopyPosition ) != pdFALSE ) + { + /* The queue is a member of a queue set, and posting + to the queue set caused a higher priority task to + unblock. A context switch is required. */ + if( pxHigherPriorityTaskWoken != NULL ) + { + *pxHigherPriorityTaskWoken = pdTRUE; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE ) + { + if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE ) + { + /* The task waiting has a higher priority so + record that a context switch is required. */ + if( pxHigherPriorityTaskWoken != NULL ) + { + *pxHigherPriorityTaskWoken = pdTRUE; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + } + #else /* configUSE_QUEUE_SETS */ + { + if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE ) + { + if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE ) + { + /* The task waiting has a higher priority so record that a + context switch is required. */ + if( pxHigherPriorityTaskWoken != NULL ) + { + *pxHigherPriorityTaskWoken = pdTRUE; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + #endif /* configUSE_QUEUE_SETS */ + } + else + { + /* Increment the lock count so the task that unlocks the queue + knows that data was posted while it was locked. */ + pxQueue->cTxLock = ( int8_t ) ( cTxLock + 1 ); + } + + xReturn = pdPASS; + } + else + { + traceQUEUE_SEND_FROM_ISR_FAILED( pxQueue ); + xReturn = errQUEUE_FULL; + } + } + portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ); + + return xReturn; +} +/*-----------------------------------------------------------*/ + +BaseType_t xQueueGiveFromISR( QueueHandle_t xQueue, BaseType_t * const pxHigherPriorityTaskWoken ) +{ +BaseType_t xReturn; +UBaseType_t uxSavedInterruptStatus; +Queue_t * const pxQueue = xQueue; + + /* Similar to xQueueGenericSendFromISR() but used with semaphores where the + item size is 0. Don't directly wake a task that was blocked on a queue + read, instead return a flag to say whether a context switch is required or + not (i.e. has a task with a higher priority than us been woken by this + post). */ + + configASSERT( pxQueue ); + + /* xQueueGenericSendFromISR() should be used instead of xQueueGiveFromISR() + if the item size is not 0. 
*/ + configASSERT( pxQueue->uxItemSize == 0 ); + + /* Normally a mutex would not be given from an interrupt, especially if + there is a mutex holder, as priority inheritance makes no sense for an + interrupts, only tasks. */ + configASSERT( !( ( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX ) && ( pxQueue->u.xSemaphore.xMutexHolder != NULL ) ) ); + + /* RTOS ports that support interrupt nesting have the concept of a maximum + system call (or maximum API call) interrupt priority. Interrupts that are + above the maximum system call priority are kept permanently enabled, even + when the RTOS kernel is in a critical section, but cannot make any calls to + FreeRTOS API functions. If configASSERT() is defined in FreeRTOSConfig.h + then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion + failure if a FreeRTOS API function is called from an interrupt that has been + assigned a priority above the configured maximum system call priority. + Only FreeRTOS functions that end in FromISR can be called from interrupts + that have been assigned a priority at or (logically) below the maximum + system call interrupt priority. FreeRTOS maintains a separate interrupt + safe API to ensure interrupt entry is as fast and as simple as possible. + More information (albeit Cortex-M specific) is provided on the following + link: http://www.freertos.org/RTOS-Cortex-M3-M4.html */ + portASSERT_IF_INTERRUPT_PRIORITY_INVALID(); + + uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR(); + { + const UBaseType_t uxMessagesWaiting = pxQueue->uxMessagesWaiting; + + /* When the queue is used to implement a semaphore no data is ever + moved through the queue but it is still valid to see if the queue 'has + space'. */ + if( uxMessagesWaiting < pxQueue->uxLength ) + { + const int8_t cTxLock = pxQueue->cTxLock; + + traceQUEUE_SEND_FROM_ISR( pxQueue ); + + /* A task can only have an inherited priority if it is a mutex + holder - and if there is a mutex holder then the mutex cannot be + given from an ISR. As this is the ISR version of the function it + can be assumed there is no mutex holder and no need to determine if + priority disinheritance is needed. Simply increase the count of + messages (semaphores) available. */ + pxQueue->uxMessagesWaiting = uxMessagesWaiting + ( UBaseType_t ) 1; + + /* The event list is not altered if the queue is locked. This will + be done when the queue is unlocked later. */ + if( cTxLock == queueUNLOCKED ) + { + #if ( configUSE_QUEUE_SETS == 1 ) + { + if( pxQueue->pxQueueSetContainer != NULL ) + { + if( prvNotifyQueueSetContainer( pxQueue, queueSEND_TO_BACK ) != pdFALSE ) + { + /* The semaphore is a member of a queue set, and + posting to the queue set caused a higher priority + task to unblock. A context switch is required. */ + if( pxHigherPriorityTaskWoken != NULL ) + { + *pxHigherPriorityTaskWoken = pdTRUE; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE ) + { + if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE ) + { + /* The task waiting has a higher priority so + record that a context switch is required. 
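xQueueGiveFromISR() is what the xSemaphoreGiveFromISR() macro resolves to, and it underpins the usual deferred-interrupt pattern: the ISR only signals, a task does the work. A sketch with invented names (xEthRxReady assumed to be created at start-up with xSemaphoreCreateBinary()):

#include "FreeRTOS.h"
#include "semphr.h"

static SemaphoreHandle_t xEthRxReady;

void vExampleEthIrqHandler( void )
{
    BaseType_t xHigherPriorityTaskWoken = pdFALSE;

    /* Item size is 0, so only the semaphore count changes; no data is copied. */
    ( void ) xSemaphoreGiveFromISR( xEthRxReady, &xHigherPriorityTaskWoken );
    portYIELD_FROM_ISR( xHigherPriorityTaskWoken );
}

void vEthDeferredHandlerTask( void *pvParameters )
{
    ( void ) pvParameters;

    for( ;; )
    {
        if( xSemaphoreTake( xEthRxReady, portMAX_DELAY ) == pdPASS )
        {
            /* Process the received frames at task priority. */
        }
    }
}
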
*/ + if( pxHigherPriorityTaskWoken != NULL ) + { + *pxHigherPriorityTaskWoken = pdTRUE; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + } + #else /* configUSE_QUEUE_SETS */ + { + if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE ) + { + if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE ) + { + /* The task waiting has a higher priority so record that a + context switch is required. */ + if( pxHigherPriorityTaskWoken != NULL ) + { + *pxHigherPriorityTaskWoken = pdTRUE; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + #endif /* configUSE_QUEUE_SETS */ + } + else + { + /* Increment the lock count so the task that unlocks the queue + knows that data was posted while it was locked. */ + pxQueue->cTxLock = ( int8_t ) ( cTxLock + 1 ); + } + + xReturn = pdPASS; + } + else + { + traceQUEUE_SEND_FROM_ISR_FAILED( pxQueue ); + xReturn = errQUEUE_FULL; + } + } + portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ); + + return xReturn; +} +/*-----------------------------------------------------------*/ + +BaseType_t xQueueReceive( QueueHandle_t xQueue, void * const pvBuffer, TickType_t xTicksToWait ) +{ +BaseType_t xEntryTimeSet = pdFALSE; +TimeOut_t xTimeOut; +Queue_t * const pxQueue = xQueue; + + /* Check the pointer is not NULL. */ + configASSERT( ( pxQueue ) ); + + /* The buffer into which data is received can only be NULL if the data size + is zero (so no data is copied into the buffer. */ + configASSERT( !( ( ( pvBuffer ) == NULL ) && ( ( pxQueue )->uxItemSize != ( UBaseType_t ) 0U ) ) ); + + /* Cannot block if the scheduler is suspended. */ + #if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) ) + { + configASSERT( !( ( xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED ) && ( xTicksToWait != 0 ) ) ); + } + #endif + + + /*lint -save -e904 This function relaxes the coding standard somewhat to + allow return statements within the function itself. This is done in the + interest of execution time efficiency. */ + for( ;; ) + { + taskENTER_CRITICAL(); + { + const UBaseType_t uxMessagesWaiting = pxQueue->uxMessagesWaiting; + + /* Is there data in the queue now? To be running the calling task + must be the highest priority task wanting to access the queue. */ + if( uxMessagesWaiting > ( UBaseType_t ) 0 ) + { + /* Data available, remove one item. */ + prvCopyDataFromQueue( pxQueue, pvBuffer ); + traceQUEUE_RECEIVE( pxQueue ); + pxQueue->uxMessagesWaiting = uxMessagesWaiting - ( UBaseType_t ) 1; + + /* There is now space in the queue, were any tasks waiting to + post to the queue? If so, unblock the highest priority waiting + task. */ + if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE ) + { + if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE ) + { + queueYIELD_IF_USING_PREEMPTION(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + taskEXIT_CRITICAL(); + return pdPASS; + } + else + { + if( xTicksToWait == ( TickType_t ) 0 ) + { + /* The queue was empty and no block time is specified (or + the block time has expired) so leave now. 
*/ + taskEXIT_CRITICAL(); + traceQUEUE_RECEIVE_FAILED( pxQueue ); + return errQUEUE_EMPTY; + } + else if( xEntryTimeSet == pdFALSE ) + { + /* The queue was empty and a block time was specified so + configure the timeout structure. */ + vTaskInternalSetTimeOutState( &xTimeOut ); + xEntryTimeSet = pdTRUE; + } + else + { + /* Entry time was already set. */ + mtCOVERAGE_TEST_MARKER(); + } + } + } + taskEXIT_CRITICAL(); + + /* Interrupts and other tasks can send to and receive from the queue + now the critical section has been exited. */ + + vTaskSuspendAll(); + prvLockQueue( pxQueue ); + + /* Update the timeout state to see if it has expired yet. */ + if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE ) + { + /* The timeout has not expired. If the queue is still empty place + the task on the list of tasks waiting to receive from the queue. */ + if( prvIsQueueEmpty( pxQueue ) != pdFALSE ) + { + traceBLOCKING_ON_QUEUE_RECEIVE( pxQueue ); + vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait ); + prvUnlockQueue( pxQueue ); + if( xTaskResumeAll() == pdFALSE ) + { + portYIELD_WITHIN_API(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + /* The queue contains data again. Loop back to try and read the + data. */ + prvUnlockQueue( pxQueue ); + ( void ) xTaskResumeAll(); + } + } + else + { + /* Timed out. If there is no data in the queue exit, otherwise loop + back and attempt to read the data. */ + prvUnlockQueue( pxQueue ); + ( void ) xTaskResumeAll(); + + if( prvIsQueueEmpty( pxQueue ) != pdFALSE ) + { + traceQUEUE_RECEIVE_FAILED( pxQueue ); + return errQUEUE_EMPTY; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + } /*lint -restore */ +} +/*-----------------------------------------------------------*/ + +BaseType_t xQueueSemaphoreTake( QueueHandle_t xQueue, TickType_t xTicksToWait ) +{ +BaseType_t xEntryTimeSet = pdFALSE; +TimeOut_t xTimeOut; +Queue_t * const pxQueue = xQueue; + +#if( configUSE_MUTEXES == 1 ) + BaseType_t xInheritanceOccurred = pdFALSE; +#endif + + /* Check the queue pointer is not NULL. */ + configASSERT( ( pxQueue ) ); + + /* Check this really is a semaphore, in which case the item size will be + 0. */ + configASSERT( pxQueue->uxItemSize == 0 ); + + /* Cannot block if the scheduler is suspended. */ + #if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) ) + { + configASSERT( !( ( xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED ) && ( xTicksToWait != 0 ) ) ); + } + #endif + + + /*lint -save -e904 This function relaxes the coding standard somewhat to allow return + statements within the function itself. This is done in the interest + of execution time efficiency. */ + for( ;; ) + { + taskENTER_CRITICAL(); + { + /* Semaphores are queues with an item size of 0, and where the + number of messages in the queue is the semaphore's count value. */ + const UBaseType_t uxSemaphoreCount = pxQueue->uxMessagesWaiting; + + /* Is there data in the queue now? To be running the calling task + must be the highest priority task wanting to access the queue. */ + if( uxSemaphoreCount > ( UBaseType_t ) 0 ) + { + traceQUEUE_RECEIVE( pxQueue ); + + /* Semaphores are queues with a data size of zero and where the + messages waiting is the semaphore's count. Reduce the count. 
*/ + pxQueue->uxMessagesWaiting = uxSemaphoreCount - ( UBaseType_t ) 1; + + #if ( configUSE_MUTEXES == 1 ) + { + if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX ) + { + /* Record the information required to implement + priority inheritance should it become necessary. */ + pxQueue->u.xSemaphore.xMutexHolder = pvTaskIncrementMutexHeldCount(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + #endif /* configUSE_MUTEXES */ + + /* Check to see if other tasks are blocked waiting to give the + semaphore, and if so, unblock the highest priority such task. */ + if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE ) + { + if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE ) + { + queueYIELD_IF_USING_PREEMPTION(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + taskEXIT_CRITICAL(); + return pdPASS; + } + else + { + if( xTicksToWait == ( TickType_t ) 0 ) + { + /* For inheritance to have occurred there must have been an + initial timeout, and an adjusted timeout cannot become 0, as + if it were 0 the function would have exited. */ + #if( configUSE_MUTEXES == 1 ) + { + configASSERT( xInheritanceOccurred == pdFALSE ); + } + #endif /* configUSE_MUTEXES */ + + /* The semaphore count was 0 and no block time is specified + (or the block time has expired) so exit now. */ + taskEXIT_CRITICAL(); + traceQUEUE_RECEIVE_FAILED( pxQueue ); + return errQUEUE_EMPTY; + } + else if( xEntryTimeSet == pdFALSE ) + { + /* The semaphore count was 0 and a block time was specified + so configure the timeout structure ready to block. */ + vTaskInternalSetTimeOutState( &xTimeOut ); + xEntryTimeSet = pdTRUE; + } + else + { + /* Entry time was already set. */ + mtCOVERAGE_TEST_MARKER(); + } + } + } + taskEXIT_CRITICAL(); + + /* Interrupts and other tasks can give to and take from the semaphore + now the critical section has been exited. */ + + vTaskSuspendAll(); + prvLockQueue( pxQueue ); + + /* Update the timeout state to see if it has expired yet. */ + if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE ) + { + /* A block time is specified and not expired. If the semaphore + count is 0 then enter the Blocked state to wait for a semaphore to + become available. As semaphores are implemented with queues the + queue being empty is equivalent to the semaphore count being 0. */ + if( prvIsQueueEmpty( pxQueue ) != pdFALSE ) + { + traceBLOCKING_ON_QUEUE_RECEIVE( pxQueue ); + + #if ( configUSE_MUTEXES == 1 ) + { + if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX ) + { + taskENTER_CRITICAL(); + { + xInheritanceOccurred = xTaskPriorityInherit( pxQueue->u.xSemaphore.xMutexHolder ); + } + taskEXIT_CRITICAL(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + #endif + + vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait ); + prvUnlockQueue( pxQueue ); + if( xTaskResumeAll() == pdFALSE ) + { + portYIELD_WITHIN_API(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + /* There was no timeout and the semaphore count was not 0, so + attempt to take the semaphore again. */ + prvUnlockQueue( pxQueue ); + ( void ) xTaskResumeAll(); + } + } + else + { + /* Timed out. */ + prvUnlockQueue( pxQueue ); + ( void ) xTaskResumeAll(); + + /* If the semaphore count is 0 exit now as the timeout has + expired. Otherwise return to attempt to take the semaphore that is + known to be available. As semaphores are implemented by queues the + queue being empty is equivalent to the semaphore count being 0. 
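The inheritance bookkeeping above only matters when a mutex guards a shared resource: if a low-priority task holds the mutex while a higher-priority task blocks in the take, the holder temporarily runs at the waiter's priority. A guarded-access sketch (xSpiMutex assumed to be created with xSemaphoreCreateMutex(); names and the 50 ms timeout are illustrative):

#include <stdint.h>
#include "FreeRTOS.h"
#include "semphr.h"

static SemaphoreHandle_t xSpiMutex;

BaseType_t xSpiTransfer( const uint8_t *pucTx, uint8_t *pucRx, size_t xLen )
{
    BaseType_t xResult = pdFAIL;

    if( xSemaphoreTake( xSpiMutex, pdMS_TO_TICKS( 50 ) ) == pdPASS )
    {
        /* ... drive the SPI peripheral here ... */
        ( void ) pucTx; ( void ) pucRx; ( void ) xLen;
        xResult = pdPASS;

        ( void ) xSemaphoreGive( xSpiMutex );
    }

    return xResult;
}
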
*/ + if( prvIsQueueEmpty( pxQueue ) != pdFALSE ) + { + #if ( configUSE_MUTEXES == 1 ) + { + /* xInheritanceOccurred could only have be set if + pxQueue->uxQueueType == queueQUEUE_IS_MUTEX so no need to + test the mutex type again to check it is actually a mutex. */ + if( xInheritanceOccurred != pdFALSE ) + { + taskENTER_CRITICAL(); + { + UBaseType_t uxHighestWaitingPriority; + + /* This task blocking on the mutex caused another + task to inherit this task's priority. Now this task + has timed out the priority should be disinherited + again, but only as low as the next highest priority + task that is waiting for the same mutex. */ + uxHighestWaitingPriority = prvGetDisinheritPriorityAfterTimeout( pxQueue ); + vTaskPriorityDisinheritAfterTimeout( pxQueue->u.xSemaphore.xMutexHolder, uxHighestWaitingPriority ); + } + taskEXIT_CRITICAL(); + } + } + #endif /* configUSE_MUTEXES */ + + traceQUEUE_RECEIVE_FAILED( pxQueue ); + return errQUEUE_EMPTY; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + } /*lint -restore */ +} +/*-----------------------------------------------------------*/ + +BaseType_t xQueuePeek( QueueHandle_t xQueue, void * const pvBuffer, TickType_t xTicksToWait ) +{ +BaseType_t xEntryTimeSet = pdFALSE; +TimeOut_t xTimeOut; +int8_t *pcOriginalReadPosition; +Queue_t * const pxQueue = xQueue; + + /* Check the pointer is not NULL. */ + configASSERT( ( pxQueue ) ); + + /* The buffer into which data is received can only be NULL if the data size + is zero (so no data is copied into the buffer. */ + configASSERT( !( ( ( pvBuffer ) == NULL ) && ( ( pxQueue )->uxItemSize != ( UBaseType_t ) 0U ) ) ); + + /* Cannot block if the scheduler is suspended. */ + #if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) ) + { + configASSERT( !( ( xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED ) && ( xTicksToWait != 0 ) ) ); + } + #endif + + + /*lint -save -e904 This function relaxes the coding standard somewhat to + allow return statements within the function itself. This is done in the + interest of execution time efficiency. */ + for( ;; ) + { + taskENTER_CRITICAL(); + { + const UBaseType_t uxMessagesWaiting = pxQueue->uxMessagesWaiting; + + /* Is there data in the queue now? To be running the calling task + must be the highest priority task wanting to access the queue. */ + if( uxMessagesWaiting > ( UBaseType_t ) 0 ) + { + /* Remember the read position so it can be reset after the data + is read from the queue as this function is only peeking the + data, not removing it. */ + pcOriginalReadPosition = pxQueue->u.xQueue.pcReadFrom; + + prvCopyDataFromQueue( pxQueue, pvBuffer ); + traceQUEUE_PEEK( pxQueue ); + + /* The data is not being removed, so reset the read pointer. */ + pxQueue->u.xQueue.pcReadFrom = pcOriginalReadPosition; + + /* The data is being left in the queue, so see if there are + any other tasks waiting for the data. */ + if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE ) + { + if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE ) + { + /* The task waiting has a higher priority than this task. */ + queueYIELD_IF_USING_PREEMPTION(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + taskEXIT_CRITICAL(); + return pdPASS; + } + else + { + if( xTicksToWait == ( TickType_t ) 0 ) + { + /* The queue was empty and no block time is specified (or + the block time has expired) so leave now. 
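xQueuePeek() copies the item out but restores pcReadFrom, so the same item is still returned by the next receive. A sketch reusing the illustrative WheelCommand_t type from the earlier example and treating both wheel speeds being zero as a stop request (pure invention for the example):

BaseType_t xNextCommandIsStop( QueueHandle_t xCmdQueue )
{
    WheelCommand_t xCmd;

    /* Zero block time: just look, never wait. */
    if( xQueuePeek( xCmdQueue, &xCmd, ( TickType_t ) 0 ) == pdPASS )
    {
        if( ( xCmd.sLeftWheel == 0 ) && ( xCmd.sRightWheel == 0 ) )
        {
            return pdTRUE;
        }
    }

    return pdFALSE;
}
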
*/ + taskEXIT_CRITICAL(); + traceQUEUE_PEEK_FAILED( pxQueue ); + return errQUEUE_EMPTY; + } + else if( xEntryTimeSet == pdFALSE ) + { + /* The queue was empty and a block time was specified so + configure the timeout structure ready to enter the blocked + state. */ + vTaskInternalSetTimeOutState( &xTimeOut ); + xEntryTimeSet = pdTRUE; + } + else + { + /* Entry time was already set. */ + mtCOVERAGE_TEST_MARKER(); + } + } + } + taskEXIT_CRITICAL(); + + /* Interrupts and other tasks can send to and receive from the queue + now the critical section has been exited. */ + + vTaskSuspendAll(); + prvLockQueue( pxQueue ); + + /* Update the timeout state to see if it has expired yet. */ + if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE ) + { + /* Timeout has not expired yet, check to see if there is data in the + queue now, and if not enter the Blocked state to wait for data. */ + if( prvIsQueueEmpty( pxQueue ) != pdFALSE ) + { + traceBLOCKING_ON_QUEUE_PEEK( pxQueue ); + vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait ); + prvUnlockQueue( pxQueue ); + if( xTaskResumeAll() == pdFALSE ) + { + portYIELD_WITHIN_API(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + /* There is data in the queue now, so don't enter the blocked + state, instead return to try and obtain the data. */ + prvUnlockQueue( pxQueue ); + ( void ) xTaskResumeAll(); + } + } + else + { + /* The timeout has expired. If there is still no data in the queue + exit, otherwise go back and try to read the data again. */ + prvUnlockQueue( pxQueue ); + ( void ) xTaskResumeAll(); + + if( prvIsQueueEmpty( pxQueue ) != pdFALSE ) + { + traceQUEUE_PEEK_FAILED( pxQueue ); + return errQUEUE_EMPTY; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + } /*lint -restore */ +} +/*-----------------------------------------------------------*/ + +BaseType_t xQueueReceiveFromISR( QueueHandle_t xQueue, void * const pvBuffer, BaseType_t * const pxHigherPriorityTaskWoken ) +{ +BaseType_t xReturn; +UBaseType_t uxSavedInterruptStatus; +Queue_t * const pxQueue = xQueue; + + configASSERT( pxQueue ); + configASSERT( !( ( pvBuffer == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) ); + + /* RTOS ports that support interrupt nesting have the concept of a maximum + system call (or maximum API call) interrupt priority. Interrupts that are + above the maximum system call priority are kept permanently enabled, even + when the RTOS kernel is in a critical section, but cannot make any calls to + FreeRTOS API functions. If configASSERT() is defined in FreeRTOSConfig.h + then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion + failure if a FreeRTOS API function is called from an interrupt that has been + assigned a priority above the configured maximum system call priority. + Only FreeRTOS functions that end in FromISR can be called from interrupts + that have been assigned a priority at or (logically) below the maximum + system call interrupt priority. FreeRTOS maintains a separate interrupt + safe API to ensure interrupt entry is as fast and as simple as possible. + More information (albeit Cortex-M specific) is provided on the following + link: http://www.freertos.org/RTOS-Cortex-M3-M4.html */ + portASSERT_IF_INTERRUPT_PRIORITY_INVALID(); + + uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR(); + { + const UBaseType_t uxMessagesWaiting = pxQueue->uxMessagesWaiting; + + /* Cannot block in an ISR, so check there is data available. 
*/ + if( uxMessagesWaiting > ( UBaseType_t ) 0 ) + { + const int8_t cRxLock = pxQueue->cRxLock; + + traceQUEUE_RECEIVE_FROM_ISR( pxQueue ); + + prvCopyDataFromQueue( pxQueue, pvBuffer ); + pxQueue->uxMessagesWaiting = uxMessagesWaiting - ( UBaseType_t ) 1; + + /* If the queue is locked the event list will not be modified. + Instead update the lock count so the task that unlocks the queue + will know that an ISR has removed data while the queue was + locked. */ + if( cRxLock == queueUNLOCKED ) + { + if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE ) + { + if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE ) + { + /* The task waiting has a higher priority than us so + force a context switch. */ + if( pxHigherPriorityTaskWoken != NULL ) + { + *pxHigherPriorityTaskWoken = pdTRUE; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + /* Increment the lock count so the task that unlocks the queue + knows that data was removed while it was locked. */ + pxQueue->cRxLock = ( int8_t ) ( cRxLock + 1 ); + } + + xReturn = pdPASS; + } + else + { + xReturn = pdFAIL; + traceQUEUE_RECEIVE_FROM_ISR_FAILED( pxQueue ); + } + } + portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ); + + return xReturn; +} +/*-----------------------------------------------------------*/ + +BaseType_t xQueuePeekFromISR( QueueHandle_t xQueue, void * const pvBuffer ) +{ +BaseType_t xReturn; +UBaseType_t uxSavedInterruptStatus; +int8_t *pcOriginalReadPosition; +Queue_t * const pxQueue = xQueue; + + configASSERT( pxQueue ); + configASSERT( !( ( pvBuffer == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) ); + configASSERT( pxQueue->uxItemSize != 0 ); /* Can't peek a semaphore. */ + + /* RTOS ports that support interrupt nesting have the concept of a maximum + system call (or maximum API call) interrupt priority. Interrupts that are + above the maximum system call priority are kept permanently enabled, even + when the RTOS kernel is in a critical section, but cannot make any calls to + FreeRTOS API functions. If configASSERT() is defined in FreeRTOSConfig.h + then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion + failure if a FreeRTOS API function is called from an interrupt that has been + assigned a priority above the configured maximum system call priority. + Only FreeRTOS functions that end in FromISR can be called from interrupts + that have been assigned a priority at or (logically) below the maximum + system call interrupt priority. FreeRTOS maintains a separate interrupt + safe API to ensure interrupt entry is as fast and as simple as possible. + More information (albeit Cortex-M specific) is provided on the following + link: http://www.freertos.org/RTOS-Cortex-M3-M4.html */ + portASSERT_IF_INTERRUPT_PRIORITY_INVALID(); + + uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR(); + { + /* Cannot block in an ISR, so check there is data available. */ + if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 ) + { + traceQUEUE_PEEK_FROM_ISR( pxQueue ); + + /* Remember the read position so it can be reset as nothing is + actually being removed from the queue. 
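A sketch of the calling pattern xQueueReceiveFromISR() is written for: drain the queue inside an interrupt handler, then request a context switch if a higher priority task was unblocked. Assumes a Cortex-M style port; the handler name and queue handle are hypothetical.

    #include "FreeRTOS.h"
    #include "queue.h"

    extern QueueHandle_t xCommandQueue;   /* hypothetical, created at start-up */

    void EXTI15_10_IRQHandler( void )     /* hypothetical ISR */
    {
        BaseType_t xHigherPriorityTaskWoken = pdFALSE;
        uint8_t ucCommand;

        /* FromISR variants never block; they simply return pdFAIL when empty. */
        while( xQueueReceiveFromISR( xCommandQueue, &ucCommand, &xHigherPriorityTaskWoken ) == pdPASS )
        {
            /* ... process ucCommand ... */
        }

        /* Context switch now if removing data unblocked a higher priority sender. */
        portYIELD_FROM_ISR( xHigherPriorityTaskWoken );
    }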
*/ + pcOriginalReadPosition = pxQueue->u.xQueue.pcReadFrom; + prvCopyDataFromQueue( pxQueue, pvBuffer ); + pxQueue->u.xQueue.pcReadFrom = pcOriginalReadPosition; + + xReturn = pdPASS; + } + else + { + xReturn = pdFAIL; + traceQUEUE_PEEK_FROM_ISR_FAILED( pxQueue ); + } + } + portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ); + + return xReturn; +} +/*-----------------------------------------------------------*/ + +UBaseType_t uxQueueMessagesWaiting( const QueueHandle_t xQueue ) +{ +UBaseType_t uxReturn; + + configASSERT( xQueue ); + + taskENTER_CRITICAL(); + { + uxReturn = ( ( Queue_t * ) xQueue )->uxMessagesWaiting; + } + taskEXIT_CRITICAL(); + + return uxReturn; +} /*lint !e818 Pointer cannot be declared const as xQueue is a typedef not pointer. */ +/*-----------------------------------------------------------*/ + +UBaseType_t uxQueueSpacesAvailable( const QueueHandle_t xQueue ) +{ +UBaseType_t uxReturn; +Queue_t * const pxQueue = xQueue; + + configASSERT( pxQueue ); + + taskENTER_CRITICAL(); + { + uxReturn = pxQueue->uxLength - pxQueue->uxMessagesWaiting; + } + taskEXIT_CRITICAL(); + + return uxReturn; +} /*lint !e818 Pointer cannot be declared const as xQueue is a typedef not pointer. */ +/*-----------------------------------------------------------*/ + +UBaseType_t uxQueueMessagesWaitingFromISR( const QueueHandle_t xQueue ) +{ +UBaseType_t uxReturn; +Queue_t * const pxQueue = xQueue; + + configASSERT( pxQueue ); + uxReturn = pxQueue->uxMessagesWaiting; + + return uxReturn; +} /*lint !e818 Pointer cannot be declared const as xQueue is a typedef not pointer. */ +/*-----------------------------------------------------------*/ + +void vQueueDelete( QueueHandle_t xQueue ) +{ +Queue_t * const pxQueue = xQueue; + + configASSERT( pxQueue ); + traceQUEUE_DELETE( pxQueue ); + + #if ( configQUEUE_REGISTRY_SIZE > 0 ) + { + vQueueUnregisterQueue( pxQueue ); + } + #endif + + #if( ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 0 ) ) + { + /* The queue can only have been allocated dynamically - free it + again. */ + vPortFree( pxQueue ); + } + #elif( ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) ) + { + /* The queue could have been allocated statically or dynamically, so + check before attempting to free the memory. */ + if( pxQueue->ucStaticallyAllocated == ( uint8_t ) pdFALSE ) + { + vPortFree( pxQueue ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + #else + { + /* The queue must have been statically allocated, so is not going to be + deleted. Avoid compiler warnings about the unused parameter. 
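The occupancy queries above (uxQueueMessagesWaiting()/uxQueueSpacesAvailable()) are handy for lightweight diagnostics. A small sketch with a hypothetical monitor helper; note the two calls are separate critical sections, so the sum is only exact while no sender or receiver runs in between.

    #include <stdio.h>
    #include "FreeRTOS.h"
    #include "queue.h"

    static void vReportQueueUsage( QueueHandle_t xQueue )   /* hypothetical helper */
    {
        UBaseType_t uxUsed = uxQueueMessagesWaiting( xQueue );
        UBaseType_t uxFree = uxQueueSpacesAvailable( xQueue );

        /* uxUsed + uxFree equals the length passed to xQueueCreate() when the
        queue is quiescent. */
        printf( "queue: %lu used, %lu free\r\n",
                ( unsigned long ) uxUsed, ( unsigned long ) uxFree );
    }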
*/ + ( void ) pxQueue; + } + #endif /* configSUPPORT_DYNAMIC_ALLOCATION */ +} +/*-----------------------------------------------------------*/ + +#if ( configUSE_TRACE_FACILITY == 1 ) + + UBaseType_t uxQueueGetQueueNumber( QueueHandle_t xQueue ) + { + return ( ( Queue_t * ) xQueue )->uxQueueNumber; + } + +#endif /* configUSE_TRACE_FACILITY */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TRACE_FACILITY == 1 ) + + void vQueueSetQueueNumber( QueueHandle_t xQueue, UBaseType_t uxQueueNumber ) + { + ( ( Queue_t * ) xQueue )->uxQueueNumber = uxQueueNumber; + } + +#endif /* configUSE_TRACE_FACILITY */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TRACE_FACILITY == 1 ) + + uint8_t ucQueueGetQueueType( QueueHandle_t xQueue ) + { + return ( ( Queue_t * ) xQueue )->ucQueueType; + } + +#endif /* configUSE_TRACE_FACILITY */ +/*-----------------------------------------------------------*/ + +#if( configUSE_MUTEXES == 1 ) + + static UBaseType_t prvGetDisinheritPriorityAfterTimeout( const Queue_t * const pxQueue ) + { + UBaseType_t uxHighestPriorityOfWaitingTasks; + + /* If a task waiting for a mutex causes the mutex holder to inherit a + priority, but the waiting task times out, then the holder should + disinherit the priority - but only down to the highest priority of any + other tasks that are waiting for the same mutex. For this purpose, + return the priority of the highest priority task that is waiting for the + mutex. */ + if( listCURRENT_LIST_LENGTH( &( pxQueue->xTasksWaitingToReceive ) ) > 0U ) + { + uxHighestPriorityOfWaitingTasks = ( UBaseType_t ) configMAX_PRIORITIES - ( UBaseType_t ) listGET_ITEM_VALUE_OF_HEAD_ENTRY( &( pxQueue->xTasksWaitingToReceive ) ); + } + else + { + uxHighestPriorityOfWaitingTasks = tskIDLE_PRIORITY; + } + + return uxHighestPriorityOfWaitingTasks; + } + +#endif /* configUSE_MUTEXES */ +/*-----------------------------------------------------------*/ + +static BaseType_t prvCopyDataToQueue( Queue_t * const pxQueue, const void *pvItemToQueue, const BaseType_t xPosition ) +{ +BaseType_t xReturn = pdFALSE; +UBaseType_t uxMessagesWaiting; + + /* This function is called from a critical section. */ + + uxMessagesWaiting = pxQueue->uxMessagesWaiting; + + if( pxQueue->uxItemSize == ( UBaseType_t ) 0 ) + { + #if ( configUSE_MUTEXES == 1 ) + { + if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX ) + { + /* The mutex is no longer being held. */ + xReturn = xTaskPriorityDisinherit( pxQueue->u.xSemaphore.xMutexHolder ); + pxQueue->u.xSemaphore.xMutexHolder = NULL; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + #endif /* configUSE_MUTEXES */ + } + else if( xPosition == queueSEND_TO_BACK ) + { + ( void ) memcpy( ( void * ) pxQueue->pcWriteTo, pvItemToQueue, ( size_t ) pxQueue->uxItemSize ); /*lint !e961 !e418 !e9087 MISRA exception as the casts are only redundant for some ports, plus previous logic ensures a null pointer can only be passed to memcpy() if the copy size is 0. Cast to void required by function signature and safe as no alignment requirement and copy length specified in bytes. */ + pxQueue->pcWriteTo += pxQueue->uxItemSize; /*lint !e9016 Pointer arithmetic on char types ok, especially in this use case where it is the clearest way of conveying intent. */ + if( pxQueue->pcWriteTo >= pxQueue->u.xQueue.pcTail ) /*lint !e946 MISRA exception justified as comparison of pointers is the cleanest solution. 
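prvGetDisinheritPriorityAfterTimeout() above is only reached when a mutex take times out while the mutex is still held by a lower priority task. A hedged application-level sketch of that scenario (requires configUSE_MUTEXES == 1; the mutex and task names are hypothetical):

    #include "FreeRTOS.h"
    #include "semphr.h"

    static SemaphoreHandle_t xBusMutex;   /* hypothetical, from xSemaphoreCreateMutex() */

    static void vHighPriorityTask( void *pvParameters )
    {
        for( ;; )
        {
            /* A lower priority holder temporarily inherits this task's priority.
            If the 50 ms timeout expires, the kernel disinherits that priority
            again - but only down to the highest priority task still waiting on
            the same mutex, which is what the helper above computes. */
            if( xSemaphoreTake( xBusMutex, pdMS_TO_TICKS( 50 ) ) == pdPASS )
            {
                /* ... access the shared resource ... */
                xSemaphoreGive( xBusMutex );
            }
        }
    }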
*/ + { + pxQueue->pcWriteTo = pxQueue->pcHead; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + ( void ) memcpy( ( void * ) pxQueue->u.xQueue.pcReadFrom, pvItemToQueue, ( size_t ) pxQueue->uxItemSize ); /*lint !e961 !e9087 !e418 MISRA exception as the casts are only redundant for some ports. Cast to void required by function signature and safe as no alignment requirement and copy length specified in bytes. Assert checks null pointer only used when length is 0. */ + pxQueue->u.xQueue.pcReadFrom -= pxQueue->uxItemSize; + if( pxQueue->u.xQueue.pcReadFrom < pxQueue->pcHead ) /*lint !e946 MISRA exception justified as comparison of pointers is the cleanest solution. */ + { + pxQueue->u.xQueue.pcReadFrom = ( pxQueue->u.xQueue.pcTail - pxQueue->uxItemSize ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + if( xPosition == queueOVERWRITE ) + { + if( uxMessagesWaiting > ( UBaseType_t ) 0 ) + { + /* An item is not being added but overwritten, so subtract + one from the recorded number of items in the queue so when + one is added again below the number of recorded items remains + correct. */ + --uxMessagesWaiting; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + + pxQueue->uxMessagesWaiting = uxMessagesWaiting + ( UBaseType_t ) 1; + + return xReturn; +} +/*-----------------------------------------------------------*/ + +static void prvCopyDataFromQueue( Queue_t * const pxQueue, void * const pvBuffer ) +{ + if( pxQueue->uxItemSize != ( UBaseType_t ) 0 ) + { + pxQueue->u.xQueue.pcReadFrom += pxQueue->uxItemSize; /*lint !e9016 Pointer arithmetic on char types ok, especially in this use case where it is the clearest way of conveying intent. */ + if( pxQueue->u.xQueue.pcReadFrom >= pxQueue->u.xQueue.pcTail ) /*lint !e946 MISRA exception justified as use of the relational operator is the cleanest solutions. */ + { + pxQueue->u.xQueue.pcReadFrom = pxQueue->pcHead; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + ( void ) memcpy( ( void * ) pvBuffer, ( void * ) pxQueue->u.xQueue.pcReadFrom, ( size_t ) pxQueue->uxItemSize ); /*lint !e961 !e418 !e9087 MISRA exception as the casts are only redundant for some ports. Also previous logic ensures a null pointer can only be passed to memcpy() when the count is 0. Cast to void required by function signature and safe as no alignment requirement and copy length specified in bytes. */ + } +} +/*-----------------------------------------------------------*/ + +static void prvUnlockQueue( Queue_t * const pxQueue ) +{ + /* THIS FUNCTION MUST BE CALLED WITH THE SCHEDULER SUSPENDED. */ + + /* The lock counts contains the number of extra data items placed or + removed from the queue while the queue was locked. When a queue is + locked items can be added or removed, but the event lists cannot be + updated. */ + taskENTER_CRITICAL(); + { + int8_t cTxLock = pxQueue->cTxLock; + + /* See if data was added to the queue while it was locked. */ + while( cTxLock > queueLOCKED_UNMODIFIED ) + { + /* Data was posted while the queue was locked. Are any tasks + blocked waiting for data to become available? */ + #if ( configUSE_QUEUE_SETS == 1 ) + { + if( pxQueue->pxQueueSetContainer != NULL ) + { + if( prvNotifyQueueSetContainer( pxQueue, queueSEND_TO_BACK ) != pdFALSE ) + { + /* The queue is a member of a queue set, and posting to + the queue set caused a higher priority task to unblock. + A context switch is required. 
*/ + vTaskMissedYield(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + /* Tasks that are removed from the event list will get + added to the pending ready list as the scheduler is still + suspended. */ + if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE ) + { + if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE ) + { + /* The task waiting has a higher priority so record that a + context switch is required. */ + vTaskMissedYield(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + break; + } + } + } + #else /* configUSE_QUEUE_SETS */ + { + /* Tasks that are removed from the event list will get added to + the pending ready list as the scheduler is still suspended. */ + if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE ) + { + if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE ) + { + /* The task waiting has a higher priority so record that + a context switch is required. */ + vTaskMissedYield(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + break; + } + } + #endif /* configUSE_QUEUE_SETS */ + + --cTxLock; + } + + pxQueue->cTxLock = queueUNLOCKED; + } + taskEXIT_CRITICAL(); + + /* Do the same for the Rx lock. */ + taskENTER_CRITICAL(); + { + int8_t cRxLock = pxQueue->cRxLock; + + while( cRxLock > queueLOCKED_UNMODIFIED ) + { + if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE ) + { + if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE ) + { + vTaskMissedYield(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + --cRxLock; + } + else + { + break; + } + } + + pxQueue->cRxLock = queueUNLOCKED; + } + taskEXIT_CRITICAL(); +} +/*-----------------------------------------------------------*/ + +static BaseType_t prvIsQueueEmpty( const Queue_t *pxQueue ) +{ +BaseType_t xReturn; + + taskENTER_CRITICAL(); + { + if( pxQueue->uxMessagesWaiting == ( UBaseType_t ) 0 ) + { + xReturn = pdTRUE; + } + else + { + xReturn = pdFALSE; + } + } + taskEXIT_CRITICAL(); + + return xReturn; +} +/*-----------------------------------------------------------*/ + +BaseType_t xQueueIsQueueEmptyFromISR( const QueueHandle_t xQueue ) +{ +BaseType_t xReturn; +Queue_t * const pxQueue = xQueue; + + configASSERT( pxQueue ); + if( pxQueue->uxMessagesWaiting == ( UBaseType_t ) 0 ) + { + xReturn = pdTRUE; + } + else + { + xReturn = pdFALSE; + } + + return xReturn; +} /*lint !e818 xQueue could not be pointer to const because it is a typedef. */ +/*-----------------------------------------------------------*/ + +static BaseType_t prvIsQueueFull( const Queue_t *pxQueue ) +{ +BaseType_t xReturn; + + taskENTER_CRITICAL(); + { + if( pxQueue->uxMessagesWaiting == pxQueue->uxLength ) + { + xReturn = pdTRUE; + } + else + { + xReturn = pdFALSE; + } + } + taskEXIT_CRITICAL(); + + return xReturn; +} +/*-----------------------------------------------------------*/ + +BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue ) +{ +BaseType_t xReturn; +Queue_t * const pxQueue = xQueue; + + configASSERT( pxQueue ); + if( pxQueue->uxMessagesWaiting == pxQueue->uxLength ) + { + xReturn = pdTRUE; + } + else + { + xReturn = pdFALSE; + } + + return xReturn; +} /*lint !e818 xQueue could not be pointer to const because it is a typedef. 
*/ +/*-----------------------------------------------------------*/ + +#if ( configUSE_CO_ROUTINES == 1 ) + + BaseType_t xQueueCRSend( QueueHandle_t xQueue, const void *pvItemToQueue, TickType_t xTicksToWait ) + { + BaseType_t xReturn; + Queue_t * const pxQueue = xQueue; + + /* If the queue is already full we may have to block. A critical section + is required to prevent an interrupt removing something from the queue + between the check to see if the queue is full and blocking on the queue. */ + portDISABLE_INTERRUPTS(); + { + if( prvIsQueueFull( pxQueue ) != pdFALSE ) + { + /* The queue is full - do we want to block or just leave without + posting? */ + if( xTicksToWait > ( TickType_t ) 0 ) + { + /* As this is called from a coroutine we cannot block directly, but + return indicating that we need to block. */ + vCoRoutineAddToDelayedList( xTicksToWait, &( pxQueue->xTasksWaitingToSend ) ); + portENABLE_INTERRUPTS(); + return errQUEUE_BLOCKED; + } + else + { + portENABLE_INTERRUPTS(); + return errQUEUE_FULL; + } + } + } + portENABLE_INTERRUPTS(); + + portDISABLE_INTERRUPTS(); + { + if( pxQueue->uxMessagesWaiting < pxQueue->uxLength ) + { + /* There is room in the queue, copy the data into the queue. */ + prvCopyDataToQueue( pxQueue, pvItemToQueue, queueSEND_TO_BACK ); + xReturn = pdPASS; + + /* Were any co-routines waiting for data to become available? */ + if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE ) + { + /* In this instance the co-routine could be placed directly + into the ready list as we are within a critical section. + Instead the same pending ready list mechanism is used as if + the event were caused from within an interrupt. */ + if( xCoRoutineRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE ) + { + /* The co-routine waiting has a higher priority so record + that a yield might be appropriate. */ + xReturn = errQUEUE_YIELD; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + xReturn = errQUEUE_FULL; + } + } + portENABLE_INTERRUPTS(); + + return xReturn; + } + +#endif /* configUSE_CO_ROUTINES */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_CO_ROUTINES == 1 ) + + BaseType_t xQueueCRReceive( QueueHandle_t xQueue, void *pvBuffer, TickType_t xTicksToWait ) + { + BaseType_t xReturn; + Queue_t * const pxQueue = xQueue; + + /* If the queue is already empty we may have to block. A critical section + is required to prevent an interrupt adding something to the queue + between the check to see if the queue is empty and blocking on the queue. */ + portDISABLE_INTERRUPTS(); + { + if( pxQueue->uxMessagesWaiting == ( UBaseType_t ) 0 ) + { + /* There are no messages in the queue, do we want to block or just + leave with nothing? */ + if( xTicksToWait > ( TickType_t ) 0 ) + { + /* As this is a co-routine we cannot block directly, but return + indicating that we need to block. */ + vCoRoutineAddToDelayedList( xTicksToWait, &( pxQueue->xTasksWaitingToReceive ) ); + portENABLE_INTERRUPTS(); + return errQUEUE_BLOCKED; + } + else + { + portENABLE_INTERRUPTS(); + return errQUEUE_FULL; + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + portENABLE_INTERRUPTS(); + + portDISABLE_INTERRUPTS(); + { + if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 ) + { + /* Data is available from the queue. 
*/ + pxQueue->u.xQueue.pcReadFrom += pxQueue->uxItemSize; + if( pxQueue->u.xQueue.pcReadFrom >= pxQueue->u.xQueue.pcTail ) + { + pxQueue->u.xQueue.pcReadFrom = pxQueue->pcHead; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + --( pxQueue->uxMessagesWaiting ); + ( void ) memcpy( ( void * ) pvBuffer, ( void * ) pxQueue->u.xQueue.pcReadFrom, ( unsigned ) pxQueue->uxItemSize ); + + xReturn = pdPASS; + + /* Were any co-routines waiting for space to become available? */ + if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE ) + { + /* In this instance the co-routine could be placed directly + into the ready list as we are within a critical section. + Instead the same pending ready list mechanism is used as if + the event were caused from within an interrupt. */ + if( xCoRoutineRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE ) + { + xReturn = errQUEUE_YIELD; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + xReturn = pdFAIL; + } + } + portENABLE_INTERRUPTS(); + + return xReturn; + } + +#endif /* configUSE_CO_ROUTINES */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_CO_ROUTINES == 1 ) + + BaseType_t xQueueCRSendFromISR( QueueHandle_t xQueue, const void *pvItemToQueue, BaseType_t xCoRoutinePreviouslyWoken ) + { + Queue_t * const pxQueue = xQueue; + + /* Cannot block within an ISR so if there is no space on the queue then + exit without doing anything. */ + if( pxQueue->uxMessagesWaiting < pxQueue->uxLength ) + { + prvCopyDataToQueue( pxQueue, pvItemToQueue, queueSEND_TO_BACK ); + + /* We only want to wake one co-routine per ISR, so check that a + co-routine has not already been woken. */ + if( xCoRoutinePreviouslyWoken == pdFALSE ) + { + if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE ) + { + if( xCoRoutineRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE ) + { + return pdTRUE; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + return xCoRoutinePreviouslyWoken; + } + +#endif /* configUSE_CO_ROUTINES */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_CO_ROUTINES == 1 ) + + BaseType_t xQueueCRReceiveFromISR( QueueHandle_t xQueue, void *pvBuffer, BaseType_t *pxCoRoutineWoken ) + { + BaseType_t xReturn; + Queue_t * const pxQueue = xQueue; + + /* We cannot block from an ISR, so check there is data available. If + not then just leave without doing anything. */ + if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 ) + { + /* Copy the data from the queue. 
*/ + pxQueue->u.xQueue.pcReadFrom += pxQueue->uxItemSize; + if( pxQueue->u.xQueue.pcReadFrom >= pxQueue->u.xQueue.pcTail ) + { + pxQueue->u.xQueue.pcReadFrom = pxQueue->pcHead; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + --( pxQueue->uxMessagesWaiting ); + ( void ) memcpy( ( void * ) pvBuffer, ( void * ) pxQueue->u.xQueue.pcReadFrom, ( unsigned ) pxQueue->uxItemSize ); + + if( ( *pxCoRoutineWoken ) == pdFALSE ) + { + if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE ) + { + if( xCoRoutineRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE ) + { + *pxCoRoutineWoken = pdTRUE; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + xReturn = pdPASS; + } + else + { + xReturn = pdFAIL; + } + + return xReturn; + } + +#endif /* configUSE_CO_ROUTINES */ +/*-----------------------------------------------------------*/ + +#if ( configQUEUE_REGISTRY_SIZE > 0 ) + + void vQueueAddToRegistry( QueueHandle_t xQueue, const char *pcQueueName ) /*lint !e971 Unqualified char types are allowed for strings and single characters only. */ + { + UBaseType_t ux; + + /* See if there is an empty space in the registry. A NULL name denotes + a free slot. */ + for( ux = ( UBaseType_t ) 0U; ux < ( UBaseType_t ) configQUEUE_REGISTRY_SIZE; ux++ ) + { + if( xQueueRegistry[ ux ].pcQueueName == NULL ) + { + /* Store the information on this queue. */ + xQueueRegistry[ ux ].pcQueueName = pcQueueName; + xQueueRegistry[ ux ].xHandle = xQueue; + + traceQUEUE_REGISTRY_ADD( xQueue, pcQueueName ); + break; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + } + +#endif /* configQUEUE_REGISTRY_SIZE */ +/*-----------------------------------------------------------*/ + +#if ( configQUEUE_REGISTRY_SIZE > 0 ) + + const char *pcQueueGetName( QueueHandle_t xQueue ) /*lint !e971 Unqualified char types are allowed for strings and single characters only. */ + { + UBaseType_t ux; + const char *pcReturn = NULL; /*lint !e971 Unqualified char types are allowed for strings and single characters only. */ + + /* Note there is nothing here to protect against another task adding or + removing entries from the registry while it is being searched. */ + for( ux = ( UBaseType_t ) 0U; ux < ( UBaseType_t ) configQUEUE_REGISTRY_SIZE; ux++ ) + { + if( xQueueRegistry[ ux ].xHandle == xQueue ) + { + pcReturn = xQueueRegistry[ ux ].pcQueueName; + break; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + + return pcReturn; + } /*lint !e818 xQueue cannot be a pointer to const because it is a typedef. */ + +#endif /* configQUEUE_REGISTRY_SIZE */ +/*-----------------------------------------------------------*/ + +#if ( configQUEUE_REGISTRY_SIZE > 0 ) + + void vQueueUnregisterQueue( QueueHandle_t xQueue ) + { + UBaseType_t ux; + + /* See if the handle of the queue being unregistered in actually in the + registry. */ + for( ux = ( UBaseType_t ) 0U; ux < ( UBaseType_t ) configQUEUE_REGISTRY_SIZE; ux++ ) + { + if( xQueueRegistry[ ux ].xHandle == xQueue ) + { + /* Set the name to NULL to show that this slot if free again. */ + xQueueRegistry[ ux ].pcQueueName = NULL; + + /* Set the handle to NULL to ensure the same queue handle cannot + appear in the registry twice if it is added, removed, then + added again. */ + xQueueRegistry[ ux ].xHandle = ( QueueHandle_t ) 0; + break; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + + } /*lint !e818 xQueue could not be pointer to const because it is a typedef. 
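The registry functions above are compiled in only when configQUEUE_REGISTRY_SIZE is greater than zero; registering a handle simply makes it visible by name to kernel-aware debuggers. A minimal sketch (queue name and sizes are illustrative):

    #include "FreeRTOS.h"
    #include "queue.h"

    void vCreateApplicationQueues( void )   /* hypothetical start-up helper */
    {
        QueueHandle_t xTxQueue = xQueueCreate( 8, sizeof( uint32_t ) );

        /* The name string is not copied, so it must stay valid - a literal is fine. */
        vQueueAddToRegistry( xTxQueue, "TxQueue" );
        configASSERT( pcQueueGetName( xTxQueue ) != NULL );

        /* Call vQueueUnregisterQueue( xTxQueue ) before deleting the queue. */
    }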
*/ + +#endif /* configQUEUE_REGISTRY_SIZE */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + + void vQueueWaitForMessageRestricted( QueueHandle_t xQueue, TickType_t xTicksToWait, const BaseType_t xWaitIndefinitely ) + { + Queue_t * const pxQueue = xQueue; + + /* This function should not be called by application code hence the + 'Restricted' in its name. It is not part of the public API. It is + designed for use by kernel code, and has special calling requirements. + It can result in vListInsert() being called on a list that can only + possibly ever have one item in it, so the list will be fast, but even + so it should be called with the scheduler locked and not from a critical + section. */ + + /* Only do anything if there are no messages in the queue. This function + will not actually cause the task to block, just place it on a blocked + list. It will not block until the scheduler is unlocked - at which + time a yield will be performed. If an item is added to the queue while + the queue is locked, and the calling task blocks on the queue, then the + calling task will be immediately unblocked when the queue is unlocked. */ + prvLockQueue( pxQueue ); + if( pxQueue->uxMessagesWaiting == ( UBaseType_t ) 0U ) + { + /* There is nothing in the queue, block for the specified period. */ + vTaskPlaceOnEventListRestricted( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait, xWaitIndefinitely ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + prvUnlockQueue( pxQueue ); + } + +#endif /* configUSE_TIMERS */ +/*-----------------------------------------------------------*/ + +#if( ( configUSE_QUEUE_SETS == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) ) + + QueueSetHandle_t xQueueCreateSet( const UBaseType_t uxEventQueueLength ) + { + QueueSetHandle_t pxQueue; + + pxQueue = xQueueGenericCreate( uxEventQueueLength, ( UBaseType_t ) sizeof( Queue_t * ), queueQUEUE_TYPE_SET ); + + return pxQueue; + } + +#endif /* configUSE_QUEUE_SETS */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_QUEUE_SETS == 1 ) + + BaseType_t xQueueAddToSet( QueueSetMemberHandle_t xQueueOrSemaphore, QueueSetHandle_t xQueueSet ) + { + BaseType_t xReturn; + + taskENTER_CRITICAL(); + { + if( ( ( Queue_t * ) xQueueOrSemaphore )->pxQueueSetContainer != NULL ) + { + /* Cannot add a queue/semaphore to more than one queue set. */ + xReturn = pdFAIL; + } + else if( ( ( Queue_t * ) xQueueOrSemaphore )->uxMessagesWaiting != ( UBaseType_t ) 0 ) + { + /* Cannot add a queue/semaphore to a queue set if there are already + items in the queue/semaphore. */ + xReturn = pdFAIL; + } + else + { + ( ( Queue_t * ) xQueueOrSemaphore )->pxQueueSetContainer = xQueueSet; + xReturn = pdPASS; + } + } + taskEXIT_CRITICAL(); + + return xReturn; + } + +#endif /* configUSE_QUEUE_SETS */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_QUEUE_SETS == 1 ) + + BaseType_t xQueueRemoveFromSet( QueueSetMemberHandle_t xQueueOrSemaphore, QueueSetHandle_t xQueueSet ) + { + BaseType_t xReturn; + Queue_t * const pxQueueOrSemaphore = ( Queue_t * ) xQueueOrSemaphore; + + if( pxQueueOrSemaphore->pxQueueSetContainer != xQueueSet ) + { + /* The queue was not a member of the set. */ + xReturn = pdFAIL; + } + else if( pxQueueOrSemaphore->uxMessagesWaiting != ( UBaseType_t ) 0 ) + { + /* It is dangerous to remove a queue from a set when the queue is + not empty because the queue set will still hold pending events for + the queue. 
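The queue-set API (xQueueCreateSet()/xQueueAddToSet()/xQueueSelectFromSet()) lets one task block on several queues and semaphores at once when configUSE_QUEUE_SETS is 1. A usage sketch under those assumptions; the member handles and lengths are hypothetical, and members must be empty when added, as the code above enforces.

    #include "FreeRTOS.h"
    #include "queue.h"
    #include "semphr.h"

    extern QueueHandle_t xUartQueue;            /* hypothetical, length 8 */
    extern SemaphoreHandle_t xButtonSemaphore;  /* hypothetical binary semaphore */

    static void vDispatcherTask( void *pvParameters )
    {
        /* The set must hold one event per space in its members: 8 + 1 = 9. */
        QueueSetHandle_t xSet = xQueueCreateSet( 9 );
        QueueSetMemberHandle_t xActivated;

        ( void ) xQueueAddToSet( xUartQueue, xSet );
        ( void ) xQueueAddToSet( xButtonSemaphore, xSet );

        for( ;; )
        {
            /* Blocks until one member contains data, then read that member with
            a zero block time so the second call cannot block. */
            xActivated = xQueueSelectFromSet( xSet, portMAX_DELAY );

            if( xActivated == ( QueueSetMemberHandle_t ) xUartQueue )
            {
                /* ... xQueueReceive( xUartQueue, ..., 0 ) ... */
            }
            else if( xActivated == ( QueueSetMemberHandle_t ) xButtonSemaphore )
            {
                /* ... xSemaphoreTake( xButtonSemaphore, 0 ) ... */
            }
        }
    }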
*/ + xReturn = pdFAIL; + } + else + { + taskENTER_CRITICAL(); + { + /* The queue is no longer contained in the set. */ + pxQueueOrSemaphore->pxQueueSetContainer = NULL; + } + taskEXIT_CRITICAL(); + xReturn = pdPASS; + } + + return xReturn; + } /*lint !e818 xQueueSet could not be declared as pointing to const as it is a typedef. */ + +#endif /* configUSE_QUEUE_SETS */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_QUEUE_SETS == 1 ) + + QueueSetMemberHandle_t xQueueSelectFromSet( QueueSetHandle_t xQueueSet, TickType_t const xTicksToWait ) + { + QueueSetMemberHandle_t xReturn = NULL; + + ( void ) xQueueReceive( ( QueueHandle_t ) xQueueSet, &xReturn, xTicksToWait ); /*lint !e961 Casting from one typedef to another is not redundant. */ + return xReturn; + } + +#endif /* configUSE_QUEUE_SETS */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_QUEUE_SETS == 1 ) + + QueueSetMemberHandle_t xQueueSelectFromSetFromISR( QueueSetHandle_t xQueueSet ) + { + QueueSetMemberHandle_t xReturn = NULL; + + ( void ) xQueueReceiveFromISR( ( QueueHandle_t ) xQueueSet, &xReturn, NULL ); /*lint !e961 Casting from one typedef to another is not redundant. */ + return xReturn; + } + +#endif /* configUSE_QUEUE_SETS */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_QUEUE_SETS == 1 ) + + static BaseType_t prvNotifyQueueSetContainer( const Queue_t * const pxQueue, const BaseType_t xCopyPosition ) + { + Queue_t *pxQueueSetContainer = pxQueue->pxQueueSetContainer; + BaseType_t xReturn = pdFALSE; + + /* This function must be called form a critical section. */ + + configASSERT( pxQueueSetContainer ); + configASSERT( pxQueueSetContainer->uxMessagesWaiting < pxQueueSetContainer->uxLength ); + + if( pxQueueSetContainer->uxMessagesWaiting < pxQueueSetContainer->uxLength ) + { + const int8_t cTxLock = pxQueueSetContainer->cTxLock; + + traceQUEUE_SEND( pxQueueSetContainer ); + + /* The data copied is the handle of the queue that contains data. */ + xReturn = prvCopyDataToQueue( pxQueueSetContainer, &pxQueue, xCopyPosition ); + + if( cTxLock == queueUNLOCKED ) + { + if( listLIST_IS_EMPTY( &( pxQueueSetContainer->xTasksWaitingToReceive ) ) == pdFALSE ) + { + if( xTaskRemoveFromEventList( &( pxQueueSetContainer->xTasksWaitingToReceive ) ) != pdFALSE ) + { + /* The task waiting has a higher priority. */ + xReturn = pdTRUE; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + pxQueueSetContainer->cTxLock = ( int8_t ) ( cTxLock + 1 ); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + return xReturn; + } + +#endif /* configUSE_QUEUE_SETS */ + + + + + + + + + + + + diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/FreeRTOS/Source/stream_buffer.c b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/FreeRTOS/Source/stream_buffer.c new file mode 100644 index 0000000..8cda238 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/FreeRTOS/Source/stream_buffer.c @@ -0,0 +1,1263 @@ +/* + * FreeRTOS Kernel V10.2.1 + * Copyright (C) 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * http://www.FreeRTOS.org + * http://aws.amazon.com/freertos + * + * 1 tab == 4 spaces! + */ + +/* Standard includes. */ +#include <stdint.h> +#include <string.h> + +/* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE prevents task.h from redefining +all the API functions to use the MPU wrappers. That should only be done when +task.h is included from an application file. */ +#define MPU_WRAPPERS_INCLUDED_FROM_API_FILE + +/* FreeRTOS includes. */ +#include "FreeRTOS.h" +#include "task.h" +#include "stream_buffer.h" + +#if( configUSE_TASK_NOTIFICATIONS != 1 ) + #error configUSE_TASK_NOTIFICATIONS must be set to 1 to build stream_buffer.c +#endif + +/* Lint e961, e9021 and e750 are suppressed as a MISRA exception justified +because the MPU ports require MPU_WRAPPERS_INCLUDED_FROM_API_FILE to be defined +for the header files above, but not in this file, in order to generate the +correct privileged Vs unprivileged linkage and placement. */ +#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE /*lint !e961 !e750 !e9021. */ + +/* If the user has not provided application specific Rx notification macros, +or #defined the notification macros away, then provide default implementations +that use task notifications. */ +/*lint -save -e9026 Function like macros allowed and needed here so they can be overridden.
*/ +#ifndef sbRECEIVE_COMPLETED + #define sbRECEIVE_COMPLETED( pxStreamBuffer ) \ + vTaskSuspendAll(); \ + { \ + if( ( pxStreamBuffer )->xTaskWaitingToSend != NULL ) \ + { \ + ( void ) xTaskNotify( ( pxStreamBuffer )->xTaskWaitingToSend, \ + ( uint32_t ) 0, \ + eNoAction ); \ + ( pxStreamBuffer )->xTaskWaitingToSend = NULL; \ + } \ + } \ + ( void ) xTaskResumeAll(); +#endif /* sbRECEIVE_COMPLETED */ + +#ifndef sbRECEIVE_COMPLETED_FROM_ISR + #define sbRECEIVE_COMPLETED_FROM_ISR( pxStreamBuffer, \ + pxHigherPriorityTaskWoken ) \ + { \ + UBaseType_t uxSavedInterruptStatus; \ + \ + uxSavedInterruptStatus = ( UBaseType_t ) portSET_INTERRUPT_MASK_FROM_ISR(); \ + { \ + if( ( pxStreamBuffer )->xTaskWaitingToSend != NULL ) \ + { \ + ( void ) xTaskNotifyFromISR( ( pxStreamBuffer )->xTaskWaitingToSend, \ + ( uint32_t ) 0, \ + eNoAction, \ + pxHigherPriorityTaskWoken ); \ + ( pxStreamBuffer )->xTaskWaitingToSend = NULL; \ + } \ + } \ + portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ); \ + } +#endif /* sbRECEIVE_COMPLETED_FROM_ISR */ + +/* If the user has not provided an application specific Tx notification macro, +or #defined the notification macro away, them provide a default implementation +that uses task notifications. */ +#ifndef sbSEND_COMPLETED + #define sbSEND_COMPLETED( pxStreamBuffer ) \ + vTaskSuspendAll(); \ + { \ + if( ( pxStreamBuffer )->xTaskWaitingToReceive != NULL ) \ + { \ + ( void ) xTaskNotify( ( pxStreamBuffer )->xTaskWaitingToReceive, \ + ( uint32_t ) 0, \ + eNoAction ); \ + ( pxStreamBuffer )->xTaskWaitingToReceive = NULL; \ + } \ + } \ + ( void ) xTaskResumeAll(); +#endif /* sbSEND_COMPLETED */ + +#ifndef sbSEND_COMPLETE_FROM_ISR + #define sbSEND_COMPLETE_FROM_ISR( pxStreamBuffer, pxHigherPriorityTaskWoken ) \ + { \ + UBaseType_t uxSavedInterruptStatus; \ + \ + uxSavedInterruptStatus = ( UBaseType_t ) portSET_INTERRUPT_MASK_FROM_ISR(); \ + { \ + if( ( pxStreamBuffer )->xTaskWaitingToReceive != NULL ) \ + { \ + ( void ) xTaskNotifyFromISR( ( pxStreamBuffer )->xTaskWaitingToReceive, \ + ( uint32_t ) 0, \ + eNoAction, \ + pxHigherPriorityTaskWoken ); \ + ( pxStreamBuffer )->xTaskWaitingToReceive = NULL; \ + } \ + } \ + portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ); \ + } +#endif /* sbSEND_COMPLETE_FROM_ISR */ +/*lint -restore (9026) */ + +/* The number of bytes used to hold the length of a message in the buffer. */ +#define sbBYTES_TO_STORE_MESSAGE_LENGTH ( sizeof( configMESSAGE_BUFFER_LENGTH_TYPE ) ) + +/* Bits stored in the ucFlags field of the stream buffer. */ +#define sbFLAGS_IS_MESSAGE_BUFFER ( ( uint8_t ) 1 ) /* Set if the stream buffer was created as a message buffer, in which case it holds discrete messages rather than a stream. */ +#define sbFLAGS_IS_STATICALLY_ALLOCATED ( ( uint8_t ) 2 ) /* Set if the stream buffer was created using statically allocated memory. */ + +/*-----------------------------------------------------------*/ + +/* Structure that hold state information on the buffer. */ +typedef struct StreamBufferDef_t /*lint !e9058 Style convention uses tag. */ +{ + volatile size_t xTail; /* Index to the next item to read within the buffer. */ + volatile size_t xHead; /* Index to the next item to write within the buffer. */ + size_t xLength; /* The length of the buffer pointed to by pucBuffer. */ + size_t xTriggerLevelBytes; /* The number of bytes that must be in the stream buffer before a task that is waiting for data is unblocked. 
*/ + volatile TaskHandle_t xTaskWaitingToReceive; /* Holds the handle of a task waiting for data, or NULL if no tasks are waiting. */ + volatile TaskHandle_t xTaskWaitingToSend; /* Holds the handle of a task waiting to send data to a message buffer that is full. */ + uint8_t *pucBuffer; /* Points to the buffer itself - that is - the RAM that stores the data passed through the buffer. */ + uint8_t ucFlags; + + #if ( configUSE_TRACE_FACILITY == 1 ) + UBaseType_t uxStreamBufferNumber; /* Used for tracing purposes. */ + #endif +} StreamBuffer_t; + +/* + * The number of bytes available to be read from the buffer. + */ +static size_t prvBytesInBuffer( const StreamBuffer_t * const pxStreamBuffer ) PRIVILEGED_FUNCTION; + +/* + * Add xCount bytes from pucData into the pxStreamBuffer message buffer. + * Returns the number of bytes written, which will either equal xCount in the + * success case, or 0 if there was not enough space in the buffer (in which case + * no data is written into the buffer). + */ +static size_t prvWriteBytesToBuffer( StreamBuffer_t * const pxStreamBuffer, const uint8_t *pucData, size_t xCount ) PRIVILEGED_FUNCTION; + +/* + * If the stream buffer is being used as a message buffer, then reads an entire + * message out of the buffer. If the stream buffer is being used as a stream + * buffer then read as many bytes as possible from the buffer. + * prvReadBytesFromBuffer() is called to actually extract the bytes from the + * buffer's data storage area. + */ +static size_t prvReadMessageFromBuffer( StreamBuffer_t *pxStreamBuffer, + void *pvRxData, + size_t xBufferLengthBytes, + size_t xBytesAvailable, + size_t xBytesToStoreMessageLength ) PRIVILEGED_FUNCTION; + +/* + * If the stream buffer is being used as a message buffer, then writes an entire + * message to the buffer. If the stream buffer is being used as a stream + * buffer then write as many bytes as possible to the buffer. + * prvWriteBytestoBuffer() is called to actually send the bytes to the buffer's + * data storage area. + */ +static size_t prvWriteMessageToBuffer( StreamBuffer_t * const pxStreamBuffer, + const void * pvTxData, + size_t xDataLengthBytes, + size_t xSpace, + size_t xRequiredSpace ) PRIVILEGED_FUNCTION; + +/* + * Read xMaxCount bytes from the pxStreamBuffer message buffer and write them + * to pucData. + */ +static size_t prvReadBytesFromBuffer( StreamBuffer_t *pxStreamBuffer, + uint8_t *pucData, + size_t xMaxCount, + size_t xBytesAvailable ) PRIVILEGED_FUNCTION; + +/* + * Called by both pxStreamBufferCreate() and pxStreamBufferCreateStatic() to + * initialise the members of the newly created stream buffer structure. + */ +static void prvInitialiseNewStreamBuffer( StreamBuffer_t * const pxStreamBuffer, + uint8_t * const pucBuffer, + size_t xBufferSizeBytes, + size_t xTriggerLevelBytes, + uint8_t ucFlags ) PRIVILEGED_FUNCTION; + +/*-----------------------------------------------------------*/ + +#if( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) + + StreamBufferHandle_t xStreamBufferGenericCreate( size_t xBufferSizeBytes, size_t xTriggerLevelBytes, BaseType_t xIsMessageBuffer ) + { + uint8_t *pucAllocatedMemory; + uint8_t ucFlags; + + /* In case the stream buffer is going to be used as a message buffer + (that is, it will hold discrete messages with a little meta data that + says how big the next message is) check the buffer will be large enough + to hold at least one message. */ + if( xIsMessageBuffer == pdTRUE ) + { + /* Is a message buffer but not statically allocated. 
*/ + ucFlags = sbFLAGS_IS_MESSAGE_BUFFER; + configASSERT( xBufferSizeBytes > sbBYTES_TO_STORE_MESSAGE_LENGTH ); + } + else + { + /* Not a message buffer and not statically allocated. */ + ucFlags = 0; + configASSERT( xBufferSizeBytes > 0 ); + } + configASSERT( xTriggerLevelBytes <= xBufferSizeBytes ); + + /* A trigger level of 0 would cause a waiting task to unblock even when + the buffer was empty. */ + if( xTriggerLevelBytes == ( size_t ) 0 ) + { + xTriggerLevelBytes = ( size_t ) 1; + } + + /* A stream buffer requires a StreamBuffer_t structure and a buffer. + Both are allocated in a single call to pvPortMalloc(). The + StreamBuffer_t structure is placed at the start of the allocated memory + and the buffer follows immediately after. The requested size is + incremented so the free space is returned as the user would expect - + this is a quirk of the implementation that means otherwise the free + space would be reported as one byte smaller than would be logically + expected. */ + xBufferSizeBytes++; + pucAllocatedMemory = ( uint8_t * ) pvPortMalloc( xBufferSizeBytes + sizeof( StreamBuffer_t ) ); /*lint !e9079 malloc() only returns void*. */ + + if( pucAllocatedMemory != NULL ) + { + prvInitialiseNewStreamBuffer( ( StreamBuffer_t * ) pucAllocatedMemory, /* Structure at the start of the allocated memory. */ /*lint !e9087 Safe cast as allocated memory is aligned. */ /*lint !e826 Area is not too small and alignment is guaranteed provided malloc() behaves as expected and returns aligned buffer. */ + pucAllocatedMemory + sizeof( StreamBuffer_t ), /* Storage area follows. */ /*lint !e9016 Indexing past structure valid for uint8_t pointer, also storage area has no alignment requirement. */ + xBufferSizeBytes, + xTriggerLevelBytes, + ucFlags ); + + traceSTREAM_BUFFER_CREATE( ( ( StreamBuffer_t * ) pucAllocatedMemory ), xIsMessageBuffer ); + } + else + { + traceSTREAM_BUFFER_CREATE_FAILED( xIsMessageBuffer ); + } + + return ( StreamBufferHandle_t ) pucAllocatedMemory; /*lint !e9087 !e826 Safe cast as allocated memory is aligned. */ + } + +#endif /* configSUPPORT_DYNAMIC_ALLOCATION */ +/*-----------------------------------------------------------*/ + +#if( configSUPPORT_STATIC_ALLOCATION == 1 ) + + StreamBufferHandle_t xStreamBufferGenericCreateStatic( size_t xBufferSizeBytes, + size_t xTriggerLevelBytes, + BaseType_t xIsMessageBuffer, + uint8_t * const pucStreamBufferStorageArea, + StaticStreamBuffer_t * const pxStaticStreamBuffer ) + { + StreamBuffer_t * const pxStreamBuffer = ( StreamBuffer_t * ) pxStaticStreamBuffer; /*lint !e740 !e9087 Safe cast as StaticStreamBuffer_t is opaque Streambuffer_t. */ + StreamBufferHandle_t xReturn; + uint8_t ucFlags; + + configASSERT( pucStreamBufferStorageArea ); + configASSERT( pxStaticStreamBuffer ); + configASSERT( xTriggerLevelBytes <= xBufferSizeBytes ); + + /* A trigger level of 0 would cause a waiting task to unblock even when + the buffer was empty. */ + if( xTriggerLevelBytes == ( size_t ) 0 ) + { + xTriggerLevelBytes = ( size_t ) 1; + } + + if( xIsMessageBuffer != pdFALSE ) + { + /* Statically allocated message buffer. */ + ucFlags = sbFLAGS_IS_MESSAGE_BUFFER | sbFLAGS_IS_STATICALLY_ALLOCATED; + } + else + { + /* Statically allocated stream buffer. */ + ucFlags = sbFLAGS_IS_STATICALLY_ALLOCATED; + } + + /* In case the stream buffer is going to be used as a message buffer + (that is, it will hold discrete messages with a little meta data that + says how big the next message is) check the buffer will be large enough + to hold at least one message. 
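xStreamBufferGenericCreate() above is normally reached through the xStreamBufferCreate() and xMessageBufferCreate() convenience macros. A brief sketch of the dynamic-allocation path; the sizes and trigger level are illustrative only.

    #include "FreeRTOS.h"
    #include "stream_buffer.h"
    #include "message_buffer.h"

    void vCreateBuffers( void )   /* hypothetical start-up helper */
    {
        /* Stream buffer: 128 bytes of storage, reader unblocks once at least
        16 bytes are available (the trigger level). */
        StreamBufferHandle_t xStream = xStreamBufferCreate( 128, 16 );

        /* Message buffer: discrete messages, each prefixed internally with
        sizeof( configMESSAGE_BUFFER_LENGTH_TYPE ) length bytes. */
        MessageBufferHandle_t xMessages = xMessageBufferCreate( 128 );

        configASSERT( xStream != NULL );
        configASSERT( xMessages != NULL );
    }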
*/ + configASSERT( xBufferSizeBytes > sbBYTES_TO_STORE_MESSAGE_LENGTH ); + + #if( configASSERT_DEFINED == 1 ) + { + /* Sanity check that the size of the structure used to declare a + variable of type StaticStreamBuffer_t equals the size of the real + message buffer structure. */ + volatile size_t xSize = sizeof( StaticStreamBuffer_t ); + configASSERT( xSize == sizeof( StreamBuffer_t ) ); + } /*lint !e529 xSize is referenced is configASSERT() is defined. */ + #endif /* configASSERT_DEFINED */ + + if( ( pucStreamBufferStorageArea != NULL ) && ( pxStaticStreamBuffer != NULL ) ) + { + prvInitialiseNewStreamBuffer( pxStreamBuffer, + pucStreamBufferStorageArea, + xBufferSizeBytes, + xTriggerLevelBytes, + ucFlags ); + + /* Remember this was statically allocated in case it is ever deleted + again. */ + pxStreamBuffer->ucFlags |= sbFLAGS_IS_STATICALLY_ALLOCATED; + + traceSTREAM_BUFFER_CREATE( pxStreamBuffer, xIsMessageBuffer ); + + xReturn = ( StreamBufferHandle_t ) pxStaticStreamBuffer; /*lint !e9087 Data hiding requires cast to opaque type. */ + } + else + { + xReturn = NULL; + traceSTREAM_BUFFER_CREATE_STATIC_FAILED( xReturn, xIsMessageBuffer ); + } + + return xReturn; + } + +#endif /* ( configSUPPORT_STATIC_ALLOCATION == 1 ) */ +/*-----------------------------------------------------------*/ + +void vStreamBufferDelete( StreamBufferHandle_t xStreamBuffer ) +{ +StreamBuffer_t * pxStreamBuffer = xStreamBuffer; + + configASSERT( pxStreamBuffer ); + + traceSTREAM_BUFFER_DELETE( xStreamBuffer ); + + if( ( pxStreamBuffer->ucFlags & sbFLAGS_IS_STATICALLY_ALLOCATED ) == ( uint8_t ) pdFALSE ) + { + #if( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) + { + /* Both the structure and the buffer were allocated using a single call + to pvPortMalloc(), hence only one call to vPortFree() is required. */ + vPortFree( ( void * ) pxStreamBuffer ); /*lint !e9087 Standard free() semantics require void *, plus pxStreamBuffer was allocated by pvPortMalloc(). */ + } + #else + { + /* Should not be possible to get here, ucFlags must be corrupt. + Force an assert. */ + configASSERT( xStreamBuffer == ( StreamBufferHandle_t ) ~0 ); + } + #endif + } + else + { + /* The structure and buffer were not allocated dynamically and cannot be + freed - just scrub the structure so future use will assert. */ + ( void ) memset( pxStreamBuffer, 0x00, sizeof( StreamBuffer_t ) ); + } +} +/*-----------------------------------------------------------*/ + +BaseType_t xStreamBufferReset( StreamBufferHandle_t xStreamBuffer ) +{ +StreamBuffer_t * const pxStreamBuffer = xStreamBuffer; +BaseType_t xReturn = pdFAIL; + +#if( configUSE_TRACE_FACILITY == 1 ) + UBaseType_t uxStreamBufferNumber; +#endif + + configASSERT( pxStreamBuffer ); + + #if( configUSE_TRACE_FACILITY == 1 ) + { + /* Store the stream buffer number so it can be restored after the + reset. */ + uxStreamBufferNumber = pxStreamBuffer->uxStreamBufferNumber; + } + #endif + + /* Can only reset a message buffer if there are no tasks blocked on it. 
*/ + taskENTER_CRITICAL(); + { + if( pxStreamBuffer->xTaskWaitingToReceive == NULL ) + { + if( pxStreamBuffer->xTaskWaitingToSend == NULL ) + { + prvInitialiseNewStreamBuffer( pxStreamBuffer, + pxStreamBuffer->pucBuffer, + pxStreamBuffer->xLength, + pxStreamBuffer->xTriggerLevelBytes, + pxStreamBuffer->ucFlags ); + xReturn = pdPASS; + + #if( configUSE_TRACE_FACILITY == 1 ) + { + pxStreamBuffer->uxStreamBufferNumber = uxStreamBufferNumber; + } + #endif + + traceSTREAM_BUFFER_RESET( xStreamBuffer ); + } + } + } + taskEXIT_CRITICAL(); + + return xReturn; +} +/*-----------------------------------------------------------*/ + +BaseType_t xStreamBufferSetTriggerLevel( StreamBufferHandle_t xStreamBuffer, size_t xTriggerLevel ) +{ +StreamBuffer_t * const pxStreamBuffer = xStreamBuffer; +BaseType_t xReturn; + + configASSERT( pxStreamBuffer ); + + /* It is not valid for the trigger level to be 0. */ + if( xTriggerLevel == ( size_t ) 0 ) + { + xTriggerLevel = ( size_t ) 1; + } + + /* The trigger level is the number of bytes that must be in the stream + buffer before a task that is waiting for data is unblocked. */ + if( xTriggerLevel <= pxStreamBuffer->xLength ) + { + pxStreamBuffer->xTriggerLevelBytes = xTriggerLevel; + xReturn = pdPASS; + } + else + { + xReturn = pdFALSE; + } + + return xReturn; +} +/*-----------------------------------------------------------*/ + +size_t xStreamBufferSpacesAvailable( StreamBufferHandle_t xStreamBuffer ) +{ +const StreamBuffer_t * const pxStreamBuffer = xStreamBuffer; +size_t xSpace; + + configASSERT( pxStreamBuffer ); + + xSpace = pxStreamBuffer->xLength + pxStreamBuffer->xTail; + xSpace -= pxStreamBuffer->xHead; + xSpace -= ( size_t ) 1; + + if( xSpace >= pxStreamBuffer->xLength ) + { + xSpace -= pxStreamBuffer->xLength; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + return xSpace; +} +/*-----------------------------------------------------------*/ + +size_t xStreamBufferBytesAvailable( StreamBufferHandle_t xStreamBuffer ) +{ +const StreamBuffer_t * const pxStreamBuffer = xStreamBuffer; +size_t xReturn; + + configASSERT( pxStreamBuffer ); + + xReturn = prvBytesInBuffer( pxStreamBuffer ); + return xReturn; +} +/*-----------------------------------------------------------*/ + +size_t xStreamBufferSend( StreamBufferHandle_t xStreamBuffer, + const void *pvTxData, + size_t xDataLengthBytes, + TickType_t xTicksToWait ) +{ +StreamBuffer_t * const pxStreamBuffer = xStreamBuffer; +size_t xReturn, xSpace = 0; +size_t xRequiredSpace = xDataLengthBytes; +TimeOut_t xTimeOut; + + configASSERT( pvTxData ); + configASSERT( pxStreamBuffer ); + + /* This send function is used to write to both message buffers and stream + buffers. If this is a message buffer then the space needed must be + increased by the amount of bytes needed to store the length of the + message. */ + if( ( pxStreamBuffer->ucFlags & sbFLAGS_IS_MESSAGE_BUFFER ) != ( uint8_t ) 0 ) + { + xRequiredSpace += sbBYTES_TO_STORE_MESSAGE_LENGTH; + + /* Overflow? */ + configASSERT( xRequiredSpace > xDataLengthBytes ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + if( xTicksToWait != ( TickType_t ) 0 ) + { + vTaskSetTimeOutState( &xTimeOut ); + + do + { + /* Wait until the required number of bytes are free in the message + buffer. */ + taskENTER_CRITICAL(); + { + xSpace = xStreamBufferSpacesAvailable( pxStreamBuffer ); + + if( xSpace < xRequiredSpace ) + { + /* Clear notification state as going to wait for space. */ + ( void ) xTaskNotifyStateClear( NULL ); + + /* Should only be one writer. 
*/ + configASSERT( pxStreamBuffer->xTaskWaitingToSend == NULL ); + pxStreamBuffer->xTaskWaitingToSend = xTaskGetCurrentTaskHandle(); + } + else + { + taskEXIT_CRITICAL(); + break; + } + } + taskEXIT_CRITICAL(); + + traceBLOCKING_ON_STREAM_BUFFER_SEND( xStreamBuffer ); + ( void ) xTaskNotifyWait( ( uint32_t ) 0, ( uint32_t ) 0, NULL, xTicksToWait ); + pxStreamBuffer->xTaskWaitingToSend = NULL; + + } while( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + if( xSpace == ( size_t ) 0 ) + { + xSpace = xStreamBufferSpacesAvailable( pxStreamBuffer ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + xReturn = prvWriteMessageToBuffer( pxStreamBuffer, pvTxData, xDataLengthBytes, xSpace, xRequiredSpace ); + + if( xReturn > ( size_t ) 0 ) + { + traceSTREAM_BUFFER_SEND( xStreamBuffer, xReturn ); + + /* Was a task waiting for the data? */ + if( prvBytesInBuffer( pxStreamBuffer ) >= pxStreamBuffer->xTriggerLevelBytes ) + { + sbSEND_COMPLETED( pxStreamBuffer ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + traceSTREAM_BUFFER_SEND_FAILED( xStreamBuffer ); + } + + return xReturn; +} +/*-----------------------------------------------------------*/ + +size_t xStreamBufferSendFromISR( StreamBufferHandle_t xStreamBuffer, + const void *pvTxData, + size_t xDataLengthBytes, + BaseType_t * const pxHigherPriorityTaskWoken ) +{ +StreamBuffer_t * const pxStreamBuffer = xStreamBuffer; +size_t xReturn, xSpace; +size_t xRequiredSpace = xDataLengthBytes; + + configASSERT( pvTxData ); + configASSERT( pxStreamBuffer ); + + /* This send function is used to write to both message buffers and stream + buffers. If this is a message buffer then the space needed must be + increased by the amount of bytes needed to store the length of the + message. */ + if( ( pxStreamBuffer->ucFlags & sbFLAGS_IS_MESSAGE_BUFFER ) != ( uint8_t ) 0 ) + { + xRequiredSpace += sbBYTES_TO_STORE_MESSAGE_LENGTH; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + xSpace = xStreamBufferSpacesAvailable( pxStreamBuffer ); + xReturn = prvWriteMessageToBuffer( pxStreamBuffer, pvTxData, xDataLengthBytes, xSpace, xRequiredSpace ); + + if( xReturn > ( size_t ) 0 ) + { + /* Was a task waiting for the data? */ + if( prvBytesInBuffer( pxStreamBuffer ) >= pxStreamBuffer->xTriggerLevelBytes ) + { + sbSEND_COMPLETE_FROM_ISR( pxStreamBuffer, pxHigherPriorityTaskWoken ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + traceSTREAM_BUFFER_SEND_FROM_ISR( xStreamBuffer, xReturn ); + + return xReturn; +} +/*-----------------------------------------------------------*/ + +static size_t prvWriteMessageToBuffer( StreamBuffer_t * const pxStreamBuffer, + const void * pvTxData, + size_t xDataLengthBytes, + size_t xSpace, + size_t xRequiredSpace ) +{ + BaseType_t xShouldWrite; + size_t xReturn; + + if( xSpace == ( size_t ) 0 ) + { + /* Doesn't matter if this is a stream buffer or a message buffer, there + is no space to write. */ + xShouldWrite = pdFALSE; + } + else if( ( pxStreamBuffer->ucFlags & sbFLAGS_IS_MESSAGE_BUFFER ) == ( uint8_t ) 0 ) + { + /* This is a stream buffer, as opposed to a message buffer, so writing a + stream of bytes rather than discrete messages. Write as many bytes as + possible. 
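A sketch of the two send paths implemented above: a task-level send that may block until space appears, and an ISR-level send paired with a context-switch request. The buffer handle and helper names are hypothetical.

    #include <string.h>
    #include "FreeRTOS.h"
    #include "stream_buffer.h"

    extern StreamBufferHandle_t xLogBuffer;   /* hypothetical, created elsewhere */

    void vLogFromTask( const char *pcMsg )
    {
        /* May block for up to 10 ms waiting for enough free space. */
        ( void ) xStreamBufferSend( xLogBuffer, pcMsg, strlen( pcMsg ), pdMS_TO_TICKS( 10 ) );
    }

    void vLogFromIsr( const char *pcMsg )
    {
        BaseType_t xHigherPriorityTaskWoken = pdFALSE;

        /* Never blocks; writes as many bytes as currently fit. */
        ( void ) xStreamBufferSendFromISR( xLogBuffer, pcMsg, strlen( pcMsg ),
                                           &xHigherPriorityTaskWoken );
        portYIELD_FROM_ISR( xHigherPriorityTaskWoken );
    }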
*/ + xShouldWrite = pdTRUE; + xDataLengthBytes = configMIN( xDataLengthBytes, xSpace ); + } + else if( xSpace >= xRequiredSpace ) + { + /* This is a message buffer, as opposed to a stream buffer, and there + is enough space to write both the message length and the message itself + into the buffer. Start by writing the length of the data, the data + itself will be written later in this function. */ + xShouldWrite = pdTRUE; + ( void ) prvWriteBytesToBuffer( pxStreamBuffer, ( const uint8_t * ) &( xDataLengthBytes ), sbBYTES_TO_STORE_MESSAGE_LENGTH ); + } + else + { + /* There is space available, but not enough space. */ + xShouldWrite = pdFALSE; + } + + if( xShouldWrite != pdFALSE ) + { + /* Writes the data itself. */ + xReturn = prvWriteBytesToBuffer( pxStreamBuffer, ( const uint8_t * ) pvTxData, xDataLengthBytes ); /*lint !e9079 Storage buffer is implemented as uint8_t for ease of sizing, alighment and access. */ + } + else + { + xReturn = 0; + } + + return xReturn; +} +/*-----------------------------------------------------------*/ + +size_t xStreamBufferReceive( StreamBufferHandle_t xStreamBuffer, + void *pvRxData, + size_t xBufferLengthBytes, + TickType_t xTicksToWait ) +{ +StreamBuffer_t * const pxStreamBuffer = xStreamBuffer; +size_t xReceivedLength = 0, xBytesAvailable, xBytesToStoreMessageLength; + + configASSERT( pvRxData ); + configASSERT( pxStreamBuffer ); + + /* This receive function is used by both message buffers, which store + discrete messages, and stream buffers, which store a continuous stream of + bytes. Discrete messages include an additional + sbBYTES_TO_STORE_MESSAGE_LENGTH bytes that hold the length of the + message. */ + if( ( pxStreamBuffer->ucFlags & sbFLAGS_IS_MESSAGE_BUFFER ) != ( uint8_t ) 0 ) + { + xBytesToStoreMessageLength = sbBYTES_TO_STORE_MESSAGE_LENGTH; + } + else + { + xBytesToStoreMessageLength = 0; + } + + if( xTicksToWait != ( TickType_t ) 0 ) + { + /* Checking if there is data and clearing the notification state must be + performed atomically. */ + taskENTER_CRITICAL(); + { + xBytesAvailable = prvBytesInBuffer( pxStreamBuffer ); + + /* If this function was invoked by a message buffer read then + xBytesToStoreMessageLength holds the number of bytes used to hold + the length of the next discrete message. If this function was + invoked by a stream buffer read then xBytesToStoreMessageLength will + be 0. */ + if( xBytesAvailable <= xBytesToStoreMessageLength ) + { + /* Clear notification state as going to wait for data. */ + ( void ) xTaskNotifyStateClear( NULL ); + + /* Should only be one reader. */ + configASSERT( pxStreamBuffer->xTaskWaitingToReceive == NULL ); + pxStreamBuffer->xTaskWaitingToReceive = xTaskGetCurrentTaskHandle(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + taskEXIT_CRITICAL(); + + if( xBytesAvailable <= xBytesToStoreMessageLength ) + { + /* Wait for data to be available. */ + traceBLOCKING_ON_STREAM_BUFFER_RECEIVE( xStreamBuffer ); + ( void ) xTaskNotifyWait( ( uint32_t ) 0, ( uint32_t ) 0, NULL, xTicksToWait ); + pxStreamBuffer->xTaskWaitingToReceive = NULL; + + /* Recheck the data available after blocking. 
*/ + xBytesAvailable = prvBytesInBuffer( pxStreamBuffer ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + xBytesAvailable = prvBytesInBuffer( pxStreamBuffer ); + } + + /* Whether receiving a discrete message (where xBytesToStoreMessageLength + holds the number of bytes used to store the message length) or a stream of + bytes (where xBytesToStoreMessageLength is zero), the number of bytes + available must be greater than xBytesToStoreMessageLength to be able to + read bytes from the buffer. */ + if( xBytesAvailable > xBytesToStoreMessageLength ) + { + xReceivedLength = prvReadMessageFromBuffer( pxStreamBuffer, pvRxData, xBufferLengthBytes, xBytesAvailable, xBytesToStoreMessageLength ); + + /* Was a task waiting for space in the buffer? */ + if( xReceivedLength != ( size_t ) 0 ) + { + traceSTREAM_BUFFER_RECEIVE( xStreamBuffer, xReceivedLength ); + sbRECEIVE_COMPLETED( pxStreamBuffer ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + traceSTREAM_BUFFER_RECEIVE_FAILED( xStreamBuffer ); + mtCOVERAGE_TEST_MARKER(); + } + + return xReceivedLength; +} +/*-----------------------------------------------------------*/ + +size_t xStreamBufferNextMessageLengthBytes( StreamBufferHandle_t xStreamBuffer ) +{ +StreamBuffer_t * const pxStreamBuffer = xStreamBuffer; +size_t xReturn, xBytesAvailable, xOriginalTail; +configMESSAGE_BUFFER_LENGTH_TYPE xTempReturn; + + configASSERT( pxStreamBuffer ); + + /* Ensure the stream buffer is being used as a message buffer. */ + if( ( pxStreamBuffer->ucFlags & sbFLAGS_IS_MESSAGE_BUFFER ) != ( uint8_t ) 0 ) + { + xBytesAvailable = prvBytesInBuffer( pxStreamBuffer ); + if( xBytesAvailable > sbBYTES_TO_STORE_MESSAGE_LENGTH ) + { + /* The number of bytes available is greater than the number of bytes + required to hold the length of the next message, so another message + is available. Return its length without removing the length bytes + from the buffer. A copy of the tail is stored so the buffer can be + returned to its prior state as the message is not actually being + removed from the buffer. */ + xOriginalTail = pxStreamBuffer->xTail; + ( void ) prvReadBytesFromBuffer( pxStreamBuffer, ( uint8_t * ) &xTempReturn, sbBYTES_TO_STORE_MESSAGE_LENGTH, xBytesAvailable ); + xReturn = ( size_t ) xTempReturn; + pxStreamBuffer->xTail = xOriginalTail; + } + else + { + /* The minimum amount of bytes in a message buffer is + ( sbBYTES_TO_STORE_MESSAGE_LENGTH + 1 ), so if xBytesAvailable is + less than sbBYTES_TO_STORE_MESSAGE_LENGTH the only other valid + value is 0. */ + configASSERT( xBytesAvailable == 0 ); + xReturn = 0; + } + } + else + { + xReturn = 0; + } + + return xReturn; +} +/*-----------------------------------------------------------*/ + +size_t xStreamBufferReceiveFromISR( StreamBufferHandle_t xStreamBuffer, + void *pvRxData, + size_t xBufferLengthBytes, + BaseType_t * const pxHigherPriorityTaskWoken ) +{ +StreamBuffer_t * const pxStreamBuffer = xStreamBuffer; +size_t xReceivedLength = 0, xBytesAvailable, xBytesToStoreMessageLength; + + configASSERT( pvRxData ); + configASSERT( pxStreamBuffer ); + + /* This receive function is used by both message buffers, which store + discrete messages, and stream buffers, which store a continuous stream of + bytes. Discrete messages include an additional + sbBYTES_TO_STORE_MESSAGE_LENGTH bytes that hold the length of the + message. 
*/ + if( ( pxStreamBuffer->ucFlags & sbFLAGS_IS_MESSAGE_BUFFER ) != ( uint8_t ) 0 ) + { + xBytesToStoreMessageLength = sbBYTES_TO_STORE_MESSAGE_LENGTH; + } + else + { + xBytesToStoreMessageLength = 0; + } + + xBytesAvailable = prvBytesInBuffer( pxStreamBuffer ); + + /* Whether receiving a discrete message (where xBytesToStoreMessageLength + holds the number of bytes used to store the message length) or a stream of + bytes (where xBytesToStoreMessageLength is zero), the number of bytes + available must be greater than xBytesToStoreMessageLength to be able to + read bytes from the buffer. */ + if( xBytesAvailable > xBytesToStoreMessageLength ) + { + xReceivedLength = prvReadMessageFromBuffer( pxStreamBuffer, pvRxData, xBufferLengthBytes, xBytesAvailable, xBytesToStoreMessageLength ); + + /* Was a task waiting for space in the buffer? */ + if( xReceivedLength != ( size_t ) 0 ) + { + sbRECEIVE_COMPLETED_FROM_ISR( pxStreamBuffer, pxHigherPriorityTaskWoken ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + traceSTREAM_BUFFER_RECEIVE_FROM_ISR( xStreamBuffer, xReceivedLength ); + + return xReceivedLength; +} +/*-----------------------------------------------------------*/ + +static size_t prvReadMessageFromBuffer( StreamBuffer_t *pxStreamBuffer, + void *pvRxData, + size_t xBufferLengthBytes, + size_t xBytesAvailable, + size_t xBytesToStoreMessageLength ) +{ +size_t xOriginalTail, xReceivedLength, xNextMessageLength; +configMESSAGE_BUFFER_LENGTH_TYPE xTempNextMessageLength; + + if( xBytesToStoreMessageLength != ( size_t ) 0 ) + { + /* A discrete message is being received. First receive the length + of the message. A copy of the tail is stored so the buffer can be + returned to its prior state if the length of the message is too + large for the provided buffer. */ + xOriginalTail = pxStreamBuffer->xTail; + ( void ) prvReadBytesFromBuffer( pxStreamBuffer, ( uint8_t * ) &xTempNextMessageLength, xBytesToStoreMessageLength, xBytesAvailable ); + xNextMessageLength = ( size_t ) xTempNextMessageLength; + + /* Reduce the number of bytes available by the number of bytes just + read out. */ + xBytesAvailable -= xBytesToStoreMessageLength; + + /* Check there is enough space in the buffer provided by the + user. */ + if( xNextMessageLength > xBufferLengthBytes ) + { + /* The user has provided insufficient space to read the message + so return the buffer to its previous state (so the length of + the message is in the buffer again). */ + pxStreamBuffer->xTail = xOriginalTail; + xNextMessageLength = 0; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + /* A stream of bytes is being received (as opposed to a discrete + message), so read as many bytes as possible. */ + xNextMessageLength = xBufferLengthBytes; + } + + /* Read the actual data. */ + xReceivedLength = prvReadBytesFromBuffer( pxStreamBuffer, ( uint8_t * ) pvRxData, xNextMessageLength, xBytesAvailable ); /*lint !e9079 Data storage area is implemented as uint8_t array for ease of sizing, indexing and alignment. */ + + return xReceivedLength; +} +/*-----------------------------------------------------------*/ + +BaseType_t xStreamBufferIsEmpty( StreamBufferHandle_t xStreamBuffer ) +{ +const StreamBuffer_t * const pxStreamBuffer = xStreamBuffer; +BaseType_t xReturn; +size_t xTail; + + configASSERT( pxStreamBuffer ); + + /* True if no bytes are available. 
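+ The buffer is empty only when the head and tail indices are equal; one byte of the storage area is always left unused so that a full buffer is never mistaken for an empty one.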
*/ + xTail = pxStreamBuffer->xTail; + if( pxStreamBuffer->xHead == xTail ) + { + xReturn = pdTRUE; + } + else + { + xReturn = pdFALSE; + } + + return xReturn; +} +/*-----------------------------------------------------------*/ + +BaseType_t xStreamBufferIsFull( StreamBufferHandle_t xStreamBuffer ) +{ +BaseType_t xReturn; +size_t xBytesToStoreMessageLength; +const StreamBuffer_t * const pxStreamBuffer = xStreamBuffer; + + configASSERT( pxStreamBuffer ); + + /* This generic version of the receive function is used by both message + buffers, which store discrete messages, and stream buffers, which store a + continuous stream of bytes. Discrete messages include an additional + sbBYTES_TO_STORE_MESSAGE_LENGTH bytes that hold the length of the message. */ + if( ( pxStreamBuffer->ucFlags & sbFLAGS_IS_MESSAGE_BUFFER ) != ( uint8_t ) 0 ) + { + xBytesToStoreMessageLength = sbBYTES_TO_STORE_MESSAGE_LENGTH; + } + else + { + xBytesToStoreMessageLength = 0; + } + + /* True if the available space equals zero. */ + if( xStreamBufferSpacesAvailable( xStreamBuffer ) <= xBytesToStoreMessageLength ) + { + xReturn = pdTRUE; + } + else + { + xReturn = pdFALSE; + } + + return xReturn; +} +/*-----------------------------------------------------------*/ + +BaseType_t xStreamBufferSendCompletedFromISR( StreamBufferHandle_t xStreamBuffer, BaseType_t *pxHigherPriorityTaskWoken ) +{ +StreamBuffer_t * const pxStreamBuffer = xStreamBuffer; +BaseType_t xReturn; +UBaseType_t uxSavedInterruptStatus; + + configASSERT( pxStreamBuffer ); + + uxSavedInterruptStatus = ( UBaseType_t ) portSET_INTERRUPT_MASK_FROM_ISR(); + { + if( ( pxStreamBuffer )->xTaskWaitingToReceive != NULL ) + { + ( void ) xTaskNotifyFromISR( ( pxStreamBuffer )->xTaskWaitingToReceive, + ( uint32_t ) 0, + eNoAction, + pxHigherPriorityTaskWoken ); + ( pxStreamBuffer )->xTaskWaitingToReceive = NULL; + xReturn = pdTRUE; + } + else + { + xReturn = pdFALSE; + } + } + portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ); + + return xReturn; +} +/*-----------------------------------------------------------*/ + +BaseType_t xStreamBufferReceiveCompletedFromISR( StreamBufferHandle_t xStreamBuffer, BaseType_t *pxHigherPriorityTaskWoken ) +{ +StreamBuffer_t * const pxStreamBuffer = xStreamBuffer; +BaseType_t xReturn; +UBaseType_t uxSavedInterruptStatus; + + configASSERT( pxStreamBuffer ); + + uxSavedInterruptStatus = ( UBaseType_t ) portSET_INTERRUPT_MASK_FROM_ISR(); + { + if( ( pxStreamBuffer )->xTaskWaitingToSend != NULL ) + { + ( void ) xTaskNotifyFromISR( ( pxStreamBuffer )->xTaskWaitingToSend, + ( uint32_t ) 0, + eNoAction, + pxHigherPriorityTaskWoken ); + ( pxStreamBuffer )->xTaskWaitingToSend = NULL; + xReturn = pdTRUE; + } + else + { + xReturn = pdFALSE; + } + } + portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ); + + return xReturn; +} +/*-----------------------------------------------------------*/ + +static size_t prvWriteBytesToBuffer( StreamBuffer_t * const pxStreamBuffer, const uint8_t *pucData, size_t xCount ) +{ +size_t xNextHead, xFirstLength; + + configASSERT( xCount > ( size_t ) 0 ); + + xNextHead = pxStreamBuffer->xHead; + + /* Calculate the number of bytes that can be added in the first write - + which may be less than the total number of bytes that need to be added if + the buffer will wrap back to the beginning. */ + xFirstLength = configMIN( pxStreamBuffer->xLength - xNextHead, xCount ); + + /* Write as many bytes as can be written in the first write. 
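+ If the data wraps past the end of the storage area, the second memcpy() below copies the remaining bytes to the start of the buffer.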
*/ + configASSERT( ( xNextHead + xFirstLength ) <= pxStreamBuffer->xLength ); + ( void ) memcpy( ( void* ) ( &( pxStreamBuffer->pucBuffer[ xNextHead ] ) ), ( const void * ) pucData, xFirstLength ); /*lint !e9087 memcpy() requires void *. */ + + /* If the number of bytes written was less than the number that could be + written in the first write... */ + if( xCount > xFirstLength ) + { + /* ...then write the remaining bytes to the start of the buffer. */ + configASSERT( ( xCount - xFirstLength ) <= pxStreamBuffer->xLength ); + ( void ) memcpy( ( void * ) pxStreamBuffer->pucBuffer, ( const void * ) &( pucData[ xFirstLength ] ), xCount - xFirstLength ); /*lint !e9087 memcpy() requires void *. */ + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + xNextHead += xCount; + if( xNextHead >= pxStreamBuffer->xLength ) + { + xNextHead -= pxStreamBuffer->xLength; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + pxStreamBuffer->xHead = xNextHead; + + return xCount; +} +/*-----------------------------------------------------------*/ + +static size_t prvReadBytesFromBuffer( StreamBuffer_t *pxStreamBuffer, uint8_t *pucData, size_t xMaxCount, size_t xBytesAvailable ) +{ +size_t xCount, xFirstLength, xNextTail; + + /* Use the minimum of the wanted bytes and the available bytes. */ + xCount = configMIN( xBytesAvailable, xMaxCount ); + + if( xCount > ( size_t ) 0 ) + { + xNextTail = pxStreamBuffer->xTail; + + /* Calculate the number of bytes that can be read - which may be + less than the number wanted if the data wraps around to the start of + the buffer. */ + xFirstLength = configMIN( pxStreamBuffer->xLength - xNextTail, xCount ); + + /* Obtain the number of bytes it is possible to obtain in the first + read. Asserts check bounds of read and write. */ + configASSERT( xFirstLength <= xMaxCount ); + configASSERT( ( xNextTail + xFirstLength ) <= pxStreamBuffer->xLength ); + ( void ) memcpy( ( void * ) pucData, ( const void * ) &( pxStreamBuffer->pucBuffer[ xNextTail ] ), xFirstLength ); /*lint !e9087 memcpy() requires void *. */ + + /* If the total number of wanted bytes is greater than the number + that could be read in the first read... */ + if( xCount > xFirstLength ) + { + /*...then read the remaining bytes from the start of the buffer. */ + configASSERT( xCount <= xMaxCount ); + ( void ) memcpy( ( void * ) &( pucData[ xFirstLength ] ), ( void * ) ( pxStreamBuffer->pucBuffer ), xCount - xFirstLength ); /*lint !e9087 memcpy() requires void *. */ + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + /* Move the tail pointer to effectively remove the data read from + the buffer. */ + xNextTail += xCount; + + if( xNextTail >= pxStreamBuffer->xLength ) + { + xNextTail -= pxStreamBuffer->xLength; + } + + pxStreamBuffer->xTail = xNextTail; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + return xCount; +} +/*-----------------------------------------------------------*/ + +static size_t prvBytesInBuffer( const StreamBuffer_t * const pxStreamBuffer ) +{ +/* Returns the distance between xTail and xHead. 
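+ For example, with xLength = 10, xHead = 2 and xTail = 7 the calculation below gives ( 10 + 2 ) - 7 = 5, matching the five bytes held at indices 7, 8, 9, 0 and 1; the final correction only applies when xHead >= xTail, in which case the intermediate value reaches xLength.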
*/ +size_t xCount; + + xCount = pxStreamBuffer->xLength + pxStreamBuffer->xHead; + xCount -= pxStreamBuffer->xTail; + if ( xCount >= pxStreamBuffer->xLength ) + { + xCount -= pxStreamBuffer->xLength; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + return xCount; +} +/*-----------------------------------------------------------*/ + +static void prvInitialiseNewStreamBuffer( StreamBuffer_t * const pxStreamBuffer, + uint8_t * const pucBuffer, + size_t xBufferSizeBytes, + size_t xTriggerLevelBytes, + uint8_t ucFlags ) +{ + /* Assert here is deliberately writing to the entire buffer to ensure it can + be written to without generating exceptions, and is setting the buffer to a + known value to assist in development/debugging. */ + #if( configASSERT_DEFINED == 1 ) + { + /* The value written just has to be identifiable when looking at the + memory. Don't use 0xA5 as that is the stack fill value and could + result in confusion as to what is actually being observed. */ + const BaseType_t xWriteValue = 0x55; + configASSERT( memset( pucBuffer, ( int ) xWriteValue, xBufferSizeBytes ) == pucBuffer ); + } /*lint !e529 !e438 xWriteValue is only used if configASSERT() is defined. */ + #endif + + ( void ) memset( ( void * ) pxStreamBuffer, 0x00, sizeof( StreamBuffer_t ) ); /*lint !e9087 memset() requires void *. */ + pxStreamBuffer->pucBuffer = pucBuffer; + pxStreamBuffer->xLength = xBufferSizeBytes; + pxStreamBuffer->xTriggerLevelBytes = xTriggerLevelBytes; + pxStreamBuffer->ucFlags = ucFlags; +} + +#if ( configUSE_TRACE_FACILITY == 1 ) + + UBaseType_t uxStreamBufferGetStreamBufferNumber( StreamBufferHandle_t xStreamBuffer ) + { + return xStreamBuffer->uxStreamBufferNumber; + } + +#endif /* configUSE_TRACE_FACILITY */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TRACE_FACILITY == 1 ) + + void vStreamBufferSetStreamBufferNumber( StreamBufferHandle_t xStreamBuffer, UBaseType_t uxStreamBufferNumber ) + { + xStreamBuffer->uxStreamBufferNumber = uxStreamBufferNumber; + } + +#endif /* configUSE_TRACE_FACILITY */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TRACE_FACILITY == 1 ) + + uint8_t ucStreamBufferGetStreamBufferType( StreamBufferHandle_t xStreamBuffer ) + { + return ( xStreamBuffer->ucFlags & sbFLAGS_IS_MESSAGE_BUFFER ); + } + +#endif /* configUSE_TRACE_FACILITY */ +/*-----------------------------------------------------------*/ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/FreeRTOS/Source/tasks.c b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/FreeRTOS/Source/tasks.c new file mode 100644 index 0000000..f9c4eb6 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/FreeRTOS/Source/tasks.c @@ -0,0 +1,5214 @@ +/* + * FreeRTOS Kernel V10.2.1 + * Copyright (C) 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. 
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+ * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * http://www.FreeRTOS.org
+ * http://aws.amazon.com/freertos
+ *
+ * 1 tab == 4 spaces!
+ */
+
+/* Standard includes. */
+#include <stdlib.h>
+#include <string.h>
+
+/* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE prevents task.h from redefining
+all the API functions to use the MPU wrappers. That should only be done when
+task.h is included from an application file. */
+#define MPU_WRAPPERS_INCLUDED_FROM_API_FILE
+
+/* FreeRTOS includes. */
+#include "FreeRTOS.h"
+#include "task.h"
+#include "timers.h"
+#include "stack_macros.h"
+
+/* Lint e9021, e961 and e750 are suppressed as a MISRA exception justified
+because the MPU ports require MPU_WRAPPERS_INCLUDED_FROM_API_FILE to be defined
+for the header files above, but not in this file, in order to generate the
+correct privileged Vs unprivileged linkage and placement. */
+#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE /*lint !e961 !e750 !e9021. */
+
+/* Set configUSE_STATS_FORMATTING_FUNCTIONS to 2 to include the stats formatting
+functions but without including stdio.h here. */
+#if ( configUSE_STATS_FORMATTING_FUNCTIONS == 1 )
+ /* At the bottom of this file are two optional functions that can be used
+ to generate human readable text from the raw data generated by the
+ uxTaskGetSystemState() function. Note the formatting functions are provided
+ for convenience only, and are NOT considered part of the kernel. */
+ #include <stdio.h>
+#endif /* configUSE_STATS_FORMATTING_FUNCTIONS == 1 ) */
+
+#if( configUSE_PREEMPTION == 0 )
+ /* If the cooperative scheduler is being used then a yield should not be
+ performed just because a higher priority task has been woken. */
+ #define taskYIELD_IF_USING_PREEMPTION()
+#else
+ #define taskYIELD_IF_USING_PREEMPTION() portYIELD_WITHIN_API()
+#endif
+
+/* Values that can be assigned to the ucNotifyState member of the TCB. */
+#define taskNOT_WAITING_NOTIFICATION ( ( uint8_t ) 0 )
+#define taskWAITING_NOTIFICATION ( ( uint8_t ) 1 )
+#define taskNOTIFICATION_RECEIVED ( ( uint8_t ) 2 )
+
+/*
+ * The value used to fill the stack of a task when the task is created. This
+ * is used purely for checking the high water mark for tasks.
+ */
+#define tskSTACK_FILL_BYTE ( 0xa5U )
+
+/* Bits used to record how a task's stack and TCB were allocated. */
+#define tskDYNAMICALLY_ALLOCATED_STACK_AND_TCB ( ( uint8_t ) 0 )
+#define tskSTATICALLY_ALLOCATED_STACK_ONLY ( ( uint8_t ) 1 )
+#define tskSTATICALLY_ALLOCATED_STACK_AND_TCB ( ( uint8_t ) 2 )
+
+/* If any of the following are set then task stacks are filled with a known
+value so the high water mark can be determined. If none of the following are
+set then don't fill the stack so there is no unnecessary dependency on memset. */
+#if( ( configCHECK_FOR_STACK_OVERFLOW > 1 ) || ( configUSE_TRACE_FACILITY == 1 ) || ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) || ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 ) )
+ #define tskSET_NEW_STACKS_TO_KNOWN_VALUE 1
+#else
+ #define tskSET_NEW_STACKS_TO_KNOWN_VALUE 0
+#endif
+
+/*
+ * Macros used by vListTask to indicate which state a task is in.
+ */ +#define tskRUNNING_CHAR ( 'X' ) +#define tskBLOCKED_CHAR ( 'B' ) +#define tskREADY_CHAR ( 'R' ) +#define tskDELETED_CHAR ( 'D' ) +#define tskSUSPENDED_CHAR ( 'S' ) + +/* + * Some kernel aware debuggers require the data the debugger needs access to be + * global, rather than file scope. + */ +#ifdef portREMOVE_STATIC_QUALIFIER + #define static +#endif + +/* The name allocated to the Idle task. This can be overridden by defining +configIDLE_TASK_NAME in FreeRTOSConfig.h. */ +#ifndef configIDLE_TASK_NAME + #define configIDLE_TASK_NAME "IDLE" +#endif + +#if ( configUSE_PORT_OPTIMISED_TASK_SELECTION == 0 ) + + /* If configUSE_PORT_OPTIMISED_TASK_SELECTION is 0 then task selection is + performed in a generic way that is not optimised to any particular + microcontroller architecture. */ + + /* uxTopReadyPriority holds the priority of the highest priority ready + state task. */ + #define taskRECORD_READY_PRIORITY( uxPriority ) \ + { \ + if( ( uxPriority ) > uxTopReadyPriority ) \ + { \ + uxTopReadyPriority = ( uxPriority ); \ + } \ + } /* taskRECORD_READY_PRIORITY */ + + /*-----------------------------------------------------------*/ + + #define taskSELECT_HIGHEST_PRIORITY_TASK() \ + { \ + UBaseType_t uxTopPriority = uxTopReadyPriority; \ + \ + /* Find the highest priority queue that contains ready tasks. */ \ + while( listLIST_IS_EMPTY( &( pxReadyTasksLists[ uxTopPriority ] ) ) ) \ + { \ + configASSERT( uxTopPriority ); \ + --uxTopPriority; \ + } \ + \ + /* listGET_OWNER_OF_NEXT_ENTRY indexes through the list, so the tasks of \ + the same priority get an equal share of the processor time. */ \ + listGET_OWNER_OF_NEXT_ENTRY( pxCurrentTCB, &( pxReadyTasksLists[ uxTopPriority ] ) ); \ + uxTopReadyPriority = uxTopPriority; \ + } /* taskSELECT_HIGHEST_PRIORITY_TASK */ + + /*-----------------------------------------------------------*/ + + /* Define away taskRESET_READY_PRIORITY() and portRESET_READY_PRIORITY() as + they are only required when a port optimised method of task selection is + being used. */ + #define taskRESET_READY_PRIORITY( uxPriority ) + #define portRESET_READY_PRIORITY( uxPriority, uxTopReadyPriority ) + +#else /* configUSE_PORT_OPTIMISED_TASK_SELECTION */ + + /* If configUSE_PORT_OPTIMISED_TASK_SELECTION is 1 then task selection is + performed in a way that is tailored to the particular microcontroller + architecture being used. */ + + /* A port optimised version is provided. Call the port defined macros. */ + #define taskRECORD_READY_PRIORITY( uxPriority ) portRECORD_READY_PRIORITY( uxPriority, uxTopReadyPriority ) + + /*-----------------------------------------------------------*/ + + #define taskSELECT_HIGHEST_PRIORITY_TASK() \ + { \ + UBaseType_t uxTopPriority; \ + \ + /* Find the highest priority list that contains ready tasks. */ \ + portGET_HIGHEST_PRIORITY( uxTopPriority, uxTopReadyPriority ); \ + configASSERT( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ uxTopPriority ] ) ) > 0 ); \ + listGET_OWNER_OF_NEXT_ENTRY( pxCurrentTCB, &( pxReadyTasksLists[ uxTopPriority ] ) ); \ + } /* taskSELECT_HIGHEST_PRIORITY_TASK() */ + + /*-----------------------------------------------------------*/ + + /* A port optimised version is provided, call it only if the TCB being reset + is being referenced from a ready list. If it is referenced from a delayed + or suspended list then it won't be in a ready list. 
*/ + #define taskRESET_READY_PRIORITY( uxPriority ) \ + { \ + if( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ ( uxPriority ) ] ) ) == ( UBaseType_t ) 0 ) \ + { \ + portRESET_READY_PRIORITY( ( uxPriority ), ( uxTopReadyPriority ) ); \ + } \ + } + +#endif /* configUSE_PORT_OPTIMISED_TASK_SELECTION */ + +/*-----------------------------------------------------------*/ + +/* pxDelayedTaskList and pxOverflowDelayedTaskList are switched when the tick +count overflows. */ +#define taskSWITCH_DELAYED_LISTS() \ +{ \ + List_t *pxTemp; \ + \ + /* The delayed tasks list should be empty when the lists are switched. */ \ + configASSERT( ( listLIST_IS_EMPTY( pxDelayedTaskList ) ) ); \ + \ + pxTemp = pxDelayedTaskList; \ + pxDelayedTaskList = pxOverflowDelayedTaskList; \ + pxOverflowDelayedTaskList = pxTemp; \ + xNumOfOverflows++; \ + prvResetNextTaskUnblockTime(); \ +} + +/*-----------------------------------------------------------*/ + +/* + * Place the task represented by pxTCB into the appropriate ready list for + * the task. It is inserted at the end of the list. + */ +#define prvAddTaskToReadyList( pxTCB ) \ + traceMOVED_TASK_TO_READY_STATE( pxTCB ); \ + taskRECORD_READY_PRIORITY( ( pxTCB )->uxPriority ); \ + vListInsertEnd( &( pxReadyTasksLists[ ( pxTCB )->uxPriority ] ), &( ( pxTCB )->xStateListItem ) ); \ + tracePOST_MOVED_TASK_TO_READY_STATE( pxTCB ) +/*-----------------------------------------------------------*/ + +/* + * Several functions take an TaskHandle_t parameter that can optionally be NULL, + * where NULL is used to indicate that the handle of the currently executing + * task should be used in place of the parameter. This macro simply checks to + * see if the parameter is NULL and returns a pointer to the appropriate TCB. + */ +#define prvGetTCBFromHandle( pxHandle ) ( ( ( pxHandle ) == NULL ) ? pxCurrentTCB : ( pxHandle ) ) + +/* The item value of the event list item is normally used to hold the priority +of the task to which it belongs (coded to allow it to be held in reverse +priority order). However, it is occasionally borrowed for other purposes. It +is important its value is not updated due to a task priority change while it is +being used for another purpose. The following bit definition is used to inform +the scheduler that the value should not be changed - in which case it is the +responsibility of whichever module is using the value to ensure it gets set back +to its original value when it is released. */ +#if( configUSE_16_BIT_TICKS == 1 ) + #define taskEVENT_LIST_ITEM_VALUE_IN_USE 0x8000U +#else + #define taskEVENT_LIST_ITEM_VALUE_IN_USE 0x80000000UL +#endif + +/* + * Task control block. A task control block (TCB) is allocated for each task, + * and stores task state information, including a pointer to the task's context + * (the task's run time environment, including register values) + */ +typedef struct tskTaskControlBlock /* The old naming convention is used to prevent breaking kernel aware debuggers. */ +{ + volatile StackType_t *pxTopOfStack; /*< Points to the location of the last item placed on the tasks stack. THIS MUST BE THE FIRST MEMBER OF THE TCB STRUCT. */ + + #if ( portUSING_MPU_WRAPPERS == 1 ) + xMPU_SETTINGS xMPUSettings; /*< The MPU settings are defined as part of the port layer. THIS MUST BE THE SECOND MEMBER OF THE TCB STRUCT. */ + #endif + + ListItem_t xStateListItem; /*< The list that the state list item of a task is reference from denotes the state of that task (Ready, Blocked, Suspended ). 
*/ + ListItem_t xEventListItem; /*< Used to reference a task from an event list. */ + UBaseType_t uxPriority; /*< The priority of the task. 0 is the lowest priority. */ + StackType_t *pxStack; /*< Points to the start of the stack. */ + char pcTaskName[ configMAX_TASK_NAME_LEN ];/*< Descriptive name given to the task when created. Facilitates debugging only. */ /*lint !e971 Unqualified char types are allowed for strings and single characters only. */ + + #if ( ( portSTACK_GROWTH > 0 ) || ( configRECORD_STACK_HIGH_ADDRESS == 1 ) ) + StackType_t *pxEndOfStack; /*< Points to the highest valid address for the stack. */ + #endif + + #if ( portCRITICAL_NESTING_IN_TCB == 1 ) + UBaseType_t uxCriticalNesting; /*< Holds the critical section nesting depth for ports that do not maintain their own count in the port layer. */ + #endif + + #if ( configUSE_TRACE_FACILITY == 1 ) + UBaseType_t uxTCBNumber; /*< Stores a number that increments each time a TCB is created. It allows debuggers to determine when a task has been deleted and then recreated. */ + UBaseType_t uxTaskNumber; /*< Stores a number specifically for use by third party trace code. */ + #endif + + #if ( configUSE_MUTEXES == 1 ) + UBaseType_t uxBasePriority; /*< The priority last assigned to the task - used by the priority inheritance mechanism. */ + UBaseType_t uxMutexesHeld; + #endif + + #if ( configUSE_APPLICATION_TASK_TAG == 1 ) + TaskHookFunction_t pxTaskTag; + #endif + + #if( configNUM_THREAD_LOCAL_STORAGE_POINTERS > 0 ) + void *pvThreadLocalStoragePointers[ configNUM_THREAD_LOCAL_STORAGE_POINTERS ]; + #endif + + #if( configGENERATE_RUN_TIME_STATS == 1 ) + uint32_t ulRunTimeCounter; /*< Stores the amount of time the task has spent in the Running state. */ + #endif + + #if ( configUSE_NEWLIB_REENTRANT == 1 ) + /* Allocate a Newlib reent structure that is specific to this task. + Note Newlib support has been included by popular demand, but is not + used by the FreeRTOS maintainers themselves. FreeRTOS is not + responsible for resulting newlib operation. User must be familiar with + newlib and must provide system-wide implementations of the necessary + stubs. Be warned that (at the time of writing) the current newlib design + implements a system-wide malloc() that must be provided with locks. */ + struct _reent xNewLib_reent; + #endif + + #if( configUSE_TASK_NOTIFICATIONS == 1 ) + volatile uint32_t ulNotifiedValue; + volatile uint8_t ucNotifyState; + #endif + + /* See the comments in FreeRTOS.h with the definition of + tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE. */ + #if( tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE != 0 ) /*lint !e731 !e9029 Macro has been consolidated for readability reasons. */ + uint8_t ucStaticallyAllocated; /*< Set to pdTRUE if the task is a statically allocated to ensure no attempt is made to free the memory. */ + #endif + + #if( INCLUDE_xTaskAbortDelay == 1 ) + uint8_t ucDelayAborted; + #endif + + #if( configUSE_POSIX_ERRNO == 1 ) + int iTaskErrno; + #endif + +} tskTCB; + +/* The old tskTCB name is maintained above then typedefed to the new TCB_t name +below to enable the use of older kernel aware debuggers. */ +typedef tskTCB TCB_t; + +/*lint -save -e956 A manual analysis and inspection has been used to determine +which static variables must be declared volatile. */ +PRIVILEGED_DATA TCB_t * volatile pxCurrentTCB = NULL; + +/* Lists for ready and blocked tasks. 
--------------------
+xDelayedTaskList1 and xDelayedTaskList2 could be moved to function scope but
+doing so breaks some kernel aware debuggers and debuggers that rely on removing
+the static qualifier. */
+PRIVILEGED_DATA static List_t pxReadyTasksLists[ configMAX_PRIORITIES ] = { 0 };/*< Prioritised ready tasks. */
+PRIVILEGED_DATA static List_t xDelayedTaskList1 = { 0 }; /*< Delayed tasks. */
+PRIVILEGED_DATA static List_t xDelayedTaskList2 = { 0 }; /*< Delayed tasks (two lists are used - one for delays that have overflowed the current tick count). */
+PRIVILEGED_DATA static List_t * volatile pxDelayedTaskList = NULL; /*< Points to the delayed task list currently being used. */
+PRIVILEGED_DATA static List_t * volatile pxOverflowDelayedTaskList = NULL; /*< Points to the delayed task list currently being used to hold tasks that have overflowed the current tick count. */
+PRIVILEGED_DATA static List_t xPendingReadyList = { 0 }; /*< Tasks that have been readied while the scheduler was suspended. They will be moved to the ready list when the scheduler is resumed. */
+
+#if( INCLUDE_vTaskDelete == 1 )
+
+ PRIVILEGED_DATA static List_t xTasksWaitingTermination = { 0 }; /*< Tasks that have been deleted - but their memory not yet freed. */
+ PRIVILEGED_DATA static volatile UBaseType_t uxDeletedTasksWaitingCleanUp = ( UBaseType_t ) 0U;
+
+#endif
+
+#if ( INCLUDE_vTaskSuspend == 1 )
+
+ PRIVILEGED_DATA static List_t xSuspendedTaskList = { 0 }; /*< Tasks that are currently suspended. */
+
+#endif
+
+/* Global POSIX errno. Its value is changed upon context switching to match
+the errno of the currently running task. */
+#if ( configUSE_POSIX_ERRNO == 1 )
+ int FreeRTOS_errno = 0;
+#endif
+
+/* Other file private variables. --------------------------------*/
+PRIVILEGED_DATA static volatile UBaseType_t uxCurrentNumberOfTasks = ( UBaseType_t ) 0U;
+PRIVILEGED_DATA static volatile TickType_t xTickCount = ( TickType_t ) configINITIAL_TICK_COUNT;
+PRIVILEGED_DATA static volatile UBaseType_t uxTopReadyPriority = tskIDLE_PRIORITY;
+PRIVILEGED_DATA static volatile BaseType_t xSchedulerRunning = pdFALSE;
+PRIVILEGED_DATA static volatile UBaseType_t uxPendedTicks = ( UBaseType_t ) 0U;
+PRIVILEGED_DATA static volatile BaseType_t xYieldPending = pdFALSE;
+PRIVILEGED_DATA static volatile BaseType_t xNumOfOverflows = ( BaseType_t ) 0;
+PRIVILEGED_DATA static UBaseType_t uxTaskNumber = ( UBaseType_t ) 0U;
+PRIVILEGED_DATA static volatile TickType_t xNextTaskUnblockTime = ( TickType_t ) 0U; /* Initialised to portMAX_DELAY before the scheduler starts. */
+PRIVILEGED_DATA static TaskHandle_t xIdleTaskHandle = NULL; /*< Holds the handle of the idle task. The idle task is created automatically when the scheduler is started. */
+
+/* Context switches are held pending while the scheduler is suspended. Also,
+interrupts must not manipulate the xStateListItem of a TCB, or any of the
+lists the xStateListItem can be referenced from, if the scheduler is suspended.
+If an interrupt needs to unblock a task while the scheduler is suspended then it
+moves the task's event list item into the xPendingReadyList, ready for the
+kernel to move the task from the pending ready list into the real ready list
+when the scheduler is unsuspended. The pending ready list itself can only be
+accessed from a critical section.
*/ +PRIVILEGED_DATA static volatile UBaseType_t uxSchedulerSuspended = ( UBaseType_t ) pdFALSE; + +#if ( configGENERATE_RUN_TIME_STATS == 1 ) + + /* Do not move these variables to function scope as doing so prevents the + code working with debuggers that need to remove the static qualifier. */ + PRIVILEGED_DATA static uint32_t ulTaskSwitchedInTime = 0UL; /*< Holds the value of a timer/counter the last time a task was switched in. */ + PRIVILEGED_DATA static uint32_t ulTotalRunTime = 0UL; /*< Holds the total amount of execution time as defined by the run time counter clock. */ + +#endif + +/*lint -restore */ + +/*-----------------------------------------------------------*/ + +/* Callback function prototypes. --------------------------*/ +#if( configCHECK_FOR_STACK_OVERFLOW > 0 ) + + extern void vApplicationStackOverflowHook( TaskHandle_t xTask, char *pcTaskName ); + +#endif + +#if( configUSE_TICK_HOOK > 0 ) + + extern void vApplicationTickHook( void ); /*lint !e526 Symbol not defined as it is an application callback. */ + +#endif + +#if( configSUPPORT_STATIC_ALLOCATION == 1 ) + + extern void vApplicationGetIdleTaskMemory( StaticTask_t **ppxIdleTaskTCBBuffer, StackType_t **ppxIdleTaskStackBuffer, uint32_t *pulIdleTaskStackSize ); /*lint !e526 Symbol not defined as it is an application callback. */ + +#endif + +/* File private functions. --------------------------------*/ + +/** + * Utility task that simply returns pdTRUE if the task referenced by xTask is + * currently in the Suspended state, or pdFALSE if the task referenced by xTask + * is in any other state. + */ +#if ( INCLUDE_vTaskSuspend == 1 ) + + static BaseType_t prvTaskIsTaskSuspended( const TaskHandle_t xTask ) PRIVILEGED_FUNCTION; + +#endif /* INCLUDE_vTaskSuspend */ + +/* + * Utility to ready all the lists used by the scheduler. This is called + * automatically upon the creation of the first task. + */ +static void prvInitialiseTaskLists( void ) PRIVILEGED_FUNCTION; + +/* + * The idle task, which as all tasks is implemented as a never ending loop. + * The idle task is automatically created and added to the ready lists upon + * creation of the first user task. + * + * The portTASK_FUNCTION_PROTO() macro is used to allow port/compiler specific + * language extensions. The equivalent prototype for this function is: + * + * void prvIdleTask( void *pvParameters ); + * + */ +static portTASK_FUNCTION_PROTO( prvIdleTask, pvParameters ); + +/* + * Utility to free all memory allocated by the scheduler to hold a TCB, + * including the stack pointed to by the TCB. + * + * This does not free memory allocated by the task itself (i.e. memory + * allocated by calls to pvPortMalloc from within the tasks application code). + */ +#if ( INCLUDE_vTaskDelete == 1 ) + + static void prvDeleteTCB( TCB_t *pxTCB ) PRIVILEGED_FUNCTION; + +#endif + +/* + * Used only by the idle task. This checks to see if anything has been placed + * in the list of tasks waiting to be deleted. If so the task is cleaned up + * and its TCB deleted. + */ +static void prvCheckTasksWaitingTermination( void ) PRIVILEGED_FUNCTION; + +/* + * The currently executing task is entering the Blocked state. Add the task to + * either the current or the overflow delayed task list. 
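+ * The overflow list holds tasks whose calculated wake time has wrapped past the
+ * maximum tick count, so a task whose wake time is numerically smaller than the
+ * current tick count still unblocks at the correct time.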
+ */ +static void prvAddCurrentTaskToDelayedList( TickType_t xTicksToWait, const BaseType_t xCanBlockIndefinitely ) PRIVILEGED_FUNCTION; + +/* + * Fills an TaskStatus_t structure with information on each task that is + * referenced from the pxList list (which may be a ready list, a delayed list, + * a suspended list, etc.). + * + * THIS FUNCTION IS INTENDED FOR DEBUGGING ONLY, AND SHOULD NOT BE CALLED FROM + * NORMAL APPLICATION CODE. + */ +#if ( configUSE_TRACE_FACILITY == 1 ) + + static UBaseType_t prvListTasksWithinSingleList( TaskStatus_t *pxTaskStatusArray, List_t *pxList, eTaskState eState ) PRIVILEGED_FUNCTION; + +#endif + +/* + * Searches pxList for a task with name pcNameToQuery - returning a handle to + * the task if it is found, or NULL if the task is not found. + */ +#if ( INCLUDE_xTaskGetHandle == 1 ) + + static TCB_t *prvSearchForNameWithinSingleList( List_t *pxList, const char pcNameToQuery[] ) PRIVILEGED_FUNCTION; + +#endif + +/* + * When a task is created, the stack of the task is filled with a known value. + * This function determines the 'high water mark' of the task stack by + * determining how much of the stack remains at the original preset value. + */ +#if ( ( configUSE_TRACE_FACILITY == 1 ) || ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) || ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 ) ) + + static configSTACK_DEPTH_TYPE prvTaskCheckFreeStackSpace( const uint8_t * pucStackByte ) PRIVILEGED_FUNCTION; + +#endif + +/* + * Return the amount of time, in ticks, that will pass before the kernel will + * next move a task from the Blocked state to the Running state. + * + * This conditional compilation should use inequality to 0, not equality to 1. + * This is to ensure portSUPPRESS_TICKS_AND_SLEEP() can be called when user + * defined low power mode implementations require configUSE_TICKLESS_IDLE to be + * set to a value other than 1. + */ +#if ( configUSE_TICKLESS_IDLE != 0 ) + + static TickType_t prvGetExpectedIdleTime( void ) PRIVILEGED_FUNCTION; + +#endif + +/* + * Set xNextTaskUnblockTime to the time at which the next Blocked state task + * will exit the Blocked state. + */ +static void prvResetNextTaskUnblockTime( void ); + +#if ( ( configUSE_TRACE_FACILITY == 1 ) && ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) ) + + /* + * Helper function used to pad task names with spaces when printing out + * human readable tables of task information. + */ + static char *prvWriteNameToBuffer( char *pcBuffer, const char *pcTaskName ) PRIVILEGED_FUNCTION; + +#endif + +/* + * Called after a Task_t structure has been allocated either statically or + * dynamically to fill in the structure's members. + */ +static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, + const char * const pcName, /*lint !e971 Unqualified char types are allowed for strings and single characters only. */ + const uint32_t ulStackDepth, + void * const pvParameters, + UBaseType_t uxPriority, + TaskHandle_t * const pxCreatedTask, + TCB_t *pxNewTCB, + const MemoryRegion_t * const xRegions ) PRIVILEGED_FUNCTION; + +/* + * Called after a new task has been created and initialised to place the task + * under the control of the scheduler. + */ +static void prvAddNewTaskToReadyList( TCB_t *pxNewTCB ) PRIVILEGED_FUNCTION; + +/* + * freertos_tasks_c_additions_init() should only be called if the user definable + * macro FREERTOS_TASKS_C_ADDITIONS_INIT() is defined, as that is the only macro + * called by the function. 
+ */ +#ifdef FREERTOS_TASKS_C_ADDITIONS_INIT + + static void freertos_tasks_c_additions_init( void ) PRIVILEGED_FUNCTION; + +#endif + +/*-----------------------------------------------------------*/ + +#if( configSUPPORT_STATIC_ALLOCATION == 1 ) + + TaskHandle_t xTaskCreateStatic( TaskFunction_t pxTaskCode, + const char * const pcName, /*lint !e971 Unqualified char types are allowed for strings and single characters only. */ + const uint32_t ulStackDepth, + void * const pvParameters, + UBaseType_t uxPriority, + StackType_t * const puxStackBuffer, + StaticTask_t * const pxTaskBuffer ) + { + TCB_t *pxNewTCB; + TaskHandle_t xReturn; + + configASSERT( puxStackBuffer != NULL ); + configASSERT( pxTaskBuffer != NULL ); + + #if( configASSERT_DEFINED == 1 ) + { + /* Sanity check that the size of the structure used to declare a + variable of type StaticTask_t equals the size of the real task + structure. */ + volatile size_t xSize = sizeof( StaticTask_t ); + configASSERT( xSize == sizeof( TCB_t ) ); + ( void ) xSize; /* Prevent lint warning when configASSERT() is not used. */ + } + #endif /* configASSERT_DEFINED */ + + + if( ( pxTaskBuffer != NULL ) && ( puxStackBuffer != NULL ) ) + { + /* The memory used for the task's TCB and stack are passed into this + function - use them. */ + pxNewTCB = ( TCB_t * ) pxTaskBuffer; /*lint !e740 !e9087 Unusual cast is ok as the structures are designed to have the same alignment, and the size is checked by an assert. */ + pxNewTCB->pxStack = ( StackType_t * ) puxStackBuffer; + + #if( tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE != 0 ) /*lint !e731 !e9029 Macro has been consolidated for readability reasons. */ + { + /* Tasks can be created statically or dynamically, so note this + task was created statically in case the task is later deleted. */ + pxNewTCB->ucStaticallyAllocated = tskSTATICALLY_ALLOCATED_STACK_AND_TCB; + } + #endif /* tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE */ + + prvInitialiseNewTask( pxTaskCode, pcName, ulStackDepth, pvParameters, uxPriority, &xReturn, pxNewTCB, NULL ); + prvAddNewTaskToReadyList( pxNewTCB ); + } + else + { + xReturn = NULL; + } + + return xReturn; + } + +#endif /* SUPPORT_STATIC_ALLOCATION */ +/*-----------------------------------------------------------*/ + +#if( ( portUSING_MPU_WRAPPERS == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) ) + + BaseType_t xTaskCreateRestrictedStatic( const TaskParameters_t * const pxTaskDefinition, TaskHandle_t *pxCreatedTask ) + { + TCB_t *pxNewTCB; + BaseType_t xReturn = errCOULD_NOT_ALLOCATE_REQUIRED_MEMORY; + + configASSERT( pxTaskDefinition->puxStackBuffer != NULL ); + configASSERT( pxTaskDefinition->pxTaskBuffer != NULL ); + + if( ( pxTaskDefinition->puxStackBuffer != NULL ) && ( pxTaskDefinition->pxTaskBuffer != NULL ) ) + { + /* Allocate space for the TCB. Where the memory comes from depends + on the implementation of the port malloc function and whether or + not static allocation is being used. */ + pxNewTCB = ( TCB_t * ) pxTaskDefinition->pxTaskBuffer; + + /* Store the stack location in the TCB. */ + pxNewTCB->pxStack = pxTaskDefinition->puxStackBuffer; + + #if( tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE != 0 ) + { + /* Tasks can be created statically or dynamically, so note this + task was created statically in case the task is later deleted. 
*/ + pxNewTCB->ucStaticallyAllocated = tskSTATICALLY_ALLOCATED_STACK_AND_TCB; + } + #endif /* tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE */ + + prvInitialiseNewTask( pxTaskDefinition->pvTaskCode, + pxTaskDefinition->pcName, + ( uint32_t ) pxTaskDefinition->usStackDepth, + pxTaskDefinition->pvParameters, + pxTaskDefinition->uxPriority, + pxCreatedTask, pxNewTCB, + pxTaskDefinition->xRegions ); + + prvAddNewTaskToReadyList( pxNewTCB ); + xReturn = pdPASS; + } + + return xReturn; + } + +#endif /* ( portUSING_MPU_WRAPPERS == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) */ +/*-----------------------------------------------------------*/ + +#if( ( portUSING_MPU_WRAPPERS == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) ) + + BaseType_t xTaskCreateRestricted( const TaskParameters_t * const pxTaskDefinition, TaskHandle_t *pxCreatedTask ) + { + TCB_t *pxNewTCB; + BaseType_t xReturn = errCOULD_NOT_ALLOCATE_REQUIRED_MEMORY; + + configASSERT( pxTaskDefinition->puxStackBuffer ); + + if( pxTaskDefinition->puxStackBuffer != NULL ) + { + /* Allocate space for the TCB. Where the memory comes from depends + on the implementation of the port malloc function and whether or + not static allocation is being used. */ + pxNewTCB = ( TCB_t * ) pvPortMalloc( sizeof( TCB_t ) ); + + if( pxNewTCB != NULL ) + { + /* Store the stack location in the TCB. */ + pxNewTCB->pxStack = pxTaskDefinition->puxStackBuffer; + + #if( tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE != 0 ) + { + /* Tasks can be created statically or dynamically, so note + this task had a statically allocated stack in case it is + later deleted. The TCB was allocated dynamically. */ + pxNewTCB->ucStaticallyAllocated = tskSTATICALLY_ALLOCATED_STACK_ONLY; + } + #endif /* tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE */ + + prvInitialiseNewTask( pxTaskDefinition->pvTaskCode, + pxTaskDefinition->pcName, + ( uint32_t ) pxTaskDefinition->usStackDepth, + pxTaskDefinition->pvParameters, + pxTaskDefinition->uxPriority, + pxCreatedTask, pxNewTCB, + pxTaskDefinition->xRegions ); + + prvAddNewTaskToReadyList( pxNewTCB ); + xReturn = pdPASS; + } + } + + return xReturn; + } + +#endif /* portUSING_MPU_WRAPPERS */ +/*-----------------------------------------------------------*/ + +#if( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) + + BaseType_t xTaskCreate( TaskFunction_t pxTaskCode, + const char * const pcName, /*lint !e971 Unqualified char types are allowed for strings and single characters only. */ + const configSTACK_DEPTH_TYPE usStackDepth, + void * const pvParameters, + UBaseType_t uxPriority, + TaskHandle_t * const pxCreatedTask ) + { + TCB_t *pxNewTCB; + BaseType_t xReturn; + + /* If the stack grows down then allocate the stack then the TCB so the stack + does not grow into the TCB. Likewise if the stack grows up then allocate + the TCB then the stack. */ + #if( portSTACK_GROWTH > 0 ) + { + /* Allocate space for the TCB. Where the memory comes from depends on + the implementation of the port malloc function and whether or not static + allocation is being used. */ + pxNewTCB = ( TCB_t * ) pvPortMalloc( sizeof( TCB_t ) ); + + if( pxNewTCB != NULL ) + { + /* Allocate space for the stack used by the task being created. + The base of the stack memory stored in the TCB so the task can + be deleted later if required. */ + pxNewTCB->pxStack = ( StackType_t * ) pvPortMalloc( ( ( ( size_t ) usStackDepth ) * sizeof( StackType_t ) ) ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. 
*/ + + if( pxNewTCB->pxStack == NULL ) + { + /* Could not allocate the stack. Delete the allocated TCB. */ + vPortFree( pxNewTCB ); + pxNewTCB = NULL; + } + } + } + #else /* portSTACK_GROWTH */ + { + StackType_t *pxStack; + + /* Allocate space for the stack used by the task being created. */ + pxStack = pvPortMalloc( ( ( ( size_t ) usStackDepth ) * sizeof( StackType_t ) ) ); /*lint !e9079 All values returned by pvPortMalloc() have at least the alignment required by the MCU's stack and this allocation is the stack. */ + + if( pxStack != NULL ) + { + /* Allocate space for the TCB. */ + pxNewTCB = ( TCB_t * ) pvPortMalloc( sizeof( TCB_t ) ); /*lint !e9087 !e9079 All values returned by pvPortMalloc() have at least the alignment required by the MCU's stack, and the first member of TCB_t is always a pointer to the task's stack. */ + + if( pxNewTCB != NULL ) + { + /* Store the stack location in the TCB. */ + pxNewTCB->pxStack = pxStack; + } + else + { + /* The stack cannot be used as the TCB was not created. Free + it again. */ + vPortFree( pxStack ); + } + } + else + { + pxNewTCB = NULL; + } + } + #endif /* portSTACK_GROWTH */ + + if( pxNewTCB != NULL ) + { + #if( tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE != 0 ) /*lint !e9029 !e731 Macro has been consolidated for readability reasons. */ + { + /* Tasks can be created statically or dynamically, so note this + task was created dynamically in case it is later deleted. */ + pxNewTCB->ucStaticallyAllocated = tskDYNAMICALLY_ALLOCATED_STACK_AND_TCB; + } + #endif /* tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE */ + + prvInitialiseNewTask( pxTaskCode, pcName, ( uint32_t ) usStackDepth, pvParameters, uxPriority, pxCreatedTask, pxNewTCB, NULL ); + prvAddNewTaskToReadyList( pxNewTCB ); + xReturn = pdPASS; + } + else + { + xReturn = errCOULD_NOT_ALLOCATE_REQUIRED_MEMORY; + } + + return xReturn; + } + +#endif /* configSUPPORT_DYNAMIC_ALLOCATION */ +/*-----------------------------------------------------------*/ + +static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, + const char * const pcName, /*lint !e971 Unqualified char types are allowed for strings and single characters only. */ + const uint32_t ulStackDepth, + void * const pvParameters, + UBaseType_t uxPriority, + TaskHandle_t * const pxCreatedTask, + TCB_t *pxNewTCB, + const MemoryRegion_t * const xRegions ) +{ +StackType_t *pxTopOfStack; +UBaseType_t x; + + #if( portUSING_MPU_WRAPPERS == 1 ) + /* Should the task be created in privileged mode? */ + BaseType_t xRunPrivileged; + if( ( uxPriority & portPRIVILEGE_BIT ) != 0U ) + { + xRunPrivileged = pdTRUE; + } + else + { + xRunPrivileged = pdFALSE; + } + uxPriority &= ~portPRIVILEGE_BIT; + #endif /* portUSING_MPU_WRAPPERS == 1 */ + + /* Avoid dependency on memset() if it is not required. */ + #if( tskSET_NEW_STACKS_TO_KNOWN_VALUE == 1 ) + { + /* Fill the stack with a known value to assist debugging. */ + ( void ) memset( pxNewTCB->pxStack, ( int ) tskSTACK_FILL_BYTE, ( size_t ) ulStackDepth * sizeof( StackType_t ) ); + } + #endif /* tskSET_NEW_STACKS_TO_KNOWN_VALUE */ + + /* Calculate the top of stack address. This depends on whether the stack + grows from high memory to low (as per the 80x86) or vice versa. + portSTACK_GROWTH is used to make the result positive or negative as required + by the port. 
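+ For a descending stack ( portSTACK_GROWTH < 0 ) the initial top of stack is the address of the last StackType_t in the allocation, rounded down to portBYTE_ALIGNMENT; for an ascending stack it is the base of the allocated stack.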
*/ + #if( portSTACK_GROWTH < 0 ) + { + pxTopOfStack = &( pxNewTCB->pxStack[ ulStackDepth - ( uint32_t ) 1 ] ); + pxTopOfStack = ( StackType_t * ) ( ( ( portPOINTER_SIZE_TYPE ) pxTopOfStack ) & ( ~( ( portPOINTER_SIZE_TYPE ) portBYTE_ALIGNMENT_MASK ) ) ); /*lint !e923 !e9033 !e9078 MISRA exception. Avoiding casts between pointers and integers is not practical. Size differences accounted for using portPOINTER_SIZE_TYPE type. Checked by assert(). */ + + /* Check the alignment of the calculated top of stack is correct. */ + configASSERT( ( ( ( portPOINTER_SIZE_TYPE ) pxTopOfStack & ( portPOINTER_SIZE_TYPE ) portBYTE_ALIGNMENT_MASK ) == 0UL ) ); + + #if( configRECORD_STACK_HIGH_ADDRESS == 1 ) + { + /* Also record the stack's high address, which may assist + debugging. */ + pxNewTCB->pxEndOfStack = pxTopOfStack; + } + #endif /* configRECORD_STACK_HIGH_ADDRESS */ + } + #else /* portSTACK_GROWTH */ + { + pxTopOfStack = pxNewTCB->pxStack; + + /* Check the alignment of the stack buffer is correct. */ + configASSERT( ( ( ( portPOINTER_SIZE_TYPE ) pxNewTCB->pxStack & ( portPOINTER_SIZE_TYPE ) portBYTE_ALIGNMENT_MASK ) == 0UL ) ); + + /* The other extreme of the stack space is required if stack checking is + performed. */ + pxNewTCB->pxEndOfStack = pxNewTCB->pxStack + ( ulStackDepth - ( uint32_t ) 1 ); + } + #endif /* portSTACK_GROWTH */ + + /* Store the task name in the TCB. */ + if( pcName != NULL ) + { + for( x = ( UBaseType_t ) 0; x < ( UBaseType_t ) configMAX_TASK_NAME_LEN; x++ ) + { + pxNewTCB->pcTaskName[ x ] = pcName[ x ]; + + /* Don't copy all configMAX_TASK_NAME_LEN if the string is shorter than + configMAX_TASK_NAME_LEN characters just in case the memory after the + string is not accessible (extremely unlikely). */ + if( pcName[ x ] == ( char ) 0x00 ) + { + break; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + + /* Ensure the name string is terminated in the case that the string length + was greater or equal to configMAX_TASK_NAME_LEN. */ + pxNewTCB->pcTaskName[ configMAX_TASK_NAME_LEN - 1 ] = '\0'; + } + else + { + /* The task has not been given a name, so just ensure there is a NULL + terminator when it is read out. */ + pxNewTCB->pcTaskName[ 0 ] = 0x00; + } + + /* This is used as an array index so must ensure it's not too large. First + remove the privilege bit if one is present. */ + if( uxPriority >= ( UBaseType_t ) configMAX_PRIORITIES ) + { + uxPriority = ( UBaseType_t ) configMAX_PRIORITIES - ( UBaseType_t ) 1U; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + pxNewTCB->uxPriority = uxPriority; + #if ( configUSE_MUTEXES == 1 ) + { + pxNewTCB->uxBasePriority = uxPriority; + pxNewTCB->uxMutexesHeld = 0; + } + #endif /* configUSE_MUTEXES */ + + vListInitialiseItem( &( pxNewTCB->xStateListItem ) ); + vListInitialiseItem( &( pxNewTCB->xEventListItem ) ); + + /* Set the pxNewTCB as a link back from the ListItem_t. This is so we can get + back to the containing TCB from a generic item in a list. */ + listSET_LIST_ITEM_OWNER( &( pxNewTCB->xStateListItem ), pxNewTCB ); + + /* Event lists are always in priority order. */ + listSET_LIST_ITEM_VALUE( &( pxNewTCB->xEventListItem ), ( TickType_t ) configMAX_PRIORITIES - ( TickType_t ) uxPriority ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. 
*/ + listSET_LIST_ITEM_OWNER( &( pxNewTCB->xEventListItem ), pxNewTCB ); + + #if ( portCRITICAL_NESTING_IN_TCB == 1 ) + { + pxNewTCB->uxCriticalNesting = ( UBaseType_t ) 0U; + } + #endif /* portCRITICAL_NESTING_IN_TCB */ + + #if ( configUSE_APPLICATION_TASK_TAG == 1 ) + { + pxNewTCB->pxTaskTag = NULL; + } + #endif /* configUSE_APPLICATION_TASK_TAG */ + + #if ( configGENERATE_RUN_TIME_STATS == 1 ) + { + pxNewTCB->ulRunTimeCounter = 0UL; + } + #endif /* configGENERATE_RUN_TIME_STATS */ + + #if ( portUSING_MPU_WRAPPERS == 1 ) + { + vPortStoreTaskMPUSettings( &( pxNewTCB->xMPUSettings ), xRegions, pxNewTCB->pxStack, ulStackDepth ); + } + #else + { + /* Avoid compiler warning about unreferenced parameter. */ + ( void ) xRegions; + } + #endif + + #if( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) + { + for( x = 0; x < ( UBaseType_t ) configNUM_THREAD_LOCAL_STORAGE_POINTERS; x++ ) + { + pxNewTCB->pvThreadLocalStoragePointers[ x ] = NULL; + } + } + #endif + + #if ( configUSE_TASK_NOTIFICATIONS == 1 ) + { + pxNewTCB->ulNotifiedValue = 0; + pxNewTCB->ucNotifyState = taskNOT_WAITING_NOTIFICATION; + } + #endif + + #if ( configUSE_NEWLIB_REENTRANT == 1 ) + { + /* Initialise this task's Newlib reent structure. */ + _REENT_INIT_PTR( ( &( pxNewTCB->xNewLib_reent ) ) ); + } + #endif + + #if( INCLUDE_xTaskAbortDelay == 1 ) + { + pxNewTCB->ucDelayAborted = pdFALSE; + } + #endif + + /* Initialize the TCB stack to look as if the task was already running, + but had been interrupted by the scheduler. The return address is set + to the start of the task function. Once the stack has been initialised + the top of stack variable is updated. */ + #if( portUSING_MPU_WRAPPERS == 1 ) + { + /* If the port has capability to detect stack overflow, + pass the stack end address to the stack initialization + function as well. */ + #if( portHAS_STACK_OVERFLOW_CHECKING == 1 ) + { + #if( portSTACK_GROWTH < 0 ) + { + pxNewTCB->pxTopOfStack = pxPortInitialiseStack( pxTopOfStack, pxNewTCB->pxStack, pxTaskCode, pvParameters, xRunPrivileged ); + } + #else /* portSTACK_GROWTH */ + { + pxNewTCB->pxTopOfStack = pxPortInitialiseStack( pxTopOfStack, pxNewTCB->pxEndOfStack, pxTaskCode, pvParameters, xRunPrivileged ); + } + #endif /* portSTACK_GROWTH */ + } + #else /* portHAS_STACK_OVERFLOW_CHECKING */ + { + pxNewTCB->pxTopOfStack = pxPortInitialiseStack( pxTopOfStack, pxTaskCode, pvParameters, xRunPrivileged ); + } + #endif /* portHAS_STACK_OVERFLOW_CHECKING */ + } + #else /* portUSING_MPU_WRAPPERS */ + { + /* If the port has capability to detect stack overflow, + pass the stack end address to the stack initialization + function as well. */ + #if( portHAS_STACK_OVERFLOW_CHECKING == 1 ) + { + #if( portSTACK_GROWTH < 0 ) + { + pxNewTCB->pxTopOfStack = pxPortInitialiseStack( pxTopOfStack, pxNewTCB->pxStack, pxTaskCode, pvParameters ); + } + #else /* portSTACK_GROWTH */ + { + pxNewTCB->pxTopOfStack = pxPortInitialiseStack( pxTopOfStack, pxNewTCB->pxEndOfStack, pxTaskCode, pvParameters ); + } + #endif /* portSTACK_GROWTH */ + } + #else /* portHAS_STACK_OVERFLOW_CHECKING */ + { + pxNewTCB->pxTopOfStack = pxPortInitialiseStack( pxTopOfStack, pxTaskCode, pvParameters ); + } + #endif /* portHAS_STACK_OVERFLOW_CHECKING */ + } + #endif /* portUSING_MPU_WRAPPERS */ + + if( pxCreatedTask != NULL ) + { + /* Pass the handle out in an anonymous way. 
The handle can be used to + change the created task's priority, delete the created task, etc.*/ + *pxCreatedTask = ( TaskHandle_t ) pxNewTCB; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } +} +/*-----------------------------------------------------------*/ + +static void prvAddNewTaskToReadyList( TCB_t *pxNewTCB ) +{ + /* Ensure interrupts don't access the task lists while the lists are being + updated. */ + taskENTER_CRITICAL(); + { + uxCurrentNumberOfTasks++; + if( pxCurrentTCB == NULL ) + { + /* There are no other tasks, or all the other tasks are in + the suspended state - make this the current task. */ + pxCurrentTCB = pxNewTCB; + + if( uxCurrentNumberOfTasks == ( UBaseType_t ) 1 ) + { + /* This is the first task to be created so do the preliminary + initialisation required. We will not recover if this call + fails, but we will report the failure. */ + prvInitialiseTaskLists(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + /* If the scheduler is not already running, make this task the + current task if it is the highest priority task to be created + so far. */ + if( xSchedulerRunning == pdFALSE ) + { + if( pxCurrentTCB->uxPriority <= pxNewTCB->uxPriority ) + { + pxCurrentTCB = pxNewTCB; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + + uxTaskNumber++; + + #if ( configUSE_TRACE_FACILITY == 1 ) + { + /* Add a counter into the TCB for tracing only. */ + pxNewTCB->uxTCBNumber = uxTaskNumber; + } + #endif /* configUSE_TRACE_FACILITY */ + traceTASK_CREATE( pxNewTCB ); + + prvAddTaskToReadyList( pxNewTCB ); + + portSETUP_TCB( pxNewTCB ); + } + taskEXIT_CRITICAL(); + + if( xSchedulerRunning != pdFALSE ) + { + /* If the created task is of a higher priority than the current task + then it should run now. */ + if( pxCurrentTCB->uxPriority < pxNewTCB->uxPriority ) + { + taskYIELD_IF_USING_PREEMPTION(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } +} +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_vTaskDelete == 1 ) + + void vTaskDelete( TaskHandle_t xTaskToDelete ) + { + TCB_t *pxTCB; + + taskENTER_CRITICAL(); + { + /* If null is passed in here then it is the calling task that is + being deleted. */ + pxTCB = prvGetTCBFromHandle( xTaskToDelete ); + + /* Remove task from the ready list. */ + if( uxListRemove( &( pxTCB->xStateListItem ) ) == ( UBaseType_t ) 0 ) + { + taskRESET_READY_PRIORITY( pxTCB->uxPriority ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + /* Is the task waiting on an event also? */ + if( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) != NULL ) + { + ( void ) uxListRemove( &( pxTCB->xEventListItem ) ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + /* Increment the uxTaskNumber also so kernel aware debuggers can + detect that the task lists need re-generating. This is done before + portPRE_TASK_DELETE_HOOK() as in the Windows port that macro will + not return. */ + uxTaskNumber++; + + if( pxTCB == pxCurrentTCB ) + { + /* A task is deleting itself. This cannot complete within the + task itself, as a context switch to another task is required. + Place the task in the termination list. The idle task will + check the termination list and free up any memory allocated by + the scheduler for the TCB and stack of the deleted task. 
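[Illustrative sketch, not from the upstream sources.] The self-delete path described above defers all cleanup to the idle task via xTasksWaitingTermination, whereas deleting another dynamically allocated task reclaims its TCB and stack straight away. Usage, with xWorkerHandle standing in for a handle saved at creation time:

extern TaskHandle_t xWorkerHandle;           // hypothetical handle returned by xTaskCreate()

void vStopWorker_sketch( void )
{
    vTaskDelete( xWorkerHandle );            // another task: its memory is freed here
}

void vSelfDestruct_sketch( void )
{
    vTaskDelete( NULL );                     // the calling task: parked on xTasksWaitingTermination,
                                             // so the idle task must get CPU time to free it
}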
*/ + vListInsertEnd( &xTasksWaitingTermination, &( pxTCB->xStateListItem ) ); + + /* Increment the ucTasksDeleted variable so the idle task knows + there is a task that has been deleted and that it should therefore + check the xTasksWaitingTermination list. */ + ++uxDeletedTasksWaitingCleanUp; + + /* The pre-delete hook is primarily for the Windows simulator, + in which Windows specific clean up operations are performed, + after which it is not possible to yield away from this task - + hence xYieldPending is used to latch that a context switch is + required. */ + portPRE_TASK_DELETE_HOOK( pxTCB, &xYieldPending ); + } + else + { + --uxCurrentNumberOfTasks; + prvDeleteTCB( pxTCB ); + + /* Reset the next expected unblock time in case it referred to + the task that has just been deleted. */ + prvResetNextTaskUnblockTime(); + } + + traceTASK_DELETE( pxTCB ); + } + taskEXIT_CRITICAL(); + + /* Force a reschedule if it is the currently running task that has just + been deleted. */ + if( xSchedulerRunning != pdFALSE ) + { + if( pxTCB == pxCurrentTCB ) + { + configASSERT( uxSchedulerSuspended == 0 ); + portYIELD_WITHIN_API(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + } + +#endif /* INCLUDE_vTaskDelete */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_vTaskDelayUntil == 1 ) + + void vTaskDelayUntil( TickType_t * const pxPreviousWakeTime, const TickType_t xTimeIncrement ) + { + TickType_t xTimeToWake; + BaseType_t xAlreadyYielded, xShouldDelay = pdFALSE; + + configASSERT( pxPreviousWakeTime ); + configASSERT( ( xTimeIncrement > 0U ) ); + configASSERT( uxSchedulerSuspended == 0 ); + + vTaskSuspendAll(); + { + /* Minor optimisation. The tick count cannot change in this + block. */ + const TickType_t xConstTickCount = xTickCount; + + /* Generate the tick time at which the task wants to wake. */ + xTimeToWake = *pxPreviousWakeTime + xTimeIncrement; + + if( xConstTickCount < *pxPreviousWakeTime ) + { + /* The tick count has overflowed since this function was + lasted called. In this case the only time we should ever + actually delay is if the wake time has also overflowed, + and the wake time is greater than the tick time. When this + is the case it is as if neither time had overflowed. */ + if( ( xTimeToWake < *pxPreviousWakeTime ) && ( xTimeToWake > xConstTickCount ) ) + { + xShouldDelay = pdTRUE; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + /* The tick time has not overflowed. In this case we will + delay if either the wake time has overflowed, and/or the + tick time is less than the wake time. */ + if( ( xTimeToWake < *pxPreviousWakeTime ) || ( xTimeToWake > xConstTickCount ) ) + { + xShouldDelay = pdTRUE; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + + /* Update the wake time ready for the next call. */ + *pxPreviousWakeTime = xTimeToWake; + + if( xShouldDelay != pdFALSE ) + { + traceTASK_DELAY_UNTIL( xTimeToWake ); + + /* prvAddCurrentTaskToDelayedList() needs the block time, not + the time to wake, so subtract the current tick count. */ + prvAddCurrentTaskToDelayedList( xTimeToWake - xConstTickCount, pdFALSE ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + xAlreadyYielded = xTaskResumeAll(); + + /* Force a reschedule if xTaskResumeAll has not already done so, we may + have put ourselves to sleep. 
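[Illustrative sketch, not part of the patch.] vTaskDelayUntil() above computes the wake time from the previous wake time rather than from the current tick, which is what gives a drift-free period. A minimal periodic task, with prvSampleSensor() as a hypothetical helper:

static void prvSampleSensor( void );                       // hypothetical work function

void vSensorTask_sketch( void *pvParameters )
{
    TickType_t xLastWake = xTaskGetTickCount();            // seed the reference point once
    for( ;; )
    {
        vTaskDelayUntil( &xLastWake, pdMS_TO_TICKS( 100 ) );   // fixed 100 ms period
        prvSampleSensor();                                 // must normally finish within the period
    }
}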
*/ + if( xAlreadyYielded == pdFALSE ) + { + portYIELD_WITHIN_API(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + +#endif /* INCLUDE_vTaskDelayUntil */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_vTaskDelay == 1 ) + + void vTaskDelay( const TickType_t xTicksToDelay ) + { + BaseType_t xAlreadyYielded = pdFALSE; + + /* A delay time of zero just forces a reschedule. */ + if( xTicksToDelay > ( TickType_t ) 0U ) + { + configASSERT( uxSchedulerSuspended == 0 ); + vTaskSuspendAll(); + { + traceTASK_DELAY(); + + /* A task that is removed from the event list while the + scheduler is suspended will not get placed in the ready + list or removed from the blocked list until the scheduler + is resumed. + + This task cannot be in an event list as it is the currently + executing task. */ + prvAddCurrentTaskToDelayedList( xTicksToDelay, pdFALSE ); + } + xAlreadyYielded = xTaskResumeAll(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + /* Force a reschedule if xTaskResumeAll has not already done so, we may + have put ourselves to sleep. */ + if( xAlreadyYielded == pdFALSE ) + { + portYIELD_WITHIN_API(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + +#endif /* INCLUDE_vTaskDelay */ +/*-----------------------------------------------------------*/ + +#if( ( INCLUDE_eTaskGetState == 1 ) || ( configUSE_TRACE_FACILITY == 1 ) || ( INCLUDE_xTaskAbortDelay == 1 ) ) + + eTaskState eTaskGetState( TaskHandle_t xTask ) + { + eTaskState eReturn; + List_t const * pxStateList, *pxDelayedList, *pxOverflowedDelayedList; + const TCB_t * const pxTCB = xTask; + + configASSERT( pxTCB ); + + if( pxTCB == pxCurrentTCB ) + { + /* The task calling this function is querying its own state. */ + eReturn = eRunning; + } + else + { + taskENTER_CRITICAL(); + { + pxStateList = listLIST_ITEM_CONTAINER( &( pxTCB->xStateListItem ) ); + pxDelayedList = pxDelayedTaskList; + pxOverflowedDelayedList = pxOverflowDelayedTaskList; + } + taskEXIT_CRITICAL(); + + if( ( pxStateList == pxDelayedList ) || ( pxStateList == pxOverflowedDelayedList ) ) + { + /* The task being queried is referenced from one of the Blocked + lists. */ + eReturn = eBlocked; + } + + #if ( INCLUDE_vTaskSuspend == 1 ) + else if( pxStateList == &xSuspendedTaskList ) + { + /* The task being queried is referenced from the suspended + list. Is it genuinely suspended or is it blocked + indefinitely? */ + if( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) == NULL ) + { + #if( configUSE_TASK_NOTIFICATIONS == 1 ) + { + /* The task does not appear on the event list item of + and of the RTOS objects, but could still be in the + blocked state if it is waiting on its notification + rather than waiting on an object. */ + if( pxTCB->ucNotifyState == taskWAITING_NOTIFICATION ) + { + eReturn = eBlocked; + } + else + { + eReturn = eSuspended; + } + } + #else + { + eReturn = eSuspended; + } + #endif + } + else + { + eReturn = eBlocked; + } + } + #endif + + #if ( INCLUDE_vTaskDelete == 1 ) + else if( ( pxStateList == &xTasksWaitingTermination ) || ( pxStateList == NULL ) ) + { + /* The task being queried is referenced from the deleted + tasks list, or it is not referenced from any lists at + all. */ + eReturn = eDeleted; + } + #endif + + else /*lint !e525 Negative indentation is intended to make use of pre-processor clearer. */ + { + /* If the task is not in any other state, it must be in the + Ready (including pending ready) state. 
*/ + eReturn = eReady; + } + } + + return eReturn; + } /*lint !e818 xTask cannot be a pointer to const because it is a typedef. */ + +#endif /* INCLUDE_eTaskGetState */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_uxTaskPriorityGet == 1 ) + + UBaseType_t uxTaskPriorityGet( const TaskHandle_t xTask ) + { + TCB_t const *pxTCB; + UBaseType_t uxReturn; + + taskENTER_CRITICAL(); + { + /* If null is passed in here then it is the priority of the task + that called uxTaskPriorityGet() that is being queried. */ + pxTCB = prvGetTCBFromHandle( xTask ); + uxReturn = pxTCB->uxPriority; + } + taskEXIT_CRITICAL(); + + return uxReturn; + } + +#endif /* INCLUDE_uxTaskPriorityGet */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_uxTaskPriorityGet == 1 ) + + UBaseType_t uxTaskPriorityGetFromISR( const TaskHandle_t xTask ) + { + TCB_t const *pxTCB; + UBaseType_t uxReturn, uxSavedInterruptState; + + /* RTOS ports that support interrupt nesting have the concept of a + maximum system call (or maximum API call) interrupt priority. + Interrupts that are above the maximum system call priority are keep + permanently enabled, even when the RTOS kernel is in a critical section, + but cannot make any calls to FreeRTOS API functions. If configASSERT() + is defined in FreeRTOSConfig.h then + portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion + failure if a FreeRTOS API function is called from an interrupt that has + been assigned a priority above the configured maximum system call + priority. Only FreeRTOS functions that end in FromISR can be called + from interrupts that have been assigned a priority at or (logically) + below the maximum system call interrupt priority. FreeRTOS maintains a + separate interrupt safe API to ensure interrupt entry is as fast and as + simple as possible. More information (albeit Cortex-M specific) is + provided on the following link: + https://www.freertos.org/RTOS-Cortex-M3-M4.html */ + portASSERT_IF_INTERRUPT_PRIORITY_INVALID(); + + uxSavedInterruptState = portSET_INTERRUPT_MASK_FROM_ISR(); + { + /* If null is passed in here then it is the priority of the calling + task that is being queried. */ + pxTCB = prvGetTCBFromHandle( xTask ); + uxReturn = pxTCB->uxPriority; + } + portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptState ); + + return uxReturn; + } + +#endif /* INCLUDE_uxTaskPriorityGet */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_vTaskPrioritySet == 1 ) + + void vTaskPrioritySet( TaskHandle_t xTask, UBaseType_t uxNewPriority ) + { + TCB_t *pxTCB; + UBaseType_t uxCurrentBasePriority, uxPriorityUsedOnEntry; + BaseType_t xYieldRequired = pdFALSE; + + configASSERT( ( uxNewPriority < configMAX_PRIORITIES ) ); + + /* Ensure the new priority is valid. */ + if( uxNewPriority >= ( UBaseType_t ) configMAX_PRIORITIES ) + { + uxNewPriority = ( UBaseType_t ) configMAX_PRIORITIES - ( UBaseType_t ) 1U; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + taskENTER_CRITICAL(); + { + /* If null is passed in here then it is the priority of the calling + task that is being changed. 
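[Illustrative sketch, not from the upstream sources.] eTaskGetState() and uxTaskPriorityGet() above are query-only helpers; note that eTaskGetState() needs a real handle, while uxTaskPriorityGet( NULL ) queries the calling task. Assuming the hypothetical xWorkerHandle again:

extern TaskHandle_t xWorkerHandle;                         // hypothetical handle saved at creation

void vQueryWorker_sketch( void )
{
    eTaskState eState = eTaskGetState( xWorkerHandle );    // eReady, eRunning, eBlocked, eSuspended or eDeleted
    UBaseType_t uxWorker = uxTaskPriorityGet( xWorkerHandle );
    UBaseType_t uxMine   = uxTaskPriorityGet( NULL );      // NULL means "the task calling this function"

    ( void ) eState; ( void ) uxWorker; ( void ) uxMine;   // silence unused-variable warnings in the sketch
}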
*/ + pxTCB = prvGetTCBFromHandle( xTask ); + + traceTASK_PRIORITY_SET( pxTCB, uxNewPriority ); + + #if ( configUSE_MUTEXES == 1 ) + { + uxCurrentBasePriority = pxTCB->uxBasePriority; + } + #else + { + uxCurrentBasePriority = pxTCB->uxPriority; + } + #endif + + if( uxCurrentBasePriority != uxNewPriority ) + { + /* The priority change may have readied a task of higher + priority than the calling task. */ + if( uxNewPriority > uxCurrentBasePriority ) + { + if( pxTCB != pxCurrentTCB ) + { + /* The priority of a task other than the currently + running task is being raised. Is the priority being + raised above that of the running task? */ + if( uxNewPriority >= pxCurrentTCB->uxPriority ) + { + xYieldRequired = pdTRUE; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + /* The priority of the running task is being raised, + but the running task must already be the highest + priority task able to run so no yield is required. */ + } + } + else if( pxTCB == pxCurrentTCB ) + { + /* Setting the priority of the running task down means + there may now be another task of higher priority that + is ready to execute. */ + xYieldRequired = pdTRUE; + } + else + { + /* Setting the priority of any other task down does not + require a yield as the running task must be above the + new priority of the task being modified. */ + } + + /* Remember the ready list the task might be referenced from + before its uxPriority member is changed so the + taskRESET_READY_PRIORITY() macro can function correctly. */ + uxPriorityUsedOnEntry = pxTCB->uxPriority; + + #if ( configUSE_MUTEXES == 1 ) + { + /* Only change the priority being used if the task is not + currently using an inherited priority. */ + if( pxTCB->uxBasePriority == pxTCB->uxPriority ) + { + pxTCB->uxPriority = uxNewPriority; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + /* The base priority gets set whatever. */ + pxTCB->uxBasePriority = uxNewPriority; + } + #else + { + pxTCB->uxPriority = uxNewPriority; + } + #endif + + /* Only reset the event list item value if the value is not + being used for anything else. */ + if( ( listGET_LIST_ITEM_VALUE( &( pxTCB->xEventListItem ) ) & taskEVENT_LIST_ITEM_VALUE_IN_USE ) == 0UL ) + { + listSET_LIST_ITEM_VALUE( &( pxTCB->xEventListItem ), ( ( TickType_t ) configMAX_PRIORITIES - ( TickType_t ) uxNewPriority ) ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */ + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + /* If the task is in the blocked or suspended list we need do + nothing more than change its priority variable. However, if + the task is in a ready list it needs to be removed and placed + in the list appropriate to its new priority. */ + if( listIS_CONTAINED_WITHIN( &( pxReadyTasksLists[ uxPriorityUsedOnEntry ] ), &( pxTCB->xStateListItem ) ) != pdFALSE ) + { + /* The task is currently in its ready list - remove before + adding it to it's new ready list. As we are in a critical + section we can do this even if the scheduler is suspended. */ + if( uxListRemove( &( pxTCB->xStateListItem ) ) == ( UBaseType_t ) 0 ) + { + /* It is known that the task is in its ready list so + there is no need to check again and the port level + reset macro can be called directly. 
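[Illustrative sketch, not part of the patch.] As the block above explains, when configUSE_MUTEXES is 1 a new priority only takes effect immediately if the task is not running at an inherited priority; the base priority is always updated and is applied once the mutex is returned. Typical use, with a hypothetical handle:

extern TaskHandle_t xWorkerHandle;                         // hypothetical

void vBoostWorker_sketch( void )
{
    // Raise the worker above the caller; with preemption enabled this can yield immediately.
    // A value at or above configMAX_PRIORITIES would be clamped, as in task creation.
    vTaskPrioritySet( xWorkerHandle, uxTaskPriorityGet( NULL ) + 1 );

    // ...later, drop it back down...
    vTaskPrioritySet( xWorkerHandle, tskIDLE_PRIORITY + 1 );
}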
*/ + portRESET_READY_PRIORITY( uxPriorityUsedOnEntry, uxTopReadyPriority ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + prvAddTaskToReadyList( pxTCB ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + if( xYieldRequired != pdFALSE ) + { + taskYIELD_IF_USING_PREEMPTION(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + /* Remove compiler warning about unused variables when the port + optimised task selection is not being used. */ + ( void ) uxPriorityUsedOnEntry; + } + } + taskEXIT_CRITICAL(); + } + +#endif /* INCLUDE_vTaskPrioritySet */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_vTaskSuspend == 1 ) + + void vTaskSuspend( TaskHandle_t xTaskToSuspend ) + { + TCB_t *pxTCB; + + taskENTER_CRITICAL(); + { + /* If null is passed in here then it is the running task that is + being suspended. */ + pxTCB = prvGetTCBFromHandle( xTaskToSuspend ); + + traceTASK_SUSPEND( pxTCB ); + + /* Remove task from the ready/delayed list and place in the + suspended list. */ + if( uxListRemove( &( pxTCB->xStateListItem ) ) == ( UBaseType_t ) 0 ) + { + taskRESET_READY_PRIORITY( pxTCB->uxPriority ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + /* Is the task waiting on an event also? */ + if( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) != NULL ) + { + ( void ) uxListRemove( &( pxTCB->xEventListItem ) ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + vListInsertEnd( &xSuspendedTaskList, &( pxTCB->xStateListItem ) ); + + #if( configUSE_TASK_NOTIFICATIONS == 1 ) + { + if( pxTCB->ucNotifyState == taskWAITING_NOTIFICATION ) + { + /* The task was blocked to wait for a notification, but is + now suspended, so no notification was received. */ + pxTCB->ucNotifyState = taskNOT_WAITING_NOTIFICATION; + } + } + #endif + } + taskEXIT_CRITICAL(); + + if( xSchedulerRunning != pdFALSE ) + { + /* Reset the next expected unblock time in case it referred to the + task that is now in the Suspended state. */ + taskENTER_CRITICAL(); + { + prvResetNextTaskUnblockTime(); + } + taskEXIT_CRITICAL(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + if( pxTCB == pxCurrentTCB ) + { + if( xSchedulerRunning != pdFALSE ) + { + /* The current task has just been suspended. */ + configASSERT( uxSchedulerSuspended == 0 ); + portYIELD_WITHIN_API(); + } + else + { + /* The scheduler is not running, but the task that was pointed + to by pxCurrentTCB has just been suspended and pxCurrentTCB + must be adjusted to point to a different task. */ + if( listCURRENT_LIST_LENGTH( &xSuspendedTaskList ) == uxCurrentNumberOfTasks ) /*lint !e931 Right has no side effect, just volatile. */ + { + /* No other tasks are ready, so set pxCurrentTCB back to + NULL so when the next task is created pxCurrentTCB will + be set to point to it no matter what its relative priority + is. */ + pxCurrentTCB = NULL; + } + else + { + vTaskSwitchContext(); + } + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + +#endif /* INCLUDE_vTaskSuspend */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_vTaskSuspend == 1 ) + + static BaseType_t prvTaskIsTaskSuspended( const TaskHandle_t xTask ) + { + BaseType_t xReturn = pdFALSE; + const TCB_t * const pxTCB = xTask; + + /* Accesses xPendingReadyList so must be called from a critical + section. */ + + /* It does not make sense to check if the calling task is suspended. */ + configASSERT( xTask ); + + /* Is the task being resumed actually in the suspended list? 
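[Illustrative sketch, not from the upstream sources.] vTaskSuspend() above removes the task from whichever ready, delayed or event list it is on; a suspended task only runs again after vTaskResume() or xTaskResumeFromISR(), both defined a little further below. Usage, again with a hypothetical handle:

extern TaskHandle_t xWorkerHandle;           // hypothetical

void vPauseWorker_sketch( void )
{
    vTaskSuspend( xWorkerHandle );           // suspend another task
    // ...
    vTaskResume( xWorkerHandle );            // make it schedulable again
}

void vPauseSelf_sketch( void )
{
    vTaskSuspend( NULL );                    // NULL suspends the caller; some other context must resume it
}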
*/ + if( listIS_CONTAINED_WITHIN( &xSuspendedTaskList, &( pxTCB->xStateListItem ) ) != pdFALSE ) + { + /* Has the task already been resumed from within an ISR? */ + if( listIS_CONTAINED_WITHIN( &xPendingReadyList, &( pxTCB->xEventListItem ) ) == pdFALSE ) + { + /* Is it in the suspended list because it is in the Suspended + state, or because is is blocked with no timeout? */ + if( listIS_CONTAINED_WITHIN( NULL, &( pxTCB->xEventListItem ) ) != pdFALSE ) /*lint !e961. The cast is only redundant when NULL is used. */ + { + xReturn = pdTRUE; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + return xReturn; + } /*lint !e818 xTask cannot be a pointer to const because it is a typedef. */ + +#endif /* INCLUDE_vTaskSuspend */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_vTaskSuspend == 1 ) + + void vTaskResume( TaskHandle_t xTaskToResume ) + { + TCB_t * const pxTCB = xTaskToResume; + + /* It does not make sense to resume the calling task. */ + configASSERT( xTaskToResume ); + + /* The parameter cannot be NULL as it is impossible to resume the + currently executing task. */ + if( ( pxTCB != pxCurrentTCB ) && ( pxTCB != NULL ) ) + { + taskENTER_CRITICAL(); + { + if( prvTaskIsTaskSuspended( pxTCB ) != pdFALSE ) + { + traceTASK_RESUME( pxTCB ); + + /* The ready list can be accessed even if the scheduler is + suspended because this is inside a critical section. */ + ( void ) uxListRemove( &( pxTCB->xStateListItem ) ); + prvAddTaskToReadyList( pxTCB ); + + /* A higher priority task may have just been resumed. */ + if( pxTCB->uxPriority >= pxCurrentTCB->uxPriority ) + { + /* This yield may not cause the task just resumed to run, + but will leave the lists in the correct state for the + next yield. */ + taskYIELD_IF_USING_PREEMPTION(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + taskEXIT_CRITICAL(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + +#endif /* INCLUDE_vTaskSuspend */ + +/*-----------------------------------------------------------*/ + +#if ( ( INCLUDE_xTaskResumeFromISR == 1 ) && ( INCLUDE_vTaskSuspend == 1 ) ) + + BaseType_t xTaskResumeFromISR( TaskHandle_t xTaskToResume ) + { + BaseType_t xYieldRequired = pdFALSE; + TCB_t * const pxTCB = xTaskToResume; + UBaseType_t uxSavedInterruptStatus; + + configASSERT( xTaskToResume ); + + /* RTOS ports that support interrupt nesting have the concept of a + maximum system call (or maximum API call) interrupt priority. + Interrupts that are above the maximum system call priority are keep + permanently enabled, even when the RTOS kernel is in a critical section, + but cannot make any calls to FreeRTOS API functions. If configASSERT() + is defined in FreeRTOSConfig.h then + portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion + failure if a FreeRTOS API function is called from an interrupt that has + been assigned a priority above the configured maximum system call + priority. Only FreeRTOS functions that end in FromISR can be called + from interrupts that have been assigned a priority at or (logically) + below the maximum system call interrupt priority. FreeRTOS maintains a + separate interrupt safe API to ensure interrupt entry is as fast and as + simple as possible. 
More information (albeit Cortex-M specific) is + provided on the following link: + https://www.freertos.org/RTOS-Cortex-M3-M4.html */ + portASSERT_IF_INTERRUPT_PRIORITY_INVALID(); + + uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR(); + { + if( prvTaskIsTaskSuspended( pxTCB ) != pdFALSE ) + { + traceTASK_RESUME_FROM_ISR( pxTCB ); + + /* Check the ready lists can be accessed. */ + if( uxSchedulerSuspended == ( UBaseType_t ) pdFALSE ) + { + /* Ready lists can be accessed so move the task from the + suspended list to the ready list directly. */ + if( pxTCB->uxPriority >= pxCurrentTCB->uxPriority ) + { + xYieldRequired = pdTRUE; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + ( void ) uxListRemove( &( pxTCB->xStateListItem ) ); + prvAddTaskToReadyList( pxTCB ); + } + else + { + /* The delayed or ready lists cannot be accessed so the task + is held in the pending ready list until the scheduler is + unsuspended. */ + vListInsertEnd( &( xPendingReadyList ), &( pxTCB->xEventListItem ) ); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ); + + return xYieldRequired; + } + +#endif /* ( ( INCLUDE_xTaskResumeFromISR == 1 ) && ( INCLUDE_vTaskSuspend == 1 ) ) */ +/*-----------------------------------------------------------*/ + +void vTaskStartScheduler( void ) +{ +BaseType_t xReturn; + + /* Add the idle task at the lowest priority. */ + #if( configSUPPORT_STATIC_ALLOCATION == 1 ) + { + StaticTask_t *pxIdleTaskTCBBuffer = NULL; + StackType_t *pxIdleTaskStackBuffer = NULL; + uint32_t ulIdleTaskStackSize; + + /* The Idle task is created using user provided RAM - obtain the + address of the RAM then create the idle task. */ + vApplicationGetIdleTaskMemory( &pxIdleTaskTCBBuffer, &pxIdleTaskStackBuffer, &ulIdleTaskStackSize ); + xIdleTaskHandle = xTaskCreateStatic( prvIdleTask, + configIDLE_TASK_NAME, + ulIdleTaskStackSize, + ( void * ) NULL, /*lint !e961. The cast is not redundant for all compilers. */ + portPRIVILEGE_BIT, /* In effect ( tskIDLE_PRIORITY | portPRIVILEGE_BIT ), but tskIDLE_PRIORITY is zero. */ + pxIdleTaskStackBuffer, + pxIdleTaskTCBBuffer ); /*lint !e961 MISRA exception, justified as it is not a redundant explicit cast to all supported compilers. */ + + if( xIdleTaskHandle != NULL ) + { + xReturn = pdPASS; + } + else + { + xReturn = pdFAIL; + } + } + #else + { + /* The Idle task is being created using dynamically allocated RAM. */ + xReturn = xTaskCreate( prvIdleTask, + configIDLE_TASK_NAME, + configMINIMAL_STACK_SIZE, + ( void * ) NULL, + portPRIVILEGE_BIT, /* In effect ( tskIDLE_PRIORITY | portPRIVILEGE_BIT ), but tskIDLE_PRIORITY is zero. */ + &xIdleTaskHandle ); /*lint !e961 MISRA exception, justified as it is not a redundant explicit cast to all supported compilers. */ + } + #endif /* configSUPPORT_STATIC_ALLOCATION */ + + #if ( configUSE_TIMERS == 1 ) + { + if( xReturn == pdPASS ) + { + xReturn = xTimerCreateTimerTask(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + #endif /* configUSE_TIMERS */ + + if( xReturn == pdPASS ) + { + /* freertos_tasks_c_additions_init() should only be called if the user + definable macro FREERTOS_TASKS_C_ADDITIONS_INIT() is defined, as that is + the only macro called by the function. */ + #ifdef FREERTOS_TASKS_C_ADDITIONS_INIT + { + freertos_tasks_c_additions_init(); + } + #endif + + /* Interrupts are turned off here, to ensure a tick does not occur + before or during the call to xPortStartScheduler(). 
The stacks of + the created tasks contain a status word with interrupts switched on + so interrupts will automatically get re-enabled when the first task + starts to run. */ + portDISABLE_INTERRUPTS(); + + #if ( configUSE_NEWLIB_REENTRANT == 1 ) + { + /* Switch Newlib's _impure_ptr variable to point to the _reent + structure specific to the task that will run first. */ + _impure_ptr = &( pxCurrentTCB->xNewLib_reent ); + } + #endif /* configUSE_NEWLIB_REENTRANT */ + + xNextTaskUnblockTime = portMAX_DELAY; + xSchedulerRunning = pdTRUE; + xTickCount = ( TickType_t ) configINITIAL_TICK_COUNT; + + /* If configGENERATE_RUN_TIME_STATS is defined then the following + macro must be defined to configure the timer/counter used to generate + the run time counter time base. NOTE: If configGENERATE_RUN_TIME_STATS + is set to 0 and the following line fails to build then ensure you do not + have portCONFIGURE_TIMER_FOR_RUN_TIME_STATS() defined in your + FreeRTOSConfig.h file. */ + portCONFIGURE_TIMER_FOR_RUN_TIME_STATS(); + + traceTASK_SWITCHED_IN(); + + /* Setting up the timer tick is hardware specific and thus in the + portable interface. */ + if( xPortStartScheduler() != pdFALSE ) + { + /* Should not reach here as if the scheduler is running the + function will not return. */ + } + else + { + /* Should only reach here if a task calls xTaskEndScheduler(). */ + } + } + else + { + /* This line will only be reached if the kernel could not be started, + because there was not enough FreeRTOS heap to create the idle task + or the timer task. */ + configASSERT( xReturn != errCOULD_NOT_ALLOCATE_REQUIRED_MEMORY ); + } + + /* Prevent compiler warnings if INCLUDE_xTaskGetIdleTaskHandle is set to 0, + meaning xIdleTaskHandle is not used anywhere else. */ + ( void ) xIdleTaskHandle; +} +/*-----------------------------------------------------------*/ + +void vTaskEndScheduler( void ) +{ + /* Stop the scheduler interrupts and call the portable scheduler end + routine so the original ISRs can be restored if necessary. The port + layer must ensure interrupts enable bit is left in the correct state. */ + portDISABLE_INTERRUPTS(); + xSchedulerRunning = pdFALSE; + vPortEndScheduler(); +} +/*----------------------------------------------------------*/ + +void vTaskSuspendAll( void ) +{ + /* A critical section is not required as the variable is of type + BaseType_t. Please read Richard Barry's reply in the following link to a + post in the FreeRTOS support forum before reporting this as a bug! - + http://goo.gl/wu4acr */ + ++uxSchedulerSuspended; + portMEMORY_BARRIER(); +} +/*----------------------------------------------------------*/ + +#if ( configUSE_TICKLESS_IDLE != 0 ) + + static TickType_t prvGetExpectedIdleTime( void ) + { + TickType_t xReturn; + UBaseType_t uxHigherPriorityReadyTasks = pdFALSE; + + /* uxHigherPriorityReadyTasks takes care of the case where + configUSE_PREEMPTION is 0, so there may be tasks above the idle priority + task that are in the Ready state, even though the idle task is + running. */ + #if( configUSE_PORT_OPTIMISED_TASK_SELECTION == 0 ) + { + if( uxTopReadyPriority > tskIDLE_PRIORITY ) + { + uxHigherPriorityReadyTasks = pdTRUE; + } + } + #else + { + const UBaseType_t uxLeastSignificantBit = ( UBaseType_t ) 0x01; + + /* When port optimised task selection is used the uxTopReadyPriority + variable is used as a bit map. If bits other than the least + significant bit are set then there are tasks that have a priority + above the idle priority that are in the Ready state. 
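[Illustrative sketch, not part of the patch.] vTaskStartScheduler() above creates the idle task (and the timer service task when configUSE_TIMERS is 1) and then hands control to the port layer; it only returns if there was not enough heap for those tasks. The usual start-up shape is therefore:

static void vWorker( void *pvParameters );                 // hypothetical task function

int main_sketch( void )                                    // illustrative only; clock/HAL init omitted
{
    xTaskCreate( vWorker, "worker", 256, NULL, tskIDLE_PRIORITY + 1, NULL );
    vTaskStartScheduler();                                 // does not return in the normal case
    for( ;; ) { }                                          // only reached on allocation failure
}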
This takes + care of the case where the co-operative scheduler is in use. */ + if( uxTopReadyPriority > uxLeastSignificantBit ) + { + uxHigherPriorityReadyTasks = pdTRUE; + } + } + #endif + + if( pxCurrentTCB->uxPriority > tskIDLE_PRIORITY ) + { + xReturn = 0; + } + else if( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ tskIDLE_PRIORITY ] ) ) > 1 ) + { + /* There are other idle priority tasks in the ready state. If + time slicing is used then the very next tick interrupt must be + processed. */ + xReturn = 0; + } + else if( uxHigherPriorityReadyTasks != pdFALSE ) + { + /* There are tasks in the Ready state that have a priority above the + idle priority. This path can only be reached if + configUSE_PREEMPTION is 0. */ + xReturn = 0; + } + else + { + xReturn = xNextTaskUnblockTime - xTickCount; + } + + return xReturn; + } + +#endif /* configUSE_TICKLESS_IDLE */ +/*----------------------------------------------------------*/ + +BaseType_t xTaskResumeAll( void ) +{ +TCB_t *pxTCB = NULL; +BaseType_t xAlreadyYielded = pdFALSE; + + /* If uxSchedulerSuspended is zero then this function does not match a + previous call to vTaskSuspendAll(). */ + configASSERT( uxSchedulerSuspended ); + + /* It is possible that an ISR caused a task to be removed from an event + list while the scheduler was suspended. If this was the case then the + removed task will have been added to the xPendingReadyList. Once the + scheduler has been resumed it is safe to move all the pending ready + tasks from this list into their appropriate ready list. */ + taskENTER_CRITICAL(); + { + --uxSchedulerSuspended; + + if( uxSchedulerSuspended == ( UBaseType_t ) pdFALSE ) + { + if( uxCurrentNumberOfTasks > ( UBaseType_t ) 0U ) + { + /* Move any readied tasks from the pending list into the + appropriate ready list. */ + while( listLIST_IS_EMPTY( &xPendingReadyList ) == pdFALSE ) + { + pxTCB = listGET_OWNER_OF_HEAD_ENTRY( ( &xPendingReadyList ) ); /*lint !e9079 void * is used as this macro is used with timers and co-routines too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */ + ( void ) uxListRemove( &( pxTCB->xEventListItem ) ); + ( void ) uxListRemove( &( pxTCB->xStateListItem ) ); + prvAddTaskToReadyList( pxTCB ); + + /* If the moved task has a priority higher than the current + task then a yield must be performed. */ + if( pxTCB->uxPriority >= pxCurrentTCB->uxPriority ) + { + xYieldPending = pdTRUE; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + + if( pxTCB != NULL ) + { + /* A task was unblocked while the scheduler was suspended, + which may have prevented the next unblock time from being + re-calculated, in which case re-calculate it now. Mainly + important for low power tickless implementations, where + this can prevent an unnecessary exit from low power + state. */ + prvResetNextTaskUnblockTime(); + } + + /* If any ticks occurred while the scheduler was suspended then + they should be processed now. This ensures the tick count does + not slip, and that any delayed tasks are resumed at the correct + time. */ + { + UBaseType_t uxPendedCounts = uxPendedTicks; /* Non-volatile copy. 
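[Illustrative sketch, not from the upstream sources.] xTaskResumeAll() here is the counterpart of vTaskSuspendAll() further above; together they protect longer operations from task switches without masking interrupts, and anything an ISR unblocks in the meantime is parked on xPendingReadyList until the resume:

void vWalkSharedList_sketch( void )
{
    vTaskSuspendAll();                       // no context switch can occur from here on...
    {
        // ...walk or modify a structure shared only between tasks (not with ISRs)...
    }
    ( void ) xTaskResumeAll();               // returns pdTRUE if it already performed the pending yield
}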
*/ + + if( uxPendedCounts > ( UBaseType_t ) 0U ) + { + do + { + if( xTaskIncrementTick() != pdFALSE ) + { + xYieldPending = pdTRUE; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + --uxPendedCounts; + } while( uxPendedCounts > ( UBaseType_t ) 0U ); + + uxPendedTicks = 0; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + + if( xYieldPending != pdFALSE ) + { + #if( configUSE_PREEMPTION != 0 ) + { + xAlreadyYielded = pdTRUE; + } + #endif + taskYIELD_IF_USING_PREEMPTION(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + taskEXIT_CRITICAL(); + + return xAlreadyYielded; +} +/*-----------------------------------------------------------*/ + +TickType_t xTaskGetTickCount( void ) +{ +TickType_t xTicks; + + /* Critical section required if running on a 16 bit processor. */ + portTICK_TYPE_ENTER_CRITICAL(); + { + xTicks = xTickCount; + } + portTICK_TYPE_EXIT_CRITICAL(); + + return xTicks; +} +/*-----------------------------------------------------------*/ + +TickType_t xTaskGetTickCountFromISR( void ) +{ +TickType_t xReturn; +UBaseType_t uxSavedInterruptStatus; + + /* RTOS ports that support interrupt nesting have the concept of a maximum + system call (or maximum API call) interrupt priority. Interrupts that are + above the maximum system call priority are kept permanently enabled, even + when the RTOS kernel is in a critical section, but cannot make any calls to + FreeRTOS API functions. If configASSERT() is defined in FreeRTOSConfig.h + then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion + failure if a FreeRTOS API function is called from an interrupt that has been + assigned a priority above the configured maximum system call priority. + Only FreeRTOS functions that end in FromISR can be called from interrupts + that have been assigned a priority at or (logically) below the maximum + system call interrupt priority. FreeRTOS maintains a separate interrupt + safe API to ensure interrupt entry is as fast and as simple as possible. + More information (albeit Cortex-M specific) is provided on the following + link: https://www.freertos.org/RTOS-Cortex-M3-M4.html */ + portASSERT_IF_INTERRUPT_PRIORITY_INVALID(); + + uxSavedInterruptStatus = portTICK_TYPE_SET_INTERRUPT_MASK_FROM_ISR(); + { + xReturn = xTickCount; + } + portTICK_TYPE_CLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ); + + return xReturn; +} +/*-----------------------------------------------------------*/ + +UBaseType_t uxTaskGetNumberOfTasks( void ) +{ + /* A critical section is not required because the variables are of type + BaseType_t. */ + return uxCurrentNumberOfTasks; +} +/*-----------------------------------------------------------*/ + +char *pcTaskGetName( TaskHandle_t xTaskToQuery ) /*lint !e971 Unqualified char types are allowed for strings and single characters only. */ +{ +TCB_t *pxTCB; + + /* If null is passed in here then the name of the calling task is being + queried. */ + pxTCB = prvGetTCBFromHandle( xTaskToQuery ); + configASSERT( pxTCB ); + return &( pxTCB->pcTaskName[ 0 ] ); +} +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_xTaskGetHandle == 1 ) + + static TCB_t *prvSearchForNameWithinSingleList( List_t *pxList, const char pcNameToQuery[] ) + { + TCB_t *pxNextTCB, *pxFirstTCB, *pxReturn = NULL; + UBaseType_t x; + char cNextChar; + BaseType_t xBreakLoop; + + /* This function is called with the scheduler suspended. 
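[Illustrative sketch, not part of the patch.] xTaskGetTickCount() above (and its FromISR variant) return the raw tick count; pdMS_TO_TICKS() and portTICK_PERIOD_MS convert between ticks and milliseconds. A small elapsed-time measurement, with prvDoWork() hypothetical:

static void prvDoWork( void );                             // hypothetical

void vTimeWork_sketch( void )
{
    TickType_t xStart = xTaskGetTickCount();
    prvDoWork();
    // Unsigned subtraction stays correct across a single tick-count wrap.
    uint32_t ulElapsedMs = ( uint32_t ) ( xTaskGetTickCount() - xStart ) * portTICK_PERIOD_MS;
    ( void ) ulElapsedMs;
}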
*/ + + if( listCURRENT_LIST_LENGTH( pxList ) > ( UBaseType_t ) 0 ) + { + listGET_OWNER_OF_NEXT_ENTRY( pxFirstTCB, pxList ); /*lint !e9079 void * is used as this macro is used with timers and co-routines too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */ + + do + { + listGET_OWNER_OF_NEXT_ENTRY( pxNextTCB, pxList ); /*lint !e9079 void * is used as this macro is used with timers and co-routines too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */ + + /* Check each character in the name looking for a match or + mismatch. */ + xBreakLoop = pdFALSE; + for( x = ( UBaseType_t ) 0; x < ( UBaseType_t ) configMAX_TASK_NAME_LEN; x++ ) + { + cNextChar = pxNextTCB->pcTaskName[ x ]; + + if( cNextChar != pcNameToQuery[ x ] ) + { + /* Characters didn't match. */ + xBreakLoop = pdTRUE; + } + else if( cNextChar == ( char ) 0x00 ) + { + /* Both strings terminated, a match must have been + found. */ + pxReturn = pxNextTCB; + xBreakLoop = pdTRUE; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + if( xBreakLoop != pdFALSE ) + { + break; + } + } + + if( pxReturn != NULL ) + { + /* The handle has been found. */ + break; + } + + } while( pxNextTCB != pxFirstTCB ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + return pxReturn; + } + +#endif /* INCLUDE_xTaskGetHandle */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_xTaskGetHandle == 1 ) + + TaskHandle_t xTaskGetHandle( const char *pcNameToQuery ) /*lint !e971 Unqualified char types are allowed for strings and single characters only. */ + { + UBaseType_t uxQueue = configMAX_PRIORITIES; + TCB_t* pxTCB; + + /* Task names will be truncated to configMAX_TASK_NAME_LEN - 1 bytes. */ + configASSERT( strlen( pcNameToQuery ) < configMAX_TASK_NAME_LEN ); + + vTaskSuspendAll(); + { + /* Search the ready lists. */ + do + { + uxQueue--; + pxTCB = prvSearchForNameWithinSingleList( ( List_t * ) &( pxReadyTasksLists[ uxQueue ] ), pcNameToQuery ); + + if( pxTCB != NULL ) + { + /* Found the handle. */ + break; + } + + } while( uxQueue > ( UBaseType_t ) tskIDLE_PRIORITY ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */ + + /* Search the delayed lists. */ + if( pxTCB == NULL ) + { + pxTCB = prvSearchForNameWithinSingleList( ( List_t * ) pxDelayedTaskList, pcNameToQuery ); + } + + if( pxTCB == NULL ) + { + pxTCB = prvSearchForNameWithinSingleList( ( List_t * ) pxOverflowDelayedTaskList, pcNameToQuery ); + } + + #if ( INCLUDE_vTaskSuspend == 1 ) + { + if( pxTCB == NULL ) + { + /* Search the suspended list. */ + pxTCB = prvSearchForNameWithinSingleList( &xSuspendedTaskList, pcNameToQuery ); + } + } + #endif + + #if( INCLUDE_vTaskDelete == 1 ) + { + if( pxTCB == NULL ) + { + /* Search the deleted list. */ + pxTCB = prvSearchForNameWithinSingleList( &xTasksWaitingTermination, pcNameToQuery ); + } + } + #endif + } + ( void ) xTaskResumeAll(); + + return pxTCB; + } + +#endif /* INCLUDE_xTaskGetHandle */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TRACE_FACILITY == 1 ) + + UBaseType_t uxTaskGetSystemState( TaskStatus_t * const pxTaskStatusArray, const UBaseType_t uxArraySize, uint32_t * const pulTotalRunTime ) + { + UBaseType_t uxTask = 0, uxQueue = configMAX_PRIORITIES; + + vTaskSuspendAll(); + { + /* Is there a space in the array for each task in the system? 
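[Illustrative sketch, not from the upstream sources.] xTaskGetHandle() above walks every task list with the scheduler suspended, so it is comparatively slow and intended for debug or supervisory code rather than hot paths:

void vFindAndSuspend_sketch( void )
{
    TaskHandle_t xFound = xTaskGetHandle( "worker" );      // name given to xTaskCreate(), truncated form
    if( xFound != NULL )
    {
        vTaskSuspend( xFound );
    }
}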
*/ + if( uxArraySize >= uxCurrentNumberOfTasks ) + { + /* Fill in an TaskStatus_t structure with information on each + task in the Ready state. */ + do + { + uxQueue--; + uxTask += prvListTasksWithinSingleList( &( pxTaskStatusArray[ uxTask ] ), &( pxReadyTasksLists[ uxQueue ] ), eReady ); + + } while( uxQueue > ( UBaseType_t ) tskIDLE_PRIORITY ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */ + + /* Fill in an TaskStatus_t structure with information on each + task in the Blocked state. */ + uxTask += prvListTasksWithinSingleList( &( pxTaskStatusArray[ uxTask ] ), ( List_t * ) pxDelayedTaskList, eBlocked ); + uxTask += prvListTasksWithinSingleList( &( pxTaskStatusArray[ uxTask ] ), ( List_t * ) pxOverflowDelayedTaskList, eBlocked ); + + #if( INCLUDE_vTaskDelete == 1 ) + { + /* Fill in an TaskStatus_t structure with information on + each task that has been deleted but not yet cleaned up. */ + uxTask += prvListTasksWithinSingleList( &( pxTaskStatusArray[ uxTask ] ), &xTasksWaitingTermination, eDeleted ); + } + #endif + + #if ( INCLUDE_vTaskSuspend == 1 ) + { + /* Fill in an TaskStatus_t structure with information on + each task in the Suspended state. */ + uxTask += prvListTasksWithinSingleList( &( pxTaskStatusArray[ uxTask ] ), &xSuspendedTaskList, eSuspended ); + } + #endif + + #if ( configGENERATE_RUN_TIME_STATS == 1) + { + if( pulTotalRunTime != NULL ) + { + #ifdef portALT_GET_RUN_TIME_COUNTER_VALUE + portALT_GET_RUN_TIME_COUNTER_VALUE( ( *pulTotalRunTime ) ); + #else + *pulTotalRunTime = portGET_RUN_TIME_COUNTER_VALUE(); + #endif + } + } + #else + { + if( pulTotalRunTime != NULL ) + { + *pulTotalRunTime = 0; + } + } + #endif + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + ( void ) xTaskResumeAll(); + + return uxTask; + } + +#endif /* configUSE_TRACE_FACILITY */ +/*----------------------------------------------------------*/ + +#if ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) + + TaskHandle_t xTaskGetIdleTaskHandle( void ) + { + /* If xTaskGetIdleTaskHandle() is called before the scheduler has been + started, then xIdleTaskHandle will be NULL. */ + configASSERT( ( xIdleTaskHandle != NULL ) ); + return xIdleTaskHandle; + } + +#endif /* INCLUDE_xTaskGetIdleTaskHandle */ +/*----------------------------------------------------------*/ + +/* This conditional compilation should use inequality to 0, not equality to 1. +This is to ensure vTaskStepTick() is available when user defined low power mode +implementations require configUSE_TICKLESS_IDLE to be set to a value other than +1. */ +#if ( configUSE_TICKLESS_IDLE != 0 ) + + void vTaskStepTick( const TickType_t xTicksToJump ) + { + /* Correct the tick count value after a period during which the tick + was suppressed. Note this does *not* call the tick hook function for + each stepped tick. */ + configASSERT( ( xTickCount + xTicksToJump ) <= xNextTaskUnblockTime ); + xTickCount += xTicksToJump; + traceINCREASE_TICK_COUNT( xTicksToJump ); + } + +#endif /* configUSE_TICKLESS_IDLE */ +/*----------------------------------------------------------*/ + +#if ( INCLUDE_xTaskAbortDelay == 1 ) + + BaseType_t xTaskAbortDelay( TaskHandle_t xTask ) + { + TCB_t *pxTCB = xTask; + BaseType_t xReturn; + + configASSERT( pxTCB ); + + vTaskSuspendAll(); + { + /* A task can only be prematurely removed from the Blocked state if + it is actually in the Blocked state. */ + if( eTaskGetState( xTask ) == eBlocked ) + { + xReturn = pdPASS; + + /* Remove the reference to the task from the blocked list. 
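[Illustrative sketch, not part of the patch.] uxTaskGetSystemState() above snapshots every task while the scheduler is suspended and requires configUSE_TRACE_FACILITY to be 1; it returns 0 if the supplied array is too small. A small diagnostic dump, with the fixed array of 10 entries as an assumption:

void vDumpTasks_sketch( void )
{
    TaskStatus_t xStatus[ 10 ];                            // assumed upper bound for the sketch
    uint32_t ulTotalRunTime = 0;
    UBaseType_t uxCount = uxTaskGetSystemState( xStatus, 10, &ulTotalRunTime );
    UBaseType_t ux;

    for( ux = 0; ux < uxCount; ux++ )
    {
        // xStatus[ ux ].pcTaskName, .eCurrentState, .uxCurrentPriority, .ulRunTimeCounter, ...
        ( void ) xStatus[ ux ];
    }
}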
An + interrupt won't touch the xStateListItem because the + scheduler is suspended. */ + ( void ) uxListRemove( &( pxTCB->xStateListItem ) ); + + /* Is the task waiting on an event also? If so remove it from + the event list too. Interrupts can touch the event list item, + even though the scheduler is suspended, so a critical section + is used. */ + taskENTER_CRITICAL(); + { + if( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) != NULL ) + { + ( void ) uxListRemove( &( pxTCB->xEventListItem ) ); + pxTCB->ucDelayAborted = pdTRUE; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + taskEXIT_CRITICAL(); + + /* Place the unblocked task into the appropriate ready list. */ + prvAddTaskToReadyList( pxTCB ); + + /* A task being unblocked cannot cause an immediate context + switch if preemption is turned off. */ + #if ( configUSE_PREEMPTION == 1 ) + { + /* Preemption is on, but a context switch should only be + performed if the unblocked task has a priority that is + equal to or higher than the currently executing task. */ + if( pxTCB->uxPriority > pxCurrentTCB->uxPriority ) + { + /* Pend the yield to be performed when the scheduler + is unsuspended. */ + xYieldPending = pdTRUE; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + #endif /* configUSE_PREEMPTION */ + } + else + { + xReturn = pdFAIL; + } + } + ( void ) xTaskResumeAll(); + + return xReturn; + } + +#endif /* INCLUDE_xTaskAbortDelay */ +/*----------------------------------------------------------*/ + +BaseType_t xTaskIncrementTick( void ) +{ +TCB_t * pxTCB; +TickType_t xItemValue; +BaseType_t xSwitchRequired = pdFALSE; + + /* Called by the portable layer each time a tick interrupt occurs. + Increments the tick then checks to see if the new tick value will cause any + tasks to be unblocked. */ + traceTASK_INCREMENT_TICK( xTickCount ); + if( uxSchedulerSuspended == ( UBaseType_t ) pdFALSE ) + { + /* Minor optimisation. The tick count cannot change in this + block. */ + const TickType_t xConstTickCount = xTickCount + ( TickType_t ) 1; + + /* Increment the RTOS tick, switching the delayed and overflowed + delayed lists if it wraps to 0. */ + xTickCount = xConstTickCount; + + if( xConstTickCount == ( TickType_t ) 0U ) /*lint !e774 'if' does not always evaluate to false as it is looking for an overflow. */ + { + taskSWITCH_DELAYED_LISTS(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + /* See if this tick has made a timeout expire. Tasks are stored in + the queue in the order of their wake time - meaning once one task + has been found whose block time has not expired there is no need to + look any further down the list. */ + if( xConstTickCount >= xNextTaskUnblockTime ) + { + for( ;; ) + { + if( listLIST_IS_EMPTY( pxDelayedTaskList ) != pdFALSE ) + { + /* The delayed list is empty. Set xNextTaskUnblockTime + to the maximum possible value so it is extremely + unlikely that the + if( xTickCount >= xNextTaskUnblockTime ) test will pass + next time through. */ + xNextTaskUnblockTime = portMAX_DELAY; /*lint !e961 MISRA exception as the casts are only redundant for some ports. */ + break; + } + else + { + /* The delayed list is not empty, get the value of the + item at the head of the delayed list. This is the time + at which the task at the head of the delayed list must + be removed from the Blocked state. */ + pxTCB = listGET_OWNER_OF_HEAD_ENTRY( pxDelayedTaskList ); /*lint !e9079 void * is used as this macro is used with timers and co-routines too. 
Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */ + xItemValue = listGET_LIST_ITEM_VALUE( &( pxTCB->xStateListItem ) ); + + if( xConstTickCount < xItemValue ) + { + /* It is not time to unblock this item yet, but the + item value is the time at which the task at the head + of the blocked list must be removed from the Blocked + state - so record the item value in + xNextTaskUnblockTime. */ + xNextTaskUnblockTime = xItemValue; + break; /*lint !e9011 Code structure here is deedmed easier to understand with multiple breaks. */ + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + /* It is time to remove the item from the Blocked state. */ + ( void ) uxListRemove( &( pxTCB->xStateListItem ) ); + + /* Is the task waiting on an event also? If so remove + it from the event list. */ + if( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) != NULL ) + { + ( void ) uxListRemove( &( pxTCB->xEventListItem ) ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + /* Place the unblocked task into the appropriate ready + list. */ + prvAddTaskToReadyList( pxTCB ); + + /* A task being unblocked cannot cause an immediate + context switch if preemption is turned off. */ + #if ( configUSE_PREEMPTION == 1 ) + { + /* Preemption is on, but a context switch should + only be performed if the unblocked task has a + priority that is equal to or higher than the + currently executing task. */ + if( pxTCB->uxPriority >= pxCurrentTCB->uxPriority ) + { + xSwitchRequired = pdTRUE; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + #endif /* configUSE_PREEMPTION */ + } + } + } + + /* Tasks of equal priority to the currently running task will share + processing time (time slice) if preemption is on, and the application + writer has not explicitly turned time slicing off. */ + #if ( ( configUSE_PREEMPTION == 1 ) && ( configUSE_TIME_SLICING == 1 ) ) + { + if( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ pxCurrentTCB->uxPriority ] ) ) > ( UBaseType_t ) 1 ) + { + xSwitchRequired = pdTRUE; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + #endif /* ( ( configUSE_PREEMPTION == 1 ) && ( configUSE_TIME_SLICING == 1 ) ) */ + + #if ( configUSE_TICK_HOOK == 1 ) + { + /* Guard against the tick hook being called when the pended tick + count is being unwound (when the scheduler is being unlocked). */ + if( uxPendedTicks == ( UBaseType_t ) 0U ) + { + vApplicationTickHook(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + #endif /* configUSE_TICK_HOOK */ + } + else + { + ++uxPendedTicks; + + /* The tick hook gets called at regular intervals, even if the + scheduler is locked. */ + #if ( configUSE_TICK_HOOK == 1 ) + { + vApplicationTickHook(); + } + #endif + } + + #if ( configUSE_PREEMPTION == 1 ) + { + if( xYieldPending != pdFALSE ) + { + xSwitchRequired = pdTRUE; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + #endif /* configUSE_PREEMPTION */ + + return xSwitchRequired; +} +/*-----------------------------------------------------------*/ + +#if ( configUSE_APPLICATION_TASK_TAG == 1 ) + + void vTaskSetApplicationTaskTag( TaskHandle_t xTask, TaskHookFunction_t pxHookFunction ) + { + TCB_t *xTCB; + + /* If xTask is NULL then it is the task hook of the calling task that is + getting set. */ + if( xTask == NULL ) + { + xTCB = ( TCB_t * ) pxCurrentTCB; + } + else + { + xTCB = xTask; + } + + /* Save the hook function in the TCB. A critical section is required as + the value can be accessed from an interrupt. 
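[Illustrative sketch, not from the upstream sources.] When configUSE_TICK_HOOK is 1, xTaskIncrementTick() above calls vApplicationTickHook() from the tick interrupt, so the hook must be short and may only use FromISR APIs. A minimal definition, with a hypothetical counter, for applications that do not already provide one:

static volatile uint32_t ulTickHookCount = 0;              // hypothetical

void vApplicationTickHook( void )
{
    ulTickHookCount++;                                     // interrupt context: no blocking calls here
}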
*/ + taskENTER_CRITICAL(); + { + xTCB->pxTaskTag = pxHookFunction; + } + taskEXIT_CRITICAL(); + } + +#endif /* configUSE_APPLICATION_TASK_TAG */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_APPLICATION_TASK_TAG == 1 ) + + TaskHookFunction_t xTaskGetApplicationTaskTag( TaskHandle_t xTask ) + { + TCB_t *pxTCB; + TaskHookFunction_t xReturn; + + /* If xTask is NULL then set the calling task's hook. */ + pxTCB = prvGetTCBFromHandle( xTask ); + + /* Save the hook function in the TCB. A critical section is required as + the value can be accessed from an interrupt. */ + taskENTER_CRITICAL(); + { + xReturn = pxTCB->pxTaskTag; + } + taskEXIT_CRITICAL(); + + return xReturn; + } + +#endif /* configUSE_APPLICATION_TASK_TAG */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_APPLICATION_TASK_TAG == 1 ) + + TaskHookFunction_t xTaskGetApplicationTaskTagFromISR( TaskHandle_t xTask ) + { + TCB_t *pxTCB; + TaskHookFunction_t xReturn; + UBaseType_t uxSavedInterruptStatus; + + /* If xTask is NULL then set the calling task's hook. */ + pxTCB = prvGetTCBFromHandle( xTask ); + + /* Save the hook function in the TCB. A critical section is required as + the value can be accessed from an interrupt. */ + uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR(); + { + xReturn = pxTCB->pxTaskTag; + } + portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ); + + return xReturn; + } + +#endif /* configUSE_APPLICATION_TASK_TAG */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_APPLICATION_TASK_TAG == 1 ) + + BaseType_t xTaskCallApplicationTaskHook( TaskHandle_t xTask, void *pvParameter ) + { + TCB_t *xTCB; + BaseType_t xReturn; + + /* If xTask is NULL then we are calling our own task hook. */ + if( xTask == NULL ) + { + xTCB = pxCurrentTCB; + } + else + { + xTCB = xTask; + } + + if( xTCB->pxTaskTag != NULL ) + { + xReturn = xTCB->pxTaskTag( pvParameter ); + } + else + { + xReturn = pdFAIL; + } + + return xReturn; + } + +#endif /* configUSE_APPLICATION_TASK_TAG */ +/*-----------------------------------------------------------*/ + +void vTaskSwitchContext( void ) +{ + if( uxSchedulerSuspended != ( UBaseType_t ) pdFALSE ) + { + /* The scheduler is currently suspended - do not allow a context + switch. */ + xYieldPending = pdTRUE; + } + else + { + xYieldPending = pdFALSE; + traceTASK_SWITCHED_OUT(); + + #if ( configGENERATE_RUN_TIME_STATS == 1 ) + { + #ifdef portALT_GET_RUN_TIME_COUNTER_VALUE + portALT_GET_RUN_TIME_COUNTER_VALUE( ulTotalRunTime ); + #else + ulTotalRunTime = portGET_RUN_TIME_COUNTER_VALUE(); + #endif + + /* Add the amount of time the task has been running to the + accumulated time so far. The time the task started running was + stored in ulTaskSwitchedInTime. Note that there is no overflow + protection here so count values are only valid until the timer + overflows. The guard against negative values is to protect + against suspect run time stat counter implementations - which + are provided by the application, not the kernel. */ + if( ulTotalRunTime > ulTaskSwitchedInTime ) + { + pxCurrentTCB->ulRunTimeCounter += ( ulTotalRunTime - ulTaskSwitchedInTime ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + ulTaskSwitchedInTime = ulTotalRunTime; + } + #endif /* configGENERATE_RUN_TIME_STATS */ + + /* Check for stack overflow, if configured. */ + taskCHECK_FOR_STACK_OVERFLOW(); + + /* Before the currently running task is switched out, save its errno. 
*/ + #if( configUSE_POSIX_ERRNO == 1 ) + { + pxCurrentTCB->iTaskErrno = FreeRTOS_errno; + } + #endif + + /* Select a new task to run using either the generic C or port + optimised asm code. */ + taskSELECT_HIGHEST_PRIORITY_TASK(); /*lint !e9079 void * is used as this macro is used with timers and co-routines too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */ + traceTASK_SWITCHED_IN(); + + /* After the new task is switched in, update the global errno. */ + #if( configUSE_POSIX_ERRNO == 1 ) + { + FreeRTOS_errno = pxCurrentTCB->iTaskErrno; + } + #endif + + #if ( configUSE_NEWLIB_REENTRANT == 1 ) + { + /* Switch Newlib's _impure_ptr variable to point to the _reent + structure specific to this task. */ + _impure_ptr = &( pxCurrentTCB->xNewLib_reent ); + } + #endif /* configUSE_NEWLIB_REENTRANT */ + } +} +/*-----------------------------------------------------------*/ + +void vTaskPlaceOnEventList( List_t * const pxEventList, const TickType_t xTicksToWait ) +{ + configASSERT( pxEventList ); + + /* THIS FUNCTION MUST BE CALLED WITH EITHER INTERRUPTS DISABLED OR THE + SCHEDULER SUSPENDED AND THE QUEUE BEING ACCESSED LOCKED. */ + + /* Place the event list item of the TCB in the appropriate event list. + This is placed in the list in priority order so the highest priority task + is the first to be woken by the event. The queue that contains the event + list is locked, preventing simultaneous access from interrupts. */ + vListInsert( pxEventList, &( pxCurrentTCB->xEventListItem ) ); + + prvAddCurrentTaskToDelayedList( xTicksToWait, pdTRUE ); +} +/*-----------------------------------------------------------*/ + +void vTaskPlaceOnUnorderedEventList( List_t * pxEventList, const TickType_t xItemValue, const TickType_t xTicksToWait ) +{ + configASSERT( pxEventList ); + + /* THIS FUNCTION MUST BE CALLED WITH THE SCHEDULER SUSPENDED. It is used by + the event groups implementation. */ + configASSERT( uxSchedulerSuspended != 0 ); + + /* Store the item value in the event list item. It is safe to access the + event list item here as interrupts won't access the event list item of a + task that is not in the Blocked state. */ + listSET_LIST_ITEM_VALUE( &( pxCurrentTCB->xEventListItem ), xItemValue | taskEVENT_LIST_ITEM_VALUE_IN_USE ); + + /* Place the event list item of the TCB at the end of the appropriate event + list. It is safe to access the event list here because it is part of an + event group implementation - and interrupts don't access event groups + directly (instead they access them indirectly by pending function calls to + the task level). */ + vListInsertEnd( pxEventList, &( pxCurrentTCB->xEventListItem ) ); + + prvAddCurrentTaskToDelayedList( xTicksToWait, pdTRUE ); +} +/*-----------------------------------------------------------*/ + +#if( configUSE_TIMERS == 1 ) + + void vTaskPlaceOnEventListRestricted( List_t * const pxEventList, TickType_t xTicksToWait, const BaseType_t xWaitIndefinitely ) + { + configASSERT( pxEventList ); + + /* This function should not be called by application code hence the + 'Restricted' in its name. It is not part of the public API. It is + designed for use by kernel code, and has special calling requirements - + it should be called with the scheduler suspended. */ + + + /* Place the event list item of the TCB in the appropriate event list. + In this case it is assume that this is the only task that is going to + be waiting on this event list, so the faster vListInsertEnd() function + can be used in place of vListInsert. 
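[Illustrative sketch, not part of the patch.] The vTaskPlaceOnEventList() family above is what the queue, semaphore and event-group code calls when a task blocks on an object; application code never calls it directly. From the application side the same machinery is reached, for example, through a blocking receive (xSensorQueue is a hypothetical handle and queue.h is assumed to be included):

extern QueueHandle_t xSensorQueue;                         // hypothetical, created with xQueueCreate()

void vConsume_sketch( void )
{
    uint32_t ulValue;

    // Blocking here places this task on the queue's event list and on a delayed list for the timeout.
    if( xQueueReceive( xSensorQueue, &ulValue, pdMS_TO_TICKS( 50 ) ) == pdPASS )
    {
        // data arrived within 50 ms; otherwise the task was unblocked by the timeout
    }
}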
*/ + vListInsertEnd( pxEventList, &( pxCurrentTCB->xEventListItem ) ); + + /* If the task should block indefinitely then set the block time to a + value that will be recognised as an indefinite delay inside the + prvAddCurrentTaskToDelayedList() function. */ + if( xWaitIndefinitely != pdFALSE ) + { + xTicksToWait = portMAX_DELAY; + } + + traceTASK_DELAY_UNTIL( ( xTickCount + xTicksToWait ) ); + prvAddCurrentTaskToDelayedList( xTicksToWait, xWaitIndefinitely ); + } + +#endif /* configUSE_TIMERS */ +/*-----------------------------------------------------------*/ + +BaseType_t xTaskRemoveFromEventList( const List_t * const pxEventList ) +{ +TCB_t *pxUnblockedTCB; +BaseType_t xReturn; + + /* THIS FUNCTION MUST BE CALLED FROM A CRITICAL SECTION. It can also be + called from a critical section within an ISR. */ + + /* The event list is sorted in priority order, so the first in the list can + be removed as it is known to be the highest priority. Remove the TCB from + the delayed list, and add it to the ready list. + + If an event is for a queue that is locked then this function will never + get called - the lock count on the queue will get modified instead. This + means exclusive access to the event list is guaranteed here. + + This function assumes that a check has already been made to ensure that + pxEventList is not empty. */ + pxUnblockedTCB = listGET_OWNER_OF_HEAD_ENTRY( pxEventList ); /*lint !e9079 void * is used as this macro is used with timers and co-routines too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */ + configASSERT( pxUnblockedTCB ); + ( void ) uxListRemove( &( pxUnblockedTCB->xEventListItem ) ); + + if( uxSchedulerSuspended == ( UBaseType_t ) pdFALSE ) + { + ( void ) uxListRemove( &( pxUnblockedTCB->xStateListItem ) ); + prvAddTaskToReadyList( pxUnblockedTCB ); + + #if( configUSE_TICKLESS_IDLE != 0 ) + { + /* If a task is blocked on a kernel object then xNextTaskUnblockTime + might be set to the blocked task's time out time. If the task is + unblocked for a reason other than a timeout xNextTaskUnblockTime is + normally left unchanged, because it is automatically reset to a new + value when the tick count equals xNextTaskUnblockTime. However if + tickless idling is used it might be more important to enter sleep mode + at the earliest possible time - so reset xNextTaskUnblockTime here to + ensure it is updated at the earliest possible time. */ + prvResetNextTaskUnblockTime(); + } + #endif + } + else + { + /* The delayed and ready lists cannot be accessed, so hold this task + pending until the scheduler is resumed. */ + vListInsertEnd( &( xPendingReadyList ), &( pxUnblockedTCB->xEventListItem ) ); + } + + if( pxUnblockedTCB->uxPriority > pxCurrentTCB->uxPriority ) + { + /* Return true if the task removed from the event list has a higher + priority than the calling task. This allows the calling task to know if + it should force a context switch now. */ + xReturn = pdTRUE; + + /* Mark that a yield is pending in case the user is not using the + "xHigherPriorityTaskWoken" parameter to an ISR safe FreeRTOS function. */ + xYieldPending = pdTRUE; + } + else + { + xReturn = pdFALSE; + } + + return xReturn; +} +/*-----------------------------------------------------------*/ + +void vTaskRemoveFromUnorderedEventList( ListItem_t * pxEventListItem, const TickType_t xItemValue ) +{ +TCB_t *pxUnblockedTCB; + + /* THIS FUNCTION MUST BE CALLED WITH THE SCHEDULER SUSPENDED. It is used by + the event flags implementation. 
*/ + configASSERT( uxSchedulerSuspended != pdFALSE ); + + /* Store the new item value in the event list. */ + listSET_LIST_ITEM_VALUE( pxEventListItem, xItemValue | taskEVENT_LIST_ITEM_VALUE_IN_USE ); + + /* Remove the event list form the event flag. Interrupts do not access + event flags. */ + pxUnblockedTCB = listGET_LIST_ITEM_OWNER( pxEventListItem ); /*lint !e9079 void * is used as this macro is used with timers and co-routines too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */ + configASSERT( pxUnblockedTCB ); + ( void ) uxListRemove( pxEventListItem ); + + /* Remove the task from the delayed list and add it to the ready list. The + scheduler is suspended so interrupts will not be accessing the ready + lists. */ + ( void ) uxListRemove( &( pxUnblockedTCB->xStateListItem ) ); + prvAddTaskToReadyList( pxUnblockedTCB ); + + if( pxUnblockedTCB->uxPriority > pxCurrentTCB->uxPriority ) + { + /* The unblocked task has a priority above that of the calling task, so + a context switch is required. This function is called with the + scheduler suspended so xYieldPending is set so the context switch + occurs immediately that the scheduler is resumed (unsuspended). */ + xYieldPending = pdTRUE; + } +} +/*-----------------------------------------------------------*/ + +void vTaskSetTimeOutState( TimeOut_t * const pxTimeOut ) +{ + configASSERT( pxTimeOut ); + taskENTER_CRITICAL(); + { + pxTimeOut->xOverflowCount = xNumOfOverflows; + pxTimeOut->xTimeOnEntering = xTickCount; + } + taskEXIT_CRITICAL(); +} +/*-----------------------------------------------------------*/ + +void vTaskInternalSetTimeOutState( TimeOut_t * const pxTimeOut ) +{ + /* For internal use only as it does not use a critical section. */ + pxTimeOut->xOverflowCount = xNumOfOverflows; + pxTimeOut->xTimeOnEntering = xTickCount; +} +/*-----------------------------------------------------------*/ + +BaseType_t xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut, TickType_t * const pxTicksToWait ) +{ +BaseType_t xReturn; + + configASSERT( pxTimeOut ); + configASSERT( pxTicksToWait ); + + taskENTER_CRITICAL(); + { + /* Minor optimisation. The tick count cannot change in this block. */ + const TickType_t xConstTickCount = xTickCount; + const TickType_t xElapsedTime = xConstTickCount - pxTimeOut->xTimeOnEntering; + + #if( INCLUDE_xTaskAbortDelay == 1 ) + if( pxCurrentTCB->ucDelayAborted != ( uint8_t ) pdFALSE ) + { + /* The delay was aborted, which is not the same as a time out, + but has the same result. */ + pxCurrentTCB->ucDelayAborted = pdFALSE; + xReturn = pdTRUE; + } + else + #endif + + #if ( INCLUDE_vTaskSuspend == 1 ) + if( *pxTicksToWait == portMAX_DELAY ) + { + /* If INCLUDE_vTaskSuspend is set to 1 and the block time + specified is the maximum block time then the task should block + indefinitely, and therefore never time out. */ + xReturn = pdFALSE; + } + else + #endif + + if( ( xNumOfOverflows != pxTimeOut->xOverflowCount ) && ( xConstTickCount >= pxTimeOut->xTimeOnEntering ) ) /*lint !e525 Indentation preferred as is to make code within pre-processor directives clearer. */ + { + /* The tick count is greater than the time at which + vTaskSetTimeout() was called, but has also overflowed since + vTaskSetTimeOut() was called. It must have wrapped all the way + around and gone past again. This passed since vTaskSetTimeout() + was called. 
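+    (As an aside, the vTaskSetTimeOutState() and xTaskCheckForTimeOut() pair defined
+    above is typically used to enforce an overall timeout across repeated, possibly
+    pre-empted, polling attempts.  A minimal sketch, illustrative only and not part
+    of this file - prvDataAvailable() is a hypothetical application helper:
+
+        TimeOut_t xTimeOut;
+        TickType_t xTicksToWait = pdMS_TO_TICKS( 100 );
+
+        vTaskSetTimeOutState( &xTimeOut );
+
+        while( prvDataAvailable() == pdFALSE )
+        {
+            if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) != pdFALSE )
+            {
+                break;    // 100ms elapsed in total; tick overflow is handled internally
+            }
+        }
+    )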
*/ + xReturn = pdTRUE; + } + else if( xElapsedTime < *pxTicksToWait ) /*lint !e961 Explicit casting is only redundant with some compilers, whereas others require it to prevent integer conversion errors. */ + { + /* Not a genuine timeout. Adjust parameters for time remaining. */ + *pxTicksToWait -= xElapsedTime; + vTaskInternalSetTimeOutState( pxTimeOut ); + xReturn = pdFALSE; + } + else + { + *pxTicksToWait = 0; + xReturn = pdTRUE; + } + } + taskEXIT_CRITICAL(); + + return xReturn; +} +/*-----------------------------------------------------------*/ + +void vTaskMissedYield( void ) +{ + xYieldPending = pdTRUE; +} +/*-----------------------------------------------------------*/ + +#if ( configUSE_TRACE_FACILITY == 1 ) + + UBaseType_t uxTaskGetTaskNumber( TaskHandle_t xTask ) + { + UBaseType_t uxReturn; + TCB_t const *pxTCB; + + if( xTask != NULL ) + { + pxTCB = xTask; + uxReturn = pxTCB->uxTaskNumber; + } + else + { + uxReturn = 0U; + } + + return uxReturn; + } + +#endif /* configUSE_TRACE_FACILITY */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TRACE_FACILITY == 1 ) + + void vTaskSetTaskNumber( TaskHandle_t xTask, const UBaseType_t uxHandle ) + { + TCB_t * pxTCB; + + if( xTask != NULL ) + { + pxTCB = xTask; + pxTCB->uxTaskNumber = uxHandle; + } + } + +#endif /* configUSE_TRACE_FACILITY */ + +/* + * ----------------------------------------------------------- + * The Idle task. + * ---------------------------------------------------------- + * + * The portTASK_FUNCTION() macro is used to allow port/compiler specific + * language extensions. The equivalent prototype for this function is: + * + * void prvIdleTask( void *pvParameters ); + * + */ +static portTASK_FUNCTION( prvIdleTask, pvParameters ) +{ + /* Stop warnings. */ + ( void ) pvParameters; + + /** THIS IS THE RTOS IDLE TASK - WHICH IS CREATED AUTOMATICALLY WHEN THE + SCHEDULER IS STARTED. **/ + + /* In case a task that has a secure context deletes itself, in which case + the idle task is responsible for deleting the task's secure context, if + any. */ + portALLOCATE_SECURE_CONTEXT( configMINIMAL_SECURE_STACK_SIZE ); + + for( ;; ) + { + /* See if any tasks have deleted themselves - if so then the idle task + is responsible for freeing the deleted task's TCB and stack. */ + prvCheckTasksWaitingTermination(); + + #if ( configUSE_PREEMPTION == 0 ) + { + /* If we are not using preemption we keep forcing a task switch to + see if any other task has become available. If we are using + preemption we don't need to do this as any task becoming available + will automatically get the processor anyway. */ + taskYIELD(); + } + #endif /* configUSE_PREEMPTION */ + + #if ( ( configUSE_PREEMPTION == 1 ) && ( configIDLE_SHOULD_YIELD == 1 ) ) + { + /* When using preemption tasks of equal priority will be + timesliced. If a task that is sharing the idle priority is ready + to run then the idle task should yield before the end of the + timeslice. + + A critical region is not required here as we are just reading from + the list, and an occasional incorrect value will not matter. If + the ready list at the idle priority contains more than one task + then a task other than the idle task is ready to execute. 
*/ + if( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ tskIDLE_PRIORITY ] ) ) > ( UBaseType_t ) 1 ) + { + taskYIELD(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + #endif /* ( ( configUSE_PREEMPTION == 1 ) && ( configIDLE_SHOULD_YIELD == 1 ) ) */ + + #if ( configUSE_IDLE_HOOK == 1 ) + { + extern void vApplicationIdleHook( void ); + + /* Call the user defined function from within the idle task. This + allows the application designer to add background functionality + without the overhead of a separate task. + NOTE: vApplicationIdleHook() MUST NOT, UNDER ANY CIRCUMSTANCES, + CALL A FUNCTION THAT MIGHT BLOCK. */ + vApplicationIdleHook(); + } + #endif /* configUSE_IDLE_HOOK */ + + /* This conditional compilation should use inequality to 0, not equality + to 1. This is to ensure portSUPPRESS_TICKS_AND_SLEEP() is called when + user defined low power mode implementations require + configUSE_TICKLESS_IDLE to be set to a value other than 1. */ + #if ( configUSE_TICKLESS_IDLE != 0 ) + { + TickType_t xExpectedIdleTime; + + /* It is not desirable to suspend then resume the scheduler on + each iteration of the idle task. Therefore, a preliminary + test of the expected idle time is performed without the + scheduler suspended. The result here is not necessarily + valid. */ + xExpectedIdleTime = prvGetExpectedIdleTime(); + + if( xExpectedIdleTime >= configEXPECTED_IDLE_TIME_BEFORE_SLEEP ) + { + vTaskSuspendAll(); + { + /* Now the scheduler is suspended, the expected idle + time can be sampled again, and this time its value can + be used. */ + configASSERT( xNextTaskUnblockTime >= xTickCount ); + xExpectedIdleTime = prvGetExpectedIdleTime(); + + /* Define the following macro to set xExpectedIdleTime to 0 + if the application does not want + portSUPPRESS_TICKS_AND_SLEEP() to be called. */ + configPRE_SUPPRESS_TICKS_AND_SLEEP_PROCESSING( xExpectedIdleTime ); + + if( xExpectedIdleTime >= configEXPECTED_IDLE_TIME_BEFORE_SLEEP ) + { + traceLOW_POWER_IDLE_BEGIN(); + portSUPPRESS_TICKS_AND_SLEEP( xExpectedIdleTime ); + traceLOW_POWER_IDLE_END(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + ( void ) xTaskResumeAll(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + #endif /* configUSE_TICKLESS_IDLE */ + } +} +/*-----------------------------------------------------------*/ + +#if( configUSE_TICKLESS_IDLE != 0 ) + + eSleepModeStatus eTaskConfirmSleepModeStatus( void ) + { + /* The idle task exists in addition to the application tasks. */ + const UBaseType_t uxNonApplicationTasks = 1; + eSleepModeStatus eReturn = eStandardSleep; + + if( listCURRENT_LIST_LENGTH( &xPendingReadyList ) != 0 ) + { + /* A task was made ready while the scheduler was suspended. */ + eReturn = eAbortSleep; + } + else if( xYieldPending != pdFALSE ) + { + /* A yield was pended while the scheduler was suspended. */ + eReturn = eAbortSleep; + } + else + { + /* If all the tasks are in the suspended list (which might mean they + have an infinite block time rather than actually being suspended) + then it is safe to turn all clocks off and just wait for external + interrupts. 
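+    (As an aside, the application-side counterpart of the configUSE_IDLE_HOOK block
+    above is a user supplied vApplicationIdleHook().  A minimal sketch, illustrative
+    only and assuming configUSE_IDLE_HOOK is set to 1:
+
+        void vApplicationIdleHook( void )
+        {
+            // Runs from the idle task on every loop iteration.  It must never call
+            // a blocking API.  A common use is to enter a light sleep mode until
+            // the next interrupt, e.g. a WFI instruction on Cortex-M devices.
+        }
+    )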
*/ + if( listCURRENT_LIST_LENGTH( &xSuspendedTaskList ) == ( uxCurrentNumberOfTasks - uxNonApplicationTasks ) ) + { + eReturn = eNoTasksWaitingTimeout; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + + return eReturn; + } + +#endif /* configUSE_TICKLESS_IDLE */ +/*-----------------------------------------------------------*/ + +#if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) + + void vTaskSetThreadLocalStoragePointer( TaskHandle_t xTaskToSet, BaseType_t xIndex, void *pvValue ) + { + TCB_t *pxTCB; + + if( xIndex < configNUM_THREAD_LOCAL_STORAGE_POINTERS ) + { + pxTCB = prvGetTCBFromHandle( xTaskToSet ); + pxTCB->pvThreadLocalStoragePointers[ xIndex ] = pvValue; + } + } + +#endif /* configNUM_THREAD_LOCAL_STORAGE_POINTERS */ +/*-----------------------------------------------------------*/ + +#if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) + + void *pvTaskGetThreadLocalStoragePointer( TaskHandle_t xTaskToQuery, BaseType_t xIndex ) + { + void *pvReturn = NULL; + TCB_t *pxTCB; + + if( xIndex < configNUM_THREAD_LOCAL_STORAGE_POINTERS ) + { + pxTCB = prvGetTCBFromHandle( xTaskToQuery ); + pvReturn = pxTCB->pvThreadLocalStoragePointers[ xIndex ]; + } + else + { + pvReturn = NULL; + } + + return pvReturn; + } + +#endif /* configNUM_THREAD_LOCAL_STORAGE_POINTERS */ +/*-----------------------------------------------------------*/ + +#if ( portUSING_MPU_WRAPPERS == 1 ) + + void vTaskAllocateMPURegions( TaskHandle_t xTaskToModify, const MemoryRegion_t * const xRegions ) + { + TCB_t *pxTCB; + + /* If null is passed in here then we are modifying the MPU settings of + the calling task. */ + pxTCB = prvGetTCBFromHandle( xTaskToModify ); + + vPortStoreTaskMPUSettings( &( pxTCB->xMPUSettings ), xRegions, NULL, 0 ); + } + +#endif /* portUSING_MPU_WRAPPERS */ +/*-----------------------------------------------------------*/ + +static void prvInitialiseTaskLists( void ) +{ +UBaseType_t uxPriority; + + for( uxPriority = ( UBaseType_t ) 0U; uxPriority < ( UBaseType_t ) configMAX_PRIORITIES; uxPriority++ ) + { + vListInitialise( &( pxReadyTasksLists[ uxPriority ] ) ); + } + + vListInitialise( &xDelayedTaskList1 ); + vListInitialise( &xDelayedTaskList2 ); + vListInitialise( &xPendingReadyList ); + + #if ( INCLUDE_vTaskDelete == 1 ) + { + vListInitialise( &xTasksWaitingTermination ); + } + #endif /* INCLUDE_vTaskDelete */ + + #if ( INCLUDE_vTaskSuspend == 1 ) + { + vListInitialise( &xSuspendedTaskList ); + } + #endif /* INCLUDE_vTaskSuspend */ + + /* Start with pxDelayedTaskList using list1 and the pxOverflowDelayedTaskList + using list2. */ + pxDelayedTaskList = &xDelayedTaskList1; + pxOverflowDelayedTaskList = &xDelayedTaskList2; +} +/*-----------------------------------------------------------*/ + +static void prvCheckTasksWaitingTermination( void ) +{ + + /** THIS FUNCTION IS CALLED FROM THE RTOS IDLE TASK **/ + + #if ( INCLUDE_vTaskDelete == 1 ) + { + TCB_t *pxTCB; + + /* uxDeletedTasksWaitingCleanUp is used to prevent taskENTER_CRITICAL() + being called too often in the idle task. */ + while( uxDeletedTasksWaitingCleanUp > ( UBaseType_t ) 0U ) + { + taskENTER_CRITICAL(); + { + pxTCB = listGET_OWNER_OF_HEAD_ENTRY( ( &xTasksWaitingTermination ) ); /*lint !e9079 void * is used as this macro is used with timers and co-routines too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. 
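+    (As an aside, a minimal usage sketch of the thread local storage pointer API
+    defined above - illustrative only, assuming configNUM_THREAD_LOCAL_STORAGE_POINTERS
+    is at least 1 and using index 0 for a hypothetical per-task error code:
+
+        static uint32_t ulThisTaskErrorCode;      // in practice one instance per task
+        uint32_t *pulErrorCode;
+
+        vTaskSetThreadLocalStoragePointer( NULL, 0, &ulThisTaskErrorCode );
+        pulErrorCode = pvTaskGetThreadLocalStoragePointer( NULL, 0 );
+
+    NULL selects the calling task in both calls.)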
*/ + ( void ) uxListRemove( &( pxTCB->xStateListItem ) ); + --uxCurrentNumberOfTasks; + --uxDeletedTasksWaitingCleanUp; + } + taskEXIT_CRITICAL(); + + prvDeleteTCB( pxTCB ); + } + } + #endif /* INCLUDE_vTaskDelete */ +} +/*-----------------------------------------------------------*/ + +#if( configUSE_TRACE_FACILITY == 1 ) + + void vTaskGetInfo( TaskHandle_t xTask, TaskStatus_t *pxTaskStatus, BaseType_t xGetFreeStackSpace, eTaskState eState ) + { + TCB_t *pxTCB; + + /* xTask is NULL then get the state of the calling task. */ + pxTCB = prvGetTCBFromHandle( xTask ); + + pxTaskStatus->xHandle = ( TaskHandle_t ) pxTCB; + pxTaskStatus->pcTaskName = ( const char * ) &( pxTCB->pcTaskName [ 0 ] ); + pxTaskStatus->uxCurrentPriority = pxTCB->uxPriority; + pxTaskStatus->pxStackBase = pxTCB->pxStack; + pxTaskStatus->xTaskNumber = pxTCB->uxTCBNumber; + + #if ( configUSE_MUTEXES == 1 ) + { + pxTaskStatus->uxBasePriority = pxTCB->uxBasePriority; + } + #else + { + pxTaskStatus->uxBasePriority = 0; + } + #endif + + #if ( configGENERATE_RUN_TIME_STATS == 1 ) + { + pxTaskStatus->ulRunTimeCounter = pxTCB->ulRunTimeCounter; + } + #else + { + pxTaskStatus->ulRunTimeCounter = 0; + } + #endif + + /* Obtaining the task state is a little fiddly, so is only done if the + value of eState passed into this function is eInvalid - otherwise the + state is just set to whatever is passed in. */ + if( eState != eInvalid ) + { + if( pxTCB == pxCurrentTCB ) + { + pxTaskStatus->eCurrentState = eRunning; + } + else + { + pxTaskStatus->eCurrentState = eState; + + #if ( INCLUDE_vTaskSuspend == 1 ) + { + /* If the task is in the suspended list then there is a + chance it is actually just blocked indefinitely - so really + it should be reported as being in the Blocked state. */ + if( eState == eSuspended ) + { + vTaskSuspendAll(); + { + if( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) != NULL ) + { + pxTaskStatus->eCurrentState = eBlocked; + } + } + ( void ) xTaskResumeAll(); + } + } + #endif /* INCLUDE_vTaskSuspend */ + } + } + else + { + pxTaskStatus->eCurrentState = eTaskGetState( pxTCB ); + } + + /* Obtaining the stack space takes some time, so the xGetFreeStackSpace + parameter is provided to allow it to be skipped. */ + if( xGetFreeStackSpace != pdFALSE ) + { + #if ( portSTACK_GROWTH > 0 ) + { + pxTaskStatus->usStackHighWaterMark = prvTaskCheckFreeStackSpace( ( uint8_t * ) pxTCB->pxEndOfStack ); + } + #else + { + pxTaskStatus->usStackHighWaterMark = prvTaskCheckFreeStackSpace( ( uint8_t * ) pxTCB->pxStack ); + } + #endif + } + else + { + pxTaskStatus->usStackHighWaterMark = 0; + } + } + +#endif /* configUSE_TRACE_FACILITY */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TRACE_FACILITY == 1 ) + + static UBaseType_t prvListTasksWithinSingleList( TaskStatus_t *pxTaskStatusArray, List_t *pxList, eTaskState eState ) + { + configLIST_VOLATILE TCB_t *pxNextTCB, *pxFirstTCB; + UBaseType_t uxTask = 0; + + if( listCURRENT_LIST_LENGTH( pxList ) > ( UBaseType_t ) 0 ) + { + listGET_OWNER_OF_NEXT_ENTRY( pxFirstTCB, pxList ); /*lint !e9079 void * is used as this macro is used with timers and co-routines too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */ + + /* Populate an TaskStatus_t structure within the + pxTaskStatusArray array for each task that is referenced from + pxList. See the definition of TaskStatus_t in task.h for the + meaning of each TaskStatus_t structure member. 
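+    (As an aside, vTaskGetInfo() defined above can also be called directly by the
+    application.  A minimal sketch, illustrative only and assuming
+    configUSE_TRACE_FACILITY is set to 1:
+
+        TaskStatus_t xStatus;
+
+        // NULL queries the calling task; eInvalid asks the kernel to work out the
+        // real task state; pdTRUE requests the stack high water mark as well.
+        vTaskGetInfo( NULL, &xStatus, pdTRUE, eInvalid );
+    )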
*/ + do + { + listGET_OWNER_OF_NEXT_ENTRY( pxNextTCB, pxList ); /*lint !e9079 void * is used as this macro is used with timers and co-routines too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */ + vTaskGetInfo( ( TaskHandle_t ) pxNextTCB, &( pxTaskStatusArray[ uxTask ] ), pdTRUE, eState ); + uxTask++; + } while( pxNextTCB != pxFirstTCB ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + return uxTask; + } + +#endif /* configUSE_TRACE_FACILITY */ +/*-----------------------------------------------------------*/ + +#if ( ( configUSE_TRACE_FACILITY == 1 ) || ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) || ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 ) ) + + static configSTACK_DEPTH_TYPE prvTaskCheckFreeStackSpace( const uint8_t * pucStackByte ) + { + uint32_t ulCount = 0U; + + while( *pucStackByte == ( uint8_t ) tskSTACK_FILL_BYTE ) + { + pucStackByte -= portSTACK_GROWTH; + ulCount++; + } + + ulCount /= ( uint32_t ) sizeof( StackType_t ); /*lint !e961 Casting is not redundant on smaller architectures. */ + + return ( configSTACK_DEPTH_TYPE ) ulCount; + } + +#endif /* ( ( configUSE_TRACE_FACILITY == 1 ) || ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) || ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 ) ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 ) + + /* uxTaskGetStackHighWaterMark() and uxTaskGetStackHighWaterMark2() are the + same except for their return type. Using configSTACK_DEPTH_TYPE allows the + user to determine the return type. It gets around the problem of the value + overflowing on 8-bit types without breaking backward compatibility for + applications that expect an 8-bit return type. */ + configSTACK_DEPTH_TYPE uxTaskGetStackHighWaterMark2( TaskHandle_t xTask ) + { + TCB_t *pxTCB; + uint8_t *pucEndOfStack; + configSTACK_DEPTH_TYPE uxReturn; + + /* uxTaskGetStackHighWaterMark() and uxTaskGetStackHighWaterMark2() are + the same except for their return type. Using configSTACK_DEPTH_TYPE + allows the user to determine the return type. It gets around the + problem of the value overflowing on 8-bit types without breaking + backward compatibility for applications that expect an 8-bit return + type. */ + + pxTCB = prvGetTCBFromHandle( xTask ); + + #if portSTACK_GROWTH < 0 + { + pucEndOfStack = ( uint8_t * ) pxTCB->pxStack; + } + #else + { + pucEndOfStack = ( uint8_t * ) pxTCB->pxEndOfStack; + } + #endif + + uxReturn = prvTaskCheckFreeStackSpace( pucEndOfStack ); + + return uxReturn; + } + +#endif /* INCLUDE_uxTaskGetStackHighWaterMark2 */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) + + UBaseType_t uxTaskGetStackHighWaterMark( TaskHandle_t xTask ) + { + TCB_t *pxTCB; + uint8_t *pucEndOfStack; + UBaseType_t uxReturn; + + pxTCB = prvGetTCBFromHandle( xTask ); + + #if portSTACK_GROWTH < 0 + { + pucEndOfStack = ( uint8_t * ) pxTCB->pxStack; + } + #else + { + pucEndOfStack = ( uint8_t * ) pxTCB->pxEndOfStack; + } + #endif + + uxReturn = ( UBaseType_t ) prvTaskCheckFreeStackSpace( pucEndOfStack ); + + return uxReturn; + } + +#endif /* INCLUDE_uxTaskGetStackHighWaterMark */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_vTaskDelete == 1 ) + + static void prvDeleteTCB( TCB_t *pxTCB ) + { + /* This call is required specifically for the TriCore port. It must be + above the vPortFree() calls. 
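+    (As an aside, a minimal usage sketch of uxTaskGetStackHighWaterMark() defined
+    above - illustrative only, assuming INCLUDE_uxTaskGetStackHighWaterMark is 1:
+
+        UBaseType_t uxUnusedStackWords;
+
+        // NULL queries the calling task.  The value is the smallest amount of stack
+        // that has ever been free, in words of StackType_t, so a value approaching
+        // zero means the task has come close to overflowing its stack.
+        uxUnusedStackWords = uxTaskGetStackHighWaterMark( NULL );
+    )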
The call is also used by ports/demos that + want to allocate and clean RAM statically. */ + portCLEAN_UP_TCB( pxTCB ); + + /* Free up the memory allocated by the scheduler for the task. It is up + to the task to free any memory allocated at the application level. */ + #if ( configUSE_NEWLIB_REENTRANT == 1 ) + { + _reclaim_reent( &( pxTCB->xNewLib_reent ) ); + } + #endif /* configUSE_NEWLIB_REENTRANT */ + + #if( ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 0 ) && ( portUSING_MPU_WRAPPERS == 0 ) ) + { + /* The task can only have been allocated dynamically - free both + the stack and TCB. */ + vPortFree( pxTCB->pxStack ); + vPortFree( pxTCB ); + } + #elif( tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE != 0 ) /*lint !e731 !e9029 Macro has been consolidated for readability reasons. */ + { + /* The task could have been allocated statically or dynamically, so + check what was statically allocated before trying to free the + memory. */ + if( pxTCB->ucStaticallyAllocated == tskDYNAMICALLY_ALLOCATED_STACK_AND_TCB ) + { + /* Both the stack and TCB were allocated dynamically, so both + must be freed. */ + vPortFree( pxTCB->pxStack ); + vPortFree( pxTCB ); + } + else if( pxTCB->ucStaticallyAllocated == tskSTATICALLY_ALLOCATED_STACK_ONLY ) + { + /* Only the stack was statically allocated, so the TCB is the + only memory that must be freed. */ + vPortFree( pxTCB ); + } + else + { + /* Neither the stack nor the TCB were allocated dynamically, so + nothing needs to be freed. */ + configASSERT( pxTCB->ucStaticallyAllocated == tskSTATICALLY_ALLOCATED_STACK_AND_TCB ); + mtCOVERAGE_TEST_MARKER(); + } + } + #endif /* configSUPPORT_DYNAMIC_ALLOCATION */ + } + +#endif /* INCLUDE_vTaskDelete */ +/*-----------------------------------------------------------*/ + +static void prvResetNextTaskUnblockTime( void ) +{ +TCB_t *pxTCB; + + if( listLIST_IS_EMPTY( pxDelayedTaskList ) != pdFALSE ) + { + /* The new current delayed list is empty. Set xNextTaskUnblockTime to + the maximum possible value so it is extremely unlikely that the + if( xTickCount >= xNextTaskUnblockTime ) test will pass until + there is an item in the delayed list. */ + xNextTaskUnblockTime = portMAX_DELAY; + } + else + { + /* The new current delayed list is not empty, get the value of + the item at the head of the delayed list. This is the time at + which the task at the head of the delayed list should be removed + from the Blocked state. */ + ( pxTCB ) = listGET_OWNER_OF_HEAD_ENTRY( pxDelayedTaskList ); /*lint !e9079 void * is used as this macro is used with timers and co-routines too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */ + xNextTaskUnblockTime = listGET_LIST_ITEM_VALUE( &( ( pxTCB )->xStateListItem ) ); + } +} +/*-----------------------------------------------------------*/ + +#if ( ( INCLUDE_xTaskGetCurrentTaskHandle == 1 ) || ( configUSE_MUTEXES == 1 ) ) + + TaskHandle_t xTaskGetCurrentTaskHandle( void ) + { + TaskHandle_t xReturn; + + /* A critical section is not required as this is not called from + an interrupt and the current TCB will always be the same for any + individual execution thread. 
*/ + xReturn = pxCurrentTCB; + + return xReturn; + } + +#endif /* ( ( INCLUDE_xTaskGetCurrentTaskHandle == 1 ) || ( configUSE_MUTEXES == 1 ) ) */ +/*-----------------------------------------------------------*/ + +#if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) ) + + BaseType_t xTaskGetSchedulerState( void ) + { + BaseType_t xReturn; + + if( xSchedulerRunning == pdFALSE ) + { + xReturn = taskSCHEDULER_NOT_STARTED; + } + else + { + if( uxSchedulerSuspended == ( UBaseType_t ) pdFALSE ) + { + xReturn = taskSCHEDULER_RUNNING; + } + else + { + xReturn = taskSCHEDULER_SUSPENDED; + } + } + + return xReturn; + } + +#endif /* ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_MUTEXES == 1 ) + + BaseType_t xTaskPriorityInherit( TaskHandle_t const pxMutexHolder ) + { + TCB_t * const pxMutexHolderTCB = pxMutexHolder; + BaseType_t xReturn = pdFALSE; + + /* If the mutex was given back by an interrupt while the queue was + locked then the mutex holder might now be NULL. _RB_ Is this still + needed as interrupts can no longer use mutexes? */ + if( pxMutexHolder != NULL ) + { + /* If the holder of the mutex has a priority below the priority of + the task attempting to obtain the mutex then it will temporarily + inherit the priority of the task attempting to obtain the mutex. */ + if( pxMutexHolderTCB->uxPriority < pxCurrentTCB->uxPriority ) + { + /* Adjust the mutex holder state to account for its new + priority. Only reset the event list item value if the value is + not being used for anything else. */ + if( ( listGET_LIST_ITEM_VALUE( &( pxMutexHolderTCB->xEventListItem ) ) & taskEVENT_LIST_ITEM_VALUE_IN_USE ) == 0UL ) + { + listSET_LIST_ITEM_VALUE( &( pxMutexHolderTCB->xEventListItem ), ( TickType_t ) configMAX_PRIORITIES - ( TickType_t ) pxCurrentTCB->uxPriority ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */ + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + /* If the task being modified is in the ready state it will need + to be moved into a new list. */ + if( listIS_CONTAINED_WITHIN( &( pxReadyTasksLists[ pxMutexHolderTCB->uxPriority ] ), &( pxMutexHolderTCB->xStateListItem ) ) != pdFALSE ) + { + if( uxListRemove( &( pxMutexHolderTCB->xStateListItem ) ) == ( UBaseType_t ) 0 ) + { + taskRESET_READY_PRIORITY( pxMutexHolderTCB->uxPriority ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + /* Inherit the priority before being moved into the new list. */ + pxMutexHolderTCB->uxPriority = pxCurrentTCB->uxPriority; + prvAddTaskToReadyList( pxMutexHolderTCB ); + } + else + { + /* Just inherit the priority. */ + pxMutexHolderTCB->uxPriority = pxCurrentTCB->uxPriority; + } + + traceTASK_PRIORITY_INHERIT( pxMutexHolderTCB, pxCurrentTCB->uxPriority ); + + /* Inheritance occurred. */ + xReturn = pdTRUE; + } + else + { + if( pxMutexHolderTCB->uxBasePriority < pxCurrentTCB->uxPriority ) + { + /* The base priority of the mutex holder is lower than the + priority of the task attempting to take the mutex, but the + current priority of the mutex holder is not lower than the + priority of the task attempting to take the mutex. + Therefore the mutex holder must have already inherited a + priority, but inheritance would have occurred if that had + not been the case. 
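+    (As an aside, a minimal usage sketch of xTaskGetSchedulerState() defined above -
+    illustrative only; it is commonly used to avoid calling blocking APIs before the
+    scheduler has been started or while it is suspended:
+
+        if( xTaskGetSchedulerState() == taskSCHEDULER_RUNNING )
+        {
+            vTaskDelay( pdMS_TO_TICKS( 10 ) );    // safe to block here
+        }
+    )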
*/ + xReturn = pdTRUE; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + return xReturn; + } + +#endif /* configUSE_MUTEXES */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_MUTEXES == 1 ) + + BaseType_t xTaskPriorityDisinherit( TaskHandle_t const pxMutexHolder ) + { + TCB_t * const pxTCB = pxMutexHolder; + BaseType_t xReturn = pdFALSE; + + if( pxMutexHolder != NULL ) + { + /* A task can only have an inherited priority if it holds the mutex. + If the mutex is held by a task then it cannot be given from an + interrupt, and if a mutex is given by the holding task then it must + be the running state task. */ + configASSERT( pxTCB == pxCurrentTCB ); + configASSERT( pxTCB->uxMutexesHeld ); + ( pxTCB->uxMutexesHeld )--; + + /* Has the holder of the mutex inherited the priority of another + task? */ + if( pxTCB->uxPriority != pxTCB->uxBasePriority ) + { + /* Only disinherit if no other mutexes are held. */ + if( pxTCB->uxMutexesHeld == ( UBaseType_t ) 0 ) + { + /* A task can only have an inherited priority if it holds + the mutex. If the mutex is held by a task then it cannot be + given from an interrupt, and if a mutex is given by the + holding task then it must be the running state task. Remove + the holding task from the ready list. */ + if( uxListRemove( &( pxTCB->xStateListItem ) ) == ( UBaseType_t ) 0 ) + { + taskRESET_READY_PRIORITY( pxTCB->uxPriority ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + /* Disinherit the priority before adding the task into the + new ready list. */ + traceTASK_PRIORITY_DISINHERIT( pxTCB, pxTCB->uxBasePriority ); + pxTCB->uxPriority = pxTCB->uxBasePriority; + + /* Reset the event list item value. It cannot be in use for + any other purpose if this task is running, and it must be + running to give back the mutex. */ + listSET_LIST_ITEM_VALUE( &( pxTCB->xEventListItem ), ( TickType_t ) configMAX_PRIORITIES - ( TickType_t ) pxTCB->uxPriority ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */ + prvAddTaskToReadyList( pxTCB ); + + /* Return true to indicate that a context switch is required. + This is only actually required in the corner case whereby + multiple mutexes were held and the mutexes were given back + in an order different to that in which they were taken. + If a context switch did not occur when the first mutex was + returned, even if a task was waiting on it, then a context + switch should occur when the last mutex is returned whether + a task is waiting on it or not. */ + xReturn = pdTRUE; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + return xReturn; + } + +#endif /* configUSE_MUTEXES */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_MUTEXES == 1 ) + + void vTaskPriorityDisinheritAfterTimeout( TaskHandle_t const pxMutexHolder, UBaseType_t uxHighestPriorityWaitingTask ) + { + TCB_t * const pxTCB = pxMutexHolder; + UBaseType_t uxPriorityUsedOnEntry, uxPriorityToUse; + const UBaseType_t uxOnlyOneMutexHeld = ( UBaseType_t ) 1; + + if( pxMutexHolder != NULL ) + { + /* If pxMutexHolder is not NULL then the holder must hold at least + one mutex. */ + configASSERT( pxTCB->uxMutexesHeld ); + + /* Determine the priority to which the priority of the task that + holds the mutex should be set. 
This will be the greater of the + holding task's base priority and the priority of the highest + priority task that is waiting to obtain the mutex. */ + if( pxTCB->uxBasePriority < uxHighestPriorityWaitingTask ) + { + uxPriorityToUse = uxHighestPriorityWaitingTask; + } + else + { + uxPriorityToUse = pxTCB->uxBasePriority; + } + + /* Does the priority need to change? */ + if( pxTCB->uxPriority != uxPriorityToUse ) + { + /* Only disinherit if no other mutexes are held. This is a + simplification in the priority inheritance implementation. If + the task that holds the mutex is also holding other mutexes then + the other mutexes may have caused the priority inheritance. */ + if( pxTCB->uxMutexesHeld == uxOnlyOneMutexHeld ) + { + /* If a task has timed out because it already holds the + mutex it was trying to obtain then it cannot of inherited + its own priority. */ + configASSERT( pxTCB != pxCurrentTCB ); + + /* Disinherit the priority, remembering the previous + priority to facilitate determining the subject task's + state. */ + traceTASK_PRIORITY_DISINHERIT( pxTCB, pxTCB->uxBasePriority ); + uxPriorityUsedOnEntry = pxTCB->uxPriority; + pxTCB->uxPriority = uxPriorityToUse; + + /* Only reset the event list item value if the value is not + being used for anything else. */ + if( ( listGET_LIST_ITEM_VALUE( &( pxTCB->xEventListItem ) ) & taskEVENT_LIST_ITEM_VALUE_IN_USE ) == 0UL ) + { + listSET_LIST_ITEM_VALUE( &( pxTCB->xEventListItem ), ( TickType_t ) configMAX_PRIORITIES - ( TickType_t ) uxPriorityToUse ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */ + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + /* If the running task is not the task that holds the mutex + then the task that holds the mutex could be in either the + Ready, Blocked or Suspended states. Only remove the task + from its current state list if it is in the Ready state as + the task's priority is going to change and there is one + Ready list per priority. */ + if( listIS_CONTAINED_WITHIN( &( pxReadyTasksLists[ uxPriorityUsedOnEntry ] ), &( pxTCB->xStateListItem ) ) != pdFALSE ) + { + if( uxListRemove( &( pxTCB->xStateListItem ) ) == ( UBaseType_t ) 0 ) + { + taskRESET_READY_PRIORITY( pxTCB->uxPriority ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + prvAddTaskToReadyList( pxTCB ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + +#endif /* configUSE_MUTEXES */ +/*-----------------------------------------------------------*/ + +#if ( portCRITICAL_NESTING_IN_TCB == 1 ) + + void vTaskEnterCritical( void ) + { + portDISABLE_INTERRUPTS(); + + if( xSchedulerRunning != pdFALSE ) + { + ( pxCurrentTCB->uxCriticalNesting )++; + + /* This is not the interrupt safe version of the enter critical + function so assert() if it is being called from an interrupt + context. Only API functions that end in "FromISR" can be used in an + interrupt. Only assert if the critical nesting count is 1 to + protect against recursive calls if the assert function also uses a + critical section. 
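+    (As an aside, on ports that set portCRITICAL_NESTING_IN_TCB applications reach
+    this function through the taskENTER_CRITICAL() and taskEXIT_CRITICAL() macros.
+    A minimal sketch, illustrative only:
+
+        taskENTER_CRITICAL();
+        {
+            // Access data shared with an interrupt or another task.  Keep this
+            // region short, as interrupts up to the kernel's maximum API call
+            // priority are typically masked while it executes.
+        }
+        taskEXIT_CRITICAL();
+    )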
*/ + if( pxCurrentTCB->uxCriticalNesting == 1 ) + { + portASSERT_IF_IN_ISR(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + +#endif /* portCRITICAL_NESTING_IN_TCB */ +/*-----------------------------------------------------------*/ + +#if ( portCRITICAL_NESTING_IN_TCB == 1 ) + + void vTaskExitCritical( void ) + { + if( xSchedulerRunning != pdFALSE ) + { + if( pxCurrentTCB->uxCriticalNesting > 0U ) + { + ( pxCurrentTCB->uxCriticalNesting )--; + + if( pxCurrentTCB->uxCriticalNesting == 0U ) + { + portENABLE_INTERRUPTS(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + +#endif /* portCRITICAL_NESTING_IN_TCB */ +/*-----------------------------------------------------------*/ + +#if ( ( configUSE_TRACE_FACILITY == 1 ) && ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) ) + + static char *prvWriteNameToBuffer( char *pcBuffer, const char *pcTaskName ) + { + size_t x; + + /* Start by copying the entire string. */ + strcpy( pcBuffer, pcTaskName ); + + /* Pad the end of the string with spaces to ensure columns line up when + printed out. */ + for( x = strlen( pcBuffer ); x < ( size_t ) ( configMAX_TASK_NAME_LEN - 1 ); x++ ) + { + pcBuffer[ x ] = ' '; + } + + /* Terminate. */ + pcBuffer[ x ] = ( char ) 0x00; + + /* Return the new end of string. */ + return &( pcBuffer[ x ] ); + } + +#endif /* ( configUSE_TRACE_FACILITY == 1 ) && ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( ( configUSE_TRACE_FACILITY == 1 ) && ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) ) + + void vTaskList( char * pcWriteBuffer ) + { + TaskStatus_t *pxTaskStatusArray; + UBaseType_t uxArraySize, x; + char cStatus; + + /* + * PLEASE NOTE: + * + * This function is provided for convenience only, and is used by many + * of the demo applications. Do not consider it to be part of the + * scheduler. + * + * vTaskList() calls uxTaskGetSystemState(), then formats part of the + * uxTaskGetSystemState() output into a human readable table that + * displays task names, states and stack usage. + * + * vTaskList() has a dependency on the sprintf() C library function that + * might bloat the code size, use a lot of stack, and provide different + * results on different platforms. An alternative, tiny, third party, + * and limited functionality implementation of sprintf() is provided in + * many of the FreeRTOS/Demo sub-directories in a file called + * printf-stdarg.c (note printf-stdarg.c does not provide a full + * snprintf() implementation!). + * + * It is recommended that production systems call uxTaskGetSystemState() + * directly to get access to raw stats data, rather than indirectly + * through a call to vTaskList(). + */ + + + /* Make sure the write buffer does not contain a string. */ + *pcWriteBuffer = ( char ) 0x00; + + /* Take a snapshot of the number of tasks in case it changes while this + function is executing. */ + uxArraySize = uxCurrentNumberOfTasks; + + /* Allocate an array index for each task. NOTE! if + configSUPPORT_DYNAMIC_ALLOCATION is set to 0 then pvPortMalloc() will + equate to NULL. */ + pxTaskStatusArray = pvPortMalloc( uxCurrentNumberOfTasks * sizeof( TaskStatus_t ) ); /*lint !e9079 All values returned by pvPortMalloc() have at least the alignment required by the MCU's stack and this allocation allocates a struct that has the alignment requirements of a pointer. 
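+    (As an aside, a minimal usage sketch of vTaskList() - illustrative only; the
+    buffer size is application specific and stdio output is just one possibility:
+
+        static char cTaskListBuffer[ 512 ];
+
+        vTaskList( cTaskListBuffer );
+        printf( "Name           State  Prio   Stack   Num\r\n%s", cTaskListBuffer );
+    )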
*/ + + if( pxTaskStatusArray != NULL ) + { + /* Generate the (binary) data. */ + uxArraySize = uxTaskGetSystemState( pxTaskStatusArray, uxArraySize, NULL ); + + /* Create a human readable table from the binary data. */ + for( x = 0; x < uxArraySize; x++ ) + { + switch( pxTaskStatusArray[ x ].eCurrentState ) + { + case eRunning: cStatus = tskRUNNING_CHAR; + break; + + case eReady: cStatus = tskREADY_CHAR; + break; + + case eBlocked: cStatus = tskBLOCKED_CHAR; + break; + + case eSuspended: cStatus = tskSUSPENDED_CHAR; + break; + + case eDeleted: cStatus = tskDELETED_CHAR; + break; + + case eInvalid: /* Fall through. */ + default: /* Should not get here, but it is included + to prevent static checking errors. */ + cStatus = ( char ) 0x00; + break; + } + + /* Write the task name to the string, padding with spaces so it + can be printed in tabular form more easily. */ + pcWriteBuffer = prvWriteNameToBuffer( pcWriteBuffer, pxTaskStatusArray[ x ].pcTaskName ); + + /* Write the rest of the string. */ + sprintf( pcWriteBuffer, "\t%c\t%u\t%u\t%u\r\n", cStatus, ( unsigned int ) pxTaskStatusArray[ x ].uxCurrentPriority, ( unsigned int ) pxTaskStatusArray[ x ].usStackHighWaterMark, ( unsigned int ) pxTaskStatusArray[ x ].xTaskNumber ); /*lint !e586 sprintf() allowed as this is compiled with many compilers and this is a utility function only - not part of the core kernel implementation. */ + pcWriteBuffer += strlen( pcWriteBuffer ); /*lint !e9016 Pointer arithmetic ok on char pointers especially as in this case where it best denotes the intent of the code. */ + } + + /* Free the array again. NOTE! If configSUPPORT_DYNAMIC_ALLOCATION + is 0 then vPortFree() will be #defined to nothing. */ + vPortFree( pxTaskStatusArray ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + +#endif /* ( ( configUSE_TRACE_FACILITY == 1 ) && ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) ) */ +/*----------------------------------------------------------*/ + +#if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) ) + + void vTaskGetRunTimeStats( char *pcWriteBuffer ) + { + TaskStatus_t *pxTaskStatusArray; + UBaseType_t uxArraySize, x; + uint32_t ulTotalTime, ulStatsAsPercentage; + + #if( configUSE_TRACE_FACILITY != 1 ) + { + #error configUSE_TRACE_FACILITY must also be set to 1 in FreeRTOSConfig.h to use vTaskGetRunTimeStats(). + } + #endif + + /* + * PLEASE NOTE: + * + * This function is provided for convenience only, and is used by many + * of the demo applications. Do not consider it to be part of the + * scheduler. + * + * vTaskGetRunTimeStats() calls uxTaskGetSystemState(), then formats part + * of the uxTaskGetSystemState() output into a human readable table that + * displays the amount of time each task has spent in the Running state + * in both absolute and percentage terms. + * + * vTaskGetRunTimeStats() has a dependency on the sprintf() C library + * function that might bloat the code size, use a lot of stack, and + * provide different results on different platforms. An alternative, + * tiny, third party, and limited functionality implementation of + * sprintf() is provided in many of the FreeRTOS/Demo sub-directories in + * a file called printf-stdarg.c (note printf-stdarg.c does not provide + * a full snprintf() implementation!). 
+ * + * It is recommended that production systems call uxTaskGetSystemState() + * directly to get access to raw stats data, rather than indirectly + * through a call to vTaskGetRunTimeStats(). + */ + + /* Make sure the write buffer does not contain a string. */ + *pcWriteBuffer = ( char ) 0x00; + + /* Take a snapshot of the number of tasks in case it changes while this + function is executing. */ + uxArraySize = uxCurrentNumberOfTasks; + + /* Allocate an array index for each task. NOTE! If + configSUPPORT_DYNAMIC_ALLOCATION is set to 0 then pvPortMalloc() will + equate to NULL. */ + pxTaskStatusArray = pvPortMalloc( uxCurrentNumberOfTasks * sizeof( TaskStatus_t ) ); /*lint !e9079 All values returned by pvPortMalloc() have at least the alignment required by the MCU's stack and this allocation allocates a struct that has the alignment requirements of a pointer. */ + + if( pxTaskStatusArray != NULL ) + { + /* Generate the (binary) data. */ + uxArraySize = uxTaskGetSystemState( pxTaskStatusArray, uxArraySize, &ulTotalTime ); + + /* For percentage calculations. */ + ulTotalTime /= 100UL; + + /* Avoid divide by zero errors. */ + if( ulTotalTime > 0UL ) + { + /* Create a human readable table from the binary data. */ + for( x = 0; x < uxArraySize; x++ ) + { + /* What percentage of the total run time has the task used? + This will always be rounded down to the nearest integer. + ulTotalRunTimeDiv100 has already been divided by 100. */ + ulStatsAsPercentage = pxTaskStatusArray[ x ].ulRunTimeCounter / ulTotalTime; + + /* Write the task name to the string, padding with + spaces so it can be printed in tabular form more + easily. */ + pcWriteBuffer = prvWriteNameToBuffer( pcWriteBuffer, pxTaskStatusArray[ x ].pcTaskName ); + + if( ulStatsAsPercentage > 0UL ) + { + #ifdef portLU_PRINTF_SPECIFIER_REQUIRED + { + sprintf( pcWriteBuffer, "\t%lu\t\t%lu%%\r\n", pxTaskStatusArray[ x ].ulRunTimeCounter, ulStatsAsPercentage ); + } + #else + { + /* sizeof( int ) == sizeof( long ) so a smaller + printf() library can be used. */ + sprintf( pcWriteBuffer, "\t%u\t\t%u%%\r\n", ( unsigned int ) pxTaskStatusArray[ x ].ulRunTimeCounter, ( unsigned int ) ulStatsAsPercentage ); /*lint !e586 sprintf() allowed as this is compiled with many compilers and this is a utility function only - not part of the core kernel implementation. */ + } + #endif + } + else + { + /* If the percentage is zero here then the task has + consumed less than 1% of the total run time. */ + #ifdef portLU_PRINTF_SPECIFIER_REQUIRED + { + sprintf( pcWriteBuffer, "\t%lu\t\t<1%%\r\n", pxTaskStatusArray[ x ].ulRunTimeCounter ); + } + #else + { + /* sizeof( int ) == sizeof( long ) so a smaller + printf() library can be used. */ + sprintf( pcWriteBuffer, "\t%u\t\t<1%%\r\n", ( unsigned int ) pxTaskStatusArray[ x ].ulRunTimeCounter ); /*lint !e586 sprintf() allowed as this is compiled with many compilers and this is a utility function only - not part of the core kernel implementation. */ + } + #endif + } + + pcWriteBuffer += strlen( pcWriteBuffer ); /*lint !e9016 Pointer arithmetic ok on char pointers especially as in this case where it best denotes the intent of the code. */ + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + /* Free the array again. NOTE! If configSUPPORT_DYNAMIC_ALLOCATION + is 0 then vPortFree() will be #defined to nothing. 
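+    (As an aside, vTaskGetRunTimeStats() only produces output if run time stats
+    gathering has been configured.  A minimal FreeRTOSConfig.h sketch, illustrative
+    only - the two timer functions are hypothetical, application provided names:
+
+        #define configUSE_TRACE_FACILITY                   1
+        #define configGENERATE_RUN_TIME_STATS              1
+        #define configUSE_STATS_FORMATTING_FUNCTIONS       1
+        #define portCONFIGURE_TIMER_FOR_RUN_TIME_STATS()   vSetupStatsTimer()
+        #define portGET_RUN_TIME_COUNTER_VALUE()           ulGetStatsTimerCount()
+
+    The stats counter is normally configured to run at least ten times faster than
+    the tick interrupt so the percentages are meaningful.)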
*/ + vPortFree( pxTaskStatusArray ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + +#endif /* ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) ) */ +/*-----------------------------------------------------------*/ + +TickType_t uxTaskResetEventItemValue( void ) +{ +TickType_t uxReturn; + + uxReturn = listGET_LIST_ITEM_VALUE( &( pxCurrentTCB->xEventListItem ) ); + + /* Reset the event list item to its normal value - so it can be used with + queues and semaphores. */ + listSET_LIST_ITEM_VALUE( &( pxCurrentTCB->xEventListItem ), ( ( TickType_t ) configMAX_PRIORITIES - ( TickType_t ) pxCurrentTCB->uxPriority ) ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */ + + return uxReturn; +} +/*-----------------------------------------------------------*/ + +#if ( configUSE_MUTEXES == 1 ) + + TaskHandle_t pvTaskIncrementMutexHeldCount( void ) + { + /* If xSemaphoreCreateMutex() is called before any tasks have been created + then pxCurrentTCB will be NULL. */ + if( pxCurrentTCB != NULL ) + { + ( pxCurrentTCB->uxMutexesHeld )++; + } + + return pxCurrentTCB; + } + +#endif /* configUSE_MUTEXES */ +/*-----------------------------------------------------------*/ + +#if( configUSE_TASK_NOTIFICATIONS == 1 ) + + uint32_t ulTaskNotifyTake( BaseType_t xClearCountOnExit, TickType_t xTicksToWait ) + { + uint32_t ulReturn; + + taskENTER_CRITICAL(); + { + /* Only block if the notification count is not already non-zero. */ + if( pxCurrentTCB->ulNotifiedValue == 0UL ) + { + /* Mark this task as waiting for a notification. */ + pxCurrentTCB->ucNotifyState = taskWAITING_NOTIFICATION; + + if( xTicksToWait > ( TickType_t ) 0 ) + { + prvAddCurrentTaskToDelayedList( xTicksToWait, pdTRUE ); + traceTASK_NOTIFY_TAKE_BLOCK(); + + /* All ports are written to allow a yield in a critical + section (some will yield immediately, others wait until the + critical section exits) - but it is not something that + application code should ever do. */ + portYIELD_WITHIN_API(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + taskEXIT_CRITICAL(); + + taskENTER_CRITICAL(); + { + traceTASK_NOTIFY_TAKE(); + ulReturn = pxCurrentTCB->ulNotifiedValue; + + if( ulReturn != 0UL ) + { + if( xClearCountOnExit != pdFALSE ) + { + pxCurrentTCB->ulNotifiedValue = 0UL; + } + else + { + pxCurrentTCB->ulNotifiedValue = ulReturn - ( uint32_t ) 1; + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + pxCurrentTCB->ucNotifyState = taskNOT_WAITING_NOTIFICATION; + } + taskEXIT_CRITICAL(); + + return ulReturn; + } + +#endif /* configUSE_TASK_NOTIFICATIONS */ +/*-----------------------------------------------------------*/ + +#if( configUSE_TASK_NOTIFICATIONS == 1 ) + + BaseType_t xTaskNotifyWait( uint32_t ulBitsToClearOnEntry, uint32_t ulBitsToClearOnExit, uint32_t *pulNotificationValue, TickType_t xTicksToWait ) + { + BaseType_t xReturn; + + taskENTER_CRITICAL(); + { + /* Only block if a notification is not already pending. */ + if( pxCurrentTCB->ucNotifyState != taskNOTIFICATION_RECEIVED ) + { + /* Clear bits in the task's notification value as bits may get + set by the notifying task or interrupt. This can be used to + clear the value to zero. */ + pxCurrentTCB->ulNotifiedValue &= ~ulBitsToClearOnEntry; + + /* Mark this task as waiting for a notification. 
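+    (As an aside, a common usage sketch of ulTaskNotifyTake() defined above, used as
+    a lightweight binary semaphore - illustrative only; the notification is given
+    from an interrupt with vTaskNotifyGiveFromISR(), defined later in this file, or
+    from another task with the xTaskNotifyGive() macro:
+
+        for( ;; )
+        {
+            // Block until notified; pdTRUE clears the count back to zero so the
+            // notification behaves like a binary rather than a counting semaphore.
+            if( ulTaskNotifyTake( pdTRUE, pdMS_TO_TICKS( 500 ) ) != 0 )
+            {
+                // Process the event that was signalled.
+            }
+        }
+    )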
*/ + pxCurrentTCB->ucNotifyState = taskWAITING_NOTIFICATION; + + if( xTicksToWait > ( TickType_t ) 0 ) + { + prvAddCurrentTaskToDelayedList( xTicksToWait, pdTRUE ); + traceTASK_NOTIFY_WAIT_BLOCK(); + + /* All ports are written to allow a yield in a critical + section (some will yield immediately, others wait until the + critical section exits) - but it is not something that + application code should ever do. */ + portYIELD_WITHIN_API(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + taskEXIT_CRITICAL(); + + taskENTER_CRITICAL(); + { + traceTASK_NOTIFY_WAIT(); + + if( pulNotificationValue != NULL ) + { + /* Output the current notification value, which may or may not + have changed. */ + *pulNotificationValue = pxCurrentTCB->ulNotifiedValue; + } + + /* If ucNotifyValue is set then either the task never entered the + blocked state (because a notification was already pending) or the + task unblocked because of a notification. Otherwise the task + unblocked because of a timeout. */ + if( pxCurrentTCB->ucNotifyState != taskNOTIFICATION_RECEIVED ) + { + /* A notification was not received. */ + xReturn = pdFALSE; + } + else + { + /* A notification was already pending or a notification was + received while the task was waiting. */ + pxCurrentTCB->ulNotifiedValue &= ~ulBitsToClearOnExit; + xReturn = pdTRUE; + } + + pxCurrentTCB->ucNotifyState = taskNOT_WAITING_NOTIFICATION; + } + taskEXIT_CRITICAL(); + + return xReturn; + } + +#endif /* configUSE_TASK_NOTIFICATIONS */ +/*-----------------------------------------------------------*/ + +#if( configUSE_TASK_NOTIFICATIONS == 1 ) + + BaseType_t xTaskGenericNotify( TaskHandle_t xTaskToNotify, uint32_t ulValue, eNotifyAction eAction, uint32_t *pulPreviousNotificationValue ) + { + TCB_t * pxTCB; + BaseType_t xReturn = pdPASS; + uint8_t ucOriginalNotifyState; + + configASSERT( xTaskToNotify ); + pxTCB = xTaskToNotify; + + taskENTER_CRITICAL(); + { + if( pulPreviousNotificationValue != NULL ) + { + *pulPreviousNotificationValue = pxTCB->ulNotifiedValue; + } + + ucOriginalNotifyState = pxTCB->ucNotifyState; + + pxTCB->ucNotifyState = taskNOTIFICATION_RECEIVED; + + switch( eAction ) + { + case eSetBits : + pxTCB->ulNotifiedValue |= ulValue; + break; + + case eIncrement : + ( pxTCB->ulNotifiedValue )++; + break; + + case eSetValueWithOverwrite : + pxTCB->ulNotifiedValue = ulValue; + break; + + case eSetValueWithoutOverwrite : + if( ucOriginalNotifyState != taskNOTIFICATION_RECEIVED ) + { + pxTCB->ulNotifiedValue = ulValue; + } + else + { + /* The value could not be written to the task. */ + xReturn = pdFAIL; + } + break; + + case eNoAction: + /* The task is being notified without its notify value being + updated. */ + break; + + default: + /* Should not get here if all enums are handled. + Artificially force an assert by testing a value the + compiler can't assume is const. */ + configASSERT( pxTCB->ulNotifiedValue == ~0UL ); + + break; + } + + traceTASK_NOTIFY(); + + /* If the task is in the blocked state specifically to wait for a + notification then unblock it now. */ + if( ucOriginalNotifyState == taskWAITING_NOTIFICATION ) + { + ( void ) uxListRemove( &( pxTCB->xStateListItem ) ); + prvAddTaskToReadyList( pxTCB ); + + /* The task should not have been on an event list. 
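+    (As an aside, xTaskGenericNotify() is normally reached through the xTaskNotify()
+    macro in task.h.  A minimal sketch of the notify/wait pairing - illustrative
+    only; xTargetTask is an assumed, application created task handle:
+
+        // Sender: set bit 0 in the target task's notification value.
+        xTaskNotify( xTargetTask, 0x01UL, eSetBits );
+
+        // Receiver: wait for any bit, clearing all bits on exit.
+        uint32_t ulNotifiedBits;
+        if( xTaskNotifyWait( 0UL, 0xffffffffUL, &ulNotifiedBits, portMAX_DELAY ) == pdTRUE )
+        {
+            // ulNotifiedBits now holds the bits that were set by the sender.
+        }
+    )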
*/ + configASSERT( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) == NULL ); + + #if( configUSE_TICKLESS_IDLE != 0 ) + { + /* If a task is blocked waiting for a notification then + xNextTaskUnblockTime might be set to the blocked task's time + out time. If the task is unblocked for a reason other than + a timeout xNextTaskUnblockTime is normally left unchanged, + because it will automatically get reset to a new value when + the tick count equals xNextTaskUnblockTime. However if + tickless idling is used it might be more important to enter + sleep mode at the earliest possible time - so reset + xNextTaskUnblockTime here to ensure it is updated at the + earliest possible time. */ + prvResetNextTaskUnblockTime(); + } + #endif + + if( pxTCB->uxPriority > pxCurrentTCB->uxPriority ) + { + /* The notified task has a priority above the currently + executing task so a yield is required. */ + taskYIELD_IF_USING_PREEMPTION(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + taskEXIT_CRITICAL(); + + return xReturn; + } + +#endif /* configUSE_TASK_NOTIFICATIONS */ +/*-----------------------------------------------------------*/ + +#if( configUSE_TASK_NOTIFICATIONS == 1 ) + + BaseType_t xTaskGenericNotifyFromISR( TaskHandle_t xTaskToNotify, uint32_t ulValue, eNotifyAction eAction, uint32_t *pulPreviousNotificationValue, BaseType_t *pxHigherPriorityTaskWoken ) + { + TCB_t * pxTCB; + uint8_t ucOriginalNotifyState; + BaseType_t xReturn = pdPASS; + UBaseType_t uxSavedInterruptStatus; + + configASSERT( xTaskToNotify ); + + /* RTOS ports that support interrupt nesting have the concept of a + maximum system call (or maximum API call) interrupt priority. + Interrupts that are above the maximum system call priority are keep + permanently enabled, even when the RTOS kernel is in a critical section, + but cannot make any calls to FreeRTOS API functions. If configASSERT() + is defined in FreeRTOSConfig.h then + portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion + failure if a FreeRTOS API function is called from an interrupt that has + been assigned a priority above the configured maximum system call + priority. Only FreeRTOS functions that end in FromISR can be called + from interrupts that have been assigned a priority at or (logically) + below the maximum system call interrupt priority. FreeRTOS maintains a + separate interrupt safe API to ensure interrupt entry is as fast and as + simple as possible. More information (albeit Cortex-M specific) is + provided on the following link: + http://www.freertos.org/RTOS-Cortex-M3-M4.html */ + portASSERT_IF_INTERRUPT_PRIORITY_INVALID(); + + pxTCB = xTaskToNotify; + + uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR(); + { + if( pulPreviousNotificationValue != NULL ) + { + *pulPreviousNotificationValue = pxTCB->ulNotifiedValue; + } + + ucOriginalNotifyState = pxTCB->ucNotifyState; + pxTCB->ucNotifyState = taskNOTIFICATION_RECEIVED; + + switch( eAction ) + { + case eSetBits : + pxTCB->ulNotifiedValue |= ulValue; + break; + + case eIncrement : + ( pxTCB->ulNotifiedValue )++; + break; + + case eSetValueWithOverwrite : + pxTCB->ulNotifiedValue = ulValue; + break; + + case eSetValueWithoutOverwrite : + if( ucOriginalNotifyState != taskNOTIFICATION_RECEIVED ) + { + pxTCB->ulNotifiedValue = ulValue; + } + else + { + /* The value could not be written to the task. */ + xReturn = pdFAIL; + } + break; + + case eNoAction : + /* The task is being notified without its notify value being + updated. 
*/ + break; + + default: + /* Should not get here if all enums are handled. + Artificially force an assert by testing a value the + compiler can't assume is const. */ + configASSERT( pxTCB->ulNotifiedValue == ~0UL ); + break; + } + + traceTASK_NOTIFY_FROM_ISR(); + + /* If the task is in the blocked state specifically to wait for a + notification then unblock it now. */ + if( ucOriginalNotifyState == taskWAITING_NOTIFICATION ) + { + /* The task should not have been on an event list. */ + configASSERT( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) == NULL ); + + if( uxSchedulerSuspended == ( UBaseType_t ) pdFALSE ) + { + ( void ) uxListRemove( &( pxTCB->xStateListItem ) ); + prvAddTaskToReadyList( pxTCB ); + } + else + { + /* The delayed and ready lists cannot be accessed, so hold + this task pending until the scheduler is resumed. */ + vListInsertEnd( &( xPendingReadyList ), &( pxTCB->xEventListItem ) ); + } + + if( pxTCB->uxPriority > pxCurrentTCB->uxPriority ) + { + /* The notified task has a priority above the currently + executing task so a yield is required. */ + if( pxHigherPriorityTaskWoken != NULL ) + { + *pxHigherPriorityTaskWoken = pdTRUE; + } + + /* Mark that a yield is pending in case the user is not + using the "xHigherPriorityTaskWoken" parameter to an ISR + safe FreeRTOS function. */ + xYieldPending = pdTRUE; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + } + portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ); + + return xReturn; + } + +#endif /* configUSE_TASK_NOTIFICATIONS */ +/*-----------------------------------------------------------*/ + +#if( configUSE_TASK_NOTIFICATIONS == 1 ) + + void vTaskNotifyGiveFromISR( TaskHandle_t xTaskToNotify, BaseType_t *pxHigherPriorityTaskWoken ) + { + TCB_t * pxTCB; + uint8_t ucOriginalNotifyState; + UBaseType_t uxSavedInterruptStatus; + + configASSERT( xTaskToNotify ); + + /* RTOS ports that support interrupt nesting have the concept of a + maximum system call (or maximum API call) interrupt priority. + Interrupts that are above the maximum system call priority are keep + permanently enabled, even when the RTOS kernel is in a critical section, + but cannot make any calls to FreeRTOS API functions. If configASSERT() + is defined in FreeRTOSConfig.h then + portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion + failure if a FreeRTOS API function is called from an interrupt that has + been assigned a priority above the configured maximum system call + priority. Only FreeRTOS functions that end in FromISR can be called + from interrupts that have been assigned a priority at or (logically) + below the maximum system call interrupt priority. FreeRTOS maintains a + separate interrupt safe API to ensure interrupt entry is as fast and as + simple as possible. More information (albeit Cortex-M specific) is + provided on the following link: + http://www.freertos.org/RTOS-Cortex-M3-M4.html */ + portASSERT_IF_INTERRUPT_PRIORITY_INVALID(); + + pxTCB = xTaskToNotify; + + uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR(); + { + ucOriginalNotifyState = pxTCB->ucNotifyState; + pxTCB->ucNotifyState = taskNOTIFICATION_RECEIVED; + + /* 'Giving' is equivalent to incrementing a count in a counting + semaphore. */ + ( pxTCB->ulNotifiedValue )++; + + traceTASK_NOTIFY_GIVE_FROM_ISR(); + + /* If the task is in the blocked state specifically to wait for a + notification then unblock it now. */ + if( ucOriginalNotifyState == taskWAITING_NOTIFICATION ) + { + /* The task should not have been on an event list. 
*/ + configASSERT( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) == NULL ); + + if( uxSchedulerSuspended == ( UBaseType_t ) pdFALSE ) + { + ( void ) uxListRemove( &( pxTCB->xStateListItem ) ); + prvAddTaskToReadyList( pxTCB ); + } + else + { + /* The delayed and ready lists cannot be accessed, so hold + this task pending until the scheduler is resumed. */ + vListInsertEnd( &( xPendingReadyList ), &( pxTCB->xEventListItem ) ); + } + + if( pxTCB->uxPriority > pxCurrentTCB->uxPriority ) + { + /* The notified task has a priority above the currently + executing task so a yield is required. */ + if( pxHigherPriorityTaskWoken != NULL ) + { + *pxHigherPriorityTaskWoken = pdTRUE; + } + + /* Mark that a yield is pending in case the user is not + using the "xHigherPriorityTaskWoken" parameter in an ISR + safe FreeRTOS function. */ + xYieldPending = pdTRUE; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + } + portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ); + } + +#endif /* configUSE_TASK_NOTIFICATIONS */ + +/*-----------------------------------------------------------*/ + +#if( configUSE_TASK_NOTIFICATIONS == 1 ) + + BaseType_t xTaskNotifyStateClear( TaskHandle_t xTask ) + { + TCB_t *pxTCB; + BaseType_t xReturn; + + /* If null is passed in here then it is the calling task that is having + its notification state cleared. */ + pxTCB = prvGetTCBFromHandle( xTask ); + + taskENTER_CRITICAL(); + { + if( pxTCB->ucNotifyState == taskNOTIFICATION_RECEIVED ) + { + pxTCB->ucNotifyState = taskNOT_WAITING_NOTIFICATION; + xReturn = pdPASS; + } + else + { + xReturn = pdFAIL; + } + } + taskEXIT_CRITICAL(); + + return xReturn; + } + +#endif /* configUSE_TASK_NOTIFICATIONS */ +/*-----------------------------------------------------------*/ + +#if( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) + TickType_t xTaskGetIdleRunTimeCounter( void ) + { + return xIdleTaskHandle->ulRunTimeCounter; + } +#endif +/*-----------------------------------------------------------*/ + +static void prvAddCurrentTaskToDelayedList( TickType_t xTicksToWait, const BaseType_t xCanBlockIndefinitely ) +{ +TickType_t xTimeToWake; +const TickType_t xConstTickCount = xTickCount; + + #if( INCLUDE_xTaskAbortDelay == 1 ) + { + /* About to enter a delayed list, so ensure the ucDelayAborted flag is + reset to pdFALSE so it can be detected as having been set to pdTRUE + when the task leaves the Blocked state. */ + pxCurrentTCB->ucDelayAborted = pdFALSE; + } + #endif + + /* Remove the task from the ready list before adding it to the blocked list + as the same list item is used for both lists. */ + if( uxListRemove( &( pxCurrentTCB->xStateListItem ) ) == ( UBaseType_t ) 0 ) + { + /* The current task must be in a ready list, so there is no need to + check, and the port reset macro can be called directly. */ + portRESET_READY_PRIORITY( pxCurrentTCB->uxPriority, uxTopReadyPriority ); /*lint !e931 pxCurrentTCB cannot change as it is the calling task. pxCurrentTCB->uxPriority and uxTopReadyPriority cannot change as called with scheduler suspended or in a critical section. */ + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + #if ( INCLUDE_vTaskSuspend == 1 ) + { + if( ( xTicksToWait == portMAX_DELAY ) && ( xCanBlockIndefinitely != pdFALSE ) ) + { + /* Add the task to the suspended task list instead of a delayed task + list to ensure it is not woken by a timing event. It will block + indefinitely. 
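The vTaskNotifyGiveFromISR()/xTaskNotifyWait() machinery above is what the lightweight "task notification as semaphore" pattern is built on, together with the ulTaskNotifyTake() counterpart defined earlier in this file. A minimal sketch of that pattern, assuming a worker-task handle and an STM32-style IRQ handler name that are purely illustrative:

#include "FreeRTOS.h"
#include "task.h"

/* Illustrative worker-task handle; assumed to be filled in by xTaskCreate(). */
static TaskHandle_t xWorkerHandle = NULL;

/* Illustrative interrupt handler: 'give' the notification and request a
   context switch if the worker task now has the highest priority. */
void EXTI0_IRQHandler( void )
{
    BaseType_t xHigherPriorityTaskWoken = pdFALSE;

    vTaskNotifyGiveFromISR( xWorkerHandle, &xHigherPriorityTaskWoken );
    portYIELD_FROM_ISR( xHigherPriorityTaskWoken );
}

static void vWorkerTask( void *pvParameters )
{
    ( void ) pvParameters;

    for( ;; )
    {
        /* Block until the ISR gives the notification; pdTRUE clears the
           notification count on exit so one take consumes all pending gives. */
        ulTaskNotifyTake( pdTRUE, portMAX_DELAY );

        /* ... process the event ... */
    }
}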
*/ + vListInsertEnd( &xSuspendedTaskList, &( pxCurrentTCB->xStateListItem ) ); + } + else + { + /* Calculate the time at which the task should be woken if the event + does not occur. This may overflow but this doesn't matter, the + kernel will manage it correctly. */ + xTimeToWake = xConstTickCount + xTicksToWait; + + /* The list item will be inserted in wake time order. */ + listSET_LIST_ITEM_VALUE( &( pxCurrentTCB->xStateListItem ), xTimeToWake ); + + if( xTimeToWake < xConstTickCount ) + { + /* Wake time has overflowed. Place this item in the overflow + list. */ + vListInsert( pxOverflowDelayedTaskList, &( pxCurrentTCB->xStateListItem ) ); + } + else + { + /* The wake time has not overflowed, so the current block list + is used. */ + vListInsert( pxDelayedTaskList, &( pxCurrentTCB->xStateListItem ) ); + + /* If the task entering the blocked state was placed at the + head of the list of blocked tasks then xNextTaskUnblockTime + needs to be updated too. */ + if( xTimeToWake < xNextTaskUnblockTime ) + { + xNextTaskUnblockTime = xTimeToWake; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + } + } + #else /* INCLUDE_vTaskSuspend */ + { + /* Calculate the time at which the task should be woken if the event + does not occur. This may overflow but this doesn't matter, the kernel + will manage it correctly. */ + xTimeToWake = xConstTickCount + xTicksToWait; + + /* The list item will be inserted in wake time order. */ + listSET_LIST_ITEM_VALUE( &( pxCurrentTCB->xStateListItem ), xTimeToWake ); + + if( xTimeToWake < xConstTickCount ) + { + /* Wake time has overflowed. Place this item in the overflow list. */ + vListInsert( pxOverflowDelayedTaskList, &( pxCurrentTCB->xStateListItem ) ); + } + else + { + /* The wake time has not overflowed, so the current block list is used. */ + vListInsert( pxDelayedTaskList, &( pxCurrentTCB->xStateListItem ) ); + + /* If the task entering the blocked state was placed at the head of the + list of blocked tasks then xNextTaskUnblockTime needs to be updated + too. */ + if( xTimeToWake < xNextTaskUnblockTime ) + { + xNextTaskUnblockTime = xTimeToWake; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + + /* Avoid compiler warning when INCLUDE_vTaskSuspend is not 1. */ + ( void ) xCanBlockIndefinitely; + } + #endif /* INCLUDE_vTaskSuspend */ +} + +/* Code below here allows additional code to be inserted into this source file, +especially where access to file scope functions and data is needed (for example +when performing module tests). */ + +#ifdef FREERTOS_MODULE_TEST + #include "tasks_test_access_functions.h" +#endif + + +#if( configINCLUDE_FREERTOS_TASK_C_ADDITIONS_H == 1 ) + + #include "freertos_tasks_c_additions.h" + + #ifdef FREERTOS_TASKS_C_ADDITIONS_INIT + static void freertos_tasks_c_additions_init( void ) + { + FREERTOS_TASKS_C_ADDITIONS_INIT(); + } + #endif + +#endif + + diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/FreeRTOS/Source/timers.c b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/FreeRTOS/Source/timers.c new file mode 100644 index 0000000..ea04327 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/FreeRTOS/Source/timers.c @@ -0,0 +1,1102 @@ +/* + * FreeRTOS Kernel V10.2.1 + * Copyright (C) 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * http://www.FreeRTOS.org + * http://aws.amazon.com/freertos + * + * 1 tab == 4 spaces! + */ + +/* Standard includes. */ +#include + +/* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE prevents task.h from redefining +all the API functions to use the MPU wrappers. That should only be done when +task.h is included from an application file. */ +#define MPU_WRAPPERS_INCLUDED_FROM_API_FILE + +#include "FreeRTOS.h" +#include "task.h" +#include "queue.h" +#include "timers.h" + +#if ( INCLUDE_xTimerPendFunctionCall == 1 ) && ( configUSE_TIMERS == 0 ) + #error configUSE_TIMERS must be set to 1 to make the xTimerPendFunctionCall() function available. +#endif + +/* Lint e9021, e961 and e750 are suppressed as a MISRA exception justified +because the MPU ports require MPU_WRAPPERS_INCLUDED_FROM_API_FILE to be defined +for the header files above, but not in this file, in order to generate the +correct privileged Vs unprivileged linkage and placement. */ +#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE /*lint !e9021 !e961 !e750. */ + + +/* This entire source file will be skipped if the application is not configured +to include software timer functionality. This #if is closed at the very bottom +of this file. If you want to include software timer functionality then ensure +configUSE_TIMERS is set to 1 in FreeRTOSConfig.h. */ +#if ( configUSE_TIMERS == 1 ) + +/* Misc definitions. */ +#define tmrNO_DELAY ( TickType_t ) 0U + +/* The name assigned to the timer service task. This can be overridden by +defining trmTIMER_SERVICE_TASK_NAME in FreeRTOSConfig.h. */ +#ifndef configTIMER_SERVICE_TASK_NAME + #define configTIMER_SERVICE_TASK_NAME "Tmr Svc" +#endif + +/* Bit definitions used in the ucStatus member of a timer structure. */ +#define tmrSTATUS_IS_ACTIVE ( ( uint8_t ) 0x01 ) +#define tmrSTATUS_IS_STATICALLY_ALLOCATED ( ( uint8_t ) 0x02 ) +#define tmrSTATUS_IS_AUTORELOAD ( ( uint8_t ) 0x04 ) + +/* The definition of the timers themselves. */ +typedef struct tmrTimerControl /* The old naming convention is used to prevent breaking kernel aware debuggers. */ +{ + const char *pcTimerName; /*<< Text name. This is not used by the kernel, it is included simply to make debugging easier. */ /*lint !e971 Unqualified char types are allowed for strings and single characters only. */ + ListItem_t xTimerListItem; /*<< Standard linked list item as used by all kernel features for event management. 
*/ + TickType_t xTimerPeriodInTicks;/*<< How quickly and often the timer expires. */ + void *pvTimerID; /*<< An ID to identify the timer. This allows the timer to be identified when the same callback is used for multiple timers. */ + TimerCallbackFunction_t pxCallbackFunction; /*<< The function that will be called when the timer expires. */ + #if( configUSE_TRACE_FACILITY == 1 ) + UBaseType_t uxTimerNumber; /*<< An ID assigned by trace tools such as FreeRTOS+Trace */ + #endif + uint8_t ucStatus; /*<< Holds bits to say if the timer was statically allocated or not, and if it is active or not. */ +} xTIMER; + +/* The old xTIMER name is maintained above then typedefed to the new Timer_t +name below to enable the use of older kernel aware debuggers. */ +typedef xTIMER Timer_t; + +/* The definition of messages that can be sent and received on the timer queue. +Two types of message can be queued - messages that manipulate a software timer, +and messages that request the execution of a non-timer related callback. The +two message types are defined in two separate structures, xTimerParametersType +and xCallbackParametersType respectively. */ +typedef struct tmrTimerParameters +{ + TickType_t xMessageValue; /*<< An optional value used by a subset of commands, for example, when changing the period of a timer. */ + Timer_t * pxTimer; /*<< The timer to which the command will be applied. */ +} TimerParameter_t; + + +typedef struct tmrCallbackParameters +{ + PendedFunction_t pxCallbackFunction; /* << The callback function to execute. */ + void *pvParameter1; /* << The value that will be used as the callback functions first parameter. */ + uint32_t ulParameter2; /* << The value that will be used as the callback functions second parameter. */ +} CallbackParameters_t; + +/* The structure that contains the two message types, along with an identifier +that is used to determine which message type is valid. */ +typedef struct tmrTimerQueueMessage +{ + BaseType_t xMessageID; /*<< The command being sent to the timer service task. */ + union + { + TimerParameter_t xTimerParameters; + + /* Don't include xCallbackParameters if it is not going to be used as + it makes the structure (and therefore the timer queue) larger. */ + #if ( INCLUDE_xTimerPendFunctionCall == 1 ) + CallbackParameters_t xCallbackParameters; + #endif /* INCLUDE_xTimerPendFunctionCall */ + } u; +} DaemonTaskMessage_t; + +/*lint -save -e956 A manual analysis and inspection has been used to determine +which static variables must be declared volatile. */ + +/* The list in which active timers are stored. Timers are referenced in expire +time order, with the nearest expiry time at the front of the list. Only the +timer service task is allowed to access these lists. +xActiveTimerList1 and xActiveTimerList2 could be at function scope but that +breaks some kernel aware debuggers, and debuggers that reply on removing the +static qualifier. */ +PRIVILEGED_DATA static List_t xActiveTimerList1 = { 0 }; +PRIVILEGED_DATA static List_t xActiveTimerList2 = { 0 }; +PRIVILEGED_DATA static List_t *pxCurrentTimerList = NULL; +PRIVILEGED_DATA static List_t *pxOverflowTimerList = NULL; + +/* A queue that is used to send commands to the timer service task. 
*/ +PRIVILEGED_DATA static QueueHandle_t xTimerQueue = NULL; +PRIVILEGED_DATA static TaskHandle_t xTimerTaskHandle = NULL; + +/*lint -restore */ + +/*-----------------------------------------------------------*/ + +#if( configSUPPORT_STATIC_ALLOCATION == 1 ) + + /* If static allocation is supported then the application must provide the + following callback function - which enables the application to optionally + provide the memory that will be used by the timer task as the task's stack + and TCB. */ + extern void vApplicationGetTimerTaskMemory( StaticTask_t **ppxTimerTaskTCBBuffer, StackType_t **ppxTimerTaskStackBuffer, uint32_t *pulTimerTaskStackSize ); + +#endif + +/* + * Initialise the infrastructure used by the timer service task if it has not + * been initialised already. + */ +static void prvCheckForValidListAndQueue( void ) PRIVILEGED_FUNCTION; + +/* + * The timer service task (daemon). Timer functionality is controlled by this + * task. Other tasks communicate with the timer service task using the + * xTimerQueue queue. + */ +static portTASK_FUNCTION_PROTO( prvTimerTask, pvParameters ) PRIVILEGED_FUNCTION; + +/* + * Called by the timer service task to interpret and process a command it + * received on the timer queue. + */ +static void prvProcessReceivedCommands( void ) PRIVILEGED_FUNCTION; + +/* + * Insert the timer into either xActiveTimerList1, or xActiveTimerList2, + * depending on if the expire time causes a timer counter overflow. + */ +static BaseType_t prvInsertTimerInActiveList( Timer_t * const pxTimer, const TickType_t xNextExpiryTime, const TickType_t xTimeNow, const TickType_t xCommandTime ) PRIVILEGED_FUNCTION; + +/* + * An active timer has reached its expire time. Reload the timer if it is an + * auto reload timer, then call its callback. + */ +static void prvProcessExpiredTimer( const TickType_t xNextExpireTime, const TickType_t xTimeNow ) PRIVILEGED_FUNCTION; + +/* + * The tick count has overflowed. Switch the timer lists after ensuring the + * current timer list does not still reference some timers. + */ +static void prvSwitchTimerLists( void ) PRIVILEGED_FUNCTION; + +/* + * Obtain the current tick count, setting *pxTimerListsWereSwitched to pdTRUE + * if a tick count overflow occurred since prvSampleTimeNow() was last called. + */ +static TickType_t prvSampleTimeNow( BaseType_t * const pxTimerListsWereSwitched ) PRIVILEGED_FUNCTION; + +/* + * If the timer list contains any active timers then return the expire time of + * the timer that will expire first and set *pxListWasEmpty to false. If the + * timer list does not contain any timers then return 0 and set *pxListWasEmpty + * to pdTRUE. + */ +static TickType_t prvGetNextExpireTime( BaseType_t * const pxListWasEmpty ) PRIVILEGED_FUNCTION; + +/* + * If a timer has expired, process it. Otherwise, block the timer service task + * until either a timer does expire or a command is received. + */ +static void prvProcessTimerOrBlockTask( const TickType_t xNextExpireTime, BaseType_t xListWasEmpty ) PRIVILEGED_FUNCTION; + +/* + * Called after a Timer_t structure has been allocated either statically or + * dynamically to fill in the structure's members. + */ +static void prvInitialiseNewTimer( const char * const pcTimerName, /*lint !e971 Unqualified char types are allowed for strings and single characters only. 
*/ + const TickType_t xTimerPeriodInTicks, + const UBaseType_t uxAutoReload, + void * const pvTimerID, + TimerCallbackFunction_t pxCallbackFunction, + Timer_t *pxNewTimer ) PRIVILEGED_FUNCTION; +/*-----------------------------------------------------------*/ + +BaseType_t xTimerCreateTimerTask( void ) +{ +BaseType_t xReturn = pdFAIL; + + /* This function is called when the scheduler is started if + configUSE_TIMERS is set to 1. Check that the infrastructure used by the + timer service task has been created/initialised. If timers have already + been created then the initialisation will already have been performed. */ + prvCheckForValidListAndQueue(); + + if( xTimerQueue != NULL ) + { + #if( configSUPPORT_STATIC_ALLOCATION == 1 ) + { + StaticTask_t *pxTimerTaskTCBBuffer = NULL; + StackType_t *pxTimerTaskStackBuffer = NULL; + uint32_t ulTimerTaskStackSize; + + vApplicationGetTimerTaskMemory( &pxTimerTaskTCBBuffer, &pxTimerTaskStackBuffer, &ulTimerTaskStackSize ); + xTimerTaskHandle = xTaskCreateStatic( prvTimerTask, + configTIMER_SERVICE_TASK_NAME, + ulTimerTaskStackSize, + NULL, + ( ( UBaseType_t ) configTIMER_TASK_PRIORITY ) | portPRIVILEGE_BIT, + pxTimerTaskStackBuffer, + pxTimerTaskTCBBuffer ); + + if( xTimerTaskHandle != NULL ) + { + xReturn = pdPASS; + } + } + #else + { + xReturn = xTaskCreate( prvTimerTask, + configTIMER_SERVICE_TASK_NAME, + configTIMER_TASK_STACK_DEPTH, + NULL, + ( ( UBaseType_t ) configTIMER_TASK_PRIORITY ) | portPRIVILEGE_BIT, + &xTimerTaskHandle ); + } + #endif /* configSUPPORT_STATIC_ALLOCATION */ + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + configASSERT( xReturn ); + return xReturn; +} +/*-----------------------------------------------------------*/ + +#if( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) + + TimerHandle_t xTimerCreate( const char * const pcTimerName, /*lint !e971 Unqualified char types are allowed for strings and single characters only. */ + const TickType_t xTimerPeriodInTicks, + const UBaseType_t uxAutoReload, + void * const pvTimerID, + TimerCallbackFunction_t pxCallbackFunction ) + { + Timer_t *pxNewTimer; + + pxNewTimer = ( Timer_t * ) pvPortMalloc( sizeof( Timer_t ) ); /*lint !e9087 !e9079 All values returned by pvPortMalloc() have at least the alignment required by the MCU's stack, and the first member of Timer_t is always a pointer to the timer's mame. */ + + if( pxNewTimer != NULL ) + { + /* Status is thus far zero as the timer is not created statically + and has not been started. The autoreload bit may get set in + prvInitialiseNewTimer. */ + pxNewTimer->ucStatus = 0x00; + prvInitialiseNewTimer( pcTimerName, xTimerPeriodInTicks, uxAutoReload, pvTimerID, pxCallbackFunction, pxNewTimer ); + } + + return pxNewTimer; + } + +#endif /* configSUPPORT_DYNAMIC_ALLOCATION */ +/*-----------------------------------------------------------*/ + +#if( configSUPPORT_STATIC_ALLOCATION == 1 ) + + TimerHandle_t xTimerCreateStatic( const char * const pcTimerName, /*lint !e971 Unqualified char types are allowed for strings and single characters only. */ + const TickType_t xTimerPeriodInTicks, + const UBaseType_t uxAutoReload, + void * const pvTimerID, + TimerCallbackFunction_t pxCallbackFunction, + StaticTimer_t *pxTimerBuffer ) + { + Timer_t *pxNewTimer; + + #if( configASSERT_DEFINED == 1 ) + { + /* Sanity check that the size of the structure used to declare a + variable of type StaticTimer_t equals the size of the real timer + structure. 
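Note that with configSUPPORT_STATIC_ALLOCATION set to 1, xTimerCreateTimerTask() above depends on the application providing vApplicationGetTimerTaskMemory(). A minimal sketch of that callback; the buffer names are illustrative:

#include "FreeRTOS.h"
#include "timers.h"

/* Statically allocated storage for the timer service task. */
static StaticTask_t xTimerTaskTCB;
static StackType_t uxTimerTaskStack[ configTIMER_TASK_STACK_DEPTH ];

void vApplicationGetTimerTaskMemory( StaticTask_t **ppxTimerTaskTCBBuffer,
                                     StackType_t **ppxTimerTaskStackBuffer,
                                     uint32_t *pulTimerTaskStackSize )
{
    /* Hand the kernel the TCB, stack and stack size to use for the
       timer service task. */
    *ppxTimerTaskTCBBuffer = &xTimerTaskTCB;
    *ppxTimerTaskStackBuffer = uxTimerTaskStack;
    *pulTimerTaskStackSize = configTIMER_TASK_STACK_DEPTH;
}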
*/ + volatile size_t xSize = sizeof( StaticTimer_t ); + configASSERT( xSize == sizeof( Timer_t ) ); + ( void ) xSize; /* Keeps lint quiet when configASSERT() is not defined. */ + } + #endif /* configASSERT_DEFINED */ + + /* A pointer to a StaticTimer_t structure MUST be provided, use it. */ + configASSERT( pxTimerBuffer ); + pxNewTimer = ( Timer_t * ) pxTimerBuffer; /*lint !e740 !e9087 StaticTimer_t is a pointer to a Timer_t, so guaranteed to be aligned and sized correctly (checked by an assert()), so this is safe. */ + + if( pxNewTimer != NULL ) + { + /* Timers can be created statically or dynamically so note this + timer was created statically in case it is later deleted. The + autoreload bit may get set in prvInitialiseNewTimer(). */ + pxNewTimer->ucStatus = tmrSTATUS_IS_STATICALLY_ALLOCATED; + + prvInitialiseNewTimer( pcTimerName, xTimerPeriodInTicks, uxAutoReload, pvTimerID, pxCallbackFunction, pxNewTimer ); + } + + return pxNewTimer; + } + +#endif /* configSUPPORT_STATIC_ALLOCATION */ +/*-----------------------------------------------------------*/ + +static void prvInitialiseNewTimer( const char * const pcTimerName, /*lint !e971 Unqualified char types are allowed for strings and single characters only. */ + const TickType_t xTimerPeriodInTicks, + const UBaseType_t uxAutoReload, + void * const pvTimerID, + TimerCallbackFunction_t pxCallbackFunction, + Timer_t *pxNewTimer ) +{ + /* 0 is not a valid value for xTimerPeriodInTicks. */ + configASSERT( ( xTimerPeriodInTicks > 0 ) ); + + if( pxNewTimer != NULL ) + { + /* Ensure the infrastructure used by the timer service task has been + created/initialised. */ + prvCheckForValidListAndQueue(); + + /* Initialise the timer structure members using the function + parameters. */ + pxNewTimer->pcTimerName = pcTimerName; + pxNewTimer->xTimerPeriodInTicks = xTimerPeriodInTicks; + pxNewTimer->pvTimerID = pvTimerID; + pxNewTimer->pxCallbackFunction = pxCallbackFunction; + vListInitialiseItem( &( pxNewTimer->xTimerListItem ) ); + if( uxAutoReload != pdFALSE ) + { + pxNewTimer->ucStatus |= tmrSTATUS_IS_AUTORELOAD; + } + traceTIMER_CREATE( pxNewTimer ); + } +} +/*-----------------------------------------------------------*/ + +BaseType_t xTimerGenericCommand( TimerHandle_t xTimer, const BaseType_t xCommandID, const TickType_t xOptionalValue, BaseType_t * const pxHigherPriorityTaskWoken, const TickType_t xTicksToWait ) +{ +BaseType_t xReturn = pdFAIL; +DaemonTaskMessage_t xMessage; + + configASSERT( xTimer ); + + /* Send a message to the timer service task to perform a particular action + on a particular timer definition. */ + if( xTimerQueue != NULL ) + { + /* Send a command to the timer service task to start the xTimer timer. 
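For reference, xTimerStart(), xTimerReset() and the other application-facing calls are thin macros in timers.h that post a tmrCOMMAND_* message through xTimerGenericCommand() above. A minimal usage sketch; the timer name, period and callback are illustrative:

#include "FreeRTOS.h"
#include "timers.h"

/* Runs in the timer service task context; keep it short and never block. */
static void prvBlinkCallback( TimerHandle_t xTimer )
{
    ( void ) xTimer;
    /* ... e.g. toggle an LED ... */
}

void vStartBlinkTimer( void )
{
    TimerHandle_t xBlink;

    /* Auto-reload timer with a 500 ms period and no timer ID. */
    xBlink = xTimerCreate( "Blink", pdMS_TO_TICKS( 500 ), pdTRUE, NULL, prvBlinkCallback );

    if( xBlink != NULL )
    {
        /* Queues tmrCOMMAND_START to the timer service task, blocking for
           at most 10 ticks if the timer command queue is full. */
        ( void ) xTimerStart( xBlink, 10 );
    }
}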
*/ + xMessage.xMessageID = xCommandID; + xMessage.u.xTimerParameters.xMessageValue = xOptionalValue; + xMessage.u.xTimerParameters.pxTimer = xTimer; + + if( xCommandID < tmrFIRST_FROM_ISR_COMMAND ) + { + if( xTaskGetSchedulerState() == taskSCHEDULER_RUNNING ) + { + xReturn = xQueueSendToBack( xTimerQueue, &xMessage, xTicksToWait ); + } + else + { + xReturn = xQueueSendToBack( xTimerQueue, &xMessage, tmrNO_DELAY ); + } + } + else + { + xReturn = xQueueSendToBackFromISR( xTimerQueue, &xMessage, pxHigherPriorityTaskWoken ); + } + + traceTIMER_COMMAND_SEND( xTimer, xCommandID, xOptionalValue, xReturn ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + return xReturn; +} +/*-----------------------------------------------------------*/ + +TaskHandle_t xTimerGetTimerDaemonTaskHandle( void ) +{ + /* If xTimerGetTimerDaemonTaskHandle() is called before the scheduler has been + started, then xTimerTaskHandle will be NULL. */ + configASSERT( ( xTimerTaskHandle != NULL ) ); + return xTimerTaskHandle; +} +/*-----------------------------------------------------------*/ + +TickType_t xTimerGetPeriod( TimerHandle_t xTimer ) +{ +Timer_t *pxTimer = xTimer; + + configASSERT( xTimer ); + return pxTimer->xTimerPeriodInTicks; +} +/*-----------------------------------------------------------*/ + +void vTimerSetReloadMode( TimerHandle_t xTimer, const UBaseType_t uxAutoReload ) +{ +Timer_t * pxTimer = xTimer; + + configASSERT( xTimer ); + taskENTER_CRITICAL(); + { + if( uxAutoReload != pdFALSE ) + { + pxTimer->ucStatus |= tmrSTATUS_IS_AUTORELOAD; + } + else + { + pxTimer->ucStatus &= ~tmrSTATUS_IS_AUTORELOAD; + } + } + taskEXIT_CRITICAL(); +} +/*-----------------------------------------------------------*/ + +TickType_t xTimerGetExpiryTime( TimerHandle_t xTimer ) +{ +Timer_t * pxTimer = xTimer; +TickType_t xReturn; + + configASSERT( xTimer ); + xReturn = listGET_LIST_ITEM_VALUE( &( pxTimer->xTimerListItem ) ); + return xReturn; +} +/*-----------------------------------------------------------*/ + +const char * pcTimerGetName( TimerHandle_t xTimer ) /*lint !e971 Unqualified char types are allowed for strings and single characters only. */ +{ +Timer_t *pxTimer = xTimer; + + configASSERT( xTimer ); + return pxTimer->pcTimerName; +} +/*-----------------------------------------------------------*/ + +static void prvProcessExpiredTimer( const TickType_t xNextExpireTime, const TickType_t xTimeNow ) +{ +BaseType_t xResult; +Timer_t * const pxTimer = ( Timer_t * ) listGET_OWNER_OF_HEAD_ENTRY( pxCurrentTimerList ); /*lint !e9087 !e9079 void * is used as this macro is used with tasks and co-routines too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */ + + /* Remove the timer from the list of active timers. A check has already + been performed to ensure the list is not empty. */ + ( void ) uxListRemove( &( pxTimer->xTimerListItem ) ); + traceTIMER_EXPIRED( pxTimer ); + + /* If the timer is an auto reload timer then calculate the next + expiry time and re-insert the timer in the list of active timers. */ + if( ( pxTimer->ucStatus & tmrSTATUS_IS_AUTORELOAD ) != 0 ) + { + /* The timer is inserted into a list using a time relative to anything + other than the current time. It will therefore be inserted into the + correct list relative to the time this task thinks it is now. 
*/ + if( prvInsertTimerInActiveList( pxTimer, ( xNextExpireTime + pxTimer->xTimerPeriodInTicks ), xTimeNow, xNextExpireTime ) != pdFALSE ) + { + /* The timer expired before it was added to the active timer + list. Reload it now. */ + xResult = xTimerGenericCommand( pxTimer, tmrCOMMAND_START_DONT_TRACE, xNextExpireTime, NULL, tmrNO_DELAY ); + configASSERT( xResult ); + ( void ) xResult; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + pxTimer->ucStatus &= ~tmrSTATUS_IS_ACTIVE; + mtCOVERAGE_TEST_MARKER(); + } + + /* Call the timer callback. */ + pxTimer->pxCallbackFunction( ( TimerHandle_t ) pxTimer ); +} +/*-----------------------------------------------------------*/ + +static portTASK_FUNCTION( prvTimerTask, pvParameters ) +{ +TickType_t xNextExpireTime; +BaseType_t xListWasEmpty; + + /* Just to avoid compiler warnings. */ + ( void ) pvParameters; + + #if( configUSE_DAEMON_TASK_STARTUP_HOOK == 1 ) + { + extern void vApplicationDaemonTaskStartupHook( void ); + + /* Allow the application writer to execute some code in the context of + this task at the point the task starts executing. This is useful if the + application includes initialisation code that would benefit from + executing after the scheduler has been started. */ + vApplicationDaemonTaskStartupHook(); + } + #endif /* configUSE_DAEMON_TASK_STARTUP_HOOK */ + + for( ;; ) + { + /* Query the timers list to see if it contains any timers, and if so, + obtain the time at which the next timer will expire. */ + xNextExpireTime = prvGetNextExpireTime( &xListWasEmpty ); + + /* If a timer has expired, process it. Otherwise, block this task + until either a timer does expire, or a command is received. */ + prvProcessTimerOrBlockTask( xNextExpireTime, xListWasEmpty ); + + /* Empty the command queue. */ + prvProcessReceivedCommands(); + } +} +/*-----------------------------------------------------------*/ + +static void prvProcessTimerOrBlockTask( const TickType_t xNextExpireTime, BaseType_t xListWasEmpty ) +{ +TickType_t xTimeNow; +BaseType_t xTimerListsWereSwitched; + + vTaskSuspendAll(); + { + /* Obtain the time now to make an assessment as to whether the timer + has expired or not. If obtaining the time causes the lists to switch + then don't process this timer as any timers that remained in the list + when the lists were switched will have been processed within the + prvSampleTimeNow() function. */ + xTimeNow = prvSampleTimeNow( &xTimerListsWereSwitched ); + if( xTimerListsWereSwitched == pdFALSE ) + { + /* The tick count has not overflowed, has the timer expired? */ + if( ( xListWasEmpty == pdFALSE ) && ( xNextExpireTime <= xTimeNow ) ) + { + ( void ) xTaskResumeAll(); + prvProcessExpiredTimer( xNextExpireTime, xTimeNow ); + } + else + { + /* The tick count has not overflowed, and the next expire + time has not been reached yet. This task should therefore + block to wait for the next expire time or a command to be + received - whichever comes first. The following line cannot + be reached unless xNextExpireTime > xTimeNow, except in the + case when the current timer list is empty. */ + if( xListWasEmpty != pdFALSE ) + { + /* The current timer list is empty - is the overflow list + also empty? */ + xListWasEmpty = listLIST_IS_EMPTY( pxOverflowTimerList ); + } + + vQueueWaitForMessageRestricted( xTimerQueue, ( xNextExpireTime - xTimeNow ), xListWasEmpty ); + + if( xTaskResumeAll() == pdFALSE ) + { + /* Yield to wait for either a command to arrive, or the + block time to expire. 
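If configUSE_DAEMON_TASK_STARTUP_HOOK is 1 in FreeRTOSConfig.h, prvTimerTask() above calls the application-supplied hook once before entering its loop. A minimal placeholder, for illustration only:

#include "FreeRTOS.h"
#include "timers.h"

/* Called once, in the timer service task context, before the daemon task
   starts processing timers - useful for set-up that needs a running
   scheduler. The body here is only a placeholder. */
void vApplicationDaemonTaskStartupHook( void )
{
    /* e.g. create application timers or start communication stacks. */
}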
If a command arrived between the + critical section being exited and this yield then the yield + will not cause the task to block. */ + portYIELD_WITHIN_API(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + } + else + { + ( void ) xTaskResumeAll(); + } + } +} +/*-----------------------------------------------------------*/ + +static TickType_t prvGetNextExpireTime( BaseType_t * const pxListWasEmpty ) +{ +TickType_t xNextExpireTime; + + /* Timers are listed in expiry time order, with the head of the list + referencing the task that will expire first. Obtain the time at which + the timer with the nearest expiry time will expire. If there are no + active timers then just set the next expire time to 0. That will cause + this task to unblock when the tick count overflows, at which point the + timer lists will be switched and the next expiry time can be + re-assessed. */ + *pxListWasEmpty = listLIST_IS_EMPTY( pxCurrentTimerList ); + if( *pxListWasEmpty == pdFALSE ) + { + xNextExpireTime = listGET_ITEM_VALUE_OF_HEAD_ENTRY( pxCurrentTimerList ); + } + else + { + /* Ensure the task unblocks when the tick count rolls over. */ + xNextExpireTime = ( TickType_t ) 0U; + } + + return xNextExpireTime; +} +/*-----------------------------------------------------------*/ + +static TickType_t prvSampleTimeNow( BaseType_t * const pxTimerListsWereSwitched ) +{ +TickType_t xTimeNow; +PRIVILEGED_DATA static TickType_t xLastTime = ( TickType_t ) 0U; /*lint !e956 Variable is only accessible to one task. */ + + xTimeNow = xTaskGetTickCount(); + + if( xTimeNow < xLastTime ) + { + prvSwitchTimerLists(); + *pxTimerListsWereSwitched = pdTRUE; + } + else + { + *pxTimerListsWereSwitched = pdFALSE; + } + + xLastTime = xTimeNow; + + return xTimeNow; +} +/*-----------------------------------------------------------*/ + +static BaseType_t prvInsertTimerInActiveList( Timer_t * const pxTimer, const TickType_t xNextExpiryTime, const TickType_t xTimeNow, const TickType_t xCommandTime ) +{ +BaseType_t xProcessTimerNow = pdFALSE; + + listSET_LIST_ITEM_VALUE( &( pxTimer->xTimerListItem ), xNextExpiryTime ); + listSET_LIST_ITEM_OWNER( &( pxTimer->xTimerListItem ), pxTimer ); + + if( xNextExpiryTime <= xTimeNow ) + { + /* Has the expiry time elapsed between the command to start/reset a + timer was issued, and the time the command was processed? */ + if( ( ( TickType_t ) ( xTimeNow - xCommandTime ) ) >= pxTimer->xTimerPeriodInTicks ) /*lint !e961 MISRA exception as the casts are only redundant for some ports. */ + { + /* The time between a command being issued and the command being + processed actually exceeds the timers period. */ + xProcessTimerNow = pdTRUE; + } + else + { + vListInsert( pxOverflowTimerList, &( pxTimer->xTimerListItem ) ); + } + } + else + { + if( ( xTimeNow < xCommandTime ) && ( xNextExpiryTime >= xCommandTime ) ) + { + /* If, since the command was issued, the tick count has overflowed + but the expiry time has not, then the timer must have already passed + its expiry time and should be processed immediately. 
*/ + xProcessTimerNow = pdTRUE; + } + else + { + vListInsert( pxCurrentTimerList, &( pxTimer->xTimerListItem ) ); + } + } + + return xProcessTimerNow; +} +/*-----------------------------------------------------------*/ + +static void prvProcessReceivedCommands( void ) +{ +DaemonTaskMessage_t xMessage; +Timer_t *pxTimer; +BaseType_t xTimerListsWereSwitched, xResult; +TickType_t xTimeNow; + + while( xQueueReceive( xTimerQueue, &xMessage, tmrNO_DELAY ) != pdFAIL ) /*lint !e603 xMessage does not have to be initialised as it is passed out, not in, and it is not used unless xQueueReceive() returns pdTRUE. */ + { + #if ( INCLUDE_xTimerPendFunctionCall == 1 ) + { + /* Negative commands are pended function calls rather than timer + commands. */ + if( xMessage.xMessageID < ( BaseType_t ) 0 ) + { + const CallbackParameters_t * const pxCallback = &( xMessage.u.xCallbackParameters ); + + /* The timer uses the xCallbackParameters member to request a + callback be executed. Check the callback is not NULL. */ + configASSERT( pxCallback ); + + /* Call the function. */ + pxCallback->pxCallbackFunction( pxCallback->pvParameter1, pxCallback->ulParameter2 ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + #endif /* INCLUDE_xTimerPendFunctionCall */ + + /* Commands that are positive are timer commands rather than pended + function calls. */ + if( xMessage.xMessageID >= ( BaseType_t ) 0 ) + { + /* The messages uses the xTimerParameters member to work on a + software timer. */ + pxTimer = xMessage.u.xTimerParameters.pxTimer; + + if( listIS_CONTAINED_WITHIN( NULL, &( pxTimer->xTimerListItem ) ) == pdFALSE ) /*lint !e961. The cast is only redundant when NULL is passed into the macro. */ + { + /* The timer is in a list, remove it. */ + ( void ) uxListRemove( &( pxTimer->xTimerListItem ) ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + traceTIMER_COMMAND_RECEIVED( pxTimer, xMessage.xMessageID, xMessage.u.xTimerParameters.xMessageValue ); + + /* In this case the xTimerListsWereSwitched parameter is not used, but + it must be present in the function call. prvSampleTimeNow() must be + called after the message is received from xTimerQueue so there is no + possibility of a higher priority task adding a message to the message + queue with a time that is ahead of the timer daemon task (because it + pre-empted the timer daemon task after the xTimeNow value was set). */ + xTimeNow = prvSampleTimeNow( &xTimerListsWereSwitched ); + + switch( xMessage.xMessageID ) + { + case tmrCOMMAND_START : + case tmrCOMMAND_START_FROM_ISR : + case tmrCOMMAND_RESET : + case tmrCOMMAND_RESET_FROM_ISR : + case tmrCOMMAND_START_DONT_TRACE : + /* Start or restart a timer. */ + pxTimer->ucStatus |= tmrSTATUS_IS_ACTIVE; + if( prvInsertTimerInActiveList( pxTimer, xMessage.u.xTimerParameters.xMessageValue + pxTimer->xTimerPeriodInTicks, xTimeNow, xMessage.u.xTimerParameters.xMessageValue ) != pdFALSE ) + { + /* The timer expired before it was added to the active + timer list. Process it now. 
*/ + pxTimer->pxCallbackFunction( ( TimerHandle_t ) pxTimer ); + traceTIMER_EXPIRED( pxTimer ); + + if( ( pxTimer->ucStatus & tmrSTATUS_IS_AUTORELOAD ) != 0 ) + { + xResult = xTimerGenericCommand( pxTimer, tmrCOMMAND_START_DONT_TRACE, xMessage.u.xTimerParameters.xMessageValue + pxTimer->xTimerPeriodInTicks, NULL, tmrNO_DELAY ); + configASSERT( xResult ); + ( void ) xResult; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + break; + + case tmrCOMMAND_STOP : + case tmrCOMMAND_STOP_FROM_ISR : + /* The timer has already been removed from the active list. */ + pxTimer->ucStatus &= ~tmrSTATUS_IS_ACTIVE; + break; + + case tmrCOMMAND_CHANGE_PERIOD : + case tmrCOMMAND_CHANGE_PERIOD_FROM_ISR : + pxTimer->ucStatus |= tmrSTATUS_IS_ACTIVE; + pxTimer->xTimerPeriodInTicks = xMessage.u.xTimerParameters.xMessageValue; + configASSERT( ( pxTimer->xTimerPeriodInTicks > 0 ) ); + + /* The new period does not really have a reference, and can + be longer or shorter than the old one. The command time is + therefore set to the current time, and as the period cannot + be zero the next expiry time can only be in the future, + meaning (unlike for the xTimerStart() case above) there is + no fail case that needs to be handled here. */ + ( void ) prvInsertTimerInActiveList( pxTimer, ( xTimeNow + pxTimer->xTimerPeriodInTicks ), xTimeNow, xTimeNow ); + break; + + case tmrCOMMAND_DELETE : + #if ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) + { + /* The timer has already been removed from the active list, + just free up the memory if the memory was dynamically + allocated. */ + if( ( pxTimer->ucStatus & tmrSTATUS_IS_STATICALLY_ALLOCATED ) == ( uint8_t ) 0 ) + { + vPortFree( pxTimer ); + } + else + { + pxTimer->ucStatus &= ~tmrSTATUS_IS_ACTIVE; + } + } + #else + { + /* If dynamic allocation is not enabled, the memory + could not have been dynamically allocated. So there is + no need to free the memory - just mark the timer as + "not active". */ + pxTimer->ucStatus &= ~tmrSTATUS_IS_ACTIVE; + } + #endif /* configSUPPORT_DYNAMIC_ALLOCATION */ + break; + + default : + /* Don't expect to get here. */ + break; + } + } + } +} +/*-----------------------------------------------------------*/ + +static void prvSwitchTimerLists( void ) +{ +TickType_t xNextExpireTime, xReloadTime; +List_t *pxTemp; +Timer_t *pxTimer; +BaseType_t xResult; + + /* The tick count has overflowed. The timer lists must be switched. + If there are any timers still referenced from the current timer list + then they must have expired and should be processed before the lists + are switched. */ + while( listLIST_IS_EMPTY( pxCurrentTimerList ) == pdFALSE ) + { + xNextExpireTime = listGET_ITEM_VALUE_OF_HEAD_ENTRY( pxCurrentTimerList ); + + /* Remove the timer from the list. */ + pxTimer = ( Timer_t * ) listGET_OWNER_OF_HEAD_ENTRY( pxCurrentTimerList ); /*lint !e9087 !e9079 void * is used as this macro is used with tasks and co-routines too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */ + ( void ) uxListRemove( &( pxTimer->xTimerListItem ) ); + traceTIMER_EXPIRED( pxTimer ); + + /* Execute its callback, then send a command to restart the timer if + it is an auto-reload timer. It cannot be restarted here as the lists + have not yet been switched. 
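Each of the command cases handled above is driven from the application side by a matching macro in timers.h. A minimal sketch of issuing them; xMyTimer is assumed to be a handle previously returned by xTimerCreate():

#include "FreeRTOS.h"
#include "timers.h"

void vReconfigureTimer( TimerHandle_t xMyTimer )
{
    /* Each call queues the corresponding tmrCOMMAND_* message to the timer
       service task; the final argument is how long to block if the timer
       command queue is full. */
    ( void ) xTimerStop( xMyTimer, pdMS_TO_TICKS( 10 ) );      /* tmrCOMMAND_STOP */
    ( void ) xTimerChangePeriod( xMyTimer,                     /* tmrCOMMAND_CHANGE_PERIOD */
                                 pdMS_TO_TICKS( 250 ),
                                 pdMS_TO_TICKS( 10 ) );
    ( void ) xTimerDelete( xMyTimer, pdMS_TO_TICKS( 10 ) );    /* tmrCOMMAND_DELETE */
}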
*/ + pxTimer->pxCallbackFunction( ( TimerHandle_t ) pxTimer ); + + if( ( pxTimer->ucStatus & tmrSTATUS_IS_AUTORELOAD ) != 0 ) + { + /* Calculate the reload value, and if the reload value results in + the timer going into the same timer list then it has already expired + and the timer should be re-inserted into the current list so it is + processed again within this loop. Otherwise a command should be sent + to restart the timer to ensure it is only inserted into a list after + the lists have been swapped. */ + xReloadTime = ( xNextExpireTime + pxTimer->xTimerPeriodInTicks ); + if( xReloadTime > xNextExpireTime ) + { + listSET_LIST_ITEM_VALUE( &( pxTimer->xTimerListItem ), xReloadTime ); + listSET_LIST_ITEM_OWNER( &( pxTimer->xTimerListItem ), pxTimer ); + vListInsert( pxCurrentTimerList, &( pxTimer->xTimerListItem ) ); + } + else + { + xResult = xTimerGenericCommand( pxTimer, tmrCOMMAND_START_DONT_TRACE, xNextExpireTime, NULL, tmrNO_DELAY ); + configASSERT( xResult ); + ( void ) xResult; + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + + pxTemp = pxCurrentTimerList; + pxCurrentTimerList = pxOverflowTimerList; + pxOverflowTimerList = pxTemp; +} +/*-----------------------------------------------------------*/ + +static void prvCheckForValidListAndQueue( void ) +{ + /* Check that the list from which active timers are referenced, and the + queue used to communicate with the timer service, have been + initialised. */ + taskENTER_CRITICAL(); + { + if( xTimerQueue == NULL ) + { + vListInitialise( &xActiveTimerList1 ); + vListInitialise( &xActiveTimerList2 ); + pxCurrentTimerList = &xActiveTimerList1; + pxOverflowTimerList = &xActiveTimerList2; + + #if( configSUPPORT_STATIC_ALLOCATION == 1 ) + { + /* The timer queue is allocated statically in case + configSUPPORT_DYNAMIC_ALLOCATION is 0. */ + static StaticQueue_t xStaticTimerQueue; /*lint !e956 Ok to declare in this manner to prevent additional conditional compilation guards in other locations. */ + static uint8_t ucStaticTimerQueueStorage[ ( size_t ) configTIMER_QUEUE_LENGTH * sizeof( DaemonTaskMessage_t ) ]; /*lint !e956 Ok to declare in this manner to prevent additional conditional compilation guards in other locations. */ + + xTimerQueue = xQueueCreateStatic( ( UBaseType_t ) configTIMER_QUEUE_LENGTH, ( UBaseType_t ) sizeof( DaemonTaskMessage_t ), &( ucStaticTimerQueueStorage[ 0 ] ), &xStaticTimerQueue ); + } + #else + { + xTimerQueue = xQueueCreate( ( UBaseType_t ) configTIMER_QUEUE_LENGTH, sizeof( DaemonTaskMessage_t ) ); + } + #endif + + #if ( configQUEUE_REGISTRY_SIZE > 0 ) + { + if( xTimerQueue != NULL ) + { + vQueueAddToRegistry( xTimerQueue, "TmrQ" ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + #endif /* configQUEUE_REGISTRY_SIZE */ + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + taskEXIT_CRITICAL(); +} +/*-----------------------------------------------------------*/ + +BaseType_t xTimerIsTimerActive( TimerHandle_t xTimer ) +{ +BaseType_t xReturn; +Timer_t *pxTimer = xTimer; + + configASSERT( xTimer ); + + /* Is the timer in the list of active timers? */ + taskENTER_CRITICAL(); + { + if( ( pxTimer->ucStatus & tmrSTATUS_IS_ACTIVE ) == 0 ) + { + xReturn = pdFALSE; + } + else + { + xReturn = pdTRUE; + } + } + taskEXIT_CRITICAL(); + + return xReturn; +} /*lint !e818 Can't be pointer to const due to the typedef. 
*/ +/*-----------------------------------------------------------*/ + +void *pvTimerGetTimerID( const TimerHandle_t xTimer ) +{ +Timer_t * const pxTimer = xTimer; +void *pvReturn; + + configASSERT( xTimer ); + + taskENTER_CRITICAL(); + { + pvReturn = pxTimer->pvTimerID; + } + taskEXIT_CRITICAL(); + + return pvReturn; +} +/*-----------------------------------------------------------*/ + +void vTimerSetTimerID( TimerHandle_t xTimer, void *pvNewID ) +{ +Timer_t * const pxTimer = xTimer; + + configASSERT( xTimer ); + + taskENTER_CRITICAL(); + { + pxTimer->pvTimerID = pvNewID; + } + taskEXIT_CRITICAL(); +} +/*-----------------------------------------------------------*/ + +#if( INCLUDE_xTimerPendFunctionCall == 1 ) + + BaseType_t xTimerPendFunctionCallFromISR( PendedFunction_t xFunctionToPend, void *pvParameter1, uint32_t ulParameter2, BaseType_t *pxHigherPriorityTaskWoken ) + { + DaemonTaskMessage_t xMessage; + BaseType_t xReturn; + + /* Complete the message with the function parameters and post it to the + daemon task. */ + xMessage.xMessageID = tmrCOMMAND_EXECUTE_CALLBACK_FROM_ISR; + xMessage.u.xCallbackParameters.pxCallbackFunction = xFunctionToPend; + xMessage.u.xCallbackParameters.pvParameter1 = pvParameter1; + xMessage.u.xCallbackParameters.ulParameter2 = ulParameter2; + + xReturn = xQueueSendFromISR( xTimerQueue, &xMessage, pxHigherPriorityTaskWoken ); + + tracePEND_FUNC_CALL_FROM_ISR( xFunctionToPend, pvParameter1, ulParameter2, xReturn ); + + return xReturn; + } + +#endif /* INCLUDE_xTimerPendFunctionCall */ +/*-----------------------------------------------------------*/ + +#if( INCLUDE_xTimerPendFunctionCall == 1 ) + + BaseType_t xTimerPendFunctionCall( PendedFunction_t xFunctionToPend, void *pvParameter1, uint32_t ulParameter2, TickType_t xTicksToWait ) + { + DaemonTaskMessage_t xMessage; + BaseType_t xReturn; + + /* This function can only be called after a timer has been created or + after the scheduler has been started because, until then, the timer + queue does not exist. */ + configASSERT( xTimerQueue ); + + /* Complete the message with the function parameters and post it to the + daemon task. */ + xMessage.xMessageID = tmrCOMMAND_EXECUTE_CALLBACK; + xMessage.u.xCallbackParameters.pxCallbackFunction = xFunctionToPend; + xMessage.u.xCallbackParameters.pvParameter1 = pvParameter1; + xMessage.u.xCallbackParameters.ulParameter2 = ulParameter2; + + xReturn = xQueueSendToBack( xTimerQueue, &xMessage, xTicksToWait ); + + tracePEND_FUNC_CALL( xFunctionToPend, pvParameter1, ulParameter2, xReturn ); + + return xReturn; + } + +#endif /* INCLUDE_xTimerPendFunctionCall */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TRACE_FACILITY == 1 ) + + UBaseType_t uxTimerGetTimerNumber( TimerHandle_t xTimer ) + { + return ( ( Timer_t * ) xTimer )->uxTimerNumber; + } + +#endif /* configUSE_TRACE_FACILITY */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TRACE_FACILITY == 1 ) + + void vTimerSetTimerNumber( TimerHandle_t xTimer, UBaseType_t uxTimerNumber ) + { + ( ( Timer_t * ) xTimer )->uxTimerNumber = uxTimerNumber; + } + +#endif /* configUSE_TRACE_FACILITY */ +/*-----------------------------------------------------------*/ + +/* This entire source file will be skipped if the application is not configured +to include software timer functionality. If you want to include software timer +functionality then ensure configUSE_TIMERS is set to 1 in FreeRTOSConfig.h. 
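xTimerPendFunctionCallFromISR() above is the standard way to defer work that is too long for an interrupt handler to the timer service task. A minimal sketch; the handler and work-function names are illustrative:

#include "FreeRTOS.h"
#include "timers.h"

/* Executed later in the timer service task context, not in the ISR. */
static void prvDeferredWork( void *pvParameter1, uint32_t ulParameter2 )
{
    ( void ) pvParameter1;
    ( void ) ulParameter2;
    /* ... lengthy processing deferred out of the interrupt ... */
}

/* Illustrative interrupt handler name. */
void DMA1_Stream0_IRQHandler( void )
{
    BaseType_t xHigherPriorityTaskWoken = pdFALSE;

    ( void ) xTimerPendFunctionCallFromISR( prvDeferredWork, NULL, 0,
                                            &xHigherPriorityTaskWoken );
    portYIELD_FROM_ISR( xHigherPriorityTaskWoken );
}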
*/ +#endif /* configUSE_TIMERS == 1 */ + + + diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/api/api_lib.c b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/api/api_lib.c new file mode 100644 index 0000000..9a30ec0 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/api/api_lib.c @@ -0,0 +1,1367 @@ +/** + * @file + * Sequential API External module + * + * @defgroup netconn Netconn API + * @ingroup sequential_api + * Thread-safe, to be called from non-TCPIP threads only. + * TX/RX handling based on @ref netbuf (containing @ref pbuf) + * to avoid copying data around. + * + * @defgroup netconn_common Common functions + * @ingroup netconn + * For use with TCP and UDP + * + * @defgroup netconn_tcp TCP only + * @ingroup netconn + * TCP only functions + * + * @defgroup netconn_udp UDP only + * @ingroup netconn + * UDP only functions + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
+ * + * Author: Adam Dunkels + */ + +/* This is the part of the API that is linked with + the application */ + +#include "lwip/opt.h" + +#if LWIP_NETCONN /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/api.h" +#include "lwip/memp.h" + +#include "lwip/ip.h" +#include "lwip/raw.h" +#include "lwip/udp.h" +#include "lwip/priv/api_msg.h" +#include "lwip/priv/tcp_priv.h" +#include "lwip/priv/tcpip_priv.h" + +#ifdef LWIP_HOOK_FILENAME +#include LWIP_HOOK_FILENAME +#endif + +#include + +#define API_MSG_VAR_REF(name) API_VAR_REF(name) +#define API_MSG_VAR_DECLARE(name) API_VAR_DECLARE(struct api_msg, name) +#define API_MSG_VAR_ALLOC(name) API_VAR_ALLOC(struct api_msg, MEMP_API_MSG, name, ERR_MEM) +#define API_MSG_VAR_ALLOC_RETURN_NULL(name) API_VAR_ALLOC(struct api_msg, MEMP_API_MSG, name, NULL) +#define API_MSG_VAR_FREE(name) API_VAR_FREE(MEMP_API_MSG, name) + +#if TCP_LISTEN_BACKLOG +/* need to allocate API message for accept so empty message pool does not result in event loss + * see bug #47512: MPU_COMPATIBLE may fail on empty pool */ +#define API_MSG_VAR_ALLOC_ACCEPT(msg) API_MSG_VAR_ALLOC(msg) +#define API_MSG_VAR_FREE_ACCEPT(msg) API_MSG_VAR_FREE(msg) +#else /* TCP_LISTEN_BACKLOG */ +#define API_MSG_VAR_ALLOC_ACCEPT(msg) +#define API_MSG_VAR_FREE_ACCEPT(msg) +#endif /* TCP_LISTEN_BACKLOG */ + +#if LWIP_NETCONN_FULLDUPLEX +#define NETCONN_RECVMBOX_WAITABLE(conn) (sys_mbox_valid(&(conn)->recvmbox) && (((conn)->flags & NETCONN_FLAG_MBOXINVALID) == 0)) +#define NETCONN_ACCEPTMBOX_WAITABLE(conn) (sys_mbox_valid(&(conn)->acceptmbox) && (((conn)->flags & (NETCONN_FLAG_MBOXCLOSED|NETCONN_FLAG_MBOXINVALID)) == 0)) +#define NETCONN_MBOX_WAITING_INC(conn) SYS_ARCH_INC(conn->mbox_threads_waiting, 1) +#define NETCONN_MBOX_WAITING_DEC(conn) SYS_ARCH_DEC(conn->mbox_threads_waiting, 1) +#else /* LWIP_NETCONN_FULLDUPLEX */ +#define NETCONN_RECVMBOX_WAITABLE(conn) sys_mbox_valid(&(conn)->recvmbox) +#define NETCONN_ACCEPTMBOX_WAITABLE(conn) (sys_mbox_valid(&(conn)->acceptmbox) && (((conn)->flags & NETCONN_FLAG_MBOXCLOSED) == 0)) +#define NETCONN_MBOX_WAITING_INC(conn) +#define NETCONN_MBOX_WAITING_DEC(conn) +#endif /* LWIP_NETCONN_FULLDUPLEX */ + +static err_t netconn_close_shutdown(struct netconn *conn, u8_t how); + +/** + * Call the lower part of a netconn_* function + * This function is then running in the thread context + * of tcpip_thread and has exclusive access to lwIP core code. + * + * @param fn function to call + * @param apimsg a struct containing the function to call and its parameters + * @return ERR_OK if the function was called, another err_t if not + */ +static err_t +netconn_apimsg(tcpip_callback_fn fn, struct api_msg *apimsg) +{ + err_t err; + +#ifdef LWIP_DEBUG + /* catch functions that don't set err */ + apimsg->err = ERR_VAL; +#endif /* LWIP_DEBUG */ + +#if LWIP_NETCONN_SEM_PER_THREAD + apimsg->op_completed_sem = LWIP_NETCONN_THREAD_SEM_GET(); +#endif /* LWIP_NETCONN_SEM_PER_THREAD */ + + err = tcpip_send_msg_wait_sem(fn, apimsg, LWIP_API_MSG_SEM(apimsg)); + if (err == ERR_OK) { + return apimsg->err; + } + return err; +} + +/** + * Create a new netconn (of a specific type) that has a callback function. + * The corresponding pcb is also created. 
+ * + * @param t the type of 'connection' to create (@see enum netconn_type) + * @param proto the IP protocol for RAW IP pcbs + * @param callback a function to call on status changes (RX available, TX'ed) + * @return a newly allocated struct netconn or + * NULL on memory error + */ +struct netconn * +netconn_new_with_proto_and_callback(enum netconn_type t, u8_t proto, netconn_callback callback) +{ + struct netconn *conn; + API_MSG_VAR_DECLARE(msg); + API_MSG_VAR_ALLOC_RETURN_NULL(msg); + + conn = netconn_alloc(t, callback); + if (conn != NULL) { + err_t err; + + API_MSG_VAR_REF(msg).msg.n.proto = proto; + API_MSG_VAR_REF(msg).conn = conn; + err = netconn_apimsg(lwip_netconn_do_newconn, &API_MSG_VAR_REF(msg)); + if (err != ERR_OK) { + LWIP_ASSERT("freeing conn without freeing pcb", conn->pcb.tcp == NULL); + LWIP_ASSERT("conn has no recvmbox", sys_mbox_valid(&conn->recvmbox)); +#if LWIP_TCP + LWIP_ASSERT("conn->acceptmbox shouldn't exist", !sys_mbox_valid(&conn->acceptmbox)); +#endif /* LWIP_TCP */ +#if !LWIP_NETCONN_SEM_PER_THREAD + LWIP_ASSERT("conn has no op_completed", sys_sem_valid(&conn->op_completed)); + sys_sem_free(&conn->op_completed); +#endif /* !LWIP_NETCONN_SEM_PER_THREAD */ + sys_mbox_free(&conn->recvmbox); + memp_free(MEMP_NETCONN, conn); + API_MSG_VAR_FREE(msg); + return NULL; + } + } + API_MSG_VAR_FREE(msg); + return conn; +} + +/** + * @ingroup netconn_common + * Close a netconn 'connection' and free all its resources but not the netconn itself. + * UDP and RAW connection are completely closed, TCP pcbs might still be in a waitstate + * after this returns. + * + * @param conn the netconn to delete + * @return ERR_OK if the connection was deleted + */ +err_t +netconn_prepare_delete(struct netconn *conn) +{ + err_t err; + API_MSG_VAR_DECLARE(msg); + + /* No ASSERT here because possible to get a (conn == NULL) if we got an accept error */ + if (conn == NULL) { + return ERR_OK; + } + + API_MSG_VAR_ALLOC(msg); + API_MSG_VAR_REF(msg).conn = conn; +#if LWIP_SO_SNDTIMEO || LWIP_SO_LINGER + /* get the time we started, which is later compared to + sys_now() + conn->send_timeout */ + API_MSG_VAR_REF(msg).msg.sd.time_started = sys_now(); +#else /* LWIP_SO_SNDTIMEO || LWIP_SO_LINGER */ +#if LWIP_TCP + API_MSG_VAR_REF(msg).msg.sd.polls_left = + ((LWIP_TCP_CLOSE_TIMEOUT_MS_DEFAULT + TCP_SLOW_INTERVAL - 1) / TCP_SLOW_INTERVAL) + 1; +#endif /* LWIP_TCP */ +#endif /* LWIP_SO_SNDTIMEO || LWIP_SO_LINGER */ + err = netconn_apimsg(lwip_netconn_do_delconn, &API_MSG_VAR_REF(msg)); + API_MSG_VAR_FREE(msg); + + if (err != ERR_OK) { + return err; + } + return ERR_OK; +} + +/** + * @ingroup netconn_common + * Close a netconn 'connection' and free its resources. + * UDP and RAW connection are completely closed, TCP pcbs might still be in a waitstate + * after this returns. + * + * @param conn the netconn to delete + * @return ERR_OK if the connection was deleted + */ +err_t +netconn_delete(struct netconn *conn) +{ + err_t err; + + /* No ASSERT here because possible to get a (conn == NULL) if we got an accept error */ + if (conn == NULL) { + return ERR_OK; + } + +#if LWIP_NETCONN_FULLDUPLEX + if (conn->flags & NETCONN_FLAG_MBOXINVALID) { + /* Already called netconn_prepare_delete() before */ + err = ERR_OK; + } else +#endif /* LWIP_NETCONN_FULLDUPLEX */ + { + err = netconn_prepare_delete(conn); + } + if (err == ERR_OK) { + netconn_free(conn); + } + return err; +} + +/** + * Get the local or remote IP address and port of a netconn. + * For RAW netconns, this returns the protocol instead of a port! 
+ * + * @param conn the netconn to query + * @param addr a pointer to which to save the IP address + * @param port a pointer to which to save the port (or protocol for RAW) + * @param local 1 to get the local IP address, 0 to get the remote one + * @return ERR_CONN for invalid connections + * ERR_OK if the information was retrieved + */ +err_t +netconn_getaddr(struct netconn *conn, ip_addr_t *addr, u16_t *port, u8_t local) +{ + API_MSG_VAR_DECLARE(msg); + err_t err; + + LWIP_ERROR("netconn_getaddr: invalid conn", (conn != NULL), return ERR_ARG;); + LWIP_ERROR("netconn_getaddr: invalid addr", (addr != NULL), return ERR_ARG;); + LWIP_ERROR("netconn_getaddr: invalid port", (port != NULL), return ERR_ARG;); + + API_MSG_VAR_ALLOC(msg); + API_MSG_VAR_REF(msg).conn = conn; + API_MSG_VAR_REF(msg).msg.ad.local = local; +#if LWIP_MPU_COMPATIBLE + err = netconn_apimsg(lwip_netconn_do_getaddr, &API_MSG_VAR_REF(msg)); + *addr = msg->msg.ad.ipaddr; + *port = msg->msg.ad.port; +#else /* LWIP_MPU_COMPATIBLE */ + msg.msg.ad.ipaddr = addr; + msg.msg.ad.port = port; + err = netconn_apimsg(lwip_netconn_do_getaddr, &msg); +#endif /* LWIP_MPU_COMPATIBLE */ + API_MSG_VAR_FREE(msg); + + return err; +} + +/** + * @ingroup netconn_common + * Bind a netconn to a specific local IP address and port. + * Binding one netconn twice might not always be checked correctly! + * + * @param conn the netconn to bind + * @param addr the local IP address to bind the netconn to + * (use IP4_ADDR_ANY/IP6_ADDR_ANY to bind to all addresses) + * @param port the local port to bind the netconn to (not used for RAW) + * @return ERR_OK if bound, any other err_t on failure + */ +err_t +netconn_bind(struct netconn *conn, const ip_addr_t *addr, u16_t port) +{ + API_MSG_VAR_DECLARE(msg); + err_t err; + + LWIP_ERROR("netconn_bind: invalid conn", (conn != NULL), return ERR_ARG;); + +#if LWIP_IPV4 + /* Don't propagate NULL pointer (IP_ADDR_ANY alias) to subsequent functions */ + if (addr == NULL) { + addr = IP4_ADDR_ANY; + } +#endif /* LWIP_IPV4 */ + +#if LWIP_IPV4 && LWIP_IPV6 + /* "Socket API like" dual-stack support: If IP to bind to is IP6_ADDR_ANY, + * and NETCONN_FLAG_IPV6_V6ONLY is 0, use IP_ANY_TYPE to bind + */ + if ((netconn_get_ipv6only(conn) == 0) && + ip_addr_cmp(addr, IP6_ADDR_ANY)) { + addr = IP_ANY_TYPE; + } +#endif /* LWIP_IPV4 && LWIP_IPV6 */ + + API_MSG_VAR_ALLOC(msg); + API_MSG_VAR_REF(msg).conn = conn; + API_MSG_VAR_REF(msg).msg.bc.ipaddr = API_MSG_VAR_REF(addr); + API_MSG_VAR_REF(msg).msg.bc.port = port; + err = netconn_apimsg(lwip_netconn_do_bind, &API_MSG_VAR_REF(msg)); + API_MSG_VAR_FREE(msg); + + return err; +} + +/** + * @ingroup netconn_common + * Bind a netconn to a specific interface and port. + * Binding one netconn twice might not always be checked correctly! + * + * @param conn the netconn to bind + * @param if_idx the local interface index to bind the netconn to + * @return ERR_OK if bound, any other err_t on failure + */ +err_t +netconn_bind_if(struct netconn *conn, u8_t if_idx) +{ + API_MSG_VAR_DECLARE(msg); + err_t err; + + LWIP_ERROR("netconn_bind_if: invalid conn", (conn != NULL), return ERR_ARG;); + + API_MSG_VAR_ALLOC(msg); + API_MSG_VAR_REF(msg).conn = conn; + API_MSG_VAR_REF(msg).msg.bc.if_idx = if_idx; + err = netconn_apimsg(lwip_netconn_do_bind_if, &API_MSG_VAR_REF(msg)); + API_MSG_VAR_FREE(msg); + + return err; +} + +/** + * @ingroup netconn_common + * Connect a netconn to a specific remote IP address and port. 
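+ *
+ * A minimal TCP client sketch (illustrative only; the address and port are
+ * made up and error handling is abbreviated):
+ *
+ *   ip_addr_t remote;
+ *   struct netconn *conn = netconn_new(NETCONN_TCP);
+ *   IP_ADDR4(&remote, 192, 168, 1, 10);
+ *   if ((conn != NULL) && (netconn_connect(conn, &remote, 7) == ERR_OK)) {
+ *     // connected: data can now be exchanged via netconn_write()/netconn_recv()
+ *   }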
+ * + * @param conn the netconn to connect + * @param addr the remote IP address to connect to + * @param port the remote port to connect to (no used for RAW) + * @return ERR_OK if connected, return value of tcp_/udp_/raw_connect otherwise + */ +err_t +netconn_connect(struct netconn *conn, const ip_addr_t *addr, u16_t port) +{ + API_MSG_VAR_DECLARE(msg); + err_t err; + + LWIP_ERROR("netconn_connect: invalid conn", (conn != NULL), return ERR_ARG;); + +#if LWIP_IPV4 + /* Don't propagate NULL pointer (IP_ADDR_ANY alias) to subsequent functions */ + if (addr == NULL) { + addr = IP4_ADDR_ANY; + } +#endif /* LWIP_IPV4 */ + + API_MSG_VAR_ALLOC(msg); + API_MSG_VAR_REF(msg).conn = conn; + API_MSG_VAR_REF(msg).msg.bc.ipaddr = API_MSG_VAR_REF(addr); + API_MSG_VAR_REF(msg).msg.bc.port = port; + err = netconn_apimsg(lwip_netconn_do_connect, &API_MSG_VAR_REF(msg)); + API_MSG_VAR_FREE(msg); + + return err; +} + +/** + * @ingroup netconn_udp + * Disconnect a netconn from its current peer (only valid for UDP netconns). + * + * @param conn the netconn to disconnect + * @return See @ref err_t + */ +err_t +netconn_disconnect(struct netconn *conn) +{ + API_MSG_VAR_DECLARE(msg); + err_t err; + + LWIP_ERROR("netconn_disconnect: invalid conn", (conn != NULL), return ERR_ARG;); + + API_MSG_VAR_ALLOC(msg); + API_MSG_VAR_REF(msg).conn = conn; + err = netconn_apimsg(lwip_netconn_do_disconnect, &API_MSG_VAR_REF(msg)); + API_MSG_VAR_FREE(msg); + + return err; +} + +/** + * @ingroup netconn_tcp + * Set a TCP netconn into listen mode + * + * @param conn the tcp netconn to set to listen mode + * @param backlog the listen backlog, only used if TCP_LISTEN_BACKLOG==1 + * @return ERR_OK if the netconn was set to listen (UDP and RAW netconns + * don't return any error (yet?)) + */ +err_t +netconn_listen_with_backlog(struct netconn *conn, u8_t backlog) +{ +#if LWIP_TCP + API_MSG_VAR_DECLARE(msg); + err_t err; + + /* This does no harm. If TCP_LISTEN_BACKLOG is off, backlog is unused. */ + LWIP_UNUSED_ARG(backlog); + + LWIP_ERROR("netconn_listen: invalid conn", (conn != NULL), return ERR_ARG;); + + API_MSG_VAR_ALLOC(msg); + API_MSG_VAR_REF(msg).conn = conn; +#if TCP_LISTEN_BACKLOG + API_MSG_VAR_REF(msg).msg.lb.backlog = backlog; +#endif /* TCP_LISTEN_BACKLOG */ + err = netconn_apimsg(lwip_netconn_do_listen, &API_MSG_VAR_REF(msg)); + API_MSG_VAR_FREE(msg); + + return err; +#else /* LWIP_TCP */ + LWIP_UNUSED_ARG(conn); + LWIP_UNUSED_ARG(backlog); + return ERR_ARG; +#endif /* LWIP_TCP */ +} + +/** + * @ingroup netconn_tcp + * Accept a new connection on a TCP listening netconn. 
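+ *
+ * A typical listen/accept loop looks roughly as follows (a sketch; the port
+ * and handle_client() are placeholders and return values are unchecked):
+ *
+ *   struct netconn *listener = netconn_new(NETCONN_TCP);
+ *   struct netconn *client;
+ *   netconn_bind(listener, IP_ADDR_ANY, 80);
+ *   netconn_listen(listener);
+ *   while (netconn_accept(listener, &client) == ERR_OK) {
+ *     handle_client(client);   // application-defined connection handler
+ *     netconn_delete(client);
+ *   }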
+ * + * @param conn the TCP listen netconn + * @param new_conn pointer where the new connection is stored + * @return ERR_OK if a new connection has been received or an error + * code otherwise + */ +err_t +netconn_accept(struct netconn *conn, struct netconn **new_conn) +{ +#if LWIP_TCP + err_t err; + void *accept_ptr; + struct netconn *newconn; +#if TCP_LISTEN_BACKLOG + API_MSG_VAR_DECLARE(msg); +#endif /* TCP_LISTEN_BACKLOG */ + + LWIP_ERROR("netconn_accept: invalid pointer", (new_conn != NULL), return ERR_ARG;); + *new_conn = NULL; + LWIP_ERROR("netconn_accept: invalid conn", (conn != NULL), return ERR_ARG;); + + /* NOTE: Although the opengroup spec says a pending error shall be returned to + send/recv/getsockopt(SO_ERROR) only, we return it for listening + connections also, to handle embedded-system errors */ + err = netconn_err(conn); + if (err != ERR_OK) { + /* return pending error */ + return err; + } + if (!NETCONN_ACCEPTMBOX_WAITABLE(conn)) { + /* don't accept if closed: this might block the application task + waiting on acceptmbox forever! */ + return ERR_CLSD; + } + + API_MSG_VAR_ALLOC_ACCEPT(msg); + + NETCONN_MBOX_WAITING_INC(conn); + if (netconn_is_nonblocking(conn)) { + if (sys_arch_mbox_tryfetch(&conn->acceptmbox, &accept_ptr) == SYS_ARCH_TIMEOUT) { + API_MSG_VAR_FREE_ACCEPT(msg); + NETCONN_MBOX_WAITING_DEC(conn); + return ERR_WOULDBLOCK; + } + } else { +#if LWIP_SO_RCVTIMEO + if (sys_arch_mbox_fetch(&conn->acceptmbox, &accept_ptr, conn->recv_timeout) == SYS_ARCH_TIMEOUT) { + API_MSG_VAR_FREE_ACCEPT(msg); + NETCONN_MBOX_WAITING_DEC(conn); + return ERR_TIMEOUT; + } +#else + sys_arch_mbox_fetch(&conn->acceptmbox, &accept_ptr, 0); +#endif /* LWIP_SO_RCVTIMEO*/ + } + NETCONN_MBOX_WAITING_DEC(conn); +#if LWIP_NETCONN_FULLDUPLEX + if (conn->flags & NETCONN_FLAG_MBOXINVALID) { + if (lwip_netconn_is_deallocated_msg(accept_ptr)) { + /* the netconn has been closed from another thread */ + API_MSG_VAR_FREE_ACCEPT(msg); + return ERR_CONN; + } + } +#endif + + /* Register event with callback */ + API_EVENT(conn, NETCONN_EVT_RCVMINUS, 0); + + if (lwip_netconn_is_err_msg(accept_ptr, &err)) { + /* a connection has been aborted: e.g. out of pcbs or out of netconns during accept */ + API_MSG_VAR_FREE_ACCEPT(msg); + return err; + } + if (accept_ptr == NULL) { + /* connection has been aborted */ + API_MSG_VAR_FREE_ACCEPT(msg); + return ERR_CLSD; + } + newconn = (struct netconn *)accept_ptr; +#if TCP_LISTEN_BACKLOG + /* Let the stack know that we have accepted the connection. */ + API_MSG_VAR_REF(msg).conn = newconn; + /* don't care for the return value of lwip_netconn_do_recv */ + netconn_apimsg(lwip_netconn_do_accepted, &API_MSG_VAR_REF(msg)); + API_MSG_VAR_FREE(msg); +#endif /* TCP_LISTEN_BACKLOG */ + + *new_conn = newconn; + /* don't set conn->last_err: it's only ERR_OK, anyway */ + return ERR_OK; +#else /* LWIP_TCP */ + LWIP_UNUSED_ARG(conn); + LWIP_UNUSED_ARG(new_conn); + return ERR_ARG; +#endif /* LWIP_TCP */ +} + +/** + * @ingroup netconn_common + * Receive data: actual implementation that doesn't care whether pbuf or netbuf + * is received (this is internal, it's just here for describing common errors) + * + * @param conn the netconn from which to receive data + * @param new_buf pointer where a new pbuf/netbuf is stored when received data + * @param apiflags flags that control function behaviour. 
For now only: + * - NETCONN_DONTBLOCK: only read data that is available now, don't wait for more data + * @return ERR_OK if data has been received, an error code otherwise (timeout, + * memory error or another error) + * ERR_CONN if not connected + * ERR_CLSD if TCP connection has been closed + * ERR_WOULDBLOCK if the netconn is nonblocking but would block to wait for data + * ERR_TIMEOUT if the netconn has a receive timeout and no data was received + */ +static err_t +netconn_recv_data(struct netconn *conn, void **new_buf, u8_t apiflags) +{ + void *buf = NULL; + u16_t len; + + LWIP_ERROR("netconn_recv: invalid pointer", (new_buf != NULL), return ERR_ARG;); + *new_buf = NULL; + LWIP_ERROR("netconn_recv: invalid conn", (conn != NULL), return ERR_ARG;); + + if (!NETCONN_RECVMBOX_WAITABLE(conn)) { + err_t err = netconn_err(conn); + if (err != ERR_OK) { + /* return pending error */ + return err; + } + return ERR_CONN; + } + + NETCONN_MBOX_WAITING_INC(conn); + if (netconn_is_nonblocking(conn) || (apiflags & NETCONN_DONTBLOCK) || + (conn->flags & NETCONN_FLAG_MBOXCLOSED) || (conn->pending_err != ERR_OK)) { + if (sys_arch_mbox_tryfetch(&conn->recvmbox, &buf) == SYS_ARCH_TIMEOUT) { + err_t err; + NETCONN_MBOX_WAITING_DEC(conn); + err = netconn_err(conn); + if (err != ERR_OK) { + /* return pending error */ + return err; + } + if (conn->flags & NETCONN_FLAG_MBOXCLOSED) { + return ERR_CONN; + } + return ERR_WOULDBLOCK; + } + } else { +#if LWIP_SO_RCVTIMEO + if (sys_arch_mbox_fetch(&conn->recvmbox, &buf, conn->recv_timeout) == SYS_ARCH_TIMEOUT) { + NETCONN_MBOX_WAITING_DEC(conn); + return ERR_TIMEOUT; + } +#else + sys_arch_mbox_fetch(&conn->recvmbox, &buf, 0); +#endif /* LWIP_SO_RCVTIMEO*/ + } + NETCONN_MBOX_WAITING_DEC(conn); +#if LWIP_NETCONN_FULLDUPLEX + if (conn->flags & NETCONN_FLAG_MBOXINVALID) { + if (lwip_netconn_is_deallocated_msg(buf)) { + /* the netconn has been closed from another thread */ + API_MSG_VAR_FREE_ACCEPT(msg); + return ERR_CONN; + } + } +#endif + +#if LWIP_TCP +#if (LWIP_UDP || LWIP_RAW) + if (NETCONNTYPE_GROUP(conn->type) == NETCONN_TCP) +#endif /* (LWIP_UDP || LWIP_RAW) */ + { + err_t err; + /* Check if this is an error message or a pbuf */ + if (lwip_netconn_is_err_msg(buf, &err)) { + /* new_buf has been zeroed above already */ + if (err == ERR_CLSD) { + /* connection closed translates to ERR_OK with *new_buf == NULL */ + return ERR_OK; + } + return err; + } + len = ((struct pbuf *)buf)->tot_len; + } +#endif /* LWIP_TCP */ +#if LWIP_TCP && (LWIP_UDP || LWIP_RAW) + else +#endif /* LWIP_TCP && (LWIP_UDP || LWIP_RAW) */ +#if (LWIP_UDP || LWIP_RAW) + { + LWIP_ASSERT("buf != NULL", buf != NULL); + len = netbuf_len((struct netbuf *)buf); + } +#endif /* (LWIP_UDP || LWIP_RAW) */ + +#if LWIP_SO_RCVBUF + SYS_ARCH_DEC(conn->recv_avail, len); +#endif /* LWIP_SO_RCVBUF */ + /* Register event with callback */ + API_EVENT(conn, NETCONN_EVT_RCVMINUS, len); + + LWIP_DEBUGF(API_LIB_DEBUG, ("netconn_recv_data: received %p, len=%"U16_F"\n", buf, len)); + + *new_buf = buf; + /* don't set conn->last_err: it's only ERR_OK, anyway */ + return ERR_OK; +} + +#if LWIP_TCP +static err_t +netconn_tcp_recvd_msg(struct netconn *conn, size_t len, struct api_msg *msg) +{ + LWIP_ERROR("netconn_recv_tcp_pbuf: invalid conn", (conn != NULL) && + NETCONNTYPE_GROUP(netconn_type(conn)) == NETCONN_TCP, return ERR_ARG;); + + msg->conn = conn; + msg->msg.r.len = len; + + return netconn_apimsg(lwip_netconn_do_recv, msg); +} + +err_t +netconn_tcp_recvd(struct netconn *conn, size_t len) +{ + err_t err; + 
API_MSG_VAR_DECLARE(msg); + LWIP_ERROR("netconn_recv_tcp_pbuf: invalid conn", (conn != NULL) && + NETCONNTYPE_GROUP(netconn_type(conn)) == NETCONN_TCP, return ERR_ARG;); + + API_MSG_VAR_ALLOC(msg); + err = netconn_tcp_recvd_msg(conn, len, &API_VAR_REF(msg)); + API_MSG_VAR_FREE(msg); + return err; +} + +static err_t +netconn_recv_data_tcp(struct netconn *conn, struct pbuf **new_buf, u8_t apiflags) +{ + err_t err; + struct pbuf *buf; + API_MSG_VAR_DECLARE(msg); +#if LWIP_MPU_COMPATIBLE + msg = NULL; +#endif + + if (!NETCONN_RECVMBOX_WAITABLE(conn)) { + /* This only happens when calling this function more than once *after* receiving FIN */ + return ERR_CONN; + } + if (netconn_is_flag_set(conn, NETCONN_FIN_RX_PENDING)) { + netconn_clear_flags(conn, NETCONN_FIN_RX_PENDING); + goto handle_fin; + } + + if (!(apiflags & NETCONN_NOAUTORCVD)) { + /* need to allocate API message here so empty message pool does not result in event loss + * see bug #47512: MPU_COMPATIBLE may fail on empty pool */ + API_MSG_VAR_ALLOC(msg); + } + + err = netconn_recv_data(conn, (void **)new_buf, apiflags); + if (err != ERR_OK) { + if (!(apiflags & NETCONN_NOAUTORCVD)) { + API_MSG_VAR_FREE(msg); + } + return err; + } + buf = *new_buf; + if (!(apiflags & NETCONN_NOAUTORCVD)) { + /* Let the stack know that we have taken the data. */ + u16_t len = buf ? buf->tot_len : 1; + /* don't care for the return value of lwip_netconn_do_recv */ + /* @todo: this should really be fixed, e.g. by retrying in poll on error */ + netconn_tcp_recvd_msg(conn, len, &API_VAR_REF(msg)); + API_MSG_VAR_FREE(msg); + } + + /* If we are closed, we indicate that we no longer wish to use the socket */ + if (buf == NULL) { + if (apiflags & NETCONN_NOFIN) { + /* received a FIN but the caller cannot handle it right now: + re-enqueue it and return "no data" */ + netconn_set_flags(conn, NETCONN_FIN_RX_PENDING); + return ERR_WOULDBLOCK; + } else { +handle_fin: + API_EVENT(conn, NETCONN_EVT_RCVMINUS, 0); + if (conn->pcb.ip == NULL) { + /* race condition: RST during recv */ + err = netconn_err(conn); + if (err != ERR_OK) { + return err; + } + return ERR_RST; + } + /* RX side is closed, so deallocate the recvmbox */ + netconn_close_shutdown(conn, NETCONN_SHUT_RD); + /* Don' store ERR_CLSD as conn->err since we are only half-closed */ + return ERR_CLSD; + } + } + return err; +} + +/** + * @ingroup netconn_tcp + * Receive data (in form of a pbuf) from a TCP netconn + * + * @param conn the netconn from which to receive data + * @param new_buf pointer where a new pbuf is stored when received data + * @return ERR_OK if data has been received, an error code otherwise (timeout, + * memory error or another error, @see netconn_recv_data) + * ERR_ARG if conn is not a TCP netconn + */ +err_t +netconn_recv_tcp_pbuf(struct netconn *conn, struct pbuf **new_buf) +{ + LWIP_ERROR("netconn_recv_tcp_pbuf: invalid conn", (conn != NULL) && + NETCONNTYPE_GROUP(netconn_type(conn)) == NETCONN_TCP, return ERR_ARG;); + + return netconn_recv_data_tcp(conn, new_buf, 0); +} + +/** + * @ingroup netconn_tcp + * Receive data (in form of a pbuf) from a TCP netconn + * + * @param conn the netconn from which to receive data + * @param new_buf pointer where a new pbuf is stored when received data + * @param apiflags flags that control function behaviour. 
For now only: + * - NETCONN_DONTBLOCK: only read data that is available now, don't wait for more data + * @return ERR_OK if data has been received, an error code otherwise (timeout, + * memory error or another error, @see netconn_recv_data) + * ERR_ARG if conn is not a TCP netconn + */ +err_t +netconn_recv_tcp_pbuf_flags(struct netconn *conn, struct pbuf **new_buf, u8_t apiflags) +{ + LWIP_ERROR("netconn_recv_tcp_pbuf: invalid conn", (conn != NULL) && + NETCONNTYPE_GROUP(netconn_type(conn)) == NETCONN_TCP, return ERR_ARG;); + + return netconn_recv_data_tcp(conn, new_buf, apiflags); +} +#endif /* LWIP_TCP */ + +/** + * Receive data (in form of a netbuf) from a UDP or RAW netconn + * + * @param conn the netconn from which to receive data + * @param new_buf pointer where a new netbuf is stored when received data + * @return ERR_OK if data has been received, an error code otherwise (timeout, + * memory error or another error) + * ERR_ARG if conn is not a UDP/RAW netconn + */ +err_t +netconn_recv_udp_raw_netbuf(struct netconn *conn, struct netbuf **new_buf) +{ + LWIP_ERROR("netconn_recv_udp_raw_netbuf: invalid conn", (conn != NULL) && + NETCONNTYPE_GROUP(netconn_type(conn)) != NETCONN_TCP, return ERR_ARG;); + + return netconn_recv_data(conn, (void **)new_buf, 0); +} + +/** + * Receive data (in form of a netbuf) from a UDP or RAW netconn + * + * @param conn the netconn from which to receive data + * @param new_buf pointer where a new netbuf is stored when received data + * @param apiflags flags that control function behaviour. For now only: + * - NETCONN_DONTBLOCK: only read data that is available now, don't wait for more data + * @return ERR_OK if data has been received, an error code otherwise (timeout, + * memory error or another error) + * ERR_ARG if conn is not a UDP/RAW netconn + */ +err_t +netconn_recv_udp_raw_netbuf_flags(struct netconn *conn, struct netbuf **new_buf, u8_t apiflags) +{ + LWIP_ERROR("netconn_recv_udp_raw_netbuf: invalid conn", (conn != NULL) && + NETCONNTYPE_GROUP(netconn_type(conn)) != NETCONN_TCP, return ERR_ARG;); + + return netconn_recv_data(conn, (void **)new_buf, apiflags); +} + +/** + * @ingroup netconn_common + * Receive data (in form of a netbuf containing a packet buffer) from a netconn + * + * @param conn the netconn from which to receive data + * @param new_buf pointer where a new netbuf is stored when received data + * @return ERR_OK if data has been received, an error code otherwise (timeout, + * memory error or another error) + */ +err_t +netconn_recv(struct netconn *conn, struct netbuf **new_buf) +{ +#if LWIP_TCP + struct netbuf *buf = NULL; + err_t err; +#endif /* LWIP_TCP */ + + LWIP_ERROR("netconn_recv: invalid pointer", (new_buf != NULL), return ERR_ARG;); + *new_buf = NULL; + LWIP_ERROR("netconn_recv: invalid conn", (conn != NULL), return ERR_ARG;); + +#if LWIP_TCP +#if (LWIP_UDP || LWIP_RAW) + if (NETCONNTYPE_GROUP(conn->type) == NETCONN_TCP) +#endif /* (LWIP_UDP || LWIP_RAW) */ + { + struct pbuf *p = NULL; + /* This is not a listening netconn, since recvmbox is set */ + + buf = (struct netbuf *)memp_malloc(MEMP_NETBUF); + if (buf == NULL) { + return ERR_MEM; + } + + err = netconn_recv_data_tcp(conn, &p, 0); + if (err != ERR_OK) { + memp_free(MEMP_NETBUF, buf); + return err; + } + LWIP_ASSERT("p != NULL", p != NULL); + + buf->p = p; + buf->ptr = p; + buf->port = 0; + ip_addr_set_zero(&buf->addr); + *new_buf = buf; + /* don't set conn->last_err: it's only ERR_OK, anyway */ + return ERR_OK; + } +#endif /* LWIP_TCP */ +#if LWIP_TCP && (LWIP_UDP || 
LWIP_RAW) + else +#endif /* LWIP_TCP && (LWIP_UDP || LWIP_RAW) */ + { +#if (LWIP_UDP || LWIP_RAW) + return netconn_recv_data(conn, (void **)new_buf, 0); +#endif /* (LWIP_UDP || LWIP_RAW) */ + } +} + +/** + * @ingroup netconn_udp + * Send data (in form of a netbuf) to a specific remote IP address and port. + * Only to be used for UDP and RAW netconns (not TCP). + * + * @param conn the netconn over which to send data + * @param buf a netbuf containing the data to send + * @param addr the remote IP address to which to send the data + * @param port the remote port to which to send the data + * @return ERR_OK if data was sent, any other err_t on error + */ +err_t +netconn_sendto(struct netconn *conn, struct netbuf *buf, const ip_addr_t *addr, u16_t port) +{ + if (buf != NULL) { + ip_addr_set(&buf->addr, addr); + buf->port = port; + return netconn_send(conn, buf); + } + return ERR_VAL; +} + +/** + * @ingroup netconn_udp + * Send data over a UDP or RAW netconn (that is already connected). + * + * @param conn the UDP or RAW netconn over which to send data + * @param buf a netbuf containing the data to send + * @return ERR_OK if data was sent, any other err_t on error + */ +err_t +netconn_send(struct netconn *conn, struct netbuf *buf) +{ + API_MSG_VAR_DECLARE(msg); + err_t err; + + LWIP_ERROR("netconn_send: invalid conn", (conn != NULL), return ERR_ARG;); + + LWIP_DEBUGF(API_LIB_DEBUG, ("netconn_send: sending %"U16_F" bytes\n", buf->p->tot_len)); + + API_MSG_VAR_ALLOC(msg); + API_MSG_VAR_REF(msg).conn = conn; + API_MSG_VAR_REF(msg).msg.b = buf; + err = netconn_apimsg(lwip_netconn_do_send, &API_MSG_VAR_REF(msg)); + API_MSG_VAR_FREE(msg); + + return err; +} + +/** + * @ingroup netconn_tcp + * Send data over a TCP netconn. + * + * @param conn the TCP netconn over which to send data + * @param dataptr pointer to the application buffer that contains the data to send + * @param size size of the application data to send + * @param apiflags combination of following flags : + * - NETCONN_COPY: data will be copied into memory belonging to the stack + * - NETCONN_MORE: for TCP connection, PSH flag will be set on last segment sent + * - NETCONN_DONTBLOCK: only write the data if all data can be written at once + * @param bytes_written pointer to a location that receives the number of written bytes + * @return ERR_OK if data was sent, any other err_t on error + */ +err_t +netconn_write_partly(struct netconn *conn, const void *dataptr, size_t size, + u8_t apiflags, size_t *bytes_written) +{ + struct netvector vector; + vector.ptr = dataptr; + vector.len = size; + return netconn_write_vectors_partly(conn, &vector, 1, apiflags, bytes_written); +} + +/** + * Send vectorized data atomically over a TCP netconn. 
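+ *
+ * For illustration (a sketch; conn, hdr/body and their lengths are placeholder
+ * names), two buffers can be queued as a single atomic write like this:
+ *
+ *   struct netvector vec[2];
+ *   size_t written;
+ *   vec[0].ptr = hdr;  vec[0].len = hdr_len;
+ *   vec[1].ptr = body; vec[1].len = body_len;
+ *   netconn_write_vectors_partly(conn, vec, 2, NETCONN_COPY, &written);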
+ * + * @param conn the TCP netconn over which to send data + * @param vectors array of vectors containing data to send + * @param vectorcnt number of vectors in the array + * @param apiflags combination of following flags : + * - NETCONN_COPY: data will be copied into memory belonging to the stack + * - NETCONN_MORE: for TCP connection, PSH flag will be set on last segment sent + * - NETCONN_DONTBLOCK: only write the data if all data can be written at once + * @param bytes_written pointer to a location that receives the number of written bytes + * @return ERR_OK if data was sent, any other err_t on error + */ +err_t +netconn_write_vectors_partly(struct netconn *conn, struct netvector *vectors, u16_t vectorcnt, + u8_t apiflags, size_t *bytes_written) +{ + API_MSG_VAR_DECLARE(msg); + err_t err; + u8_t dontblock; + size_t size; + int i; + + LWIP_ERROR("netconn_write: invalid conn", (conn != NULL), return ERR_ARG;); + LWIP_ERROR("netconn_write: invalid conn->type", (NETCONNTYPE_GROUP(conn->type) == NETCONN_TCP), return ERR_VAL;); + dontblock = netconn_is_nonblocking(conn) || (apiflags & NETCONN_DONTBLOCK); +#if LWIP_SO_SNDTIMEO + if (conn->send_timeout != 0) { + dontblock = 1; + } +#endif /* LWIP_SO_SNDTIMEO */ + if (dontblock && !bytes_written) { + /* This implies netconn_write() cannot be used for non-blocking send, since + it has no way to return the number of bytes written. */ + return ERR_VAL; + } + + /* sum up the total size */ + size = 0; + for (i = 0; i < vectorcnt; i++) { + size += vectors[i].len; + if (size < vectors[i].len) { + /* overflow */ + return ERR_VAL; + } + } + if (size == 0) { + return ERR_OK; + } else if (size > SSIZE_MAX) { + ssize_t limited; + /* this is required by the socket layer (cannot send full size_t range) */ + if (!bytes_written) { + return ERR_VAL; + } + /* limit the amount of data to send */ + limited = SSIZE_MAX; + size = (size_t)limited; + } + + API_MSG_VAR_ALLOC(msg); + /* non-blocking write sends as much */ + API_MSG_VAR_REF(msg).conn = conn; + API_MSG_VAR_REF(msg).msg.w.vector = vectors; + API_MSG_VAR_REF(msg).msg.w.vector_cnt = vectorcnt; + API_MSG_VAR_REF(msg).msg.w.vector_off = 0; + API_MSG_VAR_REF(msg).msg.w.apiflags = apiflags; + API_MSG_VAR_REF(msg).msg.w.len = size; + API_MSG_VAR_REF(msg).msg.w.offset = 0; +#if LWIP_SO_SNDTIMEO + if (conn->send_timeout != 0) { + /* get the time we started, which is later compared to + sys_now() + conn->send_timeout */ + API_MSG_VAR_REF(msg).msg.w.time_started = sys_now(); + } else { + API_MSG_VAR_REF(msg).msg.w.time_started = 0; + } +#endif /* LWIP_SO_SNDTIMEO */ + + /* For locking the core: this _can_ be delayed on low memory/low send buffer, + but if it is, this is done inside api_msg.c:do_write(), so we can use the + non-blocking version here. */ + err = netconn_apimsg(lwip_netconn_do_write, &API_MSG_VAR_REF(msg)); + if (err == ERR_OK) { + if (bytes_written != NULL) { + *bytes_written = API_MSG_VAR_REF(msg).msg.w.offset; + } + /* for blocking, check all requested bytes were written, NOTE: send_timeout is + treated as dontblock (see dontblock assignment above) */ + if (!dontblock) { + LWIP_ASSERT("do_write failed to write all bytes", API_MSG_VAR_REF(msg).msg.w.offset == size); + } + } + API_MSG_VAR_FREE(msg); + + return err; +} + +/** + * @ingroup netconn_tcp + * Close or shutdown a TCP netconn (doesn't delete it). + * + * @param conn the TCP netconn to close or shutdown + * @param how fully close or only shutdown one side? 
+ * @return ERR_OK if the netconn was closed, any other err_t on error + */ +static err_t +netconn_close_shutdown(struct netconn *conn, u8_t how) +{ + API_MSG_VAR_DECLARE(msg); + err_t err; + LWIP_UNUSED_ARG(how); + + LWIP_ERROR("netconn_close: invalid conn", (conn != NULL), return ERR_ARG;); + + API_MSG_VAR_ALLOC(msg); + API_MSG_VAR_REF(msg).conn = conn; +#if LWIP_TCP + /* shutting down both ends is the same as closing */ + API_MSG_VAR_REF(msg).msg.sd.shut = how; +#if LWIP_SO_SNDTIMEO || LWIP_SO_LINGER + /* get the time we started, which is later compared to + sys_now() + conn->send_timeout */ + API_MSG_VAR_REF(msg).msg.sd.time_started = sys_now(); +#else /* LWIP_SO_SNDTIMEO || LWIP_SO_LINGER */ + API_MSG_VAR_REF(msg).msg.sd.polls_left = + ((LWIP_TCP_CLOSE_TIMEOUT_MS_DEFAULT + TCP_SLOW_INTERVAL - 1) / TCP_SLOW_INTERVAL) + 1; +#endif /* LWIP_SO_SNDTIMEO || LWIP_SO_LINGER */ +#endif /* LWIP_TCP */ + err = netconn_apimsg(lwip_netconn_do_close, &API_MSG_VAR_REF(msg)); + API_MSG_VAR_FREE(msg); + + return err; +} + +/** + * @ingroup netconn_tcp + * Close a TCP netconn (doesn't delete it). + * + * @param conn the TCP netconn to close + * @return ERR_OK if the netconn was closed, any other err_t on error + */ +err_t +netconn_close(struct netconn *conn) +{ + /* shutting down both ends is the same as closing */ + return netconn_close_shutdown(conn, NETCONN_SHUT_RDWR); +} + +/** + * @ingroup netconn_common + * Get and reset pending error on a netconn + * + * @param conn the netconn to get the error from + * @return and pending error or ERR_OK if no error was pending + */ +err_t +netconn_err(struct netconn *conn) +{ + err_t err; + SYS_ARCH_DECL_PROTECT(lev); + if (conn == NULL) { + return ERR_OK; + } + SYS_ARCH_PROTECT(lev); + err = conn->pending_err; + conn->pending_err = ERR_OK; + SYS_ARCH_UNPROTECT(lev); + return err; +} + +/** + * @ingroup netconn_tcp + * Shut down one or both sides of a TCP netconn (doesn't delete it). + * + * @param conn the TCP netconn to shut down + * @param shut_rx shut down the RX side (no more read possible after this) + * @param shut_tx shut down the TX side (no more write possible after this) + * @return ERR_OK if the netconn was closed, any other err_t on error + */ +err_t +netconn_shutdown(struct netconn *conn, u8_t shut_rx, u8_t shut_tx) +{ + return netconn_close_shutdown(conn, (u8_t)((shut_rx ? NETCONN_SHUT_RD : 0) | (shut_tx ? NETCONN_SHUT_WR : 0))); +} + +#if LWIP_IGMP || (LWIP_IPV6 && LWIP_IPV6_MLD) +/** + * @ingroup netconn_udp + * Join multicast groups for UDP netconns. 
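+ *
+ * Example sketch (the group address is arbitrary and udp_conn is a placeholder
+ * for an already created UDP netconn; assumes an IPv4 build with LWIP_IGMP):
+ *
+ *   ip_addr_t group;
+ *   IP_ADDR4(&group, 239, 0, 0, 1);
+ *   netconn_join_leave_group(udp_conn, &group, IP_ADDR_ANY, NETCONN_JOIN);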
+ * + * @param conn the UDP netconn for which to change multicast addresses + * @param multiaddr IP address of the multicast group to join or leave + * @param netif_addr the IP address of the network interface on which to send + * the igmp message + * @param join_or_leave flag whether to send a join- or leave-message + * @return ERR_OK if the action was taken, any err_t on error + */ +err_t +netconn_join_leave_group(struct netconn *conn, + const ip_addr_t *multiaddr, + const ip_addr_t *netif_addr, + enum netconn_igmp join_or_leave) +{ + API_MSG_VAR_DECLARE(msg); + err_t err; + + LWIP_ERROR("netconn_join_leave_group: invalid conn", (conn != NULL), return ERR_ARG;); + + API_MSG_VAR_ALLOC(msg); + +#if LWIP_IPV4 + /* Don't propagate NULL pointer (IP_ADDR_ANY alias) to subsequent functions */ + if (multiaddr == NULL) { + multiaddr = IP4_ADDR_ANY; + } + if (netif_addr == NULL) { + netif_addr = IP4_ADDR_ANY; + } +#endif /* LWIP_IPV4 */ + + API_MSG_VAR_REF(msg).conn = conn; + API_MSG_VAR_REF(msg).msg.jl.multiaddr = API_MSG_VAR_REF(multiaddr); + API_MSG_VAR_REF(msg).msg.jl.netif_addr = API_MSG_VAR_REF(netif_addr); + API_MSG_VAR_REF(msg).msg.jl.join_or_leave = join_or_leave; + err = netconn_apimsg(lwip_netconn_do_join_leave_group, &API_MSG_VAR_REF(msg)); + API_MSG_VAR_FREE(msg); + + return err; +} +/** + * @ingroup netconn_udp + * Join multicast groups for UDP netconns. + * + * @param conn the UDP netconn for which to change multicast addresses + * @param multiaddr IP address of the multicast group to join or leave + * @param if_idx the index of the netif + * @param join_or_leave flag whether to send a join- or leave-message + * @return ERR_OK if the action was taken, any err_t on error + */ +err_t +netconn_join_leave_group_netif(struct netconn *conn, + const ip_addr_t *multiaddr, + u8_t if_idx, + enum netconn_igmp join_or_leave) +{ + API_MSG_VAR_DECLARE(msg); + err_t err; + + LWIP_ERROR("netconn_join_leave_group: invalid conn", (conn != NULL), return ERR_ARG;); + + API_MSG_VAR_ALLOC(msg); + +#if LWIP_IPV4 + /* Don't propagate NULL pointer (IP_ADDR_ANY alias) to subsequent functions */ + if (multiaddr == NULL) { + multiaddr = IP4_ADDR_ANY; + } + if (if_idx == NETIF_NO_INDEX) { + return ERR_IF; + } +#endif /* LWIP_IPV4 */ + + API_MSG_VAR_REF(msg).conn = conn; + API_MSG_VAR_REF(msg).msg.jl.multiaddr = API_MSG_VAR_REF(multiaddr); + API_MSG_VAR_REF(msg).msg.jl.if_idx = if_idx; + API_MSG_VAR_REF(msg).msg.jl.join_or_leave = join_or_leave; + err = netconn_apimsg(lwip_netconn_do_join_leave_group_netif, &API_MSG_VAR_REF(msg)); + API_MSG_VAR_FREE(msg); + + return err; +} +#endif /* LWIP_IGMP || (LWIP_IPV6 && LWIP_IPV6_MLD) */ + +#if LWIP_DNS +/** + * @ingroup netconn_common + * Execute a DNS query, only one IP address is returned + * + * @param name a string representation of the DNS host name to query + * @param addr a preallocated ip_addr_t where to store the resolved IP address + * @param dns_addrtype IP address type (IPv4 / IPv6) + * @return ERR_OK: resolving succeeded + * ERR_MEM: memory error, try again later + * ERR_ARG: dns client not initialized or invalid hostname + * ERR_VAL: dns server response was invalid + */ +#if LWIP_IPV4 && LWIP_IPV6 +err_t +netconn_gethostbyname_addrtype(const char *name, ip_addr_t *addr, u8_t dns_addrtype) +#else +err_t +netconn_gethostbyname(const char *name, ip_addr_t *addr) +#endif +{ + API_VAR_DECLARE(struct dns_api_msg, msg); +#if !LWIP_MPU_COMPATIBLE + sys_sem_t sem; +#endif /* LWIP_MPU_COMPATIBLE */ + err_t err; + err_t cberr; + + LWIP_ERROR("netconn_gethostbyname: 
invalid name", (name != NULL), return ERR_ARG;); + LWIP_ERROR("netconn_gethostbyname: invalid addr", (addr != NULL), return ERR_ARG;); +#if LWIP_MPU_COMPATIBLE + if (strlen(name) >= DNS_MAX_NAME_LENGTH) { + return ERR_ARG; + } +#endif + +#ifdef LWIP_HOOK_NETCONN_EXTERNAL_RESOLVE +#if LWIP_IPV4 && LWIP_IPV6 + if (LWIP_HOOK_NETCONN_EXTERNAL_RESOLVE(name, addr, dns_addrtype, &err)) { +#else + if (LWIP_HOOK_NETCONN_EXTERNAL_RESOLVE(name, addr, NETCONN_DNS_DEFAULT, &err)) { +#endif /* LWIP_IPV4 && LWIP_IPV6 */ + return err; + } +#endif /* LWIP_HOOK_NETCONN_EXTERNAL_RESOLVE */ + + API_VAR_ALLOC(struct dns_api_msg, MEMP_DNS_API_MSG, msg, ERR_MEM); +#if LWIP_MPU_COMPATIBLE + strncpy(API_VAR_REF(msg).name, name, DNS_MAX_NAME_LENGTH - 1); + API_VAR_REF(msg).name[DNS_MAX_NAME_LENGTH - 1] = 0; +#else /* LWIP_MPU_COMPATIBLE */ + msg.err = &err; + msg.sem = &sem; + API_VAR_REF(msg).addr = API_VAR_REF(addr); + API_VAR_REF(msg).name = name; +#endif /* LWIP_MPU_COMPATIBLE */ +#if LWIP_IPV4 && LWIP_IPV6 + API_VAR_REF(msg).dns_addrtype = dns_addrtype; +#endif /* LWIP_IPV4 && LWIP_IPV6 */ +#if LWIP_NETCONN_SEM_PER_THREAD + API_VAR_REF(msg).sem = LWIP_NETCONN_THREAD_SEM_GET(); +#else /* LWIP_NETCONN_SEM_PER_THREAD*/ + err = sys_sem_new(API_EXPR_REF(API_VAR_REF(msg).sem), 0); + if (err != ERR_OK) { + API_VAR_FREE(MEMP_DNS_API_MSG, msg); + return err; + } +#endif /* LWIP_NETCONN_SEM_PER_THREAD */ + + cberr = tcpip_send_msg_wait_sem(lwip_netconn_do_gethostbyname, &API_VAR_REF(msg), API_EXPR_REF(API_VAR_REF(msg).sem)); +#if !LWIP_NETCONN_SEM_PER_THREAD + sys_sem_free(API_EXPR_REF(API_VAR_REF(msg).sem)); +#endif /* !LWIP_NETCONN_SEM_PER_THREAD */ + if (cberr != ERR_OK) { + API_VAR_FREE(MEMP_DNS_API_MSG, msg); + return cberr; + } + +#if LWIP_MPU_COMPATIBLE + *addr = msg->addr; + err = msg->err; +#endif /* LWIP_MPU_COMPATIBLE */ + + API_VAR_FREE(MEMP_DNS_API_MSG, msg); + return err; +} +#endif /* LWIP_DNS*/ + +#if LWIP_NETCONN_SEM_PER_THREAD +void +netconn_thread_init(void) +{ + sys_sem_t *sem = LWIP_NETCONN_THREAD_SEM_GET(); + if ((sem == NULL) || !sys_sem_valid(sem)) { + /* call alloc only once */ + LWIP_NETCONN_THREAD_SEM_ALLOC(); + LWIP_ASSERT("LWIP_NETCONN_THREAD_SEM_ALLOC() failed", sys_sem_valid(LWIP_NETCONN_THREAD_SEM_GET())); + } +} + +void +netconn_thread_cleanup(void) +{ + sys_sem_t *sem = LWIP_NETCONN_THREAD_SEM_GET(); + if ((sem != NULL) && sys_sem_valid(sem)) { + /* call free only once */ + LWIP_NETCONN_THREAD_SEM_FREE(); + } +} +#endif /* LWIP_NETCONN_SEM_PER_THREAD */ + +#endif /* LWIP_NETCONN */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/api/api_msg.c b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/api/api_msg.c new file mode 100644 index 0000000..8952baa --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/api/api_msg.c @@ -0,0 +1,2173 @@ +/** + * @file + * Sequential API Internal module + * + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ + +#include "lwip/opt.h" + +#if LWIP_NETCONN /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/priv/api_msg.h" + +#include "lwip/ip.h" +#include "lwip/ip_addr.h" +#include "lwip/udp.h" +#include "lwip/tcp.h" +#include "lwip/raw.h" + +#include "lwip/memp.h" +#include "lwip/igmp.h" +#include "lwip/dns.h" +#include "lwip/mld6.h" +#include "lwip/priv/tcpip_priv.h" + +#include + +/* netconns are polled once per second (e.g. continue write on memory error) */ +#define NETCONN_TCP_POLL_INTERVAL 2 + +#define SET_NONBLOCKING_CONNECT(conn, val) do { if (val) { \ + netconn_set_flags(conn, NETCONN_FLAG_IN_NONBLOCKING_CONNECT); \ +} else { \ + netconn_clear_flags(conn, NETCONN_FLAG_IN_NONBLOCKING_CONNECT); }} while(0) +#define IN_NONBLOCKING_CONNECT(conn) netconn_is_flag_set(conn, NETCONN_FLAG_IN_NONBLOCKING_CONNECT) + +#if LWIP_NETCONN_FULLDUPLEX +#define NETCONN_MBOX_VALID(conn, mbox) (sys_mbox_valid(mbox) && ((conn->flags & NETCONN_FLAG_MBOXINVALID) == 0)) +#else +#define NETCONN_MBOX_VALID(conn, mbox) sys_mbox_valid(mbox) +#endif + +/* forward declarations */ +#if LWIP_TCP +#if LWIP_TCPIP_CORE_LOCKING +#define WRITE_DELAYED , 1 +#define WRITE_DELAYED_PARAM , u8_t delayed +#else /* LWIP_TCPIP_CORE_LOCKING */ +#define WRITE_DELAYED +#define WRITE_DELAYED_PARAM +#endif /* LWIP_TCPIP_CORE_LOCKING */ +static err_t lwip_netconn_do_writemore(struct netconn *conn WRITE_DELAYED_PARAM); +static err_t lwip_netconn_do_close_internal(struct netconn *conn WRITE_DELAYED_PARAM); +#endif + +static void netconn_drain(struct netconn *conn); + +#if LWIP_TCPIP_CORE_LOCKING +#define TCPIP_APIMSG_ACK(m) +#else /* LWIP_TCPIP_CORE_LOCKING */ +#define TCPIP_APIMSG_ACK(m) do { sys_sem_signal(LWIP_API_MSG_SEM(m)); } while(0) +#endif /* LWIP_TCPIP_CORE_LOCKING */ + +#if LWIP_NETCONN_FULLDUPLEX +const u8_t netconn_deleted = 0; + +int +lwip_netconn_is_deallocated_msg(void *msg) +{ + if (msg == &netconn_deleted) { + return 1; + } + return 0; +} +#endif /* LWIP_NETCONN_FULLDUPLEX */ + +#if LWIP_TCP +const u8_t netconn_aborted = 0; +const u8_t netconn_reset = 0; +const u8_t netconn_closed = 0; + +/** Translate an error to a unique void* passed via an mbox */ +static void * +lwip_netconn_err_to_msg(err_t err) +{ + switch (err) { + case ERR_ABRT: + return LWIP_CONST_CAST(void *, &netconn_aborted); + case ERR_RST: + return 
LWIP_CONST_CAST(void *, &netconn_reset); + case ERR_CLSD: + return LWIP_CONST_CAST(void *, &netconn_closed); + default: + LWIP_ASSERT("unhandled error", err == ERR_OK); + return NULL; + } +} + +int +lwip_netconn_is_err_msg(void *msg, err_t *err) +{ + LWIP_ASSERT("err != NULL", err != NULL); + + if (msg == &netconn_aborted) { + *err = ERR_ABRT; + return 1; + } else if (msg == &netconn_reset) { + *err = ERR_RST; + return 1; + } else if (msg == &netconn_closed) { + *err = ERR_CLSD; + return 1; + } + return 0; +} +#endif /* LWIP_TCP */ + + +#if LWIP_RAW +/** + * Receive callback function for RAW netconns. + * Doesn't 'eat' the packet, only copies it and sends it to + * conn->recvmbox + * + * @see raw.h (struct raw_pcb.recv) for parameters and return value + */ +static u8_t +recv_raw(void *arg, struct raw_pcb *pcb, struct pbuf *p, + const ip_addr_t *addr) +{ + struct pbuf *q; + struct netbuf *buf; + struct netconn *conn; + + LWIP_UNUSED_ARG(addr); + conn = (struct netconn *)arg; + + if ((conn != NULL) && NETCONN_MBOX_VALID(conn, &conn->recvmbox)) { +#if LWIP_SO_RCVBUF + int recv_avail; + SYS_ARCH_GET(conn->recv_avail, recv_avail); + if ((recv_avail + (int)(p->tot_len)) > conn->recv_bufsize) { + return 0; + } +#endif /* LWIP_SO_RCVBUF */ + /* copy the whole packet into new pbufs */ + q = pbuf_clone(PBUF_RAW, PBUF_RAM, p); + if (q != NULL) { + u16_t len; + buf = (struct netbuf *)memp_malloc(MEMP_NETBUF); + if (buf == NULL) { + pbuf_free(q); + return 0; + } + + buf->p = q; + buf->ptr = q; + ip_addr_copy(buf->addr, *ip_current_src_addr()); + buf->port = pcb->protocol; + + len = q->tot_len; + if (sys_mbox_trypost(&conn->recvmbox, buf) != ERR_OK) { + netbuf_delete(buf); + return 0; + } else { +#if LWIP_SO_RCVBUF + SYS_ARCH_INC(conn->recv_avail, len); +#endif /* LWIP_SO_RCVBUF */ + /* Register event with callback */ + API_EVENT(conn, NETCONN_EVT_RCVPLUS, len); + } + } + } + + return 0; /* do not eat the packet */ +} +#endif /* LWIP_RAW*/ + +#if LWIP_UDP +/** + * Receive callback function for UDP netconns. + * Posts the packet to conn->recvmbox or deletes it on memory error. + * + * @see udp.h (struct udp_pcb.recv) for parameters + */ +static void +recv_udp(void *arg, struct udp_pcb *pcb, struct pbuf *p, + const ip_addr_t *addr, u16_t port) +{ + struct netbuf *buf; + struct netconn *conn; + u16_t len; +#if LWIP_SO_RCVBUF + int recv_avail; +#endif /* LWIP_SO_RCVBUF */ + + LWIP_UNUSED_ARG(pcb); /* only used for asserts... 
*/ + LWIP_ASSERT("recv_udp must have a pcb argument", pcb != NULL); + LWIP_ASSERT("recv_udp must have an argument", arg != NULL); + conn = (struct netconn *)arg; + + if (conn == NULL) { + pbuf_free(p); + return; + } + + LWIP_ASSERT("recv_udp: recv for wrong pcb!", conn->pcb.udp == pcb); + +#if LWIP_SO_RCVBUF + SYS_ARCH_GET(conn->recv_avail, recv_avail); + if (!NETCONN_MBOX_VALID(conn, &conn->recvmbox) || + ((recv_avail + (int)(p->tot_len)) > conn->recv_bufsize)) { +#else /* LWIP_SO_RCVBUF */ + if (!NETCONN_MBOX_VALID(conn, &conn->recvmbox)) { +#endif /* LWIP_SO_RCVBUF */ + pbuf_free(p); + return; + } + + buf = (struct netbuf *)memp_malloc(MEMP_NETBUF); + if (buf == NULL) { + pbuf_free(p); + return; + } else { + buf->p = p; + buf->ptr = p; + ip_addr_set(&buf->addr, addr); + buf->port = port; +#if LWIP_NETBUF_RECVINFO + if (conn->flags & NETCONN_FLAG_PKTINFO) { + /* get the UDP header - always in the first pbuf, ensured by udp_input */ + const struct udp_hdr *udphdr = (const struct udp_hdr *)ip_next_header_ptr(); + buf->flags = NETBUF_FLAG_DESTADDR; + ip_addr_set(&buf->toaddr, ip_current_dest_addr()); + buf->toport_chksum = udphdr->dest; + } +#endif /* LWIP_NETBUF_RECVINFO */ + } + + len = p->tot_len; + if (sys_mbox_trypost(&conn->recvmbox, buf) != ERR_OK) { + netbuf_delete(buf); + return; + } else { +#if LWIP_SO_RCVBUF + SYS_ARCH_INC(conn->recv_avail, len); +#endif /* LWIP_SO_RCVBUF */ + /* Register event with callback */ + API_EVENT(conn, NETCONN_EVT_RCVPLUS, len); + } +} +#endif /* LWIP_UDP */ + +#if LWIP_TCP +/** + * Receive callback function for TCP netconns. + * Posts the packet to conn->recvmbox, but doesn't delete it on errors. + * + * @see tcp.h (struct tcp_pcb.recv) for parameters and return value + */ +static err_t +recv_tcp(void *arg, struct tcp_pcb *pcb, struct pbuf *p, err_t err) +{ + struct netconn *conn; + u16_t len; + void *msg; + + LWIP_UNUSED_ARG(pcb); + LWIP_ASSERT("recv_tcp must have a pcb argument", pcb != NULL); + LWIP_ASSERT("recv_tcp must have an argument", arg != NULL); + LWIP_ASSERT("err != ERR_OK unhandled", err == ERR_OK); + LWIP_UNUSED_ARG(err); /* for LWIP_NOASSERT */ + conn = (struct netconn *)arg; + + if (conn == NULL) { + return ERR_VAL; + } + LWIP_ASSERT("recv_tcp: recv for wrong pcb!", conn->pcb.tcp == pcb); + + if (!NETCONN_MBOX_VALID(conn, &conn->recvmbox)) { + /* recvmbox already deleted */ + if (p != NULL) { + tcp_recved(pcb, p->tot_len); + pbuf_free(p); + } + return ERR_OK; + } + /* Unlike for UDP or RAW pcbs, don't check for available space + using recv_avail since that could break the connection + (data is already ACKed) */ + + if (p != NULL) { + msg = p; + len = p->tot_len; + } else { + msg = LWIP_CONST_CAST(void *, &netconn_closed); + len = 0; + } + + if (sys_mbox_trypost(&conn->recvmbox, msg) != ERR_OK) { + /* don't deallocate p: it is presented to us later again from tcp_fasttmr! */ + return ERR_MEM; + } else { +#if LWIP_SO_RCVBUF + SYS_ARCH_INC(conn->recv_avail, len); +#endif /* LWIP_SO_RCVBUF */ + /* Register event with callback */ + API_EVENT(conn, NETCONN_EVT_RCVPLUS, len); + } + + return ERR_OK; +} + +/** + * Poll callback function for TCP netconns. + * Wakes up an application thread that waits for a connection to close + * or data to be sent. The application thread then takes the + * appropriate action to go on. + * + * Signals the conn->sem. + * netconn_close waits for conn->sem if closing failed. 
+ * + * @see tcp.h (struct tcp_pcb.poll) for parameters and return value + */ +static err_t +poll_tcp(void *arg, struct tcp_pcb *pcb) +{ + struct netconn *conn = (struct netconn *)arg; + + LWIP_UNUSED_ARG(pcb); + LWIP_ASSERT("conn != NULL", (conn != NULL)); + + if (conn->state == NETCONN_WRITE) { + lwip_netconn_do_writemore(conn WRITE_DELAYED); + } else if (conn->state == NETCONN_CLOSE) { +#if !LWIP_SO_SNDTIMEO && !LWIP_SO_LINGER + if (conn->current_msg && conn->current_msg->msg.sd.polls_left) { + conn->current_msg->msg.sd.polls_left--; + } +#endif /* !LWIP_SO_SNDTIMEO && !LWIP_SO_LINGER */ + lwip_netconn_do_close_internal(conn WRITE_DELAYED); + } + /* @todo: implement connect timeout here? */ + + /* Did a nonblocking write fail before? Then check available write-space. */ + if (conn->flags & NETCONN_FLAG_CHECK_WRITESPACE) { + /* If the queued byte- or pbuf-count drops below the configured low-water limit, + let select mark this pcb as writable again. */ + if ((conn->pcb.tcp != NULL) && (tcp_sndbuf(conn->pcb.tcp) > TCP_SNDLOWAT) && + (tcp_sndqueuelen(conn->pcb.tcp) < TCP_SNDQUEUELOWAT)) { + netconn_clear_flags(conn, NETCONN_FLAG_CHECK_WRITESPACE); + API_EVENT(conn, NETCONN_EVT_SENDPLUS, 0); + } + } + + return ERR_OK; +} + +/** + * Sent callback function for TCP netconns. + * Signals the conn->sem and calls API_EVENT. + * netconn_write waits for conn->sem if send buffer is low. + * + * @see tcp.h (struct tcp_pcb.sent) for parameters and return value + */ +static err_t +sent_tcp(void *arg, struct tcp_pcb *pcb, u16_t len) +{ + struct netconn *conn = (struct netconn *)arg; + + LWIP_UNUSED_ARG(pcb); + LWIP_ASSERT("conn != NULL", (conn != NULL)); + + if (conn) { + if (conn->state == NETCONN_WRITE) { + lwip_netconn_do_writemore(conn WRITE_DELAYED); + } else if (conn->state == NETCONN_CLOSE) { + lwip_netconn_do_close_internal(conn WRITE_DELAYED); + } + + /* If the queued byte- or pbuf-count drops below the configured low-water limit, + let select mark this pcb as writable again. */ + if ((conn->pcb.tcp != NULL) && (tcp_sndbuf(conn->pcb.tcp) > TCP_SNDLOWAT) && + (tcp_sndqueuelen(conn->pcb.tcp) < TCP_SNDQUEUELOWAT)) { + netconn_clear_flags(conn, NETCONN_FLAG_CHECK_WRITESPACE); + API_EVENT(conn, NETCONN_EVT_SENDPLUS, len); + } + } + + return ERR_OK; +} + +/** + * Error callback function for TCP netconns. + * Signals conn->sem, posts to all conn mboxes and calls API_EVENT. + * The application thread has then to decide what to do. + * + * @see tcp.h (struct tcp_pcb.err) for parameters + */ +static void +err_tcp(void *arg, err_t err) +{ + struct netconn *conn; + enum netconn_state old_state; + void *mbox_msg; + SYS_ARCH_DECL_PROTECT(lev); + + conn = (struct netconn *)arg; + LWIP_ASSERT("conn != NULL", (conn != NULL)); + + SYS_ARCH_PROTECT(lev); + + /* when err is called, the pcb is deallocated, so delete the reference */ + conn->pcb.tcp = NULL; + /* store pending error */ + conn->pending_err = err; + /* prevent application threads from blocking on 'recvmbox'/'acceptmbox' */ + conn->flags |= NETCONN_FLAG_MBOXCLOSED; + + /* reset conn->state now before waking up other threads */ + old_state = conn->state; + conn->state = NETCONN_NONE; + + SYS_ARCH_UNPROTECT(lev); + + /* Notify the user layer about a connection error. Used to signal select. */ + API_EVENT(conn, NETCONN_EVT_ERROR, 0); + /* Try to release selects pending on 'read' or 'write', too. + They will get an error if they actually try to read or write. 
*/ + API_EVENT(conn, NETCONN_EVT_RCVPLUS, 0); + API_EVENT(conn, NETCONN_EVT_SENDPLUS, 0); + + mbox_msg = lwip_netconn_err_to_msg(err); + /* pass error message to recvmbox to wake up pending recv */ + if (NETCONN_MBOX_VALID(conn, &conn->recvmbox)) { + /* use trypost to prevent deadlock */ + sys_mbox_trypost(&conn->recvmbox, mbox_msg); + } + /* pass error message to acceptmbox to wake up pending accept */ + if (NETCONN_MBOX_VALID(conn, &conn->acceptmbox)) { + /* use trypost to preven deadlock */ + sys_mbox_trypost(&conn->acceptmbox, mbox_msg); + } + + if ((old_state == NETCONN_WRITE) || (old_state == NETCONN_CLOSE) || + (old_state == NETCONN_CONNECT)) { + /* calling lwip_netconn_do_writemore/lwip_netconn_do_close_internal is not necessary + since the pcb has already been deleted! */ + int was_nonblocking_connect = IN_NONBLOCKING_CONNECT(conn); + SET_NONBLOCKING_CONNECT(conn, 0); + + if (!was_nonblocking_connect) { + sys_sem_t *op_completed_sem; + /* set error return code */ + LWIP_ASSERT("conn->current_msg != NULL", conn->current_msg != NULL); + if (old_state == NETCONN_CLOSE) { + /* let close succeed: the connection is closed after all... */ + conn->current_msg->err = ERR_OK; + } else { + /* Write and connect fail */ + conn->current_msg->err = err; + } + op_completed_sem = LWIP_API_MSG_SEM(conn->current_msg); + LWIP_ASSERT("inavlid op_completed_sem", sys_sem_valid(op_completed_sem)); + conn->current_msg = NULL; + /* wake up the waiting task */ + sys_sem_signal(op_completed_sem); + } else { + /* @todo: test what happens for error on nonblocking connect */ + } + } else { + LWIP_ASSERT("conn->current_msg == NULL", conn->current_msg == NULL); + } +} + +/** + * Setup a tcp_pcb with the correct callback function pointers + * and their arguments. + * + * @param conn the TCP netconn to setup + */ +static void +setup_tcp(struct netconn *conn) +{ + struct tcp_pcb *pcb; + + pcb = conn->pcb.tcp; + tcp_arg(pcb, conn); + tcp_recv(pcb, recv_tcp); + tcp_sent(pcb, sent_tcp); + tcp_poll(pcb, poll_tcp, NETCONN_TCP_POLL_INTERVAL); + tcp_err(pcb, err_tcp); +} + +/** + * Accept callback function for TCP netconns. + * Allocates a new netconn and posts that to conn->acceptmbox. + * + * @see tcp.h (struct tcp_pcb_listen.accept) for parameters and return value + */ +static err_t +accept_function(void *arg, struct tcp_pcb *newpcb, err_t err) +{ + struct netconn *newconn; + struct netconn *conn = (struct netconn *)arg; + + if (conn == NULL) { + return ERR_VAL; + } + if (!NETCONN_MBOX_VALID(conn, &conn->acceptmbox)) { + LWIP_DEBUGF(API_MSG_DEBUG, ("accept_function: acceptmbox already deleted\n")); + return ERR_VAL; + } + + if (newpcb == NULL) { + /* out-of-pcbs during connect: pass on this error to the application */ + if (sys_mbox_trypost(&conn->acceptmbox, lwip_netconn_err_to_msg(ERR_ABRT)) == ERR_OK) { + /* Register event with callback */ + API_EVENT(conn, NETCONN_EVT_RCVPLUS, 0); + } + return ERR_VAL; + } + LWIP_ASSERT("expect newpcb == NULL or err == ERR_OK", err == ERR_OK); + LWIP_UNUSED_ARG(err); /* for LWIP_NOASSERT */ + + LWIP_DEBUGF(API_MSG_DEBUG, ("accept_function: newpcb->state: %s\n", tcp_debug_state_str(newpcb->state))); + + /* We have to set the callback here even though + * the new socket is unknown. newconn->socket is marked as -1. 
*/ + newconn = netconn_alloc(conn->type, conn->callback); + if (newconn == NULL) { + /* outof netconns: pass on this error to the application */ + if (sys_mbox_trypost(&conn->acceptmbox, lwip_netconn_err_to_msg(ERR_ABRT)) == ERR_OK) { + /* Register event with callback */ + API_EVENT(conn, NETCONN_EVT_RCVPLUS, 0); + } + return ERR_MEM; + } + newconn->pcb.tcp = newpcb; + setup_tcp(newconn); + + /* handle backlog counter */ + tcp_backlog_delayed(newpcb); + + if (sys_mbox_trypost(&conn->acceptmbox, newconn) != ERR_OK) { + /* When returning != ERR_OK, the pcb is aborted in tcp_process(), + so do nothing here! */ + /* remove all references to this netconn from the pcb */ + struct tcp_pcb *pcb = newconn->pcb.tcp; + tcp_arg(pcb, NULL); + tcp_recv(pcb, NULL); + tcp_sent(pcb, NULL); + tcp_poll(pcb, NULL, 0); + tcp_err(pcb, NULL); + /* remove reference from to the pcb from this netconn */ + newconn->pcb.tcp = NULL; + /* no need to drain since we know the recvmbox is empty. */ + sys_mbox_free(&newconn->recvmbox); + sys_mbox_set_invalid(&newconn->recvmbox); + netconn_free(newconn); + return ERR_MEM; + } else { + /* Register event with callback */ + API_EVENT(conn, NETCONN_EVT_RCVPLUS, 0); + } + + return ERR_OK; +} +#endif /* LWIP_TCP */ + +/** + * Create a new pcb of a specific type. + * Called from lwip_netconn_do_newconn(). + * + * @param msg the api_msg describing the connection type + */ +static void +pcb_new(struct api_msg *msg) +{ + enum lwip_ip_addr_type iptype = IPADDR_TYPE_V4; + + LWIP_ASSERT("pcb_new: pcb already allocated", msg->conn->pcb.tcp == NULL); + +#if LWIP_IPV6 && LWIP_IPV4 + /* IPv6: Dual-stack by default, unless netconn_set_ipv6only() is called */ + if (NETCONNTYPE_ISIPV6(netconn_type(msg->conn))) { + iptype = IPADDR_TYPE_ANY; + } +#endif + + /* Allocate a PCB for this connection */ + switch (NETCONNTYPE_GROUP(msg->conn->type)) { +#if LWIP_RAW + case NETCONN_RAW: + msg->conn->pcb.raw = raw_new_ip_type(iptype, msg->msg.n.proto); + if (msg->conn->pcb.raw != NULL) { +#if LWIP_IPV6 + /* ICMPv6 packets should always have checksum calculated by the stack as per RFC 3542 chapter 3.1 */ + if (NETCONNTYPE_ISIPV6(msg->conn->type) && msg->conn->pcb.raw->protocol == IP6_NEXTH_ICMP6) { + msg->conn->pcb.raw->chksum_reqd = 1; + msg->conn->pcb.raw->chksum_offset = 2; + } +#endif /* LWIP_IPV6 */ + raw_recv(msg->conn->pcb.raw, recv_raw, msg->conn); + } + break; +#endif /* LWIP_RAW */ +#if LWIP_UDP + case NETCONN_UDP: + msg->conn->pcb.udp = udp_new_ip_type(iptype); + if (msg->conn->pcb.udp != NULL) { +#if LWIP_UDPLITE + if (NETCONNTYPE_ISUDPLITE(msg->conn->type)) { + udp_setflags(msg->conn->pcb.udp, UDP_FLAGS_UDPLITE); + } +#endif /* LWIP_UDPLITE */ + if (NETCONNTYPE_ISUDPNOCHKSUM(msg->conn->type)) { + udp_setflags(msg->conn->pcb.udp, UDP_FLAGS_NOCHKSUM); + } + udp_recv(msg->conn->pcb.udp, recv_udp, msg->conn); + } + break; +#endif /* LWIP_UDP */ +#if LWIP_TCP + case NETCONN_TCP: + msg->conn->pcb.tcp = tcp_new_ip_type(iptype); + if (msg->conn->pcb.tcp != NULL) { + setup_tcp(msg->conn); + } + break; +#endif /* LWIP_TCP */ + default: + /* Unsupported netconn type, e.g. protocol disabled */ + msg->err = ERR_VAL; + return; + } + if (msg->conn->pcb.ip == NULL) { + msg->err = ERR_MEM; + } +} + +/** + * Create a new pcb of a specific type inside a netconn. + * Called from netconn_new_with_proto_and_callback. 
+ * + * @param m the api_msg describing the connection type + */ +void +lwip_netconn_do_newconn(void *m) +{ + struct api_msg *msg = (struct api_msg *)m; + + msg->err = ERR_OK; + if (msg->conn->pcb.tcp == NULL) { + pcb_new(msg); + } + /* Else? This "new" connection already has a PCB allocated. */ + /* Is this an error condition? Should it be deleted? */ + /* We currently just are happy and return. */ + + TCPIP_APIMSG_ACK(msg); +} + +/** + * Create a new netconn (of a specific type) that has a callback function. + * The corresponding pcb is NOT created! + * + * @param t the type of 'connection' to create (@see enum netconn_type) + * @param callback a function to call on status changes (RX available, TX'ed) + * @return a newly allocated struct netconn or + * NULL on memory error + */ +struct netconn * +netconn_alloc(enum netconn_type t, netconn_callback callback) +{ + struct netconn *conn; + int size; + u8_t init_flags = 0; + + conn = (struct netconn *)memp_malloc(MEMP_NETCONN); + if (conn == NULL) { + return NULL; + } + + conn->pending_err = ERR_OK; + conn->type = t; + conn->pcb.tcp = NULL; + + /* If all sizes are the same, every compiler should optimize this switch to nothing */ + switch (NETCONNTYPE_GROUP(t)) { +#if LWIP_RAW + case NETCONN_RAW: + size = DEFAULT_RAW_RECVMBOX_SIZE; + break; +#endif /* LWIP_RAW */ +#if LWIP_UDP + case NETCONN_UDP: + size = DEFAULT_UDP_RECVMBOX_SIZE; +#if LWIP_NETBUF_RECVINFO + init_flags |= NETCONN_FLAG_PKTINFO; +#endif /* LWIP_NETBUF_RECVINFO */ + break; +#endif /* LWIP_UDP */ +#if LWIP_TCP + case NETCONN_TCP: + size = DEFAULT_TCP_RECVMBOX_SIZE; + break; +#endif /* LWIP_TCP */ + default: + LWIP_ASSERT("netconn_alloc: undefined netconn_type", 0); + goto free_and_return; + } + + if (sys_mbox_new(&conn->recvmbox, size) != ERR_OK) { + goto free_and_return; + } +#if !LWIP_NETCONN_SEM_PER_THREAD + if (sys_sem_new(&conn->op_completed, 0) != ERR_OK) { + sys_mbox_free(&conn->recvmbox); + goto free_and_return; + } +#endif + +#if LWIP_TCP + sys_mbox_set_invalid(&conn->acceptmbox); +#endif + conn->state = NETCONN_NONE; +#if LWIP_SOCKET + /* initialize socket to -1 since 0 is a valid socket */ + conn->socket = -1; +#endif /* LWIP_SOCKET */ + conn->callback = callback; +#if LWIP_TCP + conn->current_msg = NULL; +#endif /* LWIP_TCP */ +#if LWIP_SO_SNDTIMEO + conn->send_timeout = 0; +#endif /* LWIP_SO_SNDTIMEO */ +#if LWIP_SO_RCVTIMEO + conn->recv_timeout = 0; +#endif /* LWIP_SO_RCVTIMEO */ +#if LWIP_SO_RCVBUF + conn->recv_bufsize = RECV_BUFSIZE_DEFAULT; + conn->recv_avail = 0; +#endif /* LWIP_SO_RCVBUF */ +#if LWIP_SO_LINGER + conn->linger = -1; +#endif /* LWIP_SO_LINGER */ + conn->flags = init_flags; + return conn; +free_and_return: + memp_free(MEMP_NETCONN, conn); + return NULL; +} + +/** + * Delete a netconn and all its resources. + * The pcb is NOT freed (since we might not be in the right thread context do this). 
+ * + * @param conn the netconn to free + */ +void +netconn_free(struct netconn *conn) +{ + LWIP_ASSERT("PCB must be deallocated outside this function", conn->pcb.tcp == NULL); + +#if LWIP_NETCONN_FULLDUPLEX + /* in fullduplex, netconn is drained here */ + netconn_drain(conn); +#endif /* LWIP_NETCONN_FULLDUPLEX */ + + LWIP_ASSERT("recvmbox must be deallocated before calling this function", + !sys_mbox_valid(&conn->recvmbox)); +#if LWIP_TCP + LWIP_ASSERT("acceptmbox must be deallocated before calling this function", + !sys_mbox_valid(&conn->acceptmbox)); +#endif /* LWIP_TCP */ + +#if !LWIP_NETCONN_SEM_PER_THREAD + sys_sem_free(&conn->op_completed); + sys_sem_set_invalid(&conn->op_completed); +#endif + + memp_free(MEMP_NETCONN, conn); +} + +/** + * Delete rcvmbox and acceptmbox of a netconn and free the left-over data in + * these mboxes + * + * @param conn the netconn to free + * @bytes_drained bytes drained from recvmbox + * @accepts_drained pending connections drained from acceptmbox + */ +static void +netconn_drain(struct netconn *conn) +{ + void *mem; + + /* This runs when mbox and netconn are marked as closed, + so we don't need to lock against rx packets */ +#if LWIP_NETCONN_FULLDUPLEX + LWIP_ASSERT("netconn marked closed", conn->flags & NETCONN_FLAG_MBOXINVALID); +#endif /* LWIP_NETCONN_FULLDUPLEX */ + + /* Delete and drain the recvmbox. */ + if (sys_mbox_valid(&conn->recvmbox)) { + while (sys_mbox_tryfetch(&conn->recvmbox, &mem) != SYS_MBOX_EMPTY) { +#if LWIP_NETCONN_FULLDUPLEX + if (!lwip_netconn_is_deallocated_msg(mem)) +#endif /* LWIP_NETCONN_FULLDUPLEX */ + { +#if LWIP_TCP + if (NETCONNTYPE_GROUP(conn->type) == NETCONN_TCP) { + err_t err; + if (!lwip_netconn_is_err_msg(mem, &err)) { + pbuf_free((struct pbuf *)mem); + } + } else +#endif /* LWIP_TCP */ + { + netbuf_delete((struct netbuf *)mem); + } + } + } + sys_mbox_free(&conn->recvmbox); + sys_mbox_set_invalid(&conn->recvmbox); + } + + /* Delete and drain the acceptmbox. */ +#if LWIP_TCP + if (sys_mbox_valid(&conn->acceptmbox)) { + while (sys_mbox_tryfetch(&conn->acceptmbox, &mem) != SYS_MBOX_EMPTY) { +#if LWIP_NETCONN_FULLDUPLEX + if (!lwip_netconn_is_deallocated_msg(mem)) +#endif /* LWIP_NETCONN_FULLDUPLEX */ + { + err_t err; + if (!lwip_netconn_is_err_msg(mem, &err)) { + struct netconn *newconn = (struct netconn *)mem; + /* Only tcp pcbs have an acceptmbox, so no need to check conn->type */ + /* pcb might be set to NULL already by err_tcp() */ + /* drain recvmbox */ + netconn_drain(newconn); + if (newconn->pcb.tcp != NULL) { + tcp_abort(newconn->pcb.tcp); + newconn->pcb.tcp = NULL; + } + netconn_free(newconn); + } + } + } + sys_mbox_free(&conn->acceptmbox); + sys_mbox_set_invalid(&conn->acceptmbox); + } +#endif /* LWIP_TCP */ +} + +#if LWIP_NETCONN_FULLDUPLEX +static void +netconn_mark_mbox_invalid(struct netconn *conn) +{ + int i, num_waiting; + void *msg = LWIP_CONST_CAST(void *, &netconn_deleted); + + /* Prevent new calls/threads from reading from the mbox */ + conn->flags |= NETCONN_FLAG_MBOXINVALID; + + SYS_ARCH_LOCKED(num_waiting = conn->mbox_threads_waiting); + for (i = 0; i < num_waiting; i++) { + if (sys_mbox_valid_val(conn->recvmbox)) { + sys_mbox_trypost(&conn->recvmbox, msg); + } else { + sys_mbox_trypost(&conn->acceptmbox, msg); + } + } +} +#endif /* LWIP_NETCONN_FULLDUPLEX */ + +#if LWIP_TCP +/** + * Internal helper function to close a TCP netconn: since this sometimes + * doesn't work at the first attempt, this function is called from multiple + * places. 
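A small sketch (assumed usage, not part of this diff) of the teardown order these helpers rely on: applications go through netconn_close()/netconn_delete(), which drain the mboxes and release the pcb before netconn_free() runs.

/* Sketch only: application-side teardown; netconn_free()/netconn_drain()
   are internal and are reached via netconn_delete(). */
#include "lwip/api.h"

static void example_teardown(struct netconn *conn)
{
  (void)netconn_close(conn);   /* graceful TCP close (posts a CLOSE message) */
  (void)netconn_delete(conn);  /* drains mboxes, frees pcb and netconn */
}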
+ * + * @param conn the TCP netconn to close + */ +static err_t +lwip_netconn_do_close_internal(struct netconn *conn WRITE_DELAYED_PARAM) +{ + err_t err; + u8_t shut, shut_rx, shut_tx, shut_close; + u8_t close_finished = 0; + struct tcp_pcb *tpcb; +#if LWIP_SO_LINGER + u8_t linger_wait_required = 0; +#endif /* LWIP_SO_LINGER */ + + LWIP_ASSERT("invalid conn", (conn != NULL)); + LWIP_ASSERT("this is for tcp netconns only", (NETCONNTYPE_GROUP(conn->type) == NETCONN_TCP)); + LWIP_ASSERT("conn must be in state NETCONN_CLOSE", (conn->state == NETCONN_CLOSE)); + LWIP_ASSERT("pcb already closed", (conn->pcb.tcp != NULL)); + LWIP_ASSERT("conn->current_msg != NULL", conn->current_msg != NULL); + + tpcb = conn->pcb.tcp; + shut = conn->current_msg->msg.sd.shut; + shut_rx = shut & NETCONN_SHUT_RD; + shut_tx = shut & NETCONN_SHUT_WR; + /* shutting down both ends is the same as closing + (also if RD or WR side was shut down before already) */ + if (shut == NETCONN_SHUT_RDWR) { + shut_close = 1; + } else if (shut_rx && + ((tpcb->state == FIN_WAIT_1) || + (tpcb->state == FIN_WAIT_2) || + (tpcb->state == CLOSING))) { + shut_close = 1; + } else if (shut_tx && ((tpcb->flags & TF_RXCLOSED) != 0)) { + shut_close = 1; + } else { + shut_close = 0; + } + + /* Set back some callback pointers */ + if (shut_close) { + tcp_arg(tpcb, NULL); + } + if (tpcb->state == LISTEN) { + tcp_accept(tpcb, NULL); + } else { + /* some callbacks have to be reset if tcp_close is not successful */ + if (shut_rx) { + tcp_recv(tpcb, NULL); + tcp_accept(tpcb, NULL); + } + if (shut_tx) { + tcp_sent(tpcb, NULL); + } + if (shut_close) { + tcp_poll(tpcb, NULL, 0); + tcp_err(tpcb, NULL); + } + } + /* Try to close the connection */ + if (shut_close) { +#if LWIP_SO_LINGER + /* check linger possibilites before calling tcp_close */ + err = ERR_OK; + /* linger enabled/required at all? (i.e. is there untransmitted data left?) */ + if ((conn->linger >= 0) && (conn->pcb.tcp->unsent || conn->pcb.tcp->unacked)) { + if ((conn->linger == 0)) { + /* data left but linger prevents waiting */ + tcp_abort(tpcb); + tpcb = NULL; + } else if (conn->linger > 0) { + /* data left and linger says we should wait */ + if (netconn_is_nonblocking(conn)) { + /* data left on a nonblocking netconn -> cannot linger */ + err = ERR_WOULDBLOCK; + } else if ((s32_t)(sys_now() - conn->current_msg->msg.sd.time_started) >= + (conn->linger * 1000)) { + /* data left but linger timeout has expired (this happens on further + calls to this function through poll_tcp */ + tcp_abort(tpcb); + tpcb = NULL; + } else { + /* data left -> need to wait for ACK after successful close */ + linger_wait_required = 1; + } + } + } + if ((err == ERR_OK) && (tpcb != NULL)) +#endif /* LWIP_SO_LINGER */ + { + err = tcp_close(tpcb); + } + } else { + err = tcp_shutdown(tpcb, shut_rx, shut_tx); + } + if (err == ERR_OK) { + close_finished = 1; +#if LWIP_SO_LINGER + if (linger_wait_required) { + /* wait for ACK of all unsent/unacked data by just getting called again */ + close_finished = 0; + err = ERR_INPROGRESS; + } +#endif /* LWIP_SO_LINGER */ + } else { + if (err == ERR_MEM) { + /* Closing failed because of memory shortage, try again later. Even for + nonblocking netconns, we have to wait since no standard socket application + is prepared for close failing because of resource shortage. 
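The shut_rx/shut_tx combinations evaluated above originate from the public half-close API; a brief sketch (assumed usage, not part of this diff):

/* Sketch only: half-close vs. full close on a TCP netconn. */
#include "lwip/api.h"

static void example_shutdown(struct netconn *conn)
{
  (void)netconn_shutdown(conn, 0, 1); /* shut_tx only: send FIN, keep receiving */
  (void)netconn_close(conn);          /* RD+WR together is treated as a full close */
}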
+ Check the timeout: this is kind of an lwip addition to the standard sockets: + we wait for some time when failing to allocate a segment for the FIN */ +#if LWIP_SO_SNDTIMEO || LWIP_SO_LINGER + s32_t close_timeout = LWIP_TCP_CLOSE_TIMEOUT_MS_DEFAULT; +#if LWIP_SO_SNDTIMEO + if (conn->send_timeout > 0) { + close_timeout = conn->send_timeout; + } +#endif /* LWIP_SO_SNDTIMEO */ +#if LWIP_SO_LINGER + if (conn->linger >= 0) { + /* use linger timeout (seconds) */ + close_timeout = conn->linger * 1000U; + } +#endif + if ((s32_t)(sys_now() - conn->current_msg->msg.sd.time_started) >= close_timeout) { +#else /* LWIP_SO_SNDTIMEO || LWIP_SO_LINGER */ + if (conn->current_msg->msg.sd.polls_left == 0) { +#endif /* LWIP_SO_SNDTIMEO || LWIP_SO_LINGER */ + close_finished = 1; + if (shut_close) { + /* in this case, we want to RST the connection */ + tcp_abort(tpcb); + err = ERR_OK; + } + } + } else { + /* Closing failed for a non-memory error: give up */ + close_finished = 1; + } + } + if (close_finished) { + /* Closing done (succeeded, non-memory error, nonblocking error or timeout) */ + sys_sem_t *op_completed_sem = LWIP_API_MSG_SEM(conn->current_msg); + conn->current_msg->err = err; + conn->current_msg = NULL; + conn->state = NETCONN_NONE; + if (err == ERR_OK) { + if (shut_close) { + /* Set back some callback pointers as conn is going away */ + conn->pcb.tcp = NULL; + /* Trigger select() in socket layer. Make sure everybody notices activity + on the connection, error first! */ + API_EVENT(conn, NETCONN_EVT_ERROR, 0); + } + if (shut_rx) { + API_EVENT(conn, NETCONN_EVT_RCVPLUS, 0); + } + if (shut_tx) { + API_EVENT(conn, NETCONN_EVT_SENDPLUS, 0); + } + } +#if LWIP_TCPIP_CORE_LOCKING + if (delayed) +#endif + { + /* wake up the application task */ + sys_sem_signal(op_completed_sem); + } + return ERR_OK; + } + if (!close_finished) { + /* Closing failed and we want to wait: restore some of the callbacks */ + /* Closing of listen pcb will never fail! */ + LWIP_ASSERT("Closing a listen pcb may not fail!", (tpcb->state != LISTEN)); + if (shut_tx) { + tcp_sent(tpcb, sent_tcp); + } + /* when waiting for close, set up poll interval to 500ms */ + tcp_poll(tpcb, poll_tcp, 1); + tcp_err(tpcb, err_tcp); + tcp_arg(tpcb, conn); + /* don't restore recv callback: we don't want to receive any more data */ + } + /* If closing didn't succeed, we get called again either + from poll_tcp or from sent_tcp */ + LWIP_ASSERT("err != ERR_OK", err != ERR_OK); + return err; +} +#endif /* LWIP_TCP */ + +/** + * Delete the pcb inside a netconn. + * Called from netconn_delete. 
+ * + * @param m the api_msg pointing to the connection + */ +void +lwip_netconn_do_delconn(void *m) +{ + struct api_msg *msg = (struct api_msg *)m; + + enum netconn_state state = msg->conn->state; + LWIP_ASSERT("netconn state error", /* this only happens for TCP netconns */ + (state == NETCONN_NONE) || (NETCONNTYPE_GROUP(msg->conn->type) == NETCONN_TCP)); +#if LWIP_NETCONN_FULLDUPLEX + /* In full duplex mode, blocking write/connect is aborted with ERR_CLSD */ + if (state != NETCONN_NONE) { + if ((state == NETCONN_WRITE) || + ((state == NETCONN_CONNECT) && !IN_NONBLOCKING_CONNECT(msg->conn))) { + /* close requested, abort running write/connect */ + sys_sem_t *op_completed_sem; + LWIP_ASSERT("msg->conn->current_msg != NULL", msg->conn->current_msg != NULL); + op_completed_sem = LWIP_API_MSG_SEM(msg->conn->current_msg); + msg->conn->current_msg->err = ERR_CLSD; + msg->conn->current_msg = NULL; + msg->conn->state = NETCONN_NONE; + sys_sem_signal(op_completed_sem); + } + } +#else /* LWIP_NETCONN_FULLDUPLEX */ + if (((state != NETCONN_NONE) && + (state != NETCONN_LISTEN) && + (state != NETCONN_CONNECT)) || + ((state == NETCONN_CONNECT) && !IN_NONBLOCKING_CONNECT(msg->conn))) { + /* This means either a blocking write or blocking connect is running + (nonblocking write returns and sets state to NONE) */ + msg->err = ERR_INPROGRESS; + } else +#endif /* LWIP_NETCONN_FULLDUPLEX */ + { + LWIP_ASSERT("blocking connect in progress", + (state != NETCONN_CONNECT) || IN_NONBLOCKING_CONNECT(msg->conn)); + msg->err = ERR_OK; +#if LWIP_NETCONN_FULLDUPLEX + /* Mark mboxes invalid */ + netconn_mark_mbox_invalid(msg->conn); +#else /* LWIP_NETCONN_FULLDUPLEX */ + netconn_drain(msg->conn); +#endif /* LWIP_NETCONN_FULLDUPLEX */ + + if (msg->conn->pcb.tcp != NULL) { + + switch (NETCONNTYPE_GROUP(msg->conn->type)) { +#if LWIP_RAW + case NETCONN_RAW: + raw_remove(msg->conn->pcb.raw); + break; +#endif /* LWIP_RAW */ +#if LWIP_UDP + case NETCONN_UDP: + msg->conn->pcb.udp->recv_arg = NULL; + udp_remove(msg->conn->pcb.udp); + break; +#endif /* LWIP_UDP */ +#if LWIP_TCP + case NETCONN_TCP: + LWIP_ASSERT("already writing or closing", msg->conn->current_msg == NULL); + msg->conn->state = NETCONN_CLOSE; + msg->msg.sd.shut = NETCONN_SHUT_RDWR; + msg->conn->current_msg = msg; +#if LWIP_TCPIP_CORE_LOCKING + if (lwip_netconn_do_close_internal(msg->conn, 0) != ERR_OK) { + LWIP_ASSERT("state!", msg->conn->state == NETCONN_CLOSE); + UNLOCK_TCPIP_CORE(); + sys_arch_sem_wait(LWIP_API_MSG_SEM(msg), 0); + LOCK_TCPIP_CORE(); + LWIP_ASSERT("state!", msg->conn->state == NETCONN_NONE); + } +#else /* LWIP_TCPIP_CORE_LOCKING */ + lwip_netconn_do_close_internal(msg->conn); +#endif /* LWIP_TCPIP_CORE_LOCKING */ + /* API_EVENT is called inside lwip_netconn_do_close_internal, before releasing + the application thread, so we can return at this point! */ + return; +#endif /* LWIP_TCP */ + default: + break; + } + msg->conn->pcb.tcp = NULL; + } + /* tcp netconns don't come here! */ + + /* @todo: this lets select make the socket readable and writable, + which is wrong! errfd instead? */ + API_EVENT(msg->conn, NETCONN_EVT_RCVPLUS, 0); + API_EVENT(msg->conn, NETCONN_EVT_SENDPLUS, 0); + } + if (sys_sem_valid(LWIP_API_MSG_SEM(msg))) { + TCPIP_APIMSG_ACK(msg); + } +} + +/** + * Bind a pcb contained in a netconn + * Called from netconn_bind. 
+ * + * @param m the api_msg pointing to the connection and containing + * the IP address and port to bind to + */ +void +lwip_netconn_do_bind(void *m) +{ + struct api_msg *msg = (struct api_msg *)m; + err_t err; + + if (msg->conn->pcb.tcp != NULL) { + switch (NETCONNTYPE_GROUP(msg->conn->type)) { +#if LWIP_RAW + case NETCONN_RAW: + err = raw_bind(msg->conn->pcb.raw, API_EXPR_REF(msg->msg.bc.ipaddr)); + break; +#endif /* LWIP_RAW */ +#if LWIP_UDP + case NETCONN_UDP: + err = udp_bind(msg->conn->pcb.udp, API_EXPR_REF(msg->msg.bc.ipaddr), msg->msg.bc.port); + break; +#endif /* LWIP_UDP */ +#if LWIP_TCP + case NETCONN_TCP: + err = tcp_bind(msg->conn->pcb.tcp, API_EXPR_REF(msg->msg.bc.ipaddr), msg->msg.bc.port); + break; +#endif /* LWIP_TCP */ + default: + err = ERR_VAL; + break; + } + } else { + err = ERR_VAL; + } + msg->err = err; + TCPIP_APIMSG_ACK(msg); +} +/** + * Bind a pcb contained in a netconn to an interface + * Called from netconn_bind_if. + * + * @param m the api_msg pointing to the connection and containing + * the IP address and port to bind to + */ +void +lwip_netconn_do_bind_if(void *m) +{ + struct netif *netif; + struct api_msg *msg = (struct api_msg *)m; + err_t err; + + netif = netif_get_by_index(msg->msg.bc.if_idx); + + if ((netif != NULL) && (msg->conn->pcb.tcp != NULL)) { + err = ERR_OK; + switch (NETCONNTYPE_GROUP(msg->conn->type)) { +#if LWIP_RAW + case NETCONN_RAW: + raw_bind_netif(msg->conn->pcb.raw, netif); + break; +#endif /* LWIP_RAW */ +#if LWIP_UDP + case NETCONN_UDP: + udp_bind_netif(msg->conn->pcb.udp, netif); + break; +#endif /* LWIP_UDP */ +#if LWIP_TCP + case NETCONN_TCP: + tcp_bind_netif(msg->conn->pcb.tcp, netif); + break; +#endif /* LWIP_TCP */ + default: + err = ERR_VAL; + break; + } + } else { + err = ERR_VAL; + } + msg->err = err; + TCPIP_APIMSG_ACK(msg); +} + +#if LWIP_TCP +/** + * TCP callback function if a connection (opened by tcp_connect/lwip_netconn_do_connect) has + * been established (or reset by the remote host). + * + * @see tcp.h (struct tcp_pcb.connected) for parameters and return values + */ +static err_t +lwip_netconn_do_connected(void *arg, struct tcp_pcb *pcb, err_t err) +{ + struct netconn *conn; + int was_blocking; + sys_sem_t *op_completed_sem = NULL; + + LWIP_UNUSED_ARG(pcb); + + conn = (struct netconn *)arg; + + if (conn == NULL) { + return ERR_VAL; + } + + LWIP_ASSERT("conn->state == NETCONN_CONNECT", conn->state == NETCONN_CONNECT); + LWIP_ASSERT("(conn->current_msg != NULL) || conn->in_non_blocking_connect", + (conn->current_msg != NULL) || IN_NONBLOCKING_CONNECT(conn)); + + if (conn->current_msg != NULL) { + conn->current_msg->err = err; + op_completed_sem = LWIP_API_MSG_SEM(conn->current_msg); + } + if ((NETCONNTYPE_GROUP(conn->type) == NETCONN_TCP) && (err == ERR_OK)) { + setup_tcp(conn); + } + was_blocking = !IN_NONBLOCKING_CONNECT(conn); + SET_NONBLOCKING_CONNECT(conn, 0); + LWIP_ASSERT("blocking connect state error", + (was_blocking && op_completed_sem != NULL) || + (!was_blocking && op_completed_sem == NULL)); + conn->current_msg = NULL; + conn->state = NETCONN_NONE; + API_EVENT(conn, NETCONN_EVT_SENDPLUS, 0); + + if (was_blocking) { + sys_sem_signal(op_completed_sem); + } + return ERR_OK; +} +#endif /* LWIP_TCP */ + +/** + * Connect a pcb contained inside a netconn + * Called from netconn_connect. 
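A short sketch (assumed usage, not part of this diff) of the calls that land in lwip_netconn_do_bind()/lwip_netconn_do_bind_if(); the port number and netif index are placeholders.

/* Sketch only: binding a netconn to a local address and, optionally, a netif. */
#include "lwip/api.h"

static err_t example_bind(struct netconn *conn)
{
  err_t err = netconn_bind(conn, IP_ADDR_ANY, 7000); /* placeholder port */
  if (err != ERR_OK) {
    return err;
  }
  return netconn_bind_if(conn, 1); /* placeholder netif index */
}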
+ * + * @param m the api_msg pointing to the connection and containing + * the IP address and port to connect to + */ +void +lwip_netconn_do_connect(void *m) +{ + struct api_msg *msg = (struct api_msg *)m; + err_t err; + + if (msg->conn->pcb.tcp == NULL) { + /* This may happen when calling netconn_connect() a second time */ + err = ERR_CLSD; + } else { + switch (NETCONNTYPE_GROUP(msg->conn->type)) { +#if LWIP_RAW + case NETCONN_RAW: + err = raw_connect(msg->conn->pcb.raw, API_EXPR_REF(msg->msg.bc.ipaddr)); + break; +#endif /* LWIP_RAW */ +#if LWIP_UDP + case NETCONN_UDP: + err = udp_connect(msg->conn->pcb.udp, API_EXPR_REF(msg->msg.bc.ipaddr), msg->msg.bc.port); + break; +#endif /* LWIP_UDP */ +#if LWIP_TCP + case NETCONN_TCP: + /* Prevent connect while doing any other action. */ + if (msg->conn->state == NETCONN_CONNECT) { + err = ERR_ALREADY; + } else if (msg->conn->state != NETCONN_NONE) { + err = ERR_ISCONN; + } else { + setup_tcp(msg->conn); + err = tcp_connect(msg->conn->pcb.tcp, API_EXPR_REF(msg->msg.bc.ipaddr), + msg->msg.bc.port, lwip_netconn_do_connected); + if (err == ERR_OK) { + u8_t non_blocking = netconn_is_nonblocking(msg->conn); + msg->conn->state = NETCONN_CONNECT; + SET_NONBLOCKING_CONNECT(msg->conn, non_blocking); + if (non_blocking) { + err = ERR_INPROGRESS; + } else { + msg->conn->current_msg = msg; + /* sys_sem_signal() is called from lwip_netconn_do_connected (or err_tcp()), + when the connection is established! */ +#if LWIP_TCPIP_CORE_LOCKING + LWIP_ASSERT("state!", msg->conn->state == NETCONN_CONNECT); + UNLOCK_TCPIP_CORE(); + sys_arch_sem_wait(LWIP_API_MSG_SEM(msg), 0); + LOCK_TCPIP_CORE(); + LWIP_ASSERT("state!", msg->conn->state != NETCONN_CONNECT); +#endif /* LWIP_TCPIP_CORE_LOCKING */ + return; + } + } + } + break; +#endif /* LWIP_TCP */ + default: + LWIP_ERROR("Invalid netconn type", 0, do { + err = ERR_VAL; + } while (0)); + break; + } + } + msg->err = err; + /* For all other protocols, netconn_connect() calls netconn_apimsg(), + so use TCPIP_APIMSG_ACK() here. */ + TCPIP_APIMSG_ACK(msg); +} + +/** + * Disconnect a pcb contained inside a netconn + * Only used for UDP netconns. + * Called from netconn_disconnect. + * + * @param m the api_msg pointing to the connection to disconnect + */ +void +lwip_netconn_do_disconnect(void *m) +{ + struct api_msg *msg = (struct api_msg *)m; + +#if LWIP_UDP + if (NETCONNTYPE_GROUP(msg->conn->type) == NETCONN_UDP) { + udp_disconnect(msg->conn->pcb.udp); + msg->err = ERR_OK; + } else +#endif /* LWIP_UDP */ + { + msg->err = ERR_VAL; + } + TCPIP_APIMSG_ACK(msg); +} + +#if LWIP_TCP +/** + * Set a TCP pcb contained in a netconn into listen mode + * Called from netconn_listen. 
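A sketch (assumed usage, not part of this diff) of the blocking vs. non-blocking connect paths handled above; the peer address and port are placeholders.

/* Sketch only: non-blocking TCP connect returning ERR_INPROGRESS. */
#include "lwip/api.h"

static err_t example_connect(struct netconn *conn)
{
  ip_addr_t peer;
  err_t err;

  IP_ADDR4(&peer, 192, 168, 1, 10);         /* placeholder peer address */
  netconn_set_nonblocking(conn, 1);         /* otherwise the call blocks on the op semaphore */
  err = netconn_connect(conn, &peer, 5000); /* placeholder port */
  /* ERR_INPROGRESS: completion is reported later via lwip_netconn_do_connected() */
  return err;
}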
+ * + * @param m the api_msg pointing to the connection + */ +void +lwip_netconn_do_listen(void *m) +{ + struct api_msg *msg = (struct api_msg *)m; + err_t err; + + if (msg->conn->pcb.tcp != NULL) { + if (NETCONNTYPE_GROUP(msg->conn->type) == NETCONN_TCP) { + if (msg->conn->state == NETCONN_NONE) { + struct tcp_pcb *lpcb; + if (msg->conn->pcb.tcp->state != CLOSED) { + /* connection is not closed, cannot listen */ + err = ERR_VAL; + } else { + u8_t backlog; +#if TCP_LISTEN_BACKLOG + backlog = msg->msg.lb.backlog; +#else /* TCP_LISTEN_BACKLOG */ + backlog = TCP_DEFAULT_LISTEN_BACKLOG; +#endif /* TCP_LISTEN_BACKLOG */ +#if LWIP_IPV4 && LWIP_IPV6 + /* "Socket API like" dual-stack support: If IP to listen to is IP6_ADDR_ANY, + * and NETCONN_FLAG_IPV6_V6ONLY is NOT set, use IP_ANY_TYPE to listen + */ + if (ip_addr_cmp(&msg->conn->pcb.ip->local_ip, IP6_ADDR_ANY) && + (netconn_get_ipv6only(msg->conn) == 0)) { + /* change PCB type to IPADDR_TYPE_ANY */ + IP_SET_TYPE_VAL(msg->conn->pcb.tcp->local_ip, IPADDR_TYPE_ANY); + IP_SET_TYPE_VAL(msg->conn->pcb.tcp->remote_ip, IPADDR_TYPE_ANY); + } +#endif /* LWIP_IPV4 && LWIP_IPV6 */ + + lpcb = tcp_listen_with_backlog_and_err(msg->conn->pcb.tcp, backlog, &err); + + if (lpcb == NULL) { + /* in this case, the old pcb is still allocated */ + } else { + /* delete the recvmbox and allocate the acceptmbox */ + if (sys_mbox_valid(&msg->conn->recvmbox)) { + /** @todo: should we drain the recvmbox here? */ + sys_mbox_free(&msg->conn->recvmbox); + sys_mbox_set_invalid(&msg->conn->recvmbox); + } + err = ERR_OK; + if (!sys_mbox_valid(&msg->conn->acceptmbox)) { + err = sys_mbox_new(&msg->conn->acceptmbox, DEFAULT_ACCEPTMBOX_SIZE); + } + if (err == ERR_OK) { + msg->conn->state = NETCONN_LISTEN; + msg->conn->pcb.tcp = lpcb; + tcp_arg(msg->conn->pcb.tcp, msg->conn); + tcp_accept(msg->conn->pcb.tcp, accept_function); + } else { + /* since the old pcb is already deallocated, free lpcb now */ + tcp_close(lpcb); + msg->conn->pcb.tcp = NULL; + } + } + } + } else if (msg->conn->state == NETCONN_LISTEN) { + /* already listening, allow updating of the backlog */ + err = ERR_OK; + tcp_backlog_set(msg->conn->pcb.tcp, msg->msg.lb.backlog); + } else { + err = ERR_CONN; + } + } else { + err = ERR_ARG; + } + } else { + err = ERR_CONN; + } + msg->err = err; + TCPIP_APIMSG_ACK(msg); +} +#endif /* LWIP_TCP */ + +/** + * Send some data on a RAW or UDP pcb contained in a netconn + * Called from netconn_send + * + * @param m the api_msg pointing to the connection + */ +void +lwip_netconn_do_send(void *m) +{ + struct api_msg *msg = (struct api_msg *)m; + + err_t err = netconn_err(msg->conn); + if (err == ERR_OK) { + if (msg->conn->pcb.tcp != NULL) { + switch (NETCONNTYPE_GROUP(msg->conn->type)) { +#if LWIP_RAW + case NETCONN_RAW: + if (ip_addr_isany(&msg->msg.b->addr) || IP_IS_ANY_TYPE_VAL(msg->msg.b->addr)) { + err = raw_send(msg->conn->pcb.raw, msg->msg.b->p); + } else { + err = raw_sendto(msg->conn->pcb.raw, msg->msg.b->p, &msg->msg.b->addr); + } + break; +#endif +#if LWIP_UDP + case NETCONN_UDP: +#if LWIP_CHECKSUM_ON_COPY + if (ip_addr_isany(&msg->msg.b->addr) || IP_IS_ANY_TYPE_VAL(msg->msg.b->addr)) { + err = udp_send_chksum(msg->conn->pcb.udp, msg->msg.b->p, + msg->msg.b->flags & NETBUF_FLAG_CHKSUM, msg->msg.b->toport_chksum); + } else { + err = udp_sendto_chksum(msg->conn->pcb.udp, msg->msg.b->p, + &msg->msg.b->addr, msg->msg.b->port, + msg->msg.b->flags & NETBUF_FLAG_CHKSUM, msg->msg.b->toport_chksum); + } +#else /* LWIP_CHECKSUM_ON_COPY */ + if (ip_addr_isany_val(msg->msg.b->addr) || 
IP_IS_ANY_TYPE_VAL(msg->msg.b->addr)) { + err = udp_send(msg->conn->pcb.udp, msg->msg.b->p); + } else { + err = udp_sendto(msg->conn->pcb.udp, msg->msg.b->p, &msg->msg.b->addr, msg->msg.b->port); + } +#endif /* LWIP_CHECKSUM_ON_COPY */ + break; +#endif /* LWIP_UDP */ + default: + err = ERR_CONN; + break; + } + } else { + err = ERR_CONN; + } + } + msg->err = err; + TCPIP_APIMSG_ACK(msg); +} + +#if LWIP_TCP +/** + * Indicate data has been received from a TCP pcb contained in a netconn + * Called from netconn_recv + * + * @param m the api_msg pointing to the connection + */ +void +lwip_netconn_do_recv(void *m) +{ + struct api_msg *msg = (struct api_msg *)m; + + msg->err = ERR_OK; + if (msg->conn->pcb.tcp != NULL) { + if (NETCONNTYPE_GROUP(msg->conn->type) == NETCONN_TCP) { + size_t remaining = msg->msg.r.len; + do { + u16_t recved = (u16_t)((remaining > 0xffff) ? 0xffff : remaining); + tcp_recved(msg->conn->pcb.tcp, recved); + remaining -= recved; + } while (remaining != 0); + } + } + TCPIP_APIMSG_ACK(msg); +} + +#if TCP_LISTEN_BACKLOG +/** Indicate that a TCP pcb has been accepted + * Called from netconn_accept + * + * @param m the api_msg pointing to the connection + */ +void +lwip_netconn_do_accepted(void *m) +{ + struct api_msg *msg = (struct api_msg *)m; + + msg->err = ERR_OK; + if (msg->conn->pcb.tcp != NULL) { + if (NETCONNTYPE_GROUP(msg->conn->type) == NETCONN_TCP) { + tcp_backlog_accepted(msg->conn->pcb.tcp); + } + } + TCPIP_APIMSG_ACK(msg); +} +#endif /* TCP_LISTEN_BACKLOG */ + +/** + * See if more data needs to be written from a previous call to netconn_write. + * Called initially from lwip_netconn_do_write. If the first call can't send all data + * (because of low memory or empty send-buffer), this function is called again + * from sent_tcp() or poll_tcp() to send more data. If all data is sent, the + * blocking application thread (waiting in netconn_write) is released. 
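A sketch (assumed usage, not part of this diff) of a UDP send/receive pair that exercises lwip_netconn_do_send() and the recvmbox; the destination address and port are placeholders.

/* Sketch only: one UDP datagram out, one in, using netbufs. */
#include <string.h>
#include "lwip/api.h"

static void example_udp_once(struct netconn *udp)
{
  struct netbuf *out = netbuf_new();
  struct netbuf *in;
  void *payload;
  ip_addr_t dst;

  IP_ADDR4(&dst, 192, 168, 1, 20);             /* placeholder destination */
  if (out != NULL) {
    payload = netbuf_alloc(out, 5);
    if (payload != NULL) {
      memcpy(payload, "ping", 5);
      (void)netconn_sendto(udp, out, &dst, 7); /* placeholder port */
    }
    netbuf_delete(out);
  }
  if (netconn_recv(udp, &in) == ERR_OK) {      /* blocks unless a recv timeout is set */
    netbuf_delete(in);
  }
}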
+ * + * @param conn netconn (that is currently in state NETCONN_WRITE) to process + * @return ERR_OK + * ERR_MEM if LWIP_TCPIP_CORE_LOCKING=1 and sending hasn't yet finished + */ +static err_t +lwip_netconn_do_writemore(struct netconn *conn WRITE_DELAYED_PARAM) +{ + err_t err; + const void *dataptr; + u16_t len, available; + u8_t write_finished = 0; + size_t diff; + u8_t dontblock; + u8_t apiflags; + u8_t write_more; + + LWIP_ASSERT("conn != NULL", conn != NULL); + LWIP_ASSERT("conn->state == NETCONN_WRITE", (conn->state == NETCONN_WRITE)); + LWIP_ASSERT("conn->current_msg != NULL", conn->current_msg != NULL); + LWIP_ASSERT("conn->pcb.tcp != NULL", conn->pcb.tcp != NULL); + LWIP_ASSERT("conn->current_msg->msg.w.offset < conn->current_msg->msg.w.len", + conn->current_msg->msg.w.offset < conn->current_msg->msg.w.len); + LWIP_ASSERT("conn->current_msg->msg.w.vector_cnt > 0", conn->current_msg->msg.w.vector_cnt > 0); + + apiflags = conn->current_msg->msg.w.apiflags; + dontblock = netconn_is_nonblocking(conn) || (apiflags & NETCONN_DONTBLOCK); + +#if LWIP_SO_SNDTIMEO + if ((conn->send_timeout != 0) && + ((s32_t)(sys_now() - conn->current_msg->msg.w.time_started) >= conn->send_timeout)) { + write_finished = 1; + if (conn->current_msg->msg.w.offset == 0) { + /* nothing has been written */ + err = ERR_WOULDBLOCK; + } else { + /* partial write */ + err = ERR_OK; + } + } else +#endif /* LWIP_SO_SNDTIMEO */ + { + do { + dataptr = (const u8_t *)conn->current_msg->msg.w.vector->ptr + conn->current_msg->msg.w.vector_off; + diff = conn->current_msg->msg.w.vector->len - conn->current_msg->msg.w.vector_off; + if (diff > 0xffffUL) { /* max_u16_t */ + len = 0xffff; + apiflags |= TCP_WRITE_FLAG_MORE; + } else { + len = (u16_t)diff; + } + available = tcp_sndbuf(conn->pcb.tcp); + if (available < len) { + /* don't try to write more than sendbuf */ + len = available; + if (dontblock) { + if (!len) { + /* set error according to partial write or not */ + err = (conn->current_msg->msg.w.offset == 0) ? ERR_WOULDBLOCK : ERR_OK; + goto err_mem; + } + } else { + apiflags |= TCP_WRITE_FLAG_MORE; + } + } + LWIP_ASSERT("lwip_netconn_do_writemore: invalid length!", + ((conn->current_msg->msg.w.vector_off + len) <= conn->current_msg->msg.w.vector->len)); + /* we should loop around for more sending in the following cases: + 1) We couldn't finish the current vector because of 16-bit size limitations. 
+ tcp_write() and tcp_sndbuf() both are limited to 16-bit sizes + 2) We are sending the remainder of the current vector and have more */ + if ((len == 0xffff && diff > 0xffffUL) || + (len == (u16_t)diff && conn->current_msg->msg.w.vector_cnt > 1)) { + write_more = 1; + apiflags |= TCP_WRITE_FLAG_MORE; + } else { + write_more = 0; + } + err = tcp_write(conn->pcb.tcp, dataptr, len, apiflags); + if (err == ERR_OK) { + conn->current_msg->msg.w.offset += len; + conn->current_msg->msg.w.vector_off += len; + /* check if current vector is finished */ + if (conn->current_msg->msg.w.vector_off == conn->current_msg->msg.w.vector->len) { + conn->current_msg->msg.w.vector_cnt--; + /* if we have additional vectors, move on to them */ + if (conn->current_msg->msg.w.vector_cnt > 0) { + conn->current_msg->msg.w.vector++; + conn->current_msg->msg.w.vector_off = 0; + } + } + } + } while (write_more && err == ERR_OK); + /* if OK or memory error, check available space */ + if ((err == ERR_OK) || (err == ERR_MEM)) { +err_mem: + if (dontblock && (conn->current_msg->msg.w.offset < conn->current_msg->msg.w.len)) { + /* non-blocking write did not write everything: mark the pcb non-writable + and let poll_tcp check writable space to mark the pcb writable again */ + API_EVENT(conn, NETCONN_EVT_SENDMINUS, 0); + conn->flags |= NETCONN_FLAG_CHECK_WRITESPACE; + } else if ((tcp_sndbuf(conn->pcb.tcp) <= TCP_SNDLOWAT) || + (tcp_sndqueuelen(conn->pcb.tcp) >= TCP_SNDQUEUELOWAT)) { + /* The queued byte- or pbuf-count exceeds the configured low-water limit, + let select mark this pcb as non-writable. */ + API_EVENT(conn, NETCONN_EVT_SENDMINUS, 0); + } + } + + if (err == ERR_OK) { + err_t out_err; + if ((conn->current_msg->msg.w.offset == conn->current_msg->msg.w.len) || dontblock) { + /* return sent length (caller reads length from msg.w.offset) */ + write_finished = 1; + } + out_err = tcp_output(conn->pcb.tcp); + if (out_err == ERR_RTE) { + /* If tcp_output fails because no route is found, + don't try writing any more but return the error + to the application thread. */ + err = out_err; + write_finished = 1; + } + } else if (err == ERR_MEM) { + /* If ERR_MEM, we wait for sent_tcp or poll_tcp to be called. + For blocking sockets, we do NOT return to the application + thread, since ERR_MEM is only a temporary error! Non-blocking + will remain non-writable until sent_tcp/poll_tcp is called */ + + /* tcp_write returned ERR_MEM, try tcp_output anyway */ + err_t out_err = tcp_output(conn->pcb.tcp); + if (out_err == ERR_RTE) { + /* If tcp_output fails because no route is found, + don't try writing any more but return the error + to the application thread. */ + err = out_err; + write_finished = 1; + } else if (dontblock) { + /* non-blocking write is done on ERR_MEM, set error according + to partial write or not */ + err = (conn->current_msg->msg.w.offset == 0) ? ERR_WOULDBLOCK : ERR_OK; + write_finished = 1; + } + } else { + /* On errors != ERR_MEM, we don't try writing any more but return + the error to the application thread. 
*/ + write_finished = 1; + } + } + if (write_finished) { + /* everything was written: set back connection state + and back to application task */ + sys_sem_t *op_completed_sem = LWIP_API_MSG_SEM(conn->current_msg); + conn->current_msg->err = err; + conn->current_msg = NULL; + conn->state = NETCONN_NONE; +#if LWIP_TCPIP_CORE_LOCKING + if (delayed) +#endif + { + sys_sem_signal(op_completed_sem); + } + } +#if LWIP_TCPIP_CORE_LOCKING + else { + return ERR_MEM; + } +#endif + return ERR_OK; +} +#endif /* LWIP_TCP */ + +/** + * Send some data on a TCP pcb contained in a netconn + * Called from netconn_write + * + * @param m the api_msg pointing to the connection + */ +void +lwip_netconn_do_write(void *m) +{ + struct api_msg *msg = (struct api_msg *)m; + + err_t err = netconn_err(msg->conn); + if (err == ERR_OK) { + if (NETCONNTYPE_GROUP(msg->conn->type) == NETCONN_TCP) { +#if LWIP_TCP + if (msg->conn->state != NETCONN_NONE) { + /* netconn is connecting, closing or in blocking write */ + err = ERR_INPROGRESS; + } else if (msg->conn->pcb.tcp != NULL) { + msg->conn->state = NETCONN_WRITE; + /* set all the variables used by lwip_netconn_do_writemore */ + LWIP_ASSERT("already writing or closing", msg->conn->current_msg == NULL); + LWIP_ASSERT("msg->msg.w.len != 0", msg->msg.w.len != 0); + msg->conn->current_msg = msg; +#if LWIP_TCPIP_CORE_LOCKING + if (lwip_netconn_do_writemore(msg->conn, 0) != ERR_OK) { + LWIP_ASSERT("state!", msg->conn->state == NETCONN_WRITE); + UNLOCK_TCPIP_CORE(); + sys_arch_sem_wait(LWIP_API_MSG_SEM(msg), 0); + LOCK_TCPIP_CORE(); + LWIP_ASSERT("state!", msg->conn->state != NETCONN_WRITE); + } +#else /* LWIP_TCPIP_CORE_LOCKING */ + lwip_netconn_do_writemore(msg->conn); +#endif /* LWIP_TCPIP_CORE_LOCKING */ + /* for both cases: if lwip_netconn_do_writemore was called, don't ACK the APIMSG + since lwip_netconn_do_writemore ACKs it! 
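A sketch (assumed usage, not part of this diff) of how an application typically drives the partial-write handling in lwip_netconn_do_writemore():

/* Sketch only: writing a large buffer in chunks with netconn_write_partly(). */
#include "lwip/api.h"

static err_t example_write_all(struct netconn *conn, const void *data, size_t len)
{
  size_t written = 0;
  size_t chunk;
  err_t err;

  while (written < len) {
    chunk = 0;
    err = netconn_write_partly(conn, (const u8_t *)data + written,
                               len - written, NETCONN_COPY, &chunk);
    if (err != ERR_OK) {
      return err; /* e.g. ERR_WOULDBLOCK on a non-blocking netconn */
    }
    written += chunk;
  }
  return ERR_OK;
}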
*/ + return; + } else { + err = ERR_CONN; + } +#else /* LWIP_TCP */ + err = ERR_VAL; +#endif /* LWIP_TCP */ +#if (LWIP_UDP || LWIP_RAW) + } else { + err = ERR_VAL; +#endif /* (LWIP_UDP || LWIP_RAW) */ + } + } + msg->err = err; + TCPIP_APIMSG_ACK(msg); +} + +/** + * Return a connection's local or remote address + * Called from netconn_getaddr + * + * @param m the api_msg pointing to the connection + */ +void +lwip_netconn_do_getaddr(void *m) +{ + struct api_msg *msg = (struct api_msg *)m; + + if (msg->conn->pcb.ip != NULL) { + if (msg->msg.ad.local) { + ip_addr_copy(API_EXPR_DEREF(msg->msg.ad.ipaddr), + msg->conn->pcb.ip->local_ip); + } else { + ip_addr_copy(API_EXPR_DEREF(msg->msg.ad.ipaddr), + msg->conn->pcb.ip->remote_ip); + } + + msg->err = ERR_OK; + switch (NETCONNTYPE_GROUP(msg->conn->type)) { +#if LWIP_RAW + case NETCONN_RAW: + if (msg->msg.ad.local) { + API_EXPR_DEREF(msg->msg.ad.port) = msg->conn->pcb.raw->protocol; + } else { + /* return an error as connecting is only a helper for upper layers */ + msg->err = ERR_CONN; + } + break; +#endif /* LWIP_RAW */ +#if LWIP_UDP + case NETCONN_UDP: + if (msg->msg.ad.local) { + API_EXPR_DEREF(msg->msg.ad.port) = msg->conn->pcb.udp->local_port; + } else { + if ((msg->conn->pcb.udp->flags & UDP_FLAGS_CONNECTED) == 0) { + msg->err = ERR_CONN; + } else { + API_EXPR_DEREF(msg->msg.ad.port) = msg->conn->pcb.udp->remote_port; + } + } + break; +#endif /* LWIP_UDP */ +#if LWIP_TCP + case NETCONN_TCP: + if ((msg->msg.ad.local == 0) && + ((msg->conn->pcb.tcp->state == CLOSED) || (msg->conn->pcb.tcp->state == LISTEN))) { + /* pcb is not connected and remote name is requested */ + msg->err = ERR_CONN; + } else { + API_EXPR_DEREF(msg->msg.ad.port) = (msg->msg.ad.local ? msg->conn->pcb.tcp->local_port : msg->conn->pcb.tcp->remote_port); + } + break; +#endif /* LWIP_TCP */ + default: + LWIP_ASSERT("invalid netconn_type", 0); + break; + } + } else { + msg->err = ERR_CONN; + } + TCPIP_APIMSG_ACK(msg); +} + +/** + * Close or half-shutdown a TCP pcb contained in a netconn + * Called from netconn_close + * In contrast to closing sockets, the netconn is not deallocated. + * + * @param m the api_msg pointing to the connection + */ +void +lwip_netconn_do_close(void *m) +{ + struct api_msg *msg = (struct api_msg *)m; + +#if LWIP_TCP + enum netconn_state state = msg->conn->state; + /* First check if this is a TCP netconn and if it is in a correct state + (LISTEN doesn't support half shutdown) */ + if ((msg->conn->pcb.tcp != NULL) && + (NETCONNTYPE_GROUP(msg->conn->type) == NETCONN_TCP) && + ((msg->msg.sd.shut == NETCONN_SHUT_RDWR) || (state != NETCONN_LISTEN))) { + /* Check if we are in a connected state */ + if (state == NETCONN_CONNECT) { + /* TCP connect in progress: cannot shutdown */ + msg->err = ERR_CONN; + } else if (state == NETCONN_WRITE) { +#if LWIP_NETCONN_FULLDUPLEX + if (msg->msg.sd.shut & NETCONN_SHUT_WR) { + /* close requested, abort running write */ + sys_sem_t *write_completed_sem; + LWIP_ASSERT("msg->conn->current_msg != NULL", msg->conn->current_msg != NULL); + write_completed_sem = LWIP_API_MSG_SEM(msg->conn->current_msg); + msg->conn->current_msg->err = ERR_CLSD; + msg->conn->current_msg = NULL; + msg->conn->state = NETCONN_NONE; + state = NETCONN_NONE; + sys_sem_signal(write_completed_sem); + } else { + LWIP_ASSERT("msg->msg.sd.shut == NETCONN_SHUT_RD", msg->msg.sd.shut == NETCONN_SHUT_RD); + /* In this case, let the write continue and do not interfere with + conn->current_msg or conn->state! 
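A sketch (assumed usage, not part of this diff) of the netconn_addr()/netconn_peer() wrappers that end up in lwip_netconn_do_getaddr():

/* Sketch only: querying the local and remote endpoint of a netconn. */
#include "lwip/api.h"

static void example_getaddr(struct netconn *conn)
{
  ip_addr_t addr;
  u16_t port;

  if (netconn_addr(conn, &addr, &port) == ERR_OK) {
    /* local IP/port of the underlying pcb */
  }
  if (netconn_peer(conn, &addr, &port) == ERR_OK) {
    /* remote IP/port; ERR_CONN if the pcb is not connected */
  }
}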
*/ + msg->err = tcp_shutdown(msg->conn->pcb.tcp, 1, 0); + } + } + if (state == NETCONN_NONE) { +#else /* LWIP_NETCONN_FULLDUPLEX */ + msg->err = ERR_INPROGRESS; + } else { +#endif /* LWIP_NETCONN_FULLDUPLEX */ + if (msg->msg.sd.shut & NETCONN_SHUT_RD) { +#if LWIP_NETCONN_FULLDUPLEX + /* Mark mboxes invalid */ + netconn_mark_mbox_invalid(msg->conn); +#else /* LWIP_NETCONN_FULLDUPLEX */ + netconn_drain(msg->conn); +#endif /* LWIP_NETCONN_FULLDUPLEX */ + } + LWIP_ASSERT("already writing or closing", msg->conn->current_msg == NULL); + msg->conn->state = NETCONN_CLOSE; + msg->conn->current_msg = msg; +#if LWIP_TCPIP_CORE_LOCKING + if (lwip_netconn_do_close_internal(msg->conn, 0) != ERR_OK) { + LWIP_ASSERT("state!", msg->conn->state == NETCONN_CLOSE); + UNLOCK_TCPIP_CORE(); + sys_arch_sem_wait(LWIP_API_MSG_SEM(msg), 0); + LOCK_TCPIP_CORE(); + LWIP_ASSERT("state!", msg->conn->state == NETCONN_NONE); + } +#else /* LWIP_TCPIP_CORE_LOCKING */ + lwip_netconn_do_close_internal(msg->conn); +#endif /* LWIP_TCPIP_CORE_LOCKING */ + /* for tcp netconns, lwip_netconn_do_close_internal ACKs the message */ + return; + } + } else +#endif /* LWIP_TCP */ + { + msg->err = ERR_CONN; + } + TCPIP_APIMSG_ACK(msg); +} + +#if LWIP_IGMP || (LWIP_IPV6 && LWIP_IPV6_MLD) +/** + * Join multicast groups for UDP netconns. + * Called from netconn_join_leave_group + * + * @param m the api_msg pointing to the connection + */ +void +lwip_netconn_do_join_leave_group(void *m) +{ + struct api_msg *msg = (struct api_msg *)m; + + msg->err = ERR_CONN; + if (msg->conn->pcb.tcp != NULL) { + if (NETCONNTYPE_GROUP(msg->conn->type) == NETCONN_UDP) { +#if LWIP_UDP +#if LWIP_IPV6 && LWIP_IPV6_MLD + if (NETCONNTYPE_ISIPV6(msg->conn->type)) { + if (msg->msg.jl.join_or_leave == NETCONN_JOIN) { + msg->err = mld6_joingroup(ip_2_ip6(API_EXPR_REF(msg->msg.jl.netif_addr)), + ip_2_ip6(API_EXPR_REF(msg->msg.jl.multiaddr))); + } else { + msg->err = mld6_leavegroup(ip_2_ip6(API_EXPR_REF(msg->msg.jl.netif_addr)), + ip_2_ip6(API_EXPR_REF(msg->msg.jl.multiaddr))); + } + } else +#endif /* LWIP_IPV6 && LWIP_IPV6_MLD */ + { +#if LWIP_IGMP + if (msg->msg.jl.join_or_leave == NETCONN_JOIN) { + msg->err = igmp_joingroup(ip_2_ip4(API_EXPR_REF(msg->msg.jl.netif_addr)), + ip_2_ip4(API_EXPR_REF(msg->msg.jl.multiaddr))); + } else { + msg->err = igmp_leavegroup(ip_2_ip4(API_EXPR_REF(msg->msg.jl.netif_addr)), + ip_2_ip4(API_EXPR_REF(msg->msg.jl.multiaddr))); + } +#endif /* LWIP_IGMP */ + } +#endif /* LWIP_UDP */ +#if (LWIP_TCP || LWIP_RAW) + } else { + msg->err = ERR_VAL; +#endif /* (LWIP_TCP || LWIP_RAW) */ + } + } + TCPIP_APIMSG_ACK(msg); +} +/** + * Join multicast groups for UDP netconns. 
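A sketch (assumed usage, not part of this diff) of joining an IPv4 multicast group on a UDP netconn, which is dispatched to igmp_joingroup() above; the group address is a placeholder.

/* Sketch only: IGMP group join through the netconn API (requires LWIP_IGMP). */
#include "lwip/api.h"

static err_t example_join_group(struct netconn *udp)
{
  ip_addr_t group;

  IP_ADDR4(&group, 239, 0, 0, 1); /* placeholder multicast group */
  return netconn_join_leave_group(udp, &group, IP_ADDR_ANY, NETCONN_JOIN);
}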
+ * Called from netconn_join_leave_group_netif + * + * @param m the api_msg pointing to the connection + */ +void +lwip_netconn_do_join_leave_group_netif(void *m) +{ + struct api_msg *msg = (struct api_msg *)m; + struct netif *netif; + + netif = netif_get_by_index(msg->msg.jl.if_idx); + if (netif == NULL) { + msg->err = ERR_IF; + goto done; + } + + msg->err = ERR_CONN; + if (msg->conn->pcb.tcp != NULL) { + if (NETCONNTYPE_GROUP(msg->conn->type) == NETCONN_UDP) { +#if LWIP_UDP +#if LWIP_IPV6 && LWIP_IPV6_MLD + if (NETCONNTYPE_ISIPV6(msg->conn->type)) { + if (msg->msg.jl.join_or_leave == NETCONN_JOIN) { + msg->err = mld6_joingroup_netif(netif, + ip_2_ip6(API_EXPR_REF(msg->msg.jl.multiaddr))); + } else { + msg->err = mld6_leavegroup_netif(netif, + ip_2_ip6(API_EXPR_REF(msg->msg.jl.multiaddr))); + } + } else +#endif /* LWIP_IPV6 && LWIP_IPV6_MLD */ + { +#if LWIP_IGMP + if (msg->msg.jl.join_or_leave == NETCONN_JOIN) { + msg->err = igmp_joingroup_netif(netif, + ip_2_ip4(API_EXPR_REF(msg->msg.jl.multiaddr))); + } else { + msg->err = igmp_leavegroup_netif(netif, + ip_2_ip4(API_EXPR_REF(msg->msg.jl.multiaddr))); + } +#endif /* LWIP_IGMP */ + } +#endif /* LWIP_UDP */ +#if (LWIP_TCP || LWIP_RAW) + } else { + msg->err = ERR_VAL; +#endif /* (LWIP_TCP || LWIP_RAW) */ + } + } + +done: + TCPIP_APIMSG_ACK(msg); +} +#endif /* LWIP_IGMP || (LWIP_IPV6 && LWIP_IPV6_MLD) */ + +#if LWIP_DNS +/** + * Callback function that is called when DNS name is resolved + * (or on timeout). A waiting application thread is waked up by + * signaling the semaphore. + */ +static void +lwip_netconn_do_dns_found(const char *name, const ip_addr_t *ipaddr, void *arg) +{ + struct dns_api_msg *msg = (struct dns_api_msg *)arg; + + /* we trust the internal implementation to be correct :-) */ + LWIP_UNUSED_ARG(name); + + if (ipaddr == NULL) { + /* timeout or memory error */ + API_EXPR_DEREF(msg->err) = ERR_VAL; + } else { + /* address was resolved */ + API_EXPR_DEREF(msg->err) = ERR_OK; + API_EXPR_DEREF(msg->addr) = *ipaddr; + } + /* wake up the application task waiting in netconn_gethostbyname */ + sys_sem_signal(API_EXPR_REF_SEM(msg->sem)); +} + +/** + * Execute a DNS query + * Called from netconn_gethostbyname + * + * @param arg the dns_api_msg pointing to the query + */ +void +lwip_netconn_do_gethostbyname(void *arg) +{ + struct dns_api_msg *msg = (struct dns_api_msg *)arg; + u8_t addrtype = +#if LWIP_IPV4 && LWIP_IPV6 + msg->dns_addrtype; +#else + LWIP_DNS_ADDRTYPE_DEFAULT; +#endif + + API_EXPR_DEREF(msg->err) = dns_gethostbyname_addrtype(msg->name, + API_EXPR_REF(msg->addr), lwip_netconn_do_dns_found, msg, addrtype); +#if LWIP_TCPIP_CORE_LOCKING + /* For core locking, only block if we need to wait for answer/timeout */ + if (API_EXPR_DEREF(msg->err) == ERR_INPROGRESS) { + UNLOCK_TCPIP_CORE(); + sys_sem_wait(API_EXPR_REF_SEM(msg->sem)); + LOCK_TCPIP_CORE(); + LWIP_ASSERT("do_gethostbyname still in progress!!", API_EXPR_DEREF(msg->err) != ERR_INPROGRESS); + } +#else /* LWIP_TCPIP_CORE_LOCKING */ + if (API_EXPR_DEREF(msg->err) != ERR_INPROGRESS) { + /* on error or immediate success, wake up the application + * task waiting in netconn_gethostbyname */ + sys_sem_signal(API_EXPR_REF_SEM(msg->sem)); + } +#endif /* LWIP_TCPIP_CORE_LOCKING */ +} +#endif /* LWIP_DNS */ + +#endif /* LWIP_NETCONN */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/api/err.c b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/api/err.c new file mode 100644 index 0000000..f2d7180 
--- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/api/err.c @@ -0,0 +1,115 @@ +/** + * @file + * Error Management module + * + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ + +#include "lwip/err.h" +#include "lwip/def.h" +#include "lwip/sys.h" + +#include "lwip/errno.h" + +#if !NO_SYS +/** Table to quickly map an lwIP error (err_t) to a socket error + * by using -err as an index */ +static const int err_to_errno_table[] = { + 0, /* ERR_OK 0 No error, everything OK. */ + ENOMEM, /* ERR_MEM -1 Out of memory error. */ + ENOBUFS, /* ERR_BUF -2 Buffer error. */ + EWOULDBLOCK, /* ERR_TIMEOUT -3 Timeout */ + EHOSTUNREACH, /* ERR_RTE -4 Routing problem. */ + EINPROGRESS, /* ERR_INPROGRESS -5 Operation in progress */ + EINVAL, /* ERR_VAL -6 Illegal value. */ + EWOULDBLOCK, /* ERR_WOULDBLOCK -7 Operation would block. */ + EADDRINUSE, /* ERR_USE -8 Address in use. */ + EALREADY, /* ERR_ALREADY -9 Already connecting. */ + EISCONN, /* ERR_ISCONN -10 Conn already established.*/ + ENOTCONN, /* ERR_CONN -11 Not connected. */ + -1, /* ERR_IF -12 Low-level netif error */ + ECONNABORTED, /* ERR_ABRT -13 Connection aborted. */ + ECONNRESET, /* ERR_RST -14 Connection reset. */ + ENOTCONN, /* ERR_CLSD -15 Connection closed. */ + EIO /* ERR_ARG -16 Illegal argument. 
*/ +}; + +int +err_to_errno(err_t err) +{ + if ((err > 0) || (-err >= (err_t)LWIP_ARRAYSIZE(err_to_errno_table))) { + return EIO; + } + return err_to_errno_table[-err]; +} +#endif /* !NO_SYS */ + +#ifdef LWIP_DEBUG + +static const char *err_strerr[] = { + "Ok.", /* ERR_OK 0 */ + "Out of memory error.", /* ERR_MEM -1 */ + "Buffer error.", /* ERR_BUF -2 */ + "Timeout.", /* ERR_TIMEOUT -3 */ + "Routing problem.", /* ERR_RTE -4 */ + "Operation in progress.", /* ERR_INPROGRESS -5 */ + "Illegal value.", /* ERR_VAL -6 */ + "Operation would block.", /* ERR_WOULDBLOCK -7 */ + "Address in use.", /* ERR_USE -8 */ + "Already connecting.", /* ERR_ALREADY -9 */ + "Already connected.", /* ERR_ISCONN -10 */ + "Not connected.", /* ERR_CONN -11 */ + "Low-level netif error.", /* ERR_IF -12 */ + "Connection aborted.", /* ERR_ABRT -13 */ + "Connection reset.", /* ERR_RST -14 */ + "Connection closed.", /* ERR_CLSD -15 */ + "Illegal argument." /* ERR_ARG -16 */ +}; + +/** + * Convert an lwip internal error to a string representation. + * + * @param err an lwip internal err_t + * @return a string representation for err + */ +const char * +lwip_strerr(err_t err) +{ + if ((err > 0) || (-err >= (err_t)LWIP_ARRAYSIZE(err_strerr))) { + return "Unknown error."; + } + return err_strerr[-err]; +} + +#endif /* LWIP_DEBUG */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/api/if_api.c b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/api/if_api.c new file mode 100644 index 0000000..2eda9f0 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/api/if_api.c @@ -0,0 +1,102 @@ +/** + * @file + * Interface Identification APIs from: + * RFC 3493: Basic Socket Interface Extensions for IPv6 + * Section 4: Interface Identification + * + * @defgroup if_api Interface Identification API + * @ingroup socket + */ + +/* + * Copyright (c) 2017 Joel Cunningham, Garmin International, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
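A sketch (assumed usage, not part of this diff) of how callers translate err_t values with the two helpers above:

/* Sketch only: mapping an lwIP error to a socket errno and a debug string. */
#include "lwip/err.h"

static int example_report(err_t err)
{
#ifdef LWIP_DEBUG
  const char *msg = lwip_strerr(err); /* e.g. "Out of memory error." for ERR_MEM */
  (void)msg;
#endif
  return err_to_errno(err);           /* e.g. ERR_MEM -> ENOMEM (needs !NO_SYS) */
}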
+ * + * Author: Joel Cunningham + * + */ +#include "lwip/opt.h" + +#if LWIP_SOCKET + +#include "lwip/errno.h" +#include "lwip/if_api.h" +#include "lwip/netifapi.h" +#include "lwip/priv/sockets_priv.h" + +/** + * @ingroup if_api + * Maps an interface index to its corresponding name. + * @param ifindex interface index + * @param ifname shall point to a buffer of at least {IF_NAMESIZE} bytes + * @return If ifindex is an interface index, then the function shall return the + * value supplied in ifname, which points to a buffer now containing the interface name. + * Otherwise, the function shall return a NULL pointer. + */ +char * +lwip_if_indextoname(unsigned int ifindex, char *ifname) +{ +#if LWIP_NETIF_API + if (ifindex <= 0xff) { + err_t err = netifapi_netif_index_to_name((u8_t)ifindex, ifname); + if (!err && ifname[0] != '\0') { + return ifname; + } + } +#else /* LWIP_NETIF_API */ + LWIP_UNUSED_ARG(ifindex); + LWIP_UNUSED_ARG(ifname); +#endif /* LWIP_NETIF_API */ + set_errno(ENXIO); + return NULL; +} + +/** + * @ingroup if_api + * Returs the interface index corresponding to name ifname. + * @param ifname Interface name + * @return The corresponding index if ifname is the name of an interface; + * otherwise, zero. + */ +unsigned int +lwip_if_nametoindex(const char *ifname) +{ +#if LWIP_NETIF_API + err_t err; + u8_t idx; + + err = netifapi_netif_name_to_index(ifname, &idx); + if (!err) { + return idx; + } +#else /* LWIP_NETIF_API */ + LWIP_UNUSED_ARG(ifname); +#endif /* LWIP_NETIF_API */ + return 0; /* invalid index */ +} + +#endif /* LWIP_SOCKET */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/api/netbuf.c b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/api/netbuf.c new file mode 100644 index 0000000..b464440 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/api/netbuf.c @@ -0,0 +1,250 @@ +/** + * @file + * Network buffer management + * + * @defgroup netbuf Network buffers + * @ingroup netconn + * Network buffer descriptor for @ref netconn. Based on @ref pbuf internally + * to avoid copying data around.\n + * Buffers must not be shared accross multiple threads, all functions except + * netbuf_new() and netbuf_delete() are not thread-safe. + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
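A sketch (assumed usage, not part of this diff) of the index/name round trip provided by the two functions above; IF_NAMESIZE is assumed to be provided by lwip/if_api.h.

/* Sketch only: netif index <-> name lookup (requires LWIP_SOCKET/LWIP_NETIF_API). */
#include "lwip/if_api.h"

static void example_if_lookup(void)
{
  char name[IF_NAMESIZE];
  unsigned int idx;

  if (lwip_if_indextoname(1, name) != NULL) { /* index 1: first netif */
    idx = lwip_if_nametoindex(name);          /* should round-trip to 1 */
    (void)idx;
  }
}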
IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ + +#include "lwip/opt.h" + +#if LWIP_NETCONN /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/netbuf.h" +#include "lwip/memp.h" + +#include + +/** + * @ingroup netbuf + * Create (allocate) and initialize a new netbuf. + * The netbuf doesn't yet contain a packet buffer! + * + * @return a pointer to a new netbuf + * NULL on lack of memory + */ +struct +netbuf *netbuf_new(void) +{ + struct netbuf *buf; + + buf = (struct netbuf *)memp_malloc(MEMP_NETBUF); + if (buf != NULL) { + memset(buf, 0, sizeof(struct netbuf)); + } + return buf; +} + +/** + * @ingroup netbuf + * Deallocate a netbuf allocated by netbuf_new(). + * + * @param buf pointer to a netbuf allocated by netbuf_new() + */ +void +netbuf_delete(struct netbuf *buf) +{ + if (buf != NULL) { + if (buf->p != NULL) { + pbuf_free(buf->p); + buf->p = buf->ptr = NULL; + } + memp_free(MEMP_NETBUF, buf); + } +} + +/** + * @ingroup netbuf + * Allocate memory for a packet buffer for a given netbuf. + * + * @param buf the netbuf for which to allocate a packet buffer + * @param size the size of the packet buffer to allocate + * @return pointer to the allocated memory + * NULL if no memory could be allocated + */ +void * +netbuf_alloc(struct netbuf *buf, u16_t size) +{ + LWIP_ERROR("netbuf_alloc: invalid buf", (buf != NULL), return NULL;); + + /* Deallocate any previously allocated memory. */ + if (buf->p != NULL) { + pbuf_free(buf->p); + } + buf->p = pbuf_alloc(PBUF_TRANSPORT, size, PBUF_RAM); + if (buf->p == NULL) { + return NULL; + } + LWIP_ASSERT("check that first pbuf can hold size", + (buf->p->len >= size)); + buf->ptr = buf->p; + return buf->p->payload; +} + +/** + * @ingroup netbuf + * Free the packet buffer included in a netbuf + * + * @param buf pointer to the netbuf which contains the packet buffer to free + */ +void +netbuf_free(struct netbuf *buf) +{ + LWIP_ERROR("netbuf_free: invalid buf", (buf != NULL), return;); + if (buf->p != NULL) { + pbuf_free(buf->p); + } + buf->p = buf->ptr = NULL; +#if LWIP_CHECKSUM_ON_COPY + buf->flags = 0; + buf->toport_chksum = 0; +#endif /* LWIP_CHECKSUM_ON_COPY */ +} + +/** + * @ingroup netbuf + * Let a netbuf reference existing (non-volatile) data. 
+ * + * @param buf netbuf which should reference the data + * @param dataptr pointer to the data to reference + * @param size size of the data + * @return ERR_OK if data is referenced + * ERR_MEM if data couldn't be referenced due to lack of memory + */ +err_t +netbuf_ref(struct netbuf *buf, const void *dataptr, u16_t size) +{ + LWIP_ERROR("netbuf_ref: invalid buf", (buf != NULL), return ERR_ARG;); + if (buf->p != NULL) { + pbuf_free(buf->p); + } + buf->p = pbuf_alloc(PBUF_TRANSPORT, 0, PBUF_REF); + if (buf->p == NULL) { + buf->ptr = NULL; + return ERR_MEM; + } + ((struct pbuf_rom *)buf->p)->payload = dataptr; + buf->p->len = buf->p->tot_len = size; + buf->ptr = buf->p; + return ERR_OK; +} + +/** + * @ingroup netbuf + * Chain one netbuf to another (@see pbuf_chain) + * + * @param head the first netbuf + * @param tail netbuf to chain after head, freed by this function, may not be reference after returning + */ +void +netbuf_chain(struct netbuf *head, struct netbuf *tail) +{ + LWIP_ERROR("netbuf_chain: invalid head", (head != NULL), return;); + LWIP_ERROR("netbuf_chain: invalid tail", (tail != NULL), return;); + pbuf_cat(head->p, tail->p); + head->ptr = head->p; + memp_free(MEMP_NETBUF, tail); +} + +/** + * @ingroup netbuf + * Get the data pointer and length of the data inside a netbuf. + * + * @param buf netbuf to get the data from + * @param dataptr pointer to a void pointer where to store the data pointer + * @param len pointer to an u16_t where the length of the data is stored + * @return ERR_OK if the information was retrieved, + * ERR_BUF on error. + */ +err_t +netbuf_data(struct netbuf *buf, void **dataptr, u16_t *len) +{ + LWIP_ERROR("netbuf_data: invalid buf", (buf != NULL), return ERR_ARG;); + LWIP_ERROR("netbuf_data: invalid dataptr", (dataptr != NULL), return ERR_ARG;); + LWIP_ERROR("netbuf_data: invalid len", (len != NULL), return ERR_ARG;); + + if (buf->ptr == NULL) { + return ERR_BUF; + } + *dataptr = buf->ptr->payload; + *len = buf->ptr->len; + return ERR_OK; +} + +/** + * @ingroup netbuf + * Move the current data pointer of a packet buffer contained in a netbuf + * to the next part. + * The packet buffer itself is not modified. + * + * @param buf the netbuf to modify + * @return -1 if there is no next part + * 1 if moved to the next part but now there is no next part + * 0 if moved to the next part and there are still more parts + */ +s8_t +netbuf_next(struct netbuf *buf) +{ + LWIP_ERROR("netbuf_next: invalid buf", (buf != NULL), return -1;); + if (buf->ptr->next == NULL) { + return -1; + } + buf->ptr = buf->ptr->next; + if (buf->ptr->next == NULL) { + return 1; + } + return 0; +} + +/** + * @ingroup netbuf + * Move the current data pointer of a packet buffer contained in a netbuf + * to the beginning of the packet. + * The packet buffer itself is not modified. 
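A sketch (assumed usage, not part of this diff) showing how netbuf_first()/netbuf_data()/netbuf_next() are combined to walk a fragmented netbuf:

/* Sketch only: iterating over every pbuf fragment referenced by a netbuf. */
#include "lwip/netbuf.h"

static u16_t example_netbuf_total(struct netbuf *buf)
{
  void *data;
  u16_t len;
  u16_t total = 0;

  netbuf_first(buf);                 /* rewind to the first fragment */
  do {
    if (netbuf_data(buf, &data, &len) == ERR_OK) {
      total += len;                  /* process data/len here */
    }
  } while (netbuf_next(buf) >= 0);   /* -1 means there is no next fragment */
  return total;
}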
+ * + * @param buf the netbuf to modify + */ +void +netbuf_first(struct netbuf *buf) +{ + LWIP_ERROR("netbuf_first: invalid buf", (buf != NULL), return;); + buf->ptr = buf->p; +} + +#endif /* LWIP_NETCONN */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/api/netdb.c b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/api/netdb.c new file mode 100644 index 0000000..9ffc75a --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/api/netdb.c @@ -0,0 +1,414 @@ +/** + * @file + * API functions for name resolving + * + * @defgroup netdbapi NETDB API + * @ingroup socket + */ + +/* + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Simon Goldschmidt + * + */ + +#include "lwip/netdb.h" + +#if LWIP_DNS && LWIP_SOCKET + +#include "lwip/err.h" +#include "lwip/mem.h" +#include "lwip/memp.h" +#include "lwip/ip_addr.h" +#include "lwip/api.h" +#include "lwip/dns.h" + +#include /* memset */ +#include /* atoi */ + +/** helper struct for gethostbyname_r to access the char* buffer */ +struct gethostbyname_r_helper { + ip_addr_t *addr_list[2]; + ip_addr_t addr; + char *aliases; +}; + +/** h_errno is exported in netdb.h for access by applications. */ +#if LWIP_DNS_API_DECLARE_H_ERRNO +int h_errno; +#endif /* LWIP_DNS_API_DECLARE_H_ERRNO */ + +/** define "hostent" variables storage: 0 if we use a static (but unprotected) + * set of variables for lwip_gethostbyname, 1 if we use a local storage */ +#ifndef LWIP_DNS_API_HOSTENT_STORAGE +#define LWIP_DNS_API_HOSTENT_STORAGE 0 +#endif + +/** define "hostent" variables storage */ +#if LWIP_DNS_API_HOSTENT_STORAGE +#define HOSTENT_STORAGE +#else +#define HOSTENT_STORAGE static +#endif /* LWIP_DNS_API_STATIC_HOSTENT */ + +/** + * Returns an entry containing addresses of address family AF_INET + * for the host with name name. + * Due to dns_gethostbyname limitations, only one address is returned. 
+ * + * @param name the hostname to resolve + * @return an entry containing addresses of address family AF_INET + * for the host with name name + */ +struct hostent * +lwip_gethostbyname(const char *name) +{ + err_t err; + ip_addr_t addr; + + /* buffer variables for lwip_gethostbyname() */ + HOSTENT_STORAGE struct hostent s_hostent; + HOSTENT_STORAGE char *s_aliases; + HOSTENT_STORAGE ip_addr_t s_hostent_addr; + HOSTENT_STORAGE ip_addr_t *s_phostent_addr[2]; + HOSTENT_STORAGE char s_hostname[DNS_MAX_NAME_LENGTH + 1]; + + /* query host IP address */ + err = netconn_gethostbyname(name, &addr); + if (err != ERR_OK) { + LWIP_DEBUGF(DNS_DEBUG, ("lwip_gethostbyname(%s) failed, err=%d\n", name, err)); + h_errno = HOST_NOT_FOUND; + return NULL; + } + + /* fill hostent */ + s_hostent_addr = addr; + s_phostent_addr[0] = &s_hostent_addr; + s_phostent_addr[1] = NULL; + strncpy(s_hostname, name, DNS_MAX_NAME_LENGTH); + s_hostname[DNS_MAX_NAME_LENGTH] = 0; + s_hostent.h_name = s_hostname; + s_aliases = NULL; + s_hostent.h_aliases = &s_aliases; + s_hostent.h_addrtype = AF_INET; + s_hostent.h_length = sizeof(ip_addr_t); + s_hostent.h_addr_list = (char **)&s_phostent_addr; + +#if DNS_DEBUG + /* dump hostent */ + LWIP_DEBUGF(DNS_DEBUG, ("hostent.h_name == %s\n", s_hostent.h_name)); + LWIP_DEBUGF(DNS_DEBUG, ("hostent.h_aliases == %p\n", (void *)s_hostent.h_aliases)); + /* h_aliases are always empty */ + LWIP_DEBUGF(DNS_DEBUG, ("hostent.h_addrtype == %d\n", s_hostent.h_addrtype)); + LWIP_DEBUGF(DNS_DEBUG, ("hostent.h_length == %d\n", s_hostent.h_length)); + LWIP_DEBUGF(DNS_DEBUG, ("hostent.h_addr_list == %p\n", (void *)s_hostent.h_addr_list)); + if (s_hostent.h_addr_list != NULL) { + u8_t idx; + for (idx = 0; s_hostent.h_addr_list[idx]; idx++) { + LWIP_DEBUGF(DNS_DEBUG, ("hostent.h_addr_list[%i] == %p\n", idx, s_hostent.h_addr_list[idx])); + LWIP_DEBUGF(DNS_DEBUG, ("hostent.h_addr_list[%i]-> == %s\n", idx, ipaddr_ntoa((ip_addr_t *)s_hostent.h_addr_list[idx]))); + } + } +#endif /* DNS_DEBUG */ + +#if LWIP_DNS_API_HOSTENT_STORAGE + /* this function should return the "per-thread" hostent after copy from s_hostent */ + return sys_thread_hostent(&s_hostent); +#else + return &s_hostent; +#endif /* LWIP_DNS_API_HOSTENT_STORAGE */ +} + +/** + * Thread-safe variant of lwip_gethostbyname: instead of using a static + * buffer, this function takes buffer and errno pointers as arguments + * and uses these for the result. 
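
/* Usage sketch (illustrative, not from the lwIP sources): resolving a host
 * name with lwip_gethostbyname(). The name is a placeholder. The result
 * points to static storage unless LWIP_DNS_API_HOSTENT_STORAGE is enabled,
 * so the reentrant lwip_gethostbyname_r() below is preferable when several
 * threads resolve names. */
#include "lwip/netdb.h"
#include "lwip/ip_addr.h"

static void
example_resolve(void)
{
  struct hostent *he = lwip_gethostbyname("example.local"); /* placeholder name */
  if (he != NULL) {
    /* only one address is returned; it is stored as an ip_addr_t */
    ip_addr_t resolved = *(ip_addr_t *)he->h_addr_list[0];
    (void)resolved; /* e.g. pass it to netconn_connect() or lwip_connect() */
  }
}
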
+ * + * @param name the hostname to resolve + * @param ret pre-allocated struct where to store the result + * @param buf pre-allocated buffer where to store additional data + * @param buflen the size of buf + * @param result pointer to a hostent pointer that is set to ret on success + * and set to zero on error + * @param h_errnop pointer to an int where to store errors (instead of modifying + * the global h_errno) + * @return 0 on success, non-zero on error, additional error information + * is stored in *h_errnop instead of h_errno to be thread-safe + */ +int +lwip_gethostbyname_r(const char *name, struct hostent *ret, char *buf, + size_t buflen, struct hostent **result, int *h_errnop) +{ + err_t err; + struct gethostbyname_r_helper *h; + char *hostname; + size_t namelen; + int lh_errno; + + if (h_errnop == NULL) { + /* ensure h_errnop is never NULL */ + h_errnop = &lh_errno; + } + + if (result == NULL) { + /* not all arguments given */ + *h_errnop = EINVAL; + return -1; + } + /* first thing to do: set *result to nothing */ + *result = NULL; + if ((name == NULL) || (ret == NULL) || (buf == NULL)) { + /* not all arguments given */ + *h_errnop = EINVAL; + return -1; + } + + namelen = strlen(name); + if (buflen < (sizeof(struct gethostbyname_r_helper) + LWIP_MEM_ALIGN_BUFFER(namelen + 1))) { + /* buf can't hold the data needed + a copy of name */ + *h_errnop = ERANGE; + return -1; + } + + h = (struct gethostbyname_r_helper *)LWIP_MEM_ALIGN(buf); + hostname = ((char *)h) + sizeof(struct gethostbyname_r_helper); + + /* query host IP address */ + err = netconn_gethostbyname(name, &h->addr); + if (err != ERR_OK) { + LWIP_DEBUGF(DNS_DEBUG, ("lwip_gethostbyname(%s) failed, err=%d\n", name, err)); + *h_errnop = HOST_NOT_FOUND; + return -1; + } + + /* copy the hostname into buf */ + MEMCPY(hostname, name, namelen); + hostname[namelen] = 0; + + /* fill hostent */ + h->addr_list[0] = &h->addr; + h->addr_list[1] = NULL; + h->aliases = NULL; + ret->h_name = hostname; + ret->h_aliases = &h->aliases; + ret->h_addrtype = AF_INET; + ret->h_length = sizeof(ip_addr_t); + ret->h_addr_list = (char **)&h->addr_list; + + /* set result != NULL */ + *result = ret; + + /* return success */ + return 0; +} + +/** + * Frees one or more addrinfo structures returned by getaddrinfo(), along with + * any additional storage associated with those structures. If the ai_next field + * of the structure is not null, the entire list of structures is freed. + * + * @param ai struct addrinfo to free + */ +void +lwip_freeaddrinfo(struct addrinfo *ai) +{ + struct addrinfo *next; + + while (ai != NULL) { + next = ai->ai_next; + memp_free(MEMP_NETDB, ai); + ai = next; + } +} + +/** + * Translates the name of a service location (for example, a host name) and/or + * a service name and returns a set of socket addresses and associated + * information to be used in creating a socket with which to address the + * specified service. + * Memory for the result is allocated internally and must be freed by calling + * lwip_freeaddrinfo()! + * + * Due to a limitation in dns_gethostbyname, only the first address of a + * host is returned. + * Also, service names are not supported (only port numbers)! 
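
/* Usage sketch (illustrative, not from the lwIP sources): the reentrant
 * variant with a caller-provided buffer. The buffer must hold the internal
 * helper struct plus an aligned copy of the name; 200 bytes is an arbitrary
 * example size. */
#include "lwip/netdb.h"

static void
example_resolve_r(const char *name)
{
  struct hostent he;
  struct hostent *res = NULL;
  char buf[200];
  int local_errno;

  if (lwip_gethostbyname_r(name, &he, buf, sizeof(buf), &res, &local_errno) == 0) {
    /* success: 'res' points to 'he' and the address data lives inside 'buf' */
  } else {
    /* failure: inspect 'local_errno' (e.g. HOST_NOT_FOUND or ERANGE) */
  }
}
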
+ * + * @param nodename descriptive name or address string of the host + * (may be NULL -> local address) + * @param servname port number as string of NULL + * @param hints structure containing input values that set socktype and protocol + * @param res pointer to a pointer where to store the result (set to NULL on failure) + * @return 0 on success, non-zero on failure + * + * @todo: implement AI_V4MAPPED, AI_ADDRCONFIG + */ +int +lwip_getaddrinfo(const char *nodename, const char *servname, + const struct addrinfo *hints, struct addrinfo **res) +{ + err_t err; + ip_addr_t addr; + struct addrinfo *ai; + struct sockaddr_storage *sa = NULL; + int port_nr = 0; + size_t total_size; + size_t namelen = 0; + int ai_family; + + if (res == NULL) { + return EAI_FAIL; + } + *res = NULL; + if ((nodename == NULL) && (servname == NULL)) { + return EAI_NONAME; + } + + if (hints != NULL) { + ai_family = hints->ai_family; + if ((ai_family != AF_UNSPEC) +#if LWIP_IPV4 + && (ai_family != AF_INET) +#endif /* LWIP_IPV4 */ +#if LWIP_IPV6 + && (ai_family != AF_INET6) +#endif /* LWIP_IPV6 */ + ) { + return EAI_FAMILY; + } + } else { + ai_family = AF_UNSPEC; + } + + if (servname != NULL) { + /* service name specified: convert to port number + * @todo?: currently, only ASCII integers (port numbers) are supported (AI_NUMERICSERV)! */ + port_nr = atoi(servname); + if ((port_nr <= 0) || (port_nr > 0xffff)) { + return EAI_SERVICE; + } + } + + if (nodename != NULL) { + /* service location specified, try to resolve */ + if ((hints != NULL) && (hints->ai_flags & AI_NUMERICHOST)) { + /* no DNS lookup, just parse for an address string */ + if (!ipaddr_aton(nodename, &addr)) { + return EAI_NONAME; + } +#if LWIP_IPV4 && LWIP_IPV6 + if ((IP_IS_V6_VAL(addr) && ai_family == AF_INET) || + (IP_IS_V4_VAL(addr) && ai_family == AF_INET6)) { + return EAI_NONAME; + } +#endif /* LWIP_IPV4 && LWIP_IPV6 */ + } else { +#if LWIP_IPV4 && LWIP_IPV6 + /* AF_UNSPEC: prefer IPv4 */ + u8_t type = NETCONN_DNS_IPV4_IPV6; + if (ai_family == AF_INET) { + type = NETCONN_DNS_IPV4; + } else if (ai_family == AF_INET6) { + type = NETCONN_DNS_IPV6; + } +#endif /* LWIP_IPV4 && LWIP_IPV6 */ + err = netconn_gethostbyname_addrtype(nodename, &addr, type); + if (err != ERR_OK) { + return EAI_FAIL; + } + } + } else { + /* service location specified, use loopback address */ + if ((hints != NULL) && (hints->ai_flags & AI_PASSIVE)) { + ip_addr_set_any_val(ai_family == AF_INET6, addr); + } else { + ip_addr_set_loopback_val(ai_family == AF_INET6, addr); + } + } + + total_size = sizeof(struct addrinfo) + sizeof(struct sockaddr_storage); + if (nodename != NULL) { + namelen = strlen(nodename); + if (namelen > DNS_MAX_NAME_LENGTH) { + /* invalid name length */ + return EAI_FAIL; + } + LWIP_ASSERT("namelen is too long", total_size + namelen + 1 > total_size); + total_size += namelen + 1; + } + /* If this fails, please report to lwip-devel! 
:-) */ + LWIP_ASSERT("total_size <= NETDB_ELEM_SIZE: please report this!", + total_size <= NETDB_ELEM_SIZE); + ai = (struct addrinfo *)memp_malloc(MEMP_NETDB); + if (ai == NULL) { + return EAI_MEMORY; + } + memset(ai, 0, total_size); + /* cast through void* to get rid of alignment warnings */ + sa = (struct sockaddr_storage *)(void *)((u8_t *)ai + sizeof(struct addrinfo)); + if (IP_IS_V6_VAL(addr)) { +#if LWIP_IPV6 + struct sockaddr_in6 *sa6 = (struct sockaddr_in6 *)sa; + /* set up sockaddr */ + inet6_addr_from_ip6addr(&sa6->sin6_addr, ip_2_ip6(&addr)); + sa6->sin6_family = AF_INET6; + sa6->sin6_len = sizeof(struct sockaddr_in6); + sa6->sin6_port = lwip_htons((u16_t)port_nr); + sa6->sin6_scope_id = ip6_addr_zone(ip_2_ip6(&addr)); + ai->ai_family = AF_INET6; +#endif /* LWIP_IPV6 */ + } else { +#if LWIP_IPV4 + struct sockaddr_in *sa4 = (struct sockaddr_in *)sa; + /* set up sockaddr */ + inet_addr_from_ip4addr(&sa4->sin_addr, ip_2_ip4(&addr)); + sa4->sin_family = AF_INET; + sa4->sin_len = sizeof(struct sockaddr_in); + sa4->sin_port = lwip_htons((u16_t)port_nr); + ai->ai_family = AF_INET; +#endif /* LWIP_IPV4 */ + } + + /* set up addrinfo */ + if (hints != NULL) { + /* copy socktype & protocol from hints if specified */ + ai->ai_socktype = hints->ai_socktype; + ai->ai_protocol = hints->ai_protocol; + } + if (nodename != NULL) { + /* copy nodename to canonname if specified */ + ai->ai_canonname = ((char *)ai + sizeof(struct addrinfo) + sizeof(struct sockaddr_storage)); + MEMCPY(ai->ai_canonname, nodename, namelen); + ai->ai_canonname[namelen] = 0; + } + ai->ai_addrlen = sizeof(struct sockaddr_storage); + ai->ai_addr = (struct sockaddr *)sa; + + *res = ai; + + return 0; +} + +#endif /* LWIP_DNS && LWIP_SOCKET */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/api/netifapi.c b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/api/netifapi.c new file mode 100644 index 0000000..7c2011f --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/api/netifapi.c @@ -0,0 +1,380 @@ +/** + * @file + * Network Interface Sequential API module + * + * @defgroup netifapi NETIF API + * @ingroup sequential_api + * Thread-safe functions to be called from non-TCPIP threads + * + * @defgroup netifapi_netif NETIF related + * @ingroup netifapi + * To be called from non-TCPIP threads + */ + +/* + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + */ + +#include "lwip/opt.h" + +#if LWIP_NETIF_API /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/etharp.h" +#include "lwip/netifapi.h" +#include "lwip/memp.h" +#include "lwip/priv/tcpip_priv.h" + +#include /* strncpy */ + +#define NETIFAPI_VAR_REF(name) API_VAR_REF(name) +#define NETIFAPI_VAR_DECLARE(name) API_VAR_DECLARE(struct netifapi_msg, name) +#define NETIFAPI_VAR_ALLOC(name) API_VAR_ALLOC(struct netifapi_msg, MEMP_NETIFAPI_MSG, name, ERR_MEM) +#define NETIFAPI_VAR_FREE(name) API_VAR_FREE(MEMP_NETIFAPI_MSG, name) + +/** + * Call netif_add() inside the tcpip_thread context. + */ +static err_t +netifapi_do_netif_add(struct tcpip_api_call_data *m) +{ + /* cast through void* to silence alignment warnings. + * We know it works because the structs have been instantiated as struct netifapi_msg */ + struct netifapi_msg *msg = (struct netifapi_msg *)(void *)m; + + if (!netif_add( msg->netif, +#if LWIP_IPV4 + API_EXPR_REF(msg->msg.add.ipaddr), + API_EXPR_REF(msg->msg.add.netmask), + API_EXPR_REF(msg->msg.add.gw), +#endif /* LWIP_IPV4 */ + msg->msg.add.state, + msg->msg.add.init, + msg->msg.add.input)) { + return ERR_IF; + } else { + return ERR_OK; + } +} + +#if LWIP_IPV4 +/** + * Call netif_set_addr() inside the tcpip_thread context. + */ +static err_t +netifapi_do_netif_set_addr(struct tcpip_api_call_data *m) +{ + /* cast through void* to silence alignment warnings. + * We know it works because the structs have been instantiated as struct netifapi_msg */ + struct netifapi_msg *msg = (struct netifapi_msg *)(void *)m; + + netif_set_addr( msg->netif, + API_EXPR_REF(msg->msg.add.ipaddr), + API_EXPR_REF(msg->msg.add.netmask), + API_EXPR_REF(msg->msg.add.gw)); + return ERR_OK; +} +#endif /* LWIP_IPV4 */ + +/** +* Call netif_name_to_index() inside the tcpip_thread context. +*/ +static err_t +netifapi_do_name_to_index(struct tcpip_api_call_data *m) +{ + /* cast through void* to silence alignment warnings. + * We know it works because the structs have been instantiated as struct netifapi_msg */ + struct netifapi_msg *msg = (struct netifapi_msg *)(void *)m; + + msg->msg.ifs.index = netif_name_to_index(msg->msg.ifs.name); + return ERR_OK; +} + +/** +* Call netif_index_to_name() inside the tcpip_thread context. +*/ +static err_t +netifapi_do_index_to_name(struct tcpip_api_call_data *m) +{ + /* cast through void* to silence alignment warnings. + * We know it works because the structs have been instantiated as struct netifapi_msg */ + struct netifapi_msg *msg = (struct netifapi_msg *)(void *)m; + + if (!netif_index_to_name(msg->msg.ifs.index, msg->msg.ifs.name)) { + /* return failure via empty name */ + msg->msg.ifs.name[0] = '\0'; + } + return ERR_OK; +} + +/** + * Call the "errtfunc" (or the "voidfunc" if "errtfunc" is NULL) inside the + * tcpip_thread context. + */ +static err_t +netifapi_do_netif_common(struct tcpip_api_call_data *m) +{ + /* cast through void* to silence alignment warnings. 
+ * We know it works because the structs have been instantiated as struct netifapi_msg */ + struct netifapi_msg *msg = (struct netifapi_msg *)(void *)m; + + if (msg->msg.common.errtfunc != NULL) { + return msg->msg.common.errtfunc(msg->netif); + } else { + msg->msg.common.voidfunc(msg->netif); + return ERR_OK; + } +} + +#if LWIP_ARP && LWIP_IPV4 +/** + * @ingroup netifapi_arp + * Add or update an entry in the ARP cache. + * For an update, ipaddr is used to find the cache entry. + * + * @param ipaddr IPv4 address of cache entry + * @param ethaddr hardware address mapped to ipaddr + * @param type type of ARP cache entry + * @return ERR_OK: entry added/updated, else error from err_t + */ +err_t +netifapi_arp_add(const ip4_addr_t *ipaddr, struct eth_addr *ethaddr, enum netifapi_arp_entry type) +{ + err_t err; + + /* We only support permanent entries currently */ + LWIP_UNUSED_ARG(type); + +#if ETHARP_SUPPORT_STATIC_ENTRIES && LWIP_TCPIP_CORE_LOCKING + LOCK_TCPIP_CORE(); + err = etharp_add_static_entry(ipaddr, ethaddr); + UNLOCK_TCPIP_CORE(); +#else + /* @todo add new vars to struct netifapi_msg and create a 'do' func */ + LWIP_UNUSED_ARG(ipaddr); + LWIP_UNUSED_ARG(ethaddr); + err = ERR_VAL; +#endif /* ETHARP_SUPPORT_STATIC_ENTRIES && LWIP_TCPIP_CORE_LOCKING */ + + return err; +} + +/** + * @ingroup netifapi_arp + * Remove an entry in the ARP cache identified by ipaddr + * + * @param ipaddr IPv4 address of cache entry + * @param type type of ARP cache entry + * @return ERR_OK: entry removed, else error from err_t + */ +err_t +netifapi_arp_remove(const ip4_addr_t *ipaddr, enum netifapi_arp_entry type) +{ + err_t err; + + /* We only support permanent entries currently */ + LWIP_UNUSED_ARG(type); + +#if ETHARP_SUPPORT_STATIC_ENTRIES && LWIP_TCPIP_CORE_LOCKING + LOCK_TCPIP_CORE(); + err = etharp_remove_static_entry(ipaddr); + UNLOCK_TCPIP_CORE(); +#else + /* @todo add new vars to struct netifapi_msg and create a 'do' func */ + LWIP_UNUSED_ARG(ipaddr); + err = ERR_VAL; +#endif /* ETHARP_SUPPORT_STATIC_ENTRIES && LWIP_TCPIP_CORE_LOCKING */ + + return err; +} +#endif /* LWIP_ARP && LWIP_IPV4 */ + +/** + * @ingroup netifapi_netif + * Call netif_add() in a thread-safe way by running that function inside the + * tcpip_thread context. + * + * @note for params @see netif_add() + */ +err_t +netifapi_netif_add(struct netif *netif, +#if LWIP_IPV4 + const ip4_addr_t *ipaddr, const ip4_addr_t *netmask, const ip4_addr_t *gw, +#endif /* LWIP_IPV4 */ + void *state, netif_init_fn init, netif_input_fn input) +{ + err_t err; + NETIFAPI_VAR_DECLARE(msg); + NETIFAPI_VAR_ALLOC(msg); + +#if LWIP_IPV4 + if (ipaddr == NULL) { + ipaddr = IP4_ADDR_ANY4; + } + if (netmask == NULL) { + netmask = IP4_ADDR_ANY4; + } + if (gw == NULL) { + gw = IP4_ADDR_ANY4; + } +#endif /* LWIP_IPV4 */ + + NETIFAPI_VAR_REF(msg).netif = netif; +#if LWIP_IPV4 + NETIFAPI_VAR_REF(msg).msg.add.ipaddr = NETIFAPI_VAR_REF(ipaddr); + NETIFAPI_VAR_REF(msg).msg.add.netmask = NETIFAPI_VAR_REF(netmask); + NETIFAPI_VAR_REF(msg).msg.add.gw = NETIFAPI_VAR_REF(gw); +#endif /* LWIP_IPV4 */ + NETIFAPI_VAR_REF(msg).msg.add.state = state; + NETIFAPI_VAR_REF(msg).msg.add.init = init; + NETIFAPI_VAR_REF(msg).msg.add.input = input; + err = tcpip_api_call(netifapi_do_netif_add, &API_VAR_REF(msg).call); + NETIFAPI_VAR_FREE(msg); + return err; +} + +#if LWIP_IPV4 +/** + * @ingroup netifapi_netif + * Call netif_set_addr() in a thread-safe way by running that function inside the + * tcpip_thread context. 
+ * + * @note for params @see netif_set_addr() + */ +err_t +netifapi_netif_set_addr(struct netif *netif, + const ip4_addr_t *ipaddr, + const ip4_addr_t *netmask, + const ip4_addr_t *gw) +{ + err_t err; + NETIFAPI_VAR_DECLARE(msg); + NETIFAPI_VAR_ALLOC(msg); + + if (ipaddr == NULL) { + ipaddr = IP4_ADDR_ANY4; + } + if (netmask == NULL) { + netmask = IP4_ADDR_ANY4; + } + if (gw == NULL) { + gw = IP4_ADDR_ANY4; + } + + NETIFAPI_VAR_REF(msg).netif = netif; + NETIFAPI_VAR_REF(msg).msg.add.ipaddr = NETIFAPI_VAR_REF(ipaddr); + NETIFAPI_VAR_REF(msg).msg.add.netmask = NETIFAPI_VAR_REF(netmask); + NETIFAPI_VAR_REF(msg).msg.add.gw = NETIFAPI_VAR_REF(gw); + err = tcpip_api_call(netifapi_do_netif_set_addr, &API_VAR_REF(msg).call); + NETIFAPI_VAR_FREE(msg); + return err; +} +#endif /* LWIP_IPV4 */ + +/** + * call the "errtfunc" (or the "voidfunc" if "errtfunc" is NULL) in a thread-safe + * way by running that function inside the tcpip_thread context. + * + * @note use only for functions where there is only "netif" parameter. + */ +err_t +netifapi_netif_common(struct netif *netif, netifapi_void_fn voidfunc, + netifapi_errt_fn errtfunc) +{ + err_t err; + NETIFAPI_VAR_DECLARE(msg); + NETIFAPI_VAR_ALLOC(msg); + + NETIFAPI_VAR_REF(msg).netif = netif; + NETIFAPI_VAR_REF(msg).msg.common.voidfunc = voidfunc; + NETIFAPI_VAR_REF(msg).msg.common.errtfunc = errtfunc; + err = tcpip_api_call(netifapi_do_netif_common, &API_VAR_REF(msg).call); + NETIFAPI_VAR_FREE(msg); + return err; +} + +/** +* @ingroup netifapi_netif +* Call netif_name_to_index() in a thread-safe way by running that function inside the +* tcpip_thread context. +* +* @param name the interface name of the netif +* @param idx output index of the found netif +*/ +err_t +netifapi_netif_name_to_index(const char *name, u8_t *idx) +{ + err_t err; + NETIFAPI_VAR_DECLARE(msg); + NETIFAPI_VAR_ALLOC(msg); + + *idx = 0; + +#if LWIP_MPU_COMPATIBLE + strncpy(NETIFAPI_VAR_REF(msg).msg.ifs.name, name, NETIF_NAMESIZE - 1); + NETIFAPI_VAR_REF(msg).msg.ifs.name[NETIF_NAMESIZE - 1] = '\0'; +#else + NETIFAPI_VAR_REF(msg).msg.ifs.name = LWIP_CONST_CAST(char *, name); +#endif /* LWIP_MPU_COMPATIBLE */ + err = tcpip_api_call(netifapi_do_name_to_index, &API_VAR_REF(msg).call); + if (!err) { + *idx = NETIFAPI_VAR_REF(msg).msg.ifs.index; + } + NETIFAPI_VAR_FREE(msg); + return err; +} + +/** +* @ingroup netifapi_netif +* Call netif_index_to_name() in a thread-safe way by running that function inside the +* tcpip_thread context. +* +* @param idx the interface index of the netif +* @param name output name of the found netif, empty '\0' string if netif not found. 
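
/* Usage sketch (illustrative, not from the lwIP sources): reconfiguring an
 * interface from an application thread; requires LWIP_NETIF_API. 'gnetif'
 * and the addresses are placeholders assumed to come from the application. */
#include "lwip/netifapi.h"
#include "lwip/netif.h"

extern struct netif gnetif;  /* assumed: the netif created during LwIP init */

static void
example_change_addr(void)
{
  ip4_addr_t ipaddr, netmask, gw;

  IP4_ADDR(&ipaddr, 192, 168, 1, 50);
  IP4_ADDR(&netmask, 255, 255, 255, 0);
  IP4_ADDR(&gw, 192, 168, 1, 1);

  /* both calls are marshalled into the tcpip_thread context */
  netifapi_netif_set_addr(&gnetif, &ipaddr, &netmask, &gw);
  netifapi_netif_common(&gnetif, netif_set_up, NULL);
}
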
+* name should be of at least NETIF_NAMESIZE bytes +*/ +err_t +netifapi_netif_index_to_name(u8_t idx, char *name) +{ + err_t err; + NETIFAPI_VAR_DECLARE(msg); + NETIFAPI_VAR_ALLOC(msg); + + NETIFAPI_VAR_REF(msg).msg.ifs.index = idx; +#if !LWIP_MPU_COMPATIBLE + NETIFAPI_VAR_REF(msg).msg.ifs.name = name; +#endif /* LWIP_MPU_COMPATIBLE */ + err = tcpip_api_call(netifapi_do_index_to_name, &API_VAR_REF(msg).call); +#if LWIP_MPU_COMPATIBLE + if (!err) { + strncpy(name, NETIFAPI_VAR_REF(msg).msg.ifs.name, NETIF_NAMESIZE - 1); + name[NETIF_NAMESIZE - 1] = '\0'; + } +#endif /* LWIP_MPU_COMPATIBLE */ + NETIFAPI_VAR_FREE(msg); + return err; +} + +#endif /* LWIP_NETIF_API */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/api/sockets.c b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/api/sockets.c new file mode 100644 index 0000000..0e968b8 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/api/sockets.c @@ -0,0 +1,4160 @@ +/** + * @file + * Sockets BSD-Like API module + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
+ * + * Author: Adam Dunkels + * + * Improved by Marc Boucher and David Haas + * + */ + +#include "lwip/opt.h" + +#if LWIP_SOCKET /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/sockets.h" +#include "lwip/priv/sockets_priv.h" +#include "lwip/api.h" +#include "lwip/igmp.h" +#include "lwip/inet.h" +#include "lwip/tcp.h" +#include "lwip/raw.h" +#include "lwip/udp.h" +#include "lwip/memp.h" +#include "lwip/pbuf.h" +#include "lwip/netif.h" +#include "lwip/priv/tcpip_priv.h" +#include "lwip/mld6.h" +#if LWIP_CHECKSUM_ON_COPY +#include "lwip/inet_chksum.h" +#endif + +#if LWIP_COMPAT_SOCKETS == 2 && LWIP_POSIX_SOCKETS_IO_NAMES +#include +#endif + +#include + +#ifdef LWIP_HOOK_FILENAME +#include LWIP_HOOK_FILENAME +#endif + +/* If the netconn API is not required publicly, then we include the necessary + files here to get the implementation */ +#if !LWIP_NETCONN +#undef LWIP_NETCONN +#define LWIP_NETCONN 1 +#include "api_msg.c" +#include "api_lib.c" +#include "netbuf.c" +#undef LWIP_NETCONN +#define LWIP_NETCONN 0 +#endif + +#define API_SELECT_CB_VAR_REF(name) API_VAR_REF(name) +#define API_SELECT_CB_VAR_DECLARE(name) API_VAR_DECLARE(struct lwip_select_cb, name) +#define API_SELECT_CB_VAR_ALLOC(name, retblock) API_VAR_ALLOC_EXT(struct lwip_select_cb, MEMP_SELECT_CB, name, retblock) +#define API_SELECT_CB_VAR_FREE(name) API_VAR_FREE(MEMP_SELECT_CB, name) + +#if LWIP_IPV4 +#define IP4ADDR_PORT_TO_SOCKADDR(sin, ipaddr, port) do { \ + (sin)->sin_len = sizeof(struct sockaddr_in); \ + (sin)->sin_family = AF_INET; \ + (sin)->sin_port = lwip_htons((port)); \ + inet_addr_from_ip4addr(&(sin)->sin_addr, ipaddr); \ + memset((sin)->sin_zero, 0, SIN_ZERO_LEN); }while(0) +#define SOCKADDR4_TO_IP4ADDR_PORT(sin, ipaddr, port) do { \ + inet_addr_to_ip4addr(ip_2_ip4(ipaddr), &((sin)->sin_addr)); \ + (port) = lwip_ntohs((sin)->sin_port); }while(0) +#endif /* LWIP_IPV4 */ + +#if LWIP_IPV6 +#define IP6ADDR_PORT_TO_SOCKADDR(sin6, ipaddr, port) do { \ + (sin6)->sin6_len = sizeof(struct sockaddr_in6); \ + (sin6)->sin6_family = AF_INET6; \ + (sin6)->sin6_port = lwip_htons((port)); \ + (sin6)->sin6_flowinfo = 0; \ + inet6_addr_from_ip6addr(&(sin6)->sin6_addr, ipaddr); \ + (sin6)->sin6_scope_id = ip6_addr_zone(ipaddr); }while(0) +#define SOCKADDR6_TO_IP6ADDR_PORT(sin6, ipaddr, port) do { \ + inet6_addr_to_ip6addr(ip_2_ip6(ipaddr), &((sin6)->sin6_addr)); \ + if (ip6_addr_has_scope(ip_2_ip6(ipaddr), IP6_UNKNOWN)) { \ + ip6_addr_set_zone(ip_2_ip6(ipaddr), (u8_t)((sin6)->sin6_scope_id)); \ + } \ + (port) = lwip_ntohs((sin6)->sin6_port); }while(0) +#endif /* LWIP_IPV6 */ + +#if LWIP_IPV4 && LWIP_IPV6 +static void sockaddr_to_ipaddr_port(const struct sockaddr *sockaddr, ip_addr_t *ipaddr, u16_t *port); + +#define IS_SOCK_ADDR_LEN_VALID(namelen) (((namelen) == sizeof(struct sockaddr_in)) || \ + ((namelen) == sizeof(struct sockaddr_in6))) +#define IS_SOCK_ADDR_TYPE_VALID(name) (((name)->sa_family == AF_INET) || \ + ((name)->sa_family == AF_INET6)) +#define SOCK_ADDR_TYPE_MATCH(name, sock) \ + ((((name)->sa_family == AF_INET) && !(NETCONNTYPE_ISIPV6((sock)->conn->type))) || \ + (((name)->sa_family == AF_INET6) && (NETCONNTYPE_ISIPV6((sock)->conn->type)))) +#define IPADDR_PORT_TO_SOCKADDR(sockaddr, ipaddr, port) do { \ + if (IP_IS_ANY_TYPE_VAL(*ipaddr) || IP_IS_V6_VAL(*ipaddr)) { \ + IP6ADDR_PORT_TO_SOCKADDR((struct sockaddr_in6*)(void*)(sockaddr), ip_2_ip6(ipaddr), port); \ + } else { \ + IP4ADDR_PORT_TO_SOCKADDR((struct sockaddr_in*)(void*)(sockaddr), ip_2_ip4(ipaddr), port); \ + } } while(0) +#define 
SOCKADDR_TO_IPADDR_PORT(sockaddr, ipaddr, port) sockaddr_to_ipaddr_port(sockaddr, ipaddr, &(port)) +#define DOMAIN_TO_NETCONN_TYPE(domain, type) (((domain) == AF_INET) ? \ + (type) : (enum netconn_type)((type) | NETCONN_TYPE_IPV6)) +#elif LWIP_IPV6 /* LWIP_IPV4 && LWIP_IPV6 */ +#define IS_SOCK_ADDR_LEN_VALID(namelen) ((namelen) == sizeof(struct sockaddr_in6)) +#define IS_SOCK_ADDR_TYPE_VALID(name) ((name)->sa_family == AF_INET6) +#define SOCK_ADDR_TYPE_MATCH(name, sock) 1 +#define IPADDR_PORT_TO_SOCKADDR(sockaddr, ipaddr, port) \ + IP6ADDR_PORT_TO_SOCKADDR((struct sockaddr_in6*)(void*)(sockaddr), ip_2_ip6(ipaddr), port) +#define SOCKADDR_TO_IPADDR_PORT(sockaddr, ipaddr, port) \ + SOCKADDR6_TO_IP6ADDR_PORT((const struct sockaddr_in6*)(const void*)(sockaddr), ipaddr, port) +#define DOMAIN_TO_NETCONN_TYPE(domain, netconn_type) (netconn_type) +#else /*-> LWIP_IPV4: LWIP_IPV4 && LWIP_IPV6 */ +#define IS_SOCK_ADDR_LEN_VALID(namelen) ((namelen) == sizeof(struct sockaddr_in)) +#define IS_SOCK_ADDR_TYPE_VALID(name) ((name)->sa_family == AF_INET) +#define SOCK_ADDR_TYPE_MATCH(name, sock) 1 +#define IPADDR_PORT_TO_SOCKADDR(sockaddr, ipaddr, port) \ + IP4ADDR_PORT_TO_SOCKADDR((struct sockaddr_in*)(void*)(sockaddr), ip_2_ip4(ipaddr), port) +#define SOCKADDR_TO_IPADDR_PORT(sockaddr, ipaddr, port) \ + SOCKADDR4_TO_IP4ADDR_PORT((const struct sockaddr_in*)(const void*)(sockaddr), ipaddr, port) +#define DOMAIN_TO_NETCONN_TYPE(domain, netconn_type) (netconn_type) +#endif /* LWIP_IPV6 */ + +#define IS_SOCK_ADDR_TYPE_VALID_OR_UNSPEC(name) (((name)->sa_family == AF_UNSPEC) || \ + IS_SOCK_ADDR_TYPE_VALID(name)) +#define SOCK_ADDR_TYPE_MATCH_OR_UNSPEC(name, sock) (((name)->sa_family == AF_UNSPEC) || \ + SOCK_ADDR_TYPE_MATCH(name, sock)) +#define IS_SOCK_ADDR_ALIGNED(name) ((((mem_ptr_t)(name)) % 4) == 0) + + +#define LWIP_SOCKOPT_CHECK_OPTLEN(sock, optlen, opttype) do { if ((optlen) < sizeof(opttype)) { done_socket(sock); return EINVAL; }}while(0) +#define LWIP_SOCKOPT_CHECK_OPTLEN_CONN(sock, optlen, opttype) do { \ + LWIP_SOCKOPT_CHECK_OPTLEN(sock, optlen, opttype); \ + if ((sock)->conn == NULL) { done_socket(sock); return EINVAL; } }while(0) +#define LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, optlen, opttype) do { \ + LWIP_SOCKOPT_CHECK_OPTLEN(sock, optlen, opttype); \ + if (((sock)->conn == NULL) || ((sock)->conn->pcb.tcp == NULL)) { done_socket(sock); return EINVAL; } }while(0) +#define LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB_TYPE(sock, optlen, opttype, netconntype) do { \ + LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, optlen, opttype); \ + if (NETCONNTYPE_GROUP(netconn_type((sock)->conn)) != netconntype) { done_socket(sock); return ENOPROTOOPT; } }while(0) + + +#define LWIP_SETGETSOCKOPT_DATA_VAR_REF(name) API_VAR_REF(name) +#define LWIP_SETGETSOCKOPT_DATA_VAR_DECLARE(name) API_VAR_DECLARE(struct lwip_setgetsockopt_data, name) +#define LWIP_SETGETSOCKOPT_DATA_VAR_FREE(name) API_VAR_FREE(MEMP_SOCKET_SETGETSOCKOPT_DATA, name) +#if LWIP_MPU_COMPATIBLE +#define LWIP_SETGETSOCKOPT_DATA_VAR_ALLOC(name, sock) do { \ + name = (struct lwip_setgetsockopt_data *)memp_malloc(MEMP_SOCKET_SETGETSOCKOPT_DATA); \ + if (name == NULL) { \ + sock_set_errno(sock, ENOMEM); \ + done_socket(sock); \ + return -1; \ + } }while(0) +#else /* LWIP_MPU_COMPATIBLE */ +#define LWIP_SETGETSOCKOPT_DATA_VAR_ALLOC(name, sock) +#endif /* LWIP_MPU_COMPATIBLE */ + +#if LWIP_SO_SNDRCVTIMEO_NONSTANDARD +#define LWIP_SO_SNDRCVTIMEO_OPTTYPE int +#define LWIP_SO_SNDRCVTIMEO_SET(optval, val) (*(int *)(optval) = (val)) +#define LWIP_SO_SNDRCVTIMEO_GET_MS(optval) 
((long)*(const int*)(optval)) +#else +#define LWIP_SO_SNDRCVTIMEO_OPTTYPE struct timeval +#define LWIP_SO_SNDRCVTIMEO_SET(optval, val) do { \ + u32_t loc = (val); \ + ((struct timeval *)(optval))->tv_sec = (long)((loc) / 1000U); \ + ((struct timeval *)(optval))->tv_usec = (long)(((loc) % 1000U) * 1000U); }while(0) +#define LWIP_SO_SNDRCVTIMEO_GET_MS(optval) ((((const struct timeval *)(optval))->tv_sec * 1000) + (((const struct timeval *)(optval))->tv_usec / 1000)) +#endif + + +/** A struct sockaddr replacement that has the same alignment as sockaddr_in/ + * sockaddr_in6 if instantiated. + */ +union sockaddr_aligned { + struct sockaddr sa; +#if LWIP_IPV6 + struct sockaddr_in6 sin6; +#endif /* LWIP_IPV6 */ +#if LWIP_IPV4 + struct sockaddr_in sin; +#endif /* LWIP_IPV4 */ +}; + +/* Define the number of IPv4 multicast memberships, default is one per socket */ +#ifndef LWIP_SOCKET_MAX_MEMBERSHIPS +#define LWIP_SOCKET_MAX_MEMBERSHIPS NUM_SOCKETS +#endif + +#if LWIP_IGMP +/* This is to keep track of IP_ADD_MEMBERSHIP calls to drop the membership when + a socket is closed */ +struct lwip_socket_multicast_pair { + /** the socket */ + struct lwip_sock *sock; + /** the interface address */ + ip4_addr_t if_addr; + /** the group address */ + ip4_addr_t multi_addr; +}; + +static struct lwip_socket_multicast_pair socket_ipv4_multicast_memberships[LWIP_SOCKET_MAX_MEMBERSHIPS]; + +static int lwip_socket_register_membership(int s, const ip4_addr_t *if_addr, const ip4_addr_t *multi_addr); +static void lwip_socket_unregister_membership(int s, const ip4_addr_t *if_addr, const ip4_addr_t *multi_addr); +static void lwip_socket_drop_registered_memberships(int s); +#endif /* LWIP_IGMP */ + +#if LWIP_IPV6_MLD +/* This is to keep track of IP_JOIN_GROUP calls to drop the membership when + a socket is closed */ +struct lwip_socket_multicast_mld6_pair { + /** the socket */ + struct lwip_sock *sock; + /** the interface index */ + u8_t if_idx; + /** the group address */ + ip6_addr_t multi_addr; +}; + +static struct lwip_socket_multicast_mld6_pair socket_ipv6_multicast_memberships[LWIP_SOCKET_MAX_MEMBERSHIPS]; + +static int lwip_socket_register_mld6_membership(int s, unsigned int if_idx, const ip6_addr_t *multi_addr); +static void lwip_socket_unregister_mld6_membership(int s, unsigned int if_idx, const ip6_addr_t *multi_addr); +static void lwip_socket_drop_registered_mld6_memberships(int s); +#endif /* LWIP_IPV6_MLD */ + +/** The global array of available sockets */ +static struct lwip_sock sockets[NUM_SOCKETS]; + +#if LWIP_SOCKET_SELECT || LWIP_SOCKET_POLL +#if LWIP_TCPIP_CORE_LOCKING +/* protect the select_cb_list using core lock */ +#define LWIP_SOCKET_SELECT_DECL_PROTECT(lev) +#define LWIP_SOCKET_SELECT_PROTECT(lev) LOCK_TCPIP_CORE() +#define LWIP_SOCKET_SELECT_UNPROTECT(lev) UNLOCK_TCPIP_CORE() +#else /* LWIP_TCPIP_CORE_LOCKING */ +/* protect the select_cb_list using SYS_LIGHTWEIGHT_PROT */ +#define LWIP_SOCKET_SELECT_DECL_PROTECT(lev) SYS_ARCH_DECL_PROTECT(lev) +#define LWIP_SOCKET_SELECT_PROTECT(lev) SYS_ARCH_PROTECT(lev) +#define LWIP_SOCKET_SELECT_UNPROTECT(lev) SYS_ARCH_UNPROTECT(lev) +/** This counter is increased from lwip_select when the list is changed + and checked in select_check_waiters to see if it has changed. 
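
/* Usage sketch (illustrative, not from the lwIP sources): with
 * LWIP_SO_SNDRCVTIMEO_NONSTANDARD == 0 the timeout options take a
 * struct timeval, as on BSD sockets; requires LWIP_SO_RCVTIMEO. */
#include "lwip/sockets.h"

static void
example_set_recv_timeout(int s)
{
  struct timeval tv;
  tv.tv_sec = 2;         /* 2.5 second receive timeout */
  tv.tv_usec = 500000;
  lwip_setsockopt(s, SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof(tv));
}
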
*/ +static volatile int select_cb_ctr; +#endif /* LWIP_TCPIP_CORE_LOCKING */ +/** The global list of tasks waiting for select */ +static struct lwip_select_cb *select_cb_list; +#endif /* LWIP_SOCKET_SELECT || LWIP_SOCKET_POLL */ + +#define sock_set_errno(sk, e) do { \ + const int sockerr = (e); \ + set_errno(sockerr); \ +} while (0) + +/* Forward declaration of some functions */ +#if LWIP_SOCKET_SELECT || LWIP_SOCKET_POLL +static void event_callback(struct netconn *conn, enum netconn_evt evt, u16_t len); +#define DEFAULT_SOCKET_EVENTCB event_callback +static void select_check_waiters(int s, int has_recvevent, int has_sendevent, int has_errevent); +#else +#define DEFAULT_SOCKET_EVENTCB NULL +#endif +#if !LWIP_TCPIP_CORE_LOCKING +static void lwip_getsockopt_callback(void *arg); +static void lwip_setsockopt_callback(void *arg); +#endif +static int lwip_getsockopt_impl(int s, int level, int optname, void *optval, socklen_t *optlen); +static int lwip_setsockopt_impl(int s, int level, int optname, const void *optval, socklen_t optlen); +static int free_socket_locked(struct lwip_sock *sock, int is_tcp, struct netconn **conn, + union lwip_sock_lastdata *lastdata); +static void free_socket_free_elements(int is_tcp, struct netconn *conn, union lwip_sock_lastdata *lastdata); + +#if LWIP_IPV4 && LWIP_IPV6 +static void +sockaddr_to_ipaddr_port(const struct sockaddr *sockaddr, ip_addr_t *ipaddr, u16_t *port) +{ + if ((sockaddr->sa_family) == AF_INET6) { + SOCKADDR6_TO_IP6ADDR_PORT((const struct sockaddr_in6 *)(const void *)(sockaddr), ipaddr, *port); + ipaddr->type = IPADDR_TYPE_V6; + } else { + SOCKADDR4_TO_IP4ADDR_PORT((const struct sockaddr_in *)(const void *)(sockaddr), ipaddr, *port); + ipaddr->type = IPADDR_TYPE_V4; + } +} +#endif /* LWIP_IPV4 && LWIP_IPV6 */ + +/** LWIP_NETCONN_SEM_PER_THREAD==1: initialize thread-local semaphore */ +void +lwip_socket_thread_init(void) +{ + netconn_thread_init(); +} + +/** LWIP_NETCONN_SEM_PER_THREAD==1: destroy thread-local semaphore */ +void +lwip_socket_thread_cleanup(void) +{ + netconn_thread_cleanup(); +} + +#if LWIP_NETCONN_FULLDUPLEX +/* Thread-safe increment of sock->fd_used, with overflow check */ +static int +sock_inc_used(struct lwip_sock *sock) +{ + int ret; + SYS_ARCH_DECL_PROTECT(lev); + + LWIP_ASSERT("sock != NULL", sock != NULL); + + SYS_ARCH_PROTECT(lev); + if (sock->fd_free_pending) { + /* prevent new usage of this socket if free is pending */ + ret = 0; + } else { + ++sock->fd_used; + ret = 1; + LWIP_ASSERT("sock->fd_used != 0", sock->fd_used != 0); + } + SYS_ARCH_UNPROTECT(lev); + return ret; +} + +/* Like sock_inc_used(), but called under SYS_ARCH_PROTECT lock. */ +static int +sock_inc_used_locked(struct lwip_sock *sock) +{ + LWIP_ASSERT("sock != NULL", sock != NULL); + + if (sock->fd_free_pending) { + LWIP_ASSERT("sock->fd_used != 0", sock->fd_used != 0); + return 0; + } + + ++sock->fd_used; + LWIP_ASSERT("sock->fd_used != 0", sock->fd_used != 0); + return 1; +} + +/* In full-duplex mode,sock->fd_used != 0 prevents a socket descriptor from being + * released (and possibly reused) when used from more than one thread + * (e.g. read-while-write or close-while-write, etc) + * This function is called at the end of functions using (try)get_socket*(). 
+ */ +static void +done_socket(struct lwip_sock *sock) +{ + int freed = 0; + int is_tcp = 0; + struct netconn *conn = NULL; + union lwip_sock_lastdata lastdata; + SYS_ARCH_DECL_PROTECT(lev); + LWIP_ASSERT("sock != NULL", sock != NULL); + + SYS_ARCH_PROTECT(lev); + LWIP_ASSERT("sock->fd_used > 0", sock->fd_used > 0); + if (--sock->fd_used == 0) { + if (sock->fd_free_pending) { + /* free the socket */ + sock->fd_used = 1; + is_tcp = sock->fd_free_pending & LWIP_SOCK_FD_FREE_TCP; + freed = free_socket_locked(sock, is_tcp, &conn, &lastdata); + } + } + SYS_ARCH_UNPROTECT(lev); + + if (freed) { + free_socket_free_elements(is_tcp, conn, &lastdata); + } +} + +#else /* LWIP_NETCONN_FULLDUPLEX */ +#define sock_inc_used(sock) 1 +#define sock_inc_used_locked(sock) 1 +#define done_socket(sock) +#endif /* LWIP_NETCONN_FULLDUPLEX */ + +/* Translate a socket 'int' into a pointer (only fails if the index is invalid) */ +static struct lwip_sock * +tryget_socket_unconn_nouse(int fd) +{ + int s = fd - LWIP_SOCKET_OFFSET; + if ((s < 0) || (s >= NUM_SOCKETS)) { + LWIP_DEBUGF(SOCKETS_DEBUG, ("tryget_socket_unconn(%d): invalid\n", fd)); + return NULL; + } + return &sockets[s]; +} + +struct lwip_sock * +lwip_socket_dbg_get_socket(int fd) +{ + return tryget_socket_unconn_nouse(fd); +} + +/* Translate a socket 'int' into a pointer (only fails if the index is invalid) */ +static struct lwip_sock * +tryget_socket_unconn(int fd) +{ + struct lwip_sock *ret = tryget_socket_unconn_nouse(fd); + if (ret != NULL) { + if (!sock_inc_used(ret)) { + return NULL; + } + } + return ret; +} + +/* Like tryget_socket_unconn(), but called under SYS_ARCH_PROTECT lock. */ +static struct lwip_sock * +tryget_socket_unconn_locked(int fd) +{ + struct lwip_sock *ret = tryget_socket_unconn_nouse(fd); + if (ret != NULL) { + if (!sock_inc_used_locked(ret)) { + return NULL; + } + } + return ret; +} + +/** + * Same as get_socket but doesn't set errno + * + * @param fd externally used socket index + * @return struct lwip_sock for the socket or NULL if not found + */ +static struct lwip_sock * +tryget_socket(int fd) +{ + struct lwip_sock *sock = tryget_socket_unconn(fd); + if (sock != NULL) { + if (sock->conn) { + return sock; + } + done_socket(sock); + } + return NULL; +} + +/** + * Map a externally used socket index to the internal socket representation. + * + * @param fd externally used socket index + * @return struct lwip_sock for the socket or NULL if not found + */ +static struct lwip_sock * +get_socket(int fd) +{ + struct lwip_sock *sock = tryget_socket(fd); + if (!sock) { + if ((fd < LWIP_SOCKET_OFFSET) || (fd >= (LWIP_SOCKET_OFFSET + NUM_SOCKETS))) { + LWIP_DEBUGF(SOCKETS_DEBUG, ("get_socket(%d): invalid\n", fd)); + } + set_errno(EBADF); + return NULL; + } + return sock; +} + +/** + * Allocate a new socket for a given netconn. 
+ * + * @param newconn the netconn for which to allocate a socket + * @param accepted 1 if socket has been created by accept(), + * 0 if socket has been created by socket() + * @return the index of the new socket; -1 on error + */ +static int +alloc_socket(struct netconn *newconn, int accepted) +{ + int i; + SYS_ARCH_DECL_PROTECT(lev); + LWIP_UNUSED_ARG(accepted); + + /* allocate a new socket identifier */ + for (i = 0; i < NUM_SOCKETS; ++i) { + /* Protect socket array */ + SYS_ARCH_PROTECT(lev); + if (!sockets[i].conn) { +#if LWIP_NETCONN_FULLDUPLEX + if (sockets[i].fd_used) { + SYS_ARCH_UNPROTECT(lev); + continue; + } + sockets[i].fd_used = 1; + sockets[i].fd_free_pending = 0; +#endif + sockets[i].conn = newconn; + /* The socket is not yet known to anyone, so no need to protect + after having marked it as used. */ + SYS_ARCH_UNPROTECT(lev); + sockets[i].lastdata.pbuf = NULL; +#if LWIP_SOCKET_SELECT || LWIP_SOCKET_POLL + LWIP_ASSERT("sockets[i].select_waiting == 0", sockets[i].select_waiting == 0); + sockets[i].rcvevent = 0; + /* TCP sendbuf is empty, but the socket is not yet writable until connected + * (unless it has been created by accept()). */ + sockets[i].sendevent = (NETCONNTYPE_GROUP(newconn->type) == NETCONN_TCP ? (accepted != 0) : 1); + sockets[i].errevent = 0; +#endif /* LWIP_SOCKET_SELECT || LWIP_SOCKET_POLL */ + return i + LWIP_SOCKET_OFFSET; + } + SYS_ARCH_UNPROTECT(lev); + } + return -1; +} + +/** Free a socket (under lock) + * + * @param sock the socket to free + * @param is_tcp != 0 for TCP sockets, used to free lastdata + * @param conn the socekt's netconn is stored here, must be freed externally + * @param lastdata lastdata is stored here, must be freed externally + */ +static int +free_socket_locked(struct lwip_sock *sock, int is_tcp, struct netconn **conn, + union lwip_sock_lastdata *lastdata) +{ +#if LWIP_NETCONN_FULLDUPLEX + LWIP_ASSERT("sock->fd_used > 0", sock->fd_used > 0); + sock->fd_used--; + if (sock->fd_used > 0) { + sock->fd_free_pending = LWIP_SOCK_FD_FREE_FREE | (is_tcp ? LWIP_SOCK_FD_FREE_TCP : 0); + return 0; + } +#else /* LWIP_NETCONN_FULLDUPLEX */ + LWIP_UNUSED_ARG(is_tcp); +#endif /* LWIP_NETCONN_FULLDUPLEX */ + + *lastdata = sock->lastdata; + sock->lastdata.pbuf = NULL; + *conn = sock->conn; + sock->conn = NULL; + return 1; +} + +/** Free a socket's leftover members. + */ +static void +free_socket_free_elements(int is_tcp, struct netconn *conn, union lwip_sock_lastdata *lastdata) +{ + if (lastdata->pbuf != NULL) { + if (is_tcp) { + pbuf_free(lastdata->pbuf); + } else { + netbuf_delete(lastdata->netbuf); + } + } + if (conn != NULL) { + /* netconn_prepare_delete() has already been called, here we only free the conn */ + netconn_delete(conn); + } +} + +/** Free a socket. The socket's netconn must have been + * delete before! + * + * @param sock the socket to free + * @param is_tcp != 0 for TCP sockets, used to free lastdata + */ +static void +free_socket(struct lwip_sock *sock, int is_tcp) +{ + int freed; + struct netconn *conn; + union lwip_sock_lastdata lastdata; + SYS_ARCH_DECL_PROTECT(lev); + + /* Protect socket array */ + SYS_ARCH_PROTECT(lev); + + freed = free_socket_locked(sock, is_tcp, &conn, &lastdata); + SYS_ARCH_UNPROTECT(lev); + /* don't use 'sock' after this line, as another task might have allocated it */ + + if (freed) { + free_socket_free_elements(is_tcp, conn, &lastdata); + } +} + +/* Below this, the well-known socket functions are implemented. 
+ * Use google.com or opengroup.org to get a good description :-) + * + * Exceptions are documented! + */ + +int +lwip_accept(int s, struct sockaddr *addr, socklen_t *addrlen) +{ + struct lwip_sock *sock, *nsock; + struct netconn *newconn; + ip_addr_t naddr; + u16_t port = 0; + int newsock; + err_t err; + int recvevent; + SYS_ARCH_DECL_PROTECT(lev); + + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_accept(%d)...\n", s)); + sock = get_socket(s); + if (!sock) { + return -1; + } + + /* wait for a new connection */ + err = netconn_accept(sock->conn, &newconn); + if (err != ERR_OK) { + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_accept(%d): netconn_acept failed, err=%d\n", s, err)); + if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) != NETCONN_TCP) { + sock_set_errno(sock, EOPNOTSUPP); + } else if (err == ERR_CLSD) { + sock_set_errno(sock, EINVAL); + } else { + sock_set_errno(sock, err_to_errno(err)); + } + done_socket(sock); + return -1; + } + LWIP_ASSERT("newconn != NULL", newconn != NULL); + + newsock = alloc_socket(newconn, 1); + if (newsock == -1) { + netconn_delete(newconn); + sock_set_errno(sock, ENFILE); + done_socket(sock); + return -1; + } + LWIP_ASSERT("invalid socket index", (newsock >= LWIP_SOCKET_OFFSET) && (newsock < NUM_SOCKETS + LWIP_SOCKET_OFFSET)); + nsock = &sockets[newsock - LWIP_SOCKET_OFFSET]; + + /* See event_callback: If data comes in right away after an accept, even + * though the server task might not have created a new socket yet. + * In that case, newconn->socket is counted down (newconn->socket--), + * so nsock->rcvevent is >= 1 here! + */ + SYS_ARCH_PROTECT(lev); + recvevent = (s16_t)(-1 - newconn->socket); + newconn->socket = newsock; + SYS_ARCH_UNPROTECT(lev); + + if (newconn->callback) { + LOCK_TCPIP_CORE(); + while (recvevent > 0) { + recvevent--; + newconn->callback(newconn, NETCONN_EVT_RCVPLUS, 0); + } + UNLOCK_TCPIP_CORE(); + } + + /* Note that POSIX only requires us to check addr is non-NULL. addrlen must + * not be NULL if addr is valid. 
+ */ + if ((addr != NULL) && (addrlen != NULL)) { + union sockaddr_aligned tempaddr; + /* get the IP address and port of the remote host */ + err = netconn_peer(newconn, &naddr, &port); + if (err != ERR_OK) { + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_accept(%d): netconn_peer failed, err=%d\n", s, err)); + netconn_delete(newconn); + free_socket(nsock, 1); + sock_set_errno(sock, err_to_errno(err)); + done_socket(sock); + return -1; + } + + IPADDR_PORT_TO_SOCKADDR(&tempaddr, &naddr, port); + if (*addrlen > tempaddr.sa.sa_len) { + *addrlen = tempaddr.sa.sa_len; + } + MEMCPY(addr, &tempaddr, *addrlen); + + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_accept(%d) returning new sock=%d addr=", s, newsock)); + ip_addr_debug_print_val(SOCKETS_DEBUG, naddr); + LWIP_DEBUGF(SOCKETS_DEBUG, (" port=%"U16_F"\n", port)); + } else { + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_accept(%d) returning new sock=%d", s, newsock)); + } + + sock_set_errno(sock, 0); + done_socket(sock); + done_socket(nsock); + return newsock; +} + +int +lwip_bind(int s, const struct sockaddr *name, socklen_t namelen) +{ + struct lwip_sock *sock; + ip_addr_t local_addr; + u16_t local_port; + err_t err; + + sock = get_socket(s); + if (!sock) { + return -1; + } + + if (!SOCK_ADDR_TYPE_MATCH(name, sock)) { + /* sockaddr does not match socket type (IPv4/IPv6) */ + sock_set_errno(sock, err_to_errno(ERR_VAL)); + done_socket(sock); + return -1; + } + + /* check size, family and alignment of 'name' */ + LWIP_ERROR("lwip_bind: invalid address", (IS_SOCK_ADDR_LEN_VALID(namelen) && + IS_SOCK_ADDR_TYPE_VALID(name) && IS_SOCK_ADDR_ALIGNED(name)), + sock_set_errno(sock, err_to_errno(ERR_ARG)); done_socket(sock); return -1;); + LWIP_UNUSED_ARG(namelen); + + SOCKADDR_TO_IPADDR_PORT(name, &local_addr, local_port); + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_bind(%d, addr=", s)); + ip_addr_debug_print_val(SOCKETS_DEBUG, local_addr); + LWIP_DEBUGF(SOCKETS_DEBUG, (" port=%"U16_F")\n", local_port)); + +#if LWIP_IPV4 && LWIP_IPV6 + /* Dual-stack: Unmap IPv4 mapped IPv6 addresses */ + if (IP_IS_V6_VAL(local_addr) && ip6_addr_isipv4mappedipv6(ip_2_ip6(&local_addr))) { + unmap_ipv4_mapped_ipv6(ip_2_ip4(&local_addr), ip_2_ip6(&local_addr)); + IP_SET_TYPE_VAL(local_addr, IPADDR_TYPE_V4); + } +#endif /* LWIP_IPV4 && LWIP_IPV6 */ + + err = netconn_bind(sock->conn, &local_addr, local_port); + + if (err != ERR_OK) { + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_bind(%d) failed, err=%d\n", s, err)); + sock_set_errno(sock, err_to_errno(err)); + done_socket(sock); + return -1; + } + + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_bind(%d) succeeded\n", s)); + sock_set_errno(sock, 0); + done_socket(sock); + return 0; +} + +int +lwip_close(int s) +{ + struct lwip_sock *sock; + int is_tcp = 0; + err_t err; + + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_close(%d)\n", s)); + + sock = get_socket(s); + if (!sock) { + return -1; + } + + if (sock->conn != NULL) { + is_tcp = NETCONNTYPE_GROUP(netconn_type(sock->conn)) == NETCONN_TCP; + } else { + LWIP_ASSERT("sock->lastdata == NULL", sock->lastdata.pbuf == NULL); + } + +#if LWIP_IGMP + /* drop all possibly joined IGMP memberships */ + lwip_socket_drop_registered_memberships(s); +#endif /* LWIP_IGMP */ +#if LWIP_IPV6_MLD + /* drop all possibly joined MLD6 memberships */ + lwip_socket_drop_registered_mld6_memberships(s); +#endif /* LWIP_IPV6_MLD */ + + err = netconn_prepare_delete(sock->conn); + if (err != ERR_OK) { + sock_set_errno(sock, err_to_errno(err)); + done_socket(sock); + return -1; + } + + free_socket(sock, is_tcp); + set_errno(0); + return 0; +} + +int +lwip_connect(int s, const 
struct sockaddr *name, socklen_t namelen) +{ + struct lwip_sock *sock; + err_t err; + + sock = get_socket(s); + if (!sock) { + return -1; + } + + if (!SOCK_ADDR_TYPE_MATCH_OR_UNSPEC(name, sock)) { + /* sockaddr does not match socket type (IPv4/IPv6) */ + sock_set_errno(sock, err_to_errno(ERR_VAL)); + done_socket(sock); + return -1; + } + + LWIP_UNUSED_ARG(namelen); + if (name->sa_family == AF_UNSPEC) { + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_connect(%d, AF_UNSPEC)\n", s)); + err = netconn_disconnect(sock->conn); + } else { + ip_addr_t remote_addr; + u16_t remote_port; + + /* check size, family and alignment of 'name' */ + LWIP_ERROR("lwip_connect: invalid address", IS_SOCK_ADDR_LEN_VALID(namelen) && + IS_SOCK_ADDR_TYPE_VALID_OR_UNSPEC(name) && IS_SOCK_ADDR_ALIGNED(name), + sock_set_errno(sock, err_to_errno(ERR_ARG)); done_socket(sock); return -1;); + + SOCKADDR_TO_IPADDR_PORT(name, &remote_addr, remote_port); + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_connect(%d, addr=", s)); + ip_addr_debug_print_val(SOCKETS_DEBUG, remote_addr); + LWIP_DEBUGF(SOCKETS_DEBUG, (" port=%"U16_F")\n", remote_port)); + +#if LWIP_IPV4 && LWIP_IPV6 + /* Dual-stack: Unmap IPv4 mapped IPv6 addresses */ + if (IP_IS_V6_VAL(remote_addr) && ip6_addr_isipv4mappedipv6(ip_2_ip6(&remote_addr))) { + unmap_ipv4_mapped_ipv6(ip_2_ip4(&remote_addr), ip_2_ip6(&remote_addr)); + IP_SET_TYPE_VAL(remote_addr, IPADDR_TYPE_V4); + } +#endif /* LWIP_IPV4 && LWIP_IPV6 */ + + err = netconn_connect(sock->conn, &remote_addr, remote_port); + } + + if (err != ERR_OK) { + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_connect(%d) failed, err=%d\n", s, err)); + sock_set_errno(sock, err_to_errno(err)); + done_socket(sock); + return -1; + } + + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_connect(%d) succeeded\n", s)); + sock_set_errno(sock, 0); + done_socket(sock); + return 0; +} + +/** + * Set a socket into listen mode. + * The socket may not have been used for another connection previously. + * + * @param s the socket to set to listening mode + * @param backlog (ATTENTION: needs TCP_LISTEN_BACKLOG=1) + * @return 0 on success, non-zero on failure + */ +int +lwip_listen(int s, int backlog) +{ + struct lwip_sock *sock; + err_t err; + + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_listen(%d, backlog=%d)\n", s, backlog)); + + sock = get_socket(s); + if (!sock) { + return -1; + } + + /* limit the "backlog" parameter to fit in an u8_t */ + backlog = LWIP_MIN(LWIP_MAX(backlog, 0), 0xff); + + err = netconn_listen_with_backlog(sock->conn, (u8_t)backlog); + + if (err != ERR_OK) { + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_listen(%d) failed, err=%d\n", s, err)); + if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) != NETCONN_TCP) { + sock_set_errno(sock, EOPNOTSUPP); + } else { + sock_set_errno(sock, err_to_errno(err)); + } + done_socket(sock); + return -1; + } + + sock_set_errno(sock, 0); + done_socket(sock); + return 0; +} + +#if LWIP_TCP +/* Helper function to loop over receiving pbufs from netconn + * until "len" bytes are received or we're otherwise done. + * Keeps sock->lastdata for peeking or partly copying. + */ +static ssize_t +lwip_recv_tcp(struct lwip_sock *sock, void *mem, size_t len, int flags) +{ + u8_t apiflags = NETCONN_NOAUTORCVD; + ssize_t recvd = 0; + ssize_t recv_left = (len <= SSIZE_MAX) ? 
(ssize_t)len : SSIZE_MAX; + + LWIP_ASSERT("no socket given", sock != NULL); + LWIP_ASSERT("this should be checked internally", NETCONNTYPE_GROUP(netconn_type(sock->conn)) == NETCONN_TCP); + + if (flags & MSG_DONTWAIT) { + apiflags |= NETCONN_DONTBLOCK; + } + + do { + struct pbuf *p; + err_t err; + u16_t copylen; + + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recv_tcp: top while sock->lastdata=%p\n", (void *)sock->lastdata.pbuf)); + /* Check if there is data left from the last recv operation. */ + if (sock->lastdata.pbuf) { + p = sock->lastdata.pbuf; + } else { + /* No data was left from the previous operation, so we try to get + some from the network. */ + err = netconn_recv_tcp_pbuf_flags(sock->conn, &p, apiflags); + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recv_tcp: netconn_recv err=%d, pbuf=%p\n", + err, (void *)p)); + + if (err != ERR_OK) { + if (recvd > 0) { + /* already received data, return that (this trusts in getting the same error from + netconn layer again next time netconn_recv is called) */ + goto lwip_recv_tcp_done; + } + /* We should really do some error checking here. */ + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recv_tcp: p == NULL, error is \"%s\"!\n", + lwip_strerr(err))); + sock_set_errno(sock, err_to_errno(err)); + if (err == ERR_CLSD) { + return 0; + } else { + return -1; + } + } + LWIP_ASSERT("p != NULL", p != NULL); + sock->lastdata.pbuf = p; + } + + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recv_tcp: buflen=%"U16_F" recv_left=%d off=%d\n", + p->tot_len, (int)recv_left, (int)recvd)); + + if (recv_left > p->tot_len) { + copylen = p->tot_len; + } else { + copylen = (u16_t)recv_left; + } + if (recvd + copylen < recvd) { + /* overflow */ + copylen = (u16_t)(SSIZE_MAX - recvd); + } + + /* copy the contents of the received buffer into + the supplied memory pointer mem */ + pbuf_copy_partial(p, (u8_t *)mem + recvd, copylen, 0); + + recvd += copylen; + + /* TCP combines multiple pbufs for one recv */ + LWIP_ASSERT("invalid copylen, len would underflow", recv_left >= copylen); + recv_left -= copylen; + + /* Unless we peek the incoming message... */ + if ((flags & MSG_PEEK) == 0) { + /* ... check if there is data left in the pbuf */ + LWIP_ASSERT("invalid copylen", p->tot_len >= copylen); + if (p->tot_len - copylen > 0) { + /* If so, it should be saved in the sock structure for the next recv call. + We store the pbuf but hide/free the consumed data: */ + sock->lastdata.pbuf = pbuf_free_header(p, copylen); + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recv_tcp: lastdata now pbuf=%p\n", (void *)sock->lastdata.pbuf)); + } else { + sock->lastdata.pbuf = NULL; + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recv_tcp: deleting pbuf=%p\n", (void *)p)); + pbuf_free(p); + } + } + /* once we have some data to return, only add more if we don't need to wait */ + apiflags |= NETCONN_DONTBLOCK | NETCONN_NOFIN; + /* @todo: do we need to support peeking more than one pbuf? 
*/ + } while ((recv_left > 0) && !(flags & MSG_PEEK)); +lwip_recv_tcp_done: + if ((recvd > 0) && !(flags & MSG_PEEK)) { + /* ensure window update after copying all data */ + netconn_tcp_recvd(sock->conn, (size_t)recvd); + } + sock_set_errno(sock, 0); + return recvd; +} +#endif + +/* Convert a netbuf's address data to struct sockaddr */ +static int +lwip_sock_make_addr(struct netconn *conn, ip_addr_t *fromaddr, u16_t port, + struct sockaddr *from, socklen_t *fromlen) +{ + int truncated = 0; + union sockaddr_aligned saddr; + + LWIP_UNUSED_ARG(conn); + + LWIP_ASSERT("fromaddr != NULL", fromaddr != NULL); + LWIP_ASSERT("from != NULL", from != NULL); + LWIP_ASSERT("fromlen != NULL", fromlen != NULL); + +#if LWIP_IPV4 && LWIP_IPV6 + /* Dual-stack: Map IPv4 addresses to IPv4 mapped IPv6 */ + if (NETCONNTYPE_ISIPV6(netconn_type(conn)) && IP_IS_V4(fromaddr)) { + ip4_2_ipv4_mapped_ipv6(ip_2_ip6(fromaddr), ip_2_ip4(fromaddr)); + IP_SET_TYPE(fromaddr, IPADDR_TYPE_V6); + } +#endif /* LWIP_IPV4 && LWIP_IPV6 */ + + IPADDR_PORT_TO_SOCKADDR(&saddr, fromaddr, port); + if (*fromlen < saddr.sa.sa_len) { + truncated = 1; + } else if (*fromlen > saddr.sa.sa_len) { + *fromlen = saddr.sa.sa_len; + } + MEMCPY(from, &saddr, *fromlen); + return truncated; +} + +#if LWIP_TCP +/* Helper function to get a tcp socket's remote address info */ +static int +lwip_recv_tcp_from(struct lwip_sock *sock, struct sockaddr *from, socklen_t *fromlen, const char *dbg_fn, int dbg_s, ssize_t dbg_ret) +{ + if (sock == NULL) { + return 0; + } + LWIP_UNUSED_ARG(dbg_fn); + LWIP_UNUSED_ARG(dbg_s); + LWIP_UNUSED_ARG(dbg_ret); + +#if !SOCKETS_DEBUG + if (from && fromlen) +#endif /* !SOCKETS_DEBUG */ + { + /* get remote addr/port from tcp_pcb */ + u16_t port; + ip_addr_t tmpaddr; + netconn_getaddr(sock->conn, &tmpaddr, &port, 0); + LWIP_DEBUGF(SOCKETS_DEBUG, ("%s(%d): addr=", dbg_fn, dbg_s)); + ip_addr_debug_print_val(SOCKETS_DEBUG, tmpaddr); + LWIP_DEBUGF(SOCKETS_DEBUG, (" port=%"U16_F" len=%d\n", port, (int)dbg_ret)); + if (from && fromlen) { + return lwip_sock_make_addr(sock->conn, &tmpaddr, port, from, fromlen); + } + } + return 0; +} +#endif + +/* Helper function to receive a netbuf from a udp or raw netconn. + * Keeps sock->lastdata for peeking. + */ +static err_t +lwip_recvfrom_udp_raw(struct lwip_sock *sock, int flags, struct msghdr *msg, u16_t *datagram_len, int dbg_s) +{ + struct netbuf *buf; + u8_t apiflags; + err_t err; + u16_t buflen, copylen, copied; + int i; + + LWIP_UNUSED_ARG(dbg_s); + LWIP_ERROR("lwip_recvfrom_udp_raw: invalid arguments", (msg->msg_iov != NULL) || (msg->msg_iovlen <= 0), return ERR_ARG;); + + if (flags & MSG_DONTWAIT) { + apiflags = NETCONN_DONTBLOCK; + } else { + apiflags = 0; + } + + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recvfrom_udp_raw[UDP/RAW]: top sock->lastdata=%p\n", (void *)sock->lastdata.netbuf)); + /* Check if there is data left from the last recv operation. */ + buf = sock->lastdata.netbuf; + if (buf == NULL) { + /* No data was left from the previous operation, so we try to get + some from the network. 
*/ + err = netconn_recv_udp_raw_netbuf_flags(sock->conn, &buf, apiflags); + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recvfrom_udp_raw[UDP/RAW]: netconn_recv err=%d, netbuf=%p\n", + err, (void *)buf)); + + if (err != ERR_OK) { + return err; + } + LWIP_ASSERT("buf != NULL", buf != NULL); + sock->lastdata.netbuf = buf; + } + buflen = buf->p->tot_len; + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recvfrom_udp_raw: buflen=%"U16_F"\n", buflen)); + + copied = 0; + /* copy the pbuf payload into the iovs */ + for (i = 0; (i < msg->msg_iovlen) && (copied < buflen); i++) { + u16_t len_left = (u16_t)(buflen - copied); + if (msg->msg_iov[i].iov_len > len_left) { + copylen = len_left; + } else { + copylen = (u16_t)msg->msg_iov[i].iov_len; + } + + /* copy the contents of the received buffer into + the supplied memory buffer */ + pbuf_copy_partial(buf->p, (u8_t *)msg->msg_iov[i].iov_base, copylen, copied); + copied = (u16_t)(copied + copylen); + } + + /* Check to see from where the data was.*/ +#if !SOCKETS_DEBUG + if (msg->msg_name && msg->msg_namelen) +#endif /* !SOCKETS_DEBUG */ + { + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recvfrom_udp_raw(%d): addr=", dbg_s)); + ip_addr_debug_print_val(SOCKETS_DEBUG, *netbuf_fromaddr(buf)); + LWIP_DEBUGF(SOCKETS_DEBUG, (" port=%"U16_F" len=%d\n", netbuf_fromport(buf), copied)); + if (msg->msg_name && msg->msg_namelen) { + lwip_sock_make_addr(sock->conn, netbuf_fromaddr(buf), netbuf_fromport(buf), + (struct sockaddr *)msg->msg_name, &msg->msg_namelen); + } + } + + /* Initialize flag output */ + msg->msg_flags = 0; + + if (msg->msg_control) { + u8_t wrote_msg = 0; +#if LWIP_NETBUF_RECVINFO + /* Check if packet info was recorded */ + if (buf->flags & NETBUF_FLAG_DESTADDR) { + if (IP_IS_V4(&buf->toaddr)) { +#if LWIP_IPV4 + if (msg->msg_controllen >= CMSG_SPACE(sizeof(struct in_pktinfo))) { + struct cmsghdr *chdr = CMSG_FIRSTHDR(msg); /* This will always return a header!! 
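+
+     For reference, the application side of this control-message path can
+     look like the sketch below; it assumes LWIP_NETBUF_RECVINFO is enabled
+     and that IP_PKTINFO can be switched on via lwip_setsockopt() (socket s
+     is hypothetical):
+
+       #include "lwip/sockets.h"
+
+       static void recv_with_pktinfo(int s)
+       {
+         char data[256];
+         char ctrl[CMSG_SPACE(sizeof(struct in_pktinfo))];
+         struct iovec iov = { data, sizeof(data) };
+         struct msghdr msg = { 0 };
+         int on = 1;
+
+         lwip_setsockopt(s, IPPROTO_IP, IP_PKTINFO, &on, sizeof(on));
+         msg.msg_iov = &iov;
+         msg.msg_iovlen = 1;
+         msg.msg_control = ctrl;
+         msg.msg_controllen = sizeof(ctrl);
+         if (lwip_recvmsg(s, &msg, 0) >= 0 && !(msg.msg_flags & MSG_CTRUNC)) {
+           struct cmsghdr *c = CMSG_FIRSTHDR(&msg);
+           if (c && c->cmsg_level == IPPROTO_IP && c->cmsg_type == IP_PKTINFO) {
+             const struct in_pktinfo *pi = (const struct in_pktinfo *)CMSG_DATA(c);
+             (void)pi;  // pi->ipi_ifindex / pi->ipi_addr as filled in below
+           }
+         }
+       }
+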
*/ + struct in_pktinfo *pkti = (struct in_pktinfo *)CMSG_DATA(chdr); + chdr->cmsg_level = IPPROTO_IP; + chdr->cmsg_type = IP_PKTINFO; + chdr->cmsg_len = CMSG_LEN(sizeof(struct in_pktinfo)); + pkti->ipi_ifindex = buf->p->if_idx; + inet_addr_from_ip4addr(&pkti->ipi_addr, ip_2_ip4(netbuf_destaddr(buf))); + msg->msg_controllen = CMSG_SPACE(sizeof(struct in_pktinfo)); + wrote_msg = 1; + } else { + msg->msg_flags |= MSG_CTRUNC; + } +#endif /* LWIP_IPV4 */ + } + } +#endif /* LWIP_NETBUF_RECVINFO */ + + if (!wrote_msg) { + msg->msg_controllen = 0; + } + } + + /* If we don't peek the incoming message: zero lastdata pointer and free the netbuf */ + if ((flags & MSG_PEEK) == 0) { + sock->lastdata.netbuf = NULL; + netbuf_delete(buf); + } + if (datagram_len) { + *datagram_len = buflen; + } + return ERR_OK; +} + +ssize_t +lwip_recvfrom(int s, void *mem, size_t len, int flags, + struct sockaddr *from, socklen_t *fromlen) +{ + struct lwip_sock *sock; + ssize_t ret; + + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recvfrom(%d, %p, %"SZT_F", 0x%x, ..)\n", s, mem, len, flags)); + sock = get_socket(s); + if (!sock) { + return -1; + } +#if LWIP_TCP + if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) == NETCONN_TCP) { + ret = lwip_recv_tcp(sock, mem, len, flags); + lwip_recv_tcp_from(sock, from, fromlen, "lwip_recvfrom", s, ret); + done_socket(sock); + return ret; + } else +#endif + { + u16_t datagram_len = 0; + struct iovec vec; + struct msghdr msg; + err_t err; + vec.iov_base = mem; + vec.iov_len = len; + msg.msg_control = NULL; + msg.msg_controllen = 0; + msg.msg_flags = 0; + msg.msg_iov = &vec; + msg.msg_iovlen = 1; + msg.msg_name = from; + msg.msg_namelen = (fromlen ? *fromlen : 0); + err = lwip_recvfrom_udp_raw(sock, flags, &msg, &datagram_len, s); + if (err != ERR_OK) { + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recvfrom[UDP/RAW](%d): buf == NULL, error is \"%s\"!\n", + s, lwip_strerr(err))); + sock_set_errno(sock, err_to_errno(err)); + done_socket(sock); + return -1; + } + ret = (ssize_t)LWIP_MIN(LWIP_MIN(len, datagram_len), SSIZE_MAX); + if (fromlen) { + *fromlen = msg.msg_namelen; + } + } + + sock_set_errno(sock, 0); + done_socket(sock); + return ret; +} + +ssize_t +lwip_read(int s, void *mem, size_t len) +{ + return lwip_recvfrom(s, mem, len, 0, NULL, NULL); +} + +ssize_t +lwip_readv(int s, const struct iovec *iov, int iovcnt) +{ + struct msghdr msg; + + msg.msg_name = NULL; + msg.msg_namelen = 0; + /* Hack: we have to cast via number to cast from 'const' pointer to non-const. + Blame the opengroup standard for this inconsistency. 
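+
+     Typical use of this wrapper is a scatter read into a fixed header plus
+     a body buffer (sketch; my_hdr and the sizes are made up for the example,
+     and it works for both TCP and datagram sockets since it forwards to
+     lwip_recvmsg()):
+
+       #include "lwip/sockets.h"
+
+       struct my_hdr { u8_t type; u8_t flags; u16_t len; };
+
+       static ssize_t read_header_and_body(int s, struct my_hdr *hdr,
+                                           void *body, size_t body_len)
+       {
+         struct iovec iov[2];
+         iov[0].iov_base = hdr;
+         iov[0].iov_len  = sizeof(*hdr);
+         iov[1].iov_base = body;
+         iov[1].iov_len  = body_len;
+         return lwip_readv(s, iov, 2);
+       }
+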
*/ + msg.msg_iov = LWIP_CONST_CAST(struct iovec *, iov); + msg.msg_iovlen = iovcnt; + msg.msg_control = NULL; + msg.msg_controllen = 0; + msg.msg_flags = 0; + return lwip_recvmsg(s, &msg, 0); +} + +ssize_t +lwip_recv(int s, void *mem, size_t len, int flags) +{ + return lwip_recvfrom(s, mem, len, flags, NULL, NULL); +} + +ssize_t +lwip_recvmsg(int s, struct msghdr *message, int flags) +{ + struct lwip_sock *sock; + int i; + ssize_t buflen; + + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recvmsg(%d, message=%p, flags=0x%x)\n", s, (void *)message, flags)); + LWIP_ERROR("lwip_recvmsg: invalid message pointer", message != NULL, return ERR_ARG;); + LWIP_ERROR("lwip_recvmsg: unsupported flags", (flags & ~(MSG_PEEK|MSG_DONTWAIT)) == 0, + set_errno(EOPNOTSUPP); return -1;); + + if ((message->msg_iovlen <= 0) || (message->msg_iovlen > IOV_MAX)) { + set_errno(EMSGSIZE); + return -1; + } + + sock = get_socket(s); + if (!sock) { + return -1; + } + + /* check for valid vectors */ + buflen = 0; + for (i = 0; i < message->msg_iovlen; i++) { + if ((message->msg_iov[i].iov_base == NULL) || ((ssize_t)message->msg_iov[i].iov_len <= 0) || + ((size_t)(ssize_t)message->msg_iov[i].iov_len != message->msg_iov[i].iov_len) || + ((ssize_t)(buflen + (ssize_t)message->msg_iov[i].iov_len) <= 0)) { + sock_set_errno(sock, err_to_errno(ERR_VAL)); + done_socket(sock); + return -1; + } + buflen = (ssize_t)(buflen + (ssize_t)message->msg_iov[i].iov_len); + } + + if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) == NETCONN_TCP) { +#if LWIP_TCP + int recv_flags = flags; + message->msg_flags = 0; + /* recv the data */ + buflen = 0; + for (i = 0; i < message->msg_iovlen; i++) { + /* try to receive into this vector's buffer */ + ssize_t recvd_local = lwip_recv_tcp(sock, message->msg_iov[i].iov_base, message->msg_iov[i].iov_len, recv_flags); + if (recvd_local > 0) { + /* sum up received bytes */ + buflen += recvd_local; + } + if ((recvd_local < 0) || (recvd_local < (int)message->msg_iov[i].iov_len) || + (flags & MSG_PEEK)) { + /* returned prematurely (or peeking, which might actually be limitated to the first iov) */ + if (buflen <= 0) { + /* nothing received at all, propagate the error */ + buflen = recvd_local; + } + break; + } + /* pass MSG_DONTWAIT to lwip_recv_tcp() to prevent waiting for more data */ + recv_flags |= MSG_DONTWAIT; + } + if (buflen > 0) { + /* reset socket error since we have received something */ + sock_set_errno(sock, 0); + } + /* " If the socket is connected, the msg_name and msg_namelen members shall be ignored." 
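+
+     Caller-visible effect of the datagram branch further below: if a
+     datagram is larger than all supplied iovs combined, the tail is dropped
+     and MSG_TRUNC is reported (sketch; the UDP socket s and the 16-byte
+     buffer are hypothetical):
+
+       #include "lwip/sockets.h"
+
+       static int fits_in_16_bytes(int s)
+       {
+         char small[16];
+         struct iovec iov = { small, sizeof(small) };
+         struct msghdr msg = { 0 };
+         msg.msg_iov = &iov;
+         msg.msg_iovlen = 1;
+         if (lwip_recvmsg(s, &msg, 0) < 0) {
+           return -1;
+         }
+         return (msg.msg_flags & MSG_TRUNC) ? 0 : 1;
+       }
+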
*/ + done_socket(sock); + return buflen; +#else /* LWIP_TCP */ + sock_set_errno(sock, err_to_errno(ERR_ARG)); + done_socket(sock); + return -1; +#endif /* LWIP_TCP */ + } + /* else, UDP and RAW NETCONNs */ +#if LWIP_UDP || LWIP_RAW + { + u16_t datagram_len = 0; + err_t err; + err = lwip_recvfrom_udp_raw(sock, flags, message, &datagram_len, s); + if (err != ERR_OK) { + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recvmsg[UDP/RAW](%d): buf == NULL, error is \"%s\"!\n", + s, lwip_strerr(err))); + sock_set_errno(sock, err_to_errno(err)); + done_socket(sock); + return -1; + } + if (datagram_len > buflen) { + message->msg_flags |= MSG_TRUNC; + } + + sock_set_errno(sock, 0); + done_socket(sock); + return (int)datagram_len; + } +#else /* LWIP_UDP || LWIP_RAW */ + sock_set_errno(sock, err_to_errno(ERR_ARG)); + done_socket(sock); + return -1; +#endif /* LWIP_UDP || LWIP_RAW */ +} + +ssize_t +lwip_send(int s, const void *data, size_t size, int flags) +{ + struct lwip_sock *sock; + err_t err; + u8_t write_flags; + size_t written; + + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_send(%d, data=%p, size=%"SZT_F", flags=0x%x)\n", + s, data, size, flags)); + + sock = get_socket(s); + if (!sock) { + return -1; + } + + if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) != NETCONN_TCP) { +#if (LWIP_UDP || LWIP_RAW) + done_socket(sock); + return lwip_sendto(s, data, size, flags, NULL, 0); +#else /* (LWIP_UDP || LWIP_RAW) */ + sock_set_errno(sock, err_to_errno(ERR_ARG)); + done_socket(sock); + return -1; +#endif /* (LWIP_UDP || LWIP_RAW) */ + } + + write_flags = (u8_t)(NETCONN_COPY | + ((flags & MSG_MORE) ? NETCONN_MORE : 0) | + ((flags & MSG_DONTWAIT) ? NETCONN_DONTBLOCK : 0)); + written = 0; + err = netconn_write_partly(sock->conn, data, size, write_flags, &written); + + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_send(%d) err=%d written=%"SZT_F"\n", s, err, written)); + sock_set_errno(sock, err_to_errno(err)); + done_socket(sock); + /* casting 'written' to ssize_t is OK here since the netconn API limits it to SSIZE_MAX */ + return (err == ERR_OK ? (ssize_t)written : -1); +} + +ssize_t +lwip_sendmsg(int s, const struct msghdr *msg, int flags) +{ + struct lwip_sock *sock; +#if LWIP_TCP + u8_t write_flags; + size_t written; +#endif + err_t err = ERR_OK; + + sock = get_socket(s); + if (!sock) { + return -1; + } + + LWIP_ERROR("lwip_sendmsg: invalid msghdr", msg != NULL, + sock_set_errno(sock, err_to_errno(ERR_ARG)); done_socket(sock); return -1;); + LWIP_ERROR("lwip_sendmsg: invalid msghdr iov", msg->msg_iov != NULL, + sock_set_errno(sock, err_to_errno(ERR_ARG)); done_socket(sock); return -1;); + LWIP_ERROR("lwip_sendmsg: maximum iovs exceeded", (msg->msg_iovlen > 0) && (msg->msg_iovlen <= IOV_MAX), + sock_set_errno(sock, EMSGSIZE); done_socket(sock); return -1;); + LWIP_ERROR("lwip_sendmsg: unsupported flags", (flags & ~(MSG_DONTWAIT | MSG_MORE)) == 0, + sock_set_errno(sock, EOPNOTSUPP); done_socket(sock); return -1;); + + LWIP_UNUSED_ARG(msg->msg_control); + LWIP_UNUSED_ARG(msg->msg_controllen); + LWIP_UNUSED_ARG(msg->msg_flags); + + if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) == NETCONN_TCP) { +#if LWIP_TCP + write_flags = (u8_t)(NETCONN_COPY | + ((flags & MSG_MORE) ? NETCONN_MORE : 0) | + ((flags & MSG_DONTWAIT) ? 
NETCONN_DONTBLOCK : 0)); + + written = 0; + err = netconn_write_vectors_partly(sock->conn, (struct netvector *)msg->msg_iov, (u16_t)msg->msg_iovlen, write_flags, &written); + sock_set_errno(sock, err_to_errno(err)); + done_socket(sock); + /* casting 'written' to ssize_t is OK here since the netconn API limits it to SSIZE_MAX */ + return (err == ERR_OK ? (ssize_t)written : -1); +#else /* LWIP_TCP */ + sock_set_errno(sock, err_to_errno(ERR_ARG)); + done_socket(sock); + return -1; +#endif /* LWIP_TCP */ + } + /* else, UDP and RAW NETCONNs */ +#if LWIP_UDP || LWIP_RAW + { + struct netbuf chain_buf; + int i; + ssize_t size = 0; + + LWIP_UNUSED_ARG(flags); + LWIP_ERROR("lwip_sendmsg: invalid msghdr name", (((msg->msg_name == NULL) && (msg->msg_namelen == 0)) || + IS_SOCK_ADDR_LEN_VALID(msg->msg_namelen)), + sock_set_errno(sock, err_to_errno(ERR_ARG)); done_socket(sock); return -1;); + + /* initialize chain buffer with destination */ + memset(&chain_buf, 0, sizeof(struct netbuf)); + if (msg->msg_name) { + u16_t remote_port; + SOCKADDR_TO_IPADDR_PORT((const struct sockaddr *)msg->msg_name, &chain_buf.addr, remote_port); + netbuf_fromport(&chain_buf) = remote_port; + } +#if LWIP_NETIF_TX_SINGLE_PBUF + for (i = 0; i < msg->msg_iovlen; i++) { + size += msg->msg_iov[i].iov_len; + if ((msg->msg_iov[i].iov_len > INT_MAX) || (size < (int)msg->msg_iov[i].iov_len)) { + /* overflow */ + goto sendmsg_emsgsize; + } + } + if (size > 0xFFFF) { + /* overflow */ + goto sendmsg_emsgsize; + } + /* Allocate a new netbuf and copy the data into it. */ + if (netbuf_alloc(&chain_buf, (u16_t)size) == NULL) { + err = ERR_MEM; + } else { + /* flatten the IO vectors */ + size_t offset = 0; + for (i = 0; i < msg->msg_iovlen; i++) { + MEMCPY(&((u8_t *)chain_buf.p->payload)[offset], msg->msg_iov[i].iov_base, msg->msg_iov[i].iov_len); + offset += msg->msg_iov[i].iov_len; + } +#if LWIP_CHECKSUM_ON_COPY + { + /* This can be improved by using LWIP_CHKSUM_COPY() and aggregating the checksum for each IO vector */ + u16_t chksum = ~inet_chksum_pbuf(chain_buf.p); + netbuf_set_chksum(&chain_buf, chksum); + } +#endif /* LWIP_CHECKSUM_ON_COPY */ + err = ERR_OK; + } +#else /* LWIP_NETIF_TX_SINGLE_PBUF */ + /* create a chained netbuf from the IO vectors. 
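+
+     From the application side, either build variant (flattened copy or
+     chained PBUF_REF pbufs) is driven the same way; a gather send on a UDP
+     socket might look like this sketch (destination 127.0.0.1:5005 is
+     hypothetical):
+
+       #include "lwip/sockets.h"
+
+       static ssize_t send_two_parts(int s, const void *hdr, size_t hdr_len,
+                                     const void *body, size_t body_len)
+       {
+         struct sockaddr_in to = { 0 };
+         struct iovec iov[2];
+         struct msghdr msg = { 0 };
+
+         to.sin_family = AF_INET;
+         to.sin_port = lwip_htons(5005);
+         to.sin_addr.s_addr = lwip_htonl(INADDR_LOOPBACK);
+
+         iov[0].iov_base = (void *)hdr;  iov[0].iov_len = hdr_len;
+         iov[1].iov_base = (void *)body; iov[1].iov_len = body_len;
+         msg.msg_name = &to;
+         msg.msg_namelen = sizeof(to);
+         msg.msg_iov = iov;
+         msg.msg_iovlen = 2;
+         return lwip_sendmsg(s, &msg, 0);   // both parts leave as one datagram
+       }
+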
NOTE: we assemble a pbuf chain + manually to avoid having to allocate, chain, and delete a netbuf for each iov */ + for (i = 0; i < msg->msg_iovlen; i++) { + struct pbuf *p; + if (msg->msg_iov[i].iov_len > 0xFFFF) { + /* overflow */ + goto sendmsg_emsgsize; + } + p = pbuf_alloc(PBUF_TRANSPORT, 0, PBUF_REF); + if (p == NULL) { + err = ERR_MEM; /* let netbuf_delete() cleanup chain_buf */ + break; + } + p->payload = msg->msg_iov[i].iov_base; + p->len = p->tot_len = (u16_t)msg->msg_iov[i].iov_len; + /* netbuf empty, add new pbuf */ + if (chain_buf.p == NULL) { + chain_buf.p = chain_buf.ptr = p; + /* add pbuf to existing pbuf chain */ + } else { + if (chain_buf.p->tot_len + p->len > 0xffff) { + /* overflow */ + pbuf_free(p); + goto sendmsg_emsgsize; + } + pbuf_cat(chain_buf.p, p); + } + } + /* save size of total chain */ + if (err == ERR_OK) { + size = netbuf_len(&chain_buf); + } +#endif /* LWIP_NETIF_TX_SINGLE_PBUF */ + + if (err == ERR_OK) { +#if LWIP_IPV4 && LWIP_IPV6 + /* Dual-stack: Unmap IPv4 mapped IPv6 addresses */ + if (IP_IS_V6_VAL(chain_buf.addr) && ip6_addr_isipv4mappedipv6(ip_2_ip6(&chain_buf.addr))) { + unmap_ipv4_mapped_ipv6(ip_2_ip4(&chain_buf.addr), ip_2_ip6(&chain_buf.addr)); + IP_SET_TYPE_VAL(chain_buf.addr, IPADDR_TYPE_V4); + } +#endif /* LWIP_IPV4 && LWIP_IPV6 */ + + /* send the data */ + err = netconn_send(sock->conn, &chain_buf); + } + + /* deallocated the buffer */ + netbuf_free(&chain_buf); + + sock_set_errno(sock, err_to_errno(err)); + done_socket(sock); + return (err == ERR_OK ? size : -1); +sendmsg_emsgsize: + sock_set_errno(sock, EMSGSIZE); + netbuf_free(&chain_buf); + done_socket(sock); + return -1; + } +#else /* LWIP_UDP || LWIP_RAW */ + sock_set_errno(sock, err_to_errno(ERR_ARG)); + done_socket(sock); + return -1; +#endif /* LWIP_UDP || LWIP_RAW */ +} + +ssize_t +lwip_sendto(int s, const void *data, size_t size, int flags, + const struct sockaddr *to, socklen_t tolen) +{ + struct lwip_sock *sock; + err_t err; + u16_t short_size; + u16_t remote_port; + struct netbuf buf; + + sock = get_socket(s); + if (!sock) { + return -1; + } + + if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) == NETCONN_TCP) { +#if LWIP_TCP + done_socket(sock); + return lwip_send(s, data, size, flags); +#else /* LWIP_TCP */ + LWIP_UNUSED_ARG(flags); + sock_set_errno(sock, err_to_errno(ERR_ARG)); + done_socket(sock); + return -1; +#endif /* LWIP_TCP */ + } + + if (size > LWIP_MIN(0xFFFF, SSIZE_MAX)) { + /* cannot fit into one datagram (at least for us) */ + sock_set_errno(sock, EMSGSIZE); + done_socket(sock); + return -1; + } + short_size = (u16_t)size; + LWIP_ERROR("lwip_sendto: invalid address", (((to == NULL) && (tolen == 0)) || + (IS_SOCK_ADDR_LEN_VALID(tolen) && + ((to != NULL) && (IS_SOCK_ADDR_TYPE_VALID(to) && IS_SOCK_ADDR_ALIGNED(to))))), + sock_set_errno(sock, err_to_errno(ERR_ARG)); done_socket(sock); return -1;); + LWIP_UNUSED_ARG(tolen); + + /* initialize a buffer */ + buf.p = buf.ptr = NULL; +#if LWIP_CHECKSUM_ON_COPY + buf.flags = 0; +#endif /* LWIP_CHECKSUM_ON_COPY */ + if (to) { + SOCKADDR_TO_IPADDR_PORT(to, &buf.addr, remote_port); + } else { + remote_port = 0; + ip_addr_set_any(NETCONNTYPE_ISIPV6(netconn_type(sock->conn)), &buf.addr); + } + netbuf_fromport(&buf) = remote_port; + + + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_sendto(%d, data=%p, short_size=%"U16_F", flags=0x%x to=", + s, data, short_size, flags)); + ip_addr_debug_print_val(SOCKETS_DEBUG, buf.addr); + LWIP_DEBUGF(SOCKETS_DEBUG, (" port=%"U16_F"\n", remote_port)); + + /* make the buffer point to the data that should be 
sent */ +#if LWIP_NETIF_TX_SINGLE_PBUF + /* Allocate a new netbuf and copy the data into it. */ + if (netbuf_alloc(&buf, short_size) == NULL) { + err = ERR_MEM; + } else { +#if LWIP_CHECKSUM_ON_COPY + if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) != NETCONN_RAW) { + u16_t chksum = LWIP_CHKSUM_COPY(buf.p->payload, data, short_size); + netbuf_set_chksum(&buf, chksum); + } else +#endif /* LWIP_CHECKSUM_ON_COPY */ + { + MEMCPY(buf.p->payload, data, short_size); + } + err = ERR_OK; + } +#else /* LWIP_NETIF_TX_SINGLE_PBUF */ + err = netbuf_ref(&buf, data, short_size); +#endif /* LWIP_NETIF_TX_SINGLE_PBUF */ + if (err == ERR_OK) { +#if LWIP_IPV4 && LWIP_IPV6 + /* Dual-stack: Unmap IPv4 mapped IPv6 addresses */ + if (IP_IS_V6_VAL(buf.addr) && ip6_addr_isipv4mappedipv6(ip_2_ip6(&buf.addr))) { + unmap_ipv4_mapped_ipv6(ip_2_ip4(&buf.addr), ip_2_ip6(&buf.addr)); + IP_SET_TYPE_VAL(buf.addr, IPADDR_TYPE_V4); + } +#endif /* LWIP_IPV4 && LWIP_IPV6 */ + + /* send the data */ + err = netconn_send(sock->conn, &buf); + } + + /* deallocated the buffer */ + netbuf_free(&buf); + + sock_set_errno(sock, err_to_errno(err)); + done_socket(sock); + return (err == ERR_OK ? short_size : -1); +} + +int +lwip_socket(int domain, int type, int protocol) +{ + struct netconn *conn; + int i; + + LWIP_UNUSED_ARG(domain); /* @todo: check this */ + + /* create a netconn */ + switch (type) { + case SOCK_RAW: + conn = netconn_new_with_proto_and_callback(DOMAIN_TO_NETCONN_TYPE(domain, NETCONN_RAW), + (u8_t)protocol, DEFAULT_SOCKET_EVENTCB); + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_socket(%s, SOCK_RAW, %d) = ", + domain == PF_INET ? "PF_INET" : "UNKNOWN", protocol)); + break; + case SOCK_DGRAM: + conn = netconn_new_with_callback(DOMAIN_TO_NETCONN_TYPE(domain, + ((protocol == IPPROTO_UDPLITE) ? NETCONN_UDPLITE : NETCONN_UDP)), + DEFAULT_SOCKET_EVENTCB); + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_socket(%s, SOCK_DGRAM, %d) = ", + domain == PF_INET ? "PF_INET" : "UNKNOWN", protocol)); +#if LWIP_NETBUF_RECVINFO + if (conn) { + /* netconn layer enables pktinfo by default, sockets default to off */ + conn->flags &= ~NETCONN_FLAG_PKTINFO; + } +#endif /* LWIP_NETBUF_RECVINFO */ + break; + case SOCK_STREAM: + conn = netconn_new_with_callback(DOMAIN_TO_NETCONN_TYPE(domain, NETCONN_TCP), DEFAULT_SOCKET_EVENTCB); + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_socket(%s, SOCK_STREAM, %d) = ", + domain == PF_INET ? "PF_INET" : "UNKNOWN", protocol)); + break; + default: + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_socket(%d, %d/UNKNOWN, %d) = -1\n", + domain, type, protocol)); + set_errno(EINVAL); + return -1; + } + + if (!conn) { + LWIP_DEBUGF(SOCKETS_DEBUG, ("-1 / ENOBUFS (could not create netconn)\n")); + set_errno(ENOBUFS); + return -1; + } + + i = alloc_socket(conn, 0); + + if (i == -1) { + netconn_delete(conn); + set_errno(ENFILE); + return -1; + } + conn->socket = i; + done_socket(&sockets[i - LWIP_SOCKET_OFFSET]); + LWIP_DEBUGF(SOCKETS_DEBUG, ("%d\n", i)); + set_errno(0); + return i; +} + +ssize_t +lwip_write(int s, const void *data, size_t size) +{ + return lwip_send(s, data, size, 0); +} + +ssize_t +lwip_writev(int s, const struct iovec *iov, int iovcnt) +{ + struct msghdr msg; + + msg.msg_name = NULL; + msg.msg_namelen = 0; + /* Hack: we have to cast via number to cast from 'const' pointer to non-const. + Blame the opengroup standard for this inconsistency. 
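+
+     Putting lwip_socket() and lwip_sendto() from above together (sketch;
+     the peer 192.0.2.1:9000 is a made-up test address, and lwip_close()
+     is provided by the same socket layer):
+
+       #include <string.h>
+       #include "lwip/sockets.h"
+
+       static void say_hello(void)
+       {
+         struct sockaddr_in to;
+         int s = lwip_socket(AF_INET, SOCK_DGRAM, 0);
+         if (s < 0) {
+           return;
+         }
+         memset(&to, 0, sizeof(to));
+         to.sin_family = AF_INET;
+         to.sin_port = lwip_htons(9000);
+         to.sin_addr.s_addr = lwip_htonl(0xC0000201UL);  // 192.0.2.1
+         lwip_sendto(s, "hello", 5, 0, (struct sockaddr *)&to, sizeof(to));
+         lwip_close(s);
+       }
+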
*/ + msg.msg_iov = LWIP_CONST_CAST(struct iovec *, iov); + msg.msg_iovlen = iovcnt; + msg.msg_control = NULL; + msg.msg_controllen = 0; + msg.msg_flags = 0; + return lwip_sendmsg(s, &msg, 0); +} + +#if LWIP_SOCKET_SELECT || LWIP_SOCKET_POLL +/* Add select_cb to select_cb_list. */ +static void +lwip_link_select_cb(struct lwip_select_cb *select_cb) +{ + LWIP_SOCKET_SELECT_DECL_PROTECT(lev); + + /* Protect the select_cb_list */ + LWIP_SOCKET_SELECT_PROTECT(lev); + + /* Put this select_cb on top of list */ + select_cb->next = select_cb_list; + if (select_cb_list != NULL) { + select_cb_list->prev = select_cb; + } + select_cb_list = select_cb; +#if !LWIP_TCPIP_CORE_LOCKING + /* Increasing this counter tells select_check_waiters that the list has changed. */ + select_cb_ctr++; +#endif + + /* Now we can safely unprotect */ + LWIP_SOCKET_SELECT_UNPROTECT(lev); +} + +/* Remove select_cb from select_cb_list. */ +static void +lwip_unlink_select_cb(struct lwip_select_cb *select_cb) +{ + LWIP_SOCKET_SELECT_DECL_PROTECT(lev); + + /* Take us off the list */ + LWIP_SOCKET_SELECT_PROTECT(lev); + if (select_cb->next != NULL) { + select_cb->next->prev = select_cb->prev; + } + if (select_cb_list == select_cb) { + LWIP_ASSERT("select_cb->prev == NULL", select_cb->prev == NULL); + select_cb_list = select_cb->next; + } else { + LWIP_ASSERT("select_cb->prev != NULL", select_cb->prev != NULL); + select_cb->prev->next = select_cb->next; + } +#if !LWIP_TCPIP_CORE_LOCKING + /* Increasing this counter tells select_check_waiters that the list has changed. */ + select_cb_ctr++; +#endif + LWIP_SOCKET_SELECT_UNPROTECT(lev); +} +#endif /* LWIP_SOCKET_SELECT || LWIP_SOCKET_POLL */ + +#if LWIP_SOCKET_SELECT +/** + * Go through the readset and writeset lists and see which socket of the sockets + * set in the sets has events. On return, readset, writeset and exceptset have + * the sockets enabled that had events. + * + * @param maxfdp1 the highest socket index in the sets + * @param readset_in set of sockets to check for read events + * @param writeset_in set of sockets to check for write events + * @param exceptset_in set of sockets to check for error events + * @param readset_out set of sockets that had read events + * @param writeset_out set of sockets that had write events + * @param exceptset_out set os sockets that had error events + * @return number of sockets that had events (read/write/exception) (>= 0) + */ +static int +lwip_selscan(int maxfdp1, fd_set *readset_in, fd_set *writeset_in, fd_set *exceptset_in, + fd_set *readset_out, fd_set *writeset_out, fd_set *exceptset_out) +{ + int i, nready = 0; + fd_set lreadset, lwriteset, lexceptset; + struct lwip_sock *sock; + SYS_ARCH_DECL_PROTECT(lev); + + FD_ZERO(&lreadset); + FD_ZERO(&lwriteset); + FD_ZERO(&lexceptset); + + /* Go through each socket in each list to count number of sockets which + currently match */ + for (i = LWIP_SOCKET_OFFSET; i < maxfdp1; i++) { + /* if this FD is not in the set, continue */ + if (!(readset_in && FD_ISSET(i, readset_in)) && + !(writeset_in && FD_ISSET(i, writeset_in)) && + !(exceptset_in && FD_ISSET(i, exceptset_in))) { + continue; + } + /* First get the socket's status (protected)... */ + SYS_ARCH_PROTECT(lev); + sock = tryget_socket_unconn_locked(i); + if (sock != NULL) { + void *lastdata = sock->lastdata.pbuf; + s16_t rcvevent = sock->rcvevent; + u16_t sendevent = sock->sendevent; + u16_t errevent = sock->errevent; + SYS_ARCH_UNPROTECT(lev); + + /* ... 
then examine it: */ + /* See if netconn of this socket is ready for read */ + if (readset_in && FD_ISSET(i, readset_in) && ((lastdata != NULL) || (rcvevent > 0))) { + FD_SET(i, &lreadset); + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_selscan: fd=%d ready for reading\n", i)); + nready++; + } + /* See if netconn of this socket is ready for write */ + if (writeset_in && FD_ISSET(i, writeset_in) && (sendevent != 0)) { + FD_SET(i, &lwriteset); + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_selscan: fd=%d ready for writing\n", i)); + nready++; + } + /* See if netconn of this socket had an error */ + if (exceptset_in && FD_ISSET(i, exceptset_in) && (errevent != 0)) { + FD_SET(i, &lexceptset); + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_selscan: fd=%d ready for exception\n", i)); + nready++; + } + done_socket(sock); + } else { + SYS_ARCH_UNPROTECT(lev); + /* no a valid open socket */ + return -1; + } + } + /* copy local sets to the ones provided as arguments */ + *readset_out = lreadset; + *writeset_out = lwriteset; + *exceptset_out = lexceptset; + + LWIP_ASSERT("nready >= 0", nready >= 0); + return nready; +} + +#if LWIP_NETCONN_FULLDUPLEX +/* Mark all of the set sockets in one of the three fdsets passed to select as used. + * All sockets are marked (and later unmarked), whether they are open or not. + * This is OK as lwip_selscan aborts select when non-open sockets are found. + */ +static void +lwip_select_inc_sockets_used_set(int maxfdp, fd_set *fdset, fd_set *used_sockets) +{ + SYS_ARCH_DECL_PROTECT(lev); + if (fdset) { + int i; + for (i = LWIP_SOCKET_OFFSET; i < maxfdp; i++) { + /* if this FD is in the set, lock it (unless already done) */ + if (FD_ISSET(i, fdset) && !FD_ISSET(i, used_sockets)) { + struct lwip_sock *sock; + SYS_ARCH_PROTECT(lev); + sock = tryget_socket_unconn_locked(i); + if (sock != NULL) { + /* leave the socket used until released by lwip_select_dec_sockets_used */ + FD_SET(i, used_sockets); + } + SYS_ARCH_UNPROTECT(lev); + } + } + } +} + +/* Mark all sockets passed to select as used to prevent them from being freed + * from other threads while select is running. + * Marked sockets are added to 'used_sockets' to mark them only once an be able + * to unmark them correctly. 
+ */ +static void +lwip_select_inc_sockets_used(int maxfdp, fd_set *fdset1, fd_set *fdset2, fd_set *fdset3, fd_set *used_sockets) +{ + FD_ZERO(used_sockets); + lwip_select_inc_sockets_used_set(maxfdp, fdset1, used_sockets); + lwip_select_inc_sockets_used_set(maxfdp, fdset2, used_sockets); + lwip_select_inc_sockets_used_set(maxfdp, fdset3, used_sockets); +} + +/* Let go all sockets that were marked as used when starting select */ +static void +lwip_select_dec_sockets_used(int maxfdp, fd_set *used_sockets) +{ + int i; + for (i = LWIP_SOCKET_OFFSET; i < maxfdp; i++) { + /* if this FD is not in the set, continue */ + if (FD_ISSET(i, used_sockets)) { + struct lwip_sock *sock = tryget_socket_unconn_nouse(i); + LWIP_ASSERT("socket gone at the end of select", sock != NULL); + if (sock != NULL) { + done_socket(sock); + } + } + } +} +#else /* LWIP_NETCONN_FULLDUPLEX */ +#define lwip_select_inc_sockets_used(maxfdp1, readset, writeset, exceptset, used_sockets) +#define lwip_select_dec_sockets_used(maxfdp1, used_sockets) +#endif /* LWIP_NETCONN_FULLDUPLEX */ + +int +lwip_select(int maxfdp1, fd_set *readset, fd_set *writeset, fd_set *exceptset, + struct timeval *timeout) +{ + u32_t waitres = 0; + int nready; + fd_set lreadset, lwriteset, lexceptset; + u32_t msectimeout; + int i; + int maxfdp2; +#if LWIP_NETCONN_SEM_PER_THREAD + int waited = 0; +#endif +#if LWIP_NETCONN_FULLDUPLEX + fd_set used_sockets; +#endif + SYS_ARCH_DECL_PROTECT(lev); + + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_select(%d, %p, %p, %p, tvsec=%"S32_F" tvusec=%"S32_F")\n", + maxfdp1, (void *)readset, (void *) writeset, (void *) exceptset, + timeout ? (s32_t)timeout->tv_sec : (s32_t) - 1, + timeout ? (s32_t)timeout->tv_usec : (s32_t) - 1)); + + if ((maxfdp1 < 0) || (maxfdp1 > LWIP_SELECT_MAXNFDS)) { + set_errno(EINVAL); + return -1; + } + + lwip_select_inc_sockets_used(maxfdp1, readset, writeset, exceptset, &used_sockets); + + /* Go through each socket in each list to count number of sockets which + currently match */ + nready = lwip_selscan(maxfdp1, readset, writeset, exceptset, &lreadset, &lwriteset, &lexceptset); + + if (nready < 0) { + /* one of the sockets in one of the fd_sets was invalid */ + set_errno(EBADF); + lwip_select_dec_sockets_used(maxfdp1, &used_sockets); + return -1; + } else if (nready > 0) { + /* one or more sockets are set, no need to wait */ + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_select: nready=%d\n", nready)); + } else { + /* If we don't have any current events, then suspend if we are supposed to */ + if (timeout && timeout->tv_sec == 0 && timeout->tv_usec == 0) { + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_select: no timeout, returning 0\n")); + /* This is OK as the local fdsets are empty and nready is zero, + or we would have returned earlier. */ + } else { + /* None ready: add our semaphore to list: + We don't actually need any dynamic memory. Our entry on the + list is only valid while we are in this function, so it's ok + to use local variables (unless we're running in MPU compatible + mode). 
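+
+     Caller-side sketch of the mechanism implemented here: wait up to 500 ms
+     for a single (hypothetical) socket s to become readable.
+
+       #include "lwip/sockets.h"
+
+       static int wait_readable(int s)
+       {
+         fd_set rset;
+         struct timeval tv;
+         FD_ZERO(&rset);
+         FD_SET(s, &rset);
+         tv.tv_sec = 0;
+         tv.tv_usec = 500 * 1000;
+         // maxfdp1 is the highest descriptor plus one, as in BSD select()
+         return lwip_select(s + 1, &rset, NULL, NULL, &tv);  // 1, 0 or -1
+       }
+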
*/ + API_SELECT_CB_VAR_DECLARE(select_cb); + API_SELECT_CB_VAR_ALLOC(select_cb, set_errno(ENOMEM); lwip_select_dec_sockets_used(maxfdp1, &used_sockets); return -1); + memset(&API_SELECT_CB_VAR_REF(select_cb), 0, sizeof(struct lwip_select_cb)); + + API_SELECT_CB_VAR_REF(select_cb).readset = readset; + API_SELECT_CB_VAR_REF(select_cb).writeset = writeset; + API_SELECT_CB_VAR_REF(select_cb).exceptset = exceptset; +#if LWIP_NETCONN_SEM_PER_THREAD + API_SELECT_CB_VAR_REF(select_cb).sem = LWIP_NETCONN_THREAD_SEM_GET(); +#else /* LWIP_NETCONN_SEM_PER_THREAD */ + if (sys_sem_new(&API_SELECT_CB_VAR_REF(select_cb).sem, 0) != ERR_OK) { + /* failed to create semaphore */ + set_errno(ENOMEM); + lwip_select_dec_sockets_used(maxfdp1, &used_sockets); + API_SELECT_CB_VAR_FREE(select_cb); + return -1; + } +#endif /* LWIP_NETCONN_SEM_PER_THREAD */ + + lwip_link_select_cb(&API_SELECT_CB_VAR_REF(select_cb)); + + /* Increase select_waiting for each socket we are interested in */ + maxfdp2 = maxfdp1; + for (i = LWIP_SOCKET_OFFSET; i < maxfdp1; i++) { + if ((readset && FD_ISSET(i, readset)) || + (writeset && FD_ISSET(i, writeset)) || + (exceptset && FD_ISSET(i, exceptset))) { + struct lwip_sock *sock; + SYS_ARCH_PROTECT(lev); + sock = tryget_socket_unconn_locked(i); + if (sock != NULL) { + sock->select_waiting++; + if (sock->select_waiting == 0) { + /* overflow - too many threads waiting */ + sock->select_waiting--; + nready = -1; + maxfdp2 = i; + SYS_ARCH_UNPROTECT(lev); + done_socket(sock); + set_errno(EBUSY); + break; + } + SYS_ARCH_UNPROTECT(lev); + done_socket(sock); + } else { + /* Not a valid socket */ + nready = -1; + maxfdp2 = i; + SYS_ARCH_UNPROTECT(lev); + set_errno(EBADF); + break; + } + } + } + + if (nready >= 0) { + /* Call lwip_selscan again: there could have been events between + the last scan (without us on the list) and putting us on the list! */ + nready = lwip_selscan(maxfdp1, readset, writeset, exceptset, &lreadset, &lwriteset, &lexceptset); + if (!nready) { + /* Still none ready, just wait to be woken */ + if (timeout == 0) { + /* Wait forever */ + msectimeout = 0; + } else { + long msecs_long = ((timeout->tv_sec * 1000) + ((timeout->tv_usec + 500) / 1000)); + if (msecs_long <= 0) { + /* Wait 1ms at least (0 means wait forever) */ + msectimeout = 1; + } else { + msectimeout = (u32_t)msecs_long; + } + } + + waitres = sys_arch_sem_wait(SELECT_SEM_PTR(API_SELECT_CB_VAR_REF(select_cb).sem), msectimeout); +#if LWIP_NETCONN_SEM_PER_THREAD + waited = 1; +#endif + } + } + + /* Decrease select_waiting for each socket we are interested in */ + for (i = LWIP_SOCKET_OFFSET; i < maxfdp2; i++) { + if ((readset && FD_ISSET(i, readset)) || + (writeset && FD_ISSET(i, writeset)) || + (exceptset && FD_ISSET(i, exceptset))) { + struct lwip_sock *sock; + SYS_ARCH_PROTECT(lev); + sock = tryget_socket_unconn_locked(i); + if (sock != NULL) { + /* for now, handle select_waiting==0... 
*/ + LWIP_ASSERT("sock->select_waiting > 0", sock->select_waiting > 0); + if (sock->select_waiting > 0) { + sock->select_waiting--; + } + SYS_ARCH_UNPROTECT(lev); + done_socket(sock); + } else { + SYS_ARCH_UNPROTECT(lev); + /* Not a valid socket */ + nready = -1; + set_errno(EBADF); + } + } + } + + lwip_unlink_select_cb(&API_SELECT_CB_VAR_REF(select_cb)); + +#if LWIP_NETCONN_SEM_PER_THREAD + if (API_SELECT_CB_VAR_REF(select_cb).sem_signalled && (!waited || (waitres == SYS_ARCH_TIMEOUT))) { + /* don't leave the thread-local semaphore signalled */ + sys_arch_sem_wait(API_SELECT_CB_VAR_REF(select_cb).sem, 1); + } +#else /* LWIP_NETCONN_SEM_PER_THREAD */ + sys_sem_free(&API_SELECT_CB_VAR_REF(select_cb).sem); +#endif /* LWIP_NETCONN_SEM_PER_THREAD */ + API_SELECT_CB_VAR_FREE(select_cb); + + if (nready < 0) { + /* This happens when a socket got closed while waiting */ + lwip_select_dec_sockets_used(maxfdp1, &used_sockets); + return -1; + } + + if (waitres == SYS_ARCH_TIMEOUT) { + /* Timeout */ + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_select: timeout expired\n")); + /* This is OK as the local fdsets are empty and nready is zero, + or we would have returned earlier. */ + } else { + /* See what's set now after waiting */ + nready = lwip_selscan(maxfdp1, readset, writeset, exceptset, &lreadset, &lwriteset, &lexceptset); + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_select: nready=%d\n", nready)); + } + } + } + + lwip_select_dec_sockets_used(maxfdp1, &used_sockets); + set_errno(0); + if (readset) { + *readset = lreadset; + } + if (writeset) { + *writeset = lwriteset; + } + if (exceptset) { + *exceptset = lexceptset; + } + return nready; +} +#endif /* LWIP_SOCKET_SELECT */ + +#if LWIP_SOCKET_POLL +/** Options for the lwip_pollscan function. */ +enum lwip_pollscan_opts +{ + /** Clear revents in each struct pollfd. */ + LWIP_POLLSCAN_CLEAR = 1, + + /** Increment select_waiting in each struct lwip_sock. */ + LWIP_POLLSCAN_INC_WAIT = 2, + + /** Decrement select_waiting in each struct lwip_sock. */ + LWIP_POLLSCAN_DEC_WAIT = 4 +}; + +/** + * Update revents in each struct pollfd. + * Optionally update select_waiting in struct lwip_sock. + * + * @param fds array of structures to update + * @param nfds number of structures in fds + * @param opts what to update and how + * @return number of structures that have revents != 0 + */ +static int +lwip_pollscan(struct pollfd *fds, nfds_t nfds, enum lwip_pollscan_opts opts) +{ + int nready = 0; + nfds_t fdi; + struct lwip_sock *sock; + SYS_ARCH_DECL_PROTECT(lev); + + /* Go through each struct pollfd in the array. */ + for (fdi = 0; fdi < nfds; fdi++) { + if ((opts & LWIP_POLLSCAN_CLEAR) != 0) { + fds[fdi].revents = 0; + } + + /* Negative fd means the caller wants us to ignore this struct. + POLLNVAL means we already detected that the fd is invalid; + if another thread has since opened a new socket with that fd, + we must not use that socket. */ + if (fds[fdi].fd >= 0 && (fds[fdi].revents & POLLNVAL) == 0) { + /* First get the socket's status (protected)... 
*/ + SYS_ARCH_PROTECT(lev); + sock = tryget_socket_unconn_locked(fds[fdi].fd); + if (sock != NULL) { + void* lastdata = sock->lastdata.pbuf; + s16_t rcvevent = sock->rcvevent; + u16_t sendevent = sock->sendevent; + u16_t errevent = sock->errevent; + + if ((opts & LWIP_POLLSCAN_INC_WAIT) != 0) { + sock->select_waiting++; + if (sock->select_waiting == 0) { + /* overflow - too many threads waiting */ + sock->select_waiting--; + nready = -1; + SYS_ARCH_UNPROTECT(lev); + done_socket(sock); + break; + } + } else if ((opts & LWIP_POLLSCAN_DEC_WAIT) != 0) { + /* for now, handle select_waiting==0... */ + LWIP_ASSERT("sock->select_waiting > 0", sock->select_waiting > 0); + if (sock->select_waiting > 0) { + sock->select_waiting--; + } + } + SYS_ARCH_UNPROTECT(lev); + done_socket(sock); + + /* ... then examine it: */ + /* See if netconn of this socket is ready for read */ + if ((fds[fdi].events & POLLIN) != 0 && ((lastdata != NULL) || (rcvevent > 0))) { + fds[fdi].revents |= POLLIN; + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_pollscan: fd=%d ready for reading\n", fds[fdi].fd)); + } + /* See if netconn of this socket is ready for write */ + if ((fds[fdi].events & POLLOUT) != 0 && (sendevent != 0)) { + fds[fdi].revents |= POLLOUT; + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_pollscan: fd=%d ready for writing\n", fds[fdi].fd)); + } + /* See if netconn of this socket had an error */ + if (errevent != 0) { + /* POLLERR is output only. */ + fds[fdi].revents |= POLLERR; + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_pollscan: fd=%d ready for exception\n", fds[fdi].fd)); + } + } else { + /* Not a valid socket */ + SYS_ARCH_UNPROTECT(lev); + /* POLLNVAL is output only. */ + fds[fdi].revents |= POLLNVAL; + return -1; + } + } + + /* Will return the number of structures that have events, + not the number of events. */ + if (fds[fdi].revents != 0) { + nready++; + } + } + + LWIP_ASSERT("nready >= 0", nready >= 0); + return nready; +} + +#if LWIP_NETCONN_FULLDUPLEX +/* Mark all sockets as used. + * + * All sockets are marked (and later unmarked), whether they are open or not. + * This is OK as lwip_pollscan aborts select when non-open sockets are found. + */ +static void +lwip_poll_inc_sockets_used(struct pollfd *fds, nfds_t nfds) +{ + nfds_t fdi; + + if(fds) { + /* Go through each struct pollfd in the array. */ + for (fdi = 0; fdi < nfds; fdi++) { + /* Increase the reference counter */ + tryget_socket_unconn(fds[fdi].fd); + } + } +} + +/* Let go all sockets that were marked as used when starting poll */ +static void +lwip_poll_dec_sockets_used(struct pollfd *fds, nfds_t nfds) +{ + nfds_t fdi; + + if(fds) { + /* Go through each struct pollfd in the array. 
*/ + for (fdi = 0; fdi < nfds; fdi++) { + struct lwip_sock *sock = tryget_socket_unconn_nouse(fds[fdi].fd); + if (sock != NULL) { + done_socket(sock); + } + } + } +} +#else /* LWIP_NETCONN_FULLDUPLEX */ +#define lwip_poll_inc_sockets_used(fds, nfds) +#define lwip_poll_dec_sockets_used(fds, nfds) +#endif /* LWIP_NETCONN_FULLDUPLEX */ + +int +lwip_poll(struct pollfd *fds, nfds_t nfds, int timeout) +{ + u32_t waitres = 0; + int nready; + u32_t msectimeout; +#if LWIP_NETCONN_SEM_PER_THREAD + int waited = 0; +#endif + + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_poll(%p, %d, %d)\n", + (void*)fds, (int)nfds, timeout)); + LWIP_ERROR("lwip_poll: invalid fds", ((fds != NULL && nfds > 0) || (fds == NULL && nfds == 0)), + set_errno(EINVAL); return -1;); + + lwip_poll_inc_sockets_used(fds, nfds); + + /* Go through each struct pollfd to count number of structures + which currently match */ + nready = lwip_pollscan(fds, nfds, LWIP_POLLSCAN_CLEAR); + + if (nready < 0) { + lwip_poll_dec_sockets_used(fds, nfds); + return -1; + } + + /* If we don't have any current events, then suspend if we are supposed to */ + if (!nready) { + API_SELECT_CB_VAR_DECLARE(select_cb); + + if (timeout == 0) { + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_poll: no timeout, returning 0\n")); + goto return_success; + } + API_SELECT_CB_VAR_ALLOC(select_cb, set_errno(EAGAIN); lwip_poll_dec_sockets_used(fds, nfds); return -1); + memset(&API_SELECT_CB_VAR_REF(select_cb), 0, sizeof(struct lwip_select_cb)); + + /* None ready: add our semaphore to list: + We don't actually need any dynamic memory. Our entry on the + list is only valid while we are in this function, so it's ok + to use local variables. */ + + API_SELECT_CB_VAR_REF(select_cb).poll_fds = fds; + API_SELECT_CB_VAR_REF(select_cb).poll_nfds = nfds; +#if LWIP_NETCONN_SEM_PER_THREAD + API_SELECT_CB_VAR_REF(select_cb).sem = LWIP_NETCONN_THREAD_SEM_GET(); +#else /* LWIP_NETCONN_SEM_PER_THREAD */ + if (sys_sem_new(&API_SELECT_CB_VAR_REF(select_cb).sem, 0) != ERR_OK) { + /* failed to create semaphore */ + set_errno(EAGAIN); + lwip_poll_dec_sockets_used(fds, nfds); + API_SELECT_CB_VAR_FREE(select_cb); + return -1; + } +#endif /* LWIP_NETCONN_SEM_PER_THREAD */ + + lwip_link_select_cb(&API_SELECT_CB_VAR_REF(select_cb)); + + /* Increase select_waiting for each socket we are interested in. + Also, check for events again: there could have been events between + the last scan (without us on the list) and putting us on the list! */ + nready = lwip_pollscan(fds, nfds, LWIP_POLLSCAN_INC_WAIT); + + if (!nready) { + /* Still none ready, just wait to be woken */ + if (timeout < 0) { + /* Wait forever */ + msectimeout = 0; + } else { + /* timeout == 0 would have been handled earlier. */ + LWIP_ASSERT("timeout > 0", timeout > 0); + msectimeout = timeout; + } + waitres = sys_arch_sem_wait(SELECT_SEM_PTR(API_SELECT_CB_VAR_REF(select_cb).sem), msectimeout); +#if LWIP_NETCONN_SEM_PER_THREAD + waited = 1; +#endif + } + + /* Decrease select_waiting for each socket we are interested in, + and check which events occurred while we waited. 
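+
+     The equivalent caller-side sketch for the poll interface: wait up to
+     500 ms for readability or an error on a (hypothetical) socket s.
+
+       #include "lwip/sockets.h"
+
+       static int wait_readable_poll(int s)
+       {
+         struct pollfd pfd;
+         pfd.fd = s;
+         pfd.events = POLLIN;
+         pfd.revents = 0;
+         if (lwip_poll(&pfd, 1, 500) > 0) {
+           return (pfd.revents & (POLLIN | POLLERR)) ? 1 : 0;
+         }
+         return 0;  // timed out (or lwip_poll itself failed)
+       }
+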
*/ + nready = lwip_pollscan(fds, nfds, LWIP_POLLSCAN_DEC_WAIT); + + lwip_unlink_select_cb(&API_SELECT_CB_VAR_REF(select_cb)); + +#if LWIP_NETCONN_SEM_PER_THREAD + if (select_cb.sem_signalled && (!waited || (waitres == SYS_ARCH_TIMEOUT))) { + /* don't leave the thread-local semaphore signalled */ + sys_arch_sem_wait(API_SELECT_CB_VAR_REF(select_cb).sem, 1); + } +#else /* LWIP_NETCONN_SEM_PER_THREAD */ + sys_sem_free(&API_SELECT_CB_VAR_REF(select_cb).sem); +#endif /* LWIP_NETCONN_SEM_PER_THREAD */ + API_SELECT_CB_VAR_FREE(select_cb); + + if (nready < 0) { + /* This happens when a socket got closed while waiting */ + lwip_poll_dec_sockets_used(fds, nfds); + return -1; + } + + if (waitres == SYS_ARCH_TIMEOUT) { + /* Timeout */ + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_poll: timeout expired\n")); + goto return_success; + } + } + + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_poll: nready=%d\n", nready)); +return_success: + lwip_poll_dec_sockets_used(fds, nfds); + set_errno(0); + return nready; +} + +/** + * Check whether event_callback should wake up a thread waiting in + * lwip_poll. + */ +static int +lwip_poll_should_wake(const struct lwip_select_cb *scb, int fd, int has_recvevent, int has_sendevent, int has_errevent) +{ + nfds_t fdi; + for (fdi = 0; fdi < scb->poll_nfds; fdi++) { + const struct pollfd *pollfd = &scb->poll_fds[fdi]; + if (pollfd->fd == fd) { + /* Do not update pollfd->revents right here; + that would be a data race because lwip_pollscan + accesses revents without protecting. */ + if (has_recvevent && (pollfd->events & POLLIN) != 0) { + return 1; + } + if (has_sendevent && (pollfd->events & POLLOUT) != 0) { + return 1; + } + if (has_errevent) { + /* POLLERR is output only. */ + return 1; + } + } + } + return 0; +} +#endif /* LWIP_SOCKET_POLL */ + +#if LWIP_SOCKET_SELECT || LWIP_SOCKET_POLL +/** + * Callback registered in the netconn layer for each socket-netconn. + * Processes recvevent (data available) and wakes up tasks waiting for select. + * + * @note for LWIP_TCPIP_CORE_LOCKING any caller of this function + * must have the core lock held when signaling the following events + * as they might cause select_list_cb to be checked: + * NETCONN_EVT_RCVPLUS + * NETCONN_EVT_SENDPLUS + * NETCONN_EVT_ERROR + * This requirement will be asserted in select_check_waiters() + */ +static void +event_callback(struct netconn *conn, enum netconn_evt evt, u16_t len) +{ + int s, check_waiters; + struct lwip_sock *sock; + SYS_ARCH_DECL_PROTECT(lev); + + LWIP_UNUSED_ARG(len); + + /* Get socket */ + if (conn) { + s = conn->socket; + if (s < 0) { + /* Data comes in right away after an accept, even though + * the server task might not have created a new socket yet. + * Just count down (or up) if that's the case and we + * will use the data later. Note that only receive events + * can happen before the new socket is set up. 
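+
+     This counting matters for a pattern like the listener sketch below,
+     where bytes can arrive on the new connection before the accepted
+     socket exists at the application level (listen_s is assumed to be a
+     TCP socket already set up with lwip_bind()/lwip_listen() from this
+     same socket layer):
+
+       static void accept_loop(int listen_s)
+       {
+         for (;;) {
+           fd_set rset;
+           FD_ZERO(&rset);
+           FD_SET(listen_s, &rset);
+           if (lwip_select(listen_s + 1, &rset, NULL, NULL, NULL) > 0) {
+             int c = lwip_accept(listen_s, NULL, NULL);  // peer addr not needed
+             if (c >= 0) {
+               // any bytes that raced in before lwip_accept() returned are
+               // already reflected in the new socket's rcvevent count
+               lwip_close(c);
+             }
+           }
+         }
+       }
+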
*/ + SYS_ARCH_PROTECT(lev); + if (conn->socket < 0) { + if (evt == NETCONN_EVT_RCVPLUS) { + /* conn->socket is -1 on initialization + lwip_accept adjusts sock->recvevent if conn->socket < -1 */ + conn->socket--; + } + SYS_ARCH_UNPROTECT(lev); + return; + } + s = conn->socket; + SYS_ARCH_UNPROTECT(lev); + } + + sock = get_socket(s); + if (!sock) { + return; + } + } else { + return; + } + + check_waiters = 1; + SYS_ARCH_PROTECT(lev); + /* Set event as required */ + switch (evt) { + case NETCONN_EVT_RCVPLUS: + sock->rcvevent++; + if (sock->rcvevent > 1) { + check_waiters = 0; + } + break; + case NETCONN_EVT_RCVMINUS: + sock->rcvevent--; + check_waiters = 0; + break; + case NETCONN_EVT_SENDPLUS: + if (sock->sendevent) { + check_waiters = 0; + } + sock->sendevent = 1; + break; + case NETCONN_EVT_SENDMINUS: + sock->sendevent = 0; + check_waiters = 0; + break; + case NETCONN_EVT_ERROR: + sock->errevent = 1; + break; + default: + LWIP_ASSERT("unknown event", 0); + break; + } + + if (sock->select_waiting && check_waiters) { + /* Save which events are active */ + int has_recvevent, has_sendevent, has_errevent; + has_recvevent = sock->rcvevent > 0; + has_sendevent = sock->sendevent != 0; + has_errevent = sock->errevent != 0; + SYS_ARCH_UNPROTECT(lev); + /* Check any select calls waiting on this socket */ + select_check_waiters(s, has_recvevent, has_sendevent, has_errevent); + } else { + SYS_ARCH_UNPROTECT(lev); + } + done_socket(sock); +} + +/** + * Check if any select waiters are waiting on this socket and its events + * + * @note on synchronization of select_cb_list: + * LWIP_TCPIP_CORE_LOCKING: the select_cb_list must only be accessed while holding + * the core lock. We do a single pass through the list and signal any waiters. + * Core lock should already be held when calling here!!!! + + * !LWIP_TCPIP_CORE_LOCKING: we use SYS_ARCH_PROTECT but unlock on each iteration + * of the loop, thus creating a possibility where a thread could modify the + * select_cb_list during our UNPROTECT/PROTECT. We use a generational counter to + * detect this change and restart the list walk. 
The list is expected to be small + */ +static void select_check_waiters(int s, int has_recvevent, int has_sendevent, int has_errevent) +{ + struct lwip_select_cb *scb; +#if !LWIP_TCPIP_CORE_LOCKING + int last_select_cb_ctr; + SYS_ARCH_DECL_PROTECT(lev); +#endif /* !LWIP_TCPIP_CORE_LOCKING */ + + LWIP_ASSERT_CORE_LOCKED(); + +#if !LWIP_TCPIP_CORE_LOCKING + SYS_ARCH_PROTECT(lev); +again: + /* remember the state of select_cb_list to detect changes */ + last_select_cb_ctr = select_cb_ctr; +#endif /* !LWIP_TCPIP_CORE_LOCKING */ + for (scb = select_cb_list; scb != NULL; scb = scb->next) { + if (scb->sem_signalled == 0) { + /* semaphore not signalled yet */ + int do_signal = 0; +#if LWIP_SOCKET_POLL + if (scb->poll_fds != NULL) { + do_signal = lwip_poll_should_wake(scb, s, has_recvevent, has_sendevent, has_errevent); + } +#endif /* LWIP_SOCKET_POLL */ +#if LWIP_SOCKET_SELECT && LWIP_SOCKET_POLL + else +#endif /* LWIP_SOCKET_SELECT && LWIP_SOCKET_POLL */ +#if LWIP_SOCKET_SELECT + { + /* Test this select call for our socket */ + if (has_recvevent) { + if (scb->readset && FD_ISSET(s, scb->readset)) { + do_signal = 1; + } + } + if (has_sendevent) { + if (!do_signal && scb->writeset && FD_ISSET(s, scb->writeset)) { + do_signal = 1; + } + } + if (has_errevent) { + if (!do_signal && scb->exceptset && FD_ISSET(s, scb->exceptset)) { + do_signal = 1; + } + } + } +#endif /* LWIP_SOCKET_SELECT */ + if (do_signal) { + scb->sem_signalled = 1; + /* For !LWIP_TCPIP_CORE_LOCKING, we don't call SYS_ARCH_UNPROTECT() before signaling + the semaphore, as this might lead to the select thread taking itself off the list, + invalidating the semaphore. */ + sys_sem_signal(SELECT_SEM_PTR(scb->sem)); + } + } +#if LWIP_TCPIP_CORE_LOCKING + } +#else + /* unlock interrupts with each step */ + SYS_ARCH_UNPROTECT(lev); + /* this makes sure interrupt protection time is short */ + SYS_ARCH_PROTECT(lev); + if (last_select_cb_ctr != select_cb_ctr) { + /* someone has changed select_cb_list, restart at the beginning */ + goto again; + } + /* remember the state of select_cb_list to detect changes */ + last_select_cb_ctr = select_cb_ctr; + } + SYS_ARCH_UNPROTECT(lev); +#endif +} +#endif /* LWIP_SOCKET_SELECT || LWIP_SOCKET_POLL */ + +/** + * Close one end of a full-duplex connection. + */ +int +lwip_shutdown(int s, int how) +{ + struct lwip_sock *sock; + err_t err; + u8_t shut_rx = 0, shut_tx = 0; + + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_shutdown(%d, how=%d)\n", s, how)); + + sock = get_socket(s); + if (!sock) { + return -1; + } + + if (sock->conn != NULL) { + if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) != NETCONN_TCP) { + sock_set_errno(sock, EOPNOTSUPP); + done_socket(sock); + return -1; + } + } else { + sock_set_errno(sock, ENOTCONN); + done_socket(sock); + return -1; + } + + if (how == SHUT_RD) { + shut_rx = 1; + } else if (how == SHUT_WR) { + shut_tx = 1; + } else if (how == SHUT_RDWR) { + shut_rx = 1; + shut_tx = 1; + } else { + sock_set_errno(sock, EINVAL); + done_socket(sock); + return -1; + } + err = netconn_shutdown(sock->conn, shut_rx, shut_tx); + + sock_set_errno(sock, err_to_errno(err)); + done_socket(sock); + return (err == ERR_OK ? 
0 : -1); +} + +static int +lwip_getaddrname(int s, struct sockaddr *name, socklen_t *namelen, u8_t local) +{ + struct lwip_sock *sock; + union sockaddr_aligned saddr; + ip_addr_t naddr; + u16_t port; + err_t err; + + sock = get_socket(s); + if (!sock) { + return -1; + } + + /* get the IP address and port */ + err = netconn_getaddr(sock->conn, &naddr, &port, local); + if (err != ERR_OK) { + sock_set_errno(sock, err_to_errno(err)); + done_socket(sock); + return -1; + } + +#if LWIP_IPV4 && LWIP_IPV6 + /* Dual-stack: Map IPv4 addresses to IPv4 mapped IPv6 */ + if (NETCONNTYPE_ISIPV6(netconn_type(sock->conn)) && + IP_IS_V4_VAL(naddr)) { + ip4_2_ipv4_mapped_ipv6(ip_2_ip6(&naddr), ip_2_ip4(&naddr)); + IP_SET_TYPE_VAL(naddr, IPADDR_TYPE_V6); + } +#endif /* LWIP_IPV4 && LWIP_IPV6 */ + + IPADDR_PORT_TO_SOCKADDR(&saddr, &naddr, port); + + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getaddrname(%d, addr=", s)); + ip_addr_debug_print_val(SOCKETS_DEBUG, naddr); + LWIP_DEBUGF(SOCKETS_DEBUG, (" port=%"U16_F")\n", port)); + + if (*namelen > saddr.sa.sa_len) { + *namelen = saddr.sa.sa_len; + } + MEMCPY(name, &saddr, *namelen); + + sock_set_errno(sock, 0); + done_socket(sock); + return 0; +} + +int +lwip_getpeername(int s, struct sockaddr *name, socklen_t *namelen) +{ + return lwip_getaddrname(s, name, namelen, 0); +} + +int +lwip_getsockname(int s, struct sockaddr *name, socklen_t *namelen) +{ + return lwip_getaddrname(s, name, namelen, 1); +} + +int +lwip_getsockopt(int s, int level, int optname, void *optval, socklen_t *optlen) +{ + int err; + struct lwip_sock *sock = get_socket(s); +#if !LWIP_TCPIP_CORE_LOCKING + err_t cberr; + LWIP_SETGETSOCKOPT_DATA_VAR_DECLARE(data); +#endif /* !LWIP_TCPIP_CORE_LOCKING */ + + if (!sock) { + return -1; + } + + if ((NULL == optval) || (NULL == optlen)) { + sock_set_errno(sock, EFAULT); + done_socket(sock); + return -1; + } + +#if LWIP_TCPIP_CORE_LOCKING + /* core-locking can just call the -impl function */ + LOCK_TCPIP_CORE(); + err = lwip_getsockopt_impl(s, level, optname, optval, optlen); + UNLOCK_TCPIP_CORE(); + +#else /* LWIP_TCPIP_CORE_LOCKING */ + +#if LWIP_MPU_COMPATIBLE + /* MPU_COMPATIBLE copies the optval data, so check for max size here */ + if (*optlen > LWIP_SETGETSOCKOPT_MAXOPTLEN) { + sock_set_errno(sock, ENOBUFS); + done_socket(sock); + return -1; + } +#endif /* LWIP_MPU_COMPATIBLE */ + + LWIP_SETGETSOCKOPT_DATA_VAR_ALLOC(data, sock); + LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).s = s; + LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).level = level; + LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).optname = optname; + LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).optlen = *optlen; +#if !LWIP_MPU_COMPATIBLE + LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).optval.p = optval; +#endif /* !LWIP_MPU_COMPATIBLE */ + LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).err = 0; +#if LWIP_NETCONN_SEM_PER_THREAD + LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).completed_sem = LWIP_NETCONN_THREAD_SEM_GET(); +#else + LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).completed_sem = &sock->conn->op_completed; +#endif + cberr = tcpip_callback(lwip_getsockopt_callback, &LWIP_SETGETSOCKOPT_DATA_VAR_REF(data)); + if (cberr != ERR_OK) { + LWIP_SETGETSOCKOPT_DATA_VAR_FREE(data); + sock_set_errno(sock, err_to_errno(cberr)); + done_socket(sock); + return -1; + } + sys_arch_sem_wait((sys_sem_t *)(LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).completed_sem), 0); + + /* write back optlen and optval */ + *optlen = LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).optlen; +#if LWIP_MPU_COMPATIBLE + MEMCPY(optval, LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).optval, + 
LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).optlen); +#endif /* LWIP_MPU_COMPATIBLE */ + + /* maybe lwip_getsockopt_internal has changed err */ + err = LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).err; + LWIP_SETGETSOCKOPT_DATA_VAR_FREE(data); +#endif /* LWIP_TCPIP_CORE_LOCKING */ + + sock_set_errno(sock, err); + done_socket(sock); + return err ? -1 : 0; +} + +#if !LWIP_TCPIP_CORE_LOCKING +/** lwip_getsockopt_callback: only used without CORE_LOCKING + * to get into the tcpip_thread + */ +static void +lwip_getsockopt_callback(void *arg) +{ + struct lwip_setgetsockopt_data *data; + LWIP_ASSERT("arg != NULL", arg != NULL); + data = (struct lwip_setgetsockopt_data *)arg; + + data->err = lwip_getsockopt_impl(data->s, data->level, data->optname, +#if LWIP_MPU_COMPATIBLE + data->optval, +#else /* LWIP_MPU_COMPATIBLE */ + data->optval.p, +#endif /* LWIP_MPU_COMPATIBLE */ + &data->optlen); + + sys_sem_signal((sys_sem_t *)(data->completed_sem)); +} +#endif /* LWIP_TCPIP_CORE_LOCKING */ + +static int +lwip_sockopt_to_ipopt(int optname) +{ + /* Map SO_* values to our internal SOF_* values + * We should not rely on #defines in socket.h + * being in sync with ip.h. + */ + switch (optname) { + case SO_BROADCAST: + return SOF_BROADCAST; + case SO_KEEPALIVE: + return SOF_KEEPALIVE; + case SO_REUSEADDR: + return SOF_REUSEADDR; + default: + LWIP_ASSERT("Unknown socket option", 0); + return 0; + } +} + +/** lwip_getsockopt_impl: the actual implementation of getsockopt: + * same argument as lwip_getsockopt, either called directly or through callback + */ +static int +lwip_getsockopt_impl(int s, int level, int optname, void *optval, socklen_t *optlen) +{ + int err = 0; + struct lwip_sock *sock = tryget_socket(s); + if (!sock) { + return EBADF; + } + +#ifdef LWIP_HOOK_SOCKETS_GETSOCKOPT + if (LWIP_HOOK_SOCKETS_GETSOCKOPT(s, sock, level, optname, optval, optlen, &err)) { + return err; + } +#endif + + switch (level) { + + /* Level: SOL_SOCKET */ + case SOL_SOCKET: + switch (optname) { + +#if LWIP_TCP + case SO_ACCEPTCONN: + LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, *optlen, int); + if (NETCONNTYPE_GROUP(sock->conn->type) != NETCONN_TCP) { + done_socket(sock); + return ENOPROTOOPT; + } + if ((sock->conn->pcb.tcp != NULL) && (sock->conn->pcb.tcp->state == LISTEN)) { + *(int *)optval = 1; + } else { + *(int *)optval = 0; + } + break; +#endif /* LWIP_TCP */ + + /* The option flags */ + case SO_BROADCAST: + case SO_KEEPALIVE: +#if SO_REUSE + case SO_REUSEADDR: +#endif /* SO_REUSE */ + if ((optname == SO_BROADCAST) && + (NETCONNTYPE_GROUP(sock->conn->type) != NETCONN_UDP)) { + done_socket(sock); + return ENOPROTOOPT; + } + + optname = lwip_sockopt_to_ipopt(optname); + + LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, *optlen, int); + *(int *)optval = ip_get_option(sock->conn->pcb.ip, optname); + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, SOL_SOCKET, optname=0x%x, ..) = %s\n", + s, optname, (*(int *)optval ? 
"on" : "off"))); + break; + + case SO_TYPE: + LWIP_SOCKOPT_CHECK_OPTLEN_CONN(sock, *optlen, int); + switch (NETCONNTYPE_GROUP(netconn_type(sock->conn))) { + case NETCONN_RAW: + *(int *)optval = SOCK_RAW; + break; + case NETCONN_TCP: + *(int *)optval = SOCK_STREAM; + break; + case NETCONN_UDP: + *(int *)optval = SOCK_DGRAM; + break; + default: /* unrecognized socket type */ + *(int *)optval = netconn_type(sock->conn); + LWIP_DEBUGF(SOCKETS_DEBUG, + ("lwip_getsockopt(%d, SOL_SOCKET, SO_TYPE): unrecognized socket type %d\n", + s, *(int *)optval)); + } /* switch (netconn_type(sock->conn)) */ + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, SOL_SOCKET, SO_TYPE) = %d\n", + s, *(int *)optval)); + break; + + case SO_ERROR: + LWIP_SOCKOPT_CHECK_OPTLEN(sock, *optlen, int); + *(int *)optval = err_to_errno(netconn_err(sock->conn)); + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, SOL_SOCKET, SO_ERROR) = %d\n", + s, *(int *)optval)); + break; + +#if LWIP_SO_SNDTIMEO + case SO_SNDTIMEO: + LWIP_SOCKOPT_CHECK_OPTLEN_CONN(sock, *optlen, LWIP_SO_SNDRCVTIMEO_OPTTYPE); + LWIP_SO_SNDRCVTIMEO_SET(optval, netconn_get_sendtimeout(sock->conn)); + break; +#endif /* LWIP_SO_SNDTIMEO */ +#if LWIP_SO_RCVTIMEO + case SO_RCVTIMEO: + LWIP_SOCKOPT_CHECK_OPTLEN_CONN(sock, *optlen, LWIP_SO_SNDRCVTIMEO_OPTTYPE); + LWIP_SO_SNDRCVTIMEO_SET(optval, netconn_get_recvtimeout(sock->conn)); + break; +#endif /* LWIP_SO_RCVTIMEO */ +#if LWIP_SO_RCVBUF + case SO_RCVBUF: + LWIP_SOCKOPT_CHECK_OPTLEN_CONN(sock, *optlen, int); + *(int *)optval = netconn_get_recvbufsize(sock->conn); + break; +#endif /* LWIP_SO_RCVBUF */ +#if LWIP_SO_LINGER + case SO_LINGER: { + s16_t conn_linger; + struct linger *linger = (struct linger *)optval; + LWIP_SOCKOPT_CHECK_OPTLEN_CONN(sock, *optlen, struct linger); + conn_linger = sock->conn->linger; + if (conn_linger >= 0) { + linger->l_onoff = 1; + linger->l_linger = (int)conn_linger; + } else { + linger->l_onoff = 0; + linger->l_linger = 0; + } + } + break; +#endif /* LWIP_SO_LINGER */ +#if LWIP_UDP + case SO_NO_CHECK: + LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB_TYPE(sock, *optlen, int, NETCONN_UDP); +#if LWIP_UDPLITE + if (udp_is_flag_set(sock->conn->pcb.udp, UDP_FLAGS_UDPLITE)) { + /* this flag is only available for UDP, not for UDP lite */ + done_socket(sock); + return EAFNOSUPPORT; + } +#endif /* LWIP_UDPLITE */ + *(int *)optval = udp_is_flag_set(sock->conn->pcb.udp, UDP_FLAGS_NOCHKSUM) ? 
1 : 0; + break; +#endif /* LWIP_UDP*/ + default: + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, SOL_SOCKET, UNIMPL: optname=0x%x, ..)\n", + s, optname)); + err = ENOPROTOOPT; + break; + } /* switch (optname) */ + break; + + /* Level: IPPROTO_IP */ + case IPPROTO_IP: + switch (optname) { + case IP_TTL: + LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, *optlen, int); + *(int *)optval = sock->conn->pcb.ip->ttl; + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_IP, IP_TTL) = %d\n", + s, *(int *)optval)); + break; + case IP_TOS: + LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, *optlen, int); + *(int *)optval = sock->conn->pcb.ip->tos; + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_IP, IP_TOS) = %d\n", + s, *(int *)optval)); + break; +#if LWIP_IPV4 && LWIP_MULTICAST_TX_OPTIONS && LWIP_UDP + case IP_MULTICAST_TTL: + LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, *optlen, u8_t); + if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) != NETCONN_UDP) { + done_socket(sock); + return ENOPROTOOPT; + } + *(u8_t *)optval = udp_get_multicast_ttl(sock->conn->pcb.udp); + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_IP, IP_MULTICAST_TTL) = %d\n", + s, *(int *)optval)); + break; + case IP_MULTICAST_IF: + LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, *optlen, struct in_addr); + if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) != NETCONN_UDP) { + done_socket(sock); + return ENOPROTOOPT; + } + inet_addr_from_ip4addr((struct in_addr *)optval, udp_get_multicast_netif_addr(sock->conn->pcb.udp)); + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_IP, IP_MULTICAST_IF) = 0x%"X32_F"\n", + s, *(u32_t *)optval)); + break; + case IP_MULTICAST_LOOP: + LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, *optlen, u8_t); + if ((sock->conn->pcb.udp->flags & UDP_FLAGS_MULTICAST_LOOP) != 0) { + *(u8_t *)optval = 1; + } else { + *(u8_t *)optval = 0; + } + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_IP, IP_MULTICAST_LOOP) = %d\n", + s, *(int *)optval)); + break; +#endif /* LWIP_IPV4 && LWIP_MULTICAST_TX_OPTIONS && LWIP_UDP */ + default: + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_IP, UNIMPL: optname=0x%x, ..)\n", + s, optname)); + err = ENOPROTOOPT; + break; + } /* switch (optname) */ + break; + +#if LWIP_TCP + /* Level: IPPROTO_TCP */ + case IPPROTO_TCP: + /* Special case: all IPPROTO_TCP option take an int */ + LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB_TYPE(sock, *optlen, int, NETCONN_TCP); + if (sock->conn->pcb.tcp->state == LISTEN) { + done_socket(sock); + return EINVAL; + } + switch (optname) { + case TCP_NODELAY: + *(int *)optval = tcp_nagle_disabled(sock->conn->pcb.tcp); + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_TCP, TCP_NODELAY) = %s\n", + s, (*(int *)optval) ? 
"on" : "off") ); + break; + case TCP_KEEPALIVE: + *(int *)optval = (int)sock->conn->pcb.tcp->keep_idle; + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_TCP, TCP_KEEPALIVE) = %d\n", + s, *(int *)optval)); + break; + +#if LWIP_TCP_KEEPALIVE + case TCP_KEEPIDLE: + *(int *)optval = (int)(sock->conn->pcb.tcp->keep_idle / 1000); + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_TCP, TCP_KEEPIDLE) = %d\n", + s, *(int *)optval)); + break; + case TCP_KEEPINTVL: + *(int *)optval = (int)(sock->conn->pcb.tcp->keep_intvl / 1000); + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_TCP, TCP_KEEPINTVL) = %d\n", + s, *(int *)optval)); + break; + case TCP_KEEPCNT: + *(int *)optval = (int)sock->conn->pcb.tcp->keep_cnt; + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_TCP, TCP_KEEPCNT) = %d\n", + s, *(int *)optval)); + break; +#endif /* LWIP_TCP_KEEPALIVE */ + default: + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_TCP, UNIMPL: optname=0x%x, ..)\n", + s, optname)); + err = ENOPROTOOPT; + break; + } /* switch (optname) */ + break; +#endif /* LWIP_TCP */ + +#if LWIP_IPV6 + /* Level: IPPROTO_IPV6 */ + case IPPROTO_IPV6: + switch (optname) { + case IPV6_V6ONLY: + LWIP_SOCKOPT_CHECK_OPTLEN_CONN(sock, *optlen, int); + *(int *)optval = (netconn_get_ipv6only(sock->conn) ? 1 : 0); + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_IPV6, IPV6_V6ONLY) = %d\n", + s, *(int *)optval)); + break; + default: + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_IPV6, UNIMPL: optname=0x%x, ..)\n", + s, optname)); + err = ENOPROTOOPT; + break; + } /* switch (optname) */ + break; +#endif /* LWIP_IPV6 */ + +#if LWIP_UDP && LWIP_UDPLITE + /* Level: IPPROTO_UDPLITE */ + case IPPROTO_UDPLITE: + /* Special case: all IPPROTO_UDPLITE option take an int */ + LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, *optlen, int); + /* If this is no UDP lite socket, ignore any options. 
*/ + if (!NETCONNTYPE_ISUDPLITE(netconn_type(sock->conn))) { + done_socket(sock); + return ENOPROTOOPT; + } + switch (optname) { + case UDPLITE_SEND_CSCOV: + *(int *)optval = sock->conn->pcb.udp->chksum_len_tx; + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_UDPLITE, UDPLITE_SEND_CSCOV) = %d\n", + s, (*(int *)optval)) ); + break; + case UDPLITE_RECV_CSCOV: + *(int *)optval = sock->conn->pcb.udp->chksum_len_rx; + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_UDPLITE, UDPLITE_RECV_CSCOV) = %d\n", + s, (*(int *)optval)) ); + break; + default: + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_UDPLITE, UNIMPL: optname=0x%x, ..)\n", + s, optname)); + err = ENOPROTOOPT; + break; + } /* switch (optname) */ + break; +#endif /* LWIP_UDP */ + /* Level: IPPROTO_RAW */ + case IPPROTO_RAW: + switch (optname) { +#if LWIP_IPV6 && LWIP_RAW + case IPV6_CHECKSUM: + LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB_TYPE(sock, *optlen, int, NETCONN_RAW); + if (sock->conn->pcb.raw->chksum_reqd == 0) { + *(int *)optval = -1; + } else { + *(int *)optval = sock->conn->pcb.raw->chksum_offset; + } + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_RAW, IPV6_CHECKSUM) = %d\n", + s, (*(int *)optval)) ); + break; +#endif /* LWIP_IPV6 && LWIP_RAW */ + default: + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_RAW, UNIMPL: optname=0x%x, ..)\n", + s, optname)); + err = ENOPROTOOPT; + break; + } /* switch (optname) */ + break; + default: + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, level=0x%x, UNIMPL: optname=0x%x, ..)\n", + s, level, optname)); + err = ENOPROTOOPT; + break; + } /* switch (level) */ + + done_socket(sock); + return err; +} + +int +lwip_setsockopt(int s, int level, int optname, const void *optval, socklen_t optlen) +{ + int err = 0; + struct lwip_sock *sock = get_socket(s); +#if !LWIP_TCPIP_CORE_LOCKING + err_t cberr; + LWIP_SETGETSOCKOPT_DATA_VAR_DECLARE(data); +#endif /* !LWIP_TCPIP_CORE_LOCKING */ + + if (!sock) { + return -1; + } + + if (NULL == optval) { + sock_set_errno(sock, EFAULT); + done_socket(sock); + return -1; + } + +#if LWIP_TCPIP_CORE_LOCKING + /* core-locking can just call the -impl function */ + LOCK_TCPIP_CORE(); + err = lwip_setsockopt_impl(s, level, optname, optval, optlen); + UNLOCK_TCPIP_CORE(); + +#else /* LWIP_TCPIP_CORE_LOCKING */ + +#if LWIP_MPU_COMPATIBLE + /* MPU_COMPATIBLE copies the optval data, so check for max size here */ + if (optlen > LWIP_SETGETSOCKOPT_MAXOPTLEN) { + sock_set_errno(sock, ENOBUFS); + done_socket(sock); + return -1; + } +#endif /* LWIP_MPU_COMPATIBLE */ + + LWIP_SETGETSOCKOPT_DATA_VAR_ALLOC(data, sock); + LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).s = s; + LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).level = level; + LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).optname = optname; + LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).optlen = optlen; +#if LWIP_MPU_COMPATIBLE + MEMCPY(LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).optval, optval, optlen); +#else /* LWIP_MPU_COMPATIBLE */ + LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).optval.pc = (const void *)optval; +#endif /* LWIP_MPU_COMPATIBLE */ + LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).err = 0; +#if LWIP_NETCONN_SEM_PER_THREAD + LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).completed_sem = LWIP_NETCONN_THREAD_SEM_GET(); +#else + LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).completed_sem = &sock->conn->op_completed; +#endif + cberr = tcpip_callback(lwip_setsockopt_callback, &LWIP_SETGETSOCKOPT_DATA_VAR_REF(data)); + if (cberr != ERR_OK) { + LWIP_SETGETSOCKOPT_DATA_VAR_FREE(data); + sock_set_errno(sock, err_to_errno(cberr)); + 
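+  /* [Editor's sketch - not part of the upstream lwIP sources] A minimal,
+   * hypothetical application-side view of the option paths implemented in
+   * this file. It assumes LWIP_SO_RCVTIMEO is enabled with the default
+   * struct timeval option type; the descriptor `s` and the missing error
+   * handling are purely illustrative.
+   *
+   *   struct timeval tv = { 2, 0 };          // 2 s receive timeout
+   *   int on = 1, soerr = 0;
+   *   socklen_t len = sizeof(soerr);
+   *   lwip_setsockopt(s, SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof(tv));  // SO_RCVTIMEO case above
+   *   lwip_setsockopt(s, IPPROTO_TCP, TCP_NODELAY, &on, sizeof(on)); // disables Nagle
+   *   lwip_getsockopt(s, SOL_SOCKET, SO_ERROR, &soerr, &len);        // reads pending error
+   */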
done_socket(sock); + return -1; + } + sys_arch_sem_wait((sys_sem_t *)(LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).completed_sem), 0); + + /* maybe lwip_getsockopt_internal has changed err */ + err = LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).err; + LWIP_SETGETSOCKOPT_DATA_VAR_FREE(data); +#endif /* LWIP_TCPIP_CORE_LOCKING */ + + sock_set_errno(sock, err); + done_socket(sock); + return err ? -1 : 0; +} + +#if !LWIP_TCPIP_CORE_LOCKING +/** lwip_setsockopt_callback: only used without CORE_LOCKING + * to get into the tcpip_thread + */ +static void +lwip_setsockopt_callback(void *arg) +{ + struct lwip_setgetsockopt_data *data; + LWIP_ASSERT("arg != NULL", arg != NULL); + data = (struct lwip_setgetsockopt_data *)arg; + + data->err = lwip_setsockopt_impl(data->s, data->level, data->optname, +#if LWIP_MPU_COMPATIBLE + data->optval, +#else /* LWIP_MPU_COMPATIBLE */ + data->optval.pc, +#endif /* LWIP_MPU_COMPATIBLE */ + data->optlen); + + sys_sem_signal((sys_sem_t *)(data->completed_sem)); +} +#endif /* LWIP_TCPIP_CORE_LOCKING */ + +/** lwip_setsockopt_impl: the actual implementation of setsockopt: + * same argument as lwip_setsockopt, either called directly or through callback + */ +static int +lwip_setsockopt_impl(int s, int level, int optname, const void *optval, socklen_t optlen) +{ + int err = 0; + struct lwip_sock *sock = tryget_socket(s); + if (!sock) { + return EBADF; + } + +#ifdef LWIP_HOOK_SOCKETS_SETSOCKOPT + if (LWIP_HOOK_SOCKETS_SETSOCKOPT(s, sock, level, optname, optval, optlen, &err)) { + return err; + } +#endif + + switch (level) { + + /* Level: SOL_SOCKET */ + case SOL_SOCKET: + switch (optname) { + + /* SO_ACCEPTCONN is get-only */ + + /* The option flags */ + case SO_BROADCAST: + case SO_KEEPALIVE: +#if SO_REUSE + case SO_REUSEADDR: +#endif /* SO_REUSE */ + if ((optname == SO_BROADCAST) && + (NETCONNTYPE_GROUP(sock->conn->type) != NETCONN_UDP)) { + done_socket(sock); + return ENOPROTOOPT; + } + + optname = lwip_sockopt_to_ipopt(optname); + + LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, optlen, int); + if (*(const int *)optval) { + ip_set_option(sock->conn->pcb.ip, optname); + } else { + ip_reset_option(sock->conn->pcb.ip, optname); + } + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, SOL_SOCKET, optname=0x%x, ..) -> %s\n", + s, optname, (*(const int *)optval ? 
"on" : "off"))); + break; + + /* SO_TYPE is get-only */ + /* SO_ERROR is get-only */ + +#if LWIP_SO_SNDTIMEO + case SO_SNDTIMEO: { + long ms_long; + LWIP_SOCKOPT_CHECK_OPTLEN_CONN(sock, optlen, LWIP_SO_SNDRCVTIMEO_OPTTYPE); + ms_long = LWIP_SO_SNDRCVTIMEO_GET_MS(optval); + if (ms_long < 0) { + done_socket(sock); + return EINVAL; + } + netconn_set_sendtimeout(sock->conn, ms_long); + break; + } +#endif /* LWIP_SO_SNDTIMEO */ +#if LWIP_SO_RCVTIMEO + case SO_RCVTIMEO: { + long ms_long; + LWIP_SOCKOPT_CHECK_OPTLEN_CONN(sock, optlen, LWIP_SO_SNDRCVTIMEO_OPTTYPE); + ms_long = LWIP_SO_SNDRCVTIMEO_GET_MS(optval); + if (ms_long < 0) { + done_socket(sock); + return EINVAL; + } + netconn_set_recvtimeout(sock->conn, (u32_t)ms_long); + break; + } +#endif /* LWIP_SO_RCVTIMEO */ +#if LWIP_SO_RCVBUF + case SO_RCVBUF: + LWIP_SOCKOPT_CHECK_OPTLEN_CONN(sock, optlen, int); + netconn_set_recvbufsize(sock->conn, *(const int *)optval); + break; +#endif /* LWIP_SO_RCVBUF */ +#if LWIP_SO_LINGER + case SO_LINGER: { + const struct linger *linger = (const struct linger *)optval; + LWIP_SOCKOPT_CHECK_OPTLEN_CONN(sock, optlen, struct linger); + if (linger->l_onoff) { + int lingersec = linger->l_linger; + if (lingersec < 0) { + done_socket(sock); + return EINVAL; + } + if (lingersec > 0xFFFF) { + lingersec = 0xFFFF; + } + sock->conn->linger = (s16_t)lingersec; + } else { + sock->conn->linger = -1; + } + } + break; +#endif /* LWIP_SO_LINGER */ +#if LWIP_UDP + case SO_NO_CHECK: + LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB_TYPE(sock, optlen, int, NETCONN_UDP); +#if LWIP_UDPLITE + if (udp_is_flag_set(sock->conn->pcb.udp, UDP_FLAGS_UDPLITE)) { + /* this flag is only available for UDP, not for UDP lite */ + done_socket(sock); + return EAFNOSUPPORT; + } +#endif /* LWIP_UDPLITE */ + if (*(const int *)optval) { + udp_set_flags(sock->conn->pcb.udp, UDP_FLAGS_NOCHKSUM); + } else { + udp_clear_flags(sock->conn->pcb.udp, UDP_FLAGS_NOCHKSUM); + } + break; +#endif /* LWIP_UDP */ + case SO_BINDTODEVICE: { + const struct ifreq *iface; + struct netif *n = NULL; + + LWIP_SOCKOPT_CHECK_OPTLEN_CONN(sock, optlen, struct ifreq); + + iface = (const struct ifreq *)optval; + if (iface->ifr_name[0] != 0) { + n = netif_find(iface->ifr_name); + if (n == NULL) { + done_socket(sock); + return ENODEV; + } + } + + switch (NETCONNTYPE_GROUP(netconn_type(sock->conn))) { +#if LWIP_TCP + case NETCONN_TCP: + tcp_bind_netif(sock->conn->pcb.tcp, n); + break; +#endif +#if LWIP_UDP + case NETCONN_UDP: + udp_bind_netif(sock->conn->pcb.udp, n); + break; +#endif +#if LWIP_RAW + case NETCONN_RAW: + raw_bind_netif(sock->conn->pcb.raw, n); + break; +#endif + default: + LWIP_ASSERT("Unhandled netconn type in SO_BINDTODEVICE", 0); + break; + } + } + break; + default: + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, SOL_SOCKET, UNIMPL: optname=0x%x, ..)\n", + s, optname)); + err = ENOPROTOOPT; + break; + } /* switch (optname) */ + break; + + /* Level: IPPROTO_IP */ + case IPPROTO_IP: + switch (optname) { + case IP_TTL: + LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, optlen, int); + sock->conn->pcb.ip->ttl = (u8_t)(*(const int *)optval); + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_IP, IP_TTL, ..) 
-> %d\n", + s, sock->conn->pcb.ip->ttl)); + break; + case IP_TOS: + LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, optlen, int); + sock->conn->pcb.ip->tos = (u8_t)(*(const int *)optval); + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_IP, IP_TOS, ..)-> %d\n", + s, sock->conn->pcb.ip->tos)); + break; +#if LWIP_NETBUF_RECVINFO + case IP_PKTINFO: + LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB_TYPE(sock, optlen, int, NETCONN_UDP); + if (*(const int *)optval) { + sock->conn->flags |= NETCONN_FLAG_PKTINFO; + } else { + sock->conn->flags &= ~NETCONN_FLAG_PKTINFO; + } + break; +#endif /* LWIP_NETBUF_RECVINFO */ +#if LWIP_IPV4 && LWIP_MULTICAST_TX_OPTIONS && LWIP_UDP + case IP_MULTICAST_TTL: + LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB_TYPE(sock, optlen, u8_t, NETCONN_UDP); + udp_set_multicast_ttl(sock->conn->pcb.udp, (u8_t)(*(const u8_t *)optval)); + break; + case IP_MULTICAST_IF: { + ip4_addr_t if_addr; + LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB_TYPE(sock, optlen, struct in_addr, NETCONN_UDP); + inet_addr_to_ip4addr(&if_addr, (const struct in_addr *)optval); + udp_set_multicast_netif_addr(sock->conn->pcb.udp, &if_addr); + } + break; + case IP_MULTICAST_LOOP: + LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB_TYPE(sock, optlen, u8_t, NETCONN_UDP); + if (*(const u8_t *)optval) { + udp_set_flags(sock->conn->pcb.udp, UDP_FLAGS_MULTICAST_LOOP); + } else { + udp_clear_flags(sock->conn->pcb.udp, UDP_FLAGS_MULTICAST_LOOP); + } + break; +#endif /* LWIP_IPV4 && LWIP_MULTICAST_TX_OPTIONS && LWIP_UDP */ +#if LWIP_IGMP + case IP_ADD_MEMBERSHIP: + case IP_DROP_MEMBERSHIP: { + /* If this is a TCP or a RAW socket, ignore these options. */ + err_t igmp_err; + const struct ip_mreq *imr = (const struct ip_mreq *)optval; + ip4_addr_t if_addr; + ip4_addr_t multi_addr; + LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB_TYPE(sock, optlen, struct ip_mreq, NETCONN_UDP); + inet_addr_to_ip4addr(&if_addr, &imr->imr_interface); + inet_addr_to_ip4addr(&multi_addr, &imr->imr_multiaddr); + if (optname == IP_ADD_MEMBERSHIP) { + if (!lwip_socket_register_membership(s, &if_addr, &multi_addr)) { + /* cannot track membership (out of memory) */ + err = ENOMEM; + igmp_err = ERR_OK; + } else { + igmp_err = igmp_joingroup(&if_addr, &multi_addr); + } + } else { + igmp_err = igmp_leavegroup(&if_addr, &multi_addr); + lwip_socket_unregister_membership(s, &if_addr, &multi_addr); + } + if (igmp_err != ERR_OK) { + err = EADDRNOTAVAIL; + } + } + break; +#endif /* LWIP_IGMP */ + default: + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_IP, UNIMPL: optname=0x%x, ..)\n", + s, optname)); + err = ENOPROTOOPT; + break; + } /* switch (optname) */ + break; + +#if LWIP_TCP + /* Level: IPPROTO_TCP */ + case IPPROTO_TCP: + /* Special case: all IPPROTO_TCP option take an int */ + LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB_TYPE(sock, optlen, int, NETCONN_TCP); + if (sock->conn->pcb.tcp->state == LISTEN) { + done_socket(sock); + return EINVAL; + } + switch (optname) { + case TCP_NODELAY: + if (*(const int *)optval) { + tcp_nagle_disable(sock->conn->pcb.tcp); + } else { + tcp_nagle_enable(sock->conn->pcb.tcp); + } + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_TCP, TCP_NODELAY) -> %s\n", + s, (*(const int *)optval) ? 
"on" : "off") ); + break; + case TCP_KEEPALIVE: + sock->conn->pcb.tcp->keep_idle = (u32_t)(*(const int *)optval); + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_TCP, TCP_KEEPALIVE) -> %"U32_F"\n", + s, sock->conn->pcb.tcp->keep_idle)); + break; + +#if LWIP_TCP_KEEPALIVE + case TCP_KEEPIDLE: + sock->conn->pcb.tcp->keep_idle = 1000 * (u32_t)(*(const int *)optval); + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_TCP, TCP_KEEPIDLE) -> %"U32_F"\n", + s, sock->conn->pcb.tcp->keep_idle)); + break; + case TCP_KEEPINTVL: + sock->conn->pcb.tcp->keep_intvl = 1000 * (u32_t)(*(const int *)optval); + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_TCP, TCP_KEEPINTVL) -> %"U32_F"\n", + s, sock->conn->pcb.tcp->keep_intvl)); + break; + case TCP_KEEPCNT: + sock->conn->pcb.tcp->keep_cnt = (u32_t)(*(const int *)optval); + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_TCP, TCP_KEEPCNT) -> %"U32_F"\n", + s, sock->conn->pcb.tcp->keep_cnt)); + break; +#endif /* LWIP_TCP_KEEPALIVE */ + default: + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_TCP, UNIMPL: optname=0x%x, ..)\n", + s, optname)); + err = ENOPROTOOPT; + break; + } /* switch (optname) */ + break; +#endif /* LWIP_TCP*/ + +#if LWIP_IPV6 + /* Level: IPPROTO_IPV6 */ + case IPPROTO_IPV6: + switch (optname) { + case IPV6_V6ONLY: + LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, optlen, int); + if (*(const int *)optval) { + netconn_set_ipv6only(sock->conn, 1); + } else { + netconn_set_ipv6only(sock->conn, 0); + } + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_IPV6, IPV6_V6ONLY, ..) -> %d\n", + s, (netconn_get_ipv6only(sock->conn) ? 1 : 0))); + break; +#if LWIP_IPV6_MLD + case IPV6_JOIN_GROUP: + case IPV6_LEAVE_GROUP: { + /* If this is a TCP or a RAW socket, ignore these options. */ + err_t mld6_err; + struct netif *netif; + ip6_addr_t multi_addr; + const struct ipv6_mreq *imr = (const struct ipv6_mreq *)optval; + LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB_TYPE(sock, optlen, struct ipv6_mreq, NETCONN_UDP); + inet6_addr_to_ip6addr(&multi_addr, &imr->ipv6mr_multiaddr); + LWIP_ASSERT("Invalid netif index", imr->ipv6mr_interface <= 0xFFu); + netif = netif_get_by_index((u8_t)imr->ipv6mr_interface); + if (netif == NULL) { + err = EADDRNOTAVAIL; + break; + } + + if (optname == IPV6_JOIN_GROUP) { + if (!lwip_socket_register_mld6_membership(s, imr->ipv6mr_interface, &multi_addr)) { + /* cannot track membership (out of memory) */ + err = ENOMEM; + mld6_err = ERR_OK; + } else { + mld6_err = mld6_joingroup_netif(netif, &multi_addr); + } + } else { + mld6_err = mld6_leavegroup_netif(netif, &multi_addr); + lwip_socket_unregister_mld6_membership(s, imr->ipv6mr_interface, &multi_addr); + } + if (mld6_err != ERR_OK) { + err = EADDRNOTAVAIL; + } + } + break; +#endif /* LWIP_IPV6_MLD */ + default: + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_IPV6, UNIMPL: optname=0x%x, ..)\n", + s, optname)); + err = ENOPROTOOPT; + break; + } /* switch (optname) */ + break; +#endif /* LWIP_IPV6 */ + +#if LWIP_UDP && LWIP_UDPLITE + /* Level: IPPROTO_UDPLITE */ + case IPPROTO_UDPLITE: + /* Special case: all IPPROTO_UDPLITE option take an int */ + LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, optlen, int); + /* If this is no UDP lite socket, ignore any options. 
*/ + if (!NETCONNTYPE_ISUDPLITE(netconn_type(sock->conn))) { + done_socket(sock); + return ENOPROTOOPT; + } + switch (optname) { + case UDPLITE_SEND_CSCOV: + if ((*(const int *)optval != 0) && ((*(const int *)optval < 8) || (*(const int *)optval > 0xffff))) { + /* don't allow illegal values! */ + sock->conn->pcb.udp->chksum_len_tx = 8; + } else { + sock->conn->pcb.udp->chksum_len_tx = (u16_t) * (const int *)optval; + } + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_UDPLITE, UDPLITE_SEND_CSCOV) -> %d\n", + s, (*(const int *)optval)) ); + break; + case UDPLITE_RECV_CSCOV: + if ((*(const int *)optval != 0) && ((*(const int *)optval < 8) || (*(const int *)optval > 0xffff))) { + /* don't allow illegal values! */ + sock->conn->pcb.udp->chksum_len_rx = 8; + } else { + sock->conn->pcb.udp->chksum_len_rx = (u16_t) * (const int *)optval; + } + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_UDPLITE, UDPLITE_RECV_CSCOV) -> %d\n", + s, (*(const int *)optval)) ); + break; + default: + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_UDPLITE, UNIMPL: optname=0x%x, ..)\n", + s, optname)); + err = ENOPROTOOPT; + break; + } /* switch (optname) */ + break; +#endif /* LWIP_UDP */ + /* Level: IPPROTO_RAW */ + case IPPROTO_RAW: + switch (optname) { +#if LWIP_IPV6 && LWIP_RAW + case IPV6_CHECKSUM: + /* It should not be possible to disable the checksum generation with ICMPv6 + * as per RFC 3542 chapter 3.1 */ + if (sock->conn->pcb.raw->protocol == IPPROTO_ICMPV6) { + done_socket(sock); + return EINVAL; + } + + LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB_TYPE(sock, optlen, int, NETCONN_RAW); + if (*(const int *)optval < 0) { + sock->conn->pcb.raw->chksum_reqd = 0; + } else if (*(const int *)optval & 1) { + /* Per RFC3542, odd offsets are not allowed */ + done_socket(sock); + return EINVAL; + } else { + sock->conn->pcb.raw->chksum_reqd = 1; + sock->conn->pcb.raw->chksum_offset = (u16_t) * (const int *)optval; + } + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_RAW, IPV6_CHECKSUM, ..) 
-> %d\n", + s, sock->conn->pcb.raw->chksum_reqd)); + break; +#endif /* LWIP_IPV6 && LWIP_RAW */ + default: + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_RAW, UNIMPL: optname=0x%x, ..)\n", + s, optname)); + err = ENOPROTOOPT; + break; + } /* switch (optname) */ + break; + default: + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, level=0x%x, UNIMPL: optname=0x%x, ..)\n", + s, level, optname)); + err = ENOPROTOOPT; + break; + } /* switch (level) */ + + done_socket(sock); + return err; +} + +int +lwip_ioctl(int s, long cmd, void *argp) +{ + struct lwip_sock *sock = get_socket(s); + u8_t val; +#if LWIP_SO_RCVBUF + int recv_avail; +#endif /* LWIP_SO_RCVBUF */ + + if (!sock) { + return -1; + } + + switch (cmd) { +#if LWIP_SO_RCVBUF || LWIP_FIONREAD_LINUXMODE + case FIONREAD: + if (!argp) { + sock_set_errno(sock, EINVAL); + done_socket(sock); + return -1; + } +#if LWIP_FIONREAD_LINUXMODE + if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) != NETCONN_TCP) { + struct netbuf *nb; + if (sock->lastdata.netbuf) { + nb = sock->lastdata.netbuf; + *((int *)argp) = nb->p->tot_len; + } else { + struct netbuf *rxbuf; + err_t err = netconn_recv_udp_raw_netbuf_flags(sock->conn, &rxbuf, NETCONN_DONTBLOCK); + if (err != ERR_OK) { + *((int *)argp) = 0; + } else { + sock->lastdata.netbuf = rxbuf; + *((int *)argp) = rxbuf->p->tot_len; + } + } + done_socket(sock); + return 0; + } +#endif /* LWIP_FIONREAD_LINUXMODE */ + +#if LWIP_SO_RCVBUF + /* we come here if either LWIP_FIONREAD_LINUXMODE==0 or this is a TCP socket */ + SYS_ARCH_GET(sock->conn->recv_avail, recv_avail); + if (recv_avail < 0) { + recv_avail = 0; + } + + /* Check if there is data left from the last recv operation. /maq 041215 */ + if (sock->lastdata.netbuf) { + if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) == NETCONN_TCP) { + recv_avail += sock->lastdata.pbuf->tot_len; + } else { + recv_avail += sock->lastdata.netbuf->p->tot_len; + } + } + *((int *)argp) = recv_avail; + + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_ioctl(%d, FIONREAD, %p) = %"U16_F"\n", s, argp, *((u16_t *)argp))); + sock_set_errno(sock, 0); + done_socket(sock); + return 0; +#else /* LWIP_SO_RCVBUF */ + break; +#endif /* LWIP_SO_RCVBUF */ +#endif /* LWIP_SO_RCVBUF || LWIP_FIONREAD_LINUXMODE */ + + case (long)FIONBIO: + val = 0; + if (argp && *(int *)argp) { + val = 1; + } + netconn_set_nonblocking(sock->conn, val); + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_ioctl(%d, FIONBIO, %d)\n", s, val)); + sock_set_errno(sock, 0); + done_socket(sock); + return 0; + + default: + break; + } /* switch (cmd) */ + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_ioctl(%d, UNIMPL: 0x%lx, %p)\n", s, cmd, argp)); + sock_set_errno(sock, ENOSYS); /* not yet implemented */ + done_socket(sock); + return -1; +} + +/** A minimal implementation of fcntl. + * Currently only the commands F_GETFL and F_SETFL are implemented. + * The flag O_NONBLOCK and access modes are supported for F_GETFL, only + * the flag O_NONBLOCK is implemented for F_SETFL. + */ +int +lwip_fcntl(int s, int cmd, int val) +{ + struct lwip_sock *sock = get_socket(s); + int ret = -1; + int op_mode = 0; + + if (!sock) { + return -1; + } + + switch (cmd) { + case F_GETFL: + ret = netconn_is_nonblocking(sock->conn) ? 
O_NONBLOCK : 0; + sock_set_errno(sock, 0); + + if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) == NETCONN_TCP) { +#if LWIP_TCPIP_CORE_LOCKING + LOCK_TCPIP_CORE(); +#else + SYS_ARCH_DECL_PROTECT(lev); + /* the proper thing to do here would be to get into the tcpip_thread, + but locking should be OK as well since we only *read* some flags */ + SYS_ARCH_PROTECT(lev); +#endif +#if LWIP_TCP + if (sock->conn->pcb.tcp) { + if (!(sock->conn->pcb.tcp->flags & TF_RXCLOSED)) { + op_mode |= O_RDONLY; + } + if (!(sock->conn->pcb.tcp->flags & TF_FIN)) { + op_mode |= O_WRONLY; + } + } +#endif +#if LWIP_TCPIP_CORE_LOCKING + UNLOCK_TCPIP_CORE(); +#else + SYS_ARCH_UNPROTECT(lev); +#endif + } else { + op_mode |= O_RDWR; + } + + /* ensure O_RDWR for (O_RDONLY|O_WRONLY) != O_RDWR cases */ + ret |= (op_mode == (O_RDONLY | O_WRONLY)) ? O_RDWR : op_mode; + + break; + case F_SETFL: + /* Bits corresponding to the file access mode and the file creation flags [..] that are set in arg shall be ignored */ + val &= ~(O_RDONLY | O_WRONLY | O_RDWR); + if ((val & ~O_NONBLOCK) == 0) { + /* only O_NONBLOCK, all other bits are zero */ + netconn_set_nonblocking(sock->conn, val & O_NONBLOCK); + ret = 0; + sock_set_errno(sock, 0); + } else { + sock_set_errno(sock, ENOSYS); /* not yet implemented */ + } + break; + default: + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_fcntl(%d, UNIMPL: %d, %d)\n", s, cmd, val)); + sock_set_errno(sock, ENOSYS); /* not yet implemented */ + break; + } + done_socket(sock); + return ret; +} + +#if LWIP_COMPAT_SOCKETS == 2 && LWIP_POSIX_SOCKETS_IO_NAMES +int +fcntl(int s, int cmd, ...) +{ + va_list ap; + int val; + + va_start(ap, cmd); + val = va_arg(ap, int); + va_end(ap); + return lwip_fcntl(s, cmd, val); +} +#endif + +const char * +lwip_inet_ntop(int af, const void *src, char *dst, socklen_t size) +{ + const char *ret = NULL; + int size_int = (int)size; + if (size_int < 0) { + set_errno(ENOSPC); + return NULL; + } + switch (af) { +#if LWIP_IPV4 + case AF_INET: + ret = ip4addr_ntoa_r((const ip4_addr_t *)src, dst, size_int); + if (ret == NULL) { + set_errno(ENOSPC); + } + break; +#endif +#if LWIP_IPV6 + case AF_INET6: + ret = ip6addr_ntoa_r((const ip6_addr_t *)src, dst, size_int); + if (ret == NULL) { + set_errno(ENOSPC); + } + break; +#endif + default: + set_errno(EAFNOSUPPORT); + break; + } + return ret; +} + +int +lwip_inet_pton(int af, const char *src, void *dst) +{ + int err; + switch (af) { +#if LWIP_IPV4 + case AF_INET: + err = ip4addr_aton(src, (ip4_addr_t *)dst); + break; +#endif +#if LWIP_IPV6 + case AF_INET6: { + /* convert into temporary variable since ip6_addr_t might be larger + than in6_addr when scopes are enabled */ + ip6_addr_t addr; + err = ip6addr_aton(src, &addr); + if (err) { + memcpy(dst, &addr.addr, sizeof(addr.addr)); + } + break; + } +#endif + default: + err = -1; + set_errno(EAFNOSUPPORT); + break; + } + return err; +} + +#if LWIP_IGMP +/** Register a new IGMP membership. On socket close, the membership is dropped automatically. + * + * ATTENTION: this function is called from tcpip_thread (or under CORE_LOCK). 
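+ *
+ * (Editor's note, not from the upstream sources: a hypothetical sketch of the
+ *  application call that ends up here, i.e. joining an IPv4 multicast group
+ *  on a UDP socket via setsockopt(). The group address and descriptor are
+ *  illustrative only.)
+ *
+ *    struct ip_mreq mreq;
+ *    mreq.imr_multiaddr.s_addr = inet_addr("239.255.0.1");  // group to join
+ *    mreq.imr_interface.s_addr = htonl(INADDR_ANY);         // any interface
+ *    lwip_setsockopt(s, IPPROTO_IP, IP_ADD_MEMBERSHIP, &mreq, sizeof(mreq));
+ *    // ...and IP_DROP_MEMBERSHIP with the same struct leaves the group again.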
+ * + * @return 1 on success, 0 on failure + */ +static int +lwip_socket_register_membership(int s, const ip4_addr_t *if_addr, const ip4_addr_t *multi_addr) +{ + struct lwip_sock *sock = get_socket(s); + int i; + + if (!sock) { + return 0; + } + + for (i = 0; i < LWIP_SOCKET_MAX_MEMBERSHIPS; i++) { + if (socket_ipv4_multicast_memberships[i].sock == NULL) { + socket_ipv4_multicast_memberships[i].sock = sock; + ip4_addr_copy(socket_ipv4_multicast_memberships[i].if_addr, *if_addr); + ip4_addr_copy(socket_ipv4_multicast_memberships[i].multi_addr, *multi_addr); + done_socket(sock); + return 1; + } + } + done_socket(sock); + return 0; +} + +/** Unregister a previously registered membership. This prevents dropping the membership + * on socket close. + * + * ATTENTION: this function is called from tcpip_thread (or under CORE_LOCK). + */ +static void +lwip_socket_unregister_membership(int s, const ip4_addr_t *if_addr, const ip4_addr_t *multi_addr) +{ + struct lwip_sock *sock = get_socket(s); + int i; + + if (!sock) { + return; + } + + for (i = 0; i < LWIP_SOCKET_MAX_MEMBERSHIPS; i++) { + if ((socket_ipv4_multicast_memberships[i].sock == sock) && + ip4_addr_cmp(&socket_ipv4_multicast_memberships[i].if_addr, if_addr) && + ip4_addr_cmp(&socket_ipv4_multicast_memberships[i].multi_addr, multi_addr)) { + socket_ipv4_multicast_memberships[i].sock = NULL; + ip4_addr_set_zero(&socket_ipv4_multicast_memberships[i].if_addr); + ip4_addr_set_zero(&socket_ipv4_multicast_memberships[i].multi_addr); + break; + } + } + done_socket(sock); +} + +/** Drop all memberships of a socket that were not dropped explicitly via setsockopt. + * + * ATTENTION: this function is NOT called from tcpip_thread (or under CORE_LOCK). + */ +static void +lwip_socket_drop_registered_memberships(int s) +{ + struct lwip_sock *sock = get_socket(s); + int i; + + if (!sock) { + return; + } + + for (i = 0; i < LWIP_SOCKET_MAX_MEMBERSHIPS; i++) { + if (socket_ipv4_multicast_memberships[i].sock == sock) { + ip_addr_t multi_addr, if_addr; + ip_addr_copy_from_ip4(multi_addr, socket_ipv4_multicast_memberships[i].multi_addr); + ip_addr_copy_from_ip4(if_addr, socket_ipv4_multicast_memberships[i].if_addr); + socket_ipv4_multicast_memberships[i].sock = NULL; + ip4_addr_set_zero(&socket_ipv4_multicast_memberships[i].if_addr); + ip4_addr_set_zero(&socket_ipv4_multicast_memberships[i].multi_addr); + + netconn_join_leave_group(sock->conn, &multi_addr, &if_addr, NETCONN_LEAVE); + } + } + done_socket(sock); +} +#endif /* LWIP_IGMP */ + +#if LWIP_IPV6_MLD +/** Register a new MLD6 membership. On socket close, the membership is dropped automatically. + * + * ATTENTION: this function is called from tcpip_thread (or under CORE_LOCK). + * + * @return 1 on success, 0 on failure + */ +static int +lwip_socket_register_mld6_membership(int s, unsigned int if_idx, const ip6_addr_t *multi_addr) +{ + struct lwip_sock *sock = get_socket(s); + int i; + + if (!sock) { + return 0; + } + + for (i = 0; i < LWIP_SOCKET_MAX_MEMBERSHIPS; i++) { + if (socket_ipv6_multicast_memberships[i].sock == NULL) { + socket_ipv6_multicast_memberships[i].sock = sock; + socket_ipv6_multicast_memberships[i].if_idx = (u8_t)if_idx; + ip6_addr_copy(socket_ipv6_multicast_memberships[i].multi_addr, *multi_addr); + done_socket(sock); + return 1; + } + } + done_socket(sock); + return 0; +} + +/** Unregister a previously registered MLD6 membership. This prevents dropping the membership + * on socket close. + * + * ATTENTION: this function is called from tcpip_thread (or under CORE_LOCK). 
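+ *
+ * (Editor's note, not from the upstream sources: the IPv6 counterpart is a
+ *  hypothetical setsockopt(IPV6_JOIN_GROUP) call like the sketch below; the
+ *  group address and netif index are illustrative and assume LWIP_IPV6_MLD
+ *  is enabled.)
+ *
+ *    struct ipv6_mreq mreq6;
+ *    lwip_inet_pton(AF_INET6, "ff02::1:2", &mreq6.ipv6mr_multiaddr);
+ *    mreq6.ipv6mr_interface = 1;   // netif index, illustrative
+ *    lwip_setsockopt(s6, IPPROTO_IPV6, IPV6_JOIN_GROUP, &mreq6, sizeof(mreq6));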
+ */ +static void +lwip_socket_unregister_mld6_membership(int s, unsigned int if_idx, const ip6_addr_t *multi_addr) +{ + struct lwip_sock *sock = get_socket(s); + int i; + + if (!sock) { + return; + } + + for (i = 0; i < LWIP_SOCKET_MAX_MEMBERSHIPS; i++) { + if ((socket_ipv6_multicast_memberships[i].sock == sock) && + (socket_ipv6_multicast_memberships[i].if_idx == if_idx) && + ip6_addr_cmp(&socket_ipv6_multicast_memberships[i].multi_addr, multi_addr)) { + socket_ipv6_multicast_memberships[i].sock = NULL; + socket_ipv6_multicast_memberships[i].if_idx = NETIF_NO_INDEX; + ip6_addr_set_zero(&socket_ipv6_multicast_memberships[i].multi_addr); + break; + } + } + done_socket(sock); +} + +/** Drop all MLD6 memberships of a socket that were not dropped explicitly via setsockopt. + * + * ATTENTION: this function is NOT called from tcpip_thread (or under CORE_LOCK). + */ +static void +lwip_socket_drop_registered_mld6_memberships(int s) +{ + struct lwip_sock *sock = get_socket(s); + int i; + + if (!sock) { + return; + } + + for (i = 0; i < LWIP_SOCKET_MAX_MEMBERSHIPS; i++) { + if (socket_ipv6_multicast_memberships[i].sock == sock) { + ip_addr_t multi_addr; + u8_t if_idx; + + ip_addr_copy_from_ip6(multi_addr, socket_ipv6_multicast_memberships[i].multi_addr); + if_idx = socket_ipv6_multicast_memberships[i].if_idx; + + socket_ipv6_multicast_memberships[i].sock = NULL; + socket_ipv6_multicast_memberships[i].if_idx = NETIF_NO_INDEX; + ip6_addr_set_zero(&socket_ipv6_multicast_memberships[i].multi_addr); + + netconn_join_leave_group_netif(sock->conn, &multi_addr, if_idx, NETCONN_LEAVE); + } + } + done_socket(sock); +} +#endif /* LWIP_IPV6_MLD */ + +#endif /* LWIP_SOCKET */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/api/tcpip.c b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/api/tcpip.c new file mode 100644 index 0000000..34c9a99 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/api/tcpip.c @@ -0,0 +1,658 @@ +/** + * @file + * Sequential API Main thread module + * + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ + +#include "lwip/opt.h" + +#if !NO_SYS /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/priv/tcpip_priv.h" +#include "lwip/sys.h" +#include "lwip/memp.h" +#include "lwip/mem.h" +#include "lwip/init.h" +#include "lwip/ip.h" +#include "lwip/pbuf.h" +#include "lwip/etharp.h" +#include "netif/ethernet.h" + +#define TCPIP_MSG_VAR_REF(name) API_VAR_REF(name) +#define TCPIP_MSG_VAR_DECLARE(name) API_VAR_DECLARE(struct tcpip_msg, name) +#define TCPIP_MSG_VAR_ALLOC(name) API_VAR_ALLOC(struct tcpip_msg, MEMP_TCPIP_MSG_API, name, ERR_MEM) +#define TCPIP_MSG_VAR_FREE(name) API_VAR_FREE(MEMP_TCPIP_MSG_API, name) + +/* global variables */ +static tcpip_init_done_fn tcpip_init_done; +static void *tcpip_init_done_arg; +static sys_mbox_t tcpip_mbox; + +#if LWIP_TCPIP_CORE_LOCKING +/** The global semaphore to lock the stack. */ +sys_mutex_t lock_tcpip_core; +#endif /* LWIP_TCPIP_CORE_LOCKING */ + +static void tcpip_thread_handle_msg(struct tcpip_msg *msg); + +#if !LWIP_TIMERS +/* wait for a message with timers disabled (e.g. pass a timer-check trigger into tcpip_thread) */ +#define TCPIP_MBOX_FETCH(mbox, msg) sys_mbox_fetch(mbox, msg) +#else /* !LWIP_TIMERS */ +/* wait for a message, timeouts are processed while waiting */ +#define TCPIP_MBOX_FETCH(mbox, msg) tcpip_timeouts_mbox_fetch(mbox, msg) +/** + * Wait (forever) for a message to arrive in an mbox. + * While waiting, timeouts are processed. + * + * @param mbox the mbox to fetch the message from + * @param msg the place to store the message + */ +static void +tcpip_timeouts_mbox_fetch(sys_mbox_t *mbox, void **msg) +{ + u32_t sleeptime, res; + +again: + LWIP_ASSERT_CORE_LOCKED(); + + sleeptime = sys_timeouts_sleeptime(); + if (sleeptime == SYS_TIMEOUTS_SLEEPTIME_INFINITE) { + UNLOCK_TCPIP_CORE(); + sys_arch_mbox_fetch(mbox, msg, 0); + LOCK_TCPIP_CORE(); + return; + } else if (sleeptime == 0) { + sys_check_timeouts(); + /* We try again to fetch a message from the mbox. */ + goto again; + } + + UNLOCK_TCPIP_CORE(); + res = sys_arch_mbox_fetch(mbox, msg, sleeptime); + LOCK_TCPIP_CORE(); + if (res == SYS_ARCH_TIMEOUT) { + /* If a SYS_ARCH_TIMEOUT value is returned, a timeout occurred + before a message could be fetched. */ + sys_check_timeouts(); + /* We try again to fetch a message from the mbox. */ + goto again; + } +} +#endif /* !LWIP_TIMERS */ + +/** + * The main lwIP thread. This thread has exclusive access to lwIP core functions + * (unless access to them is not locked). Other threads communicate with this + * thread using message boxes. + * + * It also starts all the timers to make sure they are running in the right + * thread context. 
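+ *
+ * (Editor's inserted sketch, not upstream code: this thread is normally
+ *  started via tcpip_init(); the semaphore-based "init done" handshake below
+ *  is a common but hypothetical pattern, and the names are made up.)
+ *
+ *    static sys_sem_t init_done_sem;
+ *    static void stack_init_done(void *arg) {
+ *      sys_sem_signal((sys_sem_t *)arg);        // runs in tcpip_thread context
+ *    }
+ *    ...
+ *    sys_sem_new(&init_done_sem, 0);
+ *    tcpip_init(stack_init_done, &init_done_sem);
+ *    sys_arch_sem_wait(&init_done_sem, 0);      // wait until lwIP is up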
+ * + * @param arg unused argument + */ +static void +tcpip_thread(void *arg) +{ + struct tcpip_msg *msg; + LWIP_UNUSED_ARG(arg); + + LWIP_MARK_TCPIP_THREAD(); + + LOCK_TCPIP_CORE(); + if (tcpip_init_done != NULL) { + tcpip_init_done(tcpip_init_done_arg); + } + + while (1) { /* MAIN Loop */ + LWIP_TCPIP_THREAD_ALIVE(); + /* wait for a message, timeouts are processed while waiting */ + TCPIP_MBOX_FETCH(&tcpip_mbox, (void **)&msg); + if (msg == NULL) { + LWIP_DEBUGF(TCPIP_DEBUG, ("tcpip_thread: invalid message: NULL\n")); + LWIP_ASSERT("tcpip_thread: invalid message", 0); + continue; + } + tcpip_thread_handle_msg(msg); + } +} + +/* Handle a single tcpip_msg + * This is in its own function for access by tests only. + */ +static void +tcpip_thread_handle_msg(struct tcpip_msg *msg) +{ + switch (msg->type) { +#if !LWIP_TCPIP_CORE_LOCKING + case TCPIP_MSG_API: + LWIP_DEBUGF(TCPIP_DEBUG, ("tcpip_thread: API message %p\n", (void *)msg)); + msg->msg.api_msg.function(msg->msg.api_msg.msg); + break; + case TCPIP_MSG_API_CALL: + LWIP_DEBUGF(TCPIP_DEBUG, ("tcpip_thread: API CALL message %p\n", (void *)msg)); + msg->msg.api_call.arg->err = msg->msg.api_call.function(msg->msg.api_call.arg); + sys_sem_signal(msg->msg.api_call.sem); + break; +#endif /* !LWIP_TCPIP_CORE_LOCKING */ + +#if !LWIP_TCPIP_CORE_LOCKING_INPUT + case TCPIP_MSG_INPKT: + LWIP_DEBUGF(TCPIP_DEBUG, ("tcpip_thread: PACKET %p\n", (void *)msg)); + if (msg->msg.inp.input_fn(msg->msg.inp.p, msg->msg.inp.netif) != ERR_OK) { + pbuf_free(msg->msg.inp.p); + } + memp_free(MEMP_TCPIP_MSG_INPKT, msg); + break; +#endif /* !LWIP_TCPIP_CORE_LOCKING_INPUT */ + +#if LWIP_TCPIP_TIMEOUT && LWIP_TIMERS + case TCPIP_MSG_TIMEOUT: + LWIP_DEBUGF(TCPIP_DEBUG, ("tcpip_thread: TIMEOUT %p\n", (void *)msg)); + sys_timeout(msg->msg.tmo.msecs, msg->msg.tmo.h, msg->msg.tmo.arg); + memp_free(MEMP_TCPIP_MSG_API, msg); + break; + case TCPIP_MSG_UNTIMEOUT: + LWIP_DEBUGF(TCPIP_DEBUG, ("tcpip_thread: UNTIMEOUT %p\n", (void *)msg)); + sys_untimeout(msg->msg.tmo.h, msg->msg.tmo.arg); + memp_free(MEMP_TCPIP_MSG_API, msg); + break; +#endif /* LWIP_TCPIP_TIMEOUT && LWIP_TIMERS */ + + case TCPIP_MSG_CALLBACK: + LWIP_DEBUGF(TCPIP_DEBUG, ("tcpip_thread: CALLBACK %p\n", (void *)msg)); + msg->msg.cb.function(msg->msg.cb.ctx); + memp_free(MEMP_TCPIP_MSG_API, msg); + break; + + case TCPIP_MSG_CALLBACK_STATIC: + LWIP_DEBUGF(TCPIP_DEBUG, ("tcpip_thread: CALLBACK_STATIC %p\n", (void *)msg)); + msg->msg.cb.function(msg->msg.cb.ctx); + break; + + default: + LWIP_DEBUGF(TCPIP_DEBUG, ("tcpip_thread: invalid message: %d\n", msg->type)); + LWIP_ASSERT("tcpip_thread: invalid message", 0); + break; + } +} + +#ifdef TCPIP_THREAD_TEST +/** Work on queued items in single-threaded test mode */ +int +tcpip_thread_poll_one(void) +{ + int ret = 0; + struct tcpip_msg *msg; + + if (sys_arch_mbox_tryfetch(&tcpip_mbox, (void **)&msg) != SYS_ARCH_TIMEOUT) { + LOCK_TCPIP_CORE(); + if (msg != NULL) { + tcpip_thread_handle_msg(msg); + ret = 1; + } + UNLOCK_TCPIP_CORE(); + } + return ret; +} +#endif + +/** + * Pass a received packet to tcpip_thread for input processing + * + * @param p the received packet + * @param inp the network interface on which the packet was received + * @param input_fn input function to call + */ +err_t +tcpip_inpkt(struct pbuf *p, struct netif *inp, netif_input_fn input_fn) +{ +#if LWIP_TCPIP_CORE_LOCKING_INPUT + err_t ret; + LWIP_DEBUGF(TCPIP_DEBUG, ("tcpip_inpkt: PACKET %p/%p\n", (void *)p, (void *)inp)); + LOCK_TCPIP_CORE(); + ret = input_fn(p, inp); + UNLOCK_TCPIP_CORE(); + return 
ret; +#else /* LWIP_TCPIP_CORE_LOCKING_INPUT */ + struct tcpip_msg *msg; + + LWIP_ASSERT("Invalid mbox", sys_mbox_valid_val(tcpip_mbox)); + + msg = (struct tcpip_msg *)memp_malloc(MEMP_TCPIP_MSG_INPKT); + if (msg == NULL) { + return ERR_MEM; + } + + msg->type = TCPIP_MSG_INPKT; + msg->msg.inp.p = p; + msg->msg.inp.netif = inp; + msg->msg.inp.input_fn = input_fn; + if (sys_mbox_trypost(&tcpip_mbox, msg) != ERR_OK) { + memp_free(MEMP_TCPIP_MSG_INPKT, msg); + return ERR_MEM; + } + return ERR_OK; +#endif /* LWIP_TCPIP_CORE_LOCKING_INPUT */ +} + +/** + * @ingroup lwip_os + * Pass a received packet to tcpip_thread for input processing with + * ethernet_input or ip_input. Don't call directly, pass to netif_add() + * and call netif->input(). + * + * @param p the received packet, p->payload pointing to the Ethernet header or + * to an IP header (if inp doesn't have NETIF_FLAG_ETHARP or + * NETIF_FLAG_ETHERNET flags) + * @param inp the network interface on which the packet was received + */ +err_t +tcpip_input(struct pbuf *p, struct netif *inp) +{ +#if LWIP_ETHERNET + if (inp->flags & (NETIF_FLAG_ETHARP | NETIF_FLAG_ETHERNET)) { + return tcpip_inpkt(p, inp, ethernet_input); + } else +#endif /* LWIP_ETHERNET */ + return tcpip_inpkt(p, inp, ip_input); +} + +/** + * @ingroup lwip_os + * Call a specific function in the thread context of + * tcpip_thread for easy access synchronization. + * A function called in that way may access lwIP core code + * without fearing concurrent access. + * Blocks until the request is posted. + * Must not be called from interrupt context! + * + * @param function the function to call + * @param ctx parameter passed to f + * @return ERR_OK if the function was called, another err_t if not + * + * @see tcpip_try_callback + */ +err_t +tcpip_callback(tcpip_callback_fn function, void *ctx) +{ + struct tcpip_msg *msg; + + LWIP_ASSERT("Invalid mbox", sys_mbox_valid_val(tcpip_mbox)); + + msg = (struct tcpip_msg *)memp_malloc(MEMP_TCPIP_MSG_API); + if (msg == NULL) { + return ERR_MEM; + } + + msg->type = TCPIP_MSG_CALLBACK; + msg->msg.cb.function = function; + msg->msg.cb.ctx = ctx; + + sys_mbox_post(&tcpip_mbox, msg); + return ERR_OK; +} + +/** + * @ingroup lwip_os + * Call a specific function in the thread context of + * tcpip_thread for easy access synchronization. + * A function called in that way may access lwIP core code + * without fearing concurrent access. + * Does NOT block when the request cannot be posted because the + * tcpip_mbox is full, but returns ERR_MEM instead. + * Can be called from interrupt context. 
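+ *
+ * (Editor's inserted sketch, not upstream code: a hypothetical deferred-work
+ *  handler posted from a driver/ISR context; link_change_work and my_netif
+ *  are illustrative names.)
+ *
+ *    static void link_change_work(void *ctx) {
+ *      netif_set_link_up((struct netif *)ctx);  // safe: runs in tcpip_thread
+ *    }
+ *    ...
+ *    if (tcpip_try_callback(link_change_work, &my_netif) != ERR_OK) {
+ *      // mbox full or out of memory: drop the event or retry later
+ *    }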
+ * + * @param function the function to call + * @param ctx parameter passed to f + * @return ERR_OK if the function was called, another err_t if not + * + * @see tcpip_callback + */ +err_t +tcpip_try_callback(tcpip_callback_fn function, void *ctx) +{ + struct tcpip_msg *msg; + + LWIP_ASSERT("Invalid mbox", sys_mbox_valid_val(tcpip_mbox)); + + msg = (struct tcpip_msg *)memp_malloc(MEMP_TCPIP_MSG_API); + if (msg == NULL) { + return ERR_MEM; + } + + msg->type = TCPIP_MSG_CALLBACK; + msg->msg.cb.function = function; + msg->msg.cb.ctx = ctx; + + if (sys_mbox_trypost(&tcpip_mbox, msg) != ERR_OK) { + memp_free(MEMP_TCPIP_MSG_API, msg); + return ERR_MEM; + } + return ERR_OK; +} + +#if LWIP_TCPIP_TIMEOUT && LWIP_TIMERS +/** + * call sys_timeout in tcpip_thread + * + * @param msecs time in milliseconds for timeout + * @param h function to be called on timeout + * @param arg argument to pass to timeout function h + * @return ERR_MEM on memory error, ERR_OK otherwise + */ +err_t +tcpip_timeout(u32_t msecs, sys_timeout_handler h, void *arg) +{ + struct tcpip_msg *msg; + + LWIP_ASSERT("Invalid mbox", sys_mbox_valid_val(tcpip_mbox)); + + msg = (struct tcpip_msg *)memp_malloc(MEMP_TCPIP_MSG_API); + if (msg == NULL) { + return ERR_MEM; + } + + msg->type = TCPIP_MSG_TIMEOUT; + msg->msg.tmo.msecs = msecs; + msg->msg.tmo.h = h; + msg->msg.tmo.arg = arg; + sys_mbox_post(&tcpip_mbox, msg); + return ERR_OK; +} + +/** + * call sys_untimeout in tcpip_thread + * + * @param h function to be called on timeout + * @param arg argument to pass to timeout function h + * @return ERR_MEM on memory error, ERR_OK otherwise + */ +err_t +tcpip_untimeout(sys_timeout_handler h, void *arg) +{ + struct tcpip_msg *msg; + + LWIP_ASSERT("Invalid mbox", sys_mbox_valid_val(tcpip_mbox)); + + msg = (struct tcpip_msg *)memp_malloc(MEMP_TCPIP_MSG_API); + if (msg == NULL) { + return ERR_MEM; + } + + msg->type = TCPIP_MSG_UNTIMEOUT; + msg->msg.tmo.h = h; + msg->msg.tmo.arg = arg; + sys_mbox_post(&tcpip_mbox, msg); + return ERR_OK; +} +#endif /* LWIP_TCPIP_TIMEOUT && LWIP_TIMERS */ + + +/** + * Sends a message to TCPIP thread to call a function. Caller thread blocks on + * on a provided semaphore, which ist NOT automatically signalled by TCPIP thread, + * this has to be done by the user. + * It is recommended to use LWIP_TCPIP_CORE_LOCKING since this is the way + * with least runtime overhead. + * + * @param fn function to be called from TCPIP thread + * @param apimsg argument to API function + * @param sem semaphore to wait on + * @return ERR_OK if the function was called, another err_t if not + */ +err_t +tcpip_send_msg_wait_sem(tcpip_callback_fn fn, void *apimsg, sys_sem_t *sem) +{ +#if LWIP_TCPIP_CORE_LOCKING + LWIP_UNUSED_ARG(sem); + LOCK_TCPIP_CORE(); + fn(apimsg); + UNLOCK_TCPIP_CORE(); + return ERR_OK; +#else /* LWIP_TCPIP_CORE_LOCKING */ + TCPIP_MSG_VAR_DECLARE(msg); + + LWIP_ASSERT("semaphore not initialized", sys_sem_valid(sem)); + LWIP_ASSERT("Invalid mbox", sys_mbox_valid_val(tcpip_mbox)); + + TCPIP_MSG_VAR_ALLOC(msg); + TCPIP_MSG_VAR_REF(msg).type = TCPIP_MSG_API; + TCPIP_MSG_VAR_REF(msg).msg.api_msg.function = fn; + TCPIP_MSG_VAR_REF(msg).msg.api_msg.msg = apimsg; + sys_mbox_post(&tcpip_mbox, &TCPIP_MSG_VAR_REF(msg)); + sys_arch_sem_wait(sem, 0); + TCPIP_MSG_VAR_FREE(msg); + return ERR_OK; +#endif /* LWIP_TCPIP_CORE_LOCKING */ +} + +/** + * Synchronously calls function in TCPIP thread and waits for its completion. + * It is recommended to use LWIP_TCPIP_CORE_LOCKING (preferred) or + * LWIP_NETCONN_SEM_PER_THREAD. 
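+ *
+ * (Editor's inserted sketch, not upstream code: callers typically embed
+ *  struct tcpip_api_call_data as the first member of their own message
+ *  struct, as in this hypothetical example; do_set_ttl and ttl_msg are
+ *  made-up names.)
+ *
+ *    struct ttl_msg { struct tcpip_api_call_data call; u8_t ttl; };
+ *    static err_t do_set_ttl(struct tcpip_api_call_data *c) {
+ *      struct ttl_msg *m = (struct ttl_msg *)c;  // valid: call is 1st member
+ *      LWIP_UNUSED_ARG(m);  // ...touch lwIP core state here...
+ *      return ERR_OK;
+ *    }
+ *    ...
+ *    struct ttl_msg msg = { .ttl = 64 };
+ *    tcpip_api_call(do_set_ttl, &msg.call);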
+ * If not, a semaphore is created and destroyed on every call which is usually + * an expensive/slow operation. + * @param fn Function to call + * @param call Call parameters + * @return Return value from tcpip_api_call_fn + */ +err_t +tcpip_api_call(tcpip_api_call_fn fn, struct tcpip_api_call_data *call) +{ +#if LWIP_TCPIP_CORE_LOCKING + err_t err; + LOCK_TCPIP_CORE(); + err = fn(call); + UNLOCK_TCPIP_CORE(); + return err; +#else /* LWIP_TCPIP_CORE_LOCKING */ + TCPIP_MSG_VAR_DECLARE(msg); + +#if !LWIP_NETCONN_SEM_PER_THREAD + err_t err = sys_sem_new(&call->sem, 0); + if (err != ERR_OK) { + return err; + } +#endif /* LWIP_NETCONN_SEM_PER_THREAD */ + + LWIP_ASSERT("Invalid mbox", sys_mbox_valid_val(tcpip_mbox)); + + TCPIP_MSG_VAR_ALLOC(msg); + TCPIP_MSG_VAR_REF(msg).type = TCPIP_MSG_API_CALL; + TCPIP_MSG_VAR_REF(msg).msg.api_call.arg = call; + TCPIP_MSG_VAR_REF(msg).msg.api_call.function = fn; +#if LWIP_NETCONN_SEM_PER_THREAD + TCPIP_MSG_VAR_REF(msg).msg.api_call.sem = LWIP_NETCONN_THREAD_SEM_GET(); +#else /* LWIP_NETCONN_SEM_PER_THREAD */ + TCPIP_MSG_VAR_REF(msg).msg.api_call.sem = &call->sem; +#endif /* LWIP_NETCONN_SEM_PER_THREAD */ + sys_mbox_post(&tcpip_mbox, &TCPIP_MSG_VAR_REF(msg)); + sys_arch_sem_wait(TCPIP_MSG_VAR_REF(msg).msg.api_call.sem, 0); + TCPIP_MSG_VAR_FREE(msg); + +#if !LWIP_NETCONN_SEM_PER_THREAD + sys_sem_free(&call->sem); +#endif /* LWIP_NETCONN_SEM_PER_THREAD */ + + return call->err; +#endif /* LWIP_TCPIP_CORE_LOCKING */ +} + +/** + * @ingroup lwip_os + * Allocate a structure for a static callback message and initialize it. + * The message has a special type such that lwIP never frees it. + * This is intended to be used to send "static" messages from interrupt context, + * e.g. the message is allocated once and posted several times from an IRQ + * using tcpip_callbackmsg_trycallback(). + * Example usage: Trigger execution of an ethernet IRQ DPC routine in lwIP thread context. + * + * @param function the function to call + * @param ctx parameter passed to function + * @return a struct pointer to pass to tcpip_callbackmsg_trycallback(). + * + * @see tcpip_callbackmsg_trycallback() + * @see tcpip_callbackmsg_delete() + */ +struct tcpip_callback_msg * +tcpip_callbackmsg_new(tcpip_callback_fn function, void *ctx) +{ + struct tcpip_msg *msg = (struct tcpip_msg *)memp_malloc(MEMP_TCPIP_MSG_API); + if (msg == NULL) { + return NULL; + } + msg->type = TCPIP_MSG_CALLBACK_STATIC; + msg->msg.cb.function = function; + msg->msg.cb.ctx = ctx; + return (struct tcpip_callback_msg *)msg; +} + +/** + * @ingroup lwip_os + * Free a callback message allocated by tcpip_callbackmsg_new(). + * + * @param msg the message to free + * + * @see tcpip_callbackmsg_new() + */ +void +tcpip_callbackmsg_delete(struct tcpip_callback_msg *msg) +{ + memp_free(MEMP_TCPIP_MSG_API, msg); +} + +/** + * @ingroup lwip_os + * Try to post a callback-message to the tcpip_thread tcpip_mbox. + * + * @param msg pointer to the message to post + * @return sys_mbox_trypost() return code + * + * @see tcpip_callbackmsg_new() + */ +err_t +tcpip_callbackmsg_trycallback(struct tcpip_callback_msg *msg) +{ + LWIP_ASSERT("Invalid mbox", sys_mbox_valid_val(tcpip_mbox)); + return sys_mbox_trypost(&tcpip_mbox, msg); +} + +/** + * @ingroup lwip_os + * Try to post a callback-message to the tcpip_thread mbox. + * Same as @ref tcpip_callbackmsg_trycallback but calls sys_mbox_trypost_fromisr(), + * mainly to help FreeRTOS, where calls differ between task level and ISR level. 
+ * + * @param msg pointer to the message to post + * @return sys_mbox_trypost_fromisr() return code (without change, so this + * knowledge can be used to e.g. propagate "bool needs_scheduling") + * + * @see tcpip_callbackmsg_new() + */ +err_t +tcpip_callbackmsg_trycallback_fromisr(struct tcpip_callback_msg *msg) +{ + LWIP_ASSERT("Invalid mbox", sys_mbox_valid_val(tcpip_mbox)); + return sys_mbox_trypost_fromisr(&tcpip_mbox, msg); +} + +/** + * @ingroup lwip_os + * Initialize this module: + * - initialize all sub modules + * - start the tcpip_thread + * + * @param initfunc a function to call when tcpip_thread is running and finished initializing + * @param arg argument to pass to initfunc + */ +void +tcpip_init(tcpip_init_done_fn initfunc, void *arg) +{ + lwip_init(); + + tcpip_init_done = initfunc; + tcpip_init_done_arg = arg; + if (sys_mbox_new(&tcpip_mbox, TCPIP_MBOX_SIZE) != ERR_OK) { + LWIP_ASSERT("failed to create tcpip_thread mbox", 0); + } +#if LWIP_TCPIP_CORE_LOCKING + if (sys_mutex_new(&lock_tcpip_core) != ERR_OK) { + LWIP_ASSERT("failed to create lock_tcpip_core", 0); + } +#endif /* LWIP_TCPIP_CORE_LOCKING */ + + sys_thread_new(TCPIP_THREAD_NAME, tcpip_thread, NULL, TCPIP_THREAD_STACKSIZE, TCPIP_THREAD_PRIO); +} + +/** + * Simple callback function used with tcpip_callback to free a pbuf + * (pbuf_free has a wrong signature for tcpip_callback) + * + * @param p The pbuf (chain) to be dereferenced. + */ +static void +pbuf_free_int(void *p) +{ + struct pbuf *q = (struct pbuf *)p; + pbuf_free(q); +} + +/** + * A simple wrapper function that allows you to free a pbuf from interrupt context. + * + * @param p The pbuf (chain) to be dereferenced. + * @return ERR_OK if callback could be enqueued, an err_t if not + */ +err_t +pbuf_free_callback(struct pbuf *p) +{ + return tcpip_try_callback(pbuf_free_int, p); +} + +/** + * A simple wrapper function that allows you to free heap memory from + * interrupt context. + * + * @param m the heap memory to free + * @return ERR_OK if callback could be enqueued, an err_t if not + */ +err_t +mem_free_callback(void *m) +{ + return tcpip_try_callback(mem_free, m); +} + +#endif /* !NO_SYS */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/apps/mqtt/mqtt.c b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/apps/mqtt/mqtt.c new file mode 100644 index 0000000..f785ebe --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/apps/mqtt/mqtt.c @@ -0,0 +1,1463 @@ +/** + * @file + * MQTT client + * + * @defgroup mqtt MQTT client + * @ingroup apps + * @verbinclude mqtt_client.txt + */ + +/* + * Copyright (c) 2016 Erik Andersson + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack + * + * Author: Erik Andersson + * + * + * @todo: + * - Handle large outgoing payloads for PUBLISH messages + * - Fix restriction of a single topic in each (UN)SUBSCRIBE message (protocol has support for multiple topics) + * - Add support for legacy MQTT protocol version + * + * Please coordinate changes and requests with Erik Andersson + * Erik Andersson + * + */ +#include "lwip/apps/mqtt.h" +#include "lwip/apps/mqtt_priv.h" +#include "lwip/timeouts.h" +#include "lwip/ip_addr.h" +#include "lwip/mem.h" +#include "lwip/err.h" +#include "lwip/pbuf.h" +#include "lwip/altcp.h" +#include "lwip/altcp_tcp.h" +#include "lwip/altcp_tls.h" +#include + +#if LWIP_TCP && LWIP_CALLBACK_API + +/** + * MQTT_DEBUG: Default is off. + */ +#if !defined MQTT_DEBUG || defined __DOXYGEN__ +#define MQTT_DEBUG LWIP_DBG_OFF +#endif + +#define MQTT_DEBUG_TRACE (MQTT_DEBUG | LWIP_DBG_TRACE) +#define MQTT_DEBUG_STATE (MQTT_DEBUG | LWIP_DBG_STATE) +#define MQTT_DEBUG_WARN (MQTT_DEBUG | LWIP_DBG_LEVEL_WARNING) +#define MQTT_DEBUG_WARN_STATE (MQTT_DEBUG | LWIP_DBG_LEVEL_WARNING | LWIP_DBG_STATE) +#define MQTT_DEBUG_SERIOUS (MQTT_DEBUG | LWIP_DBG_LEVEL_SERIOUS) + + + +/** + * MQTT client connection states + */ +enum { + TCP_DISCONNECTED, + TCP_CONNECTING, + MQTT_CONNECTING, + MQTT_CONNECTED +}; + +/** + * MQTT control message types + */ +enum mqtt_message_type { + MQTT_MSG_TYPE_CONNECT = 1, + MQTT_MSG_TYPE_CONNACK = 2, + MQTT_MSG_TYPE_PUBLISH = 3, + MQTT_MSG_TYPE_PUBACK = 4, + MQTT_MSG_TYPE_PUBREC = 5, + MQTT_MSG_TYPE_PUBREL = 6, + MQTT_MSG_TYPE_PUBCOMP = 7, + MQTT_MSG_TYPE_SUBSCRIBE = 8, + MQTT_MSG_TYPE_SUBACK = 9, + MQTT_MSG_TYPE_UNSUBSCRIBE = 10, + MQTT_MSG_TYPE_UNSUBACK = 11, + MQTT_MSG_TYPE_PINGREQ = 12, + MQTT_MSG_TYPE_PINGRESP = 13, + MQTT_MSG_TYPE_DISCONNECT = 14 +}; + +/** Helpers to extract control packet type and qos from first byte in fixed header */ +#define MQTT_CTL_PACKET_TYPE(fixed_hdr_byte0) ((fixed_hdr_byte0 & 0xf0) >> 4) +#define MQTT_CTL_PACKET_QOS(fixed_hdr_byte0) ((fixed_hdr_byte0 & 0x6) >> 1) + +/** + * MQTT connect flags, only used in CONNECT message + */ +enum mqtt_connect_flag { + MQTT_CONNECT_FLAG_USERNAME = 1 << 7, + MQTT_CONNECT_FLAG_PASSWORD = 1 << 6, + MQTT_CONNECT_FLAG_WILL_RETAIN = 1 << 5, + MQTT_CONNECT_FLAG_WILL = 1 << 2, + MQTT_CONNECT_FLAG_CLEAN_SESSION = 1 << 1 +}; + + +static void mqtt_cyclic_timer(void *arg); + +#if defined(LWIP_DEBUG) +static const char *const mqtt_message_type_str[15] = { + "UNDEFINED", + "CONNECT", + "CONNACK", + "PUBLISH", + "PUBACK", + "PUBREC", + "PUBREL", + "PUBCOMP", + "SUBSCRIBE", + "SUBACK", + "UNSUBSCRIBE", + "UNSUBACK", + "PINGREQ", + "PINGRESP", + "DISCONNECT" +}; + +/** + * Message type value to string + * @param msg_type see enum mqtt_message_type + * + * @return 
Control message type text string + */ +static const char * +mqtt_msg_type_to_str(u8_t msg_type) +{ + if (msg_type >= LWIP_ARRAYSIZE(mqtt_message_type_str)) { + msg_type = 0; + } + return mqtt_message_type_str[msg_type]; +} + +#endif + + +/** + * Generate MQTT packet identifier + * @param client MQTT client + * @return New packet identifier, range 1 to 65535 + */ +static u16_t +msg_generate_packet_id(mqtt_client_t *client) +{ + client->pkt_id_seq++; + if (client->pkt_id_seq == 0) { + client->pkt_id_seq++; + } + return client->pkt_id_seq; +} + +/*--------------------------------------------------------------------------------------------------------------------- */ +/* Output ring buffer */ + +/** Add single item to ring buffer */ +static void +mqtt_ringbuf_put(struct mqtt_ringbuf_t *rb, u8_t item) +{ + rb->buf[rb->put] = item; + rb->put++; + if (rb->put >= MQTT_OUTPUT_RINGBUF_SIZE) { + rb->put = 0; + } +} + +/** Return pointer to ring buffer get position */ +static u8_t * +mqtt_ringbuf_get_ptr(struct mqtt_ringbuf_t *rb) +{ + return &rb->buf[rb->get]; +} + +static void +mqtt_ringbuf_advance_get_idx(struct mqtt_ringbuf_t *rb, u16_t len) +{ + LWIP_ASSERT("mqtt_ringbuf_advance_get_idx: len < MQTT_OUTPUT_RINGBUF_SIZE", len < MQTT_OUTPUT_RINGBUF_SIZE); + + rb->get += len; + if (rb->get >= MQTT_OUTPUT_RINGBUF_SIZE) { + rb->get = rb->get - MQTT_OUTPUT_RINGBUF_SIZE; + } +} + +/** Return number of bytes in ring buffer */ +static u16_t +mqtt_ringbuf_len(struct mqtt_ringbuf_t *rb) +{ + u32_t len = rb->put - rb->get; + if (len > 0xFFFF) { + len += MQTT_OUTPUT_RINGBUF_SIZE; + } + return (u16_t)len; +} + +/** Return number of bytes free in ring buffer */ +#define mqtt_ringbuf_free(rb) (MQTT_OUTPUT_RINGBUF_SIZE - mqtt_ringbuf_len(rb)) + +/** Return number of bytes possible to read without wrapping around */ +#define mqtt_ringbuf_linear_read_length(rb) LWIP_MIN(mqtt_ringbuf_len(rb), (MQTT_OUTPUT_RINGBUF_SIZE - (rb)->get)) + +/** + * Try send as many bytes as possible from output ring buffer + * @param rb Output ring buffer + * @param tpcb TCP connection handle + */ +static void +mqtt_output_send(struct mqtt_ringbuf_t *rb, struct altcp_pcb *tpcb) +{ + err_t err; + u8_t wrap = 0; + u16_t ringbuf_lin_len = mqtt_ringbuf_linear_read_length(rb); + u16_t send_len = altcp_sndbuf(tpcb); + LWIP_ASSERT("mqtt_output_send: tpcb != NULL", tpcb != NULL); + + if (send_len == 0 || ringbuf_lin_len == 0) { + return; + } + + LWIP_DEBUGF(MQTT_DEBUG_TRACE, ("mqtt_output_send: tcp_sndbuf: %d bytes, ringbuf_linear_available: %d, get %d, put %d\n", + send_len, ringbuf_lin_len, rb->get, rb->put)); + + if (send_len > ringbuf_lin_len) { + /* Space in TCP output buffer is larger than available in ring buffer linear portion */ + send_len = ringbuf_lin_len; + /* Wrap around if more data in ring buffer after linear portion */ + wrap = (mqtt_ringbuf_len(rb) > ringbuf_lin_len); + } + err = altcp_write(tpcb, mqtt_ringbuf_get_ptr(rb), send_len, TCP_WRITE_FLAG_COPY | (wrap ? 
TCP_WRITE_FLAG_MORE : 0)); + if ((err == ERR_OK) && wrap) { + mqtt_ringbuf_advance_get_idx(rb, send_len); + /* Use the lesser one of ring buffer linear length and TCP send buffer size */ + send_len = LWIP_MIN(altcp_sndbuf(tpcb), mqtt_ringbuf_linear_read_length(rb)); + err = altcp_write(tpcb, mqtt_ringbuf_get_ptr(rb), send_len, TCP_WRITE_FLAG_COPY); + } + + if (err == ERR_OK) { + mqtt_ringbuf_advance_get_idx(rb, send_len); + /* Flush */ + altcp_output(tpcb); + } else { + LWIP_DEBUGF(MQTT_DEBUG_WARN, ("mqtt_output_send: Send failed with err %d (\"%s\")\n", err, lwip_strerr(err))); + } +} + + + +/*--------------------------------------------------------------------------------------------------------------------- */ +/* Request queue */ + +/** + * Create request item + * @param r_objs Pointer to request objects + * @param r_objs_len Number of array entries + * @param pkt_id Packet identifier of request + * @param cb Packet callback to call when requests lifetime ends + * @param arg Parameter following callback + * @return Request or NULL if failed to create + */ +static struct mqtt_request_t * +mqtt_create_request(struct mqtt_request_t *r_objs, size_t r_objs_len, u16_t pkt_id, mqtt_request_cb_t cb, void *arg) +{ + struct mqtt_request_t *r = NULL; + u8_t n; + LWIP_ASSERT("mqtt_create_request: r_objs != NULL", r_objs != NULL); + for (n = 0; n < r_objs_len; n++) { + /* Item point to itself if not in use */ + if (r_objs[n].next == &r_objs[n]) { + r = &r_objs[n]; + r->next = NULL; + r->cb = cb; + r->arg = arg; + r->pkt_id = pkt_id; + break; + } + } + return r; +} + + +/** + * Append request to pending request queue + * @param tail Pointer to request queue tail pointer + * @param r Request to append + */ +static void +mqtt_append_request(struct mqtt_request_t **tail, struct mqtt_request_t *r) +{ + struct mqtt_request_t *head = NULL; + s16_t time_before = 0; + struct mqtt_request_t *iter; + + LWIP_ASSERT("mqtt_append_request: tail != NULL", tail != NULL); + + /* Iterate trough queue to find head, and count total timeout time */ + for (iter = *tail; iter != NULL; iter = iter->next) { + time_before += iter->timeout_diff; + head = iter; + } + + LWIP_ASSERT("mqtt_append_request: time_before <= MQTT_REQ_TIMEOUT", time_before <= MQTT_REQ_TIMEOUT); + r->timeout_diff = MQTT_REQ_TIMEOUT - time_before; + if (head == NULL) { + *tail = r; + } else { + head->next = r; + } +} + + +/** + * Delete request item + * @param r Request item to delete + */ +static void +mqtt_delete_request(struct mqtt_request_t *r) +{ + if (r != NULL) { + r->next = r; + } +} + +/** + * Remove a request item with a specific packet identifier from request queue + * @param tail Pointer to request queue tail pointer + * @param pkt_id Packet identifier of request to take + * @return Request item if found, NULL if not + */ +static struct mqtt_request_t * +mqtt_take_request(struct mqtt_request_t **tail, u16_t pkt_id) +{ + struct mqtt_request_t *iter = NULL, *prev = NULL; + LWIP_ASSERT("mqtt_take_request: tail != NULL", tail != NULL); + /* Search all request for pkt_id */ + for (iter = *tail; iter != NULL; iter = iter->next) { + if (iter->pkt_id == pkt_id) { + break; + } + prev = iter; + } + + /* If request was found */ + if (iter != NULL) { + /* unchain */ + if (prev == NULL) { + *tail = iter->next; + } else { + prev->next = iter->next; + } + /* If exists, add remaining timeout time for the request to next */ + if (iter->next != NULL) { + iter->next->timeout_diff += iter->timeout_diff; + } + iter->next = NULL; + } + return iter; +} + +/** + * 
Handle requests timeout + * @param tail Pointer to request queue tail pointer + * @param t Time since last call in seconds + */ +static void +mqtt_request_time_elapsed(struct mqtt_request_t **tail, u8_t t) +{ + struct mqtt_request_t *r; + LWIP_ASSERT("mqtt_request_time_elapsed: tail != NULL", tail != NULL); + r = *tail; + while (t > 0 && r != NULL) { + if (t >= r->timeout_diff) { + t -= (u8_t)r->timeout_diff; + /* Unchain */ + *tail = r->next; + /* Notify upper layer about timeout */ + if (r->cb != NULL) { + r->cb(r->arg, ERR_TIMEOUT); + } + mqtt_delete_request(r); + /* Tail might be be modified in callback, so re-read it in every iteration */ + r = *(struct mqtt_request_t *const volatile *)tail; + } else { + r->timeout_diff -= t; + t = 0; + } + } +} + +/** + * Free all request items + * @param tail Pointer to request queue tail pointer + */ +static void +mqtt_clear_requests(struct mqtt_request_t **tail) +{ + struct mqtt_request_t *iter, *next; + LWIP_ASSERT("mqtt_clear_requests: tail != NULL", tail != NULL); + for (iter = *tail; iter != NULL; iter = next) { + next = iter->next; + mqtt_delete_request(iter); + } + *tail = NULL; +} +/** + * Initialize all request items + * @param r_objs Pointer to request objects + * @param r_objs_len Number of array entries + */ +static void +mqtt_init_requests(struct mqtt_request_t *r_objs, size_t r_objs_len) +{ + u8_t n; + LWIP_ASSERT("mqtt_init_requests: r_objs != NULL", r_objs != NULL); + for (n = 0; n < r_objs_len; n++) { + /* Item pointing to itself indicates unused */ + r_objs[n].next = &r_objs[n]; + } +} + +/*--------------------------------------------------------------------------------------------------------------------- */ +/* Output message build helpers */ + + +static void +mqtt_output_append_u8(struct mqtt_ringbuf_t *rb, u8_t value) +{ + mqtt_ringbuf_put(rb, value); +} + +static +void mqtt_output_append_u16(struct mqtt_ringbuf_t *rb, u16_t value) +{ + mqtt_ringbuf_put(rb, value >> 8); + mqtt_ringbuf_put(rb, value & 0xff); +} + +static void +mqtt_output_append_buf(struct mqtt_ringbuf_t *rb, const void *data, u16_t length) +{ + u16_t n; + for (n = 0; n < length; n++) { + mqtt_ringbuf_put(rb, ((const u8_t *)data)[n]); + } +} + +static void +mqtt_output_append_string(struct mqtt_ringbuf_t *rb, const char *str, u16_t length) +{ + u16_t n; + mqtt_ringbuf_put(rb, length >> 8); + mqtt_ringbuf_put(rb, length & 0xff); + for (n = 0; n < length; n++) { + mqtt_ringbuf_put(rb, str[n]); + } +} + +/** + * Append fixed header + * @param rb Output ring buffer + * @param msg_type see enum mqtt_message_type + * @param fdup MQTT DUP flag + * @param fqos MQTT QoS field + * @param fretain MQTT retain flag + * @param r_length Remaining length after fixed header + */ + +static void +mqtt_output_append_fixed_header(struct mqtt_ringbuf_t *rb, u8_t msg_type, u8_t fdup, + u8_t fqos, u8_t fretain, u16_t r_length) +{ + /* Start with control byte */ + mqtt_output_append_u8(rb, (((msg_type & 0x0f) << 4) | ((fdup & 1) << 3) | ((fqos & 3) << 1) | (fretain & 1))); + /* Encode remaining length field */ + do { + mqtt_output_append_u8(rb, (r_length & 0x7f) | (r_length >= 128 ? 
0x80 : 0)); + r_length >>= 7; + } while (r_length > 0); +} + + +/** + * Check output buffer space + * @param rb Output ring buffer + * @param r_length Remaining length after fixed header + * @return 1 if message will fit, 0 if not enough buffer space + */ +static u8_t +mqtt_output_check_space(struct mqtt_ringbuf_t *rb, u16_t r_length) +{ + /* Start with length of type byte + remaining length */ + u16_t total_len = 1 + r_length; + + LWIP_ASSERT("mqtt_output_check_space: rb != NULL", rb != NULL); + + /* Calculate number of required bytes to contain the remaining bytes field and add to total*/ + do { + total_len++; + r_length >>= 7; + } while (r_length > 0); + + return (total_len <= mqtt_ringbuf_free(rb)); +} + + +/** + * Close connection to server + * @param client MQTT client + * @param reason Reason for disconnection + */ +static void +mqtt_close(mqtt_client_t *client, mqtt_connection_status_t reason) +{ + LWIP_ASSERT("mqtt_close: client != NULL", client != NULL); + + /* Bring down TCP connection if not already done */ + if (client->conn != NULL) { + err_t res; + altcp_recv(client->conn, NULL); + altcp_err(client->conn, NULL); + altcp_sent(client->conn, NULL); + res = altcp_close(client->conn); + if (res != ERR_OK) { + altcp_abort(client->conn); + LWIP_DEBUGF(MQTT_DEBUG_TRACE, ("mqtt_close: Close err=%s\n", lwip_strerr(res))); + } + client->conn = NULL; + } + + /* Remove all pending requests */ + mqtt_clear_requests(&client->pend_req_queue); + /* Stop cyclic timer */ + sys_untimeout(mqtt_cyclic_timer, client); + + /* Notify upper layer of disconnection if changed state */ + if (client->conn_state != TCP_DISCONNECTED) { + + client->conn_state = TCP_DISCONNECTED; + if (client->connect_cb != NULL) { + client->connect_cb(client, client->connect_arg, reason); + } + } +} + + +/** + * Interval timer, called every MQTT_CYCLIC_TIMER_INTERVAL seconds in MQTT_CONNECTING and MQTT_CONNECTED states + * @param arg MQTT client + */ +static void +mqtt_cyclic_timer(void *arg) +{ + u8_t restart_timer = 1; + mqtt_client_t *client = (mqtt_client_t *)arg; + LWIP_ASSERT("mqtt_cyclic_timer: client != NULL", client != NULL); + + if (client->conn_state == MQTT_CONNECTING) { + client->cyclic_tick++; + if ((client->cyclic_tick * MQTT_CYCLIC_TIMER_INTERVAL) >= MQTT_CONNECT_TIMOUT) { + LWIP_DEBUGF(MQTT_DEBUG_TRACE, ("mqtt_cyclic_timer: CONNECT attempt to server timed out\n")); + /* Disconnect TCP */ + mqtt_close(client, MQTT_CONNECT_TIMEOUT); + restart_timer = 0; + } + } else if (client->conn_state == MQTT_CONNECTED) { + /* Handle timeout for pending requests */ + mqtt_request_time_elapsed(&client->pend_req_queue, MQTT_CYCLIC_TIMER_INTERVAL); + + /* keep_alive > 0 means keep alive functionality shall be used */ + if (client->keep_alive > 0) { + + client->server_watchdog++; + /* If reception from server has been idle for 1.5*keep_alive time, server is considered unresponsive */ + if ((client->server_watchdog * MQTT_CYCLIC_TIMER_INTERVAL) > (client->keep_alive + client->keep_alive / 2)) { + LWIP_DEBUGF(MQTT_DEBUG_WARN, ("mqtt_cyclic_timer: Server incoming keep-alive timeout\n")); + mqtt_close(client, MQTT_CONNECT_TIMEOUT); + restart_timer = 0; + } + + /* If time for a keep alive message to be sent, transmission has been idle for keep_alive time */ + if ((client->cyclic_tick * MQTT_CYCLIC_TIMER_INTERVAL) >= client->keep_alive) { + LWIP_DEBUGF(MQTT_DEBUG_TRACE, ("mqtt_cyclic_timer: Sending keep-alive message to server\n")); + if (mqtt_output_check_space(&client->output, 0) != 0) { + 
mqtt_output_append_fixed_header(&client->output, MQTT_MSG_TYPE_PINGREQ, 0, 0, 0, 0); + client->cyclic_tick = 0; + } + } else { + client->cyclic_tick++; + } + } + } else { + LWIP_DEBUGF(MQTT_DEBUG_WARN, ("mqtt_cyclic_timer: Timer should not be running in state %d\n", client->conn_state)); + restart_timer = 0; + } + if (restart_timer) { + sys_timeout(MQTT_CYCLIC_TIMER_INTERVAL * 1000, mqtt_cyclic_timer, arg); + } +} + + +/** + * Send PUBACK, PUBREC or PUBREL response message + * @param client MQTT client + * @param msg PUBACK, PUBREC or PUBREL + * @param pkt_id Packet identifier + * @param qos QoS value + * @return ERR_OK if successful, ERR_MEM if out of memory + */ +static err_t +pub_ack_rec_rel_response(mqtt_client_t *client, u8_t msg, u16_t pkt_id, u8_t qos) +{ + err_t err = ERR_OK; + if (mqtt_output_check_space(&client->output, 2)) { + mqtt_output_append_fixed_header(&client->output, msg, 0, qos, 0, 2); + mqtt_output_append_u16(&client->output, pkt_id); + mqtt_output_send(&client->output, client->conn); + } else { + LWIP_DEBUGF(MQTT_DEBUG_TRACE, ("pub_ack_rec_rel_response: OOM creating response: %s with pkt_id: %d\n", + mqtt_msg_type_to_str(msg), pkt_id)); + err = ERR_MEM; + } + return err; +} + +/** + * Subscribe response from server + * @param r Matching request + * @param result Result code from server + */ +static void +mqtt_incomming_suback(struct mqtt_request_t *r, u8_t result) +{ + if (r->cb != NULL) { + r->cb(r->arg, result < 3 ? ERR_OK : ERR_ABRT); + } +} + + +/** + * Complete MQTT message received or buffer full + * @param client MQTT client + * @param fixed_hdr_idx header index + * @param length length received part + * @param remaining_length Remaining length of complete message + */ +static mqtt_connection_status_t +mqtt_message_received(mqtt_client_t *client, u8_t fixed_hdr_idx, u16_t length, u32_t remaining_length) +{ + mqtt_connection_status_t res = MQTT_CONNECT_ACCEPTED; + + u8_t *var_hdr_payload = client->rx_buffer + fixed_hdr_idx; + size_t var_hdr_payload_bufsize = sizeof(client->rx_buffer) - fixed_hdr_idx; + + /* Control packet type */ + u8_t pkt_type = MQTT_CTL_PACKET_TYPE(client->rx_buffer[0]); + u16_t pkt_id = 0; + + LWIP_ASSERT("client->msg_idx < MQTT_VAR_HEADER_BUFFER_LEN", client->msg_idx < MQTT_VAR_HEADER_BUFFER_LEN); + LWIP_ASSERT("fixed_hdr_idx <= client->msg_idx", fixed_hdr_idx <= client->msg_idx); + LWIP_ERROR("buffer length mismatch", fixed_hdr_idx + length <= MQTT_VAR_HEADER_BUFFER_LEN, + return MQTT_CONNECT_DISCONNECTED); + + if (pkt_type == MQTT_MSG_TYPE_CONNACK) { + if (client->conn_state == MQTT_CONNECTING) { + if (length < 2) { + LWIP_DEBUGF(MQTT_DEBUG_WARN,( "mqtt_message_received: Received short CONNACK message\n")); + goto out_disconnect; + } + /* Get result code from CONNACK */ + res = (mqtt_connection_status_t)var_hdr_payload[1]; + LWIP_DEBUGF(MQTT_DEBUG_TRACE, ("mqtt_message_received: Connect response code %d\n", res)); + if (res == MQTT_CONNECT_ACCEPTED) { + /* Reset cyclic_tick when changing to connected state */ + client->cyclic_tick = 0; + client->conn_state = MQTT_CONNECTED; + /* Notify upper layer */ + if (client->connect_cb != 0) { + client->connect_cb(client, client->connect_arg, res); + } + } + } else { + LWIP_DEBUGF(MQTT_DEBUG_WARN, ("mqtt_message_received: Received CONNACK in connected state\n")); + } + } else if (pkt_type == MQTT_MSG_TYPE_PINGRESP) { + LWIP_DEBUGF(MQTT_DEBUG_TRACE, ( "mqtt_message_received: Received PINGRESP from server\n")); + + } else if (pkt_type == MQTT_MSG_TYPE_PUBLISH) { + u16_t payload_offset = 0; + u16_t 
payload_length = length; + u8_t qos = MQTT_CTL_PACKET_QOS(client->rx_buffer[0]); + + if (client->msg_idx <= MQTT_VAR_HEADER_BUFFER_LEN) { + /* Should have topic and pkt id*/ + u8_t *topic; + u16_t after_topic; + u8_t bkp; + u16_t topic_len; + u16_t qos_len = (qos ? 2U : 0U); + if (length < 2 + qos_len) { + LWIP_DEBUGF(MQTT_DEBUG_WARN,( "mqtt_message_received: Received short PUBLISH packet\n")); + goto out_disconnect; + } + topic_len = var_hdr_payload[0]; + topic_len = (topic_len << 8) + (u16_t)(var_hdr_payload[1]); + if ((topic_len > length - (2 + qos_len)) || + (topic_len > var_hdr_payload_bufsize - (2 + qos_len))) { + LWIP_DEBUGF(MQTT_DEBUG_WARN,( "mqtt_message_received: Received short PUBLISH packet (topic)\n")); + goto out_disconnect; + } + + topic = var_hdr_payload + 2; + after_topic = 2 + topic_len; + /* Check buffer length, add one byte even for QoS 0 so that zero termination will fit */ + if ((after_topic + (qos ? 2U : 1U)) > var_hdr_payload_bufsize) { + LWIP_DEBUGF(MQTT_DEBUG_WARN, ("mqtt_message_received: Receive buffer can not fit topic + pkt_id\n")); + goto out_disconnect; + } + + /* id for QoS 1 and 2 */ + if (qos > 0) { + if (length < after_topic + 2U) { + LWIP_DEBUGF(MQTT_DEBUG_WARN,( "mqtt_message_received: Received short PUBLISH packet (after_topic)\n")); + goto out_disconnect; + } + client->inpub_pkt_id = ((u16_t)var_hdr_payload[after_topic] << 8) + (u16_t)var_hdr_payload[after_topic + 1]; + after_topic += 2; + } else { + client->inpub_pkt_id = 0; + } + /* Take backup of byte after topic */ + bkp = topic[topic_len]; + /* Zero terminate string */ + topic[topic_len] = 0; + /* Payload data remaining in receive buffer */ + payload_length = length - after_topic; + payload_offset = after_topic; + + LWIP_DEBUGF(MQTT_DEBUG_TRACE, ("mqtt_incomming_publish: Received message with QoS %d at topic: %s, payload length %"U32_F"\n", + qos, topic, remaining_length + payload_length)); + if (client->pub_cb != NULL) { + client->pub_cb(client->inpub_arg, (const char *)topic, remaining_length + payload_length); + } + /* Restore byte after topic */ + topic[topic_len] = bkp; + } + if (payload_length > 0 || remaining_length == 0) { + if (length < (size_t)(payload_offset + payload_length)) { + LWIP_DEBUGF(MQTT_DEBUG_WARN,( "mqtt_message_received: Received short packet (payload)\n")); + goto out_disconnect; + } + client->data_cb(client->inpub_arg, var_hdr_payload + payload_offset, payload_length, remaining_length == 0 ? MQTT_DATA_FLAG_LAST : 0); + /* Reply if QoS > 0 */ + if (remaining_length == 0 && qos > 0) { + /* Send PUBACK for QoS 1 or PUBREC for QoS 2 */ + u8_t resp_msg = (qos == 1) ? 
MQTT_MSG_TYPE_PUBACK : MQTT_MSG_TYPE_PUBREC; + LWIP_DEBUGF(MQTT_DEBUG_TRACE, ("mqtt_incomming_publish: Sending publish response: %s with pkt_id: %d\n", + mqtt_msg_type_to_str(resp_msg), client->inpub_pkt_id)); + pub_ack_rec_rel_response(client, resp_msg, client->inpub_pkt_id, 0); + } + } + } else { + /* Get packet identifier */ + pkt_id = (u16_t)var_hdr_payload[0] << 8; + pkt_id |= (u16_t)var_hdr_payload[1]; + if (pkt_id == 0) { + LWIP_DEBUGF(MQTT_DEBUG_WARN, ("mqtt_message_received: Got message with illegal packet identifier: 0\n")); + goto out_disconnect; + } + if (pkt_type == MQTT_MSG_TYPE_PUBREC) { + LWIP_DEBUGF(MQTT_DEBUG_TRACE, ("mqtt_message_received: PUBREC, sending PUBREL with pkt_id: %d\n", pkt_id)); + pub_ack_rec_rel_response(client, MQTT_MSG_TYPE_PUBREL, pkt_id, 1); + + } else if (pkt_type == MQTT_MSG_TYPE_PUBREL) { + LWIP_DEBUGF(MQTT_DEBUG_TRACE, ("mqtt_message_received: PUBREL, sending PUBCOMP response with pkt_id: %d\n", pkt_id)); + pub_ack_rec_rel_response(client, MQTT_MSG_TYPE_PUBCOMP, pkt_id, 0); + + } else if (pkt_type == MQTT_MSG_TYPE_SUBACK || pkt_type == MQTT_MSG_TYPE_UNSUBACK || + pkt_type == MQTT_MSG_TYPE_PUBCOMP || pkt_type == MQTT_MSG_TYPE_PUBACK) { + struct mqtt_request_t *r = mqtt_take_request(&client->pend_req_queue, pkt_id); + if (r != NULL) { + LWIP_DEBUGF(MQTT_DEBUG_TRACE, ("mqtt_message_received: %s response with id %d\n", mqtt_msg_type_to_str(pkt_type), pkt_id)); + if (pkt_type == MQTT_MSG_TYPE_SUBACK) { + if (length < 3) { + LWIP_DEBUGF(MQTT_DEBUG_WARN, ("mqtt_message_received: To small SUBACK packet\n")); + goto out_disconnect; + } else { + mqtt_incomming_suback(r, var_hdr_payload[2]); + } + } else if (r->cb != NULL) { + r->cb(r->arg, ERR_OK); + } + mqtt_delete_request(r); + } else { + LWIP_DEBUGF(MQTT_DEBUG_WARN, ( "mqtt_message_received: Received %s reply, with wrong pkt_id: %d\n", mqtt_msg_type_to_str(pkt_type), pkt_id)); + } + } else { + LWIP_DEBUGF(MQTT_DEBUG_WARN, ( "mqtt_message_received: Received unknown message type: %d\n", pkt_type)); + goto out_disconnect; + } + } + return res; +out_disconnect: + return MQTT_CONNECT_DISCONNECTED; +} + + +/** + * MQTT incoming message parser + * @param client MQTT client + * @param p PBUF chain of received data + * @return Connection status + */ +static mqtt_connection_status_t +mqtt_parse_incoming(mqtt_client_t *client, struct pbuf *p) +{ + u16_t in_offset = 0; + u32_t msg_rem_len = 0; + u8_t fixed_hdr_idx = 0; + u8_t b = 0; + + while (p->tot_len > in_offset) { + /* We ALWAYS parse the header here first. Even if the header was not + included in this segment, we re-parse it here by buffering it in + client->rx_buffer. client->msg_idx keeps track of this. */ + if ((fixed_hdr_idx < 2) || ((b & 0x80) != 0)) { + + if (fixed_hdr_idx < client->msg_idx) { + /* parse header from old pbuf (buffered in client->rx_buffer) */ + b = client->rx_buffer[fixed_hdr_idx]; + } else { + /* parse header from this pbuf and save it in client->rx_buffer in case + it comes in segmented */ + b = pbuf_get_at(p, in_offset++); + client->rx_buffer[client->msg_idx++] = b; + } + fixed_hdr_idx++; + + if (fixed_hdr_idx >= 2) { + /* fixed header contains at least 2 bytes but can contain more, depending on + 'remaining length'. All bytes but the last of this have 0x80 set to + indicate more bytes are coming. 
*/ + msg_rem_len |= (u32_t)(b & 0x7f) << ((fixed_hdr_idx - 2) * 7); + if ((b & 0x80) == 0) { + /* fixed header is done */ + LWIP_DEBUGF(MQTT_DEBUG_TRACE, ("mqtt_parse_incoming: Remaining length after fixed header: %"U32_F"\n", msg_rem_len)); + if (msg_rem_len == 0) { + /* Complete message with no extra headers of payload received */ + mqtt_message_received(client, fixed_hdr_idx, 0, 0); + client->msg_idx = 0; + fixed_hdr_idx = 0; + } else { + /* Bytes remaining in message (changes remaining length if this is + not the first segment of this message) */ + msg_rem_len = (msg_rem_len + fixed_hdr_idx) - client->msg_idx; + } + } + } + } else { + /* Fixed header has been parsed, parse variable header */ + u16_t cpy_len, cpy_start, buffer_space; + + cpy_start = (client->msg_idx - fixed_hdr_idx) % (MQTT_VAR_HEADER_BUFFER_LEN - fixed_hdr_idx) + fixed_hdr_idx; + + /* Allow to copy the lesser one of available length in input data or bytes remaining in message */ + cpy_len = (u16_t)LWIP_MIN((u16_t)(p->tot_len - in_offset), msg_rem_len); + + /* Limit to available space in buffer */ + buffer_space = MQTT_VAR_HEADER_BUFFER_LEN - cpy_start; + if (cpy_len > buffer_space) { + cpy_len = buffer_space; + } + pbuf_copy_partial(p, client->rx_buffer + cpy_start, cpy_len, in_offset); + + /* Advance get and put indexes */ + client->msg_idx += cpy_len; + in_offset += cpy_len; + msg_rem_len -= cpy_len; + + LWIP_DEBUGF(MQTT_DEBUG_TRACE, ("mqtt_parse_incoming: msg_idx: %"U32_F", cpy_len: %"U16_F", remaining %"U32_F"\n", client->msg_idx, cpy_len, msg_rem_len)); + if ((msg_rem_len == 0) || (cpy_len == buffer_space)) { + /* Whole message received or buffer is full */ + mqtt_connection_status_t res = mqtt_message_received(client, fixed_hdr_idx, (cpy_start + cpy_len) - fixed_hdr_idx, msg_rem_len); + if (res != MQTT_CONNECT_ACCEPTED) { + return res; + } + if (msg_rem_len == 0) { + /* Reset parser state */ + client->msg_idx = 0; + /* msg_tot_len = 0; */ + fixed_hdr_idx = 0; + } + } + } + } + return MQTT_CONNECT_ACCEPTED; +} + + +/** + * TCP received callback function. @see tcp_recv_fn + * @param arg MQTT client + * @param p PBUF chain of received data + * @param err Passed as return value if not ERR_OK + * @return ERR_OK or err passed into callback + */ +static err_t +mqtt_tcp_recv_cb(void *arg, struct altcp_pcb *pcb, struct pbuf *p, err_t err) +{ + mqtt_client_t *client = (mqtt_client_t *)arg; + LWIP_ASSERT("mqtt_tcp_recv_cb: client != NULL", client != NULL); + LWIP_ASSERT("mqtt_tcp_recv_cb: client->conn == pcb", client->conn == pcb); + + if (p == NULL) { + LWIP_DEBUGF(MQTT_DEBUG_TRACE, ("mqtt_tcp_recv_cb: Recv pbuf=NULL, remote has closed connection\n")); + mqtt_close(client, MQTT_CONNECT_DISCONNECTED); + } else { + mqtt_connection_status_t res; + if (err != ERR_OK) { + LWIP_DEBUGF(MQTT_DEBUG_WARN, ("mqtt_tcp_recv_cb: Recv err=%d\n", err)); + pbuf_free(p); + return err; + } + + /* Tell remote that data has been received */ + altcp_recved(pcb, p->tot_len); + res = mqtt_parse_incoming(client, p); + pbuf_free(p); + + if (res != MQTT_CONNECT_ACCEPTED) { + mqtt_close(client, res); + } + /* If keep alive functionality is used */ + if (client->keep_alive != 0) { + /* Reset server alive watchdog */ + client->server_watchdog = 0; + } + + } + return ERR_OK; +} + + +/** + * TCP data sent callback function. 
@see tcp_sent_fn + * @param arg MQTT client + * @param tpcb TCP connection handle + * @param len Number of bytes sent + * @return ERR_OK + */ +static err_t +mqtt_tcp_sent_cb(void *arg, struct altcp_pcb *tpcb, u16_t len) +{ + mqtt_client_t *client = (mqtt_client_t *)arg; + + LWIP_UNUSED_ARG(tpcb); + LWIP_UNUSED_ARG(len); + + if (client->conn_state == MQTT_CONNECTED) { + struct mqtt_request_t *r; + + /* Reset keep-alive send timer and server watchdog */ + client->cyclic_tick = 0; + client->server_watchdog = 0; + /* QoS 0 publish has no response from server, so call its callbacks here */ + while ((r = mqtt_take_request(&client->pend_req_queue, 0)) != NULL) { + LWIP_DEBUGF(MQTT_DEBUG_TRACE, ("mqtt_tcp_sent_cb: Calling QoS 0 publish complete callback\n")); + if (r->cb != NULL) { + r->cb(r->arg, ERR_OK); + } + mqtt_delete_request(r); + } + /* Try send any remaining buffers from output queue */ + mqtt_output_send(&client->output, client->conn); + } + return ERR_OK; +} + +/** + * TCP error callback function. @see tcp_err_fn + * @param arg MQTT client + * @param err Error encountered + */ +static void +mqtt_tcp_err_cb(void *arg, err_t err) +{ + mqtt_client_t *client = (mqtt_client_t *)arg; + LWIP_UNUSED_ARG(err); /* only used for debug output */ + LWIP_DEBUGF(MQTT_DEBUG_TRACE, ("mqtt_tcp_err_cb: TCP error callback: error %d, arg: %p\n", err, arg)); + LWIP_ASSERT("mqtt_tcp_err_cb: client != NULL", client != NULL); + /* Set conn to null before calling close as pcb is already deallocated*/ + client->conn = 0; + mqtt_close(client, MQTT_CONNECT_DISCONNECTED); +} + +/** + * TCP poll callback function. @see tcp_poll_fn + * @param arg MQTT client + * @param tpcb TCP connection handle + * @return err ERR_OK + */ +static err_t +mqtt_tcp_poll_cb(void *arg, struct altcp_pcb *tpcb) +{ + mqtt_client_t *client = (mqtt_client_t *)arg; + if (client->conn_state == MQTT_CONNECTED) { + /* Try send any remaining buffers from output queue */ + mqtt_output_send(&client->output, tpcb); + } + return ERR_OK; +} + +/** + * TCP connect callback function. @see tcp_connected_fn + * @param arg MQTT client + * @param err Always ERR_OK, mqtt_tcp_err_cb is called in case of error + * @return ERR_OK + */ +static err_t +mqtt_tcp_connect_cb(void *arg, struct altcp_pcb *tpcb, err_t err) +{ + mqtt_client_t *client = (mqtt_client_t *)arg; + + if (err != ERR_OK) { + LWIP_DEBUGF(MQTT_DEBUG_WARN, ("mqtt_tcp_connect_cb: TCP connect error %d\n", err)); + return err; + } + + /* Initiate receiver state */ + client->msg_idx = 0; + + /* Setup TCP callbacks */ + altcp_recv(tpcb, mqtt_tcp_recv_cb); + altcp_sent(tpcb, mqtt_tcp_sent_cb); + altcp_poll(tpcb, mqtt_tcp_poll_cb, 2); + + LWIP_DEBUGF(MQTT_DEBUG_TRACE, ("mqtt_tcp_connect_cb: TCP connection established to server\n")); + /* Enter MQTT connect state */ + client->conn_state = MQTT_CONNECTING; + + /* Start cyclic timer */ + sys_timeout(MQTT_CYCLIC_TIMER_INTERVAL * 1000, mqtt_cyclic_timer, client); + client->cyclic_tick = 0; + + /* Start transmission from output queue, connect message is the first one out*/ + mqtt_output_send(&client->output, client->conn); + + return ERR_OK; +} + + + +/*---------------------------------------------------------------------------------------------------- */ +/* Public API */ + + +/** + * @ingroup mqtt + * MQTT publish function. 
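+ *
+ * A minimal usage sketch, assuming an already connected client; the callback,
+ * function and topic names below are examples only:\code{.c}
+ * static void example_publish_cb(void *arg, err_t err)
+ * {
+ *   if (err != ERR_OK) {
+ *     printf("Publish result: %d\n", err);
+ *   }
+ * }
+ *
+ * void example_publish(mqtt_client_t *client)
+ * {
+ *   const char *payload = "hello";
+ *   err_t err = mqtt_publish(client, "example/topic", payload, (u16_t)strlen(payload),
+ *                            1, 0, example_publish_cb, NULL);
+ *   if (err != ERR_OK) {
+ *     printf("mqtt_publish returned %d\n", err);
+ *   }
+ * }
+ * \endcode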
+ * @param client MQTT client + * @param topic Publish topic string + * @param payload Data to publish (NULL is allowed) + * @param payload_length Length of payload (0 is allowed) + * @param qos Quality of service, 0 1 or 2 + * @param retain MQTT retain flag + * @param cb Callback to call when publish is complete or has timed out + * @param arg User supplied argument to publish callback + * @return ERR_OK if successful + * ERR_CONN if client is disconnected + * ERR_MEM if short on memory + */ +err_t +mqtt_publish(mqtt_client_t *client, const char *topic, const void *payload, u16_t payload_length, u8_t qos, u8_t retain, + mqtt_request_cb_t cb, void *arg) +{ + struct mqtt_request_t *r; + u16_t pkt_id; + size_t topic_strlen; + size_t total_len; + u16_t topic_len; + u16_t remaining_length; + + LWIP_ASSERT_CORE_LOCKED(); + LWIP_ASSERT("mqtt_publish: client != NULL", client); + LWIP_ASSERT("mqtt_publish: topic != NULL", topic); + LWIP_ERROR("mqtt_publish: TCP disconnected", (client->conn_state != TCP_DISCONNECTED), return ERR_CONN); + + topic_strlen = strlen(topic); + LWIP_ERROR("mqtt_publish: topic length overflow", (topic_strlen <= (0xFFFF - 2)), return ERR_ARG); + topic_len = (u16_t)topic_strlen; + total_len = 2 + topic_len + payload_length; + + if (qos > 0) { + total_len += 2; + /* Generate pkt_id id for QoS1 and 2 */ + pkt_id = msg_generate_packet_id(client); + } else { + /* Use reserved value pkt_id 0 for QoS 0 in request handle */ + pkt_id = 0; + } + LWIP_ERROR("mqtt_publish: total length overflow", (total_len <= 0xFFFF), return ERR_ARG); + remaining_length = (u16_t)total_len; + + LWIP_DEBUGF(MQTT_DEBUG_TRACE, ("mqtt_publish: Publish with payload length %d to topic \"%s\"\n", payload_length, topic)); + + r = mqtt_create_request(client->req_list, LWIP_ARRAYSIZE(client->req_list), pkt_id, cb, arg); + if (r == NULL) { + return ERR_MEM; + } + + if (mqtt_output_check_space(&client->output, remaining_length) == 0) { + mqtt_delete_request(r); + return ERR_MEM; + } + /* Append fixed header */ + mqtt_output_append_fixed_header(&client->output, MQTT_MSG_TYPE_PUBLISH, 0, qos, retain, remaining_length); + + /* Append Topic */ + mqtt_output_append_string(&client->output, topic, topic_len); + + /* Append packet if for QoS 1 and 2*/ + if (qos > 0) { + mqtt_output_append_u16(&client->output, pkt_id); + } + + /* Append optional publish payload */ + if ((payload != NULL) && (payload_length > 0)) { + mqtt_output_append_buf(&client->output, payload, payload_length); + } + + mqtt_append_request(&client->pend_req_queue, r); + mqtt_output_send(&client->output, client->conn); + return ERR_OK; +} + + +/** + * @ingroup mqtt + * MQTT subscribe/unsubscribe function. 
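+ *
+ * A minimal usage sketch, assuming a connected client; the callback, function
+ * and topic names below are examples only (sub = 1 subscribes, sub = 0
+ * unsubscribes):\code{.c}
+ * static void example_sub_request_cb(void *arg, err_t err)
+ * {
+ *   printf("Subscribe result: %d\n", err);
+ * }
+ *
+ * void example_subscribe(mqtt_client_t *client)
+ * {
+ *   err_t err = mqtt_sub_unsub(client, "example/topic", 1,
+ *                              example_sub_request_cb, NULL, 1);
+ *   if (err != ERR_OK) {
+ *     printf("mqtt_sub_unsub returned %d\n", err);
+ *   }
+ * }
+ * \endcode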
+ * @param client MQTT client + * @param topic topic to subscribe to + * @param qos Quality of service, 0 1 or 2 (only used for subscribe) + * @param cb Callback to call when subscribe/unsubscribe reponse is received + * @param arg User supplied argument to publish callback + * @param sub 1 for subscribe, 0 for unsubscribe + * @return ERR_OK if successful, @see err_t enum for other results + */ +err_t +mqtt_sub_unsub(mqtt_client_t *client, const char *topic, u8_t qos, mqtt_request_cb_t cb, void *arg, u8_t sub) +{ + size_t topic_strlen; + size_t total_len; + u16_t topic_len; + u16_t remaining_length; + u16_t pkt_id; + struct mqtt_request_t *r; + + LWIP_ASSERT_CORE_LOCKED(); + LWIP_ASSERT("mqtt_sub_unsub: client != NULL", client); + LWIP_ASSERT("mqtt_sub_unsub: topic != NULL", topic); + + topic_strlen = strlen(topic); + LWIP_ERROR("mqtt_sub_unsub: topic length overflow", (topic_strlen <= (0xFFFF - 2)), return ERR_ARG); + topic_len = (u16_t)topic_strlen; + /* Topic string, pkt_id, qos for subscribe */ + total_len = topic_len + 2 + 2 + (sub != 0); + LWIP_ERROR("mqtt_sub_unsub: total length overflow", (total_len <= 0xFFFF), return ERR_ARG); + remaining_length = (u16_t)total_len; + + LWIP_ASSERT("mqtt_sub_unsub: qos < 3", qos < 3); + if (client->conn_state == TCP_DISCONNECTED) { + LWIP_DEBUGF(MQTT_DEBUG_WARN, ("mqtt_sub_unsub: Can not (un)subscribe in disconnected state\n")); + return ERR_CONN; + } + + pkt_id = msg_generate_packet_id(client); + r = mqtt_create_request(client->req_list, LWIP_ARRAYSIZE(client->req_list), pkt_id, cb, arg); + if (r == NULL) { + return ERR_MEM; + } + + if (mqtt_output_check_space(&client->output, remaining_length) == 0) { + mqtt_delete_request(r); + return ERR_MEM; + } + + LWIP_DEBUGF(MQTT_DEBUG_TRACE, ("mqtt_sub_unsub: Client (un)subscribe to topic \"%s\", id: %d\n", topic, pkt_id)); + + mqtt_output_append_fixed_header(&client->output, sub ? 
MQTT_MSG_TYPE_SUBSCRIBE : MQTT_MSG_TYPE_UNSUBSCRIBE, 0, 1, 0, remaining_length); + /* Packet id */ + mqtt_output_append_u16(&client->output, pkt_id); + /* Topic */ + mqtt_output_append_string(&client->output, topic, topic_len); + /* QoS */ + if (sub != 0) { + mqtt_output_append_u8(&client->output, LWIP_MIN(qos, 2)); + } + + mqtt_append_request(&client->pend_req_queue, r); + mqtt_output_send(&client->output, client->conn); + return ERR_OK; +} + + +/** + * @ingroup mqtt + * Set callback to handle incoming publish requests from server + * @param client MQTT client + * @param pub_cb Callback invoked when publish starts, contain topic and total length of payload + * @param data_cb Callback for each fragment of payload that arrives + * @param arg User supplied argument to both callbacks + */ +void +mqtt_set_inpub_callback(mqtt_client_t *client, mqtt_incoming_publish_cb_t pub_cb, + mqtt_incoming_data_cb_t data_cb, void *arg) +{ + LWIP_ASSERT_CORE_LOCKED(); + LWIP_ASSERT("mqtt_set_inpub_callback: client != NULL", client != NULL); + client->data_cb = data_cb; + client->pub_cb = pub_cb; + client->inpub_arg = arg; +} + +/** + * @ingroup mqtt + * Create a new MQTT client instance + * @return Pointer to instance on success, NULL otherwise + */ +mqtt_client_t * +mqtt_client_new(void) +{ + LWIP_ASSERT_CORE_LOCKED(); + return (mqtt_client_t *)mem_calloc(1, sizeof(mqtt_client_t)); +} + +/** + * @ingroup mqtt + * Free MQTT client instance + * @param client Pointer to instance to be freed + */ +void +mqtt_client_free(mqtt_client_t *client) +{ + mem_free(client); +} + +/** + * @ingroup mqtt + * Connect to MQTT server + * @param client MQTT client + * @param ip_addr Server IP + * @param port Server port + * @param cb Connection state change callback + * @param arg User supplied argument to connection callback + * @param client_info Client identification and connection options + * @return ERR_OK if successful, @see err_t enum for other results + */ +err_t +mqtt_client_connect(mqtt_client_t *client, const ip_addr_t *ip_addr, u16_t port, mqtt_connection_cb_t cb, void *arg, + const struct mqtt_connect_client_info_t *client_info) +{ + err_t err; + size_t len; + u16_t client_id_length; + /* Length is the sum of 2+"MQTT", protocol level, flags and keep alive */ + u16_t remaining_length = 2 + 4 + 1 + 1 + 2; + u8_t flags = 0, will_topic_len = 0, will_msg_len = 0; + u16_t client_user_len = 0, client_pass_len = 0; + + LWIP_ASSERT_CORE_LOCKED(); + LWIP_ASSERT("mqtt_client_connect: client != NULL", client != NULL); + LWIP_ASSERT("mqtt_client_connect: ip_addr != NULL", ip_addr != NULL); + LWIP_ASSERT("mqtt_client_connect: client_info != NULL", client_info != NULL); + LWIP_ASSERT("mqtt_client_connect: client_info->client_id != NULL", client_info->client_id != NULL); + + if (client->conn_state != TCP_DISCONNECTED) { + LWIP_DEBUGF(MQTT_DEBUG_WARN, ("mqtt_client_connect: Already connected\n")); + return ERR_ISCONN; + } + + /* Wipe clean */ + memset(client, 0, sizeof(mqtt_client_t)); + client->connect_arg = arg; + client->connect_cb = cb; + client->keep_alive = client_info->keep_alive; + mqtt_init_requests(client->req_list, LWIP_ARRAYSIZE(client->req_list)); + + /* Build connect message */ + if (client_info->will_topic != NULL && client_info->will_msg != NULL) { + flags |= MQTT_CONNECT_FLAG_WILL; + flags |= (client_info->will_qos & 3) << 3; + if (client_info->will_retain) { + flags |= MQTT_CONNECT_FLAG_WILL_RETAIN; + } + len = strlen(client_info->will_topic); + LWIP_ERROR("mqtt_client_connect: client_info->will_topic length 
overflow", len <= 0xFF, return ERR_VAL); + LWIP_ERROR("mqtt_client_connect: client_info->will_topic length must be > 0", len > 0, return ERR_VAL); + will_topic_len = (u8_t)len; + len = strlen(client_info->will_msg); + LWIP_ERROR("mqtt_client_connect: client_info->will_msg length overflow", len <= 0xFF, return ERR_VAL); + will_msg_len = (u8_t)len; + len = remaining_length + 2 + will_topic_len + 2 + will_msg_len; + LWIP_ERROR("mqtt_client_connect: remaining_length overflow", len <= 0xFFFF, return ERR_VAL); + remaining_length = (u16_t)len; + } + if (client_info->client_user != NULL) { + flags |= MQTT_CONNECT_FLAG_USERNAME; + len = strlen(client_info->client_user); + LWIP_ERROR("mqtt_client_connect: client_info->client_user length overflow", len <= 0xFFFF, return ERR_VAL); + LWIP_ERROR("mqtt_client_connect: client_info->client_user length must be > 0", len > 0, return ERR_VAL); + client_user_len = (u16_t)len; + len = remaining_length + 2 + client_user_len; + LWIP_ERROR("mqtt_client_connect: remaining_length overflow", len <= 0xFFFF, return ERR_VAL); + remaining_length = (u16_t)len; + } + if (client_info->client_pass != NULL) { + flags |= MQTT_CONNECT_FLAG_PASSWORD; + len = strlen(client_info->client_pass); + LWIP_ERROR("mqtt_client_connect: client_info->client_pass length overflow", len <= 0xFFFF, return ERR_VAL); + LWIP_ERROR("mqtt_client_connect: client_info->client_pass length must be > 0", len > 0, return ERR_VAL); + client_pass_len = (u16_t)len; + len = remaining_length + 2 + client_pass_len; + LWIP_ERROR("mqtt_client_connect: remaining_length overflow", len <= 0xFFFF, return ERR_VAL); + remaining_length = (u16_t)len; + } + + /* Don't complicate things, always connect using clean session */ + flags |= MQTT_CONNECT_FLAG_CLEAN_SESSION; + + len = strlen(client_info->client_id); + LWIP_ERROR("mqtt_client_connect: client_info->client_id length overflow", len <= 0xFFFF, return ERR_VAL); + client_id_length = (u16_t)len; + len = remaining_length + 2 + client_id_length; + LWIP_ERROR("mqtt_client_connect: remaining_length overflow", len <= 0xFFFF, return ERR_VAL); + remaining_length = (u16_t)len; + + if (mqtt_output_check_space(&client->output, remaining_length) == 0) { + return ERR_MEM; + } + +#if LWIP_ALTCP && LWIP_ALTCP_TLS + if (client_info->tls_config) { + client->conn = altcp_tls_new(client_info->tls_config, IP_GET_TYPE(ip_addr)); + } else +#endif + { + client->conn = altcp_tcp_new_ip_type(IP_GET_TYPE(ip_addr)); + } + if (client->conn == NULL) { + return ERR_MEM; + } + + /* Set arg pointer for callbacks */ + altcp_arg(client->conn, client); + /* Any local address, pick random local port number */ + err = altcp_bind(client->conn, IP_ADDR_ANY, 0); + if (err != ERR_OK) { + LWIP_DEBUGF(MQTT_DEBUG_WARN, ("mqtt_client_connect: Error binding to local ip/port, %d\n", err)); + goto tcp_fail; + } + LWIP_DEBUGF(MQTT_DEBUG_TRACE, ("mqtt_client_connect: Connecting to host: %s at port:%"U16_F"\n", ipaddr_ntoa(ip_addr), port)); + + /* Connect to server */ + err = altcp_connect(client->conn, ip_addr, port, mqtt_tcp_connect_cb); + if (err != ERR_OK) { + LWIP_DEBUGF(MQTT_DEBUG_TRACE, ("mqtt_client_connect: Error connecting to remote ip/port, %d\n", err)); + goto tcp_fail; + } + /* Set error callback */ + altcp_err(client->conn, mqtt_tcp_err_cb); + client->conn_state = TCP_CONNECTING; + + /* Append fixed header */ + mqtt_output_append_fixed_header(&client->output, MQTT_MSG_TYPE_CONNECT, 0, 0, 0, remaining_length); + /* Append Protocol string */ + mqtt_output_append_string(&client->output, "MQTT", 4); + /* 
Append Protocol level */ + mqtt_output_append_u8(&client->output, 4); + /* Append connect flags */ + mqtt_output_append_u8(&client->output, flags); + /* Append keep-alive */ + mqtt_output_append_u16(&client->output, client_info->keep_alive); + /* Append client id */ + mqtt_output_append_string(&client->output, client_info->client_id, client_id_length); + /* Append will message if used */ + if ((flags & MQTT_CONNECT_FLAG_WILL) != 0) { + mqtt_output_append_string(&client->output, client_info->will_topic, will_topic_len); + mqtt_output_append_string(&client->output, client_info->will_msg, will_msg_len); + } + /* Append user name if given */ + if ((flags & MQTT_CONNECT_FLAG_USERNAME) != 0) { + mqtt_output_append_string(&client->output, client_info->client_user, client_user_len); + } + /* Append password if given */ + if ((flags & MQTT_CONNECT_FLAG_PASSWORD) != 0) { + mqtt_output_append_string(&client->output, client_info->client_pass, client_pass_len); + } + return ERR_OK; + +tcp_fail: + altcp_abort(client->conn); + client->conn = NULL; + return err; +} + + +/** + * @ingroup mqtt + * Disconnect from MQTT server + * @param client MQTT client + */ +void +mqtt_disconnect(mqtt_client_t *client) +{ + LWIP_ASSERT_CORE_LOCKED(); + LWIP_ASSERT("mqtt_disconnect: client != NULL", client); + /* If connection in not already closed */ + if (client->conn_state != TCP_DISCONNECTED) { + /* Set conn_state before calling mqtt_close to prevent callback from being called */ + client->conn_state = TCP_DISCONNECTED; + mqtt_close(client, (mqtt_connection_status_t)0); + } +} + +/** + * @ingroup mqtt + * Check connection with server + * @param client MQTT client + * @return 1 if connected to server, 0 otherwise + */ +u8_t +mqtt_client_is_connected(mqtt_client_t *client) +{ + LWIP_ASSERT_CORE_LOCKED(); + LWIP_ASSERT("mqtt_client_is_connected: client != NULL", client); + return client->conn_state == MQTT_CONNECTED; +} + +#endif /* LWIP_TCP && LWIP_CALLBACK_API */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/altcp.c b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/altcp.c new file mode 100644 index 0000000..d42b727 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/altcp.c @@ -0,0 +1,681 @@ +/** + * @file + * @defgroup altcp Application layered TCP Functions + * @ingroup altcp_api + * + * This file contains the common functions for altcp to work. + * For more details see @ref altcp_api. + */ + +/** + * @defgroup altcp_api Application layered TCP Introduction + * @ingroup callbackstyle_api + * + * Overview + * -------- + * altcp (application layered TCP connection API; to be used from TCPIP thread) + * is an abstraction layer that prevents applications linking hard against the + * @ref tcp.h functions while providing the same functionality. It is used to + * e.g. add SSL/TLS (see LWIP_ALTCP_TLS) or proxy-connect support to an application + * written for the tcp callback API without that application knowing the + * protocol details. + * + * * This interface mimics the tcp callback API to the application while preventing + * direct linking (much like virtual functions). + * * This way, an application can make use of other application layer protocols + * on top of TCP without knowing the details (e.g. TLS, proxy connection). 
+ * * This is achieved by simply including "lwip/altcp.h" instead of "lwip/tcp.h", + * replacing "struct tcp_pcb" with "struct altcp_pcb" and prefixing all functions + * with "altcp_" instead of "tcp_". + * + * With altcp support disabled (LWIP_ALTCP==0), applications written against the + * altcp API can still be compiled but are directly linked against the tcp.h + * callback API and then cannot use layered protocols. To minimize code changes + * in this case, the use of altcp_allocators is strongly suggested. + * + * Usage + * ----- + * To make use of this API from an existing tcp raw API application: + * * Include "lwip/altcp.h" instead of "lwip/tcp.h" + * * Replace "struct tcp_pcb" with "struct altcp_pcb" + * * Prefix all called tcp API functions with "altcp_" instead of "tcp_" to link + * against the altcp functions + * * @ref altcp_new (and @ref altcp_new_ip_type/@ref altcp_new_ip6) take + * an @ref altcp_allocator_t as an argument, whereas the original tcp API + * functions take no arguments. + * * An @ref altcp_allocator_t allocator is an object that holds a pointer to an + * allocator object and a corresponding state (e.g. for TLS, the corresponding + * state may hold certificates or keys). This way, the application does not + * even need to know if it uses TLS or pure TCP, this is handled at runtime + * by passing a specific allocator. + * * An application can alternatively bind hard to the altcp_tls API by calling + * @ref altcp_tls_new or @ref altcp_tls_wrap. + * * The TLS layer is not directly implemented by lwIP, but a port to mbedTLS is + * provided. + * * Another altcp layer is proxy-connect to use TLS behind a HTTP proxy (see + * @ref altcp_proxyconnect.h) + * + * altcp_allocator_t + * ----------------- + * An altcp allocator is created by the application by combining an allocator + * callback function and a corresponding state, e.g.:\code{.c} + * static const unsigned char cert[] = {0x2D, ... (see mbedTLS doc for how to create this)}; + * struct altcp_tls_config * conf = altcp_tls_create_config_client(cert, sizeof(cert)); + * altcp_allocator_t tls_allocator = { + * altcp_tls_alloc, conf + * }; + * \endcode + * + * + * struct altcp_tls_config + * ----------------------- + * The struct altcp_tls_config holds state that is needed to create new TLS client + * or server connections (e.g. certificates and private keys). + * + * It is not defined by lwIP itself but by the TLS port (e.g. altcp_tls to mbedTLS + * adaption). However, the parameters used to create it are defined in @ref + * altcp_tls.h (see @ref altcp_tls_create_config_server_privkey_cert for servers + * and @ref altcp_tls_create_config_client/@ref altcp_tls_create_config_client_2wayauth + * for clients). + * + * For mbedTLS, ensure that certificates can be parsed by 'mbedtls_x509_crt_parse()' and + * private keys can be parsed by 'mbedtls_pk_parse_key()'. + */ + +/* + * Copyright (c) 2017 Simon Goldschmidt + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. 
The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Simon Goldschmidt + * + */ + +#include "lwip/opt.h" + +#if LWIP_ALTCP /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/altcp.h" +#include "lwip/priv/altcp_priv.h" +#include "lwip/altcp_tcp.h" +#include "lwip/tcp.h" +#include "lwip/mem.h" + +#include + +extern const struct altcp_functions altcp_tcp_functions; + +/** + * For altcp layer implementations only: allocate a new struct altcp_pcb from the pool + * and zero the memory + */ +struct altcp_pcb * +altcp_alloc(void) +{ + struct altcp_pcb *ret = (struct altcp_pcb *)memp_malloc(MEMP_ALTCP_PCB); + if (ret != NULL) { + memset(ret, 0, sizeof(struct altcp_pcb)); + } + return ret; +} + +/** + * For altcp layer implementations only: return a struct altcp_pcb to the pool + */ +void +altcp_free(struct altcp_pcb *conn) +{ + if (conn) { + if (conn->fns && conn->fns->dealloc) { + conn->fns->dealloc(conn); + } + memp_free(MEMP_ALTCP_PCB, conn); + } +} + +/** + * @ingroup altcp + * altcp_new_ip6: @ref altcp_new for IPv6 + */ +struct altcp_pcb * +altcp_new_ip6(altcp_allocator_t *allocator) +{ + return altcp_new_ip_type(allocator, IPADDR_TYPE_V6); +} + +/** + * @ingroup altcp + * altcp_new: @ref altcp_new for IPv4 + */ +struct altcp_pcb * +altcp_new(altcp_allocator_t *allocator) +{ + return altcp_new_ip_type(allocator, IPADDR_TYPE_V4); +} + +/** + * @ingroup altcp + * altcp_new_ip_type: called by applications to allocate a new pcb with the help of an + * allocator function. 
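+ *
+ * A minimal usage sketch, assuming the plain-TCP allocator function
+ * altcp_tcp_alloc() from altcp_tcp.h; a TLS allocator (as in the
+ * altcp_allocator_t example earlier in this file's documentation) is passed
+ * the same way:\code{.c}
+ * altcp_allocator_t tcp_allocator = { altcp_tcp_alloc, NULL };
+ * struct altcp_pcb *pcb = altcp_new_ip_type(&tcp_allocator, IPADDR_TYPE_V4);
+ * if (pcb == NULL) {
+ *   // allocation failed
+ * }
+ * \endcode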
+ * + * @param allocator allocator function and argument + * @param ip_type IP version of the pcb (@ref lwip_ip_addr_type) + * @return a new altcp_pcb or NULL on error + */ +struct altcp_pcb * +altcp_new_ip_type(altcp_allocator_t *allocator, u8_t ip_type) +{ + struct altcp_pcb *conn; + if (allocator == NULL) { + /* no allocator given, create a simple TCP connection */ + return altcp_tcp_new_ip_type(ip_type); + } + if (allocator->alloc == NULL) { + /* illegal allocator */ + return NULL; + } + conn = allocator->alloc(allocator->arg, ip_type); + if (conn == NULL) { + /* allocation failed */ + return NULL; + } + return conn; +} + +/** + * @ingroup altcp + * @see tcp_arg() + */ +void +altcp_arg(struct altcp_pcb *conn, void *arg) +{ + if (conn) { + conn->arg = arg; + } +} + +/** + * @ingroup altcp + * @see tcp_accept() + */ +void +altcp_accept(struct altcp_pcb *conn, altcp_accept_fn accept) +{ + if (conn != NULL) { + conn->accept = accept; + } +} + +/** + * @ingroup altcp + * @see tcp_recv() + */ +void +altcp_recv(struct altcp_pcb *conn, altcp_recv_fn recv) +{ + if (conn) { + conn->recv = recv; + } +} + +/** + * @ingroup altcp + * @see tcp_sent() + */ +void +altcp_sent(struct altcp_pcb *conn, altcp_sent_fn sent) +{ + if (conn) { + conn->sent = sent; + } +} + +/** + * @ingroup altcp + * @see tcp_poll() + */ +void +altcp_poll(struct altcp_pcb *conn, altcp_poll_fn poll, u8_t interval) +{ + if (conn) { + conn->poll = poll; + conn->pollinterval = interval; + if (conn->fns && conn->fns->set_poll) { + conn->fns->set_poll(conn, interval); + } + } +} + +/** + * @ingroup altcp + * @see tcp_err() + */ +void +altcp_err(struct altcp_pcb *conn, altcp_err_fn err) +{ + if (conn) { + conn->err = err; + } +} + +/* Generic functions calling the "virtual" ones */ + +/** + * @ingroup altcp + * @see tcp_recved() + */ +void +altcp_recved(struct altcp_pcb *conn, u16_t len) +{ + if (conn && conn->fns && conn->fns->recved) { + conn->fns->recved(conn, len); + } +} + +/** + * @ingroup altcp + * @see tcp_bind() + */ +err_t +altcp_bind(struct altcp_pcb *conn, const ip_addr_t *ipaddr, u16_t port) +{ + if (conn && conn->fns && conn->fns->bind) { + return conn->fns->bind(conn, ipaddr, port); + } + return ERR_VAL; +} + +/** + * @ingroup altcp + * @see tcp_connect() + */ +err_t +altcp_connect(struct altcp_pcb *conn, const ip_addr_t *ipaddr, u16_t port, altcp_connected_fn connected) +{ + if (conn && conn->fns && conn->fns->connect) { + return conn->fns->connect(conn, ipaddr, port, connected); + } + return ERR_VAL; +} + +/** + * @ingroup altcp + * @see tcp_listen_with_backlog_and_err() + */ +struct altcp_pcb * +altcp_listen_with_backlog_and_err(struct altcp_pcb *conn, u8_t backlog, err_t *err) +{ + if (conn && conn->fns && conn->fns->listen) { + return conn->fns->listen(conn, backlog, err); + } + return NULL; +} + +/** + * @ingroup altcp + * @see tcp_abort() + */ +void +altcp_abort(struct altcp_pcb *conn) +{ + if (conn && conn->fns && conn->fns->abort) { + conn->fns->abort(conn); + } +} + +/** + * @ingroup altcp + * @see tcp_close() + */ +err_t +altcp_close(struct altcp_pcb *conn) +{ + if (conn && conn->fns && conn->fns->close) { + return conn->fns->close(conn); + } + return ERR_VAL; +} + +/** + * @ingroup altcp + * @see tcp_shutdown() + */ +err_t +altcp_shutdown(struct altcp_pcb *conn, int shut_rx, int shut_tx) +{ + if (conn && conn->fns && conn->fns->shutdown) { + return conn->fns->shutdown(conn, shut_rx, shut_tx); + } + return ERR_VAL; +} + +/** + * @ingroup altcp + * @see tcp_write() + */ +err_t +altcp_write(struct altcp_pcb 
*conn, const void *dataptr, u16_t len, u8_t apiflags) +{ + if (conn && conn->fns && conn->fns->write) { + return conn->fns->write(conn, dataptr, len, apiflags); + } + return ERR_VAL; +} + +/** + * @ingroup altcp + * @see tcp_output() + */ +err_t +altcp_output(struct altcp_pcb *conn) +{ + if (conn && conn->fns && conn->fns->output) { + return conn->fns->output(conn); + } + return ERR_VAL; +} + +/** + * @ingroup altcp + * @see tcp_mss() + */ +u16_t +altcp_mss(struct altcp_pcb *conn) +{ + if (conn && conn->fns && conn->fns->mss) { + return conn->fns->mss(conn); + } + return 0; +} + +/** + * @ingroup altcp + * @see tcp_sndbuf() + */ +u16_t +altcp_sndbuf(struct altcp_pcb *conn) +{ + if (conn && conn->fns && conn->fns->sndbuf) { + return conn->fns->sndbuf(conn); + } + return 0; +} + +/** + * @ingroup altcp + * @see tcp_sndqueuelen() + */ +u16_t +altcp_sndqueuelen(struct altcp_pcb *conn) +{ + if (conn && conn->fns && conn->fns->sndqueuelen) { + return conn->fns->sndqueuelen(conn); + } + return 0; +} + +void +altcp_nagle_disable(struct altcp_pcb *conn) +{ + if (conn && conn->fns && conn->fns->nagle_disable) { + conn->fns->nagle_disable(conn); + } +} + +void +altcp_nagle_enable(struct altcp_pcb *conn) +{ + if (conn && conn->fns && conn->fns->nagle_enable) { + conn->fns->nagle_enable(conn); + } +} + +int +altcp_nagle_disabled(struct altcp_pcb *conn) +{ + if (conn && conn->fns && conn->fns->nagle_disabled) { + return conn->fns->nagle_disabled(conn); + } + return 0; +} + +/** + * @ingroup altcp + * @see tcp_setprio() + */ +void +altcp_setprio(struct altcp_pcb *conn, u8_t prio) +{ + if (conn && conn->fns && conn->fns->setprio) { + conn->fns->setprio(conn, prio); + } +} + +err_t +altcp_get_tcp_addrinfo(struct altcp_pcb *conn, int local, ip_addr_t *addr, u16_t *port) +{ + if (conn && conn->fns && conn->fns->addrinfo) { + return conn->fns->addrinfo(conn, local, addr, port); + } + return ERR_VAL; +} + +ip_addr_t * +altcp_get_ip(struct altcp_pcb *conn, int local) +{ + if (conn && conn->fns && conn->fns->getip) { + return conn->fns->getip(conn, local); + } + return NULL; +} + +u16_t +altcp_get_port(struct altcp_pcb *conn, int local) +{ + if (conn && conn->fns && conn->fns->getport) { + return conn->fns->getport(conn, local); + } + return 0; +} + +#ifdef LWIP_DEBUG +enum tcp_state +altcp_dbg_get_tcp_state(struct altcp_pcb *conn) +{ + if (conn && conn->fns && conn->fns->dbg_get_tcp_state) { + return conn->fns->dbg_get_tcp_state(conn); + } + return CLOSED; +} +#endif + +/* Default implementations for the "virtual" functions */ + +void +altcp_default_set_poll(struct altcp_pcb *conn, u8_t interval) +{ + if (conn && conn->inner_conn) { + altcp_poll(conn->inner_conn, conn->poll, interval); + } +} + +void +altcp_default_recved(struct altcp_pcb *conn, u16_t len) +{ + if (conn && conn->inner_conn) { + altcp_recved(conn->inner_conn, len); + } +} + +err_t +altcp_default_bind(struct altcp_pcb *conn, const ip_addr_t *ipaddr, u16_t port) +{ + if (conn && conn->inner_conn) { + return altcp_bind(conn->inner_conn, ipaddr, port); + } + return ERR_VAL; +} + +err_t +altcp_default_shutdown(struct altcp_pcb *conn, int shut_rx, int shut_tx) +{ + if (conn) { + if (shut_rx && shut_tx && conn->fns && conn->fns->close) { + /* default shutdown for both sides is close */ + return conn->fns->close(conn); + } + if (conn->inner_conn) { + return altcp_shutdown(conn->inner_conn, shut_rx, shut_tx); + } + } + return ERR_VAL; +} + +err_t +altcp_default_write(struct altcp_pcb *conn, const void *dataptr, u16_t len, u8_t apiflags) +{ + if (conn && 
conn->inner_conn) { + return altcp_write(conn->inner_conn, dataptr, len, apiflags); + } + return ERR_VAL; +} + +err_t +altcp_default_output(struct altcp_pcb *conn) +{ + if (conn && conn->inner_conn) { + return altcp_output(conn->inner_conn); + } + return ERR_VAL; +} + +u16_t +altcp_default_mss(struct altcp_pcb *conn) +{ + if (conn && conn->inner_conn) { + return altcp_mss(conn->inner_conn); + } + return 0; +} + +u16_t +altcp_default_sndbuf(struct altcp_pcb *conn) +{ + if (conn && conn->inner_conn) { + return altcp_sndbuf(conn->inner_conn); + } + return 0; +} + +u16_t +altcp_default_sndqueuelen(struct altcp_pcb *conn) +{ + if (conn && conn->inner_conn) { + return altcp_sndqueuelen(conn->inner_conn); + } + return 0; +} + +void +altcp_default_nagle_disable(struct altcp_pcb *conn) +{ + if (conn && conn->inner_conn) { + altcp_nagle_disable(conn->inner_conn); + } +} + +void +altcp_default_nagle_enable(struct altcp_pcb *conn) +{ + if (conn && conn->inner_conn) { + altcp_nagle_enable(conn->inner_conn); + } +} + +int +altcp_default_nagle_disabled(struct altcp_pcb *conn) +{ + if (conn && conn->inner_conn) { + return altcp_nagle_disabled(conn->inner_conn); + } + return 0; +} + +void +altcp_default_setprio(struct altcp_pcb *conn, u8_t prio) +{ + if (conn && conn->inner_conn) { + altcp_setprio(conn->inner_conn, prio); + } +} + +void +altcp_default_dealloc(struct altcp_pcb *conn) +{ + LWIP_UNUSED_ARG(conn); + /* nothing to do */ +} + +err_t +altcp_default_get_tcp_addrinfo(struct altcp_pcb *conn, int local, ip_addr_t *addr, u16_t *port) +{ + if (conn && conn->inner_conn) { + return altcp_get_tcp_addrinfo(conn->inner_conn, local, addr, port); + } + return ERR_VAL; +} + +ip_addr_t * +altcp_default_get_ip(struct altcp_pcb *conn, int local) +{ + if (conn && conn->inner_conn) { + return altcp_get_ip(conn->inner_conn, local); + } + return NULL; +} + +u16_t +altcp_default_get_port(struct altcp_pcb *conn, int local) +{ + if (conn && conn->inner_conn) { + return altcp_get_port(conn->inner_conn, local); + } + return 0; +} + +#ifdef LWIP_DEBUG +enum tcp_state +altcp_default_dbg_get_tcp_state(struct altcp_pcb *conn) +{ + if (conn && conn->inner_conn) { + return altcp_dbg_get_tcp_state(conn->inner_conn); + } + return CLOSED; +} +#endif + + +#endif /* LWIP_ALTCP */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/altcp_alloc.c b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/altcp_alloc.c new file mode 100644 index 0000000..8c2390e --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/altcp_alloc.c @@ -0,0 +1,87 @@ +/** + * @file + * Application layered TCP connection API (to be used from TCPIP thread)\n + * This interface mimics the tcp callback API to the application while preventing + * direct linking (much like virtual functions). + * This way, an application can make use of other application layer protocols + * on top of TCP without knowing the details (e.g. TLS, proxy connection). + * + * This file contains allocation implementation that combine several layers. + */ + +/* + * Copyright (c) 2017 Simon Goldschmidt + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Simon Goldschmidt + * + */ + +#include "lwip/opt.h" + +#if LWIP_ALTCP /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/altcp.h" +#include "lwip/altcp_tcp.h" +#include "lwip/altcp_tls.h" +#include "lwip/priv/altcp_priv.h" +#include "lwip/mem.h" + +#include + +#if LWIP_ALTCP_TLS + +/** This standard allocator function creates an altcp pcb for + * TLS over TCP */ +struct altcp_pcb * +altcp_tls_new(struct altcp_tls_config *config, u8_t ip_type) +{ + struct altcp_pcb *inner_conn, *ret; + LWIP_UNUSED_ARG(ip_type); + + inner_conn = altcp_tcp_new_ip_type(ip_type); + if (inner_conn == NULL) { + return NULL; + } + ret = altcp_tls_wrap(config, inner_conn); + if (ret == NULL) { + altcp_close(inner_conn); + } + return ret; +} + +/** This standard allocator function creates an altcp pcb for + * TLS over TCP */ +struct altcp_pcb * +altcp_tls_alloc(void *arg, u8_t ip_type) +{ + return altcp_tls_new((struct altcp_tls_config *)arg, ip_type); +} + +#endif /* LWIP_ALTCP_TLS */ + +#endif /* LWIP_ALTCP */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/altcp_tcp.c b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/altcp_tcp.c new file mode 100644 index 0000000..f5b8e84 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/altcp_tcp.c @@ -0,0 +1,543 @@ +/** + * @file + * Application layered TCP connection API (to be used from TCPIP thread)\n + * This interface mimics the tcp callback API to the application while preventing + * direct linking (much like virtual functions). + * This way, an application can make use of other application layer protocols + * on top of TCP without knowing the details (e.g. TLS, proxy connection). + * + * This file contains the base implementation calling into tcp. + */ + +/* + * Copyright (c) 2017 Simon Goldschmidt + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Simon Goldschmidt + * + */ + +#include "lwip/opt.h" + +#if LWIP_ALTCP /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/altcp.h" +#include "lwip/altcp_tcp.h" +#include "lwip/priv/altcp_priv.h" +#include "lwip/tcp.h" +#include "lwip/mem.h" + +#include + +#define ALTCP_TCP_ASSERT_CONN(conn) do { \ + LWIP_ASSERT("conn->inner_conn == NULL", (conn)->inner_conn == NULL); \ + LWIP_UNUSED_ARG(conn); /* for LWIP_NOASSERT */ } while(0) +#define ALTCP_TCP_ASSERT_CONN_PCB(conn, tpcb) do { \ + LWIP_ASSERT("pcb mismatch", (conn)->state == tpcb); \ + LWIP_UNUSED_ARG(tpcb); /* for LWIP_NOASSERT */ \ + ALTCP_TCP_ASSERT_CONN(conn); } while(0) + + +/* Variable prototype, the actual declaration is at the end of this file + since it contains pointers to static functions declared here */ +extern const struct altcp_functions altcp_tcp_functions; + +static void altcp_tcp_setup(struct altcp_pcb *conn, struct tcp_pcb *tpcb); + +/* callback functions for TCP */ +static err_t +altcp_tcp_accept(void *arg, struct tcp_pcb *new_tpcb, err_t err) +{ + struct altcp_pcb *listen_conn = (struct altcp_pcb *)arg; + if (listen_conn && listen_conn->accept) { + /* create a new altcp_conn to pass to the next 'accept' callback */ + struct altcp_pcb *new_conn = altcp_alloc(); + if (new_conn == NULL) { + return ERR_MEM; + } + altcp_tcp_setup(new_conn, new_tpcb); + return listen_conn->accept(listen_conn->arg, new_conn, err); + } + return ERR_ARG; +} + +static err_t +altcp_tcp_connected(void *arg, struct tcp_pcb *tpcb, err_t err) +{ + struct altcp_pcb *conn = (struct altcp_pcb *)arg; + if (conn) { + ALTCP_TCP_ASSERT_CONN_PCB(conn, tpcb); + if (conn->connected) { + return conn->connected(conn->arg, conn, err); + } + } + return ERR_OK; +} + +static err_t +altcp_tcp_recv(void *arg, struct tcp_pcb *tpcb, struct pbuf *p, err_t err) +{ + struct altcp_pcb *conn = (struct altcp_pcb *)arg; + if (conn) { + ALTCP_TCP_ASSERT_CONN_PCB(conn, tpcb); + if (conn->recv) { + return conn->recv(conn->arg, conn, p, err); + } + } + if (p != NULL) { + /* prevent memory leaks */ + pbuf_free(p); + } + return ERR_OK; +} + +static err_t +altcp_tcp_sent(void *arg, struct tcp_pcb *tpcb, u16_t len) +{ + struct altcp_pcb *conn = (struct altcp_pcb *)arg; + if (conn) { + ALTCP_TCP_ASSERT_CONN_PCB(conn, tpcb); + if (conn->sent) { + return conn->sent(conn->arg, conn, len); + } + } + return 
ERR_OK; +} + +static err_t +altcp_tcp_poll(void *arg, struct tcp_pcb *tpcb) +{ + struct altcp_pcb *conn = (struct altcp_pcb *)arg; + if (conn) { + ALTCP_TCP_ASSERT_CONN_PCB(conn, tpcb); + if (conn->poll) { + return conn->poll(conn->arg, conn); + } + } + return ERR_OK; +} + +static void +altcp_tcp_err(void *arg, err_t err) +{ + struct altcp_pcb *conn = (struct altcp_pcb *)arg; + if (conn) { + conn->state = NULL; /* already freed */ + if (conn->err) { + conn->err(conn->arg, err); + } + altcp_free(conn); + } +} + +/* setup functions */ + +static void +altcp_tcp_remove_callbacks(struct tcp_pcb *tpcb) +{ + tcp_arg(tpcb, NULL); + tcp_recv(tpcb, NULL); + tcp_sent(tpcb, NULL); + tcp_err(tpcb, NULL); + tcp_poll(tpcb, NULL, tpcb->pollinterval); +} + +static void +altcp_tcp_setup_callbacks(struct altcp_pcb *conn, struct tcp_pcb *tpcb) +{ + tcp_arg(tpcb, conn); + tcp_recv(tpcb, altcp_tcp_recv); + tcp_sent(tpcb, altcp_tcp_sent); + tcp_err(tpcb, altcp_tcp_err); + /* tcp_poll is set when interval is set by application */ + /* listen is set totally different :-) */ +} + +static void +altcp_tcp_setup(struct altcp_pcb *conn, struct tcp_pcb *tpcb) +{ + altcp_tcp_setup_callbacks(conn, tpcb); + conn->state = tpcb; + conn->fns = &altcp_tcp_functions; +} + +struct altcp_pcb * +altcp_tcp_new_ip_type(u8_t ip_type) +{ + /* Allocate the tcp pcb first to invoke the priority handling code + if we're out of pcbs */ + struct tcp_pcb *tpcb = tcp_new_ip_type(ip_type); + if (tpcb != NULL) { + struct altcp_pcb *ret = altcp_alloc(); + if (ret != NULL) { + altcp_tcp_setup(ret, tpcb); + return ret; + } else { + /* altcp_pcb allocation failed -> free the tcp_pcb too */ + tcp_close(tpcb); + } + } + return NULL; +} + +/** altcp_tcp allocator function fitting to @ref altcp_allocator_t / @ref altcp_new. +* +* arg pointer is not used for TCP. 
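The allocator prototype documented above matches lwIP's altcp_allocator_t hook. A minimal sketch (not part of this patch) of how an application could create a plain-TCP altcp pcb through that hook; the function name example_new_plain_tcp_pcb is a placeholder, and the standard lwIP 2.x headers are assumed:

#include "lwip/altcp.h"
#include "lwip/altcp_tcp.h"

/* Illustrative only: wrap altcp_tcp_alloc in an allocator and create a pcb.
   This ends up equivalent to calling altcp_tcp_new_ip_type(IPADDR_TYPE_ANY). */
static struct altcp_pcb *
example_new_plain_tcp_pcb(void)
{
  altcp_allocator_t tcp_allocator = { altcp_tcp_alloc, NULL }; /* arg is unused for TCP */
  return altcp_new_ip_type(&tcp_allocator, IPADDR_TYPE_ANY);
}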
+*/ +struct altcp_pcb * +altcp_tcp_alloc(void *arg, u8_t ip_type) +{ + LWIP_UNUSED_ARG(arg); + return altcp_tcp_new_ip_type(ip_type); +} + +struct altcp_pcb * +altcp_tcp_wrap(struct tcp_pcb *tpcb) +{ + if (tpcb != NULL) { + struct altcp_pcb *ret = altcp_alloc(); + if (ret != NULL) { + altcp_tcp_setup(ret, tpcb); + return ret; + } + } + return NULL; +} + + +/* "virtual" functions calling into tcp */ +static void +altcp_tcp_set_poll(struct altcp_pcb *conn, u8_t interval) +{ + if (conn != NULL) { + struct tcp_pcb *pcb = (struct tcp_pcb *)conn->state; + ALTCP_TCP_ASSERT_CONN(conn); + tcp_poll(pcb, altcp_tcp_poll, interval); + } +} + +static void +altcp_tcp_recved(struct altcp_pcb *conn, u16_t len) +{ + if (conn != NULL) { + struct tcp_pcb *pcb = (struct tcp_pcb *)conn->state; + ALTCP_TCP_ASSERT_CONN(conn); + tcp_recved(pcb, len); + } +} + +static err_t +altcp_tcp_bind(struct altcp_pcb *conn, const ip_addr_t *ipaddr, u16_t port) +{ + struct tcp_pcb *pcb; + if (conn == NULL) { + return ERR_VAL; + } + ALTCP_TCP_ASSERT_CONN(conn); + pcb = (struct tcp_pcb *)conn->state; + return tcp_bind(pcb, ipaddr, port); +} + +static err_t +altcp_tcp_connect(struct altcp_pcb *conn, const ip_addr_t *ipaddr, u16_t port, altcp_connected_fn connected) +{ + struct tcp_pcb *pcb; + if (conn == NULL) { + return ERR_VAL; + } + ALTCP_TCP_ASSERT_CONN(conn); + conn->connected = connected; + pcb = (struct tcp_pcb *)conn->state; + return tcp_connect(pcb, ipaddr, port, altcp_tcp_connected); +} + +static struct altcp_pcb * +altcp_tcp_listen(struct altcp_pcb *conn, u8_t backlog, err_t *err) +{ + struct tcp_pcb *pcb; + struct tcp_pcb *lpcb; + if (conn == NULL) { + return NULL; + } + ALTCP_TCP_ASSERT_CONN(conn); + pcb = (struct tcp_pcb *)conn->state; + lpcb = tcp_listen_with_backlog_and_err(pcb, backlog, err); + if (lpcb != NULL) { + conn->state = lpcb; + tcp_accept(lpcb, altcp_tcp_accept); + return conn; + } + return NULL; +} + +static void +altcp_tcp_abort(struct altcp_pcb *conn) +{ + if (conn != NULL) { + struct tcp_pcb *pcb = (struct tcp_pcb *)conn->state; + ALTCP_TCP_ASSERT_CONN(conn); + if (pcb) { + tcp_abort(pcb); + } + } +} + +static err_t +altcp_tcp_close(struct altcp_pcb *conn) +{ + struct tcp_pcb *pcb; + if (conn == NULL) { + return ERR_VAL; + } + ALTCP_TCP_ASSERT_CONN(conn); + pcb = (struct tcp_pcb *)conn->state; + if (pcb) { + err_t err; + tcp_poll_fn oldpoll = pcb->poll; + altcp_tcp_remove_callbacks(pcb); + err = tcp_close(pcb); + if (err != ERR_OK) { + /* not closed, set up all callbacks again */ + altcp_tcp_setup_callbacks(conn, pcb); + /* poll callback is not included in the above */ + tcp_poll(pcb, oldpoll, pcb->pollinterval); + return err; + } + conn->state = NULL; /* unsafe to reference pcb after tcp_close(). 
*/ + } + altcp_free(conn); + return ERR_OK; +} + +static err_t +altcp_tcp_shutdown(struct altcp_pcb *conn, int shut_rx, int shut_tx) +{ + struct tcp_pcb *pcb; + if (conn == NULL) { + return ERR_VAL; + } + ALTCP_TCP_ASSERT_CONN(conn); + pcb = (struct tcp_pcb *)conn->state; + return tcp_shutdown(pcb, shut_rx, shut_tx); +} + +static err_t +altcp_tcp_write(struct altcp_pcb *conn, const void *dataptr, u16_t len, u8_t apiflags) +{ + struct tcp_pcb *pcb; + if (conn == NULL) { + return ERR_VAL; + } + ALTCP_TCP_ASSERT_CONN(conn); + pcb = (struct tcp_pcb *)conn->state; + return tcp_write(pcb, dataptr, len, apiflags); +} + +static err_t +altcp_tcp_output(struct altcp_pcb *conn) +{ + struct tcp_pcb *pcb; + if (conn == NULL) { + return ERR_VAL; + } + ALTCP_TCP_ASSERT_CONN(conn); + pcb = (struct tcp_pcb *)conn->state; + return tcp_output(pcb); +} + +static u16_t +altcp_tcp_mss(struct altcp_pcb *conn) +{ + struct tcp_pcb *pcb; + if (conn == NULL) { + return 0; + } + ALTCP_TCP_ASSERT_CONN(conn); + pcb = (struct tcp_pcb *)conn->state; + return tcp_mss(pcb); +} + +static u16_t +altcp_tcp_sndbuf(struct altcp_pcb *conn) +{ + struct tcp_pcb *pcb; + if (conn == NULL) { + return 0; + } + ALTCP_TCP_ASSERT_CONN(conn); + pcb = (struct tcp_pcb *)conn->state; + return tcp_sndbuf(pcb); +} + +static u16_t +altcp_tcp_sndqueuelen(struct altcp_pcb *conn) +{ + struct tcp_pcb *pcb; + if (conn == NULL) { + return 0; + } + ALTCP_TCP_ASSERT_CONN(conn); + pcb = (struct tcp_pcb *)conn->state; + return tcp_sndqueuelen(pcb); +} + +static void +altcp_tcp_nagle_disable(struct altcp_pcb *conn) +{ + if (conn && conn->state) { + struct tcp_pcb *pcb = (struct tcp_pcb *)conn->state; + ALTCP_TCP_ASSERT_CONN(conn); + tcp_nagle_disable(pcb); + } +} + +static void +altcp_tcp_nagle_enable(struct altcp_pcb *conn) +{ + if (conn && conn->state) { + struct tcp_pcb *pcb = (struct tcp_pcb *)conn->state; + ALTCP_TCP_ASSERT_CONN(conn); + tcp_nagle_enable(pcb); + } +} + +static int +altcp_tcp_nagle_disabled(struct altcp_pcb *conn) +{ + if (conn && conn->state) { + struct tcp_pcb *pcb = (struct tcp_pcb *)conn->state; + ALTCP_TCP_ASSERT_CONN(conn); + return tcp_nagle_disabled(pcb); + } + return 0; +} + +static void +altcp_tcp_setprio(struct altcp_pcb *conn, u8_t prio) +{ + if (conn != NULL) { + struct tcp_pcb *pcb = (struct tcp_pcb *)conn->state; + ALTCP_TCP_ASSERT_CONN(conn); + tcp_setprio(pcb, prio); + } +} + +static void +altcp_tcp_dealloc(struct altcp_pcb *conn) +{ + LWIP_UNUSED_ARG(conn); + ALTCP_TCP_ASSERT_CONN(conn); + /* no private state to clean up */ +} + +static err_t +altcp_tcp_get_tcp_addrinfo(struct altcp_pcb *conn, int local, ip_addr_t *addr, u16_t *port) +{ + if (conn) { + struct tcp_pcb *pcb = (struct tcp_pcb *)conn->state; + ALTCP_TCP_ASSERT_CONN(conn); + return tcp_tcp_get_tcp_addrinfo(pcb, local, addr, port); + } + return ERR_VAL; +} + +static ip_addr_t * +altcp_tcp_get_ip(struct altcp_pcb *conn, int local) +{ + if (conn) { + struct tcp_pcb *pcb = (struct tcp_pcb *)conn->state; + ALTCP_TCP_ASSERT_CONN(conn); + if (pcb) { + if (local) { + return &pcb->local_ip; + } else { + return &pcb->remote_ip; + } + } + } + return NULL; +} + +static u16_t +altcp_tcp_get_port(struct altcp_pcb *conn, int local) +{ + if (conn) { + struct tcp_pcb *pcb = (struct tcp_pcb *)conn->state; + ALTCP_TCP_ASSERT_CONN(conn); + if (pcb) { + if (local) { + return pcb->local_port; + } else { + return pcb->remote_port; + } + } + } + return 0; +} + +#ifdef LWIP_DEBUG +static enum tcp_state +altcp_tcp_dbg_get_tcp_state(struct altcp_pcb *conn) +{ + if (conn) { + struct 
tcp_pcb *pcb = (struct tcp_pcb *)conn->state; + ALTCP_TCP_ASSERT_CONN(conn); + if (pcb) { + return pcb->state; + } + } + return CLOSED; +} +#endif +const struct altcp_functions altcp_tcp_functions = { + altcp_tcp_set_poll, + altcp_tcp_recved, + altcp_tcp_bind, + altcp_tcp_connect, + altcp_tcp_listen, + altcp_tcp_abort, + altcp_tcp_close, + altcp_tcp_shutdown, + altcp_tcp_write, + altcp_tcp_output, + altcp_tcp_mss, + altcp_tcp_sndbuf, + altcp_tcp_sndqueuelen, + altcp_tcp_nagle_disable, + altcp_tcp_nagle_enable, + altcp_tcp_nagle_disabled, + altcp_tcp_setprio, + altcp_tcp_dealloc, + altcp_tcp_get_tcp_addrinfo, + altcp_tcp_get_ip, + altcp_tcp_get_port +#ifdef LWIP_DEBUG + , altcp_tcp_dbg_get_tcp_state +#endif +}; + +#endif /* LWIP_ALTCP */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/def.c b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/def.c new file mode 100644 index 0000000..004a528 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/def.c @@ -0,0 +1,240 @@ +/** + * @file + * Common functions used throughout the stack. + * + * These are reference implementations of the byte swapping functions. + * Again with the aim of being simple, correct and fully portable. + * Byte swapping is the second thing you would want to optimize. You will + * need to port it to your architecture and in your cc.h: + * + * \#define lwip_htons(x) your_htons + * \#define lwip_htonl(x) your_htonl + * + * Note lwip_ntohs() and lwip_ntohl() are merely references to the htonx counterparts. + * + * If you \#define them to htons() and htonl(), you should + * \#define LWIP_DONT_PROVIDE_BYTEORDER_FUNCTIONS to prevent lwIP from + * defining htonx/ntohx compatibility macros. + + * @defgroup sys_nonstandard Non-standard functions + * @ingroup sys_layer + * lwIP provides default implementations for non-standard functions. + * These can be mapped to OS functions to reduce code footprint if desired. + * All defines related to this section must not be placed in lwipopts.h, + * but in arch/cc.h! + * These options cannot be \#defined in lwipopts.h since they are not options + * of lwIP itself, but options of the lwIP port to your system. + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Simon Goldschmidt + * + */ + +#include "lwip/opt.h" +#include "lwip/def.h" + +#include + +#if BYTE_ORDER == LITTLE_ENDIAN + +#if !defined(lwip_htons) +/** + * Convert an u16_t from host- to network byte order. + * + * @param n u16_t in host byte order + * @return n in network byte order + */ +u16_t +lwip_htons(u16_t n) +{ + return PP_HTONS(n); +} +#endif /* lwip_htons */ + +#if !defined(lwip_htonl) +/** + * Convert an u32_t from host- to network byte order. + * + * @param n u32_t in host byte order + * @return n in network byte order + */ +u32_t +lwip_htonl(u32_t n) +{ + return PP_HTONL(n); +} +#endif /* lwip_htonl */ + +#endif /* BYTE_ORDER == LITTLE_ENDIAN */ + +#ifndef lwip_strnstr +/** + * @ingroup sys_nonstandard + * lwIP default implementation for strnstr() non-standard function. + * This can be \#defined to strnstr() depending on your platform port. + */ +char * +lwip_strnstr(const char *buffer, const char *token, size_t n) +{ + const char *p; + size_t tokenlen = strlen(token); + if (tokenlen == 0) { + return LWIP_CONST_CAST(char *, buffer); + } + for (p = buffer; *p && (p + tokenlen <= buffer + n); p++) { + if ((*p == *token) && (strncmp(p, token, tokenlen) == 0)) { + return LWIP_CONST_CAST(char *, p); + } + } + return NULL; +} +#endif + +#ifndef lwip_stricmp +/** + * @ingroup sys_nonstandard + * lwIP default implementation for stricmp() non-standard function. + * This can be \#defined to stricmp() depending on your platform port. + */ +int +lwip_stricmp(const char *str1, const char *str2) +{ + char c1, c2; + + do { + c1 = *str1++; + c2 = *str2++; + if (c1 != c2) { + char c1_upc = c1 | 0x20; + if ((c1_upc >= 'a') && (c1_upc <= 'z')) { + /* characters are not equal an one is in the alphabet range: + downcase both chars and check again */ + char c2_upc = c2 | 0x20; + if (c1_upc != c2_upc) { + /* still not equal */ + /* don't care for < or > */ + return 1; + } + } else { + /* characters are not equal but none is in the alphabet range */ + return 1; + } + } + } while (c1 != 0); + return 0; +} +#endif + +#ifndef lwip_strnicmp +/** + * @ingroup sys_nonstandard + * lwIP default implementation for strnicmp() non-standard function. + * This can be \#defined to strnicmp() depending on your platform port. 
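The byte-swap reference implementations above are only compiled when the port does not supply its own. A hedged sketch of the usual override, assuming a little-endian target built with GCC or Clang; per the file comment this belongs in arch/cc.h, not lwipopts.h:

/* arch/cc.h (illustrative, not part of this patch) */
#define lwip_htons(x) __builtin_bswap16(x)
#define lwip_htonl(x) __builtin_bswap32(x)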
+ */ +int +lwip_strnicmp(const char *str1, const char *str2, size_t len) +{ + char c1, c2; + + do { + c1 = *str1++; + c2 = *str2++; + if (c1 != c2) { + char c1_upc = c1 | 0x20; + if ((c1_upc >= 'a') && (c1_upc <= 'z')) { + /* characters are not equal an one is in the alphabet range: + downcase both chars and check again */ + char c2_upc = c2 | 0x20; + if (c1_upc != c2_upc) { + /* still not equal */ + /* don't care for < or > */ + return 1; + } + } else { + /* characters are not equal but none is in the alphabet range */ + return 1; + } + } + len--; + } while ((len != 0) && (c1 != 0)); + return 0; +} +#endif + +#ifndef lwip_itoa +/** + * @ingroup sys_nonstandard + * lwIP default implementation for itoa() non-standard function. + * This can be \#defined to itoa() or snprintf(result, bufsize, "%d", number) depending on your platform port. + */ +void +lwip_itoa(char *result, size_t bufsize, int number) +{ + char *res = result; + char *tmp = result + bufsize - 1; + int n = (number >= 0) ? number : -number; + + /* handle invalid bufsize */ + if (bufsize < 2) { + if (bufsize == 1) { + *result = 0; + } + return; + } + + /* First, add sign */ + if (number < 0) { + *res++ = '-'; + } + /* Then create the string from the end and stop if buffer full, + and ensure output string is zero terminated */ + *tmp = 0; + while ((n != 0) && (tmp > res)) { + char val = (char)('0' + (n % 10)); + tmp--; + *tmp = val; + n = n / 10; + } + if (n) { + /* buffer is too small */ + *result = 0; + return; + } + if (*tmp == 0) { + /* Nothing added? */ + *res++ = '0'; + *res++ = 0; + return; + } + /* move from temporary buffer to output buffer (sign is not moved) */ + memmove(res, tmp, (size_t)((result + bufsize) - tmp)); +} +#endif diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/dns.c b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/dns.c new file mode 100644 index 0000000..737b339 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/dns.c @@ -0,0 +1,1631 @@ +/** + * @file + * DNS - host name to IP address resolver. + * + * @defgroup dns DNS + * @ingroup callbackstyle_api + * + * Implements a DNS host name to IP address resolver. + * + * The lwIP DNS resolver functions are used to lookup a host name and + * map it to a numerical IP address. It maintains a list of resolved + * hostnames that can be queried with the dns_lookup() function. + * New hostnames can be resolved using the dns_query() function. + * + * The lwIP version of the resolver also adds a non-blocking version of + * gethostbyname() that will work with a raw API application. This function + * checks for an IP address string first and converts it if it is valid. + * gethostbyname() then does a dns_lookup() to see if the name is + * already in the table. If so, the IP is returned. If not, a query is + * issued and the function returns with a ERR_INPROGRESS status. The app + * using the dns client must then go into a waiting state. + * + * Once a hostname has been resolved (or found to be non-existent), + * the resolver code calls a specified callback function (which + * must be implemented by the module that uses the resolver). + * + * Multicast DNS queries are supported for names ending on ".local". + * However, only "One-Shot Multicast DNS Queries" are supported (RFC 6762 + * chapter 5.1), this is not a fully compliant implementation of continuous + * mDNS querying! 
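As a concrete illustration of the non-blocking gethostbyname flow described in this header comment (a sketch only; example_dns_found, example_resolve and the hostname are placeholders, and the call is assumed to run in the TCPIP thread):

#include "lwip/dns.h"
#include "lwip/ip_addr.h"

/* Invoked later by the resolver; ipaddr is NULL if resolution failed. */
static void
example_dns_found(const char *name, const ip_addr_t *ipaddr, void *arg)
{
  LWIP_UNUSED_ARG(name);
  LWIP_UNUSED_ARG(arg);
  if (ipaddr != NULL) {
    /* name resolved: connect, cache the address, ... */
  }
}

static void
example_resolve(void)
{
  ip_addr_t addr;
  err_t err = dns_gethostbyname("example.org", &addr, example_dns_found, NULL);
  if (err == ERR_OK) {
    /* numeric string or cached entry: addr is valid now, no callback follows */
  } else if (err == ERR_INPROGRESS) {
    /* query sent: wait for example_dns_found() */
  }
}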
+ * + * All functions must be called from TCPIP thread. + * + * @see DNS_MAX_SERVERS + * @see LWIP_DHCP_MAX_DNS_SERVERS + * @see @ref netconn_common for thread-safe access. + */ + +/* + * Port to lwIP from uIP + * by Jim Pettinato April 2007 + * + * security fixes and more by Simon Goldschmidt + * + * uIP version Copyright (c) 2002-2003, Adam Dunkels. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote + * products derived from this software without specific prior + * written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS + * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE + * GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/*----------------------------------------------------------------------------- + * RFC 1035 - Domain names - implementation and specification + * RFC 2181 - Clarifications to the DNS Specification + *----------------------------------------------------------------------------*/ + +/** @todo: define good default values (rfc compliance) */ +/** @todo: improve answer parsing, more checkings... */ +/** @todo: check RFC1035 - 7.3. Processing responses */ +/** @todo: one-shot mDNS: dual-stack fallback to another IP version */ + +/*----------------------------------------------------------------------------- + * Includes + *----------------------------------------------------------------------------*/ + +#include "lwip/opt.h" + +#if LWIP_DNS /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/def.h" +#include "lwip/udp.h" +#include "lwip/mem.h" +#include "lwip/memp.h" +#include "lwip/dns.h" +#include "lwip/prot/dns.h" + +#include + +/** Random generator function to create random TXIDs and source ports for queries */ +#ifndef DNS_RAND_TXID +#if ((LWIP_DNS_SECURE & LWIP_DNS_SECURE_RAND_XID) != 0) +#define DNS_RAND_TXID LWIP_RAND +#else +static u16_t dns_txid; +#define DNS_RAND_TXID() (++dns_txid) +#endif +#endif + +/** Limits the source port to be >= 1024 by default */ +#ifndef DNS_PORT_ALLOWED +#define DNS_PORT_ALLOWED(port) ((port) >= 1024) +#endif + +/** DNS resource record max. 
TTL (one week as default) */ +#ifndef DNS_MAX_TTL +#define DNS_MAX_TTL 604800 +#elif DNS_MAX_TTL > 0x7FFFFFFF +#error DNS_MAX_TTL must be a positive 32-bit value +#endif + +#if DNS_TABLE_SIZE > 255 +#error DNS_TABLE_SIZE must fit into an u8_t +#endif +#if DNS_MAX_SERVERS > 255 +#error DNS_MAX_SERVERS must fit into an u8_t +#endif + +/* The number of parallel requests (i.e. calls to dns_gethostbyname + * that cannot be answered from the DNS table. + * This is set to the table size by default. + */ +#if ((LWIP_DNS_SECURE & LWIP_DNS_SECURE_NO_MULTIPLE_OUTSTANDING) != 0) +#ifndef DNS_MAX_REQUESTS +#define DNS_MAX_REQUESTS DNS_TABLE_SIZE +#else +#if DNS_MAX_REQUESTS > 255 +#error DNS_MAX_REQUESTS must fit into an u8_t +#endif +#endif +#else +/* In this configuration, both arrays have to have the same size and are used + * like one entry (used/free) */ +#define DNS_MAX_REQUESTS DNS_TABLE_SIZE +#endif + +/* The number of UDP source ports used in parallel */ +#if ((LWIP_DNS_SECURE & LWIP_DNS_SECURE_RAND_SRC_PORT) != 0) +#ifndef DNS_MAX_SOURCE_PORTS +#define DNS_MAX_SOURCE_PORTS DNS_MAX_REQUESTS +#else +#if DNS_MAX_SOURCE_PORTS > 255 +#error DNS_MAX_SOURCE_PORTS must fit into an u8_t +#endif +#endif +#else +#ifdef DNS_MAX_SOURCE_PORTS +#undef DNS_MAX_SOURCE_PORTS +#endif +#define DNS_MAX_SOURCE_PORTS 1 +#endif + +#if LWIP_IPV4 && LWIP_IPV6 +#define LWIP_DNS_ADDRTYPE_IS_IPV6(t) (((t) == LWIP_DNS_ADDRTYPE_IPV6_IPV4) || ((t) == LWIP_DNS_ADDRTYPE_IPV6)) +#define LWIP_DNS_ADDRTYPE_MATCH_IP(t, ip) (IP_IS_V6_VAL(ip) ? LWIP_DNS_ADDRTYPE_IS_IPV6(t) : (!LWIP_DNS_ADDRTYPE_IS_IPV6(t))) +#define LWIP_DNS_ADDRTYPE_ARG(x) , x +#define LWIP_DNS_ADDRTYPE_ARG_OR_ZERO(x) x +#define LWIP_DNS_SET_ADDRTYPE(x, y) do { x = y; } while(0) +#else +#if LWIP_IPV6 +#define LWIP_DNS_ADDRTYPE_IS_IPV6(t) 1 +#else +#define LWIP_DNS_ADDRTYPE_IS_IPV6(t) 0 +#endif +#define LWIP_DNS_ADDRTYPE_MATCH_IP(t, ip) 1 +#define LWIP_DNS_ADDRTYPE_ARG(x) +#define LWIP_DNS_ADDRTYPE_ARG_OR_ZERO(x) 0 +#define LWIP_DNS_SET_ADDRTYPE(x, y) +#endif /* LWIP_IPV4 && LWIP_IPV6 */ + +#if LWIP_DNS_SUPPORT_MDNS_QUERIES +#define LWIP_DNS_ISMDNS_ARG(x) , x +#else +#define LWIP_DNS_ISMDNS_ARG(x) +#endif + +/** DNS query message structure. + No packing needed: only used locally on the stack. */ +struct dns_query { + /* DNS query record starts with either a domain name or a pointer + to a name already present somewhere in the packet. */ + u16_t type; + u16_t cls; +}; +#define SIZEOF_DNS_QUERY 4 + +/** DNS answer message structure. + No packing needed: only used locally on the stack. */ +struct dns_answer { + /* DNS answer record starts with either a domain name or a pointer + to a name already present somewhere in the packet. 
*/ + u16_t type; + u16_t cls; + u32_t ttl; + u16_t len; +}; +#define SIZEOF_DNS_ANSWER 10 +/* maximum allowed size for the struct due to non-packed */ +#define SIZEOF_DNS_ANSWER_ASSERT 12 + +/* DNS table entry states */ +typedef enum { + DNS_STATE_UNUSED = 0, + DNS_STATE_NEW = 1, + DNS_STATE_ASKING = 2, + DNS_STATE_DONE = 3 +} dns_state_enum_t; + +/** DNS table entry */ +struct dns_table_entry { + u32_t ttl; + ip_addr_t ipaddr; + u16_t txid; + u8_t state; + u8_t server_idx; + u8_t tmr; + u8_t retries; + u8_t seqno; +#if ((LWIP_DNS_SECURE & LWIP_DNS_SECURE_RAND_SRC_PORT) != 0) + u8_t pcb_idx; +#endif + char name[DNS_MAX_NAME_LENGTH]; +#if LWIP_IPV4 && LWIP_IPV6 + u8_t reqaddrtype; +#endif /* LWIP_IPV4 && LWIP_IPV6 */ +#if LWIP_DNS_SUPPORT_MDNS_QUERIES + u8_t is_mdns; +#endif +}; + +/** DNS request table entry: used when dns_gehostbyname cannot answer the + * request from the DNS table */ +struct dns_req_entry { + /* pointer to callback on DNS query done */ + dns_found_callback found; + /* argument passed to the callback function */ + void *arg; +#if ((LWIP_DNS_SECURE & LWIP_DNS_SECURE_NO_MULTIPLE_OUTSTANDING) != 0) + u8_t dns_table_idx; +#endif +#if LWIP_IPV4 && LWIP_IPV6 + u8_t reqaddrtype; +#endif /* LWIP_IPV4 && LWIP_IPV6 */ +}; + +#if DNS_LOCAL_HOSTLIST + +#if DNS_LOCAL_HOSTLIST_IS_DYNAMIC +/** Local host-list. For hostnames in this list, no + * external name resolution is performed */ +static struct local_hostlist_entry *local_hostlist_dynamic; +#else /* DNS_LOCAL_HOSTLIST_IS_DYNAMIC */ + +/** Defining this allows the local_hostlist_static to be placed in a different + * linker section (e.g. FLASH) */ +#ifndef DNS_LOCAL_HOSTLIST_STORAGE_PRE +#define DNS_LOCAL_HOSTLIST_STORAGE_PRE static +#endif /* DNS_LOCAL_HOSTLIST_STORAGE_PRE */ +/** Defining this allows the local_hostlist_static to be placed in a different + * linker section (e.g. FLASH) */ +#ifndef DNS_LOCAL_HOSTLIST_STORAGE_POST +#define DNS_LOCAL_HOSTLIST_STORAGE_POST +#endif /* DNS_LOCAL_HOSTLIST_STORAGE_POST */ +DNS_LOCAL_HOSTLIST_STORAGE_PRE struct local_hostlist_entry local_hostlist_static[] + DNS_LOCAL_HOSTLIST_STORAGE_POST = DNS_LOCAL_HOSTLIST_INIT; + +#endif /* DNS_LOCAL_HOSTLIST_IS_DYNAMIC */ + +static void dns_init_local(void); +static err_t dns_lookup_local(const char *hostname, ip_addr_t *addr LWIP_DNS_ADDRTYPE_ARG(u8_t dns_addrtype)); +#endif /* DNS_LOCAL_HOSTLIST */ + + +/* forward declarations */ +static void dns_recv(void *s, struct udp_pcb *pcb, struct pbuf *p, const ip_addr_t *addr, u16_t port); +static void dns_check_entries(void); +static void dns_call_found(u8_t idx, ip_addr_t *addr); + +/*----------------------------------------------------------------------------- + * Globals + *----------------------------------------------------------------------------*/ + +/* DNS variables */ +static struct udp_pcb *dns_pcbs[DNS_MAX_SOURCE_PORTS]; +#if ((LWIP_DNS_SECURE & LWIP_DNS_SECURE_RAND_SRC_PORT) != 0) +static u8_t dns_last_pcb_idx; +#endif +static u8_t dns_seqno; +static struct dns_table_entry dns_table[DNS_TABLE_SIZE]; +static struct dns_req_entry dns_requests[DNS_MAX_REQUESTS]; +static ip_addr_t dns_servers[DNS_MAX_SERVERS]; + +#if LWIP_IPV4 +const ip_addr_t dns_mquery_v4group = DNS_MQUERY_IPV4_GROUP_INIT; +#endif /* LWIP_IPV4 */ +#if LWIP_IPV6 +const ip_addr_t dns_mquery_v6group = DNS_MQUERY_IPV6_GROUP_INIT; +#endif /* LWIP_IPV6 */ + +/** + * Initialize the resolver: set up the UDP pcb and configure the default server + * (if DNS_SERVER_ADDRESS is set). 
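For the compile-time default mentioned just above, DNS_SERVER_ADDRESS is expected to fill the ip_addr_t it is handed (dns_init() passes &dnsserver). One possible definition, sketched with a placeholder address:

/* lwipopts.h (illustrative, not part of this patch) */
#define DNS_SERVER_ADDRESS(addr) ipaddr_aton("192.168.0.1", addr)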
+ */ +void +dns_init(void) +{ +#ifdef DNS_SERVER_ADDRESS + /* initialize default DNS server address */ + ip_addr_t dnsserver; + DNS_SERVER_ADDRESS(&dnsserver); + dns_setserver(0, &dnsserver); +#endif /* DNS_SERVER_ADDRESS */ + + LWIP_ASSERT("sanity check SIZEOF_DNS_QUERY", + sizeof(struct dns_query) == SIZEOF_DNS_QUERY); + LWIP_ASSERT("sanity check SIZEOF_DNS_ANSWER", + sizeof(struct dns_answer) <= SIZEOF_DNS_ANSWER_ASSERT); + + LWIP_DEBUGF(DNS_DEBUG, ("dns_init: initializing\n")); + + /* if dns client not yet initialized... */ +#if ((LWIP_DNS_SECURE & LWIP_DNS_SECURE_RAND_SRC_PORT) == 0) + if (dns_pcbs[0] == NULL) { + dns_pcbs[0] = udp_new_ip_type(IPADDR_TYPE_ANY); + LWIP_ASSERT("dns_pcbs[0] != NULL", dns_pcbs[0] != NULL); + + /* initialize DNS table not needed (initialized to zero since it is a + * global variable) */ + LWIP_ASSERT("For implicit initialization to work, DNS_STATE_UNUSED needs to be 0", + DNS_STATE_UNUSED == 0); + + /* initialize DNS client */ + udp_bind(dns_pcbs[0], IP_ANY_TYPE, 0); + udp_recv(dns_pcbs[0], dns_recv, NULL); + } +#endif + +#if DNS_LOCAL_HOSTLIST + dns_init_local(); +#endif +} + +/** + * @ingroup dns + * Initialize one of the DNS servers. + * + * @param numdns the index of the DNS server to set must be < DNS_MAX_SERVERS + * @param dnsserver IP address of the DNS server to set + */ +void +dns_setserver(u8_t numdns, const ip_addr_t *dnsserver) +{ + if (numdns < DNS_MAX_SERVERS) { + if (dnsserver != NULL) { + dns_servers[numdns] = (*dnsserver); + } else { + dns_servers[numdns] = *IP_ADDR_ANY; + } + } +} + +/** + * @ingroup dns + * Obtain one of the currently configured DNS server. + * + * @param numdns the index of the DNS server + * @return IP address of the indexed DNS server or "ip_addr_any" if the DNS + * server has not been configured. + */ +const ip_addr_t * +dns_getserver(u8_t numdns) +{ + if (numdns < DNS_MAX_SERVERS) { + return &dns_servers[numdns]; + } else { + return IP_ADDR_ANY; + } +} + +/** + * The DNS resolver client timer - handle retries and timeouts and should + * be called every DNS_TMR_INTERVAL milliseconds (every second by default). + */ +void +dns_tmr(void) +{ + LWIP_DEBUGF(DNS_DEBUG, ("dns_tmr: dns_check_entries\n")); + dns_check_entries(); +} + +#if DNS_LOCAL_HOSTLIST +static void +dns_init_local(void) +{ +#if DNS_LOCAL_HOSTLIST_IS_DYNAMIC && defined(DNS_LOCAL_HOSTLIST_INIT) + size_t i; + struct local_hostlist_entry *entry; + /* Dynamic: copy entries from DNS_LOCAL_HOSTLIST_INIT to list */ + struct local_hostlist_entry local_hostlist_init[] = DNS_LOCAL_HOSTLIST_INIT; + size_t namelen; + for (i = 0; i < LWIP_ARRAYSIZE(local_hostlist_init); i++) { + struct local_hostlist_entry *init_entry = &local_hostlist_init[i]; + LWIP_ASSERT("invalid host name (NULL)", init_entry->name != NULL); + namelen = strlen(init_entry->name); + LWIP_ASSERT("namelen <= DNS_LOCAL_HOSTLIST_MAX_NAMELEN", namelen <= DNS_LOCAL_HOSTLIST_MAX_NAMELEN); + entry = (struct local_hostlist_entry *)memp_malloc(MEMP_LOCALHOSTLIST); + LWIP_ASSERT("mem-error in dns_init_local", entry != NULL); + if (entry != NULL) { + char *entry_name = (char *)entry + sizeof(struct local_hostlist_entry); + MEMCPY(entry_name, init_entry->name, namelen); + entry_name[namelen] = 0; + entry->name = entry_name; + entry->addr = init_entry->addr; + entry->next = local_hostlist_dynamic; + local_hostlist_dynamic = entry; + } + } +#endif /* DNS_LOCAL_HOSTLIST_IS_DYNAMIC && defined(DNS_LOCAL_HOSTLIST_INIT) */ +} + +/** + * @ingroup dns + * Iterate the local host-list for a hostname. 
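A short sketch of the runtime counterpart to dns_setserver()/dns_getserver() shown above, assuming IPv4; example_configure_dns and the 192.168.0.1 address are placeholders (in practice the address usually comes from DHCP):

#include "lwip/dns.h"
#include "lwip/ip_addr.h"

static void
example_configure_dns(void)
{
  ip_addr_t resolver;
  const ip_addr_t *current;

  IP_ADDR4(&resolver, 192, 168, 0, 1);   /* placeholder resolver address */
  dns_setserver(0, &resolver);           /* index must be < DNS_MAX_SERVERS */

  current = dns_getserver(0);            /* returns ip_addr_any if unset */
  LWIP_UNUSED_ARG(current);
}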
+ * + * @param iterator_fn a function that is called for every entry in the local host-list + * @param iterator_arg 3rd argument passed to iterator_fn + * @return the number of entries in the local host-list + */ +size_t +dns_local_iterate(dns_found_callback iterator_fn, void *iterator_arg) +{ + size_t i; +#if DNS_LOCAL_HOSTLIST_IS_DYNAMIC + struct local_hostlist_entry *entry = local_hostlist_dynamic; + i = 0; + while (entry != NULL) { + if (iterator_fn != NULL) { + iterator_fn(entry->name, &entry->addr, iterator_arg); + } + i++; + entry = entry->next; + } +#else /* DNS_LOCAL_HOSTLIST_IS_DYNAMIC */ + for (i = 0; i < LWIP_ARRAYSIZE(local_hostlist_static); i++) { + if (iterator_fn != NULL) { + iterator_fn(local_hostlist_static[i].name, &local_hostlist_static[i].addr, iterator_arg); + } + } +#endif /* DNS_LOCAL_HOSTLIST_IS_DYNAMIC */ + return i; +} + +/** + * @ingroup dns + * Scans the local host-list for a hostname. + * + * @param hostname Hostname to look for in the local host-list + * @param addr the first IP address for the hostname in the local host-list or + * IPADDR_NONE if not found. + * @param dns_addrtype - LWIP_DNS_ADDRTYPE_IPV4_IPV6: try to resolve IPv4 (ATTENTION: no fallback here!) + * - LWIP_DNS_ADDRTYPE_IPV6_IPV4: try to resolve IPv6 (ATTENTION: no fallback here!) + * - LWIP_DNS_ADDRTYPE_IPV4: try to resolve IPv4 only + * - LWIP_DNS_ADDRTYPE_IPV6: try to resolve IPv6 only + * @return ERR_OK if found, ERR_ARG if not found + */ +err_t +dns_local_lookup(const char *hostname, ip_addr_t *addr, u8_t dns_addrtype) +{ + LWIP_UNUSED_ARG(dns_addrtype); + return dns_lookup_local(hostname, addr LWIP_DNS_ADDRTYPE_ARG(dns_addrtype)); +} + +/* Internal implementation for dns_local_lookup and dns_lookup */ +static err_t +dns_lookup_local(const char *hostname, ip_addr_t *addr LWIP_DNS_ADDRTYPE_ARG(u8_t dns_addrtype)) +{ +#if DNS_LOCAL_HOSTLIST_IS_DYNAMIC + struct local_hostlist_entry *entry = local_hostlist_dynamic; + while (entry != NULL) { + if ((lwip_stricmp(entry->name, hostname) == 0) && + LWIP_DNS_ADDRTYPE_MATCH_IP(dns_addrtype, entry->addr)) { + if (addr) { + ip_addr_copy(*addr, entry->addr); + } + return ERR_OK; + } + entry = entry->next; + } +#else /* DNS_LOCAL_HOSTLIST_IS_DYNAMIC */ + size_t i; + for (i = 0; i < LWIP_ARRAYSIZE(local_hostlist_static); i++) { + if ((lwip_stricmp(local_hostlist_static[i].name, hostname) == 0) && + LWIP_DNS_ADDRTYPE_MATCH_IP(dns_addrtype, local_hostlist_static[i].addr)) { + if (addr) { + ip_addr_copy(*addr, local_hostlist_static[i].addr); + } + return ERR_OK; + } + } +#endif /* DNS_LOCAL_HOSTLIST_IS_DYNAMIC */ + return ERR_ARG; +} + +#if DNS_LOCAL_HOSTLIST_IS_DYNAMIC +/** + * @ingroup dns + * Remove all entries from the local host-list for a specific hostname + * and/or IP address + * + * @param hostname hostname for which entries shall be removed from the local + * host-list + * @param addr address for which entries shall be removed from the local host-list + * @return the number of removed entries + */ +int +dns_local_removehost(const char *hostname, const ip_addr_t *addr) +{ + int removed = 0; + struct local_hostlist_entry *entry = local_hostlist_dynamic; + struct local_hostlist_entry *last_entry = NULL; + while (entry != NULL) { + if (((hostname == NULL) || !lwip_stricmp(entry->name, hostname)) && + ((addr == NULL) || ip_addr_cmp(&entry->addr, addr))) { + struct local_hostlist_entry *free_entry; + if (last_entry != NULL) { + last_entry->next = entry->next; + } else { + local_hostlist_dynamic = entry->next; + } + free_entry = entry; + entry = 
entry->next; + memp_free(MEMP_LOCALHOSTLIST, free_entry); + removed++; + } else { + last_entry = entry; + entry = entry->next; + } + } + return removed; +} + +/** + * @ingroup dns + * Add a hostname/IP address pair to the local host-list. + * Duplicates are not checked. + * + * @param hostname hostname of the new entry + * @param addr IP address of the new entry + * @return ERR_OK if succeeded or ERR_MEM on memory error + */ +err_t +dns_local_addhost(const char *hostname, const ip_addr_t *addr) +{ + struct local_hostlist_entry *entry; + size_t namelen; + char *entry_name; + LWIP_ASSERT("invalid host name (NULL)", hostname != NULL); + namelen = strlen(hostname); + LWIP_ASSERT("namelen <= DNS_LOCAL_HOSTLIST_MAX_NAMELEN", namelen <= DNS_LOCAL_HOSTLIST_MAX_NAMELEN); + entry = (struct local_hostlist_entry *)memp_malloc(MEMP_LOCALHOSTLIST); + if (entry == NULL) { + return ERR_MEM; + } + entry_name = (char *)entry + sizeof(struct local_hostlist_entry); + MEMCPY(entry_name, hostname, namelen); + entry_name[namelen] = 0; + entry->name = entry_name; + ip_addr_copy(entry->addr, *addr); + entry->next = local_hostlist_dynamic; + local_hostlist_dynamic = entry; + return ERR_OK; +} +#endif /* DNS_LOCAL_HOSTLIST_IS_DYNAMIC*/ +#endif /* DNS_LOCAL_HOSTLIST */ + +/** + * @ingroup dns + * Look up a hostname in the array of known hostnames. + * + * @note This function only looks in the internal array of known + * hostnames, it does not send out a query for the hostname if none + * was found. The function dns_enqueue() can be used to send a query + * for a hostname. + * + * @param name the hostname to look up + * @param addr the hostname's IP address, as u32_t (instead of ip_addr_t to + * better check for failure: != IPADDR_NONE) or IPADDR_NONE if the hostname + * was not found in the cached dns_table. + * @return ERR_OK if found, ERR_ARG if not found + */ +static err_t +dns_lookup(const char *name, ip_addr_t *addr LWIP_DNS_ADDRTYPE_ARG(u8_t dns_addrtype)) +{ + u8_t i; +#if DNS_LOCAL_HOSTLIST + if (dns_lookup_local(name, addr LWIP_DNS_ADDRTYPE_ARG(dns_addrtype)) == ERR_OK) { + return ERR_OK; + } +#endif /* DNS_LOCAL_HOSTLIST */ +#ifdef DNS_LOOKUP_LOCAL_EXTERN + if (DNS_LOOKUP_LOCAL_EXTERN(name, addr, LWIP_DNS_ADDRTYPE_ARG_OR_ZERO(dns_addrtype)) == ERR_OK) { + return ERR_OK; + } +#endif /* DNS_LOOKUP_LOCAL_EXTERN */ + + /* Walk through name list, return entry if found. If not, return NULL. */ + for (i = 0; i < DNS_TABLE_SIZE; ++i) { + if ((dns_table[i].state == DNS_STATE_DONE) && + (lwip_strnicmp(name, dns_table[i].name, sizeof(dns_table[i].name)) == 0) && + LWIP_DNS_ADDRTYPE_MATCH_IP(dns_addrtype, dns_table[i].ipaddr)) { + LWIP_DEBUGF(DNS_DEBUG, ("dns_lookup: \"%s\": found = ", name)); + ip_addr_debug_print_val(DNS_DEBUG, dns_table[i].ipaddr); + LWIP_DEBUGF(DNS_DEBUG, ("\n")); + if (addr) { + ip_addr_copy(*addr, dns_table[i].ipaddr); + } + return ERR_OK; + } + } + + return ERR_ARG; +} + +/** + * Compare the "dotted" name "query" with the encoded name "response" + * to make sure an answer from the DNS server matches the current dns_table + * entry (otherwise, answers might arrive late for hostname not on the list + * any more). + * + * For now, this function compares case-insensitive to cope with all kinds of + * servers. This also means that "dns 0x20 bit encoding" must be checked + * externally, if we want to implement it. + * Currently, the request is sent exactly as passed in by he user request. 
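For completeness, a usage sketch for the dynamic host-list API above; it assumes DNS_LOCAL_HOSTLIST and DNS_LOCAL_HOSTLIST_IS_DYNAMIC are enabled in lwipopts.h, and the hostname/address pair is a placeholder:

#include "lwip/dns.h"
#include "lwip/ip_addr.h"

static void
example_add_local_host(void)
{
  ip_addr_t addr;
  IP_ADDR4(&addr, 10, 0, 0, 2);                        /* placeholder address */
  if (dns_local_addhost("sensor-node", &addr) == ERR_OK) {
    /* dns_gethostbyname("sensor-node", ...) is now answered from the local list */
  }
  /* dns_local_removehost("sensor-node", NULL) would remove it again */
}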
+ * + * @param query hostname (not encoded) from the dns_table + * @param p pbuf containing the encoded hostname in the DNS response + * @param start_offset offset into p where the name starts + * @return 0xFFFF: names differ, other: names equal -> offset behind name + */ +static u16_t +dns_compare_name(const char *query, struct pbuf *p, u16_t start_offset) +{ + int n; + u16_t response_offset = start_offset; + + do { + n = pbuf_try_get_at(p, response_offset); + if ((n < 0) || (response_offset == 0xFFFF)) { + /* error or overflow */ + return 0xFFFF; + } + response_offset++; + /** @see RFC 1035 - 4.1.4. Message compression */ + if ((n & 0xc0) == 0xc0) { + /* Compressed name: cannot be equal since we don't send them */ + return 0xFFFF; + } else { + /* Not compressed name */ + while (n > 0) { + int c = pbuf_try_get_at(p, response_offset); + if (c < 0) { + return 0xFFFF; + } + if (lwip_tolower((*query)) != lwip_tolower((u8_t)c)) { + return 0xFFFF; + } + if (response_offset == 0xFFFF) { + /* would overflow */ + return 0xFFFF; + } + response_offset++; + ++query; + --n; + } + ++query; + } + n = pbuf_try_get_at(p, response_offset); + if (n < 0) { + return 0xFFFF; + } + } while (n != 0); + + if (response_offset == 0xFFFF) { + /* would overflow */ + return 0xFFFF; + } + return (u16_t)(response_offset + 1); +} + +/** + * Walk through a compact encoded DNS name and return the end of the name. + * + * @param p pbuf containing the name + * @param query_idx start index into p pointing to encoded DNS name in the DNS server response + * @return index to end of the name + */ +static u16_t +dns_skip_name(struct pbuf *p, u16_t query_idx) +{ + int n; + u16_t offset = query_idx; + + do { + n = pbuf_try_get_at(p, offset++); + if ((n < 0) || (offset == 0)) { + return 0xFFFF; + } + /** @see RFC 1035 - 4.1.4. Message compression */ + if ((n & 0xc0) == 0xc0) { + /* Compressed name: since we only want to skip it (not check it), stop here */ + break; + } else { + /* Not compressed name */ + if (offset + n >= p->tot_len) { + return 0xFFFF; + } + offset = (u16_t)(offset + n); + } + n = pbuf_try_get_at(p, offset); + if (n < 0) { + return 0xFFFF; + } + } while (n != 0); + + if (offset == 0xFFFF) { + return 0xFFFF; + } + return (u16_t)(offset + 1); +} + +/** + * Send a DNS query packet. + * + * @param idx the DNS table entry index for which to send a request + * @return ERR_OK if packet is sent; an err_t indicating the problem otherwise + */ +static err_t +dns_send(u8_t idx) +{ + err_t err; + struct dns_hdr hdr; + struct dns_query qry; + struct pbuf *p; + u16_t query_idx, copy_len; + const char *hostname, *hostname_part; + u8_t n; + u8_t pcb_idx; + struct dns_table_entry *entry = &dns_table[idx]; + + LWIP_DEBUGF(DNS_DEBUG, ("dns_send: dns_servers[%"U16_F"] \"%s\": request\n", + (u16_t)(entry->server_idx), entry->name)); + LWIP_ASSERT("dns server out of array", entry->server_idx < DNS_MAX_SERVERS); + if (ip_addr_isany_val(dns_servers[entry->server_idx]) +#if LWIP_DNS_SUPPORT_MDNS_QUERIES + && !entry->is_mdns +#endif + ) { + /* DNS server not valid anymore, e.g. 
PPP netif has been shut down */ + /* call specified callback function if provided */ + dns_call_found(idx, NULL); + /* flush this entry */ + entry->state = DNS_STATE_UNUSED; + return ERR_OK; + } + + /* if here, we have either a new query or a retry on a previous query to process */ + p = pbuf_alloc(PBUF_TRANSPORT, (u16_t)(SIZEOF_DNS_HDR + strlen(entry->name) + 2 + + SIZEOF_DNS_QUERY), PBUF_RAM); + if (p != NULL) { + const ip_addr_t *dst; + u16_t dst_port; + /* fill dns header */ + memset(&hdr, 0, SIZEOF_DNS_HDR); + hdr.id = lwip_htons(entry->txid); + hdr.flags1 = DNS_FLAG1_RD; + hdr.numquestions = PP_HTONS(1); + pbuf_take(p, &hdr, SIZEOF_DNS_HDR); + hostname = entry->name; + --hostname; + + /* convert hostname into suitable query format. */ + query_idx = SIZEOF_DNS_HDR; + do { + ++hostname; + hostname_part = hostname; + for (n = 0; *hostname != '.' && *hostname != 0; ++hostname) { + ++n; + } + copy_len = (u16_t)(hostname - hostname_part); + if (query_idx + n + 1 > 0xFFFF) { + /* u16_t overflow */ + goto overflow_return; + } + pbuf_put_at(p, query_idx, n); + pbuf_take_at(p, hostname_part, copy_len, (u16_t)(query_idx + 1)); + query_idx = (u16_t)(query_idx + n + 1); + } while (*hostname != 0); + pbuf_put_at(p, query_idx, 0); + query_idx++; + + /* fill dns query */ + if (LWIP_DNS_ADDRTYPE_IS_IPV6(entry->reqaddrtype)) { + qry.type = PP_HTONS(DNS_RRTYPE_AAAA); + } else { + qry.type = PP_HTONS(DNS_RRTYPE_A); + } + qry.cls = PP_HTONS(DNS_RRCLASS_IN); + pbuf_take_at(p, &qry, SIZEOF_DNS_QUERY, query_idx); + +#if ((LWIP_DNS_SECURE & LWIP_DNS_SECURE_RAND_SRC_PORT) != 0) + pcb_idx = entry->pcb_idx; +#else + pcb_idx = 0; +#endif + /* send dns packet */ + LWIP_DEBUGF(DNS_DEBUG, ("sending DNS request ID %d for name \"%s\" to server %d\r\n", + entry->txid, entry->name, entry->server_idx)); +#if LWIP_DNS_SUPPORT_MDNS_QUERIES + if (entry->is_mdns) { + dst_port = DNS_MQUERY_PORT; +#if LWIP_IPV6 + if (LWIP_DNS_ADDRTYPE_IS_IPV6(entry->reqaddrtype)) { + dst = &dns_mquery_v6group; + } +#endif +#if LWIP_IPV4 && LWIP_IPV6 + else +#endif +#if LWIP_IPV4 + { + dst = &dns_mquery_v4group; + } +#endif + } else +#endif /* LWIP_DNS_SUPPORT_MDNS_QUERIES */ + { + dst_port = DNS_SERVER_PORT; + dst = &dns_servers[entry->server_idx]; + } + err = udp_sendto(dns_pcbs[pcb_idx], p, dst, dst_port); + + /* free pbuf */ + pbuf_free(p); + } else { + err = ERR_MEM; + } + + return err; +overflow_return: + pbuf_free(p); + return ERR_VAL; +} + +#if ((LWIP_DNS_SECURE & LWIP_DNS_SECURE_RAND_SRC_PORT) != 0) +static struct udp_pcb * +dns_alloc_random_port(void) +{ + err_t err; + struct udp_pcb *pcb; + + pcb = udp_new_ip_type(IPADDR_TYPE_ANY); + if (pcb == NULL) { + /* out of memory, have to reuse an existing pcb */ + return NULL; + } + do { + u16_t port = (u16_t)DNS_RAND_TXID(); + if (DNS_PORT_ALLOWED(port)) { + err = udp_bind(pcb, IP_ANY_TYPE, port); + } else { + /* this port is not allowed, try again */ + err = ERR_USE; + } + } while (err == ERR_USE); + if (err != ERR_OK) { + udp_remove(pcb); + return NULL; + } + udp_recv(pcb, dns_recv, NULL); + return pcb; +} + +/** + * dns_alloc_pcb() - allocates a new pcb (or reuses an existing one) to be used + * for sending a request + * + * @return an index into dns_pcbs + */ +static u8_t +dns_alloc_pcb(void) +{ + u8_t i; + u8_t idx; + + for (i = 0; i < DNS_MAX_SOURCE_PORTS; i++) { + if (dns_pcbs[i] == NULL) { + break; + } + } + if (i < DNS_MAX_SOURCE_PORTS) { + dns_pcbs[i] = dns_alloc_random_port(); + if (dns_pcbs[i] != NULL) { + /* succeeded */ + dns_last_pcb_idx = i; + return i; + } + } + /* if we 
come here, creating a new UDP pcb failed, so we have to use + an already existing one (so overflow is no issue) */ + for (i = 0, idx = (u8_t)(dns_last_pcb_idx + 1); i < DNS_MAX_SOURCE_PORTS; i++, idx++) { + if (idx >= DNS_MAX_SOURCE_PORTS) { + idx = 0; + } + if (dns_pcbs[idx] != NULL) { + dns_last_pcb_idx = idx; + return idx; + } + } + return DNS_MAX_SOURCE_PORTS; +} +#endif /* ((LWIP_DNS_SECURE & LWIP_DNS_SECURE_RAND_SRC_PORT) != 0) */ + +/** + * dns_call_found() - call the found callback and check if there are duplicate + * entries for the given hostname. If there are any, their found callback will + * be called and they will be removed. + * + * @param idx dns table index of the entry that is resolved or removed + * @param addr IP address for the hostname (or NULL on error or memory shortage) + */ +static void +dns_call_found(u8_t idx, ip_addr_t *addr) +{ +#if ((LWIP_DNS_SECURE & (LWIP_DNS_SECURE_NO_MULTIPLE_OUTSTANDING | LWIP_DNS_SECURE_RAND_SRC_PORT)) != 0) + u8_t i; +#endif + +#if LWIP_IPV4 && LWIP_IPV6 + if (addr != NULL) { + /* check that address type matches the request and adapt the table entry */ + if (IP_IS_V6_VAL(*addr)) { + LWIP_ASSERT("invalid response", LWIP_DNS_ADDRTYPE_IS_IPV6(dns_table[idx].reqaddrtype)); + dns_table[idx].reqaddrtype = LWIP_DNS_ADDRTYPE_IPV6; + } else { + LWIP_ASSERT("invalid response", !LWIP_DNS_ADDRTYPE_IS_IPV6(dns_table[idx].reqaddrtype)); + dns_table[idx].reqaddrtype = LWIP_DNS_ADDRTYPE_IPV4; + } + } +#endif /* LWIP_IPV4 && LWIP_IPV6 */ + +#if ((LWIP_DNS_SECURE & LWIP_DNS_SECURE_NO_MULTIPLE_OUTSTANDING) != 0) + for (i = 0; i < DNS_MAX_REQUESTS; i++) { + if (dns_requests[i].found && (dns_requests[i].dns_table_idx == idx)) { + (*dns_requests[i].found)(dns_table[idx].name, addr, dns_requests[i].arg); + /* flush this entry */ + dns_requests[i].found = NULL; + } + } +#else + if (dns_requests[idx].found) { + (*dns_requests[idx].found)(dns_table[idx].name, addr, dns_requests[idx].arg); + } + dns_requests[idx].found = NULL; +#endif +#if ((LWIP_DNS_SECURE & LWIP_DNS_SECURE_RAND_SRC_PORT) != 0) + /* close the pcb used unless other request are using it */ + for (i = 0; i < DNS_MAX_REQUESTS; i++) { + if (i == idx) { + continue; /* only check other requests */ + } + if (dns_table[i].state == DNS_STATE_ASKING) { + if (dns_table[i].pcb_idx == dns_table[idx].pcb_idx) { + /* another request is still using the same pcb */ + dns_table[idx].pcb_idx = DNS_MAX_SOURCE_PORTS; + break; + } + } + } + if (dns_table[idx].pcb_idx < DNS_MAX_SOURCE_PORTS) { + /* if we come here, the pcb is not used any more and can be removed */ + udp_remove(dns_pcbs[dns_table[idx].pcb_idx]); + dns_pcbs[dns_table[idx].pcb_idx] = NULL; + dns_table[idx].pcb_idx = DNS_MAX_SOURCE_PORTS; + } +#endif +} + +/* Create a query transmission ID that is unique for all outstanding queries */ +static u16_t +dns_create_txid(void) +{ + u16_t txid; + u8_t i; + +again: + txid = (u16_t)DNS_RAND_TXID(); + + /* check whether the ID is unique */ + for (i = 0; i < DNS_TABLE_SIZE; i++) { + if ((dns_table[i].state == DNS_STATE_ASKING) && + (dns_table[i].txid == txid)) { + /* ID already used by another pending query */ + goto again; + } + } + + return txid; +} + +/** + * Check whether there are other backup DNS servers available to try + */ +static u8_t +dns_backupserver_available(struct dns_table_entry *pentry) +{ + u8_t ret = 0; + + if (pentry) { + if ((pentry->server_idx + 1 < DNS_MAX_SERVERS) && !ip_addr_isany_val(dns_servers[pentry->server_idx + 1])) { + ret = 1; + } + } + + return ret; +} + +/** + * dns_check_entry() 
- see if entry has not yet been queried and, if so, sends out a query. + * Check an entry in the dns_table: + * - send out query for new entries + * - retry old pending entries on timeout (also with different servers) + * - remove completed entries from the table if their TTL has expired + * + * @param i index of the dns_table entry to check + */ +static void +dns_check_entry(u8_t i) +{ + err_t err; + struct dns_table_entry *entry = &dns_table[i]; + + LWIP_ASSERT("array index out of bounds", i < DNS_TABLE_SIZE); + + switch (entry->state) { + case DNS_STATE_NEW: + /* initialize new entry */ + entry->txid = dns_create_txid(); + entry->state = DNS_STATE_ASKING; + entry->server_idx = 0; + entry->tmr = 1; + entry->retries = 0; + + /* send DNS packet for this entry */ + err = dns_send(i); + if (err != ERR_OK) { + LWIP_DEBUGF(DNS_DEBUG | LWIP_DBG_LEVEL_WARNING, + ("dns_send returned error: %s\n", lwip_strerr(err))); + } + break; + case DNS_STATE_ASKING: + if (--entry->tmr == 0) { + if (++entry->retries == DNS_MAX_RETRIES) { + if (dns_backupserver_available(entry) +#if LWIP_DNS_SUPPORT_MDNS_QUERIES + && !entry->is_mdns +#endif /* LWIP_DNS_SUPPORT_MDNS_QUERIES */ + ) { + /* change of server */ + entry->server_idx++; + entry->tmr = 1; + entry->retries = 0; + } else { + LWIP_DEBUGF(DNS_DEBUG, ("dns_check_entry: \"%s\": timeout\n", entry->name)); + /* call specified callback function if provided */ + dns_call_found(i, NULL); + /* flush this entry */ + entry->state = DNS_STATE_UNUSED; + break; + } + } else { + /* wait longer for the next retry */ + entry->tmr = entry->retries; + } + + /* send DNS packet for this entry */ + err = dns_send(i); + if (err != ERR_OK) { + LWIP_DEBUGF(DNS_DEBUG | LWIP_DBG_LEVEL_WARNING, + ("dns_send returned error: %s\n", lwip_strerr(err))); + } + } + break; + case DNS_STATE_DONE: + /* if the time to live is nul */ + if ((entry->ttl == 0) || (--entry->ttl == 0)) { + LWIP_DEBUGF(DNS_DEBUG, ("dns_check_entry: \"%s\": flush\n", entry->name)); + /* flush this entry, there cannot be any related pending entries in this state */ + entry->state = DNS_STATE_UNUSED; + } + break; + case DNS_STATE_UNUSED: + /* nothing to do */ + break; + default: + LWIP_ASSERT("unknown dns_table entry state:", 0); + break; + } +} + +/** + * Call dns_check_entry for each entry in dns_table - check all entries. + */ +static void +dns_check_entries(void) +{ + u8_t i; + + for (i = 0; i < DNS_TABLE_SIZE; ++i) { + dns_check_entry(i); + } +} + +/** + * Save TTL and call dns_call_found for correct response. + */ +static void +dns_correct_response(u8_t idx, u32_t ttl) +{ + struct dns_table_entry *entry = &dns_table[idx]; + + entry->state = DNS_STATE_DONE; + + LWIP_DEBUGF(DNS_DEBUG, ("dns_recv: \"%s\": response = ", entry->name)); + ip_addr_debug_print_val(DNS_DEBUG, entry->ipaddr); + LWIP_DEBUGF(DNS_DEBUG, ("\n")); + + /* read the answer resource record's TTL, and maximize it if needed */ + entry->ttl = ttl; + if (entry->ttl > DNS_MAX_TTL) { + entry->ttl = DNS_MAX_TTL; + } + dns_call_found(idx, &entry->ipaddr); + + if (entry->ttl == 0) { + /* RFC 883, page 29: "Zero values are + interpreted to mean that the RR can only be used for the + transaction in progress, and should not be cached." + -> flush this entry now */ + /* entry reused during callback? */ + if (entry->state == DNS_STATE_DONE) { + entry->state = DNS_STATE_UNUSED; + } + } +} + +/** + * Receive input function for DNS response packets arriving for the dns UDP pcb. 
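The retry and TTL handling above is driven entirely by dns_tmr(). Ports using lwIP's cyclic timers get this for free via sys_check_timeouts(); for a bare-metal loop that does not, the cadence could be supplied roughly as follows (a sketch; the loop structure and sys_now() usage are assumptions of the port):

/* illustrative main-loop fragment, not part of this patch */
u32_t last_dns_tmr = sys_now();
for (;;) {
  /* ... poll the netif and feed received frames into lwIP ... */
  if ((u32_t)(sys_now() - last_dns_tmr) >= DNS_TMR_INTERVAL) {
    last_dns_tmr += DNS_TMR_INTERVAL;
    dns_tmr();   /* ticks entry->tmr / entry->ttl once per interval (1 s by default) */
  }
}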
+ */ +static void +dns_recv(void *arg, struct udp_pcb *pcb, struct pbuf *p, const ip_addr_t *addr, u16_t port) +{ + u8_t i; + u16_t txid; + u16_t res_idx; + struct dns_hdr hdr; + struct dns_answer ans; + struct dns_query qry; + u16_t nquestions, nanswers; + + LWIP_UNUSED_ARG(arg); + LWIP_UNUSED_ARG(pcb); + LWIP_UNUSED_ARG(port); + + /* is the dns message big enough ? */ + if (p->tot_len < (SIZEOF_DNS_HDR + SIZEOF_DNS_QUERY)) { + LWIP_DEBUGF(DNS_DEBUG, ("dns_recv: pbuf too small\n")); + /* free pbuf and return */ + goto ignore_packet; + } + + /* copy dns payload inside static buffer for processing */ + if (pbuf_copy_partial(p, &hdr, SIZEOF_DNS_HDR, 0) == SIZEOF_DNS_HDR) { + /* Match the ID in the DNS header with the name table. */ + txid = lwip_htons(hdr.id); + for (i = 0; i < DNS_TABLE_SIZE; i++) { + struct dns_table_entry *entry = &dns_table[i]; + if ((entry->state == DNS_STATE_ASKING) && + (entry->txid == txid)) { + + /* We only care about the question(s) and the answers. The authrr + and the extrarr are simply discarded. */ + nquestions = lwip_htons(hdr.numquestions); + nanswers = lwip_htons(hdr.numanswers); + + /* Check for correct response. */ + if ((hdr.flags1 & DNS_FLAG1_RESPONSE) == 0) { + LWIP_DEBUGF(DNS_DEBUG, ("dns_recv: \"%s\": not a response\n", entry->name)); + goto ignore_packet; /* ignore this packet */ + } + if (nquestions != 1) { + LWIP_DEBUGF(DNS_DEBUG, ("dns_recv: \"%s\": response not match to query\n", entry->name)); + goto ignore_packet; /* ignore this packet */ + } + +#if LWIP_DNS_SUPPORT_MDNS_QUERIES + if (!entry->is_mdns) +#endif /* LWIP_DNS_SUPPORT_MDNS_QUERIES */ + { + /* Check whether response comes from the same network address to which the + question was sent. (RFC 5452) */ + if (!ip_addr_cmp(addr, &dns_servers[entry->server_idx])) { + goto ignore_packet; /* ignore this packet */ + } + } + + /* Check if the name in the "question" part match with the name in the entry and + skip it if equal. */ + res_idx = dns_compare_name(entry->name, p, SIZEOF_DNS_HDR); + if (res_idx == 0xFFFF) { + LWIP_DEBUGF(DNS_DEBUG, ("dns_recv: \"%s\": response not match to query\n", entry->name)); + goto ignore_packet; /* ignore this packet */ + } + + /* check if "question" part matches the request */ + if (pbuf_copy_partial(p, &qry, SIZEOF_DNS_QUERY, res_idx) != SIZEOF_DNS_QUERY) { + goto ignore_packet; /* ignore this packet */ + } + if ((qry.cls != PP_HTONS(DNS_RRCLASS_IN)) || + (LWIP_DNS_ADDRTYPE_IS_IPV6(entry->reqaddrtype) && (qry.type != PP_HTONS(DNS_RRTYPE_AAAA))) || + (!LWIP_DNS_ADDRTYPE_IS_IPV6(entry->reqaddrtype) && (qry.type != PP_HTONS(DNS_RRTYPE_A)))) { + LWIP_DEBUGF(DNS_DEBUG, ("dns_recv: \"%s\": response not match to query\n", entry->name)); + goto ignore_packet; /* ignore this packet */ + } + /* skip the rest of the "question" part */ + if (res_idx + SIZEOF_DNS_QUERY > 0xFFFF) { + goto ignore_packet; + } + res_idx = (u16_t)(res_idx + SIZEOF_DNS_QUERY); + + /* Check for error. If so, call callback to inform. 
*/ + if (hdr.flags2 & DNS_FLAG2_ERR_MASK) { + LWIP_DEBUGF(DNS_DEBUG, ("dns_recv: \"%s\": error in flags\n", entry->name)); + + /* if there is another backup DNS server to try + * then don't stop the DNS request + */ + if (dns_backupserver_available(entry)) { + /* avoid retrying the same server */ + entry->retries = DNS_MAX_RETRIES-1; + entry->tmr = 1; + + /* contact next available server for this entry */ + dns_check_entry(i); + + goto ignore_packet; + } + } else { + while ((nanswers > 0) && (res_idx < p->tot_len)) { + /* skip answer resource record's host name */ + res_idx = dns_skip_name(p, res_idx); + if (res_idx == 0xFFFF) { + goto ignore_packet; /* ignore this packet */ + } + + /* Check for IP address type and Internet class. Others are discarded. */ + if (pbuf_copy_partial(p, &ans, SIZEOF_DNS_ANSWER, res_idx) != SIZEOF_DNS_ANSWER) { + goto ignore_packet; /* ignore this packet */ + } + if (res_idx + SIZEOF_DNS_ANSWER > 0xFFFF) { + goto ignore_packet; + } + res_idx = (u16_t)(res_idx + SIZEOF_DNS_ANSWER); + + if (ans.cls == PP_HTONS(DNS_RRCLASS_IN)) { +#if LWIP_IPV4 + if ((ans.type == PP_HTONS(DNS_RRTYPE_A)) && (ans.len == PP_HTONS(sizeof(ip4_addr_t)))) { +#if LWIP_IPV4 && LWIP_IPV6 + if (!LWIP_DNS_ADDRTYPE_IS_IPV6(entry->reqaddrtype)) +#endif /* LWIP_IPV4 && LWIP_IPV6 */ + { + ip4_addr_t ip4addr; + /* read the IP address after answer resource record's header */ + if (pbuf_copy_partial(p, &ip4addr, sizeof(ip4_addr_t), res_idx) != sizeof(ip4_addr_t)) { + goto ignore_packet; /* ignore this packet */ + } + ip_addr_copy_from_ip4(dns_table[i].ipaddr, ip4addr); + pbuf_free(p); + /* handle correct response */ + dns_correct_response(i, lwip_ntohl(ans.ttl)); + return; + } + } +#endif /* LWIP_IPV4 */ +#if LWIP_IPV6 + if ((ans.type == PP_HTONS(DNS_RRTYPE_AAAA)) && (ans.len == PP_HTONS(sizeof(ip6_addr_p_t)))) { +#if LWIP_IPV4 && LWIP_IPV6 + if (LWIP_DNS_ADDRTYPE_IS_IPV6(entry->reqaddrtype)) +#endif /* LWIP_IPV4 && LWIP_IPV6 */ + { + ip6_addr_p_t ip6addr; + /* read the IP address after answer resource record's header */ + if (pbuf_copy_partial(p, &ip6addr, sizeof(ip6_addr_p_t), res_idx) != sizeof(ip6_addr_p_t)) { + goto ignore_packet; /* ignore this packet */ + } + /* @todo: scope ip6addr? Might be required for link-local addresses at least? 
*/ + ip_addr_copy_from_ip6_packed(dns_table[i].ipaddr, ip6addr); + pbuf_free(p); + /* handle correct response */ + dns_correct_response(i, lwip_ntohl(ans.ttl)); + return; + } + } +#endif /* LWIP_IPV6 */ + } + /* skip this answer */ + if ((int)(res_idx + lwip_htons(ans.len)) > 0xFFFF) { + goto ignore_packet; /* ignore this packet */ + } + res_idx = (u16_t)(res_idx + lwip_htons(ans.len)); + --nanswers; + } +#if LWIP_IPV4 && LWIP_IPV6 + if ((entry->reqaddrtype == LWIP_DNS_ADDRTYPE_IPV4_IPV6) || + (entry->reqaddrtype == LWIP_DNS_ADDRTYPE_IPV6_IPV4)) { + if (entry->reqaddrtype == LWIP_DNS_ADDRTYPE_IPV4_IPV6) { + /* IPv4 failed, try IPv6 */ + dns_table[i].reqaddrtype = LWIP_DNS_ADDRTYPE_IPV6; + } else { + /* IPv6 failed, try IPv4 */ + dns_table[i].reqaddrtype = LWIP_DNS_ADDRTYPE_IPV4; + } + pbuf_free(p); + dns_table[i].state = DNS_STATE_NEW; + dns_check_entry(i); + return; + } +#endif /* LWIP_IPV4 && LWIP_IPV6 */ + LWIP_DEBUGF(DNS_DEBUG, ("dns_recv: \"%s\": error in response\n", entry->name)); + } + /* call callback to indicate error, clean up memory and return */ + pbuf_free(p); + dns_call_found(i, NULL); + dns_table[i].state = DNS_STATE_UNUSED; + return; + } + } + } + +ignore_packet: + /* deallocate memory and return */ + pbuf_free(p); + return; +} + +/** + * Queues a new hostname to resolve and sends out a DNS query for that hostname + * + * @param name the hostname that is to be queried + * @param hostnamelen length of the hostname + * @param found a callback function to be called on success, failure or timeout + * @param callback_arg argument to pass to the callback function + * @return err_t return code. + */ +static err_t +dns_enqueue(const char *name, size_t hostnamelen, dns_found_callback found, + void *callback_arg LWIP_DNS_ADDRTYPE_ARG(u8_t dns_addrtype) LWIP_DNS_ISMDNS_ARG(u8_t is_mdns)) +{ + u8_t i; + u8_t lseq, lseqi; + struct dns_table_entry *entry = NULL; + size_t namelen; + struct dns_req_entry *req; + +#if ((LWIP_DNS_SECURE & LWIP_DNS_SECURE_NO_MULTIPLE_OUTSTANDING) != 0) + u8_t r; + /* check for duplicate entries */ + for (i = 0; i < DNS_TABLE_SIZE; i++) { + if ((dns_table[i].state == DNS_STATE_ASKING) && + (lwip_strnicmp(name, dns_table[i].name, sizeof(dns_table[i].name)) == 0)) { +#if LWIP_IPV4 && LWIP_IPV6 + if (dns_table[i].reqaddrtype != dns_addrtype) { + /* requested address types don't match + this can lead to 2 concurrent requests, but mixing the address types + for the same host should not be that common */ + continue; + } +#endif /* LWIP_IPV4 && LWIP_IPV6 */ + /* this is a duplicate entry, find a free request entry */ + for (r = 0; r < DNS_MAX_REQUESTS; r++) { + if (dns_requests[r].found == 0) { + dns_requests[r].found = found; + dns_requests[r].arg = callback_arg; + dns_requests[r].dns_table_idx = i; + LWIP_DNS_SET_ADDRTYPE(dns_requests[r].reqaddrtype, dns_addrtype); + LWIP_DEBUGF(DNS_DEBUG, ("dns_enqueue: \"%s\": duplicate request\n", name)); + return ERR_INPROGRESS; + } + } + } + } + /* no duplicate entries found */ +#endif + + /* search an unused entry, or the oldest one */ + lseq = 0; + lseqi = DNS_TABLE_SIZE; + for (i = 0; i < DNS_TABLE_SIZE; ++i) { + entry = &dns_table[i]; + /* is it an unused entry ? 
*/ + if (entry->state == DNS_STATE_UNUSED) { + break; + } + /* check if this is the oldest completed entry */ + if (entry->state == DNS_STATE_DONE) { + u8_t age = (u8_t)(dns_seqno - entry->seqno); + if (age > lseq) { + lseq = age; + lseqi = i; + } + } + } + + /* if we don't have found an unused entry, use the oldest completed one */ + if (i == DNS_TABLE_SIZE) { + if ((lseqi >= DNS_TABLE_SIZE) || (dns_table[lseqi].state != DNS_STATE_DONE)) { + /* no entry can be used now, table is full */ + LWIP_DEBUGF(DNS_DEBUG, ("dns_enqueue: \"%s\": DNS entries table is full\n", name)); + return ERR_MEM; + } else { + /* use the oldest completed one */ + i = lseqi; + entry = &dns_table[i]; + } + } + +#if ((LWIP_DNS_SECURE & LWIP_DNS_SECURE_NO_MULTIPLE_OUTSTANDING) != 0) + /* find a free request entry */ + req = NULL; + for (r = 0; r < DNS_MAX_REQUESTS; r++) { + if (dns_requests[r].found == NULL) { + req = &dns_requests[r]; + break; + } + } + if (req == NULL) { + /* no request entry can be used now, table is full */ + LWIP_DEBUGF(DNS_DEBUG, ("dns_enqueue: \"%s\": DNS request entries table is full\n", name)); + return ERR_MEM; + } + req->dns_table_idx = i; +#else + /* in this configuration, the entry index is the same as the request index */ + req = &dns_requests[i]; +#endif + + /* use this entry */ + LWIP_DEBUGF(DNS_DEBUG, ("dns_enqueue: \"%s\": use DNS entry %"U16_F"\n", name, (u16_t)(i))); + + /* fill the entry */ + entry->state = DNS_STATE_NEW; + entry->seqno = dns_seqno; + LWIP_DNS_SET_ADDRTYPE(entry->reqaddrtype, dns_addrtype); + LWIP_DNS_SET_ADDRTYPE(req->reqaddrtype, dns_addrtype); + req->found = found; + req->arg = callback_arg; + namelen = LWIP_MIN(hostnamelen, DNS_MAX_NAME_LENGTH - 1); + MEMCPY(entry->name, name, namelen); + entry->name[namelen] = 0; + +#if ((LWIP_DNS_SECURE & LWIP_DNS_SECURE_RAND_SRC_PORT) != 0) + entry->pcb_idx = dns_alloc_pcb(); + if (entry->pcb_idx >= DNS_MAX_SOURCE_PORTS) { + /* failed to get a UDP pcb */ + LWIP_DEBUGF(DNS_DEBUG, ("dns_enqueue: \"%s\": failed to allocate a pcb\n", name)); + entry->state = DNS_STATE_UNUSED; + req->found = NULL; + return ERR_MEM; + } + LWIP_DEBUGF(DNS_DEBUG, ("dns_enqueue: \"%s\": use DNS pcb %"U16_F"\n", name, (u16_t)(entry->pcb_idx))); +#endif + +#if LWIP_DNS_SUPPORT_MDNS_QUERIES + entry->is_mdns = is_mdns; +#endif + + dns_seqno++; + + /* force to send query without waiting timer */ + dns_check_entry(i); + + /* dns query is enqueued */ + return ERR_INPROGRESS; +} + +/** + * @ingroup dns + * Resolve a hostname (string) into an IP address. + * NON-BLOCKING callback version for use with raw API!!! + * + * Returns immediately with one of err_t return codes: + * - ERR_OK if hostname is a valid IP address string or the host + * name is already in the local names table. + * - ERR_INPROGRESS enqueue a request to be sent to the DNS server + * for resolution if no errors are present. + * - ERR_ARG: dns client not initialized or invalid hostname + * + * @param hostname the hostname that is to be queried + * @param addr pointer to a ip_addr_t where to store the address if it is already + * cached in the dns_table (only valid if ERR_OK is returned!) + * @param found a callback function to be called on success, failure or timeout (only if + * ERR_INPROGRESS is returned!) + * @param callback_arg argument to pass to the callback function + * @return a err_t return code. 
+ */ +err_t +dns_gethostbyname(const char *hostname, ip_addr_t *addr, dns_found_callback found, + void *callback_arg) +{ + return dns_gethostbyname_addrtype(hostname, addr, found, callback_arg, LWIP_DNS_ADDRTYPE_DEFAULT); +} + +/** + * @ingroup dns + * Like dns_gethostbyname, but returned address type can be controlled: + * @param hostname the hostname that is to be queried + * @param addr pointer to a ip_addr_t where to store the address if it is already + * cached in the dns_table (only valid if ERR_OK is returned!) + * @param found a callback function to be called on success, failure or timeout (only if + * ERR_INPROGRESS is returned!) + * @param callback_arg argument to pass to the callback function + * @param dns_addrtype - LWIP_DNS_ADDRTYPE_IPV4_IPV6: try to resolve IPv4 first, try IPv6 if IPv4 fails only + * - LWIP_DNS_ADDRTYPE_IPV6_IPV4: try to resolve IPv6 first, try IPv4 if IPv6 fails only + * - LWIP_DNS_ADDRTYPE_IPV4: try to resolve IPv4 only + * - LWIP_DNS_ADDRTYPE_IPV6: try to resolve IPv6 only + */ +err_t +dns_gethostbyname_addrtype(const char *hostname, ip_addr_t *addr, dns_found_callback found, + void *callback_arg, u8_t dns_addrtype) +{ + size_t hostnamelen; +#if LWIP_DNS_SUPPORT_MDNS_QUERIES + u8_t is_mdns; +#endif + /* not initialized or no valid server yet, or invalid addr pointer + * or invalid hostname or invalid hostname length */ + if ((addr == NULL) || + (!hostname) || (!hostname[0])) { + return ERR_ARG; + } +#if ((LWIP_DNS_SECURE & LWIP_DNS_SECURE_RAND_SRC_PORT) == 0) + if (dns_pcbs[0] == NULL) { + return ERR_ARG; + } +#endif + hostnamelen = strlen(hostname); + if (hostnamelen >= DNS_MAX_NAME_LENGTH) { + LWIP_DEBUGF(DNS_DEBUG, ("dns_gethostbyname: name too long to resolve")); + return ERR_ARG; + } + + +#if LWIP_HAVE_LOOPIF + if (strcmp(hostname, "localhost") == 0) { + ip_addr_set_loopback(LWIP_DNS_ADDRTYPE_IS_IPV6(dns_addrtype), addr); + return ERR_OK; + } +#endif /* LWIP_HAVE_LOOPIF */ + + /* host name already in octet notation? set ip addr and return ERR_OK */ + if (ipaddr_aton(hostname, addr)) { +#if LWIP_IPV4 && LWIP_IPV6 + if ((IP_IS_V6(addr) && (dns_addrtype != LWIP_DNS_ADDRTYPE_IPV4)) || + (IP_IS_V4(addr) && (dns_addrtype != LWIP_DNS_ADDRTYPE_IPV6))) +#endif /* LWIP_IPV4 && LWIP_IPV6 */ + { + return ERR_OK; + } + } + /* already have this address cached? 
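For context, a hedged usage sketch of the callback API documented above. The function and server/host names are placeholders, and raw-API calls like these belong in the tcpip thread (or with the lwIP core locked):

/* Illustrative usage sketch (not part of this diff) */
#include <stdio.h>
#include "lwip/dns.h"
#include "lwip/ip_addr.h"

static void
my_dns_found(const char *name, const ip_addr_t *ipaddr, void *arg)
{
  LWIP_UNUSED_ARG(arg);
  if (ipaddr != NULL) {
    printf("%s -> %s\n", name, ipaddr_ntoa(ipaddr));
  } else {
    printf("%s: resolution failed or timed out\n", name);
  }
}

static void
resolve_example(void)
{
  ip_addr_t server, result;
  err_t err;

  ipaddr_aton("192.0.2.53", &server);        /* placeholder DNS server address */
  dns_setserver(0, &server);                 /* without a server, ERR_VAL is returned below */

  err = dns_gethostbyname_addrtype("example.org", &result, my_dns_found, NULL,
                                   LWIP_DNS_ADDRTYPE_IPV4);
  if (err == ERR_OK) {
    printf("cached: %s\n", ipaddr_ntoa(&result));   /* answer already known */
  } else if (err == ERR_INPROGRESS) {
    /* query enqueued; my_dns_found() fires later from the DNS timer/receive path */
  } else {
    /* ERR_ARG or ERR_VAL: bad arguments, client not ready, or no server configured */
  }
}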
*/ + if (dns_lookup(hostname, addr LWIP_DNS_ADDRTYPE_ARG(dns_addrtype)) == ERR_OK) { + return ERR_OK; + } +#if LWIP_IPV4 && LWIP_IPV6 + if ((dns_addrtype == LWIP_DNS_ADDRTYPE_IPV4_IPV6) || (dns_addrtype == LWIP_DNS_ADDRTYPE_IPV6_IPV4)) { + /* fallback to 2nd IP type and try again to lookup */ + u8_t fallback; + if (dns_addrtype == LWIP_DNS_ADDRTYPE_IPV4_IPV6) { + fallback = LWIP_DNS_ADDRTYPE_IPV6; + } else { + fallback = LWIP_DNS_ADDRTYPE_IPV4; + } + if (dns_lookup(hostname, addr LWIP_DNS_ADDRTYPE_ARG(fallback)) == ERR_OK) { + return ERR_OK; + } + } +#else /* LWIP_IPV4 && LWIP_IPV6 */ + LWIP_UNUSED_ARG(dns_addrtype); +#endif /* LWIP_IPV4 && LWIP_IPV6 */ + +#if LWIP_DNS_SUPPORT_MDNS_QUERIES + if (strstr(hostname, ".local") == &hostname[hostnamelen] - 6) { + is_mdns = 1; + } else { + is_mdns = 0; + } + + if (!is_mdns) +#endif /* LWIP_DNS_SUPPORT_MDNS_QUERIES */ + { + /* prevent calling found callback if no server is set, return error instead */ + if (ip_addr_isany_val(dns_servers[0])) { + return ERR_VAL; + } + } + + /* queue query with specified callback */ + return dns_enqueue(hostname, hostnamelen, found, callback_arg LWIP_DNS_ADDRTYPE_ARG(dns_addrtype) + LWIP_DNS_ISMDNS_ARG(is_mdns)); +} + +#endif /* LWIP_DNS */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/inet_chksum.c b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/inet_chksum.c new file mode 100644 index 0000000..25255b9 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/inet_chksum.c @@ -0,0 +1,608 @@ +/** + * @file + * Internet checksum functions.\n + * + * These are some reference implementations of the checksum algorithm, with the + * aim of being simple, correct and fully portable. Checksumming is the + * first thing you would want to optimize for your platform. If you create + * your own version, link it in and in your cc.h put: + * + * \#define LWIP_CHKSUM your_checksum_routine + * + * Or you can select from the implementations below by defining + * LWIP_CHKSUM_ALGORITHM to 1, 2 or 3. + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ + +#include "lwip/opt.h" + +#include "lwip/inet_chksum.h" +#include "lwip/def.h" +#include "lwip/ip_addr.h" + +#include + +#ifndef LWIP_CHKSUM +# define LWIP_CHKSUM lwip_standard_chksum +# ifndef LWIP_CHKSUM_ALGORITHM +# define LWIP_CHKSUM_ALGORITHM 2 +# endif +u16_t lwip_standard_chksum(const void *dataptr, int len); +#endif +/* If none set: */ +#ifndef LWIP_CHKSUM_ALGORITHM +# define LWIP_CHKSUM_ALGORITHM 0 +#endif + +#if (LWIP_CHKSUM_ALGORITHM == 1) /* Version #1 */ +/** + * lwip checksum + * + * @param dataptr points to start of data to be summed at any boundary + * @param len length of data to be summed + * @return host order (!) lwip checksum (non-inverted Internet sum) + * + * @note accumulator size limits summable length to 64k + * @note host endianess is irrelevant (p3 RFC1071) + */ +u16_t +lwip_standard_chksum(const void *dataptr, int len) +{ + u32_t acc; + u16_t src; + const u8_t *octetptr; + + acc = 0; + /* dataptr may be at odd or even addresses */ + octetptr = (const u8_t *)dataptr; + while (len > 1) { + /* declare first octet as most significant + thus assume network order, ignoring host order */ + src = (*octetptr) << 8; + octetptr++; + /* declare second octet as least significant */ + src |= (*octetptr); + octetptr++; + acc += src; + len -= 2; + } + if (len > 0) { + /* accumulate remaining octet */ + src = (*octetptr) << 8; + acc += src; + } + /* add deferred carry bits */ + acc = (acc >> 16) + (acc & 0x0000ffffUL); + if ((acc & 0xffff0000UL) != 0) { + acc = (acc >> 16) + (acc & 0x0000ffffUL); + } + /* This maybe a little confusing: reorder sum using lwip_htons() + instead of lwip_ntohs() since it has a little less call overhead. + The caller must invert bits for Internet sum ! */ + return lwip_htons((u16_t)acc); +} +#endif + +#if (LWIP_CHKSUM_ALGORITHM == 2) /* Alternative version #2 */ +/* + * Curt McDowell + * Broadcom Corp. + * csm@broadcom.com + * + * IP checksum two bytes at a time with support for + * unaligned buffer. + * Works for len up to and including 0x20000. + * by Curt McDowell, Broadcom Corp. 12/08/2005 + * + * @param dataptr points to start of data to be summed at any boundary + * @param len length of data to be summed + * @return host order (!) lwip checksum (non-inverted Internet sum) + */ +u16_t +lwip_standard_chksum(const void *dataptr, int len) +{ + const u8_t *pb = (const u8_t *)dataptr; + const u16_t *ps; + u16_t t = 0; + u32_t sum = 0; + int odd = ((mem_ptr_t)pb & 1); + + /* Get aligned to u16_t */ + if (odd && len > 0) { + ((u8_t *)&t)[1] = *pb++; + len--; + } + + /* Add the bulk of the data */ + ps = (const u16_t *)(const void *)pb; + while (len > 1) { + sum += *ps++; + len -= 2; + } + + /* Consume left-over byte, if any */ + if (len > 0) { + ((u8_t *)&t)[0] = *(const u8_t *)ps; + } + + /* Add end bytes */ + sum += t; + + /* Fold 32-bit sum to 16 bits + calling this twice is probably faster than if statements... 
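As the file header notes, a port either selects one of the three reference implementations via LWIP_CHKSUM_ALGORITHM (this tree defaults to 2 when LWIP_CHKSUM is left undefined) or supplies its own routine through LWIP_CHKSUM. A hedged cc.h/lwipopts.h sketch; my_hw_chksum is a placeholder for a hardware- or assembly-backed routine:

/* arch/cc.h or lwipopts.h (illustrative sketch, not part of this diff) */

/* Option A: pick one of the portable implementations in this file */
#define LWIP_CHKSUM_ALGORITHM 3          /* unrolled 32-bit variant */

/* Option B: plug in a custom routine with the same signature instead */
/* u16_t my_hw_chksum(const void *dataptr, int len); */
/* #define LWIP_CHKSUM my_hw_chksum */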
*/ + sum = FOLD_U32T(sum); + sum = FOLD_U32T(sum); + + /* Swap if alignment was odd */ + if (odd) { + sum = SWAP_BYTES_IN_WORD(sum); + } + + return (u16_t)sum; +} +#endif + +#if (LWIP_CHKSUM_ALGORITHM == 3) /* Alternative version #3 */ +/** + * An optimized checksum routine. Basically, it uses loop-unrolling on + * the checksum loop, treating the head and tail bytes specially, whereas + * the inner loop acts on 8 bytes at a time. + * + * @arg start of buffer to be checksummed. May be an odd byte address. + * @len number of bytes in the buffer to be checksummed. + * @return host order (!) lwip checksum (non-inverted Internet sum) + * + * by Curt McDowell, Broadcom Corp. December 8th, 2005 + */ +u16_t +lwip_standard_chksum(const void *dataptr, int len) +{ + const u8_t *pb = (const u8_t *)dataptr; + const u16_t *ps; + u16_t t = 0; + const u32_t *pl; + u32_t sum = 0, tmp; + /* starts at odd byte address? */ + int odd = ((mem_ptr_t)pb & 1); + + if (odd && len > 0) { + ((u8_t *)&t)[1] = *pb++; + len--; + } + + ps = (const u16_t *)(const void *)pb; + + if (((mem_ptr_t)ps & 3) && len > 1) { + sum += *ps++; + len -= 2; + } + + pl = (const u32_t *)(const void *)ps; + + while (len > 7) { + tmp = sum + *pl++; /* ping */ + if (tmp < sum) { + tmp++; /* add back carry */ + } + + sum = tmp + *pl++; /* pong */ + if (sum < tmp) { + sum++; /* add back carry */ + } + + len -= 8; + } + + /* make room in upper bits */ + sum = FOLD_U32T(sum); + + ps = (const u16_t *)pl; + + /* 16-bit aligned word remaining? */ + while (len > 1) { + sum += *ps++; + len -= 2; + } + + /* dangling tail byte remaining? */ + if (len > 0) { /* include odd byte */ + ((u8_t *)&t)[0] = *(const u8_t *)ps; + } + + sum += t; /* add end bytes */ + + /* Fold 32-bit sum to 16 bits + calling this twice is probably faster than if statements... */ + sum = FOLD_U32T(sum); + sum = FOLD_U32T(sum); + + if (odd) { + sum = SWAP_BYTES_IN_WORD(sum); + } + + return (u16_t)sum; +} +#endif + +/** Parts of the pseudo checksum which are common to IPv4 and IPv6 */ +static u16_t +inet_cksum_pseudo_base(struct pbuf *p, u8_t proto, u16_t proto_len, u32_t acc) +{ + struct pbuf *q; + int swapped = 0; + + /* iterate through all pbuf in chain */ + for (q = p; q != NULL; q = q->next) { + LWIP_DEBUGF(INET_DEBUG, ("inet_chksum_pseudo(): checksumming pbuf %p (has next %p) \n", + (void *)q, (void *)q->next)); + acc += LWIP_CHKSUM(q->payload, q->len); + /*LWIP_DEBUGF(INET_DEBUG, ("inet_chksum_pseudo(): unwrapped lwip_chksum()=%"X32_F" \n", acc));*/ + /* just executing this next line is probably faster that the if statement needed + to check whether we really need to execute it, and does no harm */ + acc = FOLD_U32T(acc); + if (q->len % 2 != 0) { + swapped = !swapped; + acc = SWAP_BYTES_IN_WORD(acc); + } + /*LWIP_DEBUGF(INET_DEBUG, ("inet_chksum_pseudo(): wrapped lwip_chksum()=%"X32_F" \n", acc));*/ + } + + if (swapped) { + acc = SWAP_BYTES_IN_WORD(acc); + } + + acc += (u32_t)lwip_htons((u16_t)proto); + acc += (u32_t)lwip_htons(proto_len); + + /* Fold 32-bit sum to 16 bits + calling this twice is probably faster than if statements... */ + acc = FOLD_U32T(acc); + acc = FOLD_U32T(acc); + LWIP_DEBUGF(INET_DEBUG, ("inet_chksum_pseudo(): pbuf chain lwip_chksum()=%"X32_F"\n", acc)); + return (u16_t)~(acc & 0xffffUL); +} + +#if LWIP_IPV4 +/* inet_chksum_pseudo: + * + * Calculates the IPv4 pseudo Internet checksum used by TCP and UDP for a pbuf chain. + * IP addresses are expected to be in network byte order. 
+ * + * @param p chain of pbufs over that a checksum should be calculated (ip data part) + * @param src source ip address (used for checksum of pseudo header) + * @param dst destination ip address (used for checksum of pseudo header) + * @param proto ip protocol (used for checksum of pseudo header) + * @param proto_len length of the ip data part (used for checksum of pseudo header) + * @return checksum (as u16_t) to be saved directly in the protocol header + */ +u16_t +inet_chksum_pseudo(struct pbuf *p, u8_t proto, u16_t proto_len, + const ip4_addr_t *src, const ip4_addr_t *dest) +{ + u32_t acc; + u32_t addr; + + addr = ip4_addr_get_u32(src); + acc = (addr & 0xffffUL); + acc = (u32_t)(acc + ((addr >> 16) & 0xffffUL)); + addr = ip4_addr_get_u32(dest); + acc = (u32_t)(acc + (addr & 0xffffUL)); + acc = (u32_t)(acc + ((addr >> 16) & 0xffffUL)); + /* fold down to 16 bits */ + acc = FOLD_U32T(acc); + acc = FOLD_U32T(acc); + + return inet_cksum_pseudo_base(p, proto, proto_len, acc); +} +#endif /* LWIP_IPV4 */ + +#if LWIP_IPV6 +/** + * Calculates the checksum with IPv6 pseudo header used by TCP and UDP for a pbuf chain. + * IPv6 addresses are expected to be in network byte order. + * + * @param p chain of pbufs over that a checksum should be calculated (ip data part) + * @param proto ipv6 protocol/next header (used for checksum of pseudo header) + * @param proto_len length of the ipv6 payload (used for checksum of pseudo header) + * @param src source ipv6 address (used for checksum of pseudo header) + * @param dest destination ipv6 address (used for checksum of pseudo header) + * @return checksum (as u16_t) to be saved directly in the protocol header + */ +u16_t +ip6_chksum_pseudo(struct pbuf *p, u8_t proto, u16_t proto_len, + const ip6_addr_t *src, const ip6_addr_t *dest) +{ + u32_t acc = 0; + u32_t addr; + u8_t addr_part; + + for (addr_part = 0; addr_part < 4; addr_part++) { + addr = src->addr[addr_part]; + acc = (u32_t)(acc + (addr & 0xffffUL)); + acc = (u32_t)(acc + ((addr >> 16) & 0xffffUL)); + addr = dest->addr[addr_part]; + acc = (u32_t)(acc + (addr & 0xffffUL)); + acc = (u32_t)(acc + ((addr >> 16) & 0xffffUL)); + } + /* fold down to 16 bits */ + acc = FOLD_U32T(acc); + acc = FOLD_U32T(acc); + + return inet_cksum_pseudo_base(p, proto, proto_len, acc); +} +#endif /* LWIP_IPV6 */ + +/* ip_chksum_pseudo: + * + * Calculates the IPv4 or IPv6 pseudo Internet checksum used by TCP and UDP for a pbuf chain. + * IP addresses are expected to be in network byte order. 
+ * + * @param p chain of pbufs over that a checksum should be calculated (ip data part) + * @param src source ip address (used for checksum of pseudo header) + * @param dst destination ip address (used for checksum of pseudo header) + * @param proto ip protocol (used for checksum of pseudo header) + * @param proto_len length of the ip data part (used for checksum of pseudo header) + * @return checksum (as u16_t) to be saved directly in the protocol header + */ +u16_t +ip_chksum_pseudo(struct pbuf *p, u8_t proto, u16_t proto_len, + const ip_addr_t *src, const ip_addr_t *dest) +{ +#if LWIP_IPV6 + if (IP_IS_V6(dest)) { + return ip6_chksum_pseudo(p, proto, proto_len, ip_2_ip6(src), ip_2_ip6(dest)); + } +#endif /* LWIP_IPV6 */ +#if LWIP_IPV4 && LWIP_IPV6 + else +#endif /* LWIP_IPV4 && LWIP_IPV6 */ +#if LWIP_IPV4 + { + return inet_chksum_pseudo(p, proto, proto_len, ip_2_ip4(src), ip_2_ip4(dest)); + } +#endif /* LWIP_IPV4 */ +} + +/** Parts of the pseudo checksum which are common to IPv4 and IPv6 */ +static u16_t +inet_cksum_pseudo_partial_base(struct pbuf *p, u8_t proto, u16_t proto_len, + u16_t chksum_len, u32_t acc) +{ + struct pbuf *q; + int swapped = 0; + u16_t chklen; + + /* iterate through all pbuf in chain */ + for (q = p; (q != NULL) && (chksum_len > 0); q = q->next) { + LWIP_DEBUGF(INET_DEBUG, ("inet_chksum_pseudo(): checksumming pbuf %p (has next %p) \n", + (void *)q, (void *)q->next)); + chklen = q->len; + if (chklen > chksum_len) { + chklen = chksum_len; + } + acc += LWIP_CHKSUM(q->payload, chklen); + chksum_len = (u16_t)(chksum_len - chklen); + LWIP_ASSERT("delete me", chksum_len < 0x7fff); + /*LWIP_DEBUGF(INET_DEBUG, ("inet_chksum_pseudo(): unwrapped lwip_chksum()=%"X32_F" \n", acc));*/ + /* fold the upper bit down */ + acc = FOLD_U32T(acc); + if (q->len % 2 != 0) { + swapped = !swapped; + acc = SWAP_BYTES_IN_WORD(acc); + } + /*LWIP_DEBUGF(INET_DEBUG, ("inet_chksum_pseudo(): wrapped lwip_chksum()=%"X32_F" \n", acc));*/ + } + + if (swapped) { + acc = SWAP_BYTES_IN_WORD(acc); + } + + acc += (u32_t)lwip_htons((u16_t)proto); + acc += (u32_t)lwip_htons(proto_len); + + /* Fold 32-bit sum to 16 bits + calling this twice is probably faster than if statements... */ + acc = FOLD_U32T(acc); + acc = FOLD_U32T(acc); + LWIP_DEBUGF(INET_DEBUG, ("inet_chksum_pseudo(): pbuf chain lwip_chksum()=%"X32_F"\n", acc)); + return (u16_t)~(acc & 0xffffUL); +} + +#if LWIP_IPV4 +/* inet_chksum_pseudo_partial: + * + * Calculates the IPv4 pseudo Internet checksum used by TCP and UDP for a pbuf chain. + * IP addresses are expected to be in network byte order. 
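To show where these pseudo-header helpers plug in, here is a hedged sketch of a transport-layer checksum fill in the style of lwIP's udp.c (simplified: no checksum offload or UDP-Lite handling). The include paths and the RFC 768 zero-value fixup reflect stock lwIP and should be checked against this tree:

/* Illustrative sketch (not part of this diff) */
#include "lwip/inet_chksum.h"
#include "lwip/ip_addr.h"
#include "lwip/pbuf.h"
#include "lwip/prot/ip.h"     /* IP_PROTO_UDP */
#include "lwip/prot/udp.h"    /* struct udp_hdr */

static void
fill_udp_checksum(struct pbuf *p, struct udp_hdr *udphdr,
                  const ip_addr_t *src, const ip_addr_t *dst)
{
  u16_t chksum;

  udphdr->chksum = 0;                                   /* field must be 0 while summing */
  /* p covers the UDP header plus payload; the result is ready to store as-is */
  chksum = ip_chksum_pseudo(p, IP_PROTO_UDP, p->tot_len, src, dst);
  if (chksum == 0x0000) {
    chksum = 0xffff;                                    /* RFC 768: 0 means "no checksum" */
  }
  udphdr->chksum = chksum;
}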
+ * + * @param p chain of pbufs over that a checksum should be calculated (ip data part) + * @param src source ip address (used for checksum of pseudo header) + * @param dst destination ip address (used for checksum of pseudo header) + * @param proto ip protocol (used for checksum of pseudo header) + * @param proto_len length of the ip data part (used for checksum of pseudo header) + * @return checksum (as u16_t) to be saved directly in the protocol header + */ +u16_t +inet_chksum_pseudo_partial(struct pbuf *p, u8_t proto, u16_t proto_len, + u16_t chksum_len, const ip4_addr_t *src, const ip4_addr_t *dest) +{ + u32_t acc; + u32_t addr; + + addr = ip4_addr_get_u32(src); + acc = (addr & 0xffffUL); + acc = (u32_t)(acc + ((addr >> 16) & 0xffffUL)); + addr = ip4_addr_get_u32(dest); + acc = (u32_t)(acc + (addr & 0xffffUL)); + acc = (u32_t)(acc + ((addr >> 16) & 0xffffUL)); + /* fold down to 16 bits */ + acc = FOLD_U32T(acc); + acc = FOLD_U32T(acc); + + return inet_cksum_pseudo_partial_base(p, proto, proto_len, chksum_len, acc); +} +#endif /* LWIP_IPV4 */ + +#if LWIP_IPV6 +/** + * Calculates the checksum with IPv6 pseudo header used by TCP and UDP for a pbuf chain. + * IPv6 addresses are expected to be in network byte order. Will only compute for a + * portion of the payload. + * + * @param p chain of pbufs over that a checksum should be calculated (ip data part) + * @param proto ipv6 protocol/next header (used for checksum of pseudo header) + * @param proto_len length of the ipv6 payload (used for checksum of pseudo header) + * @param chksum_len number of payload bytes used to compute chksum + * @param src source ipv6 address (used for checksum of pseudo header) + * @param dest destination ipv6 address (used for checksum of pseudo header) + * @return checksum (as u16_t) to be saved directly in the protocol header + */ +u16_t +ip6_chksum_pseudo_partial(struct pbuf *p, u8_t proto, u16_t proto_len, + u16_t chksum_len, const ip6_addr_t *src, const ip6_addr_t *dest) +{ + u32_t acc = 0; + u32_t addr; + u8_t addr_part; + + for (addr_part = 0; addr_part < 4; addr_part++) { + addr = src->addr[addr_part]; + acc = (u32_t)(acc + (addr & 0xffffUL)); + acc = (u32_t)(acc + ((addr >> 16) & 0xffffUL)); + addr = dest->addr[addr_part]; + acc = (u32_t)(acc + (addr & 0xffffUL)); + acc = (u32_t)(acc + ((addr >> 16) & 0xffffUL)); + } + /* fold down to 16 bits */ + acc = FOLD_U32T(acc); + acc = FOLD_U32T(acc); + + return inet_cksum_pseudo_partial_base(p, proto, proto_len, chksum_len, acc); +} +#endif /* LWIP_IPV6 */ + +/* ip_chksum_pseudo_partial: + * + * Calculates the IPv4 or IPv6 pseudo Internet checksum used by TCP and UDP for a pbuf chain. 
+ * + * @param p chain of pbufs over that a checksum should be calculated (ip data part) + * @param src source ip address (used for checksum of pseudo header) + * @param dst destination ip address (used for checksum of pseudo header) + * @param proto ip protocol (used for checksum of pseudo header) + * @param proto_len length of the ip data part (used for checksum of pseudo header) + * @return checksum (as u16_t) to be saved directly in the protocol header + */ +u16_t +ip_chksum_pseudo_partial(struct pbuf *p, u8_t proto, u16_t proto_len, + u16_t chksum_len, const ip_addr_t *src, const ip_addr_t *dest) +{ +#if LWIP_IPV6 + if (IP_IS_V6(dest)) { + return ip6_chksum_pseudo_partial(p, proto, proto_len, chksum_len, ip_2_ip6(src), ip_2_ip6(dest)); + } +#endif /* LWIP_IPV6 */ +#if LWIP_IPV4 && LWIP_IPV6 + else +#endif /* LWIP_IPV4 && LWIP_IPV6 */ +#if LWIP_IPV4 + { + return inet_chksum_pseudo_partial(p, proto, proto_len, chksum_len, ip_2_ip4(src), ip_2_ip4(dest)); + } +#endif /* LWIP_IPV4 */ +} + +/* inet_chksum: + * + * Calculates the Internet checksum over a portion of memory. Used primarily for IP + * and ICMP. + * + * @param dataptr start of the buffer to calculate the checksum (no alignment needed) + * @param len length of the buffer to calculate the checksum + * @return checksum (as u16_t) to be saved directly in the protocol header + */ + +u16_t +inet_chksum(const void *dataptr, u16_t len) +{ + return (u16_t)~(unsigned int)LWIP_CHKSUM(dataptr, len); +} + +/** + * Calculate a checksum over a chain of pbufs (without pseudo-header, much like + * inet_chksum only pbufs are used). + * + * @param p pbuf chain over that the checksum should be calculated + * @return checksum (as u16_t) to be saved directly in the protocol header + */ +u16_t +inet_chksum_pbuf(struct pbuf *p) +{ + u32_t acc; + struct pbuf *q; + int swapped = 0; + + acc = 0; + for (q = p; q != NULL; q = q->next) { + acc += LWIP_CHKSUM(q->payload, q->len); + acc = FOLD_U32T(acc); + if (q->len % 2 != 0) { + swapped = !swapped; + acc = SWAP_BYTES_IN_WORD(acc); + } + } + + if (swapped) { + acc = SWAP_BYTES_IN_WORD(acc); + } + return (u16_t)~(acc & 0xffffUL); +} + +/* These are some implementations for LWIP_CHKSUM_COPY, which copies data + * like MEMCPY but generates a checksum at the same time. Since this is a + * performance-sensitive function, you might want to create your own version + * in assembly targeted at your hardware by defining it in lwipopts.h: + * #define LWIP_CHKSUM_COPY(dst, src, len) your_chksum_copy(dst, src, len) + */ + +#if (LWIP_CHKSUM_COPY_ALGORITHM == 1) /* Version #1 */ +/** Safe but slow: first call MEMCPY, then call LWIP_CHKSUM. + * For architectures with big caches, data might still be in cache when + * generating the checksum after copying. + */ +u16_t +lwip_chksum_copy(void *dst, const void *src, u16_t len) +{ + MEMCPY(dst, src, len); + return LWIP_CHKSUM(dst, len); +} +#endif /* (LWIP_CHKSUM_COPY_ALGORITHM == 1) */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/init.c b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/init.c new file mode 100644 index 0000000..b688536 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/init.c @@ -0,0 +1,380 @@ +/** + * @file + * Modules initialization + * + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. 
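inet_chksum() is the plain Internet checksum with no pseudo header, which is what IPv4 and ICMP headers carry. A hedged sketch in the style of the lwIP ping example; the identifier value and the length handling are placeholders:

/* Illustrative sketch (not part of this diff) */
#include "lwip/inet_chksum.h"
#include "lwip/prot/icmp.h"
#include "lwip/def.h"

static void
prepare_echo_request(struct icmp_echo_hdr *iecho, u16_t total_len, u16_t seqno)
{
  ICMPH_TYPE_SET(iecho, ICMP_ECHO);
  ICMPH_CODE_SET(iecho, 0);
  iecho->chksum = 0;                         /* must be zero while summing       */
  iecho->id     = 0xAFAF;                    /* arbitrary placeholder identifier */
  iecho->seqno  = lwip_htons(seqno);

  /* total_len = ICMP header plus the payload that follows it in memory;
     if CHECKSUM_GEN_ICMP is offloaded to hardware, leave the field zeroed */
  iecho->chksum = inet_chksum(iecho, total_len);
}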
+ * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + */ + +#include "lwip/opt.h" + +#include "lwip/init.h" +#include "lwip/stats.h" +#include "lwip/sys.h" +#include "lwip/mem.h" +#include "lwip/memp.h" +#include "lwip/pbuf.h" +#include "lwip/netif.h" +#include "lwip/sockets.h" +#include "lwip/ip.h" +#include "lwip/raw.h" +#include "lwip/udp.h" +#include "lwip/priv/tcp_priv.h" +#include "lwip/igmp.h" +#include "lwip/dns.h" +#include "lwip/timeouts.h" +#include "lwip/etharp.h" +#include "lwip/ip6.h" +#include "lwip/nd6.h" +#include "lwip/mld6.h" +#include "lwip/api.h" + +#include "netif/ppp/ppp_opts.h" +#include "netif/ppp/ppp_impl.h" + +#ifndef LWIP_SKIP_PACKING_CHECK + +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +struct packed_struct_test { + PACK_STRUCT_FLD_8(u8_t dummy1); + PACK_STRUCT_FIELD(u32_t dummy2); +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif +#define PACKED_STRUCT_TEST_EXPECTED_SIZE 5 + +#endif + +/* Compile-time sanity checks for configuration errors. + * These can be done independently of LWIP_DEBUG, without penalty. 
+ */ +#ifndef BYTE_ORDER +#error "BYTE_ORDER is not defined, you have to define it in your cc.h" +#endif +#if (!IP_SOF_BROADCAST && IP_SOF_BROADCAST_RECV) +#error "If you want to use broadcast filter per pcb on recv operations, you have to define IP_SOF_BROADCAST=1 in your lwipopts.h" +#endif +#if (!LWIP_UDP && LWIP_UDPLITE) +#error "If you want to use UDP Lite, you have to define LWIP_UDP=1 in your lwipopts.h" +#endif +#if (!LWIP_UDP && LWIP_DHCP) +#error "If you want to use DHCP, you have to define LWIP_UDP=1 in your lwipopts.h" +#endif +#if (!LWIP_UDP && !LWIP_RAW && LWIP_MULTICAST_TX_OPTIONS) +#error "If you want to use LWIP_MULTICAST_TX_OPTIONS, you have to define LWIP_UDP=1 and/or LWIP_RAW=1 in your lwipopts.h" +#endif +#if (!LWIP_UDP && LWIP_DNS) +#error "If you want to use DNS, you have to define LWIP_UDP=1 in your lwipopts.h" +#endif +#if !MEMP_MEM_MALLOC /* MEMP_NUM_* checks are disabled when not using the pool allocator */ +#if (LWIP_ARP && ARP_QUEUEING && (MEMP_NUM_ARP_QUEUE<=0)) +#error "If you want to use ARP Queueing, you have to define MEMP_NUM_ARP_QUEUE>=1 in your lwipopts.h" +#endif +#if (LWIP_RAW && (MEMP_NUM_RAW_PCB<=0)) +#error "If you want to use RAW, you have to define MEMP_NUM_RAW_PCB>=1 in your lwipopts.h" +#endif +#if (LWIP_UDP && (MEMP_NUM_UDP_PCB<=0)) +#error "If you want to use UDP, you have to define MEMP_NUM_UDP_PCB>=1 in your lwipopts.h" +#endif +#if (LWIP_TCP && (MEMP_NUM_TCP_PCB<=0)) +#error "If you want to use TCP, you have to define MEMP_NUM_TCP_PCB>=1 in your lwipopts.h" +#endif +#if (LWIP_IGMP && (MEMP_NUM_IGMP_GROUP<=1)) +#error "If you want to use IGMP, you have to define MEMP_NUM_IGMP_GROUP>1 in your lwipopts.h" +#endif +#if (LWIP_IGMP && !LWIP_MULTICAST_TX_OPTIONS) +#error "If you want to use IGMP, you have to define LWIP_MULTICAST_TX_OPTIONS==1 in your lwipopts.h" +#endif +#if (LWIP_IGMP && !LWIP_IPV4) +#error "IGMP needs LWIP_IPV4 enabled in your lwipopts.h" +#endif +#if ((LWIP_NETCONN || LWIP_SOCKET) && (MEMP_NUM_TCPIP_MSG_API<=0)) +#error "If you want to use Sequential API, you have to define MEMP_NUM_TCPIP_MSG_API>=1 in your lwipopts.h" +#endif +/* There must be sufficient timeouts, taking into account requirements of the subsystems. */ +#if LWIP_TIMERS && (MEMP_NUM_SYS_TIMEOUT < LWIP_NUM_SYS_TIMEOUT_INTERNAL) +#error "MEMP_NUM_SYS_TIMEOUT is too low to accomodate all required timeouts" +#endif +#if (IP_REASSEMBLY && (MEMP_NUM_REASSDATA > IP_REASS_MAX_PBUFS)) +#error "MEMP_NUM_REASSDATA > IP_REASS_MAX_PBUFS doesn't make sense since each struct ip_reassdata must hold 2 pbufs at least!" +#endif +#endif /* !MEMP_MEM_MALLOC */ +#if LWIP_WND_SCALE +#if (LWIP_TCP && (TCP_WND > 0xffffffff)) +#error "If you want to use TCP, TCP_WND must fit in an u32_t, so, you have to reduce it in your lwipopts.h" +#endif +#if (LWIP_TCP && (TCP_RCV_SCALE > 14)) +#error "The maximum valid window scale value is 14!" +#endif +#if (LWIP_TCP && (TCP_WND > (0xFFFFU << TCP_RCV_SCALE))) +#error "TCP_WND is bigger than the configured LWIP_WND_SCALE allows!" +#endif +#if (LWIP_TCP && ((TCP_WND >> TCP_RCV_SCALE) == 0)) +#error "TCP_WND is too small for the configured LWIP_WND_SCALE (results in zero window)!" 
+#endif +#else /* LWIP_WND_SCALE */ +#if (LWIP_TCP && (TCP_WND > 0xffff)) +#error "If you want to use TCP, TCP_WND must fit in an u16_t, so, you have to reduce it in your lwipopts.h (or enable window scaling)" +#endif +#endif /* LWIP_WND_SCALE */ +#if (LWIP_TCP && (TCP_SND_QUEUELEN > 0xffff)) +#error "If you want to use TCP, TCP_SND_QUEUELEN must fit in an u16_t, so, you have to reduce it in your lwipopts.h" +#endif +#if (LWIP_TCP && (TCP_SND_QUEUELEN < 2)) +#error "TCP_SND_QUEUELEN must be at least 2 for no-copy TCP writes to work" +#endif +#if (LWIP_TCP && ((TCP_MAXRTX > 12) || (TCP_SYNMAXRTX > 12))) +#error "If you want to use TCP, TCP_MAXRTX and TCP_SYNMAXRTX must less or equal to 12 (due to tcp_backoff table), so, you have to reduce them in your lwipopts.h" +#endif +#if (LWIP_TCP && TCP_LISTEN_BACKLOG && ((TCP_DEFAULT_LISTEN_BACKLOG < 0) || (TCP_DEFAULT_LISTEN_BACKLOG > 0xff))) +#error "If you want to use TCP backlog, TCP_DEFAULT_LISTEN_BACKLOG must fit into an u8_t" +#endif +#if (LWIP_TCP && LWIP_TCP_SACK_OUT && !TCP_QUEUE_OOSEQ) +#error "To use LWIP_TCP_SACK_OUT, TCP_QUEUE_OOSEQ needs to be enabled" +#endif +#if (LWIP_TCP && LWIP_TCP_SACK_OUT && (LWIP_TCP_MAX_SACK_NUM < 1)) +#error "LWIP_TCP_MAX_SACK_NUM must be greater than 0" +#endif +#if (LWIP_NETIF_API && (NO_SYS==1)) +#error "If you want to use NETIF API, you have to define NO_SYS=0 in your lwipopts.h" +#endif +#if ((LWIP_SOCKET || LWIP_NETCONN) && (NO_SYS==1)) +#error "If you want to use Sequential API, you have to define NO_SYS=0 in your lwipopts.h" +#endif +#if (LWIP_PPP_API && (NO_SYS==1)) +#error "If you want to use PPP API, you have to define NO_SYS=0 in your lwipopts.h" +#endif +#if (LWIP_PPP_API && (PPP_SUPPORT==0)) +#error "If you want to use PPP API, you have to enable PPP_SUPPORT in your lwipopts.h" +#endif +#if (((!LWIP_DHCP) || (!LWIP_AUTOIP)) && LWIP_DHCP_AUTOIP_COOP) +#error "If you want to use DHCP/AUTOIP cooperation mode, you have to define LWIP_DHCP=1 and LWIP_AUTOIP=1 in your lwipopts.h" +#endif +#if (((!LWIP_DHCP) || (!LWIP_ARP)) && DHCP_DOES_ARP_CHECK) +#error "If you want to use DHCP ARP checking, you have to define LWIP_DHCP=1 and LWIP_ARP=1 in your lwipopts.h" +#endif +#if (!LWIP_ARP && LWIP_AUTOIP) +#error "If you want to use AUTOIP, you have to define LWIP_ARP=1 in your lwipopts.h" +#endif +#if (LWIP_TCP && ((LWIP_EVENT_API && LWIP_CALLBACK_API) || (!LWIP_EVENT_API && !LWIP_CALLBACK_API))) +#error "One and exactly one of LWIP_EVENT_API and LWIP_CALLBACK_API has to be enabled in your lwipopts.h" +#endif +#if (LWIP_ALTCP && LWIP_EVENT_API) +#error "The application layered tcp API does not work with LWIP_EVENT_API" +#endif +#if (MEM_LIBC_MALLOC && MEM_USE_POOLS) +#error "MEM_LIBC_MALLOC and MEM_USE_POOLS may not both be simultaneously enabled in your lwipopts.h" +#endif +#if (MEM_USE_POOLS && !MEMP_USE_CUSTOM_POOLS) +#error "MEM_USE_POOLS requires custom pools (MEMP_USE_CUSTOM_POOLS) to be enabled in your lwipopts.h" +#endif +#if (PBUF_POOL_BUFSIZE <= MEM_ALIGNMENT) +#error "PBUF_POOL_BUFSIZE must be greater than MEM_ALIGNMENT or the offset may take the full first pbuf" +#endif +#if (DNS_LOCAL_HOSTLIST && !DNS_LOCAL_HOSTLIST_IS_DYNAMIC && !(defined(DNS_LOCAL_HOSTLIST_INIT))) +#error "you have to define define DNS_LOCAL_HOSTLIST_INIT {{'host1', 0x123}, {'host2', 0x234}} to initialize DNS_LOCAL_HOSTLIST" +#endif +#if PPP_SUPPORT && !PPPOS_SUPPORT && !PPPOE_SUPPORT && !PPPOL2TP_SUPPORT +#error "PPP_SUPPORT needs at least one of PPPOS_SUPPORT, PPPOE_SUPPORT or PPPOL2TP_SUPPORT turned on" +#endif +#if 
PPP_SUPPORT && !PPP_IPV4_SUPPORT && !PPP_IPV6_SUPPORT +#error "PPP_SUPPORT needs PPP_IPV4_SUPPORT and/or PPP_IPV6_SUPPORT turned on" +#endif +#if PPP_SUPPORT && PPP_IPV4_SUPPORT && !LWIP_IPV4 +#error "PPP_IPV4_SUPPORT needs LWIP_IPV4 turned on" +#endif +#if PPP_SUPPORT && PPP_IPV6_SUPPORT && !LWIP_IPV6 +#error "PPP_IPV6_SUPPORT needs LWIP_IPV6 turned on" +#endif +#if !LWIP_ETHERNET && (LWIP_ARP || PPPOE_SUPPORT) +#error "LWIP_ETHERNET needs to be turned on for LWIP_ARP or PPPOE_SUPPORT" +#endif +#if LWIP_TCPIP_CORE_LOCKING_INPUT && !LWIP_TCPIP_CORE_LOCKING +#error "When using LWIP_TCPIP_CORE_LOCKING_INPUT, LWIP_TCPIP_CORE_LOCKING must be enabled, too" +#endif +#if LWIP_TCP && LWIP_NETIF_TX_SINGLE_PBUF && !TCP_OVERSIZE +#error "LWIP_NETIF_TX_SINGLE_PBUF needs TCP_OVERSIZE enabled to create single-pbuf TCP packets" +#endif +#if LWIP_NETCONN && LWIP_TCP +#if NETCONN_COPY != TCP_WRITE_FLAG_COPY +#error "NETCONN_COPY != TCP_WRITE_FLAG_COPY" +#endif +#if NETCONN_MORE != TCP_WRITE_FLAG_MORE +#error "NETCONN_MORE != TCP_WRITE_FLAG_MORE" +#endif +#endif /* LWIP_NETCONN && LWIP_TCP */ +#if LWIP_SOCKET +#endif /* LWIP_SOCKET */ + + +/* Compile-time checks for deprecated options. + */ +#ifdef MEMP_NUM_TCPIP_MSG +#error "MEMP_NUM_TCPIP_MSG option is deprecated. Remove it from your lwipopts.h." +#endif +#ifdef TCP_REXMIT_DEBUG +#error "TCP_REXMIT_DEBUG option is deprecated. Remove it from your lwipopts.h." +#endif +#ifdef RAW_STATS +#error "RAW_STATS option is deprecated. Remove it from your lwipopts.h." +#endif +#ifdef ETHARP_QUEUE_FIRST +#error "ETHARP_QUEUE_FIRST option is deprecated. Remove it from your lwipopts.h." +#endif +#ifdef ETHARP_ALWAYS_INSERT +#error "ETHARP_ALWAYS_INSERT option is deprecated. Remove it from your lwipopts.h." +#endif +#if !NO_SYS && LWIP_TCPIP_CORE_LOCKING && LWIP_COMPAT_MUTEX && !defined(LWIP_COMPAT_MUTEX_ALLOWED) +#error "LWIP_COMPAT_MUTEX cannot prevent priority inversion. It is recommended to implement priority-aware mutexes. (Define LWIP_COMPAT_MUTEX_ALLOWED to disable this error.)" +#endif + +#ifndef LWIP_DISABLE_TCP_SANITY_CHECKS +#define LWIP_DISABLE_TCP_SANITY_CHECKS 0 +#endif +#ifndef LWIP_DISABLE_MEMP_SANITY_CHECKS +#define LWIP_DISABLE_MEMP_SANITY_CHECKS 0 +#endif + +/* MEMP sanity checks */ +#if MEMP_MEM_MALLOC +#if !LWIP_DISABLE_MEMP_SANITY_CHECKS +#if LWIP_NETCONN || LWIP_SOCKET +#if !MEMP_NUM_NETCONN && LWIP_SOCKET +#error "lwip_sanity_check: WARNING: MEMP_NUM_NETCONN cannot be 0 when using sockets!" +#endif +#else /* MEMP_MEM_MALLOC */ +#if MEMP_NUM_NETCONN > (MEMP_NUM_TCP_PCB+MEMP_NUM_TCP_PCB_LISTEN+MEMP_NUM_UDP_PCB+MEMP_NUM_RAW_PCB) +#error "lwip_sanity_check: WARNING: MEMP_NUM_NETCONN should be less than the sum of MEMP_NUM_{TCP,RAW,UDP}_PCB+MEMP_NUM_TCP_PCB_LISTEN. If you know what you are doing, define LWIP_DISABLE_MEMP_SANITY_CHECKS to 1 to disable this error." +#endif +#endif /* LWIP_NETCONN || LWIP_SOCKET */ +#endif /* !LWIP_DISABLE_MEMP_SANITY_CHECKS */ +#if MEM_USE_POOLS +#error "MEMP_MEM_MALLOC and MEM_USE_POOLS cannot be enabled at the same time" +#endif +#ifdef LWIP_HOOK_MEMP_AVAILABLE +#error "LWIP_HOOK_MEMP_AVAILABLE doesn't make sense with MEMP_MEM_MALLOC" +#endif +#endif /* MEMP_MEM_MALLOC */ + +/* TCP sanity checks */ +#if !LWIP_DISABLE_TCP_SANITY_CHECKS +#if LWIP_TCP +#if !MEMP_MEM_MALLOC && (MEMP_NUM_TCP_SEG < TCP_SND_QUEUELEN) +#error "lwip_sanity_check: WARNING: MEMP_NUM_TCP_SEG should be at least as big as TCP_SND_QUEUELEN. If you know what you are doing, define LWIP_DISABLE_TCP_SANITY_CHECKS to 1 to disable this error." 
+#endif +#if TCP_SND_BUF < (2 * TCP_MSS) +#error "lwip_sanity_check: WARNING: TCP_SND_BUF must be at least as much as (2 * TCP_MSS) for things to work smoothly. If you know what you are doing, define LWIP_DISABLE_TCP_SANITY_CHECKS to 1 to disable this error." +#endif +#if TCP_SND_QUEUELEN < (2 * (TCP_SND_BUF / TCP_MSS)) +#error "lwip_sanity_check: WARNING: TCP_SND_QUEUELEN must be at least as much as (2 * TCP_SND_BUF/TCP_MSS) for things to work. If you know what you are doing, define LWIP_DISABLE_TCP_SANITY_CHECKS to 1 to disable this error." +#endif +#if TCP_SNDLOWAT >= TCP_SND_BUF +#error "lwip_sanity_check: WARNING: TCP_SNDLOWAT must be less than TCP_SND_BUF. If you know what you are doing, define LWIP_DISABLE_TCP_SANITY_CHECKS to 1 to disable this error." +#endif +#if TCP_SNDLOWAT >= (0xFFFF - (4 * TCP_MSS)) +#error "lwip_sanity_check: WARNING: TCP_SNDLOWAT must at least be 4*MSS below u16_t overflow!" +#endif +#if TCP_SNDQUEUELOWAT >= TCP_SND_QUEUELEN +#error "lwip_sanity_check: WARNING: TCP_SNDQUEUELOWAT must be less than TCP_SND_QUEUELEN. If you know what you are doing, define LWIP_DISABLE_TCP_SANITY_CHECKS to 1 to disable this error." +#endif +#if !MEMP_MEM_MALLOC && PBUF_POOL_SIZE && (PBUF_POOL_BUFSIZE <= (PBUF_LINK_ENCAPSULATION_HLEN + PBUF_LINK_HLEN + PBUF_IP_HLEN + PBUF_TRANSPORT_HLEN)) +#error "lwip_sanity_check: WARNING: PBUF_POOL_BUFSIZE does not provide enough space for protocol headers. If you know what you are doing, define LWIP_DISABLE_TCP_SANITY_CHECKS to 1 to disable this error." +#endif +#if !MEMP_MEM_MALLOC && PBUF_POOL_SIZE && (TCP_WND > (PBUF_POOL_SIZE * (PBUF_POOL_BUFSIZE - (PBUF_LINK_ENCAPSULATION_HLEN + PBUF_LINK_HLEN + PBUF_IP_HLEN + PBUF_TRANSPORT_HLEN)))) +#error "lwip_sanity_check: WARNING: TCP_WND is larger than space provided by PBUF_POOL_SIZE * (PBUF_POOL_BUFSIZE - protocol headers). If you know what you are doing, define LWIP_DISABLE_TCP_SANITY_CHECKS to 1 to disable this error." +#endif +#if TCP_WND < TCP_MSS +#error "lwip_sanity_check: WARNING: TCP_WND is smaller than MSS. If you know what you are doing, define LWIP_DISABLE_TCP_SANITY_CHECKS to 1 to disable this error." +#endif +#endif /* LWIP_TCP */ +#endif /* !LWIP_DISABLE_TCP_SANITY_CHECKS */ + +/** + * @ingroup lwip_nosys + * Initialize all modules. + * Use this in NO_SYS mode. Use tcpip_init() otherwise. + */ +void +lwip_init(void) +{ +#ifndef LWIP_SKIP_CONST_CHECK + int a = 0; + LWIP_UNUSED_ARG(a); + LWIP_ASSERT("LWIP_CONST_CAST not implemented correctly. Check your lwIP port.", LWIP_CONST_CAST(void *, &a) == &a); +#endif +#ifndef LWIP_SKIP_PACKING_CHECK + LWIP_ASSERT("Struct packing not implemented correctly. 
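The TCP sanity checks above encode the relationships a working configuration has to satisfy: a send buffer of at least two segments, a send queue of at least twice the buffer measured in segments, a window no smaller than one segment, and low-water marks below their ceilings. One illustrative lwipopts.h set that passes them; actual values should be tuned to the application's RAM budget:

/* lwipopts.h (illustrative sketch, not part of this diff) */
#define TCP_MSS            1460                          /* one Ethernet-sized segment   */
#define TCP_WND            (4 * TCP_MSS)                 /* >= TCP_MSS                   */
#define TCP_SND_BUF        (4 * TCP_MSS)                 /* >= 2 * TCP_MSS               */
#define TCP_SND_QUEUELEN   (4 * TCP_SND_BUF / TCP_MSS)   /* >= 2 * TCP_SND_BUF / TCP_MSS */
#define TCP_SNDLOWAT       (TCP_SND_BUF / 2)             /* < TCP_SND_BUF                */
#define MEMP_NUM_TCP_SEG   TCP_SND_QUEUELEN              /* >= TCP_SND_QUEUELEN          */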
Check your lwIP port.", sizeof(struct packed_struct_test) == PACKED_STRUCT_TEST_EXPECTED_SIZE); +#endif + + /* Modules initialization */ + stats_init(); +#if !NO_SYS + sys_init(); +#endif /* !NO_SYS */ + mem_init(); + memp_init(); + pbuf_init(); + netif_init(); +#if LWIP_IPV4 + ip_init(); +#if LWIP_ARP + etharp_init(); +#endif /* LWIP_ARP */ +#endif /* LWIP_IPV4 */ +#if LWIP_RAW + raw_init(); +#endif /* LWIP_RAW */ +#if LWIP_UDP + udp_init(); +#endif /* LWIP_UDP */ +#if LWIP_TCP + tcp_init(); +#endif /* LWIP_TCP */ +#if LWIP_IGMP + igmp_init(); +#endif /* LWIP_IGMP */ +#if LWIP_DNS + dns_init(); +#endif /* LWIP_DNS */ +#if PPP_SUPPORT + ppp_init(); +#endif + +#if LWIP_TIMERS + sys_timeouts_init(); +#endif /* LWIP_TIMERS */ +} diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/ip.c b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/ip.c new file mode 100644 index 0000000..d611315 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/ip.c @@ -0,0 +1,167 @@ +/** + * @file + * Common IPv4 and IPv6 code + * + * @defgroup ip IP + * @ingroup callbackstyle_api + * + * @defgroup ip4 IPv4 + * @ingroup ip + * + * @defgroup ip6 IPv6 + * @ingroup ip + * + * @defgroup ipaddr IP address handling + * @ingroup infrastructure + * + * @defgroup ip4addr IPv4 only + * @ingroup ipaddr + * + * @defgroup ip6addr IPv6 only + * @ingroup ipaddr + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ + +#include "lwip/opt.h" + +#if LWIP_IPV4 || LWIP_IPV6 + +#include "lwip/ip_addr.h" +#include "lwip/ip.h" + +/** Global data for both IPv4 and IPv6 */ +struct ip_globals ip_data; + +#if LWIP_IPV4 && LWIP_IPV6 + +const ip_addr_t ip_addr_any_type = IPADDR_ANY_TYPE_INIT; + +/** + * @ingroup ipaddr + * Convert numeric IP address (both versions) into ASCII representation. + * returns ptr to static buffer; not reentrant! 
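lwip_init() above is the NO_SYS entry point, so a bare-metal port pairs it with a polling loop instead of tcpip_init(). A hedged sketch; ethernetif_init, the static addresses and the driver-poll step are placeholders the port has to supply:

/* Illustrative NO_SYS bring-up sketch (not part of this diff) */
#include "lwip/init.h"
#include "lwip/netif.h"
#include "lwip/timeouts.h"

extern err_t ethernetif_init(struct netif *netif);    /* placeholder: port-provided */

static struct netif main_netif;

void
network_main_loop(void)
{
  ip4_addr_t ip, mask, gw;

  lwip_init();                                        /* NO_SYS == 1, no tcpip thread */

  IP4_ADDR(&ip,   192, 168, 1, 50);                   /* placeholder static address   */
  IP4_ADDR(&mask, 255, 255, 255, 0);
  IP4_ADDR(&gw,   192, 168, 1, 1);
  netif_add(&main_netif, &ip, &mask, &gw, NULL, ethernetif_init, netif_input);
  netif_set_default(&main_netif);
  netif_set_up(&main_netif);

  for (;;) {
    /* ... poll the MAC driver for received frames here ... */
    sys_check_timeouts();                             /* drives ARP/DHCP/DNS/TCP timers */
  }
}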
+ * + * @param addr ip address in network order to convert + * @return pointer to a global static (!) buffer that holds the ASCII + * representation of addr + */ +char *ipaddr_ntoa(const ip_addr_t *addr) +{ + if (addr == NULL) { + return NULL; + } + if (IP_IS_V6(addr)) { + return ip6addr_ntoa(ip_2_ip6(addr)); + } else { + return ip4addr_ntoa(ip_2_ip4(addr)); + } +} + +/** + * @ingroup ipaddr + * Same as ipaddr_ntoa, but reentrant since a user-supplied buffer is used. + * + * @param addr ip address in network order to convert + * @param buf target buffer where the string is stored + * @param buflen length of buf + * @return either pointer to buf which now holds the ASCII + * representation of addr or NULL if buf was too small + */ +char *ipaddr_ntoa_r(const ip_addr_t *addr, char *buf, int buflen) +{ + if (addr == NULL) { + return NULL; + } + if (IP_IS_V6(addr)) { + return ip6addr_ntoa_r(ip_2_ip6(addr), buf, buflen); + } else { + return ip4addr_ntoa_r(ip_2_ip4(addr), buf, buflen); + } +} + +/** + * @ingroup ipaddr + * Convert IP address string (both versions) to numeric. + * The version is auto-detected from the string. + * + * @param cp IP address string to convert + * @param addr conversion result is stored here + * @return 1 on success, 0 on error + */ +int +ipaddr_aton(const char *cp, ip_addr_t *addr) +{ + if (cp != NULL) { + const char *c; + for (c = cp; *c != 0; c++) { + if (*c == ':') { + /* contains a colon: IPv6 address */ + if (addr) { + IP_SET_TYPE_VAL(*addr, IPADDR_TYPE_V6); + } + return ip6addr_aton(cp, ip_2_ip6(addr)); + } else if (*c == '.') { + /* contains a dot: IPv4 address */ + break; + } + } + /* call ip4addr_aton as fallback or if IPv4 was found */ + if (addr) { + IP_SET_TYPE_VAL(*addr, IPADDR_TYPE_V4); + } + return ip4addr_aton(cp, ip_2_ip4(addr)); + } + return 0; +} + +/** + * @ingroup lwip_nosys + * If both IP versions are enabled, this function can dispatch packets to the correct one. + * Don't call directly, pass to netif_add() and call netif->input(). + */ +err_t +ip_input(struct pbuf *p, struct netif *inp) +{ + if (p != NULL) { + if (IP_HDR_GET_VERSION(p->payload) == 6) { + return ip6_input(p, inp); + } + return ip4_input(p, inp); + } + return ERR_VAL; +} + +#endif /* LWIP_IPV4 && LWIP_IPV6 */ + +#endif /* LWIP_IPV4 || LWIP_IPV6 */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/ipv4/autoip.c b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/ipv4/autoip.c new file mode 100644 index 0000000..a454504 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/ipv4/autoip.c @@ -0,0 +1,527 @@ +/** + * @file + * AutoIP Automatic LinkLocal IP Configuration + * + * This is a AutoIP implementation for the lwIP TCP/IP stack. It aims to conform + * with RFC 3927. + * + * @defgroup autoip AUTOIP + * @ingroup ip4 + * AUTOIP related functions + * USAGE: + * + * define @ref LWIP_AUTOIP 1 in your lwipopts.h + * Options: + * AUTOIP_TMR_INTERVAL msecs, + * I recommend a value of 100. The value must divide 1000 with a remainder almost 0. + * Possible values are 1000, 500, 333, 250, 200, 166, 142, 125, 111, 100 .... + * + * Without DHCP: + * - Call autoip_start() after netif_add(). + * + * With DHCP: + * - define @ref LWIP_DHCP_AUTOIP_COOP 1 in your lwipopts.h. + * - Configure your DHCP Client. + * + * @see netifapi_autoip + */ + +/* + * + * Copyright (c) 2007 Dominik Spies + * All rights reserved. 
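The dual-stack conversion helpers above auto-detect the address family from the string. A hedged round-trip sketch, assuming both LWIP_IPV4 and LWIP_IPV6 are enabled and that this tree provides IPADDR_STRLEN_MAX as stock lwIP does:

/* Illustrative sketch (not part of this diff) */
#include <stdio.h>
#include "lwip/ip_addr.h"

static void
addr_string_roundtrip(void)
{
  ip_addr_t addr;
  char buf[IPADDR_STRLEN_MAX];

  /* a ':' in the string selects IPv6 parsing, a '.' (or neither) falls back to IPv4 */
  if (ipaddr_aton("2001:db8::1", &addr)) {
    /* reentrant variant: the caller supplies the buffer; ipaddr_ntoa() instead
       returns one shared static buffer and must not be used concurrently */
    if (ipaddr_ntoa_r(&addr, buf, (int)sizeof(buf)) != NULL) {
      printf("parsed as %s: %s\n", IP_IS_V6(&addr) ? "IPv6" : "IPv4", buf);
    }
  }
}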
+ * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * Author: Dominik Spies + */ + +#include "lwip/opt.h" + +#if LWIP_IPV4 && LWIP_AUTOIP /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/mem.h" +/* #include "lwip/udp.h" */ +#include "lwip/ip_addr.h" +#include "lwip/netif.h" +#include "lwip/autoip.h" +#include "lwip/etharp.h" +#include "lwip/prot/autoip.h" + +#include + +/** Pseudo random macro based on netif informations. + * You could use "rand()" from the C Library if you define LWIP_AUTOIP_RAND in lwipopts.h */ +#ifndef LWIP_AUTOIP_RAND +#define LWIP_AUTOIP_RAND(netif) ( (((u32_t)((netif->hwaddr[5]) & 0xff) << 24) | \ + ((u32_t)((netif->hwaddr[3]) & 0xff) << 16) | \ + ((u32_t)((netif->hwaddr[2]) & 0xff) << 8) | \ + ((u32_t)((netif->hwaddr[4]) & 0xff))) + \ + (netif_autoip_data(netif)? netif_autoip_data(netif)->tried_llipaddr : 0)) +#endif /* LWIP_AUTOIP_RAND */ + +/** + * Macro that generates the initial IP address to be tried by AUTOIP. + * If you want to override this, define it to something else in lwipopts.h. + */ +#ifndef LWIP_AUTOIP_CREATE_SEED_ADDR +#define LWIP_AUTOIP_CREATE_SEED_ADDR(netif) \ + lwip_htonl(AUTOIP_RANGE_START + ((u32_t)(((u8_t)(netif->hwaddr[4])) | \ + ((u32_t)((u8_t)(netif->hwaddr[5]))) << 8))) +#endif /* LWIP_AUTOIP_CREATE_SEED_ADDR */ + +/* static functions */ +static err_t autoip_arp_announce(struct netif *netif); +static void autoip_start_probing(struct netif *netif); + +/** + * @ingroup autoip + * Set a statically allocated struct autoip to work with. + * Using this prevents autoip_start to allocate it using mem_malloc. 
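+ *
+ * A minimal usage sketch (illustrative; assumes LWIP_AUTOIP is enabled in
+ * lwipopts.h and that my_netif has already been added with netif_add() and
+ * brought up):
+ *
+ *   static struct autoip my_autoip;
+ *   autoip_set_struct(&my_netif, &my_autoip);
+ *   autoip_start(&my_netif);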
+ * + * @param netif the netif for which to set the struct autoip + * @param autoip (uninitialised) autoip struct allocated by the application + */ +void +autoip_set_struct(struct netif *netif, struct autoip *autoip) +{ + LWIP_ASSERT_CORE_LOCKED(); + LWIP_ASSERT("netif != NULL", netif != NULL); + LWIP_ASSERT("autoip != NULL", autoip != NULL); + LWIP_ASSERT("netif already has a struct autoip set", + netif_autoip_data(netif) == NULL); + + /* clear data structure */ + memset(autoip, 0, sizeof(struct autoip)); + /* autoip->state = AUTOIP_STATE_OFF; */ + netif_set_client_data(netif, LWIP_NETIF_CLIENT_DATA_INDEX_AUTOIP, autoip); +} + +/** Restart AutoIP client and check the next address (conflict detected) + * + * @param netif The netif under AutoIP control + */ +static void +autoip_restart(struct netif *netif) +{ + struct autoip *autoip = netif_autoip_data(netif); + autoip->tried_llipaddr++; + autoip_start(netif); +} + +/** + * Handle a IP address conflict after an ARP conflict detection + */ +static void +autoip_handle_arp_conflict(struct netif *netif) +{ + struct autoip *autoip = netif_autoip_data(netif); + + /* RFC3927, 2.5 "Conflict Detection and Defense" allows two options where + a) means retreat on the first conflict and + b) allows to keep an already configured address when having only one + conflict in 10 seconds + We use option b) since it helps to improve the chance that one of the two + conflicting hosts may be able to retain its address. */ + + if (autoip->lastconflict > 0) { + /* retreat, there was a conflicting ARP in the last DEFEND_INTERVAL seconds */ + LWIP_DEBUGF(AUTOIP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, + ("autoip_handle_arp_conflict(): we are defending, but in DEFEND_INTERVAL, retreating\n")); + + /* Active TCP sessions are aborted when removing the ip addresss */ + autoip_restart(netif); + } else { + LWIP_DEBUGF(AUTOIP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, + ("autoip_handle_arp_conflict(): we are defend, send ARP Announce\n")); + autoip_arp_announce(netif); + autoip->lastconflict = DEFEND_INTERVAL * AUTOIP_TICKS_PER_SECOND; + } +} + +/** + * Create an IP-Address out of range 169.254.1.0 to 169.254.254.255 + * + * @param netif network interface on which create the IP-Address + * @param ipaddr ip address to initialize + */ +static void +autoip_create_addr(struct netif *netif, ip4_addr_t *ipaddr) +{ + struct autoip *autoip = netif_autoip_data(netif); + + /* Here we create an IP-Address out of range 169.254.1.0 to 169.254.254.255 + * compliant to RFC 3927 Section 2.1 + * We have 254 * 256 possibilities */ + + u32_t addr = lwip_ntohl(LWIP_AUTOIP_CREATE_SEED_ADDR(netif)); + addr += autoip->tried_llipaddr; + addr = AUTOIP_NET | (addr & 0xffff); + /* Now, 169.254.0.0 <= addr <= 169.254.255.255 */ + + if (addr < AUTOIP_RANGE_START) { + addr += AUTOIP_RANGE_END - AUTOIP_RANGE_START + 1; + } + if (addr > AUTOIP_RANGE_END) { + addr -= AUTOIP_RANGE_END - AUTOIP_RANGE_START + 1; + } + LWIP_ASSERT("AUTOIP address not in range", (addr >= AUTOIP_RANGE_START) && + (addr <= AUTOIP_RANGE_END)); + ip4_addr_set_u32(ipaddr, lwip_htonl(addr)); + + LWIP_DEBUGF(AUTOIP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, + ("autoip_create_addr(): tried_llipaddr=%"U16_F", %"U16_F".%"U16_F".%"U16_F".%"U16_F"\n", + (u16_t)(autoip->tried_llipaddr), ip4_addr1_16(ipaddr), ip4_addr2_16(ipaddr), + ip4_addr3_16(ipaddr), ip4_addr4_16(ipaddr))); +} + +/** + * Sends an ARP probe from a network interface + * + * @param netif network interface used to send the probe + */ +static err_t +autoip_arp_probe(struct netif 
*netif) +{ + struct autoip *autoip = netif_autoip_data(netif); + /* this works because netif->ip_addr is ANY */ + return etharp_request(netif, &autoip->llipaddr); +} + +/** + * Sends an ARP announce from a network interface + * + * @param netif network interface used to send the announce + */ +static err_t +autoip_arp_announce(struct netif *netif) +{ + return etharp_gratuitous(netif); +} + +/** + * Configure interface for use with current LL IP-Address + * + * @param netif network interface to configure with current LL IP-Address + */ +static err_t +autoip_bind(struct netif *netif) +{ + struct autoip *autoip = netif_autoip_data(netif); + ip4_addr_t sn_mask, gw_addr; + + LWIP_DEBUGF(AUTOIP_DEBUG | LWIP_DBG_TRACE, + ("autoip_bind(netif=%p) %c%c%"U16_F" %"U16_F".%"U16_F".%"U16_F".%"U16_F"\n", + (void *)netif, netif->name[0], netif->name[1], (u16_t)netif->num, + ip4_addr1_16(&autoip->llipaddr), ip4_addr2_16(&autoip->llipaddr), + ip4_addr3_16(&autoip->llipaddr), ip4_addr4_16(&autoip->llipaddr))); + + IP4_ADDR(&sn_mask, 255, 255, 0, 0); + IP4_ADDR(&gw_addr, 0, 0, 0, 0); + + netif_set_addr(netif, &autoip->llipaddr, &sn_mask, &gw_addr); + /* interface is used by routing now that an address is set */ + + return ERR_OK; +} + +/** + * @ingroup autoip + * Start AutoIP client + * + * @param netif network interface on which start the AutoIP client + */ +err_t +autoip_start(struct netif *netif) +{ + struct autoip *autoip = netif_autoip_data(netif); + err_t result = ERR_OK; + + LWIP_ASSERT_CORE_LOCKED(); + LWIP_ERROR("netif is not up, old style port?", netif_is_up(netif), return ERR_ARG;); + + /* Set IP-Address, Netmask and Gateway to 0 to make sure that + * ARP Packets are formed correctly + */ + netif_set_addr(netif, IP4_ADDR_ANY4, IP4_ADDR_ANY4, IP4_ADDR_ANY4); + + LWIP_DEBUGF(AUTOIP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, + ("autoip_start(netif=%p) %c%c%"U16_F"\n", (void *)netif, netif->name[0], + netif->name[1], (u16_t)netif->num)); + if (autoip == NULL) { + /* no AutoIP client attached yet? */ + LWIP_DEBUGF(AUTOIP_DEBUG | LWIP_DBG_TRACE, + ("autoip_start(): starting new AUTOIP client\n")); + autoip = (struct autoip *)mem_calloc(1, sizeof(struct autoip)); + if (autoip == NULL) { + LWIP_DEBUGF(AUTOIP_DEBUG | LWIP_DBG_TRACE, + ("autoip_start(): could not allocate autoip\n")); + return ERR_MEM; + } + /* store this AutoIP client in the netif */ + netif_set_client_data(netif, LWIP_NETIF_CLIENT_DATA_INDEX_AUTOIP, autoip); + LWIP_DEBUGF(AUTOIP_DEBUG | LWIP_DBG_TRACE, ("autoip_start(): allocated autoip")); + } else { + autoip->state = AUTOIP_STATE_OFF; + autoip->ttw = 0; + autoip->sent_num = 0; + ip4_addr_set_zero(&autoip->llipaddr); + autoip->lastconflict = 0; + } + + autoip_create_addr(netif, &(autoip->llipaddr)); + autoip_start_probing(netif); + + return result; +} + +static void +autoip_start_probing(struct netif *netif) +{ + struct autoip *autoip = netif_autoip_data(netif); + + autoip->state = AUTOIP_STATE_PROBING; + autoip->sent_num = 0; + LWIP_DEBUGF(AUTOIP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, + ("autoip_start_probing(): changing state to PROBING: %"U16_F".%"U16_F".%"U16_F".%"U16_F"\n", + ip4_addr1_16(&autoip->llipaddr), ip4_addr2_16(&autoip->llipaddr), + ip4_addr3_16(&autoip->llipaddr), ip4_addr4_16(&autoip->llipaddr))); + + /* time to wait to first probe, this is randomly + * chosen out of 0 to PROBE_WAIT seconds. 
+ * compliant to RFC 3927 Section 2.2.1 + */ + autoip->ttw = (u16_t)(LWIP_AUTOIP_RAND(netif) % (PROBE_WAIT * AUTOIP_TICKS_PER_SECOND)); + + /* + * if we tried more then MAX_CONFLICTS we must limit our rate for + * acquiring and probing address + * compliant to RFC 3927 Section 2.2.1 + */ + if (autoip->tried_llipaddr > MAX_CONFLICTS) { + autoip->ttw = RATE_LIMIT_INTERVAL * AUTOIP_TICKS_PER_SECOND; + } +} + +/** + * Handle a possible change in the network configuration. + * + * If there is an AutoIP address configured, take the interface down + * and begin probing with the same address. + */ +void +autoip_network_changed(struct netif *netif) +{ + struct autoip *autoip = netif_autoip_data(netif); + + if (autoip && (autoip->state != AUTOIP_STATE_OFF)) { + autoip_start_probing(netif); + } +} + +/** + * @ingroup autoip + * Stop AutoIP client + * + * @param netif network interface on which stop the AutoIP client + */ +err_t +autoip_stop(struct netif *netif) +{ + struct autoip *autoip = netif_autoip_data(netif); + + LWIP_ASSERT_CORE_LOCKED(); + if (autoip != NULL) { + autoip->state = AUTOIP_STATE_OFF; + if (ip4_addr_islinklocal(netif_ip4_addr(netif))) { + netif_set_addr(netif, IP4_ADDR_ANY4, IP4_ADDR_ANY4, IP4_ADDR_ANY4); + } + } + return ERR_OK; +} + +/** + * Has to be called in loop every AUTOIP_TMR_INTERVAL milliseconds + */ +void +autoip_tmr(void) +{ + struct netif *netif; + /* loop through netif's */ + NETIF_FOREACH(netif) { + struct autoip *autoip = netif_autoip_data(netif); + /* only act on AutoIP configured interfaces */ + if (autoip != NULL) { + if (autoip->lastconflict > 0) { + autoip->lastconflict--; + } + + LWIP_DEBUGF(AUTOIP_DEBUG | LWIP_DBG_TRACE, + ("autoip_tmr() AutoIP-State: %"U16_F", ttw=%"U16_F"\n", + (u16_t)(autoip->state), autoip->ttw)); + + if (autoip->ttw > 0) { + autoip->ttw--; + } + + switch (autoip->state) { + case AUTOIP_STATE_PROBING: + if (autoip->ttw == 0) { + if (autoip->sent_num >= PROBE_NUM) { + /* Switch to ANNOUNCING: now we can bind to an IP address and use it */ + autoip->state = AUTOIP_STATE_ANNOUNCING; + autoip_bind(netif); + /* autoip_bind() calls netif_set_addr(): this triggers a gratuitous ARP + which counts as an announcement */ + autoip->sent_num = 1; + autoip->ttw = ANNOUNCE_WAIT * AUTOIP_TICKS_PER_SECOND; + LWIP_DEBUGF(AUTOIP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, + ("autoip_tmr(): changing state to ANNOUNCING: %"U16_F".%"U16_F".%"U16_F".%"U16_F"\n", + ip4_addr1_16(&autoip->llipaddr), ip4_addr2_16(&autoip->llipaddr), + ip4_addr3_16(&autoip->llipaddr), ip4_addr4_16(&autoip->llipaddr))); + } else { + autoip_arp_probe(netif); + LWIP_DEBUGF(AUTOIP_DEBUG | LWIP_DBG_TRACE, ("autoip_tmr() PROBING Sent Probe\n")); + autoip->sent_num++; + if (autoip->sent_num == PROBE_NUM) { + /* calculate time to wait to for announce */ + autoip->ttw = ANNOUNCE_WAIT * AUTOIP_TICKS_PER_SECOND; + } else { + /* calculate time to wait to next probe */ + autoip->ttw = (u16_t)((LWIP_AUTOIP_RAND(netif) % + ((PROBE_MAX - PROBE_MIN) * AUTOIP_TICKS_PER_SECOND) ) + + PROBE_MIN * AUTOIP_TICKS_PER_SECOND); + } + } + } + break; + + case AUTOIP_STATE_ANNOUNCING: + if (autoip->ttw == 0) { + autoip_arp_announce(netif); + LWIP_DEBUGF(AUTOIP_DEBUG | LWIP_DBG_TRACE, ("autoip_tmr() ANNOUNCING Sent Announce\n")); + autoip->ttw = ANNOUNCE_INTERVAL * AUTOIP_TICKS_PER_SECOND; + autoip->sent_num++; + + if (autoip->sent_num >= ANNOUNCE_NUM) { + autoip->state = AUTOIP_STATE_BOUND; + autoip->sent_num = 0; + autoip->ttw = 0; + LWIP_DEBUGF(AUTOIP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, + ("autoip_tmr(): 
changing state to BOUND: %"U16_F".%"U16_F".%"U16_F".%"U16_F"\n", + ip4_addr1_16(&autoip->llipaddr), ip4_addr2_16(&autoip->llipaddr), + ip4_addr3_16(&autoip->llipaddr), ip4_addr4_16(&autoip->llipaddr))); + } + } + break; + + default: + /* nothing to do in other states */ + break; + } + } + } +} + +/** + * Handles every incoming ARP Packet, called by etharp_input(). + * + * @param netif network interface to use for autoip processing + * @param hdr Incoming ARP packet + */ +void +autoip_arp_reply(struct netif *netif, struct etharp_hdr *hdr) +{ + struct autoip *autoip = netif_autoip_data(netif); + + LWIP_DEBUGF(AUTOIP_DEBUG | LWIP_DBG_TRACE, ("autoip_arp_reply()\n")); + if ((autoip != NULL) && (autoip->state != AUTOIP_STATE_OFF)) { + /* when ip.src == llipaddr && hw.src != netif->hwaddr + * + * when probing ip.dst == llipaddr && hw.src != netif->hwaddr + * we have a conflict and must solve it + */ + ip4_addr_t sipaddr, dipaddr; + struct eth_addr netifaddr; + SMEMCPY(netifaddr.addr, netif->hwaddr, ETH_HWADDR_LEN); + + /* Copy struct ip4_addr_wordaligned to aligned ip4_addr, to support compilers without + * structure packing (not using structure copy which breaks strict-aliasing rules). + */ + IPADDR_WORDALIGNED_COPY_TO_IP4_ADDR_T(&sipaddr, &hdr->sipaddr); + IPADDR_WORDALIGNED_COPY_TO_IP4_ADDR_T(&dipaddr, &hdr->dipaddr); + + if (autoip->state == AUTOIP_STATE_PROBING) { + /* RFC 3927 Section 2.2.1: + * from beginning to after ANNOUNCE_WAIT + * seconds we have a conflict if + * ip.src == llipaddr OR + * ip.dst == llipaddr && hw.src != own hwaddr + */ + if ((ip4_addr_cmp(&sipaddr, &autoip->llipaddr)) || + (ip4_addr_isany_val(sipaddr) && + ip4_addr_cmp(&dipaddr, &autoip->llipaddr) && + !eth_addr_cmp(&netifaddr, &hdr->shwaddr))) { + LWIP_DEBUGF(AUTOIP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE | LWIP_DBG_LEVEL_WARNING, + ("autoip_arp_reply(): Probe Conflict detected\n")); + autoip_restart(netif); + } + } else { + /* RFC 3927 Section 2.5: + * in any state we have a conflict if + * ip.src == llipaddr && hw.src != own hwaddr + */ + if (ip4_addr_cmp(&sipaddr, &autoip->llipaddr) && + !eth_addr_cmp(&netifaddr, &hdr->shwaddr)) { + LWIP_DEBUGF(AUTOIP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE | LWIP_DBG_LEVEL_WARNING, + ("autoip_arp_reply(): Conflicting ARP-Packet detected\n")); + autoip_handle_arp_conflict(netif); + } + } + } +} + +/** check if AutoIP supplied netif->ip_addr + * + * @param netif the netif to check + * @return 1 if AutoIP supplied netif->ip_addr (state BOUND or ANNOUNCING), + * 0 otherwise + */ +u8_t +autoip_supplied_address(const struct netif *netif) +{ + if ((netif != NULL) && (netif_autoip_data(netif) != NULL)) { + struct autoip *autoip = netif_autoip_data(netif); + return (autoip->state == AUTOIP_STATE_BOUND) || (autoip->state == AUTOIP_STATE_ANNOUNCING); + } + return 0; +} + +u8_t +autoip_accept_packet(struct netif *netif, const ip4_addr_t *addr) +{ + struct autoip *autoip = netif_autoip_data(netif); + return (autoip != NULL) && ip4_addr_cmp(addr, &(autoip->llipaddr)); +} + +#endif /* LWIP_IPV4 && LWIP_AUTOIP */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/ipv4/dhcp.c b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/ipv4/dhcp.c new file mode 100644 index 0000000..89f9c41 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/ipv4/dhcp.c @@ -0,0 +1,1990 @@ +/** + * @file + * Dynamic Host Configuration Protocol client + * + * @defgroup dhcp4 
DHCPv4 + * @ingroup ip4 + * DHCP (IPv4) related functions + * This is a DHCP client for the lwIP TCP/IP stack. It aims to conform + * with RFC 2131 and RFC 2132. + * + * @todo: + * - Support for interfaces other than Ethernet (SLIP, PPP, ...) + * + * Options: + * @ref DHCP_COARSE_TIMER_SECS (recommended 60 which is a minute) + * @ref DHCP_FINE_TIMER_MSECS (recommended 500 which equals TCP coarse timer) + * + * dhcp_start() starts a DHCP client instance which + * configures the interface by obtaining an IP address lease and maintaining it. + * + * Use dhcp_release() to end the lease and use dhcp_stop() + * to remove the DHCP client. + * + * @see LWIP_HOOK_DHCP_APPEND_OPTIONS + * @see LWIP_HOOK_DHCP_PARSE_OPTION + * + * @see netifapi_dhcp4 + */ + +/* + * Copyright (c) 2001-2004 Leon Woestenberg + * Copyright (c) 2001-2004 Axon Digital Design B.V., The Netherlands. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * The Swedish Institute of Computer Science and Adam Dunkels + * are specifically granted permission to redistribute this + * source code. 
+ * + * Author: Leon Woestenberg + * + */ + +#include "lwip/opt.h" + +#if LWIP_IPV4 && LWIP_DHCP /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/stats.h" +#include "lwip/mem.h" +#include "lwip/udp.h" +#include "lwip/ip_addr.h" +#include "lwip/netif.h" +#include "lwip/def.h" +#include "lwip/dhcp.h" +#include "lwip/autoip.h" +#include "lwip/dns.h" +#include "lwip/etharp.h" +#include "lwip/prot/dhcp.h" +#include "lwip/prot/iana.h" + +#include + +#ifdef LWIP_HOOK_FILENAME +#include LWIP_HOOK_FILENAME +#endif +#ifndef LWIP_HOOK_DHCP_APPEND_OPTIONS +#define LWIP_HOOK_DHCP_APPEND_OPTIONS(netif, dhcp, state, msg, msg_type, options_len_ptr) +#endif +#ifndef LWIP_HOOK_DHCP_PARSE_OPTION +#define LWIP_HOOK_DHCP_PARSE_OPTION(netif, dhcp, state, msg, msg_type, option, len, pbuf, offset) do { LWIP_UNUSED_ARG(msg); } while(0) +#endif + +/** DHCP_CREATE_RAND_XID: if this is set to 1, the xid is created using + * LWIP_RAND() (this overrides DHCP_GLOBAL_XID) + */ +#ifndef DHCP_CREATE_RAND_XID +#define DHCP_CREATE_RAND_XID 1 +#endif + +/** Default for DHCP_GLOBAL_XID is 0xABCD0000 + * This can be changed by defining DHCP_GLOBAL_XID and DHCP_GLOBAL_XID_HEADER, e.g. + * \#define DHCP_GLOBAL_XID_HEADER "stdlib.h" + * \#define DHCP_GLOBAL_XID rand() + */ +#ifdef DHCP_GLOBAL_XID_HEADER +#include DHCP_GLOBAL_XID_HEADER /* include optional starting XID generation prototypes */ +#endif + +/** DHCP_OPTION_MAX_MSG_SIZE is set to the MTU + * MTU is checked to be big enough in dhcp_start */ +#define DHCP_MAX_MSG_LEN(netif) (netif->mtu) +#define DHCP_MAX_MSG_LEN_MIN_REQUIRED 576 +/** Minimum length for reply before packet is parsed */ +#define DHCP_MIN_REPLY_LEN 44 + +#define REBOOT_TRIES 2 + +#if LWIP_DNS && LWIP_DHCP_MAX_DNS_SERVERS +#if DNS_MAX_SERVERS > LWIP_DHCP_MAX_DNS_SERVERS +#define LWIP_DHCP_PROVIDE_DNS_SERVERS LWIP_DHCP_MAX_DNS_SERVERS +#else +#define LWIP_DHCP_PROVIDE_DNS_SERVERS DNS_MAX_SERVERS +#endif +#else +#define LWIP_DHCP_PROVIDE_DNS_SERVERS 0 +#endif + +/** Option handling: options are parsed in dhcp_parse_reply + * and saved in an array where other functions can load them from. + * This might be moved into the struct dhcp (not necessarily since + * lwIP is single-threaded and the array is only used while in recv + * callback). */ +enum dhcp_option_idx { + DHCP_OPTION_IDX_OVERLOAD = 0, + DHCP_OPTION_IDX_MSG_TYPE, + DHCP_OPTION_IDX_SERVER_ID, + DHCP_OPTION_IDX_LEASE_TIME, + DHCP_OPTION_IDX_T1, + DHCP_OPTION_IDX_T2, + DHCP_OPTION_IDX_SUBNET_MASK, + DHCP_OPTION_IDX_ROUTER, +#if LWIP_DHCP_PROVIDE_DNS_SERVERS + DHCP_OPTION_IDX_DNS_SERVER, + DHCP_OPTION_IDX_DNS_SERVER_LAST = DHCP_OPTION_IDX_DNS_SERVER + LWIP_DHCP_PROVIDE_DNS_SERVERS - 1, +#endif /* LWIP_DHCP_PROVIDE_DNS_SERVERS */ +#if LWIP_DHCP_GET_NTP_SRV + DHCP_OPTION_IDX_NTP_SERVER, + DHCP_OPTION_IDX_NTP_SERVER_LAST = DHCP_OPTION_IDX_NTP_SERVER + LWIP_DHCP_MAX_NTP_SERVERS - 1, +#endif /* LWIP_DHCP_GET_NTP_SRV */ + DHCP_OPTION_IDX_MAX +}; + +/** Holds the decoded option values, only valid while in dhcp_recv. + @todo: move this into struct dhcp? */ +u32_t dhcp_rx_options_val[DHCP_OPTION_IDX_MAX]; +/** Holds a flag which option was received and is contained in dhcp_rx_options_val, + only valid while in dhcp_recv. + @todo: move this into struct dhcp? 
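+ *
+ * A short sketch of how the decoded options are read back via the helper
+ * macros defined below (illustrative; this mirrors what dhcp_handle_ack()
+ * does later in this file):
+ *
+ *   if (dhcp_option_given(dhcp, DHCP_OPTION_IDX_LEASE_TIME)) {
+ *     dhcp->offered_t0_lease = dhcp_get_option_value(dhcp, DHCP_OPTION_IDX_LEASE_TIME);
+ *   }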
*/ +u8_t dhcp_rx_options_given[DHCP_OPTION_IDX_MAX]; + +static u8_t dhcp_discover_request_options[] = { + DHCP_OPTION_SUBNET_MASK, + DHCP_OPTION_ROUTER, + DHCP_OPTION_BROADCAST +#if LWIP_DHCP_PROVIDE_DNS_SERVERS + , DHCP_OPTION_DNS_SERVER +#endif /* LWIP_DHCP_PROVIDE_DNS_SERVERS */ +#if LWIP_DHCP_GET_NTP_SRV + , DHCP_OPTION_NTP +#endif /* LWIP_DHCP_GET_NTP_SRV */ +}; + +#ifdef DHCP_GLOBAL_XID +static u32_t xid; +static u8_t xid_initialised; +#endif /* DHCP_GLOBAL_XID */ + +#define dhcp_option_given(dhcp, idx) (dhcp_rx_options_given[idx] != 0) +#define dhcp_got_option(dhcp, idx) (dhcp_rx_options_given[idx] = 1) +#define dhcp_clear_option(dhcp, idx) (dhcp_rx_options_given[idx] = 0) +#define dhcp_clear_all_options(dhcp) (memset(dhcp_rx_options_given, 0, sizeof(dhcp_rx_options_given))) +#define dhcp_get_option_value(dhcp, idx) (dhcp_rx_options_val[idx]) +#define dhcp_set_option_value(dhcp, idx, val) (dhcp_rx_options_val[idx] = (val)) + +static struct udp_pcb *dhcp_pcb; +static u8_t dhcp_pcb_refcount; + +/* DHCP client state machine functions */ +static err_t dhcp_discover(struct netif *netif); +static err_t dhcp_select(struct netif *netif); +static void dhcp_bind(struct netif *netif); +#if DHCP_DOES_ARP_CHECK +static err_t dhcp_decline(struct netif *netif); +#endif /* DHCP_DOES_ARP_CHECK */ +static err_t dhcp_rebind(struct netif *netif); +static err_t dhcp_reboot(struct netif *netif); +static void dhcp_set_state(struct dhcp *dhcp, u8_t new_state); + +/* receive, unfold, parse and free incoming messages */ +static void dhcp_recv(void *arg, struct udp_pcb *pcb, struct pbuf *p, const ip_addr_t *addr, u16_t port); + +/* set the DHCP timers */ +static void dhcp_timeout(struct netif *netif); +static void dhcp_t1_timeout(struct netif *netif); +static void dhcp_t2_timeout(struct netif *netif); + +/* build outgoing messages */ +/* create a DHCP message, fill in common headers */ +static struct pbuf *dhcp_create_msg(struct netif *netif, struct dhcp *dhcp, u8_t message_type, u16_t *options_out_len); +/* add a DHCP option (type, then length in bytes) */ +static u16_t dhcp_option(u16_t options_out_len, u8_t *options, u8_t option_type, u8_t option_len); +/* add option values */ +static u16_t dhcp_option_byte(u16_t options_out_len, u8_t *options, u8_t value); +static u16_t dhcp_option_short(u16_t options_out_len, u8_t *options, u16_t value); +static u16_t dhcp_option_long(u16_t options_out_len, u8_t *options, u32_t value); +#if LWIP_NETIF_HOSTNAME +static u16_t dhcp_option_hostname(u16_t options_out_len, u8_t *options, struct netif *netif); +#endif /* LWIP_NETIF_HOSTNAME */ +/* always add the DHCP options trailer to end and pad */ +static void dhcp_option_trailer(u16_t options_out_len, u8_t *options, struct pbuf *p_out); + +/** Ensure DHCP PCB is allocated and bound */ +static err_t +dhcp_inc_pcb_refcount(void) +{ + if (dhcp_pcb_refcount == 0) { + LWIP_ASSERT("dhcp_inc_pcb_refcount(): memory leak", dhcp_pcb == NULL); + + /* allocate UDP PCB */ + dhcp_pcb = udp_new(); + + if (dhcp_pcb == NULL) { + return ERR_MEM; + } + + ip_set_option(dhcp_pcb, SOF_BROADCAST); + + /* set up local and remote port for the pcb -> listen on all interfaces on all src/dest IPs */ + udp_bind(dhcp_pcb, IP4_ADDR_ANY, LWIP_IANA_PORT_DHCP_CLIENT); + udp_connect(dhcp_pcb, IP4_ADDR_ANY, LWIP_IANA_PORT_DHCP_SERVER); + udp_recv(dhcp_pcb, dhcp_recv, NULL); + } + + dhcp_pcb_refcount++; + + return ERR_OK; +} + +/** Free DHCP PCB if the last netif stops using it */ +static void +dhcp_dec_pcb_refcount(void) +{ + LWIP_ASSERT("dhcp_pcb_refcount(): 
refcount error", (dhcp_pcb_refcount > 0)); + dhcp_pcb_refcount--; + + if (dhcp_pcb_refcount == 0) { + udp_remove(dhcp_pcb); + dhcp_pcb = NULL; + } +} + +/** + * Back-off the DHCP client (because of a received NAK response). + * + * Back-off the DHCP client because of a received NAK. Receiving a + * NAK means the client asked for something non-sensible, for + * example when it tries to renew a lease obtained on another network. + * + * We clear any existing set IP address and restart DHCP negotiation + * afresh (as per RFC2131 3.2.3). + * + * @param netif the netif under DHCP control + */ +static void +dhcp_handle_nak(struct netif *netif) +{ + struct dhcp *dhcp = netif_dhcp_data(netif); + + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_handle_nak(netif=%p) %c%c%"U16_F"\n", + (void *)netif, netif->name[0], netif->name[1], (u16_t)netif->num)); + /* Change to a defined state - set this before assigning the address + to ensure the callback can use dhcp_supplied_address() */ + dhcp_set_state(dhcp, DHCP_STATE_BACKING_OFF); + /* remove IP address from interface (must no longer be used, as per RFC2131) */ + netif_set_addr(netif, IP4_ADDR_ANY4, IP4_ADDR_ANY4, IP4_ADDR_ANY4); + /* We can immediately restart discovery */ + dhcp_discover(netif); +} + +#if DHCP_DOES_ARP_CHECK +/** + * Checks if the offered IP address is already in use. + * + * It does so by sending an ARP request for the offered address and + * entering CHECKING state. If no ARP reply is received within a small + * interval, the address is assumed to be free for use by us. + * + * @param netif the netif under DHCP control + */ +static void +dhcp_check(struct netif *netif) +{ + struct dhcp *dhcp = netif_dhcp_data(netif); + err_t result; + u16_t msecs; + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_check(netif=%p) %c%c\n", (void *)netif, (s16_t)netif->name[0], + (s16_t)netif->name[1])); + dhcp_set_state(dhcp, DHCP_STATE_CHECKING); + /* create an ARP query for the offered IP address, expecting that no host + responds, as the IP address should not be in use. */ + result = etharp_query(netif, &dhcp->offered_ip_addr, NULL); + if (result != ERR_OK) { + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_WARNING, ("dhcp_check: could not perform ARP query\n")); + } + if (dhcp->tries < 255) { + dhcp->tries++; + } + msecs = 500; + dhcp->request_timeout = (u16_t)((msecs + DHCP_FINE_TIMER_MSECS - 1) / DHCP_FINE_TIMER_MSECS); + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_check(): set request timeout %"U16_F" msecs\n", msecs)); +} +#endif /* DHCP_DOES_ARP_CHECK */ + +/** + * Remember the configuration offered by a DHCP server. 
+ * + * @param netif the netif under DHCP control + */ +static void +dhcp_handle_offer(struct netif *netif, struct dhcp_msg *msg_in) +{ + struct dhcp *dhcp = netif_dhcp_data(netif); + + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_handle_offer(netif=%p) %c%c%"U16_F"\n", + (void *)netif, netif->name[0], netif->name[1], (u16_t)netif->num)); + /* obtain the server address */ + if (dhcp_option_given(dhcp, DHCP_OPTION_IDX_SERVER_ID)) { + dhcp->request_timeout = 0; /* stop timer */ + + ip_addr_set_ip4_u32(&dhcp->server_ip_addr, lwip_htonl(dhcp_get_option_value(dhcp, DHCP_OPTION_IDX_SERVER_ID))); + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_STATE, ("dhcp_handle_offer(): server 0x%08"X32_F"\n", + ip4_addr_get_u32(ip_2_ip4(&dhcp->server_ip_addr)))); + /* remember offered address */ + ip4_addr_copy(dhcp->offered_ip_addr, msg_in->yiaddr); + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_STATE, ("dhcp_handle_offer(): offer for 0x%08"X32_F"\n", + ip4_addr_get_u32(&dhcp->offered_ip_addr))); + + dhcp_select(netif); + } else { + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_SERIOUS, + ("dhcp_handle_offer(netif=%p) did not get server ID!\n", (void *)netif)); + } +} + +/** + * Select a DHCP server offer out of all offers. + * + * Simply select the first offer received. + * + * @param netif the netif under DHCP control + * @return lwIP specific error (see error.h) + */ +static err_t +dhcp_select(struct netif *netif) +{ + struct dhcp *dhcp; + err_t result; + u16_t msecs; + u8_t i; + struct pbuf *p_out; + u16_t options_out_len; + + LWIP_ERROR("dhcp_select: netif != NULL", (netif != NULL), return ERR_ARG;); + dhcp = netif_dhcp_data(netif); + LWIP_ERROR("dhcp_select: dhcp != NULL", (dhcp != NULL), return ERR_VAL;); + + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_select(netif=%p) %c%c%"U16_F"\n", (void *)netif, netif->name[0], netif->name[1], (u16_t)netif->num)); + dhcp_set_state(dhcp, DHCP_STATE_REQUESTING); + + /* create and initialize the DHCP message header */ + p_out = dhcp_create_msg(netif, dhcp, DHCP_REQUEST, &options_out_len); + if (p_out != NULL) { + struct dhcp_msg *msg_out = (struct dhcp_msg *)p_out->payload; + options_out_len = dhcp_option(options_out_len, msg_out->options, DHCP_OPTION_MAX_MSG_SIZE, DHCP_OPTION_MAX_MSG_SIZE_LEN); + options_out_len = dhcp_option_short(options_out_len, msg_out->options, DHCP_MAX_MSG_LEN(netif)); + + /* MUST request the offered IP address */ + options_out_len = dhcp_option(options_out_len, msg_out->options, DHCP_OPTION_REQUESTED_IP, 4); + options_out_len = dhcp_option_long(options_out_len, msg_out->options, lwip_ntohl(ip4_addr_get_u32(&dhcp->offered_ip_addr))); + + options_out_len = dhcp_option(options_out_len, msg_out->options, DHCP_OPTION_SERVER_ID, 4); + options_out_len = dhcp_option_long(options_out_len, msg_out->options, lwip_ntohl(ip4_addr_get_u32(ip_2_ip4(&dhcp->server_ip_addr)))); + + options_out_len = dhcp_option(options_out_len, msg_out->options, DHCP_OPTION_PARAMETER_REQUEST_LIST, LWIP_ARRAYSIZE(dhcp_discover_request_options)); + for (i = 0; i < LWIP_ARRAYSIZE(dhcp_discover_request_options); i++) { + options_out_len = dhcp_option_byte(options_out_len, msg_out->options, dhcp_discover_request_options[i]); + } + +#if LWIP_NETIF_HOSTNAME + options_out_len = dhcp_option_hostname(options_out_len, msg_out->options, netif); +#endif /* LWIP_NETIF_HOSTNAME */ + + LWIP_HOOK_DHCP_APPEND_OPTIONS(netif, dhcp, DHCP_STATE_REQUESTING, msg_out, DHCP_REQUEST, &options_out_len); + dhcp_option_trailer(options_out_len, msg_out->options, p_out); + + /* send broadcast to any DHCP server 
*/ + result = udp_sendto_if_src(dhcp_pcb, p_out, IP_ADDR_BROADCAST, LWIP_IANA_PORT_DHCP_SERVER, netif, IP4_ADDR_ANY); + pbuf_free(p_out); + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_select: REQUESTING\n")); + } else { + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_WARNING, ("dhcp_select: could not allocate DHCP request\n")); + result = ERR_MEM; + } + if (dhcp->tries < 255) { + dhcp->tries++; + } + msecs = (u16_t)((dhcp->tries < 6 ? 1 << dhcp->tries : 60) * 1000); + dhcp->request_timeout = (u16_t)((msecs + DHCP_FINE_TIMER_MSECS - 1) / DHCP_FINE_TIMER_MSECS); + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_STATE, ("dhcp_select(): set request timeout %"U16_F" msecs\n", msecs)); + return result; +} + +/** + * The DHCP timer that checks for lease renewal/rebind timeouts. + * Must be called once a minute (see @ref DHCP_COARSE_TIMER_SECS). + */ +void +dhcp_coarse_tmr(void) +{ + struct netif *netif; + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_coarse_tmr()\n")); + /* iterate through all network interfaces */ + NETIF_FOREACH(netif) { + /* only act on DHCP configured interfaces */ + struct dhcp *dhcp = netif_dhcp_data(netif); + if ((dhcp != NULL) && (dhcp->state != DHCP_STATE_OFF)) { + /* compare lease time to expire timeout */ + if (dhcp->t0_timeout && (++dhcp->lease_used == dhcp->t0_timeout)) { + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_coarse_tmr(): t0 timeout\n")); + /* this clients' lease time has expired */ + dhcp_release_and_stop(netif); + dhcp_start(netif); + /* timer is active (non zero), and triggers (zeroes) now? */ + } else if (dhcp->t2_rebind_time && (dhcp->t2_rebind_time-- == 1)) { + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_coarse_tmr(): t2 timeout\n")); + /* this clients' rebind timeout triggered */ + dhcp_t2_timeout(netif); + /* timer is active (non zero), and triggers (zeroes) now */ + } else if (dhcp->t1_renew_time && (dhcp->t1_renew_time-- == 1)) { + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_coarse_tmr(): t1 timeout\n")); + /* this clients' renewal timeout triggered */ + dhcp_t1_timeout(netif); + } + } + } +} + +/** + * DHCP transaction timeout handling (this function must be called every 500ms, + * see @ref DHCP_FINE_TIMER_MSECS). + * + * A DHCP server is expected to respond within a short period of time. + * This timer checks whether an outstanding DHCP request is timed out. + */ +void +dhcp_fine_tmr(void) +{ + struct netif *netif; + /* loop through netif's */ + NETIF_FOREACH(netif) { + struct dhcp *dhcp = netif_dhcp_data(netif); + /* only act on DHCP configured interfaces */ + if (dhcp != NULL) { + /* timer is active (non zero), and is about to trigger now */ + if (dhcp->request_timeout > 1) { + dhcp->request_timeout--; + } else if (dhcp->request_timeout == 1) { + dhcp->request_timeout--; + /* { dhcp->request_timeout == 0 } */ + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_fine_tmr(): request timeout\n")); + /* this client's request timeout triggered */ + dhcp_timeout(netif); + } + } + } +} + +/** + * A DHCP negotiation transaction, or ARP request, has timed out. + * + * The timer that was started with the DHCP or ARP request has + * timed out, indicating no response was received in time. 
+ * + * @param netif the netif under DHCP control + */ +static void +dhcp_timeout(struct netif *netif) +{ + struct dhcp *dhcp = netif_dhcp_data(netif); + + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_timeout()\n")); + /* back-off period has passed, or server selection timed out */ + if ((dhcp->state == DHCP_STATE_BACKING_OFF) || (dhcp->state == DHCP_STATE_SELECTING)) { + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_timeout(): restarting discovery\n")); + dhcp_discover(netif); + /* receiving the requested lease timed out */ + } else if (dhcp->state == DHCP_STATE_REQUESTING) { + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_timeout(): REQUESTING, DHCP request timed out\n")); + if (dhcp->tries <= 5) { + dhcp_select(netif); + } else { + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_timeout(): REQUESTING, releasing, restarting\n")); + dhcp_release_and_stop(netif); + dhcp_start(netif); + } +#if DHCP_DOES_ARP_CHECK + /* received no ARP reply for the offered address (which is good) */ + } else if (dhcp->state == DHCP_STATE_CHECKING) { + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_timeout(): CHECKING, ARP request timed out\n")); + if (dhcp->tries <= 1) { + dhcp_check(netif); + /* no ARP replies on the offered address, + looks like the IP address is indeed free */ + } else { + /* bind the interface to the offered address */ + dhcp_bind(netif); + } +#endif /* DHCP_DOES_ARP_CHECK */ + } else if (dhcp->state == DHCP_STATE_REBOOTING) { + if (dhcp->tries < REBOOT_TRIES) { + dhcp_reboot(netif); + } else { + dhcp_discover(netif); + } + } +} + +/** + * The renewal period has timed out. + * + * @param netif the netif under DHCP control + */ +static void +dhcp_t1_timeout(struct netif *netif) +{ + struct dhcp *dhcp = netif_dhcp_data(netif); + + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_STATE, ("dhcp_t1_timeout()\n")); + if ((dhcp->state == DHCP_STATE_REQUESTING) || (dhcp->state == DHCP_STATE_BOUND) || + (dhcp->state == DHCP_STATE_RENEWING)) { + /* just retry to renew - note that the rebind timer (t2) will + * eventually time-out if renew tries fail. */ + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, + ("dhcp_t1_timeout(): must renew\n")); + /* This slightly different to RFC2131: DHCPREQUEST will be sent from state + DHCP_STATE_RENEWING, not DHCP_STATE_BOUND */ + dhcp_renew(netif); + /* Calculate next timeout */ + if (((dhcp->t2_timeout - dhcp->lease_used) / 2) >= ((60 + DHCP_COARSE_TIMER_SECS / 2) / DHCP_COARSE_TIMER_SECS)) { + dhcp->t1_renew_time = (u16_t)((dhcp->t2_timeout - dhcp->lease_used) / 2); + } + } +} + +/** + * The rebind period has timed out. 
+ * + * @param netif the netif under DHCP control + */ +static void +dhcp_t2_timeout(struct netif *netif) +{ + struct dhcp *dhcp = netif_dhcp_data(netif); + + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_t2_timeout()\n")); + if ((dhcp->state == DHCP_STATE_REQUESTING) || (dhcp->state == DHCP_STATE_BOUND) || + (dhcp->state == DHCP_STATE_RENEWING) || (dhcp->state == DHCP_STATE_REBINDING)) { + /* just retry to rebind */ + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, + ("dhcp_t2_timeout(): must rebind\n")); + /* This slightly different to RFC2131: DHCPREQUEST will be sent from state + DHCP_STATE_REBINDING, not DHCP_STATE_BOUND */ + dhcp_rebind(netif); + /* Calculate next timeout */ + if (((dhcp->t0_timeout - dhcp->lease_used) / 2) >= ((60 + DHCP_COARSE_TIMER_SECS / 2) / DHCP_COARSE_TIMER_SECS)) { + dhcp->t2_rebind_time = (u16_t)((dhcp->t0_timeout - dhcp->lease_used) / 2); + } + } +} + +/** + * Handle a DHCP ACK packet + * + * @param netif the netif under DHCP control + */ +static void +dhcp_handle_ack(struct netif *netif, struct dhcp_msg *msg_in) +{ + struct dhcp *dhcp = netif_dhcp_data(netif); + +#if LWIP_DHCP_PROVIDE_DNS_SERVERS || LWIP_DHCP_GET_NTP_SRV + u8_t n; +#endif /* LWIP_DHCP_PROVIDE_DNS_SERVERS || LWIP_DHCP_GET_NTP_SRV */ +#if LWIP_DHCP_GET_NTP_SRV + ip4_addr_t ntp_server_addrs[LWIP_DHCP_MAX_NTP_SERVERS]; +#endif + + /* clear options we might not get from the ACK */ + ip4_addr_set_zero(&dhcp->offered_sn_mask); + ip4_addr_set_zero(&dhcp->offered_gw_addr); +#if LWIP_DHCP_BOOTP_FILE + ip4_addr_set_zero(&dhcp->offered_si_addr); +#endif /* LWIP_DHCP_BOOTP_FILE */ + + /* lease time given? */ + if (dhcp_option_given(dhcp, DHCP_OPTION_IDX_LEASE_TIME)) { + /* remember offered lease time */ + dhcp->offered_t0_lease = dhcp_get_option_value(dhcp, DHCP_OPTION_IDX_LEASE_TIME); + } + /* renewal period given? */ + if (dhcp_option_given(dhcp, DHCP_OPTION_IDX_T1)) { + /* remember given renewal period */ + dhcp->offered_t1_renew = dhcp_get_option_value(dhcp, DHCP_OPTION_IDX_T1); + } else { + /* calculate safe periods for renewal */ + dhcp->offered_t1_renew = dhcp->offered_t0_lease / 2; + } + + /* renewal period given? */ + if (dhcp_option_given(dhcp, DHCP_OPTION_IDX_T2)) { + /* remember given rebind period */ + dhcp->offered_t2_rebind = dhcp_get_option_value(dhcp, DHCP_OPTION_IDX_T2); + } else { + /* calculate safe periods for rebinding (offered_t0_lease * 0.875 -> 87.5%)*/ + dhcp->offered_t2_rebind = (dhcp->offered_t0_lease * 7U) / 8U; + } + + /* (y)our internet address */ + ip4_addr_copy(dhcp->offered_ip_addr, msg_in->yiaddr); + +#if LWIP_DHCP_BOOTP_FILE + /* copy boot server address, + boot file name copied in dhcp_parse_reply if not overloaded */ + ip4_addr_copy(dhcp->offered_si_addr, msg_in->siaddr); +#endif /* LWIP_DHCP_BOOTP_FILE */ + + /* subnet mask given? 
*/ + if (dhcp_option_given(dhcp, DHCP_OPTION_IDX_SUBNET_MASK)) { + /* remember given subnet mask */ + ip4_addr_set_u32(&dhcp->offered_sn_mask, lwip_htonl(dhcp_get_option_value(dhcp, DHCP_OPTION_IDX_SUBNET_MASK))); + dhcp->subnet_mask_given = 1; + } else { + dhcp->subnet_mask_given = 0; + } + + /* gateway router */ + if (dhcp_option_given(dhcp, DHCP_OPTION_IDX_ROUTER)) { + ip4_addr_set_u32(&dhcp->offered_gw_addr, lwip_htonl(dhcp_get_option_value(dhcp, DHCP_OPTION_IDX_ROUTER))); + } + +#if LWIP_DHCP_GET_NTP_SRV + /* NTP servers */ + for (n = 0; (n < LWIP_DHCP_MAX_NTP_SERVERS) && dhcp_option_given(dhcp, DHCP_OPTION_IDX_NTP_SERVER + n); n++) { + ip4_addr_set_u32(&ntp_server_addrs[n], lwip_htonl(dhcp_get_option_value(dhcp, DHCP_OPTION_IDX_NTP_SERVER + n))); + } + dhcp_set_ntp_servers(n, ntp_server_addrs); +#endif /* LWIP_DHCP_GET_NTP_SRV */ + +#if LWIP_DHCP_PROVIDE_DNS_SERVERS + /* DNS servers */ + for (n = 0; (n < LWIP_DHCP_PROVIDE_DNS_SERVERS) && dhcp_option_given(dhcp, DHCP_OPTION_IDX_DNS_SERVER + n); n++) { + ip_addr_t dns_addr; + ip_addr_set_ip4_u32_val(dns_addr, lwip_htonl(dhcp_get_option_value(dhcp, DHCP_OPTION_IDX_DNS_SERVER + n))); + dns_setserver(n, &dns_addr); + } +#endif /* LWIP_DHCP_PROVIDE_DNS_SERVERS */ +} + +/** + * @ingroup dhcp4 + * Set a statically allocated struct dhcp to work with. + * Using this prevents dhcp_start to allocate it using mem_malloc. + * + * @param netif the netif for which to set the struct dhcp + * @param dhcp (uninitialised) dhcp struct allocated by the application + */ +void +dhcp_set_struct(struct netif *netif, struct dhcp *dhcp) +{ + LWIP_ASSERT_CORE_LOCKED(); + LWIP_ASSERT("netif != NULL", netif != NULL); + LWIP_ASSERT("dhcp != NULL", dhcp != NULL); + LWIP_ASSERT("netif already has a struct dhcp set", netif_dhcp_data(netif) == NULL); + + /* clear data structure */ + memset(dhcp, 0, sizeof(struct dhcp)); + /* dhcp_set_state(&dhcp, DHCP_STATE_OFF); */ + netif_set_client_data(netif, LWIP_NETIF_CLIENT_DATA_INDEX_DHCP, dhcp); +} + +/** + * @ingroup dhcp4 + * Removes a struct dhcp from a netif. + * + * ATTENTION: Only use this when not using dhcp_set_struct() to allocate the + * struct dhcp since the memory is passed back to the heap. + * + * @param netif the netif from which to remove the struct dhcp + */ +void dhcp_cleanup(struct netif *netif) +{ + LWIP_ASSERT_CORE_LOCKED(); + LWIP_ASSERT("netif != NULL", netif != NULL); + + if (netif_dhcp_data(netif) != NULL) { + mem_free(netif_dhcp_data(netif)); + netif_set_client_data(netif, LWIP_NETIF_CLIENT_DATA_INDEX_DHCP, NULL); + } +} + +/** + * @ingroup dhcp4 + * Start DHCP negotiation for a network interface. + * + * If no DHCP client instance was attached to this interface, + * a new client is created first. If a DHCP client instance + * was already present, it restarts negotiation. 
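+ *
+ * A minimal usage sketch (illustrative; my_netif is a placeholder for a netif
+ * that was already added with netif_add(), has an MTU of at least 576, and has
+ * been brought up with netif_set_up()):
+ *
+ *   err_t err = dhcp_start(&my_netif);
+ *   if (err != ERR_OK) {
+ *     ...out of memory, or this netif cannot be used for DHCP...
+ *   }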
+ * + * @param netif The lwIP network interface + * @return lwIP error code + * - ERR_OK - No error + * - ERR_MEM - Out of memory + */ +err_t +dhcp_start(struct netif *netif) +{ + struct dhcp *dhcp; + err_t result; + + LWIP_ASSERT_CORE_LOCKED(); + LWIP_ERROR("netif != NULL", (netif != NULL), return ERR_ARG;); + LWIP_ERROR("netif is not up, old style port?", netif_is_up(netif), return ERR_ARG;); + dhcp = netif_dhcp_data(netif); + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_start(netif=%p) %c%c%"U16_F"\n", (void *)netif, netif->name[0], netif->name[1], (u16_t)netif->num)); + + /* check MTU of the netif */ + if (netif->mtu < DHCP_MAX_MSG_LEN_MIN_REQUIRED) { + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_start(): Cannot use this netif with DHCP: MTU is too small\n")); + return ERR_MEM; + } + + /* no DHCP client attached yet? */ + if (dhcp == NULL) { + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_start(): mallocing new DHCP client\n")); + dhcp = (struct dhcp *)mem_malloc(sizeof(struct dhcp)); + if (dhcp == NULL) { + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_start(): could not allocate dhcp\n")); + return ERR_MEM; + } + + /* store this dhcp client in the netif */ + netif_set_client_data(netif, LWIP_NETIF_CLIENT_DATA_INDEX_DHCP, dhcp); + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_start(): allocated dhcp")); + /* already has DHCP client attached */ + } else { + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_start(): restarting DHCP configuration\n")); + + if (dhcp->pcb_allocated != 0) { + dhcp_dec_pcb_refcount(); /* free DHCP PCB if not needed any more */ + } + /* dhcp is cleared below, no need to reset flag*/ + } + + /* clear data structure */ + memset(dhcp, 0, sizeof(struct dhcp)); + /* dhcp_set_state(&dhcp, DHCP_STATE_OFF); */ + + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_start(): starting DHCP configuration\n")); + + if (dhcp_inc_pcb_refcount() != ERR_OK) { /* ensure DHCP PCB is allocated */ + return ERR_MEM; + } + dhcp->pcb_allocated = 1; + + if (!netif_is_link_up(netif)) { + /* set state INIT and wait for dhcp_network_changed() to call dhcp_discover() */ + dhcp_set_state(dhcp, DHCP_STATE_INIT); + return ERR_OK; + } + + /* (re)start the DHCP negotiation */ + result = dhcp_discover(netif); + if (result != ERR_OK) { + /* free resources allocated above */ + dhcp_release_and_stop(netif); + return ERR_MEM; + } + return result; +} + +/** + * @ingroup dhcp4 + * Inform a DHCP server of our manual configuration. + * + * This informs DHCP servers of our fixed IP address configuration + * by sending an INFORM message. It does not involve DHCP address + * configuration, it is just here to be nice to the network. 
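+ *
+ * A minimal sketch (illustrative; my_netif is assumed to carry a statically
+ * configured address, e.g. set via netif_add()/netif_set_addr(), and to be up):
+ *
+ *   dhcp_inform(&my_netif);
+ *
+ * This sends one broadcast DHCPINFORM and releases the shared DHCP PCB again
+ * if no other netif is currently using DHCP.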
+ * + * @param netif The lwIP network interface + */ +void +dhcp_inform(struct netif *netif) +{ + struct dhcp dhcp; + struct pbuf *p_out; + u16_t options_out_len; + + LWIP_ASSERT_CORE_LOCKED(); + LWIP_ERROR("netif != NULL", (netif != NULL), return;); + + if (dhcp_inc_pcb_refcount() != ERR_OK) { /* ensure DHCP PCB is allocated */ + return; + } + + memset(&dhcp, 0, sizeof(struct dhcp)); + dhcp_set_state(&dhcp, DHCP_STATE_INFORMING); + + /* create and initialize the DHCP message header */ + p_out = dhcp_create_msg(netif, &dhcp, DHCP_INFORM, &options_out_len); + if (p_out != NULL) { + struct dhcp_msg *msg_out = (struct dhcp_msg *)p_out->payload; + options_out_len = dhcp_option(options_out_len, msg_out->options, DHCP_OPTION_MAX_MSG_SIZE, DHCP_OPTION_MAX_MSG_SIZE_LEN); + options_out_len = dhcp_option_short(options_out_len, msg_out->options, DHCP_MAX_MSG_LEN(netif)); + + LWIP_HOOK_DHCP_APPEND_OPTIONS(netif, &dhcp, DHCP_STATE_INFORMING, msg_out, DHCP_INFORM, &options_out_len); + dhcp_option_trailer(options_out_len, msg_out->options, p_out); + + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_inform: INFORMING\n")); + + udp_sendto_if(dhcp_pcb, p_out, IP_ADDR_BROADCAST, LWIP_IANA_PORT_DHCP_SERVER, netif); + + pbuf_free(p_out); + } else { + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_SERIOUS, ("dhcp_inform: could not allocate DHCP request\n")); + } + + dhcp_dec_pcb_refcount(); /* delete DHCP PCB if not needed any more */ +} + +/** Handle a possible change in the network configuration. + * + * This enters the REBOOTING state to verify that the currently bound + * address is still valid. + */ +void +dhcp_network_changed(struct netif *netif) +{ + struct dhcp *dhcp = netif_dhcp_data(netif); + + if (!dhcp) { + return; + } + switch (dhcp->state) { + case DHCP_STATE_REBINDING: + case DHCP_STATE_RENEWING: + case DHCP_STATE_BOUND: + case DHCP_STATE_REBOOTING: + dhcp->tries = 0; + dhcp_reboot(netif); + break; + case DHCP_STATE_OFF: + /* stay off */ + break; + default: + LWIP_ASSERT("invalid dhcp->state", dhcp->state <= DHCP_STATE_BACKING_OFF); + /* INIT/REQUESTING/CHECKING/BACKING_OFF restart with new 'rid' because the + state changes, SELECTING: continue with current 'rid' as we stay in the + same state */ +#if LWIP_DHCP_AUTOIP_COOP + if (dhcp->autoip_coop_state == DHCP_AUTOIP_COOP_STATE_ON) { + autoip_stop(netif); + dhcp->autoip_coop_state = DHCP_AUTOIP_COOP_STATE_OFF; + } +#endif /* LWIP_DHCP_AUTOIP_COOP */ + /* ensure we start with short timeouts, even if already discovering */ + dhcp->tries = 0; + dhcp_discover(netif); + break; + } +} + +#if DHCP_DOES_ARP_CHECK +/** + * Match an ARP reply with the offered IP address: + * check whether the offered IP address is not in use using ARP + * + * @param netif the network interface on which the reply was received + * @param addr The IP address we received a reply from + */ +void +dhcp_arp_reply(struct netif *netif, const ip4_addr_t *addr) +{ + struct dhcp *dhcp; + + LWIP_ERROR("netif != NULL", (netif != NULL), return;); + dhcp = netif_dhcp_data(netif); + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_arp_reply()\n")); + /* is a DHCP client doing an ARP check? */ + if ((dhcp != NULL) && (dhcp->state == DHCP_STATE_CHECKING)) { + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_arp_reply(): CHECKING, arp reply for 0x%08"X32_F"\n", + ip4_addr_get_u32(addr))); + /* did a host respond with the address we + were offered by the DHCP server? 
*/ + if (ip4_addr_cmp(addr, &dhcp->offered_ip_addr)) { + /* we will not accept the offered address */ + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE | LWIP_DBG_LEVEL_WARNING, + ("dhcp_arp_reply(): arp reply matched with offered address, declining\n")); + dhcp_decline(netif); + } + } +} + +/** + * Decline an offered lease. + * + * Tell the DHCP server we do not accept the offered address. + * One reason to decline the lease is when we find out the address + * is already in use by another host (through ARP). + * + * @param netif the netif under DHCP control + */ +static err_t +dhcp_decline(struct netif *netif) +{ + struct dhcp *dhcp = netif_dhcp_data(netif); + err_t result; + u16_t msecs; + struct pbuf *p_out; + u16_t options_out_len; + + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_decline()\n")); + dhcp_set_state(dhcp, DHCP_STATE_BACKING_OFF); + /* create and initialize the DHCP message header */ + p_out = dhcp_create_msg(netif, dhcp, DHCP_DECLINE, &options_out_len); + if (p_out != NULL) { + struct dhcp_msg *msg_out = (struct dhcp_msg *)p_out->payload; + options_out_len = dhcp_option(options_out_len, msg_out->options, DHCP_OPTION_REQUESTED_IP, 4); + options_out_len = dhcp_option_long(options_out_len, msg_out->options, lwip_ntohl(ip4_addr_get_u32(&dhcp->offered_ip_addr))); + + LWIP_HOOK_DHCP_APPEND_OPTIONS(netif, dhcp, DHCP_STATE_BACKING_OFF, msg_out, DHCP_DECLINE, &options_out_len); + dhcp_option_trailer(options_out_len, msg_out->options, p_out); + + /* per section 4.4.4, broadcast DECLINE messages */ + result = udp_sendto_if_src(dhcp_pcb, p_out, IP_ADDR_BROADCAST, LWIP_IANA_PORT_DHCP_SERVER, netif, IP4_ADDR_ANY); + pbuf_free(p_out); + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_decline: BACKING OFF\n")); + } else { + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_SERIOUS, + ("dhcp_decline: could not allocate DHCP request\n")); + result = ERR_MEM; + } + if (dhcp->tries < 255) { + dhcp->tries++; + } + msecs = 10 * 1000; + dhcp->request_timeout = (u16_t)((msecs + DHCP_FINE_TIMER_MSECS - 1) / DHCP_FINE_TIMER_MSECS); + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_decline(): set request timeout %"U16_F" msecs\n", msecs)); + return result; +} +#endif /* DHCP_DOES_ARP_CHECK */ + + +/** + * Start the DHCP process, discover a DHCP server. 
+ * + * @param netif the netif under DHCP control + */ +static err_t +dhcp_discover(struct netif *netif) +{ + struct dhcp *dhcp = netif_dhcp_data(netif); + err_t result = ERR_OK; + u16_t msecs; + u8_t i; + struct pbuf *p_out; + u16_t options_out_len; + + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_discover()\n")); + + ip4_addr_set_any(&dhcp->offered_ip_addr); + dhcp_set_state(dhcp, DHCP_STATE_SELECTING); + /* create and initialize the DHCP message header */ + p_out = dhcp_create_msg(netif, dhcp, DHCP_DISCOVER, &options_out_len); + if (p_out != NULL) { + struct dhcp_msg *msg_out = (struct dhcp_msg *)p_out->payload; + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_discover: making request\n")); + + options_out_len = dhcp_option(options_out_len, msg_out->options, DHCP_OPTION_MAX_MSG_SIZE, DHCP_OPTION_MAX_MSG_SIZE_LEN); + options_out_len = dhcp_option_short(options_out_len, msg_out->options, DHCP_MAX_MSG_LEN(netif)); + + options_out_len = dhcp_option(options_out_len, msg_out->options, DHCP_OPTION_PARAMETER_REQUEST_LIST, LWIP_ARRAYSIZE(dhcp_discover_request_options)); + for (i = 0; i < LWIP_ARRAYSIZE(dhcp_discover_request_options); i++) { + options_out_len = dhcp_option_byte(options_out_len, msg_out->options, dhcp_discover_request_options[i]); + } + LWIP_HOOK_DHCP_APPEND_OPTIONS(netif, dhcp, DHCP_STATE_SELECTING, msg_out, DHCP_DISCOVER, &options_out_len); + dhcp_option_trailer(options_out_len, msg_out->options, p_out); + + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_discover: sendto(DISCOVER, IP_ADDR_BROADCAST, LWIP_IANA_PORT_DHCP_SERVER)\n")); + udp_sendto_if_src(dhcp_pcb, p_out, IP_ADDR_BROADCAST, LWIP_IANA_PORT_DHCP_SERVER, netif, IP4_ADDR_ANY); + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_discover: deleting()ing\n")); + pbuf_free(p_out); + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_discover: SELECTING\n")); + } else { + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_SERIOUS, ("dhcp_discover: could not allocate DHCP request\n")); + } + if (dhcp->tries < 255) { + dhcp->tries++; + } +#if LWIP_DHCP_AUTOIP_COOP + if (dhcp->tries >= LWIP_DHCP_AUTOIP_COOP_TRIES && dhcp->autoip_coop_state == DHCP_AUTOIP_COOP_STATE_OFF) { + dhcp->autoip_coop_state = DHCP_AUTOIP_COOP_STATE_ON; + autoip_start(netif); + } +#endif /* LWIP_DHCP_AUTOIP_COOP */ + msecs = (u16_t)((dhcp->tries < 6 ? 1 << dhcp->tries : 60) * 1000); + dhcp->request_timeout = (u16_t)((msecs + DHCP_FINE_TIMER_MSECS - 1) / DHCP_FINE_TIMER_MSECS); + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_discover(): set request timeout %"U16_F" msecs\n", msecs)); + return result; +} + + +/** + * Bind the interface to the offered IP address. 
+ * + * @param netif network interface to bind to the offered address + */ +static void +dhcp_bind(struct netif *netif) +{ + u32_t timeout; + struct dhcp *dhcp; + ip4_addr_t sn_mask, gw_addr; + LWIP_ERROR("dhcp_bind: netif != NULL", (netif != NULL), return;); + dhcp = netif_dhcp_data(netif); + LWIP_ERROR("dhcp_bind: dhcp != NULL", (dhcp != NULL), return;); + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_bind(netif=%p) %c%c%"U16_F"\n", (void *)netif, netif->name[0], netif->name[1], (u16_t)netif->num)); + + /* reset time used of lease */ + dhcp->lease_used = 0; + + if (dhcp->offered_t0_lease != 0xffffffffUL) { + /* set renewal period timer */ + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_bind(): t0 renewal timer %"U32_F" secs\n", dhcp->offered_t0_lease)); + timeout = (dhcp->offered_t0_lease + DHCP_COARSE_TIMER_SECS / 2) / DHCP_COARSE_TIMER_SECS; + if (timeout > 0xffff) { + timeout = 0xffff; + } + dhcp->t0_timeout = (u16_t)timeout; + if (dhcp->t0_timeout == 0) { + dhcp->t0_timeout = 1; + } + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_bind(): set request timeout %"U32_F" msecs\n", dhcp->offered_t0_lease * 1000)); + } + + /* temporary DHCP lease? */ + if (dhcp->offered_t1_renew != 0xffffffffUL) { + /* set renewal period timer */ + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_bind(): t1 renewal timer %"U32_F" secs\n", dhcp->offered_t1_renew)); + timeout = (dhcp->offered_t1_renew + DHCP_COARSE_TIMER_SECS / 2) / DHCP_COARSE_TIMER_SECS; + if (timeout > 0xffff) { + timeout = 0xffff; + } + dhcp->t1_timeout = (u16_t)timeout; + if (dhcp->t1_timeout == 0) { + dhcp->t1_timeout = 1; + } + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_bind(): set request timeout %"U32_F" msecs\n", dhcp->offered_t1_renew * 1000)); + dhcp->t1_renew_time = dhcp->t1_timeout; + } + /* set renewal period timer */ + if (dhcp->offered_t2_rebind != 0xffffffffUL) { + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_bind(): t2 rebind timer %"U32_F" secs\n", dhcp->offered_t2_rebind)); + timeout = (dhcp->offered_t2_rebind + DHCP_COARSE_TIMER_SECS / 2) / DHCP_COARSE_TIMER_SECS; + if (timeout > 0xffff) { + timeout = 0xffff; + } + dhcp->t2_timeout = (u16_t)timeout; + if (dhcp->t2_timeout == 0) { + dhcp->t2_timeout = 1; + } + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_bind(): set request timeout %"U32_F" msecs\n", dhcp->offered_t2_rebind * 1000)); + dhcp->t2_rebind_time = dhcp->t2_timeout; + } + + /* If we have sub 1 minute lease, t2 and t1 will kick in at the same time. */ + if ((dhcp->t1_timeout >= dhcp->t2_timeout) && (dhcp->t2_timeout > 0)) { + dhcp->t1_timeout = 0; + } + + if (dhcp->subnet_mask_given) { + /* copy offered network mask */ + ip4_addr_copy(sn_mask, dhcp->offered_sn_mask); + } else { + /* subnet mask not given, choose a safe subnet mask given the network class */ + u8_t first_octet = ip4_addr1(&dhcp->offered_ip_addr); + if (first_octet <= 127) { + ip4_addr_set_u32(&sn_mask, PP_HTONL(0xff000000UL)); + } else if (first_octet >= 192) { + ip4_addr_set_u32(&sn_mask, PP_HTONL(0xffffff00UL)); + } else { + ip4_addr_set_u32(&sn_mask, PP_HTONL(0xffff0000UL)); + } + } + + ip4_addr_copy(gw_addr, dhcp->offered_gw_addr); + /* gateway address not given? 
*/ + if (ip4_addr_isany_val(gw_addr)) { + /* copy network address */ + ip4_addr_get_network(&gw_addr, &dhcp->offered_ip_addr, &sn_mask); + /* use first host address on network as gateway */ + ip4_addr_set_u32(&gw_addr, ip4_addr_get_u32(&gw_addr) | PP_HTONL(0x00000001UL)); + } + +#if LWIP_DHCP_AUTOIP_COOP + if (dhcp->autoip_coop_state == DHCP_AUTOIP_COOP_STATE_ON) { + autoip_stop(netif); + dhcp->autoip_coop_state = DHCP_AUTOIP_COOP_STATE_OFF; + } +#endif /* LWIP_DHCP_AUTOIP_COOP */ + + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_STATE, ("dhcp_bind(): IP: 0x%08"X32_F" SN: 0x%08"X32_F" GW: 0x%08"X32_F"\n", + ip4_addr_get_u32(&dhcp->offered_ip_addr), ip4_addr_get_u32(&sn_mask), ip4_addr_get_u32(&gw_addr))); + /* netif is now bound to DHCP leased address - set this before assigning the address + to ensure the callback can use dhcp_supplied_address() */ + dhcp_set_state(dhcp, DHCP_STATE_BOUND); + + netif_set_addr(netif, &dhcp->offered_ip_addr, &sn_mask, &gw_addr); + /* interface is used by routing now that an address is set */ +} + +/** + * @ingroup dhcp4 + * Renew an existing DHCP lease at the involved DHCP server. + * + * @param netif network interface which must renew its lease + */ +err_t +dhcp_renew(struct netif *netif) +{ + struct dhcp *dhcp = netif_dhcp_data(netif); + err_t result; + u16_t msecs; + u8_t i; + struct pbuf *p_out; + u16_t options_out_len; + + LWIP_ASSERT_CORE_LOCKED(); + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_renew()\n")); + dhcp_set_state(dhcp, DHCP_STATE_RENEWING); + + /* create and initialize the DHCP message header */ + p_out = dhcp_create_msg(netif, dhcp, DHCP_REQUEST, &options_out_len); + if (p_out != NULL) { + struct dhcp_msg *msg_out = (struct dhcp_msg *)p_out->payload; + options_out_len = dhcp_option(options_out_len, msg_out->options, DHCP_OPTION_MAX_MSG_SIZE, DHCP_OPTION_MAX_MSG_SIZE_LEN); + options_out_len = dhcp_option_short(options_out_len, msg_out->options, DHCP_MAX_MSG_LEN(netif)); + + options_out_len = dhcp_option(options_out_len, msg_out->options, DHCP_OPTION_PARAMETER_REQUEST_LIST, LWIP_ARRAYSIZE(dhcp_discover_request_options)); + for (i = 0; i < LWIP_ARRAYSIZE(dhcp_discover_request_options); i++) { + options_out_len = dhcp_option_byte(options_out_len, msg_out->options, dhcp_discover_request_options[i]); + } + +#if LWIP_NETIF_HOSTNAME + options_out_len = dhcp_option_hostname(options_out_len, msg_out->options, netif); +#endif /* LWIP_NETIF_HOSTNAME */ + + LWIP_HOOK_DHCP_APPEND_OPTIONS(netif, dhcp, DHCP_STATE_RENEWING, msg_out, DHCP_REQUEST, &options_out_len); + dhcp_option_trailer(options_out_len, msg_out->options, p_out); + + result = udp_sendto_if(dhcp_pcb, p_out, &dhcp->server_ip_addr, LWIP_IANA_PORT_DHCP_SERVER, netif); + pbuf_free(p_out); + + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_renew: RENEWING\n")); + } else { + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_SERIOUS, ("dhcp_renew: could not allocate DHCP request\n")); + result = ERR_MEM; + } + if (dhcp->tries < 255) { + dhcp->tries++; + } + /* back-off on retries, but to a maximum of 20 seconds */ + msecs = (u16_t)(dhcp->tries < 10 ? dhcp->tries * 2000 : 20 * 1000); + dhcp->request_timeout = (u16_t)((msecs + DHCP_FINE_TIMER_MSECS - 1) / DHCP_FINE_TIMER_MSECS); + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_renew(): set request timeout %"U16_F" msecs\n", msecs)); + return result; +} + +/** + * Rebind with a DHCP server for an existing DHCP lease. 
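(Aside, not part of the patch: dhcp_bind() above converts the offered t0/t1/t2 intervals into coarse-timer ticks by rounding to the nearest minute and clamping to a non-zero 16-bit value. A minimal sketch of that conversion, assuming the usual DHCP_COARSE_TIMER_SECS default of 60; the helper name is illustrative only.)

#include <stdint.h>

/* nearest-minute conversion used by dhcp_bind() for t0/t1/t2 */
static uint16_t coarse_ticks(uint32_t offered_secs)
{
  uint32_t ticks = (offered_secs + 60u / 2u) / 60u;  /* round to nearest minute */
  if (ticks > 0xffffu) {
    ticks = 0xffffu;   /* the timeout field is 16 bit */
  }
  if (ticks == 0u) {
    ticks = 1u;        /* never schedule a zero timeout */
  }
  return (uint16_t)ticks;
}
/* e.g. coarse_ticks(3600) == 60, so a one-hour T1 fires after roughly 60
   invocations of dhcp_coarse_tmr() */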
+ * + * @param netif network interface which must rebind with a DHCP server + */ +static err_t +dhcp_rebind(struct netif *netif) +{ + struct dhcp *dhcp = netif_dhcp_data(netif); + err_t result; + u16_t msecs; + u8_t i; + struct pbuf *p_out; + u16_t options_out_len; + + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_rebind()\n")); + dhcp_set_state(dhcp, DHCP_STATE_REBINDING); + + /* create and initialize the DHCP message header */ + p_out = dhcp_create_msg(netif, dhcp, DHCP_REQUEST, &options_out_len); + if (p_out != NULL) { + struct dhcp_msg *msg_out = (struct dhcp_msg *)p_out->payload; + options_out_len = dhcp_option(options_out_len, msg_out->options, DHCP_OPTION_MAX_MSG_SIZE, DHCP_OPTION_MAX_MSG_SIZE_LEN); + options_out_len = dhcp_option_short(options_out_len, msg_out->options, DHCP_MAX_MSG_LEN(netif)); + + options_out_len = dhcp_option(options_out_len, msg_out->options, DHCP_OPTION_PARAMETER_REQUEST_LIST, LWIP_ARRAYSIZE(dhcp_discover_request_options)); + for (i = 0; i < LWIP_ARRAYSIZE(dhcp_discover_request_options); i++) { + options_out_len = dhcp_option_byte(options_out_len, msg_out->options, dhcp_discover_request_options[i]); + } + +#if LWIP_NETIF_HOSTNAME + options_out_len = dhcp_option_hostname(options_out_len, msg_out->options, netif); +#endif /* LWIP_NETIF_HOSTNAME */ + + LWIP_HOOK_DHCP_APPEND_OPTIONS(netif, dhcp, DHCP_STATE_REBINDING, msg_out, DHCP_DISCOVER, &options_out_len); + dhcp_option_trailer(options_out_len, msg_out->options, p_out); + + /* broadcast to server */ + result = udp_sendto_if(dhcp_pcb, p_out, IP_ADDR_BROADCAST, LWIP_IANA_PORT_DHCP_SERVER, netif); + pbuf_free(p_out); + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_rebind: REBINDING\n")); + } else { + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_SERIOUS, ("dhcp_rebind: could not allocate DHCP request\n")); + result = ERR_MEM; + } + if (dhcp->tries < 255) { + dhcp->tries++; + } + msecs = (u16_t)(dhcp->tries < 10 ? 
dhcp->tries * 1000 : 10 * 1000); + dhcp->request_timeout = (u16_t)((msecs + DHCP_FINE_TIMER_MSECS - 1) / DHCP_FINE_TIMER_MSECS); + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_rebind(): set request timeout %"U16_F" msecs\n", msecs)); + return result; +} + +/** + * Enter REBOOTING state to verify an existing lease + * + * @param netif network interface which must reboot + */ +static err_t +dhcp_reboot(struct netif *netif) +{ + struct dhcp *dhcp = netif_dhcp_data(netif); + err_t result; + u16_t msecs; + u8_t i; + struct pbuf *p_out; + u16_t options_out_len; + + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_reboot()\n")); + dhcp_set_state(dhcp, DHCP_STATE_REBOOTING); + + /* create and initialize the DHCP message header */ + p_out = dhcp_create_msg(netif, dhcp, DHCP_REQUEST, &options_out_len); + if (p_out != NULL) { + struct dhcp_msg *msg_out = (struct dhcp_msg *)p_out->payload; + options_out_len = dhcp_option(options_out_len, msg_out->options, DHCP_OPTION_MAX_MSG_SIZE, DHCP_OPTION_MAX_MSG_SIZE_LEN); + options_out_len = dhcp_option_short(options_out_len, msg_out->options, DHCP_MAX_MSG_LEN_MIN_REQUIRED); + + options_out_len = dhcp_option(options_out_len, msg_out->options, DHCP_OPTION_REQUESTED_IP, 4); + options_out_len = dhcp_option_long(options_out_len, msg_out->options, lwip_ntohl(ip4_addr_get_u32(&dhcp->offered_ip_addr))); + + options_out_len = dhcp_option(options_out_len, msg_out->options, DHCP_OPTION_PARAMETER_REQUEST_LIST, LWIP_ARRAYSIZE(dhcp_discover_request_options)); + for (i = 0; i < LWIP_ARRAYSIZE(dhcp_discover_request_options); i++) { + options_out_len = dhcp_option_byte(options_out_len, msg_out->options, dhcp_discover_request_options[i]); + } + +#if LWIP_NETIF_HOSTNAME + options_out_len = dhcp_option_hostname(options_out_len, msg_out->options, netif); +#endif /* LWIP_NETIF_HOSTNAME */ + + LWIP_HOOK_DHCP_APPEND_OPTIONS(netif, dhcp, DHCP_STATE_REBOOTING, msg_out, DHCP_REQUEST, &options_out_len); + dhcp_option_trailer(options_out_len, msg_out->options, p_out); + + /* broadcast to server */ + result = udp_sendto_if(dhcp_pcb, p_out, IP_ADDR_BROADCAST, LWIP_IANA_PORT_DHCP_SERVER, netif); + pbuf_free(p_out); + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_reboot: REBOOTING\n")); + } else { + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_SERIOUS, ("dhcp_reboot: could not allocate DHCP request\n")); + result = ERR_MEM; + } + if (dhcp->tries < 255) { + dhcp->tries++; + } + msecs = (u16_t)(dhcp->tries < 10 ? dhcp->tries * 1000 : 10 * 1000); + dhcp->request_timeout = (u16_t)((msecs + DHCP_FINE_TIMER_MSECS - 1) / DHCP_FINE_TIMER_MSECS); + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_reboot(): set request timeout %"U16_F" msecs\n", msecs)); + return result; +} + +/** + * @ingroup dhcp4 + * Release a DHCP lease and stop DHCP statemachine (and AUTOIP if LWIP_DHCP_AUTOIP_COOP). + * + * @param netif network interface + */ +void +dhcp_release_and_stop(struct netif *netif) +{ + struct dhcp *dhcp = netif_dhcp_data(netif); + ip_addr_t server_ip_addr; + + LWIP_ASSERT_CORE_LOCKED(); + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_release_and_stop()\n")); + if (dhcp == NULL) { + return; + } + + /* already off? 
-> nothing to do */ + if (dhcp->state == DHCP_STATE_OFF) { + return; + } + + ip_addr_copy(server_ip_addr, dhcp->server_ip_addr); + + /* clean old DHCP offer */ + ip_addr_set_zero_ip4(&dhcp->server_ip_addr); + ip4_addr_set_zero(&dhcp->offered_ip_addr); + ip4_addr_set_zero(&dhcp->offered_sn_mask); + ip4_addr_set_zero(&dhcp->offered_gw_addr); +#if LWIP_DHCP_BOOTP_FILE + ip4_addr_set_zero(&dhcp->offered_si_addr); +#endif /* LWIP_DHCP_BOOTP_FILE */ + dhcp->offered_t0_lease = dhcp->offered_t1_renew = dhcp->offered_t2_rebind = 0; + dhcp->t1_renew_time = dhcp->t2_rebind_time = dhcp->lease_used = dhcp->t0_timeout = 0; + + /* send release message when current IP was assigned via DHCP */ + if (dhcp_supplied_address(netif)) { + /* create and initialize the DHCP message header */ + struct pbuf *p_out; + u16_t options_out_len; + p_out = dhcp_create_msg(netif, dhcp, DHCP_RELEASE, &options_out_len); + if (p_out != NULL) { + struct dhcp_msg *msg_out = (struct dhcp_msg *)p_out->payload; + options_out_len = dhcp_option(options_out_len, msg_out->options, DHCP_OPTION_SERVER_ID, 4); + options_out_len = dhcp_option_long(options_out_len, msg_out->options, lwip_ntohl(ip4_addr_get_u32(ip_2_ip4(&server_ip_addr)))); + + LWIP_HOOK_DHCP_APPEND_OPTIONS(netif, dhcp, dhcp->state, msg_out, DHCP_RELEASE, &options_out_len); + dhcp_option_trailer(options_out_len, msg_out->options, p_out); + + udp_sendto_if(dhcp_pcb, p_out, &server_ip_addr, LWIP_IANA_PORT_DHCP_SERVER, netif); + pbuf_free(p_out); + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_release: RELEASED, DHCP_STATE_OFF\n")); + } else { + /* sending release failed, but that's not a problem since the correct behaviour of dhcp does not rely on release */ + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_SERIOUS, ("dhcp_release: could not allocate DHCP request\n")); + } + } + + /* remove IP address from interface (prevents routing from selecting this interface) */ + netif_set_addr(netif, IP4_ADDR_ANY4, IP4_ADDR_ANY4, IP4_ADDR_ANY4); + +#if LWIP_DHCP_AUTOIP_COOP + if (dhcp->autoip_coop_state == DHCP_AUTOIP_COOP_STATE_ON) { + autoip_stop(netif); + dhcp->autoip_coop_state = DHCP_AUTOIP_COOP_STATE_OFF; + } +#endif /* LWIP_DHCP_AUTOIP_COOP */ + + dhcp_set_state(dhcp, DHCP_STATE_OFF); + + if (dhcp->pcb_allocated != 0) { + dhcp_dec_pcb_refcount(); /* free DHCP PCB if not needed any more */ + dhcp->pcb_allocated = 0; + } +} + +/** + * @ingroup dhcp4 + * This function calls dhcp_release_and_stop() internally. + * @deprecated Use dhcp_release_and_stop() instead. + */ +err_t +dhcp_release(struct netif *netif) +{ + dhcp_release_and_stop(netif); + return ERR_OK; +} + +/** + * @ingroup dhcp4 + * This function calls dhcp_release_and_stop() internally. + * @deprecated Use dhcp_release_and_stop() instead. + */ +void +dhcp_stop(struct netif *netif) +{ + dhcp_release_and_stop(netif); +} + +/* + * Set the DHCP state of a DHCP client. + * + * If the state changed, reset the number of tries. + */ +static void +dhcp_set_state(struct dhcp *dhcp, u8_t new_state) +{ + if (new_state != dhcp->state) { + dhcp->state = new_state; + dhcp->tries = 0; + dhcp->request_timeout = 0; + } +} + +/* + * Concatenate an option type and length field to the outgoing + * DHCP message. 
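(Aside, not part of the patch: the hunk above defines dhcp_release_and_stop() and keeps dhcp_release()/dhcp_stop() only as deprecated wrappers. A minimal polling sketch of the client-side lifecycle follows; the helper name is hypothetical, and core-locking details, which depend on the lwIP threading configuration, are deliberately glossed over.)

#include "lwip/dhcp.h"
#include "lwip/netif.h"
#include "lwip/sys.h"

/* hypothetical helper: start DHCP on an already-added, link-up netif and wait
   (bounded) until a lease is bound. NB: the raw-API calls below assert the
   lwIP core lock in threaded configurations; adapt to your setup. */
static int sketch_dhcp_acquire(struct netif *netif, u32_t timeout_ms)
{
  if (dhcp_start(netif) != ERR_OK) {
    return 0;
  }
  while (!dhcp_supplied_address(netif) && (timeout_ms >= 100u)) {
    sys_msleep(100);      /* poll every 100 ms */
    timeout_ms -= 100u;
  }
  return dhcp_supplied_address(netif);
}

/* on shutdown, prefer the combined call over the deprecated pair:
   dhcp_release_and_stop(netif); rather than dhcp_release() + dhcp_stop() */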
+ * + */ +static u16_t +dhcp_option(u16_t options_out_len, u8_t *options, u8_t option_type, u8_t option_len) +{ + LWIP_ASSERT("dhcp_option: options_out_len + 2 + option_len <= DHCP_OPTIONS_LEN", options_out_len + 2U + option_len <= DHCP_OPTIONS_LEN); + options[options_out_len++] = option_type; + options[options_out_len++] = option_len; + return options_out_len; +} +/* + * Concatenate a single byte to the outgoing DHCP message. + * + */ +static u16_t +dhcp_option_byte(u16_t options_out_len, u8_t *options, u8_t value) +{ + LWIP_ASSERT("dhcp_option_byte: options_out_len < DHCP_OPTIONS_LEN", options_out_len < DHCP_OPTIONS_LEN); + options[options_out_len++] = value; + return options_out_len; +} + +static u16_t +dhcp_option_short(u16_t options_out_len, u8_t *options, u16_t value) +{ + LWIP_ASSERT("dhcp_option_short: options_out_len + 2 <= DHCP_OPTIONS_LEN", options_out_len + 2U <= DHCP_OPTIONS_LEN); + options[options_out_len++] = (u8_t)((value & 0xff00U) >> 8); + options[options_out_len++] = (u8_t) (value & 0x00ffU); + return options_out_len; +} + +static u16_t +dhcp_option_long(u16_t options_out_len, u8_t *options, u32_t value) +{ + LWIP_ASSERT("dhcp_option_long: options_out_len + 4 <= DHCP_OPTIONS_LEN", options_out_len + 4U <= DHCP_OPTIONS_LEN); + options[options_out_len++] = (u8_t)((value & 0xff000000UL) >> 24); + options[options_out_len++] = (u8_t)((value & 0x00ff0000UL) >> 16); + options[options_out_len++] = (u8_t)((value & 0x0000ff00UL) >> 8); + options[options_out_len++] = (u8_t)((value & 0x000000ffUL)); + return options_out_len; +} + +#if LWIP_NETIF_HOSTNAME +static u16_t +dhcp_option_hostname(u16_t options_out_len, u8_t *options, struct netif *netif) +{ + if (netif->hostname != NULL) { + size_t namelen = strlen(netif->hostname); + if (namelen > 0) { + size_t len; + const char *p = netif->hostname; + /* Shrink len to available bytes (need 2 bytes for OPTION_HOSTNAME + and 1 byte for trailer) */ + size_t available = DHCP_OPTIONS_LEN - options_out_len - 3; + LWIP_ASSERT("DHCP: hostname is too long!", namelen <= available); + len = LWIP_MIN(namelen, available); + LWIP_ASSERT("DHCP: hostname is too long!", len <= 0xFF); + options_out_len = dhcp_option(options_out_len, options, DHCP_OPTION_HOSTNAME, (u8_t)len); + while (len--) { + options_out_len = dhcp_option_byte(options_out_len, options, *p++); + } + } + } + return options_out_len; +} +#endif /* LWIP_NETIF_HOSTNAME */ + +/** + * Extract the DHCP message and the DHCP options. + * + * Extract the DHCP message and the DHCP options, each into a contiguous + * piece of memory. As a DHCP message is variable sized by its options, + * and also allows overriding some fields for options, the easy approach + * is to first unfold the options into a contiguous piece of memory, and + * use that further on. 
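(Aside, not part of the patch: the dhcp_option()/dhcp_option_short() helpers above emit plain big-endian TLV bytes into the options buffer. The sketch below shows the layout they produce for option 57, maximum DHCP message size, with the example value 1500; the function name is illustrative only.)

#include <stdint.h>

/* writes 4 bytes into opts and returns the new length */
static uint16_t sketch_encode_max_msg_size(uint8_t *opts /* >= 4 bytes */)
{
  uint16_t len = 0;
  opts[len++] = 57;                      /* option code (dhcp_option)              */
  opts[len++] = 2;                       /* option length                          */
  opts[len++] = (uint8_t)(1500u >> 8);   /* value, network order (dhcp_option_short) */
  opts[len++] = (uint8_t)(1500u & 0xffu);
  return len;                            /* resulting bytes: 39 02 05 DC */
}
/* dhcp_option_trailer() later terminates the list with the END marker 0xFF
   and pads the message up to the minimum options length */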
+ * + */ +static err_t +dhcp_parse_reply(struct pbuf *p, struct dhcp *dhcp) +{ + u8_t *options; + u16_t offset; + u16_t offset_max; + u16_t options_idx; + u16_t options_idx_max; + struct pbuf *q; + int parse_file_as_options = 0; + int parse_sname_as_options = 0; + struct dhcp_msg *msg_in; +#if LWIP_DHCP_BOOTP_FILE + int file_overloaded = 0; +#endif + + LWIP_UNUSED_ARG(dhcp); + + /* clear received options */ + dhcp_clear_all_options(dhcp); + /* check that beginning of dhcp_msg (up to and including chaddr) is in first pbuf */ + if (p->len < DHCP_SNAME_OFS) { + return ERR_BUF; + } + msg_in = (struct dhcp_msg *)p->payload; +#if LWIP_DHCP_BOOTP_FILE + /* clear boot file name */ + dhcp->boot_file_name[0] = 0; +#endif /* LWIP_DHCP_BOOTP_FILE */ + + /* parse options */ + + /* start with options field */ + options_idx = DHCP_OPTIONS_OFS; + /* parse options to the end of the received packet */ + options_idx_max = p->tot_len; +again: + q = p; + while ((q != NULL) && (options_idx >= q->len)) { + options_idx = (u16_t)(options_idx - q->len); + options_idx_max = (u16_t)(options_idx_max - q->len); + q = q->next; + } + if (q == NULL) { + return ERR_BUF; + } + offset = options_idx; + offset_max = options_idx_max; + options = (u8_t *)q->payload; + /* at least 1 byte to read and no end marker, then at least 3 bytes to read? */ + while ((q != NULL) && (offset < offset_max) && (options[offset] != DHCP_OPTION_END)) { + u8_t op = options[offset]; + u8_t len; + u8_t decode_len = 0; + int decode_idx = -1; + u16_t val_offset = (u16_t)(offset + 2); + if (val_offset < offset) { + /* overflow */ + return ERR_BUF; + } + /* len byte might be in the next pbuf */ + if ((offset + 1) < q->len) { + len = options[offset + 1]; + } else { + len = (q->next != NULL ? ((u8_t *)q->next->payload)[0] : 0); + } + /* LWIP_DEBUGF(DHCP_DEBUG, ("msg_offset=%"U16_F", q->len=%"U16_F, msg_offset, q->len)); */ + decode_len = len; + switch (op) { + /* case(DHCP_OPTION_END): handled above */ + case (DHCP_OPTION_PAD): + /* special option: no len encoded */ + decode_len = len = 0; + /* will be increased below */ + break; + case (DHCP_OPTION_SUBNET_MASK): + LWIP_ERROR("len == 4", len == 4, return ERR_VAL;); + decode_idx = DHCP_OPTION_IDX_SUBNET_MASK; + break; + case (DHCP_OPTION_ROUTER): + decode_len = 4; /* only copy the first given router */ + LWIP_ERROR("len >= decode_len", len >= decode_len, return ERR_VAL;); + decode_idx = DHCP_OPTION_IDX_ROUTER; + break; +#if LWIP_DHCP_PROVIDE_DNS_SERVERS + case (DHCP_OPTION_DNS_SERVER): + /* special case: there might be more than one server */ + LWIP_ERROR("len %% 4 == 0", len % 4 == 0, return ERR_VAL;); + /* limit number of DNS servers */ + decode_len = LWIP_MIN(len, 4 * DNS_MAX_SERVERS); + LWIP_ERROR("len >= decode_len", len >= decode_len, return ERR_VAL;); + decode_idx = DHCP_OPTION_IDX_DNS_SERVER; + break; +#endif /* LWIP_DHCP_PROVIDE_DNS_SERVERS */ + case (DHCP_OPTION_LEASE_TIME): + LWIP_ERROR("len == 4", len == 4, return ERR_VAL;); + decode_idx = DHCP_OPTION_IDX_LEASE_TIME; + break; +#if LWIP_DHCP_GET_NTP_SRV + case (DHCP_OPTION_NTP): + /* special case: there might be more than one server */ + LWIP_ERROR("len %% 4 == 0", len % 4 == 0, return ERR_VAL;); + /* limit number of NTP servers */ + decode_len = LWIP_MIN(len, 4 * LWIP_DHCP_MAX_NTP_SERVERS); + LWIP_ERROR("len >= decode_len", len >= decode_len, return ERR_VAL;); + decode_idx = DHCP_OPTION_IDX_NTP_SERVER; + break; +#endif /* LWIP_DHCP_GET_NTP_SRV*/ + case (DHCP_OPTION_OVERLOAD): + LWIP_ERROR("len == 1", len == 1, return ERR_VAL;); + /* decode 
overload only in options, not in file/sname: invalid packet */ + LWIP_ERROR("overload in file/sname", options_idx == DHCP_OPTIONS_OFS, return ERR_VAL;); + decode_idx = DHCP_OPTION_IDX_OVERLOAD; + break; + case (DHCP_OPTION_MESSAGE_TYPE): + LWIP_ERROR("len == 1", len == 1, return ERR_VAL;); + decode_idx = DHCP_OPTION_IDX_MSG_TYPE; + break; + case (DHCP_OPTION_SERVER_ID): + LWIP_ERROR("len == 4", len == 4, return ERR_VAL;); + decode_idx = DHCP_OPTION_IDX_SERVER_ID; + break; + case (DHCP_OPTION_T1): + LWIP_ERROR("len == 4", len == 4, return ERR_VAL;); + decode_idx = DHCP_OPTION_IDX_T1; + break; + case (DHCP_OPTION_T2): + LWIP_ERROR("len == 4", len == 4, return ERR_VAL;); + decode_idx = DHCP_OPTION_IDX_T2; + break; + default: + decode_len = 0; + LWIP_DEBUGF(DHCP_DEBUG, ("skipping option %"U16_F" in options\n", (u16_t)op)); + LWIP_HOOK_DHCP_PARSE_OPTION(ip_current_netif(), dhcp, dhcp->state, msg_in, + dhcp_option_given(dhcp, DHCP_OPTION_IDX_MSG_TYPE) ? (u8_t)dhcp_get_option_value(dhcp, DHCP_OPTION_IDX_MSG_TYPE) : 0, + op, len, q, val_offset); + break; + } + if (op == DHCP_OPTION_PAD) { + offset++; + } else { + if (offset + len + 2 > 0xFFFF) { + /* overflow */ + return ERR_BUF; + } + offset = (u16_t)(offset + len + 2); + if (decode_len > 0) { + u32_t value = 0; + u16_t copy_len; +decode_next: + LWIP_ASSERT("check decode_idx", decode_idx >= 0 && decode_idx < DHCP_OPTION_IDX_MAX); + if (!dhcp_option_given(dhcp, decode_idx)) { + copy_len = LWIP_MIN(decode_len, 4); + if (pbuf_copy_partial(q, &value, copy_len, val_offset) != copy_len) { + return ERR_BUF; + } + if (decode_len > 4) { + /* decode more than one u32_t */ + u16_t next_val_offset; + LWIP_ERROR("decode_len %% 4 == 0", decode_len % 4 == 0, return ERR_VAL;); + dhcp_got_option(dhcp, decode_idx); + dhcp_set_option_value(dhcp, decode_idx, lwip_htonl(value)); + decode_len = (u8_t)(decode_len - 4); + next_val_offset = (u16_t)(val_offset + 4); + if (next_val_offset < val_offset) { + /* overflow */ + return ERR_BUF; + } + val_offset = next_val_offset; + decode_idx++; + goto decode_next; + } else if (decode_len == 4) { + value = lwip_ntohl(value); + } else { + LWIP_ERROR("invalid decode_len", decode_len == 1, return ERR_VAL;); + value = ((u8_t *)&value)[0]; + } + dhcp_got_option(dhcp, decode_idx); + dhcp_set_option_value(dhcp, decode_idx, value); + } + } + } + if (offset >= q->len) { + offset = (u16_t)(offset - q->len); + offset_max = (u16_t)(offset_max - q->len); + if (offset < offset_max) { + q = q->next; + LWIP_ERROR("next pbuf was null", q != NULL, return ERR_VAL;); + options = (u8_t *)q->payload; + } else { + /* We've run out of bytes, probably no end marker. Don't proceed. */ + return ERR_BUF; + } + } + } + /* is this an overloaded message? 
*/ + if (dhcp_option_given(dhcp, DHCP_OPTION_IDX_OVERLOAD)) { + u32_t overload = dhcp_get_option_value(dhcp, DHCP_OPTION_IDX_OVERLOAD); + dhcp_clear_option(dhcp, DHCP_OPTION_IDX_OVERLOAD); + if (overload == DHCP_OVERLOAD_FILE) { + parse_file_as_options = 1; + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("overloaded file field\n")); + } else if (overload == DHCP_OVERLOAD_SNAME) { + parse_sname_as_options = 1; + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("overloaded sname field\n")); + } else if (overload == DHCP_OVERLOAD_SNAME_FILE) { + parse_sname_as_options = 1; + parse_file_as_options = 1; + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("overloaded sname and file field\n")); + } else { + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("invalid overload option: %d\n", (int)overload)); + } + } + if (parse_file_as_options) { + /* if both are overloaded, parse file first and then sname (RFC 2131 ch. 4.1) */ + parse_file_as_options = 0; + options_idx = DHCP_FILE_OFS; + options_idx_max = DHCP_FILE_OFS + DHCP_FILE_LEN; +#if LWIP_DHCP_BOOTP_FILE + file_overloaded = 1; +#endif + goto again; + } else if (parse_sname_as_options) { + parse_sname_as_options = 0; + options_idx = DHCP_SNAME_OFS; + options_idx_max = DHCP_SNAME_OFS + DHCP_SNAME_LEN; + goto again; + } +#if LWIP_DHCP_BOOTP_FILE + if (!file_overloaded) { + /* only do this for ACK messages */ + if (dhcp_option_given(dhcp, DHCP_OPTION_IDX_MSG_TYPE) && + (dhcp_get_option_value(dhcp, DHCP_OPTION_IDX_MSG_TYPE) == DHCP_ACK)) + /* copy bootp file name, don't care for sname (server hostname) */ + if (pbuf_copy_partial(p, dhcp->boot_file_name, DHCP_FILE_LEN-1, DHCP_FILE_OFS) != (DHCP_FILE_LEN-1)) { + return ERR_BUF; + } + /* make sure the string is really NULL-terminated */ + dhcp->boot_file_name[DHCP_FILE_LEN-1] = 0; + } +#endif /* LWIP_DHCP_BOOTP_FILE */ + return ERR_OK; +} + +/** + * If an incoming DHCP message is in response to us, then trigger the state machine + */ +static void +dhcp_recv(void *arg, struct udp_pcb *pcb, struct pbuf *p, const ip_addr_t *addr, u16_t port) +{ + struct netif *netif = ip_current_input_netif(); + struct dhcp *dhcp = netif_dhcp_data(netif); + struct dhcp_msg *reply_msg = (struct dhcp_msg *)p->payload; + u8_t msg_type; + u8_t i; + struct dhcp_msg *msg_in; + + LWIP_UNUSED_ARG(arg); + + /* Caught DHCP message from netif that does not have DHCP enabled? 
-> not interested */ + if ((dhcp == NULL) || (dhcp->pcb_allocated == 0)) { + goto free_pbuf_and_return; + } + + LWIP_ASSERT("invalid server address type", IP_IS_V4(addr)); + + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_recv(pbuf = %p) from DHCP server %"U16_F".%"U16_F".%"U16_F".%"U16_F" port %"U16_F"\n", (void *)p, + ip4_addr1_16(ip_2_ip4(addr)), ip4_addr2_16(ip_2_ip4(addr)), ip4_addr3_16(ip_2_ip4(addr)), ip4_addr4_16(ip_2_ip4(addr)), port)); + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("pbuf->len = %"U16_F"\n", p->len)); + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("pbuf->tot_len = %"U16_F"\n", p->tot_len)); + /* prevent warnings about unused arguments */ + LWIP_UNUSED_ARG(pcb); + LWIP_UNUSED_ARG(addr); + LWIP_UNUSED_ARG(port); + + if (p->len < DHCP_MIN_REPLY_LEN) { + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_WARNING, ("DHCP reply message or pbuf too short\n")); + goto free_pbuf_and_return; + } + + if (reply_msg->op != DHCP_BOOTREPLY) { + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_WARNING, ("not a DHCP reply message, but type %"U16_F"\n", (u16_t)reply_msg->op)); + goto free_pbuf_and_return; + } + /* iterate through hardware address and match against DHCP message */ + for (i = 0; i < netif->hwaddr_len && i < LWIP_MIN(DHCP_CHADDR_LEN, NETIF_MAX_HWADDR_LEN); i++) { + if (netif->hwaddr[i] != reply_msg->chaddr[i]) { + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_WARNING, + ("netif->hwaddr[%"U16_F"]==%02"X16_F" != reply_msg->chaddr[%"U16_F"]==%02"X16_F"\n", + (u16_t)i, (u16_t)netif->hwaddr[i], (u16_t)i, (u16_t)reply_msg->chaddr[i])); + goto free_pbuf_and_return; + } + } + /* match transaction ID against what we expected */ + if (lwip_ntohl(reply_msg->xid) != dhcp->xid) { + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_WARNING, + ("transaction id mismatch reply_msg->xid(%"X32_F")!=dhcp->xid(%"X32_F")\n", lwip_ntohl(reply_msg->xid), dhcp->xid)); + goto free_pbuf_and_return; + } + /* option fields could be unfold? */ + if (dhcp_parse_reply(p, dhcp) != ERR_OK) { + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_SERIOUS, + ("problem unfolding DHCP message - too short on memory?\n")); + goto free_pbuf_and_return; + } + + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("searching DHCP_OPTION_MESSAGE_TYPE\n")); + /* obtain pointer to DHCP message type */ + if (!dhcp_option_given(dhcp, DHCP_OPTION_IDX_MSG_TYPE)) { + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_WARNING, ("DHCP_OPTION_MESSAGE_TYPE option not found\n")); + goto free_pbuf_and_return; + } + + msg_in = (struct dhcp_msg *)p->payload; + /* read DHCP message type */ + msg_type = (u8_t)dhcp_get_option_value(dhcp, DHCP_OPTION_IDX_MSG_TYPE); + /* message type is DHCP ACK? */ + if (msg_type == DHCP_ACK) { + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("DHCP_ACK received\n")); + /* in requesting state? */ + if (dhcp->state == DHCP_STATE_REQUESTING) { + dhcp_handle_ack(netif, msg_in); +#if DHCP_DOES_ARP_CHECK + if ((netif->flags & NETIF_FLAG_ETHARP) != 0) { + /* check if the acknowledged lease address is already in use */ + dhcp_check(netif); + } else { + /* bind interface to the acknowledged lease address */ + dhcp_bind(netif); + } +#else + /* bind interface to the acknowledged lease address */ + dhcp_bind(netif); +#endif + } + /* already bound to the given lease address? 
*/ + else if ((dhcp->state == DHCP_STATE_REBOOTING) || (dhcp->state == DHCP_STATE_REBINDING) || + (dhcp->state == DHCP_STATE_RENEWING)) { + dhcp_handle_ack(netif, msg_in); + dhcp_bind(netif); + } + } + /* received a DHCP_NAK in appropriate state? */ + else if ((msg_type == DHCP_NAK) && + ((dhcp->state == DHCP_STATE_REBOOTING) || (dhcp->state == DHCP_STATE_REQUESTING) || + (dhcp->state == DHCP_STATE_REBINDING) || (dhcp->state == DHCP_STATE_RENEWING ))) { + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("DHCP_NAK received\n")); + dhcp_handle_nak(netif); + } + /* received a DHCP_OFFER in DHCP_STATE_SELECTING state? */ + else if ((msg_type == DHCP_OFFER) && (dhcp->state == DHCP_STATE_SELECTING)) { + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("DHCP_OFFER received in DHCP_STATE_SELECTING state\n")); + /* remember offered lease */ + dhcp_handle_offer(netif, msg_in); + } + +free_pbuf_and_return: + pbuf_free(p); +} + +/** + * Create a DHCP request, fill in common headers + * + * @param netif the netif under DHCP control + * @param dhcp dhcp control struct + * @param message_type message type of the request + */ +static struct pbuf * +dhcp_create_msg(struct netif *netif, struct dhcp *dhcp, u8_t message_type, u16_t *options_out_len) +{ + u16_t i; + struct pbuf *p_out; + struct dhcp_msg *msg_out; + u16_t options_out_len_loc; + +#ifndef DHCP_GLOBAL_XID + /** default global transaction identifier starting value (easy to match + * with a packet analyser). We simply increment for each new request. + * Predefine DHCP_GLOBAL_XID to a better value or a function call to generate one + * at runtime, any supporting function prototypes can be defined in DHCP_GLOBAL_XID_HEADER */ +#if DHCP_CREATE_RAND_XID && defined(LWIP_RAND) + static u32_t xid; +#else /* DHCP_CREATE_RAND_XID && defined(LWIP_RAND) */ + static u32_t xid = 0xABCD0000; +#endif /* DHCP_CREATE_RAND_XID && defined(LWIP_RAND) */ +#else + if (!xid_initialised) { + xid = DHCP_GLOBAL_XID; + xid_initialised = !xid_initialised; + } +#endif + LWIP_ERROR("dhcp_create_msg: netif != NULL", (netif != NULL), return NULL;); + LWIP_ERROR("dhcp_create_msg: dhcp != NULL", (dhcp != NULL), return NULL;); + p_out = pbuf_alloc(PBUF_TRANSPORT, sizeof(struct dhcp_msg), PBUF_RAM); + if (p_out == NULL) { + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_SERIOUS, + ("dhcp_create_msg(): could not allocate pbuf\n")); + return NULL; + } + LWIP_ASSERT("dhcp_create_msg: check that first pbuf can hold struct dhcp_msg", + (p_out->len >= sizeof(struct dhcp_msg))); + + /* DHCP_REQUEST should reuse 'xid' from DHCPOFFER */ + if ((message_type != DHCP_REQUEST) || (dhcp->state == DHCP_STATE_REBOOTING)) { + /* reuse transaction identifier in retransmissions */ + if (dhcp->tries == 0) { +#if DHCP_CREATE_RAND_XID && defined(LWIP_RAND) + xid = LWIP_RAND(); +#else /* DHCP_CREATE_RAND_XID && defined(LWIP_RAND) */ + xid++; +#endif /* DHCP_CREATE_RAND_XID && defined(LWIP_RAND) */ + } + dhcp->xid = xid; + } + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, + ("transaction id xid(%"X32_F")\n", xid)); + + msg_out = (struct dhcp_msg *)p_out->payload; + memset(msg_out, 0, sizeof(struct dhcp_msg)); + + msg_out->op = DHCP_BOOTREQUEST; + /* @todo: make link layer independent */ + msg_out->htype = LWIP_IANA_HWTYPE_ETHERNET; + msg_out->hlen = netif->hwaddr_len; + msg_out->xid = lwip_htonl(dhcp->xid); + /* we don't need the broadcast flag since we can receive unicast traffic + before being fully configured! 
*/ + /* set ciaddr to netif->ip_addr based on message_type and state */ + if ((message_type == DHCP_INFORM) || (message_type == DHCP_DECLINE) || (message_type == DHCP_RELEASE) || + ((message_type == DHCP_REQUEST) && /* DHCP_STATE_BOUND not used for sending! */ + ((dhcp->state == DHCP_STATE_RENEWING) || dhcp->state == DHCP_STATE_REBINDING))) { + ip4_addr_copy(msg_out->ciaddr, *netif_ip4_addr(netif)); + } + for (i = 0; i < LWIP_MIN(DHCP_CHADDR_LEN, NETIF_MAX_HWADDR_LEN); i++) { + /* copy netif hardware address (padded with zeroes through memset already) */ + msg_out->chaddr[i] = netif->hwaddr[i]; + } + msg_out->cookie = PP_HTONL(DHCP_MAGIC_COOKIE); + /* Add option MESSAGE_TYPE */ + options_out_len_loc = dhcp_option(0, msg_out->options, DHCP_OPTION_MESSAGE_TYPE, DHCP_OPTION_MESSAGE_TYPE_LEN); + options_out_len_loc = dhcp_option_byte(options_out_len_loc, msg_out->options, message_type); + if (options_out_len) { + *options_out_len = options_out_len_loc; + } + return p_out; +} + +/** + * Add a DHCP message trailer + * + * Adds the END option to the DHCP message, and if + * necessary, up to three padding bytes. + */ +static void +dhcp_option_trailer(u16_t options_out_len, u8_t *options, struct pbuf *p_out) +{ + options[options_out_len++] = DHCP_OPTION_END; + /* packet is too small, or not 4 byte aligned? */ + while (((options_out_len < DHCP_MIN_OPTIONS_LEN) || (options_out_len & 3)) && + (options_out_len < DHCP_OPTIONS_LEN)) { + /* add a fill/padding byte */ + options[options_out_len++] = 0; + } + /* shrink the pbuf to the actual content length */ + pbuf_realloc(p_out, (u16_t)(sizeof(struct dhcp_msg) - DHCP_OPTIONS_LEN + options_out_len)); +} + +/** check if DHCP supplied netif->ip_addr + * + * @param netif the netif to check + * @return 1 if DHCP supplied netif->ip_addr (states BOUND or RENEWING), + * 0 otherwise + */ +u8_t +dhcp_supplied_address(const struct netif *netif) +{ + if ((netif != NULL) && (netif_dhcp_data(netif) != NULL)) { + struct dhcp *dhcp = netif_dhcp_data(netif); + return (dhcp->state == DHCP_STATE_BOUND) || (dhcp->state == DHCP_STATE_RENEWING) || + (dhcp->state == DHCP_STATE_REBINDING); + } + return 0; +} + +#endif /* LWIP_IPV4 && LWIP_DHCP */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/ipv4/etharp.c b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/ipv4/etharp.c new file mode 100644 index 0000000..570aaa3 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/ipv4/etharp.c @@ -0,0 +1,1204 @@ +/** + * @file + * Address Resolution Protocol module for IP over Ethernet + * + * Functionally, ARP is divided into two parts. The first maps an IP address + * to a physical address when sending a packet, and the second part answers + * requests from other machines for our physical address. + * + * This implementation complies with RFC 826 (Ethernet ARP). It supports + * Gratuitious ARP from RFC3220 (IP Mobility Support for IPv4) section 4.6 + * if an interface calls etharp_gratuitous(our_netif) upon address change. + */ + +/* + * Copyright (c) 2001-2003 Swedish Institute of Computer Science. + * Copyright (c) 2003-2004 Leon Woestenberg + * Copyright (c) 2003-2004 Axon Digital Design B.V., The Netherlands. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. 
Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + */ + +#include "lwip/opt.h" + +#if LWIP_IPV4 && LWIP_ARP /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/etharp.h" +#include "lwip/stats.h" +#include "lwip/snmp.h" +#include "lwip/dhcp.h" +#include "lwip/autoip.h" +#include "lwip/prot/iana.h" +#include "netif/ethernet.h" + +#include + +#ifdef LWIP_HOOK_FILENAME +#include LWIP_HOOK_FILENAME +#endif + +/** Re-request a used ARP entry 1 minute before it would expire to prevent + * breaking a steadily used connection because the ARP entry timed out. */ +#define ARP_AGE_REREQUEST_USED_UNICAST (ARP_MAXAGE - 30) +#define ARP_AGE_REREQUEST_USED_BROADCAST (ARP_MAXAGE - 15) + +/** the time an ARP entry stays pending after first request, + * for ARP_TMR_INTERVAL = 1000, this is + * 10 seconds. + * + * @internal Keep this number at least 2, otherwise it might + * run out instantly if the timeout occurs directly after a request. + */ +#define ARP_MAXPENDING 5 + +/** ARP states */ +enum etharp_state { + ETHARP_STATE_EMPTY = 0, + ETHARP_STATE_PENDING, + ETHARP_STATE_STABLE, + ETHARP_STATE_STABLE_REREQUESTING_1, + ETHARP_STATE_STABLE_REREQUESTING_2 +#if ETHARP_SUPPORT_STATIC_ENTRIES + , ETHARP_STATE_STATIC +#endif /* ETHARP_SUPPORT_STATIC_ENTRIES */ +}; + +struct etharp_entry { +#if ARP_QUEUEING + /** Pointer to queue of pending outgoing packets on this ARP entry. */ + struct etharp_q_entry *q; +#else /* ARP_QUEUEING */ + /** Pointer to a single pending outgoing packet on this ARP entry. */ + struct pbuf *q; +#endif /* ARP_QUEUEING */ + ip4_addr_t ipaddr; + struct netif *netif; + struct eth_addr ethaddr; + u16_t ctime; + u8_t state; +}; + +static struct etharp_entry arp_table[ARP_TABLE_SIZE]; + +#if !LWIP_NETIF_HWADDRHINT +static netif_addr_idx_t etharp_cached_entry; +#endif /* !LWIP_NETIF_HWADDRHINT */ + +/** Try hard to create a new entry - we want the IP address to appear in + the cache (even if this means removing an active entry or so). 
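(Aside, not part of the patch: the entries in arp_table age in steps of etharp_tmr(), which the project's lwIP timer machinery normally drives; the sketch below is only relevant in a bare-metal setup where those cyclic timers are not running, and must not be duplicated otherwise. Assuming the default ARP_MAXAGE of 300 with a 1 s timer, the re-request thresholds above work out to 270 s for the quiet unicast refresh and 285 s for the broadcast fallback, ahead of expiry at 300 s. The function name is hypothetical.)

#include "lwip/etharp.h"
#include "lwip/sys.h"

/* call from the main loop of a NO_SYS build; otherwise the stack's own
   cyclic timers already invoke etharp_tmr() */
static void sketch_arp_poll(void)
{
  static u32_t last;
  u32_t now = sys_now();
  if ((u32_t)(now - last) >= ARP_TMR_INTERVAL) {   /* nominally 1000 ms */
    last = now;
    etharp_tmr();  /* ages entries, expires stale ones, resends pending requests */
  }
}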
*/ +#define ETHARP_FLAG_TRY_HARD 1 +#define ETHARP_FLAG_FIND_ONLY 2 +#if ETHARP_SUPPORT_STATIC_ENTRIES +#define ETHARP_FLAG_STATIC_ENTRY 4 +#endif /* ETHARP_SUPPORT_STATIC_ENTRIES */ + +#if LWIP_NETIF_HWADDRHINT +#define ETHARP_SET_ADDRHINT(netif, addrhint) do { if (((netif) != NULL) && ((netif)->hints != NULL)) { \ + (netif)->hints->addr_hint = (addrhint); }} while(0) +#else /* LWIP_NETIF_HWADDRHINT */ +#define ETHARP_SET_ADDRHINT(netif, addrhint) (etharp_cached_entry = (addrhint)) +#endif /* LWIP_NETIF_HWADDRHINT */ + + +/* Check for maximum ARP_TABLE_SIZE */ +#if (ARP_TABLE_SIZE > NETIF_ADDR_IDX_MAX) +#error "ARP_TABLE_SIZE must fit in an s16_t, you have to reduce it in your lwipopts.h" +#endif + + +static err_t etharp_request_dst(struct netif *netif, const ip4_addr_t *ipaddr, const struct eth_addr *hw_dst_addr); +static err_t etharp_raw(struct netif *netif, + const struct eth_addr *ethsrc_addr, const struct eth_addr *ethdst_addr, + const struct eth_addr *hwsrc_addr, const ip4_addr_t *ipsrc_addr, + const struct eth_addr *hwdst_addr, const ip4_addr_t *ipdst_addr, + const u16_t opcode); + +#if ARP_QUEUEING +/** + * Free a complete queue of etharp entries + * + * @param q a qeueue of etharp_q_entry's to free + */ +static void +free_etharp_q(struct etharp_q_entry *q) +{ + struct etharp_q_entry *r; + LWIP_ASSERT("q != NULL", q != NULL); + while (q) { + r = q; + q = q->next; + LWIP_ASSERT("r->p != NULL", (r->p != NULL)); + pbuf_free(r->p); + memp_free(MEMP_ARP_QUEUE, r); + } +} +#else /* ARP_QUEUEING */ + +/** Compatibility define: free the queued pbuf */ +#define free_etharp_q(q) pbuf_free(q) + +#endif /* ARP_QUEUEING */ + +/** Clean up ARP table entries */ +static void +etharp_free_entry(int i) +{ + /* remove from SNMP ARP index tree */ + mib2_remove_arp_entry(arp_table[i].netif, &arp_table[i].ipaddr); + /* and empty packet queue */ + if (arp_table[i].q != NULL) { + /* remove all queued packets */ + LWIP_DEBUGF(ETHARP_DEBUG, ("etharp_free_entry: freeing entry %"U16_F", packet queue %p.\n", (u16_t)i, (void *)(arp_table[i].q))); + free_etharp_q(arp_table[i].q); + arp_table[i].q = NULL; + } + /* recycle entry for re-use */ + arp_table[i].state = ETHARP_STATE_EMPTY; +#ifdef LWIP_DEBUG + /* for debugging, clean out the complete entry */ + arp_table[i].ctime = 0; + arp_table[i].netif = NULL; + ip4_addr_set_zero(&arp_table[i].ipaddr); + arp_table[i].ethaddr = ethzero; +#endif /* LWIP_DEBUG */ +} + +/** + * Clears expired entries in the ARP table. + * + * This function should be called every ARP_TMR_INTERVAL milliseconds (1 second), + * in order to expire entries in the ARP table. + */ +void +etharp_tmr(void) +{ + int i; + + LWIP_DEBUGF(ETHARP_DEBUG, ("etharp_timer\n")); + /* remove expired entries from the ARP table */ + for (i = 0; i < ARP_TABLE_SIZE; ++i) { + u8_t state = arp_table[i].state; + if (state != ETHARP_STATE_EMPTY +#if ETHARP_SUPPORT_STATIC_ENTRIES + && (state != ETHARP_STATE_STATIC) +#endif /* ETHARP_SUPPORT_STATIC_ENTRIES */ + ) { + arp_table[i].ctime++; + if ((arp_table[i].ctime >= ARP_MAXAGE) || + ((arp_table[i].state == ETHARP_STATE_PENDING) && + (arp_table[i].ctime >= ARP_MAXPENDING))) { + /* pending or stable entry has become old! */ + LWIP_DEBUGF(ETHARP_DEBUG, ("etharp_timer: expired %s entry %d.\n", + arp_table[i].state >= ETHARP_STATE_STABLE ? "stable" : "pending", i)); + /* clean up entries that have just been expired */ + etharp_free_entry(i); + } else if (arp_table[i].state == ETHARP_STATE_STABLE_REREQUESTING_1) { + /* Don't send more than one request every 2 seconds. 
*/ + arp_table[i].state = ETHARP_STATE_STABLE_REREQUESTING_2; + } else if (arp_table[i].state == ETHARP_STATE_STABLE_REREQUESTING_2) { + /* Reset state to stable, so that the next transmitted packet will + re-send an ARP request. */ + arp_table[i].state = ETHARP_STATE_STABLE; + } else if (arp_table[i].state == ETHARP_STATE_PENDING) { + /* still pending, resend an ARP query */ + etharp_request(arp_table[i].netif, &arp_table[i].ipaddr); + } + } + } +} + +/** + * Search the ARP table for a matching or new entry. + * + * If an IP address is given, return a pending or stable ARP entry that matches + * the address. If no match is found, create a new entry with this address set, + * but in state ETHARP_EMPTY. The caller must check and possibly change the + * state of the returned entry. + * + * If ipaddr is NULL, return a initialized new entry in state ETHARP_EMPTY. + * + * In all cases, attempt to create new entries from an empty entry. If no + * empty entries are available and ETHARP_FLAG_TRY_HARD flag is set, recycle + * old entries. Heuristic choose the least important entry for recycling. + * + * @param ipaddr IP address to find in ARP cache, or to add if not found. + * @param flags See @ref etharp_state + * @param netif netif related to this address (used for NETIF_HWADDRHINT) + * + * @return The ARP entry index that matched or is created, ERR_MEM if no + * entry is found or could be recycled. + */ +static s16_t +etharp_find_entry(const ip4_addr_t *ipaddr, u8_t flags, struct netif *netif) +{ + s16_t old_pending = ARP_TABLE_SIZE, old_stable = ARP_TABLE_SIZE; + s16_t empty = ARP_TABLE_SIZE; + s16_t i = 0; + /* oldest entry with packets on queue */ + s16_t old_queue = ARP_TABLE_SIZE; + /* its age */ + u16_t age_queue = 0, age_pending = 0, age_stable = 0; + + LWIP_UNUSED_ARG(netif); + + /** + * a) do a search through the cache, remember candidates + * b) select candidate entry + * c) create new entry + */ + + /* a) in a single search sweep, do all of this + * 1) remember the first empty entry (if any) + * 2) remember the oldest stable entry (if any) + * 3) remember the oldest pending entry without queued packets (if any) + * 4) remember the oldest pending entry with queued packets (if any) + * 5) search for a matching IP entry, either pending or stable + * until 5 matches, or all entries are searched for. + */ + + for (i = 0; i < ARP_TABLE_SIZE; ++i) { + u8_t state = arp_table[i].state; + /* no empty entry found yet and now we do find one? */ + if ((empty == ARP_TABLE_SIZE) && (state == ETHARP_STATE_EMPTY)) { + LWIP_DEBUGF(ETHARP_DEBUG, ("etharp_find_entry: found empty entry %d\n", (int)i)); + /* remember first empty entry */ + empty = i; + } else if (state != ETHARP_STATE_EMPTY) { + LWIP_ASSERT("state == ETHARP_STATE_PENDING || state >= ETHARP_STATE_STABLE", + state == ETHARP_STATE_PENDING || state >= ETHARP_STATE_STABLE); + /* if given, does IP address match IP address in ARP entry? */ + if (ipaddr && ip4_addr_cmp(ipaddr, &arp_table[i].ipaddr) +#if ETHARP_TABLE_MATCH_NETIF + && ((netif == NULL) || (netif == arp_table[i].netif)) +#endif /* ETHARP_TABLE_MATCH_NETIF */ + ) { + LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_find_entry: found matching entry %d\n", (int)i)); + /* found exact IP address match, simply bail out */ + return i; + } + /* pending entry? */ + if (state == ETHARP_STATE_PENDING) { + /* pending with queued packets? 
*/ + if (arp_table[i].q != NULL) { + if (arp_table[i].ctime >= age_queue) { + old_queue = i; + age_queue = arp_table[i].ctime; + } + } else + /* pending without queued packets? */ + { + if (arp_table[i].ctime >= age_pending) { + old_pending = i; + age_pending = arp_table[i].ctime; + } + } + /* stable entry? */ + } else if (state >= ETHARP_STATE_STABLE) { +#if ETHARP_SUPPORT_STATIC_ENTRIES + /* don't record old_stable for static entries since they never expire */ + if (state < ETHARP_STATE_STATIC) +#endif /* ETHARP_SUPPORT_STATIC_ENTRIES */ + { + /* remember entry with oldest stable entry in oldest, its age in maxtime */ + if (arp_table[i].ctime >= age_stable) { + old_stable = i; + age_stable = arp_table[i].ctime; + } + } + } + } + } + /* { we have no match } => try to create a new entry */ + + /* don't create new entry, only search? */ + if (((flags & ETHARP_FLAG_FIND_ONLY) != 0) || + /* or no empty entry found and not allowed to recycle? */ + ((empty == ARP_TABLE_SIZE) && ((flags & ETHARP_FLAG_TRY_HARD) == 0))) { + LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_find_entry: no empty entry found and not allowed to recycle\n")); + return (s16_t)ERR_MEM; + } + + /* b) choose the least destructive entry to recycle: + * 1) empty entry + * 2) oldest stable entry + * 3) oldest pending entry without queued packets + * 4) oldest pending entry with queued packets + * + * { ETHARP_FLAG_TRY_HARD is set at this point } + */ + + /* 1) empty entry available? */ + if (empty < ARP_TABLE_SIZE) { + i = empty; + LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_find_entry: selecting empty entry %d\n", (int)i)); + } else { + /* 2) found recyclable stable entry? */ + if (old_stable < ARP_TABLE_SIZE) { + /* recycle oldest stable*/ + i = old_stable; + LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_find_entry: selecting oldest stable entry %d\n", (int)i)); + /* no queued packets should exist on stable entries */ + LWIP_ASSERT("arp_table[i].q == NULL", arp_table[i].q == NULL); + /* 3) found recyclable pending entry without queued packets? */ + } else if (old_pending < ARP_TABLE_SIZE) { + /* recycle oldest pending */ + i = old_pending; + LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_find_entry: selecting oldest pending entry %d (without queue)\n", (int)i)); + /* 4) found recyclable pending entry with queued packets? */ + } else if (old_queue < ARP_TABLE_SIZE) { + /* recycle oldest pending (queued packets are free in etharp_free_entry) */ + i = old_queue; + LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_find_entry: selecting oldest pending entry %d, freeing packet queue %p\n", (int)i, (void *)(arp_table[i].q))); + /* no empty or recyclable entries found */ + } else { + LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_find_entry: no empty or recyclable entries found\n")); + return (s16_t)ERR_MEM; + } + + /* { empty or recyclable entry found } */ + LWIP_ASSERT("i < ARP_TABLE_SIZE", i < ARP_TABLE_SIZE); + etharp_free_entry(i); + } + + LWIP_ASSERT("i < ARP_TABLE_SIZE", i < ARP_TABLE_SIZE); + LWIP_ASSERT("arp_table[i].state == ETHARP_STATE_EMPTY", + arp_table[i].state == ETHARP_STATE_EMPTY); + + /* IP address given? */ + if (ipaddr != NULL) { + /* set IP address */ + ip4_addr_copy(arp_table[i].ipaddr, *ipaddr); + } + arp_table[i].ctime = 0; +#if ETHARP_TABLE_MATCH_NETIF + arp_table[i].netif = netif; +#endif /* ETHARP_TABLE_MATCH_NETIF */ + return (s16_t)i; +} + +/** + * Update (or insert) a IP/MAC address pair in the ARP cache. 
+ * + * If a pending entry is resolved, any queued packets will be sent + * at this point. + * + * @param netif netif related to this entry (used for NETIF_ADDRHINT) + * @param ipaddr IP address of the inserted ARP entry. + * @param ethaddr Ethernet address of the inserted ARP entry. + * @param flags See @ref etharp_state + * + * @return + * - ERR_OK Successfully updated ARP cache. + * - ERR_MEM If we could not add a new ARP entry when ETHARP_FLAG_TRY_HARD was set. + * - ERR_ARG Non-unicast address given, those will not appear in ARP cache. + * + * @see pbuf_free() + */ +static err_t +etharp_update_arp_entry(struct netif *netif, const ip4_addr_t *ipaddr, struct eth_addr *ethaddr, u8_t flags) +{ + s16_t i; + LWIP_ASSERT("netif->hwaddr_len == ETH_HWADDR_LEN", netif->hwaddr_len == ETH_HWADDR_LEN); + LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_update_arp_entry: %"U16_F".%"U16_F".%"U16_F".%"U16_F" - %02"X16_F":%02"X16_F":%02"X16_F":%02"X16_F":%02"X16_F":%02"X16_F"\n", + ip4_addr1_16(ipaddr), ip4_addr2_16(ipaddr), ip4_addr3_16(ipaddr), ip4_addr4_16(ipaddr), + (u16_t)ethaddr->addr[0], (u16_t)ethaddr->addr[1], (u16_t)ethaddr->addr[2], + (u16_t)ethaddr->addr[3], (u16_t)ethaddr->addr[4], (u16_t)ethaddr->addr[5])); + /* non-unicast address? */ + if (ip4_addr_isany(ipaddr) || + ip4_addr_isbroadcast(ipaddr, netif) || + ip4_addr_ismulticast(ipaddr)) { + LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_update_arp_entry: will not add non-unicast IP address to ARP cache\n")); + return ERR_ARG; + } + /* find or create ARP entry */ + i = etharp_find_entry(ipaddr, flags, netif); + /* bail out if no entry could be found */ + if (i < 0) { + return (err_t)i; + } + +#if ETHARP_SUPPORT_STATIC_ENTRIES + if (flags & ETHARP_FLAG_STATIC_ENTRY) { + /* record static type */ + arp_table[i].state = ETHARP_STATE_STATIC; + } else if (arp_table[i].state == ETHARP_STATE_STATIC) { + /* found entry is a static type, don't overwrite it */ + return ERR_VAL; + } else +#endif /* ETHARP_SUPPORT_STATIC_ENTRIES */ + { + /* mark it stable */ + arp_table[i].state = ETHARP_STATE_STABLE; + } + + /* record network interface */ + arp_table[i].netif = netif; + /* insert in SNMP ARP index tree */ + mib2_add_arp_entry(netif, &arp_table[i].ipaddr); + + LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_update_arp_entry: updating stable entry %"S16_F"\n", i)); + /* update address */ + SMEMCPY(&arp_table[i].ethaddr, ethaddr, ETH_HWADDR_LEN); + /* reset time stamp */ + arp_table[i].ctime = 0; + /* this is where we will send out queued packets! */ +#if ARP_QUEUEING + while (arp_table[i].q != NULL) { + struct pbuf *p; + /* remember remainder of queue */ + struct etharp_q_entry *q = arp_table[i].q; + /* pop first item off the queue */ + arp_table[i].q = q->next; + /* get the packet pointer */ + p = q->p; + /* now queue entry can be freed */ + memp_free(MEMP_ARP_QUEUE, q); +#else /* ARP_QUEUEING */ + if (arp_table[i].q != NULL) { + struct pbuf *p = arp_table[i].q; + arp_table[i].q = NULL; +#endif /* ARP_QUEUEING */ + /* send the queued IP packet */ + ethernet_output(netif, p, (struct eth_addr *)(netif->hwaddr), ethaddr, ETHTYPE_IP); + /* free the queued IP packet */ + pbuf_free(p); + } + return ERR_OK; +} + +#if ETHARP_SUPPORT_STATIC_ENTRIES +/** Add a new static entry to the ARP table. If an entry exists for the + * specified IP address, this entry is overwritten. + * If packets are queued for the specified IP address, they are sent out. 
+ * + * @param ipaddr IP address for the new static entry + * @param ethaddr ethernet address for the new static entry + * @return See return values of etharp_add_static_entry + */ +err_t +etharp_add_static_entry(const ip4_addr_t *ipaddr, struct eth_addr *ethaddr) +{ + struct netif *netif; + LWIP_ASSERT_CORE_LOCKED(); + LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_add_static_entry: %"U16_F".%"U16_F".%"U16_F".%"U16_F" - %02"X16_F":%02"X16_F":%02"X16_F":%02"X16_F":%02"X16_F":%02"X16_F"\n", + ip4_addr1_16(ipaddr), ip4_addr2_16(ipaddr), ip4_addr3_16(ipaddr), ip4_addr4_16(ipaddr), + (u16_t)ethaddr->addr[0], (u16_t)ethaddr->addr[1], (u16_t)ethaddr->addr[2], + (u16_t)ethaddr->addr[3], (u16_t)ethaddr->addr[4], (u16_t)ethaddr->addr[5])); + + netif = ip4_route(ipaddr); + if (netif == NULL) { + return ERR_RTE; + } + + return etharp_update_arp_entry(netif, ipaddr, ethaddr, ETHARP_FLAG_TRY_HARD | ETHARP_FLAG_STATIC_ENTRY); +} + +/** Remove a static entry from the ARP table previously added with a call to + * etharp_add_static_entry. + * + * @param ipaddr IP address of the static entry to remove + * @return ERR_OK: entry removed + * ERR_MEM: entry wasn't found + * ERR_ARG: entry wasn't a static entry but a dynamic one + */ +err_t +etharp_remove_static_entry(const ip4_addr_t *ipaddr) +{ + s16_t i; + LWIP_ASSERT_CORE_LOCKED(); + LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_remove_static_entry: %"U16_F".%"U16_F".%"U16_F".%"U16_F"\n", + ip4_addr1_16(ipaddr), ip4_addr2_16(ipaddr), ip4_addr3_16(ipaddr), ip4_addr4_16(ipaddr))); + + /* find or create ARP entry */ + i = etharp_find_entry(ipaddr, ETHARP_FLAG_FIND_ONLY, NULL); + /* bail out if no entry could be found */ + if (i < 0) { + return (err_t)i; + } + + if (arp_table[i].state != ETHARP_STATE_STATIC) { + /* entry wasn't a static entry, cannot remove it */ + return ERR_ARG; + } + /* entry found, free it */ + etharp_free_entry(i); + return ERR_OK; +} +#endif /* ETHARP_SUPPORT_STATIC_ENTRIES */ + +/** + * Remove all ARP table entries of the specified netif. + * + * @param netif points to a network interface + */ +void +etharp_cleanup_netif(struct netif *netif) +{ + int i; + + for (i = 0; i < ARP_TABLE_SIZE; ++i) { + u8_t state = arp_table[i].state; + if ((state != ETHARP_STATE_EMPTY) && (arp_table[i].netif == netif)) { + etharp_free_entry(i); + } + } +} + +/** + * Finds (stable) ethernet/IP address pair from ARP table + * using interface and IP address index. + * @note the addresses in the ARP table are in network order! 
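(Aside, not part of the patch: when ETHARP_SUPPORT_STATIC_ENTRIES is enabled, the two functions above let an application pin and later drop a fixed IP/MAC pair. A minimal usage sketch follows; the function name, the MAC, and the 192.168.11.3 address are example values only.)

#include "lwip/etharp.h"
#include "lwip/ip4_addr.h"

/* hypothetical usage: pin a peer's MAC so traffic to it never waits on ARP */
static void sketch_static_arp(void)
{
  ip4_addr_t peer;
  struct eth_addr mac = {{ 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 }};  /* example MAC */

  IP4_ADDR(&peer, 192, 168, 11, 3);                                /* example peer */
  if (etharp_add_static_entry(&peer, &mac) == ERR_OK) {
    /* ... packets to 192.168.11.3 now resolve without ARP requests ... */
    etharp_remove_static_entry(&peer);                             /* drop it again */
  }
}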
+ * + * @param netif points to interface index + * @param ipaddr points to the (network order) IP address index + * @param eth_ret points to return pointer + * @param ip_ret points to return pointer + * @return table index if found, -1 otherwise + */ +ssize_t +etharp_find_addr(struct netif *netif, const ip4_addr_t *ipaddr, + struct eth_addr **eth_ret, const ip4_addr_t **ip_ret) +{ + s16_t i; + + LWIP_ASSERT("eth_ret != NULL && ip_ret != NULL", + eth_ret != NULL && ip_ret != NULL); + + LWIP_UNUSED_ARG(netif); + + i = etharp_find_entry(ipaddr, ETHARP_FLAG_FIND_ONLY, netif); + if ((i >= 0) && (arp_table[i].state >= ETHARP_STATE_STABLE)) { + *eth_ret = &arp_table[i].ethaddr; + *ip_ret = &arp_table[i].ipaddr; + return i; + } + return -1; +} + +/** + * Possibility to iterate over stable ARP table entries + * + * @param i entry number, 0 to ARP_TABLE_SIZE + * @param ipaddr return value: IP address + * @param netif return value: points to interface + * @param eth_ret return value: ETH address + * @return 1 on valid index, 0 otherwise + */ +int +etharp_get_entry(size_t i, ip4_addr_t **ipaddr, struct netif **netif, struct eth_addr **eth_ret) +{ + LWIP_ASSERT("ipaddr != NULL", ipaddr != NULL); + LWIP_ASSERT("netif != NULL", netif != NULL); + LWIP_ASSERT("eth_ret != NULL", eth_ret != NULL); + + if ((i < ARP_TABLE_SIZE) && (arp_table[i].state >= ETHARP_STATE_STABLE)) { + *ipaddr = &arp_table[i].ipaddr; + *netif = arp_table[i].netif; + *eth_ret = &arp_table[i].ethaddr; + return 1; + } else { + return 0; + } +} + +/** + * Responds to ARP requests to us. Upon ARP replies to us, add entry to cache + * send out queued IP packets. Updates cache with snooped address pairs. + * + * Should be called for incoming ARP packets. The pbuf in the argument + * is freed by this function. + * + * @param p The ARP packet that arrived on netif. Is freed by this function. + * @param netif The lwIP network interface on which the ARP packet pbuf arrived. + * + * @see pbuf_free() + */ +void +etharp_input(struct pbuf *p, struct netif *netif) +{ + struct etharp_hdr *hdr; + /* these are aligned properly, whereas the ARP header fields might not be */ + ip4_addr_t sipaddr, dipaddr; + u8_t for_us; + + LWIP_ASSERT_CORE_LOCKED(); + + LWIP_ERROR("netif != NULL", (netif != NULL), return;); + + hdr = (struct etharp_hdr *)p->payload; + + /* RFC 826 "Packet Reception": */ + if ((hdr->hwtype != PP_HTONS(LWIP_IANA_HWTYPE_ETHERNET)) || + (hdr->hwlen != ETH_HWADDR_LEN) || + (hdr->protolen != sizeof(ip4_addr_t)) || + (hdr->proto != PP_HTONS(ETHTYPE_IP))) { + LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_WARNING, + ("etharp_input: packet dropped, wrong hw type, hwlen, proto, protolen or ethernet type (%"U16_F"/%"U16_F"/%"U16_F"/%"U16_F")\n", + hdr->hwtype, (u16_t)hdr->hwlen, hdr->proto, (u16_t)hdr->protolen)); + ETHARP_STATS_INC(etharp.proterr); + ETHARP_STATS_INC(etharp.drop); + pbuf_free(p); + return; + } + ETHARP_STATS_INC(etharp.recv); + +#if LWIP_AUTOIP + /* We have to check if a host already has configured our random + * created link local address and continuously check if there is + * a host with this IP-address so we can detect collisions */ + autoip_arp_reply(netif, hdr); +#endif /* LWIP_AUTOIP */ + + /* Copy struct ip4_addr_wordaligned to aligned ip4_addr, to support compilers without + * structure packing (not using structure copy which breaks strict-aliasing rules). 
*/ + IPADDR_WORDALIGNED_COPY_TO_IP4_ADDR_T(&sipaddr, &hdr->sipaddr); + IPADDR_WORDALIGNED_COPY_TO_IP4_ADDR_T(&dipaddr, &hdr->dipaddr); + + /* this interface is not configured? */ + if (ip4_addr_isany_val(*netif_ip4_addr(netif))) { + for_us = 0; + } else { + /* ARP packet directed to us? */ + for_us = (u8_t)ip4_addr_cmp(&dipaddr, netif_ip4_addr(netif)); + } + + /* ARP message directed to us? + -> add IP address in ARP cache; assume requester wants to talk to us, + can result in directly sending the queued packets for this host. + ARP message not directed to us? + -> update the source IP address in the cache, if present */ + etharp_update_arp_entry(netif, &sipaddr, &(hdr->shwaddr), + for_us ? ETHARP_FLAG_TRY_HARD : ETHARP_FLAG_FIND_ONLY); + + /* now act on the message itself */ + switch (hdr->opcode) { + /* ARP request? */ + case PP_HTONS(ARP_REQUEST): + /* ARP request. If it asked for our address, we send out a + * reply. In any case, we time-stamp any existing ARP entry, + * and possibly send out an IP packet that was queued on it. */ + + LWIP_DEBUGF (ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_input: incoming ARP request\n")); + /* ARP request for our address? */ + if (for_us) { + /* send ARP response */ + etharp_raw(netif, + (struct eth_addr *)netif->hwaddr, &hdr->shwaddr, + (struct eth_addr *)netif->hwaddr, netif_ip4_addr(netif), + &hdr->shwaddr, &sipaddr, + ARP_REPLY); + /* we are not configured? */ + } else if (ip4_addr_isany_val(*netif_ip4_addr(netif))) { + /* { for_us == 0 and netif->ip_addr.addr == 0 } */ + LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_input: we are unconfigured, ARP request ignored.\n")); + /* request was not directed to us */ + } else { + /* { for_us == 0 and netif->ip_addr.addr != 0 } */ + LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_input: ARP request was not for us.\n")); + } + break; + case PP_HTONS(ARP_REPLY): + /* ARP reply. We already updated the ARP cache earlier. */ + LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_input: incoming ARP reply\n")); +#if (LWIP_DHCP && DHCP_DOES_ARP_CHECK) + /* DHCP wants to know about ARP replies from any host with an + * IP address also offered to us by the DHCP server. We do not + * want to take a duplicate IP address on a single network. + * @todo How should we handle redundant (fail-over) interfaces? */ + dhcp_arp_reply(netif, &sipaddr); +#endif /* (LWIP_DHCP && DHCP_DOES_ARP_CHECK) */ + break; + default: + LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_input: ARP unknown opcode type %"S16_F"\n", lwip_htons(hdr->opcode))); + ETHARP_STATS_INC(etharp.err); + break; + } + /* free ARP packet */ + pbuf_free(p); +} + +/** Just a small helper function that sends a pbuf to an ethernet address + * in the arp_table specified by the index 'arp_idx'. + */ +static err_t +etharp_output_to_arp_index(struct netif *netif, struct pbuf *q, netif_addr_idx_t arp_idx) +{ + LWIP_ASSERT("arp_table[arp_idx].state >= ETHARP_STATE_STABLE", + arp_table[arp_idx].state >= ETHARP_STATE_STABLE); + /* if arp table entry is about to expire: re-request it, + but only if its state is ETHARP_STATE_STABLE to prevent flooding the + network with ARP requests if this address is used frequently. 
*/ + if (arp_table[arp_idx].state == ETHARP_STATE_STABLE) { + if (arp_table[arp_idx].ctime >= ARP_AGE_REREQUEST_USED_BROADCAST) { + /* issue a standard request using broadcast */ + if (etharp_request(netif, &arp_table[arp_idx].ipaddr) == ERR_OK) { + arp_table[arp_idx].state = ETHARP_STATE_STABLE_REREQUESTING_1; + } + } else if (arp_table[arp_idx].ctime >= ARP_AGE_REREQUEST_USED_UNICAST) { + /* issue a unicast request (for 15 seconds) to prevent unnecessary broadcast */ + if (etharp_request_dst(netif, &arp_table[arp_idx].ipaddr, &arp_table[arp_idx].ethaddr) == ERR_OK) { + arp_table[arp_idx].state = ETHARP_STATE_STABLE_REREQUESTING_1; + } + } + } + + return ethernet_output(netif, q, (struct eth_addr *)(netif->hwaddr), &arp_table[arp_idx].ethaddr, ETHTYPE_IP); +} + +/** + * Resolve and fill-in Ethernet address header for outgoing IP packet. + * + * For IP multicast and broadcast, corresponding Ethernet addresses + * are selected and the packet is transmitted on the link. + * + * For unicast addresses, the packet is submitted to etharp_query(). In + * case the IP address is outside the local network, the IP address of + * the gateway is used. + * + * @param netif The lwIP network interface which the IP packet will be sent on. + * @param q The pbuf(s) containing the IP packet to be sent. + * @param ipaddr The IP address of the packet destination. + * + * @return + * - ERR_RTE No route to destination (no gateway to external networks), + * or the return type of either etharp_query() or ethernet_output(). + */ +err_t +etharp_output(struct netif *netif, struct pbuf *q, const ip4_addr_t *ipaddr) +{ + const struct eth_addr *dest; + struct eth_addr mcastaddr; + const ip4_addr_t *dst_addr = ipaddr; + + LWIP_ASSERT_CORE_LOCKED(); + LWIP_ASSERT("netif != NULL", netif != NULL); + LWIP_ASSERT("q != NULL", q != NULL); + LWIP_ASSERT("ipaddr != NULL", ipaddr != NULL); + + /* Determine on destination hardware address. Broadcasts and multicasts + * are special, other IP addresses are looked up in the ARP table. */ + + /* broadcast destination IP address? */ + if (ip4_addr_isbroadcast(ipaddr, netif)) { + /* broadcast on Ethernet also */ + dest = (const struct eth_addr *)ðbroadcast; + /* multicast destination IP address? */ + } else if (ip4_addr_ismulticast(ipaddr)) { + /* Hash IP multicast address to MAC address.*/ + mcastaddr.addr[0] = LL_IP4_MULTICAST_ADDR_0; + mcastaddr.addr[1] = LL_IP4_MULTICAST_ADDR_1; + mcastaddr.addr[2] = LL_IP4_MULTICAST_ADDR_2; + mcastaddr.addr[3] = ip4_addr2(ipaddr) & 0x7f; + mcastaddr.addr[4] = ip4_addr3(ipaddr); + mcastaddr.addr[5] = ip4_addr4(ipaddr); + /* destination Ethernet address is multicast */ + dest = &mcastaddr; + /* unicast destination IP address? */ + } else { + netif_addr_idx_t i; + /* outside local network? if so, this can neither be a global broadcast nor + a subnet broadcast. */ + if (!ip4_addr_netcmp(ipaddr, netif_ip4_addr(netif), netif_ip4_netmask(netif)) && + !ip4_addr_islinklocal(ipaddr)) { +#if LWIP_AUTOIP + struct ip_hdr *iphdr = LWIP_ALIGNMENT_CAST(struct ip_hdr *, q->payload); + /* According to RFC 3297, chapter 2.6.2 (Forwarding Rules), a packet with + a link-local source address must always be "directly to its destination + on the same physical link. The host MUST NOT send the packet to any + router for forwarding". 
*/ + if (!ip4_addr_islinklocal(&iphdr->src)) +#endif /* LWIP_AUTOIP */ + { +#ifdef LWIP_HOOK_ETHARP_GET_GW + /* For advanced routing, a single default gateway might not be enough, so get + the IP address of the gateway to handle the current destination address. */ + dst_addr = LWIP_HOOK_ETHARP_GET_GW(netif, ipaddr); + if (dst_addr == NULL) +#endif /* LWIP_HOOK_ETHARP_GET_GW */ + { + /* interface has default gateway? */ + if (!ip4_addr_isany_val(*netif_ip4_gw(netif))) { + /* send to hardware address of default gateway IP address */ + dst_addr = netif_ip4_gw(netif); + /* no default gateway available */ + } else { + /* no route to destination error (default gateway missing) */ + return ERR_RTE; + } + } + } + } +#if LWIP_NETIF_HWADDRHINT + if (netif->hints != NULL) { + /* per-pcb cached entry was given */ + netif_addr_idx_t etharp_cached_entry = netif->hints->addr_hint; + if (etharp_cached_entry < ARP_TABLE_SIZE) { +#endif /* LWIP_NETIF_HWADDRHINT */ + if ((arp_table[etharp_cached_entry].state >= ETHARP_STATE_STABLE) && +#if ETHARP_TABLE_MATCH_NETIF + (arp_table[etharp_cached_entry].netif == netif) && +#endif + (ip4_addr_cmp(dst_addr, &arp_table[etharp_cached_entry].ipaddr))) { + /* the per-pcb-cached entry is stable and the right one! */ + ETHARP_STATS_INC(etharp.cachehit); + return etharp_output_to_arp_index(netif, q, etharp_cached_entry); + } +#if LWIP_NETIF_HWADDRHINT + } + } +#endif /* LWIP_NETIF_HWADDRHINT */ + + /* find stable entry: do this here since this is a critical path for + throughput and etharp_find_entry() is kind of slow */ + for (i = 0; i < ARP_TABLE_SIZE; i++) { + if ((arp_table[i].state >= ETHARP_STATE_STABLE) && +#if ETHARP_TABLE_MATCH_NETIF + (arp_table[i].netif == netif) && +#endif + (ip4_addr_cmp(dst_addr, &arp_table[i].ipaddr))) { + /* found an existing, stable entry */ + ETHARP_SET_ADDRHINT(netif, i); + return etharp_output_to_arp_index(netif, q, i); + } + } + /* no stable entry found, use the (slower) query function: + queue on destination Ethernet address belonging to ipaddr */ + return etharp_query(netif, dst_addr, q); + } + + /* continuation for multicast/broadcast destinations */ + /* obtain source Ethernet address of the given interface */ + /* send packet directly on the link */ + return ethernet_output(netif, q, (struct eth_addr *)(netif->hwaddr), dest, ETHTYPE_IP); +} + +/** + * Send an ARP request for the given IP address and/or queue a packet. + * + * If the IP address was not yet in the cache, a pending ARP cache entry + * is added and an ARP request is sent for the given address. The packet + * is queued on this entry. + * + * If the IP address was already pending in the cache, a new ARP request + * is sent for the given address. The packet is queued on this entry. + * + * If the IP address was already stable in the cache, and a packet is + * given, it is directly sent and no ARP request is sent out. + * + * If the IP address was already stable in the cache, and no packet is + * given, an ARP request is sent out. + * + * @param netif The lwIP network interface on which ipaddr + * must be queried for. + * @param ipaddr The IP address to be resolved. + * @param q If non-NULL, a pbuf that must be delivered to the IP address. + * q is not freed by this function. + * + * @note q must only be ONE packet, not a packet queue! + * + * @return + * - ERR_BUF Could not make room for Ethernet header. + * - ERR_MEM Hardware address unknown, and no more ARP entries available + * to query for address or queue the packet. 
+ * - ERR_MEM Could not queue packet due to memory shortage. + * - ERR_RTE No route to destination (no gateway to external networks). + * - ERR_ARG Non-unicast address given, those will not appear in ARP cache. + * + */ +err_t +etharp_query(struct netif *netif, const ip4_addr_t *ipaddr, struct pbuf *q) +{ + struct eth_addr *srcaddr = (struct eth_addr *)netif->hwaddr; + err_t result = ERR_MEM; + int is_new_entry = 0; + s16_t i_err; + netif_addr_idx_t i; + + /* non-unicast address? */ + if (ip4_addr_isbroadcast(ipaddr, netif) || + ip4_addr_ismulticast(ipaddr) || + ip4_addr_isany(ipaddr)) { + LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_query: will not add non-unicast IP address to ARP cache\n")); + return ERR_ARG; + } + + /* find entry in ARP cache, ask to create entry if queueing packet */ + i_err = etharp_find_entry(ipaddr, ETHARP_FLAG_TRY_HARD, netif); + + /* could not find or create entry? */ + if (i_err < 0) { + LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_query: could not create ARP entry\n")); + if (q) { + LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_query: packet dropped\n")); + ETHARP_STATS_INC(etharp.memerr); + } + return (err_t)i_err; + } + LWIP_ASSERT("type overflow", (size_t)i_err < NETIF_ADDR_IDX_MAX); + i = (netif_addr_idx_t)i_err; + + /* mark a fresh entry as pending (we just sent a request) */ + if (arp_table[i].state == ETHARP_STATE_EMPTY) { + is_new_entry = 1; + arp_table[i].state = ETHARP_STATE_PENDING; + /* record network interface for re-sending arp request in etharp_tmr */ + arp_table[i].netif = netif; + } + + /* { i is either a STABLE or (new or existing) PENDING entry } */ + LWIP_ASSERT("arp_table[i].state == PENDING or STABLE", + ((arp_table[i].state == ETHARP_STATE_PENDING) || + (arp_table[i].state >= ETHARP_STATE_STABLE))); + + /* do we have a new entry? or an implicit query request? */ + if (is_new_entry || (q == NULL)) { + /* try to resolve it; send out ARP request */ + result = etharp_request(netif, ipaddr); + if (result != ERR_OK) { + /* ARP request couldn't be sent */ + /* We don't re-send arp request in etharp_tmr, but we still queue packets, + since this failure could be temporary, and the next packet calling + etharp_query again could lead to sending the queued packets. */ + } + if (q == NULL) { + return result; + } + } + + /* packet given? */ + LWIP_ASSERT("q != NULL", q != NULL); + /* stable entry? */ + if (arp_table[i].state >= ETHARP_STATE_STABLE) { + /* we have a valid IP->Ethernet address mapping */ + ETHARP_SET_ADDRHINT(netif, i); + /* send the packet */ + result = ethernet_output(netif, q, srcaddr, &(arp_table[i].ethaddr), ETHTYPE_IP); + /* pending entry? (either just created or already pending */ + } else if (arp_table[i].state == ETHARP_STATE_PENDING) { + /* entry is still pending, queue the given packet 'q' */ + struct pbuf *p; + int copy_needed = 0; + /* IF q includes a pbuf that must be copied, copy the whole chain into a + * new PBUF_RAM. See the definition of PBUF_NEEDS_COPY for details. */ + p = q; + while (p) { + LWIP_ASSERT("no packet queues allowed!", (p->len != p->tot_len) || (p->next == 0)); + if (PBUF_NEEDS_COPY(p)) { + copy_needed = 1; + break; + } + p = p->next; + } + if (copy_needed) { + /* copy the whole packet into new pbufs */ + p = pbuf_clone(PBUF_LINK, PBUF_RAM, q); + } else { + /* referencing the old pbuf is enough */ + p = q; + pbuf_ref(p); + } + /* packet could be taken over? */ + if (p != NULL) { + /* queue packet ... 
*/ +#if ARP_QUEUEING + struct etharp_q_entry *new_entry; + /* allocate a new arp queue entry */ + new_entry = (struct etharp_q_entry *)memp_malloc(MEMP_ARP_QUEUE); + if (new_entry != NULL) { + unsigned int qlen = 0; + new_entry->next = 0; + new_entry->p = p; + if (arp_table[i].q != NULL) { + /* queue was already existent, append the new entry to the end */ + struct etharp_q_entry *r; + r = arp_table[i].q; + qlen++; + while (r->next != NULL) { + r = r->next; + qlen++; + } + r->next = new_entry; + } else { + /* queue did not exist, first item in queue */ + arp_table[i].q = new_entry; + } +#if ARP_QUEUE_LEN + if (qlen >= ARP_QUEUE_LEN) { + struct etharp_q_entry *old; + old = arp_table[i].q; + arp_table[i].q = arp_table[i].q->next; + pbuf_free(old->p); + memp_free(MEMP_ARP_QUEUE, old); + } +#endif + LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_query: queued packet %p on ARP entry %"U16_F"\n", (void *)q, i)); + result = ERR_OK; + } else { + /* the pool MEMP_ARP_QUEUE is empty */ + pbuf_free(p); + LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_query: could not queue a copy of PBUF_REF packet %p (out of memory)\n", (void *)q)); + result = ERR_MEM; + } +#else /* ARP_QUEUEING */ + /* always queue one packet per ARP request only, freeing a previously queued packet */ + if (arp_table[i].q != NULL) { + LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_query: dropped previously queued packet %p for ARP entry %"U16_F"\n", (void *)q, (u16_t)i)); + pbuf_free(arp_table[i].q); + } + arp_table[i].q = p; + result = ERR_OK; + LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_query: queued packet %p on ARP entry %"U16_F"\n", (void *)q, (u16_t)i)); +#endif /* ARP_QUEUEING */ + } else { + ETHARP_STATS_INC(etharp.memerr); + LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_query: could not queue a copy of PBUF_REF packet %p (out of memory)\n", (void *)q)); + result = ERR_MEM; + } + } + return result; +} + +/** + * Send a raw ARP packet (opcode and all addresses can be modified) + * + * @param netif the lwip network interface on which to send the ARP packet + * @param ethsrc_addr the source MAC address for the ethernet header + * @param ethdst_addr the destination MAC address for the ethernet header + * @param hwsrc_addr the source MAC address for the ARP protocol header + * @param ipsrc_addr the source IP address for the ARP protocol header + * @param hwdst_addr the destination MAC address for the ARP protocol header + * @param ipdst_addr the destination IP address for the ARP protocol header + * @param opcode the type of the ARP packet + * @return ERR_OK if the ARP packet has been sent + * ERR_MEM if the ARP packet couldn't be allocated + * any other err_t on failure + */ +static err_t +etharp_raw(struct netif *netif, const struct eth_addr *ethsrc_addr, + const struct eth_addr *ethdst_addr, + const struct eth_addr *hwsrc_addr, const ip4_addr_t *ipsrc_addr, + const struct eth_addr *hwdst_addr, const ip4_addr_t *ipdst_addr, + const u16_t opcode) +{ + struct pbuf *p; + err_t result = ERR_OK; + struct etharp_hdr *hdr; + + LWIP_ASSERT("netif != NULL", netif != NULL); + + /* allocate a pbuf for the outgoing ARP request packet */ + p = pbuf_alloc(PBUF_LINK, SIZEOF_ETHARP_HDR, PBUF_RAM); + /* could allocate a pbuf for an ARP request? 
*/ + if (p == NULL) { + LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_SERIOUS, + ("etharp_raw: could not allocate pbuf for ARP request.\n")); + ETHARP_STATS_INC(etharp.memerr); + return ERR_MEM; + } + LWIP_ASSERT("check that first pbuf can hold struct etharp_hdr", + (p->len >= SIZEOF_ETHARP_HDR)); + + hdr = (struct etharp_hdr *)p->payload; + LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_raw: sending raw ARP packet.\n")); + hdr->opcode = lwip_htons(opcode); + + LWIP_ASSERT("netif->hwaddr_len must be the same as ETH_HWADDR_LEN for etharp!", + (netif->hwaddr_len == ETH_HWADDR_LEN)); + + /* Write the ARP MAC-Addresses */ + SMEMCPY(&hdr->shwaddr, hwsrc_addr, ETH_HWADDR_LEN); + SMEMCPY(&hdr->dhwaddr, hwdst_addr, ETH_HWADDR_LEN); + /* Copy struct ip4_addr_wordaligned to aligned ip4_addr, to support compilers without + * structure packing. */ + IPADDR_WORDALIGNED_COPY_FROM_IP4_ADDR_T(&hdr->sipaddr, ipsrc_addr); + IPADDR_WORDALIGNED_COPY_FROM_IP4_ADDR_T(&hdr->dipaddr, ipdst_addr); + + hdr->hwtype = PP_HTONS(LWIP_IANA_HWTYPE_ETHERNET); + hdr->proto = PP_HTONS(ETHTYPE_IP); + /* set hwlen and protolen */ + hdr->hwlen = ETH_HWADDR_LEN; + hdr->protolen = sizeof(ip4_addr_t); + + /* send ARP query */ +#if LWIP_AUTOIP + /* If we are using Link-Local, all ARP packets that contain a Link-Local + * 'sender IP address' MUST be sent using link-layer broadcast instead of + * link-layer unicast. (See RFC3927 Section 2.5, last paragraph) */ + if (ip4_addr_islinklocal(ipsrc_addr)) { + ethernet_output(netif, p, ethsrc_addr, ðbroadcast, ETHTYPE_ARP); + } else +#endif /* LWIP_AUTOIP */ + { + ethernet_output(netif, p, ethsrc_addr, ethdst_addr, ETHTYPE_ARP); + } + + ETHARP_STATS_INC(etharp.xmit); + /* free ARP query packet */ + pbuf_free(p); + p = NULL; + /* could not allocate pbuf for ARP request */ + + return result; +} + +/** + * Send an ARP request packet asking for ipaddr to a specific eth address. + * Used to send unicast request to refresh the ARP table just before an entry + * times out + * + * @param netif the lwip network interface on which to send the request + * @param ipaddr the IP address for which to ask + * @param hw_dst_addr the ethernet address to send this packet to + * @return ERR_OK if the request has been sent + * ERR_MEM if the ARP packet couldn't be allocated + * any other err_t on failure + */ +static err_t +etharp_request_dst(struct netif *netif, const ip4_addr_t *ipaddr, const struct eth_addr *hw_dst_addr) +{ + return etharp_raw(netif, (struct eth_addr *)netif->hwaddr, hw_dst_addr, + (struct eth_addr *)netif->hwaddr, netif_ip4_addr(netif), ðzero, + ipaddr, ARP_REQUEST); +} + +/** + * Send an ARP request packet asking for ipaddr. 
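+ * This is the broadcast variant: as the one-line body below shows, it simply
+ * forwards to etharp_request_dst() with ethbroadcast (ff:ff:ff:ff:ff:ff) as
+ * the destination Ethernet address.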
+ * + * @param netif the lwip network interface on which to send the request + * @param ipaddr the IP address for which to ask + * @return ERR_OK if the request has been sent + * ERR_MEM if the ARP packet couldn't be allocated + * any other err_t on failure + */ +err_t +etharp_request(struct netif *netif, const ip4_addr_t *ipaddr) +{ + LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_request: sending ARP request.\n")); + return etharp_request_dst(netif, ipaddr, ðbroadcast); +} + +#endif /* LWIP_IPV4 && LWIP_ARP */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/ipv4/icmp.c b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/ipv4/icmp.c new file mode 100644 index 0000000..fd9054b --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/ipv4/icmp.c @@ -0,0 +1,404 @@ +/** + * @file + * ICMP - Internet Control Message Protocol + * + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ + +/* Some ICMP messages should be passed to the transport protocols. This + is not implemented. */ + +#include "lwip/opt.h" + +#if LWIP_IPV4 && LWIP_ICMP /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/icmp.h" +#include "lwip/inet_chksum.h" +#include "lwip/ip.h" +#include "lwip/def.h" +#include "lwip/stats.h" + +#include + +#ifdef LWIP_HOOK_FILENAME +#include LWIP_HOOK_FILENAME +#endif + +/** Small optimization: set to 0 if incoming PBUF_POOL pbuf always can be + * used to modify and send a response packet (and to 1 if this is not the case, + * e.g. 
when link header is stripped off when receiving) */ +#ifndef LWIP_ICMP_ECHO_CHECK_INPUT_PBUF_LEN +#define LWIP_ICMP_ECHO_CHECK_INPUT_PBUF_LEN 1 +#endif /* LWIP_ICMP_ECHO_CHECK_INPUT_PBUF_LEN */ + +/* The amount of data from the original packet to return in a dest-unreachable */ +#define ICMP_DEST_UNREACH_DATASIZE 8 + +static void icmp_send_response(struct pbuf *p, u8_t type, u8_t code); + +/** + * Processes ICMP input packets, called from ip_input(). + * + * Currently only processes icmp echo requests and sends + * out the echo response. + * + * @param p the icmp echo request packet, p->payload pointing to the icmp header + * @param inp the netif on which this packet was received + */ +void +icmp_input(struct pbuf *p, struct netif *inp) +{ + u8_t type; +#ifdef LWIP_DEBUG + u8_t code; +#endif /* LWIP_DEBUG */ + struct icmp_echo_hdr *iecho; + const struct ip_hdr *iphdr_in; + u16_t hlen; + const ip4_addr_t *src; + + ICMP_STATS_INC(icmp.recv); + MIB2_STATS_INC(mib2.icmpinmsgs); + + iphdr_in = ip4_current_header(); + hlen = IPH_HL_BYTES(iphdr_in); + if (hlen < IP_HLEN) { + LWIP_DEBUGF(ICMP_DEBUG, ("icmp_input: short IP header (%"S16_F" bytes) received\n", hlen)); + goto lenerr; + } + if (p->len < sizeof(u16_t) * 2) { + LWIP_DEBUGF(ICMP_DEBUG, ("icmp_input: short ICMP (%"U16_F" bytes) received\n", p->tot_len)); + goto lenerr; + } + + type = *((u8_t *)p->payload); +#ifdef LWIP_DEBUG + code = *(((u8_t *)p->payload) + 1); + /* if debug is enabled but debug statement below is somehow disabled: */ + LWIP_UNUSED_ARG(code); +#endif /* LWIP_DEBUG */ + switch (type) { + case ICMP_ER: + /* This is OK, echo reply might have been parsed by a raw PCB + (as obviously, an echo request has been sent, too). */ + MIB2_STATS_INC(mib2.icmpinechoreps); + break; + case ICMP_ECHO: + MIB2_STATS_INC(mib2.icmpinechos); + src = ip4_current_dest_addr(); + /* multicast destination address? */ + if (ip4_addr_ismulticast(ip4_current_dest_addr())) { +#if LWIP_MULTICAST_PING + /* For multicast, use address of receiving interface as source address */ + src = netif_ip4_addr(inp); +#else /* LWIP_MULTICAST_PING */ + LWIP_DEBUGF(ICMP_DEBUG, ("icmp_input: Not echoing to multicast pings\n")); + goto icmperr; +#endif /* LWIP_MULTICAST_PING */ + } + /* broadcast destination address? 
*/ + if (ip4_addr_isbroadcast(ip4_current_dest_addr(), ip_current_netif())) { +#if LWIP_BROADCAST_PING + /* For broadcast, use address of receiving interface as source address */ + src = netif_ip4_addr(inp); +#else /* LWIP_BROADCAST_PING */ + LWIP_DEBUGF(ICMP_DEBUG, ("icmp_input: Not echoing to broadcast pings\n")); + goto icmperr; +#endif /* LWIP_BROADCAST_PING */ + } + LWIP_DEBUGF(ICMP_DEBUG, ("icmp_input: ping\n")); + if (p->tot_len < sizeof(struct icmp_echo_hdr)) { + LWIP_DEBUGF(ICMP_DEBUG, ("icmp_input: bad ICMP echo received\n")); + goto lenerr; + } +#if CHECKSUM_CHECK_ICMP + IF__NETIF_CHECKSUM_ENABLED(inp, NETIF_CHECKSUM_CHECK_ICMP) { + if (inet_chksum_pbuf(p) != 0) { + LWIP_DEBUGF(ICMP_DEBUG, ("icmp_input: checksum failed for received ICMP echo\n")); + pbuf_free(p); + ICMP_STATS_INC(icmp.chkerr); + MIB2_STATS_INC(mib2.icmpinerrors); + return; + } + } +#endif +#if LWIP_ICMP_ECHO_CHECK_INPUT_PBUF_LEN + if (pbuf_add_header(p, hlen + PBUF_LINK_HLEN + PBUF_LINK_ENCAPSULATION_HLEN)) { + /* p is not big enough to contain link headers + * allocate a new one and copy p into it + */ + struct pbuf *r; + u16_t alloc_len = (u16_t)(p->tot_len + hlen); + if (alloc_len < p->tot_len) { + LWIP_DEBUGF(ICMP_DEBUG, ("icmp_input: allocating new pbuf failed (tot_len overflow)\n")); + goto icmperr; + } + /* allocate new packet buffer with space for link headers */ + r = pbuf_alloc(PBUF_LINK, alloc_len, PBUF_RAM); + if (r == NULL) { + LWIP_DEBUGF(ICMP_DEBUG, ("icmp_input: allocating new pbuf failed\n")); + goto icmperr; + } + if (r->len < hlen + sizeof(struct icmp_echo_hdr)) { + LWIP_DEBUGF(ICMP_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("first pbuf cannot hold the ICMP header")); + pbuf_free(r); + goto icmperr; + } + /* copy the ip header */ + MEMCPY(r->payload, iphdr_in, hlen); + /* switch r->payload back to icmp header (cannot fail) */ + if (pbuf_remove_header(r, hlen)) { + LWIP_ASSERT("icmp_input: moving r->payload to icmp header failed\n", 0); + pbuf_free(r); + goto icmperr; + } + /* copy the rest of the packet without ip header */ + if (pbuf_copy(r, p) != ERR_OK) { + LWIP_DEBUGF(ICMP_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("icmp_input: copying to new pbuf failed")); + pbuf_free(r); + goto icmperr; + } + /* free the original p */ + pbuf_free(p); + /* we now have an identical copy of p that has room for link headers */ + p = r; + } else { + /* restore p->payload to point to icmp header (cannot fail) */ + if (pbuf_remove_header(p, hlen + PBUF_LINK_HLEN + PBUF_LINK_ENCAPSULATION_HLEN)) { + LWIP_ASSERT("icmp_input: restoring original p->payload failed\n", 0); + goto icmperr; + } + } +#endif /* LWIP_ICMP_ECHO_CHECK_INPUT_PBUF_LEN */ + /* At this point, all checks are OK. */ + /* We generate an answer by switching the dest and src ip addresses, + * setting the icmp type to ECHO_RESPONSE and updating the checksum. 
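+     * The checksum update below is incremental rather than a full recompute:
+     * changing the type byte from ICMP_ECHO (8) to ICMP_ER (0) lowers the
+     * checksummed type/code word by 0x0800, so 0x0800 is added back to the
+     * one's-complement checksum; the comparison against
+     * 0xffff - (ICMP_ECHO << 8) supplies the end-around carry (roughly:
+     * chksum' = chksum + htons(0x0800), plus 1 if the addition wraps).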
*/ + iecho = (struct icmp_echo_hdr *)p->payload; + if (pbuf_add_header(p, hlen)) { + LWIP_DEBUGF(ICMP_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("Can't move over header in packet")); + } else { + err_t ret; + struct ip_hdr *iphdr = (struct ip_hdr *)p->payload; + ip4_addr_copy(iphdr->src, *src); + ip4_addr_copy(iphdr->dest, *ip4_current_src_addr()); + ICMPH_TYPE_SET(iecho, ICMP_ER); +#if CHECKSUM_GEN_ICMP + IF__NETIF_CHECKSUM_ENABLED(inp, NETIF_CHECKSUM_GEN_ICMP) { + /* adjust the checksum */ + if (iecho->chksum > PP_HTONS(0xffffU - (ICMP_ECHO << 8))) { + iecho->chksum = (u16_t)(iecho->chksum + PP_HTONS((u16_t)(ICMP_ECHO << 8)) + 1); + } else { + iecho->chksum = (u16_t)(iecho->chksum + PP_HTONS(ICMP_ECHO << 8)); + } + } +#if LWIP_CHECKSUM_CTRL_PER_NETIF + else { + iecho->chksum = 0; + } +#endif /* LWIP_CHECKSUM_CTRL_PER_NETIF */ +#else /* CHECKSUM_GEN_ICMP */ + iecho->chksum = 0; +#endif /* CHECKSUM_GEN_ICMP */ + + /* Set the correct TTL and recalculate the header checksum. */ + IPH_TTL_SET(iphdr, ICMP_TTL); + IPH_CHKSUM_SET(iphdr, 0); +#if CHECKSUM_GEN_IP + IF__NETIF_CHECKSUM_ENABLED(inp, NETIF_CHECKSUM_GEN_IP) { + IPH_CHKSUM_SET(iphdr, inet_chksum(iphdr, hlen)); + } +#endif /* CHECKSUM_GEN_IP */ + + ICMP_STATS_INC(icmp.xmit); + /* increase number of messages attempted to send */ + MIB2_STATS_INC(mib2.icmpoutmsgs); + /* increase number of echo replies attempted to send */ + MIB2_STATS_INC(mib2.icmpoutechoreps); + + /* send an ICMP packet */ + ret = ip4_output_if(p, src, LWIP_IP_HDRINCL, + ICMP_TTL, 0, IP_PROTO_ICMP, inp); + if (ret != ERR_OK) { + LWIP_DEBUGF(ICMP_DEBUG, ("icmp_input: ip_output_if returned an error: %s\n", lwip_strerr(ret))); + } + } + break; + default: + if (type == ICMP_DUR) { + MIB2_STATS_INC(mib2.icmpindestunreachs); + } else if (type == ICMP_TE) { + MIB2_STATS_INC(mib2.icmpintimeexcds); + } else if (type == ICMP_PP) { + MIB2_STATS_INC(mib2.icmpinparmprobs); + } else if (type == ICMP_SQ) { + MIB2_STATS_INC(mib2.icmpinsrcquenchs); + } else if (type == ICMP_RD) { + MIB2_STATS_INC(mib2.icmpinredirects); + } else if (type == ICMP_TS) { + MIB2_STATS_INC(mib2.icmpintimestamps); + } else if (type == ICMP_TSR) { + MIB2_STATS_INC(mib2.icmpintimestampreps); + } else if (type == ICMP_AM) { + MIB2_STATS_INC(mib2.icmpinaddrmasks); + } else if (type == ICMP_AMR) { + MIB2_STATS_INC(mib2.icmpinaddrmaskreps); + } + LWIP_DEBUGF(ICMP_DEBUG, ("icmp_input: ICMP type %"S16_F" code %"S16_F" not supported.\n", + (s16_t)type, (s16_t)code)); + ICMP_STATS_INC(icmp.proterr); + ICMP_STATS_INC(icmp.drop); + } + pbuf_free(p); + return; +lenerr: + pbuf_free(p); + ICMP_STATS_INC(icmp.lenerr); + MIB2_STATS_INC(mib2.icmpinerrors); + return; +#if LWIP_ICMP_ECHO_CHECK_INPUT_PBUF_LEN || !LWIP_MULTICAST_PING || !LWIP_BROADCAST_PING +icmperr: + pbuf_free(p); + ICMP_STATS_INC(icmp.err); + MIB2_STATS_INC(mib2.icmpinerrors); + return; +#endif /* LWIP_ICMP_ECHO_CHECK_INPUT_PBUF_LEN || !LWIP_MULTICAST_PING || !LWIP_BROADCAST_PING */ +} + +/** + * Send an icmp 'destination unreachable' packet, called from ip_input() if + * the transport layer protocol is unknown and from udp_input() if the local + * port is not bound. 
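+ * For example, when udp_input() finds no PCB bound to the destination port,
+ * it moves p->payload back to the IP header and, in effect, calls
+ * icmp_dest_unreach(p, ICMP_DUR_PORT) so that a "port unreachable" is
+ * returned to the sender.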
+ * + * @param p the input packet for which the 'unreachable' should be sent, + * p->payload pointing to the IP header + * @param t type of the 'unreachable' packet + */ +void +icmp_dest_unreach(struct pbuf *p, enum icmp_dur_type t) +{ + MIB2_STATS_INC(mib2.icmpoutdestunreachs); + icmp_send_response(p, ICMP_DUR, t); +} + +#if IP_FORWARD || IP_REASSEMBLY +/** + * Send a 'time exceeded' packet, called from ip_forward() if TTL is 0. + * + * @param p the input packet for which the 'time exceeded' should be sent, + * p->payload pointing to the IP header + * @param t type of the 'time exceeded' packet + */ +void +icmp_time_exceeded(struct pbuf *p, enum icmp_te_type t) +{ + MIB2_STATS_INC(mib2.icmpouttimeexcds); + icmp_send_response(p, ICMP_TE, t); +} + +#endif /* IP_FORWARD || IP_REASSEMBLY */ + +/** + * Send an icmp packet in response to an incoming packet. + * + * @param p the input packet for which the 'unreachable' should be sent, + * p->payload pointing to the IP header + * @param type Type of the ICMP header + * @param code Code of the ICMP header + */ +static void +icmp_send_response(struct pbuf *p, u8_t type, u8_t code) +{ + struct pbuf *q; + struct ip_hdr *iphdr; + /* we can use the echo header here */ + struct icmp_echo_hdr *icmphdr; + ip4_addr_t iphdr_src; + struct netif *netif; + + /* increase number of messages attempted to send */ + MIB2_STATS_INC(mib2.icmpoutmsgs); + + /* ICMP header + IP header + 8 bytes of data */ + q = pbuf_alloc(PBUF_IP, sizeof(struct icmp_echo_hdr) + IP_HLEN + ICMP_DEST_UNREACH_DATASIZE, + PBUF_RAM); + if (q == NULL) { + LWIP_DEBUGF(ICMP_DEBUG, ("icmp_time_exceeded: failed to allocate pbuf for ICMP packet.\n")); + MIB2_STATS_INC(mib2.icmpouterrors); + return; + } + LWIP_ASSERT("check that first pbuf can hold icmp message", + (q->len >= (sizeof(struct icmp_echo_hdr) + IP_HLEN + ICMP_DEST_UNREACH_DATASIZE))); + + iphdr = (struct ip_hdr *)p->payload; + LWIP_DEBUGF(ICMP_DEBUG, ("icmp_time_exceeded from ")); + ip4_addr_debug_print_val(ICMP_DEBUG, iphdr->src); + LWIP_DEBUGF(ICMP_DEBUG, (" to ")); + ip4_addr_debug_print_val(ICMP_DEBUG, iphdr->dest); + LWIP_DEBUGF(ICMP_DEBUG, ("\n")); + + icmphdr = (struct icmp_echo_hdr *)q->payload; + icmphdr->type = type; + icmphdr->code = code; + icmphdr->id = 0; + icmphdr->seqno = 0; + + /* copy fields from original packet */ + SMEMCPY((u8_t *)q->payload + sizeof(struct icmp_echo_hdr), (u8_t *)p->payload, + IP_HLEN + ICMP_DEST_UNREACH_DATASIZE); + + ip4_addr_copy(iphdr_src, iphdr->src); +#ifdef LWIP_HOOK_IP4_ROUTE_SRC + { + ip4_addr_t iphdr_dst; + ip4_addr_copy(iphdr_dst, iphdr->dest); + netif = ip4_route_src(&iphdr_dst, &iphdr_src); + } +#else + netif = ip4_route(&iphdr_src); +#endif + if (netif != NULL) { + /* calculate checksum */ + icmphdr->chksum = 0; +#if CHECKSUM_GEN_ICMP + IF__NETIF_CHECKSUM_ENABLED(netif, NETIF_CHECKSUM_GEN_ICMP) { + icmphdr->chksum = inet_chksum(icmphdr, q->len); + } +#endif + ICMP_STATS_INC(icmp.xmit); + ip4_output_if(q, NULL, &iphdr_src, ICMP_TTL, 0, IP_PROTO_ICMP, netif); + } + pbuf_free(q); +} + +#endif /* LWIP_IPV4 && LWIP_ICMP */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/ipv4/igmp.c b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/ipv4/igmp.c new file mode 100644 index 0000000..61f45d9 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/ipv4/igmp.c @@ -0,0 +1,801 @@ +/** + * @file + * IGMP - Internet Group Management Protocol + * + * 
@defgroup igmp IGMP + * @ingroup ip4 + * To be called from TCPIP thread + */ + +/* + * Copyright (c) 2002 CITEL Technologies Ltd. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of CITEL Technologies Ltd nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY CITEL TECHNOLOGIES AND CONTRIBUTORS ``AS IS'' + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL CITEL TECHNOLOGIES OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * This file is a contribution to the lwIP TCP/IP stack. + * The Swedish Institute of Computer Science and Adam Dunkels + * are specifically granted permission to redistribute this + * source code. +*/ + +/*------------------------------------------------------------- +Note 1) +Although the rfc requires V1 AND V2 capability +we will only support v2 since now V1 is very old (August 1989) +V1 can be added if required + +a debug print and statistic have been implemented to +show this up. +------------------------------------------------------------- +------------------------------------------------------------- +Note 2) +A query for a specific group address (as opposed to ALLHOSTS) +has now been implemented as I am unsure if it is required + +a debug print and statistic have been implemented to +show this up. +------------------------------------------------------------- +------------------------------------------------------------- +Note 3) +The router alert rfc 2113 is implemented in outgoing packets +but not checked rigorously incoming +------------------------------------------------------------- +Steve Reynolds +------------------------------------------------------------*/ + +/*----------------------------------------------------------------------------- + * RFC 988 - Host extensions for IP multicasting - V0 + * RFC 1054 - Host extensions for IP multicasting - + * RFC 1112 - Host extensions for IP multicasting - V1 + * RFC 2236 - Internet Group Management Protocol, Version 2 - V2 <- this code is based on this RFC (it's the "de facto" standard) + * RFC 3376 - Internet Group Management Protocol, Version 3 - V3 + * RFC 4604 - Using Internet Group Management Protocol Version 3... 
- V3+ + * RFC 2113 - IP Router Alert Option - + *----------------------------------------------------------------------------*/ + +/*----------------------------------------------------------------------------- + * Includes + *----------------------------------------------------------------------------*/ + +#include "lwip/opt.h" + +#if LWIP_IPV4 && LWIP_IGMP /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/igmp.h" +#include "lwip/debug.h" +#include "lwip/def.h" +#include "lwip/mem.h" +#include "lwip/ip.h" +#include "lwip/inet_chksum.h" +#include "lwip/netif.h" +#include "lwip/stats.h" +#include "lwip/prot/igmp.h" + +#include + +static struct igmp_group *igmp_lookup_group(struct netif *ifp, const ip4_addr_t *addr); +static err_t igmp_remove_group(struct netif *netif, struct igmp_group *group); +static void igmp_timeout(struct netif *netif, struct igmp_group *group); +static void igmp_start_timer(struct igmp_group *group, u8_t max_time); +static void igmp_delaying_member(struct igmp_group *group, u8_t maxresp); +static err_t igmp_ip_output_if(struct pbuf *p, const ip4_addr_t *src, const ip4_addr_t *dest, struct netif *netif); +static void igmp_send(struct netif *netif, struct igmp_group *group, u8_t type); + +static ip4_addr_t allsystems; +static ip4_addr_t allrouters; + +/** + * Initialize the IGMP module + */ +void +igmp_init(void) +{ + LWIP_DEBUGF(IGMP_DEBUG, ("igmp_init: initializing\n")); + + IP4_ADDR(&allsystems, 224, 0, 0, 1); + IP4_ADDR(&allrouters, 224, 0, 0, 2); +} + +/** + * Start IGMP processing on interface + * + * @param netif network interface on which start IGMP processing + */ +err_t +igmp_start(struct netif *netif) +{ + struct igmp_group *group; + + LWIP_DEBUGF(IGMP_DEBUG, ("igmp_start: starting IGMP processing on if %p\n", (void *)netif)); + + group = igmp_lookup_group(netif, &allsystems); + + if (group != NULL) { + group->group_state = IGMP_GROUP_IDLE_MEMBER; + group->use++; + + /* Allow the igmp messages at the MAC level */ + if (netif->igmp_mac_filter != NULL) { + LWIP_DEBUGF(IGMP_DEBUG, ("igmp_start: igmp_mac_filter(ADD ")); + ip4_addr_debug_print_val(IGMP_DEBUG, allsystems); + LWIP_DEBUGF(IGMP_DEBUG, (") on if %p\n", (void *)netif)); + netif->igmp_mac_filter(netif, &allsystems, NETIF_ADD_MAC_FILTER); + } + + return ERR_OK; + } + + return ERR_MEM; +} + +/** + * Stop IGMP processing on interface + * + * @param netif network interface on which stop IGMP processing + */ +err_t +igmp_stop(struct netif *netif) +{ + struct igmp_group *group = netif_igmp_data(netif); + + netif_set_client_data(netif, LWIP_NETIF_CLIENT_DATA_INDEX_IGMP, NULL); + + while (group != NULL) { + struct igmp_group *next = group->next; /* avoid use-after-free below */ + + /* disable the group at the MAC level */ + if (netif->igmp_mac_filter != NULL) { + LWIP_DEBUGF(IGMP_DEBUG, ("igmp_stop: igmp_mac_filter(DEL ")); + ip4_addr_debug_print_val(IGMP_DEBUG, group->group_address); + LWIP_DEBUGF(IGMP_DEBUG, (") on if %p\n", (void *)netif)); + netif->igmp_mac_filter(netif, &(group->group_address), NETIF_DEL_MAC_FILTER); + } + + /* free group */ + memp_free(MEMP_IGMP_GROUP, group); + + /* move to "next" */ + group = next; + } + return ERR_OK; +} + +/** + * Report IGMP memberships for this interface + * + * @param netif network interface on which report IGMP memberships + */ +void +igmp_report_groups(struct netif *netif) +{ + struct igmp_group *group = netif_igmp_data(netif); + + LWIP_DEBUGF(IGMP_DEBUG, ("igmp_report_groups: sending IGMP reports on if %p\n", (void *)netif)); + + /* Skip 
the first group in the list, it is always the allsystems group added in igmp_start() */ + if (group != NULL) { + group = group->next; + } + + while (group != NULL) { + igmp_delaying_member(group, IGMP_JOIN_DELAYING_MEMBER_TMR); + group = group->next; + } +} + +/** + * Search for a group in the netif's igmp group list + * + * @param ifp the network interface for which to look + * @param addr the group ip address to search for + * @return a struct igmp_group* if the group has been found, + * NULL if the group wasn't found. + */ +struct igmp_group * +igmp_lookfor_group(struct netif *ifp, const ip4_addr_t *addr) +{ + struct igmp_group *group = netif_igmp_data(ifp); + + while (group != NULL) { + if (ip4_addr_cmp(&(group->group_address), addr)) { + return group; + } + group = group->next; + } + + /* to be clearer, we return NULL here instead of + * 'group' (which is also NULL at this point). + */ + return NULL; +} + +/** + * Search for a specific igmp group and create a new one if not found- + * + * @param ifp the network interface for which to look + * @param addr the group ip address to search + * @return a struct igmp_group*, + * NULL on memory error. + */ +static struct igmp_group * +igmp_lookup_group(struct netif *ifp, const ip4_addr_t *addr) +{ + struct igmp_group *group; + struct igmp_group *list_head = netif_igmp_data(ifp); + + /* Search if the group already exists */ + group = igmp_lookfor_group(ifp, addr); + if (group != NULL) { + /* Group already exists. */ + return group; + } + + /* Group doesn't exist yet, create a new one */ + group = (struct igmp_group *)memp_malloc(MEMP_IGMP_GROUP); + if (group != NULL) { + ip4_addr_set(&(group->group_address), addr); + group->timer = 0; /* Not running */ + group->group_state = IGMP_GROUP_NON_MEMBER; + group->last_reporter_flag = 0; + group->use = 0; + + /* Ensure allsystems group is always first in list */ + if (list_head == NULL) { + /* this is the first entry in linked list */ + LWIP_ASSERT("igmp_lookup_group: first group must be allsystems", + (ip4_addr_cmp(addr, &allsystems) != 0)); + group->next = NULL; + netif_set_client_data(ifp, LWIP_NETIF_CLIENT_DATA_INDEX_IGMP, group); + } else { + /* append _after_ first entry */ + LWIP_ASSERT("igmp_lookup_group: all except first group must not be allsystems", + (ip4_addr_cmp(addr, &allsystems) == 0)); + group->next = list_head->next; + list_head->next = group; + } + } + + LWIP_DEBUGF(IGMP_DEBUG, ("igmp_lookup_group: %sallocated a new group with address ", (group ? "" : "impossible to "))); + ip4_addr_debug_print(IGMP_DEBUG, addr); + LWIP_DEBUGF(IGMP_DEBUG, (" on if %p\n", (void *)ifp)); + + return group; +} + +/** + * Remove a group from netif's igmp group list, but don't free it yet + * + * @param group the group to remove from the netif's igmp group list + * @return ERR_OK if group was removed from the list, an err_t otherwise + */ +static err_t +igmp_remove_group(struct netif *netif, struct igmp_group *group) +{ + err_t err = ERR_OK; + struct igmp_group *tmp_group; + + /* Skip the first group in the list, it is always the allsystems group added in igmp_start() */ + for (tmp_group = netif_igmp_data(netif); tmp_group != NULL; tmp_group = tmp_group->next) { + if (tmp_group->next == group) { + tmp_group->next = group->next; + break; + } + } + /* Group not found in netif's igmp group list */ + if (tmp_group == NULL) { + err = ERR_ARG; + } + + return err; +} + +/** + * Called from ip_input() if a new IGMP packet is received. 
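+ * The pbuf is consumed: every path through this function, including the
+ * length and checksum error paths, ends in pbuf_free(p).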
+ * + * @param p received igmp packet, p->payload pointing to the igmp header + * @param inp network interface on which the packet was received + * @param dest destination ip address of the igmp packet + */ +void +igmp_input(struct pbuf *p, struct netif *inp, const ip4_addr_t *dest) +{ + struct igmp_msg *igmp; + struct igmp_group *group; + struct igmp_group *groupref; + + IGMP_STATS_INC(igmp.recv); + + /* Note that the length CAN be greater than 8 but only 8 are used - All are included in the checksum */ + if (p->len < IGMP_MINLEN) { + pbuf_free(p); + IGMP_STATS_INC(igmp.lenerr); + LWIP_DEBUGF(IGMP_DEBUG, ("igmp_input: length error\n")); + return; + } + + LWIP_DEBUGF(IGMP_DEBUG, ("igmp_input: message from ")); + ip4_addr_debug_print_val(IGMP_DEBUG, ip4_current_header()->src); + LWIP_DEBUGF(IGMP_DEBUG, (" to address ")); + ip4_addr_debug_print_val(IGMP_DEBUG, ip4_current_header()->dest); + LWIP_DEBUGF(IGMP_DEBUG, (" on if %p\n", (void *)inp)); + + /* Now calculate and check the checksum */ + igmp = (struct igmp_msg *)p->payload; + if (inet_chksum(igmp, p->len)) { + pbuf_free(p); + IGMP_STATS_INC(igmp.chkerr); + LWIP_DEBUGF(IGMP_DEBUG, ("igmp_input: checksum error\n")); + return; + } + + /* Packet is ok so find an existing group */ + group = igmp_lookfor_group(inp, dest); /* use the destination IP address of incoming packet */ + + /* If group can be found or create... */ + if (!group) { + pbuf_free(p); + IGMP_STATS_INC(igmp.drop); + LWIP_DEBUGF(IGMP_DEBUG, ("igmp_input: IGMP frame not for us\n")); + return; + } + + /* NOW ACT ON THE INCOMING MESSAGE TYPE... */ + switch (igmp->igmp_msgtype) { + case IGMP_MEMB_QUERY: + /* IGMP_MEMB_QUERY to the "all systems" address ? */ + if ((ip4_addr_cmp(dest, &allsystems)) && ip4_addr_isany(&igmp->igmp_group_address)) { + /* THIS IS THE GENERAL QUERY */ + LWIP_DEBUGF(IGMP_DEBUG, ("igmp_input: General IGMP_MEMB_QUERY on \"ALL SYSTEMS\" address (224.0.0.1) [igmp_maxresp=%i]\n", (int)(igmp->igmp_maxresp))); + + if (igmp->igmp_maxresp == 0) { + IGMP_STATS_INC(igmp.rx_v1); + LWIP_DEBUGF(IGMP_DEBUG, ("igmp_input: got an all hosts query with time== 0 - this is V1 and not implemented - treat as v2\n")); + igmp->igmp_maxresp = IGMP_V1_DELAYING_MEMBER_TMR; + } else { + IGMP_STATS_INC(igmp.rx_general); + } + + groupref = netif_igmp_data(inp); + + /* Do not send messages on the all systems group address! */ + /* Skip the first group in the list, it is always the allsystems group added in igmp_start() */ + if (groupref != NULL) { + groupref = groupref->next; + } + + while (groupref) { + igmp_delaying_member(groupref, igmp->igmp_maxresp); + groupref = groupref->next; + } + } else { + /* IGMP_MEMB_QUERY to a specific group ? 
*/ + if (!ip4_addr_isany(&igmp->igmp_group_address)) { + LWIP_DEBUGF(IGMP_DEBUG, ("igmp_input: IGMP_MEMB_QUERY to a specific group ")); + ip4_addr_debug_print_val(IGMP_DEBUG, igmp->igmp_group_address); + if (ip4_addr_cmp(dest, &allsystems)) { + ip4_addr_t groupaddr; + LWIP_DEBUGF(IGMP_DEBUG, (" using \"ALL SYSTEMS\" address (224.0.0.1) [igmp_maxresp=%i]\n", (int)(igmp->igmp_maxresp))); + /* we first need to re-look for the group since we used dest last time */ + ip4_addr_copy(groupaddr, igmp->igmp_group_address); + group = igmp_lookfor_group(inp, &groupaddr); + } else { + LWIP_DEBUGF(IGMP_DEBUG, (" with the group address as destination [igmp_maxresp=%i]\n", (int)(igmp->igmp_maxresp))); + } + + if (group != NULL) { + IGMP_STATS_INC(igmp.rx_group); + igmp_delaying_member(group, igmp->igmp_maxresp); + } else { + IGMP_STATS_INC(igmp.drop); + } + } else { + IGMP_STATS_INC(igmp.proterr); + } + } + break; + case IGMP_V2_MEMB_REPORT: + LWIP_DEBUGF(IGMP_DEBUG, ("igmp_input: IGMP_V2_MEMB_REPORT\n")); + IGMP_STATS_INC(igmp.rx_report); + if (group->group_state == IGMP_GROUP_DELAYING_MEMBER) { + /* This is on a specific group we have already looked up */ + group->timer = 0; /* stopped */ + group->group_state = IGMP_GROUP_IDLE_MEMBER; + group->last_reporter_flag = 0; + } + break; + default: + LWIP_DEBUGF(IGMP_DEBUG, ("igmp_input: unexpected msg %d in state %d on group %p on if %p\n", + igmp->igmp_msgtype, group->group_state, (void *)&group, (void *)inp)); + IGMP_STATS_INC(igmp.proterr); + break; + } + + pbuf_free(p); + return; +} + +/** + * @ingroup igmp + * Join a group on one network interface. + * + * @param ifaddr ip address of the network interface which should join a new group + * @param groupaddr the ip address of the group which to join + * @return ERR_OK if group was joined on the netif(s), an err_t otherwise + */ +err_t +igmp_joingroup(const ip4_addr_t *ifaddr, const ip4_addr_t *groupaddr) +{ + err_t err = ERR_VAL; /* no matching interface */ + struct netif *netif; + + LWIP_ASSERT_CORE_LOCKED(); + + /* make sure it is multicast address */ + LWIP_ERROR("igmp_joingroup: attempt to join non-multicast address", ip4_addr_ismulticast(groupaddr), return ERR_VAL;); + LWIP_ERROR("igmp_joingroup: attempt to join allsystems address", (!ip4_addr_cmp(groupaddr, &allsystems)), return ERR_VAL;); + + /* loop through netif's */ + NETIF_FOREACH(netif) { + /* Should we join this interface ? */ + if ((netif->flags & NETIF_FLAG_IGMP) && ((ip4_addr_isany(ifaddr) || ip4_addr_cmp(netif_ip4_addr(netif), ifaddr)))) { + err = igmp_joingroup_netif(netif, groupaddr); + if (err != ERR_OK) { + /* Return an error even if some network interfaces are joined */ + /** @todo undo any other netif already joined */ + return err; + } + } + } + + return err; +} + +/** + * @ingroup igmp + * Join a group on one network interface. 
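+ * A minimal usage sketch (239.255.0.1 is only an example group address; the
+ * netif must have NETIF_FLAG_IGMP set and, like the other igmp_*() calls,
+ * this must run in the TCPIP thread):
+ *
+ *   ip4_addr_t group;
+ *   IP4_ADDR(&group, 239, 255, 0, 1);
+ *   err_t err = igmp_joingroup_netif(netif, &group);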
+ * + * @param netif the network interface which should join a new group + * @param groupaddr the ip address of the group which to join + * @return ERR_OK if group was joined on the netif, an err_t otherwise + */ +err_t +igmp_joingroup_netif(struct netif *netif, const ip4_addr_t *groupaddr) +{ + struct igmp_group *group; + + LWIP_ASSERT_CORE_LOCKED(); + + /* make sure it is multicast address */ + LWIP_ERROR("igmp_joingroup_netif: attempt to join non-multicast address", ip4_addr_ismulticast(groupaddr), return ERR_VAL;); + LWIP_ERROR("igmp_joingroup_netif: attempt to join allsystems address", (!ip4_addr_cmp(groupaddr, &allsystems)), return ERR_VAL;); + + /* make sure it is an igmp-enabled netif */ + LWIP_ERROR("igmp_joingroup_netif: attempt to join on non-IGMP netif", netif->flags & NETIF_FLAG_IGMP, return ERR_VAL;); + + /* find group or create a new one if not found */ + group = igmp_lookup_group(netif, groupaddr); + + if (group != NULL) { + /* This should create a new group, check the state to make sure */ + if (group->group_state != IGMP_GROUP_NON_MEMBER) { + LWIP_DEBUGF(IGMP_DEBUG, ("igmp_joingroup_netif: join to group not in state IGMP_GROUP_NON_MEMBER\n")); + } else { + /* OK - it was new group */ + LWIP_DEBUGF(IGMP_DEBUG, ("igmp_joingroup_netif: join to new group: ")); + ip4_addr_debug_print(IGMP_DEBUG, groupaddr); + LWIP_DEBUGF(IGMP_DEBUG, ("\n")); + + /* If first use of the group, allow the group at the MAC level */ + if ((group->use == 0) && (netif->igmp_mac_filter != NULL)) { + LWIP_DEBUGF(IGMP_DEBUG, ("igmp_joingroup_netif: igmp_mac_filter(ADD ")); + ip4_addr_debug_print(IGMP_DEBUG, groupaddr); + LWIP_DEBUGF(IGMP_DEBUG, (") on if %p\n", (void *)netif)); + netif->igmp_mac_filter(netif, groupaddr, NETIF_ADD_MAC_FILTER); + } + + IGMP_STATS_INC(igmp.tx_join); + igmp_send(netif, group, IGMP_V2_MEMB_REPORT); + + igmp_start_timer(group, IGMP_JOIN_DELAYING_MEMBER_TMR); + + /* Need to work out where this timer comes from */ + group->group_state = IGMP_GROUP_DELAYING_MEMBER; + } + /* Increment group use */ + group->use++; + /* Join on this interface */ + return ERR_OK; + } else { + LWIP_DEBUGF(IGMP_DEBUG, ("igmp_joingroup_netif: Not enough memory to join to group\n")); + return ERR_MEM; + } +} + +/** + * @ingroup igmp + * Leave a group on one network interface. + * + * @param ifaddr ip address of the network interface which should leave a group + * @param groupaddr the ip address of the group which to leave + * @return ERR_OK if group was left on the netif(s), an err_t otherwise + */ +err_t +igmp_leavegroup(const ip4_addr_t *ifaddr, const ip4_addr_t *groupaddr) +{ + err_t err = ERR_VAL; /* no matching interface */ + struct netif *netif; + + LWIP_ASSERT_CORE_LOCKED(); + + /* make sure it is multicast address */ + LWIP_ERROR("igmp_leavegroup: attempt to leave non-multicast address", ip4_addr_ismulticast(groupaddr), return ERR_VAL;); + LWIP_ERROR("igmp_leavegroup: attempt to leave allsystems address", (!ip4_addr_cmp(groupaddr, &allsystems)), return ERR_VAL;); + + /* loop through netif's */ + NETIF_FOREACH(netif) { + /* Should we leave this interface ? */ + if ((netif->flags & NETIF_FLAG_IGMP) && ((ip4_addr_isany(ifaddr) || ip4_addr_cmp(netif_ip4_addr(netif), ifaddr)))) { + err_t res = igmp_leavegroup_netif(netif, groupaddr); + if (err != ERR_OK) { + /* Store this result if we have not yet gotten a success */ + err = res; + } + } + } + + return err; +} + +/** + * @ingroup igmp + * Leave a group on one network interface. 
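+ * The counterpart of igmp_joingroup_netif(); for the example group above,
+ * roughly:
+ *
+ *   err_t err = igmp_leavegroup_netif(netif, &group);
+ *
+ * The group (and its MAC filter entry) is only torn down, and a leave report
+ * only sent if we were the last reporter, once its use count drops to zero.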
+ * + * @param netif the network interface which should leave a group + * @param groupaddr the ip address of the group which to leave + * @return ERR_OK if group was left on the netif, an err_t otherwise + */ +err_t +igmp_leavegroup_netif(struct netif *netif, const ip4_addr_t *groupaddr) +{ + struct igmp_group *group; + + LWIP_ASSERT_CORE_LOCKED(); + + /* make sure it is multicast address */ + LWIP_ERROR("igmp_leavegroup_netif: attempt to leave non-multicast address", ip4_addr_ismulticast(groupaddr), return ERR_VAL;); + LWIP_ERROR("igmp_leavegroup_netif: attempt to leave allsystems address", (!ip4_addr_cmp(groupaddr, &allsystems)), return ERR_VAL;); + + /* make sure it is an igmp-enabled netif */ + LWIP_ERROR("igmp_leavegroup_netif: attempt to leave on non-IGMP netif", netif->flags & NETIF_FLAG_IGMP, return ERR_VAL;); + + /* find group */ + group = igmp_lookfor_group(netif, groupaddr); + + if (group != NULL) { + /* Only send a leave if the flag is set according to the state diagram */ + LWIP_DEBUGF(IGMP_DEBUG, ("igmp_leavegroup_netif: Leaving group: ")); + ip4_addr_debug_print(IGMP_DEBUG, groupaddr); + LWIP_DEBUGF(IGMP_DEBUG, ("\n")); + + /* If there is no other use of the group */ + if (group->use <= 1) { + /* Remove the group from the list */ + igmp_remove_group(netif, group); + + /* If we are the last reporter for this group */ + if (group->last_reporter_flag) { + LWIP_DEBUGF(IGMP_DEBUG, ("igmp_leavegroup_netif: sending leaving group\n")); + IGMP_STATS_INC(igmp.tx_leave); + igmp_send(netif, group, IGMP_LEAVE_GROUP); + } + + /* Disable the group at the MAC level */ + if (netif->igmp_mac_filter != NULL) { + LWIP_DEBUGF(IGMP_DEBUG, ("igmp_leavegroup_netif: igmp_mac_filter(DEL ")); + ip4_addr_debug_print(IGMP_DEBUG, groupaddr); + LWIP_DEBUGF(IGMP_DEBUG, (") on if %p\n", (void *)netif)); + netif->igmp_mac_filter(netif, groupaddr, NETIF_DEL_MAC_FILTER); + } + + /* Free group struct */ + memp_free(MEMP_IGMP_GROUP, group); + } else { + /* Decrement group use */ + group->use--; + } + return ERR_OK; + } else { + LWIP_DEBUGF(IGMP_DEBUG, ("igmp_leavegroup_netif: not member of group\n")); + return ERR_VAL; + } +} + +/** + * The igmp timer function (both for NO_SYS=1 and =0) + * Should be called every IGMP_TMR_INTERVAL milliseconds (100 ms is default). + */ +void +igmp_tmr(void) +{ + struct netif *netif; + + NETIF_FOREACH(netif) { + struct igmp_group *group = netif_igmp_data(netif); + + while (group != NULL) { + if (group->timer > 0) { + group->timer--; + if (group->timer == 0) { + igmp_timeout(netif, group); + } + } + group = group->next; + } + } +} + +/** + * Called if a timeout for one group is reached. + * Sends a report for this group. 
+ * + * @param group an igmp_group for which a timeout is reached + */ +static void +igmp_timeout(struct netif *netif, struct igmp_group *group) +{ + /* If the state is IGMP_GROUP_DELAYING_MEMBER then we send a report for this group + (unless it is the allsystems group) */ + if ((group->group_state == IGMP_GROUP_DELAYING_MEMBER) && + (!(ip4_addr_cmp(&(group->group_address), &allsystems)))) { + LWIP_DEBUGF(IGMP_DEBUG, ("igmp_timeout: report membership for group with address ")); + ip4_addr_debug_print_val(IGMP_DEBUG, group->group_address); + LWIP_DEBUGF(IGMP_DEBUG, (" on if %p\n", (void *)netif)); + + group->group_state = IGMP_GROUP_IDLE_MEMBER; + + IGMP_STATS_INC(igmp.tx_report); + igmp_send(netif, group, IGMP_V2_MEMB_REPORT); + } +} + +/** + * Start a timer for an igmp group + * + * @param group the igmp_group for which to start a timer + * @param max_time the time in multiples of IGMP_TMR_INTERVAL (decrease with + * every call to igmp_tmr()) + */ +static void +igmp_start_timer(struct igmp_group *group, u8_t max_time) +{ +#ifdef LWIP_RAND + group->timer = (u16_t)(max_time > 2 ? (LWIP_RAND() % max_time) : 1); +#else /* LWIP_RAND */ + /* ATTENTION: use this only if absolutely necessary! */ + group->timer = max_time / 2; +#endif /* LWIP_RAND */ + + if (group->timer == 0) { + group->timer = 1; + } +} + +/** + * Delaying membership report for a group if necessary + * + * @param group the igmp_group for which "delaying" membership report + * @param maxresp query delay + */ +static void +igmp_delaying_member(struct igmp_group *group, u8_t maxresp) +{ + if ((group->group_state == IGMP_GROUP_IDLE_MEMBER) || + ((group->group_state == IGMP_GROUP_DELAYING_MEMBER) && + ((group->timer == 0) || (maxresp < group->timer)))) { + igmp_start_timer(group, maxresp); + group->group_state = IGMP_GROUP_DELAYING_MEMBER; + } +} + + +/** + * Sends an IP packet on a network interface. This function constructs the IP header + * and calculates the IP header checksum. If the source IP address is NULL, + * the IP address of the outgoing network interface is filled in as source address. + * + * @param p the packet to send (p->payload points to the data, e.g. next + protocol header; if dest == LWIP_IP_HDRINCL, p already includes an + IP header and p->payload points to that IP header) + * @param src the source IP address to send from (if src == IP4_ADDR_ANY, the + * IP address of the netif used to send is used as source address) + * @param dest the destination IP address to send the packet to + * @param netif the netif on which to send this packet + * @return ERR_OK if the packet was sent OK + * ERR_BUF if p doesn't have enough space for IP/LINK headers + * returns errors returned by netif->output + */ +static err_t +igmp_ip_output_if(struct pbuf *p, const ip4_addr_t *src, const ip4_addr_t *dest, struct netif *netif) +{ + /* This is the "router alert" option */ + u16_t ra[2]; + ra[0] = PP_HTONS(ROUTER_ALERT); + ra[1] = 0x0000; /* Router shall examine packet */ + IGMP_STATS_INC(igmp.xmit); + return ip4_output_if_opt(p, src, dest, IGMP_TTL, 0, IP_PROTO_IGMP, netif, ra, ROUTER_ALERTLEN); +} + +/** + * Send an igmp packet to a specific group. 
+ * + * @param group the group to which to send the packet + * @param type the type of igmp packet to send + */ +static void +igmp_send(struct netif *netif, struct igmp_group *group, u8_t type) +{ + struct pbuf *p = NULL; + struct igmp_msg *igmp = NULL; + ip4_addr_t src = *IP4_ADDR_ANY4; + ip4_addr_t *dest = NULL; + + /* IP header + "router alert" option + IGMP header */ + p = pbuf_alloc(PBUF_TRANSPORT, IGMP_MINLEN, PBUF_RAM); + + if (p) { + igmp = (struct igmp_msg *)p->payload; + LWIP_ASSERT("igmp_send: check that first pbuf can hold struct igmp_msg", + (p->len >= sizeof(struct igmp_msg))); + ip4_addr_copy(src, *netif_ip4_addr(netif)); + + if (type == IGMP_V2_MEMB_REPORT) { + dest = &(group->group_address); + ip4_addr_copy(igmp->igmp_group_address, group->group_address); + group->last_reporter_flag = 1; /* Remember we were the last to report */ + } else { + if (type == IGMP_LEAVE_GROUP) { + dest = &allrouters; + ip4_addr_copy(igmp->igmp_group_address, group->group_address); + } + } + + if ((type == IGMP_V2_MEMB_REPORT) || (type == IGMP_LEAVE_GROUP)) { + igmp->igmp_msgtype = type; + igmp->igmp_maxresp = 0; + igmp->igmp_checksum = 0; + igmp->igmp_checksum = inet_chksum(igmp, IGMP_MINLEN); + + igmp_ip_output_if(p, &src, dest, netif); + } + + pbuf_free(p); + } else { + LWIP_DEBUGF(IGMP_DEBUG, ("igmp_send: not enough memory for igmp_send\n")); + IGMP_STATS_INC(igmp.memerr); + } +} + +#endif /* LWIP_IPV4 && LWIP_IGMP */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/ipv4/ip4.c b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/ipv4/ip4.c new file mode 100644 index 0000000..00b09f9 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/ipv4/ip4.c @@ -0,0 +1,1132 @@ +/** + * @file + * This is the IPv4 layer implementation for incoming and outgoing IP traffic. + * + * @see ip_frag.c + * + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
+ * + * Author: Adam Dunkels + * + */ + +#include "lwip/opt.h" + +#if LWIP_IPV4 + +#include "lwip/ip.h" +#include "lwip/def.h" +#include "lwip/mem.h" +#include "lwip/ip4_frag.h" +#include "lwip/inet_chksum.h" +#include "lwip/netif.h" +#include "lwip/icmp.h" +#include "lwip/igmp.h" +#include "lwip/priv/raw_priv.h" +#include "lwip/udp.h" +#include "lwip/priv/tcp_priv.h" +#include "lwip/autoip.h" +#include "lwip/stats.h" +#include "lwip/prot/iana.h" + +#include + +#ifdef LWIP_HOOK_FILENAME +#include LWIP_HOOK_FILENAME +#endif + +/** Set this to 0 in the rare case of wanting to call an extra function to + * generate the IP checksum (in contrast to calculating it on-the-fly). */ +#ifndef LWIP_INLINE_IP_CHKSUM +#if LWIP_CHECKSUM_CTRL_PER_NETIF +#define LWIP_INLINE_IP_CHKSUM 0 +#else /* LWIP_CHECKSUM_CTRL_PER_NETIF */ +#define LWIP_INLINE_IP_CHKSUM 1 +#endif /* LWIP_CHECKSUM_CTRL_PER_NETIF */ +#endif + +#if LWIP_INLINE_IP_CHKSUM && CHECKSUM_GEN_IP +#define CHECKSUM_GEN_IP_INLINE 1 +#else +#define CHECKSUM_GEN_IP_INLINE 0 +#endif + +#if LWIP_DHCP || defined(LWIP_IP_ACCEPT_UDP_PORT) +#define IP_ACCEPT_LINK_LAYER_ADDRESSING 1 + +/** Some defines for DHCP to let link-layer-addressed packets through while the + * netif is down. + * To use this in your own application/protocol, define LWIP_IP_ACCEPT_UDP_PORT(port) + * to return 1 if the port is accepted and 0 if the port is not accepted. + */ +#if LWIP_DHCP && defined(LWIP_IP_ACCEPT_UDP_PORT) +/* accept DHCP client port and custom port */ +#define IP_ACCEPT_LINK_LAYER_ADDRESSED_PORT(port) (((port) == PP_NTOHS(LWIP_IANA_PORT_DHCP_CLIENT)) \ + || (LWIP_IP_ACCEPT_UDP_PORT(port))) +#elif defined(LWIP_IP_ACCEPT_UDP_PORT) /* LWIP_DHCP && defined(LWIP_IP_ACCEPT_UDP_PORT) */ +/* accept custom port only */ +#define IP_ACCEPT_LINK_LAYER_ADDRESSED_PORT(port) (LWIP_IP_ACCEPT_UDP_PORT(port)) +#else /* LWIP_DHCP && defined(LWIP_IP_ACCEPT_UDP_PORT) */ +/* accept DHCP client port only */ +#define IP_ACCEPT_LINK_LAYER_ADDRESSED_PORT(port) ((port) == PP_NTOHS(LWIP_IANA_PORT_DHCP_CLIENT)) +#endif /* LWIP_DHCP && defined(LWIP_IP_ACCEPT_UDP_PORT) */ + +#else /* LWIP_DHCP */ +#define IP_ACCEPT_LINK_LAYER_ADDRESSING 0 +#endif /* LWIP_DHCP */ + +/** The IP header ID of the next outgoing IP packet */ +static u16_t ip_id; + +#if LWIP_MULTICAST_TX_OPTIONS +/** The default netif used for multicast */ +static struct netif *ip4_default_multicast_netif; + +/** + * @ingroup ip4 + * Set a default netif for IPv4 multicast. */ +void +ip4_set_default_multicast_netif(struct netif *default_multicast_netif) +{ + ip4_default_multicast_netif = default_multicast_netif; +} +#endif /* LWIP_MULTICAST_TX_OPTIONS */ + +#ifdef LWIP_HOOK_IP4_ROUTE_SRC +/** + * Source based IPv4 routing must be fully implemented in + * LWIP_HOOK_IP4_ROUTE_SRC(). This function only provides the parameters. + */ +struct netif * +ip4_route_src(const ip4_addr_t *src, const ip4_addr_t *dest) +{ + if (src != NULL) { + /* when src==NULL, the hook is called from ip4_route(dest) */ + struct netif *netif = LWIP_HOOK_IP4_ROUTE_SRC(src, dest); + if (netif != NULL) { + return netif; + } + } + return ip4_route(dest); +} +#endif /* LWIP_HOOK_IP4_ROUTE_SRC */ + +/** + * Finds the appropriate network interface for a given IP address. It + * searches the list of network interfaces linearly. A match is found + * if the masked IP address of the network interface equals the masked + * IP address given to the function. 
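The matching rule described above boils down to comparing the destination and the interface address under the interface netmask. A self-contained sketch using the ip4_addr_netcmp() macro (the addresses are arbitrary illustration values, not taken from this patch):

#include "lwip/ip4_addr.h"

/* ip4_route() picks a netif when (dest & netmask) == (netif_addr & netmask). */
static int same_subnet_example(void)
{
  ip4_addr_t dest, ifaddr, mask;
  IP4_ADDR(&dest,   192, 168, 1, 42);
  IP4_ADDR(&ifaddr, 192, 168, 1, 10);
  IP4_ADDR(&mask,   255, 255, 255, 0);
  return ip4_addr_netcmp(&dest, &ifaddr, &mask);  /* nonzero: same subnet */
}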
+ * + * @param dest the destination IP address for which to find the route + * @return the netif on which to send to reach dest + */ +struct netif * +ip4_route(const ip4_addr_t *dest) +{ +#if !LWIP_SINGLE_NETIF + struct netif *netif; + + LWIP_ASSERT_CORE_LOCKED(); + +#if LWIP_MULTICAST_TX_OPTIONS + /* Use administratively selected interface for multicast by default */ + if (ip4_addr_ismulticast(dest) && ip4_default_multicast_netif) { + return ip4_default_multicast_netif; + } +#endif /* LWIP_MULTICAST_TX_OPTIONS */ + + /* bug #54569: in case LWIP_SINGLE_NETIF=1 and LWIP_DEBUGF() disabled, the following loop is optimized away */ + LWIP_UNUSED_ARG(dest); + + /* iterate through netifs */ + NETIF_FOREACH(netif) { + /* is the netif up, does it have a link and a valid address? */ + if (netif_is_up(netif) && netif_is_link_up(netif) && !ip4_addr_isany_val(*netif_ip4_addr(netif))) { + /* network mask matches? */ + if (ip4_addr_netcmp(dest, netif_ip4_addr(netif), netif_ip4_netmask(netif))) { + /* return netif on which to forward IP packet */ + return netif; + } + /* gateway matches on a non broadcast interface? (i.e. peer in a point to point interface) */ + if (((netif->flags & NETIF_FLAG_BROADCAST) == 0) && ip4_addr_cmp(dest, netif_ip4_gw(netif))) { + /* return netif on which to forward IP packet */ + return netif; + } + } + } + +#if LWIP_NETIF_LOOPBACK && !LWIP_HAVE_LOOPIF + /* loopif is disabled, looopback traffic is passed through any netif */ + if (ip4_addr_isloopback(dest)) { + /* don't check for link on loopback traffic */ + if (netif_default != NULL && netif_is_up(netif_default)) { + return netif_default; + } + /* default netif is not up, just use any netif for loopback traffic */ + NETIF_FOREACH(netif) { + if (netif_is_up(netif)) { + return netif; + } + } + return NULL; + } +#endif /* LWIP_NETIF_LOOPBACK && !LWIP_HAVE_LOOPIF */ + +#ifdef LWIP_HOOK_IP4_ROUTE_SRC + netif = LWIP_HOOK_IP4_ROUTE_SRC(NULL, dest); + if (netif != NULL) { + return netif; + } +#elif defined(LWIP_HOOK_IP4_ROUTE) + netif = LWIP_HOOK_IP4_ROUTE(dest); + if (netif != NULL) { + return netif; + } +#endif +#endif /* !LWIP_SINGLE_NETIF */ + + if ((netif_default == NULL) || !netif_is_up(netif_default) || !netif_is_link_up(netif_default) || + ip4_addr_isany_val(*netif_ip4_addr(netif_default)) || ip4_addr_isloopback(dest)) { + /* No matching netif found and default netif is not usable. + If this is not good enough for you, use LWIP_HOOK_IP4_ROUTE() */ + LWIP_DEBUGF(IP_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("ip4_route: No route to %"U16_F".%"U16_F".%"U16_F".%"U16_F"\n", + ip4_addr1_16(dest), ip4_addr2_16(dest), ip4_addr3_16(dest), ip4_addr4_16(dest))); + IP_STATS_INC(ip.rterr); + MIB2_STATS_INC(mib2.ipoutnoroutes); + return NULL; + } + + return netif_default; +} + +#if IP_FORWARD +/** + * Determine whether an IP address is in a reserved set of addresses + * that may not be forwarded, or whether datagrams to that destination + * may be forwarded. 
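Note that before the subnet walk, ip4_route() honors an administratively selected multicast interface. On a board with more than one netif, a port can pin multicast traffic (e.g. RTPS discovery) to the Ethernet interface; a sketch, assuming LWIP_MULTICAST_TX_OPTIONS=1 and that 'gnetif' is the CubeMX-generated netif (an assumption, not part of this patch):

#include "lwip/ip4.h"
#include "lwip/netif.h"

extern struct netif gnetif;   /* hypothetical: the board's Ethernet netif */

static void pin_multicast_netif(void)
{
  /* All multicast output now leaves through gnetif, regardless of subnet match. */
  ip4_set_default_multicast_netif(&gnetif);
}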
+ * @param p the packet to forward + * @return 1: can forward 0: discard + */ +static int +ip4_canforward(struct pbuf *p) +{ + u32_t addr = lwip_htonl(ip4_addr_get_u32(ip4_current_dest_addr())); + +#ifdef LWIP_HOOK_IP4_CANFORWARD + int ret = LWIP_HOOK_IP4_CANFORWARD(p, addr); + if (ret >= 0) { + return ret; + } +#endif /* LWIP_HOOK_IP4_CANFORWARD */ + + if (p->flags & PBUF_FLAG_LLBCAST) { + /* don't route link-layer broadcasts */ + return 0; + } + if ((p->flags & PBUF_FLAG_LLMCAST) || IP_MULTICAST(addr)) { + /* don't route link-layer multicasts (use LWIP_HOOK_IP4_CANFORWARD instead) */ + return 0; + } + if (IP_EXPERIMENTAL(addr)) { + return 0; + } + if (IP_CLASSA(addr)) { + u32_t net = addr & IP_CLASSA_NET; + if ((net == 0) || (net == ((u32_t)IP_LOOPBACKNET << IP_CLASSA_NSHIFT))) { + /* don't route loopback packets */ + return 0; + } + } + return 1; +} + +/** + * Forwards an IP packet. It finds an appropriate route for the + * packet, decrements the TTL value of the packet, adjusts the + * checksum and outputs the packet on the appropriate interface. + * + * @param p the packet to forward (p->payload points to IP header) + * @param iphdr the IP header of the input packet + * @param inp the netif on which this packet was received + */ +static void +ip4_forward(struct pbuf *p, struct ip_hdr *iphdr, struct netif *inp) +{ + struct netif *netif; + + PERF_START; + LWIP_UNUSED_ARG(inp); + + if (!ip4_canforward(p)) { + goto return_noroute; + } + + /* RFC3927 2.7: do not forward link-local addresses */ + if (ip4_addr_islinklocal(ip4_current_dest_addr())) { + LWIP_DEBUGF(IP_DEBUG, ("ip4_forward: not forwarding LLA %"U16_F".%"U16_F".%"U16_F".%"U16_F"\n", + ip4_addr1_16(ip4_current_dest_addr()), ip4_addr2_16(ip4_current_dest_addr()), + ip4_addr3_16(ip4_current_dest_addr()), ip4_addr4_16(ip4_current_dest_addr()))); + goto return_noroute; + } + + /* Find network interface where to forward this IP packet to. */ + netif = ip4_route_src(ip4_current_src_addr(), ip4_current_dest_addr()); + if (netif == NULL) { + LWIP_DEBUGF(IP_DEBUG, ("ip4_forward: no forwarding route for %"U16_F".%"U16_F".%"U16_F".%"U16_F" found\n", + ip4_addr1_16(ip4_current_dest_addr()), ip4_addr2_16(ip4_current_dest_addr()), + ip4_addr3_16(ip4_current_dest_addr()), ip4_addr4_16(ip4_current_dest_addr()))); + /* @todo: send ICMP_DUR_NET? */ + goto return_noroute; + } +#if !IP_FORWARD_ALLOW_TX_ON_RX_NETIF + /* Do not forward packets onto the same network interface on which + * they arrived. */ + if (netif == inp) { + LWIP_DEBUGF(IP_DEBUG, ("ip4_forward: not bouncing packets back on incoming interface.\n")); + goto return_noroute; + } +#endif /* IP_FORWARD_ALLOW_TX_ON_RX_NETIF */ + + /* decrement TTL */ + IPH_TTL_SET(iphdr, IPH_TTL(iphdr) - 1); + /* send ICMP if TTL == 0 */ + if (IPH_TTL(iphdr) == 0) { + MIB2_STATS_INC(mib2.ipinhdrerrors); +#if LWIP_ICMP + /* Don't send ICMP messages in response to ICMP messages */ + if (IPH_PROTO(iphdr) != IP_PROTO_ICMP) { + icmp_time_exceeded(p, ICMP_TE_TTL); + } +#endif /* LWIP_ICMP */ + return; + } + + /* Incrementally update the IP checksum. 
*/ + if (IPH_CHKSUM(iphdr) >= PP_HTONS(0xffffU - 0x100)) { + IPH_CHKSUM_SET(iphdr, (u16_t)(IPH_CHKSUM(iphdr) + PP_HTONS(0x100) + 1)); + } else { + IPH_CHKSUM_SET(iphdr, (u16_t)(IPH_CHKSUM(iphdr) + PP_HTONS(0x100))); + } + + LWIP_DEBUGF(IP_DEBUG, ("ip4_forward: forwarding packet to %"U16_F".%"U16_F".%"U16_F".%"U16_F"\n", + ip4_addr1_16(ip4_current_dest_addr()), ip4_addr2_16(ip4_current_dest_addr()), + ip4_addr3_16(ip4_current_dest_addr()), ip4_addr4_16(ip4_current_dest_addr()))); + + IP_STATS_INC(ip.fw); + MIB2_STATS_INC(mib2.ipforwdatagrams); + IP_STATS_INC(ip.xmit); + + PERF_STOP("ip4_forward"); + /* don't fragment if interface has mtu set to 0 [loopif] */ + if (netif->mtu && (p->tot_len > netif->mtu)) { + if ((IPH_OFFSET(iphdr) & PP_NTOHS(IP_DF)) == 0) { +#if IP_FRAG + ip4_frag(p, netif, ip4_current_dest_addr()); +#else /* IP_FRAG */ + /* @todo: send ICMP Destination Unreachable code 13 "Communication administratively prohibited"? */ +#endif /* IP_FRAG */ + } else { +#if LWIP_ICMP + /* send ICMP Destination Unreachable code 4: "Fragmentation Needed and DF Set" */ + icmp_dest_unreach(p, ICMP_DUR_FRAG); +#endif /* LWIP_ICMP */ + } + return; + } + /* transmit pbuf on chosen interface */ + netif->output(netif, p, ip4_current_dest_addr()); + return; +return_noroute: + MIB2_STATS_INC(mib2.ipoutnoroutes); +} +#endif /* IP_FORWARD */ + +/** Return true if the current input packet should be accepted on this netif */ +static int +ip4_input_accept(struct netif *netif) +{ + LWIP_DEBUGF(IP_DEBUG, ("ip_input: iphdr->dest 0x%"X32_F" netif->ip_addr 0x%"X32_F" (0x%"X32_F", 0x%"X32_F", 0x%"X32_F")\n", + ip4_addr_get_u32(ip4_current_dest_addr()), ip4_addr_get_u32(netif_ip4_addr(netif)), + ip4_addr_get_u32(ip4_current_dest_addr()) & ip4_addr_get_u32(netif_ip4_netmask(netif)), + ip4_addr_get_u32(netif_ip4_addr(netif)) & ip4_addr_get_u32(netif_ip4_netmask(netif)), + ip4_addr_get_u32(ip4_current_dest_addr()) & ~ip4_addr_get_u32(netif_ip4_netmask(netif)))); + + /* interface is up and configured? */ + if ((netif_is_up(netif)) && (!ip4_addr_isany_val(*netif_ip4_addr(netif)))) { + /* unicast to this interface address? */ + if (ip4_addr_cmp(ip4_current_dest_addr(), netif_ip4_addr(netif)) || + /* or broadcast on this interface network address? */ + ip4_addr_isbroadcast(ip4_current_dest_addr(), netif) +#if LWIP_NETIF_LOOPBACK && !LWIP_HAVE_LOOPIF + || (ip4_addr_get_u32(ip4_current_dest_addr()) == PP_HTONL(IPADDR_LOOPBACK)) +#endif /* LWIP_NETIF_LOOPBACK && !LWIP_HAVE_LOOPIF */ + ) { + LWIP_DEBUGF(IP_DEBUG, ("ip4_input: packet accepted on interface %c%c\n", + netif->name[0], netif->name[1])); + /* accept on this netif */ + return 1; + } +#if LWIP_AUTOIP + /* connections to link-local addresses must persist after changing + the netif's address (RFC3927 ch. 1.9) */ + if (autoip_accept_packet(netif, ip4_current_dest_addr())) { + LWIP_DEBUGF(IP_DEBUG, ("ip4_input: LLA packet accepted on interface %c%c\n", + netif->name[0], netif->name[1])); + /* accept on this netif */ + return 1; + } +#endif /* LWIP_AUTOIP */ + } + return 0; +} + +/** + * This function is called by the network interface device driver when + * an IP packet is received. The function does the basic checks of the + * IP header such as packet size being at least larger than the header + * size etc. If the packet was not destined for us, the packet is + * forwarded (using ip_forward). The IP checksum is always checked. + * + * Finally, the packet is sent to the upper layer protocol input function. 
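As a reminder of how packets arrive here: the Ethernet driver never calls ip4_input() directly; it hands each frame to netif->input (ethernet_input or tcpip_input), which dispatches IPv4 frames to this function. A polling-mode sketch ('low_level_input' is a placeholder for the driver's own receive helper, not defined in this patch):

#include "lwip/netif.h"
#include "lwip/pbuf.h"

extern struct pbuf *low_level_input(struct netif *netif);  /* hypothetical driver helper */

static void rx_poll_example(struct netif *netif)
{
  struct pbuf *p = low_level_input(netif);
  if (p != NULL) {
    if (netif->input(p, netif) != ERR_OK) {   /* ends up in ip4_input() for IPv4 frames */
      pbuf_free(p);                           /* input() did not take ownership */
    }
  }
}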
+ * + * @param p the received IP packet (p->payload points to IP header) + * @param inp the netif on which this packet was received + * @return ERR_OK if the packet was processed (could return ERR_* if it wasn't + * processed, but currently always returns ERR_OK) + */ +err_t +ip4_input(struct pbuf *p, struct netif *inp) +{ + const struct ip_hdr *iphdr; + struct netif *netif; + u16_t iphdr_hlen; + u16_t iphdr_len; +#if IP_ACCEPT_LINK_LAYER_ADDRESSING || LWIP_IGMP + int check_ip_src = 1; +#endif /* IP_ACCEPT_LINK_LAYER_ADDRESSING || LWIP_IGMP */ +#if LWIP_RAW + raw_input_state_t raw_status; +#endif /* LWIP_RAW */ + + LWIP_ASSERT_CORE_LOCKED(); + + IP_STATS_INC(ip.recv); + MIB2_STATS_INC(mib2.ipinreceives); + + /* identify the IP header */ + iphdr = (struct ip_hdr *)p->payload; + if (IPH_V(iphdr) != 4) { + LWIP_DEBUGF(IP_DEBUG | LWIP_DBG_LEVEL_WARNING, ("IP packet dropped due to bad version number %"U16_F"\n", (u16_t)IPH_V(iphdr))); + ip4_debug_print(p); + pbuf_free(p); + IP_STATS_INC(ip.err); + IP_STATS_INC(ip.drop); + MIB2_STATS_INC(mib2.ipinhdrerrors); + return ERR_OK; + } + +#ifdef LWIP_HOOK_IP4_INPUT + if (LWIP_HOOK_IP4_INPUT(p, inp)) { + /* the packet has been eaten */ + return ERR_OK; + } +#endif + + /* obtain IP header length in bytes */ + iphdr_hlen = IPH_HL_BYTES(iphdr); + /* obtain ip length in bytes */ + iphdr_len = lwip_ntohs(IPH_LEN(iphdr)); + + /* Trim pbuf. This is especially required for packets < 60 bytes. */ + if (iphdr_len < p->tot_len) { + pbuf_realloc(p, iphdr_len); + } + + /* header length exceeds first pbuf length, or ip length exceeds total pbuf length? */ + if ((iphdr_hlen > p->len) || (iphdr_len > p->tot_len) || (iphdr_hlen < IP_HLEN)) { + if (iphdr_hlen < IP_HLEN) { + LWIP_DEBUGF(IP_DEBUG | LWIP_DBG_LEVEL_SERIOUS, + ("ip4_input: short IP header (%"U16_F" bytes) received, IP packet dropped\n", iphdr_hlen)); + } + if (iphdr_hlen > p->len) { + LWIP_DEBUGF(IP_DEBUG | LWIP_DBG_LEVEL_SERIOUS, + ("IP header (len %"U16_F") does not fit in first pbuf (len %"U16_F"), IP packet dropped.\n", + iphdr_hlen, p->len)); + } + if (iphdr_len > p->tot_len) { + LWIP_DEBUGF(IP_DEBUG | LWIP_DBG_LEVEL_SERIOUS, + ("IP (len %"U16_F") is longer than pbuf (len %"U16_F"), IP packet dropped.\n", + iphdr_len, p->tot_len)); + } + /* free (drop) packet pbufs */ + pbuf_free(p); + IP_STATS_INC(ip.lenerr); + IP_STATS_INC(ip.drop); + MIB2_STATS_INC(mib2.ipindiscards); + return ERR_OK; + } + + /* verify checksum */ +#if CHECKSUM_CHECK_IP + IF__NETIF_CHECKSUM_ENABLED(inp, NETIF_CHECKSUM_CHECK_IP) { + if (inet_chksum(iphdr, iphdr_hlen) != 0) { + + LWIP_DEBUGF(IP_DEBUG | LWIP_DBG_LEVEL_SERIOUS, + ("Checksum (0x%"X16_F") failed, IP packet dropped.\n", inet_chksum(iphdr, iphdr_hlen))); + ip4_debug_print(p); + pbuf_free(p); + IP_STATS_INC(ip.chkerr); + IP_STATS_INC(ip.drop); + MIB2_STATS_INC(mib2.ipinhdrerrors); + return ERR_OK; + } + } +#endif + + /* copy IP addresses to aligned ip_addr_t */ + ip_addr_copy_from_ip4(ip_data.current_iphdr_dest, iphdr->dest); + ip_addr_copy_from_ip4(ip_data.current_iphdr_src, iphdr->src); + + /* match packet against an interface, i.e. is this packet for us? 
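The checksum test above relies on the usual one's-complement property: a header whose checksum field is filled in correctly sums to zero over its own length. A minimal restatement as a helper (illustrative only):

#include "lwip/inet_chksum.h"
#include "lwip/prot/ip4.h"

static int ip_header_checksum_ok(const struct ip_hdr *iphdr)
{
  /* Same check as ip4_input(): the sum over IPH_HL_BYTES(iphdr) bytes must be 0. */
  return inet_chksum(iphdr, IPH_HL_BYTES(iphdr)) == 0;
}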
*/ + if (ip4_addr_ismulticast(ip4_current_dest_addr())) { +#if LWIP_IGMP + if ((inp->flags & NETIF_FLAG_IGMP) && (igmp_lookfor_group(inp, ip4_current_dest_addr()))) { + /* IGMP snooping switches need 0.0.0.0 to be allowed as source address (RFC 4541) */ + ip4_addr_t allsystems; + IP4_ADDR(&allsystems, 224, 0, 0, 1); + if (ip4_addr_cmp(ip4_current_dest_addr(), &allsystems) && + ip4_addr_isany(ip4_current_src_addr())) { + check_ip_src = 0; + } + netif = inp; + } else { + netif = NULL; + } +#else /* LWIP_IGMP */ + if ((netif_is_up(inp)) && (!ip4_addr_isany_val(*netif_ip4_addr(inp)))) { + netif = inp; + } else { + netif = NULL; + } +#endif /* LWIP_IGMP */ + } else { + /* start trying with inp. if that's not acceptable, start walking the + list of configured netifs. */ + if (ip4_input_accept(inp)) { + netif = inp; + } else { + netif = NULL; +#if !LWIP_NETIF_LOOPBACK || LWIP_HAVE_LOOPIF + /* Packets sent to the loopback address must not be accepted on an + * interface that does not have the loopback address assigned to it, + * unless a non-loopback interface is used for loopback traffic. */ + if (!ip4_addr_isloopback(ip4_current_dest_addr())) +#endif /* !LWIP_NETIF_LOOPBACK || LWIP_HAVE_LOOPIF */ + { +#if !LWIP_SINGLE_NETIF + NETIF_FOREACH(netif) { + if (netif == inp) { + /* we checked that before already */ + continue; + } + if (ip4_input_accept(netif)) { + break; + } + } +#endif /* !LWIP_SINGLE_NETIF */ + } + } + } + +#if IP_ACCEPT_LINK_LAYER_ADDRESSING + /* Pass DHCP messages regardless of destination address. DHCP traffic is addressed + * using link layer addressing (such as Ethernet MAC) so we must not filter on IP. + * According to RFC 1542 section 3.1.1, referred by RFC 2131). + * + * If you want to accept private broadcast communication while a netif is down, + * define LWIP_IP_ACCEPT_UDP_PORT(dst_port), e.g.: + * + * #define LWIP_IP_ACCEPT_UDP_PORT(dst_port) ((dst_port) == PP_NTOHS(12345)) + */ + if (netif == NULL) { + /* remote port is DHCP server? */ + if (IPH_PROTO(iphdr) == IP_PROTO_UDP) { + const struct udp_hdr *udphdr = (const struct udp_hdr *)((const u8_t *)iphdr + iphdr_hlen); + LWIP_DEBUGF(IP_DEBUG | LWIP_DBG_TRACE, ("ip4_input: UDP packet to DHCP client port %"U16_F"\n", + lwip_ntohs(udphdr->dest))); + if (IP_ACCEPT_LINK_LAYER_ADDRESSED_PORT(udphdr->dest)) { + LWIP_DEBUGF(IP_DEBUG | LWIP_DBG_TRACE, ("ip4_input: DHCP packet accepted.\n")); + netif = inp; + check_ip_src = 0; + } + } + } +#endif /* IP_ACCEPT_LINK_LAYER_ADDRESSING */ + + /* broadcast or multicast packet source address? Compliant with RFC 1122: 3.2.1.3 */ +#if LWIP_IGMP || IP_ACCEPT_LINK_LAYER_ADDRESSING + if (check_ip_src +#if IP_ACCEPT_LINK_LAYER_ADDRESSING + /* DHCP servers need 0.0.0.0 to be allowed as source address (RFC 1.1.2.2: 3.2.1.3/a) */ + && !ip4_addr_isany_val(*ip4_current_src_addr()) +#endif /* IP_ACCEPT_LINK_LAYER_ADDRESSING */ + ) +#endif /* LWIP_IGMP || IP_ACCEPT_LINK_LAYER_ADDRESSING */ + { + if ((ip4_addr_isbroadcast(ip4_current_src_addr(), inp)) || + (ip4_addr_ismulticast(ip4_current_src_addr()))) { + /* packet source is not valid */ + LWIP_DEBUGF(IP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_WARNING, ("ip4_input: packet source is not valid.\n")); + /* free (drop) packet pbufs */ + pbuf_free(p); + IP_STATS_INC(ip.drop); + MIB2_STATS_INC(mib2.ipinaddrerrors); + MIB2_STATS_INC(mib2.ipindiscards); + return ERR_OK; + } + } + + /* packet not for us? 
*/ + if (netif == NULL) { + /* packet not for us, route or discard */ + LWIP_DEBUGF(IP_DEBUG | LWIP_DBG_TRACE, ("ip4_input: packet not for us.\n")); +#if IP_FORWARD + /* non-broadcast packet? */ + if (!ip4_addr_isbroadcast(ip4_current_dest_addr(), inp)) { + /* try to forward IP packet on (other) interfaces */ + ip4_forward(p, (struct ip_hdr *)p->payload, inp); + } else +#endif /* IP_FORWARD */ + { + IP_STATS_INC(ip.drop); + MIB2_STATS_INC(mib2.ipinaddrerrors); + MIB2_STATS_INC(mib2.ipindiscards); + } + pbuf_free(p); + return ERR_OK; + } + /* packet consists of multiple fragments? */ + if ((IPH_OFFSET(iphdr) & PP_HTONS(IP_OFFMASK | IP_MF)) != 0) { +#if IP_REASSEMBLY /* packet fragment reassembly code present? */ + LWIP_DEBUGF(IP_DEBUG, ("IP packet is a fragment (id=0x%04"X16_F" tot_len=%"U16_F" len=%"U16_F" MF=%"U16_F" offset=%"U16_F"), calling ip4_reass()\n", + lwip_ntohs(IPH_ID(iphdr)), p->tot_len, lwip_ntohs(IPH_LEN(iphdr)), (u16_t)!!(IPH_OFFSET(iphdr) & PP_HTONS(IP_MF)), (u16_t)((lwip_ntohs(IPH_OFFSET(iphdr)) & IP_OFFMASK) * 8))); + /* reassemble the packet*/ + p = ip4_reass(p); + /* packet not fully reassembled yet? */ + if (p == NULL) { + return ERR_OK; + } + iphdr = (const struct ip_hdr *)p->payload; +#else /* IP_REASSEMBLY == 0, no packet fragment reassembly code present */ + pbuf_free(p); + LWIP_DEBUGF(IP_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("IP packet dropped since it was fragmented (0x%"X16_F") (while IP_REASSEMBLY == 0).\n", + lwip_ntohs(IPH_OFFSET(iphdr)))); + IP_STATS_INC(ip.opterr); + IP_STATS_INC(ip.drop); + /* unsupported protocol feature */ + MIB2_STATS_INC(mib2.ipinunknownprotos); + return ERR_OK; +#endif /* IP_REASSEMBLY */ + } + +#if IP_OPTIONS_ALLOWED == 0 /* no support for IP options in the IP header? */ + +#if LWIP_IGMP + /* there is an extra "router alert" option in IGMP messages which we allow for but do not police */ + if ((iphdr_hlen > IP_HLEN) && (IPH_PROTO(iphdr) != IP_PROTO_IGMP)) { +#else + if (iphdr_hlen > IP_HLEN) { +#endif /* LWIP_IGMP */ + LWIP_DEBUGF(IP_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("IP packet dropped since there were IP options (while IP_OPTIONS_ALLOWED == 0).\n")); + pbuf_free(p); + IP_STATS_INC(ip.opterr); + IP_STATS_INC(ip.drop); + /* unsupported protocol feature */ + MIB2_STATS_INC(mib2.ipinunknownprotos); + return ERR_OK; + } +#endif /* IP_OPTIONS_ALLOWED == 0 */ + + /* send to upper layers */ + LWIP_DEBUGF(IP_DEBUG, ("ip4_input: \n")); + ip4_debug_print(p); + LWIP_DEBUGF(IP_DEBUG, ("ip4_input: p->len %"U16_F" p->tot_len %"U16_F"\n", p->len, p->tot_len)); + + ip_data.current_netif = netif; + ip_data.current_input_netif = inp; + ip_data.current_ip4_header = iphdr; + ip_data.current_ip_header_tot_len = IPH_HL_BYTES(iphdr); + +#if LWIP_RAW + /* raw input did not eat the packet? */ + raw_status = raw_input(p, inp); + if (raw_status != RAW_INPUT_EATEN) +#endif /* LWIP_RAW */ + { + pbuf_remove_header(p, iphdr_hlen); /* Move to payload, no check necessary. 
*/ + + switch (IPH_PROTO(iphdr)) { +#if LWIP_UDP + case IP_PROTO_UDP: +#if LWIP_UDPLITE + case IP_PROTO_UDPLITE: +#endif /* LWIP_UDPLITE */ + MIB2_STATS_INC(mib2.ipindelivers); + udp_input(p, inp); + break; +#endif /* LWIP_UDP */ +#if LWIP_TCP + case IP_PROTO_TCP: + MIB2_STATS_INC(mib2.ipindelivers); + tcp_input(p, inp); + break; +#endif /* LWIP_TCP */ +#if LWIP_ICMP + case IP_PROTO_ICMP: + MIB2_STATS_INC(mib2.ipindelivers); + icmp_input(p, inp); + break; +#endif /* LWIP_ICMP */ +#if LWIP_IGMP + case IP_PROTO_IGMP: + igmp_input(p, inp, ip4_current_dest_addr()); + break; +#endif /* LWIP_IGMP */ + default: +#if LWIP_RAW + if (raw_status == RAW_INPUT_DELIVERED) { + MIB2_STATS_INC(mib2.ipindelivers); + } else +#endif /* LWIP_RAW */ + { +#if LWIP_ICMP + /* send ICMP destination protocol unreachable unless is was a broadcast */ + if (!ip4_addr_isbroadcast(ip4_current_dest_addr(), netif) && + !ip4_addr_ismulticast(ip4_current_dest_addr())) { + pbuf_header_force(p, (s16_t)iphdr_hlen); /* Move to ip header, no check necessary. */ + icmp_dest_unreach(p, ICMP_DUR_PROTO); + } +#endif /* LWIP_ICMP */ + + LWIP_DEBUGF(IP_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("Unsupported transport protocol %"U16_F"\n", (u16_t)IPH_PROTO(iphdr))); + + IP_STATS_INC(ip.proterr); + IP_STATS_INC(ip.drop); + MIB2_STATS_INC(mib2.ipinunknownprotos); + } + pbuf_free(p); + break; + } + } + + /* @todo: this is not really necessary... */ + ip_data.current_netif = NULL; + ip_data.current_input_netif = NULL; + ip_data.current_ip4_header = NULL; + ip_data.current_ip_header_tot_len = 0; + ip4_addr_set_any(ip4_current_src_addr()); + ip4_addr_set_any(ip4_current_dest_addr()); + + return ERR_OK; +} + +/** + * Sends an IP packet on a network interface. This function constructs + * the IP header and calculates the IP header checksum. If the source + * IP address is NULL, the IP address of the outgoing network + * interface is filled in as source address. + * If the destination IP address is LWIP_IP_HDRINCL, p is assumed to already + * include an IP header and p->payload points to it instead of the data. + * + * @param p the packet to send (p->payload points to the data, e.g. 
next + protocol header; if dest == LWIP_IP_HDRINCL, p already includes an + IP header and p->payload points to that IP header) + * @param src the source IP address to send from (if src == IP4_ADDR_ANY, the + * IP address of the netif used to send is used as source address) + * @param dest the destination IP address to send the packet to + * @param ttl the TTL value to be set in the IP header + * @param tos the TOS value to be set in the IP header + * @param proto the PROTOCOL to be set in the IP header + * @param netif the netif on which to send this packet + * @return ERR_OK if the packet was sent OK + * ERR_BUF if p doesn't have enough space for IP/LINK headers + * returns errors returned by netif->output + * + * @note ip_id: RFC791 "some host may be able to simply use + * unique identifiers independent of destination" + */ +err_t +ip4_output_if(struct pbuf *p, const ip4_addr_t *src, const ip4_addr_t *dest, + u8_t ttl, u8_t tos, + u8_t proto, struct netif *netif) +{ +#if IP_OPTIONS_SEND + return ip4_output_if_opt(p, src, dest, ttl, tos, proto, netif, NULL, 0); +} + +/** + * Same as ip_output_if() but with the possibility to include IP options: + * + * @ param ip_options pointer to the IP options, copied into the IP header + * @ param optlen length of ip_options + */ +err_t +ip4_output_if_opt(struct pbuf *p, const ip4_addr_t *src, const ip4_addr_t *dest, + u8_t ttl, u8_t tos, u8_t proto, struct netif *netif, void *ip_options, + u16_t optlen) +{ +#endif /* IP_OPTIONS_SEND */ + const ip4_addr_t *src_used = src; + if (dest != LWIP_IP_HDRINCL) { + if (ip4_addr_isany(src)) { + src_used = netif_ip4_addr(netif); + } + } + +#if IP_OPTIONS_SEND + return ip4_output_if_opt_src(p, src_used, dest, ttl, tos, proto, netif, + ip_options, optlen); +#else /* IP_OPTIONS_SEND */ + return ip4_output_if_src(p, src_used, dest, ttl, tos, proto, netif); +#endif /* IP_OPTIONS_SEND */ +} + +/** + * Same as ip_output_if() but 'src' address is not replaced by netif address + * when it is 'any'. + */ +err_t +ip4_output_if_src(struct pbuf *p, const ip4_addr_t *src, const ip4_addr_t *dest, + u8_t ttl, u8_t tos, + u8_t proto, struct netif *netif) +{ +#if IP_OPTIONS_SEND + return ip4_output_if_opt_src(p, src, dest, ttl, tos, proto, netif, NULL, 0); +} + +/** + * Same as ip_output_if_opt() but 'src' address is not replaced by netif address + * when it is 'any'. + */ +err_t +ip4_output_if_opt_src(struct pbuf *p, const ip4_addr_t *src, const ip4_addr_t *dest, + u8_t ttl, u8_t tos, u8_t proto, struct netif *netif, void *ip_options, + u16_t optlen) +{ +#endif /* IP_OPTIONS_SEND */ + struct ip_hdr *iphdr; + ip4_addr_t dest_addr; +#if CHECKSUM_GEN_IP_INLINE + u32_t chk_sum = 0; +#endif /* CHECKSUM_GEN_IP_INLINE */ + + LWIP_ASSERT_CORE_LOCKED(); + LWIP_IP_CHECK_PBUF_REF_COUNT_FOR_TX(p); + + MIB2_STATS_INC(mib2.ipoutrequests); + + /* Should the IP header be generated or is it already included in p? 
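In normal operation the UDP/TCP/RAW layers call into this family; for completeness, a hand-rolled sketch of pushing a payload through ip4_output_if() (the destination, protocol number and payload handling are illustrative, not part of this patch):

#include "lwip/ip.h"
#include "lwip/pbuf.h"

static err_t send_raw_example(struct netif *netif, const void *data, u16_t len)
{
  ip4_addr_t dst;
  err_t err;
  struct pbuf *p = pbuf_alloc(PBUF_IP, len, PBUF_RAM);  /* leaves headroom for the IP header */
  if (p == NULL) {
    return ERR_MEM;
  }
  if (pbuf_take(p, data, len) != ERR_OK) {
    pbuf_free(p);
    return ERR_VAL;
  }
  IP4_ADDR(&dst, 192, 168, 1, 20);
  /* IP4_ADDR_ANY4 as source -> the netif's own address is filled in (see above);
     253 is one of the protocol numbers reserved for experimentation. */
  err = ip4_output_if(p, IP4_ADDR_ANY4, &dst, IP_DEFAULT_TTL, 0, 253, netif);
  pbuf_free(p);   /* the caller keeps ownership of p */
  return err;
}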
*/ + if (dest != LWIP_IP_HDRINCL) { + u16_t ip_hlen = IP_HLEN; +#if IP_OPTIONS_SEND + u16_t optlen_aligned = 0; + if (optlen != 0) { +#if CHECKSUM_GEN_IP_INLINE + int i; +#endif /* CHECKSUM_GEN_IP_INLINE */ + if (optlen > (IP_HLEN_MAX - IP_HLEN)) { + /* optlen too long */ + LWIP_DEBUGF(IP_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("ip4_output_if_opt: optlen too long\n")); + IP_STATS_INC(ip.err); + MIB2_STATS_INC(mib2.ipoutdiscards); + return ERR_VAL; + } + /* round up to a multiple of 4 */ + optlen_aligned = (u16_t)((optlen + 3) & ~3); + ip_hlen = (u16_t)(ip_hlen + optlen_aligned); + /* First write in the IP options */ + if (pbuf_add_header(p, optlen_aligned)) { + LWIP_DEBUGF(IP_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("ip4_output_if_opt: not enough room for IP options in pbuf\n")); + IP_STATS_INC(ip.err); + MIB2_STATS_INC(mib2.ipoutdiscards); + return ERR_BUF; + } + MEMCPY(p->payload, ip_options, optlen); + if (optlen < optlen_aligned) { + /* zero the remaining bytes */ + memset(((char *)p->payload) + optlen, 0, (size_t)(optlen_aligned - optlen)); + } +#if CHECKSUM_GEN_IP_INLINE + for (i = 0; i < optlen_aligned / 2; i++) { + chk_sum += ((u16_t *)p->payload)[i]; + } +#endif /* CHECKSUM_GEN_IP_INLINE */ + } +#endif /* IP_OPTIONS_SEND */ + /* generate IP header */ + if (pbuf_add_header(p, IP_HLEN)) { + LWIP_DEBUGF(IP_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("ip4_output: not enough room for IP header in pbuf\n")); + + IP_STATS_INC(ip.err); + MIB2_STATS_INC(mib2.ipoutdiscards); + return ERR_BUF; + } + + iphdr = (struct ip_hdr *)p->payload; + LWIP_ASSERT("check that first pbuf can hold struct ip_hdr", + (p->len >= sizeof(struct ip_hdr))); + + IPH_TTL_SET(iphdr, ttl); + IPH_PROTO_SET(iphdr, proto); +#if CHECKSUM_GEN_IP_INLINE + chk_sum += PP_NTOHS(proto | (ttl << 8)); +#endif /* CHECKSUM_GEN_IP_INLINE */ + + /* dest cannot be NULL here */ + ip4_addr_copy(iphdr->dest, *dest); +#if CHECKSUM_GEN_IP_INLINE + chk_sum += ip4_addr_get_u32(&iphdr->dest) & 0xFFFF; + chk_sum += ip4_addr_get_u32(&iphdr->dest) >> 16; +#endif /* CHECKSUM_GEN_IP_INLINE */ + + IPH_VHL_SET(iphdr, 4, ip_hlen / 4); + IPH_TOS_SET(iphdr, tos); +#if CHECKSUM_GEN_IP_INLINE + chk_sum += PP_NTOHS(tos | (iphdr->_v_hl << 8)); +#endif /* CHECKSUM_GEN_IP_INLINE */ + IPH_LEN_SET(iphdr, lwip_htons(p->tot_len)); +#if CHECKSUM_GEN_IP_INLINE + chk_sum += iphdr->_len; +#endif /* CHECKSUM_GEN_IP_INLINE */ + IPH_OFFSET_SET(iphdr, 0); + IPH_ID_SET(iphdr, lwip_htons(ip_id)); +#if CHECKSUM_GEN_IP_INLINE + chk_sum += iphdr->_id; +#endif /* CHECKSUM_GEN_IP_INLINE */ + ++ip_id; + + if (src == NULL) { + ip4_addr_copy(iphdr->src, *IP4_ADDR_ANY4); + } else { + /* src cannot be NULL here */ + ip4_addr_copy(iphdr->src, *src); + } + +#if CHECKSUM_GEN_IP_INLINE + chk_sum += ip4_addr_get_u32(&iphdr->src) & 0xFFFF; + chk_sum += ip4_addr_get_u32(&iphdr->src) >> 16; + chk_sum = (chk_sum >> 16) + (chk_sum & 0xFFFF); + chk_sum = (chk_sum >> 16) + chk_sum; + chk_sum = ~chk_sum; + IF__NETIF_CHECKSUM_ENABLED(netif, NETIF_CHECKSUM_GEN_IP) { + iphdr->_chksum = (u16_t)chk_sum; /* network order */ + } +#if LWIP_CHECKSUM_CTRL_PER_NETIF + else { + IPH_CHKSUM_SET(iphdr, 0); + } +#endif /* LWIP_CHECKSUM_CTRL_PER_NETIF*/ +#else /* CHECKSUM_GEN_IP_INLINE */ + IPH_CHKSUM_SET(iphdr, 0); +#if CHECKSUM_GEN_IP + IF__NETIF_CHECKSUM_ENABLED(netif, NETIF_CHECKSUM_GEN_IP) { + IPH_CHKSUM_SET(iphdr, inet_chksum(iphdr, ip_hlen)); + } +#endif /* CHECKSUM_GEN_IP */ +#endif /* CHECKSUM_GEN_IP_INLINE */ + } else { + /* IP header already included in p */ + if (p->len < IP_HLEN) { + LWIP_DEBUGF(IP_DEBUG | 
LWIP_DBG_LEVEL_SERIOUS, ("ip4_output: LWIP_IP_HDRINCL but pbuf is too short\n")); + IP_STATS_INC(ip.err); + MIB2_STATS_INC(mib2.ipoutdiscards); + return ERR_BUF; + } + iphdr = (struct ip_hdr *)p->payload; + ip4_addr_copy(dest_addr, iphdr->dest); + dest = &dest_addr; + } + + IP_STATS_INC(ip.xmit); + + LWIP_DEBUGF(IP_DEBUG, ("ip4_output_if: %c%c%"U16_F"\n", netif->name[0], netif->name[1], (u16_t)netif->num)); + ip4_debug_print(p); + +#if ENABLE_LOOPBACK + if (ip4_addr_cmp(dest, netif_ip4_addr(netif)) +#if !LWIP_HAVE_LOOPIF + || ip4_addr_isloopback(dest) +#endif /* !LWIP_HAVE_LOOPIF */ + ) { + /* Packet to self, enqueue it for loopback */ + LWIP_DEBUGF(IP_DEBUG, ("netif_loop_output()")); + return netif_loop_output(netif, p); + } +#if LWIP_MULTICAST_TX_OPTIONS + if ((p->flags & PBUF_FLAG_MCASTLOOP) != 0) { + netif_loop_output(netif, p); + } +#endif /* LWIP_MULTICAST_TX_OPTIONS */ +#endif /* ENABLE_LOOPBACK */ +#if IP_FRAG + /* don't fragment if interface has mtu set to 0 [loopif] */ + if (netif->mtu && (p->tot_len > netif->mtu)) { + return ip4_frag(p, netif, dest); + } +#endif /* IP_FRAG */ + + LWIP_DEBUGF(IP_DEBUG, ("ip4_output_if: call netif->output()\n")); + return netif->output(netif, p, dest); +} + +/** + * Simple interface to ip_output_if. It finds the outgoing network + * interface and calls upon ip_output_if to do the actual work. + * + * @param p the packet to send (p->payload points to the data, e.g. next + protocol header; if dest == LWIP_IP_HDRINCL, p already includes an + IP header and p->payload points to that IP header) + * @param src the source IP address to send from (if src == IP4_ADDR_ANY, the + * IP address of the netif used to send is used as source address) + * @param dest the destination IP address to send the packet to + * @param ttl the TTL value to be set in the IP header + * @param tos the TOS value to be set in the IP header + * @param proto the PROTOCOL to be set in the IP header + * + * @return ERR_RTE if no route is found + * see ip_output_if() for more return values + */ +err_t +ip4_output(struct pbuf *p, const ip4_addr_t *src, const ip4_addr_t *dest, + u8_t ttl, u8_t tos, u8_t proto) +{ + struct netif *netif; + + LWIP_IP_CHECK_PBUF_REF_COUNT_FOR_TX(p); + + if ((netif = ip4_route_src(src, dest)) == NULL) { + LWIP_DEBUGF(IP_DEBUG, ("ip4_output: No route to %"U16_F".%"U16_F".%"U16_F".%"U16_F"\n", + ip4_addr1_16(dest), ip4_addr2_16(dest), ip4_addr3_16(dest), ip4_addr4_16(dest))); + IP_STATS_INC(ip.rterr); + return ERR_RTE; + } + + return ip4_output_if(p, src, dest, ttl, tos, proto, netif); +} + +#if LWIP_NETIF_USE_HINTS +/** Like ip_output, but takes and addr_hint pointer that is passed on to netif->addr_hint + * before calling ip_output_if. + * + * @param p the packet to send (p->payload points to the data, e.g. 
next + protocol header; if dest == LWIP_IP_HDRINCL, p already includes an + IP header and p->payload points to that IP header) + * @param src the source IP address to send from (if src == IP4_ADDR_ANY, the + * IP address of the netif used to send is used as source address) + * @param dest the destination IP address to send the packet to + * @param ttl the TTL value to be set in the IP header + * @param tos the TOS value to be set in the IP header + * @param proto the PROTOCOL to be set in the IP header + * @param netif_hint netif output hint pointer set to netif->hint before + * calling ip_output_if() + * + * @return ERR_RTE if no route is found + * see ip_output_if() for more return values + */ +err_t +ip4_output_hinted(struct pbuf *p, const ip4_addr_t *src, const ip4_addr_t *dest, + u8_t ttl, u8_t tos, u8_t proto, struct netif_hint *netif_hint) +{ + struct netif *netif; + err_t err; + + LWIP_IP_CHECK_PBUF_REF_COUNT_FOR_TX(p); + + if ((netif = ip4_route_src(src, dest)) == NULL) { + LWIP_DEBUGF(IP_DEBUG, ("ip4_output: No route to %"U16_F".%"U16_F".%"U16_F".%"U16_F"\n", + ip4_addr1_16(dest), ip4_addr2_16(dest), ip4_addr3_16(dest), ip4_addr4_16(dest))); + IP_STATS_INC(ip.rterr); + return ERR_RTE; + } + + NETIF_SET_HINTS(netif, netif_hint); + err = ip4_output_if(p, src, dest, ttl, tos, proto, netif); + NETIF_RESET_HINTS(netif); + + return err; +} +#endif /* LWIP_NETIF_USE_HINTS*/ + +#if IP_DEBUG +/* Print an IP header by using LWIP_DEBUGF + * @param p an IP packet, p->payload pointing to the IP header + */ +void +ip4_debug_print(struct pbuf *p) +{ + struct ip_hdr *iphdr = (struct ip_hdr *)p->payload; + + LWIP_DEBUGF(IP_DEBUG, ("IP header:\n")); + LWIP_DEBUGF(IP_DEBUG, ("+-------------------------------+\n")); + LWIP_DEBUGF(IP_DEBUG, ("|%2"S16_F" |%2"S16_F" | 0x%02"X16_F" | %5"U16_F" | (v, hl, tos, len)\n", + (u16_t)IPH_V(iphdr), + (u16_t)IPH_HL(iphdr), + (u16_t)IPH_TOS(iphdr), + lwip_ntohs(IPH_LEN(iphdr)))); + LWIP_DEBUGF(IP_DEBUG, ("+-------------------------------+\n")); + LWIP_DEBUGF(IP_DEBUG, ("| %5"U16_F" |%"U16_F"%"U16_F"%"U16_F"| %4"U16_F" | (id, flags, offset)\n", + lwip_ntohs(IPH_ID(iphdr)), + (u16_t)(lwip_ntohs(IPH_OFFSET(iphdr)) >> 15 & 1), + (u16_t)(lwip_ntohs(IPH_OFFSET(iphdr)) >> 14 & 1), + (u16_t)(lwip_ntohs(IPH_OFFSET(iphdr)) >> 13 & 1), + (u16_t)(lwip_ntohs(IPH_OFFSET(iphdr)) & IP_OFFMASK))); + LWIP_DEBUGF(IP_DEBUG, ("+-------------------------------+\n")); + LWIP_DEBUGF(IP_DEBUG, ("| %3"U16_F" | %3"U16_F" | 0x%04"X16_F" | (ttl, proto, chksum)\n", + (u16_t)IPH_TTL(iphdr), + (u16_t)IPH_PROTO(iphdr), + lwip_ntohs(IPH_CHKSUM(iphdr)))); + LWIP_DEBUGF(IP_DEBUG, ("+-------------------------------+\n")); + LWIP_DEBUGF(IP_DEBUG, ("| %3"U16_F" | %3"U16_F" | %3"U16_F" | %3"U16_F" | (src)\n", + ip4_addr1_16_val(iphdr->src), + ip4_addr2_16_val(iphdr->src), + ip4_addr3_16_val(iphdr->src), + ip4_addr4_16_val(iphdr->src))); + LWIP_DEBUGF(IP_DEBUG, ("+-------------------------------+\n")); + LWIP_DEBUGF(IP_DEBUG, ("| %3"U16_F" | %3"U16_F" | %3"U16_F" | %3"U16_F" | (dest)\n", + ip4_addr1_16_val(iphdr->dest), + ip4_addr2_16_val(iphdr->dest), + ip4_addr3_16_val(iphdr->dest), + ip4_addr4_16_val(iphdr->dest))); + LWIP_DEBUGF(IP_DEBUG, ("+-------------------------------+\n")); +} +#endif /* IP_DEBUG */ + +#endif /* LWIP_IPV4 */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/ipv4/ip4_addr.c b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/ipv4/ip4_addr.c new file mode 100644 index 0000000..92077c2 --- 
/dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/ipv4/ip4_addr.c @@ -0,0 +1,321 @@ +/** + * @file + * This is the IPv4 address tools implementation. + * + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ + +#include "lwip/opt.h" + +#if LWIP_IPV4 + +#include "lwip/ip_addr.h" +#include "lwip/netif.h" + +/* used by IP4_ADDR_ANY and IP_ADDR_BROADCAST in ip_addr.h */ +const ip_addr_t ip_addr_any = IPADDR4_INIT(IPADDR_ANY); +const ip_addr_t ip_addr_broadcast = IPADDR4_INIT(IPADDR_BROADCAST); + +/** + * Determine if an address is a broadcast address on a network interface + * + * @param addr address to be checked + * @param netif the network interface against which the address is checked + * @return returns non-zero if the address is a broadcast address + */ +u8_t +ip4_addr_isbroadcast_u32(u32_t addr, const struct netif *netif) +{ + ip4_addr_t ipaddr; + ip4_addr_set_u32(&ipaddr, addr); + + /* all ones (broadcast) or all zeroes (old skool broadcast) */ + if ((~addr == IPADDR_ANY) || + (addr == IPADDR_ANY)) { + return 1; + /* no broadcast support on this network interface? */ + } else if ((netif->flags & NETIF_FLAG_BROADCAST) == 0) { + /* the given address cannot be a broadcast address + * nor can we check against any broadcast addresses */ + return 0; + /* address matches network interface address exactly? => no broadcast */ + } else if (addr == ip4_addr_get_u32(netif_ip4_addr(netif))) { + return 0; + /* on the same (sub) network... */ + } else if (ip4_addr_netcmp(&ipaddr, netif_ip4_addr(netif), netif_ip4_netmask(netif)) + /* ...and host identifier bits are all ones? =>... 
*/ + && ((addr & ~ip4_addr_get_u32(netif_ip4_netmask(netif))) == + (IPADDR_BROADCAST & ~ip4_addr_get_u32(netif_ip4_netmask(netif))))) { + /* => network broadcast address */ + return 1; + } else { + return 0; + } +} + +/** Checks if a netmask is valid (starting with ones, then only zeros) + * + * @param netmask the IPv4 netmask to check (in network byte order!) + * @return 1 if the netmask is valid, 0 if it is not + */ +u8_t +ip4_addr_netmask_valid(u32_t netmask) +{ + u32_t mask; + u32_t nm_hostorder = lwip_htonl(netmask); + + /* first, check for the first zero */ + for (mask = 1UL << 31 ; mask != 0; mask >>= 1) { + if ((nm_hostorder & mask) == 0) { + break; + } + } + /* then check that there is no one */ + for (; mask != 0; mask >>= 1) { + if ((nm_hostorder & mask) != 0) { + /* there is a one after the first zero -> invalid */ + return 0; + } + } + /* no one after the first zero -> valid */ + return 1; +} + +/** + * Ascii internet address interpretation routine. + * The value returned is in network order. + * + * @param cp IP address in ascii representation (e.g. "127.0.0.1") + * @return ip address in network order + */ +u32_t +ipaddr_addr(const char *cp) +{ + ip4_addr_t val; + + if (ip4addr_aton(cp, &val)) { + return ip4_addr_get_u32(&val); + } + return (IPADDR_NONE); +} + +/** + * Check whether "cp" is a valid ascii representation + * of an Internet address and convert to a binary address. + * Returns 1 if the address is valid, 0 if not. + * This replaces inet_addr, the return value from which + * cannot distinguish between failure and a local broadcast address. + * + * @param cp IP address in ascii representation (e.g. "127.0.0.1") + * @param addr pointer to which to save the ip address in network order + * @return 1 if cp could be converted to addr, 0 on failure + */ +int +ip4addr_aton(const char *cp, ip4_addr_t *addr) +{ + u32_t val; + u8_t base; + char c; + u32_t parts[4]; + u32_t *pp = parts; + + c = *cp; + for (;;) { + /* + * Collect number up to ``.''. + * Values are specified as for C: + * 0x=hex, 0=octal, 1-9=decimal. + */ + if (!lwip_isdigit(c)) { + return 0; + } + val = 0; + base = 10; + if (c == '0') { + c = *++cp; + if (c == 'x' || c == 'X') { + base = 16; + c = *++cp; + } else { + base = 8; + } + } + for (;;) { + if (lwip_isdigit(c)) { + val = (val * base) + (u32_t)(c - '0'); + c = *++cp; + } else if (base == 16 && lwip_isxdigit(c)) { + val = (val << 4) | (u32_t)(c + 10 - (lwip_islower(c) ? 'a' : 'A')); + c = *++cp; + } else { + break; + } + } + if (c == '.') { + /* + * Internet format: + * a.b.c.d + * a.b.c (with c treated as 16 bits) + * a.b (with b treated as 24 bits) + */ + if (pp >= parts + 3) { + return 0; + } + *pp++ = val; + c = *++cp; + } else { + break; + } + } + /* + * Check for trailing characters. + */ + if (c != '\0' && !lwip_isspace(c)) { + return 0; + } + /* + * Concoct the address according to + * the number of parts specified. 
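The formats listed above mean ip4addr_aton() accepts more than plain dotted quads. A small round-trip sketch (the string literals are examples only):

#include "lwip/ip4_addr.h"

static void aton_ntoa_example(void)
{
  ip4_addr_t a;
  char buf[IP4ADDR_STRLEN_MAX];

  ip4addr_aton("192.168.1.20", &a);            /* classic a.b.c.d */
  ip4addr_aton("0x7f.1", &a);                  /* a.b form, hex part -> 127.0.0.1 */
  ip4addr_ntoa_r(&a, buf, IP4ADDR_STRLEN_MAX); /* reentrant variant: "127.0.0.1" */
}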
+ */ + switch (pp - parts + 1) { + + case 0: + return 0; /* initial nondigit */ + + case 1: /* a -- 32 bits */ + break; + + case 2: /* a.b -- 8.24 bits */ + if (val > 0xffffffUL) { + return 0; + } + if (parts[0] > 0xff) { + return 0; + } + val |= parts[0] << 24; + break; + + case 3: /* a.b.c -- 8.8.16 bits */ + if (val > 0xffff) { + return 0; + } + if ((parts[0] > 0xff) || (parts[1] > 0xff)) { + return 0; + } + val |= (parts[0] << 24) | (parts[1] << 16); + break; + + case 4: /* a.b.c.d -- 8.8.8.8 bits */ + if (val > 0xff) { + return 0; + } + if ((parts[0] > 0xff) || (parts[1] > 0xff) || (parts[2] > 0xff)) { + return 0; + } + val |= (parts[0] << 24) | (parts[1] << 16) | (parts[2] << 8); + break; + default: + LWIP_ASSERT("unhandled", 0); + break; + } + if (addr) { + ip4_addr_set_u32(addr, lwip_htonl(val)); + } + return 1; +} + +/** + * Convert numeric IP address into decimal dotted ASCII representation. + * returns ptr to static buffer; not reentrant! + * + * @param addr ip address in network order to convert + * @return pointer to a global static (!) buffer that holds the ASCII + * representation of addr + */ +char * +ip4addr_ntoa(const ip4_addr_t *addr) +{ + static char str[IP4ADDR_STRLEN_MAX]; + return ip4addr_ntoa_r(addr, str, IP4ADDR_STRLEN_MAX); +} + +/** + * Same as ip4addr_ntoa, but reentrant since a user-supplied buffer is used. + * + * @param addr ip address in network order to convert + * @param buf target buffer where the string is stored + * @param buflen length of buf + * @return either pointer to buf which now holds the ASCII + * representation of addr or NULL if buf was too small + */ +char * +ip4addr_ntoa_r(const ip4_addr_t *addr, char *buf, int buflen) +{ + u32_t s_addr; + char inv[3]; + char *rp; + u8_t *ap; + u8_t rem; + u8_t n; + u8_t i; + int len = 0; + + s_addr = ip4_addr_get_u32(addr); + + rp = buf; + ap = (u8_t *)&s_addr; + for (n = 0; n < 4; n++) { + i = 0; + do { + rem = *ap % (u8_t)10; + *ap /= (u8_t)10; + inv[i++] = (char)('0' + rem); + } while (*ap); + while (i--) { + if (len++ >= buflen) { + return NULL; + } + *rp++ = inv[i]; + } + if (len++ >= buflen) { + return NULL; + } + *rp++ = '.'; + ap++; + } + *--rp = 0; + return buf; +} + +#endif /* LWIP_IPV4 */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/ipv4/ip4_frag.c b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/ipv4/ip4_frag.c new file mode 100644 index 0000000..b2fd14c --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/ipv4/ip4_frag.c @@ -0,0 +1,894 @@ +/** + * @file + * This is the IPv4 packet segmentation and reassembly implementation. + * + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Jani Monoses + * Simon Goldschmidt + * original reassembly code by Adam Dunkels + * + */ + +#include "lwip/opt.h" + +#if LWIP_IPV4 + +#include "lwip/ip4_frag.h" +#include "lwip/def.h" +#include "lwip/inet_chksum.h" +#include "lwip/netif.h" +#include "lwip/stats.h" +#include "lwip/icmp.h" + +#include + +#if IP_REASSEMBLY +/** + * The IP reassembly code currently has the following limitations: + * - IP header options are not supported + * - fragments must not overlap (e.g. due to different routes), + * currently, overlapping or duplicate fragments are thrown away + * if IP_REASS_CHECK_OVERLAP=1 (the default)! + * + * @todo: work with IP header options + */ + +/** Setting this to 0, you can turn off checking the fragments for overlapping + * regions. The code gets a little smaller. Only use this if you know that + * overlapping won't occur on your network! */ +#ifndef IP_REASS_CHECK_OVERLAP +#define IP_REASS_CHECK_OVERLAP 1 +#endif /* IP_REASS_CHECK_OVERLAP */ + +/** Set to 0 to prevent freeing the oldest datagram when the reassembly buffer is + * full (IP_REASS_MAX_PBUFS pbufs are enqueued). The code gets a little smaller. + * Datagrams will be freed by timeout only. Especially useful when MEMP_NUM_REASSDATA + * is set to 1, so one datagram can be reassembled at a time, only. */ +#ifndef IP_REASS_FREE_OLDEST +#define IP_REASS_FREE_OLDEST 1 +#endif /* IP_REASS_FREE_OLDEST */ + +#define IP_REASS_FLAG_LASTFRAG 0x01 + +#define IP_REASS_VALIDATE_TELEGRAM_FINISHED 1 +#define IP_REASS_VALIDATE_PBUF_QUEUED 0 +#define IP_REASS_VALIDATE_PBUF_DROPPED -1 + +/** This is a helper struct which holds the starting + * offset and the ending offset of this fragment to + * easily chain the fragments. + * It has the same packing requirements as the IP header, since it replaces + * the IP header in memory in incoming fragments (after copying it) to keep + * track of the various fragments. (-> If the IP header doesn't need packing, + * this struct doesn't need packing, too.) + */ +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +struct ip_reass_helper { + PACK_STRUCT_FIELD(struct pbuf *next_pbuf); + PACK_STRUCT_FIELD(u16_t start); + PACK_STRUCT_FIELD(u16_t end); +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif + +#define IP_ADDRESSES_AND_ID_MATCH(iphdrA, iphdrB) \ + (ip4_addr_cmp(&(iphdrA)->src, &(iphdrB)->src) && \ + ip4_addr_cmp(&(iphdrA)->dest, &(iphdrB)->dest) && \ + IPH_ID(iphdrA) == IPH_ID(iphdrB)) ? 
1 : 0 + +/* global variables */ +static struct ip_reassdata *reassdatagrams; +static u16_t ip_reass_pbufcount; + +/* function prototypes */ +static void ip_reass_dequeue_datagram(struct ip_reassdata *ipr, struct ip_reassdata *prev); +static int ip_reass_free_complete_datagram(struct ip_reassdata *ipr, struct ip_reassdata *prev); + +/** + * Reassembly timer base function + * for both NO_SYS == 0 and 1 (!). + * + * Should be called every 1000 msec (defined by IP_TMR_INTERVAL). + */ +void +ip_reass_tmr(void) +{ + struct ip_reassdata *r, *prev = NULL; + + r = reassdatagrams; + while (r != NULL) { + /* Decrement the timer. Once it reaches 0, + * clean up the incomplete fragment assembly */ + if (r->timer > 0) { + r->timer--; + LWIP_DEBUGF(IP_REASS_DEBUG, ("ip_reass_tmr: timer dec %"U16_F"\n", (u16_t)r->timer)); + prev = r; + r = r->next; + } else { + /* reassembly timed out */ + struct ip_reassdata *tmp; + LWIP_DEBUGF(IP_REASS_DEBUG, ("ip_reass_tmr: timer timed out\n")); + tmp = r; + /* get the next pointer before freeing */ + r = r->next; + /* free the helper struct and all enqueued pbufs */ + ip_reass_free_complete_datagram(tmp, prev); + } + } +} + +/** + * Free a datagram (struct ip_reassdata) and all its pbufs. + * Updates the total count of enqueued pbufs (ip_reass_pbufcount), + * SNMP counters and sends an ICMP time exceeded packet. + * + * @param ipr datagram to free + * @param prev the previous datagram in the linked list + * @return the number of pbufs freed + */ +static int +ip_reass_free_complete_datagram(struct ip_reassdata *ipr, struct ip_reassdata *prev) +{ + u16_t pbufs_freed = 0; + u16_t clen; + struct pbuf *p; + struct ip_reass_helper *iprh; + + LWIP_ASSERT("prev != ipr", prev != ipr); + if (prev != NULL) { + LWIP_ASSERT("prev->next == ipr", prev->next == ipr); + } + + MIB2_STATS_INC(mib2.ipreasmfails); +#if LWIP_ICMP + iprh = (struct ip_reass_helper *)ipr->p->payload; + if (iprh->start == 0) { + /* The first fragment was received, send ICMP time exceeded. */ + /* First, de-queue the first pbuf from r->p. */ + p = ipr->p; + ipr->p = iprh->next_pbuf; + /* Then, copy the original header into it. */ + SMEMCPY(p->payload, &ipr->iphdr, IP_HLEN); + icmp_time_exceeded(p, ICMP_TE_FRAG); + clen = pbuf_clen(p); + LWIP_ASSERT("pbufs_freed + clen <= 0xffff", pbufs_freed + clen <= 0xffff); + pbufs_freed = (u16_t)(pbufs_freed + clen); + pbuf_free(p); + } +#endif /* LWIP_ICMP */ + + /* First, free all received pbufs. The individual pbufs need to be released + separately as they have not yet been chained */ + p = ipr->p; + while (p != NULL) { + struct pbuf *pcur; + iprh = (struct ip_reass_helper *)p->payload; + pcur = p; + /* get the next pointer before freeing */ + p = iprh->next_pbuf; + clen = pbuf_clen(pcur); + LWIP_ASSERT("pbufs_freed + clen <= 0xffff", pbufs_freed + clen <= 0xffff); + pbufs_freed = (u16_t)(pbufs_freed + clen); + pbuf_free(pcur); + } + /* Then, unchain the struct ip_reassdata from the list and free it. */ + ip_reass_dequeue_datagram(ipr, prev); + LWIP_ASSERT("ip_reass_pbufcount >= pbufs_freed", ip_reass_pbufcount >= pbufs_freed); + ip_reass_pbufcount = (u16_t)(ip_reass_pbufcount - pbufs_freed); + + return pbufs_freed; +} + +#if IP_REASS_FREE_OLDEST +/** + * Free the oldest datagram to make room for enqueueing new fragments. + * The datagram 'fraghdr' belongs to is not freed! 
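In this repository's FreeRTOS build the tcpip_thread normally drives ip_reass_tmr() through lwIP's cyclic timer list; only a NO_SYS=1 mainloop port has to service it explicitly. A sketch of that case ('ethernetif_poll' is a hypothetical driver polling routine, not part of this patch):

#include "lwip/netif.h"
#include "lwip/timeouts.h"

extern void ethernetif_poll(struct netif *netif);  /* hypothetical RX polling helper */

static void mainloop_example(struct netif *netif)
{
  for (;;) {
    ethernetif_poll(netif);   /* feed received frames into the stack */
    sys_check_timeouts();     /* runs ip_reass_tmr() & co. every IP_TMR_INTERVAL (1 s) */
  }
}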
+ * + * @param fraghdr IP header of the current fragment + * @param pbufs_needed number of pbufs needed to enqueue + * (used for freeing other datagrams if not enough space) + * @return the number of pbufs freed + */ +static int +ip_reass_remove_oldest_datagram(struct ip_hdr *fraghdr, int pbufs_needed) +{ + /* @todo Can't we simply remove the last datagram in the + * linked list behind reassdatagrams? + */ + struct ip_reassdata *r, *oldest, *prev, *oldest_prev; + int pbufs_freed = 0, pbufs_freed_current; + int other_datagrams; + + /* Free datagrams until being allowed to enqueue 'pbufs_needed' pbufs, + * but don't free the datagram that 'fraghdr' belongs to! */ + do { + oldest = NULL; + prev = NULL; + oldest_prev = NULL; + other_datagrams = 0; + r = reassdatagrams; + while (r != NULL) { + if (!IP_ADDRESSES_AND_ID_MATCH(&r->iphdr, fraghdr)) { + /* Not the same datagram as fraghdr */ + other_datagrams++; + if (oldest == NULL) { + oldest = r; + oldest_prev = prev; + } else if (r->timer <= oldest->timer) { + /* older than the previous oldest */ + oldest = r; + oldest_prev = prev; + } + } + if (r->next != NULL) { + prev = r; + } + r = r->next; + } + if (oldest != NULL) { + pbufs_freed_current = ip_reass_free_complete_datagram(oldest, oldest_prev); + pbufs_freed += pbufs_freed_current; + } + } while ((pbufs_freed < pbufs_needed) && (other_datagrams > 1)); + return pbufs_freed; +} +#endif /* IP_REASS_FREE_OLDEST */ + +/** + * Enqueues a new fragment into the fragment queue + * @param fraghdr points to the new fragments IP hdr + * @param clen number of pbufs needed to enqueue (used for freeing other datagrams if not enough space) + * @return A pointer to the queue location into which the fragment was enqueued + */ +static struct ip_reassdata * +ip_reass_enqueue_new_datagram(struct ip_hdr *fraghdr, int clen) +{ + struct ip_reassdata *ipr; +#if ! IP_REASS_FREE_OLDEST + LWIP_UNUSED_ARG(clen); +#endif + + /* No matching previous fragment found, allocate a new reassdata struct */ + ipr = (struct ip_reassdata *)memp_malloc(MEMP_REASSDATA); + if (ipr == NULL) { +#if IP_REASS_FREE_OLDEST + if (ip_reass_remove_oldest_datagram(fraghdr, clen) >= clen) { + ipr = (struct ip_reassdata *)memp_malloc(MEMP_REASSDATA); + } + if (ipr == NULL) +#endif /* IP_REASS_FREE_OLDEST */ + { + IPFRAG_STATS_INC(ip_frag.memerr); + LWIP_DEBUGF(IP_REASS_DEBUG, ("Failed to alloc reassdata struct\n")); + return NULL; + } + } + memset(ipr, 0, sizeof(struct ip_reassdata)); + ipr->timer = IP_REASS_MAXAGE; + + /* enqueue the new structure to the front of the list */ + ipr->next = reassdatagrams; + reassdatagrams = ipr; + /* copy the ip header for later tests and input */ + /* @todo: no ip options supported? */ + SMEMCPY(&(ipr->iphdr), fraghdr, IP_HLEN); + return ipr; +} + +/** + * Dequeues a datagram from the datagram queue. Doesn't deallocate the pbufs. + * @param ipr points to the queue entry to dequeue + */ +static void +ip_reass_dequeue_datagram(struct ip_reassdata *ipr, struct ip_reassdata *prev) +{ + /* dequeue the reass struct */ + if (reassdatagrams == ipr) { + /* it was the first in the list */ + reassdatagrams = ipr->next; + } else { + /* it wasn't the first, so it must have a valid 'prev' */ + LWIP_ASSERT("sanity check linked list", prev != NULL); + prev->next = ipr->next; + } + + /* now we can free the ip_reassdata struct */ + memp_free(MEMP_REASSDATA, ipr); +} + +/** + * Chain a new pbuf into the pbuf list that composes the datagram. The pbuf list + * will grow over time as new pbufs are rx. 
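How aggressively the code above enqueues and evicts fragments is bounded by a few lwipopts.h knobs; a sizing sketch for a RAM-constrained port (the values are illustrative, not taken from this patch):

/* lwipopts.h -- fragment reassembly sizing (illustrative values) */
#define IP_REASSEMBLY       1    /* enable ip4_reass() at all */
#define IP_REASS_MAXAGE     3    /* lifetime of a partial datagram, in IP_TMR_INTERVAL ticks */
#define IP_REASS_MAX_PBUFS  10   /* cap on pbufs held across all partial datagrams */
#define MEMP_NUM_REASSDATA  3    /* number of datagrams that can be reassembled at once */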
+ * Also checks that the datagram passes basic continuity checks (if the last + * fragment was received at least once). + * @param ipr points to the reassembly state + * @param new_p points to the pbuf for the current fragment + * @param is_last is 1 if this pbuf has MF==0 (ipr->flags not updated yet) + * @return see IP_REASS_VALIDATE_* defines + */ +static int +ip_reass_chain_frag_into_datagram_and_validate(struct ip_reassdata *ipr, struct pbuf *new_p, int is_last) +{ + struct ip_reass_helper *iprh, *iprh_tmp, *iprh_prev = NULL; + struct pbuf *q; + u16_t offset, len; + u8_t hlen; + struct ip_hdr *fraghdr; + int valid = 1; + + /* Extract length and fragment offset from current fragment */ + fraghdr = (struct ip_hdr *)new_p->payload; + len = lwip_ntohs(IPH_LEN(fraghdr)); + hlen = IPH_HL_BYTES(fraghdr); + if (hlen > len) { + /* invalid datagram */ + return IP_REASS_VALIDATE_PBUF_DROPPED; + } + len = (u16_t)(len - hlen); + offset = IPH_OFFSET_BYTES(fraghdr); + + /* overwrite the fragment's ip header from the pbuf with our helper struct, + * and setup the embedded helper structure. */ + /* make sure the struct ip_reass_helper fits into the IP header */ + LWIP_ASSERT("sizeof(struct ip_reass_helper) <= IP_HLEN", + sizeof(struct ip_reass_helper) <= IP_HLEN); + iprh = (struct ip_reass_helper *)new_p->payload; + iprh->next_pbuf = NULL; + iprh->start = offset; + iprh->end = (u16_t)(offset + len); + if (iprh->end < offset) { + /* u16_t overflow, cannot handle this */ + return IP_REASS_VALIDATE_PBUF_DROPPED; + } + + /* Iterate through until we either get to the end of the list (append), + * or we find one with a larger offset (insert). */ + for (q = ipr->p; q != NULL;) { + iprh_tmp = (struct ip_reass_helper *)q->payload; + if (iprh->start < iprh_tmp->start) { + /* the new pbuf should be inserted before this */ + iprh->next_pbuf = q; + if (iprh_prev != NULL) { + /* not the fragment with the lowest offset */ +#if IP_REASS_CHECK_OVERLAP + if ((iprh->start < iprh_prev->end) || (iprh->end > iprh_tmp->start)) { + /* fragment overlaps with previous or following, throw away */ + return IP_REASS_VALIDATE_PBUF_DROPPED; + } +#endif /* IP_REASS_CHECK_OVERLAP */ + iprh_prev->next_pbuf = new_p; + if (iprh_prev->end != iprh->start) { + /* There is a fragment missing between the current + * and the previous fragment */ + valid = 0; + } + } else { +#if IP_REASS_CHECK_OVERLAP + if (iprh->end > iprh_tmp->start) { + /* fragment overlaps with following, throw away */ + return IP_REASS_VALIDATE_PBUF_DROPPED; + } +#endif /* IP_REASS_CHECK_OVERLAP */ + /* fragment with the lowest offset */ + ipr->p = new_p; + } + break; + } else if (iprh->start == iprh_tmp->start) { + /* received the same datagram twice: no need to keep the datagram */ + return IP_REASS_VALIDATE_PBUF_DROPPED; +#if IP_REASS_CHECK_OVERLAP + } else if (iprh->start < iprh_tmp->end) { + /* overlap: no need to keep the new datagram */ + return IP_REASS_VALIDATE_PBUF_DROPPED; +#endif /* IP_REASS_CHECK_OVERLAP */ + } else { + /* Check if the fragments received so far have no holes. */ + if (iprh_prev != NULL) { + if (iprh_prev->end != iprh_tmp->start) { + /* There is a fragment missing between the current + * and the previous fragment */ + valid = 0; + } + } + } + q = iprh_tmp->next_pbuf; + iprh_prev = iprh_tmp; + } + + /* If q is NULL, then we made it to the end of the list. 
Determine what to do now */ + if (q == NULL) { + if (iprh_prev != NULL) { + /* this is (for now), the fragment with the highest offset: + * chain it to the last fragment */ +#if IP_REASS_CHECK_OVERLAP + LWIP_ASSERT("check fragments don't overlap", iprh_prev->end <= iprh->start); +#endif /* IP_REASS_CHECK_OVERLAP */ + iprh_prev->next_pbuf = new_p; + if (iprh_prev->end != iprh->start) { + valid = 0; + } + } else { +#if IP_REASS_CHECK_OVERLAP + LWIP_ASSERT("no previous fragment, this must be the first fragment!", + ipr->p == NULL); +#endif /* IP_REASS_CHECK_OVERLAP */ + /* this is the first fragment we ever received for this ip datagram */ + ipr->p = new_p; + } + } + + /* At this point, the validation part begins: */ + /* If we already received the last fragment */ + if (is_last || ((ipr->flags & IP_REASS_FLAG_LASTFRAG) != 0)) { + /* and had no holes so far */ + if (valid) { + /* then check if the rest of the fragments is here */ + /* Check if the queue starts with the first datagram */ + if ((ipr->p == NULL) || (((struct ip_reass_helper *)ipr->p->payload)->start != 0)) { + valid = 0; + } else { + /* and check that there are no holes after this datagram */ + iprh_prev = iprh; + q = iprh->next_pbuf; + while (q != NULL) { + iprh = (struct ip_reass_helper *)q->payload; + if (iprh_prev->end != iprh->start) { + valid = 0; + break; + } + iprh_prev = iprh; + q = iprh->next_pbuf; + } + /* if still valid, all fragments are received + * (because to the MF==0 already arrived */ + if (valid) { + LWIP_ASSERT("sanity check", ipr->p != NULL); + LWIP_ASSERT("sanity check", + ((struct ip_reass_helper *)ipr->p->payload) != iprh); + LWIP_ASSERT("validate_datagram:next_pbuf!=NULL", + iprh->next_pbuf == NULL); + } + } + } + /* If valid is 0 here, there are some fragments missing in the middle + * (since MF == 0 has already arrived). Such datagrams simply time out if + * no more fragments are received... */ + return valid ? IP_REASS_VALIDATE_TELEGRAM_FINISHED : IP_REASS_VALIDATE_PBUF_QUEUED; + } + /* If we come here, not all fragments were received, yet! */ + return IP_REASS_VALIDATE_PBUF_QUEUED; /* not yet valid! */ +} + +/** + * Reassembles incoming IP fragments into an IP datagram. + * + * @param p points to a pbuf chain of the fragment + * @return NULL if reassembly is incomplete, ? otherwise + */ +struct pbuf * +ip4_reass(struct pbuf *p) +{ + struct pbuf *r; + struct ip_hdr *fraghdr; + struct ip_reassdata *ipr; + struct ip_reass_helper *iprh; + u16_t offset, len, clen; + u8_t hlen; + int valid; + int is_last; + + IPFRAG_STATS_INC(ip_frag.recv); + MIB2_STATS_INC(mib2.ipreasmreqds); + + fraghdr = (struct ip_hdr *)p->payload; + + if (IPH_HL_BYTES(fraghdr) != IP_HLEN) { + LWIP_DEBUGF(IP_REASS_DEBUG, ("ip4_reass: IP options currently not supported!\n")); + IPFRAG_STATS_INC(ip_frag.err); + goto nullreturn; + } + + offset = IPH_OFFSET_BYTES(fraghdr); + len = lwip_ntohs(IPH_LEN(fraghdr)); + hlen = IPH_HL_BYTES(fraghdr); + if (hlen > len) { + /* invalid datagram */ + goto nullreturn; + } + len = (u16_t)(len - hlen); + + /* Check if we are allowed to enqueue more datagrams. 
*/ + clen = pbuf_clen(p); + if ((ip_reass_pbufcount + clen) > IP_REASS_MAX_PBUFS) { +#if IP_REASS_FREE_OLDEST + if (!ip_reass_remove_oldest_datagram(fraghdr, clen) || + ((ip_reass_pbufcount + clen) > IP_REASS_MAX_PBUFS)) +#endif /* IP_REASS_FREE_OLDEST */ + { + /* No datagram could be freed and still too many pbufs enqueued */ + LWIP_DEBUGF(IP_REASS_DEBUG, ("ip4_reass: Overflow condition: pbufct=%d, clen=%d, MAX=%d\n", + ip_reass_pbufcount, clen, IP_REASS_MAX_PBUFS)); + IPFRAG_STATS_INC(ip_frag.memerr); + /* @todo: send ICMP time exceeded here? */ + /* drop this pbuf */ + goto nullreturn; + } + } + + /* Look for the datagram the fragment belongs to in the current datagram queue, + * remembering the previous in the queue for later dequeueing. */ + for (ipr = reassdatagrams; ipr != NULL; ipr = ipr->next) { + /* Check if the incoming fragment matches the one currently present + in the reassembly buffer. If so, we proceed with copying the + fragment into the buffer. */ + if (IP_ADDRESSES_AND_ID_MATCH(&ipr->iphdr, fraghdr)) { + LWIP_DEBUGF(IP_REASS_DEBUG, ("ip4_reass: matching previous fragment ID=%"X16_F"\n", + lwip_ntohs(IPH_ID(fraghdr)))); + IPFRAG_STATS_INC(ip_frag.cachehit); + break; + } + } + + if (ipr == NULL) { + /* Enqueue a new datagram into the datagram queue */ + ipr = ip_reass_enqueue_new_datagram(fraghdr, clen); + /* Bail if unable to enqueue */ + if (ipr == NULL) { + goto nullreturn; + } + } else { + if (((lwip_ntohs(IPH_OFFSET(fraghdr)) & IP_OFFMASK) == 0) && + ((lwip_ntohs(IPH_OFFSET(&ipr->iphdr)) & IP_OFFMASK) != 0)) { + /* ipr->iphdr is not the header from the first fragment, but fraghdr is + * -> copy fraghdr into ipr->iphdr since we want to have the header + * of the first fragment (for ICMP time exceeded and later, for copying + * all options, if supported)*/ + SMEMCPY(&ipr->iphdr, fraghdr, IP_HLEN); + } + } + + /* At this point, we have either created a new entry or pointing + * to an existing one */ + + /* check for 'no more fragments', and update queue entry*/ + is_last = (IPH_OFFSET(fraghdr) & PP_NTOHS(IP_MF)) == 0; + if (is_last) { + u16_t datagram_len = (u16_t)(offset + len); + if ((datagram_len < offset) || (datagram_len > (0xFFFF - IP_HLEN))) { + /* u16_t overflow, cannot handle this */ + goto nullreturn_ipr; + } + } + /* find the right place to insert this pbuf */ + /* @todo: trim pbufs if fragments are overlapping */ + valid = ip_reass_chain_frag_into_datagram_and_validate(ipr, p, is_last); + if (valid == IP_REASS_VALIDATE_PBUF_DROPPED) { + goto nullreturn_ipr; + } + /* if we come here, the pbuf has been enqueued */ + + /* Track the current number of pbufs current 'in-flight', in order to limit + the number of fragments that may be enqueued at any one time + (overflow checked by testing against IP_REASS_MAX_PBUFS) */ + ip_reass_pbufcount = (u16_t)(ip_reass_pbufcount + clen); + if (is_last) { + u16_t datagram_len = (u16_t)(offset + len); + ipr->datagram_len = datagram_len; + ipr->flags |= IP_REASS_FLAG_LASTFRAG; + LWIP_DEBUGF(IP_REASS_DEBUG, + ("ip4_reass: last fragment seen, total len %"S16_F"\n", + ipr->datagram_len)); + } + + if (valid == IP_REASS_VALIDATE_TELEGRAM_FINISHED) { + struct ip_reassdata *ipr_prev; + /* the totally last fragment (flag more fragments = 0) was received at least + * once AND all fragments are received */ + u16_t datagram_len = (u16_t)(ipr->datagram_len + IP_HLEN); + + /* save the second pbuf before copying the header over the pointer */ + r = ((struct ip_reass_helper *)ipr->p->payload)->next_pbuf; + + /* copy the original ip header back 
to the first pbuf */ + fraghdr = (struct ip_hdr *)(ipr->p->payload); + SMEMCPY(fraghdr, &ipr->iphdr, IP_HLEN); + IPH_LEN_SET(fraghdr, lwip_htons(datagram_len)); + IPH_OFFSET_SET(fraghdr, 0); + IPH_CHKSUM_SET(fraghdr, 0); + /* @todo: do we need to set/calculate the correct checksum? */ +#if CHECKSUM_GEN_IP + IF__NETIF_CHECKSUM_ENABLED(ip_current_input_netif(), NETIF_CHECKSUM_GEN_IP) { + IPH_CHKSUM_SET(fraghdr, inet_chksum(fraghdr, IP_HLEN)); + } +#endif /* CHECKSUM_GEN_IP */ + + p = ipr->p; + + /* chain together the pbufs contained within the reass_data list. */ + while (r != NULL) { + iprh = (struct ip_reass_helper *)r->payload; + + /* hide the ip header for every succeeding fragment */ + pbuf_remove_header(r, IP_HLEN); + pbuf_cat(p, r); + r = iprh->next_pbuf; + } + + /* find the previous entry in the linked list */ + if (ipr == reassdatagrams) { + ipr_prev = NULL; + } else { + for (ipr_prev = reassdatagrams; ipr_prev != NULL; ipr_prev = ipr_prev->next) { + if (ipr_prev->next == ipr) { + break; + } + } + } + + /* release the sources allocate for the fragment queue entry */ + ip_reass_dequeue_datagram(ipr, ipr_prev); + + /* and adjust the number of pbufs currently queued for reassembly. */ + clen = pbuf_clen(p); + LWIP_ASSERT("ip_reass_pbufcount >= clen", ip_reass_pbufcount >= clen); + ip_reass_pbufcount = (u16_t)(ip_reass_pbufcount - clen); + + MIB2_STATS_INC(mib2.ipreasmoks); + + /* Return the pbuf chain */ + return p; + } + /* the datagram is not (yet?) reassembled completely */ + LWIP_DEBUGF(IP_REASS_DEBUG, ("ip_reass_pbufcount: %d out\n", ip_reass_pbufcount)); + return NULL; + +nullreturn_ipr: + LWIP_ASSERT("ipr != NULL", ipr != NULL); + if (ipr->p == NULL) { + /* dropped pbuf after creating a new datagram entry: remove the entry, too */ + LWIP_ASSERT("not firstalthough just enqueued", ipr == reassdatagrams); + ip_reass_dequeue_datagram(ipr, NULL); + } + +nullreturn: + LWIP_DEBUGF(IP_REASS_DEBUG, ("ip4_reass: nullreturn\n")); + IPFRAG_STATS_INC(ip_frag.drop); + pbuf_free(p); + return NULL; +} +#endif /* IP_REASSEMBLY */ + +#if IP_FRAG +#if !LWIP_NETIF_TX_SINGLE_PBUF +/** Allocate a new struct pbuf_custom_ref */ +static struct pbuf_custom_ref * +ip_frag_alloc_pbuf_custom_ref(void) +{ + return (struct pbuf_custom_ref *)memp_malloc(MEMP_FRAG_PBUF); +} + +/** Free a struct pbuf_custom_ref */ +static void +ip_frag_free_pbuf_custom_ref(struct pbuf_custom_ref *p) +{ + LWIP_ASSERT("p != NULL", p != NULL); + memp_free(MEMP_FRAG_PBUF, p); +} + +/** Free-callback function to free a 'struct pbuf_custom_ref', called by + * pbuf_free. */ +static void +ipfrag_free_pbuf_custom(struct pbuf *p) +{ + struct pbuf_custom_ref *pcr = (struct pbuf_custom_ref *)p; + LWIP_ASSERT("pcr != NULL", pcr != NULL); + LWIP_ASSERT("pcr == p", (void *)pcr == (void *)p); + if (pcr->original != NULL) { + pbuf_free(pcr->original); + } + ip_frag_free_pbuf_custom_ref(pcr); +} +#endif /* !LWIP_NETIF_TX_SINGLE_PBUF */ + +/** + * Fragment an IP datagram if too large for the netif. + * + * Chop the datagram in MTU sized chunks and send them in order + * by pointing PBUF_REFs into p. 
+ * + * @param p ip packet to send + * @param netif the netif on which to send + * @param dest destination ip address to which to send + * + * @return ERR_OK if sent successfully, err_t otherwise + */ +err_t +ip4_frag(struct pbuf *p, struct netif *netif, const ip4_addr_t *dest) +{ + struct pbuf *rambuf; +#if !LWIP_NETIF_TX_SINGLE_PBUF + struct pbuf *newpbuf; + u16_t newpbuflen = 0; + u16_t left_to_copy; +#endif + struct ip_hdr *original_iphdr; + struct ip_hdr *iphdr; + const u16_t nfb = (u16_t)((netif->mtu - IP_HLEN) / 8); + u16_t left, fragsize; + u16_t ofo; + int last; + u16_t poff = IP_HLEN; + u16_t tmp; + int mf_set; + + original_iphdr = (struct ip_hdr *)p->payload; + iphdr = original_iphdr; + if (IPH_HL_BYTES(iphdr) != IP_HLEN) { + /* ip4_frag() does not support IP options */ + return ERR_VAL; + } + LWIP_ERROR("ip4_frag(): pbuf too short", p->len >= IP_HLEN, return ERR_VAL); + + /* Save original offset */ + tmp = lwip_ntohs(IPH_OFFSET(iphdr)); + ofo = tmp & IP_OFFMASK; + /* already fragmented? if so, the last fragment we create must have MF, too */ + mf_set = tmp & IP_MF; + + left = (u16_t)(p->tot_len - IP_HLEN); + + while (left) { + /* Fill this fragment */ + fragsize = LWIP_MIN(left, (u16_t)(nfb * 8)); + +#if LWIP_NETIF_TX_SINGLE_PBUF + rambuf = pbuf_alloc(PBUF_IP, fragsize, PBUF_RAM); + if (rambuf == NULL) { + goto memerr; + } + LWIP_ASSERT("this needs a pbuf in one piece!", + (rambuf->len == rambuf->tot_len) && (rambuf->next == NULL)); + poff += pbuf_copy_partial(p, rambuf->payload, fragsize, poff); + /* make room for the IP header */ + if (pbuf_add_header(rambuf, IP_HLEN)) { + pbuf_free(rambuf); + goto memerr; + } + /* fill in the IP header */ + SMEMCPY(rambuf->payload, original_iphdr, IP_HLEN); + iphdr = (struct ip_hdr *)rambuf->payload; +#else /* LWIP_NETIF_TX_SINGLE_PBUF */ + /* When not using a static buffer, create a chain of pbufs. + * The first will be a PBUF_RAM holding the link and IP header. + * The rest will be PBUF_REFs mirroring the pbuf chain to be fragged, + * but limited to the size of an mtu. + */ + rambuf = pbuf_alloc(PBUF_LINK, IP_HLEN, PBUF_RAM); + if (rambuf == NULL) { + goto memerr; + } + LWIP_ASSERT("this needs a pbuf in one piece!", + (rambuf->len >= (IP_HLEN))); + SMEMCPY(rambuf->payload, original_iphdr, IP_HLEN); + iphdr = (struct ip_hdr *)rambuf->payload; + + left_to_copy = fragsize; + while (left_to_copy) { + struct pbuf_custom_ref *pcr; + u16_t plen = (u16_t)(p->len - poff); + LWIP_ASSERT("p->len >= poff", p->len >= poff); + newpbuflen = LWIP_MIN(left_to_copy, plen); + /* Is this pbuf already empty? */ + if (!newpbuflen) { + poff = 0; + p = p->next; + continue; + } + pcr = ip_frag_alloc_pbuf_custom_ref(); + if (pcr == NULL) { + pbuf_free(rambuf); + goto memerr; + } + /* Mirror this pbuf, although we might not need all of it. */ + newpbuf = pbuf_alloced_custom(PBUF_RAW, newpbuflen, PBUF_REF, &pcr->pc, + (u8_t *)p->payload + poff, newpbuflen); + if (newpbuf == NULL) { + ip_frag_free_pbuf_custom_ref(pcr); + pbuf_free(rambuf); + goto memerr; + } + pbuf_ref(p); + pcr->original = p; + pcr->pc.custom_free_function = ipfrag_free_pbuf_custom; + + /* Add it to end of rambuf's chain, but using pbuf_cat, not pbuf_chain + * so that it is removed when pbuf_dechain is later called on rambuf. 
+ */ + pbuf_cat(rambuf, newpbuf); + left_to_copy = (u16_t)(left_to_copy - newpbuflen); + if (left_to_copy) { + poff = 0; + p = p->next; + } + } + poff = (u16_t)(poff + newpbuflen); +#endif /* LWIP_NETIF_TX_SINGLE_PBUF */ + + /* Correct header */ + last = (left <= netif->mtu - IP_HLEN); + + /* Set new offset and MF flag */ + tmp = (IP_OFFMASK & (ofo)); + if (!last || mf_set) { + /* the last fragment has MF set if the input frame had it */ + tmp = tmp | IP_MF; + } + IPH_OFFSET_SET(iphdr, lwip_htons(tmp)); + IPH_LEN_SET(iphdr, lwip_htons((u16_t)(fragsize + IP_HLEN))); + IPH_CHKSUM_SET(iphdr, 0); +#if CHECKSUM_GEN_IP + IF__NETIF_CHECKSUM_ENABLED(netif, NETIF_CHECKSUM_GEN_IP) { + IPH_CHKSUM_SET(iphdr, inet_chksum(iphdr, IP_HLEN)); + } +#endif /* CHECKSUM_GEN_IP */ + + /* No need for separate header pbuf - we allowed room for it in rambuf + * when allocated. + */ + netif->output(netif, rambuf, dest); + IPFRAG_STATS_INC(ip_frag.xmit); + + /* Unfortunately we can't reuse rambuf - the hardware may still be + * using the buffer. Instead we free it (and the ensuing chain) and + * recreate it next time round the loop. If we're lucky the hardware + * will have already sent the packet, the free will really free, and + * there will be zero memory penalty. + */ + + pbuf_free(rambuf); + left = (u16_t)(left - fragsize); + ofo = (u16_t)(ofo + nfb); + } + MIB2_STATS_INC(mib2.ipfragoks); + return ERR_OK; +memerr: + MIB2_STATS_INC(mib2.ipfragfails); + return ERR_MEM; +} +#endif /* IP_FRAG */ + +#endif /* LWIP_IPV4 */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/ipv6/dhcp6.c b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/ipv6/dhcp6.c new file mode 100644 index 0000000..1ae8742 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/ipv6/dhcp6.c @@ -0,0 +1,812 @@ +/** + * @file + * + * @defgroup dhcp6 DHCPv6 + * @ingroup ip6 + * DHCPv6 client: IPv6 address autoconfiguration as per + * RFC 3315 (stateful DHCPv6) and + * RFC 3736 (stateless DHCPv6). + * + * For now, only stateless DHCPv6 is implemented! + * + * TODO: + * - enable/disable API to not always start when RA is received + * - stateful DHCPv6 (for now, only stateless DHCPv6 for DNS and NTP servers works) + * - create Client Identifier? + * - only start requests if a valid local address is available on the netif + * - only start information requests if required (not for every RA) + * + * dhcp6_enable_stateful() enables stateful DHCPv6 for a netif (stateless disabled)\n + * dhcp6_enable_stateless() enables stateless DHCPv6 for a netif (stateful disabled)\n + * dhcp6_disable() disable DHCPv6 for a netif + * + * When enabled, requests are only issued after receipt of RA with the + * corresponding bits set. + */ + +/* + * Copyright (c) 2018 Simon Goldschmidt + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. 
The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Simon Goldschmidt + */ + +#include "lwip/opt.h" + +#if LWIP_IPV6 && LWIP_IPV6_DHCP6 /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/dhcp6.h" +#include "lwip/prot/dhcp6.h" +#include "lwip/def.h" +#include "lwip/udp.h" +#include "lwip/dns.h" + +#include + +#ifdef LWIP_HOOK_FILENAME +#include LWIP_HOOK_FILENAME +#endif +#ifndef LWIP_HOOK_DHCP6_APPEND_OPTIONS +#define LWIP_HOOK_DHCP6_APPEND_OPTIONS(netif, dhcp6, state, msg, msg_type, options_len_ptr, max_len) +#endif +#ifndef LWIP_HOOK_DHCP6_PARSE_OPTION +#define LWIP_HOOK_DHCP6_PARSE_OPTION(netif, dhcp6, state, msg, msg_type, option, len, pbuf, offset) do { LWIP_UNUSED_ARG(msg); } while(0) +#endif + +#if LWIP_DNS && LWIP_DHCP6_MAX_DNS_SERVERS +#if DNS_MAX_SERVERS > LWIP_DHCP6_MAX_DNS_SERVERS +#define LWIP_DHCP6_PROVIDE_DNS_SERVERS LWIP_DHCP6_MAX_DNS_SERVERS +#else +#define LWIP_DHCP6_PROVIDE_DNS_SERVERS DNS_MAX_SERVERS +#endif +#else +#define LWIP_DHCP6_PROVIDE_DNS_SERVERS 0 +#endif + + +/** Option handling: options are parsed in dhcp6_parse_reply + * and saved in an array where other functions can load them from. + * This might be moved into the struct dhcp6 (not necessarily since + * lwIP is single-threaded and the array is only used while in recv + * callback). */ +enum dhcp6_option_idx { + DHCP6_OPTION_IDX_CLI_ID = 0, + DHCP6_OPTION_IDX_SERVER_ID, +#if LWIP_DHCP6_PROVIDE_DNS_SERVERS + DHCP6_OPTION_IDX_DNS_SERVER, + DHCP6_OPTION_IDX_DOMAIN_LIST, +#endif /* LWIP_DHCP_PROVIDE_DNS_SERVERS */ +#if LWIP_DHCP6_GET_NTP_SRV + DHCP6_OPTION_IDX_NTP_SERVER, +#endif /* LWIP_DHCP_GET_NTP_SRV */ + DHCP6_OPTION_IDX_MAX +}; + +struct dhcp6_option_info { + u8_t option_given; + u16_t val_start; + u16_t val_length; +}; + +/** Holds the decoded option info, only valid while in dhcp6_recv. 
*/ +struct dhcp6_option_info dhcp6_rx_options[DHCP6_OPTION_IDX_MAX]; + +#define dhcp6_option_given(dhcp6, idx) (dhcp6_rx_options[idx].option_given != 0) +#define dhcp6_got_option(dhcp6, idx) (dhcp6_rx_options[idx].option_given = 1) +#define dhcp6_clear_option(dhcp6, idx) (dhcp6_rx_options[idx].option_given = 0) +#define dhcp6_clear_all_options(dhcp6) (memset(dhcp6_rx_options, 0, sizeof(dhcp6_rx_options))) +#define dhcp6_get_option_start(dhcp6, idx) (dhcp6_rx_options[idx].val_start) +#define dhcp6_get_option_length(dhcp6, idx) (dhcp6_rx_options[idx].val_length) +#define dhcp6_set_option(dhcp6, idx, start, len) do { dhcp6_rx_options[idx].val_start = (start); dhcp6_rx_options[idx].val_length = (len); }while(0) + + +const ip_addr_t dhcp6_All_DHCP6_Relay_Agents_and_Servers = IPADDR6_INIT_HOST(0xFF020000, 0, 0, 0x00010002); +const ip_addr_t dhcp6_All_DHCP6_Servers = IPADDR6_INIT_HOST(0xFF020000, 0, 0, 0x00010003); + +static struct udp_pcb *dhcp6_pcb; +static u8_t dhcp6_pcb_refcount; + + +/* receive, unfold, parse and free incoming messages */ +static void dhcp6_recv(void *arg, struct udp_pcb *pcb, struct pbuf *p, const ip_addr_t *addr, u16_t port); + +/** Ensure DHCP PCB is allocated and bound */ +static err_t +dhcp6_inc_pcb_refcount(void) +{ + if (dhcp6_pcb_refcount == 0) { + LWIP_ASSERT("dhcp6_inc_pcb_refcount(): memory leak", dhcp6_pcb == NULL); + + /* allocate UDP PCB */ + dhcp6_pcb = udp_new_ip6(); + + if (dhcp6_pcb == NULL) { + return ERR_MEM; + } + + ip_set_option(dhcp6_pcb, SOF_BROADCAST); + + /* set up local and remote port for the pcb -> listen on all interfaces on all src/dest IPs */ + udp_bind(dhcp6_pcb, IP6_ADDR_ANY, DHCP6_CLIENT_PORT); + udp_recv(dhcp6_pcb, dhcp6_recv, NULL); + } + + dhcp6_pcb_refcount++; + + return ERR_OK; +} + +/** Free DHCP PCB if the last netif stops using it */ +static void +dhcp6_dec_pcb_refcount(void) +{ + LWIP_ASSERT("dhcp6_pcb_refcount(): refcount error", (dhcp6_pcb_refcount > 0)); + dhcp6_pcb_refcount--; + + if (dhcp6_pcb_refcount == 0) { + udp_remove(dhcp6_pcb); + dhcp6_pcb = NULL; + } +} + +/** + * @ingroup dhcp6 + * Set a statically allocated struct dhcp6 to work with. + * Using this prevents dhcp6_start to allocate it using mem_malloc. + * + * @param netif the netif for which to set the struct dhcp + * @param dhcp6 (uninitialised) dhcp6 struct allocated by the application + */ +void +dhcp6_set_struct(struct netif *netif, struct dhcp6 *dhcp6) +{ + LWIP_ASSERT("netif != NULL", netif != NULL); + LWIP_ASSERT("dhcp6 != NULL", dhcp6 != NULL); + LWIP_ASSERT("netif already has a struct dhcp6 set", netif_dhcp6_data(netif) == NULL); + + /* clear data structure */ + memset(dhcp6, 0, sizeof(struct dhcp6)); + /* dhcp6_set_state(&dhcp, DHCP6_STATE_OFF); */ + netif_set_client_data(netif, LWIP_NETIF_CLIENT_DATA_INDEX_DHCP6, dhcp6); +} + +/** + * @ingroup dhcp6 + * Removes a struct dhcp6 from a netif. + * + * ATTENTION: Only use this when not using dhcp6_set_struct() to allocate the + * struct dhcp6 since the memory is passed back to the heap. 
+ * + * @param netif the netif from which to remove the struct dhcp + */ +void dhcp6_cleanup(struct netif *netif) +{ + LWIP_ASSERT("netif != NULL", netif != NULL); + + if (netif_dhcp6_data(netif) != NULL) { + mem_free(netif_dhcp6_data(netif)); + netif_set_client_data(netif, LWIP_NETIF_CLIENT_DATA_INDEX_DHCP6, NULL); + } +} + +static struct dhcp6* +dhcp6_get_struct(struct netif *netif, const char *dbg_requester) +{ + struct dhcp6 *dhcp6 = netif_dhcp6_data(netif); + if (dhcp6 == NULL) { + LWIP_DEBUGF(DHCP6_DEBUG | LWIP_DBG_TRACE, ("%s: mallocing new DHCPv6 client\n", dbg_requester)); + dhcp6 = (struct dhcp6 *)mem_malloc(sizeof(struct dhcp6)); + if (dhcp6 == NULL) { + LWIP_DEBUGF(DHCP6_DEBUG | LWIP_DBG_TRACE, ("%s: could not allocate dhcp6\n", dbg_requester)); + return NULL; + } + + /* clear data structure, this implies DHCP6_STATE_OFF */ + memset(dhcp6, 0, sizeof(struct dhcp6)); + /* store this dhcp6 client in the netif */ + netif_set_client_data(netif, LWIP_NETIF_CLIENT_DATA_INDEX_DHCP6, dhcp6); + } else { + /* already has DHCP6 client attached */ + LWIP_DEBUGF(DHCP6_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("%s: using existing DHCPv6 client\n", dbg_requester)); + } + + if (!dhcp6->pcb_allocated) { + if (dhcp6_inc_pcb_refcount() != ERR_OK) { /* ensure DHCP6 PCB is allocated */ + mem_free(dhcp6); + netif_set_client_data(netif, LWIP_NETIF_CLIENT_DATA_INDEX_DHCP6, NULL); + return NULL; + } + LWIP_DEBUGF(DHCP6_DEBUG | LWIP_DBG_TRACE, ("%s: allocated dhcp6", dbg_requester)); + dhcp6->pcb_allocated = 1; + } + return dhcp6; +} + +/* + * Set the DHCPv6 state + * If the state changed, reset the number of tries. + */ +static void +dhcp6_set_state(struct dhcp6 *dhcp6, u8_t new_state, const char *dbg_caller) +{ + LWIP_DEBUGF(DHCP6_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("DHCPv6 state: %d -> %d (%s)\n", + dhcp6->state, new_state, dbg_caller)); + if (new_state != dhcp6->state) { + dhcp6->state = new_state; + dhcp6->tries = 0; + dhcp6->request_timeout = 0; + } +} + +static int +dhcp6_stateless_enabled(struct dhcp6 *dhcp6) +{ + if ((dhcp6->state == DHCP6_STATE_STATELESS_IDLE) || + (dhcp6->state == DHCP6_STATE_REQUESTING_CONFIG)) { + return 1; + } + return 0; +} + +/*static int +dhcp6_stateful_enabled(struct dhcp6 *dhcp6) +{ + if (dhcp6->state == DHCP6_STATE_OFF) { + return 0; + } + if (dhcp6_stateless_enabled(dhcp6)) { + return 0; + } + return 1; +}*/ + +/** + * @ingroup dhcp6 + * Enable stateful DHCPv6 on this netif + * Requests are sent on receipt of an RA message with the + * ND6_RA_FLAG_MANAGED_ADDR_CONFIG flag set. + * + * A struct dhcp6 will be allocated for this netif if not + * set via @ref dhcp6_set_struct before. + * + * @todo: stateful DHCPv6 not supported, yet + */ +err_t +dhcp6_enable_stateful(struct netif *netif) +{ + LWIP_UNUSED_ARG(netif); + LWIP_DEBUGF(DHCP6_DEBUG | LWIP_DBG_TRACE, ("stateful dhcp6 not implemented yet")); + return ERR_VAL; +} + +/** + * @ingroup dhcp6 + * Enable stateless DHCPv6 on this netif + * Requests are sent on receipt of an RA message with the + * ND6_RA_FLAG_OTHER_CONFIG flag set. + * + * A struct dhcp6 will be allocated for this netif if not + * set via @ref dhcp6_set_struct before. 
+ */ +err_t +dhcp6_enable_stateless(struct netif *netif) +{ + struct dhcp6 *dhcp6; + + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp6_enable_stateless(netif=%p) %c%c%"U16_F"\n", (void *)netif, netif->name[0], netif->name[1], (u16_t)netif->num)); + + dhcp6 = dhcp6_get_struct(netif, "dhcp6_enable_stateless()"); + if (dhcp6 == NULL) { + return ERR_MEM; + } + if (dhcp6_stateless_enabled(dhcp6)) { + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp6_enable_stateless(): stateless DHCPv6 already enabled")); + return ERR_OK; + } else if (dhcp6->state != DHCP6_STATE_OFF) { + /* stateful running */ + /* @todo: stop stateful once it is implemented */ + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp6_enable_stateless(): switching from stateful to stateless DHCPv6")); + } + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp6_enable_stateless(): stateless DHCPv6 enabled\n")); + dhcp6_set_state(dhcp6, DHCP6_STATE_STATELESS_IDLE, "dhcp6_enable_stateless"); + return ERR_OK; +} + +/** + * @ingroup dhcp6 + * Disable stateful or stateless DHCPv6 on this netif + * Requests are sent on receipt of an RA message with the + * ND6_RA_FLAG_OTHER_CONFIG flag set. + */ +void +dhcp6_disable(struct netif *netif) +{ + struct dhcp6 *dhcp6; + + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp6_disable(netif=%p) %c%c%"U16_F"\n", (void *)netif, netif->name[0], netif->name[1], (u16_t)netif->num)); + + dhcp6 = netif_dhcp6_data(netif); + if (dhcp6 != NULL) { + if (dhcp6->state != DHCP6_STATE_OFF) { + LWIP_DEBUGF(DHCP6_DEBUG | LWIP_DBG_TRACE, ("dhcp6_disable(): DHCPv6 disabled (old state: %s)\n", + (dhcp6_stateless_enabled(dhcp6) ? "stateless" : "stateful"))); + dhcp6_set_state(dhcp6, DHCP6_STATE_OFF, "dhcp6_disable"); + if (dhcp6->pcb_allocated != 0) { + dhcp6_dec_pcb_refcount(); /* free DHCPv6 PCB if not needed any more */ + dhcp6->pcb_allocated = 0; + } + } + } +} + +/** + * Create a DHCPv6 request, fill in common headers + * + * @param netif the netif under DHCPv6 control + * @param dhcp6 dhcp6 control struct + * @param message_type message type of the request + * @param opt_len_alloc option length to allocate + * @param options_out_len option length on exit + * @return a pbuf for the message + */ +static struct pbuf * +dhcp6_create_msg(struct netif *netif, struct dhcp6 *dhcp6, u8_t message_type, + u16_t opt_len_alloc, u16_t *options_out_len) +{ + struct pbuf *p_out; + struct dhcp6_msg *msg_out; + + LWIP_ERROR("dhcp6_create_msg: netif != NULL", (netif != NULL), return NULL;); + LWIP_ERROR("dhcp6_create_msg: dhcp6 != NULL", (dhcp6 != NULL), return NULL;); + p_out = pbuf_alloc(PBUF_TRANSPORT, sizeof(struct dhcp6_msg) + opt_len_alloc, PBUF_RAM); + if (p_out == NULL) { + LWIP_DEBUGF(DHCP6_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_SERIOUS, + ("dhcp6_create_msg(): could not allocate pbuf\n")); + return NULL; + } + LWIP_ASSERT("dhcp6_create_msg: check that first pbuf can hold struct dhcp6_msg", + (p_out->len >= sizeof(struct dhcp6_msg) + opt_len_alloc)); + + /* @todo: limit new xid for certain message types? 
*/ + /* reuse transaction identifier in retransmissions */ + if (dhcp6->tries == 0) { + dhcp6->xid = LWIP_RAND() & 0xFFFFFF; + } + + LWIP_DEBUGF(DHCP6_DEBUG | LWIP_DBG_TRACE, + ("transaction id xid(%"X32_F")\n", dhcp6->xid)); + + msg_out = (struct dhcp6_msg *)p_out->payload; + memset(msg_out, 0, sizeof(struct dhcp6_msg) + opt_len_alloc); + + msg_out->msgtype = message_type; + msg_out->transaction_id[0] = (u8_t)(dhcp6->xid >> 16); + msg_out->transaction_id[1] = (u8_t)(dhcp6->xid >> 8); + msg_out->transaction_id[2] = (u8_t)dhcp6->xid; + *options_out_len = 0; + return p_out; +} + +static u16_t +dhcp6_option_short(u16_t options_out_len, u8_t *options, u16_t value) +{ + options[options_out_len++] = (u8_t)((value & 0xff00U) >> 8); + options[options_out_len++] = (u8_t) (value & 0x00ffU); + return options_out_len; +} + +static u16_t +dhcp6_option_optionrequest(u16_t options_out_len, u8_t *options, const u16_t *req_options, + u16_t num_req_options, u16_t max_len) +{ + size_t i; + u16_t ret; + + LWIP_ASSERT("dhcp6_option_optionrequest: options_out_len + sizeof(struct dhcp6_msg) + addlen <= max_len", + sizeof(struct dhcp6_msg) + options_out_len + 4U + (2U * num_req_options) <= max_len); + LWIP_UNUSED_ARG(max_len); + + ret = dhcp6_option_short(options_out_len, options, DHCP6_OPTION_ORO); + ret = dhcp6_option_short(ret, options, 2 * num_req_options); + for (i = 0; i < num_req_options; i++) { + ret = dhcp6_option_short(ret, options, req_options[i]); + } + return ret; +} + +/* All options are added, shrink the pbuf to the required size */ +static void +dhcp6_msg_finalize(u16_t options_out_len, struct pbuf *p_out) +{ + /* shrink the pbuf to the actual content length */ + pbuf_realloc(p_out, (u16_t)(sizeof(struct dhcp6_msg) + options_out_len)); +} + + +#if LWIP_IPV6_DHCP6_STATELESS +static void +dhcp6_information_request(struct netif *netif, struct dhcp6 *dhcp6) +{ + const u16_t requested_options[] = {DHCP6_OPTION_DNS_SERVERS, DHCP6_OPTION_DOMAIN_LIST, DHCP6_OPTION_SNTP_SERVERS}; + u16_t msecs; + struct pbuf *p_out; + u16_t options_out_len; + LWIP_DEBUGF(DHCP6_DEBUG | LWIP_DBG_TRACE, ("dhcp6_information_request()\n")); + /* create and initialize the DHCP message header */ + p_out = dhcp6_create_msg(netif, dhcp6, DHCP6_INFOREQUEST, 4 + sizeof(requested_options), &options_out_len); + if (p_out != NULL) { + err_t err; + struct dhcp6_msg *msg_out = (struct dhcp6_msg *)p_out->payload; + u8_t *options = (u8_t *)(msg_out + 1); + LWIP_DEBUGF(DHCP6_DEBUG | LWIP_DBG_TRACE, ("dhcp6_information_request: making request\n")); + + options_out_len = dhcp6_option_optionrequest(options_out_len, options, requested_options, + LWIP_ARRAYSIZE(requested_options), p_out->len); + LWIP_HOOK_DHCP6_APPEND_OPTIONS(netif, dhcp6, DHCP6_STATE_REQUESTING_CONFIG, msg_out, + DHCP6_INFOREQUEST, options_out_len, p_out->len); + dhcp6_msg_finalize(options_out_len, p_out); + + err = udp_sendto_if(dhcp6_pcb, p_out, &dhcp6_All_DHCP6_Relay_Agents_and_Servers, DHCP6_SERVER_PORT, netif); + pbuf_free(p_out); + LWIP_DEBUGF(DHCP6_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp6_information_request: INFOREQUESTING -> %d\n", (int)err)); + LWIP_UNUSED_ARG(err); + } else { + LWIP_DEBUGF(DHCP6_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_SERIOUS, ("dhcp6_information_request: could not allocate DHCP6 request\n")); + } + dhcp6_set_state(dhcp6, DHCP6_STATE_REQUESTING_CONFIG, "dhcp6_information_request"); + if (dhcp6->tries < 255) { + dhcp6->tries++; + } + msecs = (u16_t)((dhcp6->tries < 6 ? 
1 << dhcp6->tries : 60) * 1000); + dhcp6->request_timeout = (u16_t)((msecs + DHCP6_TIMER_MSECS - 1) / DHCP6_TIMER_MSECS); + LWIP_DEBUGF(DHCP6_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp6_information_request(): set request timeout %"U16_F" msecs\n", msecs)); +} + +static err_t +dhcp6_request_config(struct netif *netif, struct dhcp6 *dhcp6) +{ + /* stateless mode enabled and no request running? */ + if (dhcp6->state == DHCP6_STATE_STATELESS_IDLE) { + /* send Information-request and wait for answer; setup receive timeout */ + dhcp6_information_request(netif, dhcp6); + } + + return ERR_OK; +} + +static void +dhcp6_abort_config_request(struct dhcp6 *dhcp6) +{ + if (dhcp6->state == DHCP6_STATE_REQUESTING_CONFIG) { + /* abort running request */ + dhcp6_set_state(dhcp6, DHCP6_STATE_STATELESS_IDLE, "dhcp6_abort_config_request"); + } +} + +/* Handle a REPLY to INFOREQUEST + * This parses DNS and NTP server addresses from the reply. + */ +static void +dhcp6_handle_config_reply(struct netif *netif, struct pbuf *p_msg_in) +{ + struct dhcp6 *dhcp6 = netif_dhcp6_data(netif); + + LWIP_UNUSED_ARG(dhcp6); + LWIP_UNUSED_ARG(p_msg_in); + +#if LWIP_DHCP6_PROVIDE_DNS_SERVERS + if (dhcp6_option_given(dhcp6, DHCP6_OPTION_IDX_DNS_SERVER)) { + ip_addr_t dns_addr; + ip6_addr_t *dns_addr6; + u16_t op_start = dhcp6_get_option_start(dhcp6, DHCP6_OPTION_IDX_DNS_SERVER); + u16_t op_len = dhcp6_get_option_length(dhcp6, DHCP6_OPTION_IDX_DNS_SERVER); + u16_t idx; + u8_t n; + + memset(&dns_addr, 0, sizeof(dns_addr)); + dns_addr6 = ip_2_ip6(&dns_addr); + for (n = 0, idx = op_start; (idx < op_start + op_len) && (n < LWIP_DHCP6_PROVIDE_DNS_SERVERS); + n++, idx += sizeof(struct ip6_addr_packed)) { + u16_t copied = pbuf_copy_partial(p_msg_in, dns_addr6, sizeof(struct ip6_addr_packed), idx); + if (copied != sizeof(struct ip6_addr_packed)) { + /* pbuf length mismatch */ + return; + } + ip6_addr_assign_zone(dns_addr6, IP6_UNKNOWN, netif); + /* @todo: do we need a different offset than DHCP(v4)? */ + dns_setserver(n, &dns_addr); + } + } + /* @ todo: parse and set Domain Search List */ +#endif /* LWIP_DHCP6_PROVIDE_DNS_SERVERS */ + +#if LWIP_DHCP6_GET_NTP_SRV + if (dhcp6_option_given(dhcp6, DHCP6_OPTION_IDX_NTP_SERVER)) { + ip_addr_t ntp_server_addrs[LWIP_DHCP6_MAX_NTP_SERVERS]; + u16_t op_start = dhcp6_get_option_start(dhcp6, DHCP6_OPTION_IDX_NTP_SERVER); + u16_t op_len = dhcp6_get_option_length(dhcp6, DHCP6_OPTION_IDX_NTP_SERVER); + u16_t idx; + u8_t n; + + for (n = 0, idx = op_start; (idx < op_start + op_len) && (n < LWIP_DHCP6_MAX_NTP_SERVERS); + n++, idx += sizeof(struct ip6_addr_packed)) { + u16_t copied; + ip6_addr_t *ntp_addr6 = ip_2_ip6(&ntp_server_addrs[n]); + ip_addr_set_zero_ip6(&ntp_server_addrs[n]); + copied = pbuf_copy_partial(p_msg_in, ntp_addr6, sizeof(struct ip6_addr_packed), idx); + if (copied != sizeof(struct ip6_addr_packed)) { + /* pbuf length mismatch */ + return; + } + ip6_addr_assign_zone(ntp_addr6, IP6_UNKNOWN, netif); + } + dhcp6_set_ntp_servers(n, ntp_server_addrs); + } +#endif /* LWIP_DHCP6_GET_NTP_SRV */ +} +#endif /* LWIP_IPV6_DHCP6_STATELESS */ + +/** This function is called from nd6 module when an RA messsage is received + * It triggers DHCPv6 requests (if enabled). 
+ */ +void +dhcp6_nd6_ra_trigger(struct netif *netif, u8_t managed_addr_config, u8_t other_config) +{ + struct dhcp6 *dhcp6; + + LWIP_ASSERT("netif != NULL", netif != NULL); + dhcp6 = netif_dhcp6_data(netif); + + LWIP_UNUSED_ARG(managed_addr_config); + LWIP_UNUSED_ARG(other_config); + LWIP_UNUSED_ARG(dhcp6); + +#if LWIP_IPV6_DHCP6_STATELESS + if (dhcp6 != NULL) { + if (dhcp6_stateless_enabled(dhcp6)) { + if (other_config) { + dhcp6_request_config(netif, dhcp6); + } else { + dhcp6_abort_config_request(dhcp6); + } + } + } +#endif /* LWIP_IPV6_DHCP6_STATELESS */ +} + +/** + * Parse the DHCPv6 message and extract the DHCPv6 options. + * + * Extract the DHCPv6 options (offset + length) so that we can later easily + * check for them or extract the contents. + */ +static err_t +dhcp6_parse_reply(struct pbuf *p, struct dhcp6 *dhcp6) +{ + u16_t offset; + u16_t offset_max; + u16_t options_idx; + struct dhcp6_msg *msg_in; + + LWIP_UNUSED_ARG(dhcp6); + + /* clear received options */ + dhcp6_clear_all_options(dhcp6); + msg_in = (struct dhcp6_msg *)p->payload; + + /* parse options */ + + options_idx = sizeof(struct dhcp6_msg); + /* parse options to the end of the received packet */ + offset_max = p->tot_len; + + offset = options_idx; + /* at least 4 byte to read? */ + while ((offset + 4 <= offset_max)) { + u8_t op_len_buf[4]; + u8_t *op_len; + u16_t op; + u16_t len; + u16_t val_offset = (u16_t)(offset + 4); + if (val_offset < offset) { + /* overflow */ + return ERR_BUF; + } + /* copy option + length, might be split accross pbufs */ + op_len = (u8_t *)pbuf_get_contiguous(p, op_len_buf, 4, 4, offset); + if (op_len == NULL) { + /* failed to get option and length */ + return ERR_VAL; + } + op = (op_len[0] << 8) | op_len[1]; + len = (op_len[2] << 8) | op_len[3]; + offset = val_offset + len; + if (offset < val_offset) { + /* overflow */ + return ERR_BUF; + } + + switch (op) { + case (DHCP6_OPTION_CLIENTID): + dhcp6_got_option(dhcp6, DHCP6_OPTION_IDX_CLI_ID); + dhcp6_set_option(dhcp6, DHCP6_OPTION_IDX_CLI_ID, val_offset, len); + break; + case (DHCP6_OPTION_SERVERID): + dhcp6_got_option(dhcp6, DHCP6_OPTION_IDX_SERVER_ID); + dhcp6_set_option(dhcp6, DHCP6_OPTION_IDX_SERVER_ID, val_offset, len); + break; +#if LWIP_DHCP6_PROVIDE_DNS_SERVERS + case (DHCP6_OPTION_DNS_SERVERS): + dhcp6_got_option(dhcp6, DHCP6_OPTION_IDX_DNS_SERVER); + dhcp6_set_option(dhcp6, DHCP6_OPTION_IDX_DNS_SERVER, val_offset, len); + break; + case (DHCP6_OPTION_DOMAIN_LIST): + dhcp6_got_option(dhcp6, DHCP6_OPTION_IDX_DOMAIN_LIST); + dhcp6_set_option(dhcp6, DHCP6_OPTION_IDX_DOMAIN_LIST, val_offset, len); + break; +#endif /* LWIP_DHCP6_PROVIDE_DNS_SERVERS */ +#if LWIP_DHCP6_GET_NTP_SRV + case (DHCP6_OPTION_SNTP_SERVERS): + dhcp6_got_option(dhcp6, DHCP6_OPTION_IDX_NTP_SERVER); + dhcp6_set_option(dhcp6, DHCP6_OPTION_IDX_NTP_SERVER, val_offset, len); + break; +#endif /* LWIP_DHCP6_GET_NTP_SRV*/ + default: + LWIP_DEBUGF(DHCP6_DEBUG, ("skipping option %"U16_F" in options\n", op)); + LWIP_HOOK_DHCP6_PARSE_OPTION(ip_current_netif(), dhcp6, dhcp6->state, msg_in, + msg_in->msgtype, op, len, q, val_offset); + break; + } + } + return ERR_OK; +} + +static void +dhcp6_recv(void *arg, struct udp_pcb *pcb, struct pbuf *p, const ip_addr_t *addr, u16_t port) +{ + struct netif *netif = ip_current_input_netif(); + struct dhcp6 *dhcp6 = netif_dhcp6_data(netif); + struct dhcp6_msg *reply_msg = (struct dhcp6_msg *)p->payload; + u8_t msg_type; + u32_t xid; + + LWIP_UNUSED_ARG(arg); + + /* Caught DHCPv6 message from netif that does not have DHCPv6 enabled? 
-> not interested */ + if ((dhcp6 == NULL) || (dhcp6->pcb_allocated == 0)) { + goto free_pbuf_and_return; + } + + LWIP_ERROR("invalid server address type", IP_IS_V6(addr), goto free_pbuf_and_return;); + + LWIP_DEBUGF(DHCP6_DEBUG | LWIP_DBG_TRACE, ("dhcp6_recv(pbuf = %p) from DHCPv6 server %s port %"U16_F"\n", (void *)p, + ipaddr_ntoa(addr), port)); + LWIP_DEBUGF(DHCP6_DEBUG | LWIP_DBG_TRACE, ("pbuf->len = %"U16_F"\n", p->len)); + LWIP_DEBUGF(DHCP6_DEBUG | LWIP_DBG_TRACE, ("pbuf->tot_len = %"U16_F"\n", p->tot_len)); + /* prevent warnings about unused arguments */ + LWIP_UNUSED_ARG(pcb); + LWIP_UNUSED_ARG(addr); + LWIP_UNUSED_ARG(port); + + if (p->len < sizeof(struct dhcp6_msg)) { + LWIP_DEBUGF(DHCP6_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_WARNING, ("DHCPv6 reply message or pbuf too short\n")); + goto free_pbuf_and_return; + } + + /* match transaction ID against what we expected */ + xid = reply_msg->transaction_id[0] << 16; + xid |= reply_msg->transaction_id[1] << 8; + xid |= reply_msg->transaction_id[2]; + if (xid != dhcp6->xid) { + LWIP_DEBUGF(DHCP6_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_WARNING, + ("transaction id mismatch reply_msg->xid(%"X32_F")!= dhcp6->xid(%"X32_F")\n", xid, dhcp6->xid)); + goto free_pbuf_and_return; + } + /* option fields could be unfold? */ + if (dhcp6_parse_reply(p, dhcp6) != ERR_OK) { + LWIP_DEBUGF(DHCP6_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_SERIOUS, + ("problem unfolding DHCPv6 message - too short on memory?\n")); + goto free_pbuf_and_return; + } + + /* read DHCP message type */ + msg_type = reply_msg->msgtype; + /* message type is DHCP6 REPLY? */ + if (msg_type == DHCP6_REPLY) { + LWIP_DEBUGF(DHCP6_DEBUG | LWIP_DBG_TRACE, ("DHCP6_REPLY received\n")); +#if LWIP_IPV6_DHCP6_STATELESS + /* in info-requesting state? */ + if (dhcp6->state == DHCP6_STATE_REQUESTING_CONFIG) { + dhcp6_set_state(dhcp6, DHCP6_STATE_STATELESS_IDLE, "dhcp6_recv"); + dhcp6_handle_config_reply(netif, p); + } else +#endif /* LWIP_IPV6_DHCP6_STATELESS */ + { + /* @todo: handle reply in other states? */ + } + } else { + /* @todo: handle other message types */ + } + +free_pbuf_and_return: + pbuf_free(p); +} + +/** + * A DHCPv6 request has timed out. + * + * The timer that was started with the DHCPv6 request has + * timed out, indicating no response was received in time. + */ +static void +dhcp6_timeout(struct netif *netif, struct dhcp6 *dhcp6) +{ + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp6_timeout()\n")); + + LWIP_UNUSED_ARG(netif); + LWIP_UNUSED_ARG(dhcp6); + +#if LWIP_IPV6_DHCP6_STATELESS + /* back-off period has passed, or server selection timed out */ + if (dhcp6->state == DHCP6_STATE_REQUESTING_CONFIG) { + LWIP_DEBUGF(DHCP6_DEBUG | LWIP_DBG_TRACE, ("dhcp6_timeout(): retrying information request\n")); + dhcp6_information_request(netif, dhcp6); + } +#endif /* LWIP_IPV6_DHCP6_STATELESS */ +} + +/** + * DHCPv6 timeout handling (this function must be called every 500ms, + * see @ref DHCP6_TIMER_MSECS). + * + * A DHCPv6 server is expected to respond within a short period of time. + * This timer checks whether an outstanding DHCPv6 request is timed out. 
+ */ +void +dhcp6_tmr(void) +{ + struct netif *netif; + /* loop through netif's */ + NETIF_FOREACH(netif) { + struct dhcp6 *dhcp6 = netif_dhcp6_data(netif); + /* only act on DHCPv6 configured interfaces */ + if (dhcp6 != NULL) { + /* timer is active (non zero), and is about to trigger now */ + if (dhcp6->request_timeout > 1) { + dhcp6->request_timeout--; + } else if (dhcp6->request_timeout == 1) { + dhcp6->request_timeout--; + /* { dhcp6->request_timeout == 0 } */ + LWIP_DEBUGF(DHCP6_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp6_tmr(): request timeout\n")); + /* this client's request timeout triggered */ + dhcp6_timeout(netif, dhcp6); + } + } + } +} + +#endif /* LWIP_IPV6 && LWIP_IPV6_DHCP6 */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/ipv6/ethip6.c b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/ipv6/ethip6.c new file mode 100644 index 0000000..0a616b6 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/ipv6/ethip6.c @@ -0,0 +1,123 @@ +/** + * @file + * + * Ethernet output for IPv6. Uses ND tables for link-layer addressing. + */ + +/* + * Copyright (c) 2010 Inico Technologies Ltd. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Ivan Delamer + * + * + * Please coordinate changes and requests with Ivan Delamer + * + */ + +#include "lwip/opt.h" + +#if LWIP_IPV6 && LWIP_ETHERNET + +#include "lwip/ethip6.h" +#include "lwip/nd6.h" +#include "lwip/pbuf.h" +#include "lwip/ip6.h" +#include "lwip/ip6_addr.h" +#include "lwip/inet_chksum.h" +#include "lwip/netif.h" +#include "lwip/icmp6.h" +#include "lwip/prot/ethernet.h" +#include "netif/ethernet.h" + +#include + +/** + * Resolve and fill-in Ethernet address header for outgoing IPv6 packet. + * + * For IPv6 multicast, corresponding Ethernet addresses + * are selected and the packet is transmitted on the link. + * + * For unicast addresses, ask the ND6 module what to do. 
It will either let us + * send the the packet right away, or queue the packet for later itself, unless + * an error occurs. + * + * @todo anycast addresses + * + * @param netif The lwIP network interface which the IP packet will be sent on. + * @param q The pbuf(s) containing the IP packet to be sent. + * @param ip6addr The IP address of the packet destination. + * + * @return + * - ERR_OK or the return value of @ref nd6_get_next_hop_addr_or_queue. + */ +err_t +ethip6_output(struct netif *netif, struct pbuf *q, const ip6_addr_t *ip6addr) +{ + struct eth_addr dest; + const u8_t *hwaddr; + err_t result; + + LWIP_ASSERT_CORE_LOCKED(); + + /* The destination IP address must be properly zoned from here on down. */ + IP6_ADDR_ZONECHECK_NETIF(ip6addr, netif); + + /* multicast destination IP address? */ + if (ip6_addr_ismulticast(ip6addr)) { + /* Hash IP multicast address to MAC address.*/ + dest.addr[0] = 0x33; + dest.addr[1] = 0x33; + dest.addr[2] = ((const u8_t *)(&(ip6addr->addr[3])))[0]; + dest.addr[3] = ((const u8_t *)(&(ip6addr->addr[3])))[1]; + dest.addr[4] = ((const u8_t *)(&(ip6addr->addr[3])))[2]; + dest.addr[5] = ((const u8_t *)(&(ip6addr->addr[3])))[3]; + + /* Send out. */ + return ethernet_output(netif, q, (const struct eth_addr*)(netif->hwaddr), &dest, ETHTYPE_IPV6); + } + + /* We have a unicast destination IP address */ + /* @todo anycast? */ + + /* Ask ND6 what to do with the packet. */ + result = nd6_get_next_hop_addr_or_queue(netif, q, ip6addr, &hwaddr); + if (result != ERR_OK) { + return result; + } + + /* If no hardware address is returned, nd6 has queued the packet for later. */ + if (hwaddr == NULL) { + return ERR_OK; + } + + /* Send out the packet using the returned hardware address. */ + SMEMCPY(dest.addr, hwaddr, 6); + return ethernet_output(netif, q, (const struct eth_addr*)(netif->hwaddr), &dest, ETHTYPE_IPV6); +} + +#endif /* LWIP_IPV6 && LWIP_ETHERNET */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/ipv6/icmp6.c b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/ipv6/icmp6.c new file mode 100644 index 0000000..7c93668 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/ipv6/icmp6.c @@ -0,0 +1,425 @@ +/** + * @file + * + * IPv6 version of ICMP, as per RFC 4443. + */ + +/* + * Copyright (c) 2010 Inico Technologies Ltd. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Ivan Delamer + * + * + * Please coordinate changes and requests with Ivan Delamer + * + */ + +#include "lwip/opt.h" + +#if LWIP_ICMP6 && LWIP_IPV6 /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/icmp6.h" +#include "lwip/prot/icmp6.h" +#include "lwip/ip6.h" +#include "lwip/ip6_addr.h" +#include "lwip/inet_chksum.h" +#include "lwip/pbuf.h" +#include "lwip/netif.h" +#include "lwip/nd6.h" +#include "lwip/mld6.h" +#include "lwip/ip.h" +#include "lwip/stats.h" + +#include + +#if LWIP_ICMP6_DATASIZE == 0 +#undef LWIP_ICMP6_DATASIZE +#define LWIP_ICMP6_DATASIZE 8 +#endif + +/* Forward declarations */ +static void icmp6_send_response(struct pbuf *p, u8_t code, u32_t data, u8_t type); +static void icmp6_send_response_with_addrs(struct pbuf *p, u8_t code, u32_t data, + u8_t type, const ip6_addr_t *src_addr, const ip6_addr_t *dest_addr); +static void icmp6_send_response_with_addrs_and_netif(struct pbuf *p, u8_t code, u32_t data, + u8_t type, const ip6_addr_t *src_addr, const ip6_addr_t *dest_addr, struct netif *netif); + + +/** + * Process an input ICMPv6 message. Called by ip6_input. + * + * Will generate a reply for echo requests. Other messages are forwarded + * to nd6_input, or mld6_input. + * + * @param p the mld packet, p->payload pointing to the icmpv6 header + * @param inp the netif on which this packet was received + */ +void +icmp6_input(struct pbuf *p, struct netif *inp) +{ + struct icmp6_hdr *icmp6hdr; + struct pbuf *r; + const ip6_addr_t *reply_src; + + ICMP6_STATS_INC(icmp6.recv); + + /* Check that ICMPv6 header fits in payload */ + if (p->len < sizeof(struct icmp6_hdr)) { + /* drop short packets */ + pbuf_free(p); + ICMP6_STATS_INC(icmp6.lenerr); + ICMP6_STATS_INC(icmp6.drop); + return; + } + + icmp6hdr = (struct icmp6_hdr *)p->payload; + +#if CHECKSUM_CHECK_ICMP6 + IF__NETIF_CHECKSUM_ENABLED(inp, NETIF_CHECKSUM_CHECK_ICMP6) { + if (ip6_chksum_pseudo(p, IP6_NEXTH_ICMP6, p->tot_len, ip6_current_src_addr(), + ip6_current_dest_addr()) != 0) { + /* Checksum failed */ + pbuf_free(p); + ICMP6_STATS_INC(icmp6.chkerr); + ICMP6_STATS_INC(icmp6.drop); + return; + } + } +#endif /* CHECKSUM_CHECK_ICMP6 */ + + switch (icmp6hdr->type) { + case ICMP6_TYPE_NA: /* Neighbor advertisement */ + case ICMP6_TYPE_NS: /* Neighbor solicitation */ + case ICMP6_TYPE_RA: /* Router advertisement */ + case ICMP6_TYPE_RD: /* Redirect */ + case ICMP6_TYPE_PTB: /* Packet too big */ + nd6_input(p, inp); + return; + case ICMP6_TYPE_RS: +#if LWIP_IPV6_FORWARD + /* @todo implement router functionality */ +#endif + break; +#if LWIP_IPV6_MLD + case ICMP6_TYPE_MLQ: + case ICMP6_TYPE_MLR: + case ICMP6_TYPE_MLD: + mld6_input(p, inp); + return; +#endif + case ICMP6_TYPE_EREQ: +#if !LWIP_MULTICAST_PING + /* multicast destination address? */ + if (ip6_addr_ismulticast(ip6_current_dest_addr())) { + /* drop */ + pbuf_free(p); + ICMP6_STATS_INC(icmp6.drop); + return; + } +#endif /* LWIP_MULTICAST_PING */ + + /* Allocate reply. 
*/ + r = pbuf_alloc(PBUF_IP, p->tot_len, PBUF_RAM); + if (r == NULL) { + /* drop */ + pbuf_free(p); + ICMP6_STATS_INC(icmp6.memerr); + return; + } + + /* Copy echo request. */ + if (pbuf_copy(r, p) != ERR_OK) { + /* drop */ + pbuf_free(p); + pbuf_free(r); + ICMP6_STATS_INC(icmp6.err); + return; + } + + /* Determine reply source IPv6 address. */ +#if LWIP_MULTICAST_PING + if (ip6_addr_ismulticast(ip6_current_dest_addr())) { + reply_src = ip_2_ip6(ip6_select_source_address(inp, ip6_current_src_addr())); + if (reply_src == NULL) { + /* drop */ + pbuf_free(p); + pbuf_free(r); + ICMP6_STATS_INC(icmp6.rterr); + return; + } + } + else +#endif /* LWIP_MULTICAST_PING */ + { + reply_src = ip6_current_dest_addr(); + } + + /* Set fields in reply. */ + ((struct icmp6_echo_hdr *)(r->payload))->type = ICMP6_TYPE_EREP; + ((struct icmp6_echo_hdr *)(r->payload))->chksum = 0; +#if CHECKSUM_GEN_ICMP6 + IF__NETIF_CHECKSUM_ENABLED(inp, NETIF_CHECKSUM_GEN_ICMP6) { + ((struct icmp6_echo_hdr *)(r->payload))->chksum = ip6_chksum_pseudo(r, + IP6_NEXTH_ICMP6, r->tot_len, reply_src, ip6_current_src_addr()); + } +#endif /* CHECKSUM_GEN_ICMP6 */ + + /* Send reply. */ + ICMP6_STATS_INC(icmp6.xmit); + ip6_output_if(r, reply_src, ip6_current_src_addr(), + LWIP_ICMP6_HL, 0, IP6_NEXTH_ICMP6, inp); + pbuf_free(r); + + break; + default: + ICMP6_STATS_INC(icmp6.proterr); + ICMP6_STATS_INC(icmp6.drop); + break; + } + + pbuf_free(p); +} + + +/** + * Send an icmpv6 'destination unreachable' packet. + * + * This function must be used only in direct response to a packet that is being + * received right now. Otherwise, address zones would be lost. + * + * @param p the input packet for which the 'unreachable' should be sent, + * p->payload pointing to the IPv6 header + * @param c ICMPv6 code for the unreachable type + */ +void +icmp6_dest_unreach(struct pbuf *p, enum icmp6_dur_code c) +{ + icmp6_send_response(p, c, 0, ICMP6_TYPE_DUR); +} + +/** + * Send an icmpv6 'packet too big' packet. + * + * This function must be used only in direct response to a packet that is being + * received right now. Otherwise, address zones would be lost. + * + * @param p the input packet for which the 'packet too big' should be sent, + * p->payload pointing to the IPv6 header + * @param mtu the maximum mtu that we can accept + */ +void +icmp6_packet_too_big(struct pbuf *p, u32_t mtu) +{ + icmp6_send_response(p, 0, mtu, ICMP6_TYPE_PTB); +} + +/** + * Send an icmpv6 'time exceeded' packet. + * + * This function must be used only in direct response to a packet that is being + * received right now. Otherwise, address zones would be lost. + * + * @param p the input packet for which the 'time exceeded' should be sent, + * p->payload pointing to the IPv6 header + * @param c ICMPv6 code for the time exceeded type + */ +void +icmp6_time_exceeded(struct pbuf *p, enum icmp6_te_code c) +{ + icmp6_send_response(p, c, 0, ICMP6_TYPE_TE); +} + +/** + * Send an icmpv6 'time exceeded' packet, with explicit source and destination + * addresses. + * + * This function may be used to send a response sometime after receiving the + * packet for which this response is meant. The provided source and destination + * addresses are used primarily to retain their zone information. 
+ * + * @param p the input packet for which the 'time exceeded' should be sent, + * p->payload pointing to the IPv6 header + * @param c ICMPv6 code for the time exceeded type + * @param src_addr source address of the original packet, with zone information + * @param dest_addr destination address of the original packet, with zone + * information + */ +void +icmp6_time_exceeded_with_addrs(struct pbuf *p, enum icmp6_te_code c, + const ip6_addr_t *src_addr, const ip6_addr_t *dest_addr) +{ + icmp6_send_response_with_addrs(p, c, 0, ICMP6_TYPE_TE, src_addr, dest_addr); +} + +/** + * Send an icmpv6 'parameter problem' packet. + * + * This function must be used only in direct response to a packet that is being + * received right now. Otherwise, address zones would be lost and the calculated + * offset would be wrong (calculated against ip6_current_header()). + * + * @param p the input packet for which the 'param problem' should be sent, + * p->payload pointing to the IP header + * @param c ICMPv6 code for the param problem type + * @param pointer the pointer to the byte where the parameter is found + */ +void +icmp6_param_problem(struct pbuf *p, enum icmp6_pp_code c, const void *pointer) +{ + u32_t pointer_u32 = (u32_t)((const u8_t *)pointer - (const u8_t *)ip6_current_header()); + icmp6_send_response(p, c, pointer_u32, ICMP6_TYPE_PP); +} + +/** + * Send an ICMPv6 packet in response to an incoming packet. + * The packet is sent *to* ip_current_src_addr() on ip_current_netif(). + * + * @param p the input packet for which the response should be sent, + * p->payload pointing to the IPv6 header + * @param code Code of the ICMPv6 header + * @param data Additional 32-bit parameter in the ICMPv6 header + * @param type Type of the ICMPv6 header + */ +static void +icmp6_send_response(struct pbuf *p, u8_t code, u32_t data, u8_t type) +{ + const struct ip6_addr *reply_src, *reply_dest; + struct netif *netif = ip_current_netif(); + + LWIP_ASSERT("icmpv6 packet not a direct response", netif != NULL); + reply_dest = ip6_current_src_addr(); + + /* Select an address to use as source. */ + reply_src = ip_2_ip6(ip6_select_source_address(netif, reply_dest)); + if (reply_src == NULL) { + ICMP6_STATS_INC(icmp6.rterr); + return; + } + icmp6_send_response_with_addrs_and_netif(p, code, data, type, reply_src, reply_dest, netif); +} + +/** + * Send an ICMPv6 packet in response to an incoming packet. + * + * Call this function if the packet is NOT sent as a direct response to an + * incoming packet, but rather sometime later (e.g. for a fragment reassembly + * timeout). The caller must provide the zoned source and destination addresses + * from the original packet with the src_addr and dest_addr parameters. The + * reason for this approach is that while the addresses themselves are part of + * the original packet, their zone information is not, thus possibly resulting + * in a link-local response being sent over the wrong link. 
+ * + * @param p the input packet for which the response should be sent, + * p->payload pointing to the IPv6 header + * @param code Code of the ICMPv6 header + * @param data Additional 32-bit parameter in the ICMPv6 header + * @param type Type of the ICMPv6 header + * @param src_addr original source address + * @param dest_addr original destination address + */ +static void +icmp6_send_response_with_addrs(struct pbuf *p, u8_t code, u32_t data, u8_t type, + const ip6_addr_t *src_addr, const ip6_addr_t *dest_addr) +{ + const struct ip6_addr *reply_src, *reply_dest; + struct netif *netif; + + /* Get the destination address and netif for this ICMP message. */ + LWIP_ASSERT("must provide both source and destination", src_addr != NULL); + LWIP_ASSERT("must provide both source and destination", dest_addr != NULL); + + /* Special case, as ip6_current_xxx is either NULL, or points + to a different packet than the one that expired. */ + IP6_ADDR_ZONECHECK(src_addr); + IP6_ADDR_ZONECHECK(dest_addr); + /* Swap source and destination for the reply. */ + reply_dest = src_addr; + reply_src = dest_addr; + netif = ip6_route(reply_src, reply_dest); + if (netif == NULL) { + ICMP6_STATS_INC(icmp6.rterr); + return; + } + icmp6_send_response_with_addrs_and_netif(p, code, data, type, reply_src, + reply_dest, netif); +} + +/** + * Send an ICMPv6 packet (with srd/dst address and netif given). + * + * @param p the input packet for which the response should be sent, + * p->payload pointing to the IPv6 header + * @param code Code of the ICMPv6 header + * @param data Additional 32-bit parameter in the ICMPv6 header + * @param type Type of the ICMPv6 header + * @param reply_src source address of the packet to send + * @param reply_dest destination address of the packet to send + * @param netif netif to send the packet + */ +static void +icmp6_send_response_with_addrs_and_netif(struct pbuf *p, u8_t code, u32_t data, u8_t type, + const ip6_addr_t *reply_src, const ip6_addr_t *reply_dest, struct netif *netif) +{ + struct pbuf *q; + struct icmp6_hdr *icmp6hdr; + + /* ICMPv6 header + IPv6 header + data */ + q = pbuf_alloc(PBUF_IP, sizeof(struct icmp6_hdr) + IP6_HLEN + LWIP_ICMP6_DATASIZE, + PBUF_RAM); + if (q == NULL) { + LWIP_DEBUGF(ICMP_DEBUG, ("icmp_time_exceeded: failed to allocate pbuf for ICMPv6 packet.\n")); + ICMP6_STATS_INC(icmp6.memerr); + return; + } + LWIP_ASSERT("check that first pbuf can hold icmp 6message", + (q->len >= (sizeof(struct icmp6_hdr) + IP6_HLEN + LWIP_ICMP6_DATASIZE))); + + icmp6hdr = (struct icmp6_hdr *)q->payload; + icmp6hdr->type = type; + icmp6hdr->code = code; + icmp6hdr->data = lwip_htonl(data); + + /* copy fields from original packet */ + SMEMCPY((u8_t *)q->payload + sizeof(struct icmp6_hdr), (u8_t *)p->payload, + IP6_HLEN + LWIP_ICMP6_DATASIZE); + + /* calculate checksum */ + icmp6hdr->chksum = 0; +#if CHECKSUM_GEN_ICMP6 + IF__NETIF_CHECKSUM_ENABLED(netif, NETIF_CHECKSUM_GEN_ICMP6) { + icmp6hdr->chksum = ip6_chksum_pseudo(q, IP6_NEXTH_ICMP6, q->tot_len, + reply_src, reply_dest); + } +#endif /* CHECKSUM_GEN_ICMP6 */ + + ICMP6_STATS_INC(icmp6.xmit); + ip6_output_if(q, reply_src, reply_dest, LWIP_ICMP6_HL, 0, IP6_NEXTH_ICMP6, netif); + pbuf_free(q); +} + +#endif /* LWIP_ICMP6 && LWIP_IPV6 */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/ipv6/inet6.c b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/ipv6/inet6.c new file mode 100644 index 0000000..7cb16aa --- /dev/null +++ 
b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/ipv6/inet6.c @@ -0,0 +1,53 @@ +/** + * @file + * + * INET v6 addresses. + */ + +/* + * Copyright (c) 2010 Inico Technologies Ltd. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Ivan Delamer + * + * + * Please coordinate changes and requests with Ivan Delamer + * + */ + +#include "lwip/opt.h" + +#if LWIP_IPV6 && LWIP_SOCKET /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/def.h" +#include "lwip/inet.h" + +/** This variable is initialized by the system to contain the wildcard IPv6 address. + */ +const struct in6_addr in6addr_any = IN6ADDR_ANY_INIT; + +#endif /* LWIP_IPV6 */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/ipv6/ip6.c b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/ipv6/ip6.c new file mode 100644 index 0000000..b0906a3 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/ipv6/ip6.c @@ -0,0 +1,1492 @@ +/** + * @file + * + * IPv6 layer. + */ + +/* + * Copyright (c) 2010 Inico Technologies Ltd. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Ivan Delamer + * + * + * Please coordinate changes and requests with Ivan Delamer + * + */ + +#include "lwip/opt.h" + +#if LWIP_IPV6 /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/def.h" +#include "lwip/mem.h" +#include "lwip/netif.h" +#include "lwip/ip.h" +#include "lwip/ip6.h" +#include "lwip/ip6_addr.h" +#include "lwip/ip6_frag.h" +#include "lwip/icmp6.h" +#include "lwip/priv/raw_priv.h" +#include "lwip/udp.h" +#include "lwip/priv/tcp_priv.h" +#include "lwip/dhcp6.h" +#include "lwip/nd6.h" +#include "lwip/mld6.h" +#include "lwip/debug.h" +#include "lwip/stats.h" + +#ifdef LWIP_HOOK_FILENAME +#include LWIP_HOOK_FILENAME +#endif + +/** + * Finds the appropriate network interface for a given IPv6 address. It tries to select + * a netif following a sequence of heuristics: + * 1) if there is only 1 netif, return it + * 2) if the destination is a zoned address, match its zone to a netif + * 3) if the either the source or destination address is a scoped address, + * match the source address's zone (if set) or address (if not) to a netif + * 4) tries to match the destination subnet to a configured address + * 5) tries to find a router-announced route + * 6) tries to match the (unscoped) source address to the netif + * 7) returns the default netif, if configured + * + * Note that each of the two given addresses may or may not be properly zoned. + * + * @param src the source IPv6 address, if known + * @param dest the destination IPv6 address for which to find the route + * @return the netif on which to send to reach dest + */ +struct netif * +ip6_route(const ip6_addr_t *src, const ip6_addr_t *dest) +{ +#if LWIP_SINGLE_NETIF + LWIP_UNUSED_ARG(src); + LWIP_UNUSED_ARG(dest); +#else /* LWIP_SINGLE_NETIF */ + struct netif *netif; + s8_t i; + + LWIP_ASSERT_CORE_LOCKED(); + + /* If single netif configuration, fast return. */ + if ((netif_list != NULL) && (netif_list->next == NULL)) { + if (!netif_is_up(netif_list) || !netif_is_link_up(netif_list) || + (ip6_addr_has_zone(dest) && !ip6_addr_test_zone(dest, netif_list))) { + return NULL; + } + return netif_list; + } + +#if LWIP_IPV6_SCOPES + /* Special processing for zoned destination addresses. This includes link- + * local unicast addresses and interface/link-local multicast addresses. Use + * the zone to find a matching netif. If the address is not zoned, then there + * is technically no "wrong" netif to choose, and we leave routing to other + * rules; in most cases this should be the scoped-source rule below. */ + if (ip6_addr_has_zone(dest)) { + IP6_ADDR_ZONECHECK(dest); + /* Find a netif based on the zone. For custom mappings, one zone may map + * to multiple netifs, so find one that can actually send a packet. 
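+ * For example (sketch, with a hypothetical zoned link-local address lladdr):
+ * a destination such as fe80::1 that arrived with zone information is only
+ * ever routed out the netif whose zone matches, so if that netif is down,
+ *
+ *   ip6_route(IP6_ADDR_ANY6, &lladdr)
+ *
+ * returns NULL instead of picking another interface.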
*/ + NETIF_FOREACH(netif) { + if (ip6_addr_test_zone(dest, netif) && + netif_is_up(netif) && netif_is_link_up(netif)) { + return netif; + } + } + /* No matching netif found. Do no try to route to a different netif, + * as that would be a zone violation, resulting in any packets sent to + * that netif being dropped on output. */ + return NULL; + } +#endif /* LWIP_IPV6_SCOPES */ + + /* Special processing for scoped source and destination addresses. If we get + * here, the destination address does not have a zone, so either way we need + * to look at the source address, which may or may not have a zone. If it + * does, the zone is restrictive: there is (typically) only one matching + * netif for it, and we should avoid routing to any other netif as that would + * result in guaranteed zone violations. For scoped source addresses that do + * not have a zone, use (only) a netif that has that source address locally + * assigned. This case also applies to the loopback source address, which has + * an implied link-local scope. If only the destination address is scoped + * (but, again, not zoned), we still want to use only the source address to + * determine its zone because that's most likely what the user/application + * wants, regardless of whether the source address is scoped. Finally, some + * of this story also applies if scoping is disabled altogether. */ +#if LWIP_IPV6_SCOPES + if (ip6_addr_has_scope(dest, IP6_UNKNOWN) || + ip6_addr_has_scope(src, IP6_UNICAST) || +#else /* LWIP_IPV6_SCOPES */ + if (ip6_addr_islinklocal(dest) || ip6_addr_ismulticast_iflocal(dest) || + ip6_addr_ismulticast_linklocal(dest) || ip6_addr_islinklocal(src) || +#endif /* LWIP_IPV6_SCOPES */ + ip6_addr_isloopback(src)) { +#if LWIP_IPV6_SCOPES + if (ip6_addr_has_zone(src)) { + /* Find a netif matching the source zone (relatively cheap). */ + NETIF_FOREACH(netif) { + if (netif_is_up(netif) && netif_is_link_up(netif) && + ip6_addr_test_zone(src, netif)) { + return netif; + } + } + } else +#endif /* LWIP_IPV6_SCOPES */ + { + /* Find a netif matching the source address (relatively expensive). */ + NETIF_FOREACH(netif) { + if (!netif_is_up(netif) || !netif_is_link_up(netif)) { + continue; + } + for (i = 0; i < LWIP_IPV6_NUM_ADDRESSES; i++) { + if (ip6_addr_isvalid(netif_ip6_addr_state(netif, i)) && + ip6_addr_cmp_zoneless(src, netif_ip6_addr(netif, i))) { + return netif; + } + } + } + } + /* Again, do not use any other netif in this case, as that could result in + * zone boundary violations. */ + return NULL; + } + + /* We come here only if neither source nor destination is scoped. */ + IP6_ADDR_ZONECHECK(src); + +#ifdef LWIP_HOOK_IP6_ROUTE + netif = LWIP_HOOK_IP6_ROUTE(src, dest); + if (netif != NULL) { + return netif; + } +#endif + + /* See if the destination subnet matches a configured address. In accordance + * with RFC 5942, dynamically configured addresses do not have an implied + * local subnet, and thus should be considered /128 assignments. However, as + * such, the destination address may still match a local address, and so we + * still need to check for exact matches here. By (lwIP) policy, statically + * configured addresses do always have an implied local /64 subnet. 
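+ * A worked illustration with hypothetical documentation addresses:
+ *
+ *   netif addr  : 2001:db8:1::5   (static, implied /64)
+ *   destination : 2001:db8:1::77  -> same /64, this netif is selected below
+ *   destination : 2001:db8:2::77  -> no match, fall through to nd6_find_route()
+ *
+ * An autoconfigured address would only match a destination that equals it
+ * exactly (/128).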
*/ + NETIF_FOREACH(netif) { + if (!netif_is_up(netif) || !netif_is_link_up(netif)) { + continue; + } + for (i = 0; i < LWIP_IPV6_NUM_ADDRESSES; i++) { + if (ip6_addr_isvalid(netif_ip6_addr_state(netif, i)) && + ip6_addr_netcmp(dest, netif_ip6_addr(netif, i)) && + (netif_ip6_addr_isstatic(netif, i) || + ip6_addr_nethostcmp(dest, netif_ip6_addr(netif, i)))) { + return netif; + } + } + } + + /* Get the netif for a suitable router-announced route. */ + netif = nd6_find_route(dest); + if (netif != NULL) { + return netif; + } + + /* Try with the netif that matches the source address. Given the earlier rule + * for scoped source addresses, this applies to unscoped addresses only. */ + if (!ip6_addr_isany(src)) { + NETIF_FOREACH(netif) { + if (!netif_is_up(netif) || !netif_is_link_up(netif)) { + continue; + } + for (i = 0; i < LWIP_IPV6_NUM_ADDRESSES; i++) { + if (ip6_addr_isvalid(netif_ip6_addr_state(netif, i)) && + ip6_addr_cmp(src, netif_ip6_addr(netif, i))) { + return netif; + } + } + } + } + +#if LWIP_NETIF_LOOPBACK && !LWIP_HAVE_LOOPIF + /* loopif is disabled, loopback traffic is passed through any netif */ + if (ip6_addr_isloopback(dest)) { + /* don't check for link on loopback traffic */ + if (netif_default != NULL && netif_is_up(netif_default)) { + return netif_default; + } + /* default netif is not up, just use any netif for loopback traffic */ + NETIF_FOREACH(netif) { + if (netif_is_up(netif)) { + return netif; + } + } + return NULL; + } +#endif /* LWIP_NETIF_LOOPBACK && !LWIP_HAVE_LOOPIF */ +#endif /* !LWIP_SINGLE_NETIF */ + + /* no matching netif found, use default netif, if up */ + if ((netif_default == NULL) || !netif_is_up(netif_default) || !netif_is_link_up(netif_default)) { + return NULL; + } + return netif_default; +} + +/** + * @ingroup ip6 + * Select the best IPv6 source address for a given destination IPv6 address. + * + * This implementation follows RFC 6724 Sec. 5 to the following extent: + * - Rules 1, 2, 3: fully implemented + * - Rules 4, 5, 5.5: not applicable + * - Rule 6: not implemented + * - Rule 7: not applicable + * - Rule 8: limited to "prefer /64 subnet match over non-match" + * + * For Rule 2, we deliberately deviate from RFC 6724 Sec. 3.1 by considering + * ULAs to be of smaller scope than global addresses, to avoid that a preferred + * ULA is picked over a deprecated global address when given a global address + * as destination, as that would likely result in broken two-way communication. + * + * As long as temporary addresses are not supported (as used in Rule 7), a + * proper implementation of Rule 8 would obviate the need to implement Rule 6. + * + * @param netif the netif on which to send a packet + * @param dest the destination we are trying to reach (possibly not properly + * zoned) + * @return the most suitable source address to use, or NULL if no suitable + * source address is found + */ +const ip_addr_t * +ip6_select_source_address(struct netif *netif, const ip6_addr_t *dest) +{ + const ip_addr_t *best_addr; + const ip6_addr_t *cand_addr; + s8_t dest_scope, cand_scope; + s8_t best_scope = IP6_MULTICAST_SCOPE_RESERVED; + u8_t i, cand_pref, cand_bits; + u8_t best_pref = 0; + u8_t best_bits = 0; + + /* Start by determining the scope of the given destination address. These + * tests are hopefully (roughly) in order of likeliness to match. 
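+ * As a worked illustration (hypothetical addresses), with fe80::1 and
+ * 2001:db8::1 both assigned and valid on the netif:
+ *
+ *   dest = 2001:db8::99  (global scope)      -> 2001:db8::1 is chosen (Rule 2)
+ *   dest = ff02::1       (link-local scope)  -> fe80::1 is chosen (Rule 2)
+ *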
*/ + if (ip6_addr_isglobal(dest)) { + dest_scope = IP6_MULTICAST_SCOPE_GLOBAL; + } else if (ip6_addr_islinklocal(dest) || ip6_addr_isloopback(dest)) { + dest_scope = IP6_MULTICAST_SCOPE_LINK_LOCAL; + } else if (ip6_addr_isuniquelocal(dest)) { + dest_scope = IP6_MULTICAST_SCOPE_ORGANIZATION_LOCAL; + } else if (ip6_addr_ismulticast(dest)) { + dest_scope = ip6_addr_multicast_scope(dest); + } else if (ip6_addr_issitelocal(dest)) { + dest_scope = IP6_MULTICAST_SCOPE_SITE_LOCAL; + } else { + /* no match, consider scope global */ + dest_scope = IP6_MULTICAST_SCOPE_GLOBAL; + } + + best_addr = NULL; + + for (i = 0; i < LWIP_IPV6_NUM_ADDRESSES; i++) { + /* Consider only valid (= preferred and deprecated) addresses. */ + if (!ip6_addr_isvalid(netif_ip6_addr_state(netif, i))) { + continue; + } + /* Determine the scope of this candidate address. Same ordering idea. */ + cand_addr = netif_ip6_addr(netif, i); + if (ip6_addr_isglobal(cand_addr)) { + cand_scope = IP6_MULTICAST_SCOPE_GLOBAL; + } else if (ip6_addr_islinklocal(cand_addr)) { + cand_scope = IP6_MULTICAST_SCOPE_LINK_LOCAL; + } else if (ip6_addr_isuniquelocal(cand_addr)) { + cand_scope = IP6_MULTICAST_SCOPE_ORGANIZATION_LOCAL; + } else if (ip6_addr_issitelocal(cand_addr)) { + cand_scope = IP6_MULTICAST_SCOPE_SITE_LOCAL; + } else { + /* no match, treat as low-priority global scope */ + cand_scope = IP6_MULTICAST_SCOPE_RESERVEDF; + } + cand_pref = ip6_addr_ispreferred(netif_ip6_addr_state(netif, i)); + /* @todo compute the actual common bits, for longest matching prefix. */ + /* We cannot count on the destination address having a proper zone + * assignment, so do not compare zones in this case. */ + cand_bits = ip6_addr_netcmp_zoneless(cand_addr, dest); /* just 1 or 0 for now */ + if (cand_bits && ip6_addr_nethostcmp(cand_addr, dest)) { + return netif_ip_addr6(netif, i); /* Rule 1 */ + } + if ((best_addr == NULL) || /* no alternative yet */ + ((cand_scope < best_scope) && (cand_scope >= dest_scope)) || + ((cand_scope > best_scope) && (best_scope < dest_scope)) || /* Rule 2 */ + ((cand_scope == best_scope) && ((cand_pref > best_pref) || /* Rule 3 */ + ((cand_pref == best_pref) && (cand_bits > best_bits))))) { /* Rule 8 */ + /* We found a new "winning" candidate. */ + best_addr = netif_ip_addr6(netif, i); + best_scope = cand_scope; + best_pref = cand_pref; + best_bits = cand_bits; + } + } + + return best_addr; /* may be NULL */ +} + +#if LWIP_IPV6_FORWARD +/** + * Forwards an IPv6 packet. It finds an appropriate route for the + * packet, decrements the HL value of the packet, and outputs + * the packet on the appropriate interface. + * + * @param p the packet to forward (p->payload points to IP header) + * @param iphdr the IPv6 header of the input packet + * @param inp the netif on which this packet was received + */ +static void +ip6_forward(struct pbuf *p, struct ip6_hdr *iphdr, struct netif *inp) +{ + struct netif *netif; + + /* do not forward link-local or loopback addresses */ + if (ip6_addr_islinklocal(ip6_current_dest_addr()) || + ip6_addr_isloopback(ip6_current_dest_addr())) { + LWIP_DEBUGF(IP6_DEBUG, ("ip6_forward: not forwarding link-local address.\n")); + IP6_STATS_INC(ip6.rterr); + IP6_STATS_INC(ip6.drop); + return; + } + + /* Find network interface where to forward this IP packet to. 
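+ * (This forwarding path is only compiled when LWIP_IPV6_FORWARD is set, e.g.
+ * via a lwipopts.h entry such as:
+ *
+ *   #define LWIP_IPV6_FORWARD   1
+ *
+ * The lookup below reuses ip6_route() with an unspecified source address.)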
*/ + netif = ip6_route(IP6_ADDR_ANY6, ip6_current_dest_addr()); + if (netif == NULL) { + LWIP_DEBUGF(IP6_DEBUG, ("ip6_forward: no route for %"X16_F":%"X16_F":%"X16_F":%"X16_F":%"X16_F":%"X16_F":%"X16_F":%"X16_F"\n", + IP6_ADDR_BLOCK1(ip6_current_dest_addr()), + IP6_ADDR_BLOCK2(ip6_current_dest_addr()), + IP6_ADDR_BLOCK3(ip6_current_dest_addr()), + IP6_ADDR_BLOCK4(ip6_current_dest_addr()), + IP6_ADDR_BLOCK5(ip6_current_dest_addr()), + IP6_ADDR_BLOCK6(ip6_current_dest_addr()), + IP6_ADDR_BLOCK7(ip6_current_dest_addr()), + IP6_ADDR_BLOCK8(ip6_current_dest_addr()))); +#if LWIP_ICMP6 + /* Don't send ICMP messages in response to ICMP messages */ + if (IP6H_NEXTH(iphdr) != IP6_NEXTH_ICMP6) { + icmp6_dest_unreach(p, ICMP6_DUR_NO_ROUTE); + } +#endif /* LWIP_ICMP6 */ + IP6_STATS_INC(ip6.rterr); + IP6_STATS_INC(ip6.drop); + return; + } +#if LWIP_IPV6_SCOPES + /* Do not forward packets with a zoned (e.g., link-local) source address + * outside of their zone. We determined the zone a bit earlier, so we know + * that the address is properly zoned here, so we can safely use has_zone. + * Also skip packets with a loopback source address (link-local implied). */ + if ((ip6_addr_has_zone(ip6_current_src_addr()) && + !ip6_addr_test_zone(ip6_current_src_addr(), netif)) || + ip6_addr_isloopback(ip6_current_src_addr())) { + LWIP_DEBUGF(IP6_DEBUG, ("ip6_forward: not forwarding packet beyond its source address zone.\n")); + IP6_STATS_INC(ip6.rterr); + IP6_STATS_INC(ip6.drop); + return; + } +#endif /* LWIP_IPV6_SCOPES */ + /* Do not forward packets onto the same network interface on which + * they arrived. */ + if (netif == inp) { + LWIP_DEBUGF(IP6_DEBUG, ("ip6_forward: not bouncing packets back on incoming interface.\n")); + IP6_STATS_INC(ip6.rterr); + IP6_STATS_INC(ip6.drop); + return; + } + + /* decrement HL */ + IP6H_HOPLIM_SET(iphdr, IP6H_HOPLIM(iphdr) - 1); + /* send ICMP6 if HL == 0 */ + if (IP6H_HOPLIM(iphdr) == 0) { +#if LWIP_ICMP6 + /* Don't send ICMP messages in response to ICMP messages */ + if (IP6H_NEXTH(iphdr) != IP6_NEXTH_ICMP6) { + icmp6_time_exceeded(p, ICMP6_TE_HL); + } +#endif /* LWIP_ICMP6 */ + IP6_STATS_INC(ip6.drop); + return; + } + + if (netif->mtu && (p->tot_len > netif->mtu)) { +#if LWIP_ICMP6 + /* Don't send ICMP messages in response to ICMP messages */ + if (IP6H_NEXTH(iphdr) != IP6_NEXTH_ICMP6) { + icmp6_packet_too_big(p, netif->mtu); + } +#endif /* LWIP_ICMP6 */ + IP6_STATS_INC(ip6.drop); + return; + } + + LWIP_DEBUGF(IP6_DEBUG, ("ip6_forward: forwarding packet to %"X16_F":%"X16_F":%"X16_F":%"X16_F":%"X16_F":%"X16_F":%"X16_F":%"X16_F"\n", + IP6_ADDR_BLOCK1(ip6_current_dest_addr()), + IP6_ADDR_BLOCK2(ip6_current_dest_addr()), + IP6_ADDR_BLOCK3(ip6_current_dest_addr()), + IP6_ADDR_BLOCK4(ip6_current_dest_addr()), + IP6_ADDR_BLOCK5(ip6_current_dest_addr()), + IP6_ADDR_BLOCK6(ip6_current_dest_addr()), + IP6_ADDR_BLOCK7(ip6_current_dest_addr()), + IP6_ADDR_BLOCK8(ip6_current_dest_addr()))); + + /* transmit pbuf on chosen interface */ + netif->output_ip6(netif, p, ip6_current_dest_addr()); + IP6_STATS_INC(ip6.fw); + IP6_STATS_INC(ip6.xmit); + return; +} +#endif /* LWIP_IPV6_FORWARD */ + +/** Return true if the current input packet should be accepted on this netif */ +static int +ip6_input_accept(struct netif *netif) +{ + /* interface is up? */ + if (netif_is_up(netif)) { + u8_t i; + /* unicast to this interface address? address configured? 
*/ + /* If custom scopes are used, the destination zone will be tested as + * part of the local-address comparison, but we need to test the source + * scope as well (e.g., is this interface on the same link?). */ + for (i = 0; i < LWIP_IPV6_NUM_ADDRESSES; i++) { + if (ip6_addr_isvalid(netif_ip6_addr_state(netif, i)) && + ip6_addr_cmp(ip6_current_dest_addr(), netif_ip6_addr(netif, i)) +#if IPV6_CUSTOM_SCOPES + && (!ip6_addr_has_zone(ip6_current_src_addr()) || + ip6_addr_test_zone(ip6_current_src_addr(), netif)) +#endif /* IPV6_CUSTOM_SCOPES */ + ) { + /* accept on this netif */ + return 1; + } + } + } + return 0; +} + +/** + * This function is called by the network interface device driver when + * an IPv6 packet is received. The function does the basic checks of the + * IP header such as packet size being at least larger than the header + * size etc. If the packet was not destined for us, the packet is + * forwarded (using ip6_forward). + * + * Finally, the packet is sent to the upper layer protocol input function. + * + * @param p the received IPv6 packet (p->payload points to IPv6 header) + * @param inp the netif on which this packet was received + * @return ERR_OK if the packet was processed (could return ERR_* if it wasn't + * processed, but currently always returns ERR_OK) + */ +err_t +ip6_input(struct pbuf *p, struct netif *inp) +{ + struct ip6_hdr *ip6hdr; + struct netif *netif; + const u8_t *nexth; + u16_t hlen, hlen_tot; /* the current header length */ +#if 0 /*IP_ACCEPT_LINK_LAYER_ADDRESSING*/ + @todo + int check_ip_src=1; +#endif /* IP_ACCEPT_LINK_LAYER_ADDRESSING */ +#if LWIP_RAW + raw_input_state_t raw_status; +#endif /* LWIP_RAW */ + + LWIP_ASSERT_CORE_LOCKED(); + + IP6_STATS_INC(ip6.recv); + + /* identify the IP header */ + ip6hdr = (struct ip6_hdr *)p->payload; + if (IP6H_V(ip6hdr) != 6) { + LWIP_DEBUGF(IP6_DEBUG | LWIP_DBG_LEVEL_WARNING, ("IPv6 packet dropped due to bad version number %"U32_F"\n", + IP6H_V(ip6hdr))); + pbuf_free(p); + IP6_STATS_INC(ip6.err); + IP6_STATS_INC(ip6.drop); + return ERR_OK; + } + +#ifdef LWIP_HOOK_IP6_INPUT + if (LWIP_HOOK_IP6_INPUT(p, inp)) { + /* the packet has been eaten */ + return ERR_OK; + } +#endif + + /* header length exceeds first pbuf length, or ip length exceeds total pbuf length? */ + if ((IP6_HLEN > p->len) || (IP6H_PLEN(ip6hdr) > (p->tot_len - IP6_HLEN))) { + if (IP6_HLEN > p->len) { + LWIP_DEBUGF(IP6_DEBUG | LWIP_DBG_LEVEL_SERIOUS, + ("IPv6 header (len %"U16_F") does not fit in first pbuf (len %"U16_F"), IP packet dropped.\n", + (u16_t)IP6_HLEN, p->len)); + } + if ((IP6H_PLEN(ip6hdr) + IP6_HLEN) > p->tot_len) { + LWIP_DEBUGF(IP6_DEBUG | LWIP_DBG_LEVEL_SERIOUS, + ("IPv6 (plen %"U16_F") is longer than pbuf (len %"U16_F"), IP packet dropped.\n", + (u16_t)(IP6H_PLEN(ip6hdr) + IP6_HLEN), p->tot_len)); + } + /* free (drop) packet pbufs */ + pbuf_free(p); + IP6_STATS_INC(ip6.lenerr); + IP6_STATS_INC(ip6.drop); + return ERR_OK; + } + + /* Trim pbuf. This should have been done at the netif layer, + * but we'll do it anyway just to be sure that its done. */ + pbuf_realloc(p, (u16_t)(IP6_HLEN + IP6H_PLEN(ip6hdr))); + + /* copy IP addresses to aligned ip6_addr_t */ + ip_addr_copy_from_ip6_packed(ip_data.current_iphdr_dest, ip6hdr->dest); + ip_addr_copy_from_ip6_packed(ip_data.current_iphdr_src, ip6hdr->src); + + /* Don't accept virtual IPv4 mapped IPv6 addresses. + * Don't accept multicast source addresses. 
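+ * For instance (hypothetical addresses), the check below drops:
+ *
+ *   src or dest = ::ffff:192.0.2.1  (IPv4-mapped)       -> dropped
+ *   src         = ff02::1           (multicast source)  -> dropped
+ *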
*/ + if (ip6_addr_isipv4mappedipv6(ip_2_ip6(&ip_data.current_iphdr_dest)) || + ip6_addr_isipv4mappedipv6(ip_2_ip6(&ip_data.current_iphdr_src)) || + ip6_addr_ismulticast(ip_2_ip6(&ip_data.current_iphdr_src))) { + /* free (drop) packet pbufs */ + pbuf_free(p); + IP6_STATS_INC(ip6.err); + IP6_STATS_INC(ip6.drop); + return ERR_OK; + } + + /* Set the appropriate zone identifier on the addresses. */ + ip6_addr_assign_zone(ip_2_ip6(&ip_data.current_iphdr_dest), IP6_UNKNOWN, inp); + ip6_addr_assign_zone(ip_2_ip6(&ip_data.current_iphdr_src), IP6_UNICAST, inp); + + /* current header pointer. */ + ip_data.current_ip6_header = ip6hdr; + + /* In netif, used in case we need to send ICMPv6 packets back. */ + ip_data.current_netif = inp; + ip_data.current_input_netif = inp; + + /* match packet against an interface, i.e. is this packet for us? */ + if (ip6_addr_ismulticast(ip6_current_dest_addr())) { + /* Always joined to multicast if-local and link-local all-nodes group. */ + if (ip6_addr_isallnodes_iflocal(ip6_current_dest_addr()) || + ip6_addr_isallnodes_linklocal(ip6_current_dest_addr())) { + netif = inp; + } +#if LWIP_IPV6_MLD + else if (mld6_lookfor_group(inp, ip6_current_dest_addr())) { + netif = inp; + } +#else /* LWIP_IPV6_MLD */ + else if (ip6_addr_issolicitednode(ip6_current_dest_addr())) { + u8_t i; + /* Filter solicited node packets when MLD is not enabled + * (for Neighbor discovery). */ + netif = NULL; + for (i = 0; i < LWIP_IPV6_NUM_ADDRESSES; i++) { + if (ip6_addr_isvalid(netif_ip6_addr_state(inp, i)) && + ip6_addr_cmp_solicitednode(ip6_current_dest_addr(), netif_ip6_addr(inp, i))) { + netif = inp; + LWIP_DEBUGF(IP6_DEBUG, ("ip6_input: solicited node packet accepted on interface %c%c\n", + netif->name[0], netif->name[1])); + break; + } + } + } +#endif /* LWIP_IPV6_MLD */ + else { + netif = NULL; + } + } else { + /* start trying with inp. if that's not acceptable, start walking the + list of configured netifs. */ + if (ip6_input_accept(inp)) { + netif = inp; + } else { + netif = NULL; +#if !IPV6_CUSTOM_SCOPES + /* Shortcut: stop looking for other interfaces if either the source or + * the destination has a scope constrained to this interface. Custom + * scopes may break the 1:1 link/interface mapping, however. */ + if (ip6_addr_islinklocal(ip6_current_dest_addr()) || + ip6_addr_islinklocal(ip6_current_src_addr())) { + goto netif_found; + } +#endif /* !IPV6_CUSTOM_SCOPES */ +#if !LWIP_NETIF_LOOPBACK || LWIP_HAVE_LOOPIF + /* The loopback address is to be considered link-local. Packets to it + * should be dropped on other interfaces, as per RFC 4291 Sec. 2.5.3. + * Its implied scope means packets *from* the loopback address should + * not be accepted on other interfaces, either. These requirements + * cannot be implemented in the case that loopback traffic is sent + * across a non-loopback interface, however. */ + if (ip6_addr_isloopback(ip6_current_dest_addr()) || + ip6_addr_isloopback(ip6_current_src_addr())) { + goto netif_found; + } +#endif /* !LWIP_NETIF_LOOPBACK || LWIP_HAVE_LOOPIF */ +#if !LWIP_SINGLE_NETIF + NETIF_FOREACH(netif) { + if (netif == inp) { + /* we checked that before already */ + continue; + } + if (ip6_input_accept(netif)) { + break; + } + } +#endif /* !LWIP_SINGLE_NETIF */ + } +netif_found: + LWIP_DEBUGF(IP6_DEBUG, ("ip6_input: packet accepted on interface %c%c\n", + netif ? netif->name[0] : 'X', netif? netif->name[1] : 'X')); + } + + /* "::" packet source address? 
(used in duplicate address detection) */ + if (ip6_addr_isany(ip6_current_src_addr()) && + (!ip6_addr_issolicitednode(ip6_current_dest_addr()))) { + /* packet source is not valid */ + /* free (drop) packet pbufs */ + LWIP_DEBUGF(IP6_DEBUG, ("ip6_input: packet with src ANY_ADDRESS dropped\n")); + pbuf_free(p); + IP6_STATS_INC(ip6.drop); + goto ip6_input_cleanup; + } + + /* packet not for us? */ + if (netif == NULL) { + /* packet not for us, route or discard */ + LWIP_DEBUGF(IP6_DEBUG | LWIP_DBG_TRACE, ("ip6_input: packet not for us.\n")); +#if LWIP_IPV6_FORWARD + /* non-multicast packet? */ + if (!ip6_addr_ismulticast(ip6_current_dest_addr())) { + /* try to forward IP packet on (other) interfaces */ + ip6_forward(p, ip6hdr, inp); + } +#endif /* LWIP_IPV6_FORWARD */ + pbuf_free(p); + goto ip6_input_cleanup; + } + + /* current netif pointer. */ + ip_data.current_netif = netif; + + /* Save next header type. */ + nexth = &IP6H_NEXTH(ip6hdr); + + /* Init header length. */ + hlen = hlen_tot = IP6_HLEN; + + /* Move to payload. */ + pbuf_remove_header(p, IP6_HLEN); + + /* Process known option extension headers, if present. */ + while (*nexth != IP6_NEXTH_NONE) + { + switch (*nexth) { + case IP6_NEXTH_HOPBYHOP: + { + s32_t opt_offset; + struct ip6_hbh_hdr *hbh_hdr; + struct ip6_opt_hdr *opt_hdr; + LWIP_DEBUGF(IP6_DEBUG, ("ip6_input: packet with Hop-by-Hop options header\n")); + + /* Get and check the header length, while staying in packet bounds. */ + hbh_hdr = (struct ip6_hbh_hdr *)p->payload; + + /* Get next header type. */ + nexth = &IP6_HBH_NEXTH(hbh_hdr); + + /* Get the header length. */ + hlen = (u16_t)(8 * (1 + hbh_hdr->_hlen)); + + if ((p->len < 8) || (hlen > p->len)) { + LWIP_DEBUGF(IP6_DEBUG | LWIP_DBG_LEVEL_SERIOUS, + ("IPv6 options header (hlen %"U16_F") does not fit in first pbuf (len %"U16_F"), IPv6 packet dropped.\n", + hlen, p->len)); + /* free (drop) packet pbufs */ + pbuf_free(p); + IP6_STATS_INC(ip6.lenerr); + IP6_STATS_INC(ip6.drop); + goto ip6_input_cleanup; + } + + hlen_tot = (u16_t)(hlen_tot + hlen); + + /* The extended option header starts right after Hop-by-Hop header. */ + opt_offset = IP6_HBH_HLEN; + while (opt_offset < hlen) + { + s32_t opt_dlen = 0; + + opt_hdr = (struct ip6_opt_hdr *)((u8_t *)hbh_hdr + opt_offset); + + switch (IP6_OPT_TYPE(opt_hdr)) { + /* @todo: process IPV6 Hop-by-Hop option data */ + case IP6_PAD1_OPTION: + /* PAD1 option doesn't have length and value field */ + opt_dlen = -1; + break; + case IP6_PADN_OPTION: + opt_dlen = IP6_OPT_DLEN(opt_hdr); + break; + case IP6_ROUTER_ALERT_OPTION: + opt_dlen = IP6_OPT_DLEN(opt_hdr); + break; + case IP6_JUMBO_OPTION: + opt_dlen = IP6_OPT_DLEN(opt_hdr); + break; + default: + /* Check 2 MSB of Hop-by-Hop header type. */ + switch (IP6_OPT_TYPE_ACTION(opt_hdr)) { + case 1: + /* Discard the packet. 
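+ * (The case labels in this switch come from the two most-significant bits of
+ * the option type: 0 = skip, 1 = discard silently, 2 = discard and send an
+ * ICMPv6 Parameter Problem, 3 = the same but only when the destination is not
+ * multicast. Worked example with a hypothetical unrecognized option type:
+ *
+ *   IP6_OPT_TYPE(opt_hdr) == 0x80  ->  0x80 >> 6 == 2  ->  case 2 below
+ *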
*/ + LWIP_DEBUGF(IP6_DEBUG, ("ip6_input: packet with invalid Hop-by-Hop option type dropped.\n")); + pbuf_free(p); + IP6_STATS_INC(ip6.drop); + goto ip6_input_cleanup; + case 2: + /* Send ICMP Parameter Problem */ + icmp6_param_problem(p, ICMP6_PP_OPTION, opt_hdr); + LWIP_DEBUGF(IP6_DEBUG, ("ip6_input: packet with invalid Hop-by-Hop option type dropped.\n")); + pbuf_free(p); + IP6_STATS_INC(ip6.drop); + goto ip6_input_cleanup; + case 3: + /* Send ICMP Parameter Problem if destination address is not a multicast address */ + if (!ip6_addr_ismulticast(ip6_current_dest_addr())) { + icmp6_param_problem(p, ICMP6_PP_OPTION, opt_hdr); + } + LWIP_DEBUGF(IP6_DEBUG, ("ip6_input: packet with invalid Hop-by-Hop option type dropped.\n")); + pbuf_free(p); + IP6_STATS_INC(ip6.drop); + goto ip6_input_cleanup; + default: + /* Skip over this option. */ + opt_dlen = IP6_OPT_DLEN(opt_hdr); + break; + } + break; + } + + /* Adjust the offset to move to the next extended option header */ + opt_offset = opt_offset + IP6_OPT_HLEN + opt_dlen; + } + pbuf_remove_header(p, hlen); + break; + } + case IP6_NEXTH_DESTOPTS: + { + s32_t opt_offset; + struct ip6_dest_hdr *dest_hdr; + struct ip6_opt_hdr *opt_hdr; + LWIP_DEBUGF(IP6_DEBUG, ("ip6_input: packet with Destination options header\n")); + + dest_hdr = (struct ip6_dest_hdr *)p->payload; + + /* Get next header type. */ + nexth = &IP6_DEST_NEXTH(dest_hdr); + + /* Get the header length. */ + hlen = 8 * (1 + dest_hdr->_hlen); + if ((p->len < 8) || (hlen > p->len)) { + LWIP_DEBUGF(IP6_DEBUG | LWIP_DBG_LEVEL_SERIOUS, + ("IPv6 options header (hlen %"U16_F") does not fit in first pbuf (len %"U16_F"), IPv6 packet dropped.\n", + hlen, p->len)); + /* free (drop) packet pbufs */ + pbuf_free(p); + IP6_STATS_INC(ip6.lenerr); + IP6_STATS_INC(ip6.drop); + goto ip6_input_cleanup; + } + + hlen_tot = (u16_t)(hlen_tot + hlen); + + /* The extended option header starts right after Destination header. */ + opt_offset = IP6_DEST_HLEN; + while (opt_offset < hlen) + { + s32_t opt_dlen = 0; + + opt_hdr = (struct ip6_opt_hdr *)((u8_t *)dest_hdr + opt_offset); + + switch (IP6_OPT_TYPE(opt_hdr)) + { + /* @todo: process IPV6 Destination option data */ + case IP6_PAD1_OPTION: + /* PAD1 option deosn't have length and value field */ + opt_dlen = -1; + break; + case IP6_PADN_OPTION: + opt_dlen = IP6_OPT_DLEN(opt_hdr); + break; + case IP6_ROUTER_ALERT_OPTION: + opt_dlen = IP6_OPT_DLEN(opt_hdr); + break; + case IP6_JUMBO_OPTION: + opt_dlen = IP6_OPT_DLEN(opt_hdr); + break; + case IP6_HOME_ADDRESS_OPTION: + opt_dlen = IP6_OPT_DLEN(opt_hdr); + break; + default: + /* Check 2 MSB of Destination header type. */ + switch (IP6_OPT_TYPE_ACTION(opt_hdr)) + { + case 1: + /* Discard the packet. 
*/ + LWIP_DEBUGF(IP6_DEBUG, ("ip6_input: packet with invalid destination option type dropped.\n")); + pbuf_free(p); + IP6_STATS_INC(ip6.drop); + goto ip6_input_cleanup; + case 2: + /* Send ICMP Parameter Problem */ + icmp6_param_problem(p, ICMP6_PP_OPTION, opt_hdr); + LWIP_DEBUGF(IP6_DEBUG, ("ip6_input: packet with invalid destination option type dropped.\n")); + pbuf_free(p); + IP6_STATS_INC(ip6.drop); + goto ip6_input_cleanup; + case 3: + /* Send ICMP Parameter Problem if destination address is not a multicast address */ + if (!ip6_addr_ismulticast(ip6_current_dest_addr())) { + icmp6_param_problem(p, ICMP6_PP_OPTION, opt_hdr); + } + LWIP_DEBUGF(IP6_DEBUG, ("ip6_input: packet with invalid destination option type dropped.\n")); + pbuf_free(p); + IP6_STATS_INC(ip6.drop); + goto ip6_input_cleanup; + default: + /* Skip over this option. */ + opt_dlen = IP6_OPT_DLEN(opt_hdr); + break; + } + break; + } + + /* Adjust the offset to move to the next extended option header */ + opt_offset = opt_offset + IP6_OPT_HLEN + opt_dlen; + } + + pbuf_remove_header(p, hlen); + break; + } + case IP6_NEXTH_ROUTING: + { + struct ip6_rout_hdr *rout_hdr; + LWIP_DEBUGF(IP6_DEBUG, ("ip6_input: packet with Routing header\n")); + + rout_hdr = (struct ip6_rout_hdr *)p->payload; + + /* Get next header type. */ + nexth = &IP6_ROUT_NEXTH(rout_hdr); + + /* Get the header length. */ + hlen = 8 * (1 + rout_hdr->_hlen); + + if ((p->len < 8) || (hlen > p->len)) { + LWIP_DEBUGF(IP6_DEBUG | LWIP_DBG_LEVEL_SERIOUS, + ("IPv6 options header (hlen %"U16_F") does not fit in first pbuf (len %"U16_F"), IPv6 packet dropped.\n", + hlen, p->len)); + /* free (drop) packet pbufs */ + pbuf_free(p); + IP6_STATS_INC(ip6.lenerr); + IP6_STATS_INC(ip6.drop); + goto ip6_input_cleanup; + } + + /* Skip over this header. */ + hlen_tot = (u16_t)(hlen_tot + hlen); + + /* if segment left value is 0 in routing header, ignore the option */ + if (IP6_ROUT_SEG_LEFT(rout_hdr)) { + /* The length field of routing option header must be even */ + if (rout_hdr->_hlen & 0x1) { + /* Discard and send parameter field error */ + icmp6_param_problem(p, ICMP6_PP_FIELD, &rout_hdr->_hlen); + LWIP_DEBUGF(IP6_DEBUG, ("ip6_input: packet with invalid routing type dropped\n")); + pbuf_free(p); + IP6_STATS_INC(ip6.drop); + goto ip6_input_cleanup; + } + + switch (IP6_ROUT_TYPE(rout_hdr)) + { + /* TODO: process routing by the type */ + case IP6_ROUT_TYPE2: + break; + case IP6_ROUT_RPL: + break; + default: + /* Discard unrecognized routing type and send parameter field error */ + icmp6_param_problem(p, ICMP6_PP_FIELD, &IP6_ROUT_TYPE(rout_hdr)); + LWIP_DEBUGF(IP6_DEBUG, ("ip6_input: packet with invalid routing type dropped\n")); + pbuf_free(p); + IP6_STATS_INC(ip6.drop); + goto ip6_input_cleanup; + } + } + + pbuf_remove_header(p, hlen); + break; + } + case IP6_NEXTH_FRAGMENT: + { + struct ip6_frag_hdr *frag_hdr; + LWIP_DEBUGF(IP6_DEBUG, ("ip6_input: packet with Fragment header\n")); + + frag_hdr = (struct ip6_frag_hdr *)p->payload; + + /* Get next header type. */ + nexth = &IP6_FRAG_NEXTH(frag_hdr); + + /* Fragment Header length. */ + hlen = 8; + + /* Make sure this header fits in current pbuf. 
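+ * (A lone fragment, i.e. offset 0 with the M bit clear, is handled further
+ * below by simply skipping this header; anything else is passed to
+ * ip6_reass(), which requires LWIP_IPV6_REASS to be enabled, e.g. with a
+ * lwipopts.h entry such as:
+ *
+ *   #define LWIP_IPV6_REASS   1
+ *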
*/ + if (hlen > p->len) { + LWIP_DEBUGF(IP6_DEBUG | LWIP_DBG_LEVEL_SERIOUS, + ("IPv6 options header (hlen %"U16_F") does not fit in first pbuf (len %"U16_F"), IPv6 packet dropped.\n", + hlen, p->len)); + /* free (drop) packet pbufs */ + pbuf_free(p); + IP6_FRAG_STATS_INC(ip6_frag.lenerr); + IP6_FRAG_STATS_INC(ip6_frag.drop); + goto ip6_input_cleanup; + } + + hlen_tot = (u16_t)(hlen_tot + hlen); + + /* check payload length is multiple of 8 octets when mbit is set */ + if (IP6_FRAG_MBIT(frag_hdr) && (IP6H_PLEN(ip6hdr) & 0x7)) { + /* ipv6 payload length is not multiple of 8 octets */ + icmp6_param_problem(p, ICMP6_PP_FIELD, LWIP_PACKED_CAST(const void *, &ip6hdr->_plen)); + LWIP_DEBUGF(IP6_DEBUG, ("ip6_input: packet with invalid payload length dropped\n")); + pbuf_free(p); + IP6_STATS_INC(ip6.drop); + goto ip6_input_cleanup; + } + + /* Offset == 0 and more_fragments == 0? */ + if ((frag_hdr->_fragment_offset & + PP_HTONS(IP6_FRAG_OFFSET_MASK | IP6_FRAG_MORE_FLAG)) == 0) { + /* This is a 1-fragment packet. Skip this header and continue. */ + pbuf_remove_header(p, hlen); + } else { +#if LWIP_IPV6_REASS + /* reassemble the packet */ + ip_data.current_ip_header_tot_len = hlen_tot; + p = ip6_reass(p); + /* packet not fully reassembled yet? */ + if (p == NULL) { + goto ip6_input_cleanup; + } + + /* Returned p point to IPv6 header. + * Update all our variables and pointers and continue. */ + ip6hdr = (struct ip6_hdr *)p->payload; + nexth = &IP6H_NEXTH(ip6hdr); + hlen = hlen_tot = IP6_HLEN; + pbuf_remove_header(p, IP6_HLEN); + +#else /* LWIP_IPV6_REASS */ + /* free (drop) packet pbufs */ + LWIP_DEBUGF(IP6_DEBUG, ("ip6_input: packet with Fragment header dropped (with LWIP_IPV6_REASS==0)\n")); + pbuf_free(p); + IP6_STATS_INC(ip6.opterr); + IP6_STATS_INC(ip6.drop); + goto ip6_input_cleanup; +#endif /* LWIP_IPV6_REASS */ + } + break; + } + default: + goto options_done; + } + + if (*nexth == IP6_NEXTH_HOPBYHOP) { + /* Hop-by-Hop header comes only as a first option */ + icmp6_param_problem(p, ICMP6_PP_HEADER, nexth); + LWIP_DEBUGF(IP6_DEBUG, ("ip6_input: packet with Hop-by-Hop options header dropped (only valid as a first option)\n")); + pbuf_free(p); + IP6_STATS_INC(ip6.drop); + goto ip6_input_cleanup; + } + } + +options_done: + + /* send to upper layers */ + LWIP_DEBUGF(IP6_DEBUG, ("ip6_input: \n")); + ip6_debug_print(p); + LWIP_DEBUGF(IP6_DEBUG, ("ip6_input: p->len %"U16_F" p->tot_len %"U16_F"\n", p->len, p->tot_len)); + + ip_data.current_ip_header_tot_len = hlen_tot; + +#if LWIP_RAW + /* p points to IPv6 header again for raw_input. */ + pbuf_add_header_force(p, hlen_tot); + /* raw input did not eat the packet? */ + raw_status = raw_input(p, inp); + if (raw_status != RAW_INPUT_EATEN) + { + /* Point to payload. */ + pbuf_remove_header(p, hlen_tot); +#else /* LWIP_RAW */ + { +#endif /* LWIP_RAW */ + switch (*nexth) { + case IP6_NEXTH_NONE: + pbuf_free(p); + break; +#if LWIP_UDP + case IP6_NEXTH_UDP: +#if LWIP_UDPLITE + case IP6_NEXTH_UDPLITE: +#endif /* LWIP_UDPLITE */ + udp_input(p, inp); + break; +#endif /* LWIP_UDP */ +#if LWIP_TCP + case IP6_NEXTH_TCP: + tcp_input(p, inp); + break; +#endif /* LWIP_TCP */ +#if LWIP_ICMP6 + case IP6_NEXTH_ICMP6: + icmp6_input(p, inp); + break; +#endif /* LWIP_ICMP */ + default: +#if LWIP_RAW + if (raw_status == RAW_INPUT_DELIVERED) { + /* @todo: ipv6 mib in-delivers? */ + } else +#endif /* LWIP_RAW */ + { +#if LWIP_ICMP6 + /* p points to IPv6 header again for raw_input. 
*/ + pbuf_add_header_force(p, hlen_tot); + /* send ICMP parameter problem unless it was a multicast or ICMPv6 */ + if ((!ip6_addr_ismulticast(ip6_current_dest_addr())) && + (IP6H_NEXTH(ip6hdr) != IP6_NEXTH_ICMP6)) { + icmp6_param_problem(p, ICMP6_PP_HEADER, nexth); + } +#endif /* LWIP_ICMP */ + LWIP_DEBUGF(IP6_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("ip6_input: Unsupported transport protocol %"U16_F"\n", (u16_t)IP6H_NEXTH(ip6hdr))); + IP6_STATS_INC(ip6.proterr); + IP6_STATS_INC(ip6.drop); + } + pbuf_free(p); + break; + } + } + +ip6_input_cleanup: + ip_data.current_netif = NULL; + ip_data.current_input_netif = NULL; + ip_data.current_ip6_header = NULL; + ip_data.current_ip_header_tot_len = 0; + ip6_addr_set_zero(ip6_current_src_addr()); + ip6_addr_set_zero(ip6_current_dest_addr()); + + return ERR_OK; +} + + +/** + * Sends an IPv6 packet on a network interface. This function constructs + * the IPv6 header. If the source IPv6 address is NULL, the IPv6 "ANY" address is + * used as source (usually during network startup). If the source IPv6 address it + * IP6_ADDR_ANY, the most appropriate IPv6 address of the outgoing network + * interface is filled in as source address. If the destination IPv6 address is + * LWIP_IP_HDRINCL, p is assumed to already include an IPv6 header and + * p->payload points to it instead of the data. + * + * @param p the packet to send (p->payload points to the data, e.g. next + protocol header; if dest == LWIP_IP_HDRINCL, p already includes an + IPv6 header and p->payload points to that IPv6 header) + * @param src the source IPv6 address to send from (if src == IP6_ADDR_ANY, an + * IP address of the netif is selected and used as source address. + * if src == NULL, IP6_ADDR_ANY is used as source) (src is possibly not + * properly zoned) + * @param dest the destination IPv6 address to send the packet to (possibly not + * properly zoned) + * @param hl the Hop Limit value to be set in the IPv6 header + * @param tc the Traffic Class value to be set in the IPv6 header + * @param nexth the Next Header to be set in the IPv6 header + * @param netif the netif on which to send this packet + * @return ERR_OK if the packet was sent OK + * ERR_BUF if p doesn't have enough space for IPv6/LINK headers + * returns errors returned by netif->output_ip6 + */ +err_t +ip6_output_if(struct pbuf *p, const ip6_addr_t *src, const ip6_addr_t *dest, + u8_t hl, u8_t tc, + u8_t nexth, struct netif *netif) +{ + const ip6_addr_t *src_used = src; + if (dest != LWIP_IP_HDRINCL) { + if (src != NULL && ip6_addr_isany(src)) { + src_used = ip_2_ip6(ip6_select_source_address(netif, dest)); + if ((src_used == NULL) || ip6_addr_isany(src_used)) { + /* No appropriate source address was found for this packet. */ + LWIP_DEBUGF(IP6_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("ip6_output: No suitable source address for packet.\n")); + IP6_STATS_INC(ip6.rterr); + return ERR_RTE; + } + } + } + return ip6_output_if_src(p, src_used, dest, hl, tc, nexth, netif); +} + +/** + * Same as ip6_output_if() but 'src' address is not replaced by netif address + * when it is 'any'. + */ +err_t +ip6_output_if_src(struct pbuf *p, const ip6_addr_t *src, const ip6_addr_t *dest, + u8_t hl, u8_t tc, + u8_t nexth, struct netif *netif) +{ + struct ip6_hdr *ip6hdr; + ip6_addr_t dest_addr; + + LWIP_ASSERT_CORE_LOCKED(); + LWIP_IP_CHECK_PBUF_REF_COUNT_FOR_TX(p); + + /* Should the IPv6 header be generated or is it already included in p? 
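+ * (A minimal caller-side sketch, using a hypothetical documentation address
+ * and a pbuf p that already holds the next-protocol payload:
+ *
+ *   ip6_addr_t dst;
+ *   ip6addr_aton("2001:db8::1", &dst);
+ *   ip6_output_if(p, IP6_ADDR_ANY6, &dst, 64, 0, IP6_NEXTH_UDP, netif);
+ *
+ * Passing IP6_ADDR_ANY6 as source lets ip6_select_source_address() pick one.)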
*/ + if (dest != LWIP_IP_HDRINCL) { +#if LWIP_IPV6_SCOPES + /* If the destination address is scoped but lacks a zone, add a zone now, + * based on the outgoing interface. The lower layers (e.g., nd6) absolutely + * require addresses to be properly zoned for correctness. In some cases, + * earlier attempts will have been made to add a zone to the destination, + * but this function is the only one that is called in all (other) cases, + * so we must do this here. */ + if (ip6_addr_lacks_zone(dest, IP6_UNKNOWN)) { + ip6_addr_copy(dest_addr, *dest); + ip6_addr_assign_zone(&dest_addr, IP6_UNKNOWN, netif); + dest = &dest_addr; + } +#endif /* LWIP_IPV6_SCOPES */ + + /* generate IPv6 header */ + if (pbuf_add_header(p, IP6_HLEN)) { + LWIP_DEBUGF(IP6_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("ip6_output: not enough room for IPv6 header in pbuf\n")); + IP6_STATS_INC(ip6.err); + return ERR_BUF; + } + + ip6hdr = (struct ip6_hdr *)p->payload; + LWIP_ASSERT("check that first pbuf can hold struct ip6_hdr", + (p->len >= sizeof(struct ip6_hdr))); + + IP6H_HOPLIM_SET(ip6hdr, hl); + IP6H_NEXTH_SET(ip6hdr, nexth); + + /* dest cannot be NULL here */ + ip6_addr_copy_to_packed(ip6hdr->dest, *dest); + + IP6H_VTCFL_SET(ip6hdr, 6, tc, 0); + IP6H_PLEN_SET(ip6hdr, (u16_t)(p->tot_len - IP6_HLEN)); + + if (src == NULL) { + src = IP6_ADDR_ANY6; + } + /* src cannot be NULL here */ + ip6_addr_copy_to_packed(ip6hdr->src, *src); + + } else { + /* IP header already included in p */ + ip6hdr = (struct ip6_hdr *)p->payload; + ip6_addr_copy_from_packed(dest_addr, ip6hdr->dest); + ip6_addr_assign_zone(&dest_addr, IP6_UNKNOWN, netif); + dest = &dest_addr; + } + + IP6_STATS_INC(ip6.xmit); + + LWIP_DEBUGF(IP6_DEBUG, ("ip6_output_if: %c%c%"U16_F"\n", netif->name[0], netif->name[1], (u16_t)netif->num)); + ip6_debug_print(p); + +#if ENABLE_LOOPBACK + { + int i; +#if !LWIP_HAVE_LOOPIF + if (ip6_addr_isloopback(dest)) { + return netif_loop_output(netif, p); + } +#endif /* !LWIP_HAVE_LOOPIF */ + for (i = 0; i < LWIP_IPV6_NUM_ADDRESSES; i++) { + if (ip6_addr_isvalid(netif_ip6_addr_state(netif, i)) && + ip6_addr_cmp(dest, netif_ip6_addr(netif, i))) { + /* Packet to self, enqueue it for loopback */ + LWIP_DEBUGF(IP6_DEBUG, ("netif_loop_output()\n")); + return netif_loop_output(netif, p); + } + } + } +#if LWIP_MULTICAST_TX_OPTIONS + if ((p->flags & PBUF_FLAG_MCASTLOOP) != 0) { + netif_loop_output(netif, p); + } +#endif /* LWIP_MULTICAST_TX_OPTIONS */ +#endif /* ENABLE_LOOPBACK */ +#if LWIP_IPV6_FRAG + /* don't fragment if interface has mtu set to 0 [loopif] */ + if (netif_mtu6(netif) && (p->tot_len > nd6_get_destination_mtu(dest, netif))) { + return ip6_frag(p, netif, dest); + } +#endif /* LWIP_IPV6_FRAG */ + + LWIP_DEBUGF(IP6_DEBUG, ("netif->output_ip6()\n")); + return netif->output_ip6(netif, p, dest); +} + +/** + * Simple interface to ip6_output_if. It finds the outgoing network + * interface and calls upon ip6_output_if to do the actual work. + * + * @param p the packet to send (p->payload points to the data, e.g. next + protocol header; if dest == LWIP_IP_HDRINCL, p already includes an + IPv6 header and p->payload points to that IPv6 header) + * @param src the source IPv6 address to send from (if src == IP6_ADDR_ANY, an + * IP address of the netif is selected and used as source address. 
+ * if src == NULL, IP6_ADDR_ANY is used as source) + * @param dest the destination IPv6 address to send the packet to + * @param hl the Hop Limit value to be set in the IPv6 header + * @param tc the Traffic Class value to be set in the IPv6 header + * @param nexth the Next Header to be set in the IPv6 header + * + * @return ERR_RTE if no route is found + * see ip_output_if() for more return values + */ +err_t +ip6_output(struct pbuf *p, const ip6_addr_t *src, const ip6_addr_t *dest, + u8_t hl, u8_t tc, u8_t nexth) +{ + struct netif *netif; + struct ip6_hdr *ip6hdr; + ip6_addr_t src_addr, dest_addr; + + LWIP_IP_CHECK_PBUF_REF_COUNT_FOR_TX(p); + + if (dest != LWIP_IP_HDRINCL) { + netif = ip6_route(src, dest); + } else { + /* IP header included in p, read addresses. */ + ip6hdr = (struct ip6_hdr *)p->payload; + ip6_addr_copy_from_packed(src_addr, ip6hdr->src); + ip6_addr_copy_from_packed(dest_addr, ip6hdr->dest); + netif = ip6_route(&src_addr, &dest_addr); + } + + if (netif == NULL) { + LWIP_DEBUGF(IP6_DEBUG, ("ip6_output: no route for %"X16_F":%"X16_F":%"X16_F":%"X16_F":%"X16_F":%"X16_F":%"X16_F":%"X16_F"\n", + IP6_ADDR_BLOCK1(dest), + IP6_ADDR_BLOCK2(dest), + IP6_ADDR_BLOCK3(dest), + IP6_ADDR_BLOCK4(dest), + IP6_ADDR_BLOCK5(dest), + IP6_ADDR_BLOCK6(dest), + IP6_ADDR_BLOCK7(dest), + IP6_ADDR_BLOCK8(dest))); + IP6_STATS_INC(ip6.rterr); + return ERR_RTE; + } + + return ip6_output_if(p, src, dest, hl, tc, nexth, netif); +} + + +#if LWIP_NETIF_USE_HINTS +/** Like ip6_output, but takes and addr_hint pointer that is passed on to netif->addr_hint + * before calling ip6_output_if. + * + * @param p the packet to send (p->payload points to the data, e.g. next + protocol header; if dest == LWIP_IP_HDRINCL, p already includes an + IPv6 header and p->payload points to that IPv6 header) + * @param src the source IPv6 address to send from (if src == IP6_ADDR_ANY, an + * IP address of the netif is selected and used as source address. + * if src == NULL, IP6_ADDR_ANY is used as source) + * @param dest the destination IPv6 address to send the packet to + * @param hl the Hop Limit value to be set in the IPv6 header + * @param tc the Traffic Class value to be set in the IPv6 header + * @param nexth the Next Header to be set in the IPv6 header + * @param netif_hint netif output hint pointer set to netif->hint before + * calling ip_output_if() + * + * @return ERR_RTE if no route is found + * see ip_output_if() for more return values + */ +err_t +ip6_output_hinted(struct pbuf *p, const ip6_addr_t *src, const ip6_addr_t *dest, + u8_t hl, u8_t tc, u8_t nexth, struct netif_hint *netif_hint) +{ + struct netif *netif; + struct ip6_hdr *ip6hdr; + ip6_addr_t src_addr, dest_addr; + err_t err; + + LWIP_IP_CHECK_PBUF_REF_COUNT_FOR_TX(p); + + if (dest != LWIP_IP_HDRINCL) { + netif = ip6_route(src, dest); + } else { + /* IP header included in p, read addresses. 
*/ + ip6hdr = (struct ip6_hdr *)p->payload; + ip6_addr_copy_from_packed(src_addr, ip6hdr->src); + ip6_addr_copy_from_packed(dest_addr, ip6hdr->dest); + netif = ip6_route(&src_addr, &dest_addr); + } + + if (netif == NULL) { + LWIP_DEBUGF(IP6_DEBUG, ("ip6_output: no route for %"X16_F":%"X16_F":%"X16_F":%"X16_F":%"X16_F":%"X16_F":%"X16_F":%"X16_F"\n", + IP6_ADDR_BLOCK1(dest), + IP6_ADDR_BLOCK2(dest), + IP6_ADDR_BLOCK3(dest), + IP6_ADDR_BLOCK4(dest), + IP6_ADDR_BLOCK5(dest), + IP6_ADDR_BLOCK6(dest), + IP6_ADDR_BLOCK7(dest), + IP6_ADDR_BLOCK8(dest))); + IP6_STATS_INC(ip6.rterr); + return ERR_RTE; + } + + NETIF_SET_HINTS(netif, netif_hint); + err = ip6_output_if(p, src, dest, hl, tc, nexth, netif); + NETIF_RESET_HINTS(netif); + + return err; +} +#endif /* LWIP_NETIF_USE_HINTS*/ + +#if LWIP_IPV6_MLD +/** + * Add a hop-by-hop options header with a router alert option and padding. + * + * Used by MLD when sending a Multicast listener report/done message. + * + * @param p the packet to which we will prepend the options header + * @param nexth the next header protocol number (e.g. IP6_NEXTH_ICMP6) + * @param value the value of the router alert option data (e.g. IP6_ROUTER_ALERT_VALUE_MLD) + * @return ERR_OK if hop-by-hop header was added, ERR_* otherwise + */ +err_t +ip6_options_add_hbh_ra(struct pbuf *p, u8_t nexth, u8_t value) +{ + u8_t *opt_data; + u32_t offset = 0; + struct ip6_hbh_hdr *hbh_hdr; + struct ip6_opt_hdr *opt_hdr; + + /* fixed 4 bytes for router alert option and 2 bytes padding */ + const u8_t hlen = (sizeof(struct ip6_opt_hdr) * 2) + IP6_ROUTER_ALERT_DLEN; + /* Move pointer to make room for hop-by-hop options header. */ + if (pbuf_add_header(p, sizeof(struct ip6_hbh_hdr) + hlen)) { + LWIP_DEBUGF(IP6_DEBUG, ("ip6_options: no space for options header\n")); + IP6_STATS_INC(ip6.err); + return ERR_BUF; + } + + /* Set fields of Hop-by-Hop header */ + hbh_hdr = (struct ip6_hbh_hdr *)p->payload; + IP6_HBH_NEXTH(hbh_hdr) = nexth; + hbh_hdr->_hlen = 0; + offset = IP6_HBH_HLEN; + + /* Set router alert options to Hop-by-Hop extended option header */ + opt_hdr = (struct ip6_opt_hdr *)((u8_t *)hbh_hdr + offset); + IP6_OPT_TYPE(opt_hdr) = IP6_ROUTER_ALERT_OPTION; + IP6_OPT_DLEN(opt_hdr) = IP6_ROUTER_ALERT_DLEN; + offset += IP6_OPT_HLEN; + + /* Set router alert option data */ + opt_data = (u8_t *)hbh_hdr + offset; + opt_data[0] = value; + opt_data[1] = 0; + offset += IP6_OPT_DLEN(opt_hdr); + + /* add 2 bytes padding to make 8 bytes Hop-by-Hop header length */ + opt_hdr = (struct ip6_opt_hdr *)((u8_t *)hbh_hdr + offset); + IP6_OPT_TYPE(opt_hdr) = IP6_PADN_OPTION; + IP6_OPT_DLEN(opt_hdr) = 0; + + return ERR_OK; +} +#endif /* LWIP_IPV6_MLD */ + +#if IP6_DEBUG +/* Print an IPv6 header by using LWIP_DEBUGF + * @param p an IPv6 packet, p->payload pointing to the IPv6 header + */ +void +ip6_debug_print(struct pbuf *p) +{ + struct ip6_hdr *ip6hdr = (struct ip6_hdr *)p->payload; + + LWIP_DEBUGF(IP6_DEBUG, ("IPv6 header:\n")); + LWIP_DEBUGF(IP6_DEBUG, ("+-------------------------------+\n")); + LWIP_DEBUGF(IP6_DEBUG, ("| %2"U16_F" | %3"U16_F" | %7"U32_F" | (ver, class, flow)\n", + IP6H_V(ip6hdr), + IP6H_TC(ip6hdr), + IP6H_FL(ip6hdr))); + LWIP_DEBUGF(IP6_DEBUG, ("+-------------------------------+\n")); + LWIP_DEBUGF(IP6_DEBUG, ("| %5"U16_F" | %3"U16_F" | %3"U16_F" | (plen, nexth, hopl)\n", + IP6H_PLEN(ip6hdr), + IP6H_NEXTH(ip6hdr), + IP6H_HOPLIM(ip6hdr))); + LWIP_DEBUGF(IP6_DEBUG, ("+-------------------------------+\n")); + LWIP_DEBUGF(IP6_DEBUG, ("| %4"X32_F" | %4"X32_F" | %4"X32_F" | %4"X32_F" | (src)\n", + 
IP6_ADDR_BLOCK1(&(ip6hdr->src)), + IP6_ADDR_BLOCK2(&(ip6hdr->src)), + IP6_ADDR_BLOCK3(&(ip6hdr->src)), + IP6_ADDR_BLOCK4(&(ip6hdr->src)))); + LWIP_DEBUGF(IP6_DEBUG, ("| %4"X32_F" | %4"X32_F" | %4"X32_F" | %4"X32_F" |\n", + IP6_ADDR_BLOCK5(&(ip6hdr->src)), + IP6_ADDR_BLOCK6(&(ip6hdr->src)), + IP6_ADDR_BLOCK7(&(ip6hdr->src)), + IP6_ADDR_BLOCK8(&(ip6hdr->src)))); + LWIP_DEBUGF(IP6_DEBUG, ("+-------------------------------+\n")); + LWIP_DEBUGF(IP6_DEBUG, ("| %4"X32_F" | %4"X32_F" | %4"X32_F" | %4"X32_F" | (dest)\n", + IP6_ADDR_BLOCK1(&(ip6hdr->dest)), + IP6_ADDR_BLOCK2(&(ip6hdr->dest)), + IP6_ADDR_BLOCK3(&(ip6hdr->dest)), + IP6_ADDR_BLOCK4(&(ip6hdr->dest)))); + LWIP_DEBUGF(IP6_DEBUG, ("| %4"X32_F" | %4"X32_F" | %4"X32_F" | %4"X32_F" |\n", + IP6_ADDR_BLOCK5(&(ip6hdr->dest)), + IP6_ADDR_BLOCK6(&(ip6hdr->dest)), + IP6_ADDR_BLOCK7(&(ip6hdr->dest)), + IP6_ADDR_BLOCK8(&(ip6hdr->dest)))); + LWIP_DEBUGF(IP6_DEBUG, ("+-------------------------------+\n")); +} +#endif /* IP6_DEBUG */ + +#endif /* LWIP_IPV6 */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/ipv6/ip6_addr.c b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/ipv6/ip6_addr.c new file mode 100644 index 0000000..aafba72 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/ipv6/ip6_addr.c @@ -0,0 +1,343 @@ +/** + * @file + * + * IPv6 addresses. + */ + +/* + * Copyright (c) 2010 Inico Technologies Ltd. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Ivan Delamer + * + * Functions for handling IPv6 addresses. 
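+ *
+ * A minimal usage sketch of the conversion routines defined below
+ * (illustrative only; the variable names are arbitrary and
+ * IP6ADDR_STRLEN_MAX is assumed to come from ip6_addr.h):
+ *
+ *   ip6_addr_t addr;
+ *   char buf[IP6ADDR_STRLEN_MAX];
+ *   if (ip6addr_aton("FF02::1", &addr)) {
+ *     ip6addr_ntoa_r(&addr, buf, sizeof(buf));
+ *   }
+ *
+ * after which buf holds the canonical form "FF02::1" again.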
+ * + * Please coordinate changes and requests with Ivan Delamer + * + */ + +#include "lwip/opt.h" + +#if LWIP_IPV6 /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/ip_addr.h" +#include "lwip/def.h" + +#include + +#if LWIP_IPV4 +#include "lwip/ip4_addr.h" /* for ip6addr_aton to handle IPv4-mapped addresses */ +#endif /* LWIP_IPV4 */ + +/* used by IP6_ADDR_ANY(6) in ip6_addr.h */ +const ip_addr_t ip6_addr_any = IPADDR6_INIT(0ul, 0ul, 0ul, 0ul); + +#define lwip_xchar(i) ((char)((i) < 10 ? '0' + (i) : 'A' + (i) - 10)) + +/** + * Check whether "cp" is a valid ascii representation + * of an IPv6 address and convert to a binary address. + * Returns 1 if the address is valid, 0 if not. + * + * @param cp IPv6 address in ascii representation (e.g. "FF01::1") + * @param addr pointer to which to save the ip address in network order + * @return 1 if cp could be converted to addr, 0 on failure + */ +int +ip6addr_aton(const char *cp, ip6_addr_t *addr) +{ + u32_t addr_index, zero_blocks, current_block_index, current_block_value; + const char *s; +#if LWIP_IPV4 + int check_ipv4_mapped = 0; +#endif /* LWIP_IPV4 */ + + /* Count the number of colons, to count the number of blocks in a "::" sequence + zero_blocks may be 1 even if there are no :: sequences */ + zero_blocks = 8; + for (s = cp; *s != 0; s++) { + if (*s == ':') { + zero_blocks--; +#if LWIP_IPV4 + } else if (*s == '.') { + if ((zero_blocks == 5) ||(zero_blocks == 2)) { + check_ipv4_mapped = 1; + /* last block could be the start of an IPv4 address */ + zero_blocks--; + } else { + /* invalid format */ + return 0; + } + break; +#endif /* LWIP_IPV4 */ + } else if (!lwip_isxdigit(*s)) { + break; + } + } + + /* parse each block */ + addr_index = 0; + current_block_index = 0; + current_block_value = 0; + for (s = cp; *s != 0; s++) { + if (*s == ':') { + if (addr) { + if (current_block_index & 0x1) { + addr->addr[addr_index++] |= current_block_value; + } + else { + addr->addr[addr_index] = current_block_value << 16; + } + } + current_block_index++; +#if LWIP_IPV4 + if (check_ipv4_mapped) { + if (current_block_index == 6) { + ip4_addr_t ip4; + int ret = ip4addr_aton(s + 1, &ip4); + if (ret) { + if (addr) { + addr->addr[3] = lwip_htonl(ip4.addr); + current_block_index++; + goto fix_byte_order_and_return; + } + return 1; + } + } + } +#endif /* LWIP_IPV4 */ + current_block_value = 0; + if (current_block_index > 7) { + /* address too long! */ + return 0; + } + if (s[1] == ':') { + if (s[2] == ':') { + /* invalid format: three successive colons */ + return 0; + } + s++; + /* "::" found, set zeros */ + while (zero_blocks > 0) { + zero_blocks--; + if (current_block_index & 0x1) { + addr_index++; + } else { + if (addr) { + addr->addr[addr_index] = 0; + } + } + current_block_index++; + if (current_block_index > 7) { + /* address too long! */ + return 0; + } + } + } + } else if (lwip_isxdigit(*s)) { + /* add current digit */ + current_block_value = (current_block_value << 4) + + (lwip_isdigit(*s) ? (u32_t)(*s - '0') : + (u32_t)(10 + (lwip_islower(*s) ? *s - 'a' : *s - 'A'))); + } else { + /* unexpected digit, space? CRLF? */ + break; + } + } + + if (addr) { + if (current_block_index & 0x1) { + addr->addr[addr_index++] |= current_block_value; + } + else { + addr->addr[addr_index] = current_block_value << 16; + } +#if LWIP_IPV4 +fix_byte_order_and_return: +#endif + /* convert to network byte order. 
*/ + for (addr_index = 0; addr_index < 4; addr_index++) { + addr->addr[addr_index] = lwip_htonl(addr->addr[addr_index]); + } + + ip6_addr_clear_zone(addr); + } + + if (current_block_index != 7) { + return 0; + } + + return 1; +} + +/** + * Convert numeric IPv6 address into ASCII representation. + * returns ptr to static buffer; not reentrant! + * + * @param addr ip6 address in network order to convert + * @return pointer to a global static (!) buffer that holds the ASCII + * representation of addr + */ +char * +ip6addr_ntoa(const ip6_addr_t *addr) +{ + static char str[40]; + return ip6addr_ntoa_r(addr, str, 40); +} + +/** + * Same as ipaddr_ntoa, but reentrant since a user-supplied buffer is used. + * + * @param addr ip6 address in network order to convert + * @param buf target buffer where the string is stored + * @param buflen length of buf + * @return either pointer to buf which now holds the ASCII + * representation of addr or NULL if buf was too small + */ +char * +ip6addr_ntoa_r(const ip6_addr_t *addr, char *buf, int buflen) +{ + u32_t current_block_index, current_block_value, next_block_value; + s32_t i; + u8_t zero_flag, empty_block_flag; + +#if LWIP_IPV4 + if (ip6_addr_isipv4mappedipv6(addr)) { + /* This is an IPv4 mapped address */ + ip4_addr_t addr4; + char *ret; +#define IP4MAPPED_HEADER "::FFFF:" + char *buf_ip4 = buf + sizeof(IP4MAPPED_HEADER) - 1; + int buflen_ip4 = buflen - sizeof(IP4MAPPED_HEADER) + 1; + if (buflen < (int)sizeof(IP4MAPPED_HEADER)) { + return NULL; + } + memcpy(buf, IP4MAPPED_HEADER, sizeof(IP4MAPPED_HEADER)); + addr4.addr = addr->addr[3]; + ret = ip4addr_ntoa_r(&addr4, buf_ip4, buflen_ip4); + if (ret != buf_ip4) { + return NULL; + } + return buf; + } +#endif /* LWIP_IPV4 */ + i = 0; + empty_block_flag = 0; /* used to indicate a zero chain for "::' */ + + for (current_block_index = 0; current_block_index < 8; current_block_index++) { + /* get the current 16-bit block */ + current_block_value = lwip_htonl(addr->addr[current_block_index >> 1]); + if ((current_block_index & 0x1) == 0) { + current_block_value = current_block_value >> 16; + } + current_block_value &= 0xffff; + + /* Check for empty block. */ + if (current_block_value == 0) { + if (current_block_index == 7 && empty_block_flag == 1) { + /* special case, we must render a ':' for the last block. */ + buf[i++] = ':'; + if (i >= buflen) { + return NULL; + } + break; + } + if (empty_block_flag == 0) { + /* generate empty block "::", but only if more than one contiguous zero block, + * according to current formatting suggestions RFC 5952. */ + next_block_value = lwip_htonl(addr->addr[(current_block_index + 1) >> 1]); + if ((current_block_index & 0x1) == 0x01) { + next_block_value = next_block_value >> 16; + } + next_block_value &= 0xffff; + if (next_block_value == 0) { + empty_block_flag = 1; + buf[i++] = ':'; + if (i >= buflen) { + return NULL; + } + continue; /* move on to next block. */ + } + } else if (empty_block_flag == 1) { + /* move on to next block. */ + continue; + } + } else if (empty_block_flag == 1) { + /* Set this flag value so we don't produce multiple empty blocks. 
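+ * For example, for 2001:0:0:1:0:0:0:8 only the first eligible run of zero
+ * blocks is replaced, giving "2001::1:0:0:0:8", and a lone zero block as in
+ * 2001:DB8:0:1:1:1:1:1 is printed as "0" rather than compressed to "::".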
*/ + empty_block_flag = 2; + } + + if (current_block_index > 0) { + buf[i++] = ':'; + if (i >= buflen) { + return NULL; + } + } + + if ((current_block_value & 0xf000) == 0) { + zero_flag = 1; + } else { + buf[i++] = lwip_xchar(((current_block_value & 0xf000) >> 12)); + zero_flag = 0; + if (i >= buflen) { + return NULL; + } + } + + if (((current_block_value & 0xf00) == 0) && (zero_flag)) { + /* do nothing */ + } else { + buf[i++] = lwip_xchar(((current_block_value & 0xf00) >> 8)); + zero_flag = 0; + if (i >= buflen) { + return NULL; + } + } + + if (((current_block_value & 0xf0) == 0) && (zero_flag)) { + /* do nothing */ + } + else { + buf[i++] = lwip_xchar(((current_block_value & 0xf0) >> 4)); + zero_flag = 0; + if (i >= buflen) { + return NULL; + } + } + + buf[i++] = lwip_xchar((current_block_value & 0xf)); + if (i >= buflen) { + return NULL; + } + } + + buf[i] = 0; + + return buf; +} + +#endif /* LWIP_IPV6 */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/ipv6/ip6_frag.c b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/ipv6/ip6_frag.c new file mode 100644 index 0000000..0f277b6 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/ipv6/ip6_frag.c @@ -0,0 +1,862 @@ +/** + * @file + * + * IPv6 fragmentation and reassembly. + */ + +/* + * Copyright (c) 2010 Inico Technologies Ltd. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Ivan Delamer + * + * + * Please coordinate changes and requests with Ivan Delamer + * + */ + +#include "lwip/opt.h" +#include "lwip/ip6_frag.h" +#include "lwip/ip6.h" +#include "lwip/icmp6.h" +#include "lwip/nd6.h" +#include "lwip/ip.h" + +#include "lwip/pbuf.h" +#include "lwip/memp.h" +#include "lwip/stats.h" + +#include + +#if LWIP_IPV6 && LWIP_IPV6_REASS /* don't build if not configured for use in lwipopts.h */ + + +/** Setting this to 0, you can turn off checking the fragments for overlapping + * regions. The code gets a little smaller. 
Only use this if you know that + * overlapping won't occur on your network! */ +#ifndef IP_REASS_CHECK_OVERLAP +#define IP_REASS_CHECK_OVERLAP 1 +#endif /* IP_REASS_CHECK_OVERLAP */ + +/** Set to 0 to prevent freeing the oldest datagram when the reassembly buffer is + * full (IP_REASS_MAX_PBUFS pbufs are enqueued). The code gets a little smaller. + * Datagrams will be freed by timeout only. Especially useful when MEMP_NUM_REASSDATA + * is set to 1, so one datagram can be reassembled at a time, only. */ +#ifndef IP_REASS_FREE_OLDEST +#define IP_REASS_FREE_OLDEST 1 +#endif /* IP_REASS_FREE_OLDEST */ + +#if IPV6_FRAG_COPYHEADER +/* The number of bytes we need to "borrow" from (i.e., overwrite in) the header + * that precedes the fragment header for reassembly pruposes. */ +#define IPV6_FRAG_REQROOM ((s16_t)(sizeof(struct ip6_reass_helper) - IP6_FRAG_HLEN)) +#endif + +#define IP_REASS_FLAG_LASTFRAG 0x01 + +/** This is a helper struct which holds the starting + * offset and the ending offset of this fragment to + * easily chain the fragments. + * It has the same packing requirements as the IPv6 header, since it replaces + * the Fragment Header in memory in incoming fragments to keep + * track of the various fragments. + */ +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +struct ip6_reass_helper { + PACK_STRUCT_FIELD(struct pbuf *next_pbuf); + PACK_STRUCT_FIELD(u16_t start); + PACK_STRUCT_FIELD(u16_t end); +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif + +/* static variables */ +static struct ip6_reassdata *reassdatagrams; +static u16_t ip6_reass_pbufcount; + +/* Forward declarations. */ +static void ip6_reass_free_complete_datagram(struct ip6_reassdata *ipr); +#if IP_REASS_FREE_OLDEST +static void ip6_reass_remove_oldest_datagram(struct ip6_reassdata *ipr, int pbufs_needed); +#endif /* IP_REASS_FREE_OLDEST */ + +void +ip6_reass_tmr(void) +{ + struct ip6_reassdata *r, *tmp; + +#if !IPV6_FRAG_COPYHEADER + LWIP_ASSERT("sizeof(struct ip6_reass_helper) <= IP6_FRAG_HLEN, set IPV6_FRAG_COPYHEADER to 1", + sizeof(struct ip6_reass_helper) <= IP6_FRAG_HLEN); +#endif /* !IPV6_FRAG_COPYHEADER */ + + r = reassdatagrams; + while (r != NULL) { + /* Decrement the timer. Once it reaches 0, + * clean up the incomplete fragment assembly */ + if (r->timer > 0) { + r->timer--; + r = r->next; + } else { + /* reassembly timed out */ + tmp = r; + /* get the next pointer before freeing */ + r = r->next; + /* free the helper struct and all enqueued pbufs */ + ip6_reass_free_complete_datagram(tmp); + } + } +} + +/** + * Free a datagram (struct ip6_reassdata) and all its pbufs. + * Updates the total count of enqueued pbufs (ip6_reass_pbufcount), + * sends an ICMP time exceeded packet. + * + * @param ipr datagram to free + */ +static void +ip6_reass_free_complete_datagram(struct ip6_reassdata *ipr) +{ + struct ip6_reassdata *prev; + u16_t pbufs_freed = 0; + u16_t clen; + struct pbuf *p; + struct ip6_reass_helper *iprh; + +#if LWIP_ICMP6 + iprh = (struct ip6_reass_helper *)ipr->p->payload; + if (iprh->start == 0) { + /* The first fragment was received, send ICMP time exceeded. */ + /* First, de-queue the first pbuf from r->p. */ + p = ipr->p; + ipr->p = iprh->next_pbuf; + /* Restore the part that we've overwritten with our helper structure, or we + * might send garbage (and disclose a pointer) in the ICMPv6 reply. 
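+ * Per RFC 2460 section 4.5, the "fragment reassembly time exceeded"
+ * message is only generated when the first fragment (offset zero) has
+ * actually been received, which is why this branch is guarded by the
+ * iprh->start == 0 check above.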
*/ + MEMCPY(p->payload, ipr->orig_hdr, sizeof(iprh)); + /* Then, move back to the original ipv6 header (we are now pointing to Fragment header). + This cannot fail since we already checked when receiving this fragment. */ + if (pbuf_header_force(p, (s16_t)((u8_t*)p->payload - (u8_t*)ipr->iphdr))) { + LWIP_ASSERT("ip6_reass_free: moving p->payload to ip6 header failed\n", 0); + } + else { + /* Reconstruct the zoned source and destination addresses, so that we do + * not end up sending the ICMP response over the wrong link. */ + ip6_addr_t src_addr, dest_addr; + ip6_addr_copy_from_packed(src_addr, IPV6_FRAG_SRC(ipr)); + ip6_addr_set_zone(&src_addr, ipr->src_zone); + ip6_addr_copy_from_packed(dest_addr, IPV6_FRAG_DEST(ipr)); + ip6_addr_set_zone(&dest_addr, ipr->dest_zone); + /* Send the actual ICMP response. */ + icmp6_time_exceeded_with_addrs(p, ICMP6_TE_FRAG, &src_addr, &dest_addr); + } + clen = pbuf_clen(p); + LWIP_ASSERT("pbufs_freed + clen <= 0xffff", pbufs_freed + clen <= 0xffff); + pbufs_freed = (u16_t)(pbufs_freed + clen); + pbuf_free(p); + } +#endif /* LWIP_ICMP6 */ + + /* First, free all received pbufs. The individual pbufs need to be released + separately as they have not yet been chained */ + p = ipr->p; + while (p != NULL) { + struct pbuf *pcur; + iprh = (struct ip6_reass_helper *)p->payload; + pcur = p; + /* get the next pointer before freeing */ + p = iprh->next_pbuf; + clen = pbuf_clen(pcur); + LWIP_ASSERT("pbufs_freed + clen <= 0xffff", pbufs_freed + clen <= 0xffff); + pbufs_freed = (u16_t)(pbufs_freed + clen); + pbuf_free(pcur); + } + + /* Then, unchain the struct ip6_reassdata from the list and free it. */ + if (ipr == reassdatagrams) { + reassdatagrams = ipr->next; + } else { + prev = reassdatagrams; + while (prev != NULL) { + if (prev->next == ipr) { + break; + } + prev = prev->next; + } + if (prev != NULL) { + prev->next = ipr->next; + } + } + memp_free(MEMP_IP6_REASSDATA, ipr); + + /* Finally, update number of pbufs in reassembly queue */ + LWIP_ASSERT("ip_reass_pbufcount >= clen", ip6_reass_pbufcount >= pbufs_freed); + ip6_reass_pbufcount = (u16_t)(ip6_reass_pbufcount - pbufs_freed); +} + +#if IP_REASS_FREE_OLDEST +/** + * Free the oldest datagram to make room for enqueueing new fragments. + * The datagram ipr is not freed! + * + * @param ipr ip6_reassdata for the current fragment + * @param pbufs_needed number of pbufs needed to enqueue + * (used for freeing other datagrams if not enough space) + */ +static void +ip6_reass_remove_oldest_datagram(struct ip6_reassdata *ipr, int pbufs_needed) +{ + struct ip6_reassdata *r, *oldest; + + /* Free datagrams until being allowed to enqueue 'pbufs_needed' pbufs, + * but don't free the current datagram! */ + do { + r = oldest = reassdatagrams; + while (r != NULL) { + if (r != ipr) { + if (r->timer <= oldest->timer) { + /* older than the previous oldest */ + oldest = r; + } + } + r = r->next; + } + if (oldest == ipr) { + /* nothing to free, ipr is the only element on the list */ + return; + } + if (oldest != NULL) { + ip6_reass_free_complete_datagram(oldest); + } + } while (((ip6_reass_pbufcount + pbufs_needed) > IP_REASS_MAX_PBUFS) && (reassdatagrams != NULL)); +} +#endif /* IP_REASS_FREE_OLDEST */ + +/** + * Reassembles incoming IPv6 fragments into an IPv6 datagram. 
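+ *
+ * Offsets are taken straight from the Fragment Header: its upper 13 bits
+ * hold the offset in 8-octet units, so masking the 16-bit field with
+ * IP6_FRAG_OFFSET_MASK yields the offset in bytes. As an illustration, a
+ * fragment carrying payload bytes 1232..2463 with more fragments to follow
+ * arrives with _fragment_offset 0x04D1, giving start = 0x04D1 & 0xfff8 =
+ * 1232 and end = start + len = 2464.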
+ * + * @param p points to the IPv6 Fragment Header + * @return NULL if reassembly is incomplete, pbuf pointing to + * IPv6 Header if reassembly is complete + */ +struct pbuf * +ip6_reass(struct pbuf *p) +{ + struct ip6_reassdata *ipr, *ipr_prev; + struct ip6_reass_helper *iprh, *iprh_tmp, *iprh_prev=NULL; + struct ip6_frag_hdr *frag_hdr; + u16_t offset, len, start, end; + ptrdiff_t hdrdiff; + u16_t clen; + u8_t valid = 1; + struct pbuf *q, *next_pbuf; + + IP6_FRAG_STATS_INC(ip6_frag.recv); + + /* ip6_frag_hdr must be in the first pbuf, not chained. Checked by caller. */ + LWIP_ASSERT("IPv6 fragment header does not fit in first pbuf", + p->len >= sizeof(struct ip6_frag_hdr)); + + frag_hdr = (struct ip6_frag_hdr *) p->payload; + + clen = pbuf_clen(p); + + offset = lwip_ntohs(frag_hdr->_fragment_offset); + + /* Calculate fragment length from IPv6 payload length. + * Adjust for headers before Fragment Header. + * And finally adjust by Fragment Header length. */ + len = lwip_ntohs(ip6_current_header()->_plen); + hdrdiff = (u8_t*)p->payload - (const u8_t*)ip6_current_header(); + LWIP_ASSERT("not a valid pbuf (ip6_input check missing?)", hdrdiff <= 0xFFFF); + LWIP_ASSERT("not a valid pbuf (ip6_input check missing?)", hdrdiff >= IP6_HLEN); + hdrdiff -= IP6_HLEN; + hdrdiff += IP6_FRAG_HLEN; + if (hdrdiff > len) { + IP6_FRAG_STATS_INC(ip6_frag.proterr); + goto nullreturn; + } + len = (u16_t)(len - hdrdiff); + start = (offset & IP6_FRAG_OFFSET_MASK); + if (start > (0xFFFF - len)) { + /* u16_t overflow, cannot handle this */ + IP6_FRAG_STATS_INC(ip6_frag.proterr); + goto nullreturn; + } + + /* Look for the datagram the fragment belongs to in the current datagram queue, + * remembering the previous in the queue for later dequeueing. */ + for (ipr = reassdatagrams, ipr_prev = NULL; ipr != NULL; ipr = ipr->next) { + /* Check if the incoming fragment matches the one currently present + in the reassembly buffer. If so, we proceed with copying the + fragment into the buffer. */ + if ((frag_hdr->_identification == ipr->identification) && + ip6_addr_cmp_packed(ip6_current_src_addr(), &(IPV6_FRAG_SRC(ipr)), ipr->src_zone) && + ip6_addr_cmp_packed(ip6_current_dest_addr(), &(IPV6_FRAG_DEST(ipr)), ipr->dest_zone)) { + IP6_FRAG_STATS_INC(ip6_frag.cachehit); + break; + } + ipr_prev = ipr; + } + + if (ipr == NULL) { + /* Enqueue a new datagram into the datagram queue */ + ipr = (struct ip6_reassdata *)memp_malloc(MEMP_IP6_REASSDATA); + if (ipr == NULL) { +#if IP_REASS_FREE_OLDEST + /* Make room and try again. */ + ip6_reass_remove_oldest_datagram(ipr, clen); + ipr = (struct ip6_reassdata *)memp_malloc(MEMP_IP6_REASSDATA); + if (ipr != NULL) { + /* re-search ipr_prev since it might have been removed */ + for (ipr_prev = reassdatagrams; ipr_prev != NULL; ipr_prev = ipr_prev->next) { + if (ipr_prev->next == ipr) { + break; + } + } + } else +#endif /* IP_REASS_FREE_OLDEST */ + { + IP6_FRAG_STATS_INC(ip6_frag.memerr); + goto nullreturn; + } + } + + memset(ipr, 0, sizeof(struct ip6_reassdata)); + ipr->timer = IPV6_REASS_MAXAGE; + + /* enqueue the new structure to the front of the list */ + ipr->next = reassdatagrams; + reassdatagrams = ipr; + + /* Use the current IPv6 header for src/dest address reference. + * Eventually, we will replace it when we get the first fragment + * (it might be this one, in any case, it is done later). 
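+ * With IPV6_FRAG_COPYHEADER enabled the source and destination addresses
+ * are also copied into the reassembly entry, because on platforms where
+ * pointers are wider than 32 bits the ip6_reass_helper struct does not fit
+ * inside the 8-byte Fragment Header and spills IPV6_FRAG_REQROOM bytes
+ * into the header that precedes it.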
*/ + /* need to use the none-const pointer here: */ + ipr->iphdr = ip_data.current_ip6_header; +#if IPV6_FRAG_COPYHEADER + MEMCPY(&ipr->src, &ip6_current_header()->src, sizeof(ipr->src)); + MEMCPY(&ipr->dest, &ip6_current_header()->dest, sizeof(ipr->dest)); +#endif /* IPV6_FRAG_COPYHEADER */ +#if LWIP_IPV6_SCOPES + /* Also store the address zone information. + * @todo It is possible that due to netif destruction and recreation, the + * stored zones end up resolving to a different interface. In that case, we + * risk sending a "time exceeded" ICMP response over the wrong link. + * Ideally, netif destruction would clean up matching pending reassembly + * structures, but custom zone mappings would make that non-trivial. */ + ipr->src_zone = ip6_addr_zone(ip6_current_src_addr()); + ipr->dest_zone = ip6_addr_zone(ip6_current_dest_addr()); +#endif /* LWIP_IPV6_SCOPES */ + /* copy the fragmented packet id. */ + ipr->identification = frag_hdr->_identification; + + /* copy the nexth field */ + ipr->nexth = frag_hdr->_nexth; + } + + /* Check if we are allowed to enqueue more datagrams. */ + if ((ip6_reass_pbufcount + clen) > IP_REASS_MAX_PBUFS) { +#if IP_REASS_FREE_OLDEST + ip6_reass_remove_oldest_datagram(ipr, clen); + if ((ip6_reass_pbufcount + clen) <= IP_REASS_MAX_PBUFS) { + /* re-search ipr_prev since it might have been removed */ + for (ipr_prev = reassdatagrams; ipr_prev != NULL; ipr_prev = ipr_prev->next) { + if (ipr_prev->next == ipr) { + break; + } + } + } else +#endif /* IP_REASS_FREE_OLDEST */ + { + /* @todo: send ICMPv6 time exceeded here? */ + /* drop this pbuf */ + IP6_FRAG_STATS_INC(ip6_frag.memerr); + goto nullreturn; + } + } + + /* Overwrite Fragment Header with our own helper struct. */ +#if IPV6_FRAG_COPYHEADER + if (IPV6_FRAG_REQROOM > 0) { + /* Make room for struct ip6_reass_helper (only required if sizeof(void*) > 4). + This cannot fail since we already checked when receiving this fragment. */ + u8_t hdrerr = pbuf_header_force(p, IPV6_FRAG_REQROOM); + LWIP_UNUSED_ARG(hdrerr); /* in case of LWIP_NOASSERT */ + LWIP_ASSERT("no room for struct ip6_reass_helper", hdrerr == 0); + } +#else /* IPV6_FRAG_COPYHEADER */ + LWIP_ASSERT("sizeof(struct ip6_reass_helper) <= IP6_FRAG_HLEN, set IPV6_FRAG_COPYHEADER to 1", + sizeof(struct ip6_reass_helper) <= IP6_FRAG_HLEN); +#endif /* IPV6_FRAG_COPYHEADER */ + + /* Prepare the pointer to the helper structure, and its initial values. + * Do not yet write to the structure itself, as we still have to make a + * backup of the original data, and we should not do that until we know for + * sure that we are going to add this packet to the list. */ + iprh = (struct ip6_reass_helper *)p->payload; + next_pbuf = NULL; + end = (u16_t)(start + len); + + /* find the right place to insert this pbuf */ + /* Iterate through until we either get to the end of the list (append), + * or we find on with a larger offset (insert). 
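+ * With IP_REASS_CHECK_OVERLAP enabled, a fragment that intrudes into one
+ * already queued is rejected here: if [1232,2464) is queued, an incoming
+ * fragment covering [1000,1500) overlaps its start and is dropped as a
+ * protocol error, whereas [0,1232) is inserted cleanly in front of it.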
*/ + for (q = ipr->p; q != NULL;) { + iprh_tmp = (struct ip6_reass_helper*)q->payload; + if (start < iprh_tmp->start) { +#if IP_REASS_CHECK_OVERLAP + if (end > iprh_tmp->start) { + /* fragment overlaps with following, throw away */ + IP6_FRAG_STATS_INC(ip6_frag.proterr); + goto nullreturn; + } + if (iprh_prev != NULL) { + if (start < iprh_prev->end) { + /* fragment overlaps with previous, throw away */ + IP6_FRAG_STATS_INC(ip6_frag.proterr); + goto nullreturn; + } + } +#endif /* IP_REASS_CHECK_OVERLAP */ + /* the new pbuf should be inserted before this */ + next_pbuf = q; + if (iprh_prev != NULL) { + /* not the fragment with the lowest offset */ + iprh_prev->next_pbuf = p; + } else { + /* fragment with the lowest offset */ + ipr->p = p; + } + break; + } else if (start == iprh_tmp->start) { + /* received the same datagram twice: no need to keep the datagram */ + goto nullreturn; +#if IP_REASS_CHECK_OVERLAP + } else if (start < iprh_tmp->end) { + /* overlap: no need to keep the new datagram */ + IP6_FRAG_STATS_INC(ip6_frag.proterr); + goto nullreturn; +#endif /* IP_REASS_CHECK_OVERLAP */ + } else { + /* Check if the fragments received so far have no gaps. */ + if (iprh_prev != NULL) { + if (iprh_prev->end != iprh_tmp->start) { + /* There is a fragment missing between the current + * and the previous fragment */ + valid = 0; + } + } + } + q = iprh_tmp->next_pbuf; + iprh_prev = iprh_tmp; + } + + /* If q is NULL, then we made it to the end of the list. Determine what to do now */ + if (q == NULL) { + if (iprh_prev != NULL) { + /* this is (for now), the fragment with the highest offset: + * chain it to the last fragment */ +#if IP_REASS_CHECK_OVERLAP + LWIP_ASSERT("check fragments don't overlap", iprh_prev->end <= start); +#endif /* IP_REASS_CHECK_OVERLAP */ + iprh_prev->next_pbuf = p; + if (iprh_prev->end != start) { + valid = 0; + } + } else { +#if IP_REASS_CHECK_OVERLAP + LWIP_ASSERT("no previous fragment, this must be the first fragment!", + ipr->p == NULL); +#endif /* IP_REASS_CHECK_OVERLAP */ + /* this is the first fragment we ever received for this ip datagram */ + ipr->p = p; + } + } + + /* Track the current number of pbufs current 'in-flight', in order to limit + the number of fragments that may be enqueued at any one time */ + ip6_reass_pbufcount = (u16_t)(ip6_reass_pbufcount + clen); + + /* Remember IPv6 header if this is the first fragment. */ + if (start == 0) { + /* need to use the none-const pointer here: */ + ipr->iphdr = ip_data.current_ip6_header; + /* Make a backup of the part of the packet data that we are about to + * overwrite, so that we can restore the original later. */ + MEMCPY(ipr->orig_hdr, p->payload, sizeof(*iprh)); + /* For IPV6_FRAG_COPYHEADER there is no need to copy src/dst again, as they + * will be the same as they were. With LWIP_IPV6_SCOPES, the same applies + * to the source/destination zones. */ + } + /* Only after the backup do we get to fill in the actual helper structure. */ + iprh->next_pbuf = next_pbuf; + iprh->start = start; + iprh->end = end; + + /* If this is the last fragment, calculate total packet length. */ + if ((offset & IP6_FRAG_MORE_FLAG) == 0) { + ipr->datagram_len = iprh->end; + } + + /* Additional validity tests: we have received first and last fragment. */ + iprh_tmp = (struct ip6_reass_helper*)ipr->p->payload; + if (iprh_tmp->start != 0) { + valid = 0; + } + if (ipr->datagram_len == 0) { + valid = 0; + } + + /* Final validity test: no gaps between current and last fragment. 
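+ * For instance, fragments covering [0,1232), [1232,2464) and a final
+ * fragment [2464,2800) with the more-fragments flag cleared chain without
+ * gaps and complete the datagram, whereas [0,1232) followed directly by
+ * [2464,2800) keeps valid at 0 until the missing middle piece arrives.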
*/ + iprh_prev = iprh; + q = iprh->next_pbuf; + while ((q != NULL) && valid) { + iprh = (struct ip6_reass_helper*)q->payload; + if (iprh_prev->end != iprh->start) { + valid = 0; + break; + } + iprh_prev = iprh; + q = iprh->next_pbuf; + } + + if (valid) { + /* All fragments have been received */ + struct ip6_hdr* iphdr_ptr; + + /* chain together the pbufs contained within the ip6_reassdata list. */ + iprh = (struct ip6_reass_helper*) ipr->p->payload; + while (iprh != NULL) { + next_pbuf = iprh->next_pbuf; + if (next_pbuf != NULL) { + /* Save next helper struct (will be hidden in next step). */ + iprh_tmp = (struct ip6_reass_helper*)next_pbuf->payload; + + /* hide the fragment header for every succeeding fragment */ + pbuf_remove_header(next_pbuf, IP6_FRAG_HLEN); +#if IPV6_FRAG_COPYHEADER + if (IPV6_FRAG_REQROOM > 0) { + /* hide the extra bytes borrowed from ip6_hdr for struct ip6_reass_helper */ + u8_t hdrerr = pbuf_remove_header(next_pbuf, IPV6_FRAG_REQROOM); + LWIP_UNUSED_ARG(hdrerr); /* in case of LWIP_NOASSERT */ + LWIP_ASSERT("no room for struct ip6_reass_helper", hdrerr == 0); + } +#endif + pbuf_cat(ipr->p, next_pbuf); + } + else { + iprh_tmp = NULL; + } + + iprh = iprh_tmp; + } + + /* Get the first pbuf. */ + p = ipr->p; + +#if IPV6_FRAG_COPYHEADER + if (IPV6_FRAG_REQROOM > 0) { + u8_t hdrerr; + /* Restore (only) the bytes that we overwrote beyond the fragment header. + * Those bytes may belong to either the IPv6 header or an extension + * header placed before the fragment header. */ + MEMCPY(p->payload, ipr->orig_hdr, IPV6_FRAG_REQROOM); + /* get back room for struct ip6_reass_helper (only required if sizeof(void*) > 4) */ + hdrerr = pbuf_remove_header(p, IPV6_FRAG_REQROOM); + LWIP_UNUSED_ARG(hdrerr); /* in case of LWIP_NOASSERT */ + LWIP_ASSERT("no room for struct ip6_reass_helper", hdrerr == 0); + } +#endif + + /* We need to get rid of the fragment header itself, which is somewhere in + * the middle of the packet (but still in the first pbuf of the chain). + * Getting rid of the header is required by RFC 2460 Sec. 4.5 and necessary + * in order to be able to reassemble packets that are close to full size + * (i.e., around 65535 bytes). We simply move up all the headers before the + * fragment header, including the IPv6 header, and adjust the payload start + * accordingly. This works because all these headers are in the first pbuf + * of the chain, and because the caller adjusts all its pointers on + * successful reassembly. */ + MEMMOVE((u8_t*)ipr->iphdr + sizeof(struct ip6_frag_hdr), ipr->iphdr, + (size_t)((u8_t*)p->payload - (u8_t*)ipr->iphdr)); + + /* This is where the IPv6 header is now. */ + iphdr_ptr = (struct ip6_hdr*)((u8_t*)ipr->iphdr + + sizeof(struct ip6_frag_hdr)); + + /* Adjust datagram length by adding header lengths. */ + ipr->datagram_len = (u16_t)(ipr->datagram_len + ((u8_t*)p->payload - (u8_t*)iphdr_ptr) + - IP6_HLEN); + + /* Set payload length in ip header. */ + iphdr_ptr->_plen = lwip_htons(ipr->datagram_len); + + /* With the fragment header gone, we now need to adjust the next-header + * field of whatever header was originally before it. Since the packet made + * it through the original header processing routines at least up to the + * fragment header, we do not need any further sanity checks here. 
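+ * The walk below relies on the common extension-header layout: byte 0
+ * holds the next-header value and byte 1 the header length in 8-octet
+ * units, not counting the first 8 octets, so each step advances by
+ * 8 * (1 + ptr[1]) bytes. A Hop-by-Hop header with a length byte of 0,
+ * for example, is skipped in a single 8-byte step.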
*/ + if (IP6H_NEXTH(iphdr_ptr) == IP6_NEXTH_FRAGMENT) { + iphdr_ptr->_nexth = ipr->nexth; + } else { + u8_t *ptr = (u8_t *)iphdr_ptr + IP6_HLEN; + while (*ptr != IP6_NEXTH_FRAGMENT) { + ptr += 8 * (1 + ptr[1]); + } + *ptr = ipr->nexth; + } + + /* release the resources allocated for the fragment queue entry */ + if (reassdatagrams == ipr) { + /* it was the first in the list */ + reassdatagrams = ipr->next; + } else { + /* it wasn't the first, so it must have a valid 'prev' */ + LWIP_ASSERT("sanity check linked list", ipr_prev != NULL); + ipr_prev->next = ipr->next; + } + memp_free(MEMP_IP6_REASSDATA, ipr); + + /* adjust the number of pbufs currently queued for reassembly. */ + clen = pbuf_clen(p); + LWIP_ASSERT("ip6_reass_pbufcount >= clen", ip6_reass_pbufcount >= clen); + ip6_reass_pbufcount = (u16_t)(ip6_reass_pbufcount - clen); + + /* Move pbuf back to IPv6 header. This should never fail. */ + if (pbuf_header_force(p, (s16_t)((u8_t*)p->payload - (u8_t*)iphdr_ptr))) { + LWIP_ASSERT("ip6_reass: moving p->payload to ip6 header failed\n", 0); + pbuf_free(p); + return NULL; + } + + /* Return the pbuf chain */ + return p; + } + /* the datagram is not (yet?) reassembled completely */ + return NULL; + +nullreturn: + IP6_FRAG_STATS_INC(ip6_frag.drop); + pbuf_free(p); + return NULL; +} + +#endif /* LWIP_IPV6 && LWIP_IPV6_REASS */ + +#if LWIP_IPV6 && LWIP_IPV6_FRAG + +#if !LWIP_NETIF_TX_SINGLE_PBUF +/** Allocate a new struct pbuf_custom_ref */ +static struct pbuf_custom_ref* +ip6_frag_alloc_pbuf_custom_ref(void) +{ + return (struct pbuf_custom_ref*)memp_malloc(MEMP_FRAG_PBUF); +} + +/** Free a struct pbuf_custom_ref */ +static void +ip6_frag_free_pbuf_custom_ref(struct pbuf_custom_ref* p) +{ + LWIP_ASSERT("p != NULL", p != NULL); + memp_free(MEMP_FRAG_PBUF, p); +} + +/** Free-callback function to free a 'struct pbuf_custom_ref', called by + * pbuf_free. */ +static void +ip6_frag_free_pbuf_custom(struct pbuf *p) +{ + struct pbuf_custom_ref *pcr = (struct pbuf_custom_ref*)p; + LWIP_ASSERT("pcr != NULL", pcr != NULL); + LWIP_ASSERT("pcr == p", (void*)pcr == (void*)p); + if (pcr->original != NULL) { + pbuf_free(pcr->original); + } + ip6_frag_free_pbuf_custom_ref(pcr); +} +#endif /* !LWIP_NETIF_TX_SINGLE_PBUF */ + +/** + * Fragment an IPv6 datagram if too large for the netif or path MTU. + * + * Chop the datagram in MTU sized chunks and send them in order + * by pointing PBUF_REFs into p + * + * @param p ipv6 packet to send + * @param netif the netif on which to send + * @param dest destination ipv6 address to which to send + * + * @return ERR_OK if sent successfully, err_t otherwise + */ +err_t +ip6_frag(struct pbuf *p, struct netif *netif, const ip6_addr_t *dest) +{ + struct ip6_hdr *original_ip6hdr; + struct ip6_hdr *ip6hdr; + struct ip6_frag_hdr *frag_hdr; + struct pbuf *rambuf; +#if !LWIP_NETIF_TX_SINGLE_PBUF + struct pbuf *newpbuf; + u16_t newpbuflen = 0; + u16_t left_to_copy; +#endif + static u32_t identification; + u16_t left, cop; + const u16_t mtu = nd6_get_destination_mtu(dest, netif); + const u16_t nfb = (u16_t)((mtu - (IP6_HLEN + IP6_FRAG_HLEN)) & IP6_FRAG_OFFSET_MASK); + u16_t fragment_offset = 0; + u16_t last; + u16_t poff = IP6_HLEN; + + identification++; + + original_ip6hdr = (struct ip6_hdr *)p->payload; + + /* @todo we assume there are no options in the unfragmentable part (IPv6 header). */ + LWIP_ASSERT("p->tot_len >= IP6_HLEN", p->tot_len >= IP6_HLEN); + left = (u16_t)(p->tot_len - IP6_HLEN); + + while (left) { + last = (left <= nfb); + + /* Fill this fragment */ + cop = last ? 
left : nfb; + +#if LWIP_NETIF_TX_SINGLE_PBUF + rambuf = pbuf_alloc(PBUF_IP, cop + IP6_FRAG_HLEN, PBUF_RAM); + if (rambuf == NULL) { + IP6_FRAG_STATS_INC(ip6_frag.memerr); + return ERR_MEM; + } + LWIP_ASSERT("this needs a pbuf in one piece!", + (rambuf->len == rambuf->tot_len) && (rambuf->next == NULL)); + poff += pbuf_copy_partial(p, (u8_t*)rambuf->payload + IP6_FRAG_HLEN, cop, poff); + /* make room for the IP header */ + if (pbuf_add_header(rambuf, IP6_HLEN)) { + pbuf_free(rambuf); + IP6_FRAG_STATS_INC(ip6_frag.memerr); + return ERR_MEM; + } + /* fill in the IP header */ + SMEMCPY(rambuf->payload, original_ip6hdr, IP6_HLEN); + ip6hdr = (struct ip6_hdr *)rambuf->payload; + frag_hdr = (struct ip6_frag_hdr *)((u8_t*)rambuf->payload + IP6_HLEN); +#else + /* When not using a static buffer, create a chain of pbufs. + * The first will be a PBUF_RAM holding the link, IPv6, and Fragment header. + * The rest will be PBUF_REFs mirroring the pbuf chain to be fragged, + * but limited to the size of an mtu. + */ + rambuf = pbuf_alloc(PBUF_LINK, IP6_HLEN + IP6_FRAG_HLEN, PBUF_RAM); + if (rambuf == NULL) { + IP6_FRAG_STATS_INC(ip6_frag.memerr); + return ERR_MEM; + } + LWIP_ASSERT("this needs a pbuf in one piece!", + (p->len >= (IP6_HLEN))); + SMEMCPY(rambuf->payload, original_ip6hdr, IP6_HLEN); + ip6hdr = (struct ip6_hdr *)rambuf->payload; + frag_hdr = (struct ip6_frag_hdr *)((u8_t*)rambuf->payload + IP6_HLEN); + + /* Can just adjust p directly for needed offset. */ + p->payload = (u8_t *)p->payload + poff; + p->len = (u16_t)(p->len - poff); + p->tot_len = (u16_t)(p->tot_len - poff); + + left_to_copy = cop; + while (left_to_copy) { + struct pbuf_custom_ref *pcr; + newpbuflen = (left_to_copy < p->len) ? left_to_copy : p->len; + /* Is this pbuf already empty? */ + if (!newpbuflen) { + p = p->next; + continue; + } + pcr = ip6_frag_alloc_pbuf_custom_ref(); + if (pcr == NULL) { + pbuf_free(rambuf); + IP6_FRAG_STATS_INC(ip6_frag.memerr); + return ERR_MEM; + } + /* Mirror this pbuf, although we might not need all of it. */ + newpbuf = pbuf_alloced_custom(PBUF_RAW, newpbuflen, PBUF_REF, &pcr->pc, p->payload, newpbuflen); + if (newpbuf == NULL) { + ip6_frag_free_pbuf_custom_ref(pcr); + pbuf_free(rambuf); + IP6_FRAG_STATS_INC(ip6_frag.memerr); + return ERR_MEM; + } + pbuf_ref(p); + pcr->original = p; + pcr->pc.custom_free_function = ip6_frag_free_pbuf_custom; + + /* Add it to end of rambuf's chain, but using pbuf_cat, not pbuf_chain + * so that it is removed when pbuf_dechain is later called on rambuf. + */ + pbuf_cat(rambuf, newpbuf); + left_to_copy = (u16_t)(left_to_copy - newpbuflen); + if (left_to_copy) { + p = p->next; + } + } + poff = newpbuflen; +#endif /* LWIP_NETIF_TX_SINGLE_PBUF */ + + /* Set headers */ + frag_hdr->_nexth = original_ip6hdr->_nexth; + frag_hdr->reserved = 0; + frag_hdr->_fragment_offset = lwip_htons((u16_t)((fragment_offset & IP6_FRAG_OFFSET_MASK) | (last ? 0 : IP6_FRAG_MORE_FLAG))); + frag_hdr->_identification = lwip_htonl(identification); + + IP6H_NEXTH_SET(ip6hdr, IP6_NEXTH_FRAGMENT); + IP6H_PLEN_SET(ip6hdr, (u16_t)(cop + IP6_FRAG_HLEN)); + + /* No need for separate header pbuf - we allowed room for it in rambuf + * when allocated. + */ + IP6_FRAG_STATS_INC(ip6_frag.xmit); + netif->output_ip6(netif, rambuf, dest); + + /* Unfortunately we can't reuse rambuf - the hardware may still be + * using the buffer. Instead we free it (and the ensuing chain) and + * recreate it next time round the loop. 
If we're lucky the hardware + * will have already sent the packet, the free will really free, and + * there will be zero memory penalty. + */ + + pbuf_free(rambuf); + left = (u16_t)(left - cop); + fragment_offset = (u16_t)(fragment_offset + cop); + } + return ERR_OK; +} + +#endif /* LWIP_IPV6 && LWIP_IPV6_FRAG */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/ipv6/mld6.c b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/ipv6/mld6.c new file mode 100644 index 0000000..db1d09a --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/ipv6/mld6.c @@ -0,0 +1,626 @@ +/** + * @file + * Multicast listener discovery + * + * @defgroup mld6 MLD6 + * @ingroup ip6 + * Multicast listener discovery for IPv6. Aims to be compliant with RFC 2710. + * No support for MLDv2.\n + * Note: The allnodes (ff01::1, ff02::1) group is assumed be received by your + * netif since it must always be received for correct IPv6 operation (e.g. SLAAC). + * Ensure the netif filters are configured accordingly!\n + * The netif flags also need NETIF_FLAG_MLD6 flag set to enable MLD6 on a + * netif ("netif->flags |= NETIF_FLAG_MLD6;").\n + * To be called from TCPIP thread. + */ + +/* + * Copyright (c) 2010 Inico Technologies Ltd. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
+ * + * Author: Ivan Delamer + * + * + * Please coordinate changes and requests with Ivan Delamer + * + */ + +/* Based on igmp.c implementation of igmp v2 protocol */ + +#include "lwip/opt.h" + +#if LWIP_IPV6 && LWIP_IPV6_MLD /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/mld6.h" +#include "lwip/prot/mld6.h" +#include "lwip/icmp6.h" +#include "lwip/ip6.h" +#include "lwip/ip6_addr.h" +#include "lwip/ip.h" +#include "lwip/inet_chksum.h" +#include "lwip/pbuf.h" +#include "lwip/netif.h" +#include "lwip/memp.h" +#include "lwip/stats.h" + +#include + + +/* + * MLD constants + */ +#define MLD6_HL 1 +#define MLD6_JOIN_DELAYING_MEMBER_TMR_MS (500) + +#define MLD6_GROUP_NON_MEMBER 0 +#define MLD6_GROUP_DELAYING_MEMBER 1 +#define MLD6_GROUP_IDLE_MEMBER 2 + +/* Forward declarations. */ +static struct mld_group *mld6_new_group(struct netif *ifp, const ip6_addr_t *addr); +static err_t mld6_remove_group(struct netif *netif, struct mld_group *group); +static void mld6_delayed_report(struct mld_group *group, u16_t maxresp); +static void mld6_send(struct netif *netif, struct mld_group *group, u8_t type); + + +/** + * Stop MLD processing on interface + * + * @param netif network interface on which stop MLD processing + */ +err_t +mld6_stop(struct netif *netif) +{ + struct mld_group *group = netif_mld6_data(netif); + + netif_set_client_data(netif, LWIP_NETIF_CLIENT_DATA_INDEX_MLD6, NULL); + + while (group != NULL) { + struct mld_group *next = group->next; /* avoid use-after-free below */ + + /* disable the group at the MAC level */ + if (netif->mld_mac_filter != NULL) { + netif->mld_mac_filter(netif, &(group->group_address), NETIF_DEL_MAC_FILTER); + } + + /* free group */ + memp_free(MEMP_MLD6_GROUP, group); + + /* move to "next" */ + group = next; + } + return ERR_OK; +} + +/** + * Report MLD memberships for this interface + * + * @param netif network interface on which report MLD memberships + */ +void +mld6_report_groups(struct netif *netif) +{ + struct mld_group *group = netif_mld6_data(netif); + + while (group != NULL) { + mld6_delayed_report(group, MLD6_JOIN_DELAYING_MEMBER_TMR_MS); + group = group->next; + } +} + +/** + * Search for a group that is joined on a netif + * + * @param ifp the network interface for which to look + * @param addr the group ipv6 address to search for + * @return a struct mld_group* if the group has been found, + * NULL if the group wasn't found. + */ +struct mld_group * +mld6_lookfor_group(struct netif *ifp, const ip6_addr_t *addr) +{ + struct mld_group *group = netif_mld6_data(ifp); + + while (group != NULL) { + if (ip6_addr_cmp(&(group->group_address), addr)) { + return group; + } + group = group->next; + } + + return NULL; +} + + +/** + * create a new group + * + * @param ifp the network interface for which to create + * @param addr the new group ipv6 + * @return a struct mld_group*, + * NULL on memory error. 
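+ *
+ * Groups are normally created indirectly, via mld6_joingroup() or
+ * mld6_joingroup_netif(). A minimal application-side sketch, to be run
+ * from the tcpip thread (the group address and netif variable are
+ * arbitrary examples):
+ *
+ *   err_t err;
+ *   ip6_addr_t grp;
+ *   ip6addr_aton("FF02::FB", &grp);
+ *   err = mld6_joingroup_netif(my_netif, &grp);
+ *
+ * with err checked against ERR_OK by the caller.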
+ */ +static struct mld_group * +mld6_new_group(struct netif *ifp, const ip6_addr_t *addr) +{ + struct mld_group *group; + + group = (struct mld_group *)memp_malloc(MEMP_MLD6_GROUP); + if (group != NULL) { + ip6_addr_set(&(group->group_address), addr); + group->timer = 0; /* Not running */ + group->group_state = MLD6_GROUP_IDLE_MEMBER; + group->last_reporter_flag = 0; + group->use = 0; + group->next = netif_mld6_data(ifp); + + netif_set_client_data(ifp, LWIP_NETIF_CLIENT_DATA_INDEX_MLD6, group); + } + + return group; +} + +/** + * Remove a group from the mld_group_list, but do not free it yet + * + * @param group the group to remove + * @return ERR_OK if group was removed from the list, an err_t otherwise + */ +static err_t +mld6_remove_group(struct netif *netif, struct mld_group *group) +{ + err_t err = ERR_OK; + + /* Is it the first group? */ + if (netif_mld6_data(netif) == group) { + netif_set_client_data(netif, LWIP_NETIF_CLIENT_DATA_INDEX_MLD6, group->next); + } else { + /* look for group further down the list */ + struct mld_group *tmpGroup; + for (tmpGroup = netif_mld6_data(netif); tmpGroup != NULL; tmpGroup = tmpGroup->next) { + if (tmpGroup->next == group) { + tmpGroup->next = group->next; + break; + } + } + /* Group not find group */ + if (tmpGroup == NULL) { + err = ERR_ARG; + } + } + + return err; +} + + +/** + * Process an input MLD message. Called by icmp6_input. + * + * @param p the mld packet, p->payload pointing to the icmpv6 header + * @param inp the netif on which this packet was received + */ +void +mld6_input(struct pbuf *p, struct netif *inp) +{ + struct mld_header *mld_hdr; + struct mld_group *group; + + MLD6_STATS_INC(mld6.recv); + + /* Check that mld header fits in packet. */ + if (p->len < sizeof(struct mld_header)) { + /* @todo debug message */ + pbuf_free(p); + MLD6_STATS_INC(mld6.lenerr); + MLD6_STATS_INC(mld6.drop); + return; + } + + mld_hdr = (struct mld_header *)p->payload; + + switch (mld_hdr->type) { + case ICMP6_TYPE_MLQ: /* Multicast listener query. */ + /* Is it a general query? */ + if (ip6_addr_isallnodes_linklocal(ip6_current_dest_addr()) && + ip6_addr_isany(&(mld_hdr->multicast_address))) { + MLD6_STATS_INC(mld6.rx_general); + /* Report all groups, except all nodes group, and if-local groups. */ + group = netif_mld6_data(inp); + while (group != NULL) { + if ((!(ip6_addr_ismulticast_iflocal(&(group->group_address)))) && + (!(ip6_addr_isallnodes_linklocal(&(group->group_address))))) { + mld6_delayed_report(group, mld_hdr->max_resp_delay); + } + group = group->next; + } + } else { + /* Have we joined this group? + * We use IP6 destination address to have a memory aligned copy. + * mld_hdr->multicast_address should be the same. */ + MLD6_STATS_INC(mld6.rx_group); + group = mld6_lookfor_group(inp, ip6_current_dest_addr()); + if (group != NULL) { + /* Schedule a report. */ + mld6_delayed_report(group, mld_hdr->max_resp_delay); + } + } + break; /* ICMP6_TYPE_MLQ */ + case ICMP6_TYPE_MLR: /* Multicast listener report. */ + /* Have we joined this group? + * We use IP6 destination address to have a memory aligned copy. + * mld_hdr->multicast_address should be the same. */ + MLD6_STATS_INC(mld6.rx_report); + group = mld6_lookfor_group(inp, ip6_current_dest_addr()); + if (group != NULL) { + /* If we are waiting to report, cancel it. 
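+ * Another listener on the link has already reported this group, so the
+ * pending report is suppressed and last_reporter_flag is cleared; as a
+ * consequence this host will not send a Done message when it later leaves
+ * the group (see mld6_leavegroup_netif()).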
*/ + if (group->group_state == MLD6_GROUP_DELAYING_MEMBER) { + group->timer = 0; /* stopped */ + group->group_state = MLD6_GROUP_IDLE_MEMBER; + group->last_reporter_flag = 0; + } + } + break; /* ICMP6_TYPE_MLR */ + case ICMP6_TYPE_MLD: /* Multicast listener done. */ + /* Do nothing, router will query us. */ + break; /* ICMP6_TYPE_MLD */ + default: + MLD6_STATS_INC(mld6.proterr); + MLD6_STATS_INC(mld6.drop); + break; + } + + pbuf_free(p); +} + +/** + * @ingroup mld6 + * Join a group on one or all network interfaces. + * + * If the group is to be joined on all interfaces, the given group address must + * not have a zone set (i.e., it must have its zone index set to IP6_NO_ZONE). + * If the group is to be joined on one particular interface, the given group + * address may or may not have a zone set. + * + * @param srcaddr ipv6 address (zoned) of the network interface which should + * join a new group. If IP6_ADDR_ANY6, join on all netifs + * @param groupaddr the ipv6 address of the group to join (possibly but not + * necessarily zoned) + * @return ERR_OK if group was joined on the netif(s), an err_t otherwise + */ +err_t +mld6_joingroup(const ip6_addr_t *srcaddr, const ip6_addr_t *groupaddr) +{ + err_t err = ERR_VAL; /* no matching interface */ + struct netif *netif; + + LWIP_ASSERT_CORE_LOCKED(); + + /* loop through netif's */ + NETIF_FOREACH(netif) { + /* Should we join this interface ? */ + if (ip6_addr_isany(srcaddr) || + netif_get_ip6_addr_match(netif, srcaddr) >= 0) { + err = mld6_joingroup_netif(netif, groupaddr); + if (err != ERR_OK) { + return err; + } + } + } + + return err; +} + +/** + * @ingroup mld6 + * Join a group on a network interface. + * + * @param netif the network interface which should join a new group. + * @param groupaddr the ipv6 address of the group to join (possibly but not + * necessarily zoned) + * @return ERR_OK if group was joined on the netif, an err_t otherwise + */ +err_t +mld6_joingroup_netif(struct netif *netif, const ip6_addr_t *groupaddr) +{ + struct mld_group *group; +#if LWIP_IPV6_SCOPES + ip6_addr_t ip6addr; + + /* If the address has a particular scope but no zone set, use the netif to + * set one now. Within the mld6 module, all addresses are properly zoned. */ + if (ip6_addr_lacks_zone(groupaddr, IP6_MULTICAST)) { + ip6_addr_set(&ip6addr, groupaddr); + ip6_addr_assign_zone(&ip6addr, IP6_MULTICAST, netif); + groupaddr = &ip6addr; + } + IP6_ADDR_ZONECHECK_NETIF(groupaddr, netif); +#endif /* LWIP_IPV6_SCOPES */ + + LWIP_ASSERT_CORE_LOCKED(); + + /* find group or create a new one if not found */ + group = mld6_lookfor_group(netif, groupaddr); + + if (group == NULL) { + /* Joining a new group. Create a new group entry. */ + group = mld6_new_group(netif, groupaddr); + if (group == NULL) { + return ERR_MEM; + } + + /* Activate this address on the MAC layer. */ + if (netif->mld_mac_filter != NULL) { + netif->mld_mac_filter(netif, groupaddr, NETIF_ADD_MAC_FILTER); + } + + /* Report our membership. */ + MLD6_STATS_INC(mld6.tx_report); + mld6_send(netif, group, ICMP6_TYPE_MLR); + mld6_delayed_report(group, MLD6_JOIN_DELAYING_MEMBER_TMR_MS); + } + + /* Increment group use */ + group->use++; + return ERR_OK; +} + +/** + * @ingroup mld6 + * Leave a group on a network interface. + * + * Zoning of address follows the same rules as @ref mld6_joingroup. + * + * @param srcaddr ipv6 address (zoned) of the network interface which should + * leave the group. 
If IP6_ADDR_ANY6, leave on all netifs + * @param groupaddr the ipv6 address of the group to leave (possibly, but not + * necessarily zoned) + * @return ERR_OK if group was left on the netif(s), an err_t otherwise + */ +err_t +mld6_leavegroup(const ip6_addr_t *srcaddr, const ip6_addr_t *groupaddr) +{ + err_t err = ERR_VAL; /* no matching interface */ + struct netif *netif; + + LWIP_ASSERT_CORE_LOCKED(); + + /* loop through netif's */ + NETIF_FOREACH(netif) { + /* Should we leave this interface ? */ + if (ip6_addr_isany(srcaddr) || + netif_get_ip6_addr_match(netif, srcaddr) >= 0) { + err_t res = mld6_leavegroup_netif(netif, groupaddr); + if (err != ERR_OK) { + /* Store this result if we have not yet gotten a success */ + err = res; + } + } + } + + return err; +} + +/** + * @ingroup mld6 + * Leave a group on a network interface. + * + * @param netif the network interface which should leave the group. + * @param groupaddr the ipv6 address of the group to leave (possibly, but not + * necessarily zoned) + * @return ERR_OK if group was left on the netif, an err_t otherwise + */ +err_t +mld6_leavegroup_netif(struct netif *netif, const ip6_addr_t *groupaddr) +{ + struct mld_group *group; +#if LWIP_IPV6_SCOPES + ip6_addr_t ip6addr; + + if (ip6_addr_lacks_zone(groupaddr, IP6_MULTICAST)) { + ip6_addr_set(&ip6addr, groupaddr); + ip6_addr_assign_zone(&ip6addr, IP6_MULTICAST, netif); + groupaddr = &ip6addr; + } + IP6_ADDR_ZONECHECK_NETIF(groupaddr, netif); +#endif /* LWIP_IPV6_SCOPES */ + + LWIP_ASSERT_CORE_LOCKED(); + + /* find group */ + group = mld6_lookfor_group(netif, groupaddr); + + if (group != NULL) { + /* Leave if there is no other use of the group */ + if (group->use <= 1) { + /* Remove the group from the list */ + mld6_remove_group(netif, group); + + /* If we are the last reporter for this group */ + if (group->last_reporter_flag) { + MLD6_STATS_INC(mld6.tx_leave); + mld6_send(netif, group, ICMP6_TYPE_MLD); + } + + /* Disable the group at the MAC level */ + if (netif->mld_mac_filter != NULL) { + netif->mld_mac_filter(netif, groupaddr, NETIF_DEL_MAC_FILTER); + } + + /* free group struct */ + memp_free(MEMP_MLD6_GROUP, group); + } else { + /* Decrement group use */ + group->use--; + } + + /* Left group */ + return ERR_OK; + } + + /* Group not found */ + return ERR_VAL; +} + + +/** + * Periodic timer for mld processing. Must be called every + * MLD6_TMR_INTERVAL milliseconds (100). + * + * When a delaying member expires, a membership report is sent. + */ +void +mld6_tmr(void) +{ + struct netif *netif; + + NETIF_FOREACH(netif) { + struct mld_group *group = netif_mld6_data(netif); + + while (group != NULL) { + if (group->timer > 0) { + group->timer--; + if (group->timer == 0) { + /* If the state is MLD6_GROUP_DELAYING_MEMBER then we send a report for this group */ + if (group->group_state == MLD6_GROUP_DELAYING_MEMBER) { + MLD6_STATS_INC(mld6.tx_report); + mld6_send(netif, group, ICMP6_TYPE_MLR); + group->group_state = MLD6_GROUP_IDLE_MEMBER; + } + } + } + group = group->next; + } + } +} + +/** + * Schedule a delayed membership report for a group + * + * @param group the mld_group for which "delaying" membership report + * should be sent + * @param maxresp_in the max resp delay provided in the query + */ +static void +mld6_delayed_report(struct mld_group *group, u16_t maxresp_in) +{ + /* Convert maxresp from milliseconds to tmr ticks */ + u16_t maxresp = maxresp_in / MLD6_TMR_INTERVAL; + if (maxresp == 0) { + maxresp = 1; + } + +#ifdef LWIP_RAND + /* Randomize maxresp. 
(if LWIP_RAND is supported) */ + maxresp = (u16_t)(LWIP_RAND() % maxresp); + if (maxresp == 0) { + maxresp = 1; + } +#endif /* LWIP_RAND */ + + /* Apply timer value if no report has been scheduled already. */ + if ((group->group_state == MLD6_GROUP_IDLE_MEMBER) || + ((group->group_state == MLD6_GROUP_DELAYING_MEMBER) && + ((group->timer == 0) || (maxresp < group->timer)))) { + group->timer = maxresp; + group->group_state = MLD6_GROUP_DELAYING_MEMBER; + } +} + +/** + * Send a MLD message (report or done). + * + * An IPv6 hop-by-hop options header with a router alert option + * is prepended. + * + * @param group the group to report or quit + * @param type ICMP6_TYPE_MLR (report) or ICMP6_TYPE_MLD (done) + */ +static void +mld6_send(struct netif *netif, struct mld_group *group, u8_t type) +{ + struct mld_header *mld_hdr; + struct pbuf *p; + const ip6_addr_t *src_addr; + + /* Allocate a packet. Size is MLD header + IPv6 Hop-by-hop options header. */ + p = pbuf_alloc(PBUF_IP, sizeof(struct mld_header) + MLD6_HBH_HLEN, PBUF_RAM); + if (p == NULL) { + MLD6_STATS_INC(mld6.memerr); + return; + } + + /* Move to make room for Hop-by-hop options header. */ + if (pbuf_remove_header(p, MLD6_HBH_HLEN)) { + pbuf_free(p); + MLD6_STATS_INC(mld6.lenerr); + return; + } + + /* Select our source address. */ + if (!ip6_addr_isvalid(netif_ip6_addr_state(netif, 0))) { + /* This is a special case, when we are performing duplicate address detection. + * We must join the multicast group, but we don't have a valid address yet. */ + src_addr = IP6_ADDR_ANY6; + } else { + /* Use link-local address as source address. */ + src_addr = netif_ip6_addr(netif, 0); + } + + /* MLD message header pointer. */ + mld_hdr = (struct mld_header *)p->payload; + + /* Set fields. */ + mld_hdr->type = type; + mld_hdr->code = 0; + mld_hdr->chksum = 0; + mld_hdr->max_resp_delay = 0; + mld_hdr->reserved = 0; + ip6_addr_copy_to_packed(mld_hdr->multicast_address, group->group_address); + +#if CHECKSUM_GEN_ICMP6 + IF__NETIF_CHECKSUM_ENABLED(netif, NETIF_CHECKSUM_GEN_ICMP6) { + mld_hdr->chksum = ip6_chksum_pseudo(p, IP6_NEXTH_ICMP6, p->len, + src_addr, &(group->group_address)); + } +#endif /* CHECKSUM_GEN_ICMP6 */ + + /* Add hop-by-hop headers options: router alert with MLD value. */ + ip6_options_add_hbh_ra(p, IP6_NEXTH_ICMP6, IP6_ROUTER_ALERT_VALUE_MLD); + + if (type == ICMP6_TYPE_MLR) { + /* Remember we were the last to report */ + group->last_reporter_flag = 1; + } + + /* Send the packet out. */ + MLD6_STATS_INC(mld6.xmit); + ip6_output_if(p, (ip6_addr_isany(src_addr)) ? NULL : src_addr, &(group->group_address), + MLD6_HL, 0, IP6_NEXTH_HOPBYHOP, netif); + pbuf_free(p); +} + +#endif /* LWIP_IPV6 */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/ipv6/nd6.c b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/ipv6/nd6.c new file mode 100644 index 0000000..5deb3f1 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/ipv6/nd6.c @@ -0,0 +1,2434 @@ +/** + * @file + * + * Neighbor discovery and stateless address autoconfiguration for IPv6. + * Aims to be compliant with RFC 4861 (Neighbor discovery) and RFC 4862 + * (Address autoconfiguration). + */ + +/* + * Copyright (c) 2010 Inico Technologies Ltd. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. 
Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Ivan Delamer + * + * + * Please coordinate changes and requests with Ivan Delamer + * + */ + +#include "lwip/opt.h" + +#if LWIP_IPV6 /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/nd6.h" +#include "lwip/priv/nd6_priv.h" +#include "lwip/prot/nd6.h" +#include "lwip/prot/icmp6.h" +#include "lwip/pbuf.h" +#include "lwip/mem.h" +#include "lwip/memp.h" +#include "lwip/ip6.h" +#include "lwip/ip6_addr.h" +#include "lwip/inet_chksum.h" +#include "lwip/netif.h" +#include "lwip/icmp6.h" +#include "lwip/mld6.h" +#include "lwip/dhcp6.h" +#include "lwip/ip.h" +#include "lwip/stats.h" +#include "lwip/dns.h" + +#include + +#ifdef LWIP_HOOK_FILENAME +#include LWIP_HOOK_FILENAME +#endif + +#if LWIP_IPV6_DUP_DETECT_ATTEMPTS > IP6_ADDR_TENTATIVE_COUNT_MASK +#error LWIP_IPV6_DUP_DETECT_ATTEMPTS > IP6_ADDR_TENTATIVE_COUNT_MASK +#endif + +/* Router tables. */ +struct nd6_neighbor_cache_entry neighbor_cache[LWIP_ND6_NUM_NEIGHBORS]; +struct nd6_destination_cache_entry destination_cache[LWIP_ND6_NUM_DESTINATIONS]; +struct nd6_prefix_list_entry prefix_list[LWIP_ND6_NUM_PREFIXES]; +struct nd6_router_list_entry default_router_list[LWIP_ND6_NUM_ROUTERS]; + +/* Default values, can be updated by a RA message. */ +u32_t reachable_time = LWIP_ND6_REACHABLE_TIME; +u32_t retrans_timer = LWIP_ND6_RETRANS_TIMER; /* @todo implement this value in timer */ + +/* Index for cache entries. */ +static u8_t nd6_cached_neighbor_index; +static netif_addr_idx_t nd6_cached_destination_index; + +/* Multicast address holder. */ +static ip6_addr_t multicast_address; + +static u8_t nd6_tmr_rs_reduction; + +/* Static buffer to parse RA packet options */ +union ra_options { + struct lladdr_option lladdr; + struct mtu_option mtu; + struct prefix_option prefix; +#if LWIP_ND6_RDNSS_MAX_DNS_SERVERS + struct rdnss_option rdnss; +#endif +}; +static union ra_options nd6_ra_buffer; + +/* Forward declarations. 
*/ +static s8_t nd6_find_neighbor_cache_entry(const ip6_addr_t *ip6addr); +static s8_t nd6_new_neighbor_cache_entry(void); +static void nd6_free_neighbor_cache_entry(s8_t i); +static s16_t nd6_find_destination_cache_entry(const ip6_addr_t *ip6addr); +static s16_t nd6_new_destination_cache_entry(void); +static int nd6_is_prefix_in_netif(const ip6_addr_t *ip6addr, struct netif *netif); +static s8_t nd6_select_router(const ip6_addr_t *ip6addr, struct netif *netif); +static s8_t nd6_get_router(const ip6_addr_t *router_addr, struct netif *netif); +static s8_t nd6_new_router(const ip6_addr_t *router_addr, struct netif *netif); +static s8_t nd6_get_onlink_prefix(const ip6_addr_t *prefix, struct netif *netif); +static s8_t nd6_new_onlink_prefix(const ip6_addr_t *prefix, struct netif *netif); +static s8_t nd6_get_next_hop_entry(const ip6_addr_t *ip6addr, struct netif *netif); +static err_t nd6_queue_packet(s8_t neighbor_index, struct pbuf *q); + +#define ND6_SEND_FLAG_MULTICAST_DEST 0x01 +#define ND6_SEND_FLAG_ALLNODES_DEST 0x02 +#define ND6_SEND_FLAG_ANY_SRC 0x04 +static void nd6_send_ns(struct netif *netif, const ip6_addr_t *target_addr, u8_t flags); +static void nd6_send_na(struct netif *netif, const ip6_addr_t *target_addr, u8_t flags); +static void nd6_send_neighbor_cache_probe(struct nd6_neighbor_cache_entry *entry, u8_t flags); +#if LWIP_IPV6_SEND_ROUTER_SOLICIT +static err_t nd6_send_rs(struct netif *netif); +#endif /* LWIP_IPV6_SEND_ROUTER_SOLICIT */ + +#if LWIP_ND6_QUEUEING +static void nd6_free_q(struct nd6_q_entry *q); +#else /* LWIP_ND6_QUEUEING */ +#define nd6_free_q(q) pbuf_free(q) +#endif /* LWIP_ND6_QUEUEING */ +static void nd6_send_q(s8_t i); + + +/** + * A local address has been determined to be a duplicate. Take the appropriate + * action(s) on the address and the interface as a whole. + * + * @param netif the netif that owns the address + * @param addr_idx the index of the address detected to be a duplicate + */ +static void +nd6_duplicate_addr_detected(struct netif *netif, s8_t addr_idx) +{ + + /* Mark the address as duplicate, but leave its lifetimes alone. If this was + * a manually assigned address, it will remain in existence as duplicate, and + * as such be unusable for any practical purposes until manual intervention. + * If this was an autogenerated address, the address will follow normal + * expiration rules, and thus disappear once its valid lifetime expires. */ + netif_ip6_addr_set_state(netif, addr_idx, IP6_ADDR_DUPLICATED); + +#if LWIP_IPV6_AUTOCONFIG + /* If the affected address was the link-local address that we use to generate + * all other addresses, then we should not continue to use those derived + * addresses either, so mark them as duplicate as well. For autoconfig-only + * setups, this will make the interface effectively unusable, approaching the + * intention of RFC 4862 Sec. 5.4.5. @todo implement the full requirements */ + if (addr_idx == 0) { + s8_t i; + for (i = 1; i < LWIP_IPV6_NUM_ADDRESSES; i++) { + if (!ip6_addr_isinvalid(netif_ip6_addr_state(netif, i)) && + !netif_ip6_addr_isstatic(netif, i)) { + netif_ip6_addr_set_state(netif, i, IP6_ADDR_DUPLICATED); + } + } + } +#endif /* LWIP_IPV6_AUTOCONFIG */ +} + +#if LWIP_IPV6_AUTOCONFIG +/** + * We received a router advertisement that contains a prefix with the + * autoconfiguration flag set. Add or update an associated autogenerated + * address. 
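Processing follows RFC 4862 Sec. 5.5.3: the lifetimes of an existing matching autogenerated address are refreshed, or, if none exists and autoconfiguration is enabled, a new address is formed from the advertised /64 prefix and the interface identifier of the link-local address.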
+ * + * @param netif the netif on which the router advertisement arrived + * @param prefix_opt a pointer to the prefix option data + * @param prefix_addr an aligned copy of the prefix address + */ +static void +nd6_process_autoconfig_prefix(struct netif *netif, + struct prefix_option *prefix_opt, const ip6_addr_t *prefix_addr) +{ + ip6_addr_t ip6addr; + u32_t valid_life, pref_life; + u8_t addr_state; + s8_t i, free_idx; + + /* The caller already checks RFC 4862 Sec. 5.5.3 points (a) and (b). We do + * the rest, starting with checks for (c) and (d) here. */ + valid_life = lwip_htonl(prefix_opt->valid_lifetime); + pref_life = lwip_htonl(prefix_opt->preferred_lifetime); + if (pref_life > valid_life || prefix_opt->prefix_length != 64) { + return; /* silently ignore this prefix for autoconfiguration purposes */ + } + + /* If an autogenerated address already exists for this prefix, update its + * lifetimes. An address is considered autogenerated if 1) it is not static + * (i.e., manually assigned), and 2) there is an advertised autoconfiguration + * prefix for it (the one we are processing here). This does not necessarily + * exclude the possibility that the address was actually assigned by, say, + * DHCPv6. If that distinction becomes important in the future, more state + * must be kept. As explained elsewhere we also update lifetimes of tentative + * and duplicate addresses. Skip address slot 0 (the link-local address). */ + for (i = 1; i < LWIP_IPV6_NUM_ADDRESSES; i++) { + addr_state = netif_ip6_addr_state(netif, i); + if (!ip6_addr_isinvalid(addr_state) && !netif_ip6_addr_isstatic(netif, i) && + ip6_addr_netcmp(prefix_addr, netif_ip6_addr(netif, i))) { + /* Update the valid lifetime, as per RFC 4862 Sec. 5.5.3 point (e). + * The valid lifetime will never drop to zero as a result of this. */ + u32_t remaining_life = netif_ip6_addr_valid_life(netif, i); + if (valid_life > ND6_2HRS || valid_life > remaining_life) { + netif_ip6_addr_set_valid_life(netif, i, valid_life); + } else if (remaining_life > ND6_2HRS) { + netif_ip6_addr_set_valid_life(netif, i, ND6_2HRS); + } + LWIP_ASSERT("bad valid lifetime", !netif_ip6_addr_isstatic(netif, i)); + /* Update the preferred lifetime. No bounds checks are needed here. In + * rare cases the advertisement may un-deprecate the address, though. + * Deprecation is left to the timer code where it is handled anyway. */ + if (pref_life > 0 && addr_state == IP6_ADDR_DEPRECATED) { + netif_ip6_addr_set_state(netif, i, IP6_ADDR_PREFERRED); + } + netif_ip6_addr_set_pref_life(netif, i, pref_life); + return; /* there should be at most one matching address */ + } + } + + /* No autogenerated address exists for this prefix yet. See if we can add a + * new one. However, if IPv6 autoconfiguration is administratively disabled, + * do not generate new addresses, but do keep updating lifetimes for existing + * addresses. Also, when adding new addresses, we must protect explicitly + * against a valid lifetime of zero, because again, we use that as a special + * value. The generated address would otherwise expire immediately anyway. + * Finally, the original link-local address must be usable at all. We start + * creating addresses even if the link-local address is still in tentative + * state though, and deal with the fallout of that upon DAD collision. 
*/ + addr_state = netif_ip6_addr_state(netif, 0); + if (!netif->ip6_autoconfig_enabled || valid_life == IP6_ADDR_LIFE_STATIC || + ip6_addr_isinvalid(addr_state) || ip6_addr_isduplicated(addr_state)) { + return; + } + + /* Construct the new address that we intend to use, and then see if that + * address really does not exist. It might have been added manually, after + * all. As a side effect, find a free slot. Note that we cannot use + * netif_add_ip6_address() here, as it would return ERR_OK if the address + * already did exist, resulting in that address being given lifetimes. */ + IP6_ADDR(&ip6addr, prefix_addr->addr[0], prefix_addr->addr[1], + netif_ip6_addr(netif, 0)->addr[2], netif_ip6_addr(netif, 0)->addr[3]); + ip6_addr_assign_zone(&ip6addr, IP6_UNICAST, netif); + + free_idx = 0; + for (i = 1; i < LWIP_IPV6_NUM_ADDRESSES; i++) { + if (!ip6_addr_isinvalid(netif_ip6_addr_state(netif, i))) { + if (ip6_addr_cmp(&ip6addr, netif_ip6_addr(netif, i))) { + return; /* formed address already exists */ + } + } else if (free_idx == 0) { + free_idx = i; + } + } + if (free_idx == 0) { + return; /* no address slots available, try again on next advertisement */ + } + + /* Assign the new address to the interface. */ + ip_addr_copy_from_ip6(netif->ip6_addr[free_idx], ip6addr); + netif_ip6_addr_set_valid_life(netif, free_idx, valid_life); + netif_ip6_addr_set_pref_life(netif, free_idx, pref_life); + netif_ip6_addr_set_state(netif, free_idx, IP6_ADDR_TENTATIVE); +} +#endif /* LWIP_IPV6_AUTOCONFIG */ + +/** + * Process an incoming neighbor discovery message + * + * @param p the nd packet, p->payload pointing to the icmpv6 header + * @param inp the netif on which this packet was received + */ +void +nd6_input(struct pbuf *p, struct netif *inp) +{ + u8_t msg_type; + s8_t i; + s16_t dest_idx; + + ND6_STATS_INC(nd6.recv); + + msg_type = *((u8_t *)p->payload); + switch (msg_type) { + case ICMP6_TYPE_NA: /* Neighbor Advertisement. */ + { + struct na_header *na_hdr; + struct lladdr_option *lladdr_opt; + ip6_addr_t target_address; + + /* Check that na header fits in packet. */ + if (p->len < (sizeof(struct na_header))) { + /* @todo debug message */ + pbuf_free(p); + ND6_STATS_INC(nd6.lenerr); + ND6_STATS_INC(nd6.drop); + return; + } + + na_hdr = (struct na_header *)p->payload; + + /* Create an aligned, zoned copy of the target address. */ + ip6_addr_copy_from_packed(target_address, na_hdr->target_address); + ip6_addr_assign_zone(&target_address, IP6_UNICAST, inp); + + /* Check a subset of the other RFC 4861 Sec. 7.1.2 requirements. */ + if (IP6H_HOPLIM(ip6_current_header()) != ND6_HOPLIM || na_hdr->code != 0 || + ip6_addr_ismulticast(&target_address)) { + pbuf_free(p); + ND6_STATS_INC(nd6.proterr); + ND6_STATS_INC(nd6.drop); + return; + } + + /* @todo RFC MUST: if IP destination is multicast, Solicited flag is zero */ + /* @todo RFC MUST: all included options have a length greater than zero */ + + /* Unsolicited NA?*/ + if (ip6_addr_ismulticast(ip6_current_dest_addr())) { + /* This is an unsolicited NA. + * link-layer changed? + * part of DAD mechanism? */ + +#if LWIP_IPV6_DUP_DETECT_ATTEMPTS + /* If the target address matches this netif, it is a DAD response. */ + for (i = 0; i < LWIP_IPV6_NUM_ADDRESSES; i++) { + if (!ip6_addr_isinvalid(netif_ip6_addr_state(inp, i)) && + !ip6_addr_isduplicated(netif_ip6_addr_state(inp, i)) && + ip6_addr_cmp(&target_address, netif_ip6_addr(inp, i))) { + /* We are using a duplicate address. 
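Another node answered for an address configured on this interface, so duplicate address detection has failed for it; nd6_duplicate_addr_detected() marks it (and, if the duplicate is the link-local address, any autoconfigured addresses derived from it) as IP6_ADDR_DUPLICATED, and the packet is dropped.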
*/ + nd6_duplicate_addr_detected(inp, i); + + pbuf_free(p); + return; + } + } +#endif /* LWIP_IPV6_DUP_DETECT_ATTEMPTS */ + + /* Check that link-layer address option also fits in packet. */ + if (p->len < (sizeof(struct na_header) + 2)) { + /* @todo debug message */ + pbuf_free(p); + ND6_STATS_INC(nd6.lenerr); + ND6_STATS_INC(nd6.drop); + return; + } + + lladdr_opt = (struct lladdr_option *)((u8_t*)p->payload + sizeof(struct na_header)); + + if (p->len < (sizeof(struct na_header) + (lladdr_opt->length << 3))) { + /* @todo debug message */ + pbuf_free(p); + ND6_STATS_INC(nd6.lenerr); + ND6_STATS_INC(nd6.drop); + return; + } + + /* This is an unsolicited NA, most likely there was a LLADDR change. */ + i = nd6_find_neighbor_cache_entry(&target_address); + if (i >= 0) { + if (na_hdr->flags & ND6_FLAG_OVERRIDE) { + MEMCPY(neighbor_cache[i].lladdr, lladdr_opt->addr, inp->hwaddr_len); + } + } + } else { + /* This is a solicited NA. + * neighbor address resolution response? + * neighbor unreachability detection response? */ + + /* Find the cache entry corresponding to this na. */ + i = nd6_find_neighbor_cache_entry(&target_address); + if (i < 0) { + /* We no longer care about this target address. drop it. */ + pbuf_free(p); + return; + } + + /* Update cache entry. */ + if ((na_hdr->flags & ND6_FLAG_OVERRIDE) || + (neighbor_cache[i].state == ND6_INCOMPLETE)) { + /* Check that link-layer address option also fits in packet. */ + if (p->len < (sizeof(struct na_header) + 2)) { + /* @todo debug message */ + pbuf_free(p); + ND6_STATS_INC(nd6.lenerr); + ND6_STATS_INC(nd6.drop); + return; + } + + lladdr_opt = (struct lladdr_option *)((u8_t*)p->payload + sizeof(struct na_header)); + + if (p->len < (sizeof(struct na_header) + (lladdr_opt->length << 3))) { + /* @todo debug message */ + pbuf_free(p); + ND6_STATS_INC(nd6.lenerr); + ND6_STATS_INC(nd6.drop); + return; + } + + MEMCPY(neighbor_cache[i].lladdr, lladdr_opt->addr, inp->hwaddr_len); + } + + neighbor_cache[i].netif = inp; + neighbor_cache[i].state = ND6_REACHABLE; + neighbor_cache[i].counter.reachable_time = reachable_time; + + /* Send queued packets, if any. */ + if (neighbor_cache[i].q != NULL) { + nd6_send_q(i); + } + } + + break; /* ICMP6_TYPE_NA */ + } + case ICMP6_TYPE_NS: /* Neighbor solicitation. */ + { + struct ns_header *ns_hdr; + struct lladdr_option *lladdr_opt; + ip6_addr_t target_address; + u8_t accepted; + + /* Check that ns header fits in packet. */ + if (p->len < sizeof(struct ns_header)) { + /* @todo debug message */ + pbuf_free(p); + ND6_STATS_INC(nd6.lenerr); + ND6_STATS_INC(nd6.drop); + return; + } + + ns_hdr = (struct ns_header *)p->payload; + + /* Create an aligned, zoned copy of the target address. */ + ip6_addr_copy_from_packed(target_address, ns_hdr->target_address); + ip6_addr_assign_zone(&target_address, IP6_UNICAST, inp); + + /* Check a subset of the other RFC 4861 Sec. 7.1.1 requirements. */ + if (IP6H_HOPLIM(ip6_current_header()) != ND6_HOPLIM || ns_hdr->code != 0 || + ip6_addr_ismulticast(&target_address)) { + pbuf_free(p); + ND6_STATS_INC(nd6.proterr); + ND6_STATS_INC(nd6.drop); + return; + } + + /* @todo RFC MUST: all included options have a length greater than zero */ + /* @todo RFC MUST: if IP source is 'any', destination is solicited-node multicast address */ + /* @todo RFC MUST: if IP source is 'any', there is no source LL address option */ + + /* Check if there is a link-layer address provided. Only point to it if in this buffer. 
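If the option does not fit within the first pbuf, lladdr_opt stays NULL; the DAD reply path below does not need it, while the address-resolution path treats a missing source link-layer address option as a protocol error and drops the packet.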
*/ + if (p->len >= (sizeof(struct ns_header) + 2)) { + lladdr_opt = (struct lladdr_option *)((u8_t*)p->payload + sizeof(struct ns_header)); + if (p->len < (sizeof(struct ns_header) + (lladdr_opt->length << 3))) { + lladdr_opt = NULL; + } + } else { + lladdr_opt = NULL; + } + + /* Check if the target address is configured on the receiving netif. */ + accepted = 0; + for (i = 0; i < LWIP_IPV6_NUM_ADDRESSES; ++i) { + if ((ip6_addr_isvalid(netif_ip6_addr_state(inp, i)) || + (ip6_addr_istentative(netif_ip6_addr_state(inp, i)) && + ip6_addr_isany(ip6_current_src_addr()))) && + ip6_addr_cmp(&target_address, netif_ip6_addr(inp, i))) { + accepted = 1; + break; + } + } + + /* NS not for us? */ + if (!accepted) { + pbuf_free(p); + return; + } + + /* Check for ANY address in src (DAD algorithm). */ + if (ip6_addr_isany(ip6_current_src_addr())) { + /* Sender is validating this address. */ + for (i = 0; i < LWIP_IPV6_NUM_ADDRESSES; ++i) { + if (!ip6_addr_isinvalid(netif_ip6_addr_state(inp, i)) && + ip6_addr_cmp(&target_address, netif_ip6_addr(inp, i))) { + /* Send a NA back so that the sender does not use this address. */ + nd6_send_na(inp, netif_ip6_addr(inp, i), ND6_FLAG_OVERRIDE | ND6_SEND_FLAG_ALLNODES_DEST); + if (ip6_addr_istentative(netif_ip6_addr_state(inp, i))) { + /* We shouldn't use this address either. */ + nd6_duplicate_addr_detected(inp, i); + } + } + } + } else { + /* Sender is trying to resolve our address. */ + /* Verify that they included their own link-layer address. */ + if (lladdr_opt == NULL) { + /* Not a valid message. */ + pbuf_free(p); + ND6_STATS_INC(nd6.proterr); + ND6_STATS_INC(nd6.drop); + return; + } + + i = nd6_find_neighbor_cache_entry(ip6_current_src_addr()); + if (i>= 0) { + /* We already have a record for the solicitor. */ + if (neighbor_cache[i].state == ND6_INCOMPLETE) { + neighbor_cache[i].netif = inp; + MEMCPY(neighbor_cache[i].lladdr, lladdr_opt->addr, inp->hwaddr_len); + + /* Delay probe in case we get confirmation of reachability from upper layer (TCP). */ + neighbor_cache[i].state = ND6_DELAY; + neighbor_cache[i].counter.delay_time = LWIP_ND6_DELAY_FIRST_PROBE_TIME / ND6_TMR_INTERVAL; + } + } else { + /* Add their IPv6 address and link-layer address to neighbor cache. + * We will need it at least to send a unicast NA message, but most + * likely we will also be communicating with this node soon. */ + i = nd6_new_neighbor_cache_entry(); + if (i < 0) { + /* We couldn't assign a cache entry for this neighbor. + * we won't be able to reply. drop it. */ + pbuf_free(p); + ND6_STATS_INC(nd6.memerr); + return; + } + neighbor_cache[i].netif = inp; + MEMCPY(neighbor_cache[i].lladdr, lladdr_opt->addr, inp->hwaddr_len); + ip6_addr_set(&(neighbor_cache[i].next_hop_address), ip6_current_src_addr()); + + /* Receiving a message does not prove reachability: only in one direction. + * Delay probe in case we get confirmation of reachability from upper layer (TCP). */ + neighbor_cache[i].state = ND6_DELAY; + neighbor_cache[i].counter.delay_time = LWIP_ND6_DELAY_FIRST_PROBE_TIME / ND6_TMR_INTERVAL; + } + + /* Send back a NA for us. Allocate the reply pbuf. */ + nd6_send_na(inp, &target_address, ND6_FLAG_SOLICITED | ND6_FLAG_OVERRIDE); + } + + break; /* ICMP6_TYPE_NS */ + } + case ICMP6_TYPE_RA: /* Router Advertisement. */ + { + struct ra_header *ra_hdr; + u8_t *buffer; /* Used to copy options. 
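It either points directly into a contiguous pbuf or at the static nd6_ra_buffer copy, so each option can be read through a properly aligned struct.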
*/ + u16_t offset; +#if LWIP_ND6_RDNSS_MAX_DNS_SERVERS + /* There can be multiple RDNSS options per RA */ + u8_t rdnss_server_idx = 0; +#endif /* LWIP_ND6_RDNSS_MAX_DNS_SERVERS */ + + /* Check that RA header fits in packet. */ + if (p->len < sizeof(struct ra_header)) { + /* @todo debug message */ + pbuf_free(p); + ND6_STATS_INC(nd6.lenerr); + ND6_STATS_INC(nd6.drop); + return; + } + + ra_hdr = (struct ra_header *)p->payload; + + /* Check a subset of the other RFC 4861 Sec. 6.1.2 requirements. */ + if (!ip6_addr_islinklocal(ip6_current_src_addr()) || + IP6H_HOPLIM(ip6_current_header()) != ND6_HOPLIM || ra_hdr->code != 0) { + pbuf_free(p); + ND6_STATS_INC(nd6.proterr); + ND6_STATS_INC(nd6.drop); + return; + } + + /* @todo RFC MUST: all included options have a length greater than zero */ + + /* If we are sending RS messages, stop. */ +#if LWIP_IPV6_SEND_ROUTER_SOLICIT + /* ensure at least one solicitation is sent (see RFC 4861, ch. 6.3.7) */ + if ((inp->rs_count < LWIP_ND6_MAX_MULTICAST_SOLICIT) || + (nd6_send_rs(inp) == ERR_OK)) { + inp->rs_count = 0; + } else { + inp->rs_count = 1; + } +#endif /* LWIP_IPV6_SEND_ROUTER_SOLICIT */ + + /* Get the matching default router entry. */ + i = nd6_get_router(ip6_current_src_addr(), inp); + if (i < 0) { + /* Create a new router entry. */ + i = nd6_new_router(ip6_current_src_addr(), inp); + } + + if (i < 0) { + /* Could not create a new router entry. */ + pbuf_free(p); + ND6_STATS_INC(nd6.memerr); + return; + } + + /* Re-set invalidation timer. */ + default_router_list[i].invalidation_timer = lwip_htons(ra_hdr->router_lifetime); + + /* Re-set default timer values. */ +#if LWIP_ND6_ALLOW_RA_UPDATES + if (ra_hdr->retrans_timer > 0) { + retrans_timer = lwip_htonl(ra_hdr->retrans_timer); + } + if (ra_hdr->reachable_time > 0) { + reachable_time = lwip_htonl(ra_hdr->reachable_time); + } +#endif /* LWIP_ND6_ALLOW_RA_UPDATES */ + + /* @todo set default hop limit... */ + /* ra_hdr->current_hop_limit;*/ + + /* Update flags in local entry (incl. preference). */ + default_router_list[i].flags = ra_hdr->flags; + +#if LWIP_IPV6_DHCP6 + /* Trigger DHCPv6 if enabled */ + dhcp6_nd6_ra_trigger(inp, ra_hdr->flags & ND6_RA_FLAG_MANAGED_ADDR_CONFIG, + ra_hdr->flags & ND6_RA_FLAG_OTHER_CONFIG); +#endif + + /* Offset to options. */ + offset = sizeof(struct ra_header); + + /* Process each option. 
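Every option is a type/length/value block whose length field counts 8-byte units, so option_len below is the length field times 8 (a value of 1 describes an 8-byte option such as a source link-layer address option carrying a 6-byte MAC). A zero or truncated length aborts processing, and an option that has to be copied out of a chained pbuf but exceeds the static nd6_ra_buffer is rejected as well, except for RDNSS options, which are simply truncated.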
*/ + while ((p->tot_len - offset) >= 2) { + u8_t option_type; + u16_t option_len; + int option_len8 = pbuf_try_get_at(p, offset + 1); + if (option_len8 <= 0) { + /* read beyond end or zero length */ + goto lenerr_drop_free_return; + } + option_len = ((u8_t)option_len8) << 3; + if (option_len > p->tot_len - offset) { + /* short packet (option does not fit in) */ + goto lenerr_drop_free_return; + } + if (p->len == p->tot_len) { + /* no need to copy from contiguous pbuf */ + buffer = &((u8_t*)p->payload)[offset]; + } else { + /* check if this option fits into our buffer */ + if (option_len > sizeof(nd6_ra_buffer)) { + option_type = pbuf_get_at(p, offset); + /* invalid option length */ + if (option_type != ND6_OPTION_TYPE_RDNSS) { + goto lenerr_drop_free_return; + } + /* we allow RDNSS option to be longer - we'll just drop some servers */ + option_len = sizeof(nd6_ra_buffer); + } + buffer = (u8_t*)&nd6_ra_buffer; + option_len = pbuf_copy_partial(p, &nd6_ra_buffer, option_len, offset); + } + option_type = buffer[0]; + switch (option_type) { + case ND6_OPTION_TYPE_SOURCE_LLADDR: + { + struct lladdr_option *lladdr_opt; + if (option_len < sizeof(struct lladdr_option)) { + goto lenerr_drop_free_return; + } + lladdr_opt = (struct lladdr_option *)buffer; + if ((default_router_list[i].neighbor_entry != NULL) && + (default_router_list[i].neighbor_entry->state == ND6_INCOMPLETE)) { + SMEMCPY(default_router_list[i].neighbor_entry->lladdr, lladdr_opt->addr, inp->hwaddr_len); + default_router_list[i].neighbor_entry->state = ND6_REACHABLE; + default_router_list[i].neighbor_entry->counter.reachable_time = reachable_time; + } + break; + } + case ND6_OPTION_TYPE_MTU: + { + struct mtu_option *mtu_opt; + u32_t mtu32; + if (option_len < sizeof(struct mtu_option)) { + goto lenerr_drop_free_return; + } + mtu_opt = (struct mtu_option *)buffer; + mtu32 = lwip_htonl(mtu_opt->mtu); + if ((mtu32 >= 1280) && (mtu32 <= 0xffff)) { +#if LWIP_ND6_ALLOW_RA_UPDATES + if (inp->mtu) { + /* don't set the mtu for IPv6 higher than the netif driver supports */ + inp->mtu6 = LWIP_MIN(inp->mtu, (u16_t)mtu32); + } else { + inp->mtu6 = (u16_t)mtu32; + } +#endif /* LWIP_ND6_ALLOW_RA_UPDATES */ + } + break; + } + case ND6_OPTION_TYPE_PREFIX_INFO: + { + struct prefix_option *prefix_opt; + ip6_addr_t prefix_addr; + if (option_len < sizeof(struct prefix_option)) { + goto lenerr_drop_free_return; + } + + prefix_opt = (struct prefix_option *)buffer; + + /* Get a memory-aligned copy of the prefix. */ + ip6_addr_copy_from_packed(prefix_addr, prefix_opt->prefix); + ip6_addr_assign_zone(&prefix_addr, IP6_UNICAST, inp); + + if (!ip6_addr_islinklocal(&prefix_addr)) { + if ((prefix_opt->flags & ND6_PREFIX_FLAG_ON_LINK) && + (prefix_opt->prefix_length == 64)) { + /* Add to on-link prefix list. */ + u32_t valid_life; + s8_t prefix; + + valid_life = lwip_htonl(prefix_opt->valid_lifetime); + + /* find cache entry for this prefix. */ + prefix = nd6_get_onlink_prefix(&prefix_addr, inp); + if (prefix < 0 && valid_life > 0) { + /* Create a new cache entry. */ + prefix = nd6_new_onlink_prefix(&prefix_addr, inp); + } + if (prefix >= 0) { + prefix_list[prefix].invalidation_timer = valid_life; + } + } +#if LWIP_IPV6_AUTOCONFIG + if (prefix_opt->flags & ND6_PREFIX_FLAG_AUTONOMOUS) { + /* Perform processing for autoconfiguration. */ + nd6_process_autoconfig_prefix(inp, prefix_opt, &prefix_addr); + } +#endif /* LWIP_IPV6_AUTOCONFIG */ + } + + break; + } + case ND6_OPTION_TYPE_ROUTE_INFO: + /* @todo implement preferred routes. 
+ struct route_option * route_opt; + route_opt = (struct route_option *)buffer;*/ + + break; +#if LWIP_ND6_RDNSS_MAX_DNS_SERVERS + case ND6_OPTION_TYPE_RDNSS: + { + u8_t num, n; + u16_t copy_offset = offset + SIZEOF_RDNSS_OPTION_BASE; + struct rdnss_option * rdnss_opt; + if (option_len < SIZEOF_RDNSS_OPTION_BASE) { + goto lenerr_drop_free_return; + } + + rdnss_opt = (struct rdnss_option *)buffer; + num = (rdnss_opt->length - 1) / 2; + for (n = 0; (rdnss_server_idx < DNS_MAX_SERVERS) && (n < num); n++) { + ip_addr_t rdnss_address; + + /* Copy directly from pbuf to get an aligned, zoned copy of the prefix. */ + if (pbuf_copy_partial(p, &rdnss_address, sizeof(ip6_addr_p_t), copy_offset) == sizeof(ip6_addr_p_t)) { + IP_SET_TYPE_VAL(rdnss_address, IPADDR_TYPE_V6); + ip6_addr_assign_zone(ip_2_ip6(&rdnss_address), IP6_UNKNOWN, inp); + + if (htonl(rdnss_opt->lifetime) > 0) { + /* TODO implement Lifetime > 0 */ + dns_setserver(rdnss_server_idx++, &rdnss_address); + } else { + /* TODO implement DNS removal in dns.c */ + u8_t s; + for (s = 0; s < DNS_MAX_SERVERS; s++) { + const ip_addr_t *addr = dns_getserver(s); + if(ip_addr_cmp(addr, &rdnss_address)) { + dns_setserver(s, NULL); + } + } + } + } + } + break; + } +#endif /* LWIP_ND6_RDNSS_MAX_DNS_SERVERS */ + default: + /* Unrecognized option, abort. */ + ND6_STATS_INC(nd6.proterr); + break; + } + /* option length is checked earlier to be non-zero to make sure loop ends */ + offset += 8 * (u8_t)option_len8; + } + + break; /* ICMP6_TYPE_RA */ + } + case ICMP6_TYPE_RD: /* Redirect */ + { + struct redirect_header *redir_hdr; + struct lladdr_option *lladdr_opt; + ip6_addr_t destination_address, target_address; + + /* Check that Redir header fits in packet. */ + if (p->len < sizeof(struct redirect_header)) { + /* @todo debug message */ + pbuf_free(p); + ND6_STATS_INC(nd6.lenerr); + ND6_STATS_INC(nd6.drop); + return; + } + + redir_hdr = (struct redirect_header *)p->payload; + + /* Create an aligned, zoned copy of the destination address. */ + ip6_addr_copy_from_packed(destination_address, redir_hdr->destination_address); + ip6_addr_assign_zone(&destination_address, IP6_UNICAST, inp); + + /* Check a subset of the other RFC 4861 Sec. 8.1 requirements. */ + if (!ip6_addr_islinklocal(ip6_current_src_addr()) || + IP6H_HOPLIM(ip6_current_header()) != ND6_HOPLIM || + redir_hdr->code != 0 || ip6_addr_ismulticast(&destination_address)) { + pbuf_free(p); + ND6_STATS_INC(nd6.proterr); + ND6_STATS_INC(nd6.drop); + return; + } + + /* @todo RFC MUST: IP source address equals first-hop router for destination_address */ + /* @todo RFC MUST: ICMP target address is either link-local address or same as destination_address */ + /* @todo RFC MUST: all included options have a length greater than zero */ + + if (p->len >= (sizeof(struct redirect_header) + 2)) { + lladdr_opt = (struct lladdr_option *)((u8_t*)p->payload + sizeof(struct redirect_header)); + if (p->len < (sizeof(struct redirect_header) + (lladdr_opt->length << 3))) { + lladdr_opt = NULL; + } + } else { + lladdr_opt = NULL; + } + + /* Find dest address in cache */ + dest_idx = nd6_find_destination_cache_entry(&destination_address); + if (dest_idx < 0) { + /* Destination not in cache, drop packet. */ + pbuf_free(p); + return; + } + + /* Create an aligned, zoned copy of the target address. */ + ip6_addr_copy_from_packed(target_address, redir_hdr->target_address); + ip6_addr_assign_zone(&target_address, IP6_UNICAST, inp); + + /* Set the new target address. 
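From now on, packets for destination_address will be sent via target_address; if the redirect also carries the target's link-layer address, it is entered into the neighbor cache below.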
*/ + ip6_addr_copy(destination_cache[dest_idx].next_hop_addr, target_address); + + /* If Link-layer address of other router is given, try to add to neighbor cache. */ + if (lladdr_opt != NULL) { + if (lladdr_opt->type == ND6_OPTION_TYPE_TARGET_LLADDR) { + i = nd6_find_neighbor_cache_entry(&target_address); + if (i < 0) { + i = nd6_new_neighbor_cache_entry(); + if (i >= 0) { + neighbor_cache[i].netif = inp; + MEMCPY(neighbor_cache[i].lladdr, lladdr_opt->addr, inp->hwaddr_len); + ip6_addr_copy(neighbor_cache[i].next_hop_address, target_address); + + /* Receiving a message does not prove reachability: only in one direction. + * Delay probe in case we get confirmation of reachability from upper layer (TCP). */ + neighbor_cache[i].state = ND6_DELAY; + neighbor_cache[i].counter.delay_time = LWIP_ND6_DELAY_FIRST_PROBE_TIME / ND6_TMR_INTERVAL; + } + } + if (i >= 0) { + if (neighbor_cache[i].state == ND6_INCOMPLETE) { + MEMCPY(neighbor_cache[i].lladdr, lladdr_opt->addr, inp->hwaddr_len); + /* Receiving a message does not prove reachability: only in one direction. + * Delay probe in case we get confirmation of reachability from upper layer (TCP). */ + neighbor_cache[i].state = ND6_DELAY; + neighbor_cache[i].counter.delay_time = LWIP_ND6_DELAY_FIRST_PROBE_TIME / ND6_TMR_INTERVAL; + } + } + } + } + break; /* ICMP6_TYPE_RD */ + } + case ICMP6_TYPE_PTB: /* Packet too big */ + { + struct icmp6_hdr *icmp6hdr; /* Packet too big message */ + struct ip6_hdr *ip6hdr; /* IPv6 header of the packet which caused the error */ + u32_t pmtu; + ip6_addr_t destination_address; + + /* Check that ICMPv6 header + IPv6 header fit in payload */ + if (p->len < (sizeof(struct icmp6_hdr) + IP6_HLEN)) { + /* drop short packets */ + pbuf_free(p); + ND6_STATS_INC(nd6.lenerr); + ND6_STATS_INC(nd6.drop); + return; + } + + icmp6hdr = (struct icmp6_hdr *)p->payload; + ip6hdr = (struct ip6_hdr *)((u8_t*)p->payload + sizeof(struct icmp6_hdr)); + + /* Create an aligned, zoned copy of the destination address. */ + ip6_addr_copy_from_packed(destination_address, ip6hdr->dest); + ip6_addr_assign_zone(&destination_address, IP6_UNKNOWN, inp); + + /* Look for entry in destination cache. */ + dest_idx = nd6_find_destination_cache_entry(&destination_address); + if (dest_idx < 0) { + /* Destination not in cache, drop packet. */ + pbuf_free(p); + return; + } + + /* Change the Path MTU. */ + pmtu = lwip_htonl(icmp6hdr->data); + destination_cache[dest_idx].pmtu = (u16_t)LWIP_MIN(pmtu, 0xFFFF); + + break; /* ICMP6_TYPE_PTB */ + } + + default: + ND6_STATS_INC(nd6.proterr); + ND6_STATS_INC(nd6.drop); + break; /* default */ + } + + pbuf_free(p); + return; +lenerr_drop_free_return: + ND6_STATS_INC(nd6.lenerr); + ND6_STATS_INC(nd6.drop); + pbuf_free(p); +} + + +/** + * Periodic timer for Neighbor discovery functions: + * + * - Update neighbor reachability states + * - Update destination cache entries age + * - Update invalidation timers of default routers and on-link prefixes + * - Update lifetimes of our addresses + * - Perform duplicate address detection (DAD) for our addresses + * - Send router solicitations + */ +void +nd6_tmr(void) +{ + s8_t i; + struct netif *netif; + + /* Process neighbor entries. */ + for (i = 0; i < LWIP_ND6_NUM_NEIGHBORS; i++) { + switch (neighbor_cache[i].state) { + case ND6_INCOMPLETE: + if ((neighbor_cache[i].counter.probes_sent >= LWIP_ND6_MAX_MULTICAST_SOLICIT) && + (!neighbor_cache[i].isrouter)) { + /* Retries exceeded. */ + nd6_free_neighbor_cache_entry(i); + } else { + /* Send a NS for this entry. 
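While the entry is INCOMPLETE the solicitation is sent to the target's solicited-node multicast address (PROBE-state entries are probed unicast further below), and at most LWIP_ND6_MAX_MULTICAST_SOLICIT probes are sent before a non-router entry is discarded.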
*/ + neighbor_cache[i].counter.probes_sent++; + nd6_send_neighbor_cache_probe(&neighbor_cache[i], ND6_SEND_FLAG_MULTICAST_DEST); + } + break; + case ND6_REACHABLE: + /* Send queued packets, if any are left. Should have been sent already. */ + if (neighbor_cache[i].q != NULL) { + nd6_send_q(i); + } + if (neighbor_cache[i].counter.reachable_time <= ND6_TMR_INTERVAL) { + /* Change to stale state. */ + neighbor_cache[i].state = ND6_STALE; + neighbor_cache[i].counter.stale_time = 0; + } else { + neighbor_cache[i].counter.reachable_time -= ND6_TMR_INTERVAL; + } + break; + case ND6_STALE: + neighbor_cache[i].counter.stale_time++; + break; + case ND6_DELAY: + if (neighbor_cache[i].counter.delay_time <= 1) { + /* Change to PROBE state. */ + neighbor_cache[i].state = ND6_PROBE; + neighbor_cache[i].counter.probes_sent = 0; + } else { + neighbor_cache[i].counter.delay_time--; + } + break; + case ND6_PROBE: + if ((neighbor_cache[i].counter.probes_sent >= LWIP_ND6_MAX_MULTICAST_SOLICIT) && + (!neighbor_cache[i].isrouter)) { + /* Retries exceeded. */ + nd6_free_neighbor_cache_entry(i); + } else { + /* Send a NS for this entry. */ + neighbor_cache[i].counter.probes_sent++; + nd6_send_neighbor_cache_probe(&neighbor_cache[i], 0); + } + break; + case ND6_NO_ENTRY: + default: + /* Do nothing. */ + break; + } + } + + /* Process destination entries. */ + for (i = 0; i < LWIP_ND6_NUM_DESTINATIONS; i++) { + destination_cache[i].age++; + } + + /* Process router entries. */ + for (i = 0; i < LWIP_ND6_NUM_ROUTERS; i++) { + if (default_router_list[i].neighbor_entry != NULL) { + /* Active entry. */ + if (default_router_list[i].invalidation_timer <= ND6_TMR_INTERVAL / 1000) { + /* No more than 1 second remaining. Clear this entry. Also clear any of + * its destination cache entries, as per RFC 4861 Sec. 5.3 and 6.3.5. */ + s8_t j; + for (j = 0; j < LWIP_ND6_NUM_DESTINATIONS; j++) { + if (ip6_addr_cmp(&destination_cache[j].next_hop_addr, + &default_router_list[i].neighbor_entry->next_hop_address)) { + ip6_addr_set_any(&destination_cache[j].destination_addr); + } + } + default_router_list[i].neighbor_entry->isrouter = 0; + default_router_list[i].neighbor_entry = NULL; + default_router_list[i].invalidation_timer = 0; + default_router_list[i].flags = 0; + } else { + default_router_list[i].invalidation_timer -= ND6_TMR_INTERVAL / 1000; + } + } + } + + /* Process prefix entries. */ + for (i = 0; i < LWIP_ND6_NUM_PREFIXES; i++) { + if (prefix_list[i].netif != NULL) { + if (prefix_list[i].invalidation_timer <= ND6_TMR_INTERVAL / 1000) { + /* Entry timed out, remove it */ + prefix_list[i].invalidation_timer = 0; + prefix_list[i].netif = NULL; + } else { + prefix_list[i].invalidation_timer -= ND6_TMR_INTERVAL / 1000; + } + } + } + + /* Process our own addresses, updating address lifetimes and/or DAD state. */ + NETIF_FOREACH(netif) { + for (i = 0; i < LWIP_IPV6_NUM_ADDRESSES; ++i) { + u8_t addr_state; +#if LWIP_IPV6_ADDRESS_LIFETIMES + /* Step 1: update address lifetimes (valid and preferred). */ + addr_state = netif_ip6_addr_state(netif, i); + /* RFC 4862 is not entirely clear as to whether address lifetimes affect + * tentative addresses, and is even less clear as to what should happen + * with duplicate addresses. We choose to track and update lifetimes for + * both those types, although for different reasons: + * - for tentative addresses, the line of thought of Sec. 
5.7 combined + * with the potentially long period that an address may be in tentative + * state (due to the interface being down) suggests that lifetimes + * should be independent of external factors which would include DAD; + * - for duplicate addresses, retiring them early could result in a new + * but unwanted attempt at marking them as valid, while retiring them + * late/never could clog up address slots on the netif. + * As a result, we may end up expiring addresses of either type here. + */ + if (!ip6_addr_isinvalid(addr_state) && + !netif_ip6_addr_isstatic(netif, i)) { + u32_t life = netif_ip6_addr_valid_life(netif, i); + if (life <= ND6_TMR_INTERVAL / 1000) { + /* The address has expired. */ + netif_ip6_addr_set_valid_life(netif, i, 0); + netif_ip6_addr_set_pref_life(netif, i, 0); + netif_ip6_addr_set_state(netif, i, IP6_ADDR_INVALID); + } else { + if (!ip6_addr_life_isinfinite(life)) { + life -= ND6_TMR_INTERVAL / 1000; + LWIP_ASSERT("bad valid lifetime", life != IP6_ADDR_LIFE_STATIC); + netif_ip6_addr_set_valid_life(netif, i, life); + } + /* The address is still here. Update the preferred lifetime too. */ + life = netif_ip6_addr_pref_life(netif, i); + if (life <= ND6_TMR_INTERVAL / 1000) { + /* This case must also trigger if 'life' was already zero, so as to + * deal correctly with advertised preferred-lifetime reductions. */ + netif_ip6_addr_set_pref_life(netif, i, 0); + if (addr_state == IP6_ADDR_PREFERRED) + netif_ip6_addr_set_state(netif, i, IP6_ADDR_DEPRECATED); + } else if (!ip6_addr_life_isinfinite(life)) { + life -= ND6_TMR_INTERVAL / 1000; + netif_ip6_addr_set_pref_life(netif, i, life); + } + } + } + /* The address state may now have changed, so reobtain it next. */ +#endif /* LWIP_IPV6_ADDRESS_LIFETIMES */ + /* Step 2: update DAD state. */ + addr_state = netif_ip6_addr_state(netif, i); + if (ip6_addr_istentative(addr_state)) { + if ((addr_state & IP6_ADDR_TENTATIVE_COUNT_MASK) >= LWIP_IPV6_DUP_DETECT_ATTEMPTS) { + /* No NA received in response. Mark address as valid. For dynamic + * addresses with an expired preferred lifetime, the state is set to + * deprecated right away. That should almost never happen, though. */ + addr_state = IP6_ADDR_PREFERRED; +#if LWIP_IPV6_ADDRESS_LIFETIMES + if (!netif_ip6_addr_isstatic(netif, i) && + netif_ip6_addr_pref_life(netif, i) == 0) { + addr_state = IP6_ADDR_DEPRECATED; + } +#endif /* LWIP_IPV6_ADDRESS_LIFETIMES */ + netif_ip6_addr_set_state(netif, i, addr_state); + } else if (netif_is_up(netif) && netif_is_link_up(netif)) { + /* tentative: set next state by increasing by one */ + netif_ip6_addr_set_state(netif, i, addr_state + 1); + /* Send a NS for this address. Use the unspecified address as source + * address in all cases (RFC 4862 Sec. 5.4.2), not in the least + * because as it is, we only consider multicast replies for DAD. */ + nd6_send_ns(netif, netif_ip6_addr(netif, i), + ND6_SEND_FLAG_MULTICAST_DEST | ND6_SEND_FLAG_ANY_SRC); + } + } + } + } + +#if LWIP_IPV6_SEND_ROUTER_SOLICIT + /* Send router solicitation messages, if necessary. 
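With this timer running every ND6_TMR_INTERVAL, the nd6_tmr_rs_reduction countdown limits each interface to one solicitation per ND6_RTR_SOLICITATION_INTERVAL; an RS is sent only while netif->rs_count is non-zero and the interface is up, with a link up and a link-local address that is neither invalid nor duplicated, and rs_count is decremented only when the transmission succeeds.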
*/ + if (!nd6_tmr_rs_reduction) { + nd6_tmr_rs_reduction = (ND6_RTR_SOLICITATION_INTERVAL / ND6_TMR_INTERVAL) - 1; + NETIF_FOREACH(netif) { + if ((netif->rs_count > 0) && netif_is_up(netif) && + netif_is_link_up(netif) && + !ip6_addr_isinvalid(netif_ip6_addr_state(netif, 0)) && + !ip6_addr_isduplicated(netif_ip6_addr_state(netif, 0))) { + if (nd6_send_rs(netif) == ERR_OK) { + netif->rs_count--; + } + } + } + } else { + nd6_tmr_rs_reduction--; + } +#endif /* LWIP_IPV6_SEND_ROUTER_SOLICIT */ + +} + +/** Send a neighbor solicitation message for a specific neighbor cache entry + * + * @param entry the neightbor cache entry for wich to send the message + * @param flags one of ND6_SEND_FLAG_* + */ +static void +nd6_send_neighbor_cache_probe(struct nd6_neighbor_cache_entry *entry, u8_t flags) +{ + nd6_send_ns(entry->netif, &entry->next_hop_address, flags); +} + +/** + * Send a neighbor solicitation message + * + * @param netif the netif on which to send the message + * @param target_addr the IPv6 target address for the ND message + * @param flags one of ND6_SEND_FLAG_* + */ +static void +nd6_send_ns(struct netif *netif, const ip6_addr_t *target_addr, u8_t flags) +{ + struct ns_header *ns_hdr; + struct pbuf *p; + const ip6_addr_t *src_addr; + u16_t lladdr_opt_len; + + LWIP_ASSERT("target address is required", target_addr != NULL); + + if (!(flags & ND6_SEND_FLAG_ANY_SRC) && + ip6_addr_isvalid(netif_ip6_addr_state(netif,0))) { + /* Use link-local address as source address. */ + src_addr = netif_ip6_addr(netif, 0); + /* calculate option length (in 8-byte-blocks) */ + lladdr_opt_len = ((netif->hwaddr_len + 2) + 7) >> 3; + } else { + src_addr = IP6_ADDR_ANY6; + /* Option "MUST NOT be included when the source IP address is the unspecified address." */ + lladdr_opt_len = 0; + } + + /* Allocate a packet. */ + p = pbuf_alloc(PBUF_IP, sizeof(struct ns_header) + (lladdr_opt_len << 3), PBUF_RAM); + if (p == NULL) { + ND6_STATS_INC(nd6.memerr); + return; + } + + /* Set fields. */ + ns_hdr = (struct ns_header *)p->payload; + + ns_hdr->type = ICMP6_TYPE_NS; + ns_hdr->code = 0; + ns_hdr->chksum = 0; + ns_hdr->reserved = 0; + ip6_addr_copy_to_packed(ns_hdr->target_address, *target_addr); + + if (lladdr_opt_len != 0) { + struct lladdr_option *lladdr_opt = (struct lladdr_option *)((u8_t*)p->payload + sizeof(struct ns_header)); + lladdr_opt->type = ND6_OPTION_TYPE_SOURCE_LLADDR; + lladdr_opt->length = (u8_t)lladdr_opt_len; + SMEMCPY(lladdr_opt->addr, netif->hwaddr, netif->hwaddr_len); + } + + /* Generate the solicited node address for the target address. */ + if (flags & ND6_SEND_FLAG_MULTICAST_DEST) { + ip6_addr_set_solicitednode(&multicast_address, target_addr->addr[3]); + ip6_addr_assign_zone(&multicast_address, IP6_MULTICAST, netif); + target_addr = &multicast_address; + } + +#if CHECKSUM_GEN_ICMP6 + IF__NETIF_CHECKSUM_ENABLED(netif, NETIF_CHECKSUM_GEN_ICMP6) { + ns_hdr->chksum = ip6_chksum_pseudo(p, IP6_NEXTH_ICMP6, p->len, src_addr, + target_addr); + } +#endif /* CHECKSUM_GEN_ICMP6 */ + + /* Send the packet out. */ + ND6_STATS_INC(nd6.xmit); + ip6_output_if(p, (src_addr == IP6_ADDR_ANY6) ? 
NULL : src_addr, target_addr, + ND6_HOPLIM, 0, IP6_NEXTH_ICMP6, netif); + pbuf_free(p); +} + +/** + * Send a neighbor advertisement message + * + * @param netif the netif on which to send the message + * @param target_addr the IPv6 target address for the ND message + * @param flags one of ND6_SEND_FLAG_* + */ +static void +nd6_send_na(struct netif *netif, const ip6_addr_t *target_addr, u8_t flags) +{ + struct na_header *na_hdr; + struct lladdr_option *lladdr_opt; + struct pbuf *p; + const ip6_addr_t *src_addr; + const ip6_addr_t *dest_addr; + u16_t lladdr_opt_len; + + LWIP_ASSERT("target address is required", target_addr != NULL); + + /* Use link-local address as source address. */ + /* src_addr = netif_ip6_addr(netif, 0); */ + /* Use target address as source address. */ + src_addr = target_addr; + + /* Allocate a packet. */ + lladdr_opt_len = ((netif->hwaddr_len + 2) >> 3) + (((netif->hwaddr_len + 2) & 0x07) ? 1 : 0); + p = pbuf_alloc(PBUF_IP, sizeof(struct na_header) + (lladdr_opt_len << 3), PBUF_RAM); + if (p == NULL) { + ND6_STATS_INC(nd6.memerr); + return; + } + + /* Set fields. */ + na_hdr = (struct na_header *)p->payload; + lladdr_opt = (struct lladdr_option *)((u8_t*)p->payload + sizeof(struct na_header)); + + na_hdr->type = ICMP6_TYPE_NA; + na_hdr->code = 0; + na_hdr->chksum = 0; + na_hdr->flags = flags & 0xf0; + na_hdr->reserved[0] = 0; + na_hdr->reserved[1] = 0; + na_hdr->reserved[2] = 0; + ip6_addr_copy_to_packed(na_hdr->target_address, *target_addr); + + lladdr_opt->type = ND6_OPTION_TYPE_TARGET_LLADDR; + lladdr_opt->length = (u8_t)lladdr_opt_len; + SMEMCPY(lladdr_opt->addr, netif->hwaddr, netif->hwaddr_len); + + /* Generate the solicited node address for the target address. */ + if (flags & ND6_SEND_FLAG_MULTICAST_DEST) { + ip6_addr_set_solicitednode(&multicast_address, target_addr->addr[3]); + ip6_addr_assign_zone(&multicast_address, IP6_MULTICAST, netif); + dest_addr = &multicast_address; + } else if (flags & ND6_SEND_FLAG_ALLNODES_DEST) { + ip6_addr_set_allnodes_linklocal(&multicast_address); + ip6_addr_assign_zone(&multicast_address, IP6_MULTICAST, netif); + dest_addr = &multicast_address; + } else { + dest_addr = ip6_current_src_addr(); + } + +#if CHECKSUM_GEN_ICMP6 + IF__NETIF_CHECKSUM_ENABLED(netif, NETIF_CHECKSUM_GEN_ICMP6) { + na_hdr->chksum = ip6_chksum_pseudo(p, IP6_NEXTH_ICMP6, p->len, src_addr, + dest_addr); + } +#endif /* CHECKSUM_GEN_ICMP6 */ + + /* Send the packet out. */ + ND6_STATS_INC(nd6.xmit); + ip6_output_if(p, src_addr, dest_addr, + ND6_HOPLIM, 0, IP6_NEXTH_ICMP6, netif); + pbuf_free(p); +} + +#if LWIP_IPV6_SEND_ROUTER_SOLICIT +/** + * Send a router solicitation message + * + * @param netif the netif on which to send the message + */ +static err_t +nd6_send_rs(struct netif *netif) +{ + struct rs_header *rs_hdr; + struct lladdr_option *lladdr_opt; + struct pbuf *p; + const ip6_addr_t *src_addr; + err_t err; + u16_t lladdr_opt_len = 0; + + /* Link-local source address, or unspecified address? */ + if (ip6_addr_isvalid(netif_ip6_addr_state(netif, 0))) { + src_addr = netif_ip6_addr(netif, 0); + } else { + src_addr = IP6_ADDR_ANY6; + } + + /* Generate the all routers target address. */ + ip6_addr_set_allrouters_linklocal(&multicast_address); + ip6_addr_assign_zone(&multicast_address, IP6_MULTICAST, netif); + + /* Allocate a packet. */ + if (src_addr != IP6_ADDR_ANY6) { + lladdr_opt_len = ((netif->hwaddr_len + 2) >> 3) + (((netif->hwaddr_len + 2) & 0x07) ? 
1 : 0); + } + p = pbuf_alloc(PBUF_IP, sizeof(struct rs_header) + (lladdr_opt_len << 3), PBUF_RAM); + if (p == NULL) { + ND6_STATS_INC(nd6.memerr); + return ERR_BUF; + } + + /* Set fields. */ + rs_hdr = (struct rs_header *)p->payload; + + rs_hdr->type = ICMP6_TYPE_RS; + rs_hdr->code = 0; + rs_hdr->chksum = 0; + rs_hdr->reserved = 0; + + if (src_addr != IP6_ADDR_ANY6) { + /* Include our hw address. */ + lladdr_opt = (struct lladdr_option *)((u8_t*)p->payload + sizeof(struct rs_header)); + lladdr_opt->type = ND6_OPTION_TYPE_SOURCE_LLADDR; + lladdr_opt->length = (u8_t)lladdr_opt_len; + SMEMCPY(lladdr_opt->addr, netif->hwaddr, netif->hwaddr_len); + } + +#if CHECKSUM_GEN_ICMP6 + IF__NETIF_CHECKSUM_ENABLED(netif, NETIF_CHECKSUM_GEN_ICMP6) { + rs_hdr->chksum = ip6_chksum_pseudo(p, IP6_NEXTH_ICMP6, p->len, src_addr, + &multicast_address); + } +#endif /* CHECKSUM_GEN_ICMP6 */ + + /* Send the packet out. */ + ND6_STATS_INC(nd6.xmit); + + err = ip6_output_if(p, (src_addr == IP6_ADDR_ANY6) ? NULL : src_addr, &multicast_address, + ND6_HOPLIM, 0, IP6_NEXTH_ICMP6, netif); + pbuf_free(p); + + return err; +} +#endif /* LWIP_IPV6_SEND_ROUTER_SOLICIT */ + +/** + * Search for a neighbor cache entry + * + * @param ip6addr the IPv6 address of the neighbor + * @return The neighbor cache entry index that matched, -1 if no + * entry is found + */ +static s8_t +nd6_find_neighbor_cache_entry(const ip6_addr_t *ip6addr) +{ + s8_t i; + for (i = 0; i < LWIP_ND6_NUM_NEIGHBORS; i++) { + if (ip6_addr_cmp(ip6addr, &(neighbor_cache[i].next_hop_address))) { + return i; + } + } + return -1; +} + +/** + * Create a new neighbor cache entry. + * + * If no unused entry is found, will try to recycle an old entry + * according to ad-hoc "age" heuristic. + * + * @return The neighbor cache entry index that was created, -1 if no + * entry could be created + */ +static s8_t +nd6_new_neighbor_cache_entry(void) +{ + s8_t i; + s8_t j; + u32_t time; + + + /* First, try to find an empty entry. */ + for (i = 0; i < LWIP_ND6_NUM_NEIGHBORS; i++) { + if (neighbor_cache[i].state == ND6_NO_ENTRY) { + return i; + } + } + + /* We need to recycle an entry. in general, do not recycle if it is a router. */ + + /* Next, try to find a Stale entry. */ + for (i = 0; i < LWIP_ND6_NUM_NEIGHBORS; i++) { + if ((neighbor_cache[i].state == ND6_STALE) && + (!neighbor_cache[i].isrouter)) { + nd6_free_neighbor_cache_entry(i); + return i; + } + } + + /* Next, try to find a Probe entry. */ + for (i = 0; i < LWIP_ND6_NUM_NEIGHBORS; i++) { + if ((neighbor_cache[i].state == ND6_PROBE) && + (!neighbor_cache[i].isrouter)) { + nd6_free_neighbor_cache_entry(i); + return i; + } + } + + /* Next, try to find a Delayed entry. */ + for (i = 0; i < LWIP_ND6_NUM_NEIGHBORS; i++) { + if ((neighbor_cache[i].state == ND6_DELAY) && + (!neighbor_cache[i].isrouter)) { + nd6_free_neighbor_cache_entry(i); + return i; + } + } + + /* Next, try to find the oldest reachable entry. */ + time = 0xfffffffful; + j = -1; + for (i = 0; i < LWIP_ND6_NUM_NEIGHBORS; i++) { + if ((neighbor_cache[i].state == ND6_REACHABLE) && + (!neighbor_cache[i].isrouter)) { + if (neighbor_cache[i].counter.reachable_time < time) { + j = i; + time = neighbor_cache[i].counter.reachable_time; + } + } + } + if (j >= 0) { + nd6_free_neighbor_cache_entry(j); + return j; + } + + /* Next, find oldest incomplete entry without queued packets. 
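Age is approximated by the probes_sent counter, so the entry with the most probes sent is recycled first; incomplete entries that still hold queued packets are recycled only by the following loop, as a last resort.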
*/ + time = 0; + j = -1; + for (i = 0; i < LWIP_ND6_NUM_NEIGHBORS; i++) { + if ( + (neighbor_cache[i].q == NULL) && + (neighbor_cache[i].state == ND6_INCOMPLETE) && + (!neighbor_cache[i].isrouter)) { + if (neighbor_cache[i].counter.probes_sent >= time) { + j = i; + time = neighbor_cache[i].counter.probes_sent; + } + } + } + if (j >= 0) { + nd6_free_neighbor_cache_entry(j); + return j; + } + + /* Next, find oldest incomplete entry with queued packets. */ + time = 0; + j = -1; + for (i = 0; i < LWIP_ND6_NUM_NEIGHBORS; i++) { + if ((neighbor_cache[i].state == ND6_INCOMPLETE) && + (!neighbor_cache[i].isrouter)) { + if (neighbor_cache[i].counter.probes_sent >= time) { + j = i; + time = neighbor_cache[i].counter.probes_sent; + } + } + } + if (j >= 0) { + nd6_free_neighbor_cache_entry(j); + return j; + } + + /* No more entries to try. */ + return -1; +} + +/** + * Will free any resources associated with a neighbor cache + * entry, and will mark it as unused. + * + * @param i the neighbor cache entry index to free + */ +static void +nd6_free_neighbor_cache_entry(s8_t i) +{ + if ((i < 0) || (i >= LWIP_ND6_NUM_NEIGHBORS)) { + return; + } + if (neighbor_cache[i].isrouter) { + /* isrouter needs to be cleared before deleting a neighbor cache entry */ + return; + } + + /* Free any queued packets. */ + if (neighbor_cache[i].q != NULL) { + nd6_free_q(neighbor_cache[i].q); + neighbor_cache[i].q = NULL; + } + + neighbor_cache[i].state = ND6_NO_ENTRY; + neighbor_cache[i].isrouter = 0; + neighbor_cache[i].netif = NULL; + neighbor_cache[i].counter.reachable_time = 0; + ip6_addr_set_zero(&(neighbor_cache[i].next_hop_address)); +} + +/** + * Search for a destination cache entry + * + * @param ip6addr the IPv6 address of the destination + * @return The destination cache entry index that matched, -1 if no + * entry is found + */ +static s16_t +nd6_find_destination_cache_entry(const ip6_addr_t *ip6addr) +{ + s16_t i; + + IP6_ADDR_ZONECHECK(ip6addr); + + for (i = 0; i < LWIP_ND6_NUM_DESTINATIONS; i++) { + if (ip6_addr_cmp(ip6addr, &(destination_cache[i].destination_addr))) { + return i; + } + } + return -1; +} + +/** + * Create a new destination cache entry. If no unused entry is found, + * will recycle oldest entry. + * + * @return The destination cache entry index that was created, -1 if no + * entry was created + */ +static s16_t +nd6_new_destination_cache_entry(void) +{ + s16_t i, j; + u32_t age; + + /* Find an empty entry. */ + for (i = 0; i < LWIP_ND6_NUM_DESTINATIONS; i++) { + if (ip6_addr_isany(&(destination_cache[i].destination_addr))) { + return i; + } + } + + /* Find oldest entry. */ + age = 0; + j = LWIP_ND6_NUM_DESTINATIONS - 1; + for (i = 0; i < LWIP_ND6_NUM_DESTINATIONS; i++) { + if (destination_cache[i].age > age) { + j = i; + } + } + + return j; +} + +/** + * Clear the destination cache. + * + * This operation may be necessary for consistency in the light of changing + * local addresses and/or use of the gateway hook. + */ +void +nd6_clear_destination_cache(void) +{ + int i; + + for (i = 0; i < LWIP_ND6_NUM_DESTINATIONS; i++) { + ip6_addr_set_any(&destination_cache[i].destination_addr); + } +} + +/** + * Determine whether an address matches an on-link prefix or the subnet of a + * statically assigned address. + * + * @param ip6addr the IPv6 address to match + * @return 1 if the address is on-link, 0 otherwise + */ +static int +nd6_is_prefix_in_netif(const ip6_addr_t *ip6addr, struct netif *netif) +{ + s8_t i; + + /* Check to see if the address matches an on-link prefix. 
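Only prefixes learned for this netif that are still valid, i.e. with a non-zero invalidation_timer, are considered; statically assigned addresses are handled by the second loop below.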
*/ + for (i = 0; i < LWIP_ND6_NUM_PREFIXES; i++) { + if ((prefix_list[i].netif == netif) && + (prefix_list[i].invalidation_timer > 0) && + ip6_addr_netcmp(ip6addr, &(prefix_list[i].prefix))) { + return 1; + } + } + /* Check to see if address prefix matches a manually configured (= static) + * address. Static addresses have an implied /64 subnet assignment. Dynamic + * addresses (from autoconfiguration) have no implied subnet assignment, and + * are thus effectively /128 assignments. See RFC 5942 for more on this. */ + for (i = 0; i < LWIP_IPV6_NUM_ADDRESSES; i++) { + if (ip6_addr_isvalid(netif_ip6_addr_state(netif, i)) && + netif_ip6_addr_isstatic(netif, i) && + ip6_addr_netcmp(ip6addr, netif_ip6_addr(netif, i))) { + return 1; + } + } + return 0; +} + +/** + * Select a default router for a destination. + * + * This function is used both for routing and for finding a next-hop target for + * a packet. In the former case, the given netif is NULL, and the returned + * router entry must be for a netif suitable for sending packets (up, link up). + * In the latter case, the given netif is not NULL and restricts router choice. + * + * @param ip6addr the destination address + * @param netif the netif for the outgoing packet, if known + * @return the default router entry index, or -1 if no suitable + * router is found + */ +static s8_t +nd6_select_router(const ip6_addr_t *ip6addr, struct netif *netif) +{ + struct netif *router_netif; + s8_t i, j, valid_router; + static s8_t last_router; + + LWIP_UNUSED_ARG(ip6addr); /* @todo match preferred routes!! (must implement ND6_OPTION_TYPE_ROUTE_INFO) */ + + /* @todo: implement default router preference */ + + /* Look for valid routers. A reachable router is preferred. */ + valid_router = -1; + for (i = 0; i < LWIP_ND6_NUM_ROUTERS; i++) { + /* Is the router netif both set and apppropriate? */ + if (default_router_list[i].neighbor_entry != NULL) { + router_netif = default_router_list[i].neighbor_entry->netif; + if ((router_netif != NULL) && (netif != NULL ? netif == router_netif : + (netif_is_up(router_netif) && netif_is_link_up(router_netif)))) { + /* Is the router valid, i.e., reachable or probably reachable as per + * RFC 4861 Sec. 6.3.6? Note that we will never return a router that + * has no neighbor cache entry, due to the netif association tests. */ + if (default_router_list[i].neighbor_entry->state != ND6_INCOMPLETE) { + /* Is the router known to be reachable? */ + if (default_router_list[i].neighbor_entry->state == ND6_REACHABLE) { + return i; /* valid and reachable - done! */ + } else if (valid_router < 0) { + valid_router = i; /* valid but not known to be reachable */ + } + } + } + } + } + if (valid_router >= 0) { + return valid_router; + } + + /* Look for any router for which we have any information at all. */ + /* last_router is used for round-robin selection of incomplete routers, as + * recommended in RFC 4861 Sec. 6.3.6 point (2). Advance only when picking a + * route, to select the same router as next-hop target in the common case. */ + if ((netif == NULL) && (++last_router >= LWIP_ND6_NUM_ROUTERS)) { + last_router = 0; + } + i = last_router; + for (j = 0; j < LWIP_ND6_NUM_ROUTERS; j++) { + if (default_router_list[i].neighbor_entry != NULL) { + router_netif = default_router_list[i].neighbor_entry->netif; + if ((router_netif != NULL) && (netif != NULL ? 
netif == router_netif : + (netif_is_up(router_netif) && netif_is_link_up(router_netif)))) { + return i; + } + } + if (++i >= LWIP_ND6_NUM_ROUTERS) { + i = 0; + } + } + + /* no suitable router found. */ + return -1; +} + +/** + * Find a router-announced route to the given destination. This route may be + * based on an on-link prefix or a default router. + * + * If a suitable route is found, the returned netif is guaranteed to be in a + * suitable state (up, link up) to be used for packet transmission. + * + * @param ip6addr the destination IPv6 address + * @return the netif to use for the destination, or NULL if none found + */ +struct netif * +nd6_find_route(const ip6_addr_t *ip6addr) +{ + struct netif *netif; + s8_t i; + + /* @todo decide if it makes sense to check the destination cache first */ + + /* Check if there is a matching on-link prefix. There may be multiple + * matches. Pick the first one that is associated with a suitable netif. */ + for (i = 0; i < LWIP_ND6_NUM_PREFIXES; ++i) { + netif = prefix_list[i].netif; + if ((netif != NULL) && ip6_addr_netcmp(&prefix_list[i].prefix, ip6addr) && + netif_is_up(netif) && netif_is_link_up(netif)) { + return netif; + } + } + + /* No on-link prefix match. Find a router that can forward the packet. */ + i = nd6_select_router(ip6addr, NULL); + if (i >= 0) { + LWIP_ASSERT("selected router must have a neighbor entry", + default_router_list[i].neighbor_entry != NULL); + return default_router_list[i].neighbor_entry->netif; + } + + return NULL; +} + +/** + * Find an entry for a default router. + * + * @param router_addr the IPv6 address of the router + * @param netif the netif on which the router is found, if known + * @return the index of the router entry, or -1 if not found + */ +static s8_t +nd6_get_router(const ip6_addr_t *router_addr, struct netif *netif) +{ + s8_t i; + + IP6_ADDR_ZONECHECK_NETIF(router_addr, netif); + + /* Look for router. */ + for (i = 0; i < LWIP_ND6_NUM_ROUTERS; i++) { + if ((default_router_list[i].neighbor_entry != NULL) && + ((netif != NULL) ? netif == default_router_list[i].neighbor_entry->netif : 1) && + ip6_addr_cmp(router_addr, &(default_router_list[i].neighbor_entry->next_hop_address))) { + return i; + } + } + + /* router not found. */ + return -1; +} + +/** + * Create a new entry for a default router. + * + * @param router_addr the IPv6 address of the router + * @param netif the netif on which the router is connected, if known + * @return the index on the router table, or -1 if could not be created + */ +static s8_t +nd6_new_router(const ip6_addr_t *router_addr, struct netif *netif) +{ + s8_t router_index; + s8_t free_router_index; + s8_t neighbor_index; + + IP6_ADDR_ZONECHECK_NETIF(router_addr, netif); + + /* Do we have a neighbor entry for this router? */ + neighbor_index = nd6_find_neighbor_cache_entry(router_addr); + if (neighbor_index < 0) { + /* Create a neighbor entry for this router. */ + neighbor_index = nd6_new_neighbor_cache_entry(); + if (neighbor_index < 0) { + /* Could not create neighbor entry for this router. */ + return -1; + } + ip6_addr_set(&(neighbor_cache[neighbor_index].next_hop_address), router_addr); + neighbor_cache[neighbor_index].netif = netif; + neighbor_cache[neighbor_index].q = NULL; + neighbor_cache[neighbor_index].state = ND6_INCOMPLETE; + neighbor_cache[neighbor_index].counter.probes_sent = 1; + nd6_send_neighbor_cache_probe(&neighbor_cache[neighbor_index], ND6_SEND_FLAG_MULTICAST_DEST); + } + + /* Mark neighbor as router. 
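The isrouter flag also protects the entry from eviction: nd6_free_neighbor_cache_entry and the recycling paths in nd6_new_neighbor_cache_entry skip entries flagged as routers, which is why the flag is cleared again further down if no router list slot can be allocated.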
*/ + neighbor_cache[neighbor_index].isrouter = 1; + + /* Look for empty entry. */ + free_router_index = LWIP_ND6_NUM_ROUTERS; + for (router_index = LWIP_ND6_NUM_ROUTERS - 1; router_index >= 0; router_index--) { + /* check if router already exists (this is a special case for 2 netifs on the same subnet + - e.g. wifi and cable) */ + if(default_router_list[router_index].neighbor_entry == &(neighbor_cache[neighbor_index])){ + return router_index; + } + if (default_router_list[router_index].neighbor_entry == NULL) { + /* remember lowest free index to create a new entry */ + free_router_index = router_index; + } + } + if (free_router_index < LWIP_ND6_NUM_ROUTERS) { + default_router_list[free_router_index].neighbor_entry = &(neighbor_cache[neighbor_index]); + return free_router_index; + } + + /* Could not create a router entry. */ + + /* Mark neighbor entry as not-router. Entry might be useful as neighbor still. */ + neighbor_cache[neighbor_index].isrouter = 0; + + /* router not found. */ + return -1; +} + +/** + * Find the cached entry for an on-link prefix. + * + * @param prefix the IPv6 prefix that is on-link + * @param netif the netif on which the prefix is on-link + * @return the index on the prefix table, or -1 if not found + */ +static s8_t +nd6_get_onlink_prefix(const ip6_addr_t *prefix, struct netif *netif) +{ + s8_t i; + + /* Look for prefix in list. */ + for (i = 0; i < LWIP_ND6_NUM_PREFIXES; ++i) { + if ((ip6_addr_netcmp(&(prefix_list[i].prefix), prefix)) && + (prefix_list[i].netif == netif)) { + return i; + } + } + + /* Entry not available. */ + return -1; +} + +/** + * Creates a new entry for an on-link prefix. + * + * @param prefix the IPv6 prefix that is on-link + * @param netif the netif on which the prefix is on-link + * @return the index on the prefix table, or -1 if not created + */ +static s8_t +nd6_new_onlink_prefix(const ip6_addr_t *prefix, struct netif *netif) +{ + s8_t i; + + /* Create new entry. */ + for (i = 0; i < LWIP_ND6_NUM_PREFIXES; ++i) { + if ((prefix_list[i].netif == NULL) || + (prefix_list[i].invalidation_timer == 0)) { + /* Found empty prefix entry. */ + prefix_list[i].netif = netif; + ip6_addr_set(&(prefix_list[i].prefix), prefix); + return i; + } + } + + /* Entry not available. */ + return -1; +} + +/** + * Determine the next hop for a destination. Will determine if the + * destination is on-link, else a suitable on-link router is selected. + * + * The last entry index is cached for fast entry search. + * + * @param ip6addr the destination address + * @param netif the netif on which the packet will be sent + * @return the neighbor cache entry for the next hop, ERR_RTE if no + * suitable next hop was found, ERR_MEM if no cache entry + * could be created + */ +static s8_t +nd6_get_next_hop_entry(const ip6_addr_t *ip6addr, struct netif *netif) +{ +#ifdef LWIP_HOOK_ND6_GET_GW + const ip6_addr_t *next_hop_addr; +#endif /* LWIP_HOOK_ND6_GET_GW */ + s8_t i; + s16_t dst_idx; + + IP6_ADDR_ZONECHECK_NETIF(ip6addr, netif); + +#if LWIP_NETIF_HWADDRHINT + if (netif->hints != NULL) { + /* per-pcb cached entry was given */ + netif_addr_idx_t addr_hint = netif->hints->addr_hint; + if (addr_hint < LWIP_ND6_NUM_DESTINATIONS) { + nd6_cached_destination_index = addr_hint; + } + } +#endif /* LWIP_NETIF_HWADDRHINT */ + + /* Look for ip6addr in destination cache. */ + if (ip6_addr_cmp(ip6addr, &(destination_cache[nd6_cached_destination_index].destination_addr))) { + /* the cached entry index is the right one! */ + /* do nothing. 
*/ + ND6_STATS_INC(nd6.cachehit); + } else { + /* Search destination cache. */ + dst_idx = nd6_find_destination_cache_entry(ip6addr); + if (dst_idx >= 0) { + /* found destination entry. make it our new cached index. */ + LWIP_ASSERT("type overflow", (size_t)dst_idx < NETIF_ADDR_IDX_MAX); + nd6_cached_destination_index = (netif_addr_idx_t)dst_idx; + } else { + /* Not found. Create a new destination entry. */ + dst_idx = nd6_new_destination_cache_entry(); + if (dst_idx >= 0) { + /* got new destination entry. make it our new cached index. */ + LWIP_ASSERT("type overflow", (size_t)dst_idx < NETIF_ADDR_IDX_MAX); + nd6_cached_destination_index = (netif_addr_idx_t)dst_idx; + } else { + /* Could not create a destination cache entry. */ + return ERR_MEM; + } + + /* Copy dest address to destination cache. */ + ip6_addr_set(&(destination_cache[nd6_cached_destination_index].destination_addr), ip6addr); + + /* Now find the next hop. is it a neighbor? */ + if (ip6_addr_islinklocal(ip6addr) || + nd6_is_prefix_in_netif(ip6addr, netif)) { + /* Destination in local link. */ + destination_cache[nd6_cached_destination_index].pmtu = netif_mtu6(netif); + ip6_addr_copy(destination_cache[nd6_cached_destination_index].next_hop_addr, destination_cache[nd6_cached_destination_index].destination_addr); +#ifdef LWIP_HOOK_ND6_GET_GW + } else if ((next_hop_addr = LWIP_HOOK_ND6_GET_GW(netif, ip6addr)) != NULL) { + /* Next hop for destination provided by hook function. */ + destination_cache[nd6_cached_destination_index].pmtu = netif->mtu; + ip6_addr_set(&destination_cache[nd6_cached_destination_index].next_hop_addr, next_hop_addr); +#endif /* LWIP_HOOK_ND6_GET_GW */ + } else { + /* We need to select a router. */ + i = nd6_select_router(ip6addr, netif); + if (i < 0) { + /* No router found. */ + ip6_addr_set_any(&(destination_cache[nd6_cached_destination_index].destination_addr)); + return ERR_RTE; + } + destination_cache[nd6_cached_destination_index].pmtu = netif_mtu6(netif); /* Start with netif mtu, correct through ICMPv6 if necessary */ + ip6_addr_copy(destination_cache[nd6_cached_destination_index].next_hop_addr, default_router_list[i].neighbor_entry->next_hop_address); + } + } + } + +#if LWIP_NETIF_HWADDRHINT + if (netif->hints != NULL) { + /* per-pcb cached entry was given */ + netif->hints->addr_hint = nd6_cached_destination_index; + } +#endif /* LWIP_NETIF_HWADDRHINT */ + + /* Look in neighbor cache for the next-hop address. */ + if (ip6_addr_cmp(&(destination_cache[nd6_cached_destination_index].next_hop_addr), + &(neighbor_cache[nd6_cached_neighbor_index].next_hop_address))) { + /* Cache hit. */ + /* Do nothing. */ + ND6_STATS_INC(nd6.cachehit); + } else { + i = nd6_find_neighbor_cache_entry(&(destination_cache[nd6_cached_destination_index].next_hop_addr)); + if (i >= 0) { + /* Found a matching record, make it new cached entry. */ + nd6_cached_neighbor_index = i; + } else { + /* Neighbor not in cache. Make a new entry. */ + i = nd6_new_neighbor_cache_entry(); + if (i >= 0) { + /* got new neighbor entry. make it our new cached index. */ + nd6_cached_neighbor_index = i; + } else { + /* Could not create a neighbor cache entry. */ + return ERR_MEM; + } + + /* Initialize fields. 
*/ + ip6_addr_copy(neighbor_cache[i].next_hop_address, + destination_cache[nd6_cached_destination_index].next_hop_addr); + neighbor_cache[i].isrouter = 0; + neighbor_cache[i].netif = netif; + neighbor_cache[i].state = ND6_INCOMPLETE; + neighbor_cache[i].counter.probes_sent = 1; + nd6_send_neighbor_cache_probe(&neighbor_cache[i], ND6_SEND_FLAG_MULTICAST_DEST); + } + } + + /* Reset this destination's age. */ + destination_cache[nd6_cached_destination_index].age = 0; + + return nd6_cached_neighbor_index; +} + +/** + * Queue a packet for a neighbor. + * + * @param neighbor_index the index in the neighbor cache table + * @param q packet to be queued + * @return ERR_OK if succeeded, ERR_MEM if out of memory + */ +static err_t +nd6_queue_packet(s8_t neighbor_index, struct pbuf *q) +{ + err_t result = ERR_MEM; + struct pbuf *p; + int copy_needed = 0; +#if LWIP_ND6_QUEUEING + struct nd6_q_entry *new_entry, *r; +#endif /* LWIP_ND6_QUEUEING */ + + if ((neighbor_index < 0) || (neighbor_index >= LWIP_ND6_NUM_NEIGHBORS)) { + return ERR_ARG; + } + + /* IF q includes a pbuf that must be copied, we have to copy the whole chain + * into a new PBUF_RAM. See the definition of PBUF_NEEDS_COPY for details. */ + p = q; + while (p) { + if (PBUF_NEEDS_COPY(p)) { + copy_needed = 1; + break; + } + p = p->next; + } + if (copy_needed) { + /* copy the whole packet into new pbufs */ + p = pbuf_clone(PBUF_LINK, PBUF_RAM, q); + while ((p == NULL) && (neighbor_cache[neighbor_index].q != NULL)) { + /* Free oldest packet (as per RFC recommendation) */ +#if LWIP_ND6_QUEUEING + r = neighbor_cache[neighbor_index].q; + neighbor_cache[neighbor_index].q = r->next; + r->next = NULL; + nd6_free_q(r); +#else /* LWIP_ND6_QUEUEING */ + pbuf_free(neighbor_cache[neighbor_index].q); + neighbor_cache[neighbor_index].q = NULL; +#endif /* LWIP_ND6_QUEUEING */ + p = pbuf_clone(PBUF_LINK, PBUF_RAM, q); + } + } else { + /* referencing the old pbuf is enough */ + p = q; + pbuf_ref(p); + } + /* packet was copied/ref'd? */ + if (p != NULL) { + /* queue packet ... */ +#if LWIP_ND6_QUEUEING + /* allocate a new nd6 queue entry */ + new_entry = (struct nd6_q_entry *)memp_malloc(MEMP_ND6_QUEUE); + if ((new_entry == NULL) && (neighbor_cache[neighbor_index].q != NULL)) { + /* Free oldest packet (as per RFC recommendation) */ + r = neighbor_cache[neighbor_index].q; + neighbor_cache[neighbor_index].q = r->next; + r->next = NULL; + nd6_free_q(r); + new_entry = (struct nd6_q_entry *)memp_malloc(MEMP_ND6_QUEUE); + } + if (new_entry != NULL) { + new_entry->next = NULL; + new_entry->p = p; + if (neighbor_cache[neighbor_index].q != NULL) { + /* queue was already existent, append the new entry to the end */ + r = neighbor_cache[neighbor_index].q; + while (r->next != NULL) { + r = r->next; + } + r->next = new_entry; + } else { + /* queue did not exist, first item in queue */ + neighbor_cache[neighbor_index].q = new_entry; + } + LWIP_DEBUGF(LWIP_DBG_TRACE, ("ipv6: queued packet %p on neighbor entry %"S16_F"\n", (void *)p, (s16_t)neighbor_index)); + result = ERR_OK; + } else { + /* the pool MEMP_ND6_QUEUE is empty */ + pbuf_free(p); + LWIP_DEBUGF(LWIP_DBG_TRACE, ("ipv6: could not queue a copy of packet %p (out of memory)\n", (void *)p)); + /* { result == ERR_MEM } through initialization */ + } +#else /* LWIP_ND6_QUEUEING */ + /* Queue a single packet. If an older packet is already queued, free it as per RFC. 
*/ + if (neighbor_cache[neighbor_index].q != NULL) { + pbuf_free(neighbor_cache[neighbor_index].q); + } + neighbor_cache[neighbor_index].q = p; + LWIP_DEBUGF(LWIP_DBG_TRACE, ("ipv6: queued packet %p on neighbor entry %"S16_F"\n", (void *)p, (s16_t)neighbor_index)); + result = ERR_OK; +#endif /* LWIP_ND6_QUEUEING */ + } else { + LWIP_DEBUGF(LWIP_DBG_TRACE, ("ipv6: could not queue a copy of packet %p (out of memory)\n", (void *)q)); + /* { result == ERR_MEM } through initialization */ + } + + return result; +} + +#if LWIP_ND6_QUEUEING +/** + * Free a complete queue of nd6 q entries + * + * @param q a queue of nd6_q_entry to free + */ +static void +nd6_free_q(struct nd6_q_entry *q) +{ + struct nd6_q_entry *r; + LWIP_ASSERT("q != NULL", q != NULL); + LWIP_ASSERT("q->p != NULL", q->p != NULL); + while (q) { + r = q; + q = q->next; + LWIP_ASSERT("r->p != NULL", (r->p != NULL)); + pbuf_free(r->p); + memp_free(MEMP_ND6_QUEUE, r); + } +} +#endif /* LWIP_ND6_QUEUEING */ + +/** + * Send queued packets for a neighbor + * + * @param i the neighbor to send packets to + */ +static void +nd6_send_q(s8_t i) +{ + struct ip6_hdr *ip6hdr; + ip6_addr_t dest; +#if LWIP_ND6_QUEUEING + struct nd6_q_entry *q; +#endif /* LWIP_ND6_QUEUEING */ + + if ((i < 0) || (i >= LWIP_ND6_NUM_NEIGHBORS)) { + return; + } + +#if LWIP_ND6_QUEUEING + while (neighbor_cache[i].q != NULL) { + /* remember first in queue */ + q = neighbor_cache[i].q; + /* pop first item off the queue */ + neighbor_cache[i].q = q->next; + /* Get ipv6 header. */ + ip6hdr = (struct ip6_hdr *)(q->p->payload); + /* Create an aligned copy. */ + ip6_addr_copy_from_packed(dest, ip6hdr->dest); + /* Restore the zone, if applicable. */ + ip6_addr_assign_zone(&dest, IP6_UNKNOWN, neighbor_cache[i].netif); + /* send the queued IPv6 packet */ + (neighbor_cache[i].netif)->output_ip6(neighbor_cache[i].netif, q->p, &dest); + /* free the queued IP packet */ + pbuf_free(q->p); + /* now queue entry can be freed */ + memp_free(MEMP_ND6_QUEUE, q); + } +#else /* LWIP_ND6_QUEUEING */ + if (neighbor_cache[i].q != NULL) { + /* Get ipv6 header. */ + ip6hdr = (struct ip6_hdr *)(neighbor_cache[i].q->payload); + /* Create an aligned copy. */ + ip6_addr_copy_from_packed(dest, ip6hdr->dest); + /* Restore the zone, if applicable. */ + ip6_addr_assign_zone(&dest, IP6_UNKNOWN, neighbor_cache[i].netif); + /* send the queued IPv6 packet */ + (neighbor_cache[i].netif)->output_ip6(neighbor_cache[i].netif, neighbor_cache[i].q, &dest); + /* free the queued IP packet */ + pbuf_free(neighbor_cache[i].q); + neighbor_cache[i].q = NULL; + } +#endif /* LWIP_ND6_QUEUEING */ +} + +/** + * A packet is to be transmitted to a specific IPv6 destination on a specific + * interface. Check if we can find the hardware address of the next hop to use + * for the packet. If so, give the hardware address to the caller, which should + * use it to send the packet right away. Otherwise, enqueue the packet for + * later transmission while looking up the hardware address, if possible. + * + * As such, this function returns one of three different possible results: + * + * - ERR_OK with a non-NULL 'hwaddrp': the caller should send the packet now. + * - ERR_OK with a NULL 'hwaddrp': the packet has been enqueued for later. + * - not ERR_OK: something went wrong; forward the error upward in the stack. + * + * @param netif The lwIP network interface on which the IP packet will be sent. + * @param q The pbuf(s) containing the IP packet to be sent. + * @param ip6addr The destination IPv6 address of the packet. 
+ * @param hwaddrp On success, filled with a pointer to a HW address or NULL (meaning + * the packet has been queued). + * @return + * - ERR_OK on success, ERR_RTE if no route was found for the packet, + * or ERR_MEM if low memory conditions prohibit sending the packet at all. + */ +err_t +nd6_get_next_hop_addr_or_queue(struct netif *netif, struct pbuf *q, const ip6_addr_t *ip6addr, const u8_t **hwaddrp) +{ + s8_t i; + + /* Get next hop record. */ + i = nd6_get_next_hop_entry(ip6addr, netif); + if (i < 0) { + /* failed to get a next hop neighbor record. */ + return i; + } + + /* Now that we have a destination record, send or queue the packet. */ + if (neighbor_cache[i].state == ND6_STALE) { + /* Switch to delay state. */ + neighbor_cache[i].state = ND6_DELAY; + neighbor_cache[i].counter.delay_time = LWIP_ND6_DELAY_FIRST_PROBE_TIME / ND6_TMR_INTERVAL; + } + /* @todo should we send or queue if PROBE? send for now, to let unicast NS pass. */ + if ((neighbor_cache[i].state == ND6_REACHABLE) || + (neighbor_cache[i].state == ND6_DELAY) || + (neighbor_cache[i].state == ND6_PROBE)) { + + /* Tell the caller to send out the packet now. */ + *hwaddrp = neighbor_cache[i].lladdr; + return ERR_OK; + } + + /* We should queue packet on this interface. */ + *hwaddrp = NULL; + return nd6_queue_packet(i, q); +} + + +/** + * Get the Path MTU for a destination. + * + * @param ip6addr the destination address + * @param netif the netif on which the packet will be sent + * @return the Path MTU, if known, or the netif default MTU + */ +u16_t +nd6_get_destination_mtu(const ip6_addr_t *ip6addr, struct netif *netif) +{ + s16_t i; + + i = nd6_find_destination_cache_entry(ip6addr); + if (i >= 0) { + if (destination_cache[i].pmtu > 0) { + return destination_cache[i].pmtu; + } + } + + if (netif != NULL) { + return netif_mtu6(netif); + } + + return 1280; /* Minimum MTU */ +} + + +#if LWIP_ND6_TCP_REACHABILITY_HINTS +/** + * Provide the Neighbor discovery process with a hint that a + * destination is reachable. Called by tcp_receive when ACKs are + * received or sent (as per RFC). This is useful to avoid sending + * NS messages every 30 seconds. + * + * @param ip6addr the destination address which is know to be reachable + * by an upper layer protocol (TCP) + */ +void +nd6_reachability_hint(const ip6_addr_t *ip6addr) +{ + s8_t i; + s16_t dst_idx; + + /* Find destination in cache. */ + if (ip6_addr_cmp(ip6addr, &(destination_cache[nd6_cached_destination_index].destination_addr))) { + dst_idx = nd6_cached_destination_index; + ND6_STATS_INC(nd6.cachehit); + } else { + dst_idx = nd6_find_destination_cache_entry(ip6addr); + } + if (dst_idx < 0) { + return; + } + + /* Find next hop neighbor in cache. */ + if (ip6_addr_cmp(&(destination_cache[dst_idx].next_hop_addr), &(neighbor_cache[nd6_cached_neighbor_index].next_hop_address))) { + i = nd6_cached_neighbor_index; + ND6_STATS_INC(nd6.cachehit); + } else { + i = nd6_find_neighbor_cache_entry(&(destination_cache[dst_idx].next_hop_addr)); + } + if (i < 0) { + return; + } + + /* For safety: don't set as reachable if we don't have a LL address yet. Misuse protection. */ + if (neighbor_cache[i].state == ND6_INCOMPLETE || neighbor_cache[i].state == ND6_NO_ENTRY) { + return; + } + + /* Set reachability state. */ + neighbor_cache[i].state = ND6_REACHABLE; + neighbor_cache[i].counter.reachable_time = reachable_time; +} +#endif /* LWIP_ND6_TCP_REACHABILITY_HINTS */ + +/** + * Remove all prefix, neighbor_cache and router entries of the specified netif. 
+ * + * @param netif points to a network interface + */ +void +nd6_cleanup_netif(struct netif *netif) +{ + u8_t i; + s8_t router_index; + for (i = 0; i < LWIP_ND6_NUM_PREFIXES; i++) { + if (prefix_list[i].netif == netif) { + prefix_list[i].netif = NULL; + } + } + for (i = 0; i < LWIP_ND6_NUM_NEIGHBORS; i++) { + if (neighbor_cache[i].netif == netif) { + for (router_index = 0; router_index < LWIP_ND6_NUM_ROUTERS; router_index++) { + if (default_router_list[router_index].neighbor_entry == &neighbor_cache[i]) { + default_router_list[router_index].neighbor_entry = NULL; + default_router_list[router_index].flags = 0; + } + } + neighbor_cache[i].isrouter = 0; + nd6_free_neighbor_cache_entry(i); + } + } + /* Clear the destination cache, since many entries may now have become + * invalid for one of several reasons. As destination cache entries have no + * netif association, use a sledgehammer approach (this can be improved). */ + nd6_clear_destination_cache(); +} + +#if LWIP_IPV6_MLD +/** + * The state of a local IPv6 address entry is about to change. If needed, join + * or leave the solicited-node multicast group for the address. + * + * @param netif The netif that owns the address. + * @param addr_idx The index of the address. + * @param new_state The new (IP6_ADDR_) state for the address. + */ +void +nd6_adjust_mld_membership(struct netif *netif, s8_t addr_idx, u8_t new_state) +{ + u8_t old_state, old_member, new_member; + + old_state = netif_ip6_addr_state(netif, addr_idx); + + /* Determine whether we were, and should be, a member of the solicited-node + * multicast group for this address. For tentative addresses, the group is + * not joined until the address enters the TENTATIVE_1 (or VALID) state. */ + old_member = (old_state != IP6_ADDR_INVALID && old_state != IP6_ADDR_DUPLICATED && old_state != IP6_ADDR_TENTATIVE); + new_member = (new_state != IP6_ADDR_INVALID && new_state != IP6_ADDR_DUPLICATED && new_state != IP6_ADDR_TENTATIVE); + + if (old_member != new_member) { + ip6_addr_set_solicitednode(&multicast_address, netif_ip6_addr(netif, addr_idx)->addr[3]); + ip6_addr_assign_zone(&multicast_address, IP6_MULTICAST, netif); + + if (new_member) { + mld6_joingroup_netif(netif, &multicast_address); + } else { + mld6_leavegroup_netif(netif, &multicast_address); + } + } +} +#endif /* LWIP_IPV6_MLD */ + +/** Netif was added, set up, or reconnected (link up) */ +void +nd6_restart_netif(struct netif *netif) +{ +#if LWIP_IPV6_SEND_ROUTER_SOLICIT + /* Send Router Solicitation messages (see RFC 4861, ch. 6.3.7). */ + netif->rs_count = LWIP_ND6_MAX_MULTICAST_SOLICIT; +#endif /* LWIP_IPV6_SEND_ROUTER_SOLICIT */ +} + +#endif /* LWIP_IPV6 */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/mem.c b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/mem.c new file mode 100644 index 0000000..16fd984 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/mem.c @@ -0,0 +1,1017 @@ +/** + * @file + * Dynamic memory manager + * + * This is a lightweight replacement for the standard C library malloc(). 
+ * + * If you want to use the standard C library malloc() instead, define + * MEM_LIBC_MALLOC to 1 in your lwipopts.h + * + * To let mem_malloc() use pools (prevents fragmentation and is much faster than + * a heap but might waste some memory), define MEM_USE_POOLS to 1, define + * MEMP_USE_CUSTOM_POOLS to 1 and create a file "lwippools.h" that includes a list + * of pools like this (more pools can be added between _START and _END): + * + * Define three pools with sizes 256, 512, and 1512 bytes + * LWIP_MALLOC_MEMPOOL_START + * LWIP_MALLOC_MEMPOOL(20, 256) + * LWIP_MALLOC_MEMPOOL(10, 512) + * LWIP_MALLOC_MEMPOOL(5, 1512) + * LWIP_MALLOC_MEMPOOL_END + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * Simon Goldschmidt + * + */ + +#include "lwip/opt.h" +#include "lwip/mem.h" +#include "lwip/def.h" +#include "lwip/sys.h" +#include "lwip/stats.h" +#include "lwip/err.h" + +#include + +#if MEM_LIBC_MALLOC +#include /* for malloc()/free() */ +#endif + +/* This is overridable for tests only... */ +#ifndef LWIP_MEM_ILLEGAL_FREE +#define LWIP_MEM_ILLEGAL_FREE(msg) LWIP_ASSERT(msg, 0) +#endif + +#define MEM_STATS_INC_LOCKED(x) SYS_ARCH_LOCKED(MEM_STATS_INC(x)) +#define MEM_STATS_INC_USED_LOCKED(x, y) SYS_ARCH_LOCKED(MEM_STATS_INC_USED(x, y)) +#define MEM_STATS_DEC_USED_LOCKED(x, y) SYS_ARCH_LOCKED(MEM_STATS_DEC_USED(x, y)) + +#if MEM_OVERFLOW_CHECK +#define MEM_SANITY_OFFSET MEM_SANITY_REGION_BEFORE_ALIGNED +#define MEM_SANITY_OVERHEAD (MEM_SANITY_REGION_BEFORE_ALIGNED + MEM_SANITY_REGION_AFTER_ALIGNED) +#else +#define MEM_SANITY_OFFSET 0 +#define MEM_SANITY_OVERHEAD 0 +#endif + +#if MEM_OVERFLOW_CHECK || MEMP_OVERFLOW_CHECK +/** + * Check if a mep element was victim of an overflow or underflow + * (e.g. 
the restricted area after/before it has been altered) + * + * @param p the mem element to check + * @param size allocated size of the element + * @param descr1 description of the element source shown on error + * @param descr2 description of the element source shown on error + */ +void +mem_overflow_check_raw(void *p, size_t size, const char *descr1, const char *descr2) +{ +#if MEM_SANITY_REGION_AFTER_ALIGNED || MEM_SANITY_REGION_BEFORE_ALIGNED + u16_t k; + u8_t *m; + +#if MEM_SANITY_REGION_AFTER_ALIGNED > 0 + m = (u8_t *)p + size; + for (k = 0; k < MEM_SANITY_REGION_AFTER_ALIGNED; k++) { + if (m[k] != 0xcd) { + char errstr[128]; + snprintf(errstr, sizeof(errstr), "detected mem overflow in %s%s", descr1, descr2); + LWIP_ASSERT(errstr, 0); + } + } +#endif /* MEM_SANITY_REGION_AFTER_ALIGNED > 0 */ + +#if MEM_SANITY_REGION_BEFORE_ALIGNED > 0 + m = (u8_t *)p - MEM_SANITY_REGION_BEFORE_ALIGNED; + for (k = 0; k < MEM_SANITY_REGION_BEFORE_ALIGNED; k++) { + if (m[k] != 0xcd) { + char errstr[128]; + snprintf(errstr, sizeof(errstr), "detected mem underflow in %s%s", descr1, descr2); + LWIP_ASSERT(errstr, 0); + } + } +#endif /* MEM_SANITY_REGION_BEFORE_ALIGNED > 0 */ +#else + LWIP_UNUSED_ARG(p); + LWIP_UNUSED_ARG(desc); + LWIP_UNUSED_ARG(descr); +#endif +} + +/** + * Initialize the restricted area of a mem element. + */ +void +mem_overflow_init_raw(void *p, size_t size) +{ +#if MEM_SANITY_REGION_BEFORE_ALIGNED > 0 || MEM_SANITY_REGION_AFTER_ALIGNED > 0 + u8_t *m; +#if MEM_SANITY_REGION_BEFORE_ALIGNED > 0 + m = (u8_t *)p - MEM_SANITY_REGION_BEFORE_ALIGNED; + memset(m, 0xcd, MEM_SANITY_REGION_BEFORE_ALIGNED); +#endif +#if MEM_SANITY_REGION_AFTER_ALIGNED > 0 + m = (u8_t *)p + size; + memset(m, 0xcd, MEM_SANITY_REGION_AFTER_ALIGNED); +#endif +#else /* MEM_SANITY_REGION_BEFORE_ALIGNED > 0 || MEM_SANITY_REGION_AFTER_ALIGNED > 0 */ + LWIP_UNUSED_ARG(p); + LWIP_UNUSED_ARG(desc); +#endif /* MEM_SANITY_REGION_BEFORE_ALIGNED > 0 || MEM_SANITY_REGION_AFTER_ALIGNED > 0 */ +} +#endif /* MEM_OVERFLOW_CHECK || MEMP_OVERFLOW_CHECK */ + +#if MEM_LIBC_MALLOC || MEM_USE_POOLS + +/** mem_init is not used when using pools instead of a heap or using + * C library malloc(). + */ +void +mem_init(void) +{ +} + +/** mem_trim is not used when using pools instead of a heap or using + * C library malloc(): we can't free part of a pool element and the stack + * support mem_trim() to return a different pointer + */ +void * +mem_trim(void *mem, mem_size_t size) +{ + LWIP_UNUSED_ARG(size); + return mem; +} +#endif /* MEM_LIBC_MALLOC || MEM_USE_POOLS */ + +#if MEM_LIBC_MALLOC +/* lwIP heap implemented using C library malloc() */ + +/* in case C library malloc() needs extra protection, + * allow these defines to be overridden. + */ +#ifndef mem_clib_free +#define mem_clib_free free +#endif +#ifndef mem_clib_malloc +#define mem_clib_malloc malloc +#endif +#ifndef mem_clib_calloc +#define mem_clib_calloc calloc +#endif + +#if LWIP_STATS && MEM_STATS +#define MEM_LIBC_STATSHELPER_SIZE LWIP_MEM_ALIGN_SIZE(sizeof(mem_size_t)) +#else +#define MEM_LIBC_STATSHELPER_SIZE 0 +#endif + +/** + * Allocate a block of memory with a minimum of 'size' bytes. + * + * @param size is the minimum size of the requested block in bytes. + * @return pointer to allocated memory or NULL if no free memory was found. + * + * Note that the returned value must always be aligned (as defined by MEM_ALIGNMENT). 
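+ *
+ * Illustrative usage sketch (editorial addition, not part of the upstream
+ * lwIP sources; the buffer name and the size of 128 bytes are arbitrary):
+ *
+ *   u8_t *buf = (u8_t *)mem_malloc(128);
+ *   if (buf != NULL) {
+ *     memset(buf, 0, 128);
+ *     mem_free(buf);
+ *   }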
+ */ +void * +mem_malloc(mem_size_t size) +{ + void *ret = mem_clib_malloc(size + MEM_LIBC_STATSHELPER_SIZE); + if (ret == NULL) { + MEM_STATS_INC_LOCKED(err); + } else { + LWIP_ASSERT("malloc() must return aligned memory", LWIP_MEM_ALIGN(ret) == ret); +#if LWIP_STATS && MEM_STATS + *(mem_size_t *)ret = size; + ret = (u8_t *)ret + MEM_LIBC_STATSHELPER_SIZE; + MEM_STATS_INC_USED_LOCKED(used, size); +#endif + } + return ret; +} + +/** Put memory back on the heap + * + * @param rmem is the pointer as returned by a previous call to mem_malloc() + */ +void +mem_free(void *rmem) +{ + LWIP_ASSERT("rmem != NULL", (rmem != NULL)); + LWIP_ASSERT("rmem == MEM_ALIGN(rmem)", (rmem == LWIP_MEM_ALIGN(rmem))); +#if LWIP_STATS && MEM_STATS + rmem = (u8_t *)rmem - MEM_LIBC_STATSHELPER_SIZE; + MEM_STATS_DEC_USED_LOCKED(used, *(mem_size_t *)rmem); +#endif + mem_clib_free(rmem); +} + +#elif MEM_USE_POOLS + +/* lwIP heap implemented with different sized pools */ + +/** + * Allocate memory: determine the smallest pool that is big enough + * to contain an element of 'size' and get an element from that pool. + * + * @param size the size in bytes of the memory needed + * @return a pointer to the allocated memory or NULL if the pool is empty + */ +void * +mem_malloc(mem_size_t size) +{ + void *ret; + struct memp_malloc_helper *element = NULL; + memp_t poolnr; + mem_size_t required_size = size + LWIP_MEM_ALIGN_SIZE(sizeof(struct memp_malloc_helper)); + + for (poolnr = MEMP_POOL_FIRST; poolnr <= MEMP_POOL_LAST; poolnr = (memp_t)(poolnr + 1)) { + /* is this pool big enough to hold an element of the required size + plus a struct memp_malloc_helper that saves the pool this element came from? */ + if (required_size <= memp_pools[poolnr]->size) { + element = (struct memp_malloc_helper *)memp_malloc(poolnr); + if (element == NULL) { + /* No need to DEBUGF or ASSERT: This error is already taken care of in memp.c */ +#if MEM_USE_POOLS_TRY_BIGGER_POOL + /** Try a bigger pool if this one is empty! */ + if (poolnr < MEMP_POOL_LAST) { + continue; + } +#endif /* MEM_USE_POOLS_TRY_BIGGER_POOL */ + MEM_STATS_INC_LOCKED(err); + return NULL; + } + break; + } + } + if (poolnr > MEMP_POOL_LAST) { + LWIP_ASSERT("mem_malloc(): no pool is that big!", 0); + MEM_STATS_INC_LOCKED(err); + return NULL; + } + + /* save the pool number this element came from */ + element->poolnr = poolnr; + /* and return a pointer to the memory directly after the struct memp_malloc_helper */ + ret = (u8_t *)element + LWIP_MEM_ALIGN_SIZE(sizeof(struct memp_malloc_helper)); + +#if MEMP_OVERFLOW_CHECK || (LWIP_STATS && MEM_STATS) + /* truncating to u16_t is safe because struct memp_desc::size is u16_t */ + element->size = (u16_t)size; + MEM_STATS_INC_USED_LOCKED(used, element->size); +#endif /* MEMP_OVERFLOW_CHECK || (LWIP_STATS && MEM_STATS) */ +#if MEMP_OVERFLOW_CHECK + /* initialize unused memory (diff between requested size and selected pool's size) */ + memset((u8_t *)ret + size, 0xcd, memp_pools[poolnr]->size - size); +#endif /* MEMP_OVERFLOW_CHECK */ + return ret; +} + +/** + * Free memory previously allocated by mem_malloc. 
Loads the pool number + * and calls memp_free with that pool number to put the element back into + * its pool + * + * @param rmem the memory element to free + */ +void +mem_free(void *rmem) +{ + struct memp_malloc_helper *hmem; + + LWIP_ASSERT("rmem != NULL", (rmem != NULL)); + LWIP_ASSERT("rmem == MEM_ALIGN(rmem)", (rmem == LWIP_MEM_ALIGN(rmem))); + + /* get the original struct memp_malloc_helper */ + /* cast through void* to get rid of alignment warnings */ + hmem = (struct memp_malloc_helper *)(void *)((u8_t *)rmem - LWIP_MEM_ALIGN_SIZE(sizeof(struct memp_malloc_helper))); + + LWIP_ASSERT("hmem != NULL", (hmem != NULL)); + LWIP_ASSERT("hmem == MEM_ALIGN(hmem)", (hmem == LWIP_MEM_ALIGN(hmem))); + LWIP_ASSERT("hmem->poolnr < MEMP_MAX", (hmem->poolnr < MEMP_MAX)); + + MEM_STATS_DEC_USED_LOCKED(used, hmem->size); +#if MEMP_OVERFLOW_CHECK + { + u16_t i; + LWIP_ASSERT("MEM_USE_POOLS: invalid chunk size", + hmem->size <= memp_pools[hmem->poolnr]->size); + /* check that unused memory remained untouched (diff between requested size and selected pool's size) */ + for (i = hmem->size; i < memp_pools[hmem->poolnr]->size; i++) { + u8_t data = *((u8_t *)rmem + i); + LWIP_ASSERT("MEM_USE_POOLS: mem overflow detected", data == 0xcd); + } + } +#endif /* MEMP_OVERFLOW_CHECK */ + + /* and put it in the pool we saved earlier */ + memp_free(hmem->poolnr, hmem); +} + +#else /* MEM_USE_POOLS */ +/* lwIP replacement for your libc malloc() */ + +/** + * The heap is made up as a list of structs of this type. + * This does not have to be aligned since for getting its size, + * we only use the macro SIZEOF_STRUCT_MEM, which automatically aligns. + */ +struct mem { + /** index (-> ram[next]) of the next struct */ + mem_size_t next; + /** index (-> ram[prev]) of the previous struct */ + mem_size_t prev; + /** 1: this area is used; 0: this area is unused */ + u8_t used; +#if MEM_OVERFLOW_CHECK + /** this keeps track of the user allocation size for guard checks */ + mem_size_t user_size; +#endif +}; + +/** All allocated blocks will be MIN_SIZE bytes big, at least! + * MIN_SIZE can be overridden to suit your needs. Smaller values save space, + * larger values could prevent too small blocks to fragment the RAM too much. */ +#ifndef MIN_SIZE +#define MIN_SIZE 12 +#endif /* MIN_SIZE */ +/* some alignment macros: we define them here for better source code layout */ +#define MIN_SIZE_ALIGNED LWIP_MEM_ALIGN_SIZE(MIN_SIZE) +#define SIZEOF_STRUCT_MEM LWIP_MEM_ALIGN_SIZE(sizeof(struct mem)) +#define MEM_SIZE_ALIGNED LWIP_MEM_ALIGN_SIZE(MEM_SIZE) + +/** If you want to relocate the heap to external memory, simply define + * LWIP_RAM_HEAP_POINTER as a void-pointer to that location. + * If so, make sure the memory at that location is big enough (see below on + * how that space is calculated). */ +#ifndef LWIP_RAM_HEAP_POINTER +/** the heap. we need one struct mem at the end and some room for alignment */ +LWIP_DECLARE_MEMORY_ALIGNED(ram_heap, MEM_SIZE_ALIGNED + (2U * SIZEOF_STRUCT_MEM)); +#define LWIP_RAM_HEAP_POINTER ram_heap +#endif /* LWIP_RAM_HEAP_POINTER */ + +/** pointer to the heap (ram_heap): for alignment, ram is now a pointer instead of an array */ +static u8_t *ram; +/** the last entry, always unused! */ +static struct mem *ram_end; + +/** concurrent access protection */ +#if !NO_SYS +static sys_mutex_t mem_mutex; +#endif + +#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT + +static volatile u8_t mem_free_count; + +/* Allow mem_free from other (e.g. 
interrupt) context */ +#define LWIP_MEM_FREE_DECL_PROTECT() SYS_ARCH_DECL_PROTECT(lev_free) +#define LWIP_MEM_FREE_PROTECT() SYS_ARCH_PROTECT(lev_free) +#define LWIP_MEM_FREE_UNPROTECT() SYS_ARCH_UNPROTECT(lev_free) +#define LWIP_MEM_ALLOC_DECL_PROTECT() SYS_ARCH_DECL_PROTECT(lev_alloc) +#define LWIP_MEM_ALLOC_PROTECT() SYS_ARCH_PROTECT(lev_alloc) +#define LWIP_MEM_ALLOC_UNPROTECT() SYS_ARCH_UNPROTECT(lev_alloc) +#define LWIP_MEM_LFREE_VOLATILE volatile + +#else /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */ + +/* Protect the heap only by using a mutex */ +#define LWIP_MEM_FREE_DECL_PROTECT() +#define LWIP_MEM_FREE_PROTECT() sys_mutex_lock(&mem_mutex) +#define LWIP_MEM_FREE_UNPROTECT() sys_mutex_unlock(&mem_mutex) +/* mem_malloc is protected using mutex AND LWIP_MEM_ALLOC_PROTECT */ +#define LWIP_MEM_ALLOC_DECL_PROTECT() +#define LWIP_MEM_ALLOC_PROTECT() +#define LWIP_MEM_ALLOC_UNPROTECT() +#define LWIP_MEM_LFREE_VOLATILE + +#endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */ + +/** pointer to the lowest free block, this is used for faster search */ +static struct mem * LWIP_MEM_LFREE_VOLATILE lfree; + +#if MEM_SANITY_CHECK +static void mem_sanity(void); +#define MEM_SANITY() mem_sanity() +#else +#define MEM_SANITY() +#endif + +#if MEM_OVERFLOW_CHECK +static void +mem_overflow_init_element(struct mem *mem, mem_size_t user_size) +{ + void *p = (u8_t *)mem + SIZEOF_STRUCT_MEM + MEM_SANITY_OFFSET; + mem->user_size = user_size; + mem_overflow_init_raw(p, user_size); +} + +static void +mem_overflow_check_element(struct mem *mem) +{ + void *p = (u8_t *)mem + SIZEOF_STRUCT_MEM + MEM_SANITY_OFFSET; + mem_overflow_check_raw(p, mem->user_size, "heap", ""); +} +#else /* MEM_OVERFLOW_CHECK */ +#define mem_overflow_init_element(mem, size) +#define mem_overflow_check_element(mem) +#endif /* MEM_OVERFLOW_CHECK */ + +static struct mem * +ptr_to_mem(mem_size_t ptr) +{ + return (struct mem *)(void *)&ram[ptr]; +} + +static mem_size_t +mem_to_ptr(void *mem) +{ + return (mem_size_t)((u8_t *)mem - ram); +} + +/** + * "Plug holes" by combining adjacent empty struct mems. + * After this function is through, there should not exist + * one empty struct mem pointing to another empty struct mem. + * + * @param mem this points to a struct mem which just has been freed + * @internal this function is only called by mem_free() and mem_trim() + * + * This assumes access to the heap is protected by the calling function + * already. 
+ */ +static void +plug_holes(struct mem *mem) +{ + struct mem *nmem; + struct mem *pmem; + + LWIP_ASSERT("plug_holes: mem >= ram", (u8_t *)mem >= ram); + LWIP_ASSERT("plug_holes: mem < ram_end", (u8_t *)mem < (u8_t *)ram_end); + LWIP_ASSERT("plug_holes: mem->used == 0", mem->used == 0); + + /* plug hole forward */ + LWIP_ASSERT("plug_holes: mem->next <= MEM_SIZE_ALIGNED", mem->next <= MEM_SIZE_ALIGNED); + + nmem = ptr_to_mem(mem->next); + if (mem != nmem && nmem->used == 0 && (u8_t *)nmem != (u8_t *)ram_end) { + /* if mem->next is unused and not end of ram, combine mem and mem->next */ + if (lfree == nmem) { + lfree = mem; + } + mem->next = nmem->next; + if (nmem->next != MEM_SIZE_ALIGNED) { + ptr_to_mem(nmem->next)->prev = mem_to_ptr(mem); + } + } + + /* plug hole backward */ + pmem = ptr_to_mem(mem->prev); + if (pmem != mem && pmem->used == 0) { + /* if mem->prev is unused, combine mem and mem->prev */ + if (lfree == mem) { + lfree = pmem; + } + pmem->next = mem->next; + if (mem->next != MEM_SIZE_ALIGNED) { + ptr_to_mem(mem->next)->prev = mem_to_ptr(pmem); + } + } +} + +/** + * Zero the heap and initialize start, end and lowest-free + */ +void +mem_init(void) +{ + struct mem *mem; + + LWIP_ASSERT("Sanity check alignment", + (SIZEOF_STRUCT_MEM & (MEM_ALIGNMENT - 1)) == 0); + + /* align the heap */ + ram = (u8_t *)LWIP_MEM_ALIGN(LWIP_RAM_HEAP_POINTER); + /* initialize the start of the heap */ + mem = (struct mem *)(void *)ram; + mem->next = MEM_SIZE_ALIGNED; + mem->prev = 0; + mem->used = 0; + /* initialize the end of the heap */ + ram_end = ptr_to_mem(MEM_SIZE_ALIGNED); + ram_end->used = 1; + ram_end->next = MEM_SIZE_ALIGNED; + ram_end->prev = MEM_SIZE_ALIGNED; + MEM_SANITY(); + + /* initialize the lowest-free pointer to the start of the heap */ + lfree = (struct mem *)(void *)ram; + + MEM_STATS_AVAIL(avail, MEM_SIZE_ALIGNED); + + if (sys_mutex_new(&mem_mutex) != ERR_OK) { + LWIP_ASSERT("failed to create mem_mutex", 0); + } +} + +/* Check if a struct mem is correctly linked. + * If not, double-free is a possible reason. 
+ */ +static int +mem_link_valid(struct mem *mem) +{ + struct mem *nmem, *pmem; + mem_size_t rmem_idx; + rmem_idx = mem_to_ptr(mem); + nmem = ptr_to_mem(mem->next); + pmem = ptr_to_mem(mem->prev); + if ((mem->next > MEM_SIZE_ALIGNED) || (mem->prev > MEM_SIZE_ALIGNED) || + ((mem->prev != rmem_idx) && (pmem->next != rmem_idx)) || + ((nmem != ram_end) && (nmem->prev != rmem_idx))) { + return 0; + } + return 1; +} + +#if MEM_SANITY_CHECK +static void +mem_sanity(void) +{ + struct mem *mem; + u8_t last_used; + + /* begin with first element here */ + mem = (struct mem *)ram; + LWIP_ASSERT("heap element used valid", (mem->used == 0) || (mem->used == 1)); + last_used = mem->used; + LWIP_ASSERT("heap element prev ptr valid", mem->prev == 0); + LWIP_ASSERT("heap element next ptr valid", mem->next <= MEM_SIZE_ALIGNED); + LWIP_ASSERT("heap element next ptr aligned", LWIP_MEM_ALIGN(ptr_to_mem(mem->next) == ptr_to_mem(mem->next))); + + /* check all elements before the end of the heap */ + for (mem = ptr_to_mem(mem->next); + ((u8_t *)mem > ram) && (mem < ram_end); + mem = ptr_to_mem(mem->next)) { + LWIP_ASSERT("heap element aligned", LWIP_MEM_ALIGN(mem) == mem); + LWIP_ASSERT("heap element prev ptr valid", mem->prev <= MEM_SIZE_ALIGNED); + LWIP_ASSERT("heap element next ptr valid", mem->next <= MEM_SIZE_ALIGNED); + LWIP_ASSERT("heap element prev ptr aligned", LWIP_MEM_ALIGN(ptr_to_mem(mem->prev) == ptr_to_mem(mem->prev))); + LWIP_ASSERT("heap element next ptr aligned", LWIP_MEM_ALIGN(ptr_to_mem(mem->next) == ptr_to_mem(mem->next))); + + if (last_used == 0) { + /* 2 unused elements in a row? */ + LWIP_ASSERT("heap element unused?", mem->used == 1); + } else { + LWIP_ASSERT("heap element unused member", (mem->used == 0) || (mem->used == 1)); + } + + LWIP_ASSERT("heap element link valid", mem_link_valid(mem)); + + /* used/unused altering */ + last_used = mem->used; + } + LWIP_ASSERT("heap end ptr sanity", mem == ptr_to_mem(MEM_SIZE_ALIGNED)); + LWIP_ASSERT("heap element used valid", mem->used == 1); + LWIP_ASSERT("heap element prev ptr valid", mem->prev == MEM_SIZE_ALIGNED); + LWIP_ASSERT("heap element next ptr valid", mem->next == MEM_SIZE_ALIGNED); +} +#endif /* MEM_SANITY_CHECK */ + +/** + * Put a struct mem back on the heap + * + * @param rmem is the data portion of a struct mem as returned by a previous + * call to mem_malloc() + */ +void +mem_free(void *rmem) +{ + struct mem *mem; + LWIP_MEM_FREE_DECL_PROTECT(); + + if (rmem == NULL) { + LWIP_DEBUGF(MEM_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_SERIOUS, ("mem_free(p == NULL) was called.\n")); + return; + } + if ((((mem_ptr_t)rmem) & (MEM_ALIGNMENT - 1)) != 0) { + LWIP_MEM_ILLEGAL_FREE("mem_free: sanity check alignment"); + LWIP_DEBUGF(MEM_DEBUG | LWIP_DBG_LEVEL_SEVERE, ("mem_free: sanity check alignment\n")); + /* protect mem stats from concurrent access */ + MEM_STATS_INC_LOCKED(illegal); + return; + } + + /* Get the corresponding struct mem: */ + /* cast through void* to get rid of alignment warnings */ + mem = (struct mem *)(void *)((u8_t *)rmem - (SIZEOF_STRUCT_MEM + MEM_SANITY_OFFSET)); + + if ((u8_t *)mem < ram || (u8_t *)rmem + MIN_SIZE_ALIGNED > (u8_t *)ram_end) { + LWIP_MEM_ILLEGAL_FREE("mem_free: illegal memory"); + LWIP_DEBUGF(MEM_DEBUG | LWIP_DBG_LEVEL_SEVERE, ("mem_free: illegal memory\n")); + /* protect mem stats from concurrent access */ + MEM_STATS_INC_LOCKED(illegal); + return; + } +#if MEM_OVERFLOW_CHECK + mem_overflow_check_element(mem); +#endif + /* protect the heap from concurrent access */ + LWIP_MEM_FREE_PROTECT(); + /* mem has to 
be in a used state */ + if (!mem->used) { + LWIP_MEM_ILLEGAL_FREE("mem_free: illegal memory: double free"); + LWIP_MEM_FREE_UNPROTECT(); + LWIP_DEBUGF(MEM_DEBUG | LWIP_DBG_LEVEL_SEVERE, ("mem_free: illegal memory: double free?\n")); + /* protect mem stats from concurrent access */ + MEM_STATS_INC_LOCKED(illegal); + return; + } + + if (!mem_link_valid(mem)) { + LWIP_MEM_ILLEGAL_FREE("mem_free: illegal memory: non-linked: double free"); + LWIP_MEM_FREE_UNPROTECT(); + LWIP_DEBUGF(MEM_DEBUG | LWIP_DBG_LEVEL_SEVERE, ("mem_free: illegal memory: non-linked: double free?\n")); + /* protect mem stats from concurrent access */ + MEM_STATS_INC_LOCKED(illegal); + return; + } + + /* mem is now unused. */ + mem->used = 0; + + if (mem < lfree) { + /* the newly freed struct is now the lowest */ + lfree = mem; + } + + MEM_STATS_DEC_USED(used, mem->next - (mem_size_t)(((u8_t *)mem - ram))); + + /* finally, see if prev or next are free also */ + plug_holes(mem); + MEM_SANITY(); +#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT + mem_free_count = 1; +#endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */ + LWIP_MEM_FREE_UNPROTECT(); +} + +/** + * Shrink memory returned by mem_malloc(). + * + * @param rmem pointer to memory allocated by mem_malloc the is to be shrinked + * @param new_size required size after shrinking (needs to be smaller than or + * equal to the previous size) + * @return for compatibility reasons: is always == rmem, at the moment + * or NULL if newsize is > old size, in which case rmem is NOT touched + * or freed! + */ +void * +mem_trim(void *rmem, mem_size_t new_size) +{ + mem_size_t size, newsize; + mem_size_t ptr, ptr2; + struct mem *mem, *mem2; + /* use the FREE_PROTECT here: it protects with sem OR SYS_ARCH_PROTECT */ + LWIP_MEM_FREE_DECL_PROTECT(); + + /* Expand the size of the allocated memory region so that we can + adjust for alignment. */ + newsize = (mem_size_t)LWIP_MEM_ALIGN_SIZE(new_size); + if (newsize < MIN_SIZE_ALIGNED) { + /* every data block must be at least MIN_SIZE_ALIGNED long */ + newsize = MIN_SIZE_ALIGNED; + } +#if MEM_OVERFLOW_CHECK + newsize += MEM_SANITY_REGION_BEFORE_ALIGNED + MEM_SANITY_REGION_AFTER_ALIGNED; +#endif + if ((newsize > MEM_SIZE_ALIGNED) || (newsize < new_size)) { + return NULL; + } + + LWIP_ASSERT("mem_trim: legal memory", (u8_t *)rmem >= (u8_t *)ram && + (u8_t *)rmem < (u8_t *)ram_end); + + if ((u8_t *)rmem < (u8_t *)ram || (u8_t *)rmem >= (u8_t *)ram_end) { + LWIP_DEBUGF(MEM_DEBUG | LWIP_DBG_LEVEL_SEVERE, ("mem_trim: illegal memory\n")); + /* protect mem stats from concurrent access */ + MEM_STATS_INC_LOCKED(illegal); + return rmem; + } + /* Get the corresponding struct mem ... */ + /* cast through void* to get rid of alignment warnings */ + mem = (struct mem *)(void *)((u8_t *)rmem - (SIZEOF_STRUCT_MEM + MEM_SANITY_OFFSET)); +#if MEM_OVERFLOW_CHECK + mem_overflow_check_element(mem); +#endif + /* ... 
and its offset pointer */ + ptr = mem_to_ptr(mem); + + size = (mem_size_t)((mem_size_t)(mem->next - ptr) - (SIZEOF_STRUCT_MEM + MEM_SANITY_OVERHEAD)); + LWIP_ASSERT("mem_trim can only shrink memory", newsize <= size); + if (newsize > size) { + /* not supported */ + return NULL; + } + if (newsize == size) { + /* No change in size, simply return */ + return rmem; + } + + /* protect the heap from concurrent access */ + LWIP_MEM_FREE_PROTECT(); + + mem2 = ptr_to_mem(mem->next); + if (mem2->used == 0) { + /* The next struct is unused, we can simply move it at little */ + mem_size_t next; + LWIP_ASSERT("invalid next ptr", mem->next != MEM_SIZE_ALIGNED); + /* remember the old next pointer */ + next = mem2->next; + /* create new struct mem which is moved directly after the shrinked mem */ + ptr2 = (mem_size_t)(ptr + SIZEOF_STRUCT_MEM + newsize); + if (lfree == mem2) { + lfree = ptr_to_mem(ptr2); + } + mem2 = ptr_to_mem(ptr2); + mem2->used = 0; + /* restore the next pointer */ + mem2->next = next; + /* link it back to mem */ + mem2->prev = ptr; + /* link mem to it */ + mem->next = ptr2; + /* last thing to restore linked list: as we have moved mem2, + * let 'mem2->next->prev' point to mem2 again. but only if mem2->next is not + * the end of the heap */ + if (mem2->next != MEM_SIZE_ALIGNED) { + ptr_to_mem(mem2->next)->prev = ptr2; + } + MEM_STATS_DEC_USED(used, (size - newsize)); + /* no need to plug holes, we've already done that */ + } else if (newsize + SIZEOF_STRUCT_MEM + MIN_SIZE_ALIGNED <= size) { + /* Next struct is used but there's room for another struct mem with + * at least MIN_SIZE_ALIGNED of data. + * Old size ('size') must be big enough to contain at least 'newsize' plus a struct mem + * ('SIZEOF_STRUCT_MEM') with some data ('MIN_SIZE_ALIGNED'). + * @todo we could leave out MIN_SIZE_ALIGNED. We would create an empty + * region that couldn't hold data, but when mem->next gets freed, + * the 2 regions would be combined, resulting in more free memory */ + ptr2 = (mem_size_t)(ptr + SIZEOF_STRUCT_MEM + newsize); + LWIP_ASSERT("invalid next ptr", mem->next != MEM_SIZE_ALIGNED); + mem2 = ptr_to_mem(ptr2); + if (mem2 < lfree) { + lfree = mem2; + } + mem2->used = 0; + mem2->next = mem->next; + mem2->prev = ptr; + mem->next = ptr2; + if (mem2->next != MEM_SIZE_ALIGNED) { + ptr_to_mem(mem2->next)->prev = ptr2; + } + MEM_STATS_DEC_USED(used, (size - newsize)); + /* the original mem->next is used, so no need to plug holes! */ + } + /* else { + next struct mem is used but size between mem and mem2 is not big enough + to create another struct mem + -> don't do anyhting. + -> the remaining space stays unused since it is too small + } */ +#if MEM_OVERFLOW_CHECK + mem_overflow_init_element(mem, new_size); +#endif + MEM_SANITY(); +#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT + mem_free_count = 1; +#endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */ + LWIP_MEM_FREE_UNPROTECT(); + return rmem; +} + +/** + * Allocate a block of memory with a minimum of 'size' bytes. + * + * @param size_in is the minimum size of the requested block in bytes. + * @return pointer to allocated memory or NULL if no free memory was found. + * + * Note that the returned value will always be aligned (as defined by MEM_ALIGNMENT). 
+ */ +void * +mem_malloc(mem_size_t size_in) +{ + mem_size_t ptr, ptr2, size; + struct mem *mem, *mem2; +#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT + u8_t local_mem_free_count = 0; +#endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */ + LWIP_MEM_ALLOC_DECL_PROTECT(); + + if (size_in == 0) { + return NULL; + } + + /* Expand the size of the allocated memory region so that we can + adjust for alignment. */ + size = (mem_size_t)LWIP_MEM_ALIGN_SIZE(size_in); + if (size < MIN_SIZE_ALIGNED) { + /* every data block must be at least MIN_SIZE_ALIGNED long */ + size = MIN_SIZE_ALIGNED; + } +#if MEM_OVERFLOW_CHECK + size += MEM_SANITY_REGION_BEFORE_ALIGNED + MEM_SANITY_REGION_AFTER_ALIGNED; +#endif + if ((size > MEM_SIZE_ALIGNED) || (size < size_in)) { + return NULL; + } + + /* protect the heap from concurrent access */ + sys_mutex_lock(&mem_mutex); + LWIP_MEM_ALLOC_PROTECT(); +#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT + /* run as long as a mem_free disturbed mem_malloc or mem_trim */ + do { + local_mem_free_count = 0; +#endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */ + + /* Scan through the heap searching for a free block that is big enough, + * beginning with the lowest free block. + */ + for (ptr = mem_to_ptr(lfree); ptr < MEM_SIZE_ALIGNED - size; + ptr = ptr_to_mem(ptr)->next) { + mem = ptr_to_mem(ptr); +#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT + mem_free_count = 0; + LWIP_MEM_ALLOC_UNPROTECT(); + /* allow mem_free or mem_trim to run */ + LWIP_MEM_ALLOC_PROTECT(); + if (mem_free_count != 0) { + /* If mem_free or mem_trim have run, we have to restart since they + could have altered our current struct mem. */ + local_mem_free_count = 1; + break; + } +#endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */ + + if ((!mem->used) && + (mem->next - (ptr + SIZEOF_STRUCT_MEM)) >= size) { + /* mem is not used and at least perfect fit is possible: + * mem->next - (ptr + SIZEOF_STRUCT_MEM) gives us the 'user data size' of mem */ + + if (mem->next - (ptr + SIZEOF_STRUCT_MEM) >= (size + SIZEOF_STRUCT_MEM + MIN_SIZE_ALIGNED)) { + /* (in addition to the above, we test if another struct mem (SIZEOF_STRUCT_MEM) containing + * at least MIN_SIZE_ALIGNED of data also fits in the 'user data space' of 'mem') + * -> split large block, create empty remainder, + * remainder must be large enough to contain MIN_SIZE_ALIGNED data: if + * mem->next - (ptr + (2*SIZEOF_STRUCT_MEM)) == size, + * struct mem would fit in but no data between mem2 and mem2->next + * @todo we could leave out MIN_SIZE_ALIGNED. We would create an empty + * region that couldn't hold data, but when mem->next gets freed, + * the 2 regions would be combined, resulting in more free memory + */ + ptr2 = (mem_size_t)(ptr + SIZEOF_STRUCT_MEM + size); + LWIP_ASSERT("invalid next ptr",ptr2 != MEM_SIZE_ALIGNED); + /* create mem2 struct */ + mem2 = ptr_to_mem(ptr2); + mem2->used = 0; + mem2->next = mem->next; + mem2->prev = ptr; + /* and insert it between mem and mem->next */ + mem->next = ptr2; + mem->used = 1; + + if (mem2->next != MEM_SIZE_ALIGNED) { + ptr_to_mem(mem2->next)->prev = ptr2; + } + MEM_STATS_INC_USED(used, (size + SIZEOF_STRUCT_MEM)); + } else { + /* (a mem2 struct does no fit into the user data space of mem and mem->next will always + * be used at this point: if not we have 2 unused structs in a row, plug_holes should have + * take care of this). + * -> near fit or exact fit: do not split, no mem2 creation + * also can't move mem->next directly behind mem, since mem->next + * will always be used at this point! 
+ */ + mem->used = 1; + MEM_STATS_INC_USED(used, mem->next - mem_to_ptr(mem)); + } +#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT +mem_malloc_adjust_lfree: +#endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */ + if (mem == lfree) { + struct mem *cur = lfree; + /* Find next free block after mem and update lowest free pointer */ + while (cur->used && cur != ram_end) { +#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT + mem_free_count = 0; + LWIP_MEM_ALLOC_UNPROTECT(); + /* prevent high interrupt latency... */ + LWIP_MEM_ALLOC_PROTECT(); + if (mem_free_count != 0) { + /* If mem_free or mem_trim have run, we have to restart since they + could have altered our current struct mem or lfree. */ + goto mem_malloc_adjust_lfree; + } +#endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */ + cur = ptr_to_mem(cur->next); + } + lfree = cur; + LWIP_ASSERT("mem_malloc: !lfree->used", ((lfree == ram_end) || (!lfree->used))); + } + LWIP_MEM_ALLOC_UNPROTECT(); + sys_mutex_unlock(&mem_mutex); + LWIP_ASSERT("mem_malloc: allocated memory not above ram_end.", + (mem_ptr_t)mem + SIZEOF_STRUCT_MEM + size <= (mem_ptr_t)ram_end); + LWIP_ASSERT("mem_malloc: allocated memory properly aligned.", + ((mem_ptr_t)mem + SIZEOF_STRUCT_MEM) % MEM_ALIGNMENT == 0); + LWIP_ASSERT("mem_malloc: sanity check alignment", + (((mem_ptr_t)mem) & (MEM_ALIGNMENT - 1)) == 0); + +#if MEM_OVERFLOW_CHECK + mem_overflow_init_element(mem, size_in); +#endif + MEM_SANITY(); + return (u8_t *)mem + SIZEOF_STRUCT_MEM + MEM_SANITY_OFFSET; + } + } +#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT + /* if we got interrupted by a mem_free, try again */ + } while (local_mem_free_count != 0); +#endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */ + MEM_STATS_INC(err); + LWIP_MEM_ALLOC_UNPROTECT(); + sys_mutex_unlock(&mem_mutex); + LWIP_DEBUGF(MEM_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("mem_malloc: could not allocate %"S16_F" bytes\n", (s16_t)size)); + return NULL; +} + +#endif /* MEM_USE_POOLS */ + +#if MEM_LIBC_MALLOC && (!LWIP_STATS || !MEM_STATS) +void * +mem_calloc(mem_size_t count, mem_size_t size) +{ + return mem_clib_calloc(count, size); +} + +#else /* MEM_LIBC_MALLOC && (!LWIP_STATS || !MEM_STATS) */ +/** + * Contiguously allocates enough space for count objects that are size bytes + * of memory each and returns a pointer to the allocated memory. + * + * The allocated memory is filled with bytes of value zero. 
+ * + * @param count number of objects to allocate + * @param size size of the objects to allocate + * @return pointer to allocated memory / NULL pointer if there is an error + */ +void * +mem_calloc(mem_size_t count, mem_size_t size) +{ + void *p; + size_t alloc_size = (size_t)count * (size_t)size; + + if ((size_t)(mem_size_t)alloc_size != alloc_size) { + LWIP_DEBUGF(MEM_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("mem_calloc: could not allocate %"SZT_F" bytes\n", alloc_size)); + return NULL; + } + + /* allocate 'count' objects of size 'size' */ + p = mem_malloc((mem_size_t)alloc_size); + if (p) { + /* zero the memory */ + memset(p, 0, alloc_size); + } + return p; +} +#endif /* MEM_LIBC_MALLOC && (!LWIP_STATS || !MEM_STATS) */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/memp.c b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/memp.c new file mode 100644 index 0000000..18771f6 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/memp.c @@ -0,0 +1,447 @@ +/** + * @file + * Dynamic pool memory manager + * + * lwIP has dedicated pools for many structures (netconn, protocol control blocks, + * packet buffers, ...). All these pools are managed here. + * + * @defgroup mempool Memory pools + * @ingroup infrastructure + * Custom memory pools + + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
+ * + * Author: Adam Dunkels + * + */ + +#include "lwip/opt.h" + +#include "lwip/memp.h" +#include "lwip/sys.h" +#include "lwip/stats.h" + +#include + +/* Make sure we include everything we need for size calculation required by memp_std.h */ +#include "lwip/pbuf.h" +#include "lwip/raw.h" +#include "lwip/udp.h" +#include "lwip/tcp.h" +#include "lwip/priv/tcp_priv.h" +#include "lwip/altcp.h" +#include "lwip/ip4_frag.h" +#include "lwip/netbuf.h" +#include "lwip/api.h" +#include "lwip/priv/tcpip_priv.h" +#include "lwip/priv/api_msg.h" +#include "lwip/priv/sockets_priv.h" +#include "lwip/etharp.h" +#include "lwip/igmp.h" +#include "lwip/timeouts.h" +/* needed by default MEMP_NUM_SYS_TIMEOUT */ +#include "netif/ppp/ppp_opts.h" +#include "lwip/netdb.h" +#include "lwip/dns.h" +#include "lwip/priv/nd6_priv.h" +#include "lwip/ip6_frag.h" +#include "lwip/mld6.h" + +#define LWIP_MEMPOOL(name,num,size,desc) LWIP_MEMPOOL_DECLARE(name,num,size,desc) +#include "lwip/priv/memp_std.h" + +const struct memp_desc *const memp_pools[MEMP_MAX] = { +#define LWIP_MEMPOOL(name,num,size,desc) &memp_ ## name, +#include "lwip/priv/memp_std.h" +}; + +#ifdef LWIP_HOOK_FILENAME +#include LWIP_HOOK_FILENAME +#endif + +#if MEMP_MEM_MALLOC && MEMP_OVERFLOW_CHECK >= 2 +#undef MEMP_OVERFLOW_CHECK +/* MEMP_OVERFLOW_CHECK >= 2 does not work with MEMP_MEM_MALLOC, use 1 instead */ +#define MEMP_OVERFLOW_CHECK 1 +#endif + +#if MEMP_SANITY_CHECK && !MEMP_MEM_MALLOC +/** + * Check that memp-lists don't form a circle, using "Floyd's cycle-finding algorithm". + */ +static int +memp_sanity(const struct memp_desc *desc) +{ + struct memp *t, *h; + + t = *desc->tab; + if (t != NULL) { + for (h = t->next; (t != NULL) && (h != NULL); t = t->next, + h = ((h->next != NULL) ? h->next->next : NULL)) { + if (t == h) { + return 0; + } + } + } + + return 1; +} +#endif /* MEMP_SANITY_CHECK && !MEMP_MEM_MALLOC */ + +#if MEMP_OVERFLOW_CHECK +/** + * Check if a memp element was victim of an overflow or underflow + * (e.g. the restricted area after/before it has been altered) + * + * @param p the memp element to check + * @param desc the pool p comes from + */ +static void +memp_overflow_check_element(struct memp *p, const struct memp_desc *desc) +{ + mem_overflow_check_raw((u8_t *)p + MEMP_SIZE, desc->size, "pool ", desc->desc); +} + +/** + * Initialize the restricted area of on memp element. + */ +static void +memp_overflow_init_element(struct memp *p, const struct memp_desc *desc) +{ + mem_overflow_init_raw((u8_t *)p + MEMP_SIZE, desc->size); +} + +#if MEMP_OVERFLOW_CHECK >= 2 +/** + * Do an overflow check for all elements in every pool. + * + * @see memp_overflow_check_element for a description of the check + */ +static void +memp_overflow_check_all(void) +{ + u16_t i, j; + struct memp *p; + SYS_ARCH_DECL_PROTECT(old_level); + SYS_ARCH_PROTECT(old_level); + + for (i = 0; i < MEMP_MAX; ++i) { + p = (struct memp *)LWIP_MEM_ALIGN(memp_pools[i]->base); + for (j = 0; j < memp_pools[i]->num; ++j) { + memp_overflow_check_element(p, memp_pools[i]); + p = LWIP_ALIGNMENT_CAST(struct memp *, ((u8_t *)p + MEMP_SIZE + memp_pools[i]->size + MEM_SANITY_REGION_AFTER_ALIGNED)); + } + } + SYS_ARCH_UNPROTECT(old_level); +} +#endif /* MEMP_OVERFLOW_CHECK >= 2 */ +#endif /* MEMP_OVERFLOW_CHECK */ + +/** + * Initialize custom memory pool. 
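+ *
+ * Illustrative usage sketch (editorial addition, not part of the upstream
+ * lwIP sources; the pool name "my_pool" and its sizing of 4 elements of
+ * 64 bytes each are arbitrary). A private pool declared at file scope with
+ * LWIP_MEMPOOL_DECLARE can then be managed with memp_init_pool,
+ * memp_malloc_pool and memp_free_pool:
+ *
+ *   LWIP_MEMPOOL_DECLARE(my_pool, 4, 64, "my_pool")
+ *
+ *   void my_pool_example(void)
+ *   {
+ *     void *elem;
+ *     memp_init_pool(&memp_my_pool);
+ *     elem = memp_malloc_pool(&memp_my_pool);
+ *     if (elem != NULL) {
+ *       memp_free_pool(&memp_my_pool, elem);
+ *     }
+ *   }
+ *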
+ * Related functions: memp_malloc_pool, memp_free_pool + * + * @param desc pool to initialize + */ +void +memp_init_pool(const struct memp_desc *desc) +{ +#if MEMP_MEM_MALLOC + LWIP_UNUSED_ARG(desc); +#else + int i; + struct memp *memp; + + *desc->tab = NULL; + memp = (struct memp *)LWIP_MEM_ALIGN(desc->base); +#if MEMP_MEM_INIT + /* force memset on pool memory */ + memset(memp, 0, (size_t)desc->num * (MEMP_SIZE + desc->size +#if MEMP_OVERFLOW_CHECK + + MEM_SANITY_REGION_AFTER_ALIGNED +#endif + )); +#endif + /* create a linked list of memp elements */ + for (i = 0; i < desc->num; ++i) { + memp->next = *desc->tab; + *desc->tab = memp; +#if MEMP_OVERFLOW_CHECK + memp_overflow_init_element(memp, desc); +#endif /* MEMP_OVERFLOW_CHECK */ + /* cast through void* to get rid of alignment warnings */ + memp = (struct memp *)(void *)((u8_t *)memp + MEMP_SIZE + desc->size +#if MEMP_OVERFLOW_CHECK + + MEM_SANITY_REGION_AFTER_ALIGNED +#endif + ); + } +#if MEMP_STATS + desc->stats->avail = desc->num; +#endif /* MEMP_STATS */ +#endif /* !MEMP_MEM_MALLOC */ + +#if MEMP_STATS && (defined(LWIP_DEBUG) || LWIP_STATS_DISPLAY) + desc->stats->name = desc->desc; +#endif /* MEMP_STATS && (defined(LWIP_DEBUG) || LWIP_STATS_DISPLAY) */ +} + +/** + * Initializes lwIP built-in pools. + * Related functions: memp_malloc, memp_free + * + * Carves out memp_memory into linked lists for each pool-type. + */ +void +memp_init(void) +{ + u16_t i; + + /* for every pool: */ + for (i = 0; i < LWIP_ARRAYSIZE(memp_pools); i++) { + memp_init_pool(memp_pools[i]); + +#if LWIP_STATS && MEMP_STATS + lwip_stats.memp[i] = memp_pools[i]->stats; +#endif + } + +#if MEMP_OVERFLOW_CHECK >= 2 + /* check everything a first time to see if it worked */ + memp_overflow_check_all(); +#endif /* MEMP_OVERFLOW_CHECK >= 2 */ +} + +static void * +#if !MEMP_OVERFLOW_CHECK +do_memp_malloc_pool(const struct memp_desc *desc) +#else +do_memp_malloc_pool_fn(const struct memp_desc *desc, const char *file, const int line) +#endif +{ + struct memp *memp; + SYS_ARCH_DECL_PROTECT(old_level); + +#if MEMP_MEM_MALLOC + memp = (struct memp *)mem_malloc(MEMP_SIZE + MEMP_ALIGN_SIZE(desc->size)); + SYS_ARCH_PROTECT(old_level); +#else /* MEMP_MEM_MALLOC */ + SYS_ARCH_PROTECT(old_level); + + memp = *desc->tab; +#endif /* MEMP_MEM_MALLOC */ + + if (memp != NULL) { +#if !MEMP_MEM_MALLOC +#if MEMP_OVERFLOW_CHECK == 1 + memp_overflow_check_element(memp, desc); +#endif /* MEMP_OVERFLOW_CHECK */ + + *desc->tab = memp->next; +#if MEMP_OVERFLOW_CHECK + memp->next = NULL; +#endif /* MEMP_OVERFLOW_CHECK */ +#endif /* !MEMP_MEM_MALLOC */ +#if MEMP_OVERFLOW_CHECK + memp->file = file; + memp->line = line; +#if MEMP_MEM_MALLOC + memp_overflow_init_element(memp, desc); +#endif /* MEMP_MEM_MALLOC */ +#endif /* MEMP_OVERFLOW_CHECK */ + LWIP_ASSERT("memp_malloc: memp properly aligned", + ((mem_ptr_t)memp % MEM_ALIGNMENT) == 0); +#if MEMP_STATS + desc->stats->used++; + if (desc->stats->used > desc->stats->max) { + desc->stats->max = desc->stats->used; + } +#endif + SYS_ARCH_UNPROTECT(old_level); + /* cast through u8_t* to get rid of alignment warnings */ + return ((u8_t *)memp + MEMP_SIZE); + } else { +#if MEMP_STATS + desc->stats->err++; +#endif + SYS_ARCH_UNPROTECT(old_level); + LWIP_DEBUGF(MEMP_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("memp_malloc: out of memory in pool %s\n", desc->desc)); + } + + return NULL; +} + +/** + * Get an element from a custom pool. 
+ * + * @param desc the pool to get an element from + * + * @return a pointer to the allocated memory or a NULL pointer on error + */ +void * +#if !MEMP_OVERFLOW_CHECK +memp_malloc_pool(const struct memp_desc *desc) +#else +memp_malloc_pool_fn(const struct memp_desc *desc, const char *file, const int line) +#endif +{ + LWIP_ASSERT("invalid pool desc", desc != NULL); + if (desc == NULL) { + return NULL; + } + +#if !MEMP_OVERFLOW_CHECK + return do_memp_malloc_pool(desc); +#else + return do_memp_malloc_pool_fn(desc, file, line); +#endif +} + +/** + * Get an element from a specific pool. + * + * @param type the pool to get an element from + * + * @return a pointer to the allocated memory or a NULL pointer on error + */ +void * +#if !MEMP_OVERFLOW_CHECK +memp_malloc(memp_t type) +#else +memp_malloc_fn(memp_t type, const char *file, const int line) +#endif +{ + void *memp; + LWIP_ERROR("memp_malloc: type < MEMP_MAX", (type < MEMP_MAX), return NULL;); + +#if MEMP_OVERFLOW_CHECK >= 2 + memp_overflow_check_all(); +#endif /* MEMP_OVERFLOW_CHECK >= 2 */ + +#if !MEMP_OVERFLOW_CHECK + memp = do_memp_malloc_pool(memp_pools[type]); +#else + memp = do_memp_malloc_pool_fn(memp_pools[type], file, line); +#endif + + return memp; +} + +static void +do_memp_free_pool(const struct memp_desc *desc, void *mem) +{ + struct memp *memp; + SYS_ARCH_DECL_PROTECT(old_level); + + LWIP_ASSERT("memp_free: mem properly aligned", + ((mem_ptr_t)mem % MEM_ALIGNMENT) == 0); + + /* cast through void* to get rid of alignment warnings */ + memp = (struct memp *)(void *)((u8_t *)mem - MEMP_SIZE); + + SYS_ARCH_PROTECT(old_level); + +#if MEMP_OVERFLOW_CHECK == 1 + memp_overflow_check_element(memp, desc); +#endif /* MEMP_OVERFLOW_CHECK */ + +#if MEMP_STATS + desc->stats->used--; +#endif + +#if MEMP_MEM_MALLOC + LWIP_UNUSED_ARG(desc); + SYS_ARCH_UNPROTECT(old_level); + mem_free(memp); +#else /* MEMP_MEM_MALLOC */ + memp->next = *desc->tab; + *desc->tab = memp; + +#if MEMP_SANITY_CHECK + LWIP_ASSERT("memp sanity", memp_sanity(desc)); +#endif /* MEMP_SANITY_CHECK */ + + SYS_ARCH_UNPROTECT(old_level); +#endif /* !MEMP_MEM_MALLOC */ +} + +/** + * Put a custom pool element back into its pool. + * + * @param desc the pool where to put mem + * @param mem the memp element to free + */ +void +memp_free_pool(const struct memp_desc *desc, void *mem) +{ + LWIP_ASSERT("invalid pool desc", desc != NULL); + if ((desc == NULL) || (mem == NULL)) { + return; + } + + do_memp_free_pool(desc, mem); +} + +/** + * Put an element back into its pool. 
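/* [Editor's note: illustrative sketch, not part of the upstream lwIP sources.]
 * One way to drive the custom-pool routines above (memp_init_pool,
 * memp_malloc_pool, memp_free_pool), assuming the standard lwIP 2.x helper
 * macros from "lwip/memp.h" (LWIP_MEMPOOL_DECLARE/INIT/ALLOC/FREE). The pool
 * name and 'my_msg_t' are hypothetical.
 */
#include "lwip/memp.h"

typedef struct { u8_t kind; u8_t payload[32]; } my_msg_t;

LWIP_MEMPOOL_DECLARE(MY_MSG_POOL, 16, sizeof(my_msg_t), "MY_MSG_POOL")

void my_pool_demo(void)
{
  my_msg_t *m;

  LWIP_MEMPOOL_INIT(MY_MSG_POOL);                   /* calls memp_init_pool()   */
  m = (my_msg_t *)LWIP_MEMPOOL_ALLOC(MY_MSG_POOL);  /* calls memp_malloc_pool() */
  if (m != NULL) {
    m->kind = 1;
    LWIP_MEMPOOL_FREE(MY_MSG_POOL, m);              /* calls memp_free_pool()   */
  }
}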
+ * + * @param type the pool where to put mem + * @param mem the memp element to free + */ +void +memp_free(memp_t type, void *mem) +{ +#ifdef LWIP_HOOK_MEMP_AVAILABLE + struct memp *old_first; +#endif + + LWIP_ERROR("memp_free: type < MEMP_MAX", (type < MEMP_MAX), return;); + + if (mem == NULL) { + return; + } + +#if MEMP_OVERFLOW_CHECK >= 2 + memp_overflow_check_all(); +#endif /* MEMP_OVERFLOW_CHECK >= 2 */ + +#ifdef LWIP_HOOK_MEMP_AVAILABLE + old_first = *memp_pools[type]->tab; +#endif + + do_memp_free_pool(memp_pools[type], mem); + +#ifdef LWIP_HOOK_MEMP_AVAILABLE + if (old_first == NULL) { + LWIP_HOOK_MEMP_AVAILABLE(type); + } +#endif +} diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/netif.c b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/netif.c new file mode 100644 index 0000000..a19018c --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/netif.c @@ -0,0 +1,1795 @@ +/** + * @file + * lwIP network interface abstraction + * + * @defgroup netif Network interface (NETIF) + * @ingroup callbackstyle_api + * + * @defgroup netif_ip4 IPv4 address handling + * @ingroup netif + * + * @defgroup netif_ip6 IPv6 address handling + * @ingroup netif + * + * @defgroup netif_cd Client data handling + * Store data (void*) on a netif for application usage. + * @see @ref LWIP_NUM_NETIF_CLIENT_DATA + * @ingroup netif + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
+ * + * Author: Adam Dunkels + */ + +#include "lwip/opt.h" + +#include /* memset */ +#include /* atoi */ + +#include "lwip/def.h" +#include "lwip/ip_addr.h" +#include "lwip/ip6_addr.h" +#include "lwip/netif.h" +#include "lwip/priv/tcp_priv.h" +#include "lwip/udp.h" +#include "lwip/priv/raw_priv.h" +#include "lwip/snmp.h" +#include "lwip/igmp.h" +#include "lwip/etharp.h" +#include "lwip/stats.h" +#include "lwip/sys.h" +#include "lwip/ip.h" +#if ENABLE_LOOPBACK +#if LWIP_NETIF_LOOPBACK_MULTITHREADING +#include "lwip/tcpip.h" +#endif /* LWIP_NETIF_LOOPBACK_MULTITHREADING */ +#endif /* ENABLE_LOOPBACK */ + +#include "netif/ethernet.h" + +#if LWIP_AUTOIP +#include "lwip/autoip.h" +#endif /* LWIP_AUTOIP */ +#if LWIP_DHCP +#include "lwip/dhcp.h" +#endif /* LWIP_DHCP */ +#if LWIP_IPV6_DHCP6 +#include "lwip/dhcp6.h" +#endif /* LWIP_IPV6_DHCP6 */ +#if LWIP_IPV6_MLD +#include "lwip/mld6.h" +#endif /* LWIP_IPV6_MLD */ +#if LWIP_IPV6 +#include "lwip/nd6.h" +#endif + +#if LWIP_NETIF_STATUS_CALLBACK +#define NETIF_STATUS_CALLBACK(n) do{ if (n->status_callback) { (n->status_callback)(n); }}while(0) +#else +#define NETIF_STATUS_CALLBACK(n) +#endif /* LWIP_NETIF_STATUS_CALLBACK */ + +#if LWIP_NETIF_LINK_CALLBACK +#define NETIF_LINK_CALLBACK(n) do{ if (n->link_callback) { (n->link_callback)(n); }}while(0) +#else +#define NETIF_LINK_CALLBACK(n) +#endif /* LWIP_NETIF_LINK_CALLBACK */ + +#if LWIP_NETIF_EXT_STATUS_CALLBACK +static netif_ext_callback_t *ext_callback; +#endif + +#if !LWIP_SINGLE_NETIF +struct netif *netif_list; +#endif /* !LWIP_SINGLE_NETIF */ +struct netif *netif_default; + +#define netif_index_to_num(index) ((index) - 1) +static u8_t netif_num; + +#if LWIP_NUM_NETIF_CLIENT_DATA > 0 +static u8_t netif_client_id; +#endif + +#define NETIF_REPORT_TYPE_IPV4 0x01 +#define NETIF_REPORT_TYPE_IPV6 0x02 +static void netif_issue_reports(struct netif *netif, u8_t report_type); + +#if LWIP_IPV6 +static err_t netif_null_output_ip6(struct netif *netif, struct pbuf *p, const ip6_addr_t *ipaddr); +#endif /* LWIP_IPV6 */ +#if LWIP_IPV4 +static err_t netif_null_output_ip4(struct netif *netif, struct pbuf *p, const ip4_addr_t *ipaddr); +#endif /* LWIP_IPV4 */ + +#if LWIP_HAVE_LOOPIF +#if LWIP_IPV4 +static err_t netif_loop_output_ipv4(struct netif *netif, struct pbuf *p, const ip4_addr_t *addr); +#endif +#if LWIP_IPV6 +static err_t netif_loop_output_ipv6(struct netif *netif, struct pbuf *p, const ip6_addr_t *addr); +#endif + + +static struct netif loop_netif; + +/** + * Initialize a lwip network interface structure for a loopback interface + * + * @param netif the lwip network interface structure for this loopif + * @return ERR_OK if the loopif is initialized + * ERR_MEM if private data couldn't be allocated + */ +static err_t +netif_loopif_init(struct netif *netif) +{ + LWIP_ASSERT("netif_loopif_init: invalid netif", netif != NULL); + + /* initialize the snmp variables and counters inside the struct netif + * ifSpeed: no assumption can be made! 
+ */ + MIB2_INIT_NETIF(netif, snmp_ifType_softwareLoopback, 0); + + netif->name[0] = 'l'; + netif->name[1] = 'o'; +#if LWIP_IPV4 + netif->output = netif_loop_output_ipv4; +#endif +#if LWIP_IPV6 + netif->output_ip6 = netif_loop_output_ipv6; +#endif +#if LWIP_LOOPIF_MULTICAST + netif_set_flags(netif, NETIF_FLAG_IGMP); +#endif + NETIF_SET_CHECKSUM_CTRL(netif, NETIF_CHECKSUM_DISABLE_ALL); + return ERR_OK; +} +#endif /* LWIP_HAVE_LOOPIF */ + +void +netif_init(void) +{ +#if LWIP_HAVE_LOOPIF +#if LWIP_IPV4 +#define LOOPIF_ADDRINIT &loop_ipaddr, &loop_netmask, &loop_gw, + ip4_addr_t loop_ipaddr, loop_netmask, loop_gw; + IP4_ADDR(&loop_gw, 127, 0, 0, 1); + IP4_ADDR(&loop_ipaddr, 127, 0, 0, 1); + IP4_ADDR(&loop_netmask, 255, 0, 0, 0); +#else /* LWIP_IPV4 */ +#define LOOPIF_ADDRINIT +#endif /* LWIP_IPV4 */ + +#if NO_SYS + netif_add(&loop_netif, LOOPIF_ADDRINIT NULL, netif_loopif_init, ip_input); +#else /* NO_SYS */ + netif_add(&loop_netif, LOOPIF_ADDRINIT NULL, netif_loopif_init, tcpip_input); +#endif /* NO_SYS */ + +#if LWIP_IPV6 + IP_ADDR6_HOST(loop_netif.ip6_addr, 0, 0, 0, 0x00000001UL); + loop_netif.ip6_addr_state[0] = IP6_ADDR_VALID; +#endif /* LWIP_IPV6 */ + + netif_set_link_up(&loop_netif); + netif_set_up(&loop_netif); + +#endif /* LWIP_HAVE_LOOPIF */ +} + +/** + * @ingroup lwip_nosys + * Forwards a received packet for input processing with + * ethernet_input() or ip_input() depending on netif flags. + * Don't call directly, pass to netif_add() and call + * netif->input(). + * Only works if the netif driver correctly sets + * NETIF_FLAG_ETHARP and/or NETIF_FLAG_ETHERNET flag! + */ +err_t +netif_input(struct pbuf *p, struct netif *inp) +{ + LWIP_ASSERT_CORE_LOCKED(); + + LWIP_ASSERT("netif_input: invalid pbuf", p != NULL); + LWIP_ASSERT("netif_input: invalid netif", inp != NULL); + +#if LWIP_ETHERNET + if (inp->flags & (NETIF_FLAG_ETHARP | NETIF_FLAG_ETHERNET)) { + return ethernet_input(p, inp); + } else +#endif /* LWIP_ETHERNET */ + return ip_input(p, inp); +} + +/** + * @ingroup netif + * Add a network interface to the list of lwIP netifs. + * + * Same as @ref netif_add but without IPv4 addresses + */ +struct netif * +netif_add_noaddr(struct netif *netif, void *state, netif_init_fn init, netif_input_fn input) +{ + return netif_add(netif, +#if LWIP_IPV4 + NULL, NULL, NULL, +#endif /* LWIP_IPV4*/ + state, init, input); +} + +/** + * @ingroup netif + * Add a network interface to the list of lwIP netifs. + * + * @param netif a pre-allocated netif structure + * @param ipaddr IP address for the new netif + * @param netmask network mask for the new netif + * @param gw default gateway IP address for the new netif + * @param state opaque data passed to the new netif + * @param init callback function that initializes the interface + * @param input callback function that is called to pass + * ingress packets up in the protocol layer stack.\n + * It is recommended to use a function that passes the input directly + * to the stack (netif_input(), NO_SYS=1 mode) or via sending a + * message to TCPIP thread (tcpip_input(), NO_SYS=0 mode).\n + * These functions use netif flags NETIF_FLAG_ETHARP and NETIF_FLAG_ETHERNET + * to decide whether to forward to ethernet_input() or ip_input(). 
+ * In other words, the functions only work when the netif + * driver is implemented correctly!\n + * Most members of struct netif should be be initialized by the + * netif init function = netif driver (init parameter of this function).\n + * IPv6: Don't forget to call netif_create_ip6_linklocal_address() after + * setting the MAC address in struct netif.hwaddr + * (IPv6 requires a link-local address). + * + * @return netif, or NULL if failed. + */ +struct netif * +netif_add(struct netif *netif, +#if LWIP_IPV4 + const ip4_addr_t *ipaddr, const ip4_addr_t *netmask, const ip4_addr_t *gw, +#endif /* LWIP_IPV4 */ + void *state, netif_init_fn init, netif_input_fn input) +{ +#if LWIP_IPV6 + s8_t i; +#endif + + LWIP_ASSERT_CORE_LOCKED(); + +#if LWIP_SINGLE_NETIF + if (netif_default != NULL) { + LWIP_ASSERT("single netif already set", 0); + return NULL; + } +#endif + + LWIP_ERROR("netif_add: invalid netif", netif != NULL, return NULL); + LWIP_ERROR("netif_add: No init function given", init != NULL, return NULL); + +#if LWIP_IPV4 + if (ipaddr == NULL) { + ipaddr = ip_2_ip4(IP4_ADDR_ANY); + } + if (netmask == NULL) { + netmask = ip_2_ip4(IP4_ADDR_ANY); + } + if (gw == NULL) { + gw = ip_2_ip4(IP4_ADDR_ANY); + } + + /* reset new interface configuration state */ + ip_addr_set_zero_ip4(&netif->ip_addr); + ip_addr_set_zero_ip4(&netif->netmask); + ip_addr_set_zero_ip4(&netif->gw); + netif->output = netif_null_output_ip4; +#endif /* LWIP_IPV4 */ +#if LWIP_IPV6 + for (i = 0; i < LWIP_IPV6_NUM_ADDRESSES; i++) { + ip_addr_set_zero_ip6(&netif->ip6_addr[i]); + netif->ip6_addr_state[i] = IP6_ADDR_INVALID; +#if LWIP_IPV6_ADDRESS_LIFETIMES + netif->ip6_addr_valid_life[i] = IP6_ADDR_LIFE_STATIC; + netif->ip6_addr_pref_life[i] = IP6_ADDR_LIFE_STATIC; +#endif /* LWIP_IPV6_ADDRESS_LIFETIMES */ + } + netif->output_ip6 = netif_null_output_ip6; +#endif /* LWIP_IPV6 */ + NETIF_SET_CHECKSUM_CTRL(netif, NETIF_CHECKSUM_ENABLE_ALL); + netif->mtu = 0; + netif->flags = 0; +#ifdef netif_get_client_data + memset(netif->client_data, 0, sizeof(netif->client_data)); +#endif /* LWIP_NUM_NETIF_CLIENT_DATA */ +#if LWIP_IPV6 +#if LWIP_IPV6_AUTOCONFIG + /* IPv6 address autoconfiguration not enabled by default */ + netif->ip6_autoconfig_enabled = 0; +#endif /* LWIP_IPV6_AUTOCONFIG */ + nd6_restart_netif(netif); +#endif /* LWIP_IPV6 */ +#if LWIP_NETIF_STATUS_CALLBACK + netif->status_callback = NULL; +#endif /* LWIP_NETIF_STATUS_CALLBACK */ +#if LWIP_NETIF_LINK_CALLBACK + netif->link_callback = NULL; +#endif /* LWIP_NETIF_LINK_CALLBACK */ +#if LWIP_IGMP + netif->igmp_mac_filter = NULL; +#endif /* LWIP_IGMP */ +#if LWIP_IPV6 && LWIP_IPV6_MLD + netif->mld_mac_filter = NULL; +#endif /* LWIP_IPV6 && LWIP_IPV6_MLD */ +#if ENABLE_LOOPBACK + netif->loop_first = NULL; + netif->loop_last = NULL; +#endif /* ENABLE_LOOPBACK */ + + /* remember netif specific state information data */ + netif->state = state; + netif->num = netif_num; + netif->input = input; + + NETIF_RESET_HINTS(netif); +#if ENABLE_LOOPBACK && LWIP_LOOPBACK_MAX_PBUFS + netif->loop_cnt_current = 0; +#endif /* ENABLE_LOOPBACK && LWIP_LOOPBACK_MAX_PBUFS */ + +#if LWIP_IPV4 + netif_set_addr(netif, ipaddr, netmask, gw); +#endif /* LWIP_IPV4 */ + + /* call user specified initialization function for netif */ + if (init(netif) != ERR_OK) { + return NULL; + } +#if LWIP_IPV6 && LWIP_ND6_ALLOW_RA_UPDATES + /* Initialize the MTU for IPv6 to the one set by the netif driver. + This can be updated later by RA. 
*/ + netif->mtu6 = netif->mtu; +#endif /* LWIP_IPV6 && LWIP_ND6_ALLOW_RA_UPDATES */ + +#if !LWIP_SINGLE_NETIF + /* Assign a unique netif number in the range [0..254], so that (num+1) can + serve as an interface index that fits in a u8_t. + We assume that the new netif has not yet been added to the list here. + This algorithm is O(n^2), but that should be OK for lwIP. + */ + { + struct netif *netif2; + int num_netifs; + do { + if (netif->num == 255) { + netif->num = 0; + } + num_netifs = 0; + for (netif2 = netif_list; netif2 != NULL; netif2 = netif2->next) { + LWIP_ASSERT("netif already added", netif2 != netif); + num_netifs++; + LWIP_ASSERT("too many netifs, max. supported number is 255", num_netifs <= 255); + if (netif2->num == netif->num) { + netif->num++; + break; + } + } + } while (netif2 != NULL); + } + if (netif->num == 254) { + netif_num = 0; + } else { + netif_num = (u8_t)(netif->num + 1); + } + + /* add this netif to the list */ + netif->next = netif_list; + netif_list = netif; +#endif /* "LWIP_SINGLE_NETIF */ + mib2_netif_added(netif); + +#if LWIP_IGMP + /* start IGMP processing */ + if (netif->flags & NETIF_FLAG_IGMP) { + igmp_start(netif); + } +#endif /* LWIP_IGMP */ + + LWIP_DEBUGF(NETIF_DEBUG, ("netif: added interface %c%c IP", + netif->name[0], netif->name[1])); +#if LWIP_IPV4 + LWIP_DEBUGF(NETIF_DEBUG, (" addr ")); + ip4_addr_debug_print(NETIF_DEBUG, ipaddr); + LWIP_DEBUGF(NETIF_DEBUG, (" netmask ")); + ip4_addr_debug_print(NETIF_DEBUG, netmask); + LWIP_DEBUGF(NETIF_DEBUG, (" gw ")); + ip4_addr_debug_print(NETIF_DEBUG, gw); +#endif /* LWIP_IPV4 */ + LWIP_DEBUGF(NETIF_DEBUG, ("\n")); + + netif_invoke_ext_callback(netif, LWIP_NSC_NETIF_ADDED, NULL); + + return netif; +} + +static void +netif_do_ip_addr_changed(const ip_addr_t *old_addr, const ip_addr_t *new_addr) +{ +#if LWIP_TCP + tcp_netif_ip_addr_changed(old_addr, new_addr); +#endif /* LWIP_TCP */ +#if LWIP_UDP + udp_netif_ip_addr_changed(old_addr, new_addr); +#endif /* LWIP_UDP */ +#if LWIP_RAW + raw_netif_ip_addr_changed(old_addr, new_addr); +#endif /* LWIP_RAW */ +} + +#if LWIP_IPV4 +static int +netif_do_set_ipaddr(struct netif *netif, const ip4_addr_t *ipaddr, ip_addr_t *old_addr) +{ + LWIP_ASSERT("invalid pointer", ipaddr != NULL); + LWIP_ASSERT("invalid pointer", old_addr != NULL); + + /* address is actually being changed? 
*/ + if (ip4_addr_cmp(ipaddr, netif_ip4_addr(netif)) == 0) { + ip_addr_t new_addr; + *ip_2_ip4(&new_addr) = *ipaddr; + IP_SET_TYPE_VAL(new_addr, IPADDR_TYPE_V4); + + ip_addr_copy(*old_addr, *netif_ip_addr4(netif)); + + LWIP_DEBUGF(NETIF_DEBUG | LWIP_DBG_STATE, ("netif_set_ipaddr: netif address being changed\n")); + netif_do_ip_addr_changed(old_addr, &new_addr); + + mib2_remove_ip4(netif); + mib2_remove_route_ip4(0, netif); + /* set new IP address to netif */ + ip4_addr_set(ip_2_ip4(&netif->ip_addr), ipaddr); + IP_SET_TYPE_VAL(netif->ip_addr, IPADDR_TYPE_V4); + mib2_add_ip4(netif); + mib2_add_route_ip4(0, netif); + + netif_issue_reports(netif, NETIF_REPORT_TYPE_IPV4); + + NETIF_STATUS_CALLBACK(netif); + return 1; /* address changed */ + } + return 0; /* address unchanged */ +} + +/** + * @ingroup netif_ip4 + * Change the IP address of a network interface + * + * @param netif the network interface to change + * @param ipaddr the new IP address + * + * @note call netif_set_addr() if you also want to change netmask and + * default gateway + */ +void +netif_set_ipaddr(struct netif *netif, const ip4_addr_t *ipaddr) +{ + ip_addr_t old_addr; + + LWIP_ERROR("netif_set_ipaddr: invalid netif", netif != NULL, return); + + /* Don't propagate NULL pointer (IPv4 ANY) to subsequent functions */ + if (ipaddr == NULL) { + ipaddr = IP4_ADDR_ANY4; + } + + LWIP_ASSERT_CORE_LOCKED(); + + if (netif_do_set_ipaddr(netif, ipaddr, &old_addr)) { +#if LWIP_NETIF_EXT_STATUS_CALLBACK + netif_ext_callback_args_t args; + args.ipv4_changed.old_address = &old_addr; + netif_invoke_ext_callback(netif, LWIP_NSC_IPV4_ADDRESS_CHANGED, &args); +#endif + } +} + +static int +netif_do_set_netmask(struct netif *netif, const ip4_addr_t *netmask, ip_addr_t *old_nm) +{ + /* address is actually being changed? 
*/ + if (ip4_addr_cmp(netmask, netif_ip4_netmask(netif)) == 0) { +#if LWIP_NETIF_EXT_STATUS_CALLBACK + LWIP_ASSERT("invalid pointer", old_nm != NULL); + ip_addr_copy(*old_nm, *netif_ip_netmask4(netif)); +#else + LWIP_UNUSED_ARG(old_nm); +#endif + mib2_remove_route_ip4(0, netif); + /* set new netmask to netif */ + ip4_addr_set(ip_2_ip4(&netif->netmask), netmask); + IP_SET_TYPE_VAL(netif->netmask, IPADDR_TYPE_V4); + mib2_add_route_ip4(0, netif); + LWIP_DEBUGF(NETIF_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("netif: netmask of interface %c%c set to %"U16_F".%"U16_F".%"U16_F".%"U16_F"\n", + netif->name[0], netif->name[1], + ip4_addr1_16(netif_ip4_netmask(netif)), + ip4_addr2_16(netif_ip4_netmask(netif)), + ip4_addr3_16(netif_ip4_netmask(netif)), + ip4_addr4_16(netif_ip4_netmask(netif)))); + return 1; /* netmask changed */ + } + return 0; /* netmask unchanged */ +} + +/** + * @ingroup netif_ip4 + * Change the netmask of a network interface + * + * @param netif the network interface to change + * @param netmask the new netmask + * + * @note call netif_set_addr() if you also want to change ip address and + * default gateway + */ +void +netif_set_netmask(struct netif *netif, const ip4_addr_t *netmask) +{ +#if LWIP_NETIF_EXT_STATUS_CALLBACK + ip_addr_t old_nm_val; + ip_addr_t *old_nm = &old_nm_val; +#else + ip_addr_t *old_nm = NULL; +#endif + LWIP_ASSERT_CORE_LOCKED(); + + LWIP_ERROR("netif_set_netmask: invalid netif", netif != NULL, return); + + /* Don't propagate NULL pointer (IPv4 ANY) to subsequent functions */ + if (netmask == NULL) { + netmask = IP4_ADDR_ANY4; + } + + if (netif_do_set_netmask(netif, netmask, old_nm)) { +#if LWIP_NETIF_EXT_STATUS_CALLBACK + netif_ext_callback_args_t args; + args.ipv4_changed.old_netmask = old_nm; + netif_invoke_ext_callback(netif, LWIP_NSC_IPV4_NETMASK_CHANGED, &args); +#endif + } +} + +static int +netif_do_set_gw(struct netif *netif, const ip4_addr_t *gw, ip_addr_t *old_gw) +{ + /* address is actually being changed? 
*/ + if (ip4_addr_cmp(gw, netif_ip4_gw(netif)) == 0) { +#if LWIP_NETIF_EXT_STATUS_CALLBACK + LWIP_ASSERT("invalid pointer", old_gw != NULL); + ip_addr_copy(*old_gw, *netif_ip_gw4(netif)); +#else + LWIP_UNUSED_ARG(old_gw); +#endif + + ip4_addr_set(ip_2_ip4(&netif->gw), gw); + IP_SET_TYPE_VAL(netif->gw, IPADDR_TYPE_V4); + LWIP_DEBUGF(NETIF_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("netif: GW address of interface %c%c set to %"U16_F".%"U16_F".%"U16_F".%"U16_F"\n", + netif->name[0], netif->name[1], + ip4_addr1_16(netif_ip4_gw(netif)), + ip4_addr2_16(netif_ip4_gw(netif)), + ip4_addr3_16(netif_ip4_gw(netif)), + ip4_addr4_16(netif_ip4_gw(netif)))); + return 1; /* gateway changed */ + } + return 0; /* gateway unchanged */ +} + +/** + * @ingroup netif_ip4 + * Change the default gateway for a network interface + * + * @param netif the network interface to change + * @param gw the new default gateway + * + * @note call netif_set_addr() if you also want to change ip address and netmask + */ +void +netif_set_gw(struct netif *netif, const ip4_addr_t *gw) +{ +#if LWIP_NETIF_EXT_STATUS_CALLBACK + ip_addr_t old_gw_val; + ip_addr_t *old_gw = &old_gw_val; +#else + ip_addr_t *old_gw = NULL; +#endif + LWIP_ASSERT_CORE_LOCKED(); + + LWIP_ERROR("netif_set_gw: invalid netif", netif != NULL, return); + + /* Don't propagate NULL pointer (IPv4 ANY) to subsequent functions */ + if (gw == NULL) { + gw = IP4_ADDR_ANY4; + } + + if (netif_do_set_gw(netif, gw, old_gw)) { +#if LWIP_NETIF_EXT_STATUS_CALLBACK + netif_ext_callback_args_t args; + args.ipv4_changed.old_gw = old_gw; + netif_invoke_ext_callback(netif, LWIP_NSC_IPV4_GATEWAY_CHANGED, &args); +#endif + } +} + +/** + * @ingroup netif_ip4 + * Change IP address configuration for a network interface (including netmask + * and default gateway). 
+ * + * @param netif the network interface to change + * @param ipaddr the new IP address + * @param netmask the new netmask + * @param gw the new default gateway + */ +void +netif_set_addr(struct netif *netif, const ip4_addr_t *ipaddr, const ip4_addr_t *netmask, + const ip4_addr_t *gw) +{ +#if LWIP_NETIF_EXT_STATUS_CALLBACK + netif_nsc_reason_t change_reason = LWIP_NSC_NONE; + netif_ext_callback_args_t cb_args; + ip_addr_t old_nm_val; + ip_addr_t old_gw_val; + ip_addr_t *old_nm = &old_nm_val; + ip_addr_t *old_gw = &old_gw_val; +#else + ip_addr_t *old_nm = NULL; + ip_addr_t *old_gw = NULL; +#endif + ip_addr_t old_addr; + int remove; + + LWIP_ASSERT_CORE_LOCKED(); + + /* Don't propagate NULL pointer (IPv4 ANY) to subsequent functions */ + if (ipaddr == NULL) { + ipaddr = IP4_ADDR_ANY4; + } + if (netmask == NULL) { + netmask = IP4_ADDR_ANY4; + } + if (gw == NULL) { + gw = IP4_ADDR_ANY4; + } + + remove = ip4_addr_isany(ipaddr); + if (remove) { + /* when removing an address, we have to remove it *before* changing netmask/gw + to ensure that tcp RST segment can be sent correctly */ + if (netif_do_set_ipaddr(netif, ipaddr, &old_addr)) { +#if LWIP_NETIF_EXT_STATUS_CALLBACK + change_reason |= LWIP_NSC_IPV4_ADDRESS_CHANGED; + cb_args.ipv4_changed.old_address = &old_addr; +#endif + } + } + if (netif_do_set_netmask(netif, netmask, old_nm)) { +#if LWIP_NETIF_EXT_STATUS_CALLBACK + change_reason |= LWIP_NSC_IPV4_NETMASK_CHANGED; + cb_args.ipv4_changed.old_netmask = old_nm; +#endif + } + if (netif_do_set_gw(netif, gw, old_gw)) { +#if LWIP_NETIF_EXT_STATUS_CALLBACK + change_reason |= LWIP_NSC_IPV4_GATEWAY_CHANGED; + cb_args.ipv4_changed.old_gw = old_gw; +#endif + } + if (!remove) { + /* set ipaddr last to ensure netmask/gw have been set when status callback is called */ + if (netif_do_set_ipaddr(netif, ipaddr, &old_addr)) { +#if LWIP_NETIF_EXT_STATUS_CALLBACK + change_reason |= LWIP_NSC_IPV4_ADDRESS_CHANGED; + cb_args.ipv4_changed.old_address = &old_addr; +#endif + } + } + +#if LWIP_NETIF_EXT_STATUS_CALLBACK + if (change_reason != LWIP_NSC_NONE) { + change_reason |= LWIP_NSC_IPV4_SETTINGS_CHANGED; + netif_invoke_ext_callback(netif, change_reason, &cb_args); + } +#endif +} +#endif /* LWIP_IPV4*/ + +/** + * @ingroup netif + * Remove a network interface from the list of lwIP netifs. + * + * @param netif the network interface to remove + */ +void +netif_remove(struct netif *netif) +{ +#if LWIP_IPV6 + int i; +#endif + + LWIP_ASSERT_CORE_LOCKED(); + + if (netif == NULL) { + return; + } + + netif_invoke_ext_callback(netif, LWIP_NSC_NETIF_REMOVED, NULL); + +#if LWIP_IPV4 + if (!ip4_addr_isany_val(*netif_ip4_addr(netif))) { + netif_do_ip_addr_changed(netif_ip_addr4(netif), NULL); + } + +#if LWIP_IGMP + /* stop IGMP processing */ + if (netif->flags & NETIF_FLAG_IGMP) { + igmp_stop(netif); + } +#endif /* LWIP_IGMP */ +#endif /* LWIP_IPV4*/ + +#if LWIP_IPV6 + for (i = 0; i < LWIP_IPV6_NUM_ADDRESSES; i++) { + if (ip6_addr_isvalid(netif_ip6_addr_state(netif, i))) { + netif_do_ip_addr_changed(netif_ip_addr6(netif, i), NULL); + } + } +#if LWIP_IPV6_MLD + /* stop MLD processing */ + mld6_stop(netif); +#endif /* LWIP_IPV6_MLD */ +#endif /* LWIP_IPV6 */ + if (netif_is_up(netif)) { + /* set netif down before removing (call callback function) */ + netif_set_down(netif); + } + + mib2_remove_ip4(netif); + + /* this netif is default? */ + if (netif_default == netif) { + /* reset default netif */ + netif_set_default(NULL); + } +#if !LWIP_SINGLE_NETIF + /* is it the first netif? 
*/ + if (netif_list == netif) { + netif_list = netif->next; + } else { + /* look for netif further down the list */ + struct netif *tmp_netif; + NETIF_FOREACH(tmp_netif) { + if (tmp_netif->next == netif) { + tmp_netif->next = netif->next; + break; + } + } + if (tmp_netif == NULL) { + return; /* netif is not on the list */ + } + } +#endif /* !LWIP_SINGLE_NETIF */ + mib2_netif_removed(netif); +#if LWIP_NETIF_REMOVE_CALLBACK + if (netif->remove_callback) { + netif->remove_callback(netif); + } +#endif /* LWIP_NETIF_REMOVE_CALLBACK */ + LWIP_DEBUGF( NETIF_DEBUG, ("netif_remove: removed netif\n") ); +} + +/** + * @ingroup netif + * Set a network interface as the default network interface + * (used to output all packets for which no specific route is found) + * + * @param netif the default network interface + */ +void +netif_set_default(struct netif *netif) +{ + LWIP_ASSERT_CORE_LOCKED(); + + if (netif == NULL) { + /* remove default route */ + mib2_remove_route_ip4(1, netif); + } else { + /* install default route */ + mib2_add_route_ip4(1, netif); + } + netif_default = netif; + LWIP_DEBUGF(NETIF_DEBUG, ("netif: setting default interface %c%c\n", + netif ? netif->name[0] : '\'', netif ? netif->name[1] : '\'')); +} + +/** + * @ingroup netif + * Bring an interface up, available for processing + * traffic. + */ +void +netif_set_up(struct netif *netif) +{ + LWIP_ASSERT_CORE_LOCKED(); + + LWIP_ERROR("netif_set_up: invalid netif", netif != NULL, return); + + if (!(netif->flags & NETIF_FLAG_UP)) { + netif_set_flags(netif, NETIF_FLAG_UP); + + MIB2_COPY_SYSUPTIME_TO(&netif->ts); + + NETIF_STATUS_CALLBACK(netif); + +#if LWIP_NETIF_EXT_STATUS_CALLBACK + { + netif_ext_callback_args_t args; + args.status_changed.state = 1; + netif_invoke_ext_callback(netif, LWIP_NSC_STATUS_CHANGED, &args); + } +#endif + + netif_issue_reports(netif, NETIF_REPORT_TYPE_IPV4 | NETIF_REPORT_TYPE_IPV6); +#if LWIP_IPV6 + nd6_restart_netif(netif); +#endif /* LWIP_IPV6 */ + } +} + +/** Send ARP/IGMP/MLD/RS events, e.g. on link-up/netif-up or addr-change + */ +static void +netif_issue_reports(struct netif *netif, u8_t report_type) +{ + LWIP_ASSERT("netif_issue_reports: invalid netif", netif != NULL); + + /* Only send reports when both link and admin states are up */ + if (!(netif->flags & NETIF_FLAG_LINK_UP) || + !(netif->flags & NETIF_FLAG_UP)) { + return; + } + +#if LWIP_IPV4 + if ((report_type & NETIF_REPORT_TYPE_IPV4) && + !ip4_addr_isany_val(*netif_ip4_addr(netif))) { +#if LWIP_ARP + /* For Ethernet network interfaces, we would like to send a "gratuitous ARP" */ + if (netif->flags & (NETIF_FLAG_ETHARP)) { + etharp_gratuitous(netif); + } +#endif /* LWIP_ARP */ + +#if LWIP_IGMP + /* resend IGMP memberships */ + if (netif->flags & NETIF_FLAG_IGMP) { + igmp_report_groups(netif); + } +#endif /* LWIP_IGMP */ + } +#endif /* LWIP_IPV4 */ + +#if LWIP_IPV6 + if (report_type & NETIF_REPORT_TYPE_IPV6) { +#if LWIP_IPV6_MLD + /* send mld memberships */ + mld6_report_groups(netif); +#endif /* LWIP_IPV6_MLD */ + } +#endif /* LWIP_IPV6 */ +} + +/** + * @ingroup netif + * Bring an interface down, disabling any traffic processing. 
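/* [Editor's note: illustrative sketch, not part of the upstream lwIP sources.]
 * Typical bring-up sequence for the netif API above in a threaded (NO_SYS=0)
 * build: add the interface with tcpip_input(), make it the default route, then
 * set it administratively up. 'ethernetif_init' stands in for whatever driver
 * init function the port provides; the addresses are examples only.
 */
#include "lwip/netif.h"
#include "lwip/tcpip.h"

extern err_t ethernetif_init(struct netif *netif); /* assumed driver init hook */

static struct netif gnetif;

void my_netif_bringup(void)
{
  ip4_addr_t ipaddr, netmask, gw;

  IP4_ADDR(&ipaddr, 192, 168, 11, 2);
  IP4_ADDR(&netmask, 255, 255, 255, 0);
  IP4_ADDR(&gw, 192, 168, 11, 1);

  netif_add(&gnetif, &ipaddr, &netmask, &gw, NULL, ethernetif_init, tcpip_input);
  netif_set_default(&gnetif);

  if (netif_is_link_up(&gnetif)) {
    netif_set_up(&gnetif);   /* triggers netif_issue_reports() shown above */
  } else {
    netif_set_down(&gnetif);
  }
}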
+ */ +void +netif_set_down(struct netif *netif) +{ + LWIP_ASSERT_CORE_LOCKED(); + + LWIP_ERROR("netif_set_down: invalid netif", netif != NULL, return); + + if (netif->flags & NETIF_FLAG_UP) { +#if LWIP_NETIF_EXT_STATUS_CALLBACK + { + netif_ext_callback_args_t args; + args.status_changed.state = 0; + netif_invoke_ext_callback(netif, LWIP_NSC_STATUS_CHANGED, &args); + } +#endif + + netif_clear_flags(netif, NETIF_FLAG_UP); + MIB2_COPY_SYSUPTIME_TO(&netif->ts); + +#if LWIP_IPV4 && LWIP_ARP + if (netif->flags & NETIF_FLAG_ETHARP) { + etharp_cleanup_netif(netif); + } +#endif /* LWIP_IPV4 && LWIP_ARP */ + +#if LWIP_IPV6 + nd6_cleanup_netif(netif); +#endif /* LWIP_IPV6 */ + + NETIF_STATUS_CALLBACK(netif); + } +} + +#if LWIP_NETIF_STATUS_CALLBACK +/** + * @ingroup netif + * Set callback to be called when interface is brought up/down or address is changed while up + */ +void +netif_set_status_callback(struct netif *netif, netif_status_callback_fn status_callback) +{ + LWIP_ASSERT_CORE_LOCKED(); + + if (netif) { + netif->status_callback = status_callback; + } +} +#endif /* LWIP_NETIF_STATUS_CALLBACK */ + +#if LWIP_NETIF_REMOVE_CALLBACK +/** + * @ingroup netif + * Set callback to be called when the interface has been removed + */ +void +netif_set_remove_callback(struct netif *netif, netif_status_callback_fn remove_callback) +{ + LWIP_ASSERT_CORE_LOCKED(); + + if (netif) { + netif->remove_callback = remove_callback; + } +} +#endif /* LWIP_NETIF_REMOVE_CALLBACK */ + +/** + * @ingroup netif + * Called by a driver when its link goes up + */ +void +netif_set_link_up(struct netif *netif) +{ + LWIP_ASSERT_CORE_LOCKED(); + + LWIP_ERROR("netif_set_link_up: invalid netif", netif != NULL, return); + + if (!(netif->flags & NETIF_FLAG_LINK_UP)) { + netif_set_flags(netif, NETIF_FLAG_LINK_UP); + +#if LWIP_DHCP + dhcp_network_changed(netif); +#endif /* LWIP_DHCP */ + +#if LWIP_AUTOIP + autoip_network_changed(netif); +#endif /* LWIP_AUTOIP */ + + netif_issue_reports(netif, NETIF_REPORT_TYPE_IPV4 | NETIF_REPORT_TYPE_IPV6); +#if LWIP_IPV6 + nd6_restart_netif(netif); +#endif /* LWIP_IPV6 */ + + NETIF_LINK_CALLBACK(netif); +#if LWIP_NETIF_EXT_STATUS_CALLBACK + { + netif_ext_callback_args_t args; + args.link_changed.state = 1; + netif_invoke_ext_callback(netif, LWIP_NSC_LINK_CHANGED, &args); + } +#endif + } +} + +/** + * @ingroup netif + * Called by a driver when its link goes down + */ +void +netif_set_link_down(struct netif *netif) +{ + LWIP_ASSERT_CORE_LOCKED(); + + LWIP_ERROR("netif_set_link_down: invalid netif", netif != NULL, return); + + if (netif->flags & NETIF_FLAG_LINK_UP) { + netif_clear_flags(netif, NETIF_FLAG_LINK_UP); + NETIF_LINK_CALLBACK(netif); +#if LWIP_NETIF_EXT_STATUS_CALLBACK + { + netif_ext_callback_args_t args; + args.link_changed.state = 0; + netif_invoke_ext_callback(netif, LWIP_NSC_LINK_CHANGED, &args); + } +#endif + } +} + +#if LWIP_NETIF_LINK_CALLBACK +/** + * @ingroup netif + * Set callback to be called when link is brought up/down + */ +void +netif_set_link_callback(struct netif *netif, netif_status_callback_fn link_callback) +{ + LWIP_ASSERT_CORE_LOCKED(); + + if (netif) { + netif->link_callback = link_callback; + } +} +#endif /* LWIP_NETIF_LINK_CALLBACK */ + +#if ENABLE_LOOPBACK +/** + * @ingroup netif + * Send an IP packet to be received on the same netif (loopif-like). + * The pbuf is simply copied and handed back to netif->input. + * In multithreaded mode, this is done directly since netif->input must put + * the packet on a queue. 
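/* [Editor's note: illustrative sketch, not part of the upstream lwIP sources.]
 * Registering the per-netif status and link callbacks implemented above; this
 * requires LWIP_NETIF_STATUS_CALLBACK and LWIP_NETIF_LINK_CALLBACK to be
 * enabled in lwipopts.h. printf() is used only for illustration.
 */
#include <stdio.h>
#include "lwip/netif.h"

static void my_status_cb(struct netif *netif)
{
  printf("netif %c%c%u %s, addr %s\n",
         netif->name[0], netif->name[1], (unsigned)netif->num,
         netif_is_up(netif) ? "up" : "down",
         ip4addr_ntoa(netif_ip4_addr(netif)));
}

static void my_link_cb(struct netif *netif)
{
  printf("link %s\n", netif_is_link_up(netif) ? "up" : "down");
}

void my_register_netif_callbacks(struct netif *netif)
{
  netif_set_status_callback(netif, my_status_cb);
  netif_set_link_callback(netif, my_link_cb);
}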
+ * In callback mode, the packet is put on an internal queue and is fed to + * netif->input by netif_poll(). + * + * @param netif the lwip network interface structure + * @param p the (IP) packet to 'send' + * @return ERR_OK if the packet has been sent + * ERR_MEM if the pbuf used to copy the packet couldn't be allocated + */ +err_t +netif_loop_output(struct netif *netif, struct pbuf *p) +{ + struct pbuf *r; + err_t err; + struct pbuf *last; +#if LWIP_LOOPBACK_MAX_PBUFS + u16_t clen = 0; +#endif /* LWIP_LOOPBACK_MAX_PBUFS */ + /* If we have a loopif, SNMP counters are adjusted for it, + * if not they are adjusted for 'netif'. */ +#if MIB2_STATS +#if LWIP_HAVE_LOOPIF + struct netif *stats_if = &loop_netif; +#else /* LWIP_HAVE_LOOPIF */ + struct netif *stats_if = netif; +#endif /* LWIP_HAVE_LOOPIF */ +#endif /* MIB2_STATS */ +#if LWIP_NETIF_LOOPBACK_MULTITHREADING + u8_t schedule_poll = 0; +#endif /* LWIP_NETIF_LOOPBACK_MULTITHREADING */ + SYS_ARCH_DECL_PROTECT(lev); + + LWIP_ASSERT("netif_loop_output: invalid netif", netif != NULL); + LWIP_ASSERT("netif_loop_output: invalid pbuf", p != NULL); + + /* Allocate a new pbuf */ + r = pbuf_alloc(PBUF_LINK, p->tot_len, PBUF_RAM); + if (r == NULL) { + LINK_STATS_INC(link.memerr); + LINK_STATS_INC(link.drop); + MIB2_STATS_NETIF_INC(stats_if, ifoutdiscards); + return ERR_MEM; + } +#if LWIP_LOOPBACK_MAX_PBUFS + clen = pbuf_clen(r); + /* check for overflow or too many pbuf on queue */ + if (((netif->loop_cnt_current + clen) < netif->loop_cnt_current) || + ((netif->loop_cnt_current + clen) > LWIP_MIN(LWIP_LOOPBACK_MAX_PBUFS, 0xFFFF))) { + pbuf_free(r); + LINK_STATS_INC(link.memerr); + LINK_STATS_INC(link.drop); + MIB2_STATS_NETIF_INC(stats_if, ifoutdiscards); + return ERR_MEM; + } + netif->loop_cnt_current = (u16_t)(netif->loop_cnt_current + clen); +#endif /* LWIP_LOOPBACK_MAX_PBUFS */ + + /* Copy the whole pbuf queue p into the single pbuf r */ + if ((err = pbuf_copy(r, p)) != ERR_OK) { + pbuf_free(r); + LINK_STATS_INC(link.memerr); + LINK_STATS_INC(link.drop); + MIB2_STATS_NETIF_INC(stats_if, ifoutdiscards); + return err; + } + + /* Put the packet on a linked list which gets emptied through calling + netif_poll(). 
*/ + + /* let last point to the last pbuf in chain r */ + for (last = r; last->next != NULL; last = last->next) { + /* nothing to do here, just get to the last pbuf */ + } + + SYS_ARCH_PROTECT(lev); + if (netif->loop_first != NULL) { + LWIP_ASSERT("if first != NULL, last must also be != NULL", netif->loop_last != NULL); + netif->loop_last->next = r; + netif->loop_last = last; + } else { + netif->loop_first = r; + netif->loop_last = last; +#if LWIP_NETIF_LOOPBACK_MULTITHREADING + /* No existing packets queued, schedule poll */ + schedule_poll = 1; +#endif /* LWIP_NETIF_LOOPBACK_MULTITHREADING */ + } + SYS_ARCH_UNPROTECT(lev); + + LINK_STATS_INC(link.xmit); + MIB2_STATS_NETIF_ADD(stats_if, ifoutoctets, p->tot_len); + MIB2_STATS_NETIF_INC(stats_if, ifoutucastpkts); + +#if LWIP_NETIF_LOOPBACK_MULTITHREADING + /* For multithreading environment, schedule a call to netif_poll */ + if (schedule_poll) { + tcpip_try_callback((tcpip_callback_fn)netif_poll, netif); + } +#endif /* LWIP_NETIF_LOOPBACK_MULTITHREADING */ + + return ERR_OK; +} + +#if LWIP_HAVE_LOOPIF +#if LWIP_IPV4 +static err_t +netif_loop_output_ipv4(struct netif *netif, struct pbuf *p, const ip4_addr_t *addr) +{ + LWIP_UNUSED_ARG(addr); + return netif_loop_output(netif, p); +} +#endif /* LWIP_IPV4 */ + +#if LWIP_IPV6 +static err_t +netif_loop_output_ipv6(struct netif *netif, struct pbuf *p, const ip6_addr_t *addr) +{ + LWIP_UNUSED_ARG(addr); + return netif_loop_output(netif, p); +} +#endif /* LWIP_IPV6 */ +#endif /* LWIP_HAVE_LOOPIF */ + + +/** + * Call netif_poll() in the main loop of your application. This is to prevent + * reentering non-reentrant functions like tcp_input(). Packets passed to + * netif_loop_output() are put on a list that is passed to netif->input() by + * netif_poll(). + */ +void +netif_poll(struct netif *netif) +{ + /* If we have a loopif, SNMP counters are adjusted for it, + * if not they are adjusted for 'netif'. */ +#if MIB2_STATS +#if LWIP_HAVE_LOOPIF + struct netif *stats_if = &loop_netif; +#else /* LWIP_HAVE_LOOPIF */ + struct netif *stats_if = netif; +#endif /* LWIP_HAVE_LOOPIF */ +#endif /* MIB2_STATS */ + SYS_ARCH_DECL_PROTECT(lev); + + LWIP_ASSERT("netif_poll: invalid netif", netif != NULL); + + /* Get a packet from the list. With SYS_LIGHTWEIGHT_PROT=1, this is protected */ + SYS_ARCH_PROTECT(lev); + while (netif->loop_first != NULL) { + struct pbuf *in, *in_end; +#if LWIP_LOOPBACK_MAX_PBUFS + u8_t clen = 1; +#endif /* LWIP_LOOPBACK_MAX_PBUFS */ + + in = in_end = netif->loop_first; + while (in_end->len != in_end->tot_len) { + LWIP_ASSERT("bogus pbuf: len != tot_len but next == NULL!", in_end->next != NULL); + in_end = in_end->next; +#if LWIP_LOOPBACK_MAX_PBUFS + clen++; +#endif /* LWIP_LOOPBACK_MAX_PBUFS */ + } +#if LWIP_LOOPBACK_MAX_PBUFS + /* adjust the number of pbufs on queue */ + LWIP_ASSERT("netif->loop_cnt_current underflow", + ((netif->loop_cnt_current - clen) < netif->loop_cnt_current)); + netif->loop_cnt_current = (u16_t)(netif->loop_cnt_current - clen); +#endif /* LWIP_LOOPBACK_MAX_PBUFS */ + + /* 'in_end' now points to the last pbuf from 'in' */ + if (in_end == netif->loop_last) { + /* this was the last pbuf in the list */ + netif->loop_first = netif->loop_last = NULL; + } else { + /* pop the pbuf off the list */ + netif->loop_first = in_end->next; + LWIP_ASSERT("should not be null since first != last!", netif->loop_first != NULL); + } + /* De-queue the pbuf from its successors on the 'loop_' list. 
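/* [Editor's note: illustrative sketch, not part of the upstream lwIP sources.]
 * A bare-metal (NO_SYS=1) main loop of the shape the netif_poll() comment
 * above asks for: feed received frames to the stack, drive the lwIP timers and
 * drain queued loopback packets. 'my_driver_rx' is a hypothetical driver hook.
 */
#include "lwip/netif.h"
#include "lwip/timeouts.h"

extern void my_driver_rx(struct netif *netif); /* assumed: passes frames to netif->input() */

void my_mainloop(struct netif *netif)
{
  for (;;) {
    my_driver_rx(netif);      /* hand received frames to the stack */
    sys_check_timeouts();     /* run due lwIP timers */
#if ENABLE_LOOPBACK && !LWIP_NETIF_LOOPBACK_MULTITHREADING
    netif_poll(netif);        /* deliver pbufs queued by netif_loop_output() */
#endif
  }
}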
*/ + in_end->next = NULL; + SYS_ARCH_UNPROTECT(lev); + + in->if_idx = netif_get_index(netif); + + LINK_STATS_INC(link.recv); + MIB2_STATS_NETIF_ADD(stats_if, ifinoctets, in->tot_len); + MIB2_STATS_NETIF_INC(stats_if, ifinucastpkts); + /* loopback packets are always IP packets! */ + if (ip_input(in, netif) != ERR_OK) { + pbuf_free(in); + } + SYS_ARCH_PROTECT(lev); + } + SYS_ARCH_UNPROTECT(lev); +} + +#if !LWIP_NETIF_LOOPBACK_MULTITHREADING +/** + * Calls netif_poll() for every netif on the netif_list. + */ +void +netif_poll_all(void) +{ + struct netif *netif; + /* loop through netifs */ + NETIF_FOREACH(netif) { + netif_poll(netif); + } +} +#endif /* !LWIP_NETIF_LOOPBACK_MULTITHREADING */ +#endif /* ENABLE_LOOPBACK */ + +#if LWIP_NUM_NETIF_CLIENT_DATA > 0 +/** + * @ingroup netif_cd + * Allocate an index to store data in client_data member of struct netif. + * Returned value is an index in mentioned array. + * @see LWIP_NUM_NETIF_CLIENT_DATA + */ +u8_t +netif_alloc_client_data_id(void) +{ + u8_t result = netif_client_id; + netif_client_id++; + + LWIP_ASSERT_CORE_LOCKED(); + +#if LWIP_NUM_NETIF_CLIENT_DATA > 256 +#error LWIP_NUM_NETIF_CLIENT_DATA must be <= 256 +#endif + LWIP_ASSERT("Increase LWIP_NUM_NETIF_CLIENT_DATA in lwipopts.h", result < LWIP_NUM_NETIF_CLIENT_DATA); + return (u8_t)(result + LWIP_NETIF_CLIENT_DATA_INDEX_MAX); +} +#endif + +#if LWIP_IPV6 +/** + * @ingroup netif_ip6 + * Change an IPv6 address of a network interface + * + * @param netif the network interface to change + * @param addr_idx index of the IPv6 address + * @param addr6 the new IPv6 address + * + * @note call netif_ip6_addr_set_state() to set the address valid/temptative + */ +void +netif_ip6_addr_set(struct netif *netif, s8_t addr_idx, const ip6_addr_t *addr6) +{ + LWIP_ASSERT_CORE_LOCKED(); + + LWIP_ASSERT("netif_ip6_addr_set: invalid netif", netif != NULL); + LWIP_ASSERT("netif_ip6_addr_set: invalid addr6", addr6 != NULL); + + netif_ip6_addr_set_parts(netif, addr_idx, addr6->addr[0], addr6->addr[1], + addr6->addr[2], addr6->addr[3]); +} + +/* + * Change an IPv6 address of a network interface (internal version taking 4 * u32_t) + * + * @param netif the network interface to change + * @param addr_idx index of the IPv6 address + * @param i0 word0 of the new IPv6 address + * @param i1 word1 of the new IPv6 address + * @param i2 word2 of the new IPv6 address + * @param i3 word3 of the new IPv6 address + */ +void +netif_ip6_addr_set_parts(struct netif *netif, s8_t addr_idx, u32_t i0, u32_t i1, u32_t i2, u32_t i3) +{ + ip_addr_t old_addr; + ip_addr_t new_ipaddr; + LWIP_ASSERT_CORE_LOCKED(); + LWIP_ASSERT("netif != NULL", netif != NULL); + LWIP_ASSERT("invalid index", addr_idx < LWIP_IPV6_NUM_ADDRESSES); + + ip6_addr_copy(*ip_2_ip6(&old_addr), *netif_ip6_addr(netif, addr_idx)); + IP_SET_TYPE_VAL(old_addr, IPADDR_TYPE_V6); + + /* address is actually being changed? */ + if ((ip_2_ip6(&old_addr)->addr[0] != i0) || (ip_2_ip6(&old_addr)->addr[1] != i1) || + (ip_2_ip6(&old_addr)->addr[2] != i2) || (ip_2_ip6(&old_addr)->addr[3] != i3)) { + LWIP_DEBUGF(NETIF_DEBUG | LWIP_DBG_STATE, ("netif_ip6_addr_set: netif address being changed\n")); + + IP_ADDR6(&new_ipaddr, i0, i1, i2, i3); + ip6_addr_assign_zone(ip_2_ip6(&new_ipaddr), IP6_UNICAST, netif); + + if (ip6_addr_isvalid(netif_ip6_addr_state(netif, addr_idx))) { + netif_do_ip_addr_changed(netif_ip_addr6(netif, addr_idx), &new_ipaddr); + } + /* @todo: remove/readd mib2 ip6 entries? 
*/ + + ip_addr_copy(netif->ip6_addr[addr_idx], new_ipaddr); + + if (ip6_addr_isvalid(netif_ip6_addr_state(netif, addr_idx))) { + netif_issue_reports(netif, NETIF_REPORT_TYPE_IPV6); + NETIF_STATUS_CALLBACK(netif); + } + +#if LWIP_NETIF_EXT_STATUS_CALLBACK + { + netif_ext_callback_args_t args; + args.ipv6_set.addr_index = addr_idx; + args.ipv6_set.old_address = &old_addr; + netif_invoke_ext_callback(netif, LWIP_NSC_IPV6_SET, &args); + } +#endif + } + + LWIP_DEBUGF(NETIF_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("netif: IPv6 address %d of interface %c%c set to %s/0x%"X8_F"\n", + addr_idx, netif->name[0], netif->name[1], ip6addr_ntoa(netif_ip6_addr(netif, addr_idx)), + netif_ip6_addr_state(netif, addr_idx))); +} + +/** + * @ingroup netif_ip6 + * Change the state of an IPv6 address of a network interface + * (INVALID, TEMPTATIVE, PREFERRED, DEPRECATED, where TEMPTATIVE + * includes the number of checks done, see ip6_addr.h) + * + * @param netif the network interface to change + * @param addr_idx index of the IPv6 address + * @param state the new IPv6 address state + */ +void +netif_ip6_addr_set_state(struct netif *netif, s8_t addr_idx, u8_t state) +{ + u8_t old_state; + LWIP_ASSERT_CORE_LOCKED(); + LWIP_ASSERT("netif != NULL", netif != NULL); + LWIP_ASSERT("invalid index", addr_idx < LWIP_IPV6_NUM_ADDRESSES); + + old_state = netif_ip6_addr_state(netif, addr_idx); + /* state is actually being changed? */ + if (old_state != state) { + u8_t old_valid = old_state & IP6_ADDR_VALID; + u8_t new_valid = state & IP6_ADDR_VALID; + LWIP_DEBUGF(NETIF_DEBUG | LWIP_DBG_STATE, ("netif_ip6_addr_set_state: netif address state being changed\n")); + +#if LWIP_IPV6_MLD + /* Reevaluate solicited-node multicast group membership. */ + if (netif->flags & NETIF_FLAG_MLD6) { + nd6_adjust_mld_membership(netif, addr_idx, state); + } +#endif /* LWIP_IPV6_MLD */ + + if (old_valid && !new_valid) { + /* address about to be removed by setting invalid */ + netif_do_ip_addr_changed(netif_ip_addr6(netif, addr_idx), NULL); + /* @todo: remove mib2 ip6 entries? */ + } + netif->ip6_addr_state[addr_idx] = state; + + if (!old_valid && new_valid) { + /* address added by setting valid */ + /* This is a good moment to check that the address is properly zoned. */ + IP6_ADDR_ZONECHECK_NETIF(netif_ip6_addr(netif, addr_idx), netif); + /* @todo: add mib2 ip6 entries? */ + netif_issue_reports(netif, NETIF_REPORT_TYPE_IPV6); + } + if ((old_state & ~IP6_ADDR_TENTATIVE_COUNT_MASK) != + (state & ~IP6_ADDR_TENTATIVE_COUNT_MASK)) { + /* address state has changed -> call the callback function */ + NETIF_STATUS_CALLBACK(netif); + } + +#if LWIP_NETIF_EXT_STATUS_CALLBACK + { + netif_ext_callback_args_t args; + args.ipv6_addr_state_changed.addr_index = addr_idx; + args.ipv6_addr_state_changed.old_state = old_state; + args.ipv6_addr_state_changed.address = netif_ip_addr6(netif, addr_idx); + netif_invoke_ext_callback(netif, LWIP_NSC_IPV6_ADDR_STATE_CHANGED, &args); + } +#endif + } + LWIP_DEBUGF(NETIF_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("netif: IPv6 address %d of interface %c%c set to %s/0x%"X8_F"\n", + addr_idx, netif->name[0], netif->name[1], ip6addr_ntoa(netif_ip6_addr(netif, addr_idx)), + netif_ip6_addr_state(netif, addr_idx))); +} + +/** + * Checks if a specific local address is present on the netif and returns its + * index. Depending on its state, it may or may not be assigned to the + * interface (as per RFC terminology). + * + * The given address may or may not be zoned (i.e., have a zone index other + * than IP6_NO_ZONE). 
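/* [Editor's note: illustrative sketch, not part of the upstream lwIP sources.]
 * Installing a static IPv6 address with the netif_ip6_addr_set() and
 * netif_ip6_addr_set_state() functions above (assuming LWIP_IPV6=1). Slot 0 is
 * reserved for the link-local address, so a global address goes into slot 1;
 * the prefix is an example from the documentation range.
 */
#include "lwip/netif.h"
#include "lwip/ip6_addr.h"

void my_set_static_ip6(struct netif *netif)
{
  ip6_addr_t addr;

  if (ip6addr_aton("2001:db8::2", &addr)) {
    netif_ip6_addr_set(netif, 1, &addr);
    /* start tentative so duplicate address detection runs before the address
       is considered valid */
    netif_ip6_addr_set_state(netif, 1, IP6_ADDR_TENTATIVE);
  }
}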
If the address is zoned, it must have the correct zone + * for the given netif, or no match will be found. + * + * @param netif the netif to check + * @param ip6addr the IPv6 address to find + * @return >= 0: address found, this is its index + * -1: address not found on this netif + */ +s8_t +netif_get_ip6_addr_match(struct netif *netif, const ip6_addr_t *ip6addr) +{ + s8_t i; + + LWIP_ASSERT_CORE_LOCKED(); + + LWIP_ASSERT("netif_get_ip6_addr_match: invalid netif", netif != NULL); + LWIP_ASSERT("netif_get_ip6_addr_match: invalid ip6addr", ip6addr != NULL); + +#if LWIP_IPV6_SCOPES + if (ip6_addr_has_zone(ip6addr) && !ip6_addr_test_zone(ip6addr, netif)) { + return -1; /* wrong zone, no match */ + } +#endif /* LWIP_IPV6_SCOPES */ + + for (i = 0; i < LWIP_IPV6_NUM_ADDRESSES; i++) { + if (!ip6_addr_isinvalid(netif_ip6_addr_state(netif, i)) && + ip6_addr_cmp_zoneless(netif_ip6_addr(netif, i), ip6addr)) { + return i; + } + } + return -1; +} + +/** + * @ingroup netif_ip6 + * Create a link-local IPv6 address on a netif (stored in slot 0) + * + * @param netif the netif to create the address on + * @param from_mac_48bit if != 0, assume hwadr is a 48-bit MAC address (std conversion) + * if == 0, use hwaddr directly as interface ID + */ +void +netif_create_ip6_linklocal_address(struct netif *netif, u8_t from_mac_48bit) +{ + u8_t i, addr_index; + + LWIP_ASSERT_CORE_LOCKED(); + + LWIP_ASSERT("netif_create_ip6_linklocal_address: invalid netif", netif != NULL); + + /* Link-local prefix. */ + ip_2_ip6(&netif->ip6_addr[0])->addr[0] = PP_HTONL(0xfe800000ul); + ip_2_ip6(&netif->ip6_addr[0])->addr[1] = 0; + + /* Generate interface ID. */ + if (from_mac_48bit) { + /* Assume hwaddr is a 48-bit IEEE 802 MAC. Convert to EUI-64 address. Complement Group bit. */ + ip_2_ip6(&netif->ip6_addr[0])->addr[2] = lwip_htonl((((u32_t)(netif->hwaddr[0] ^ 0x02)) << 24) | + ((u32_t)(netif->hwaddr[1]) << 16) | + ((u32_t)(netif->hwaddr[2]) << 8) | + (0xff)); + ip_2_ip6(&netif->ip6_addr[0])->addr[3] = lwip_htonl((u32_t)(0xfeul << 24) | + ((u32_t)(netif->hwaddr[3]) << 16) | + ((u32_t)(netif->hwaddr[4]) << 8) | + (netif->hwaddr[5])); + } else { + /* Use hwaddr directly as interface ID. */ + ip_2_ip6(&netif->ip6_addr[0])->addr[2] = 0; + ip_2_ip6(&netif->ip6_addr[0])->addr[3] = 0; + + addr_index = 3; + for (i = 0; (i < 8) && (i < netif->hwaddr_len); i++) { + if (i == 4) { + addr_index--; + } + ip_2_ip6(&netif->ip6_addr[0])->addr[addr_index] |= lwip_htonl(((u32_t)(netif->hwaddr[netif->hwaddr_len - i - 1])) << (8 * (i & 0x03))); + } + } + + /* Set a link-local zone. Even though the zone is implied by the owning + * netif, setting the zone anyway has two important conceptual advantages: + * 1) it avoids the need for a ton of exceptions in internal code, allowing + * e.g. ip6_addr_cmp() to be used on local addresses; + * 2) the properly zoned address is visible externally, e.g. when any outside + * code enumerates available addresses or uses one to bind a socket. + * Any external code unaware of address scoping is likely to just ignore the + * zone field, so this should not create any compatibility problems. */ + ip6_addr_assign_zone(ip_2_ip6(&netif->ip6_addr[0]), IP6_UNICAST, netif); + + /* Set address state. */ +#if LWIP_IPV6_DUP_DETECT_ATTEMPTS + /* Will perform duplicate address detection (DAD). */ + netif_ip6_addr_set_state(netif, 0, IP6_ADDR_TENTATIVE); +#else + /* Consider address valid. 
*/ + netif_ip6_addr_set_state(netif, 0, IP6_ADDR_PREFERRED); +#endif /* LWIP_IPV6_AUTOCONFIG */ +} + +/** + * @ingroup netif_ip6 + * This function allows for the easy addition of a new IPv6 address to an interface. + * It takes care of finding an empty slot and then sets the address tentative + * (to make sure that all the subsequent processing happens). + * + * @param netif netif to add the address on + * @param ip6addr address to add + * @param chosen_idx if != NULL, the chosen IPv6 address index will be stored here + */ +err_t +netif_add_ip6_address(struct netif *netif, const ip6_addr_t *ip6addr, s8_t *chosen_idx) +{ + s8_t i; + + LWIP_ASSERT_CORE_LOCKED(); + + LWIP_ASSERT("netif_add_ip6_address: invalid netif", netif != NULL); + LWIP_ASSERT("netif_add_ip6_address: invalid ip6addr", ip6addr != NULL); + + i = netif_get_ip6_addr_match(netif, ip6addr); + if (i >= 0) { + /* Address already added */ + if (chosen_idx != NULL) { + *chosen_idx = i; + } + return ERR_OK; + } + + /* Find a free slot. The first one is reserved for link-local addresses. */ + for (i = ip6_addr_islinklocal(ip6addr) ? 0 : 1; i < LWIP_IPV6_NUM_ADDRESSES; i++) { + if (ip6_addr_isinvalid(netif_ip6_addr_state(netif, i))) { + ip_addr_copy_from_ip6(netif->ip6_addr[i], *ip6addr); + ip6_addr_assign_zone(ip_2_ip6(&netif->ip6_addr[i]), IP6_UNICAST, netif); + netif_ip6_addr_set_state(netif, i, IP6_ADDR_TENTATIVE); + if (chosen_idx != NULL) { + *chosen_idx = i; + } + return ERR_OK; + } + } + + if (chosen_idx != NULL) { + *chosen_idx = -1; + } + return ERR_VAL; +} + +/** Dummy IPv6 output function for netifs not supporting IPv6 + */ +static err_t +netif_null_output_ip6(struct netif *netif, struct pbuf *p, const ip6_addr_t *ipaddr) +{ + LWIP_UNUSED_ARG(netif); + LWIP_UNUSED_ARG(p); + LWIP_UNUSED_ARG(ipaddr); + + return ERR_IF; +} +#endif /* LWIP_IPV6 */ + +#if LWIP_IPV4 +/** Dummy IPv4 output function for netifs not supporting IPv4 + */ +static err_t +netif_null_output_ip4(struct netif *netif, struct pbuf *p, const ip4_addr_t *ipaddr) +{ + LWIP_UNUSED_ARG(netif); + LWIP_UNUSED_ARG(p); + LWIP_UNUSED_ARG(ipaddr); + + return ERR_IF; +} +#endif /* LWIP_IPV4 */ + +/** +* @ingroup netif +* Return the interface index for the netif with name +* or NETIF_NO_INDEX if not found/on error +* +* @param name the name of the netif +*/ +u8_t +netif_name_to_index(const char *name) +{ + struct netif *netif = netif_find(name); + if (netif != NULL) { + return netif_get_index(netif); + } + /* No name found, return invalid index */ + return NETIF_NO_INDEX; +} + +/** +* @ingroup netif +* Return the interface name for the netif matching index +* or NULL if not found/on error +* +* @param idx the interface index of the netif +* @param name char buffer of at least NETIF_NAMESIZE bytes +*/ +char * +netif_index_to_name(u8_t idx, char *name) +{ + struct netif *netif = netif_get_by_index(idx); + + if (netif != NULL) { + name[0] = netif->name[0]; + name[1] = netif->name[1]; + lwip_itoa(&name[2], NETIF_NAMESIZE - 2, netif_index_to_num(idx)); + return name; + } + return NULL; +} + +/** +* @ingroup netif +* Return the interface for the netif index +* +* @param idx index of netif to find +*/ +struct netif * +netif_get_by_index(u8_t idx) +{ + struct netif *netif; + + LWIP_ASSERT_CORE_LOCKED(); + + if (idx != NETIF_NO_INDEX) { + NETIF_FOREACH(netif) { + if (idx == netif_get_index(netif)) { + return netif; /* found! 
*/ + } + } + } + + return NULL; +} + +/** + * @ingroup netif + * Find a network interface by searching for its name + * + * @param name the name of the netif (like netif->name) plus concatenated number + * in ascii representation (e.g. 'en0') + */ +struct netif * +netif_find(const char *name) +{ + struct netif *netif; + u8_t num; + + LWIP_ASSERT_CORE_LOCKED(); + + if (name == NULL) { + return NULL; + } + + num = (u8_t)atoi(&name[2]); + + NETIF_FOREACH(netif) { + if (num == netif->num && + name[0] == netif->name[0] && + name[1] == netif->name[1]) { + LWIP_DEBUGF(NETIF_DEBUG, ("netif_find: found %c%c\n", name[0], name[1])); + return netif; + } + } + LWIP_DEBUGF(NETIF_DEBUG, ("netif_find: didn't find %c%c\n", name[0], name[1])); + return NULL; +} + +#if LWIP_NETIF_EXT_STATUS_CALLBACK +/** + * @ingroup netif + * Add extended netif events listener + * @param callback pointer to listener structure + * @param fn callback function + */ +void +netif_add_ext_callback(netif_ext_callback_t *callback, netif_ext_callback_fn fn) +{ + LWIP_ASSERT_CORE_LOCKED(); + LWIP_ASSERT("callback must be != NULL", callback != NULL); + LWIP_ASSERT("fn must be != NULL", fn != NULL); + + callback->callback_fn = fn; + callback->next = ext_callback; + ext_callback = callback; +} + +/** + * @ingroup netif + * Remove extended netif events listener + * @param callback pointer to listener structure + */ +void +netif_remove_ext_callback(netif_ext_callback_t* callback) +{ + netif_ext_callback_t *last, *iter; + + LWIP_ASSERT_CORE_LOCKED(); + LWIP_ASSERT("callback must be != NULL", callback != NULL); + + if (ext_callback == NULL) { + return; + } + + if (callback == ext_callback) { + ext_callback = ext_callback->next; + } else { + last = ext_callback; + for (iter = ext_callback->next; iter != NULL; last = iter, iter = iter->next) { + if (iter == callback) { + LWIP_ASSERT("last != NULL", last != NULL); + last->next = callback->next; + callback->next = NULL; + return; + } + } + } +} + +/** + * Invoke extended netif status event + * @param netif netif that is affected by change + * @param reason change reason + * @param args depends on reason, see reason description + */ +void +netif_invoke_ext_callback(struct netif *netif, netif_nsc_reason_t reason, const netif_ext_callback_args_t *args) +{ + netif_ext_callback_t *callback = ext_callback; + + LWIP_ASSERT("netif must be != NULL", netif != NULL); + + while (callback != NULL) { + callback->callback_fn(netif, reason, args); + callback = callback->next; + } +} +#endif /* LWIP_NETIF_EXT_STATUS_CALLBACK */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/pbuf.c b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/pbuf.c new file mode 100644 index 0000000..7579d86 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/pbuf.c @@ -0,0 +1,1514 @@ +/** + * @file + * Packet buffer management + */ + +/** + * @defgroup pbuf Packet buffers (PBUF) + * @ingroup infrastructure + * + * Packets are built from the pbuf data structure. It supports dynamic + * memory allocation for packet contents or can reference externally + * managed packet contents both in RAM and ROM. Quick allocation for + * incoming packets is provided through pools with fixed sized pbufs. + * + * A packet may span over multiple pbufs, chained as a singly linked + * list. This is called a "pbuf chain". + * + * Multiple packets may be queued, also using this singly linked list. 
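/* [Editor's note: illustrative sketch, not part of the upstream lwIP sources.]
 * Registering a listener with the extended netif callback API that closes
 * netif.c above (requires LWIP_NETIF_EXT_STATUS_CALLBACK). The callback
 * storage must remain valid for as long as it stays registered; printf() is
 * illustration only.
 */
#include <stdio.h>
#include "lwip/netif.h"

static netif_ext_callback_t my_ext_cb;

static void
my_ext_cb_fn(struct netif *netif, netif_nsc_reason_t reason,
             const netif_ext_callback_args_t *args)
{
  LWIP_UNUSED_ARG(args);
  if (reason & LWIP_NSC_IPV4_ADDRESS_CHANGED) {
    printf("netif %u: IPv4 address is now %s\n",
           (unsigned)netif_get_index(netif),
           ip4addr_ntoa(netif_ip4_addr(netif)));
  }
}

void my_register_ext_callback(void)
{
  netif_add_ext_callback(&my_ext_cb, my_ext_cb_fn);
}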
+ * This is called a "packet queue". + * + * So, a packet queue consists of one or more pbuf chains, each of + * which consist of one or more pbufs. CURRENTLY, PACKET QUEUES ARE + * NOT SUPPORTED!!! Use helper structs to queue multiple packets. + * + * The differences between a pbuf chain and a packet queue are very + * precise but subtle. + * + * The last pbuf of a packet has a ->tot_len field that equals the + * ->len field. It can be found by traversing the list. If the last + * pbuf of a packet has a ->next field other than NULL, more packets + * are on the queue. + * + * Therefore, looping through a pbuf of a single packet, has an + * loop end condition (tot_len == p->len), NOT (next == NULL). + * + * Example of custom pbuf usage: @ref zerocopyrx + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ + +#include "lwip/opt.h" + +#include "lwip/pbuf.h" +#include "lwip/stats.h" +#include "lwip/def.h" +#include "lwip/mem.h" +#include "lwip/memp.h" +#include "lwip/sys.h" +#include "lwip/netif.h" +#if LWIP_TCP && TCP_QUEUE_OOSEQ +#include "lwip/priv/tcp_priv.h" +#endif +#if LWIP_CHECKSUM_ON_COPY +#include "lwip/inet_chksum.h" +#endif + +#include + +#define SIZEOF_STRUCT_PBUF LWIP_MEM_ALIGN_SIZE(sizeof(struct pbuf)) +/* Since the pool is created in memp, PBUF_POOL_BUFSIZE will be automatically + aligned there. Therefore, PBUF_POOL_BUFSIZE_ALIGNED can be used here. 
*/ +#define PBUF_POOL_BUFSIZE_ALIGNED LWIP_MEM_ALIGN_SIZE(PBUF_POOL_BUFSIZE) + +static const struct pbuf * +pbuf_skip_const(const struct pbuf *in, u16_t in_offset, u16_t *out_offset); + +#if !LWIP_TCP || !TCP_QUEUE_OOSEQ || !PBUF_POOL_FREE_OOSEQ +#define PBUF_POOL_IS_EMPTY() +#else /* !LWIP_TCP || !TCP_QUEUE_OOSEQ || !PBUF_POOL_FREE_OOSEQ */ + +#if !NO_SYS +#ifndef PBUF_POOL_FREE_OOSEQ_QUEUE_CALL +#include "lwip/tcpip.h" +#define PBUF_POOL_FREE_OOSEQ_QUEUE_CALL() do { \ + if (tcpip_try_callback(pbuf_free_ooseq_callback, NULL) != ERR_OK) { \ + SYS_ARCH_PROTECT(old_level); \ + pbuf_free_ooseq_pending = 0; \ + SYS_ARCH_UNPROTECT(old_level); \ + } } while(0) +#endif /* PBUF_POOL_FREE_OOSEQ_QUEUE_CALL */ +#endif /* !NO_SYS */ + +volatile u8_t pbuf_free_ooseq_pending; +#define PBUF_POOL_IS_EMPTY() pbuf_pool_is_empty() + +/** + * Attempt to reclaim some memory from queued out-of-sequence TCP segments + * if we run out of pool pbufs. It's better to give priority to new packets + * if we're running out. + * + * This must be done in the correct thread context therefore this function + * can only be used with NO_SYS=0 and through tcpip_callback. + */ +#if !NO_SYS +static +#endif /* !NO_SYS */ +void +pbuf_free_ooseq(void) +{ + struct tcp_pcb *pcb; + SYS_ARCH_SET(pbuf_free_ooseq_pending, 0); + + for (pcb = tcp_active_pcbs; NULL != pcb; pcb = pcb->next) { + if (pcb->ooseq != NULL) { + /** Free the ooseq pbufs of one PCB only */ + LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_free_ooseq: freeing out-of-sequence pbufs\n")); + tcp_free_ooseq(pcb); + return; + } + } +} + +#if !NO_SYS +/** + * Just a callback function for tcpip_callback() that calls pbuf_free_ooseq(). + */ +static void +pbuf_free_ooseq_callback(void *arg) +{ + LWIP_UNUSED_ARG(arg); + pbuf_free_ooseq(); +} +#endif /* !NO_SYS */ + +/** Queue a call to pbuf_free_ooseq if not already queued. */ +static void +pbuf_pool_is_empty(void) +{ +#ifndef PBUF_POOL_FREE_OOSEQ_QUEUE_CALL + SYS_ARCH_SET(pbuf_free_ooseq_pending, 1); +#else /* PBUF_POOL_FREE_OOSEQ_QUEUE_CALL */ + u8_t queued; + SYS_ARCH_DECL_PROTECT(old_level); + SYS_ARCH_PROTECT(old_level); + queued = pbuf_free_ooseq_pending; + pbuf_free_ooseq_pending = 1; + SYS_ARCH_UNPROTECT(old_level); + + if (!queued) { + /* queue a call to pbuf_free_ooseq if not already queued */ + PBUF_POOL_FREE_OOSEQ_QUEUE_CALL(); + } +#endif /* PBUF_POOL_FREE_OOSEQ_QUEUE_CALL */ +} +#endif /* !LWIP_TCP || !TCP_QUEUE_OOSEQ || !PBUF_POOL_FREE_OOSEQ */ + +/* Initialize members of struct pbuf after allocation */ +static void +pbuf_init_alloced_pbuf(struct pbuf *p, void *payload, u16_t tot_len, u16_t len, pbuf_type type, u8_t flags) +{ + p->next = NULL; + p->payload = payload; + p->tot_len = tot_len; + p->len = len; + p->type_internal = (u8_t)type; + p->flags = flags; + p->ref = 1; + p->if_idx = NETIF_NO_INDEX; +} + +/** + * @ingroup pbuf + * Allocates a pbuf of the given type (possibly a chain for PBUF_POOL type). + * + * The actual memory allocated for the pbuf is determined by the + * layer at which the pbuf is allocated and the requested size + * (from the size parameter). + * + * @param layer header size + * @param length size of the pbuf's payload + * @param type this parameter decides how and where the pbuf + * should be allocated as follows: + * + * - PBUF_RAM: buffer memory for pbuf is allocated as one large + * chunk. This includes protocol headers as well. + * - PBUF_ROM: no buffer memory is allocated for the pbuf, even for + * protocol headers. 
Additional headers must be prepended + * by allocating another pbuf and chain in to the front of + * the ROM pbuf. It is assumed that the memory used is really + * similar to ROM in that it is immutable and will not be + * changed. Memory which is dynamic should generally not + * be attached to PBUF_ROM pbufs. Use PBUF_REF instead. + * - PBUF_REF: no buffer memory is allocated for the pbuf, even for + * protocol headers. It is assumed that the pbuf is only + * being used in a single thread. If the pbuf gets queued, + * then pbuf_take should be called to copy the buffer. + * - PBUF_POOL: the pbuf is allocated as a pbuf chain, with pbufs from + * the pbuf pool that is allocated during pbuf_init(). + * + * @return the allocated pbuf. If multiple pbufs where allocated, this + * is the first pbuf of a pbuf chain. + */ +struct pbuf * +pbuf_alloc(pbuf_layer layer, u16_t length, pbuf_type type) +{ + struct pbuf *p; + u16_t offset = (u16_t)layer; + LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_alloc(length=%"U16_F")\n", length)); + + switch (type) { + case PBUF_REF: /* fall through */ + case PBUF_ROM: + p = pbuf_alloc_reference(NULL, length, type); + break; + case PBUF_POOL: { + struct pbuf *q, *last; + u16_t rem_len; /* remaining length */ + p = NULL; + last = NULL; + rem_len = length; + do { + u16_t qlen; + q = (struct pbuf *)memp_malloc(MEMP_PBUF_POOL); + if (q == NULL) { + PBUF_POOL_IS_EMPTY(); + /* free chain so far allocated */ + if (p) { + pbuf_free(p); + } + /* bail out unsuccessfully */ + return NULL; + } + qlen = LWIP_MIN(rem_len, (u16_t)(PBUF_POOL_BUFSIZE_ALIGNED - LWIP_MEM_ALIGN_SIZE(offset))); + pbuf_init_alloced_pbuf(q, LWIP_MEM_ALIGN((void *)((u8_t *)q + SIZEOF_STRUCT_PBUF + offset)), + rem_len, qlen, type, 0); + LWIP_ASSERT("pbuf_alloc: pbuf q->payload properly aligned", + ((mem_ptr_t)q->payload % MEM_ALIGNMENT) == 0); + LWIP_ASSERT("PBUF_POOL_BUFSIZE must be bigger than MEM_ALIGNMENT", + (PBUF_POOL_BUFSIZE_ALIGNED - LWIP_MEM_ALIGN_SIZE(offset)) > 0 ); + if (p == NULL) { + /* allocated head of pbuf chain (into p) */ + p = q; + } else { + /* make previous pbuf point to this pbuf */ + last->next = q; + } + last = q; + rem_len = (u16_t)(rem_len - qlen); + offset = 0; + } while (rem_len > 0); + break; + } + case PBUF_RAM: { + u16_t payload_len = (u16_t)(LWIP_MEM_ALIGN_SIZE(offset) + LWIP_MEM_ALIGN_SIZE(length)); + mem_size_t alloc_len = (mem_size_t)(LWIP_MEM_ALIGN_SIZE(SIZEOF_STRUCT_PBUF) + payload_len); + + /* bug #50040: Check for integer overflow when calculating alloc_len */ + if ((payload_len < LWIP_MEM_ALIGN_SIZE(length)) || + (alloc_len < LWIP_MEM_ALIGN_SIZE(length))) { + return NULL; + } + + /* If pbuf is to be allocated in RAM, allocate memory for it. */ + p = (struct pbuf *)mem_malloc(alloc_len); + if (p == NULL) { + return NULL; + } + pbuf_init_alloced_pbuf(p, LWIP_MEM_ALIGN((void *)((u8_t *)p + SIZEOF_STRUCT_PBUF + offset)), + length, length, type, 0); + LWIP_ASSERT("pbuf_alloc: pbuf->payload properly aligned", + ((mem_ptr_t)p->payload % MEM_ALIGNMENT) == 0); + break; + } + default: + LWIP_ASSERT("pbuf_alloc: erroneous type", 0); + return NULL; + } + LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_alloc(length=%"U16_F") == %p\n", length, (void *)p)); + return p; +} + +/** + * @ingroup pbuf + * Allocates a pbuf for referenced data. + * Referenced data can be volatile (PBUF_REF) or long-lived (PBUF_ROM). + * + * The actual memory allocated for the pbuf is determined by the + * layer at which the pbuf is allocated and the requested size + * (from the size parameter). 
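+ *
+ * Illustrative sketch (the buffer name and the PBUF_ROM choice are examples
+ * only; any immutable, statically allocated buffer fits this pattern):
+ *   static const u8_t greeting[] = "hello";
+ *   struct pbuf *p = pbuf_alloc_reference(LWIP_CONST_CAST(void *, greeting),
+ *                                         sizeof(greeting), PBUF_ROM);
+ *   // p->payload points at 'greeting' itself; no payload memory is copied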
+ * + * @param payload referenced payload + * @param length size of the pbuf's payload + * @param type this parameter decides how and where the pbuf + * should be allocated as follows: + * + * - PBUF_ROM: It is assumed that the memory used is really + * similar to ROM in that it is immutable and will not be + * changed. Memory which is dynamic should generally not + * be attached to PBUF_ROM pbufs. Use PBUF_REF instead. + * - PBUF_REF: It is assumed that the pbuf is only + * being used in a single thread. If the pbuf gets queued, + * then pbuf_take should be called to copy the buffer. + * + * @return the allocated pbuf. + */ +struct pbuf * +pbuf_alloc_reference(void *payload, u16_t length, pbuf_type type) +{ + struct pbuf *p; + LWIP_ASSERT("invalid pbuf_type", (type == PBUF_REF) || (type == PBUF_ROM)); + /* only allocate memory for the pbuf structure */ + p = (struct pbuf *)memp_malloc(MEMP_PBUF); + if (p == NULL) { + LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_LEVEL_SERIOUS, + ("pbuf_alloc_reference: Could not allocate MEMP_PBUF for PBUF_%s.\n", + (type == PBUF_ROM) ? "ROM" : "REF")); + return NULL; + } + pbuf_init_alloced_pbuf(p, payload, length, length, type, 0); + return p; +} + + +#if LWIP_SUPPORT_CUSTOM_PBUF +/** + * @ingroup pbuf + * Initialize a custom pbuf (already allocated). + * Example of custom pbuf usage: @ref zerocopyrx + * + * @param l header size + * @param length size of the pbuf's payload + * @param type type of the pbuf (only used to treat the pbuf accordingly, as + * this function allocates no memory) + * @param p pointer to the custom pbuf to initialize (already allocated) + * @param payload_mem pointer to the buffer that is used for payload and headers, + * must be at least big enough to hold 'length' plus the header size, + * may be NULL if set later. + * ATTENTION: The caller is responsible for correct alignment of this buffer!! + * @param payload_mem_len the size of the 'payload_mem' buffer, must be at least + * big enough to hold 'length' plus the header size + */ +struct pbuf * +pbuf_alloced_custom(pbuf_layer l, u16_t length, pbuf_type type, struct pbuf_custom *p, + void *payload_mem, u16_t payload_mem_len) +{ + u16_t offset = (u16_t)l; + void *payload; + LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_alloced_custom(length=%"U16_F")\n", length)); + + if (LWIP_MEM_ALIGN_SIZE(offset) + length > payload_mem_len) { + LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_LEVEL_WARNING, ("pbuf_alloced_custom(length=%"U16_F") buffer too short\n", length)); + return NULL; + } + + if (payload_mem != NULL) { + payload = (u8_t *)payload_mem + LWIP_MEM_ALIGN_SIZE(offset); + } else { + payload = NULL; + } + pbuf_init_alloced_pbuf(&p->pbuf, payload, length, length, type, PBUF_FLAG_IS_CUSTOM); + return &p->pbuf; +} +#endif /* LWIP_SUPPORT_CUSTOM_PBUF */ + +/** + * @ingroup pbuf + * Shrink a pbuf chain to a desired length. + * + * @param p pbuf to shrink. + * @param new_len desired new length of pbuf chain + * + * Depending on the desired length, the first few pbufs in a chain might + * be skipped and left unchanged. The new last pbuf in the chain will be + * resized, and any remaining pbufs will be freed. + * + * @note If the pbuf is ROM/REF, only the ->tot_len and ->len fields are adjusted. + * @note May not be called on a packet queue. + * + * @note Despite its name, pbuf_realloc cannot grow the size of a pbuf (chain). 
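+ *
+ * Shrinking sketch (the length value is illustrative; 'p' is assumed to be a
+ * previously allocated pbuf or chain of at least that total length):
+ *   pbuf_realloc(p, 100);  // keep the first 100 bytes, release the rest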
+ */ +void +pbuf_realloc(struct pbuf *p, u16_t new_len) +{ + struct pbuf *q; + u16_t rem_len; /* remaining length */ + u16_t shrink; + + LWIP_ASSERT("pbuf_realloc: p != NULL", p != NULL); + + /* desired length larger than current length? */ + if (new_len >= p->tot_len) { + /* enlarging not yet supported */ + return; + } + + /* the pbuf chain grows by (new_len - p->tot_len) bytes + * (which may be negative in case of shrinking) */ + shrink = (u16_t)(p->tot_len - new_len); + + /* first, step over any pbufs that should remain in the chain */ + rem_len = new_len; + q = p; + /* should this pbuf be kept? */ + while (rem_len > q->len) { + /* decrease remaining length by pbuf length */ + rem_len = (u16_t)(rem_len - q->len); + /* decrease total length indicator */ + q->tot_len = (u16_t)(q->tot_len - shrink); + /* proceed to next pbuf in chain */ + q = q->next; + LWIP_ASSERT("pbuf_realloc: q != NULL", q != NULL); + } + /* we have now reached the new last pbuf (in q) */ + /* rem_len == desired length for pbuf q */ + + /* shrink allocated memory for PBUF_RAM */ + /* (other types merely adjust their length fields */ + if (pbuf_match_allocsrc(q, PBUF_TYPE_ALLOC_SRC_MASK_STD_HEAP) && (rem_len != q->len) +#if LWIP_SUPPORT_CUSTOM_PBUF + && ((q->flags & PBUF_FLAG_IS_CUSTOM) == 0) +#endif /* LWIP_SUPPORT_CUSTOM_PBUF */ + ) { + /* reallocate and adjust the length of the pbuf that will be split */ + q = (struct pbuf *)mem_trim(q, (mem_size_t)(((u8_t *)q->payload - (u8_t *)q) + rem_len)); + LWIP_ASSERT("mem_trim returned q == NULL", q != NULL); + } + /* adjust length fields for new last pbuf */ + q->len = rem_len; + q->tot_len = q->len; + + /* any remaining pbufs in chain? */ + if (q->next != NULL) { + /* free remaining pbufs in chain */ + pbuf_free(q->next); + } + /* q is last packet in chain */ + q->next = NULL; + +} + +/** + * Adjusts the payload pointer to reveal headers in the payload. + * @see pbuf_add_header. + * + * @param p pbuf to change the header size. + * @param header_size_increment Number of bytes to increment header size. + * @param force Allow 'header_size_increment > 0' for PBUF_REF/PBUF_ROM types + * + * @return non-zero on failure, zero on success. + * + */ +static u8_t +pbuf_add_header_impl(struct pbuf *p, size_t header_size_increment, u8_t force) +{ + u16_t type_internal; + void *payload; + u16_t increment_magnitude; + + LWIP_ASSERT("p != NULL", p != NULL); + if ((p == NULL) || (header_size_increment > 0xFFFF)) { + return 1; + } + if (header_size_increment == 0) { + return 0; + } + + increment_magnitude = (u16_t)header_size_increment; + /* Do not allow tot_len to wrap as a result. */ + if ((u16_t)(increment_magnitude + p->tot_len) < increment_magnitude) { + return 1; + } + + type_internal = p->type_internal; + + /* pbuf types containing payloads? */ + if (type_internal & PBUF_TYPE_FLAG_STRUCT_DATA_CONTIGUOUS) { + /* set new payload pointer */ + payload = (u8_t *)p->payload - header_size_increment; + /* boundary check fails? */ + if ((u8_t *)payload < (u8_t *)p + SIZEOF_STRUCT_PBUF) { + LWIP_DEBUGF( PBUF_DEBUG | LWIP_DBG_TRACE, + ("pbuf_add_header: failed as %p < %p (not enough space for new header size)\n", + (void *)payload, (void *)((u8_t *)p + SIZEOF_STRUCT_PBUF))); + /* bail out unsuccessfully */ + return 1; + } + /* pbuf types referring to external payloads? */ + } else { + /* hide a header in the payload? */ + if (force) { + payload = (u8_t *)p->payload - header_size_increment; + } else { + /* cannot expand payload to front (yet!) 
+ * bail out unsuccessfully */ + return 1; + } + } + LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_add_header: old %p new %p (%"U16_F")\n", + (void *)p->payload, (void *)payload, increment_magnitude)); + + /* modify pbuf fields */ + p->payload = payload; + p->len = (u16_t)(p->len + increment_magnitude); + p->tot_len = (u16_t)(p->tot_len + increment_magnitude); + + + return 0; +} + +/** + * Adjusts the payload pointer to reveal headers in the payload. + * + * Adjusts the ->payload pointer so that space for a header + * appears in the pbuf payload. + * + * The ->payload, ->tot_len and ->len fields are adjusted. + * + * @param p pbuf to change the header size. + * @param header_size_increment Number of bytes to increment header size which + * increases the size of the pbuf. New space is on the front. + * If header_size_increment is 0, this function does nothing and returns successful. + * + * PBUF_ROM and PBUF_REF type buffers cannot have their sizes increased, so + * the call will fail. A check is made that the increase in header size does + * not move the payload pointer in front of the start of the buffer. + * + * @return non-zero on failure, zero on success. + * + */ +u8_t +pbuf_add_header(struct pbuf *p, size_t header_size_increment) +{ + return pbuf_add_header_impl(p, header_size_increment, 0); +} + +/** + * Same as @ref pbuf_add_header but does not check if 'header_size > 0' is allowed. + * This is used internally only, to allow PBUF_REF for RX. + */ +u8_t +pbuf_add_header_force(struct pbuf *p, size_t header_size_increment) +{ + return pbuf_add_header_impl(p, header_size_increment, 1); +} + +/** + * Adjusts the payload pointer to hide headers in the payload. + * + * Adjusts the ->payload pointer so that space for a header + * disappears in the pbuf payload. + * + * The ->payload, ->tot_len and ->len fields are adjusted. + * + * @param p pbuf to change the header size. + * @param header_size_decrement Number of bytes to decrement header size which + * decreases the size of the pbuf. + * If header_size_decrement is 0, this function does nothing and returns successful. + * @return non-zero on failure, zero on success. 
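+ *
+ * Sketch (the 8-byte value is illustrative, e.g. the size of a UDP header):
+ *   if (pbuf_remove_header(p, 8) == 0) {
+ *     // p->payload, p->len and p->tot_len now exclude those 8 bytes
+ *   }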
+ * + */ +u8_t +pbuf_remove_header(struct pbuf *p, size_t header_size_decrement) +{ + void *payload; + u16_t increment_magnitude; + + LWIP_ASSERT("p != NULL", p != NULL); + if ((p == NULL) || (header_size_decrement > 0xFFFF)) { + return 1; + } + if (header_size_decrement == 0) { + return 0; + } + + increment_magnitude = (u16_t)header_size_decrement; + /* Check that we aren't going to move off the end of the pbuf */ + LWIP_ERROR("increment_magnitude <= p->len", (increment_magnitude <= p->len), return 1;); + + /* remember current payload pointer */ + payload = p->payload; + LWIP_UNUSED_ARG(payload); /* only used in LWIP_DEBUGF below */ + + /* increase payload pointer (guarded by length check above) */ + p->payload = (u8_t *)p->payload + header_size_decrement; + /* modify pbuf length fields */ + p->len = (u16_t)(p->len - increment_magnitude); + p->tot_len = (u16_t)(p->tot_len - increment_magnitude); + + LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_remove_header: old %p new %p (%"U16_F")\n", + (void *)payload, (void *)p->payload, increment_magnitude)); + + return 0; +} + +static u8_t +pbuf_header_impl(struct pbuf *p, s16_t header_size_increment, u8_t force) +{ + if (header_size_increment < 0) { + return pbuf_remove_header(p, (size_t) - header_size_increment); + } else { + return pbuf_add_header_impl(p, (size_t)header_size_increment, force); + } +} + +/** + * Adjusts the payload pointer to hide or reveal headers in the payload. + * + * Adjusts the ->payload pointer so that space for a header + * (dis)appears in the pbuf payload. + * + * The ->payload, ->tot_len and ->len fields are adjusted. + * + * @param p pbuf to change the header size. + * @param header_size_increment Number of bytes to increment header size which + * increases the size of the pbuf. New space is on the front. + * (Using a negative value decreases the header size.) + * If header_size_increment is 0, this function does nothing and returns successful. + * + * PBUF_ROM and PBUF_REF type buffers cannot have their sizes increased, so + * the call will fail. A check is made that the increase in header size does + * not move the payload pointer in front of the start of the buffer. + * @return non-zero on failure, zero on success. + * + */ +u8_t +pbuf_header(struct pbuf *p, s16_t header_size_increment) +{ + return pbuf_header_impl(p, header_size_increment, 0); +} + +/** + * Same as pbuf_header but does not check if 'header_size > 0' is allowed. + * This is used internally only, to allow PBUF_REF for RX. + */ +u8_t +pbuf_header_force(struct pbuf *p, s16_t header_size_increment) +{ + return pbuf_header_impl(p, header_size_increment, 1); +} + +/** Similar to pbuf_header(-size) but de-refs header pbufs for (size >= p->len) + * + * @param q pbufs to operate on + * @param size The number of bytes to remove from the beginning of the pbuf list. + * While size >= p->len, pbufs are freed. + * ATTENTION: this is the opposite direction as @ref pbuf_header, but + * takes an u16_t not s16_t! + * @return the new head pbuf + */ +struct pbuf * +pbuf_free_header(struct pbuf *q, u16_t size) +{ + struct pbuf *p = q; + u16_t free_left = size; + while (free_left && p) { + if (free_left >= p->len) { + struct pbuf *f = p; + free_left = (u16_t)(free_left - p->len); + p = p->next; + f->next = 0; + pbuf_free(f); + } else { + pbuf_remove_header(p, free_left); + free_left = 0; + } + } + return p; +} + +/** + * @ingroup pbuf + * Dereference a pbuf chain or queue and deallocate any no-longer-used + * pbufs at the head of this chain or queue. 
+ * + * Decrements the pbuf reference count. If it reaches zero, the pbuf is + * deallocated. + * + * For a pbuf chain, this is repeated for each pbuf in the chain, + * up to the first pbuf which has a non-zero reference count after + * decrementing. So, when all reference counts are one, the whole + * chain is free'd. + * + * @param p The pbuf (chain) to be dereferenced. + * + * @return the number of pbufs that were de-allocated + * from the head of the chain. + * + * @note MUST NOT be called on a packet queue (Not verified to work yet). + * @note the reference counter of a pbuf equals the number of pointers + * that refer to the pbuf (or into the pbuf). + * + * @internal examples: + * + * Assuming existing chains a->b->c with the following reference + * counts, calling pbuf_free(a) results in: + * + * 1->2->3 becomes ...1->3 + * 3->3->3 becomes 2->3->3 + * 1->1->2 becomes ......1 + * 2->1->1 becomes 1->1->1 + * 1->1->1 becomes ....... + * + */ +u8_t +pbuf_free(struct pbuf *p) +{ + u8_t alloc_src; + struct pbuf *q; + u8_t count; + + if (p == NULL) { + LWIP_ASSERT("p != NULL", p != NULL); + /* if assertions are disabled, proceed with debug output */ + LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_LEVEL_SERIOUS, + ("pbuf_free(p == NULL) was called.\n")); + return 0; + } + LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_free(%p)\n", (void *)p)); + + PERF_START; + + count = 0; + /* de-allocate all consecutive pbufs from the head of the chain that + * obtain a zero reference count after decrementing*/ + while (p != NULL) { + LWIP_PBUF_REF_T ref; + SYS_ARCH_DECL_PROTECT(old_level); + /* Since decrementing ref cannot be guaranteed to be a single machine operation + * we must protect it. We put the new ref into a local variable to prevent + * further protection. */ + SYS_ARCH_PROTECT(old_level); + /* all pbufs in a chain are referenced at least once */ + LWIP_ASSERT("pbuf_free: p->ref > 0", p->ref > 0); + /* decrease reference count (number of pointers to pbuf) */ + ref = --(p->ref); + SYS_ARCH_UNPROTECT(old_level); + /* this pbuf is no longer referenced to? */ + if (ref == 0) { + /* remember next pbuf in chain for next iteration */ + q = p->next; + LWIP_DEBUGF( PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_free: deallocating %p\n", (void *)p)); + alloc_src = pbuf_get_allocsrc(p); +#if LWIP_SUPPORT_CUSTOM_PBUF + /* is this a custom pbuf? */ + if ((p->flags & PBUF_FLAG_IS_CUSTOM) != 0) { + struct pbuf_custom *pc = (struct pbuf_custom *)p; + LWIP_ASSERT("pc->custom_free_function != NULL", pc->custom_free_function != NULL); + pc->custom_free_function(p); + } else +#endif /* LWIP_SUPPORT_CUSTOM_PBUF */ + { + /* is this a pbuf from the pool? */ + if (alloc_src == PBUF_TYPE_ALLOC_SRC_MASK_STD_MEMP_PBUF_POOL) { + memp_free(MEMP_PBUF_POOL, p); + /* is this a ROM or RAM referencing pbuf? 
*/ + } else if (alloc_src == PBUF_TYPE_ALLOC_SRC_MASK_STD_MEMP_PBUF) { + memp_free(MEMP_PBUF, p); + /* type == PBUF_RAM */ + } else if (alloc_src == PBUF_TYPE_ALLOC_SRC_MASK_STD_HEAP) { + mem_free(p); + } else { + /* @todo: support freeing other types */ + LWIP_ASSERT("invalid pbuf type", 0); + } + } + count++; + /* proceed to next pbuf */ + p = q; + /* p->ref > 0, this pbuf is still referenced to */ + /* (and so the remaining pbufs in chain as well) */ + } else { + LWIP_DEBUGF( PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_free: %p has ref %"U16_F", ending here.\n", (void *)p, (u16_t)ref)); + /* stop walking through the chain */ + p = NULL; + } + } + PERF_STOP("pbuf_free"); + /* return number of de-allocated pbufs */ + return count; +} + +/** + * Count number of pbufs in a chain + * + * @param p first pbuf of chain + * @return the number of pbufs in a chain + */ +u16_t +pbuf_clen(const struct pbuf *p) +{ + u16_t len; + + len = 0; + while (p != NULL) { + ++len; + p = p->next; + } + return len; +} + +/** + * @ingroup pbuf + * Increment the reference count of the pbuf. + * + * @param p pbuf to increase reference counter of + * + */ +void +pbuf_ref(struct pbuf *p) +{ + /* pbuf given? */ + if (p != NULL) { + SYS_ARCH_SET(p->ref, (LWIP_PBUF_REF_T)(p->ref + 1)); + LWIP_ASSERT("pbuf ref overflow", p->ref > 0); + } +} + +/** + * @ingroup pbuf + * Concatenate two pbufs (each may be a pbuf chain) and take over + * the caller's reference of the tail pbuf. + * + * @note The caller MAY NOT reference the tail pbuf afterwards. + * Use pbuf_chain() for that purpose. + * + * This function explicitly does not check for tot_len overflow to prevent + * failing to queue too long pbufs. This can produce invalid pbufs, so + * handle with care! + * + * @see pbuf_chain() + */ +void +pbuf_cat(struct pbuf *h, struct pbuf *t) +{ + struct pbuf *p; + + LWIP_ERROR("(h != NULL) && (t != NULL) (programmer violates API)", + ((h != NULL) && (t != NULL)), return;); + + /* proceed to last pbuf of chain */ + for (p = h; p->next != NULL; p = p->next) { + /* add total length of second chain to all totals of first chain */ + p->tot_len = (u16_t)(p->tot_len + t->tot_len); + } + /* { p is last pbuf of first h chain, p->next == NULL } */ + LWIP_ASSERT("p->tot_len == p->len (of last pbuf in chain)", p->tot_len == p->len); + LWIP_ASSERT("p->next == NULL", p->next == NULL); + /* add total length of second chain to last pbuf total of first chain */ + p->tot_len = (u16_t)(p->tot_len + t->tot_len); + /* chain last pbuf of head (p) with first of tail (t) */ + p->next = t; + /* p->next now references t, but the caller will drop its reference to t, + * so netto there is no change to the reference count of t. + */ +} + +/** + * @ingroup pbuf + * Chain two pbufs (or pbuf chains) together. + * + * The caller MUST call pbuf_free(t) once it has stopped + * using it. Use pbuf_cat() instead if you no longer use t. + * + * @param h head pbuf (chain) + * @param t tail pbuf (chain) + * @note The pbufs MUST belong to the same packet. + * @note MAY NOT be called on a packet queue. + * + * The ->tot_len fields of all pbufs of the head chain are adjusted. + * The ->next field of the last pbuf of the head chain is adjusted. + * The ->ref field of the first pbuf of the tail chain is adjusted. 
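+ *
+ * Ownership sketch contrasting the two calls ('h' and 't' are pbufs that the
+ * caller allocated earlier):
+ *   pbuf_chain(h, t);   // h now also references t ...
+ *   pbuf_free(t);       // ... so the caller still releases its own reference
+ *   // with pbuf_cat(h, t) the caller's reference is handed over instead,
+ *   // and no additional pbuf_free(t) follows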
+ * + */ +void +pbuf_chain(struct pbuf *h, struct pbuf *t) +{ + pbuf_cat(h, t); + /* t is now referenced by h */ + pbuf_ref(t); + LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_chain: %p references %p\n", (void *)h, (void *)t)); +} + +/** + * Dechains the first pbuf from its succeeding pbufs in the chain. + * + * Makes p->tot_len field equal to p->len. + * @param p pbuf to dechain + * @return remainder of the pbuf chain, or NULL if it was de-allocated. + * @note May not be called on a packet queue. + */ +struct pbuf * +pbuf_dechain(struct pbuf *p) +{ + struct pbuf *q; + u8_t tail_gone = 1; + /* tail */ + q = p->next; + /* pbuf has successor in chain? */ + if (q != NULL) { + /* assert tot_len invariant: (p->tot_len == p->len + (p->next? p->next->tot_len: 0) */ + LWIP_ASSERT("p->tot_len == p->len + q->tot_len", q->tot_len == p->tot_len - p->len); + /* enforce invariant if assertion is disabled */ + q->tot_len = (u16_t)(p->tot_len - p->len); + /* decouple pbuf from remainder */ + p->next = NULL; + /* total length of pbuf p is its own length only */ + p->tot_len = p->len; + /* q is no longer referenced by p, free it */ + LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_dechain: unreferencing %p\n", (void *)q)); + tail_gone = pbuf_free(q); + if (tail_gone > 0) { + LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, + ("pbuf_dechain: deallocated %p (as it is no longer referenced)\n", (void *)q)); + } + /* return remaining tail or NULL if deallocated */ + } + /* assert tot_len invariant: (p->tot_len == p->len + (p->next? p->next->tot_len: 0) */ + LWIP_ASSERT("p->tot_len == p->len", p->tot_len == p->len); + return ((tail_gone > 0) ? NULL : q); +} + +/** + * @ingroup pbuf + * Create PBUF_RAM copies of pbufs. + * + * Used to queue packets on behalf of the lwIP stack, such as + * ARP based queueing. + * + * @note You MUST explicitly use p = pbuf_take(p); + * + * @note Only one packet is copied, no packet queue! + * + * @param p_to pbuf destination of the copy + * @param p_from pbuf source of the copy + * + * @return ERR_OK if pbuf was copied + * ERR_ARG if one of the pbufs is NULL or p_to is not big + * enough to hold p_from + */ +err_t +pbuf_copy(struct pbuf *p_to, const struct pbuf *p_from) +{ + size_t offset_to = 0, offset_from = 0, len; + + LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_copy(%p, %p)\n", + (const void *)p_to, (const void *)p_from)); + + /* is the target big enough to hold the source? 
*/ + LWIP_ERROR("pbuf_copy: target not big enough to hold source", ((p_to != NULL) && + (p_from != NULL) && (p_to->tot_len >= p_from->tot_len)), return ERR_ARG;); + + /* iterate through pbuf chain */ + do { + /* copy one part of the original chain */ + if ((p_to->len - offset_to) >= (p_from->len - offset_from)) { + /* complete current p_from fits into current p_to */ + len = p_from->len - offset_from; + } else { + /* current p_from does not fit into current p_to */ + len = p_to->len - offset_to; + } + MEMCPY((u8_t *)p_to->payload + offset_to, (u8_t *)p_from->payload + offset_from, len); + offset_to += len; + offset_from += len; + LWIP_ASSERT("offset_to <= p_to->len", offset_to <= p_to->len); + LWIP_ASSERT("offset_from <= p_from->len", offset_from <= p_from->len); + if (offset_from >= p_from->len) { + /* on to next p_from (if any) */ + offset_from = 0; + p_from = p_from->next; + } + if (offset_to == p_to->len) { + /* on to next p_to (if any) */ + offset_to = 0; + p_to = p_to->next; + LWIP_ERROR("p_to != NULL", (p_to != NULL) || (p_from == NULL), return ERR_ARG;); + } + + if ((p_from != NULL) && (p_from->len == p_from->tot_len)) { + /* don't copy more than one packet! */ + LWIP_ERROR("pbuf_copy() does not allow packet queues!", + (p_from->next == NULL), return ERR_VAL;); + } + if ((p_to != NULL) && (p_to->len == p_to->tot_len)) { + /* don't copy more than one packet! */ + LWIP_ERROR("pbuf_copy() does not allow packet queues!", + (p_to->next == NULL), return ERR_VAL;); + } + } while (p_from); + LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_copy: end of chain reached.\n")); + return ERR_OK; +} + +/** + * @ingroup pbuf + * Copy (part of) the contents of a packet buffer + * to an application supplied buffer. + * + * @param buf the pbuf from which to copy data + * @param dataptr the application supplied buffer + * @param len length of data to copy (dataptr must be big enough). No more + * than buf->tot_len will be copied, irrespective of len + * @param offset offset into the packet buffer from where to begin copying len bytes + * @return the number of bytes copied, or 0 on failure + */ +u16_t +pbuf_copy_partial(const struct pbuf *buf, void *dataptr, u16_t len, u16_t offset) +{ + const struct pbuf *p; + u16_t left = 0; + u16_t buf_copy_len; + u16_t copied_total = 0; + + LWIP_ERROR("pbuf_copy_partial: invalid buf", (buf != NULL), return 0;); + LWIP_ERROR("pbuf_copy_partial: invalid dataptr", (dataptr != NULL), return 0;); + + /* Note some systems use byte copy if dataptr or one of the pbuf payload pointers are unaligned. */ + for (p = buf; len != 0 && p != NULL; p = p->next) { + if ((offset != 0) && (offset >= p->len)) { + /* don't copy from this buffer -> on to the next */ + offset = (u16_t)(offset - p->len); + } else { + /* copy from this buffer. maybe only partially. */ + buf_copy_len = (u16_t)(p->len - offset); + if (buf_copy_len > len) { + buf_copy_len = len; + } + /* copy the necessary parts of the buffer */ + MEMCPY(&((char *)dataptr)[left], &((char *)p->payload)[offset], buf_copy_len); + copied_total = (u16_t)(copied_total + buf_copy_len); + left = (u16_t)(left + buf_copy_len); + len = (u16_t)(len - buf_copy_len); + offset = 0; + } + } + return copied_total; +} + +/** + * @ingroup pbuf + * Get part of a pbuf's payload as contiguous memory. The returned memory is + * either a pointer into the pbuf's payload or, if split over multiple pbufs, + * a copy into the user-supplied buffer. 
+ * + * @param p the pbuf from which to copy data + * @param buffer the application supplied buffer + * @param bufsize size of the application supplied buffer + * @param len length of data to copy (dataptr must be big enough). No more + * than buf->tot_len will be copied, irrespective of len + * @param offset offset into the packet buffer from where to begin copying len bytes + * @return the number of bytes copied, or 0 on failure + */ +void * +pbuf_get_contiguous(const struct pbuf *p, void *buffer, size_t bufsize, u16_t len, u16_t offset) +{ + const struct pbuf *q; + u16_t out_offset; + + LWIP_ERROR("pbuf_get_contiguous: invalid buf", (p != NULL), return NULL;); + LWIP_ERROR("pbuf_get_contiguous: invalid dataptr", (buffer != NULL), return NULL;); + LWIP_ERROR("pbuf_get_contiguous: invalid dataptr", (bufsize >= len), return NULL;); + + q = pbuf_skip_const(p, offset, &out_offset); + if (q != NULL) { + if (q->len >= (out_offset + len)) { + /* all data in this pbuf, return zero-copy */ + return (u8_t *)q->payload + out_offset; + } + /* need to copy */ + if (pbuf_copy_partial(q, buffer, len, out_offset) != len) { + /* copying failed: pbuf is too short */ + return NULL; + } + return buffer; + } + /* pbuf is too short (offset does not fit in) */ + return NULL; +} + +#if LWIP_TCP && TCP_QUEUE_OOSEQ && LWIP_WND_SCALE +/** + * This method modifies a 'pbuf chain', so that its total length is + * smaller than 64K. The remainder of the original pbuf chain is stored + * in *rest. + * This function never creates new pbufs, but splits an existing chain + * in two parts. The tot_len of the modified packet queue will likely be + * smaller than 64K. + * 'packet queues' are not supported by this function. + * + * @param p the pbuf queue to be split + * @param rest pointer to store the remainder (after the first 64K) + */ +void pbuf_split_64k(struct pbuf *p, struct pbuf **rest) +{ + *rest = NULL; + if ((p != NULL) && (p->next != NULL)) { + u16_t tot_len_front = p->len; + struct pbuf *i = p; + struct pbuf *r = p->next; + + /* continue until the total length (summed up as u16_t) overflows */ + while ((r != NULL) && ((u16_t)(tot_len_front + r->len) >= tot_len_front)) { + tot_len_front = (u16_t)(tot_len_front + r->len); + i = r; + r = r->next; + } + /* i now points to last packet of the first segment. Set next + pointer to NULL */ + i->next = NULL; + + if (r != NULL) { + /* Update the tot_len field in the first part */ + for (i = p; i != NULL; i = i->next) { + i->tot_len = (u16_t)(i->tot_len - r->tot_len); + LWIP_ASSERT("tot_len/len mismatch in last pbuf", + (i->next != NULL) || (i->tot_len == i->len)); + } + if (p->flags & PBUF_FLAG_TCP_FIN) { + r->flags |= PBUF_FLAG_TCP_FIN; + } + + /* tot_len field in rest does not need modifications */ + /* reference counters do not need modifications */ + *rest = r; + } + } +} +#endif /* LWIP_TCP && TCP_QUEUE_OOSEQ && LWIP_WND_SCALE */ + +/* Actual implementation of pbuf_skip() but returning const pointer... 
*/ +static const struct pbuf * +pbuf_skip_const(const struct pbuf *in, u16_t in_offset, u16_t *out_offset) +{ + u16_t offset_left = in_offset; + const struct pbuf *q = in; + + /* get the correct pbuf */ + while ((q != NULL) && (q->len <= offset_left)) { + offset_left = (u16_t)(offset_left - q->len); + q = q->next; + } + if (out_offset != NULL) { + *out_offset = offset_left; + } + return q; +} + +/** + * @ingroup pbuf + * Skip a number of bytes at the start of a pbuf + * + * @param in input pbuf + * @param in_offset offset to skip + * @param out_offset resulting offset in the returned pbuf + * @return the pbuf in the queue where the offset is + */ +struct pbuf * +pbuf_skip(struct pbuf *in, u16_t in_offset, u16_t *out_offset) +{ + const struct pbuf *out = pbuf_skip_const(in, in_offset, out_offset); + return LWIP_CONST_CAST(struct pbuf *, out); +} + +/** + * @ingroup pbuf + * Copy application supplied data into a pbuf. + * This function can only be used to copy the equivalent of buf->tot_len data. + * + * @param buf pbuf to fill with data + * @param dataptr application supplied data buffer + * @param len length of the application supplied data buffer + * + * @return ERR_OK if successful, ERR_MEM if the pbuf is not big enough + */ +err_t +pbuf_take(struct pbuf *buf, const void *dataptr, u16_t len) +{ + struct pbuf *p; + size_t buf_copy_len; + size_t total_copy_len = len; + size_t copied_total = 0; + + LWIP_ERROR("pbuf_take: invalid buf", (buf != NULL), return ERR_ARG;); + LWIP_ERROR("pbuf_take: invalid dataptr", (dataptr != NULL), return ERR_ARG;); + LWIP_ERROR("pbuf_take: buf not large enough", (buf->tot_len >= len), return ERR_MEM;); + + if ((buf == NULL) || (dataptr == NULL) || (buf->tot_len < len)) { + return ERR_ARG; + } + + /* Note some systems use byte copy if dataptr or one of the pbuf payload pointers are unaligned. 
*/ + for (p = buf; total_copy_len != 0; p = p->next) { + LWIP_ASSERT("pbuf_take: invalid pbuf", p != NULL); + buf_copy_len = total_copy_len; + if (buf_copy_len > p->len) { + /* this pbuf cannot hold all remaining data */ + buf_copy_len = p->len; + } + /* copy the necessary parts of the buffer */ + MEMCPY(p->payload, &((const char *)dataptr)[copied_total], buf_copy_len); + total_copy_len -= buf_copy_len; + copied_total += buf_copy_len; + } + LWIP_ASSERT("did not copy all data", total_copy_len == 0 && copied_total == len); + return ERR_OK; +} + +/** + * @ingroup pbuf + * Same as pbuf_take() but puts data at an offset + * + * @param buf pbuf to fill with data + * @param dataptr application supplied data buffer + * @param len length of the application supplied data buffer + * @param offset offset in pbuf where to copy dataptr to + * + * @return ERR_OK if successful, ERR_MEM if the pbuf is not big enough + */ +err_t +pbuf_take_at(struct pbuf *buf, const void *dataptr, u16_t len, u16_t offset) +{ + u16_t target_offset; + struct pbuf *q = pbuf_skip(buf, offset, &target_offset); + + /* return requested data if pbuf is OK */ + if ((q != NULL) && (q->tot_len >= target_offset + len)) { + u16_t remaining_len = len; + const u8_t *src_ptr = (const u8_t *)dataptr; + /* copy the part that goes into the first pbuf */ + u16_t first_copy_len; + LWIP_ASSERT("check pbuf_skip result", target_offset < q->len); + first_copy_len = (u16_t)LWIP_MIN(q->len - target_offset, len); + MEMCPY(((u8_t *)q->payload) + target_offset, dataptr, first_copy_len); + remaining_len = (u16_t)(remaining_len - first_copy_len); + src_ptr += first_copy_len; + if (remaining_len > 0) { + return pbuf_take(q->next, src_ptr, remaining_len); + } + return ERR_OK; + } + return ERR_MEM; +} + +/** + * @ingroup pbuf + * Creates a single pbuf out of a queue of pbufs. + * + * @remark: Either the source pbuf 'p' is freed by this function or the original + * pbuf 'p' is returned, therefore the caller has to check the result! + * + * @param p the source pbuf + * @param layer pbuf_layer of the new pbuf + * + * @return a new, single pbuf (p->next is NULL) + * or the old pbuf if allocation fails + */ +struct pbuf * +pbuf_coalesce(struct pbuf *p, pbuf_layer layer) +{ + struct pbuf *q; + if (p->next == NULL) { + return p; + } + q = pbuf_clone(layer, PBUF_RAM, p); + if (q == NULL) { + /* @todo: what do we do now? */ + return p; + } + pbuf_free(p); + return q; +} + +/** + * @ingroup pbuf + * Allocates a new pbuf of same length (via pbuf_alloc()) and copies the source + * pbuf into this new pbuf (using pbuf_copy()). + * + * @param layer pbuf_layer of the new pbuf + * @param type this parameter decides how and where the pbuf should be allocated + * (@see pbuf_alloc()) + * @param p the source pbuf + * + * @return a new pbuf or NULL if allocation fails + */ +struct pbuf * +pbuf_clone(pbuf_layer layer, pbuf_type type, struct pbuf *p) +{ + struct pbuf *q; + err_t err; + q = pbuf_alloc(layer, p->tot_len, type); + if (q == NULL) { + return NULL; + } + err = pbuf_copy(q, p); + LWIP_UNUSED_ARG(err); /* in case of LWIP_NOASSERT */ + LWIP_ASSERT("pbuf_copy failed", err == ERR_OK); + return q; +} + +#if LWIP_CHECKSUM_ON_COPY +/** + * Copies data into a single pbuf (*not* into a pbuf queue!) 
and updates + * the checksum while copying + * + * @param p the pbuf to copy data into + * @param start_offset offset of p->payload where to copy the data to + * @param dataptr data to copy into the pbuf + * @param len length of data to copy into the pbuf + * @param chksum pointer to the checksum which is updated + * @return ERR_OK if successful, another error if the data does not fit + * within the (first) pbuf (no pbuf queues!) + */ +err_t +pbuf_fill_chksum(struct pbuf *p, u16_t start_offset, const void *dataptr, + u16_t len, u16_t *chksum) +{ + u32_t acc; + u16_t copy_chksum; + char *dst_ptr; + LWIP_ASSERT("p != NULL", p != NULL); + LWIP_ASSERT("dataptr != NULL", dataptr != NULL); + LWIP_ASSERT("chksum != NULL", chksum != NULL); + LWIP_ASSERT("len != 0", len != 0); + + if ((start_offset >= p->len) || (start_offset + len > p->len)) { + return ERR_ARG; + } + + dst_ptr = ((char *)p->payload) + start_offset; + copy_chksum = LWIP_CHKSUM_COPY(dst_ptr, dataptr, len); + if ((start_offset & 1) != 0) { + copy_chksum = SWAP_BYTES_IN_WORD(copy_chksum); + } + acc = *chksum; + acc += copy_chksum; + *chksum = FOLD_U32T(acc); + return ERR_OK; +} +#endif /* LWIP_CHECKSUM_ON_COPY */ + +/** + * @ingroup pbuf + * Get one byte from the specified position in a pbuf + * WARNING: returns zero for offset >= p->tot_len + * + * @param p pbuf to parse + * @param offset offset into p of the byte to return + * @return byte at an offset into p OR ZERO IF 'offset' >= p->tot_len + */ +u8_t +pbuf_get_at(const struct pbuf *p, u16_t offset) +{ + int ret = pbuf_try_get_at(p, offset); + if (ret >= 0) { + return (u8_t)ret; + } + return 0; +} + +/** + * @ingroup pbuf + * Get one byte from the specified position in a pbuf + * + * @param p pbuf to parse + * @param offset offset into p of the byte to return + * @return byte at an offset into p [0..0xFF] OR negative if 'offset' >= p->tot_len + */ +int +pbuf_try_get_at(const struct pbuf *p, u16_t offset) +{ + u16_t q_idx; + const struct pbuf *q = pbuf_skip_const(p, offset, &q_idx); + + /* return requested data if pbuf is OK */ + if ((q != NULL) && (q->len > q_idx)) { + return ((u8_t *)q->payload)[q_idx]; + } + return -1; +} + +/** + * @ingroup pbuf + * Put one byte to the specified position in a pbuf + * WARNING: silently ignores offset >= p->tot_len + * + * @param p pbuf to fill + * @param offset offset into p of the byte to write + * @param data byte to write at an offset into p + */ +void +pbuf_put_at(struct pbuf *p, u16_t offset, u8_t data) +{ + u16_t q_idx; + struct pbuf *q = pbuf_skip(p, offset, &q_idx); + + /* write requested data if pbuf is OK */ + if ((q != NULL) && (q->len > q_idx)) { + ((u8_t *)q->payload)[q_idx] = data; + } +} + +/** + * @ingroup pbuf + * Compare pbuf contents at specified offset with memory s2, both of length n + * + * @param p pbuf to compare + * @param offset offset into p at which to start comparing + * @param s2 buffer to compare + * @param n length of buffer to compare + * @return zero if equal, nonzero otherwise + * (0xffff if p is too short, diffoffset+1 otherwise) + */ +u16_t +pbuf_memcmp(const struct pbuf *p, u16_t offset, const void *s2, u16_t n) +{ + u16_t start = offset; + const struct pbuf *q = p; + u16_t i; + + /* pbuf long enough to perform check? */ + if (p->tot_len < (offset + n)) { + return 0xffff; + } + + /* get the correct pbuf from chain. We know it succeeds because of p->tot_len check above. 
*/ + while ((q != NULL) && (q->len <= start)) { + start = (u16_t)(start - q->len); + q = q->next; + } + + /* return requested data if pbuf is OK */ + for (i = 0; i < n; i++) { + /* We know pbuf_get_at() succeeds because of p->tot_len check above. */ + u8_t a = pbuf_get_at(q, (u16_t)(start + i)); + u8_t b = ((const u8_t *)s2)[i]; + if (a != b) { + return (u16_t)LWIP_MIN(i + 1, 0xFFFF); + } + } + return 0; +} + +/** + * @ingroup pbuf + * Find occurrence of mem (with length mem_len) in pbuf p, starting at offset + * start_offset. + * + * @param p pbuf to search, maximum length is 0xFFFE since 0xFFFF is used as + * return value 'not found' + * @param mem search for the contents of this buffer + * @param mem_len length of 'mem' + * @param start_offset offset into p at which to start searching + * @return 0xFFFF if substr was not found in p or the index where it was found + */ +u16_t +pbuf_memfind(const struct pbuf *p, const void *mem, u16_t mem_len, u16_t start_offset) +{ + u16_t i; + u16_t max_cmp_start = (u16_t)(p->tot_len - mem_len); + if (p->tot_len >= mem_len + start_offset) { + for (i = start_offset; i <= max_cmp_start; i++) { + u16_t plus = pbuf_memcmp(p, i, mem, mem_len); + if (plus == 0) { + return i; + } + } + } + return 0xFFFF; +} + +/** + * Find occurrence of substr with length substr_len in pbuf p, start at offset + * start_offset + * WARNING: in contrast to strstr(), this one does not stop at the first \0 in + * the pbuf/source string! + * + * @param p pbuf to search, maximum length is 0xFFFE since 0xFFFF is used as + * return value 'not found' + * @param substr string to search for in p, maximum length is 0xFFFE + * @return 0xFFFF if substr was not found in p or the index where it was found + */ +u16_t +pbuf_strstr(const struct pbuf *p, const char *substr) +{ + size_t substr_len; + if ((substr == NULL) || (substr[0] == 0) || (p->tot_len == 0xFFFF)) { + return 0xFFFF; + } + substr_len = strlen(substr); + if (substr_len >= 0xFFFF) { + return 0xFFFF; + } + return pbuf_memfind(p, substr, (u16_t)substr_len, 0); +} diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/raw.c b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/raw.c new file mode 100644 index 0000000..7a42432 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/raw.c @@ -0,0 +1,671 @@ +/** + * @file + * Implementation of raw protocol PCBs for low-level handling of + * different types of protocols besides (or overriding) those + * already available in lwIP.\n + * See also @ref raw_raw + * + * @defgroup raw_raw RAW + * @ingroup callbackstyle_api + * Implementation of raw protocol PCBs for low-level handling of + * different types of protocols besides (or overriding) those + * already available in lwIP.\n + * @see @ref api + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. 
The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ + +#include "lwip/opt.h" + +#if LWIP_RAW /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/def.h" +#include "lwip/memp.h" +#include "lwip/ip_addr.h" +#include "lwip/netif.h" +#include "lwip/raw.h" +#include "lwip/priv/raw_priv.h" +#include "lwip/stats.h" +#include "lwip/ip6.h" +#include "lwip/ip6_addr.h" +#include "lwip/inet_chksum.h" + +#include + +/** The list of RAW PCBs */ +static struct raw_pcb *raw_pcbs; + +static u8_t +raw_input_local_match(struct raw_pcb *pcb, u8_t broadcast) +{ + LWIP_UNUSED_ARG(broadcast); /* in IPv6 only case */ + + /* check if PCB is bound to specific netif */ + if ((pcb->netif_idx != NETIF_NO_INDEX) && + (pcb->netif_idx != netif_get_index(ip_data.current_input_netif))) { + return 0; + } + +#if LWIP_IPV4 && LWIP_IPV6 + /* Dual-stack: PCBs listening to any IP type also listen to any IP address */ + if (IP_IS_ANY_TYPE_VAL(pcb->local_ip)) { +#if IP_SOF_BROADCAST_RECV + if ((broadcast != 0) && !ip_get_option(pcb, SOF_BROADCAST)) { + return 0; + } +#endif /* IP_SOF_BROADCAST_RECV */ + return 1; + } +#endif /* LWIP_IPV4 && LWIP_IPV6 */ + + /* Only need to check PCB if incoming IP version matches PCB IP version */ + if (IP_ADDR_PCB_VERSION_MATCH_EXACT(pcb, ip_current_dest_addr())) { +#if LWIP_IPV4 + /* Special case: IPv4 broadcast: receive all broadcasts + * Note: broadcast variable can only be 1 if it is an IPv4 broadcast */ + if (broadcast != 0) { +#if IP_SOF_BROADCAST_RECV + if (ip_get_option(pcb, SOF_BROADCAST)) +#endif /* IP_SOF_BROADCAST_RECV */ + { + if (ip4_addr_isany(ip_2_ip4(&pcb->local_ip))) { + return 1; + } + } + } else +#endif /* LWIP_IPV4 */ + /* Handle IPv4 and IPv6: catch all or exact match */ + if (ip_addr_isany(&pcb->local_ip) || + ip_addr_cmp(&pcb->local_ip, ip_current_dest_addr())) { + return 1; + } + } + + return 0; +} + +/** + * Determine if in incoming IP packet is covered by a RAW PCB + * and if so, pass it to a user-provided receive callback function. + * + * Given an incoming IP datagram (as a chain of pbufs) this function + * finds a corresponding RAW PCB and calls the corresponding receive + * callback function. + * + * @param p pbuf to be demultiplexed to a RAW PCB. + * @param inp network interface on which the datagram was received. + * @return - 1 if the packet has been eaten by a RAW PCB receive + * callback function. The caller MAY NOT not reference the + * packet any longer, and MAY NOT call pbuf_free(). + * @return - 0 if packet is not eaten (pbuf is still referenced by the + * caller). 
+ * + */ +raw_input_state_t +raw_input(struct pbuf *p, struct netif *inp) +{ + struct raw_pcb *pcb, *prev; + s16_t proto; + raw_input_state_t ret = RAW_INPUT_NONE; + u8_t broadcast = ip_addr_isbroadcast(ip_current_dest_addr(), ip_current_netif()); + + LWIP_UNUSED_ARG(inp); + +#if LWIP_IPV6 +#if LWIP_IPV4 + if (IP_HDR_GET_VERSION(p->payload) == 6) +#endif /* LWIP_IPV4 */ + { + struct ip6_hdr *ip6hdr = (struct ip6_hdr *)p->payload; + proto = IP6H_NEXTH(ip6hdr); + } +#if LWIP_IPV4 + else +#endif /* LWIP_IPV4 */ +#endif /* LWIP_IPV6 */ +#if LWIP_IPV4 + { + proto = IPH_PROTO((struct ip_hdr *)p->payload); + } +#endif /* LWIP_IPV4 */ + + prev = NULL; + pcb = raw_pcbs; + /* loop through all raw pcbs until the packet is eaten by one */ + /* this allows multiple pcbs to match against the packet by design */ + while (pcb != NULL) { + if ((pcb->protocol == proto) && raw_input_local_match(pcb, broadcast) && + (((pcb->flags & RAW_FLAGS_CONNECTED) == 0) || + ip_addr_cmp(&pcb->remote_ip, ip_current_src_addr()))) { + /* receive callback function available? */ + if (pcb->recv != NULL) { + u8_t eaten; +#ifndef LWIP_NOASSERT + void *old_payload = p->payload; +#endif + ret = RAW_INPUT_DELIVERED; + /* the receive callback function did not eat the packet? */ + eaten = pcb->recv(pcb->recv_arg, pcb, p, ip_current_src_addr()); + if (eaten != 0) { + /* receive function ate the packet */ + p = NULL; + if (prev != NULL) { + /* move the pcb to the front of raw_pcbs so that is + found faster next time */ + prev->next = pcb->next; + pcb->next = raw_pcbs; + raw_pcbs = pcb; + } + return RAW_INPUT_EATEN; + } else { + /* sanity-check that the receive callback did not alter the pbuf */ + LWIP_ASSERT("raw pcb recv callback altered pbuf payload pointer without eating packet", + p->payload == old_payload); + } + } + /* no receive callback function was set for this raw PCB */ + } + /* drop the packet */ + prev = pcb; + pcb = pcb->next; + } + return ret; +} + +/** + * @ingroup raw_raw + * Bind a RAW PCB. + * + * @param pcb RAW PCB to be bound with a local address ipaddr. + * @param ipaddr local IP address to bind with. Use IP4_ADDR_ANY to + * bind to all local interfaces. + * + * @return lwIP error code. + * - ERR_OK. Successful. No error occurred. + * - ERR_USE. The specified IP address is already bound to by + * another RAW PCB. + * + * @see raw_disconnect() + */ +err_t +raw_bind(struct raw_pcb *pcb, const ip_addr_t *ipaddr) +{ + LWIP_ASSERT_CORE_LOCKED(); + if ((pcb == NULL) || (ipaddr == NULL)) { + return ERR_VAL; + } + ip_addr_set_ipaddr(&pcb->local_ip, ipaddr); +#if LWIP_IPV6 && LWIP_IPV6_SCOPES + /* If the given IP address should have a zone but doesn't, assign one now. + * This is legacy support: scope-aware callers should always provide properly + * zoned source addresses. */ + if (IP_IS_V6(&pcb->local_ip) && + ip6_addr_lacks_zone(ip_2_ip6(&pcb->local_ip), IP6_UNKNOWN)) { + ip6_addr_select_zone(ip_2_ip6(&pcb->local_ip), ip_2_ip6(&pcb->local_ip)); + } +#endif /* LWIP_IPV6 && LWIP_IPV6_SCOPES */ + return ERR_OK; +} + +/** + * @ingroup raw_raw + * Bind an RAW PCB to a specific netif. + * After calling this function, all packets received via this PCB + * are guaranteed to have come in via the specified netif, and all + * outgoing packets will go out via the specified netif. + * + * @param pcb RAW PCB to be bound with netif. + * @param netif netif to bind to. Can be NULL. 
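+ *
+ * Combined sketch (IP_PROTO_ICMP, raw_new() and netif_default are the usual
+ * lwIP symbols; the choice of ICMP is illustrative):
+ *   struct raw_pcb *pcb = raw_new(IP_PROTO_ICMP);
+ *   if (pcb != NULL) {
+ *     raw_bind(pcb, IP4_ADDR_ANY);
+ *     raw_bind_netif(pcb, netif_default);
+ *   }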
+ * + * @see raw_disconnect() + */ +void +raw_bind_netif(struct raw_pcb *pcb, const struct netif *netif) +{ + LWIP_ASSERT_CORE_LOCKED(); + if (netif != NULL) { + pcb->netif_idx = netif_get_index(netif); + } else { + pcb->netif_idx = NETIF_NO_INDEX; + } +} + +/** + * @ingroup raw_raw + * Connect an RAW PCB. This function is required by upper layers + * of lwip. Using the raw api you could use raw_sendto() instead + * + * This will associate the RAW PCB with the remote address. + * + * @param pcb RAW PCB to be connected with remote address ipaddr and port. + * @param ipaddr remote IP address to connect with. + * + * @return lwIP error code + * + * @see raw_disconnect() and raw_sendto() + */ +err_t +raw_connect(struct raw_pcb *pcb, const ip_addr_t *ipaddr) +{ + LWIP_ASSERT_CORE_LOCKED(); + if ((pcb == NULL) || (ipaddr == NULL)) { + return ERR_VAL; + } + ip_addr_set_ipaddr(&pcb->remote_ip, ipaddr); +#if LWIP_IPV6 && LWIP_IPV6_SCOPES + /* If the given IP address should have a zone but doesn't, assign one now, + * using the bound address to make a more informed decision when possible. */ + if (IP_IS_V6(&pcb->remote_ip) && + ip6_addr_lacks_zone(ip_2_ip6(&pcb->remote_ip), IP6_UNKNOWN)) { + ip6_addr_select_zone(ip_2_ip6(&pcb->remote_ip), ip_2_ip6(&pcb->local_ip)); + } +#endif /* LWIP_IPV6 && LWIP_IPV6_SCOPES */ + raw_set_flags(pcb, RAW_FLAGS_CONNECTED); + return ERR_OK; +} + +/** + * @ingroup raw_raw + * Disconnect a RAW PCB. + * + * @param pcb the raw pcb to disconnect. + */ +void +raw_disconnect(struct raw_pcb *pcb) +{ + LWIP_ASSERT_CORE_LOCKED(); + /* reset remote address association */ +#if LWIP_IPV4 && LWIP_IPV6 + if (IP_IS_ANY_TYPE_VAL(pcb->local_ip)) { + ip_addr_copy(pcb->remote_ip, *IP_ANY_TYPE); + } else { +#endif + ip_addr_set_any(IP_IS_V6_VAL(pcb->remote_ip), &pcb->remote_ip); +#if LWIP_IPV4 && LWIP_IPV6 + } +#endif + pcb->netif_idx = NETIF_NO_INDEX; + /* mark PCB as unconnected */ + raw_clear_flags(pcb, RAW_FLAGS_CONNECTED); +} + +/** + * @ingroup raw_raw + * Set the callback function for received packets that match the + * raw PCB's protocol and binding. + * + * The callback function MUST either + * - eat the packet by calling pbuf_free() and returning non-zero. The + * packet will not be passed to other raw PCBs or other protocol layers. + * - not free the packet, and return zero. The packet will be matched + * against further PCBs and/or forwarded to another protocol layers. + */ +void +raw_recv(struct raw_pcb *pcb, raw_recv_fn recv, void *recv_arg) +{ + LWIP_ASSERT_CORE_LOCKED(); + /* remember recv() callback and user data */ + pcb->recv = recv; + pcb->recv_arg = recv_arg; +} + +/** + * @ingroup raw_raw + * Send the raw IP packet to the given address. An IP header will be prepended + * to the packet, unless the RAW_FLAGS_HDRINCL flag is set on the PCB. In that + * case, the packet must include an IP header, which will then be sent as is. 
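+ *
+ * Send sketch ('data' and 'dest' are caller-provided; sizes are illustrative):
+ *   struct pbuf *q = pbuf_alloc(PBUF_IP, sizeof(data), PBUF_RAM);
+ *   if (q != NULL) {
+ *     pbuf_take(q, data, sizeof(data));
+ *     raw_sendto(pcb, q, &dest);
+ *     pbuf_free(q);   // the caller keeps ownership of q
+ *   }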
+ * + * @param pcb the raw pcb which to send + * @param p the IP payload to send + * @param ipaddr the destination address of the IP packet + * + */ +err_t +raw_sendto(struct raw_pcb *pcb, struct pbuf *p, const ip_addr_t *ipaddr) +{ + struct netif *netif; + const ip_addr_t *src_ip; + + if ((pcb == NULL) || (ipaddr == NULL) || !IP_ADDR_PCB_VERSION_MATCH(pcb, ipaddr)) { + return ERR_VAL; + } + + LWIP_DEBUGF(RAW_DEBUG | LWIP_DBG_TRACE, ("raw_sendto\n")); + + if (pcb->netif_idx != NETIF_NO_INDEX) { + netif = netif_get_by_index(pcb->netif_idx); + } else { +#if LWIP_MULTICAST_TX_OPTIONS + netif = NULL; + if (ip_addr_ismulticast(ipaddr)) { + /* For multicast-destined packets, use the user-provided interface index to + * determine the outgoing interface, if an interface index is set and a + * matching netif can be found. Otherwise, fall back to regular routing. */ + netif = netif_get_by_index(pcb->mcast_ifindex); + } + + if (netif == NULL) +#endif /* LWIP_MULTICAST_TX_OPTIONS */ + { + netif = ip_route(&pcb->local_ip, ipaddr); + } + } + + if (netif == NULL) { + LWIP_DEBUGF(RAW_DEBUG | LWIP_DBG_LEVEL_WARNING, ("raw_sendto: No route to ")); + ip_addr_debug_print(RAW_DEBUG | LWIP_DBG_LEVEL_WARNING, ipaddr); + return ERR_RTE; + } + + if (ip_addr_isany(&pcb->local_ip) || ip_addr_ismulticast(&pcb->local_ip)) { + /* use outgoing network interface IP address as source address */ + src_ip = ip_netif_get_local_ip(netif, ipaddr); +#if LWIP_IPV6 + if (src_ip == NULL) { + return ERR_RTE; + } +#endif /* LWIP_IPV6 */ + } else { + /* use RAW PCB local IP address as source address */ + src_ip = &pcb->local_ip; + } + + return raw_sendto_if_src(pcb, p, ipaddr, netif, src_ip); +} + +/** + * @ingroup raw_raw + * Send the raw IP packet to the given address, using a particular outgoing + * netif and source IP address. An IP header will be prepended to the packet, + * unless the RAW_FLAGS_HDRINCL flag is set on the PCB. In that case, the + * packet must include an IP header, which will then be sent as is. + * + * @param pcb RAW PCB used to send the data + * @param p chain of pbufs to be sent + * @param dst_ip destination IP address + * @param netif the netif used for sending + * @param src_ip source IP address + */ +err_t +raw_sendto_if_src(struct raw_pcb *pcb, struct pbuf *p, const ip_addr_t *dst_ip, + struct netif *netif, const ip_addr_t *src_ip) +{ + err_t err; + struct pbuf *q; /* q will be sent down the stack */ + u16_t header_size; + u8_t ttl; + + LWIP_ASSERT_CORE_LOCKED(); + + if ((pcb == NULL) || (dst_ip == NULL) || (netif == NULL) || (src_ip == NULL) || + !IP_ADDR_PCB_VERSION_MATCH(pcb, src_ip) || !IP_ADDR_PCB_VERSION_MATCH(pcb, dst_ip)) { + return ERR_VAL; + } + + header_size = ( +#if LWIP_IPV4 && LWIP_IPV6 + IP_IS_V6(dst_ip) ? IP6_HLEN : IP_HLEN); +#elif LWIP_IPV4 + IP_HLEN); +#else + IP6_HLEN); +#endif + + /* Handle the HDRINCL option as an exception: none of the code below applies + * to this case, and sending the packet needs to be done differently too. */ + if (pcb->flags & RAW_FLAGS_HDRINCL) { + /* A full header *must* be present in the first pbuf of the chain, as the + * output routines may access its fields directly. */ + if (p->len < header_size) { + return ERR_VAL; + } + /* @todo multicast loop support, if at all desired for this scenario.. */ + NETIF_SET_HINTS(netif, &pcb->netif_hints); + err = ip_output_if_hdrincl(p, src_ip, dst_ip, netif); + NETIF_RESET_HINTS(netif); + return err; + } + + /* packet too large to add an IP header without causing an overflow? 
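+     (tot_len is a u16_t: with illustrative numbers, a 65530-byte chain plus
+     a 20-byte IPv4 header wraps to 14, so the sum compares smaller than the
+     original tot_len and the check below rejects it with ERR_MEM)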
*/ + if ((u16_t)(p->tot_len + header_size) < p->tot_len) { + return ERR_MEM; + } + /* not enough space to add an IP header to first pbuf in given p chain? */ + if (pbuf_add_header(p, header_size)) { + /* allocate header in new pbuf */ + q = pbuf_alloc(PBUF_IP, 0, PBUF_RAM); + /* new header pbuf could not be allocated? */ + if (q == NULL) { + LWIP_DEBUGF(RAW_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_SERIOUS, ("raw_sendto: could not allocate header\n")); + return ERR_MEM; + } + if (p->tot_len != 0) { + /* chain header q in front of given pbuf p */ + pbuf_chain(q, p); + } + /* { first pbuf q points to header pbuf } */ + LWIP_DEBUGF(RAW_DEBUG, ("raw_sendto: added header pbuf %p before given pbuf %p\n", (void *)q, (void *)p)); + } else { + /* first pbuf q equals given pbuf */ + q = p; + if (pbuf_remove_header(q, header_size)) { + LWIP_ASSERT("Can't restore header we just removed!", 0); + return ERR_MEM; + } + } + +#if IP_SOF_BROADCAST + if (IP_IS_V4(dst_ip)) { + /* broadcast filter? */ + if (!ip_get_option(pcb, SOF_BROADCAST) && ip_addr_isbroadcast(dst_ip, netif)) { + LWIP_DEBUGF(RAW_DEBUG | LWIP_DBG_LEVEL_WARNING, ("raw_sendto: SOF_BROADCAST not enabled on pcb %p\n", (void *)pcb)); + /* free any temporary header pbuf allocated by pbuf_header() */ + if (q != p) { + pbuf_free(q); + } + return ERR_VAL; + } + } +#endif /* IP_SOF_BROADCAST */ + + /* Multicast Loop? */ +#if LWIP_MULTICAST_TX_OPTIONS + if (((pcb->flags & RAW_FLAGS_MULTICAST_LOOP) != 0) && ip_addr_ismulticast(dst_ip)) { + q->flags |= PBUF_FLAG_MCASTLOOP; + } +#endif /* LWIP_MULTICAST_TX_OPTIONS */ + +#if LWIP_IPV6 + /* If requested, based on the IPV6_CHECKSUM socket option per RFC3542, + compute the checksum and update the checksum in the payload. */ + if (IP_IS_V6(dst_ip) && pcb->chksum_reqd) { + u16_t chksum = ip6_chksum_pseudo(p, pcb->protocol, p->tot_len, ip_2_ip6(src_ip), ip_2_ip6(dst_ip)); + LWIP_ASSERT("Checksum must fit into first pbuf", p->len >= (pcb->chksum_offset + 2)); + SMEMCPY(((u8_t *)p->payload) + pcb->chksum_offset, &chksum, sizeof(u16_t)); + } +#endif + + /* Determine TTL to use */ +#if LWIP_MULTICAST_TX_OPTIONS + ttl = (ip_addr_ismulticast(dst_ip) ? raw_get_multicast_ttl(pcb) : pcb->ttl); +#else /* LWIP_MULTICAST_TX_OPTIONS */ + ttl = pcb->ttl; +#endif /* LWIP_MULTICAST_TX_OPTIONS */ + + NETIF_SET_HINTS(netif, &pcb->netif_hints); + err = ip_output_if(q, src_ip, dst_ip, ttl, pcb->tos, pcb->protocol, netif); + NETIF_RESET_HINTS(netif); + + /* did we chain a header earlier? */ + if (q != p) { + /* free the header */ + pbuf_free(q); + } + return err; +} + +/** + * @ingroup raw_raw + * Send the raw IP packet to the address given by raw_connect() + * + * @param pcb the raw pcb which to send + * @param p the IP payload to send + * + */ +err_t +raw_send(struct raw_pcb *pcb, struct pbuf *p) +{ + return raw_sendto(pcb, p, &pcb->remote_ip); +} + +/** + * @ingroup raw_raw + * Remove an RAW PCB. + * + * @param pcb RAW PCB to be removed. The PCB is removed from the list of + * RAW PCB's and the data structure is freed from memory. + * + * @see raw_new() + */ +void +raw_remove(struct raw_pcb *pcb) +{ + struct raw_pcb *pcb2; + LWIP_ASSERT_CORE_LOCKED(); + /* pcb to be removed is first in list? 
*/ + if (raw_pcbs == pcb) { + /* make list start at 2nd pcb */ + raw_pcbs = raw_pcbs->next; + /* pcb not 1st in list */ + } else { + for (pcb2 = raw_pcbs; pcb2 != NULL; pcb2 = pcb2->next) { + /* find pcb in raw_pcbs list */ + if (pcb2->next != NULL && pcb2->next == pcb) { + /* remove pcb from list */ + pcb2->next = pcb->next; + break; + } + } + } + memp_free(MEMP_RAW_PCB, pcb); +} + +/** + * @ingroup raw_raw + * Create a RAW PCB. + * + * @return The RAW PCB which was created. NULL if the PCB data structure + * could not be allocated. + * + * @param proto the protocol number of the IPs payload (e.g. IP_PROTO_ICMP) + * + * @see raw_remove() + */ +struct raw_pcb * +raw_new(u8_t proto) +{ + struct raw_pcb *pcb; + + LWIP_DEBUGF(RAW_DEBUG | LWIP_DBG_TRACE, ("raw_new\n")); + LWIP_ASSERT_CORE_LOCKED(); + + pcb = (struct raw_pcb *)memp_malloc(MEMP_RAW_PCB); + /* could allocate RAW PCB? */ + if (pcb != NULL) { + /* initialize PCB to all zeroes */ + memset(pcb, 0, sizeof(struct raw_pcb)); + pcb->protocol = proto; + pcb->ttl = RAW_TTL; +#if LWIP_MULTICAST_TX_OPTIONS + raw_set_multicast_ttl(pcb, RAW_TTL); +#endif /* LWIP_MULTICAST_TX_OPTIONS */ + pcb->next = raw_pcbs; + raw_pcbs = pcb; + } + return pcb; +} + +/** + * @ingroup raw_raw + * Create a RAW PCB for specific IP type. + * + * @return The RAW PCB which was created. NULL if the PCB data structure + * could not be allocated. + * + * @param type IP address type, see @ref lwip_ip_addr_type definitions. + * If you want to listen to IPv4 and IPv6 (dual-stack) packets, + * supply @ref IPADDR_TYPE_ANY as argument and bind to @ref IP_ANY_TYPE. + * @param proto the protocol number (next header) of the IPv6 packet payload + * (e.g. IP6_NEXTH_ICMP6) + * + * @see raw_remove() + */ +struct raw_pcb * +raw_new_ip_type(u8_t type, u8_t proto) +{ + struct raw_pcb *pcb; + LWIP_ASSERT_CORE_LOCKED(); + pcb = raw_new(proto); +#if LWIP_IPV4 && LWIP_IPV6 + if (pcb != NULL) { + IP_SET_TYPE_VAL(pcb->local_ip, type); + IP_SET_TYPE_VAL(pcb->remote_ip, type); + } +#else /* LWIP_IPV4 && LWIP_IPV6 */ + LWIP_UNUSED_ARG(type); +#endif /* LWIP_IPV4 && LWIP_IPV6 */ + return pcb; +} + +/** This function is called from netif.c when address is changed + * + * @param old_addr IP address of the netif before change + * @param new_addr IP address of the netif after change + */ +void raw_netif_ip_addr_changed(const ip_addr_t *old_addr, const ip_addr_t *new_addr) +{ + struct raw_pcb *rpcb; + + if (!ip_addr_isany(old_addr) && !ip_addr_isany(new_addr)) { + for (rpcb = raw_pcbs; rpcb != NULL; rpcb = rpcb->next) { + /* PCB bound to current local interface address? */ + if (ip_addr_cmp(&rpcb->local_ip, old_addr)) { + /* The PCB is bound to the old ipaddr and + * is set to bound to the new one instead */ + ip_addr_copy(rpcb->local_ip, *new_addr); + } + } + } +} + +#endif /* LWIP_RAW */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/stats.c b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/stats.c new file mode 100644 index 0000000..4602842 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/stats.c @@ -0,0 +1,169 @@ +/** + * @file + * Statistics module + * + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. 
Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ + +#include "lwip/opt.h" + +#if LWIP_STATS /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/def.h" +#include "lwip/stats.h" +#include "lwip/mem.h" +#include "lwip/debug.h" + +#include + +struct stats_ lwip_stats; + +void +stats_init(void) +{ +#ifdef LWIP_DEBUG +#if MEM_STATS + lwip_stats.mem.name = "MEM"; +#endif /* MEM_STATS */ +#endif /* LWIP_DEBUG */ +} + +#if LWIP_STATS_DISPLAY +void +stats_display_proto(struct stats_proto *proto, const char *name) +{ + LWIP_PLATFORM_DIAG(("\n%s\n\t", name)); + LWIP_PLATFORM_DIAG(("xmit: %"STAT_COUNTER_F"\n\t", proto->xmit)); + LWIP_PLATFORM_DIAG(("recv: %"STAT_COUNTER_F"\n\t", proto->recv)); + LWIP_PLATFORM_DIAG(("fw: %"STAT_COUNTER_F"\n\t", proto->fw)); + LWIP_PLATFORM_DIAG(("drop: %"STAT_COUNTER_F"\n\t", proto->drop)); + LWIP_PLATFORM_DIAG(("chkerr: %"STAT_COUNTER_F"\n\t", proto->chkerr)); + LWIP_PLATFORM_DIAG(("lenerr: %"STAT_COUNTER_F"\n\t", proto->lenerr)); + LWIP_PLATFORM_DIAG(("memerr: %"STAT_COUNTER_F"\n\t", proto->memerr)); + LWIP_PLATFORM_DIAG(("rterr: %"STAT_COUNTER_F"\n\t", proto->rterr)); + LWIP_PLATFORM_DIAG(("proterr: %"STAT_COUNTER_F"\n\t", proto->proterr)); + LWIP_PLATFORM_DIAG(("opterr: %"STAT_COUNTER_F"\n\t", proto->opterr)); + LWIP_PLATFORM_DIAG(("err: %"STAT_COUNTER_F"\n\t", proto->err)); + LWIP_PLATFORM_DIAG(("cachehit: %"STAT_COUNTER_F"\n", proto->cachehit)); +} + +#if IGMP_STATS || MLD6_STATS +void +stats_display_igmp(struct stats_igmp *igmp, const char *name) +{ + LWIP_PLATFORM_DIAG(("\n%s\n\t", name)); + LWIP_PLATFORM_DIAG(("xmit: %"STAT_COUNTER_F"\n\t", igmp->xmit)); + LWIP_PLATFORM_DIAG(("recv: %"STAT_COUNTER_F"\n\t", igmp->recv)); + LWIP_PLATFORM_DIAG(("drop: %"STAT_COUNTER_F"\n\t", igmp->drop)); + LWIP_PLATFORM_DIAG(("chkerr: %"STAT_COUNTER_F"\n\t", igmp->chkerr)); + LWIP_PLATFORM_DIAG(("lenerr: %"STAT_COUNTER_F"\n\t", igmp->lenerr)); + LWIP_PLATFORM_DIAG(("memerr: %"STAT_COUNTER_F"\n\t", igmp->memerr)); + LWIP_PLATFORM_DIAG(("proterr: %"STAT_COUNTER_F"\n\t", igmp->proterr)); + LWIP_PLATFORM_DIAG(("rx_v1: %"STAT_COUNTER_F"\n\t", igmp->rx_v1)); + LWIP_PLATFORM_DIAG(("rx_group: %"STAT_COUNTER_F"\n\t", igmp->rx_group)); + LWIP_PLATFORM_DIAG(("rx_general: %"STAT_COUNTER_F"\n\t", 
igmp->rx_general)); + LWIP_PLATFORM_DIAG(("rx_report: %"STAT_COUNTER_F"\n\t", igmp->rx_report)); + LWIP_PLATFORM_DIAG(("tx_join: %"STAT_COUNTER_F"\n\t", igmp->tx_join)); + LWIP_PLATFORM_DIAG(("tx_leave: %"STAT_COUNTER_F"\n\t", igmp->tx_leave)); + LWIP_PLATFORM_DIAG(("tx_report: %"STAT_COUNTER_F"\n", igmp->tx_report)); +} +#endif /* IGMP_STATS || MLD6_STATS */ + +#if MEM_STATS || MEMP_STATS +void +stats_display_mem(struct stats_mem *mem, const char *name) +{ + LWIP_PLATFORM_DIAG(("\nMEM %s\n\t", name)); + LWIP_PLATFORM_DIAG(("avail: %"MEM_SIZE_F"\n\t", mem->avail)); + LWIP_PLATFORM_DIAG(("used: %"MEM_SIZE_F"\n\t", mem->used)); + LWIP_PLATFORM_DIAG(("max: %"MEM_SIZE_F"\n\t", mem->max)); + LWIP_PLATFORM_DIAG(("err: %"STAT_COUNTER_F"\n", mem->err)); +} + +#if MEMP_STATS +void +stats_display_memp(struct stats_mem *mem, int idx) +{ + if (idx < MEMP_MAX) { + stats_display_mem(mem, mem->name); + } +} +#endif /* MEMP_STATS */ +#endif /* MEM_STATS || MEMP_STATS */ + +#if SYS_STATS +void +stats_display_sys(struct stats_sys *sys) +{ + LWIP_PLATFORM_DIAG(("\nSYS\n\t")); + LWIP_PLATFORM_DIAG(("sem.used: %"STAT_COUNTER_F"\n\t", sys->sem.used)); + LWIP_PLATFORM_DIAG(("sem.max: %"STAT_COUNTER_F"\n\t", sys->sem.max)); + LWIP_PLATFORM_DIAG(("sem.err: %"STAT_COUNTER_F"\n\t", sys->sem.err)); + LWIP_PLATFORM_DIAG(("mutex.used: %"STAT_COUNTER_F"\n\t", sys->mutex.used)); + LWIP_PLATFORM_DIAG(("mutex.max: %"STAT_COUNTER_F"\n\t", sys->mutex.max)); + LWIP_PLATFORM_DIAG(("mutex.err: %"STAT_COUNTER_F"\n\t", sys->mutex.err)); + LWIP_PLATFORM_DIAG(("mbox.used: %"STAT_COUNTER_F"\n\t", sys->mbox.used)); + LWIP_PLATFORM_DIAG(("mbox.max: %"STAT_COUNTER_F"\n\t", sys->mbox.max)); + LWIP_PLATFORM_DIAG(("mbox.err: %"STAT_COUNTER_F"\n", sys->mbox.err)); +} +#endif /* SYS_STATS */ + +void +stats_display(void) +{ + s16_t i; + + LINK_STATS_DISPLAY(); + ETHARP_STATS_DISPLAY(); + IPFRAG_STATS_DISPLAY(); + IP6_FRAG_STATS_DISPLAY(); + IP_STATS_DISPLAY(); + ND6_STATS_DISPLAY(); + IP6_STATS_DISPLAY(); + IGMP_STATS_DISPLAY(); + MLD6_STATS_DISPLAY(); + ICMP_STATS_DISPLAY(); + ICMP6_STATS_DISPLAY(); + UDP_STATS_DISPLAY(); + TCP_STATS_DISPLAY(); + MEM_STATS_DISPLAY(); + for (i = 0; i < MEMP_MAX; i++) { + MEMP_STATS_DISPLAY(i); + } + SYS_STATS_DISPLAY(); +} +#endif /* LWIP_STATS_DISPLAY */ + +#endif /* LWIP_STATS */ + diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/sys.c b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/sys.c new file mode 100644 index 0000000..b95f4bd --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/sys.c @@ -0,0 +1,148 @@ +/** + * @file + * lwIP Operating System abstraction + * + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ + +/** + * @defgroup sys_layer Porting (system abstraction layer) + * @ingroup lwip + * + * @defgroup sys_os OS abstraction layer + * @ingroup sys_layer + * No need to implement functions in this section in NO_SYS mode. + * The OS-specific code should be implemented in arch/sys_arch.h + * and sys_arch.c of your port. + * + * The operating system emulation layer provides a common interface + * between the lwIP code and the underlying operating system kernel. The + * general idea is that porting lwIP to new architectures requires only + * small changes to a few header files and a new sys_arch + * implementation. It is also possible to do a sys_arch implementation + * that does not rely on any underlying operating system. + * + * The sys_arch provides semaphores, mailboxes and mutexes to lwIP. For the full + * lwIP functionality, multiple threads support can be implemented in the + * sys_arch, but this is not required for the basic lwIP + * functionality. Timer scheduling is implemented in lwIP, but can be implemented + * by the sys_arch port (LWIP_TIMERS_CUSTOM==1). + * + * In addition to the source file providing the functionality of sys_arch, + * the OS emulation layer must provide several header files defining + * macros used throughout lwip. The files required and the macros they + * must define are listed below the sys_arch description. + * + * Since lwIP 1.4.0, semaphore, mutexes and mailbox functions are prototyped in a way that + * allows both using pointers or actual OS structures to be used. This way, memory + * required for such types can be either allocated in place (globally or on the + * stack) or on the heap (allocated internally in the "*_new()" functions). + * + * Note: + * ----- + * Be careful with using mem_malloc() in sys_arch. When malloc() refers to + * mem_malloc() you can run into a circular function call problem. In mem.c + * mem_init() tries to allocate a semaphore using mem_malloc, which of course + * can't be performed when sys_arch uses mem_malloc. + * + * @defgroup sys_sem Semaphores + * @ingroup sys_os + * Semaphores can be either counting or binary - lwIP works with both + * kinds. + * Semaphores are represented by the type "sys_sem_t" which is typedef'd + * in the sys_arch.h file. Mailboxes are equivalently represented by the + * type "sys_mbox_t". Mutexes are represented by the type "sys_mutex_t". + * lwIP does not place any restrictions on how these types are represented + * internally. + * + * @defgroup sys_mutex Mutexes + * @ingroup sys_os + * Mutexes are recommended to correctly handle priority inversion, + * especially if you use LWIP_CORE_LOCKING . 
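+ *
+ * A sketch of the mutex part of a sys_arch port, assuming sys_arch.h
+ * typedefs sys_mutex_t to a FreeRTOS SemaphoreHandle_t (the CMSIS-OS
+ * based port used in this tree may wrap the handle differently):
+ * @code{.c}
+ * err_t sys_mutex_new(sys_mutex_t *mutex)
+ * {
+ *   *mutex = xSemaphoreCreateMutex();
+ *   return (*mutex != NULL) ? ERR_OK : ERR_MEM;
+ * }
+ * void sys_mutex_lock(sys_mutex_t *mutex)   { xSemaphoreTake(*mutex, portMAX_DELAY); }
+ * void sys_mutex_unlock(sys_mutex_t *mutex) { xSemaphoreGive(*mutex); }
+ * void sys_mutex_free(sys_mutex_t *mutex)   { vSemaphoreDelete(*mutex); }
+ * @endcode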
+ * + * @defgroup sys_mbox Mailboxes + * @ingroup sys_os + * Mailboxes should be implemented as a queue which allows multiple messages + * to be posted (implementing as a rendez-vous point where only one message can be + * posted at a time can have a highly negative impact on performance). A message + * in a mailbox is just a pointer, nothing more. + * + * @defgroup sys_time Time + * @ingroup sys_layer + * + * @defgroup sys_prot Critical sections + * @ingroup sys_layer + * Used to protect short regions of code against concurrent access. + * - Your system is a bare-metal system (probably with an RTOS) + * and interrupts are under your control: + * Implement this as LockInterrupts() / UnlockInterrupts() + * - Your system uses an RTOS with deferred interrupt handling from a + * worker thread: Implement as a global mutex or lock/unlock scheduler + * - Your system uses a high-level OS with e.g. POSIX signals: + * Implement as a global mutex + * + * @defgroup sys_misc Misc + * @ingroup sys_os + */ + +#include "lwip/opt.h" + +#include "lwip/sys.h" + +/* Most of the functions defined in sys.h must be implemented in the + * architecture-dependent file sys_arch.c */ + +#if !NO_SYS + +#ifndef sys_msleep +/** + * Sleep for some ms. Timeouts are NOT processed while sleeping. + * + * @param ms number of milliseconds to sleep + */ +void +sys_msleep(u32_t ms) +{ + if (ms > 0) { + sys_sem_t delaysem; + err_t err = sys_sem_new(&delaysem, 0); + if (err == ERR_OK) { + sys_arch_sem_wait(&delaysem, ms); + sys_sem_free(&delaysem); + } + } +} +#endif /* sys_msleep */ + +#endif /* !NO_SYS */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/tcp.c b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/tcp.c new file mode 100644 index 0000000..2ff37ef --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/tcp.c @@ -0,0 +1,2686 @@ +/** + * @file + * Transmission Control Protocol for IP + * See also @ref tcp_raw + * + * @defgroup tcp_raw TCP + * @ingroup callbackstyle_api + * Transmission Control Protocol for IP\n + * @see @ref api + * + * Common functions for the TCP implementation, such as functions + * for manipulating the data structures and the TCP timer functions. TCP functions + * related to input and output is found in tcp_in.c and tcp_out.c respectively.\n + * + * TCP connection setup + * -------------------- + * The functions used for setting up connections is similar to that of + * the sequential API and of the BSD socket API. A new TCP connection + * identifier (i.e., a protocol control block - PCB) is created with the + * tcp_new() function. This PCB can then be either set to listen for new + * incoming connections or be explicitly connected to another host. + * - tcp_new() + * - tcp_bind() + * - tcp_listen() and tcp_listen_with_backlog() + * - tcp_accept() + * - tcp_connect() + * + * Sending TCP data + * ---------------- + * TCP data is sent by enqueueing the data with a call to tcp_write() and + * triggering to send by calling tcp_output(). When the data is successfully + * transmitted to the remote host, the application will be notified with a + * call to a specified callback function. + * - tcp_write() + * - tcp_output() + * - tcp_sent() + * + * Receiving TCP data + * ------------------ + * TCP data reception is callback based - an application specified + * callback function is called when new data arrives. 
When the + * application has taken the data, it has to call the tcp_recved() + * function to indicate that TCP can advertise increase the receive + * window. + * - tcp_recv() + * - tcp_recved() + * + * Application polling + * ------------------- + * When a connection is idle (i.e., no data is either transmitted or + * received), lwIP will repeatedly poll the application by calling a + * specified callback function. This can be used either as a watchdog + * timer for killing connections that have stayed idle for too long, or + * as a method of waiting for memory to become available. For instance, + * if a call to tcp_write() has failed because memory wasn't available, + * the application may use the polling functionality to call tcp_write() + * again when the connection has been idle for a while. + * - tcp_poll() + * + * Closing and aborting connections + * -------------------------------- + * - tcp_close() + * - tcp_abort() + * - tcp_err() + * + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
+ * + * Author: Adam Dunkels + * + */ + +#include "lwip/opt.h" + +#if LWIP_TCP /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/def.h" +#include "lwip/mem.h" +#include "lwip/memp.h" +#include "lwip/tcp.h" +#include "lwip/priv/tcp_priv.h" +#include "lwip/debug.h" +#include "lwip/stats.h" +#include "lwip/ip6.h" +#include "lwip/ip6_addr.h" +#include "lwip/nd6.h" + +#include + +#ifdef LWIP_HOOK_FILENAME +#include LWIP_HOOK_FILENAME +#endif + +#ifndef TCP_LOCAL_PORT_RANGE_START +/* From http://www.iana.org/assignments/port-numbers: + "The Dynamic and/or Private Ports are those from 49152 through 65535" */ +#define TCP_LOCAL_PORT_RANGE_START 0xc000 +#define TCP_LOCAL_PORT_RANGE_END 0xffff +#define TCP_ENSURE_LOCAL_PORT_RANGE(port) ((u16_t)(((port) & (u16_t)~TCP_LOCAL_PORT_RANGE_START) + TCP_LOCAL_PORT_RANGE_START)) +#endif + +#if LWIP_TCP_KEEPALIVE +#define TCP_KEEP_DUR(pcb) ((pcb)->keep_cnt * (pcb)->keep_intvl) +#define TCP_KEEP_INTVL(pcb) ((pcb)->keep_intvl) +#else /* LWIP_TCP_KEEPALIVE */ +#define TCP_KEEP_DUR(pcb) TCP_MAXIDLE +#define TCP_KEEP_INTVL(pcb) TCP_KEEPINTVL_DEFAULT +#endif /* LWIP_TCP_KEEPALIVE */ + +/* As initial send MSS, we use TCP_MSS but limit it to 536. */ +#if TCP_MSS > 536 +#define INITIAL_MSS 536 +#else +#define INITIAL_MSS TCP_MSS +#endif + +static const char *const tcp_state_str[] = { + "CLOSED", + "LISTEN", + "SYN_SENT", + "SYN_RCVD", + "ESTABLISHED", + "FIN_WAIT_1", + "FIN_WAIT_2", + "CLOSE_WAIT", + "CLOSING", + "LAST_ACK", + "TIME_WAIT" +}; + +/* last local TCP port */ +static u16_t tcp_port = TCP_LOCAL_PORT_RANGE_START; + +/* Incremented every coarse grained timer shot (typically every 500 ms). */ +u32_t tcp_ticks; +static const u8_t tcp_backoff[13] = +{ 1, 2, 3, 4, 5, 6, 7, 7, 7, 7, 7, 7, 7}; +/* Times per slowtmr hits */ +static const u8_t tcp_persist_backoff[7] = { 3, 6, 12, 24, 48, 96, 120 }; + +/* The TCP PCB lists. */ + +/** List of all TCP PCBs bound but not yet (connected || listening) */ +struct tcp_pcb *tcp_bound_pcbs; +/** List of all TCP PCBs in LISTEN state */ +union tcp_listen_pcbs_t tcp_listen_pcbs; +/** List of all TCP PCBs that are in a state in which + * they accept or send data. */ +struct tcp_pcb *tcp_active_pcbs; +/** List of all TCP PCBs in TIME-WAIT state */ +struct tcp_pcb *tcp_tw_pcbs; + +/** An array with all (non-temporary) PCB lists, mainly used for smaller code size */ +struct tcp_pcb **const tcp_pcb_lists[] = {&tcp_listen_pcbs.pcbs, &tcp_bound_pcbs, + &tcp_active_pcbs, &tcp_tw_pcbs +}; + +u8_t tcp_active_pcbs_changed; + +/** Timer counter to handle calling slow-timer from tcp_tmr() */ +static u8_t tcp_timer; +static u8_t tcp_timer_ctr; +static u16_t tcp_new_port(void); + +static err_t tcp_close_shutdown_fin(struct tcp_pcb *pcb); +#if LWIP_TCP_PCB_NUM_EXT_ARGS +static void tcp_ext_arg_invoke_callbacks_destroyed(struct tcp_pcb_ext_args *ext_args); +#endif + +/** + * Initialize this module. 
+ */ +void +tcp_init(void) +{ +#ifdef LWIP_RAND + tcp_port = TCP_ENSURE_LOCAL_PORT_RANGE(LWIP_RAND()); +#endif /* LWIP_RAND */ +} + +/** Free a tcp pcb */ +void +tcp_free(struct tcp_pcb *pcb) +{ + LWIP_ASSERT("tcp_free: LISTEN", pcb->state != LISTEN); +#if LWIP_TCP_PCB_NUM_EXT_ARGS + tcp_ext_arg_invoke_callbacks_destroyed(pcb->ext_args); +#endif + memp_free(MEMP_TCP_PCB, pcb); +} + +/** Free a tcp listen pcb */ +static void +tcp_free_listen(struct tcp_pcb *pcb) +{ + LWIP_ASSERT("tcp_free_listen: !LISTEN", pcb->state != LISTEN); +#if LWIP_TCP_PCB_NUM_EXT_ARGS + tcp_ext_arg_invoke_callbacks_destroyed(pcb->ext_args); +#endif + memp_free(MEMP_TCP_PCB_LISTEN, pcb); +} + +/** + * Called periodically to dispatch TCP timers. + */ +void +tcp_tmr(void) +{ + /* Call tcp_fasttmr() every 250 ms */ + tcp_fasttmr(); + + if (++tcp_timer & 1) { + /* Call tcp_slowtmr() every 500 ms, i.e., every other timer + tcp_tmr() is called. */ + tcp_slowtmr(); + } +} + +#if LWIP_CALLBACK_API || TCP_LISTEN_BACKLOG +/** Called when a listen pcb is closed. Iterates one pcb list and removes the + * closed listener pcb from pcb->listener if matching. + */ +static void +tcp_remove_listener(struct tcp_pcb *list, struct tcp_pcb_listen *lpcb) +{ + struct tcp_pcb *pcb; + + LWIP_ASSERT("tcp_remove_listener: invalid listener", lpcb != NULL); + + for (pcb = list; pcb != NULL; pcb = pcb->next) { + if (pcb->listener == lpcb) { + pcb->listener = NULL; + } + } +} +#endif + +/** Called when a listen pcb is closed. Iterates all pcb lists and removes the + * closed listener pcb from pcb->listener if matching. + */ +static void +tcp_listen_closed(struct tcp_pcb *pcb) +{ +#if LWIP_CALLBACK_API || TCP_LISTEN_BACKLOG + size_t i; + LWIP_ASSERT("pcb != NULL", pcb != NULL); + LWIP_ASSERT("pcb->state == LISTEN", pcb->state == LISTEN); + for (i = 1; i < LWIP_ARRAYSIZE(tcp_pcb_lists); i++) { + tcp_remove_listener(*tcp_pcb_lists[i], (struct tcp_pcb_listen *)pcb); + } +#endif + LWIP_UNUSED_ARG(pcb); +} + +#if TCP_LISTEN_BACKLOG +/** @ingroup tcp_raw + * Delay accepting a connection in respect to the listen backlog: + * the number of outstanding connections is increased until + * tcp_backlog_accepted() is called. + * + * ATTENTION: the caller is responsible for calling tcp_backlog_accepted() + * or else the backlog feature will get out of sync! + * + * @param pcb the connection pcb which is not fully accepted yet + */ +void +tcp_backlog_delayed(struct tcp_pcb *pcb) +{ + LWIP_ASSERT("pcb != NULL", pcb != NULL); + LWIP_ASSERT_CORE_LOCKED(); + if ((pcb->flags & TF_BACKLOGPEND) == 0) { + if (pcb->listener != NULL) { + pcb->listener->accepts_pending++; + LWIP_ASSERT("accepts_pending != 0", pcb->listener->accepts_pending != 0); + tcp_set_flags(pcb, TF_BACKLOGPEND); + } + } +} + +/** @ingroup tcp_raw + * A delayed-accept a connection is accepted (or closed/aborted): decreases + * the number of outstanding connections after calling tcp_backlog_delayed(). + * + * ATTENTION: the caller is responsible for calling tcp_backlog_accepted() + * or else the backlog feature will get out of sync! 
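+ *
+ * A sketch of the intended pairing, assuming an accept callback that defers
+ * the new connection to a worker (hand_off_to_worker() is hypothetical):
+ * @code{.c}
+ * static err_t my_accept(void *arg, struct tcp_pcb *newpcb, err_t err)
+ * {
+ *   LWIP_UNUSED_ARG(arg);
+ *   LWIP_UNUSED_ARG(err);
+ *   tcp_backlog_delayed(newpcb);  // keep the backlog slot occupied
+ *   hand_off_to_worker(newpcb);   // serviced later, outside this callback
+ *   return ERR_OK;
+ * }
+ * // ...and in the worker, once it really takes over the connection:
+ * //   tcp_backlog_accepted(newpcb);
+ * @endcode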
+ * + * @param pcb the connection pcb which is now fully accepted (or closed/aborted) + */ +void +tcp_backlog_accepted(struct tcp_pcb *pcb) +{ + LWIP_ASSERT("pcb != NULL", pcb != NULL); + LWIP_ASSERT_CORE_LOCKED(); + if ((pcb->flags & TF_BACKLOGPEND) != 0) { + if (pcb->listener != NULL) { + LWIP_ASSERT("accepts_pending != 0", pcb->listener->accepts_pending != 0); + pcb->listener->accepts_pending--; + tcp_clear_flags(pcb, TF_BACKLOGPEND); + } + } +} +#endif /* TCP_LISTEN_BACKLOG */ + +/** + * Closes the TX side of a connection held by the PCB. + * For tcp_close(), a RST is sent if the application didn't receive all data + * (tcp_recved() not called for all data passed to recv callback). + * + * Listening pcbs are freed and may not be referenced any more. + * Connection pcbs are freed if not yet connected and may not be referenced + * any more. If a connection is established (at least SYN received or in + * a closing state), the connection is closed, and put in a closing state. + * The pcb is then automatically freed in tcp_slowtmr(). It is therefore + * unsafe to reference it. + * + * @param pcb the tcp_pcb to close + * @return ERR_OK if connection has been closed + * another err_t if closing failed and pcb is not freed + */ +static err_t +tcp_close_shutdown(struct tcp_pcb *pcb, u8_t rst_on_unacked_data) +{ + LWIP_ASSERT("tcp_close_shutdown: invalid pcb", pcb != NULL); + + if (rst_on_unacked_data && ((pcb->state == ESTABLISHED) || (pcb->state == CLOSE_WAIT))) { + if ((pcb->refused_data != NULL) || (pcb->rcv_wnd != TCP_WND_MAX(pcb))) { + /* Not all data received by application, send RST to tell the remote + side about this. */ + LWIP_ASSERT("pcb->flags & TF_RXCLOSED", pcb->flags & TF_RXCLOSED); + + /* don't call tcp_abort here: we must not deallocate the pcb since + that might not be expected when calling tcp_close */ + tcp_rst(pcb, pcb->snd_nxt, pcb->rcv_nxt, &pcb->local_ip, &pcb->remote_ip, + pcb->local_port, pcb->remote_port); + + tcp_pcb_purge(pcb); + TCP_RMV_ACTIVE(pcb); + /* Deallocate the pcb since we already sent a RST for it */ + if (tcp_input_pcb == pcb) { + /* prevent using a deallocated pcb: free it from tcp_input later */ + tcp_trigger_input_pcb_close(); + } else { + tcp_free(pcb); + } + return ERR_OK; + } + } + + /* - states which free the pcb are handled here, + - states which send FIN and change state are handled in tcp_close_shutdown_fin() */ + switch (pcb->state) { + case CLOSED: + /* Closing a pcb in the CLOSED state might seem erroneous, + * however, it is in this state once allocated and as yet unused + * and the user needs some way to free it should the need arise. + * Calling tcp_close() with a pcb that has already been closed, (i.e. twice) + * or for a pcb that has been used and then entered the CLOSED state + * is erroneous, but this should never happen as the pcb has in those cases + * been freed, and so any remaining handles are bogus. 
*/ + if (pcb->local_port != 0) { + TCP_RMV(&tcp_bound_pcbs, pcb); + } + tcp_free(pcb); + break; + case LISTEN: + tcp_listen_closed(pcb); + tcp_pcb_remove(&tcp_listen_pcbs.pcbs, pcb); + tcp_free_listen(pcb); + break; + case SYN_SENT: + TCP_PCB_REMOVE_ACTIVE(pcb); + tcp_free(pcb); + MIB2_STATS_INC(mib2.tcpattemptfails); + break; + default: + return tcp_close_shutdown_fin(pcb); + } + return ERR_OK; +} + +static err_t +tcp_close_shutdown_fin(struct tcp_pcb *pcb) +{ + err_t err; + LWIP_ASSERT("pcb != NULL", pcb != NULL); + + switch (pcb->state) { + case SYN_RCVD: + err = tcp_send_fin(pcb); + if (err == ERR_OK) { + tcp_backlog_accepted(pcb); + MIB2_STATS_INC(mib2.tcpattemptfails); + pcb->state = FIN_WAIT_1; + } + break; + case ESTABLISHED: + err = tcp_send_fin(pcb); + if (err == ERR_OK) { + MIB2_STATS_INC(mib2.tcpestabresets); + pcb->state = FIN_WAIT_1; + } + break; + case CLOSE_WAIT: + err = tcp_send_fin(pcb); + if (err == ERR_OK) { + MIB2_STATS_INC(mib2.tcpestabresets); + pcb->state = LAST_ACK; + } + break; + default: + /* Has already been closed, do nothing. */ + return ERR_OK; + } + + if (err == ERR_OK) { + /* To ensure all data has been sent when tcp_close returns, we have + to make sure tcp_output doesn't fail. + Since we don't really have to ensure all data has been sent when tcp_close + returns (unsent data is sent from tcp timer functions, also), we don't care + for the return value of tcp_output for now. */ + tcp_output(pcb); + } else if (err == ERR_MEM) { + /* Mark this pcb for closing. Closing is retried from tcp_tmr. */ + tcp_set_flags(pcb, TF_CLOSEPEND); + /* We have to return ERR_OK from here to indicate to the callers that this + pcb should not be used any more as it will be freed soon via tcp_tmr. + This is OK here since sending FIN does not guarantee a time frime for + actually freeing the pcb, either (it is left in closure states for + remote ACK or timeout) */ + return ERR_OK; + } + return err; +} + +/** + * @ingroup tcp_raw + * Closes the connection held by the PCB. + * + * Listening pcbs are freed and may not be referenced any more. + * Connection pcbs are freed if not yet connected and may not be referenced + * any more. If a connection is established (at least SYN received or in + * a closing state), the connection is closed, and put in a closing state. + * The pcb is then automatically freed in tcp_slowtmr(). It is therefore + * unsafe to reference it (unless an error is returned). + * + * The function may return ERR_MEM if no memory + * was available for closing the connection. If so, the application + * should wait and try again either by using the acknowledgment + * callback or the polling functionality. If the close succeeds, the + * function returns ERR_OK. + * + * @param pcb the tcp_pcb to close + * @return ERR_OK if connection has been closed + * another err_t if closing failed and pcb is not freed + */ +err_t +tcp_close(struct tcp_pcb *pcb) +{ + LWIP_ASSERT_CORE_LOCKED(); + + LWIP_ERROR("tcp_close: invalid pcb", pcb != NULL, return ERR_ARG); + LWIP_DEBUGF(TCP_DEBUG, ("tcp_close: closing in ")); + + tcp_debug_print_state(pcb->state); + + if (pcb->state != LISTEN) { + /* Set a flag not to receive any more data... */ + tcp_set_flags(pcb, TF_RXCLOSED); + } + /* ... and close */ + return tcp_close_shutdown(pcb, 1); +} + +/** + * @ingroup tcp_raw + * Causes all or part of a full-duplex connection of this PCB to be shut down. + * This doesn't deallocate the PCB unless shutting down both sides! 
+ * Shutting down both sides is the same as calling tcp_close, so if it succeds + * (i.e. returns ER_OK), the PCB must not be referenced any more! + * + * @param pcb PCB to shutdown + * @param shut_rx shut down receive side if this is != 0 + * @param shut_tx shut down send side if this is != 0 + * @return ERR_OK if shutdown succeeded (or the PCB has already been shut down) + * another err_t on error. + */ +err_t +tcp_shutdown(struct tcp_pcb *pcb, int shut_rx, int shut_tx) +{ + LWIP_ASSERT_CORE_LOCKED(); + + LWIP_ERROR("tcp_shutdown: invalid pcb", pcb != NULL, return ERR_ARG); + + if (pcb->state == LISTEN) { + return ERR_CONN; + } + if (shut_rx) { + /* shut down the receive side: set a flag not to receive any more data... */ + tcp_set_flags(pcb, TF_RXCLOSED); + if (shut_tx) { + /* shutting down the tx AND rx side is the same as closing for the raw API */ + return tcp_close_shutdown(pcb, 1); + } + /* ... and free buffered data */ + if (pcb->refused_data != NULL) { + pbuf_free(pcb->refused_data); + pcb->refused_data = NULL; + } + } + if (shut_tx) { + /* This can't happen twice since if it succeeds, the pcb's state is changed. + Only close in these states as the others directly deallocate the PCB */ + switch (pcb->state) { + case SYN_RCVD: + case ESTABLISHED: + case CLOSE_WAIT: + return tcp_close_shutdown(pcb, (u8_t)shut_rx); + default: + /* Not (yet?) connected, cannot shutdown the TX side as that would bring us + into CLOSED state, where the PCB is deallocated. */ + return ERR_CONN; + } + } + return ERR_OK; +} + +/** + * Abandons a connection and optionally sends a RST to the remote + * host. Deletes the local protocol control block. This is done when + * a connection is killed because of shortage of memory. + * + * @param pcb the tcp_pcb to abort + * @param reset boolean to indicate whether a reset should be sent + */ +void +tcp_abandon(struct tcp_pcb *pcb, int reset) +{ + u32_t seqno, ackno; +#if LWIP_CALLBACK_API + tcp_err_fn errf; +#endif /* LWIP_CALLBACK_API */ + void *errf_arg; + + LWIP_ASSERT_CORE_LOCKED(); + + LWIP_ERROR("tcp_abandon: invalid pcb", pcb != NULL, return); + + /* pcb->state LISTEN not allowed here */ + LWIP_ASSERT("don't call tcp_abort/tcp_abandon for listen-pcbs", + pcb->state != LISTEN); + /* Figure out on which TCP PCB list we are, and remove us. If we + are in an active state, call the receive function associated with + the PCB with a NULL argument, and send an RST to the remote end. 
*/ + if (pcb->state == TIME_WAIT) { + tcp_pcb_remove(&tcp_tw_pcbs, pcb); + tcp_free(pcb); + } else { + int send_rst = 0; + u16_t local_port = 0; + enum tcp_state last_state; + seqno = pcb->snd_nxt; + ackno = pcb->rcv_nxt; +#if LWIP_CALLBACK_API + errf = pcb->errf; +#endif /* LWIP_CALLBACK_API */ + errf_arg = pcb->callback_arg; + if (pcb->state == CLOSED) { + if (pcb->local_port != 0) { + /* bound, not yet opened */ + TCP_RMV(&tcp_bound_pcbs, pcb); + } + } else { + send_rst = reset; + local_port = pcb->local_port; + TCP_PCB_REMOVE_ACTIVE(pcb); + } + if (pcb->unacked != NULL) { + tcp_segs_free(pcb->unacked); + } + if (pcb->unsent != NULL) { + tcp_segs_free(pcb->unsent); + } +#if TCP_QUEUE_OOSEQ + if (pcb->ooseq != NULL) { + tcp_segs_free(pcb->ooseq); + } +#endif /* TCP_QUEUE_OOSEQ */ + tcp_backlog_accepted(pcb); + if (send_rst) { + LWIP_DEBUGF(TCP_RST_DEBUG, ("tcp_abandon: sending RST\n")); + tcp_rst(pcb, seqno, ackno, &pcb->local_ip, &pcb->remote_ip, local_port, pcb->remote_port); + } + last_state = pcb->state; + tcp_free(pcb); + TCP_EVENT_ERR(last_state, errf, errf_arg, ERR_ABRT); + } +} + +/** + * @ingroup tcp_raw + * Aborts the connection by sending a RST (reset) segment to the remote + * host. The pcb is deallocated. This function never fails. + * + * ATTENTION: When calling this from one of the TCP callbacks, make + * sure you always return ERR_ABRT (and never return ERR_ABRT otherwise + * or you will risk accessing deallocated memory or memory leaks! + * + * @param pcb the tcp pcb to abort + */ +void +tcp_abort(struct tcp_pcb *pcb) +{ + tcp_abandon(pcb, 1); +} + +/** + * @ingroup tcp_raw + * Binds the connection to a local port number and IP address. If the + * IP address is not given (i.e., ipaddr == IP_ANY_TYPE), the connection is + * bound to all local IP addresses. + * If another connection is bound to the same port, the function will + * return ERR_USE, otherwise ERR_OK is returned. + * + * @param pcb the tcp_pcb to bind (no check is done whether this pcb is + * already bound!) + * @param ipaddr the local ip address to bind to (use IPx_ADDR_ANY to bind + * to any local address + * @param port the local port to bind to + * @return ERR_USE if the port is already in use + * ERR_VAL if bind failed because the PCB is not in a valid state + * ERR_OK if bound + */ +err_t +tcp_bind(struct tcp_pcb *pcb, const ip_addr_t *ipaddr, u16_t port) +{ + int i; + int max_pcb_list = NUM_TCP_PCB_LISTS; + struct tcp_pcb *cpcb; +#if LWIP_IPV6 && LWIP_IPV6_SCOPES + ip_addr_t zoned_ipaddr; +#endif /* LWIP_IPV6 && LWIP_IPV6_SCOPES */ + + LWIP_ASSERT_CORE_LOCKED(); + +#if LWIP_IPV4 + /* Don't propagate NULL pointer (IPv4 ANY) to subsequent functions */ + if (ipaddr == NULL) { + ipaddr = IP4_ADDR_ANY; + } +#else /* LWIP_IPV4 */ + LWIP_ERROR("tcp_bind: invalid ipaddr", ipaddr != NULL, return ERR_ARG); +#endif /* LWIP_IPV4 */ + + LWIP_ERROR("tcp_bind: invalid pcb", pcb != NULL, return ERR_ARG); + + LWIP_ERROR("tcp_bind: can only bind in state CLOSED", pcb->state == CLOSED, return ERR_VAL); + +#if SO_REUSE + /* Unless the REUSEADDR flag is set, + we have to check the pcbs in TIME-WAIT state, also. + We do not dump TIME_WAIT pcb's; they can still be matched by incoming + packets using both local and remote IP addresses and ports to distinguish. + */ + if (ip_get_option(pcb, SOF_REUSEADDR)) { + max_pcb_list = NUM_TCP_PCB_LISTS_NO_TIME_WAIT; + } +#endif /* SO_REUSE */ + +#if LWIP_IPV6 && LWIP_IPV6_SCOPES + /* If the given IP address should have a zone but doesn't, assign one now. 
+ * This is legacy support: scope-aware callers should always provide properly + * zoned source addresses. Do the zone selection before the address-in-use + * check below; as such we have to make a temporary copy of the address. */ + if (IP_IS_V6(ipaddr) && ip6_addr_lacks_zone(ip_2_ip6(ipaddr), IP6_UNICAST)) { + ip_addr_copy(zoned_ipaddr, *ipaddr); + ip6_addr_select_zone(ip_2_ip6(&zoned_ipaddr), ip_2_ip6(&zoned_ipaddr)); + ipaddr = &zoned_ipaddr; + } +#endif /* LWIP_IPV6 && LWIP_IPV6_SCOPES */ + + if (port == 0) { + port = tcp_new_port(); + if (port == 0) { + return ERR_BUF; + } + } else { + /* Check if the address already is in use (on all lists) */ + for (i = 0; i < max_pcb_list; i++) { + for (cpcb = *tcp_pcb_lists[i]; cpcb != NULL; cpcb = cpcb->next) { + if (cpcb->local_port == port) { +#if SO_REUSE + /* Omit checking for the same port if both pcbs have REUSEADDR set. + For SO_REUSEADDR, the duplicate-check for a 5-tuple is done in + tcp_connect. */ + if (!ip_get_option(pcb, SOF_REUSEADDR) || + !ip_get_option(cpcb, SOF_REUSEADDR)) +#endif /* SO_REUSE */ + { + /* @todo: check accept_any_ip_version */ + if ((IP_IS_V6(ipaddr) == IP_IS_V6_VAL(cpcb->local_ip)) && + (ip_addr_isany(&cpcb->local_ip) || + ip_addr_isany(ipaddr) || + ip_addr_cmp(&cpcb->local_ip, ipaddr))) { + return ERR_USE; + } + } + } + } + } + } + + if (!ip_addr_isany(ipaddr) +#if LWIP_IPV4 && LWIP_IPV6 + || (IP_GET_TYPE(ipaddr) != IP_GET_TYPE(&pcb->local_ip)) +#endif /* LWIP_IPV4 && LWIP_IPV6 */ + ) { + ip_addr_set(&pcb->local_ip, ipaddr); + } + pcb->local_port = port; + TCP_REG(&tcp_bound_pcbs, pcb); + LWIP_DEBUGF(TCP_DEBUG, ("tcp_bind: bind to port %"U16_F"\n", port)); + return ERR_OK; +} + +/** + * @ingroup tcp_raw + * Binds the connection to a netif and IP address. + * After calling this function, all packets received via this PCB + * are guaranteed to have come in via the specified netif, and all + * outgoing packets will go out via the specified netif. + * + * @param pcb the tcp_pcb to bind. + * @param netif the netif to bind to. Can be NULL. + */ +void +tcp_bind_netif(struct tcp_pcb *pcb, const struct netif *netif) +{ + LWIP_ASSERT_CORE_LOCKED(); + if (netif != NULL) { + pcb->netif_idx = netif_get_index(netif); + } else { + pcb->netif_idx = NETIF_NO_INDEX; + } +} + +#if LWIP_CALLBACK_API +/** + * Default accept callback if no accept callback is specified by the user. + */ +static err_t +tcp_accept_null(void *arg, struct tcp_pcb *pcb, err_t err) +{ + LWIP_UNUSED_ARG(arg); + LWIP_UNUSED_ARG(err); + + LWIP_ASSERT("tcp_accept_null: invalid pcb", pcb != NULL); + + tcp_abort(pcb); + + return ERR_ABRT; +} +#endif /* LWIP_CALLBACK_API */ + +/** + * @ingroup tcp_raw + * Set the state of the connection to be LISTEN, which means that it + * is able to accept incoming connections. The protocol control block + * is reallocated in order to consume less memory. Setting the + * connection to LISTEN is an irreversible process. + * When an incoming connection is accepted, the function specified with + * the tcp_accept() function will be called. The pcb has to be bound + * to a local port with the tcp_bind() function. + * + * The tcp_listen() function returns a new connection identifier, and + * the one passed as an argument to the function will be + * deallocated. The reason for this behavior is that less memory is + * needed for a connection that is listening, so tcp_listen() will + * reclaim the memory needed for the original connection and allocate a + * new smaller memory block for the listening connection. 
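+ *
+ * A minimal setup sketch (port 7 and the echo_accept callback are
+ * illustrative assumptions):
+ * @code{.c}
+ * struct tcp_pcb *pcb = tcp_new();
+ * if ((pcb != NULL) && (tcp_bind(pcb, IP4_ADDR_ANY, 7) == ERR_OK)) {
+ *   pcb = tcp_listen_with_backlog(pcb, 2);  // the old pcb is freed on success
+ *   if (pcb != NULL) {
+ *     tcp_accept(pcb, echo_accept);         // echo_accept is a tcp_accept_fn
+ *   }
+ * }
+ * @endcode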
+ * + * tcp_listen() may return NULL if no memory was available for the + * listening connection. If so, the memory associated with the pcb + * passed as an argument to tcp_listen() will not be deallocated. + * + * The backlog limits the number of outstanding connections + * in the listen queue to the value specified by the backlog argument. + * To use it, your need to set TCP_LISTEN_BACKLOG=1 in your lwipopts.h. + * + * @param pcb the original tcp_pcb + * @param backlog the incoming connections queue limit + * @return tcp_pcb used for listening, consumes less memory. + * + * @note The original tcp_pcb is freed. This function therefore has to be + * called like this: + * tpcb = tcp_listen_with_backlog(tpcb, backlog); + */ +struct tcp_pcb * +tcp_listen_with_backlog(struct tcp_pcb *pcb, u8_t backlog) +{ + LWIP_ASSERT_CORE_LOCKED(); + return tcp_listen_with_backlog_and_err(pcb, backlog, NULL); +} + +/** + * @ingroup tcp_raw + * Set the state of the connection to be LISTEN, which means that it + * is able to accept incoming connections. The protocol control block + * is reallocated in order to consume less memory. Setting the + * connection to LISTEN is an irreversible process. + * + * @param pcb the original tcp_pcb + * @param backlog the incoming connections queue limit + * @param err when NULL is returned, this contains the error reason + * @return tcp_pcb used for listening, consumes less memory. + * + * @note The original tcp_pcb is freed. This function therefore has to be + * called like this: + * tpcb = tcp_listen_with_backlog_and_err(tpcb, backlog, &err); + */ +struct tcp_pcb * +tcp_listen_with_backlog_and_err(struct tcp_pcb *pcb, u8_t backlog, err_t *err) +{ + struct tcp_pcb_listen *lpcb = NULL; + err_t res; + + LWIP_UNUSED_ARG(backlog); + + LWIP_ASSERT_CORE_LOCKED(); + + LWIP_ERROR("tcp_listen_with_backlog_and_err: invalid pcb", pcb != NULL, res = ERR_ARG; goto done); + LWIP_ERROR("tcp_listen_with_backlog_and_err: pcb already connected", pcb->state == CLOSED, res = ERR_CLSD; goto done); + + /* already listening? */ + if (pcb->state == LISTEN) { + lpcb = (struct tcp_pcb_listen *)pcb; + res = ERR_ALREADY; + goto done; + } +#if SO_REUSE + if (ip_get_option(pcb, SOF_REUSEADDR)) { + /* Since SOF_REUSEADDR allows reusing a local address before the pcb's usage + is declared (listen-/connection-pcb), we have to make sure now that + this port is only used once for every local IP. 
*/ + for (lpcb = tcp_listen_pcbs.listen_pcbs; lpcb != NULL; lpcb = lpcb->next) { + if ((lpcb->local_port == pcb->local_port) && + ip_addr_cmp(&lpcb->local_ip, &pcb->local_ip)) { + /* this address/port is already used */ + lpcb = NULL; + res = ERR_USE; + goto done; + } + } + } +#endif /* SO_REUSE */ + lpcb = (struct tcp_pcb_listen *)memp_malloc(MEMP_TCP_PCB_LISTEN); + if (lpcb == NULL) { + res = ERR_MEM; + goto done; + } + lpcb->callback_arg = pcb->callback_arg; + lpcb->local_port = pcb->local_port; + lpcb->state = LISTEN; + lpcb->prio = pcb->prio; + lpcb->so_options = pcb->so_options; + lpcb->netif_idx = NETIF_NO_INDEX; + lpcb->ttl = pcb->ttl; + lpcb->tos = pcb->tos; +#if LWIP_IPV4 && LWIP_IPV6 + IP_SET_TYPE_VAL(lpcb->remote_ip, pcb->local_ip.type); +#endif /* LWIP_IPV4 && LWIP_IPV6 */ + ip_addr_copy(lpcb->local_ip, pcb->local_ip); + if (pcb->local_port != 0) { + TCP_RMV(&tcp_bound_pcbs, pcb); + } +#if LWIP_TCP_PCB_NUM_EXT_ARGS + /* copy over ext_args to listening pcb */ + memcpy(&lpcb->ext_args, &pcb->ext_args, sizeof(pcb->ext_args)); +#endif + tcp_free(pcb); +#if LWIP_CALLBACK_API + lpcb->accept = tcp_accept_null; +#endif /* LWIP_CALLBACK_API */ +#if TCP_LISTEN_BACKLOG + lpcb->accepts_pending = 0; + tcp_backlog_set(lpcb, backlog); +#endif /* TCP_LISTEN_BACKLOG */ + TCP_REG(&tcp_listen_pcbs.pcbs, (struct tcp_pcb *)lpcb); + res = ERR_OK; +done: + if (err != NULL) { + *err = res; + } + return (struct tcp_pcb *)lpcb; +} + +/** + * Update the state that tracks the available window space to advertise. + * + * Returns how much extra window would be advertised if we sent an + * update now. + */ +u32_t +tcp_update_rcv_ann_wnd(struct tcp_pcb *pcb) +{ + u32_t new_right_edge; + + LWIP_ASSERT("tcp_update_rcv_ann_wnd: invalid pcb", pcb != NULL); + new_right_edge = pcb->rcv_nxt + pcb->rcv_wnd; + + if (TCP_SEQ_GEQ(new_right_edge, pcb->rcv_ann_right_edge + LWIP_MIN((TCP_WND / 2), pcb->mss))) { + /* we can advertise more window */ + pcb->rcv_ann_wnd = pcb->rcv_wnd; + return new_right_edge - pcb->rcv_ann_right_edge; + } else { + if (TCP_SEQ_GT(pcb->rcv_nxt, pcb->rcv_ann_right_edge)) { + /* Can happen due to other end sending out of advertised window, + * but within actual available (but not yet advertised) window */ + pcb->rcv_ann_wnd = 0; + } else { + /* keep the right edge of window constant */ + u32_t new_rcv_ann_wnd = pcb->rcv_ann_right_edge - pcb->rcv_nxt; +#if !LWIP_WND_SCALE + LWIP_ASSERT("new_rcv_ann_wnd <= 0xffff", new_rcv_ann_wnd <= 0xffff); +#endif + pcb->rcv_ann_wnd = (tcpwnd_size_t)new_rcv_ann_wnd; + } + return 0; + } +} + +/** + * @ingroup tcp_raw + * This function should be called by the application when it has + * processed the data. The purpose is to advertise a larger window + * when the data has been processed. 
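+ *
+ * The typical call site is the recv callback, once the application has
+ * consumed the data (a sketch; consume() stands in for real processing):
+ * @code{.c}
+ * static err_t my_recv(void *arg, struct tcp_pcb *tpcb, struct pbuf *p, err_t err)
+ * {
+ *   if (p == NULL) {               // the remote side closed the connection
+ *     return tcp_close(tpcb);
+ *   }
+ *   consume(p->payload, p->len);   // or walk the whole pbuf chain
+ *   tcp_recved(tpcb, p->tot_len);  // re-open the advertised receive window
+ *   pbuf_free(p);
+ *   return ERR_OK;
+ * }
+ * @endcode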
+ * + * @param pcb the tcp_pcb for which data is read + * @param len the amount of bytes that have been read by the application + */ +void +tcp_recved(struct tcp_pcb *pcb, u16_t len) +{ + u32_t wnd_inflation; + tcpwnd_size_t rcv_wnd; + + LWIP_ASSERT_CORE_LOCKED(); + + LWIP_ERROR("tcp_recved: invalid pcb", pcb != NULL, return); + + /* pcb->state LISTEN not allowed here */ + LWIP_ASSERT("don't call tcp_recved for listen-pcbs", + pcb->state != LISTEN); + + rcv_wnd = (tcpwnd_size_t)(pcb->rcv_wnd + len); + if ((rcv_wnd > TCP_WND_MAX(pcb)) || (rcv_wnd < pcb->rcv_wnd)) { + /* window got too big or tcpwnd_size_t overflow */ + LWIP_DEBUGF(TCP_DEBUG, ("tcp_recved: window got too big or tcpwnd_size_t overflow\n")); + pcb->rcv_wnd = TCP_WND_MAX(pcb); + } else { + pcb->rcv_wnd = rcv_wnd; + } + + wnd_inflation = tcp_update_rcv_ann_wnd(pcb); + + /* If the change in the right edge of window is significant (default + * watermark is TCP_WND/4), then send an explicit update now. + * Otherwise wait for a packet to be sent in the normal course of + * events (or more window to be available later) */ + if (wnd_inflation >= TCP_WND_UPDATE_THRESHOLD) { + tcp_ack_now(pcb); + tcp_output(pcb); + } + + LWIP_DEBUGF(TCP_DEBUG, ("tcp_recved: received %"U16_F" bytes, wnd %"TCPWNDSIZE_F" (%"TCPWNDSIZE_F").\n", + len, pcb->rcv_wnd, (u16_t)(TCP_WND_MAX(pcb) - pcb->rcv_wnd))); +} + +/** + * Allocate a new local TCP port. + * + * @return a new (free) local TCP port number + */ +static u16_t +tcp_new_port(void) +{ + u8_t i; + u16_t n = 0; + struct tcp_pcb *pcb; + +again: + tcp_port++; + if (tcp_port == TCP_LOCAL_PORT_RANGE_END) { + tcp_port = TCP_LOCAL_PORT_RANGE_START; + } + /* Check all PCB lists. */ + for (i = 0; i < NUM_TCP_PCB_LISTS; i++) { + for (pcb = *tcp_pcb_lists[i]; pcb != NULL; pcb = pcb->next) { + if (pcb->local_port == tcp_port) { + n++; + if (n > (TCP_LOCAL_PORT_RANGE_END - TCP_LOCAL_PORT_RANGE_START)) { + return 0; + } + goto again; + } + } + } + return tcp_port; +} + +/** + * @ingroup tcp_raw + * Connects to another host. The function given as the "connected" + * argument will be called when the connection has been established. + * Sets up the pcb to connect to the remote host and sends the + * initial SYN segment which opens the connection. + * + * The tcp_connect() function returns immediately; it does not wait for + * the connection to be properly setup. Instead, it will call the + * function specified as the fourth argument (the "connected" argument) + * when the connection is established. If the connection could not be + * properly established, either because the other host refused the + * connection or because the other host didn't answer, the "err" + * callback function of this pcb (registered with tcp_err, see below) + * will be called. + * + * The tcp_connect() function can return ERR_MEM if no memory is + * available for enqueueing the SYN segment. If the SYN indeed was + * enqueued successfully, the tcp_connect() function returns ERR_OK. 
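+ *
+ * A minimal sketch (the peer address, port and the on_error/on_connected
+ * callbacks are illustrative assumptions):
+ * @code{.c}
+ * ip_addr_t peer;
+ * IP_ADDR4(&peer, 192, 168, 11, 2);
+ * struct tcp_pcb *pcb = tcp_new();
+ * if (pcb != NULL) {
+ *   tcp_err(pcb, on_error);        // called if the connect attempt fails
+ *   if (tcp_connect(pcb, &peer, 8888, on_connected) != ERR_OK) {
+ *     tcp_abort(pcb);              // the SYN could not be enqueued
+ *   }
+ * }
+ * @endcode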
+ * + * @param pcb the tcp_pcb used to establish the connection + * @param ipaddr the remote ip address to connect to + * @param port the remote tcp port to connect to + * @param connected callback function to call when connected (on error, + the err calback will be called) + * @return ERR_VAL if invalid arguments are given + * ERR_OK if connect request has been sent + * other err_t values if connect request couldn't be sent + */ +err_t +tcp_connect(struct tcp_pcb *pcb, const ip_addr_t *ipaddr, u16_t port, + tcp_connected_fn connected) +{ + struct netif *netif = NULL; + err_t ret; + u32_t iss; + u16_t old_local_port; + + LWIP_ASSERT_CORE_LOCKED(); + + LWIP_ERROR("tcp_connect: invalid pcb", pcb != NULL, return ERR_ARG); + LWIP_ERROR("tcp_connect: invalid ipaddr", ipaddr != NULL, return ERR_ARG); + + LWIP_ERROR("tcp_connect: can only connect from state CLOSED", pcb->state == CLOSED, return ERR_ISCONN); + + LWIP_DEBUGF(TCP_DEBUG, ("tcp_connect to port %"U16_F"\n", port)); + ip_addr_set(&pcb->remote_ip, ipaddr); + pcb->remote_port = port; + + if (pcb->netif_idx != NETIF_NO_INDEX) { + netif = netif_get_by_index(pcb->netif_idx); + } else { + /* check if we have a route to the remote host */ + netif = ip_route(&pcb->local_ip, &pcb->remote_ip); + } + if (netif == NULL) { + /* Don't even try to send a SYN packet if we have no route since that will fail. */ + return ERR_RTE; + } + + /* check if local IP has been assigned to pcb, if not, get one */ + if (ip_addr_isany(&pcb->local_ip)) { + const ip_addr_t *local_ip = ip_netif_get_local_ip(netif, ipaddr); + if (local_ip == NULL) { + return ERR_RTE; + } + ip_addr_copy(pcb->local_ip, *local_ip); + } + +#if LWIP_IPV6 && LWIP_IPV6_SCOPES + /* If the given IP address should have a zone but doesn't, assign one now. + * Given that we already have the target netif, this is easy and cheap. */ + if (IP_IS_V6(&pcb->remote_ip) && + ip6_addr_lacks_zone(ip_2_ip6(&pcb->remote_ip), IP6_UNICAST)) { + ip6_addr_assign_zone(ip_2_ip6(&pcb->remote_ip), IP6_UNICAST, netif); + } +#endif /* LWIP_IPV6 && LWIP_IPV6_SCOPES */ + + old_local_port = pcb->local_port; + if (pcb->local_port == 0) { + pcb->local_port = tcp_new_port(); + if (pcb->local_port == 0) { + return ERR_BUF; + } + } else { +#if SO_REUSE + if (ip_get_option(pcb, SOF_REUSEADDR)) { + /* Since SOF_REUSEADDR allows reusing a local address, we have to make sure + now that the 5-tuple is unique. */ + struct tcp_pcb *cpcb; + int i; + /* Don't check listen- and bound-PCBs, check active- and TIME-WAIT PCBs. */ + for (i = 2; i < NUM_TCP_PCB_LISTS; i++) { + for (cpcb = *tcp_pcb_lists[i]; cpcb != NULL; cpcb = cpcb->next) { + if ((cpcb->local_port == pcb->local_port) && + (cpcb->remote_port == port) && + ip_addr_cmp(&cpcb->local_ip, &pcb->local_ip) && + ip_addr_cmp(&cpcb->remote_ip, ipaddr)) { + /* linux returns EISCONN here, but ERR_USE should be OK for us */ + return ERR_USE; + } + } + } + } +#endif /* SO_REUSE */ + } + + iss = tcp_next_iss(pcb); + pcb->rcv_nxt = 0; + pcb->snd_nxt = iss; + pcb->lastack = iss - 1; + pcb->snd_wl2 = iss - 1; + pcb->snd_lbb = iss - 1; + /* Start with a window that does not need scaling. When window scaling is + enabled and used, the window is enlarged when both sides agree on scaling. */ + pcb->rcv_wnd = pcb->rcv_ann_wnd = TCPWND_MIN16(TCP_WND); + pcb->rcv_ann_right_edge = pcb->rcv_nxt; + pcb->snd_wnd = TCP_WND; + /* As initial send MSS, we use TCP_MSS but limit it to 536. + The send MSS is updated when an MSS option is received. 
*/ + pcb->mss = INITIAL_MSS; +#if TCP_CALCULATE_EFF_SEND_MSS + pcb->mss = tcp_eff_send_mss_netif(pcb->mss, netif, &pcb->remote_ip); +#endif /* TCP_CALCULATE_EFF_SEND_MSS */ + pcb->cwnd = 1; +#if LWIP_CALLBACK_API + pcb->connected = connected; +#else /* LWIP_CALLBACK_API */ + LWIP_UNUSED_ARG(connected); +#endif /* LWIP_CALLBACK_API */ + + /* Send a SYN together with the MSS option. */ + ret = tcp_enqueue_flags(pcb, TCP_SYN); + if (ret == ERR_OK) { + /* SYN segment was enqueued, changed the pcbs state now */ + pcb->state = SYN_SENT; + if (old_local_port != 0) { + TCP_RMV(&tcp_bound_pcbs, pcb); + } + TCP_REG_ACTIVE(pcb); + MIB2_STATS_INC(mib2.tcpactiveopens); + + tcp_output(pcb); + } + return ret; +} + +/** + * Called every 500 ms and implements the retransmission timer and the timer that + * removes PCBs that have been in TIME-WAIT for enough time. It also increments + * various timers such as the inactivity timer in each PCB. + * + * Automatically called from tcp_tmr(). + */ +void +tcp_slowtmr(void) +{ + struct tcp_pcb *pcb, *prev; + tcpwnd_size_t eff_wnd; + u8_t pcb_remove; /* flag if a PCB should be removed */ + u8_t pcb_reset; /* flag if a RST should be sent when removing */ + err_t err; + + err = ERR_OK; + + ++tcp_ticks; + ++tcp_timer_ctr; + +tcp_slowtmr_start: + /* Steps through all of the active PCBs. */ + prev = NULL; + pcb = tcp_active_pcbs; + if (pcb == NULL) { + LWIP_DEBUGF(TCP_DEBUG, ("tcp_slowtmr: no active pcbs\n")); + } + while (pcb != NULL) { + LWIP_DEBUGF(TCP_DEBUG, ("tcp_slowtmr: processing active pcb\n")); + LWIP_ASSERT("tcp_slowtmr: active pcb->state != CLOSED\n", pcb->state != CLOSED); + LWIP_ASSERT("tcp_slowtmr: active pcb->state != LISTEN\n", pcb->state != LISTEN); + LWIP_ASSERT("tcp_slowtmr: active pcb->state != TIME-WAIT\n", pcb->state != TIME_WAIT); + if (pcb->last_timer == tcp_timer_ctr) { + /* skip this pcb, we have already processed it */ + prev = pcb; + pcb = pcb->next; + continue; + } + pcb->last_timer = tcp_timer_ctr; + + pcb_remove = 0; + pcb_reset = 0; + + if (pcb->state == SYN_SENT && pcb->nrtx >= TCP_SYNMAXRTX) { + ++pcb_remove; + LWIP_DEBUGF(TCP_DEBUG, ("tcp_slowtmr: max SYN retries reached\n")); + } else if (pcb->nrtx >= TCP_MAXRTX) { + ++pcb_remove; + LWIP_DEBUGF(TCP_DEBUG, ("tcp_slowtmr: max DATA retries reached\n")); + } else { + if (pcb->persist_backoff > 0) { + LWIP_ASSERT("tcp_slowtimr: persist ticking with in-flight data", pcb->unacked == NULL); + LWIP_ASSERT("tcp_slowtimr: persist ticking with empty send buffer", pcb->unsent != NULL); + if (pcb->persist_probe >= TCP_MAXRTX) { + ++pcb_remove; /* max probes reached */ + } else { + u8_t backoff_cnt = tcp_persist_backoff[pcb->persist_backoff - 1]; + if (pcb->persist_cnt < backoff_cnt) { + pcb->persist_cnt++; + } + if (pcb->persist_cnt >= backoff_cnt) { + int next_slot = 1; /* increment timer to next slot */ + /* If snd_wnd is zero, send 1 byte probes */ + if (pcb->snd_wnd == 0) { + if (tcp_zero_window_probe(pcb) != ERR_OK) { + next_slot = 0; /* try probe again with current slot */ + } + /* snd_wnd not fully closed, split unsent head and fill window */ + } else { + if (tcp_split_unsent_seg(pcb, (u16_t)pcb->snd_wnd) == ERR_OK) { + if (tcp_output(pcb) == ERR_OK) { + /* sending will cancel persist timer, else retry with current slot */ + next_slot = 0; + } + } + } + if (next_slot) { + pcb->persist_cnt = 0; + if (pcb->persist_backoff < sizeof(tcp_persist_backoff)) { + pcb->persist_backoff++; + } + } + } + } + } else { + /* Increase the retransmission timer if it is running */ + if ((pcb->rtime >= 0) && 
(pcb->rtime < 0x7FFF)) { + ++pcb->rtime; + } + + if (pcb->rtime >= pcb->rto) { + /* Time for a retransmission. */ + LWIP_DEBUGF(TCP_RTO_DEBUG, ("tcp_slowtmr: rtime %"S16_F + " pcb->rto %"S16_F"\n", + pcb->rtime, pcb->rto)); + /* If prepare phase fails but we have unsent data but no unacked data, + still execute the backoff calculations below, as this means we somehow + failed to send segment. */ + if ((tcp_rexmit_rto_prepare(pcb) == ERR_OK) || ((pcb->unacked == NULL) && (pcb->unsent != NULL))) { + /* Double retransmission time-out unless we are trying to + * connect to somebody (i.e., we are in SYN_SENT). */ + if (pcb->state != SYN_SENT) { + u8_t backoff_idx = LWIP_MIN(pcb->nrtx, sizeof(tcp_backoff) - 1); + int calc_rto = ((pcb->sa >> 3) + pcb->sv) << tcp_backoff[backoff_idx]; + pcb->rto = (s16_t)LWIP_MIN(calc_rto, 0x7FFF); + } + + /* Reset the retransmission timer. */ + pcb->rtime = 0; + + /* Reduce congestion window and ssthresh. */ + eff_wnd = LWIP_MIN(pcb->cwnd, pcb->snd_wnd); + pcb->ssthresh = eff_wnd >> 1; + if (pcb->ssthresh < (tcpwnd_size_t)(pcb->mss << 1)) { + pcb->ssthresh = (tcpwnd_size_t)(pcb->mss << 1); + } + pcb->cwnd = pcb->mss; + LWIP_DEBUGF(TCP_CWND_DEBUG, ("tcp_slowtmr: cwnd %"TCPWNDSIZE_F + " ssthresh %"TCPWNDSIZE_F"\n", + pcb->cwnd, pcb->ssthresh)); + pcb->bytes_acked = 0; + + /* The following needs to be called AFTER cwnd is set to one + mss - STJ */ + tcp_rexmit_rto_commit(pcb); + } + } + } + } + /* Check if this PCB has stayed too long in FIN-WAIT-2 */ + if (pcb->state == FIN_WAIT_2) { + /* If this PCB is in FIN_WAIT_2 because of SHUT_WR don't let it time out. */ + if (pcb->flags & TF_RXCLOSED) { + /* PCB was fully closed (either through close() or SHUT_RDWR): + normal FIN-WAIT timeout handling. */ + if ((u32_t)(tcp_ticks - pcb->tmr) > + TCP_FIN_WAIT_TIMEOUT / TCP_SLOW_INTERVAL) { + ++pcb_remove; + LWIP_DEBUGF(TCP_DEBUG, ("tcp_slowtmr: removing pcb stuck in FIN-WAIT-2\n")); + } + } + } + + /* Check if KEEPALIVE should be sent */ + if (ip_get_option(pcb, SOF_KEEPALIVE) && + ((pcb->state == ESTABLISHED) || + (pcb->state == CLOSE_WAIT))) { + if ((u32_t)(tcp_ticks - pcb->tmr) > + (pcb->keep_idle + TCP_KEEP_DUR(pcb)) / TCP_SLOW_INTERVAL) { + LWIP_DEBUGF(TCP_DEBUG, ("tcp_slowtmr: KEEPALIVE timeout. Aborting connection to ")); + ip_addr_debug_print_val(TCP_DEBUG, pcb->remote_ip); + LWIP_DEBUGF(TCP_DEBUG, ("\n")); + + ++pcb_remove; + ++pcb_reset; + } else if ((u32_t)(tcp_ticks - pcb->tmr) > + (pcb->keep_idle + pcb->keep_cnt_sent * TCP_KEEP_INTVL(pcb)) + / TCP_SLOW_INTERVAL) { + err = tcp_keepalive(pcb); + if (err == ERR_OK) { + pcb->keep_cnt_sent++; + } + } + } + + /* If this PCB has queued out of sequence data, but has been + inactive for too long, will drop the data (it will eventually + be retransmitted). 
*/ +#if TCP_QUEUE_OOSEQ + if (pcb->ooseq != NULL && + (tcp_ticks - pcb->tmr >= (u32_t)pcb->rto * TCP_OOSEQ_TIMEOUT)) { + LWIP_DEBUGF(TCP_CWND_DEBUG, ("tcp_slowtmr: dropping OOSEQ queued data\n")); + tcp_free_ooseq(pcb); + } +#endif /* TCP_QUEUE_OOSEQ */ + + /* Check if this PCB has stayed too long in SYN-RCVD */ + if (pcb->state == SYN_RCVD) { + if ((u32_t)(tcp_ticks - pcb->tmr) > + TCP_SYN_RCVD_TIMEOUT / TCP_SLOW_INTERVAL) { + ++pcb_remove; + LWIP_DEBUGF(TCP_DEBUG, ("tcp_slowtmr: removing pcb stuck in SYN-RCVD\n")); + } + } + + /* Check if this PCB has stayed too long in LAST-ACK */ + if (pcb->state == LAST_ACK) { + if ((u32_t)(tcp_ticks - pcb->tmr) > 2 * TCP_MSL / TCP_SLOW_INTERVAL) { + ++pcb_remove; + LWIP_DEBUGF(TCP_DEBUG, ("tcp_slowtmr: removing pcb stuck in LAST-ACK\n")); + } + } + + /* If the PCB should be removed, do it. */ + if (pcb_remove) { + struct tcp_pcb *pcb2; +#if LWIP_CALLBACK_API + tcp_err_fn err_fn = pcb->errf; +#endif /* LWIP_CALLBACK_API */ + void *err_arg; + enum tcp_state last_state; + tcp_pcb_purge(pcb); + /* Remove PCB from tcp_active_pcbs list. */ + if (prev != NULL) { + LWIP_ASSERT("tcp_slowtmr: middle tcp != tcp_active_pcbs", pcb != tcp_active_pcbs); + prev->next = pcb->next; + } else { + /* This PCB was the first. */ + LWIP_ASSERT("tcp_slowtmr: first pcb == tcp_active_pcbs", tcp_active_pcbs == pcb); + tcp_active_pcbs = pcb->next; + } + + if (pcb_reset) { + tcp_rst(pcb, pcb->snd_nxt, pcb->rcv_nxt, &pcb->local_ip, &pcb->remote_ip, + pcb->local_port, pcb->remote_port); + } + + err_arg = pcb->callback_arg; + last_state = pcb->state; + pcb2 = pcb; + pcb = pcb->next; + tcp_free(pcb2); + + tcp_active_pcbs_changed = 0; + TCP_EVENT_ERR(last_state, err_fn, err_arg, ERR_ABRT); + if (tcp_active_pcbs_changed) { + goto tcp_slowtmr_start; + } + } else { + /* get the 'next' element now and work with 'prev' below (in case of abort) */ + prev = pcb; + pcb = pcb->next; + + /* We check if we should poll the connection. */ + ++prev->polltmr; + if (prev->polltmr >= prev->pollinterval) { + prev->polltmr = 0; + LWIP_DEBUGF(TCP_DEBUG, ("tcp_slowtmr: polling application\n")); + tcp_active_pcbs_changed = 0; + TCP_EVENT_POLL(prev, err); + if (tcp_active_pcbs_changed) { + goto tcp_slowtmr_start; + } + /* if err == ERR_ABRT, 'prev' is already deallocated */ + if (err == ERR_OK) { + tcp_output(prev); + } + } + } + } + + + /* Steps through all of the TIME-WAIT PCBs. */ + prev = NULL; + pcb = tcp_tw_pcbs; + while (pcb != NULL) { + LWIP_ASSERT("tcp_slowtmr: TIME-WAIT pcb->state == TIME-WAIT", pcb->state == TIME_WAIT); + pcb_remove = 0; + + /* Check if this PCB has stayed long enough in TIME-WAIT */ + if ((u32_t)(tcp_ticks - pcb->tmr) > 2 * TCP_MSL / TCP_SLOW_INTERVAL) { + ++pcb_remove; + } + + /* If the PCB should be removed, do it. */ + if (pcb_remove) { + struct tcp_pcb *pcb2; + tcp_pcb_purge(pcb); + /* Remove PCB from tcp_tw_pcbs list. */ + if (prev != NULL) { + LWIP_ASSERT("tcp_slowtmr: middle tcp != tcp_tw_pcbs", pcb != tcp_tw_pcbs); + prev->next = pcb->next; + } else { + /* This PCB was the first. */ + LWIP_ASSERT("tcp_slowtmr: first pcb == tcp_tw_pcbs", tcp_tw_pcbs == pcb); + tcp_tw_pcbs = pcb->next; + } + pcb2 = pcb; + pcb = pcb->next; + tcp_free(pcb2); + } else { + prev = pcb; + pcb = pcb->next; + } + } +} + +/** + * Is called every TCP_FAST_INTERVAL (250 ms) and process data previously + * "refused" by upper layer (application) and sends delayed ACKs or pending FINs. + * + * Automatically called from tcp_tmr(). 
+ */ +void +tcp_fasttmr(void) +{ + struct tcp_pcb *pcb; + + ++tcp_timer_ctr; + +tcp_fasttmr_start: + pcb = tcp_active_pcbs; + + while (pcb != NULL) { + if (pcb->last_timer != tcp_timer_ctr) { + struct tcp_pcb *next; + pcb->last_timer = tcp_timer_ctr; + /* send delayed ACKs */ + if (pcb->flags & TF_ACK_DELAY) { + LWIP_DEBUGF(TCP_DEBUG, ("tcp_fasttmr: delayed ACK\n")); + tcp_ack_now(pcb); + tcp_output(pcb); + tcp_clear_flags(pcb, TF_ACK_DELAY | TF_ACK_NOW); + } + /* send pending FIN */ + if (pcb->flags & TF_CLOSEPEND) { + LWIP_DEBUGF(TCP_DEBUG, ("tcp_fasttmr: pending FIN\n")); + tcp_clear_flags(pcb, TF_CLOSEPEND); + tcp_close_shutdown_fin(pcb); + } + + next = pcb->next; + + /* If there is data which was previously "refused" by upper layer */ + if (pcb->refused_data != NULL) { + tcp_active_pcbs_changed = 0; + tcp_process_refused_data(pcb); + if (tcp_active_pcbs_changed) { + /* application callback has changed the pcb list: restart the loop */ + goto tcp_fasttmr_start; + } + } + pcb = next; + } else { + pcb = pcb->next; + } + } +} + +/** Call tcp_output for all active pcbs that have TF_NAGLEMEMERR set */ +void +tcp_txnow(void) +{ + struct tcp_pcb *pcb; + + for (pcb = tcp_active_pcbs; pcb != NULL; pcb = pcb->next) { + if (pcb->flags & TF_NAGLEMEMERR) { + tcp_output(pcb); + } + } +} + +/** Pass pcb->refused_data to the recv callback */ +err_t +tcp_process_refused_data(struct tcp_pcb *pcb) +{ +#if TCP_QUEUE_OOSEQ && LWIP_WND_SCALE + struct pbuf *rest; +#endif /* TCP_QUEUE_OOSEQ && LWIP_WND_SCALE */ + + LWIP_ERROR("tcp_process_refused_data: invalid pcb", pcb != NULL, return ERR_ARG); + +#if TCP_QUEUE_OOSEQ && LWIP_WND_SCALE + while (pcb->refused_data != NULL) +#endif /* TCP_QUEUE_OOSEQ && LWIP_WND_SCALE */ + { + err_t err; + u8_t refused_flags = pcb->refused_data->flags; + /* set pcb->refused_data to NULL in case the callback frees it and then + closes the pcb */ + struct pbuf *refused_data = pcb->refused_data; +#if TCP_QUEUE_OOSEQ && LWIP_WND_SCALE + pbuf_split_64k(refused_data, &rest); + pcb->refused_data = rest; +#else /* TCP_QUEUE_OOSEQ && LWIP_WND_SCALE */ + pcb->refused_data = NULL; +#endif /* TCP_QUEUE_OOSEQ && LWIP_WND_SCALE */ + /* Notify again application with data previously received. */ + LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_input: notify kept packet\n")); + TCP_EVENT_RECV(pcb, refused_data, ERR_OK, err); + if (err == ERR_OK) { + /* did refused_data include a FIN? */ + if ((refused_flags & PBUF_FLAG_TCP_FIN) +#if TCP_QUEUE_OOSEQ && LWIP_WND_SCALE + && (rest == NULL) +#endif /* TCP_QUEUE_OOSEQ && LWIP_WND_SCALE */ + ) { + /* correct rcv_wnd as the application won't call tcp_recved() + for the FIN's seqno */ + if (pcb->rcv_wnd != TCP_WND_MAX(pcb)) { + pcb->rcv_wnd++; + } + TCP_EVENT_CLOSED(pcb, err); + if (err == ERR_ABRT) { + return ERR_ABRT; + } + } + } else if (err == ERR_ABRT) { + /* if err == ERR_ABRT, 'pcb' is already deallocated */ + /* Drop incoming packets because pcb is "full" (only if the incoming + segment contains data). */ + LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_input: drop incoming packets, because pcb is \"full\"\n")); + return ERR_ABRT; + } else { + /* data is still refused, pbuf is still valid (go on for ACK-only packets) */ +#if TCP_QUEUE_OOSEQ && LWIP_WND_SCALE + if (rest != NULL) { + pbuf_cat(refused_data, rest); + } +#endif /* TCP_QUEUE_OOSEQ && LWIP_WND_SCALE */ + pcb->refused_data = refused_data; + return ERR_INPROGRESS; + } + } + return ERR_OK; +} + +/** + * Deallocates a list of TCP segments (tcp_seg structures). 
+ * + * @param seg tcp_seg list of TCP segments to free + */ +void +tcp_segs_free(struct tcp_seg *seg) +{ + while (seg != NULL) { + struct tcp_seg *next = seg->next; + tcp_seg_free(seg); + seg = next; + } +} + +/** + * Frees a TCP segment (tcp_seg structure). + * + * @param seg single tcp_seg to free + */ +void +tcp_seg_free(struct tcp_seg *seg) +{ + if (seg != NULL) { + if (seg->p != NULL) { + pbuf_free(seg->p); +#if TCP_DEBUG + seg->p = NULL; +#endif /* TCP_DEBUG */ + } + memp_free(MEMP_TCP_SEG, seg); + } +} + +/** + * @ingroup tcp + * Sets the priority of a connection. + * + * @param pcb the tcp_pcb to manipulate + * @param prio new priority + */ +void +tcp_setprio(struct tcp_pcb *pcb, u8_t prio) +{ + LWIP_ASSERT_CORE_LOCKED(); + + LWIP_ERROR("tcp_setprio: invalid pcb", pcb != NULL, return); + + pcb->prio = prio; +} + +#if TCP_QUEUE_OOSEQ +/** + * Returns a copy of the given TCP segment. + * The pbuf and data are not copied, only the pointers + * + * @param seg the old tcp_seg + * @return a copy of seg + */ +struct tcp_seg * +tcp_seg_copy(struct tcp_seg *seg) +{ + struct tcp_seg *cseg; + + LWIP_ASSERT("tcp_seg_copy: invalid seg", seg != NULL); + + cseg = (struct tcp_seg *)memp_malloc(MEMP_TCP_SEG); + if (cseg == NULL) { + return NULL; + } + SMEMCPY((u8_t *)cseg, (const u8_t *)seg, sizeof(struct tcp_seg)); + pbuf_ref(cseg->p); + return cseg; +} +#endif /* TCP_QUEUE_OOSEQ */ + +#if LWIP_CALLBACK_API +/** + * Default receive callback that is called if the user didn't register + * a recv callback for the pcb. + */ +err_t +tcp_recv_null(void *arg, struct tcp_pcb *pcb, struct pbuf *p, err_t err) +{ + LWIP_UNUSED_ARG(arg); + + LWIP_ERROR("tcp_recv_null: invalid pcb", pcb != NULL, return ERR_ARG); + + if (p != NULL) { + tcp_recved(pcb, p->tot_len); + pbuf_free(p); + } else if (err == ERR_OK) { + return tcp_close(pcb); + } + return ERR_OK; +} +#endif /* LWIP_CALLBACK_API */ + +/** + * Kills the oldest active connection that has a lower priority than 'prio'. + * + * @param prio minimum priority + */ +static void +tcp_kill_prio(u8_t prio) +{ + struct tcp_pcb *pcb, *inactive; + u32_t inactivity; + u8_t mprio; + + mprio = LWIP_MIN(TCP_PRIO_MAX, prio); + + /* We want to kill connections with a lower prio, so bail out if + * supplied prio is 0 - there can never be a lower prio + */ + if (mprio == 0) { + return; + } + + /* We only want kill connections with a lower prio, so decrement prio by one + * and start searching for oldest connection with same or lower priority than mprio. + * We want to find the connections with the lowest possible prio, and among + * these the one with the longest inactivity time. + */ + mprio--; + + inactivity = 0; + inactive = NULL; + for (pcb = tcp_active_pcbs; pcb != NULL; pcb = pcb->next) { + /* lower prio is always a kill candidate */ + if ((pcb->prio < mprio) || + /* longer inactivity is also a kill candidate */ + ((pcb->prio == mprio) && ((u32_t)(tcp_ticks - pcb->tmr) >= inactivity))) { + inactivity = tcp_ticks - pcb->tmr; + inactive = pcb; + mprio = pcb->prio; + } + } + if (inactive != NULL) { + LWIP_DEBUGF(TCP_DEBUG, ("tcp_kill_prio: killing oldest PCB %p (%"S32_F")\n", + (void *)inactive, inactivity)); + tcp_abort(inactive); + } +} + +/** + * Kills the oldest connection that is in specific state. + * Called from tcp_alloc() for LAST_ACK and CLOSING if no more connections are available. 
+ */ +static void +tcp_kill_state(enum tcp_state state) +{ + struct tcp_pcb *pcb, *inactive; + u32_t inactivity; + + LWIP_ASSERT("invalid state", (state == CLOSING) || (state == LAST_ACK)); + + inactivity = 0; + inactive = NULL; + /* Go through the list of active pcbs and get the oldest pcb that is in state + CLOSING/LAST_ACK. */ + for (pcb = tcp_active_pcbs; pcb != NULL; pcb = pcb->next) { + if (pcb->state == state) { + if ((u32_t)(tcp_ticks - pcb->tmr) >= inactivity) { + inactivity = tcp_ticks - pcb->tmr; + inactive = pcb; + } + } + } + if (inactive != NULL) { + LWIP_DEBUGF(TCP_DEBUG, ("tcp_kill_closing: killing oldest %s PCB %p (%"S32_F")\n", + tcp_state_str[state], (void *)inactive, inactivity)); + /* Don't send a RST, since no data is lost. */ + tcp_abandon(inactive, 0); + } +} + +/** + * Kills the oldest connection that is in TIME_WAIT state. + * Called from tcp_alloc() if no more connections are available. + */ +static void +tcp_kill_timewait(void) +{ + struct tcp_pcb *pcb, *inactive; + u32_t inactivity; + + inactivity = 0; + inactive = NULL; + /* Go through the list of TIME_WAIT pcbs and get the oldest pcb. */ + for (pcb = tcp_tw_pcbs; pcb != NULL; pcb = pcb->next) { + if ((u32_t)(tcp_ticks - pcb->tmr) >= inactivity) { + inactivity = tcp_ticks - pcb->tmr; + inactive = pcb; + } + } + if (inactive != NULL) { + LWIP_DEBUGF(TCP_DEBUG, ("tcp_kill_timewait: killing oldest TIME-WAIT PCB %p (%"S32_F")\n", + (void *)inactive, inactivity)); + tcp_abort(inactive); + } +} + +/* Called when allocating a pcb fails. + * In this case, we want to handle all pcbs that want to close first: if we can + * now send the FIN (which failed before), the pcb might be in a state that is + * OK for us to now free it. + */ +static void +tcp_handle_closepend(void) +{ + struct tcp_pcb *pcb = tcp_active_pcbs; + + while (pcb != NULL) { + struct tcp_pcb *next = pcb->next; + /* send pending FIN */ + if (pcb->flags & TF_CLOSEPEND) { + LWIP_DEBUGF(TCP_DEBUG, ("tcp_handle_closepend: pending FIN\n")); + tcp_clear_flags(pcb, TF_CLOSEPEND); + tcp_close_shutdown_fin(pcb); + } + pcb = next; + } +} + +/** + * Allocate a new tcp_pcb structure. + * + * @param prio priority for the new pcb + * @return a new tcp_pcb that initially is in state CLOSED + */ +struct tcp_pcb * +tcp_alloc(u8_t prio) +{ + struct tcp_pcb *pcb; + + LWIP_ASSERT_CORE_LOCKED(); + + pcb = (struct tcp_pcb *)memp_malloc(MEMP_TCP_PCB); + if (pcb == NULL) { + /* Try to send FIN for all pcbs stuck in TF_CLOSEPEND first */ + tcp_handle_closepend(); + + /* Try killing oldest connection in TIME-WAIT. */ + LWIP_DEBUGF(TCP_DEBUG, ("tcp_alloc: killing off oldest TIME-WAIT connection\n")); + tcp_kill_timewait(); + /* Try to allocate a tcp_pcb again. */ + pcb = (struct tcp_pcb *)memp_malloc(MEMP_TCP_PCB); + if (pcb == NULL) { + /* Try killing oldest connection in LAST-ACK (these wouldn't go to TIME-WAIT). */ + LWIP_DEBUGF(TCP_DEBUG, ("tcp_alloc: killing off oldest LAST-ACK connection\n")); + tcp_kill_state(LAST_ACK); + /* Try to allocate a tcp_pcb again. */ + pcb = (struct tcp_pcb *)memp_malloc(MEMP_TCP_PCB); + if (pcb == NULL) { + /* Try killing oldest connection in CLOSING. */ + LWIP_DEBUGF(TCP_DEBUG, ("tcp_alloc: killing off oldest CLOSING connection\n")); + tcp_kill_state(CLOSING); + /* Try to allocate a tcp_pcb again. */ + pcb = (struct tcp_pcb *)memp_malloc(MEMP_TCP_PCB); + if (pcb == NULL) { + /* Try killing oldest active connection with lower priority than the new one. 
*/ + LWIP_DEBUGF(TCP_DEBUG, ("tcp_alloc: killing oldest connection with prio lower than %d\n", prio)); + tcp_kill_prio(prio); + /* Try to allocate a tcp_pcb again. */ + pcb = (struct tcp_pcb *)memp_malloc(MEMP_TCP_PCB); + if (pcb != NULL) { + /* adjust err stats: memp_malloc failed multiple times before */ + MEMP_STATS_DEC(err, MEMP_TCP_PCB); + } + } + if (pcb != NULL) { + /* adjust err stats: memp_malloc failed multiple times before */ + MEMP_STATS_DEC(err, MEMP_TCP_PCB); + } + } + if (pcb != NULL) { + /* adjust err stats: memp_malloc failed multiple times before */ + MEMP_STATS_DEC(err, MEMP_TCP_PCB); + } + } + if (pcb != NULL) { + /* adjust err stats: memp_malloc failed above */ + MEMP_STATS_DEC(err, MEMP_TCP_PCB); + } + } + if (pcb != NULL) { + /* zero out the whole pcb, so there is no need to initialize members to zero */ + memset(pcb, 0, sizeof(struct tcp_pcb)); + pcb->prio = prio; + pcb->snd_buf = TCP_SND_BUF; + /* Start with a window that does not need scaling. When window scaling is + enabled and used, the window is enlarged when both sides agree on scaling. */ + pcb->rcv_wnd = pcb->rcv_ann_wnd = TCPWND_MIN16(TCP_WND); + pcb->ttl = TCP_TTL; + /* As initial send MSS, we use TCP_MSS but limit it to 536. + The send MSS is updated when an MSS option is received. */ + pcb->mss = INITIAL_MSS; + pcb->rto = 3000 / TCP_SLOW_INTERVAL; + pcb->sv = 3000 / TCP_SLOW_INTERVAL; + pcb->rtime = -1; + pcb->cwnd = 1; + pcb->tmr = tcp_ticks; + pcb->last_timer = tcp_timer_ctr; + + /* RFC 5681 recommends setting ssthresh abritrarily high and gives an example + of using the largest advertised receive window. We've seen complications with + receiving TCPs that use window scaling and/or window auto-tuning where the + initial advertised window is very small and then grows rapidly once the + connection is established. To avoid these complications, we set ssthresh to the + largest effective cwnd (amount of in-flight data) that the sender can have. */ + pcb->ssthresh = TCP_SND_BUF; + +#if LWIP_CALLBACK_API + pcb->recv = tcp_recv_null; +#endif /* LWIP_CALLBACK_API */ + + /* Init KEEPALIVE timer */ + pcb->keep_idle = TCP_KEEPIDLE_DEFAULT; + +#if LWIP_TCP_KEEPALIVE + pcb->keep_intvl = TCP_KEEPINTVL_DEFAULT; + pcb->keep_cnt = TCP_KEEPCNT_DEFAULT; +#endif /* LWIP_TCP_KEEPALIVE */ + } + return pcb; +} + +/** + * @ingroup tcp_raw + * Creates a new TCP protocol control block but doesn't place it on + * any of the TCP PCB lists. + * The pcb is not put on any list until binding using tcp_bind(). + * If memory is not available for creating the new pcb, NULL is returned. + * + * @internal: Maybe there should be a idle TCP PCB list where these + * PCBs are put on. Port reservation using tcp_bind() is implemented but + * allocated pcbs that are not bound can't be killed automatically if wanting + * to allocate a pcb with higher prio (@see tcp_kill_prio()) + * + * @return a new tcp_pcb that initially is in state CLOSED + */ +struct tcp_pcb * +tcp_new(void) +{ + return tcp_alloc(TCP_PRIO_NORMAL); +} + +/** + * @ingroup tcp_raw + * Creates a new TCP protocol control block but doesn't + * place it on any of the TCP PCB lists. + * The pcb is not put on any list until binding using tcp_bind(). + * + * @param type IP address type, see @ref lwip_ip_addr_type definitions. + * If you want to listen to IPv4 and IPv6 (dual-stack) connections, + * supply @ref IPADDR_TYPE_ANY as argument and bind to @ref IP_ANY_TYPE. 
+ * @return a new tcp_pcb that initially is in state CLOSED + */ +struct tcp_pcb * +tcp_new_ip_type(u8_t type) +{ + struct tcp_pcb *pcb; + pcb = tcp_alloc(TCP_PRIO_NORMAL); +#if LWIP_IPV4 && LWIP_IPV6 + if (pcb != NULL) { + IP_SET_TYPE_VAL(pcb->local_ip, type); + IP_SET_TYPE_VAL(pcb->remote_ip, type); + } +#else + LWIP_UNUSED_ARG(type); +#endif /* LWIP_IPV4 && LWIP_IPV6 */ + return pcb; +} + +/** + * @ingroup tcp_raw + * Specifies the program specific state that should be passed to all + * other callback functions. The "pcb" argument is the current TCP + * connection control block, and the "arg" argument is the argument + * that will be passed to the callbacks. + * + * @param pcb tcp_pcb to set the callback argument + * @param arg void pointer argument to pass to callback functions + */ +void +tcp_arg(struct tcp_pcb *pcb, void *arg) +{ + LWIP_ASSERT_CORE_LOCKED(); + /* This function is allowed to be called for both listen pcbs and + connection pcbs. */ + if (pcb != NULL) { + pcb->callback_arg = arg; + } +} +#if LWIP_CALLBACK_API + +/** + * @ingroup tcp_raw + * Sets the callback function that will be called when new data + * arrives. The callback function will be passed a NULL pbuf to + * indicate that the remote host has closed the connection. If the + * callback function returns ERR_OK or ERR_ABRT it must have + * freed the pbuf, otherwise it must not have freed it. + * + * @param pcb tcp_pcb to set the recv callback + * @param recv callback function to call for this pcb when data is received + */ +void +tcp_recv(struct tcp_pcb *pcb, tcp_recv_fn recv) +{ + LWIP_ASSERT_CORE_LOCKED(); + if (pcb != NULL) { + LWIP_ASSERT("invalid socket state for recv callback", pcb->state != LISTEN); + pcb->recv = recv; + } +} + +/** + * @ingroup tcp_raw + * Specifies the callback function that should be called when data has + * successfully been received (i.e., acknowledged) by the remote + * host. The len argument passed to the callback function gives the + * amount bytes that was acknowledged by the last acknowledgment. + * + * @param pcb tcp_pcb to set the sent callback + * @param sent callback function to call for this pcb when data is successfully sent + */ +void +tcp_sent(struct tcp_pcb *pcb, tcp_sent_fn sent) +{ + LWIP_ASSERT_CORE_LOCKED(); + if (pcb != NULL) { + LWIP_ASSERT("invalid socket state for sent callback", pcb->state != LISTEN); + pcb->sent = sent; + } +} + +/** + * @ingroup tcp_raw + * Used to specify the function that should be called when a fatal error + * has occurred on the connection. + * + * If a connection is aborted because of an error, the application is + * alerted of this event by the err callback. Errors that might abort a + * connection are when there is a shortage of memory. The callback + * function to be called is set using the tcp_err() function. + * + * @note The corresponding pcb is already freed when this callback is called! + * + * @param pcb tcp_pcb to set the err callback + * @param err callback function to call for this pcb when a fatal error + * has occurred on the connection + */ +void +tcp_err(struct tcp_pcb *pcb, tcp_err_fn err) +{ + LWIP_ASSERT_CORE_LOCKED(); + if (pcb != NULL) { + LWIP_ASSERT("invalid socket state for err callback", pcb->state != LISTEN); + pcb->errf = err; + } +} + +/** + * @ingroup tcp_raw + * Used for specifying the function that should be called when a + * LISTENing connection has been connected to another host. 
+ * + * @param pcb tcp_pcb to set the accept callback + * @param accept callback function to call for this pcb when LISTENing + * connection has been connected to another host + */ +void +tcp_accept(struct tcp_pcb *pcb, tcp_accept_fn accept) +{ + LWIP_ASSERT_CORE_LOCKED(); + if ((pcb != NULL) && (pcb->state == LISTEN)) { + struct tcp_pcb_listen *lpcb = (struct tcp_pcb_listen *)pcb; + lpcb->accept = accept; + } +} +#endif /* LWIP_CALLBACK_API */ + + +/** + * @ingroup tcp_raw + * Specifies the polling interval and the callback function that should + * be called to poll the application. The interval is specified in + * number of TCP coarse grained timer shots, which typically occurs + * twice a second. An interval of 10 means that the application would + * be polled every 5 seconds. + * + * When a connection is idle (i.e., no data is either transmitted or + * received), lwIP will repeatedly poll the application by calling a + * specified callback function. This can be used either as a watchdog + * timer for killing connections that have stayed idle for too long, or + * as a method of waiting for memory to become available. For instance, + * if a call to tcp_write() has failed because memory wasn't available, + * the application may use the polling functionality to call tcp_write() + * again when the connection has been idle for a while. + */ +void +tcp_poll(struct tcp_pcb *pcb, tcp_poll_fn poll, u8_t interval) +{ + LWIP_ASSERT_CORE_LOCKED(); + + LWIP_ERROR("tcp_poll: invalid pcb", pcb != NULL, return); + LWIP_ASSERT("invalid socket state for poll", pcb->state != LISTEN); + +#if LWIP_CALLBACK_API + pcb->poll = poll; +#else /* LWIP_CALLBACK_API */ + LWIP_UNUSED_ARG(poll); +#endif /* LWIP_CALLBACK_API */ + pcb->pollinterval = interval; +} + +/** + * Purges a TCP PCB. Removes any buffered data and frees the buffer memory + * (pcb->ooseq, pcb->unsent and pcb->unacked are freed). + * + * @param pcb tcp_pcb to purge. The pcb itself is not deallocated! + */ +void +tcp_pcb_purge(struct tcp_pcb *pcb) +{ + LWIP_ERROR("tcp_pcb_purge: invalid pcb", pcb != NULL, return); + + if (pcb->state != CLOSED && + pcb->state != TIME_WAIT && + pcb->state != LISTEN) { + + LWIP_DEBUGF(TCP_DEBUG, ("tcp_pcb_purge\n")); + + tcp_backlog_accepted(pcb); + + if (pcb->refused_data != NULL) { + LWIP_DEBUGF(TCP_DEBUG, ("tcp_pcb_purge: data left on ->refused_data\n")); + pbuf_free(pcb->refused_data); + pcb->refused_data = NULL; + } + if (pcb->unsent != NULL) { + LWIP_DEBUGF(TCP_DEBUG, ("tcp_pcb_purge: not all data sent\n")); + } + if (pcb->unacked != NULL) { + LWIP_DEBUGF(TCP_DEBUG, ("tcp_pcb_purge: data left on ->unacked\n")); + } +#if TCP_QUEUE_OOSEQ + if (pcb->ooseq != NULL) { + LWIP_DEBUGF(TCP_DEBUG, ("tcp_pcb_purge: data left on ->ooseq\n")); + tcp_free_ooseq(pcb); + } +#endif /* TCP_QUEUE_OOSEQ */ + + /* Stop the retransmission timer as it will expect data on unacked + queue if it fires */ + pcb->rtime = -1; + + tcp_segs_free(pcb->unsent); + tcp_segs_free(pcb->unacked); + pcb->unacked = pcb->unsent = NULL; +#if TCP_OVERSIZE + pcb->unsent_oversize = 0; +#endif /* TCP_OVERSIZE */ + } +} + +/** + * Purges the PCB and removes it from a PCB list. Any delayed ACKs are sent first. + * + * @param pcblist PCB list to purge. + * @param pcb tcp_pcb to purge. The pcb itself is NOT deallocated! 
+ */ +void +tcp_pcb_remove(struct tcp_pcb **pcblist, struct tcp_pcb *pcb) +{ + LWIP_ASSERT("tcp_pcb_remove: invalid pcb", pcb != NULL); + LWIP_ASSERT("tcp_pcb_remove: invalid pcblist", pcblist != NULL); + + TCP_RMV(pcblist, pcb); + + tcp_pcb_purge(pcb); + + /* if there is an outstanding delayed ACKs, send it */ + if ((pcb->state != TIME_WAIT) && + (pcb->state != LISTEN) && + (pcb->flags & TF_ACK_DELAY)) { + tcp_ack_now(pcb); + tcp_output(pcb); + } + + if (pcb->state != LISTEN) { + LWIP_ASSERT("unsent segments leaking", pcb->unsent == NULL); + LWIP_ASSERT("unacked segments leaking", pcb->unacked == NULL); +#if TCP_QUEUE_OOSEQ + LWIP_ASSERT("ooseq segments leaking", pcb->ooseq == NULL); +#endif /* TCP_QUEUE_OOSEQ */ + } + + pcb->state = CLOSED; + /* reset the local port to prevent the pcb from being 'bound' */ + pcb->local_port = 0; + + LWIP_ASSERT("tcp_pcb_remove: tcp_pcbs_sane()", tcp_pcbs_sane()); +} + +/** + * Calculates a new initial sequence number for new connections. + * + * @return u32_t pseudo random sequence number + */ +u32_t +tcp_next_iss(struct tcp_pcb *pcb) +{ +#ifdef LWIP_HOOK_TCP_ISN + LWIP_ASSERT("tcp_next_iss: invalid pcb", pcb != NULL); + return LWIP_HOOK_TCP_ISN(&pcb->local_ip, pcb->local_port, &pcb->remote_ip, pcb->remote_port); +#else /* LWIP_HOOK_TCP_ISN */ + static u32_t iss = 6510; + + LWIP_ASSERT("tcp_next_iss: invalid pcb", pcb != NULL); + LWIP_UNUSED_ARG(pcb); + + iss += tcp_ticks; /* XXX */ + return iss; +#endif /* LWIP_HOOK_TCP_ISN */ +} + +#if TCP_CALCULATE_EFF_SEND_MSS +/** + * Calculates the effective send mss that can be used for a specific IP address + * by calculating the minimum of TCP_MSS and the mtu (if set) of the target + * netif (if not NULL). + */ +u16_t +tcp_eff_send_mss_netif(u16_t sendmss, struct netif *outif, const ip_addr_t *dest) +{ + u16_t mss_s; + u16_t mtu; + + LWIP_UNUSED_ARG(dest); /* in case IPv6 is disabled */ + + LWIP_ASSERT("tcp_eff_send_mss_netif: invalid dst_ip", dest != NULL); + +#if LWIP_IPV6 +#if LWIP_IPV4 + if (IP_IS_V6(dest)) +#endif /* LWIP_IPV4 */ + { + /* First look in destination cache, to see if there is a Path MTU. */ + mtu = nd6_get_destination_mtu(ip_2_ip6(dest), outif); + } +#if LWIP_IPV4 + else +#endif /* LWIP_IPV4 */ +#endif /* LWIP_IPV6 */ +#if LWIP_IPV4 + { + if (outif == NULL) { + return sendmss; + } + mtu = outif->mtu; + } +#endif /* LWIP_IPV4 */ + + if (mtu != 0) { + u16_t offset; +#if LWIP_IPV6 +#if LWIP_IPV4 + if (IP_IS_V6(dest)) +#endif /* LWIP_IPV4 */ + { + offset = IP6_HLEN + TCP_HLEN; + } +#if LWIP_IPV4 + else +#endif /* LWIP_IPV4 */ +#endif /* LWIP_IPV6 */ +#if LWIP_IPV4 + { + offset = IP_HLEN + TCP_HLEN; + } +#endif /* LWIP_IPV4 */ + mss_s = (mtu > offset) ? (u16_t)(mtu - offset) : 0; + /* RFC 1122, chap 4.2.2.6: + * Eff.snd.MSS = min(SendMSS+20, MMS_S) - TCPhdrsize - IPoptionsize + * We correct for TCP options in tcp_write(), and don't support IP options. + */ + sendmss = LWIP_MIN(sendmss, mss_s); + } + return sendmss; +} +#endif /* TCP_CALCULATE_EFF_SEND_MSS */ + +/** Helper function for tcp_netif_ip_addr_changed() that iterates a pcb list */ +static void +tcp_netif_ip_addr_changed_pcblist(const ip_addr_t *old_addr, struct tcp_pcb *pcb_list) +{ + struct tcp_pcb *pcb; + pcb = pcb_list; + + LWIP_ASSERT("tcp_netif_ip_addr_changed_pcblist: invalid old_addr", old_addr != NULL); + + while (pcb != NULL) { + /* PCB bound to current local interface address? */ + if (ip_addr_cmp(&pcb->local_ip, old_addr) +#if LWIP_AUTOIP + /* connections to link-local addresses must persist (RFC3927 ch. 
1.9) */ + && (!IP_IS_V4_VAL(pcb->local_ip) || !ip4_addr_islinklocal(ip_2_ip4(&pcb->local_ip))) +#endif /* LWIP_AUTOIP */ + ) { + /* this connection must be aborted */ + struct tcp_pcb *next = pcb->next; + LWIP_DEBUGF(NETIF_DEBUG | LWIP_DBG_STATE, ("netif_set_ipaddr: aborting TCP pcb %p\n", (void *)pcb)); + tcp_abort(pcb); + pcb = next; + } else { + pcb = pcb->next; + } + } +} + +/** This function is called from netif.c when address is changed or netif is removed + * + * @param old_addr IP address of the netif before change + * @param new_addr IP address of the netif after change or NULL if netif has been removed + */ +void +tcp_netif_ip_addr_changed(const ip_addr_t *old_addr, const ip_addr_t *new_addr) +{ + struct tcp_pcb_listen *lpcb; + + if (!ip_addr_isany(old_addr)) { + tcp_netif_ip_addr_changed_pcblist(old_addr, tcp_active_pcbs); + tcp_netif_ip_addr_changed_pcblist(old_addr, tcp_bound_pcbs); + + if (!ip_addr_isany(new_addr)) { + /* PCB bound to current local interface address? */ + for (lpcb = tcp_listen_pcbs.listen_pcbs; lpcb != NULL; lpcb = lpcb->next) { + /* PCB bound to current local interface address? */ + if (ip_addr_cmp(&lpcb->local_ip, old_addr)) { + /* The PCB is listening to the old ipaddr and + * is set to listen to the new one instead */ + ip_addr_copy(lpcb->local_ip, *new_addr); + } + } + } + } +} + +const char * +tcp_debug_state_str(enum tcp_state s) +{ + return tcp_state_str[s]; +} + +err_t +tcp_tcp_get_tcp_addrinfo(struct tcp_pcb *pcb, int local, ip_addr_t *addr, u16_t *port) +{ + if (pcb) { + if (local) { + if (addr) { + *addr = pcb->local_ip; + } + if (port) { + *port = pcb->local_port; + } + } else { + if (addr) { + *addr = pcb->remote_ip; + } + if (port) { + *port = pcb->remote_port; + } + } + return ERR_OK; + } + return ERR_VAL; +} + +#if TCP_QUEUE_OOSEQ +/* Free all ooseq pbufs (and possibly reset SACK state) */ +void +tcp_free_ooseq(struct tcp_pcb *pcb) +{ + if (pcb->ooseq) { + tcp_segs_free(pcb->ooseq); + pcb->ooseq = NULL; +#if LWIP_TCP_SACK_OUT + memset(pcb->rcv_sacks, 0, sizeof(pcb->rcv_sacks)); +#endif /* LWIP_TCP_SACK_OUT */ + } +} +#endif /* TCP_QUEUE_OOSEQ */ + +#if TCP_DEBUG || TCP_INPUT_DEBUG || TCP_OUTPUT_DEBUG +/** + * Print a tcp header for debugging purposes. 
+ * + * @param tcphdr pointer to a struct tcp_hdr + */ +void +tcp_debug_print(struct tcp_hdr *tcphdr) +{ + LWIP_DEBUGF(TCP_DEBUG, ("TCP header:\n")); + LWIP_DEBUGF(TCP_DEBUG, ("+-------------------------------+\n")); + LWIP_DEBUGF(TCP_DEBUG, ("| %5"U16_F" | %5"U16_F" | (src port, dest port)\n", + lwip_ntohs(tcphdr->src), lwip_ntohs(tcphdr->dest))); + LWIP_DEBUGF(TCP_DEBUG, ("+-------------------------------+\n")); + LWIP_DEBUGF(TCP_DEBUG, ("| %010"U32_F" | (seq no)\n", + lwip_ntohl(tcphdr->seqno))); + LWIP_DEBUGF(TCP_DEBUG, ("+-------------------------------+\n")); + LWIP_DEBUGF(TCP_DEBUG, ("| %010"U32_F" | (ack no)\n", + lwip_ntohl(tcphdr->ackno))); + LWIP_DEBUGF(TCP_DEBUG, ("+-------------------------------+\n")); + LWIP_DEBUGF(TCP_DEBUG, ("| %2"U16_F" | |%"U16_F"%"U16_F"%"U16_F"%"U16_F"%"U16_F"%"U16_F"| %5"U16_F" | (hdrlen, flags (", + TCPH_HDRLEN(tcphdr), + (u16_t)(TCPH_FLAGS(tcphdr) >> 5 & 1), + (u16_t)(TCPH_FLAGS(tcphdr) >> 4 & 1), + (u16_t)(TCPH_FLAGS(tcphdr) >> 3 & 1), + (u16_t)(TCPH_FLAGS(tcphdr) >> 2 & 1), + (u16_t)(TCPH_FLAGS(tcphdr) >> 1 & 1), + (u16_t)(TCPH_FLAGS(tcphdr) & 1), + lwip_ntohs(tcphdr->wnd))); + tcp_debug_print_flags(TCPH_FLAGS(tcphdr)); + LWIP_DEBUGF(TCP_DEBUG, ("), win)\n")); + LWIP_DEBUGF(TCP_DEBUG, ("+-------------------------------+\n")); + LWIP_DEBUGF(TCP_DEBUG, ("| 0x%04"X16_F" | %5"U16_F" | (chksum, urgp)\n", + lwip_ntohs(tcphdr->chksum), lwip_ntohs(tcphdr->urgp))); + LWIP_DEBUGF(TCP_DEBUG, ("+-------------------------------+\n")); +} + +/** + * Print a tcp state for debugging purposes. + * + * @param s enum tcp_state to print + */ +void +tcp_debug_print_state(enum tcp_state s) +{ + LWIP_DEBUGF(TCP_DEBUG, ("State: %s\n", tcp_state_str[s])); +} + +/** + * Print tcp flags for debugging purposes. + * + * @param flags tcp flags, all active flags are printed + */ +void +tcp_debug_print_flags(u8_t flags) +{ + if (flags & TCP_FIN) { + LWIP_DEBUGF(TCP_DEBUG, ("FIN ")); + } + if (flags & TCP_SYN) { + LWIP_DEBUGF(TCP_DEBUG, ("SYN ")); + } + if (flags & TCP_RST) { + LWIP_DEBUGF(TCP_DEBUG, ("RST ")); + } + if (flags & TCP_PSH) { + LWIP_DEBUGF(TCP_DEBUG, ("PSH ")); + } + if (flags & TCP_ACK) { + LWIP_DEBUGF(TCP_DEBUG, ("ACK ")); + } + if (flags & TCP_URG) { + LWIP_DEBUGF(TCP_DEBUG, ("URG ")); + } + if (flags & TCP_ECE) { + LWIP_DEBUGF(TCP_DEBUG, ("ECE ")); + } + if (flags & TCP_CWR) { + LWIP_DEBUGF(TCP_DEBUG, ("CWR ")); + } + LWIP_DEBUGF(TCP_DEBUG, ("\n")); +} + +/** + * Print all tcp_pcbs in every list for debugging purposes. 
+ */ +void +tcp_debug_print_pcbs(void) +{ + struct tcp_pcb *pcb; + struct tcp_pcb_listen *pcbl; + + LWIP_DEBUGF(TCP_DEBUG, ("Active PCB states:\n")); + for (pcb = tcp_active_pcbs; pcb != NULL; pcb = pcb->next) { + LWIP_DEBUGF(TCP_DEBUG, ("Local port %"U16_F", foreign port %"U16_F" snd_nxt %"U32_F" rcv_nxt %"U32_F" ", + pcb->local_port, pcb->remote_port, + pcb->snd_nxt, pcb->rcv_nxt)); + tcp_debug_print_state(pcb->state); + } + + LWIP_DEBUGF(TCP_DEBUG, ("Listen PCB states:\n")); + for (pcbl = tcp_listen_pcbs.listen_pcbs; pcbl != NULL; pcbl = pcbl->next) { + LWIP_DEBUGF(TCP_DEBUG, ("Local port %"U16_F" ", pcbl->local_port)); + tcp_debug_print_state(pcbl->state); + } + + LWIP_DEBUGF(TCP_DEBUG, ("TIME-WAIT PCB states:\n")); + for (pcb = tcp_tw_pcbs; pcb != NULL; pcb = pcb->next) { + LWIP_DEBUGF(TCP_DEBUG, ("Local port %"U16_F", foreign port %"U16_F" snd_nxt %"U32_F" rcv_nxt %"U32_F" ", + pcb->local_port, pcb->remote_port, + pcb->snd_nxt, pcb->rcv_nxt)); + tcp_debug_print_state(pcb->state); + } +} + +/** + * Check state consistency of the tcp_pcb lists. + */ +s16_t +tcp_pcbs_sane(void) +{ + struct tcp_pcb *pcb; + for (pcb = tcp_active_pcbs; pcb != NULL; pcb = pcb->next) { + LWIP_ASSERT("tcp_pcbs_sane: active pcb->state != CLOSED", pcb->state != CLOSED); + LWIP_ASSERT("tcp_pcbs_sane: active pcb->state != LISTEN", pcb->state != LISTEN); + LWIP_ASSERT("tcp_pcbs_sane: active pcb->state != TIME-WAIT", pcb->state != TIME_WAIT); + } + for (pcb = tcp_tw_pcbs; pcb != NULL; pcb = pcb->next) { + LWIP_ASSERT("tcp_pcbs_sane: tw pcb->state == TIME-WAIT", pcb->state == TIME_WAIT); + } + return 1; +} +#endif /* TCP_DEBUG */ + +#if LWIP_TCP_PCB_NUM_EXT_ARGS +/** + * @defgroup tcp_raw_extargs ext arguments + * @ingroup tcp_raw + * Additional data storage per tcp pcb\n + * @see @ref tcp_raw + * + * When LWIP_TCP_PCB_NUM_EXT_ARGS is > 0, every tcp pcb (including listen pcb) + * includes a number of additional argument entries in an array. + * + * To support memory management, in addition to a 'void *', callbacks can be + * provided to manage transition from listening pcbs to connections and to + * deallocate memory when a pcb is deallocated (see struct @ref tcp_ext_arg_callbacks). + * + * After allocating this index, use @ref tcp_ext_arg_set and @ref tcp_ext_arg_get + * to store and load arguments from this index for a given pcb. + */ + +static u8_t tcp_ext_arg_id; + +/** + * @ingroup tcp_raw_extargs + * Allocate an index to store data in ext_args member of struct tcp_pcb. + * Returned value is an index in mentioned array. + * The index is *global* over all pcbs! + * + * When @ref LWIP_TCP_PCB_NUM_EXT_ARGS is > 0, every tcp pcb (including listen pcb) + * includes a number of additional argument entries in an array. + * + * To support memory management, in addition to a 'void *', callbacks can be + * provided to manage transition from listening pcbs to connections and to + * deallocate memory when a pcb is deallocated (see struct @ref tcp_ext_arg_callbacks). + * + * After allocating this index, use @ref tcp_ext_arg_set and @ref tcp_ext_arg_get + * to store and load arguments from this index for a given pcb. 
+ * + * @return a unique index into struct tcp_pcb.ext_args + */ +u8_t +tcp_ext_arg_alloc_id(void) +{ + u8_t result = tcp_ext_arg_id; + tcp_ext_arg_id++; + + LWIP_ASSERT_CORE_LOCKED(); + +#if LWIP_TCP_PCB_NUM_EXT_ARGS >= 255 +#error LWIP_TCP_PCB_NUM_EXT_ARGS +#endif + LWIP_ASSERT("Increase LWIP_TCP_PCB_NUM_EXT_ARGS in lwipopts.h", result < LWIP_TCP_PCB_NUM_EXT_ARGS); + return result; +} + +/** + * @ingroup tcp_raw_extargs + * Set callbacks for a given index of ext_args on the specified pcb. + * + * @param pcb tcp_pcb for which to set the callback + * @param id ext_args index to set (allocated via @ref tcp_ext_arg_alloc_id) + * @param callbacks callback table (const since it is referenced, not copied!) + */ +void +tcp_ext_arg_set_callbacks(struct tcp_pcb *pcb, uint8_t id, const struct tcp_ext_arg_callbacks * const callbacks) +{ + LWIP_ASSERT("pcb != NULL", pcb != NULL); + LWIP_ASSERT("id < LWIP_TCP_PCB_NUM_EXT_ARGS", id < LWIP_TCP_PCB_NUM_EXT_ARGS); + LWIP_ASSERT("callbacks != NULL", callbacks != NULL); + + LWIP_ASSERT_CORE_LOCKED(); + + pcb->ext_args[id].callbacks = callbacks; +} + +/** + * @ingroup tcp_raw_extargs + * Set data for a given index of ext_args on the specified pcb. + * + * @param pcb tcp_pcb for which to set the data + * @param id ext_args index to set (allocated via @ref tcp_ext_arg_alloc_id) + * @param arg data pointer to set + */ +void tcp_ext_arg_set(struct tcp_pcb *pcb, uint8_t id, void *arg) +{ + LWIP_ASSERT("pcb != NULL", pcb != NULL); + LWIP_ASSERT("id < LWIP_TCP_PCB_NUM_EXT_ARGS", id < LWIP_TCP_PCB_NUM_EXT_ARGS); + + LWIP_ASSERT_CORE_LOCKED(); + + pcb->ext_args[id].data = arg; +} + +/** + * @ingroup tcp_raw_extargs + * Set data for a given index of ext_args on the specified pcb. + * + * @param pcb tcp_pcb for which to set the data + * @param id ext_args index to set (allocated via @ref tcp_ext_arg_alloc_id) + * @return data pointer at the given index + */ +void *tcp_ext_arg_get(const struct tcp_pcb *pcb, uint8_t id) +{ + LWIP_ASSERT("pcb != NULL", pcb != NULL); + LWIP_ASSERT("id < LWIP_TCP_PCB_NUM_EXT_ARGS", id < LWIP_TCP_PCB_NUM_EXT_ARGS); + + LWIP_ASSERT_CORE_LOCKED(); + + return pcb->ext_args[id].data; +} + +/** This function calls the "destroy" callback for all ext_args once a pcb is + * freed. + */ +static void +tcp_ext_arg_invoke_callbacks_destroyed(struct tcp_pcb_ext_args *ext_args) +{ + int i; + LWIP_ASSERT("ext_args != NULL", ext_args != NULL); + + for (i = 0; i < LWIP_TCP_PCB_NUM_EXT_ARGS; i++) { + if (ext_args[i].callbacks != NULL) { + if (ext_args[i].callbacks->destroy != NULL) { + ext_args[i].callbacks->destroy((u8_t)i, ext_args[i].data); + } + } + } +} + +/** This function calls the "passive_open" callback for all ext_args if a connection + * is in the process of being accepted. This is called just after the SYN is + * received and before a SYN/ACK is sent, to allow to modify the very first + * segment sent even on passive open. Naturally, the "accepted" callback of the + * pcb has not been called yet! 
+ */ +err_t +tcp_ext_arg_invoke_callbacks_passive_open(struct tcp_pcb_listen *lpcb, struct tcp_pcb *cpcb) +{ + int i; + LWIP_ASSERT("lpcb != NULL", lpcb != NULL); + LWIP_ASSERT("cpcb != NULL", cpcb != NULL); + + for (i = 0; i < LWIP_TCP_PCB_NUM_EXT_ARGS; i++) { + if (lpcb->ext_args[i].callbacks != NULL) { + if (lpcb->ext_args[i].callbacks->passive_open != NULL) { + err_t err = lpcb->ext_args[i].callbacks->passive_open((u8_t)i, lpcb, cpcb); + if (err != ERR_OK) { + return err; + } + } + } + } + return ERR_OK; +} +#endif /* LWIP_TCP_PCB_NUM_EXT_ARGS */ + +#endif /* LWIP_TCP */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/tcp_in.c b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/tcp_in.c new file mode 100644 index 0000000..4670178 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/tcp_in.c @@ -0,0 +1,2178 @@ +/** + * @file + * Transmission Control Protocol, incoming traffic + * + * The input processing functions of the TCP layer. + * + * These functions are generally called in the order (ip_input() ->) + * tcp_input() -> * tcp_process() -> tcp_receive() (-> application). + * + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
+ * + * Author: Adam Dunkels + * + */ + +#include "lwip/opt.h" + +#if LWIP_TCP /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/priv/tcp_priv.h" +#include "lwip/def.h" +#include "lwip/ip_addr.h" +#include "lwip/netif.h" +#include "lwip/mem.h" +#include "lwip/memp.h" +#include "lwip/inet_chksum.h" +#include "lwip/stats.h" +#include "lwip/ip6.h" +#include "lwip/ip6_addr.h" +#if LWIP_ND6_TCP_REACHABILITY_HINTS +#include "lwip/nd6.h" +#endif /* LWIP_ND6_TCP_REACHABILITY_HINTS */ + +#include + +#ifdef LWIP_HOOK_FILENAME +#include LWIP_HOOK_FILENAME +#endif + +/** Initial CWND calculation as defined RFC 2581 */ +#define LWIP_TCP_CALC_INITIAL_CWND(mss) ((tcpwnd_size_t)LWIP_MIN((4U * (mss)), LWIP_MAX((2U * (mss)), 4380U))) + +/* These variables are global to all functions involved in the input + processing of TCP segments. They are set by the tcp_input() + function. */ +static struct tcp_seg inseg; +static struct tcp_hdr *tcphdr; +static u16_t tcphdr_optlen; +static u16_t tcphdr_opt1len; +static u8_t *tcphdr_opt2; +static u16_t tcp_optidx; +static u32_t seqno, ackno; +static tcpwnd_size_t recv_acked; +static u16_t tcplen; +static u8_t flags; + +static u8_t recv_flags; +static struct pbuf *recv_data; + +struct tcp_pcb *tcp_input_pcb; + +/* Forward declarations. */ +static err_t tcp_process(struct tcp_pcb *pcb); +static void tcp_receive(struct tcp_pcb *pcb); +static void tcp_parseopt(struct tcp_pcb *pcb); + +static void tcp_listen_input(struct tcp_pcb_listen *pcb); +static void tcp_timewait_input(struct tcp_pcb *pcb); + +static int tcp_input_delayed_close(struct tcp_pcb *pcb); + +#if LWIP_TCP_SACK_OUT +static void tcp_add_sack(struct tcp_pcb *pcb, u32_t left, u32_t right); +static void tcp_remove_sacks_lt(struct tcp_pcb *pcb, u32_t seq); +#if defined(TCP_OOSEQ_BYTES_LIMIT) || defined(TCP_OOSEQ_PBUFS_LIMIT) +static void tcp_remove_sacks_gt(struct tcp_pcb *pcb, u32_t seq); +#endif /* TCP_OOSEQ_BYTES_LIMIT || TCP_OOSEQ_PBUFS_LIMIT */ +#endif /* LWIP_TCP_SACK_OUT */ + +/** + * The initial input processing of TCP. It verifies the TCP header, demultiplexes + * the segment between the PCBs and passes it on to tcp_process(), which implements + * the TCP finite state machine. This function is called by the IP layer (in + * ip_input()). + * + * @param p received TCP segment to process (p->payload pointing to the TCP header) + * @param inp network interface on which this segment was received + */ +void +tcp_input(struct pbuf *p, struct netif *inp) +{ + struct tcp_pcb *pcb, *prev; + struct tcp_pcb_listen *lpcb; +#if SO_REUSE + struct tcp_pcb *lpcb_prev = NULL; + struct tcp_pcb_listen *lpcb_any = NULL; +#endif /* SO_REUSE */ + u8_t hdrlen_bytes; + err_t err; + + LWIP_UNUSED_ARG(inp); + LWIP_ASSERT_CORE_LOCKED(); + LWIP_ASSERT("tcp_input: invalid pbuf", p != NULL); + + PERF_START; + + TCP_STATS_INC(tcp.recv); + MIB2_STATS_INC(mib2.tcpinsegs); + + tcphdr = (struct tcp_hdr *)p->payload; + +#if TCP_INPUT_DEBUG + tcp_debug_print(tcphdr); +#endif + + /* Check that TCP header fits in payload */ + if (p->len < TCP_HLEN) { + /* drop short packets */ + LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_input: short packet (%"U16_F" bytes) discarded\n", p->tot_len)); + TCP_STATS_INC(tcp.lenerr); + goto dropped; + } + + /* Don't even process incoming broadcasts/multicasts. 
*/ + if (ip_addr_isbroadcast(ip_current_dest_addr(), ip_current_netif()) || + ip_addr_ismulticast(ip_current_dest_addr())) { + TCP_STATS_INC(tcp.proterr); + goto dropped; + } + +#if CHECKSUM_CHECK_TCP + IF__NETIF_CHECKSUM_ENABLED(inp, NETIF_CHECKSUM_CHECK_TCP) { + /* Verify TCP checksum. */ + u16_t chksum = ip_chksum_pseudo(p, IP_PROTO_TCP, p->tot_len, + ip_current_src_addr(), ip_current_dest_addr()); + if (chksum != 0) { + LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_input: packet discarded due to failing checksum 0x%04"X16_F"\n", + chksum)); + tcp_debug_print(tcphdr); + TCP_STATS_INC(tcp.chkerr); + goto dropped; + } + } +#endif /* CHECKSUM_CHECK_TCP */ + + /* sanity-check header length */ + hdrlen_bytes = TCPH_HDRLEN_BYTES(tcphdr); + if ((hdrlen_bytes < TCP_HLEN) || (hdrlen_bytes > p->tot_len)) { + LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_input: invalid header length (%"U16_F")\n", (u16_t)hdrlen_bytes)); + TCP_STATS_INC(tcp.lenerr); + goto dropped; + } + + /* Move the payload pointer in the pbuf so that it points to the + TCP data instead of the TCP header. */ + tcphdr_optlen = (u16_t)(hdrlen_bytes - TCP_HLEN); + tcphdr_opt2 = NULL; + if (p->len >= hdrlen_bytes) { + /* all options are in the first pbuf */ + tcphdr_opt1len = tcphdr_optlen; + pbuf_remove_header(p, hdrlen_bytes); /* cannot fail */ + } else { + u16_t opt2len; + /* TCP header fits into first pbuf, options don't - data is in the next pbuf */ + /* there must be a next pbuf, due to hdrlen_bytes sanity check above */ + LWIP_ASSERT("p->next != NULL", p->next != NULL); + + /* advance over the TCP header (cannot fail) */ + pbuf_remove_header(p, TCP_HLEN); + + /* determine how long the first and second parts of the options are */ + tcphdr_opt1len = p->len; + opt2len = (u16_t)(tcphdr_optlen - tcphdr_opt1len); + + /* options continue in the next pbuf: set p to zero length and hide the + options in the next pbuf (adjusting p->tot_len) */ + pbuf_remove_header(p, tcphdr_opt1len); + + /* check that the options fit in the second pbuf */ + if (opt2len > p->next->len) { + /* drop short packets */ + LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_input: options overflow second pbuf (%"U16_F" bytes)\n", p->next->len)); + TCP_STATS_INC(tcp.lenerr); + goto dropped; + } + + /* remember the pointer to the second part of the options */ + tcphdr_opt2 = (u8_t *)p->next->payload; + + /* advance p->next to point after the options, and manually + adjust p->tot_len to keep it consistent with the changed p->next */ + pbuf_remove_header(p->next, opt2len); + p->tot_len = (u16_t)(p->tot_len - opt2len); + + LWIP_ASSERT("p->len == 0", p->len == 0); + LWIP_ASSERT("p->tot_len == p->next->tot_len", p->tot_len == p->next->tot_len); + } + + /* Convert fields in TCP header to host byte order. */ + tcphdr->src = lwip_ntohs(tcphdr->src); + tcphdr->dest = lwip_ntohs(tcphdr->dest); + seqno = tcphdr->seqno = lwip_ntohl(tcphdr->seqno); + ackno = tcphdr->ackno = lwip_ntohl(tcphdr->ackno); + tcphdr->wnd = lwip_ntohs(tcphdr->wnd); + + flags = TCPH_FLAGS(tcphdr); + tcplen = p->tot_len; + if (flags & (TCP_FIN | TCP_SYN)) { + tcplen++; + if (tcplen < p->tot_len) { + /* u16_t overflow, cannot handle this */ + LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_input: length u16_t overflow, cannot handle this\n")); + TCP_STATS_INC(tcp.lenerr); + goto dropped; + } + } + + /* Demultiplex an incoming segment. First, we check if it is destined + for an active connection. 
*/ + prev = NULL; + + for (pcb = tcp_active_pcbs; pcb != NULL; pcb = pcb->next) { + LWIP_ASSERT("tcp_input: active pcb->state != CLOSED", pcb->state != CLOSED); + LWIP_ASSERT("tcp_input: active pcb->state != TIME-WAIT", pcb->state != TIME_WAIT); + LWIP_ASSERT("tcp_input: active pcb->state != LISTEN", pcb->state != LISTEN); + + /* check if PCB is bound to specific netif */ + if ((pcb->netif_idx != NETIF_NO_INDEX) && + (pcb->netif_idx != netif_get_index(ip_data.current_input_netif))) { + prev = pcb; + continue; + } + + if (pcb->remote_port == tcphdr->src && + pcb->local_port == tcphdr->dest && + ip_addr_cmp(&pcb->remote_ip, ip_current_src_addr()) && + ip_addr_cmp(&pcb->local_ip, ip_current_dest_addr())) { + /* Move this PCB to the front of the list so that subsequent + lookups will be faster (we exploit locality in TCP segment + arrivals). */ + LWIP_ASSERT("tcp_input: pcb->next != pcb (before cache)", pcb->next != pcb); + if (prev != NULL) { + prev->next = pcb->next; + pcb->next = tcp_active_pcbs; + tcp_active_pcbs = pcb; + } else { + TCP_STATS_INC(tcp.cachehit); + } + LWIP_ASSERT("tcp_input: pcb->next != pcb (after cache)", pcb->next != pcb); + break; + } + prev = pcb; + } + + if (pcb == NULL) { + /* If it did not go to an active connection, we check the connections + in the TIME-WAIT state. */ + for (pcb = tcp_tw_pcbs; pcb != NULL; pcb = pcb->next) { + LWIP_ASSERT("tcp_input: TIME-WAIT pcb->state == TIME-WAIT", pcb->state == TIME_WAIT); + + /* check if PCB is bound to specific netif */ + if ((pcb->netif_idx != NETIF_NO_INDEX) && + (pcb->netif_idx != netif_get_index(ip_data.current_input_netif))) { + continue; + } + + if (pcb->remote_port == tcphdr->src && + pcb->local_port == tcphdr->dest && + ip_addr_cmp(&pcb->remote_ip, ip_current_src_addr()) && + ip_addr_cmp(&pcb->local_ip, ip_current_dest_addr())) { + /* We don't really care enough to move this PCB to the front + of the list since we are not very likely to receive that + many segments for connections in TIME-WAIT. */ + LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_input: packed for TIME_WAITing connection.\n")); +#ifdef LWIP_HOOK_TCP_INPACKET_PCB + if (LWIP_HOOK_TCP_INPACKET_PCB(pcb, tcphdr, tcphdr_optlen, tcphdr_opt1len, + tcphdr_opt2, p) == ERR_OK) +#endif + { + tcp_timewait_input(pcb); + } + pbuf_free(p); + return; + } + } + + /* Finally, if we still did not get a match, we check all PCBs that + are LISTENing for incoming connections. 
*/ + prev = NULL; + for (lpcb = tcp_listen_pcbs.listen_pcbs; lpcb != NULL; lpcb = lpcb->next) { + /* check if PCB is bound to specific netif */ + if ((lpcb->netif_idx != NETIF_NO_INDEX) && + (lpcb->netif_idx != netif_get_index(ip_data.current_input_netif))) { + prev = (struct tcp_pcb *)lpcb; + continue; + } + + if (lpcb->local_port == tcphdr->dest) { + if (IP_IS_ANY_TYPE_VAL(lpcb->local_ip)) { + /* found an ANY TYPE (IPv4/IPv6) match */ +#if SO_REUSE + lpcb_any = lpcb; + lpcb_prev = prev; +#else /* SO_REUSE */ + break; +#endif /* SO_REUSE */ + } else if (IP_ADDR_PCB_VERSION_MATCH_EXACT(lpcb, ip_current_dest_addr())) { + if (ip_addr_cmp(&lpcb->local_ip, ip_current_dest_addr())) { + /* found an exact match */ + break; + } else if (ip_addr_isany(&lpcb->local_ip)) { + /* found an ANY-match */ +#if SO_REUSE + lpcb_any = lpcb; + lpcb_prev = prev; +#else /* SO_REUSE */ + break; +#endif /* SO_REUSE */ + } + } + } + prev = (struct tcp_pcb *)lpcb; + } +#if SO_REUSE + /* first try specific local IP */ + if (lpcb == NULL) { + /* only pass to ANY if no specific local IP has been found */ + lpcb = lpcb_any; + prev = lpcb_prev; + } +#endif /* SO_REUSE */ + if (lpcb != NULL) { + /* Move this PCB to the front of the list so that subsequent + lookups will be faster (we exploit locality in TCP segment + arrivals). */ + if (prev != NULL) { + ((struct tcp_pcb_listen *)prev)->next = lpcb->next; + /* our successor is the remainder of the listening list */ + lpcb->next = tcp_listen_pcbs.listen_pcbs; + /* put this listening pcb at the head of the listening list */ + tcp_listen_pcbs.listen_pcbs = lpcb; + } else { + TCP_STATS_INC(tcp.cachehit); + } + + LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_input: packed for LISTENing connection.\n")); +#ifdef LWIP_HOOK_TCP_INPACKET_PCB + if (LWIP_HOOK_TCP_INPACKET_PCB((struct tcp_pcb *)lpcb, tcphdr, tcphdr_optlen, + tcphdr_opt1len, tcphdr_opt2, p) == ERR_OK) +#endif + { + tcp_listen_input(lpcb); + } + pbuf_free(p); + return; + } + } + +#if TCP_INPUT_DEBUG + LWIP_DEBUGF(TCP_INPUT_DEBUG, ("+-+-+-+-+-+-+-+-+-+-+-+-+-+- tcp_input: flags ")); + tcp_debug_print_flags(TCPH_FLAGS(tcphdr)); + LWIP_DEBUGF(TCP_INPUT_DEBUG, ("-+-+-+-+-+-+-+-+-+-+-+-+-+-+\n")); +#endif /* TCP_INPUT_DEBUG */ + + +#ifdef LWIP_HOOK_TCP_INPACKET_PCB + if ((pcb != NULL) && LWIP_HOOK_TCP_INPACKET_PCB(pcb, tcphdr, tcphdr_optlen, + tcphdr_opt1len, tcphdr_opt2, p) != ERR_OK) { + pbuf_free(p); + return; + } +#endif + if (pcb != NULL) { + /* The incoming segment belongs to a connection. */ +#if TCP_INPUT_DEBUG + tcp_debug_print_state(pcb->state); +#endif /* TCP_INPUT_DEBUG */ + + /* Set up a tcp_seg structure. */ + inseg.next = NULL; + inseg.len = p->tot_len; + inseg.p = p; + inseg.tcphdr = tcphdr; + + recv_data = NULL; + recv_flags = 0; + recv_acked = 0; + + if (flags & TCP_PSH) { + p->flags |= PBUF_FLAG_PUSH; + } + + /* If there is data which was previously "refused" by upper layer */ + if (pcb->refused_data != NULL) { + if ((tcp_process_refused_data(pcb) == ERR_ABRT) || + ((pcb->refused_data != NULL) && (tcplen > 0))) { + /* pcb has been aborted or refused data is still refused and the new + segment contains data */ + if (pcb->rcv_ann_wnd == 0) { + /* this is a zero-window probe, we respond to it with current RCV.NXT + and drop the data segment */ + tcp_send_empty_ack(pcb); + } + TCP_STATS_INC(tcp.drop); + MIB2_STATS_INC(mib2.tcpinerrs); + goto aborted; + } + } + tcp_input_pcb = pcb; + err = tcp_process(pcb); + /* A return value of ERR_ABRT means that tcp_abort() was called + and that the pcb has been freed. 
If so, we don't do anything. */ + if (err != ERR_ABRT) { + if (recv_flags & TF_RESET) { + /* TF_RESET means that the connection was reset by the other + end. We then call the error callback to inform the + application that the connection is dead before we + deallocate the PCB. */ + TCP_EVENT_ERR(pcb->state, pcb->errf, pcb->callback_arg, ERR_RST); + tcp_pcb_remove(&tcp_active_pcbs, pcb); + tcp_free(pcb); + } else { + err = ERR_OK; + /* If the application has registered a "sent" function to be + called when new send buffer space is available, we call it + now. */ + if (recv_acked > 0) { + u16_t acked16; +#if LWIP_WND_SCALE + /* recv_acked is u32_t but the sent callback only takes a u16_t, + so we might have to call it multiple times. */ + u32_t acked = recv_acked; + while (acked > 0) { + acked16 = (u16_t)LWIP_MIN(acked, 0xffffu); + acked -= acked16; +#else + { + acked16 = recv_acked; +#endif + TCP_EVENT_SENT(pcb, (u16_t)acked16, err); + if (err == ERR_ABRT) { + goto aborted; + } + } + recv_acked = 0; + } + if (tcp_input_delayed_close(pcb)) { + goto aborted; + } +#if TCP_QUEUE_OOSEQ && LWIP_WND_SCALE + while (recv_data != NULL) { + struct pbuf *rest = NULL; + pbuf_split_64k(recv_data, &rest); +#else /* TCP_QUEUE_OOSEQ && LWIP_WND_SCALE */ + if (recv_data != NULL) { +#endif /* TCP_QUEUE_OOSEQ && LWIP_WND_SCALE */ + + LWIP_ASSERT("pcb->refused_data == NULL", pcb->refused_data == NULL); + if (pcb->flags & TF_RXCLOSED) { + /* received data although already closed -> abort (send RST) to + notify the remote host that not all data has been processed */ + pbuf_free(recv_data); +#if TCP_QUEUE_OOSEQ && LWIP_WND_SCALE + if (rest != NULL) { + pbuf_free(rest); + } +#endif /* TCP_QUEUE_OOSEQ && LWIP_WND_SCALE */ + tcp_abort(pcb); + goto aborted; + } + + /* Notify application that data has been received. */ + TCP_EVENT_RECV(pcb, recv_data, ERR_OK, err); + if (err == ERR_ABRT) { +#if TCP_QUEUE_OOSEQ && LWIP_WND_SCALE + if (rest != NULL) { + pbuf_free(rest); + } +#endif /* TCP_QUEUE_OOSEQ && LWIP_WND_SCALE */ + goto aborted; + } + + /* If the upper layer can't receive this data, store it */ + if (err != ERR_OK) { +#if TCP_QUEUE_OOSEQ && LWIP_WND_SCALE + if (rest != NULL) { + pbuf_cat(recv_data, rest); + } +#endif /* TCP_QUEUE_OOSEQ && LWIP_WND_SCALE */ + pcb->refused_data = recv_data; + LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_input: keep incoming packet, because pcb is \"full\"\n")); +#if TCP_QUEUE_OOSEQ && LWIP_WND_SCALE + break; + } else { + /* Upper layer received the data, go on with the rest if > 64K */ + recv_data = rest; +#endif /* TCP_QUEUE_OOSEQ && LWIP_WND_SCALE */ + } + } + + /* If a FIN segment was received, we call the callback + function with a NULL buffer to indicate EOF. */ + if (recv_flags & TF_GOT_FIN) { + if (pcb->refused_data != NULL) { + /* Delay this if we have refused data. */ + pcb->refused_data->flags |= PBUF_FLAG_TCP_FIN; + } else { + /* correct rcv_wnd as the application won't call tcp_recved() + for the FIN's seqno */ + if (pcb->rcv_wnd != TCP_WND_MAX(pcb)) { + pcb->rcv_wnd++; + } + TCP_EVENT_CLOSED(pcb, err); + if (err == ERR_ABRT) { + goto aborted; + } + } + } + + tcp_input_pcb = NULL; + if (tcp_input_delayed_close(pcb)) { + goto aborted; + } + /* Try to send something out. */ + tcp_output(pcb); +#if TCP_INPUT_DEBUG +#if TCP_DEBUG + tcp_debug_print_state(pcb->state); +#endif /* TCP_DEBUG */ +#endif /* TCP_INPUT_DEBUG */ + } + } + /* Jump target if pcb has been aborted in a callback (by calling tcp_abort()). + Below this line, 'pcb' may not be dereferenced! 
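+       Only the per-input globals (tcp_input_pcb, recv_data and the
+       reference held in inseg.p) are cleaned up here.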
*/ +aborted: + tcp_input_pcb = NULL; + recv_data = NULL; + + /* give up our reference to inseg.p */ + if (inseg.p != NULL) { + pbuf_free(inseg.p); + inseg.p = NULL; + } + } else { + /* If no matching PCB was found, send a TCP RST (reset) to the + sender. */ + LWIP_DEBUGF(TCP_RST_DEBUG, ("tcp_input: no PCB match found, resetting.\n")); + if (!(TCPH_FLAGS(tcphdr) & TCP_RST)) { + TCP_STATS_INC(tcp.proterr); + TCP_STATS_INC(tcp.drop); + tcp_rst(NULL, ackno, seqno + tcplen, ip_current_dest_addr(), + ip_current_src_addr(), tcphdr->dest, tcphdr->src); + } + pbuf_free(p); + } + + LWIP_ASSERT("tcp_input: tcp_pcbs_sane()", tcp_pcbs_sane()); + PERF_STOP("tcp_input"); + return; +dropped: + TCP_STATS_INC(tcp.drop); + MIB2_STATS_INC(mib2.tcpinerrs); + pbuf_free(p); +} + +/** Called from tcp_input to check for TF_CLOSED flag. This results in closing + * and deallocating a pcb at the correct place to ensure noone references it + * any more. + * @returns 1 if the pcb has been closed and deallocated, 0 otherwise + */ +static int +tcp_input_delayed_close(struct tcp_pcb *pcb) +{ + LWIP_ASSERT("tcp_input_delayed_close: invalid pcb", pcb != NULL); + + if (recv_flags & TF_CLOSED) { + /* The connection has been closed and we will deallocate the + PCB. */ + if (!(pcb->flags & TF_RXCLOSED)) { + /* Connection closed although the application has only shut down the + tx side: call the PCB's err callback and indicate the closure to + ensure the application doesn't continue using the PCB. */ + TCP_EVENT_ERR(pcb->state, pcb->errf, pcb->callback_arg, ERR_CLSD); + } + tcp_pcb_remove(&tcp_active_pcbs, pcb); + tcp_free(pcb); + return 1; + } + return 0; +} + +/** + * Called by tcp_input() when a segment arrives for a listening + * connection (from tcp_input()). + * + * @param pcb the tcp_pcb_listen for which a segment arrived + * + * @note the segment which arrived is saved in global variables, therefore only the pcb + * involved is passed as a parameter to this function + */ +static void +tcp_listen_input(struct tcp_pcb_listen *pcb) +{ + struct tcp_pcb *npcb; + u32_t iss; + err_t rc; + + if (flags & TCP_RST) { + /* An incoming RST should be ignored. Return. */ + return; + } + + LWIP_ASSERT("tcp_listen_input: invalid pcb", pcb != NULL); + + /* In the LISTEN state, we check for incoming SYN segments, + creates a new PCB, and responds with a SYN|ACK. */ + if (flags & TCP_ACK) { + /* For incoming segments with the ACK flag set, respond with a + RST. */ + LWIP_DEBUGF(TCP_RST_DEBUG, ("tcp_listen_input: ACK in LISTEN, sending reset\n")); + tcp_rst((const struct tcp_pcb *)pcb, ackno, seqno + tcplen, ip_current_dest_addr(), + ip_current_src_addr(), tcphdr->dest, tcphdr->src); + } else if (flags & TCP_SYN) { + LWIP_DEBUGF(TCP_DEBUG, ("TCP connection request %"U16_F" -> %"U16_F".\n", tcphdr->src, tcphdr->dest)); +#if TCP_LISTEN_BACKLOG + if (pcb->accepts_pending >= pcb->backlog) { + LWIP_DEBUGF(TCP_DEBUG, ("tcp_listen_input: listen backlog exceeded for port %"U16_F"\n", tcphdr->dest)); + return; + } +#endif /* TCP_LISTEN_BACKLOG */ + npcb = tcp_alloc(pcb->prio); + /* If a new PCB could not be created (probably due to lack of memory), + we don't do anything, but rely on the sender will retransmit the + SYN at a time when we have more memory available. 
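+         The listener's accept callback is still notified with ERR_MEM so
+         that the application can see the connection attempt was dropped.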
*/ + if (npcb == NULL) { + err_t err; + LWIP_DEBUGF(TCP_DEBUG, ("tcp_listen_input: could not allocate PCB\n")); + TCP_STATS_INC(tcp.memerr); + TCP_EVENT_ACCEPT(pcb, NULL, pcb->callback_arg, ERR_MEM, err); + LWIP_UNUSED_ARG(err); /* err not useful here */ + return; + } +#if TCP_LISTEN_BACKLOG + pcb->accepts_pending++; + tcp_set_flags(npcb, TF_BACKLOGPEND); +#endif /* TCP_LISTEN_BACKLOG */ + /* Set up the new PCB. */ + ip_addr_copy(npcb->local_ip, *ip_current_dest_addr()); + ip_addr_copy(npcb->remote_ip, *ip_current_src_addr()); + npcb->local_port = pcb->local_port; + npcb->remote_port = tcphdr->src; + npcb->state = SYN_RCVD; + npcb->rcv_nxt = seqno + 1; + npcb->rcv_ann_right_edge = npcb->rcv_nxt; + iss = tcp_next_iss(npcb); + npcb->snd_wl2 = iss; + npcb->snd_nxt = iss; + npcb->lastack = iss; + npcb->snd_lbb = iss; + npcb->snd_wl1 = seqno - 1;/* initialise to seqno-1 to force window update */ + npcb->callback_arg = pcb->callback_arg; +#if LWIP_CALLBACK_API || TCP_LISTEN_BACKLOG + npcb->listener = pcb; +#endif /* LWIP_CALLBACK_API || TCP_LISTEN_BACKLOG */ + /* inherit socket options */ + npcb->so_options = pcb->so_options & SOF_INHERITED; + npcb->netif_idx = pcb->netif_idx; + /* Register the new PCB so that we can begin receiving segments + for it. */ + TCP_REG_ACTIVE(npcb); + + /* Parse any options in the SYN. */ + tcp_parseopt(npcb); + npcb->snd_wnd = tcphdr->wnd; + npcb->snd_wnd_max = npcb->snd_wnd; + +#if TCP_CALCULATE_EFF_SEND_MSS + npcb->mss = tcp_eff_send_mss(npcb->mss, &npcb->local_ip, &npcb->remote_ip); +#endif /* TCP_CALCULATE_EFF_SEND_MSS */ + + MIB2_STATS_INC(mib2.tcppassiveopens); + +#if LWIP_TCP_PCB_NUM_EXT_ARGS + if (tcp_ext_arg_invoke_callbacks_passive_open(pcb, npcb) != ERR_OK) { + tcp_abandon(npcb, 0); + return; + } +#endif + + /* Send a SYN|ACK together with the MSS option. */ + rc = tcp_enqueue_flags(npcb, TCP_SYN | TCP_ACK); + if (rc != ERR_OK) { + tcp_abandon(npcb, 0); + return; + } + tcp_output(npcb); + } + return; +} + +/** + * Called by tcp_input() when a segment arrives for a connection in + * TIME_WAIT. + * + * @param pcb the tcp_pcb for which a segment arrived + * + * @note the segment which arrived is saved in global variables, therefore only the pcb + * involved is passed as a parameter to this function + */ +static void +tcp_timewait_input(struct tcp_pcb *pcb) +{ + /* RFC 1337: in TIME_WAIT, ignore RST and ACK FINs + any 'acceptable' segments */ + /* RFC 793 3.9 Event Processing - Segment Arrives: + * - first check sequence number - we skip that one in TIME_WAIT (always + * acceptable since we only send ACKs) + * - second check the RST bit (... return) */ + if (flags & TCP_RST) { + return; + } + + LWIP_ASSERT("tcp_timewait_input: invalid pcb", pcb != NULL); + + /* - fourth, check the SYN bit, */ + if (flags & TCP_SYN) { + /* If an incoming segment is not acceptable, an acknowledgment + should be sent in reply */ + if (TCP_SEQ_BETWEEN(seqno, pcb->rcv_nxt, pcb->rcv_nxt + pcb->rcv_wnd)) { + /* If the SYN is in the window it is an error, send a reset */ + tcp_rst(pcb, ackno, seqno + tcplen, ip_current_dest_addr(), + ip_current_src_addr(), tcphdr->dest, tcphdr->src); + return; + } + } else if (flags & TCP_FIN) { + /* - eighth, check the FIN bit: Remain in the TIME-WAIT state. + Restart the 2 MSL time-wait timeout.*/ + pcb->tmr = tcp_ticks; + } + + if ((tcplen > 0)) { + /* Acknowledge data, FIN or out-of-window SYN */ + tcp_ack_now(pcb); + tcp_output(pcb); + } + return; +} + +/** + * Implements the TCP state machine. Called by tcp_input. 
In some + * states tcp_receive() is called to receive data. The tcp_seg + * argument will be freed by the caller (tcp_input()) unless the + * recv_data pointer in the pcb is set. + * + * @param pcb the tcp_pcb for which a segment arrived + * + * @note the segment which arrived is saved in global variables, therefore only the pcb + * involved is passed as a parameter to this function + */ +static err_t +tcp_process(struct tcp_pcb *pcb) +{ + struct tcp_seg *rseg; + u8_t acceptable = 0; + err_t err; + + err = ERR_OK; + + LWIP_ASSERT("tcp_process: invalid pcb", pcb != NULL); + + /* Process incoming RST segments. */ + if (flags & TCP_RST) { + /* First, determine if the reset is acceptable. */ + if (pcb->state == SYN_SENT) { + /* "In the SYN-SENT state (a RST received in response to an initial SYN), + the RST is acceptable if the ACK field acknowledges the SYN." */ + if (ackno == pcb->snd_nxt) { + acceptable = 1; + } + } else { + /* "In all states except SYN-SENT, all reset (RST) segments are validated + by checking their SEQ-fields." */ + if (seqno == pcb->rcv_nxt) { + acceptable = 1; + } else if (TCP_SEQ_BETWEEN(seqno, pcb->rcv_nxt, + pcb->rcv_nxt + pcb->rcv_wnd)) { + /* If the sequence number is inside the window, we send a challenge ACK + and wait for a re-send with matching sequence number. + This follows RFC 5961 section 3.2 and addresses CVE-2004-0230 + (RST spoofing attack), which is present in RFC 793 RST handling. */ + tcp_ack_now(pcb); + } + } + + if (acceptable) { + LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_process: Connection RESET\n")); + LWIP_ASSERT("tcp_input: pcb->state != CLOSED", pcb->state != CLOSED); + recv_flags |= TF_RESET; + tcp_clear_flags(pcb, TF_ACK_DELAY); + return ERR_RST; + } else { + LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_process: unacceptable reset seqno %"U32_F" rcv_nxt %"U32_F"\n", + seqno, pcb->rcv_nxt)); + LWIP_DEBUGF(TCP_DEBUG, ("tcp_process: unacceptable reset seqno %"U32_F" rcv_nxt %"U32_F"\n", + seqno, pcb->rcv_nxt)); + return ERR_OK; + } + } + + if ((flags & TCP_SYN) && (pcb->state != SYN_SENT && pcb->state != SYN_RCVD)) { + /* Cope with new connection attempt after remote end crashed */ + tcp_ack_now(pcb); + return ERR_OK; + } + + if ((pcb->flags & TF_RXCLOSED) == 0) { + /* Update the PCB (in)activity timer unless rx is closed (see tcp_shutdown) */ + pcb->tmr = tcp_ticks; + } + pcb->keep_cnt_sent = 0; + pcb->persist_probe = 0; + + tcp_parseopt(pcb); + + /* Do different things depending on the TCP state. */ + switch (pcb->state) { + case SYN_SENT: + LWIP_DEBUGF(TCP_INPUT_DEBUG, ("SYN-SENT: ackno %"U32_F" pcb->snd_nxt %"U32_F" unacked %"U32_F"\n", ackno, + pcb->snd_nxt, lwip_ntohl(pcb->unacked->tcphdr->seqno))); + /* received SYN ACK with expected sequence number? 
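+         A valid SYN|ACK must carry both flags and acknowledge exactly our
+         SYN, i.e. ackno == lastack + 1 (the SYN occupies one sequence number).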
*/ + if ((flags & TCP_ACK) && (flags & TCP_SYN) + && (ackno == pcb->lastack + 1)) { + pcb->rcv_nxt = seqno + 1; + pcb->rcv_ann_right_edge = pcb->rcv_nxt; + pcb->lastack = ackno; + pcb->snd_wnd = tcphdr->wnd; + pcb->snd_wnd_max = pcb->snd_wnd; + pcb->snd_wl1 = seqno - 1; /* initialise to seqno - 1 to force window update */ + pcb->state = ESTABLISHED; + +#if TCP_CALCULATE_EFF_SEND_MSS + pcb->mss = tcp_eff_send_mss(pcb->mss, &pcb->local_ip, &pcb->remote_ip); +#endif /* TCP_CALCULATE_EFF_SEND_MSS */ + + pcb->cwnd = LWIP_TCP_CALC_INITIAL_CWND(pcb->mss); + LWIP_DEBUGF(TCP_CWND_DEBUG, ("tcp_process (SENT): cwnd %"TCPWNDSIZE_F + " ssthresh %"TCPWNDSIZE_F"\n", + pcb->cwnd, pcb->ssthresh)); + LWIP_ASSERT("pcb->snd_queuelen > 0", (pcb->snd_queuelen > 0)); + --pcb->snd_queuelen; + LWIP_DEBUGF(TCP_QLEN_DEBUG, ("tcp_process: SYN-SENT --queuelen %"TCPWNDSIZE_F"\n", (tcpwnd_size_t)pcb->snd_queuelen)); + rseg = pcb->unacked; + if (rseg == NULL) { + /* might happen if tcp_output fails in tcp_rexmit_rto() + in which case the segment is on the unsent list */ + rseg = pcb->unsent; + LWIP_ASSERT("no segment to free", rseg != NULL); + pcb->unsent = rseg->next; + } else { + pcb->unacked = rseg->next; + } + tcp_seg_free(rseg); + + /* If there's nothing left to acknowledge, stop the retransmit + timer, otherwise reset it to start again */ + if (pcb->unacked == NULL) { + pcb->rtime = -1; + } else { + pcb->rtime = 0; + pcb->nrtx = 0; + } + + /* Call the user specified function to call when successfully + * connected. */ + TCP_EVENT_CONNECTED(pcb, ERR_OK, err); + if (err == ERR_ABRT) { + return ERR_ABRT; + } + tcp_ack_now(pcb); + } + /* received ACK? possibly a half-open connection */ + else if (flags & TCP_ACK) { + /* send a RST to bring the other side in a non-synchronized state. */ + tcp_rst(pcb, ackno, seqno + tcplen, ip_current_dest_addr(), + ip_current_src_addr(), tcphdr->dest, tcphdr->src); + /* Resend SYN immediately (don't wait for rto timeout) to establish + connection faster, but do not send more SYNs than we otherwise would + have, or we might get caught in a loop on loopback interfaces. */ + if (pcb->nrtx < TCP_SYNMAXRTX) { + pcb->rtime = 0; + tcp_rexmit_rto(pcb); + } + } + break; + case SYN_RCVD: + if (flags & TCP_ACK) { + /* expected ACK number? */ + if (TCP_SEQ_BETWEEN(ackno, pcb->lastack + 1, pcb->snd_nxt)) { + pcb->state = ESTABLISHED; + LWIP_DEBUGF(TCP_DEBUG, ("TCP connection established %"U16_F" -> %"U16_F".\n", inseg.tcphdr->src, inseg.tcphdr->dest)); +#if LWIP_CALLBACK_API || TCP_LISTEN_BACKLOG + if (pcb->listener == NULL) { + /* listen pcb might be closed by now */ + err = ERR_VAL; + } else +#endif /* LWIP_CALLBACK_API || TCP_LISTEN_BACKLOG */ + { +#if LWIP_CALLBACK_API + LWIP_ASSERT("pcb->listener->accept != NULL", pcb->listener->accept != NULL); +#endif + tcp_backlog_accepted(pcb); + /* Call the accept function. */ + TCP_EVENT_ACCEPT(pcb->listener, pcb, pcb->callback_arg, ERR_OK, err); + } + if (err != ERR_OK) { + /* If the accept function returns with an error, we abort + * the connection. */ + /* Already aborted? */ + if (err != ERR_ABRT) { + tcp_abort(pcb); + } + return ERR_ABRT; + } + /* If there was any data contained within this ACK, + * we'd better pass it on to the application as well. 
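+           * tcp_receive() below also processes the ACK itself, freeing
+           * acknowledged segments and updating the send window.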
*/ + tcp_receive(pcb); + + /* Prevent ACK for SYN to generate a sent event */ + if (recv_acked != 0) { + recv_acked--; + } + + pcb->cwnd = LWIP_TCP_CALC_INITIAL_CWND(pcb->mss); + LWIP_DEBUGF(TCP_CWND_DEBUG, ("tcp_process (SYN_RCVD): cwnd %"TCPWNDSIZE_F + " ssthresh %"TCPWNDSIZE_F"\n", + pcb->cwnd, pcb->ssthresh)); + + if (recv_flags & TF_GOT_FIN) { + tcp_ack_now(pcb); + pcb->state = CLOSE_WAIT; + } + } else { + /* incorrect ACK number, send RST */ + tcp_rst(pcb, ackno, seqno + tcplen, ip_current_dest_addr(), + ip_current_src_addr(), tcphdr->dest, tcphdr->src); + } + } else if ((flags & TCP_SYN) && (seqno == pcb->rcv_nxt - 1)) { + /* Looks like another copy of the SYN - retransmit our SYN-ACK */ + tcp_rexmit(pcb); + } + break; + case CLOSE_WAIT: + /* FALLTHROUGH */ + case ESTABLISHED: + tcp_receive(pcb); + if (recv_flags & TF_GOT_FIN) { /* passive close */ + tcp_ack_now(pcb); + pcb->state = CLOSE_WAIT; + } + break; + case FIN_WAIT_1: + tcp_receive(pcb); + if (recv_flags & TF_GOT_FIN) { + if ((flags & TCP_ACK) && (ackno == pcb->snd_nxt) && + pcb->unsent == NULL) { + LWIP_DEBUGF(TCP_DEBUG, + ("TCP connection closed: FIN_WAIT_1 %"U16_F" -> %"U16_F".\n", inseg.tcphdr->src, inseg.tcphdr->dest)); + tcp_ack_now(pcb); + tcp_pcb_purge(pcb); + TCP_RMV_ACTIVE(pcb); + pcb->state = TIME_WAIT; + TCP_REG(&tcp_tw_pcbs, pcb); + } else { + tcp_ack_now(pcb); + pcb->state = CLOSING; + } + } else if ((flags & TCP_ACK) && (ackno == pcb->snd_nxt) && + pcb->unsent == NULL) { + pcb->state = FIN_WAIT_2; + } + break; + case FIN_WAIT_2: + tcp_receive(pcb); + if (recv_flags & TF_GOT_FIN) { + LWIP_DEBUGF(TCP_DEBUG, ("TCP connection closed: FIN_WAIT_2 %"U16_F" -> %"U16_F".\n", inseg.tcphdr->src, inseg.tcphdr->dest)); + tcp_ack_now(pcb); + tcp_pcb_purge(pcb); + TCP_RMV_ACTIVE(pcb); + pcb->state = TIME_WAIT; + TCP_REG(&tcp_tw_pcbs, pcb); + } + break; + case CLOSING: + tcp_receive(pcb); + if ((flags & TCP_ACK) && ackno == pcb->snd_nxt && pcb->unsent == NULL) { + LWIP_DEBUGF(TCP_DEBUG, ("TCP connection closed: CLOSING %"U16_F" -> %"U16_F".\n", inseg.tcphdr->src, inseg.tcphdr->dest)); + tcp_pcb_purge(pcb); + TCP_RMV_ACTIVE(pcb); + pcb->state = TIME_WAIT; + TCP_REG(&tcp_tw_pcbs, pcb); + } + break; + case LAST_ACK: + tcp_receive(pcb); + if ((flags & TCP_ACK) && ackno == pcb->snd_nxt && pcb->unsent == NULL) { + LWIP_DEBUGF(TCP_DEBUG, ("TCP connection closed: LAST_ACK %"U16_F" -> %"U16_F".\n", inseg.tcphdr->src, inseg.tcphdr->dest)); + /* bugfix #21699: don't set pcb->state to CLOSED here or we risk leaking segments */ + recv_flags |= TF_CLOSED; + } + break; + default: + break; + } + return ERR_OK; +} + +#if TCP_QUEUE_OOSEQ +/** + * Insert segment into the list (segments covered with new one will be deleted) + * + * Called from tcp_receive() + */ +static void +tcp_oos_insert_segment(struct tcp_seg *cseg, struct tcp_seg *next) +{ + struct tcp_seg *old_seg; + + LWIP_ASSERT("tcp_oos_insert_segment: invalid cseg", cseg != NULL); + + if (TCPH_FLAGS(cseg->tcphdr) & TCP_FIN) { + /* received segment overlaps all following segments */ + tcp_segs_free(next); + next = NULL; + } else { + /* delete some following segments + oos queue may have segments with FIN flag */ + while (next && + TCP_SEQ_GEQ((seqno + cseg->len), + (next->tcphdr->seqno + next->len))) { + /* cseg with FIN already processed */ + if (TCPH_FLAGS(next->tcphdr) & TCP_FIN) { + TCPH_SET_FLAG(cseg->tcphdr, TCP_FIN); + } + old_seg = next; + next = next->next; + tcp_seg_free(old_seg); + } + if (next && + TCP_SEQ_GT(seqno + cseg->len, next->tcphdr->seqno)) { + /* We need to trim 
the incoming segment. */ + cseg->len = (u16_t)(next->tcphdr->seqno - seqno); + pbuf_realloc(cseg->p, cseg->len); + } + } + cseg->next = next; +} +#endif /* TCP_QUEUE_OOSEQ */ + +/** Remove segments from a list if the incoming ACK acknowledges them */ +static struct tcp_seg * +tcp_free_acked_segments(struct tcp_pcb *pcb, struct tcp_seg *seg_list, const char *dbg_list_name, + struct tcp_seg *dbg_other_seg_list) +{ + struct tcp_seg *next; + u16_t clen; + + LWIP_UNUSED_ARG(dbg_list_name); + LWIP_UNUSED_ARG(dbg_other_seg_list); + + while (seg_list != NULL && + TCP_SEQ_LEQ(lwip_ntohl(seg_list->tcphdr->seqno) + + TCP_TCPLEN(seg_list), ackno)) { + LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_receive: removing %"U32_F":%"U32_F" from pcb->%s\n", + lwip_ntohl(seg_list->tcphdr->seqno), + lwip_ntohl(seg_list->tcphdr->seqno) + TCP_TCPLEN(seg_list), + dbg_list_name)); + + next = seg_list; + seg_list = seg_list->next; + + clen = pbuf_clen(next->p); + LWIP_DEBUGF(TCP_QLEN_DEBUG, ("tcp_receive: queuelen %"TCPWNDSIZE_F" ... ", + (tcpwnd_size_t)pcb->snd_queuelen)); + LWIP_ASSERT("pcb->snd_queuelen >= pbuf_clen(next->p)", (pcb->snd_queuelen >= clen)); + + pcb->snd_queuelen = (u16_t)(pcb->snd_queuelen - clen); + recv_acked = (tcpwnd_size_t)(recv_acked + next->len); + tcp_seg_free(next); + + LWIP_DEBUGF(TCP_QLEN_DEBUG, ("%"TCPWNDSIZE_F" (after freeing %s)\n", + (tcpwnd_size_t)pcb->snd_queuelen, + dbg_list_name)); + if (pcb->snd_queuelen != 0) { + LWIP_ASSERT("tcp_receive: valid queue length", + seg_list != NULL || dbg_other_seg_list != NULL); + } + } + return seg_list; +} + +/** + * Called by tcp_process. Checks if the given segment is an ACK for outstanding + * data, and if so frees the memory of the buffered data. Next, it places the + * segment on any of the receive queues (pcb->recved or pcb->ooseq). If the segment + * is buffered, the pbuf is referenced by pbuf_ref so that it will not be freed until + * it has been removed from the buffer. + * + * If the incoming segment constitutes an ACK for a segment that was used for RTT + * estimation, the RTT is estimated here as well. + * + * Called from tcp_process(). + */ +static void +tcp_receive(struct tcp_pcb *pcb) +{ + s16_t m; + u32_t right_wnd_edge; + int found_dupack = 0; + + LWIP_ASSERT("tcp_receive: invalid pcb", pcb != NULL); + LWIP_ASSERT("tcp_receive: wrong state", pcb->state >= ESTABLISHED); + + if (flags & TCP_ACK) { + right_wnd_edge = pcb->snd_wnd + pcb->snd_wl2; + + /* Update window. */ + if (TCP_SEQ_LT(pcb->snd_wl1, seqno) || + (pcb->snd_wl1 == seqno && TCP_SEQ_LT(pcb->snd_wl2, ackno)) || + (pcb->snd_wl2 == ackno && (u32_t)SND_WND_SCALE(pcb, tcphdr->wnd) > pcb->snd_wnd)) { + pcb->snd_wnd = SND_WND_SCALE(pcb, tcphdr->wnd); + /* keep track of the biggest window announced by the remote host to calculate + the maximum segment size */ + if (pcb->snd_wnd_max < pcb->snd_wnd) { + pcb->snd_wnd_max = pcb->snd_wnd; + } + pcb->snd_wl1 = seqno; + pcb->snd_wl2 = ackno; + LWIP_DEBUGF(TCP_WND_DEBUG, ("tcp_receive: window update %"TCPWNDSIZE_F"\n", pcb->snd_wnd)); +#if TCP_WND_DEBUG + } else { + if (pcb->snd_wnd != (tcpwnd_size_t)SND_WND_SCALE(pcb, tcphdr->wnd)) { + LWIP_DEBUGF(TCP_WND_DEBUG, + ("tcp_receive: no window update lastack %"U32_F" ackno %" + U32_F" wl1 %"U32_F" seqno %"U32_F" wl2 %"U32_F"\n", + pcb->lastack, ackno, pcb->snd_wl1, seqno, pcb->snd_wl2)); + } +#endif /* TCP_WND_DEBUG */ + } + + /* (From Stevens TCP/IP Illustrated Vol II, p970.) Its only a + * duplicate ack if: + * 1) It doesn't ACK new data + * 2) length of received packet is zero (i.e. 
no payload) + * 3) the advertised window hasn't changed + * 4) There is outstanding unacknowledged data (retransmission timer running) + * 5) The ACK is == biggest ACK sequence number so far seen (snd_una) + * + * If it passes all five, should process as a dupack: + * a) dupacks < 3: do nothing + * b) dupacks == 3: fast retransmit + * c) dupacks > 3: increase cwnd + * + * If it only passes 1-3, should reset dupack counter (and add to + * stats, which we don't do in lwIP) + * + * If it only passes 1, should reset dupack counter + * + */ + + /* Clause 1 */ + if (TCP_SEQ_LEQ(ackno, pcb->lastack)) { + /* Clause 2 */ + if (tcplen == 0) { + /* Clause 3 */ + if (pcb->snd_wl2 + pcb->snd_wnd == right_wnd_edge) { + /* Clause 4 */ + if (pcb->rtime >= 0) { + /* Clause 5 */ + if (pcb->lastack == ackno) { + found_dupack = 1; + if ((u8_t)(pcb->dupacks + 1) > pcb->dupacks) { + ++pcb->dupacks; + } + if (pcb->dupacks > 3) { + /* Inflate the congestion window */ + TCP_WND_INC(pcb->cwnd, pcb->mss); + } + if (pcb->dupacks >= 3) { + /* Do fast retransmit (checked via TF_INFR, not via dupacks count) */ + tcp_rexmit_fast(pcb); + } + } + } + } + } + /* If Clause (1) or more is true, but not a duplicate ack, reset + * count of consecutive duplicate acks */ + if (!found_dupack) { + pcb->dupacks = 0; + } + } else if (TCP_SEQ_BETWEEN(ackno, pcb->lastack + 1, pcb->snd_nxt)) { + /* We come here when the ACK acknowledges new data. */ + tcpwnd_size_t acked; + + /* Reset the "IN Fast Retransmit" flag, since we are no longer + in fast retransmit. Also reset the congestion window to the + slow start threshold. */ + if (pcb->flags & TF_INFR) { + tcp_clear_flags(pcb, TF_INFR); + pcb->cwnd = pcb->ssthresh; + pcb->bytes_acked = 0; + } + + /* Reset the number of retransmissions. */ + pcb->nrtx = 0; + + /* Reset the retransmission time-out. */ + pcb->rto = (s16_t)((pcb->sa >> 3) + pcb->sv); + + /* Record how much data this ACK acks */ + acked = (tcpwnd_size_t)(ackno - pcb->lastack); + + /* Reset the fast retransmit variables. */ + pcb->dupacks = 0; + pcb->lastack = ackno; + + /* Update the congestion control variables (cwnd and + ssthresh). */ + if (pcb->state >= ESTABLISHED) { + if (pcb->cwnd < pcb->ssthresh) { + tcpwnd_size_t increase; + /* limit to 1 SMSS segment during period following RTO */ + u8_t num_seg = (pcb->flags & TF_RTO) ? 1 : 2; + /* RFC 3465, section 2.2 Slow Start */ + increase = LWIP_MIN(acked, (tcpwnd_size_t)(num_seg * pcb->mss)); + TCP_WND_INC(pcb->cwnd, increase); + LWIP_DEBUGF(TCP_CWND_DEBUG, ("tcp_receive: slow start cwnd %"TCPWNDSIZE_F"\n", pcb->cwnd)); + } else { + /* RFC 3465, section 2.1 Congestion Avoidance */ + TCP_WND_INC(pcb->bytes_acked, acked); + if (pcb->bytes_acked >= pcb->cwnd) { + pcb->bytes_acked = (tcpwnd_size_t)(pcb->bytes_acked - pcb->cwnd); + TCP_WND_INC(pcb->cwnd, pcb->mss); + } + LWIP_DEBUGF(TCP_CWND_DEBUG, ("tcp_receive: congestion avoidance cwnd %"TCPWNDSIZE_F"\n", pcb->cwnd)); + } + } + LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_receive: ACK for %"U32_F", unacked->seqno %"U32_F":%"U32_F"\n", + ackno, + pcb->unacked != NULL ? + lwip_ntohl(pcb->unacked->tcphdr->seqno) : 0, + pcb->unacked != NULL ? + lwip_ntohl(pcb->unacked->tcphdr->seqno) + TCP_TCPLEN(pcb->unacked) : 0)); + + /* Remove segment from the unacknowledged list if the incoming + ACK acknowledges them. */ + pcb->unacked = tcp_free_acked_segments(pcb, pcb->unacked, "unacked", pcb->unsent); + /* We go through the ->unsent list to see if any of the segments + on the list are acknowledged by the ACK. 
This may seem + strange since an "unsent" segment shouldn't be acked. The + rationale is that lwIP puts all outstanding segments on the + ->unsent list after a retransmission, so these segments may + in fact have been sent once. */ + pcb->unsent = tcp_free_acked_segments(pcb, pcb->unsent, "unsent", pcb->unacked); + + /* If there's nothing left to acknowledge, stop the retransmit + timer, otherwise reset it to start again */ + if (pcb->unacked == NULL) { + pcb->rtime = -1; + } else { + pcb->rtime = 0; + } + + pcb->polltmr = 0; + +#if TCP_OVERSIZE + if (pcb->unsent == NULL) { + pcb->unsent_oversize = 0; + } +#endif /* TCP_OVERSIZE */ + +#if LWIP_IPV6 && LWIP_ND6_TCP_REACHABILITY_HINTS + if (ip_current_is_v6()) { + /* Inform neighbor reachability of forward progress. */ + nd6_reachability_hint(ip6_current_src_addr()); + } +#endif /* LWIP_IPV6 && LWIP_ND6_TCP_REACHABILITY_HINTS*/ + + pcb->snd_buf = (tcpwnd_size_t)(pcb->snd_buf + recv_acked); + /* check if this ACK ends our retransmission of in-flight data */ + if (pcb->flags & TF_RTO) { + /* RTO is done if + 1) both queues are empty or + 2) unacked is empty and unsent head contains data not part of RTO or + 3) unacked head contains data not part of RTO */ + if (pcb->unacked == NULL) { + if ((pcb->unsent == NULL) || + (TCP_SEQ_LEQ(pcb->rto_end, lwip_ntohl(pcb->unsent->tcphdr->seqno)))) { + tcp_clear_flags(pcb, TF_RTO); + } + } else if (TCP_SEQ_LEQ(pcb->rto_end, lwip_ntohl(pcb->unacked->tcphdr->seqno))) { + tcp_clear_flags(pcb, TF_RTO); + } + } + /* End of ACK for new data processing. */ + } else { + /* Out of sequence ACK, didn't really ack anything */ + tcp_send_empty_ack(pcb); + } + + LWIP_DEBUGF(TCP_RTO_DEBUG, ("tcp_receive: pcb->rttest %"U32_F" rtseq %"U32_F" ackno %"U32_F"\n", + pcb->rttest, pcb->rtseq, ackno)); + + /* RTT estimation calculations. This is done by checking if the + incoming segment acknowledges the segment we use to take a + round-trip time measurement. */ + if (pcb->rttest && TCP_SEQ_LT(pcb->rtseq, ackno)) { + /* diff between this shouldn't exceed 32K since this are tcp timer ticks + and a round-trip shouldn't be that long... */ + m = (s16_t)(tcp_ticks - pcb->rttest); + + LWIP_DEBUGF(TCP_RTO_DEBUG, ("tcp_receive: experienced rtt %"U16_F" ticks (%"U16_F" msec).\n", + m, (u16_t)(m * TCP_SLOW_INTERVAL))); + + /* This is taken directly from VJs original code in his paper */ + m = (s16_t)(m - (pcb->sa >> 3)); + pcb->sa = (s16_t)(pcb->sa + m); + if (m < 0) { + m = (s16_t) - m; + } + m = (s16_t)(m - (pcb->sv >> 2)); + pcb->sv = (s16_t)(pcb->sv + m); + pcb->rto = (s16_t)((pcb->sa >> 3) + pcb->sv); + + LWIP_DEBUGF(TCP_RTO_DEBUG, ("tcp_receive: RTO %"U16_F" (%"U16_F" milliseconds)\n", + pcb->rto, (u16_t)(pcb->rto * TCP_SLOW_INTERVAL))); + + pcb->rttest = 0; + } + } + + /* If the incoming segment contains data, we must process it + further unless the pcb already received a FIN. + (RFC 793, chapter 3.9, "SEGMENT ARRIVES" in states CLOSE-WAIT, CLOSING, + LAST-ACK and TIME-WAIT: "Ignore the segment text.") */ + if ((tcplen > 0) && (pcb->state < CLOSE_WAIT)) { + /* This code basically does three things: + + +) If the incoming segment contains data that is the next + in-sequence data, this data is passed to the application. This + might involve trimming the first edge of the data. The rcv_nxt + variable and the advertised window are adjusted. + + +) If the incoming segment has data that is above the next + sequence number expected (->rcv_nxt), the segment is placed on + the ->ooseq queue. 
This is done by finding the appropriate + place in the ->ooseq queue (which is ordered by sequence + number) and trim the segment in both ends if needed. An + immediate ACK is sent to indicate that we received an + out-of-sequence segment. + + +) Finally, we check if the first segment on the ->ooseq queue + now is in sequence (i.e., if rcv_nxt >= ooseq->seqno). If + rcv_nxt > ooseq->seqno, we must trim the first edge of the + segment on ->ooseq before we adjust rcv_nxt. The data in the + segments that are now on sequence are chained onto the + incoming segment so that we only need to call the application + once. + */ + + /* First, we check if we must trim the first edge. We have to do + this if the sequence number of the incoming segment is less + than rcv_nxt, and the sequence number plus the length of the + segment is larger than rcv_nxt. */ + /* if (TCP_SEQ_LT(seqno, pcb->rcv_nxt)) { + if (TCP_SEQ_LT(pcb->rcv_nxt, seqno + tcplen)) {*/ + if (TCP_SEQ_BETWEEN(pcb->rcv_nxt, seqno + 1, seqno + tcplen - 1)) { + /* Trimming the first edge is done by pushing the payload + pointer in the pbuf downwards. This is somewhat tricky since + we do not want to discard the full contents of the pbuf up to + the new starting point of the data since we have to keep the + TCP header which is present in the first pbuf in the chain. + + What is done is really quite a nasty hack: the first pbuf in + the pbuf chain is pointed to by inseg.p. Since we need to be + able to deallocate the whole pbuf, we cannot change this + inseg.p pointer to point to any of the later pbufs in the + chain. Instead, we point the ->payload pointer in the first + pbuf to data in one of the later pbufs. We also set the + inseg.data pointer to point to the right place. This way, the + ->p pointer will still point to the first pbuf, but the + ->p->payload pointer will point to data in another pbuf. + + After we are done with adjusting the pbuf pointers we must + adjust the ->data pointer in the seg and the segment + length.*/ + + struct pbuf *p = inseg.p; + u32_t off32 = pcb->rcv_nxt - seqno; + u16_t new_tot_len, off; + LWIP_ASSERT("inseg.p != NULL", inseg.p); + LWIP_ASSERT("insane offset!", (off32 < 0xffff)); + off = (u16_t)off32; + LWIP_ASSERT("pbuf too short!", (((s32_t)inseg.p->tot_len) >= off)); + inseg.len -= off; + new_tot_len = (u16_t)(inseg.p->tot_len - off); + while (p->len < off) { + off -= p->len; + /* all pbufs up to and including this one have len==0, so tot_len is equal */ + p->tot_len = new_tot_len; + p->len = 0; + p = p->next; + } + /* cannot fail... */ + pbuf_remove_header(p, off); + inseg.tcphdr->seqno = seqno = pcb->rcv_nxt; + } else { + if (TCP_SEQ_LT(seqno, pcb->rcv_nxt)) { + /* the whole segment is < rcv_nxt */ + /* must be a duplicate of a packet that has already been correctly handled */ + + LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_receive: duplicate seqno %"U32_F"\n", seqno)); + tcp_ack_now(pcb); + } + } + + /* The sequence number must be within the window (above rcv_nxt + and below rcv_nxt + rcv_wnd) in order to be further + processed. */ + if (TCP_SEQ_BETWEEN(seqno, pcb->rcv_nxt, + pcb->rcv_nxt + pcb->rcv_wnd - 1)) { + if (pcb->rcv_nxt == seqno) { + /* The incoming segment is the next in sequence. We check if + we have to trim the end of the segment and update rcv_nxt + and pass the data to the application. 
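+           Trimming is only needed when the segment runs past the right
+           edge of our receive window (rcv_nxt + rcv_wnd).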
*/ + tcplen = TCP_TCPLEN(&inseg); + + if (tcplen > pcb->rcv_wnd) { + LWIP_DEBUGF(TCP_INPUT_DEBUG, + ("tcp_receive: other end overran receive window" + "seqno %"U32_F" len %"U16_F" right edge %"U32_F"\n", + seqno, tcplen, pcb->rcv_nxt + pcb->rcv_wnd)); + if (TCPH_FLAGS(inseg.tcphdr) & TCP_FIN) { + /* Must remove the FIN from the header as we're trimming + * that byte of sequence-space from the packet */ + TCPH_FLAGS_SET(inseg.tcphdr, TCPH_FLAGS(inseg.tcphdr) & ~(unsigned int)TCP_FIN); + } + /* Adjust length of segment to fit in the window. */ + TCPWND_CHECK16(pcb->rcv_wnd); + inseg.len = (u16_t)pcb->rcv_wnd; + if (TCPH_FLAGS(inseg.tcphdr) & TCP_SYN) { + inseg.len -= 1; + } + pbuf_realloc(inseg.p, inseg.len); + tcplen = TCP_TCPLEN(&inseg); + LWIP_ASSERT("tcp_receive: segment not trimmed correctly to rcv_wnd\n", + (seqno + tcplen) == (pcb->rcv_nxt + pcb->rcv_wnd)); + } +#if TCP_QUEUE_OOSEQ + /* Received in-sequence data, adjust ooseq data if: + - FIN has been received or + - inseq overlaps with ooseq */ + if (pcb->ooseq != NULL) { + if (TCPH_FLAGS(inseg.tcphdr) & TCP_FIN) { + LWIP_DEBUGF(TCP_INPUT_DEBUG, + ("tcp_receive: received in-order FIN, binning ooseq queue\n")); + /* Received in-order FIN means anything that was received + * out of order must now have been received in-order, so + * bin the ooseq queue */ + while (pcb->ooseq != NULL) { + struct tcp_seg *old_ooseq = pcb->ooseq; + pcb->ooseq = pcb->ooseq->next; + tcp_seg_free(old_ooseq); + } + } else { + struct tcp_seg *next = pcb->ooseq; + /* Remove all segments on ooseq that are covered by inseg already. + * FIN is copied from ooseq to inseg if present. */ + while (next && + TCP_SEQ_GEQ(seqno + tcplen, + next->tcphdr->seqno + next->len)) { + struct tcp_seg *tmp; + /* inseg cannot have FIN here (already processed above) */ + if ((TCPH_FLAGS(next->tcphdr) & TCP_FIN) != 0 && + (TCPH_FLAGS(inseg.tcphdr) & TCP_SYN) == 0) { + TCPH_SET_FLAG(inseg.tcphdr, TCP_FIN); + tcplen = TCP_TCPLEN(&inseg); + } + tmp = next; + next = next->next; + tcp_seg_free(tmp); + } + /* Now trim right side of inseg if it overlaps with the first + * segment on ooseq */ + if (next && + TCP_SEQ_GT(seqno + tcplen, + next->tcphdr->seqno)) { + /* inseg cannot have FIN here (already processed above) */ + inseg.len = (u16_t)(next->tcphdr->seqno - seqno); + if (TCPH_FLAGS(inseg.tcphdr) & TCP_SYN) { + inseg.len -= 1; + } + pbuf_realloc(inseg.p, inseg.len); + tcplen = TCP_TCPLEN(&inseg); + LWIP_ASSERT("tcp_receive: segment not trimmed correctly to ooseq queue\n", + (seqno + tcplen) == next->tcphdr->seqno); + } + pcb->ooseq = next; + } + } +#endif /* TCP_QUEUE_OOSEQ */ + + pcb->rcv_nxt = seqno + tcplen; + + /* Update the receiver's (our) window. */ + LWIP_ASSERT("tcp_receive: tcplen > rcv_wnd\n", pcb->rcv_wnd >= tcplen); + pcb->rcv_wnd -= tcplen; + + tcp_update_rcv_ann_wnd(pcb); + + /* If there is data in the segment, we make preparations to + pass this up to the application. The ->recv_data variable + is used for holding the pbuf that goes to the + application. The code for reassembling out-of-sequence data + chains its data on this pbuf as well. + + If the segment was a FIN, we set the TF_GOT_FIN flag that will + be used to indicate to the application that the remote side has + closed its end of the connection. */ + if (inseg.p->tot_len > 0) { + recv_data = inseg.p; + /* Since this pbuf now is the responsibility of the + application, we delete our reference to it so that we won't + (mistakingly) deallocate it. 
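+             Once the recv callback has accepted it (returned ERR_OK),
+             freeing recv_data is up to the application.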
*/ + inseg.p = NULL; + } + if (TCPH_FLAGS(inseg.tcphdr) & TCP_FIN) { + LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_receive: received FIN.\n")); + recv_flags |= TF_GOT_FIN; + } + +#if TCP_QUEUE_OOSEQ + /* We now check if we have segments on the ->ooseq queue that + are now in sequence. */ + while (pcb->ooseq != NULL && + pcb->ooseq->tcphdr->seqno == pcb->rcv_nxt) { + + struct tcp_seg *cseg = pcb->ooseq; + seqno = pcb->ooseq->tcphdr->seqno; + + pcb->rcv_nxt += TCP_TCPLEN(cseg); + LWIP_ASSERT("tcp_receive: ooseq tcplen > rcv_wnd\n", + pcb->rcv_wnd >= TCP_TCPLEN(cseg)); + pcb->rcv_wnd -= TCP_TCPLEN(cseg); + + tcp_update_rcv_ann_wnd(pcb); + + if (cseg->p->tot_len > 0) { + /* Chain this pbuf onto the pbuf that we will pass to + the application. */ + /* With window scaling, this can overflow recv_data->tot_len, but + that's not a problem since we explicitly fix that before passing + recv_data to the application. */ + if (recv_data) { + pbuf_cat(recv_data, cseg->p); + } else { + recv_data = cseg->p; + } + cseg->p = NULL; + } + if (TCPH_FLAGS(cseg->tcphdr) & TCP_FIN) { + LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_receive: dequeued FIN.\n")); + recv_flags |= TF_GOT_FIN; + if (pcb->state == ESTABLISHED) { /* force passive close or we can move to active close */ + pcb->state = CLOSE_WAIT; + } + } + + pcb->ooseq = cseg->next; + tcp_seg_free(cseg); + } +#if LWIP_TCP_SACK_OUT + if (pcb->flags & TF_SACK) { + if (pcb->ooseq != NULL) { + /* Some segments may have been removed from ooseq, let's remove all SACKs that + describe anything before the new beginning of that list. */ + tcp_remove_sacks_lt(pcb, pcb->ooseq->tcphdr->seqno); + } else if (LWIP_TCP_SACK_VALID(pcb, 0)) { + /* ooseq has been cleared. Nothing to SACK */ + memset(pcb->rcv_sacks, 0, sizeof(pcb->rcv_sacks)); + } + } +#endif /* LWIP_TCP_SACK_OUT */ +#endif /* TCP_QUEUE_OOSEQ */ + + + /* Acknowledge the segment(s). */ + tcp_ack(pcb); + +#if LWIP_TCP_SACK_OUT + if (LWIP_TCP_SACK_VALID(pcb, 0)) { + /* Normally the ACK for the data received could be piggy-backed on a data packet, + but lwIP currently does not support including SACKs in data packets. So we force + it to respond with an empty ACK packet (only if there is at least one SACK to be sent). + NOTE: tcp_send_empty_ack() on success clears the ACK flags (set by tcp_ack()) */ + tcp_send_empty_ack(pcb); + } +#endif /* LWIP_TCP_SACK_OUT */ + +#if LWIP_IPV6 && LWIP_ND6_TCP_REACHABILITY_HINTS + if (ip_current_is_v6()) { + /* Inform neighbor reachability of forward progress. */ + nd6_reachability_hint(ip6_current_src_addr()); + } +#endif /* LWIP_IPV6 && LWIP_ND6_TCP_REACHABILITY_HINTS*/ + + } else { + /* We get here if the incoming segment is out-of-sequence. */ + +#if TCP_QUEUE_OOSEQ + /* We queue the segment on the ->ooseq queue. */ + if (pcb->ooseq == NULL) { + pcb->ooseq = tcp_seg_copy(&inseg); +#if LWIP_TCP_SACK_OUT + if (pcb->flags & TF_SACK) { + /* All the SACKs should be invalid, so we can simply store the most recent one: */ + pcb->rcv_sacks[0].left = seqno; + pcb->rcv_sacks[0].right = seqno + inseg.len; + } +#endif /* LWIP_TCP_SACK_OUT */ + } else { + /* If the queue is not empty, we walk through the queue and + try to find a place where the sequence number of the + incoming segment is between the sequence numbers of the + previous and the next segment on the ->ooseq queue. That is + the place where we put the incoming segment. If needed, we + trim the second edges of the previous and the incoming + segment so that it will fit into the sequence. 
+ + If the incoming segment has the same sequence number as a + segment on the ->ooseq queue, we discard the segment that + contains less data. */ + +#if LWIP_TCP_SACK_OUT + /* This is the left edge of the lowest possible SACK range. + It may start before the newly received segment (possibly adjusted below). */ + u32_t sackbeg = TCP_SEQ_LT(seqno, pcb->ooseq->tcphdr->seqno) ? seqno : pcb->ooseq->tcphdr->seqno; +#endif /* LWIP_TCP_SACK_OUT */ + struct tcp_seg *next, *prev = NULL; + for (next = pcb->ooseq; next != NULL; next = next->next) { + if (seqno == next->tcphdr->seqno) { + /* The sequence number of the incoming segment is the + same as the sequence number of the segment on + ->ooseq. We check the lengths to see which one to + discard. */ + if (inseg.len > next->len) { + /* The incoming segment is larger than the old + segment. We replace some segments with the new + one. */ + struct tcp_seg *cseg = tcp_seg_copy(&inseg); + if (cseg != NULL) { + if (prev != NULL) { + prev->next = cseg; + } else { + pcb->ooseq = cseg; + } + tcp_oos_insert_segment(cseg, next); + } + break; + } else { + /* Either the lengths are the same or the incoming + segment was smaller than the old one; in either + case, we ditch the incoming segment. */ + break; + } + } else { + if (prev == NULL) { + if (TCP_SEQ_LT(seqno, next->tcphdr->seqno)) { + /* The sequence number of the incoming segment is lower + than the sequence number of the first segment on the + queue. We put the incoming segment first on the + queue. */ + struct tcp_seg *cseg = tcp_seg_copy(&inseg); + if (cseg != NULL) { + pcb->ooseq = cseg; + tcp_oos_insert_segment(cseg, next); + } + break; + } + } else { + /*if (TCP_SEQ_LT(prev->tcphdr->seqno, seqno) && + TCP_SEQ_LT(seqno, next->tcphdr->seqno)) {*/ + if (TCP_SEQ_BETWEEN(seqno, prev->tcphdr->seqno + 1, next->tcphdr->seqno - 1)) { + /* The sequence number of the incoming segment is in + between the sequence numbers of the previous and + the next segment on ->ooseq. We trim trim the previous + segment, delete next segments that included in received segment + and trim received, if needed. */ + struct tcp_seg *cseg = tcp_seg_copy(&inseg); + if (cseg != NULL) { + if (TCP_SEQ_GT(prev->tcphdr->seqno + prev->len, seqno)) { + /* We need to trim the prev segment. */ + prev->len = (u16_t)(seqno - prev->tcphdr->seqno); + pbuf_realloc(prev->p, prev->len); + } + prev->next = cseg; + tcp_oos_insert_segment(cseg, next); + } + break; + } + } + +#if LWIP_TCP_SACK_OUT + /* The new segment goes after the 'next' one. If there is a "hole" in sequence numbers + between 'prev' and the beginning of 'next', we want to move sackbeg. */ + if (prev != NULL && prev->tcphdr->seqno + prev->len != next->tcphdr->seqno) { + sackbeg = next->tcphdr->seqno; + } +#endif /* LWIP_TCP_SACK_OUT */ + + /* We don't use 'prev' below, so let's set it to current 'next'. + This way even if we break the loop below, 'prev' will be pointing + at the segment right in front of the newly added one. */ + prev = next; + + /* If the "next" segment is the last segment on the + ooseq queue, we add the incoming segment to the end + of the list. */ + if (next->next == NULL && + TCP_SEQ_GT(seqno, next->tcphdr->seqno)) { + if (TCPH_FLAGS(next->tcphdr) & TCP_FIN) { + /* segment "next" already contains all data */ + break; + } + next->next = tcp_seg_copy(&inseg); + if (next->next != NULL) { + if (TCP_SEQ_GT(next->tcphdr->seqno + next->len, seqno)) { + /* We need to trim the last segment. 
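+                    Its length is cut back so that it ends exactly where
+                    the newly appended segment begins.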
*/ + next->len = (u16_t)(seqno - next->tcphdr->seqno); + pbuf_realloc(next->p, next->len); + } + /* check if the remote side overruns our receive window */ + if (TCP_SEQ_GT((u32_t)tcplen + seqno, pcb->rcv_nxt + (u32_t)pcb->rcv_wnd)) { + LWIP_DEBUGF(TCP_INPUT_DEBUG, + ("tcp_receive: other end overran receive window" + "seqno %"U32_F" len %"U16_F" right edge %"U32_F"\n", + seqno, tcplen, pcb->rcv_nxt + pcb->rcv_wnd)); + if (TCPH_FLAGS(next->next->tcphdr) & TCP_FIN) { + /* Must remove the FIN from the header as we're trimming + * that byte of sequence-space from the packet */ + TCPH_FLAGS_SET(next->next->tcphdr, TCPH_FLAGS(next->next->tcphdr) & ~TCP_FIN); + } + /* Adjust length of segment to fit in the window. */ + next->next->len = (u16_t)(pcb->rcv_nxt + pcb->rcv_wnd - seqno); + pbuf_realloc(next->next->p, next->next->len); + tcplen = TCP_TCPLEN(next->next); + LWIP_ASSERT("tcp_receive: segment not trimmed correctly to rcv_wnd\n", + (seqno + tcplen) == (pcb->rcv_nxt + pcb->rcv_wnd)); + } + } + break; + } + } + } + +#if LWIP_TCP_SACK_OUT + if (pcb->flags & TF_SACK) { + if (prev == NULL) { + /* The new segment is at the beginning. sackbeg should already be set properly. + We need to find the right edge. */ + next = pcb->ooseq; + } else if (prev->next != NULL) { + /* The new segment was added after 'prev'. If there is a "hole" between 'prev' and 'prev->next', + we need to move sackbeg. After that we should find the right edge. */ + next = prev->next; + if (prev->tcphdr->seqno + prev->len != next->tcphdr->seqno) { + sackbeg = next->tcphdr->seqno; + } + } else { + next = NULL; + } + if (next != NULL) { + u32_t sackend = next->tcphdr->seqno; + for ( ; (next != NULL) && (sackend == next->tcphdr->seqno); next = next->next) { + sackend += next->len; + } + tcp_add_sack(pcb, sackbeg, sackend); + } + } +#endif /* LWIP_TCP_SACK_OUT */ + } +#if defined(TCP_OOSEQ_BYTES_LIMIT) || defined(TCP_OOSEQ_PBUFS_LIMIT) + { + /* Check that the data on ooseq doesn't exceed one of the limits + and throw away everything above that limit. */ +#ifdef TCP_OOSEQ_BYTES_LIMIT + const u32_t ooseq_max_blen = TCP_OOSEQ_BYTES_LIMIT(pcb); + u32_t ooseq_blen = 0; +#endif +#ifdef TCP_OOSEQ_PBUFS_LIMIT + const u16_t ooseq_max_qlen = TCP_OOSEQ_PBUFS_LIMIT(pcb); + u16_t ooseq_qlen = 0; +#endif + struct tcp_seg *next, *prev = NULL; + for (next = pcb->ooseq; next != NULL; prev = next, next = next->next) { + struct pbuf *p = next->p; + int stop_here = 0; +#ifdef TCP_OOSEQ_BYTES_LIMIT + ooseq_blen += p->tot_len; + if (ooseq_blen > ooseq_max_blen) { + stop_here = 1; + } +#endif +#ifdef TCP_OOSEQ_PBUFS_LIMIT + ooseq_qlen += pbuf_clen(p); + if (ooseq_qlen > ooseq_max_qlen) { + stop_here = 1; + } +#endif + if (stop_here) { +#if LWIP_TCP_SACK_OUT + if (pcb->flags & TF_SACK) { + /* Let's remove all SACKs from next's seqno up. */ + tcp_remove_sacks_gt(pcb, next->tcphdr->seqno); + } +#endif /* LWIP_TCP_SACK_OUT */ + /* too much ooseq data, dump this and everything after it */ + tcp_segs_free(next); + if (prev == NULL) { + /* first ooseq segment is too much, dump the whole queue */ + pcb->ooseq = NULL; + } else { + /* just dump 'next' and everything after it */ + prev->next = NULL; + } + break; + } + } + } +#endif /* TCP_OOSEQ_BYTES_LIMIT || TCP_OOSEQ_PBUFS_LIMIT */ +#endif /* TCP_QUEUE_OOSEQ */ + + /* We send the ACK packet after we've (potentially) dealt with SACKs, + so they can be included in the acknowledgment. */ + tcp_send_empty_ack(pcb); + } + } else { + /* The incoming segment is not within the window. 
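+           Per RFC 793 an unacceptable segment is answered with an ACK for
+           the current RCV.NXT, which is what the empty ACK below carries.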
*/ + tcp_send_empty_ack(pcb); + } + } else { + /* Segments with length 0 is taken care of here. Segments that + fall out of the window are ACKed. */ + if (!TCP_SEQ_BETWEEN(seqno, pcb->rcv_nxt, pcb->rcv_nxt + pcb->rcv_wnd - 1)) { + tcp_ack_now(pcb); + } + } +} + +static u8_t +tcp_get_next_optbyte(void) +{ + u16_t optidx = tcp_optidx++; + if ((tcphdr_opt2 == NULL) || (optidx < tcphdr_opt1len)) { + u8_t *opts = (u8_t *)tcphdr + TCP_HLEN; + return opts[optidx]; + } else { + u8_t idx = (u8_t)(optidx - tcphdr_opt1len); + return tcphdr_opt2[idx]; + } +} + +/** + * Parses the options contained in the incoming segment. + * + * Called from tcp_listen_input() and tcp_process(). + * Currently, only the MSS option is supported! + * + * @param pcb the tcp_pcb for which a segment arrived + */ +static void +tcp_parseopt(struct tcp_pcb *pcb) +{ + u8_t data; + u16_t mss; +#if LWIP_TCP_TIMESTAMPS + u32_t tsval; +#endif + + LWIP_ASSERT("tcp_parseopt: invalid pcb", pcb != NULL); + + /* Parse the TCP MSS option, if present. */ + if (tcphdr_optlen != 0) { + for (tcp_optidx = 0; tcp_optidx < tcphdr_optlen; ) { + u8_t opt = tcp_get_next_optbyte(); + switch (opt) { + case LWIP_TCP_OPT_EOL: + /* End of options. */ + LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_parseopt: EOL\n")); + return; + case LWIP_TCP_OPT_NOP: + /* NOP option. */ + LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_parseopt: NOP\n")); + break; + case LWIP_TCP_OPT_MSS: + LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_parseopt: MSS\n")); + if (tcp_get_next_optbyte() != LWIP_TCP_OPT_LEN_MSS || (tcp_optidx - 2 + LWIP_TCP_OPT_LEN_MSS) > tcphdr_optlen) { + /* Bad length */ + LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_parseopt: bad length\n")); + return; + } + /* An MSS option with the right option length. */ + mss = (u16_t)(tcp_get_next_optbyte() << 8); + mss |= tcp_get_next_optbyte(); + /* Limit the mss to the configured TCP_MSS and prevent division by zero */ + pcb->mss = ((mss > TCP_MSS) || (mss == 0)) ? TCP_MSS : mss; + break; +#if LWIP_WND_SCALE + case LWIP_TCP_OPT_WS: + LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_parseopt: WND_SCALE\n")); + if (tcp_get_next_optbyte() != LWIP_TCP_OPT_LEN_WS || (tcp_optidx - 2 + LWIP_TCP_OPT_LEN_WS) > tcphdr_optlen) { + /* Bad length */ + LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_parseopt: bad length\n")); + return; + } + /* An WND_SCALE option with the right option length. 
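+         The announced shift count is clamped to 14 below, the maximum
+         allowed for the window scale option (RFC 7323).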
*/ + data = tcp_get_next_optbyte(); + /* If syn was received with wnd scale option, + activate wnd scale opt, but only if this is not a retransmission */ + if ((flags & TCP_SYN) && !(pcb->flags & TF_WND_SCALE)) { + pcb->snd_scale = data; + if (pcb->snd_scale > 14U) { + pcb->snd_scale = 14U; + } + pcb->rcv_scale = TCP_RCV_SCALE; + tcp_set_flags(pcb, TF_WND_SCALE); + /* window scaling is enabled, we can use the full receive window */ + LWIP_ASSERT("window not at default value", pcb->rcv_wnd == TCPWND_MIN16(TCP_WND)); + LWIP_ASSERT("window not at default value", pcb->rcv_ann_wnd == TCPWND_MIN16(TCP_WND)); + pcb->rcv_wnd = pcb->rcv_ann_wnd = TCP_WND; + } + break; +#endif /* LWIP_WND_SCALE */ +#if LWIP_TCP_TIMESTAMPS + case LWIP_TCP_OPT_TS: + LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_parseopt: TS\n")); + if (tcp_get_next_optbyte() != LWIP_TCP_OPT_LEN_TS || (tcp_optidx - 2 + LWIP_TCP_OPT_LEN_TS) > tcphdr_optlen) { + /* Bad length */ + LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_parseopt: bad length\n")); + return; + } + /* TCP timestamp option with valid length */ + tsval = tcp_get_next_optbyte(); + tsval |= (tcp_get_next_optbyte() << 8); + tsval |= (tcp_get_next_optbyte() << 16); + tsval |= (tcp_get_next_optbyte() << 24); + if (flags & TCP_SYN) { + pcb->ts_recent = lwip_ntohl(tsval); + /* Enable sending timestamps in every segment now that we know + the remote host supports it. */ + tcp_set_flags(pcb, TF_TIMESTAMP); + } else if (TCP_SEQ_BETWEEN(pcb->ts_lastacksent, seqno, seqno + tcplen)) { + pcb->ts_recent = lwip_ntohl(tsval); + } + /* Advance to next option (6 bytes already read) */ + tcp_optidx += LWIP_TCP_OPT_LEN_TS - 6; + break; +#endif /* LWIP_TCP_TIMESTAMPS */ +#if LWIP_TCP_SACK_OUT + case LWIP_TCP_OPT_SACK_PERM: + LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_parseopt: SACK_PERM\n")); + if (tcp_get_next_optbyte() != LWIP_TCP_OPT_LEN_SACK_PERM || (tcp_optidx - 2 + LWIP_TCP_OPT_LEN_SACK_PERM) > tcphdr_optlen) { + /* Bad length */ + LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_parseopt: bad length\n")); + return; + } + /* TCP SACK_PERM option with valid length */ + if (flags & TCP_SYN) { + /* We only set it if we receive it in a SYN (or SYN+ACK) packet */ + tcp_set_flags(pcb, TF_SACK); + } + break; +#endif /* LWIP_TCP_SACK_OUT */ + default: + LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_parseopt: other\n")); + data = tcp_get_next_optbyte(); + if (data < 2) { + LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_parseopt: bad length\n")); + /* If the length field is zero, the options are malformed + and we don't process them further. */ + return; + } + /* All other options have a length field, so that we easily + can skip past them. */ + tcp_optidx += data - 2; + } + } + } +} + +void +tcp_trigger_input_pcb_close(void) +{ + recv_flags |= TF_CLOSED; +} + +#if LWIP_TCP_SACK_OUT +/** + * Called by tcp_receive() to add new SACK entry. + * + * The new SACK entry will be placed at the beginning of rcv_sacks[], as the newest one. + * Existing SACK entries will be "pushed back", to preserve their order. + * This is the behavior described in RFC 2018, section 4. 
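+ * Blocks that overlap the new [left,right) range are dropped and the
+ * remaining ones are shifted back, so rcv_sacks[0] always holds the most
+ * recently received block.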
+ * + * @param pcb the tcp_pcb for which a segment arrived + * @param left the left side of the SACK (the first sequence number) + * @param right the right side of the SACK (the first sequence number past this SACK) + */ +static void +tcp_add_sack(struct tcp_pcb *pcb, u32_t left, u32_t right) +{ + u8_t i; + u8_t unused_idx; + + if ((pcb->flags & TF_SACK) == 0 || !TCP_SEQ_LT(left, right)) { + return; + } + + /* First, let's remove all SACKs that are no longer needed (because they overlap with the newest one), + while moving all other SACKs forward. + We run this loop for all entries, until we find the first invalid one. + There is no point checking after that. */ + for (i = unused_idx = 0; (i < LWIP_TCP_MAX_SACK_NUM) && LWIP_TCP_SACK_VALID(pcb, i); ++i) { + /* We only want to use SACK at [i] if it doesn't overlap with left:right range. + It does not overlap if its right side is before the newly added SACK, + or if its left side is after the newly added SACK. + NOTE: The equality should not really happen, but it doesn't hurt. */ + if (TCP_SEQ_LEQ(pcb->rcv_sacks[i].right, left) || TCP_SEQ_LEQ(right, pcb->rcv_sacks[i].left)) { + if (unused_idx != i) { + /* We don't need to copy if it's already in the right spot */ + pcb->rcv_sacks[unused_idx] = pcb->rcv_sacks[i]; + } + ++unused_idx; + } + } + + /* Now 'unused_idx' is the index of the first invalid SACK entry, + anywhere between 0 (no valid entries) and LWIP_TCP_MAX_SACK_NUM (all entries are valid). + We want to clear this and all following SACKs. + However, we will be adding another one in the front (and shifting everything else back). + So let's just iterate from the back, and set each entry to the one to the left if it's valid, + or to 0 if it is not. */ + for (i = LWIP_TCP_MAX_SACK_NUM - 1; i > 0; --i) { + /* [i] is the index we are setting, and the value should be at index [i-1], + or 0 if that index is unused (>= unused_idx). */ + if (i - 1 >= unused_idx) { + /* [i-1] is unused. Let's clear [i]. */ + pcb->rcv_sacks[i].left = pcb->rcv_sacks[i].right = 0; + } else { + pcb->rcv_sacks[i] = pcb->rcv_sacks[i - 1]; + } + } + + /* And now we can store the newest SACK */ + pcb->rcv_sacks[0].left = left; + pcb->rcv_sacks[0].right = right; +} + +/** + * Called to remove a range of SACKs. + * + * SACK entries will be removed or adjusted to not acknowledge any sequence + * numbers that are less than 'seq' passed. It not only invalidates entries, + * but also moves all entries that are still valid to the beginning. + * + * @param pcb the tcp_pcb to modify + * @param seq the lowest sequence number to keep in SACK entries + */ +static void +tcp_remove_sacks_lt(struct tcp_pcb *pcb, u32_t seq) +{ + u8_t i; + u8_t unused_idx; + + /* We run this loop for all entries, until we find the first invalid one. + There is no point checking after that. */ + for (i = unused_idx = 0; (i < LWIP_TCP_MAX_SACK_NUM) && LWIP_TCP_SACK_VALID(pcb, i); ++i) { + /* We only want to use SACK at index [i] if its right side is > 'seq'. */ + if (TCP_SEQ_GT(pcb->rcv_sacks[i].right, seq)) { + if (unused_idx != i) { + /* We only copy it if it's not in the right spot already. */ + pcb->rcv_sacks[unused_idx] = pcb->rcv_sacks[i]; + } + /* NOTE: It is possible that its left side is < 'seq', in which case we should adjust it. 
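+         Clamping the left edge to 'seq' keeps the block entirely at or
+         above the new lower bound.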
*/ + if (TCP_SEQ_LT(pcb->rcv_sacks[unused_idx].left, seq)) { + pcb->rcv_sacks[unused_idx].left = seq; + } + ++unused_idx; + } + } + + /* We also need to invalidate everything from 'unused_idx' till the end */ + for (i = unused_idx; i < LWIP_TCP_MAX_SACK_NUM; ++i) { + pcb->rcv_sacks[i].left = pcb->rcv_sacks[i].right = 0; + } +} + +#if defined(TCP_OOSEQ_BYTES_LIMIT) || defined(TCP_OOSEQ_PBUFS_LIMIT) +/** + * Called to remove a range of SACKs. + * + * SACK entries will be removed or adjusted to not acknowledge any sequence + * numbers that are greater than (or equal to) 'seq' passed. It not only invalidates entries, + * but also moves all entries that are still valid to the beginning. + * + * @param pcb the tcp_pcb to modify + * @param seq the highest sequence number to keep in SACK entries + */ +static void +tcp_remove_sacks_gt(struct tcp_pcb *pcb, u32_t seq) +{ + u8_t i; + u8_t unused_idx; + + /* We run this loop for all entries, until we find the first invalid one. + There is no point checking after that. */ + for (i = unused_idx = 0; (i < LWIP_TCP_MAX_SACK_NUM) && LWIP_TCP_SACK_VALID(pcb, i); ++i) { + /* We only want to use SACK at index [i] if its left side is < 'seq'. */ + if (TCP_SEQ_LT(pcb->rcv_sacks[i].left, seq)) { + if (unused_idx != i) { + /* We only copy it if it's not in the right spot already. */ + pcb->rcv_sacks[unused_idx] = pcb->rcv_sacks[i]; + } + /* NOTE: It is possible that its right side is > 'seq', in which case we should adjust it. */ + if (TCP_SEQ_GT(pcb->rcv_sacks[unused_idx].right, seq)) { + pcb->rcv_sacks[unused_idx].right = seq; + } + ++unused_idx; + } + } + + /* We also need to invalidate everything from 'unused_idx' till the end */ + for (i = unused_idx; i < LWIP_TCP_MAX_SACK_NUM; ++i) { + pcb->rcv_sacks[i].left = pcb->rcv_sacks[i].right = 0; + } +} +#endif /* TCP_OOSEQ_BYTES_LIMIT || TCP_OOSEQ_PBUFS_LIMIT */ + +#endif /* LWIP_TCP_SACK_OUT */ + +#endif /* LWIP_TCP */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/tcp_out.c b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/tcp_out.c new file mode 100644 index 0000000..375e18d --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/tcp_out.c @@ -0,0 +1,2190 @@ +/** + * @file + * Transmission Control Protocol, outgoing traffic + * + * The output functions of TCP. + * + * There are two distinct ways for TCP segments to get sent: + * - queued data: these are segments transferring data or segments containing + * SYN or FIN (which both count as one sequence number). They are created as + * struct @ref pbuf together with a struct tcp_seg and enqueue to the + * unsent list of the pcb. They are sent by tcp_output: + * - @ref tcp_write : creates data segments + * - @ref tcp_split_unsent_seg : splits a data segment + * - @ref tcp_enqueue_flags : creates SYN-only or FIN-only segments + * - @ref tcp_output / tcp_output_segment : finalize the tcp header + * (e.g. sequence numbers, options, checksum) and output to IP + * - the various tcp_rexmit functions shuffle around segments between the + * unsent an unacked lists to retransmit them + * - tcp_create_segment and tcp_pbuf_prealloc allocate pbuf and + * segment for these functions + * - direct send: these segments don't contain data but control the connection + * behaviour. 
They are created as pbuf only and sent directly without + * enqueueing them: + * - @ref tcp_send_empty_ack sends an ACK-only segment + * - @ref tcp_rst sends a RST segment + * - @ref tcp_keepalive sends a keepalive segment + * - @ref tcp_zero_window_probe sends a window probe segment + * - tcp_output_alloc_header allocates a header-only pbuf for these functions + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ + +#include "lwip/opt.h" + +#if LWIP_TCP /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/priv/tcp_priv.h" +#include "lwip/def.h" +#include "lwip/mem.h" +#include "lwip/memp.h" +#include "lwip/ip_addr.h" +#include "lwip/netif.h" +#include "lwip/inet_chksum.h" +#include "lwip/stats.h" +#include "lwip/ip6.h" +#include "lwip/ip6_addr.h" +#if LWIP_TCP_TIMESTAMPS +#include "lwip/sys.h" +#endif + +#include + +#ifdef LWIP_HOOK_FILENAME +#include LWIP_HOOK_FILENAME +#endif + +/* Allow to add custom TCP header options by defining this hook */ +#ifdef LWIP_HOOK_TCP_OUT_TCPOPT_LENGTH +#define LWIP_TCP_OPT_LENGTH_SEGMENT(flags, pcb) LWIP_HOOK_TCP_OUT_TCPOPT_LENGTH(pcb, LWIP_TCP_OPT_LENGTH(flags)) +#else +#define LWIP_TCP_OPT_LENGTH_SEGMENT(flags, pcb) LWIP_TCP_OPT_LENGTH(flags) +#endif + +/* Define some copy-macros for checksum-on-copy so that the code looks + nicer by preventing too many ifdef's. 
*/ +#if TCP_CHECKSUM_ON_COPY +#define TCP_DATA_COPY(dst, src, len, seg) do { \ + tcp_seg_add_chksum(LWIP_CHKSUM_COPY(dst, src, len), \ + len, &seg->chksum, &seg->chksum_swapped); \ + seg->flags |= TF_SEG_DATA_CHECKSUMMED; } while(0) +#define TCP_DATA_COPY2(dst, src, len, chksum, chksum_swapped) \ + tcp_seg_add_chksum(LWIP_CHKSUM_COPY(dst, src, len), len, chksum, chksum_swapped); +#else /* TCP_CHECKSUM_ON_COPY*/ +#define TCP_DATA_COPY(dst, src, len, seg) MEMCPY(dst, src, len) +#define TCP_DATA_COPY2(dst, src, len, chksum, chksum_swapped) MEMCPY(dst, src, len) +#endif /* TCP_CHECKSUM_ON_COPY*/ + +/** Define this to 1 for an extra check that the output checksum is valid + * (usefule when the checksum is generated by the application, not the stack) */ +#ifndef TCP_CHECKSUM_ON_COPY_SANITY_CHECK +#define TCP_CHECKSUM_ON_COPY_SANITY_CHECK 0 +#endif +/* Allow to override the failure of sanity check from warning to e.g. hard failure */ +#if TCP_CHECKSUM_ON_COPY_SANITY_CHECK +#ifndef TCP_CHECKSUM_ON_COPY_SANITY_CHECK_FAIL +#define TCP_CHECKSUM_ON_COPY_SANITY_CHECK_FAIL(msg) LWIP_DEBUGF(TCP_DEBUG | LWIP_DBG_LEVEL_WARNING, msg) +#endif +#endif + +#if TCP_OVERSIZE +/** The size of segment pbufs created when TCP_OVERSIZE is enabled */ +#ifndef TCP_OVERSIZE_CALC_LENGTH +#define TCP_OVERSIZE_CALC_LENGTH(length) ((length) + TCP_OVERSIZE) +#endif +#endif + +/* Forward declarations.*/ +static err_t tcp_output_segment(struct tcp_seg *seg, struct tcp_pcb *pcb, struct netif *netif); + +/* tcp_route: common code that returns a fixed bound netif or calls ip_route */ +static struct netif * +tcp_route(const struct tcp_pcb *pcb, const ip_addr_t *src, const ip_addr_t *dst) +{ + LWIP_UNUSED_ARG(src); /* in case IPv4-only and source-based routing is disabled */ + + if ((pcb != NULL) && (pcb->netif_idx != NETIF_NO_INDEX)) { + return netif_get_by_index(pcb->netif_idx); + } else { + return ip_route(src, dst); + } +} + +/** + * Create a TCP segment with prefilled header. + * + * Called by @ref tcp_write, @ref tcp_enqueue_flags and @ref tcp_split_unsent_seg + * + * @param pcb Protocol control block for the TCP connection. + * @param p pbuf that is used to hold the TCP header. + * @param hdrflags TCP flags for header. + * @param seqno TCP sequence number of this packet + * @param optflags options to include in TCP header + * @return a new tcp_seg pointing to p, or NULL. + * The TCP header is filled in except ackno and wnd. + * p is freed on failure. 
+ */ +static struct tcp_seg * +tcp_create_segment(const struct tcp_pcb *pcb, struct pbuf *p, u8_t hdrflags, u32_t seqno, u8_t optflags) +{ + struct tcp_seg *seg; + u8_t optlen; + + LWIP_ASSERT("tcp_create_segment: invalid pcb", pcb != NULL); + LWIP_ASSERT("tcp_create_segment: invalid pbuf", p != NULL); + + optlen = LWIP_TCP_OPT_LENGTH_SEGMENT(optflags, pcb); + + if ((seg = (struct tcp_seg *)memp_malloc(MEMP_TCP_SEG)) == NULL) { + LWIP_DEBUGF(TCP_OUTPUT_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("tcp_create_segment: no memory.\n")); + pbuf_free(p); + return NULL; + } + seg->flags = optflags; + seg->next = NULL; + seg->p = p; + LWIP_ASSERT("p->tot_len >= optlen", p->tot_len >= optlen); + seg->len = p->tot_len - optlen; +#if TCP_OVERSIZE_DBGCHECK + seg->oversize_left = 0; +#endif /* TCP_OVERSIZE_DBGCHECK */ +#if TCP_CHECKSUM_ON_COPY + seg->chksum = 0; + seg->chksum_swapped = 0; + /* check optflags */ + LWIP_ASSERT("invalid optflags passed: TF_SEG_DATA_CHECKSUMMED", + (optflags & TF_SEG_DATA_CHECKSUMMED) == 0); +#endif /* TCP_CHECKSUM_ON_COPY */ + + /* build TCP header */ + if (pbuf_add_header(p, TCP_HLEN)) { + LWIP_DEBUGF(TCP_OUTPUT_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("tcp_create_segment: no room for TCP header in pbuf.\n")); + TCP_STATS_INC(tcp.err); + tcp_seg_free(seg); + return NULL; + } + seg->tcphdr = (struct tcp_hdr *)seg->p->payload; + seg->tcphdr->src = lwip_htons(pcb->local_port); + seg->tcphdr->dest = lwip_htons(pcb->remote_port); + seg->tcphdr->seqno = lwip_htonl(seqno); + /* ackno is set in tcp_output */ + TCPH_HDRLEN_FLAGS_SET(seg->tcphdr, (5 + optlen / 4), hdrflags); + /* wnd and chksum are set in tcp_output */ + seg->tcphdr->urgp = 0; + return seg; +} + +/** + * Allocate a PBUF_RAM pbuf, perhaps with extra space at the end. + * + * This function is like pbuf_alloc(layer, length, PBUF_RAM) except + * there may be extra bytes available at the end. + * + * Called by @ref tcp_write + * + * @param layer flag to define header size. + * @param length size of the pbuf's payload. + * @param max_length maximum usable size of payload+oversize. + * @param oversize pointer to a u16_t that will receive the number of usable tail bytes. + * @param pcb The TCP connection that will enqueue the pbuf. + * @param apiflags API flags given to tcp_write. + * @param first_seg true when this pbuf will be used in the first enqueued segment. + */ +#if TCP_OVERSIZE +static struct pbuf * +tcp_pbuf_prealloc(pbuf_layer layer, u16_t length, u16_t max_length, + u16_t *oversize, const struct tcp_pcb *pcb, u8_t apiflags, + u8_t first_seg) +{ + struct pbuf *p; + u16_t alloc = length; + + LWIP_ASSERT("tcp_pbuf_prealloc: invalid oversize", oversize != NULL); + LWIP_ASSERT("tcp_pbuf_prealloc: invalid pcb", pcb != NULL); + +#if LWIP_NETIF_TX_SINGLE_PBUF + LWIP_UNUSED_ARG(max_length); + LWIP_UNUSED_ARG(pcb); + LWIP_UNUSED_ARG(apiflags); + LWIP_UNUSED_ARG(first_seg); + alloc = max_length; +#else /* LWIP_NETIF_TX_SINGLE_PBUF */ + if (length < max_length) { + /* Should we allocate an oversized pbuf, or just the minimum + * length required? If tcp_write is going to be called again + * before this segment is transmitted, we want the oversized + * buffer. If the segment will be transmitted immediately, we can + * save memory by allocating only length. We use a simple + * heuristic based on the following information: + * + * Did the user set TCP_WRITE_FLAG_MORE? + * + * Will the Nagle algorithm defer transmission of this segment? 
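
As a side note on tcp_create_segment above, the "(5 + optlen / 4)" expression is the TCP data-offset field, which counts 32-bit words, so a bare 20-byte header is 5 words. A standalone sketch of the same layout, assuming a raw byte buffer in place of lwIP's struct tcp_hdr and pbuf handling:

#include <stdint.h>
#include <string.h>
#include <stdio.h>

static void put16(uint8_t *p, uint16_t v) { p[0] = (uint8_t)(v >> 8); p[1] = (uint8_t)v; }
static void put32(uint8_t *p, uint32_t v) { put16(p, (uint16_t)(v >> 16)); put16(p + 2, (uint16_t)v); }

/* Serialize a bare TCP header (no options) into 20 big-endian bytes. */
static void build_tcp_header(uint8_t hdr[20], uint16_t src, uint16_t dst,
                             uint32_t seqno, uint32_t ackno,
                             uint8_t optlen, uint8_t flags, uint16_t wnd)
{
  memset(hdr, 0, 20);
  put16(hdr + 0, src);
  put16(hdr + 2, dst);
  put32(hdr + 4, seqno);
  put32(hdr + 8, ackno);
  hdr[12] = (uint8_t)((5 + optlen / 4) << 4);  /* data offset in 32-bit words */
  hdr[13] = flags;                             /* e.g. 0x18 = PSH|ACK */
  put16(hdr + 14, wnd);
  /* checksum (bytes 16..17) and urgent pointer (18..19) stay zero until output */
}

int main(void)
{
  uint8_t hdr[20];

  build_tcp_header(hdr, 49152, 80, 1000, 2000, 0, 0x18, 8192);
  printf("data offset = %u words, flags = 0x%02x\n",
         (unsigned)(hdr[12] >> 4), (unsigned)hdr[13]);
  return 0;
}
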
+ */ + if ((apiflags & TCP_WRITE_FLAG_MORE) || + (!(pcb->flags & TF_NODELAY) && + (!first_seg || + pcb->unsent != NULL || + pcb->unacked != NULL))) { + alloc = LWIP_MIN(max_length, LWIP_MEM_ALIGN_SIZE(TCP_OVERSIZE_CALC_LENGTH(length))); + } + } +#endif /* LWIP_NETIF_TX_SINGLE_PBUF */ + p = pbuf_alloc(layer, alloc, PBUF_RAM); + if (p == NULL) { + return NULL; + } + LWIP_ASSERT("need unchained pbuf", p->next == NULL); + *oversize = p->len - length; + /* trim p->len to the currently used size */ + p->len = p->tot_len = length; + return p; +} +#else /* TCP_OVERSIZE */ +#define tcp_pbuf_prealloc(layer, length, mx, os, pcb, api, fst) pbuf_alloc((layer), (length), PBUF_RAM) +#endif /* TCP_OVERSIZE */ + +#if TCP_CHECKSUM_ON_COPY +/** Add a checksum of newly added data to the segment. + * + * Called by tcp_write and tcp_split_unsent_seg. + */ +static void +tcp_seg_add_chksum(u16_t chksum, u16_t len, u16_t *seg_chksum, + u8_t *seg_chksum_swapped) +{ + u32_t helper; + /* add chksum to old chksum and fold to u16_t */ + helper = chksum + *seg_chksum; + chksum = FOLD_U32T(helper); + if ((len & 1) != 0) { + *seg_chksum_swapped = 1 - *seg_chksum_swapped; + chksum = SWAP_BYTES_IN_WORD(chksum); + } + *seg_chksum = chksum; +} +#endif /* TCP_CHECKSUM_ON_COPY */ + +/** Checks if tcp_write is allowed or not (checks state, snd_buf and snd_queuelen). + * + * @param pcb the tcp pcb to check for + * @param len length of data to send (checked agains snd_buf) + * @return ERR_OK if tcp_write is allowed to proceed, another err_t otherwise + */ +static err_t +tcp_write_checks(struct tcp_pcb *pcb, u16_t len) +{ + LWIP_ASSERT("tcp_write_checks: invalid pcb", pcb != NULL); + + /* connection is in invalid state for data transmission? */ + if ((pcb->state != ESTABLISHED) && + (pcb->state != CLOSE_WAIT) && + (pcb->state != SYN_SENT) && + (pcb->state != SYN_RCVD)) { + LWIP_DEBUGF(TCP_OUTPUT_DEBUG | LWIP_DBG_STATE | LWIP_DBG_LEVEL_SEVERE, ("tcp_write() called in invalid state\n")); + return ERR_CONN; + } else if (len == 0) { + return ERR_OK; + } + + /* fail on too much data */ + if (len > pcb->snd_buf) { + LWIP_DEBUGF(TCP_OUTPUT_DEBUG | LWIP_DBG_LEVEL_SEVERE, ("tcp_write: too much data (len=%"U16_F" > snd_buf=%"TCPWNDSIZE_F")\n", + len, pcb->snd_buf)); + tcp_set_flags(pcb, TF_NAGLEMEMERR); + return ERR_MEM; + } + + LWIP_DEBUGF(TCP_QLEN_DEBUG, ("tcp_write: queuelen: %"TCPWNDSIZE_F"\n", (tcpwnd_size_t)pcb->snd_queuelen)); + + /* If total number of pbufs on the unsent/unacked queues exceeds the + * configured maximum, return an error */ + /* check for configured max queuelen and possible overflow */ + if (pcb->snd_queuelen >= LWIP_MIN(TCP_SND_QUEUELEN, (TCP_SNDQUEUELEN_OVERFLOW + 1))) { + LWIP_DEBUGF(TCP_OUTPUT_DEBUG | LWIP_DBG_LEVEL_SEVERE, ("tcp_write: too long queue %"U16_F" (max %"U16_F")\n", + pcb->snd_queuelen, (u16_t)TCP_SND_QUEUELEN)); + TCP_STATS_INC(tcp.memerr); + tcp_set_flags(pcb, TF_NAGLEMEMERR); + return ERR_MEM; + } + if (pcb->snd_queuelen != 0) { + LWIP_ASSERT("tcp_write: pbufs on queue => at least one queue non-empty", + pcb->unacked != NULL || pcb->unsent != NULL); + } else { + LWIP_ASSERT("tcp_write: no pbufs on queue => both queues empty", + pcb->unacked == NULL && pcb->unsent == NULL); + } + return ERR_OK; +} + +/** + * @ingroup tcp_raw + * Write data for sending (but does not send it immediately). + * + * It waits in the expectation of more data being sent soon (as + * it can send them more efficiently by combining them together). 
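
The swap bookkeeping in tcp_seg_add_chksum above exists because the Internet checksum is a ones'-complement sum of 16-bit words: once a chunk with an odd byte count has been absorbed, every later byte sits in the opposite half of its 16-bit word, so partial sums over later chunks have to be byte-swapped before they are merged. A standalone sketch with flat byte buffers, reimplementing the fold and swap locally rather than using the lwIP macros:

#include <stdint.h>
#include <stdio.h>

#define SWAP16(x) ((uint16_t)((uint16_t)(x) >> 8 | (uint16_t)(x) << 8))

/* Ones'-complement sum of a buffer, folded to 16 bits (cf. FOLD_U32T). */
static uint16_t chksum_block(const uint8_t *data, size_t len)
{
  uint32_t acc = 0;
  size_t i;

  for (i = 0; i + 1 < len; i += 2) {
    acc += (uint32_t)(data[i] << 8 | data[i + 1]);
  }
  if (len & 1) {
    acc += (uint32_t)(data[len - 1] << 8);   /* odd trailing byte is the high half */
  }
  while (acc >> 16) {
    acc = (acc & 0xFFFF) + (acc >> 16);
  }
  return (uint16_t)acc;
}

/* Merge one chunk into a running sum, tracking whether the total length so far
   is odd, which is the role chksum_swapped plays in the code above. */
static void chksum_add_chunk(uint16_t *sum, int *odd_so_far,
                             const uint8_t *data, size_t len)
{
  uint16_t part = chksum_block(data, len);
  uint32_t acc;

  if (*odd_so_far) {
    part = SWAP16(part);     /* chunk starts at an odd offset: flip byte lanes */
  }
  acc = (uint32_t)*sum + part;
  *sum = (uint16_t)((acc & 0xFFFF) + (acc >> 16));
  if (len & 1) {
    *odd_so_far = !*odd_so_far;
  }
}

int main(void)
{
  const uint8_t payload[] = "incremental checksums";   /* 21 bytes of data */
  uint16_t whole = chksum_block(payload, 21);
  uint16_t merged = 0;
  int odd = 0;

  chksum_add_chunk(&merged, &odd, payload, 7);          /* odd-length first chunk */
  chksum_add_chunk(&merged, &odd, payload + 7, 14);
  printf("whole=0x%04x merged=0x%04x\n", whole, merged);  /* the two values match */
  return 0;
}
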
+ * To prompt the system to send data now, call tcp_output() after + * calling tcp_write(). + * + * This function enqueues the data pointed to by the argument dataptr. The length of + * the data is passed as the len parameter. The apiflags can be one or more of: + * - TCP_WRITE_FLAG_COPY: indicates whether the new memory should be allocated + * for the data to be copied into. If this flag is not given, no new memory + * should be allocated and the data should only be referenced by pointer. This + * also means that the memory behind dataptr must not change until the data is + * ACKed by the remote host + * - TCP_WRITE_FLAG_MORE: indicates that more data follows. If this is omitted, + * the PSH flag is set in the last segment created by this call to tcp_write. + * If this flag is given, the PSH flag is not set. + * + * The tcp_write() function will fail and return ERR_MEM if the length + * of the data exceeds the current send buffer size or if the length of + * the queue of outgoing segment is larger than the upper limit defined + * in lwipopts.h. The number of bytes available in the output queue can + * be retrieved with the tcp_sndbuf() function. + * + * The proper way to use this function is to call the function with at + * most tcp_sndbuf() bytes of data. If the function returns ERR_MEM, + * the application should wait until some of the currently enqueued + * data has been successfully received by the other host and try again. + * + * @param pcb Protocol control block for the TCP connection to enqueue data for. + * @param arg Pointer to the data to be enqueued for sending. + * @param len Data length in bytes + * @param apiflags combination of following flags : + * - TCP_WRITE_FLAG_COPY (0x01) data will be copied into memory belonging to the stack + * - TCP_WRITE_FLAG_MORE (0x02) for TCP connection, PSH flag will not be set on last segment sent, + * @return ERR_OK if enqueued, another err_t on error + */ +err_t +tcp_write(struct tcp_pcb *pcb, const void *arg, u16_t len, u8_t apiflags) +{ + struct pbuf *concat_p = NULL; + struct tcp_seg *last_unsent = NULL, *seg = NULL, *prev_seg = NULL, *queue = NULL; + u16_t pos = 0; /* position in 'arg' data */ + u16_t queuelen; + u8_t optlen; + u8_t optflags = 0; +#if TCP_OVERSIZE + u16_t oversize = 0; + u16_t oversize_used = 0; +#if TCP_OVERSIZE_DBGCHECK + u16_t oversize_add = 0; +#endif /* TCP_OVERSIZE_DBGCHECK*/ +#endif /* TCP_OVERSIZE */ + u16_t extendlen = 0; +#if TCP_CHECKSUM_ON_COPY + u16_t concat_chksum = 0; + u8_t concat_chksum_swapped = 0; + u16_t concat_chksummed = 0; +#endif /* TCP_CHECKSUM_ON_COPY */ + err_t err; + u16_t mss_local; + + LWIP_ERROR("tcp_write: invalid pcb", pcb != NULL, return ERR_ARG); + + /* don't allocate segments bigger than half the maximum window we ever received */ + mss_local = LWIP_MIN(pcb->mss, TCPWND_MIN16(pcb->snd_wnd_max / 2)); + mss_local = mss_local ? 
mss_local : pcb->mss; + + LWIP_ASSERT_CORE_LOCKED(); + +#if LWIP_NETIF_TX_SINGLE_PBUF + /* Always copy to try to create single pbufs for TX */ + apiflags |= TCP_WRITE_FLAG_COPY; +#endif /* LWIP_NETIF_TX_SINGLE_PBUF */ + + LWIP_DEBUGF(TCP_OUTPUT_DEBUG, ("tcp_write(pcb=%p, data=%p, len=%"U16_F", apiflags=%"U16_F")\n", + (void *)pcb, arg, len, (u16_t)apiflags)); + LWIP_ERROR("tcp_write: arg == NULL (programmer violates API)", + arg != NULL, return ERR_ARG;); + + err = tcp_write_checks(pcb, len); + if (err != ERR_OK) { + return err; + } + queuelen = pcb->snd_queuelen; + +#if LWIP_TCP_TIMESTAMPS + if ((pcb->flags & TF_TIMESTAMP)) { + /* Make sure the timestamp option is only included in data segments if we + agreed about it with the remote host. */ + optflags = TF_SEG_OPTS_TS; + optlen = LWIP_TCP_OPT_LENGTH_SEGMENT(TF_SEG_OPTS_TS, pcb); + /* ensure that segments can hold at least one data byte... */ + mss_local = LWIP_MAX(mss_local, LWIP_TCP_OPT_LEN_TS + 1); + } else +#endif /* LWIP_TCP_TIMESTAMPS */ + { + optlen = LWIP_TCP_OPT_LENGTH_SEGMENT(0, pcb); + } + + + /* + * TCP segmentation is done in three phases with increasing complexity: + * + * 1. Copy data directly into an oversized pbuf. + * 2. Chain a new pbuf to the end of pcb->unsent. + * 3. Create new segments. + * + * We may run out of memory at any point. In that case we must + * return ERR_MEM and not change anything in pcb. Therefore, all + * changes are recorded in local variables and committed at the end + * of the function. Some pcb fields are maintained in local copies: + * + * queuelen = pcb->snd_queuelen + * oversize = pcb->unsent_oversize + * + * These variables are set consistently by the phases: + * + * seg points to the last segment tampered with. + * + * pos records progress as data is segmented. + */ + + /* Find the tail of the unsent queue. */ + if (pcb->unsent != NULL) { + u16_t space; + u16_t unsent_optlen; + + /* @todo: this could be sped up by keeping last_unsent in the pcb */ + for (last_unsent = pcb->unsent; last_unsent->next != NULL; + last_unsent = last_unsent->next); + + /* Usable space at the end of the last unsent segment */ + unsent_optlen = LWIP_TCP_OPT_LENGTH_SEGMENT(last_unsent->flags, pcb); + LWIP_ASSERT("mss_local is too small", mss_local >= last_unsent->len + unsent_optlen); + space = mss_local - (last_unsent->len + unsent_optlen); + + /* + * Phase 1: Copy data directly into an oversized pbuf. + * + * The number of bytes copied is recorded in the oversize_used + * variable. The actual copying is done at the bottom of the + * function. + */ +#if TCP_OVERSIZE +#if TCP_OVERSIZE_DBGCHECK + /* check that pcb->unsent_oversize matches last_unsent->oversize_left */ + LWIP_ASSERT("unsent_oversize mismatch (pcb vs. last_unsent)", + pcb->unsent_oversize == last_unsent->oversize_left); +#endif /* TCP_OVERSIZE_DBGCHECK */ + oversize = pcb->unsent_oversize; + if (oversize > 0) { + LWIP_ASSERT("inconsistent oversize vs. space", oversize <= space); + seg = last_unsent; + oversize_used = LWIP_MIN(space, LWIP_MIN(oversize, len)); + pos += oversize_used; + oversize -= oversize_used; + space -= oversize_used; + } + /* now we are either finished or oversize is zero */ + LWIP_ASSERT("inconsistent oversize vs. len", (oversize == 0) || (pos == len)); +#endif /* TCP_OVERSIZE */ + +#if !LWIP_NETIF_TX_SINGLE_PBUF + /* + * Phase 2: Chain a new pbuf to the end of pcb->unsent. 
+ * + * As an exception when NOT copying the data, if the given data buffer + * directly follows the last unsent data buffer in memory, extend the last + * ROM pbuf reference to the buffer, thus saving a ROM pbuf allocation. + * + * We don't extend segments containing SYN/FIN flags or options + * (len==0). The new pbuf is kept in concat_p and pbuf_cat'ed at + * the end. + * + * This phase is skipped for LWIP_NETIF_TX_SINGLE_PBUF as we could only execute + * it after rexmit puts a segment from unacked to unsent and at this point, + * oversize info is lost. + */ + if ((pos < len) && (space > 0) && (last_unsent->len > 0)) { + u16_t seglen = LWIP_MIN(space, len - pos); + seg = last_unsent; + + /* Create a pbuf with a copy or reference to seglen bytes. We + * can use PBUF_RAW here since the data appears in the middle of + * a segment. A header will never be prepended. */ + if (apiflags & TCP_WRITE_FLAG_COPY) { + /* Data is copied */ + if ((concat_p = tcp_pbuf_prealloc(PBUF_RAW, seglen, space, &oversize, pcb, apiflags, 1)) == NULL) { + LWIP_DEBUGF(TCP_OUTPUT_DEBUG | LWIP_DBG_LEVEL_SERIOUS, + ("tcp_write : could not allocate memory for pbuf copy size %"U16_F"\n", + seglen)); + goto memerr; + } +#if TCP_OVERSIZE_DBGCHECK + oversize_add = oversize; +#endif /* TCP_OVERSIZE_DBGCHECK */ + TCP_DATA_COPY2(concat_p->payload, (const u8_t *)arg + pos, seglen, &concat_chksum, &concat_chksum_swapped); +#if TCP_CHECKSUM_ON_COPY + concat_chksummed += seglen; +#endif /* TCP_CHECKSUM_ON_COPY */ + queuelen += pbuf_clen(concat_p); + } else { + /* Data is not copied */ + /* If the last unsent pbuf is of type PBUF_ROM, try to extend it. */ + struct pbuf *p; + for (p = last_unsent->p; p->next != NULL; p = p->next); + if (((p->type_internal & (PBUF_TYPE_FLAG_STRUCT_DATA_CONTIGUOUS | PBUF_TYPE_FLAG_DATA_VOLATILE)) == 0) && + (const u8_t *)p->payload + p->len == (const u8_t *)arg) { + LWIP_ASSERT("tcp_write: ROM pbufs cannot be oversized", pos == 0); + extendlen = seglen; + } else { + if ((concat_p = pbuf_alloc(PBUF_RAW, seglen, PBUF_ROM)) == NULL) { + LWIP_DEBUGF(TCP_OUTPUT_DEBUG | LWIP_DBG_LEVEL_SERIOUS, + ("tcp_write: could not allocate memory for zero-copy pbuf\n")); + goto memerr; + } + /* reference the non-volatile payload data */ + ((struct pbuf_rom *)concat_p)->payload = (const u8_t *)arg + pos; + queuelen += pbuf_clen(concat_p); + } +#if TCP_CHECKSUM_ON_COPY + /* calculate the checksum of nocopy-data */ + tcp_seg_add_chksum(~inet_chksum((const u8_t *)arg + pos, seglen), seglen, + &concat_chksum, &concat_chksum_swapped); + concat_chksummed += seglen; +#endif /* TCP_CHECKSUM_ON_COPY */ + } + + pos += seglen; + } +#endif /* !LWIP_NETIF_TX_SINGLE_PBUF */ + } else { +#if TCP_OVERSIZE + LWIP_ASSERT("unsent_oversize mismatch (pcb->unsent is NULL)", + pcb->unsent_oversize == 0); +#endif /* TCP_OVERSIZE */ + } + + /* + * Phase 3: Create new segments. + * + * The new segments are chained together in the local 'queue' + * variable, ready to be appended to pcb->unsent. 
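
The reason new segments are collected on the local 'queue' list instead of being appended to pcb->unsent one at a time is the all-or-nothing rule stated earlier: if memory runs out, tcp_write must return ERR_MEM without having changed the pcb. The same pattern reduced to a sketch, with a hypothetical struct seg and plain malloc standing in for memp_malloc:

#include <stdlib.h>

struct seg { struct seg *next; int len; };   /* hypothetical stand-in for struct tcp_seg */

/* Build 'count' segments on a private list first; only when every allocation has
   succeeded is the whole chain spliced onto the caller's list in one assignment,
   so a mid-way failure leaves that list untouched. */
static int enqueue_all_or_nothing(struct seg **splice_point, int count)
{
  struct seg *head = NULL, *tail = NULL;
  int i;

  for (i = 0; i < count; i++) {
    struct seg *s = (struct seg *)malloc(sizeof(*s));
    if (s == NULL) {
      while (head != NULL) {                 /* roll back the private chain */
        struct seg *next = head->next;
        free(head);
        head = next;
      }
      return -1;                             /* caller's list never changed */
    }
    s->next = NULL;
    s->len = i;
    if (tail == NULL) {
      head = s;
    } else {
      tail->next = s;
    }
    tail = s;
  }
  if (head != NULL) {
    *splice_point = head;                    /* single commit point */
  }
  return 0;
}

int main(void)
{
  struct seg *unsent = NULL;
  int rc = enqueue_all_or_nothing(&unsent, 3);

  while (unsent != NULL) {
    struct seg *next = unsent->next;
    free(unsent);
    unsent = next;
  }
  return rc == 0 ? 0 : 1;
}
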
+ */ + while (pos < len) { + struct pbuf *p; + u16_t left = len - pos; + u16_t max_len = mss_local - optlen; + u16_t seglen = LWIP_MIN(left, max_len); +#if TCP_CHECKSUM_ON_COPY + u16_t chksum = 0; + u8_t chksum_swapped = 0; +#endif /* TCP_CHECKSUM_ON_COPY */ + + if (apiflags & TCP_WRITE_FLAG_COPY) { + /* If copy is set, memory should be allocated and data copied + * into pbuf */ + if ((p = tcp_pbuf_prealloc(PBUF_TRANSPORT, seglen + optlen, mss_local, &oversize, pcb, apiflags, queue == NULL)) == NULL) { + LWIP_DEBUGF(TCP_OUTPUT_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("tcp_write : could not allocate memory for pbuf copy size %"U16_F"\n", seglen)); + goto memerr; + } + LWIP_ASSERT("tcp_write: check that first pbuf can hold the complete seglen", + (p->len >= seglen)); + TCP_DATA_COPY2((char *)p->payload + optlen, (const u8_t *)arg + pos, seglen, &chksum, &chksum_swapped); + } else { + /* Copy is not set: First allocate a pbuf for holding the data. + * Since the referenced data is available at least until it is + * sent out on the link (as it has to be ACKed by the remote + * party) we can safely use PBUF_ROM instead of PBUF_REF here. + */ + struct pbuf *p2; +#if TCP_OVERSIZE + LWIP_ASSERT("oversize == 0", oversize == 0); +#endif /* TCP_OVERSIZE */ + if ((p2 = pbuf_alloc(PBUF_TRANSPORT, seglen, PBUF_ROM)) == NULL) { + LWIP_DEBUGF(TCP_OUTPUT_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("tcp_write: could not allocate memory for zero-copy pbuf\n")); + goto memerr; + } +#if TCP_CHECKSUM_ON_COPY + /* calculate the checksum of nocopy-data */ + chksum = ~inet_chksum((const u8_t *)arg + pos, seglen); + if (seglen & 1) { + chksum_swapped = 1; + chksum = SWAP_BYTES_IN_WORD(chksum); + } +#endif /* TCP_CHECKSUM_ON_COPY */ + /* reference the non-volatile payload data */ + ((struct pbuf_rom *)p2)->payload = (const u8_t *)arg + pos; + + /* Second, allocate a pbuf for the headers. */ + if ((p = pbuf_alloc(PBUF_TRANSPORT, optlen, PBUF_RAM)) == NULL) { + /* If allocation fails, we have to deallocate the data pbuf as + * well. */ + pbuf_free(p2); + LWIP_DEBUGF(TCP_OUTPUT_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("tcp_write: could not allocate memory for header pbuf\n")); + goto memerr; + } + /* Concatenate the headers and data pbufs together. */ + pbuf_cat(p/*header*/, p2/*data*/); + } + + queuelen += pbuf_clen(p); + + /* Now that there are more segments queued, we check again if the + * length of the queue exceeds the configured maximum or + * overflows. */ + if (queuelen > LWIP_MIN(TCP_SND_QUEUELEN, TCP_SNDQUEUELEN_OVERFLOW)) { + LWIP_DEBUGF(TCP_OUTPUT_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("tcp_write: queue too long %"U16_F" (%d)\n", + queuelen, (int)TCP_SND_QUEUELEN)); + pbuf_free(p); + goto memerr; + } + + if ((seg = tcp_create_segment(pcb, p, 0, pcb->snd_lbb + pos, optflags)) == NULL) { + goto memerr; + } +#if TCP_OVERSIZE_DBGCHECK + seg->oversize_left = oversize; +#endif /* TCP_OVERSIZE_DBGCHECK */ +#if TCP_CHECKSUM_ON_COPY + seg->chksum = chksum; + seg->chksum_swapped = chksum_swapped; + seg->flags |= TF_SEG_DATA_CHECKSUMMED; +#endif /* TCP_CHECKSUM_ON_COPY */ + + /* first segment of to-be-queued data? 
*/ + if (queue == NULL) { + queue = seg; + } else { + /* Attach the segment to the end of the queued segments */ + LWIP_ASSERT("prev_seg != NULL", prev_seg != NULL); + prev_seg->next = seg; + } + /* remember last segment of to-be-queued data for next iteration */ + prev_seg = seg; + + LWIP_DEBUGF(TCP_OUTPUT_DEBUG | LWIP_DBG_TRACE, ("tcp_write: queueing %"U32_F":%"U32_F"\n", + lwip_ntohl(seg->tcphdr->seqno), + lwip_ntohl(seg->tcphdr->seqno) + TCP_TCPLEN(seg))); + + pos += seglen; + } + + /* + * All three segmentation phases were successful. We can commit the + * transaction. + */ +#if TCP_OVERSIZE_DBGCHECK + if ((last_unsent != NULL) && (oversize_add != 0)) { + last_unsent->oversize_left += oversize_add; + } +#endif /* TCP_OVERSIZE_DBGCHECK */ + + /* + * Phase 1: If data has been added to the preallocated tail of + * last_unsent, we update the length fields of the pbuf chain. + */ +#if TCP_OVERSIZE + if (oversize_used > 0) { + struct pbuf *p; + /* Bump tot_len of whole chain, len of tail */ + for (p = last_unsent->p; p; p = p->next) { + p->tot_len += oversize_used; + if (p->next == NULL) { + TCP_DATA_COPY((char *)p->payload + p->len, arg, oversize_used, last_unsent); + p->len += oversize_used; + } + } + last_unsent->len += oversize_used; +#if TCP_OVERSIZE_DBGCHECK + LWIP_ASSERT("last_unsent->oversize_left >= oversize_used", + last_unsent->oversize_left >= oversize_used); + last_unsent->oversize_left -= oversize_used; +#endif /* TCP_OVERSIZE_DBGCHECK */ + } + pcb->unsent_oversize = oversize; +#endif /* TCP_OVERSIZE */ + + /* + * Phase 2: concat_p can be concatenated onto last_unsent->p, unless we + * determined that the last ROM pbuf can be extended to include the new data. + */ + if (concat_p != NULL) { + LWIP_ASSERT("tcp_write: cannot concatenate when pcb->unsent is empty", + (last_unsent != NULL)); + pbuf_cat(last_unsent->p, concat_p); + last_unsent->len += concat_p->tot_len; + } else if (extendlen > 0) { + struct pbuf *p; + LWIP_ASSERT("tcp_write: extension of reference requires reference", + last_unsent != NULL && last_unsent->p != NULL); + for (p = last_unsent->p; p->next != NULL; p = p->next) { + p->tot_len += extendlen; + } + p->tot_len += extendlen; + p->len += extendlen; + last_unsent->len += extendlen; + } + +#if TCP_CHECKSUM_ON_COPY + if (concat_chksummed) { + LWIP_ASSERT("tcp_write: concat checksum needs concatenated data", + concat_p != NULL || extendlen > 0); + /*if concat checksumm swapped - swap it back */ + if (concat_chksum_swapped) { + concat_chksum = SWAP_BYTES_IN_WORD(concat_chksum); + } + tcp_seg_add_chksum(concat_chksum, concat_chksummed, &last_unsent->chksum, + &last_unsent->chksum_swapped); + last_unsent->flags |= TF_SEG_DATA_CHECKSUMMED; + } +#endif /* TCP_CHECKSUM_ON_COPY */ + + /* + * Phase 3: Append queue to pcb->unsent. Queue may be NULL, but that + * is harmless + */ + if (last_unsent == NULL) { + pcb->unsent = queue; + } else { + last_unsent->next = queue; + } + + /* + * Finally update the pcb state. + */ + pcb->snd_lbb += len; + pcb->snd_buf -= len; + pcb->snd_queuelen = queuelen; + + LWIP_DEBUGF(TCP_QLEN_DEBUG, ("tcp_write: %"S16_F" (after enqueued)\n", + pcb->snd_queuelen)); + if (pcb->snd_queuelen != 0) { + LWIP_ASSERT("tcp_write: valid queue length", + pcb->unacked != NULL || pcb->unsent != NULL); + } + + /* Set the PSH flag in the last segment that we enqueued. 
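
Seen from the application side, the contract spelled out in tcp_write's documentation above comes down to: offer at most tcp_sndbuf() bytes, treat ERR_MEM as a request to retry once ACKs have freed queue space, and call tcp_output() when the data should actually go out. A minimal raw-API sketch along those lines (send_some is a hypothetical helper and must be called from the lwIP/tcpip context):

#include "lwip/tcp.h"   /* raw API: tcp_sndbuf(), tcp_write(), tcp_output() */

/* Offer at most tcp_sndbuf() bytes, treat errors as "retry later", and push the
   data out explicitly. Returns how many bytes the stack accepted. */
static u16_t send_some(struct tcp_pcb *pcb, const void *buf, u16_t len)
{
  tcpwnd_size_t room = tcp_sndbuf(pcb);      /* space left in the send buffer */
  u16_t chunk = (u16_t)((len < room) ? len : room);
  err_t err;

  if (chunk == 0) {
    return 0;                 /* wait for the tcp_sent() callback to free space */
  }
  err = tcp_write(pcb, buf, chunk, TCP_WRITE_FLAG_COPY);
  if (err != ERR_OK) {
    return 0;                 /* ERR_MEM: queue or send buffer full, retry later */
  }
  tcp_output(pcb);            /* no more data coming right now: send it */
  return chunk;
}
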
*/ + if (seg != NULL && seg->tcphdr != NULL && ((apiflags & TCP_WRITE_FLAG_MORE) == 0)) { + TCPH_SET_FLAG(seg->tcphdr, TCP_PSH); + } + + return ERR_OK; +memerr: + tcp_set_flags(pcb, TF_NAGLEMEMERR); + TCP_STATS_INC(tcp.memerr); + + if (concat_p != NULL) { + pbuf_free(concat_p); + } + if (queue != NULL) { + tcp_segs_free(queue); + } + if (pcb->snd_queuelen != 0) { + LWIP_ASSERT("tcp_write: valid queue length", pcb->unacked != NULL || + pcb->unsent != NULL); + } + LWIP_DEBUGF(TCP_QLEN_DEBUG | LWIP_DBG_STATE, ("tcp_write: %"S16_F" (with mem err)\n", pcb->snd_queuelen)); + return ERR_MEM; +} + +/** + * Split segment on the head of the unsent queue. If return is not + * ERR_OK, existing head remains intact + * + * The split is accomplished by creating a new TCP segment and pbuf + * which holds the remainder payload after the split. The original + * pbuf is trimmed to new length. This allows splitting of read-only + * pbufs + * + * @param pcb the tcp_pcb for which to split the unsent head + * @param split the amount of payload to remain in the head + */ +err_t +tcp_split_unsent_seg(struct tcp_pcb *pcb, u16_t split) +{ + struct tcp_seg *seg = NULL, *useg = NULL; + struct pbuf *p = NULL; + u8_t optlen; + u8_t optflags; + u8_t split_flags; + u8_t remainder_flags; + u16_t remainder; + u16_t offset; +#if TCP_CHECKSUM_ON_COPY + u16_t chksum = 0; + u8_t chksum_swapped = 0; + struct pbuf *q; +#endif /* TCP_CHECKSUM_ON_COPY */ + + LWIP_ASSERT("tcp_split_unsent_seg: invalid pcb", pcb != NULL); + + useg = pcb->unsent; + if (useg == NULL) { + return ERR_MEM; + } + + if (split == 0) { + LWIP_ASSERT("Can't split segment into length 0", 0); + return ERR_VAL; + } + + if (useg->len <= split) { + return ERR_OK; + } + + LWIP_ASSERT("split <= mss", split <= pcb->mss); + LWIP_ASSERT("useg->len > 0", useg->len > 0); + + /* We should check that we don't exceed TCP_SND_QUEUELEN but we need + * to split this packet so we may actually exceed the max value by + * one! 
+ */ + LWIP_DEBUGF(TCP_QLEN_DEBUG, ("tcp_enqueue: split_unsent_seg: %u\n", (unsigned int)pcb->snd_queuelen)); + + optflags = useg->flags; +#if TCP_CHECKSUM_ON_COPY + /* Remove since checksum is not stored until after tcp_create_segment() */ + optflags &= ~TF_SEG_DATA_CHECKSUMMED; +#endif /* TCP_CHECKSUM_ON_COPY */ + optlen = LWIP_TCP_OPT_LENGTH(optflags); + remainder = useg->len - split; + + /* Create new pbuf for the remainder of the split */ + p = pbuf_alloc(PBUF_TRANSPORT, remainder + optlen, PBUF_RAM); + if (p == NULL) { + LWIP_DEBUGF(TCP_OUTPUT_DEBUG | LWIP_DBG_LEVEL_SERIOUS, + ("tcp_split_unsent_seg: could not allocate memory for pbuf remainder %u\n", remainder)); + goto memerr; + } + + /* Offset into the original pbuf is past TCP/IP headers, options, and split amount */ + offset = useg->p->tot_len - useg->len + split; + /* Copy remainder into new pbuf, headers and options will not be filled out */ + if (pbuf_copy_partial(useg->p, (u8_t *)p->payload + optlen, remainder, offset ) != remainder) { + LWIP_DEBUGF(TCP_OUTPUT_DEBUG | LWIP_DBG_LEVEL_SERIOUS, + ("tcp_split_unsent_seg: could not copy pbuf remainder %u\n", remainder)); + goto memerr; + } +#if TCP_CHECKSUM_ON_COPY + /* calculate the checksum on remainder data */ + tcp_seg_add_chksum(~inet_chksum((const u8_t *)p->payload + optlen, remainder), remainder, + &chksum, &chksum_swapped); +#endif /* TCP_CHECKSUM_ON_COPY */ + + /* Options are created when calling tcp_output() */ + + /* Migrate flags from original segment */ + split_flags = TCPH_FLAGS(useg->tcphdr); + remainder_flags = 0; /* ACK added in tcp_output() */ + + if (split_flags & TCP_PSH) { + split_flags &= ~TCP_PSH; + remainder_flags |= TCP_PSH; + } + if (split_flags & TCP_FIN) { + split_flags &= ~TCP_FIN; + remainder_flags |= TCP_FIN; + } + /* SYN should be left on split, RST should not be present with data */ + + seg = tcp_create_segment(pcb, p, remainder_flags, lwip_ntohl(useg->tcphdr->seqno) + split, optflags); + if (seg == NULL) { + LWIP_DEBUGF(TCP_OUTPUT_DEBUG | LWIP_DBG_LEVEL_SERIOUS, + ("tcp_split_unsent_seg: could not create new TCP segment\n")); + goto memerr; + } + +#if TCP_CHECKSUM_ON_COPY + seg->chksum = chksum; + seg->chksum_swapped = chksum_swapped; + seg->flags |= TF_SEG_DATA_CHECKSUMMED; +#endif /* TCP_CHECKSUM_ON_COPY */ + + /* Remove this segment from the queue since trimming it may free pbufs */ + pcb->snd_queuelen -= pbuf_clen(useg->p); + + /* Trim the original pbuf into our split size. At this point our remainder segment must be setup + successfully because we are modifying the original segment */ + pbuf_realloc(useg->p, useg->p->tot_len - remainder); + useg->len -= remainder; + TCPH_SET_FLAG(useg->tcphdr, split_flags); +#if TCP_OVERSIZE_DBGCHECK + /* By trimming, realloc may have actually shrunk the pbuf, so clear oversize_left */ + useg->oversize_left = 0; +#endif /* TCP_OVERSIZE_DBGCHECK */ + + /* Add back to the queue with new trimmed pbuf */ + pcb->snd_queuelen += pbuf_clen(useg->p); + +#if TCP_CHECKSUM_ON_COPY + /* The checksum on the split segment is now incorrect. 
We need to re-run it over the split */ + useg->chksum = 0; + useg->chksum_swapped = 0; + q = useg->p; + offset = q->tot_len - useg->len; /* Offset due to exposed headers */ + + /* Advance to the pbuf where the offset ends */ + while (q != NULL && offset > q->len) { + offset -= q->len; + q = q->next; + } + LWIP_ASSERT("Found start of payload pbuf", q != NULL); + /* Checksum the first payload pbuf accounting for offset, then other pbufs are all payload */ + for (; q != NULL; offset = 0, q = q->next) { + tcp_seg_add_chksum(~inet_chksum((const u8_t *)q->payload + offset, q->len - offset), q->len - offset, + &useg->chksum, &useg->chksum_swapped); + } +#endif /* TCP_CHECKSUM_ON_COPY */ + + /* Update number of segments on the queues. Note that length now may + * exceed TCP_SND_QUEUELEN! We don't have to touch pcb->snd_buf + * because the total amount of data is constant when packet is split */ + pcb->snd_queuelen += pbuf_clen(seg->p); + + /* Finally insert remainder into queue after split (which stays head) */ + seg->next = useg->next; + useg->next = seg; + +#if TCP_OVERSIZE + /* If remainder is last segment on the unsent, ensure we clear the oversize amount + * because the remainder is always sized to the exact remaining amount */ + if (seg->next == NULL) { + pcb->unsent_oversize = 0; + } +#endif /* TCP_OVERSIZE */ + + return ERR_OK; +memerr: + TCP_STATS_INC(tcp.memerr); + + LWIP_ASSERT("seg == NULL", seg == NULL); + if (p != NULL) { + pbuf_free(p); + } + + return ERR_MEM; +} + +/** + * Called by tcp_close() to send a segment including FIN flag but not data. + * This FIN may be added to an existing segment or a new, otherwise empty + * segment is enqueued. + * + * @param pcb the tcp_pcb over which to send a segment + * @return ERR_OK if sent, another err_t otherwise + */ +err_t +tcp_send_fin(struct tcp_pcb *pcb) +{ + LWIP_ASSERT("tcp_send_fin: invalid pcb", pcb != NULL); + + /* first, try to add the fin to the last unsent segment */ + if (pcb->unsent != NULL) { + struct tcp_seg *last_unsent; + for (last_unsent = pcb->unsent; last_unsent->next != NULL; + last_unsent = last_unsent->next); + + if ((TCPH_FLAGS(last_unsent->tcphdr) & (TCP_SYN | TCP_FIN | TCP_RST)) == 0) { + /* no SYN/FIN/RST flag in the header, we can add the FIN flag */ + TCPH_SET_FLAG(last_unsent->tcphdr, TCP_FIN); + tcp_set_flags(pcb, TF_FIN); + return ERR_OK; + } + } + /* no data, no length, flags, copy=1, no optdata */ + return tcp_enqueue_flags(pcb, TCP_FIN); +} + +/** + * Enqueue SYN or FIN for transmission. + * + * Called by @ref tcp_connect, tcp_listen_input, and @ref tcp_close + * (via @ref tcp_send_fin) + * + * @param pcb Protocol control block for the TCP connection. + * @param flags TCP header flags to set in the outgoing segment. + */ +err_t +tcp_enqueue_flags(struct tcp_pcb *pcb, u8_t flags) +{ + struct pbuf *p; + struct tcp_seg *seg; + u8_t optflags = 0; + u8_t optlen = 0; + + LWIP_DEBUGF(TCP_QLEN_DEBUG, ("tcp_enqueue_flags: queuelen: %"U16_F"\n", (u16_t)pcb->snd_queuelen)); + + LWIP_ASSERT("tcp_enqueue_flags: need either TCP_SYN or TCP_FIN in flags (programmer violates API)", + (flags & (TCP_SYN | TCP_FIN)) != 0); + LWIP_ASSERT("tcp_enqueue_flags: invalid pcb", pcb != NULL); + + /* No need to check pcb->snd_queuelen if only SYN or FIN are allowed! */ + + /* Get options for this segment. This is a special case since this is the + only place where a SYN can be sent. 
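
This tcp_enqueue_flags path is what queues the SYN when an application performs an active open through the raw API. A minimal sketch of that path, assuming an IPv4 build, a placeholder peer address and port, and again the lwIP/tcpip thread context:

#include "lwip/tcp.h"
#include "lwip/ip_addr.h"

/* Runs once the handshake started by the SYN queued via tcp_enqueue_flags()
   has completed. */
static err_t on_connected(void *arg, struct tcp_pcb *tpcb, err_t err)
{
  LWIP_UNUSED_ARG(arg);
  LWIP_UNUSED_ARG(tpcb);
  return err;   /* real code would start writing data here */
}

/* Minimal active open: tcp_connect() picks a local port, then the SYN segment
   (MSS and, if enabled, window scale / SACK-permitted / timestamp options) is
   built and placed on the unsent queue for tcp_output(). */
static err_t start_client(void)
{
  struct tcp_pcb *pcb = tcp_new();
  ip_addr_t peer;

  if (pcb == NULL) {
    return ERR_MEM;
  }
  IP_ADDR4(&peer, 192, 168, 1, 10);          /* placeholder peer address */
  return tcp_connect(pcb, &peer, 7000, on_connected);
}
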
*/ + if (flags & TCP_SYN) { + optflags = TF_SEG_OPTS_MSS; +#if LWIP_WND_SCALE + if ((pcb->state != SYN_RCVD) || (pcb->flags & TF_WND_SCALE)) { + /* In a (sent in state SYN_RCVD), the window scale option may only + be sent if we received a window scale option from the remote host. */ + optflags |= TF_SEG_OPTS_WND_SCALE; + } +#endif /* LWIP_WND_SCALE */ +#if LWIP_TCP_SACK_OUT + if ((pcb->state != SYN_RCVD) || (pcb->flags & TF_SACK)) { + /* In a (sent in state SYN_RCVD), the SACK_PERM option may only + be sent if we received a SACK_PERM option from the remote host. */ + optflags |= TF_SEG_OPTS_SACK_PERM; + } +#endif /* LWIP_TCP_SACK_OUT */ + } +#if LWIP_TCP_TIMESTAMPS + if ((pcb->flags & TF_TIMESTAMP) || ((flags & TCP_SYN) && (pcb->state != SYN_RCVD))) { + /* Make sure the timestamp option is only included in data segments if we + agreed about it with the remote host (and in active open SYN segments). */ + optflags |= TF_SEG_OPTS_TS; + } +#endif /* LWIP_TCP_TIMESTAMPS */ + optlen = LWIP_TCP_OPT_LENGTH_SEGMENT(optflags, pcb); + + /* Allocate pbuf with room for TCP header + options */ + if ((p = pbuf_alloc(PBUF_TRANSPORT, optlen, PBUF_RAM)) == NULL) { + tcp_set_flags(pcb, TF_NAGLEMEMERR); + TCP_STATS_INC(tcp.memerr); + return ERR_MEM; + } + LWIP_ASSERT("tcp_enqueue_flags: check that first pbuf can hold optlen", + (p->len >= optlen)); + + /* Allocate memory for tcp_seg, and fill in fields. */ + if ((seg = tcp_create_segment(pcb, p, flags, pcb->snd_lbb, optflags)) == NULL) { + tcp_set_flags(pcb, TF_NAGLEMEMERR); + TCP_STATS_INC(tcp.memerr); + return ERR_MEM; + } + LWIP_ASSERT("seg->tcphdr not aligned", ((mem_ptr_t)seg->tcphdr % LWIP_MIN(MEM_ALIGNMENT, 4)) == 0); + LWIP_ASSERT("tcp_enqueue_flags: invalid segment length", seg->len == 0); + + LWIP_DEBUGF(TCP_OUTPUT_DEBUG | LWIP_DBG_TRACE, + ("tcp_enqueue_flags: queueing %"U32_F":%"U32_F" (0x%"X16_F")\n", + lwip_ntohl(seg->tcphdr->seqno), + lwip_ntohl(seg->tcphdr->seqno) + TCP_TCPLEN(seg), + (u16_t)flags)); + + /* Now append seg to pcb->unsent queue */ + if (pcb->unsent == NULL) { + pcb->unsent = seg; + } else { + struct tcp_seg *useg; + for (useg = pcb->unsent; useg->next != NULL; useg = useg->next); + useg->next = seg; + } +#if TCP_OVERSIZE + /* The new unsent tail has no space */ + pcb->unsent_oversize = 0; +#endif /* TCP_OVERSIZE */ + + /* SYN and FIN bump the sequence number */ + if ((flags & TCP_SYN) || (flags & TCP_FIN)) { + pcb->snd_lbb++; + /* optlen does not influence snd_buf */ + } + if (flags & TCP_FIN) { + tcp_set_flags(pcb, TF_FIN); + } + + /* update number of segments on the queues */ + pcb->snd_queuelen += pbuf_clen(seg->p); + LWIP_DEBUGF(TCP_QLEN_DEBUG, ("tcp_enqueue_flags: %"S16_F" (after enqueued)\n", pcb->snd_queuelen)); + if (pcb->snd_queuelen != 0) { + LWIP_ASSERT("tcp_enqueue_flags: invalid queue length", + pcb->unacked != NULL || pcb->unsent != NULL); + } + + return ERR_OK; +} + +#if LWIP_TCP_TIMESTAMPS +/* Build a timestamp option (12 bytes long) at the specified options pointer) + * + * @param pcb tcp_pcb + * @param opts option pointer where to store the timestamp option + */ +static void +tcp_build_timestamp_option(const struct tcp_pcb *pcb, u32_t *opts) +{ + LWIP_ASSERT("tcp_build_timestamp_option: invalid pcb", pcb != NULL); + + /* Pad with two NOP options to make everything nicely aligned */ + opts[0] = PP_HTONL(0x0101080A); + opts[1] = lwip_htonl(sys_now()); + opts[2] = lwip_htonl(pcb->ts_recent); +} +#endif + +#if LWIP_TCP_SACK_OUT +/** + * Calculates the number of SACK entries that should be generated. 
+ * It takes into account whether TF_SACK flag is set, + * the number of SACK entries in tcp_pcb that are valid, + * as well as the available options size. + * + * @param pcb tcp_pcb + * @param optlen the length of other TCP options (in bytes) + * @return the number of SACK ranges that can be used + */ +static u8_t +tcp_get_num_sacks(const struct tcp_pcb *pcb, u8_t optlen) +{ + u8_t num_sacks = 0; + + LWIP_ASSERT("tcp_get_num_sacks: invalid pcb", pcb != NULL); + + if (pcb->flags & TF_SACK) { + u8_t i; + + /* The first SACK takes up 12 bytes (it includes SACK header and two NOP options), + each additional one - 8 bytes. */ + optlen += 12; + + /* Max options size = 40, number of SACK array entries = LWIP_TCP_MAX_SACK_NUM */ + for (i = 0; (i < LWIP_TCP_MAX_SACK_NUM) && (optlen <= TCP_MAX_OPTION_BYTES) && + LWIP_TCP_SACK_VALID(pcb, i); ++i) { + ++num_sacks; + optlen += 8; + } + } + + return num_sacks; +} + +/** Build a SACK option (12 or more bytes long) at the specified options pointer) + * + * @param pcb tcp_pcb + * @param opts option pointer where to store the SACK option + * @param num_sacks the number of SACKs to store + */ +static void +tcp_build_sack_option(const struct tcp_pcb *pcb, u32_t *opts, u8_t num_sacks) +{ + u8_t i; + + LWIP_ASSERT("tcp_build_sack_option: invalid pcb", pcb != NULL); + LWIP_ASSERT("tcp_build_sack_option: invalid opts", opts != NULL); + + /* Pad with two NOP options to make everything nicely aligned. + We add the length (of just the SACK option, not the NOPs in front of it), + which is 2B of header, plus 8B for each SACK. */ + *(opts++) = PP_HTONL(0x01010500 + 2 + num_sacks * 8); + + for (i = 0; i < num_sacks; ++i) { + *(opts++) = lwip_htonl(pcb->rcv_sacks[i].left); + *(opts++) = lwip_htonl(pcb->rcv_sacks[i].right); + } +} + +#endif + +#if LWIP_WND_SCALE +/** Build a window scale option (3 bytes long) at the specified options pointer) + * + * @param opts option pointer where to store the window scale option + */ +static void +tcp_build_wnd_scale_option(u32_t *opts) +{ + LWIP_ASSERT("tcp_build_wnd_scale_option: invalid opts", opts != NULL); + + /* Pad with one NOP option to make everything nicely aligned */ + opts[0] = PP_HTONL(0x01030300 | TCP_RCV_SCALE); +} +#endif + +/** + * @ingroup tcp_raw + * Find out what we can send and send it + * + * @param pcb Protocol control block for the TCP connection to send data + * @return ERR_OK if data has been sent or nothing to send + * another err_t on error + */ +err_t +tcp_output(struct tcp_pcb *pcb) +{ + struct tcp_seg *seg, *useg; + u32_t wnd, snd_nxt; + err_t err; + struct netif *netif; +#if TCP_CWND_DEBUG + s16_t i = 0; +#endif /* TCP_CWND_DEBUG */ + + LWIP_ASSERT_CORE_LOCKED(); + + LWIP_ASSERT("tcp_output: invalid pcb", pcb != NULL); + /* pcb->state LISTEN not allowed here */ + LWIP_ASSERT("don't call tcp_output for listen-pcbs", + pcb->state != LISTEN); + + /* First, check if we are invoked by the TCP input processing + code. If so, we do not output anything. Instead, we rely on the + input processing code to call us when input processing is done + with. 
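
The PP_HTONL(0x01010500 + 2 + num_sacks * 8) constant in tcp_build_sack_option above packs four option bytes into one word: two NOPs for alignment, the SACK option kind (5) and the option length (2 header bytes plus 8 per block). With nothing else in the option space, the budget check in tcp_get_num_sacks lets up to four blocks through under the default LWIP_TCP_MAX_SACK_NUM of 4 (12 bytes for the first block including the NOPs, 8 for each further one, 36 of the 40 available); with a 12-byte timestamp option already present, only three fit. A small sketch that just prints the encoded bytes for two blocks:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
  unsigned num_sacks = 2;
  uint32_t first_word = 0x01010500u + 2u + num_sacks * 8u;

  printf("%02x %02x %02x %02x\n",
         (unsigned)(first_word >> 24) & 0xffu,  /* 0x01: NOP */
         (unsigned)(first_word >> 16) & 0xffu,  /* 0x01: NOP */
         (unsigned)(first_word >> 8) & 0xffu,   /* 0x05: kind = SACK */
         (unsigned)first_word & 0xffu);         /* 0x12: length = 2 + 8 * 2 = 18 */
  /* each block then follows as two big-endian 32-bit numbers: left, right */
  return 0;
}
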
*/ + if (tcp_input_pcb == pcb) { + return ERR_OK; + } + + wnd = LWIP_MIN(pcb->snd_wnd, pcb->cwnd); + + seg = pcb->unsent; + + if (seg == NULL) { + LWIP_DEBUGF(TCP_OUTPUT_DEBUG, ("tcp_output: nothing to send (%p)\n", + (void *)pcb->unsent)); + LWIP_DEBUGF(TCP_CWND_DEBUG, ("tcp_output: snd_wnd %"TCPWNDSIZE_F + ", cwnd %"TCPWNDSIZE_F", wnd %"U32_F + ", seg == NULL, ack %"U32_F"\n", + pcb->snd_wnd, pcb->cwnd, wnd, pcb->lastack)); + + /* If the TF_ACK_NOW flag is set and the ->unsent queue is empty, construct + * an empty ACK segment and send it. */ + if (pcb->flags & TF_ACK_NOW) { + return tcp_send_empty_ack(pcb); + } + /* nothing to send: shortcut out of here */ + goto output_done; + } else { + LWIP_DEBUGF(TCP_CWND_DEBUG, + ("tcp_output: snd_wnd %"TCPWNDSIZE_F", cwnd %"TCPWNDSIZE_F", wnd %"U32_F + ", effwnd %"U32_F", seq %"U32_F", ack %"U32_F"\n", + pcb->snd_wnd, pcb->cwnd, wnd, + lwip_ntohl(seg->tcphdr->seqno) - pcb->lastack + seg->len, + lwip_ntohl(seg->tcphdr->seqno), pcb->lastack)); + } + + netif = tcp_route(pcb, &pcb->local_ip, &pcb->remote_ip); + if (netif == NULL) { + return ERR_RTE; + } + + /* If we don't have a local IP address, we get one from netif */ + if (ip_addr_isany(&pcb->local_ip)) { + const ip_addr_t *local_ip = ip_netif_get_local_ip(netif, &pcb->remote_ip); + if (local_ip == NULL) { + return ERR_RTE; + } + ip_addr_copy(pcb->local_ip, *local_ip); + } + + /* Handle the current segment not fitting within the window */ + if (lwip_ntohl(seg->tcphdr->seqno) - pcb->lastack + seg->len > wnd) { + /* We need to start the persistent timer when the next unsent segment does not fit + * within the remaining (could be 0) send window and RTO timer is not running (we + * have no in-flight data). If window is still too small after persist timer fires, + * then we split the segment. We don't consider the congestion window since a cwnd + * smaller than 1 SMSS implies in-flight data + */ + if (wnd == pcb->snd_wnd && pcb->unacked == NULL && pcb->persist_backoff == 0) { + pcb->persist_cnt = 0; + pcb->persist_backoff = 1; + pcb->persist_probe = 0; + } + /* We need an ACK, but can't send data now, so send an empty ACK */ + if (pcb->flags & TF_ACK_NOW) { + return tcp_send_empty_ack(pcb); + } + goto output_done; + } + /* Stop persist timer, above conditions are not active */ + pcb->persist_backoff = 0; + + /* useg should point to last segment on unacked queue */ + useg = pcb->unacked; + if (useg != NULL) { + for (; useg->next != NULL; useg = useg->next); + } + /* data available and window allows it to be sent? */ + while (seg != NULL && + lwip_ntohl(seg->tcphdr->seqno) - pcb->lastack + seg->len <= wnd) { + LWIP_ASSERT("RST not expected here!", + (TCPH_FLAGS(seg->tcphdr) & TCP_RST) == 0); + /* Stop sending if the nagle algorithm would prevent it + * Don't stop: + * - if tcp_write had a memory error before (prevent delayed ACK timeout) or + * - if FIN was already enqueued for this PCB (SYN is always alone in a segment - + * either seg->next != NULL or pcb->unacked == NULL; + * RST is no sent using tcp_write/tcp_output. 
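
tcp_do_output_nagle(), tested just below, lives in tcp_priv.h rather than in this file; roughly, it answers whether a small segment may go out now or should wait for outstanding data to be acknowledged. The following sketch restates that decision with plain booleans and is an approximation, not the exact lwIP macro (which also looks at TF_INFR and the queue-length limits):

#include <stdbool.h>
#include <stdio.h>

struct nagle_view {
  bool nodelay;            /* TF_NODELAY: application disabled Nagle */
  bool has_unacked;        /* data already in flight, not yet ACKed */
  bool multiple_unsent;    /* more than one segment waiting */
  bool first_unsent_full;  /* first waiting segment is a full MSS */
  bool sndbuf_exhausted;   /* application is blocked on the send buffer */
};

static bool nagle_allows_send(const struct nagle_view *v)
{
  if (!v->has_unacked || v->nodelay) {
    return true;                       /* nothing in flight, or Nagle is off */
  }
  if (v->multiple_unsent || v->first_unsent_full) {
    return true;                       /* enough data queued for a full segment */
  }
  return v->sndbuf_exhausted;          /* flush when the writer is stuck anyway */
}

int main(void)
{
  struct nagle_view v = { false, true, false, false, false };  /* tiny segment, data in flight */

  printf("send now? %s\n", nagle_allows_send(&v) ? "yes" : "no");  /* no: held back */
  return 0;
}
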
+ */ + if ((tcp_do_output_nagle(pcb) == 0) && + ((pcb->flags & (TF_NAGLEMEMERR | TF_FIN)) == 0)) { + break; + } +#if TCP_CWND_DEBUG + LWIP_DEBUGF(TCP_CWND_DEBUG, ("tcp_output: snd_wnd %"TCPWNDSIZE_F", cwnd %"TCPWNDSIZE_F", wnd %"U32_F", effwnd %"U32_F", seq %"U32_F", ack %"U32_F", i %"S16_F"\n", + pcb->snd_wnd, pcb->cwnd, wnd, + lwip_ntohl(seg->tcphdr->seqno) + seg->len - + pcb->lastack, + lwip_ntohl(seg->tcphdr->seqno), pcb->lastack, i)); + ++i; +#endif /* TCP_CWND_DEBUG */ + + if (pcb->state != SYN_SENT) { + TCPH_SET_FLAG(seg->tcphdr, TCP_ACK); + } + + err = tcp_output_segment(seg, pcb, netif); + if (err != ERR_OK) { + /* segment could not be sent, for whatever reason */ + tcp_set_flags(pcb, TF_NAGLEMEMERR); + return err; + } +#if TCP_OVERSIZE_DBGCHECK + seg->oversize_left = 0; +#endif /* TCP_OVERSIZE_DBGCHECK */ + pcb->unsent = seg->next; + if (pcb->state != SYN_SENT) { + tcp_clear_flags(pcb, TF_ACK_DELAY | TF_ACK_NOW); + } + snd_nxt = lwip_ntohl(seg->tcphdr->seqno) + TCP_TCPLEN(seg); + if (TCP_SEQ_LT(pcb->snd_nxt, snd_nxt)) { + pcb->snd_nxt = snd_nxt; + } + /* put segment on unacknowledged list if length > 0 */ + if (TCP_TCPLEN(seg) > 0) { + seg->next = NULL; + /* unacked list is empty? */ + if (pcb->unacked == NULL) { + pcb->unacked = seg; + useg = seg; + /* unacked list is not empty? */ + } else { + /* In the case of fast retransmit, the packet should not go to the tail + * of the unacked queue, but rather somewhere before it. We need to check for + * this case. -STJ Jul 27, 2004 */ + if (TCP_SEQ_LT(lwip_ntohl(seg->tcphdr->seqno), lwip_ntohl(useg->tcphdr->seqno))) { + /* add segment to before tail of unacked list, keeping the list sorted */ + struct tcp_seg **cur_seg = &(pcb->unacked); + while (*cur_seg && + TCP_SEQ_LT(lwip_ntohl((*cur_seg)->tcphdr->seqno), lwip_ntohl(seg->tcphdr->seqno))) { + cur_seg = &((*cur_seg)->next ); + } + seg->next = (*cur_seg); + (*cur_seg) = seg; + } else { + /* add segment to tail of unacked list */ + useg->next = seg; + useg = useg->next; + } + } + /* do not queue empty segments on the unacked list */ + } else { + tcp_seg_free(seg); + } + seg = pcb->unsent; + } +#if TCP_OVERSIZE + if (pcb->unsent == NULL) { + /* last unsent has been removed, reset unsent_oversize */ + pcb->unsent_oversize = 0; + } +#endif /* TCP_OVERSIZE */ + +output_done: + tcp_clear_flags(pcb, TF_NAGLEMEMERR); + return ERR_OK; +} + +/** Check if a segment's pbufs are used by someone else than TCP. + * This can happen on retransmission if the pbuf of this segment is still + * referenced by the netif driver due to deferred transmission. + * This is the case (only!) if someone down the TX call path called + * pbuf_ref() on one of the pbufs! + * + * @arg seg the tcp segment to check + * @return 1 if ref != 1, 0 if ref == 1 + */ +static int +tcp_output_segment_busy(const struct tcp_seg *seg) +{ + LWIP_ASSERT("tcp_output_segment_busy: invalid seg", seg != NULL); + + /* We only need to check the first pbuf here: + If a pbuf is queued for transmission, a driver calls pbuf_ref(), + which only changes the ref count of the first pbuf */ + if (seg->p->ref != 1) { + /* other reference found */ + return 1; + } + /* no other references found */ + return 0; +} + +/** + * Called by tcp_output() to actually send a TCP segment over IP. 
+ * + * @param seg the tcp_seg to send + * @param pcb the tcp_pcb for the TCP connection used to send the segment + * @param netif the netif used to send the segment + */ +static err_t +tcp_output_segment(struct tcp_seg *seg, struct tcp_pcb *pcb, struct netif *netif) +{ + err_t err; + u16_t len; + u32_t *opts; +#if TCP_CHECKSUM_ON_COPY + int seg_chksum_was_swapped = 0; +#endif + + LWIP_ASSERT("tcp_output_segment: invalid seg", seg != NULL); + LWIP_ASSERT("tcp_output_segment: invalid pcb", pcb != NULL); + LWIP_ASSERT("tcp_output_segment: invalid netif", netif != NULL); + + if (tcp_output_segment_busy(seg)) { + /* This should not happen: rexmit functions should have checked this. + However, since this function modifies p->len, we must not continue in this case. */ + LWIP_DEBUGF(TCP_RTO_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("tcp_output_segment: segment busy\n")); + return ERR_OK; + } + + /* The TCP header has already been constructed, but the ackno and + wnd fields remain. */ + seg->tcphdr->ackno = lwip_htonl(pcb->rcv_nxt); + + /* advertise our receive window size in this TCP segment */ +#if LWIP_WND_SCALE + if (seg->flags & TF_SEG_OPTS_WND_SCALE) { + /* The Window field in a SYN segment itself (the only type where we send + the window scale option) is never scaled. */ + seg->tcphdr->wnd = lwip_htons(TCPWND_MIN16(pcb->rcv_ann_wnd)); + } else +#endif /* LWIP_WND_SCALE */ + { + seg->tcphdr->wnd = lwip_htons(TCPWND_MIN16(RCV_WND_SCALE(pcb, pcb->rcv_ann_wnd))); + } + + pcb->rcv_ann_right_edge = pcb->rcv_nxt + pcb->rcv_ann_wnd; + + /* Add any requested options. NB MSS option is only set on SYN + packets, so ignore it here */ + /* cast through void* to get rid of alignment warnings */ + opts = (u32_t *)(void *)(seg->tcphdr + 1); + if (seg->flags & TF_SEG_OPTS_MSS) { + u16_t mss; +#if TCP_CALCULATE_EFF_SEND_MSS + mss = tcp_eff_send_mss_netif(TCP_MSS, netif, &pcb->remote_ip); +#else /* TCP_CALCULATE_EFF_SEND_MSS */ + mss = TCP_MSS; +#endif /* TCP_CALCULATE_EFF_SEND_MSS */ + *opts = TCP_BUILD_MSS_OPTION(mss); + opts += 1; + } +#if LWIP_TCP_TIMESTAMPS + pcb->ts_lastacksent = pcb->rcv_nxt; + + if (seg->flags & TF_SEG_OPTS_TS) { + tcp_build_timestamp_option(pcb, opts); + opts += 3; + } +#endif +#if LWIP_WND_SCALE + if (seg->flags & TF_SEG_OPTS_WND_SCALE) { + tcp_build_wnd_scale_option(opts); + opts += 1; + } +#endif +#if LWIP_TCP_SACK_OUT + if (seg->flags & TF_SEG_OPTS_SACK_PERM) { + /* Pad with two NOP options to make everything nicely aligned + * NOTE: When we send both timestamp and SACK_PERM options, + * we could use the first two NOPs before the timestamp to store SACK_PERM option, + * but that would complicate the code. + */ + *(opts++) = PP_HTONL(0x01010402); + } +#endif + + /* Set retransmission timer running if it is not currently enabled + This must be set before checking the route. */ + if (pcb->rtime < 0) { + pcb->rtime = 0; + } + + if (pcb->rttest == 0) { + pcb->rttest = tcp_ticks; + pcb->rtseq = lwip_ntohl(seg->tcphdr->seqno); + + LWIP_DEBUGF(TCP_RTO_DEBUG, ("tcp_output_segment: rtseq %"U32_F"\n", pcb->rtseq)); + } + LWIP_DEBUGF(TCP_OUTPUT_DEBUG, ("tcp_output_segment: %"U32_F":%"U32_F"\n", + lwip_htonl(seg->tcphdr->seqno), lwip_htonl(seg->tcphdr->seqno) + + seg->len)); + + len = (u16_t)((u8_t *)seg->tcphdr - (u8_t *)seg->p->payload); + if (len == 0) { + /** Exclude retransmitted segments from this count. 
*/ + MIB2_STATS_INC(mib2.tcpoutsegs); + } + + seg->p->len -= len; + seg->p->tot_len -= len; + + seg->p->payload = seg->tcphdr; + + seg->tcphdr->chksum = 0; + +#ifdef LWIP_HOOK_TCP_OUT_ADD_TCPOPTS + opts = LWIP_HOOK_TCP_OUT_ADD_TCPOPTS(seg->p, seg->tcphdr, pcb, opts); +#endif + LWIP_ASSERT("options not filled", (u8_t *)opts == ((u8_t *)(seg->tcphdr + 1)) + LWIP_TCP_OPT_LENGTH_SEGMENT(seg->flags, pcb)); + +#if CHECKSUM_GEN_TCP + IF__NETIF_CHECKSUM_ENABLED(netif, NETIF_CHECKSUM_GEN_TCP) { +#if TCP_CHECKSUM_ON_COPY + u32_t acc; +#if TCP_CHECKSUM_ON_COPY_SANITY_CHECK + u16_t chksum_slow = ip_chksum_pseudo(seg->p, IP_PROTO_TCP, + seg->p->tot_len, &pcb->local_ip, &pcb->remote_ip); +#endif /* TCP_CHECKSUM_ON_COPY_SANITY_CHECK */ + if ((seg->flags & TF_SEG_DATA_CHECKSUMMED) == 0) { + LWIP_ASSERT("data included but not checksummed", + seg->p->tot_len == TCPH_HDRLEN_BYTES(seg->tcphdr)); + } + + /* rebuild TCP header checksum (TCP header changes for retransmissions!) */ + acc = ip_chksum_pseudo_partial(seg->p, IP_PROTO_TCP, + seg->p->tot_len, TCPH_HDRLEN_BYTES(seg->tcphdr), &pcb->local_ip, &pcb->remote_ip); + /* add payload checksum */ + if (seg->chksum_swapped) { + seg_chksum_was_swapped = 1; + seg->chksum = SWAP_BYTES_IN_WORD(seg->chksum); + seg->chksum_swapped = 0; + } + acc = (u16_t)~acc + seg->chksum; + seg->tcphdr->chksum = (u16_t)~FOLD_U32T(acc); +#if TCP_CHECKSUM_ON_COPY_SANITY_CHECK + if (chksum_slow != seg->tcphdr->chksum) { + TCP_CHECKSUM_ON_COPY_SANITY_CHECK_FAIL( + ("tcp_output_segment: calculated checksum is %"X16_F" instead of %"X16_F"\n", + seg->tcphdr->chksum, chksum_slow)); + seg->tcphdr->chksum = chksum_slow; + } +#endif /* TCP_CHECKSUM_ON_COPY_SANITY_CHECK */ +#else /* TCP_CHECKSUM_ON_COPY */ + seg->tcphdr->chksum = ip_chksum_pseudo(seg->p, IP_PROTO_TCP, + seg->p->tot_len, &pcb->local_ip, &pcb->remote_ip); +#endif /* TCP_CHECKSUM_ON_COPY */ + } +#endif /* CHECKSUM_GEN_TCP */ + TCP_STATS_INC(tcp.xmit); + + NETIF_SET_HINTS(netif, &(pcb->netif_hints)); + err = ip_output_if(seg->p, &pcb->local_ip, &pcb->remote_ip, pcb->ttl, + pcb->tos, IP_PROTO_TCP, netif); + NETIF_RESET_HINTS(netif); + +#if TCP_CHECKSUM_ON_COPY + if (seg_chksum_was_swapped) { + /* if data is added to this segment later, chksum needs to be swapped, + so restore this now */ + seg->chksum = SWAP_BYTES_IN_WORD(seg->chksum); + seg->chksum_swapped = 1; + } +#endif + + return err; +} + +/** + * Requeue all unacked segments for retransmission + * + * Called by tcp_slowtmr() for slow retransmission. + * + * @param pcb the tcp_pcb for which to re-enqueue all unacked segments + */ +err_t +tcp_rexmit_rto_prepare(struct tcp_pcb *pcb) +{ + struct tcp_seg *seg; + + LWIP_ASSERT("tcp_rexmit_rto_prepare: invalid pcb", pcb != NULL); + + if (pcb->unacked == NULL) { + return ERR_VAL; + } + + /* Move all unacked segments to the head of the unsent queue. + However, give up if any of the unsent pbufs are still referenced by the + netif driver due to deferred transmission. No point loading the link further + if it is struggling to flush its buffered writes. 
*/ + for (seg = pcb->unacked; seg->next != NULL; seg = seg->next) { + if (tcp_output_segment_busy(seg)) { + LWIP_DEBUGF(TCP_RTO_DEBUG, ("tcp_rexmit_rto: segment busy\n")); + return ERR_VAL; + } + } + if (tcp_output_segment_busy(seg)) { + LWIP_DEBUGF(TCP_RTO_DEBUG, ("tcp_rexmit_rto: segment busy\n")); + return ERR_VAL; + } + /* concatenate unsent queue after unacked queue */ + seg->next = pcb->unsent; +#if TCP_OVERSIZE_DBGCHECK + /* if last unsent changed, we need to update unsent_oversize */ + if (pcb->unsent == NULL) { + pcb->unsent_oversize = seg->oversize_left; + } +#endif /* TCP_OVERSIZE_DBGCHECK */ + /* unsent queue is the concatenated queue (of unacked, unsent) */ + pcb->unsent = pcb->unacked; + /* unacked queue is now empty */ + pcb->unacked = NULL; + + /* Mark RTO in-progress */ + tcp_set_flags(pcb, TF_RTO); + /* Record the next byte following retransmit */ + pcb->rto_end = lwip_ntohl(seg->tcphdr->seqno) + TCP_TCPLEN(seg); + /* Don't take any RTT measurements after retransmitting. */ + pcb->rttest = 0; + + return ERR_OK; +} + +/** + * Requeue all unacked segments for retransmission + * + * Called by tcp_slowtmr() for slow retransmission. + * + * @param pcb the tcp_pcb for which to re-enqueue all unacked segments + */ +void +tcp_rexmit_rto_commit(struct tcp_pcb *pcb) +{ + LWIP_ASSERT("tcp_rexmit_rto_commit: invalid pcb", pcb != NULL); + + /* increment number of retransmissions */ + if (pcb->nrtx < 0xFF) { + ++pcb->nrtx; + } + /* Do the actual retransmission */ + tcp_output(pcb); +} + +/** + * Requeue all unacked segments for retransmission + * + * Called by tcp_process() only, tcp_slowtmr() needs to do some things between + * "prepare" and "commit". + * + * @param pcb the tcp_pcb for which to re-enqueue all unacked segments + */ +void +tcp_rexmit_rto(struct tcp_pcb *pcb) +{ + LWIP_ASSERT("tcp_rexmit_rto: invalid pcb", pcb != NULL); + + if (tcp_rexmit_rto_prepare(pcb) == ERR_OK) { + tcp_rexmit_rto_commit(pcb); + } +} + +/** + * Requeue the first unacked segment for retransmission + * + * Called by tcp_receive() for fast retransmit. + * + * @param pcb the tcp_pcb for which to retransmit the first unacked segment + */ +err_t +tcp_rexmit(struct tcp_pcb *pcb) +{ + struct tcp_seg *seg; + struct tcp_seg **cur_seg; + + LWIP_ASSERT("tcp_rexmit: invalid pcb", pcb != NULL); + + if (pcb->unacked == NULL) { + return ERR_VAL; + } + + seg = pcb->unacked; + + /* Give up if the segment is still referenced by the netif driver + due to deferred transmission. */ + if (tcp_output_segment_busy(seg)) { + LWIP_DEBUGF(TCP_RTO_DEBUG, ("tcp_rexmit busy\n")); + return ERR_VAL; + } + + /* Move the first unacked segment to the unsent queue */ + /* Keep the unsent queue sorted. */ + pcb->unacked = seg->next; + + cur_seg = &(pcb->unsent); + while (*cur_seg && + TCP_SEQ_LT(lwip_ntohl((*cur_seg)->tcphdr->seqno), lwip_ntohl(seg->tcphdr->seqno))) { + cur_seg = &((*cur_seg)->next ); + } + seg->next = *cur_seg; + *cur_seg = seg; +#if TCP_OVERSIZE + if (seg->next == NULL) { + /* the retransmitted segment is last in unsent, so reset unsent_oversize */ + pcb->unsent_oversize = 0; + } +#endif /* TCP_OVERSIZE */ + + if (pcb->nrtx < 0xFF) { + ++pcb->nrtx; + } + + /* Don't take any rtt measurements after retransmitting. */ + pcb->rttest = 0; + + /* Do the actual retransmission. */ + MIB2_STATS_INC(mib2.tcpretranssegs); + /* No need to call tcp_output: we are always called from tcp_input() + and thus tcp_output directly returns. 
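
tcp_rexmit_fast below shrinks ssthresh to half of the current effective window (the smaller of cwnd and the advertised window), with a floor of two segments, and then re-inflates cwnd by the three duplicate ACKs that triggered the retransmit. A worked example with illustrative numbers (MSS 1460, a cwnd of eight segments, a 20000-byte advertised window):

#include <stdio.h>

int main(void)
{
  unsigned mss = 1460, cwnd = 8 * 1460, snd_wnd = 20000;
  unsigned ssthresh = ((cwnd < snd_wnd) ? cwnd : snd_wnd) / 2;

  if (ssthresh < 2 * mss) {
    ssthresh = 2 * mss;              /* floor of two segments */
  }
  cwnd = ssthresh + 3 * mss;         /* inflate by the three duplicate ACKs */
  printf("ssthresh=%u cwnd=%u\n", ssthresh, cwnd);  /* 5840 and 10220 */
  return 0;
}
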
*/ + return ERR_OK; +} + + +/** + * Handle retransmission after three dupacks received + * + * @param pcb the tcp_pcb for which to retransmit the first unacked segment + */ +void +tcp_rexmit_fast(struct tcp_pcb *pcb) +{ + LWIP_ASSERT("tcp_rexmit_fast: invalid pcb", pcb != NULL); + + if (pcb->unacked != NULL && !(pcb->flags & TF_INFR)) { + /* This is fast retransmit. Retransmit the first unacked segment. */ + LWIP_DEBUGF(TCP_FR_DEBUG, + ("tcp_receive: dupacks %"U16_F" (%"U32_F + "), fast retransmit %"U32_F"\n", + (u16_t)pcb->dupacks, pcb->lastack, + lwip_ntohl(pcb->unacked->tcphdr->seqno))); + if (tcp_rexmit(pcb) == ERR_OK) { + /* Set ssthresh to half of the minimum of the current + * cwnd and the advertised window */ + pcb->ssthresh = LWIP_MIN(pcb->cwnd, pcb->snd_wnd) / 2; + + /* The minimum value for ssthresh should be 2 MSS */ + if (pcb->ssthresh < (2U * pcb->mss)) { + LWIP_DEBUGF(TCP_FR_DEBUG, + ("tcp_receive: The minimum value for ssthresh %"TCPWNDSIZE_F + " should be min 2 mss %"U16_F"...\n", + pcb->ssthresh, (u16_t)(2 * pcb->mss))); + pcb->ssthresh = 2 * pcb->mss; + } + + pcb->cwnd = pcb->ssthresh + 3 * pcb->mss; + tcp_set_flags(pcb, TF_INFR); + + /* Reset the retransmission timer to prevent immediate rto retransmissions */ + pcb->rtime = 0; + } + } +} + +static struct pbuf * +tcp_output_alloc_header_common(u32_t ackno, u16_t optlen, u16_t datalen, + u32_t seqno_be /* already in network byte order */, + u16_t src_port, u16_t dst_port, u8_t flags, u16_t wnd) +{ + struct tcp_hdr *tcphdr; + struct pbuf *p; + + p = pbuf_alloc(PBUF_IP, TCP_HLEN + optlen + datalen, PBUF_RAM); + if (p != NULL) { + LWIP_ASSERT("check that first pbuf can hold struct tcp_hdr", + (p->len >= TCP_HLEN + optlen)); + tcphdr = (struct tcp_hdr *)p->payload; + tcphdr->src = lwip_htons(src_port); + tcphdr->dest = lwip_htons(dst_port); + tcphdr->seqno = seqno_be; + tcphdr->ackno = lwip_htonl(ackno); + TCPH_HDRLEN_FLAGS_SET(tcphdr, (5 + optlen / 4), flags); + tcphdr->wnd = lwip_htons(wnd); + tcphdr->chksum = 0; + tcphdr->urgp = 0; + } + return p; +} + +/** Allocate a pbuf and create a tcphdr at p->payload, used for output + * functions other than the default tcp_output -> tcp_output_segment + * (e.g. tcp_send_empty_ack, etc.) + * + * @param pcb tcp pcb for which to send a packet (used to initialize tcp_hdr) + * @param optlen length of header-options + * @param datalen length of tcp data to reserve in pbuf + * @param seqno_be seqno in network byte order (big-endian) + * @return pbuf with p->payload being the tcp_hdr + */ +static struct pbuf * +tcp_output_alloc_header(struct tcp_pcb *pcb, u16_t optlen, u16_t datalen, + u32_t seqno_be /* already in network byte order */) +{ + struct pbuf *p; + + LWIP_ASSERT("tcp_output_alloc_header: invalid pcb", pcb != NULL); + + p = tcp_output_alloc_header_common(pcb->rcv_nxt, optlen, datalen, + seqno_be, pcb->local_port, pcb->remote_port, TCP_ACK, + TCPWND_MIN16(RCV_WND_SCALE(pcb, pcb->rcv_ann_wnd))); + if (p != NULL) { + /* If we're sending a packet, update the announced right window edge */ + pcb->rcv_ann_right_edge = pcb->rcv_nxt + pcb->rcv_ann_wnd; + } + return p; +} + +/* Fill in options for control segments */ +static void +tcp_output_fill_options(const struct tcp_pcb *pcb, struct pbuf *p, u8_t optflags, u8_t num_sacks) +{ + struct tcp_hdr *tcphdr; + u32_t *opts; + u16_t sacks_len = 0; + + LWIP_ASSERT("tcp_output_fill_options: invalid pbuf", p != NULL); + + tcphdr = (struct tcp_hdr *)p->payload; + opts = (u32_t *)(void *)(tcphdr + 1); + + /* NB. 
MSS and window scale options are only sent on SYNs, so ignore them here */ + +#if LWIP_TCP_TIMESTAMPS + if (optflags & TF_SEG_OPTS_TS) { + tcp_build_timestamp_option(pcb, opts); + opts += 3; + } +#endif + +#if LWIP_TCP_SACK_OUT + if (pcb && (num_sacks > 0)) { + tcp_build_sack_option(pcb, opts, num_sacks); + /* 1 word for SACKs header (including 2xNOP), and 2 words for each SACK */ + sacks_len = 1 + num_sacks * 2; + opts += sacks_len; + } +#else + LWIP_UNUSED_ARG(num_sacks); +#endif + +#ifdef LWIP_HOOK_TCP_OUT_ADD_TCPOPTS + opts = LWIP_HOOK_TCP_OUT_ADD_TCPOPTS(p, tcphdr, pcb, opts); +#endif + + LWIP_UNUSED_ARG(pcb); + LWIP_UNUSED_ARG(sacks_len); + LWIP_ASSERT("options not filled", (u8_t *)opts == ((u8_t *)(tcphdr + 1)) + sacks_len * 4 + LWIP_TCP_OPT_LENGTH_SEGMENT(optflags, pcb)); + LWIP_UNUSED_ARG(optflags); /* for LWIP_NOASSERT */ + LWIP_UNUSED_ARG(opts); /* for LWIP_NOASSERT */ +} + +/** Output a control segment pbuf to IP. + * + * Called from tcp_rst, tcp_send_empty_ack, tcp_keepalive and tcp_zero_window_probe, + * this function combines selecting a netif for transmission, generating the tcp + * header checksum and calling ip_output_if while handling netif hints and stats. + */ +static err_t +tcp_output_control_segment(const struct tcp_pcb *pcb, struct pbuf *p, + const ip_addr_t *src, const ip_addr_t *dst) +{ + err_t err; + struct netif *netif; + + LWIP_ASSERT("tcp_output_control_segment: invalid pbuf", p != NULL); + + netif = tcp_route(pcb, src, dst); + if (netif == NULL) { + err = ERR_RTE; + } else { + u8_t ttl, tos; +#if CHECKSUM_GEN_TCP + IF__NETIF_CHECKSUM_ENABLED(netif, NETIF_CHECKSUM_GEN_TCP) { + struct tcp_hdr *tcphdr = (struct tcp_hdr *)p->payload; + tcphdr->chksum = ip_chksum_pseudo(p, IP_PROTO_TCP, p->tot_len, + src, dst); + } +#endif + if (pcb != NULL) { + NETIF_SET_HINTS(netif, LWIP_CONST_CAST(struct netif_hint*, &(pcb->netif_hints))); + ttl = pcb->ttl; + tos = pcb->tos; + } else { + /* Send output with hardcoded TTL/HL since we have no access to the pcb */ + ttl = TCP_TTL; + tos = 0; + } + TCP_STATS_INC(tcp.xmit); + err = ip_output_if(p, src, dst, ttl, tos, IP_PROTO_TCP, netif); + NETIF_RESET_HINTS(netif); + } + pbuf_free(p); + return err; +} + +/** + * Send a TCP RESET packet (empty segment with RST flag set) either to + * abort a connection or to show that there is no matching local connection + * for a received segment. + * + * Called by tcp_abort() (to abort a local connection), tcp_input() (if no + * matching local pcb was found), tcp_listen_input() (if incoming segment + * has ACK flag set) and tcp_process() (received segment in the wrong state) + * + * Since a RST segment is in most cases not sent for an active connection, + * tcp_rst() has a number of arguments that are taken from a tcp_pcb for + * most other segment output functions. 
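From the application side, the usual way to trigger such a RST on an active connection is tcp_abort() on the raw API. A minimal sketch, assuming an established connection; clearing the callbacks first is a common precaution rather than a requirement of tcp_abort():

#include "lwip/tcp.h"

/* Drop a connection immediately: for an active pcb the stack emits a
   RST segment (via tcp_rst()) and frees the pcb. */
static void drop_connection(struct tcp_pcb *cpcb)
{
  tcp_arg(cpcb, NULL);
  tcp_recv(cpcb, NULL);
  tcp_sent(cpcb, NULL);
  tcp_err(cpcb, NULL);
  tcp_abort(cpcb);   /* cpcb must not be used after this call */
}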
+ * + * @param pcb TCP pcb (may be NULL if no pcb is available) + * @param seqno the sequence number to use for the outgoing segment + * @param ackno the acknowledge number to use for the outgoing segment + * @param local_ip the local IP address to send the segment from + * @param remote_ip the remote IP address to send the segment to + * @param local_port the local TCP port to send the segment from + * @param remote_port the remote TCP port to send the segment to + */ +void +tcp_rst(const struct tcp_pcb *pcb, u32_t seqno, u32_t ackno, + const ip_addr_t *local_ip, const ip_addr_t *remote_ip, + u16_t local_port, u16_t remote_port) +{ + struct pbuf *p; + u16_t wnd; + u8_t optlen; + + LWIP_ASSERT("tcp_rst: invalid local_ip", local_ip != NULL); + LWIP_ASSERT("tcp_rst: invalid remote_ip", remote_ip != NULL); + + optlen = LWIP_TCP_OPT_LENGTH_SEGMENT(0, pcb); + +#if LWIP_WND_SCALE + wnd = PP_HTONS(((TCP_WND >> TCP_RCV_SCALE) & 0xFFFF)); +#else + wnd = PP_HTONS(TCP_WND); +#endif + + p = tcp_output_alloc_header_common(ackno, optlen, 0, lwip_htonl(seqno), local_port, + remote_port, TCP_RST | TCP_ACK, wnd); + if (p == NULL) { + LWIP_DEBUGF(TCP_DEBUG, ("tcp_rst: could not allocate memory for pbuf\n")); + return; + } + tcp_output_fill_options(pcb, p, 0, optlen); + + MIB2_STATS_INC(mib2.tcpoutrsts); + + tcp_output_control_segment(pcb, p, local_ip, remote_ip); + LWIP_DEBUGF(TCP_RST_DEBUG, ("tcp_rst: seqno %"U32_F" ackno %"U32_F".\n", seqno, ackno)); +} + +/** + * Send an ACK without data. + * + * @param pcb Protocol control block for the TCP connection to send the ACK + */ +err_t +tcp_send_empty_ack(struct tcp_pcb *pcb) +{ + err_t err; + struct pbuf *p; + u8_t optlen, optflags = 0; + u8_t num_sacks = 0; + + LWIP_ASSERT("tcp_send_empty_ack: invalid pcb", pcb != NULL); + +#if LWIP_TCP_TIMESTAMPS + if (pcb->flags & TF_TIMESTAMP) { + optflags = TF_SEG_OPTS_TS; + } +#endif + optlen = LWIP_TCP_OPT_LENGTH_SEGMENT(optflags, pcb); + +#if LWIP_TCP_SACK_OUT + /* For now, SACKs are only sent with empty ACKs */ + if ((num_sacks = tcp_get_num_sacks(pcb, optlen)) > 0) { + optlen += 4 + num_sacks * 8; /* 4 bytes for header (including 2*NOP), plus 8B for each SACK */ + } +#endif + + p = tcp_output_alloc_header(pcb, optlen, 0, lwip_htonl(pcb->snd_nxt)); + if (p == NULL) { + /* let tcp_fasttmr retry sending this ACK */ + tcp_set_flags(pcb, TF_ACK_DELAY | TF_ACK_NOW); + LWIP_DEBUGF(TCP_OUTPUT_DEBUG, ("tcp_output: (ACK) could not allocate pbuf\n")); + return ERR_BUF; + } + tcp_output_fill_options(pcb, p, optflags, num_sacks); + +#if LWIP_TCP_TIMESTAMPS + pcb->ts_lastacksent = pcb->rcv_nxt; +#endif + + LWIP_DEBUGF(TCP_OUTPUT_DEBUG, + ("tcp_output: sending ACK for %"U32_F"\n", pcb->rcv_nxt)); + err = tcp_output_control_segment(pcb, p, &pcb->local_ip, &pcb->remote_ip); + if (err != ERR_OK) { + /* let tcp_fasttmr retry sending this ACK */ + tcp_set_flags(pcb, TF_ACK_DELAY | TF_ACK_NOW); + } else { + /* remove ACK flags from the PCB, as we sent an empty ACK now */ + tcp_clear_flags(pcb, TF_ACK_DELAY | TF_ACK_NOW); + } + + return err; +} + +/** + * Send keepalive packets to keep a connection active although + * no data is sent over it. 
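Keepalive probing is opt-in per connection on the application side. A minimal sketch using the raw API; the idle/interval values are illustrative, and keep_intvl/keep_cnt exist only when LWIP_TCP_KEEPALIVE is enabled in lwipopts.h:

#include "lwip/tcp.h"

static void enable_keepalive(struct tcp_pcb *cpcb)
{
  ip_set_option(cpcb, SOF_KEEPALIVE);   /* turn keepalive probing on */
  cpcb->keep_idle = 30000;              /* ms idle before the first probe */
#if LWIP_TCP_KEEPALIVE
  cpcb->keep_intvl = 5000;              /* ms between probes */
  cpcb->keep_cnt   = 4;                 /* unanswered probes before the pcb is dropped */
#endif
}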
+ * + * Called by tcp_slowtmr() + * + * @param pcb the tcp_pcb for which to send a keepalive packet + */ +err_t +tcp_keepalive(struct tcp_pcb *pcb) +{ + err_t err; + struct pbuf *p; + u8_t optlen = LWIP_TCP_OPT_LENGTH_SEGMENT(0, pcb); + + LWIP_ASSERT("tcp_keepalive: invalid pcb", pcb != NULL); + + LWIP_DEBUGF(TCP_DEBUG, ("tcp_keepalive: sending KEEPALIVE probe to ")); + ip_addr_debug_print_val(TCP_DEBUG, pcb->remote_ip); + LWIP_DEBUGF(TCP_DEBUG, ("\n")); + + LWIP_DEBUGF(TCP_DEBUG, ("tcp_keepalive: tcp_ticks %"U32_F" pcb->tmr %"U32_F" pcb->keep_cnt_sent %"U16_F"\n", + tcp_ticks, pcb->tmr, (u16_t)pcb->keep_cnt_sent)); + + p = tcp_output_alloc_header(pcb, optlen, 0, lwip_htonl(pcb->snd_nxt - 1)); + if (p == NULL) { + LWIP_DEBUGF(TCP_DEBUG, + ("tcp_keepalive: could not allocate memory for pbuf\n")); + return ERR_MEM; + } + tcp_output_fill_options(pcb, p, 0, optlen); + err = tcp_output_control_segment(pcb, p, &pcb->local_ip, &pcb->remote_ip); + + LWIP_DEBUGF(TCP_DEBUG, ("tcp_keepalive: seqno %"U32_F" ackno %"U32_F" err %d.\n", + pcb->snd_nxt - 1, pcb->rcv_nxt, (int)err)); + return err; +} + +/** + * Send persist timer zero-window probes to keep a connection active + * when a window update is lost. + * + * Called by tcp_slowtmr() + * + * @param pcb the tcp_pcb for which to send a zero-window probe packet + */ +err_t +tcp_zero_window_probe(struct tcp_pcb *pcb) +{ + err_t err; + struct pbuf *p; + struct tcp_hdr *tcphdr; + struct tcp_seg *seg; + u16_t len; + u8_t is_fin; + u32_t snd_nxt; + u8_t optlen = LWIP_TCP_OPT_LENGTH_SEGMENT(0, pcb); + + LWIP_ASSERT("tcp_zero_window_probe: invalid pcb", pcb != NULL); + + LWIP_DEBUGF(TCP_DEBUG, ("tcp_zero_window_probe: sending ZERO WINDOW probe to ")); + ip_addr_debug_print_val(TCP_DEBUG, pcb->remote_ip); + LWIP_DEBUGF(TCP_DEBUG, ("\n")); + + LWIP_DEBUGF(TCP_DEBUG, + ("tcp_zero_window_probe: tcp_ticks %"U32_F + " pcb->tmr %"U32_F" pcb->keep_cnt_sent %"U16_F"\n", + tcp_ticks, pcb->tmr, (u16_t)pcb->keep_cnt_sent)); + + /* Only consider unsent, persist timer should be off when there is data in-flight */ + seg = pcb->unsent; + if (seg == NULL) { + /* Not expected, persist timer should be off when the send buffer is empty */ + return ERR_OK; + } + + /* increment probe count. NOTE: we record probe even if it fails + to actually transmit due to an error. This ensures memory exhaustion/ + routing problem doesn't leave a zero-window pcb as an indefinite zombie. + RTO mechanism has similar behavior, see pcb->nrtx */ + if (pcb->persist_probe < 0xFF) { + ++pcb->persist_probe; + } + + is_fin = ((TCPH_FLAGS(seg->tcphdr) & TCP_FIN) != 0) && (seg->len == 0); + /* we want to send one seqno: either FIN or data (no options) */ + len = is_fin ? 0 : 1; + + p = tcp_output_alloc_header(pcb, optlen, len, seg->tcphdr->seqno); + if (p == NULL) { + LWIP_DEBUGF(TCP_DEBUG, ("tcp_zero_window_probe: no memory for pbuf\n")); + return ERR_MEM; + } + tcphdr = (struct tcp_hdr *)p->payload; + + if (is_fin) { + /* FIN segment, no data */ + TCPH_FLAGS_SET(tcphdr, TCP_ACK | TCP_FIN); + } else { + /* Data segment, copy in one byte from the head of the unacked queue */ + char *d = ((char *)p->payload + TCP_HLEN); + /* Depending on whether the segment has already been sent (unacked) or not + (unsent), seg->p->payload points to the IP header or TCP header. + Ensure we copy the first TCP data byte: */ + pbuf_copy_partial(seg->p, d, 1, seg->p->tot_len - seg->len); + } + + /* The byte may be acknowledged without the window being opened. 
*/ + snd_nxt = lwip_ntohl(seg->tcphdr->seqno) + 1; + if (TCP_SEQ_LT(pcb->snd_nxt, snd_nxt)) { + pcb->snd_nxt = snd_nxt; + } + tcp_output_fill_options(pcb, p, 0, optlen); + + err = tcp_output_control_segment(pcb, p, &pcb->local_ip, &pcb->remote_ip); + + LWIP_DEBUGF(TCP_DEBUG, ("tcp_zero_window_probe: seqno %"U32_F + " ackno %"U32_F" err %d.\n", + pcb->snd_nxt - 1, pcb->rcv_nxt, (int)err)); + return err; +} +#endif /* LWIP_TCP */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/timeouts.c b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/timeouts.c new file mode 100644 index 0000000..c3431e8 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/timeouts.c @@ -0,0 +1,451 @@ +/** + * @file + * Stack-internal timers implementation. + * This file includes timer callbacks for stack-internal timers as well as + * functions to set up or stop timers and check for expired timers. + * + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * Simon Goldschmidt + * + */ + +#include "lwip/opt.h" + +#include "lwip/timeouts.h" +#include "lwip/priv/tcp_priv.h" + +#include "lwip/def.h" +#include "lwip/memp.h" +#include "lwip/priv/tcpip_priv.h" + +#include "lwip/ip4_frag.h" +#include "lwip/etharp.h" +#include "lwip/dhcp.h" +#include "lwip/autoip.h" +#include "lwip/igmp.h" +#include "lwip/dns.h" +#include "lwip/nd6.h" +#include "lwip/ip6_frag.h" +#include "lwip/mld6.h" +#include "lwip/dhcp6.h" +#include "lwip/sys.h" +#include "lwip/pbuf.h" + +#if LWIP_DEBUG_TIMERNAMES +#define HANDLER(x) x, #x +#else /* LWIP_DEBUG_TIMERNAMES */ +#define HANDLER(x) x +#endif /* LWIP_DEBUG_TIMERNAMES */ + +#define LWIP_MAX_TIMEOUT 0x7fffffff + +/* Check if timer's expiry time is greater than time and care about u32_t wraparounds */ +#define TIME_LESS_THAN(t, compare_to) ( (((u32_t)((t)-(compare_to))) > LWIP_MAX_TIMEOUT) ? 
1 : 0 ) + +/** This array contains all stack-internal cyclic timers. To get the number of + * timers, use LWIP_ARRAYSIZE() */ +const struct lwip_cyclic_timer lwip_cyclic_timers[] = { +#if LWIP_TCP + /* The TCP timer is a special case: it does not have to run always and + is triggered to start from TCP using tcp_timer_needed() */ + {TCP_TMR_INTERVAL, HANDLER(tcp_tmr)}, +#endif /* LWIP_TCP */ +#if LWIP_IPV4 +#if IP_REASSEMBLY + {IP_TMR_INTERVAL, HANDLER(ip_reass_tmr)}, +#endif /* IP_REASSEMBLY */ +#if LWIP_ARP + {ARP_TMR_INTERVAL, HANDLER(etharp_tmr)}, +#endif /* LWIP_ARP */ +#if LWIP_DHCP + {DHCP_COARSE_TIMER_MSECS, HANDLER(dhcp_coarse_tmr)}, + {DHCP_FINE_TIMER_MSECS, HANDLER(dhcp_fine_tmr)}, +#endif /* LWIP_DHCP */ +#if LWIP_AUTOIP + {AUTOIP_TMR_INTERVAL, HANDLER(autoip_tmr)}, +#endif /* LWIP_AUTOIP */ +#if LWIP_IGMP + {IGMP_TMR_INTERVAL, HANDLER(igmp_tmr)}, +#endif /* LWIP_IGMP */ +#endif /* LWIP_IPV4 */ +#if LWIP_DNS + {DNS_TMR_INTERVAL, HANDLER(dns_tmr)}, +#endif /* LWIP_DNS */ +#if LWIP_IPV6 + {ND6_TMR_INTERVAL, HANDLER(nd6_tmr)}, +#if LWIP_IPV6_REASS + {IP6_REASS_TMR_INTERVAL, HANDLER(ip6_reass_tmr)}, +#endif /* LWIP_IPV6_REASS */ +#if LWIP_IPV6_MLD + {MLD6_TMR_INTERVAL, HANDLER(mld6_tmr)}, +#endif /* LWIP_IPV6_MLD */ +#if LWIP_IPV6_DHCP6 + {DHCP6_TIMER_MSECS, HANDLER(dhcp6_tmr)}, +#endif /* LWIP_IPV6_DHCP6 */ +#endif /* LWIP_IPV6 */ +}; +const int lwip_num_cyclic_timers = LWIP_ARRAYSIZE(lwip_cyclic_timers); + +#if LWIP_TIMERS && !LWIP_TIMERS_CUSTOM + +/** The one and only timeout list */ +static struct sys_timeo *next_timeout; + +static u32_t current_timeout_due_time; + +#if LWIP_TESTMODE +struct sys_timeo** +sys_timeouts_get_next_timeout(void) +{ + return &next_timeout; +} +#endif + +#if LWIP_TCP +/** global variable that shows if the tcp timer is currently scheduled or not */ +static int tcpip_tcp_timer_active; + +/** + * Timer callback function that calls tcp_tmr() and reschedules itself. + * + * @param arg unused argument + */ +static void +tcpip_tcp_timer(void *arg) +{ + LWIP_UNUSED_ARG(arg); + + /* call TCP timer handler */ + tcp_tmr(); + /* timer still needed? */ + if (tcp_active_pcbs || tcp_tw_pcbs) { + /* restart timer */ + sys_timeout(TCP_TMR_INTERVAL, tcpip_tcp_timer, NULL); + } else { + /* disable timer */ + tcpip_tcp_timer_active = 0; + } +} + +/** + * Called from TCP_REG when registering a new PCB: + * the reason is to have the TCP timer only running when + * there are active (or time-wait) PCBs. + */ +void +tcp_timer_needed(void) +{ + LWIP_ASSERT_CORE_LOCKED(); + + /* timer is off but needed again? 
*/ + if (!tcpip_tcp_timer_active && (tcp_active_pcbs || tcp_tw_pcbs)) { + /* enable and start timer */ + tcpip_tcp_timer_active = 1; + sys_timeout(TCP_TMR_INTERVAL, tcpip_tcp_timer, NULL); + } +} +#endif /* LWIP_TCP */ + +static void +#if LWIP_DEBUG_TIMERNAMES +sys_timeout_abs(u32_t abs_time, sys_timeout_handler handler, void *arg, const char *handler_name) +#else /* LWIP_DEBUG_TIMERNAMES */ +sys_timeout_abs(u32_t abs_time, sys_timeout_handler handler, void *arg) +#endif +{ + struct sys_timeo *timeout, *t; + + timeout = (struct sys_timeo *)memp_malloc(MEMP_SYS_TIMEOUT); + if (timeout == NULL) { + LWIP_ASSERT("sys_timeout: timeout != NULL, pool MEMP_SYS_TIMEOUT is empty", timeout != NULL); + return; + } + + timeout->next = NULL; + timeout->h = handler; + timeout->arg = arg; + timeout->time = abs_time; + +#if LWIP_DEBUG_TIMERNAMES + timeout->handler_name = handler_name; + LWIP_DEBUGF(TIMERS_DEBUG, ("sys_timeout: %p abs_time=%"U32_F" handler=%s arg=%p\n", + (void *)timeout, abs_time, handler_name, (void *)arg)); +#endif /* LWIP_DEBUG_TIMERNAMES */ + + if (next_timeout == NULL) { + next_timeout = timeout; + return; + } + if (TIME_LESS_THAN(timeout->time, next_timeout->time)) { + timeout->next = next_timeout; + next_timeout = timeout; + } else { + for (t = next_timeout; t != NULL; t = t->next) { + if ((t->next == NULL) || TIME_LESS_THAN(timeout->time, t->next->time)) { + timeout->next = t->next; + t->next = timeout; + break; + } + } + } +} + +/** + * Timer callback function that calls cyclic->handler() and reschedules itself. + * + * @param arg unused argument + */ +#if !LWIP_TESTMODE +static +#endif +void +lwip_cyclic_timer(void *arg) +{ + u32_t now; + u32_t next_timeout_time; + const struct lwip_cyclic_timer *cyclic = (const struct lwip_cyclic_timer *)arg; + +#if LWIP_DEBUG_TIMERNAMES + LWIP_DEBUGF(TIMERS_DEBUG, ("tcpip: %s()\n", cyclic->handler_name)); +#endif + cyclic->handler(); + + now = sys_now(); + next_timeout_time = (u32_t)(current_timeout_due_time + cyclic->interval_ms); /* overflow handled by TIME_LESS_THAN macro */ + if (TIME_LESS_THAN(next_timeout_time, now)) { + /* timer would immediately expire again -> "overload" -> restart without any correction */ +#if LWIP_DEBUG_TIMERNAMES + sys_timeout_abs((u32_t)(now + cyclic->interval_ms), lwip_cyclic_timer, arg, cyclic->handler_name); +#else + sys_timeout_abs((u32_t)(now + cyclic->interval_ms), lwip_cyclic_timer, arg); +#endif + + } else { + /* correct cyclic interval with handler execution delay and sys_check_timeouts jitter */ +#if LWIP_DEBUG_TIMERNAMES + sys_timeout_abs(next_timeout_time, lwip_cyclic_timer, arg, cyclic->handler_name); +#else + sys_timeout_abs(next_timeout_time, lwip_cyclic_timer, arg); +#endif + } +} + +/** Initialize this module */ +void sys_timeouts_init(void) +{ + size_t i; + /* tcp_tmr() at index 0 is started on demand */ + for (i = (LWIP_TCP ? 1 : 0); i < LWIP_ARRAYSIZE(lwip_cyclic_timers); i++) { + /* we have to cast via size_t to get rid of const warning + (this is OK as cyclic_timer() casts back to const* */ + sys_timeout(lwip_cyclic_timers[i].interval_ms, lwip_cyclic_timer, LWIP_CONST_CAST(void *, &lwip_cyclic_timers[i])); + } +} + +/** + * Create a one-shot timer (aka timeout). 
Timeouts are processed in the + * following cases: + * - while waiting for a message using sys_timeouts_mbox_fetch() + * - by calling sys_check_timeouts() (NO_SYS==1 only) + * + * @param msecs time in milliseconds after that the timer should expire + * @param handler callback function to call when msecs have elapsed + * @param arg argument to pass to the callback function + */ +#if LWIP_DEBUG_TIMERNAMES +void +sys_timeout_debug(u32_t msecs, sys_timeout_handler handler, void *arg, const char *handler_name) +#else /* LWIP_DEBUG_TIMERNAMES */ +void +sys_timeout(u32_t msecs, sys_timeout_handler handler, void *arg) +#endif /* LWIP_DEBUG_TIMERNAMES */ +{ + u32_t next_timeout_time; + + LWIP_ASSERT_CORE_LOCKED(); + + LWIP_ASSERT("Timeout time too long, max is LWIP_UINT32_MAX/4 msecs", msecs <= (LWIP_UINT32_MAX / 4)); + + next_timeout_time = (u32_t)(sys_now() + msecs); /* overflow handled by TIME_LESS_THAN macro */ + +#if LWIP_DEBUG_TIMERNAMES + sys_timeout_abs(next_timeout_time, handler, arg, handler_name); +#else + sys_timeout_abs(next_timeout_time, handler, arg); +#endif +} + +/** + * Go through timeout list (for this task only) and remove the first matching + * entry (subsequent entries remain untouched), even though the timeout has not + * triggered yet. + * + * @param handler callback function that would be called by the timeout + * @param arg callback argument that would be passed to handler +*/ +void +sys_untimeout(sys_timeout_handler handler, void *arg) +{ + struct sys_timeo *prev_t, *t; + + LWIP_ASSERT_CORE_LOCKED(); + + if (next_timeout == NULL) { + return; + } + + for (t = next_timeout, prev_t = NULL; t != NULL; prev_t = t, t = t->next) { + if ((t->h == handler) && (t->arg == arg)) { + /* We have a match */ + /* Unlink from previous in list */ + if (prev_t == NULL) { + next_timeout = t->next; + } else { + prev_t->next = t->next; + } + memp_free(MEMP_SYS_TIMEOUT, t); + return; + } + } + return; +} + +/** + * @ingroup lwip_nosys + * Handle timeouts for NO_SYS==1 (i.e. without using + * tcpip_thread/sys_timeouts_mbox_fetch(). Uses sys_now() to call timeout + * handler functions when timeouts expire. + * + * Must be called periodically from your main loop. + */ +void +sys_check_timeouts(void) +{ + u32_t now; + + LWIP_ASSERT_CORE_LOCKED(); + + /* Process only timers expired at the start of the function. */ + now = sys_now(); + + do { + struct sys_timeo *tmptimeout; + sys_timeout_handler handler; + void *arg; + + PBUF_CHECK_FREE_OOSEQ(); + + tmptimeout = next_timeout; + if (tmptimeout == NULL) { + return; + } + + if (TIME_LESS_THAN(now, tmptimeout->time)) { + return; + } + + /* Timeout has expired */ + next_timeout = tmptimeout->next; + handler = tmptimeout->h; + arg = tmptimeout->arg; + current_timeout_due_time = tmptimeout->time; +#if LWIP_DEBUG_TIMERNAMES + if (handler != NULL) { + LWIP_DEBUGF(TIMERS_DEBUG, ("sct calling h=%s t=%"U32_F" arg=%p\n", + tmptimeout->handler_name, sys_now() - tmptimeout->time, arg)); + } +#endif /* LWIP_DEBUG_TIMERNAMES */ + memp_free(MEMP_SYS_TIMEOUT, tmptimeout); + if (handler != NULL) { + handler(arg); + } + LWIP_TCPIP_THREAD_ALIVE(); + + /* Repeat until all expired timers have been called */ + } while (1); +} + +/** Rebase the timeout times to the current time. + * This is necessary if sys_check_timeouts() hasn't been called for a long + * time (e.g. while saving energy) to prevent all timer functions of that + * period being called. 
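As a usage sketch for the one-shot timer API above in a NO_SYS==1 build (with an OS, tcpip_thread drives these timeouts instead; the callback name and the 500 ms period are illustrative):

#include "lwip/timeouts.h"

static void app_poll(void *arg)
{
  (void)arg;
  /* ... periodic application work ... */
  sys_timeout(500, app_poll, NULL);   /* one-shot timer, so re-arm explicitly */
}

void app_start(void)
{
  sys_timeout(500, app_poll, NULL);
  for (;;) {
    sys_check_timeouts();             /* runs expired handlers, including app_poll */
    /* poll the driver input, low-power wait, etc. */
  }
}

sys_timeouts_sleeptime() can be used between iterations to decide how long the loop may sleep before the next timeout is due.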
+ */ +void +sys_restart_timeouts(void) +{ + u32_t now; + u32_t base; + struct sys_timeo *t; + + if (next_timeout == NULL) { + return; + } + + now = sys_now(); + base = next_timeout->time; + + for (t = next_timeout; t != NULL; t = t->next) { + t->time = (t->time - base) + now; + } +} + +/** Return the time left before the next timeout is due. If no timeouts are + * enqueued, returns 0xffffffff + */ +u32_t +sys_timeouts_sleeptime(void) +{ + u32_t now; + + LWIP_ASSERT_CORE_LOCKED(); + + if (next_timeout == NULL) { + return SYS_TIMEOUTS_SLEEPTIME_INFINITE; + } + now = sys_now(); + if (TIME_LESS_THAN(next_timeout->time, now)) { + return 0; + } else { + u32_t ret = (u32_t)(next_timeout->time - now); + LWIP_ASSERT("invalid sleeptime", ret <= LWIP_MAX_TIMEOUT); + return ret; + } +} + +#else /* LWIP_TIMERS && !LWIP_TIMERS_CUSTOM */ +/* Satisfy the TCP code which calls this function */ +void +tcp_timer_needed(void) +{ +} +#endif /* LWIP_TIMERS && !LWIP_TIMERS_CUSTOM */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/udp.c b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/udp.c new file mode 100644 index 0000000..4f45f8d --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/udp.c @@ -0,0 +1,1314 @@ +/** + * @file + * User Datagram Protocol module\n + * The code for the User Datagram Protocol UDP & UDPLite (RFC 3828).\n + * See also @ref udp_raw + * + * @defgroup udp_raw UDP + * @ingroup callbackstyle_api + * User Datagram Protocol module\n + * @see @ref api + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ + +/* @todo Check the use of '(struct udp_pcb).chksum_len_rx'! 
+ */ + +#include "lwip/opt.h" + +#if LWIP_UDP /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/udp.h" +#include "lwip/def.h" +#include "lwip/memp.h" +#include "lwip/inet_chksum.h" +#include "lwip/ip_addr.h" +#include "lwip/ip6.h" +#include "lwip/ip6_addr.h" +#include "lwip/netif.h" +#include "lwip/icmp.h" +#include "lwip/icmp6.h" +#include "lwip/stats.h" +#include "lwip/snmp.h" +#include "lwip/dhcp.h" + +#include + +#ifndef UDP_LOCAL_PORT_RANGE_START +/* From http://www.iana.org/assignments/port-numbers: + "The Dynamic and/or Private Ports are those from 49152 through 65535" */ +#define UDP_LOCAL_PORT_RANGE_START 0xc000 +#define UDP_LOCAL_PORT_RANGE_END 0xffff +#define UDP_ENSURE_LOCAL_PORT_RANGE(port) ((u16_t)(((port) & (u16_t)~UDP_LOCAL_PORT_RANGE_START) + UDP_LOCAL_PORT_RANGE_START)) +#endif + +/* last local UDP port */ +static u16_t udp_port = UDP_LOCAL_PORT_RANGE_START; + +/* The list of UDP PCBs */ +/* exported in udp.h (was static) */ +struct udp_pcb *udp_pcbs; + +/** + * Initialize this module. + */ +void +udp_init(void) +{ +#ifdef LWIP_RAND + udp_port = UDP_ENSURE_LOCAL_PORT_RANGE(LWIP_RAND()); +#endif /* LWIP_RAND */ +} + +/** + * Allocate a new local UDP port. + * + * @return a new (free) local UDP port number + */ +static u16_t +udp_new_port(void) +{ + u16_t n = 0; + struct udp_pcb *pcb; + +again: + if (udp_port++ == UDP_LOCAL_PORT_RANGE_END) { + udp_port = UDP_LOCAL_PORT_RANGE_START; + } + /* Check all PCBs. */ + for (pcb = udp_pcbs; pcb != NULL; pcb = pcb->next) { + if (pcb->local_port == udp_port) { + if (++n > (UDP_LOCAL_PORT_RANGE_END - UDP_LOCAL_PORT_RANGE_START)) { + return 0; + } + goto again; + } + } + return udp_port; +} + +/** Common code to see if the current input packet matches the pcb + * (current input packet is accessed via ip(4/6)_current_* macros) + * + * @param pcb pcb to check + * @param inp network interface on which the datagram was received (only used for IPv4) + * @param broadcast 1 if his is an IPv4 broadcast (global or subnet-only), 0 otherwise (only used for IPv4) + * @return 1 on match, 0 otherwise + */ +static u8_t +udp_input_local_match(struct udp_pcb *pcb, struct netif *inp, u8_t broadcast) +{ + LWIP_UNUSED_ARG(inp); /* in IPv6 only case */ + LWIP_UNUSED_ARG(broadcast); /* in IPv6 only case */ + + LWIP_ASSERT("udp_input_local_match: invalid pcb", pcb != NULL); + LWIP_ASSERT("udp_input_local_match: invalid netif", inp != NULL); + + /* check if PCB is bound to specific netif */ + if ((pcb->netif_idx != NETIF_NO_INDEX) && + (pcb->netif_idx != netif_get_index(ip_data.current_input_netif))) { + return 0; + } + + /* Dual-stack: PCBs listening to any IP type also listen to any IP address */ + if (IP_IS_ANY_TYPE_VAL(pcb->local_ip)) { +#if LWIP_IPV4 && IP_SOF_BROADCAST_RECV + if ((broadcast != 0) && !ip_get_option(pcb, SOF_BROADCAST)) { + return 0; + } +#endif /* LWIP_IPV4 && IP_SOF_BROADCAST_RECV */ + return 1; + } + + /* Only need to check PCB if incoming IP version matches PCB IP version */ + if (IP_ADDR_PCB_VERSION_MATCH_EXACT(pcb, ip_current_dest_addr())) { +#if LWIP_IPV4 + /* Special case: IPv4 broadcast: all or broadcasts in my subnet + * Note: broadcast variable can only be 1 if it is an IPv4 broadcast */ + if (broadcast != 0) { +#if IP_SOF_BROADCAST_RECV + if (ip_get_option(pcb, SOF_BROADCAST)) +#endif /* IP_SOF_BROADCAST_RECV */ + { + if (ip4_addr_isany(ip_2_ip4(&pcb->local_ip)) || + ((ip4_current_dest_addr()->addr == IPADDR_BROADCAST)) || + ip4_addr_netcmp(ip_2_ip4(&pcb->local_ip), ip4_current_dest_addr(), 
netif_ip4_netmask(inp))) { + return 1; + } + } + } else +#endif /* LWIP_IPV4 */ + /* Handle IPv4 and IPv6: all or exact match */ + if (ip_addr_isany(&pcb->local_ip) || ip_addr_cmp(&pcb->local_ip, ip_current_dest_addr())) { + return 1; + } + } + + return 0; +} + +/** + * Process an incoming UDP datagram. + * + * Given an incoming UDP datagram (as a chain of pbufs) this function + * finds a corresponding UDP PCB and hands over the pbuf to the pcbs + * recv function. If no pcb is found or the datagram is incorrect, the + * pbuf is freed. + * + * @param p pbuf to be demultiplexed to a UDP PCB (p->payload pointing to the UDP header) + * @param inp network interface on which the datagram was received. + * + */ +void +udp_input(struct pbuf *p, struct netif *inp) +{ + struct udp_hdr *udphdr; + struct udp_pcb *pcb, *prev; + struct udp_pcb *uncon_pcb; + u16_t src, dest; + u8_t broadcast; + u8_t for_us = 0; + + LWIP_UNUSED_ARG(inp); + + LWIP_ASSERT_CORE_LOCKED(); + + LWIP_ASSERT("udp_input: invalid pbuf", p != NULL); + LWIP_ASSERT("udp_input: invalid netif", inp != NULL); + + PERF_START; + + UDP_STATS_INC(udp.recv); + + /* Check minimum length (UDP header) */ + if (p->len < UDP_HLEN) { + /* drop short packets */ + LWIP_DEBUGF(UDP_DEBUG, + ("udp_input: short UDP datagram (%"U16_F" bytes) discarded\n", p->tot_len)); + UDP_STATS_INC(udp.lenerr); + UDP_STATS_INC(udp.drop); + MIB2_STATS_INC(mib2.udpinerrors); + pbuf_free(p); + goto end; + } + + udphdr = (struct udp_hdr *)p->payload; + + /* is broadcast packet ? */ + broadcast = ip_addr_isbroadcast(ip_current_dest_addr(), ip_current_netif()); + + LWIP_DEBUGF(UDP_DEBUG, ("udp_input: received datagram of length %"U16_F"\n", p->tot_len)); + + /* convert src and dest ports to host byte order */ + src = lwip_ntohs(udphdr->src); + dest = lwip_ntohs(udphdr->dest); + + udp_debug_print(udphdr); + + /* print the UDP source and destination */ + LWIP_DEBUGF(UDP_DEBUG, ("udp (")); + ip_addr_debug_print_val(UDP_DEBUG, *ip_current_dest_addr()); + LWIP_DEBUGF(UDP_DEBUG, (", %"U16_F") <-- (", lwip_ntohs(udphdr->dest))); + ip_addr_debug_print_val(UDP_DEBUG, *ip_current_src_addr()); + LWIP_DEBUGF(UDP_DEBUG, (", %"U16_F")\n", lwip_ntohs(udphdr->src))); + + pcb = NULL; + prev = NULL; + uncon_pcb = NULL; + /* Iterate through the UDP pcb list for a matching pcb. + * 'Perfect match' pcbs (connected to the remote port & ip address) are + * preferred. If no perfect match is found, the first unconnected pcb that + * matches the local port and ip address gets the datagram. 
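For reference, a minimal receiver that exercises this demultiplexing path via the raw API; the port number and callback name are illustrative:

#include "lwip/udp.h"
#include "lwip/pbuf.h"

static void echo_recv(void *arg, struct udp_pcb *upcb, struct pbuf *p,
                      const ip_addr_t *addr, u16_t port)
{
  (void)arg;
  if (p != NULL) {
    udp_sendto(upcb, p, addr, port);  /* echo the datagram back to its source */
    pbuf_free(p);                     /* the recv callback owns p and must free it */
  }
}

void udp_echo_init(void)
{
  struct udp_pcb *upcb = udp_new();
  if (upcb != NULL) {
    if (udp_bind(upcb, IP_ADDR_ANY, 7) == ERR_OK) {
      udp_recv(upcb, echo_recv, NULL);
    } else {
      udp_remove(upcb);
    }
  }
}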
*/ + for (pcb = udp_pcbs; pcb != NULL; pcb = pcb->next) { + /* print the PCB local and remote address */ + LWIP_DEBUGF(UDP_DEBUG, ("pcb (")); + ip_addr_debug_print_val(UDP_DEBUG, pcb->local_ip); + LWIP_DEBUGF(UDP_DEBUG, (", %"U16_F") <-- (", pcb->local_port)); + ip_addr_debug_print_val(UDP_DEBUG, pcb->remote_ip); + LWIP_DEBUGF(UDP_DEBUG, (", %"U16_F")\n", pcb->remote_port)); + + /* compare PCB local addr+port to UDP destination addr+port */ + if ((pcb->local_port == dest) && + (udp_input_local_match(pcb, inp, broadcast) != 0)) { + if ((pcb->flags & UDP_FLAGS_CONNECTED) == 0) { + if (uncon_pcb == NULL) { + /* the first unconnected matching PCB */ + uncon_pcb = pcb; +#if LWIP_IPV4 + } else if (broadcast && ip4_current_dest_addr()->addr == IPADDR_BROADCAST) { + /* global broadcast address (only valid for IPv4; match was checked before) */ + if (!IP_IS_V4_VAL(uncon_pcb->local_ip) || !ip4_addr_cmp(ip_2_ip4(&uncon_pcb->local_ip), netif_ip4_addr(inp))) { + /* uncon_pcb does not match the input netif, check this pcb */ + if (IP_IS_V4_VAL(pcb->local_ip) && ip4_addr_cmp(ip_2_ip4(&pcb->local_ip), netif_ip4_addr(inp))) { + /* better match */ + uncon_pcb = pcb; + } + } +#endif /* LWIP_IPV4 */ + } +#if SO_REUSE + else if (!ip_addr_isany(&pcb->local_ip)) { + /* prefer specific IPs over catch-all */ + uncon_pcb = pcb; + } +#endif /* SO_REUSE */ + } + + /* compare PCB remote addr+port to UDP source addr+port */ + if ((pcb->remote_port == src) && + (ip_addr_isany_val(pcb->remote_ip) || + ip_addr_cmp(&pcb->remote_ip, ip_current_src_addr()))) { + /* the first fully matching PCB */ + if (prev != NULL) { + /* move the pcb to the front of udp_pcbs so that is + found faster next time */ + prev->next = pcb->next; + pcb->next = udp_pcbs; + udp_pcbs = pcb; + } else { + UDP_STATS_INC(udp.cachehit); + } + break; + } + } + + prev = pcb; + } + /* no fully matching pcb found? then look for an unconnected pcb */ + if (pcb == NULL) { + pcb = uncon_pcb; + } + + /* Check checksum if this is a match or if it was directed at us. */ + if (pcb != NULL) { + for_us = 1; + } else { +#if LWIP_IPV6 + if (ip_current_is_v6()) { + for_us = netif_get_ip6_addr_match(inp, ip6_current_dest_addr()) >= 0; + } +#endif /* LWIP_IPV6 */ +#if LWIP_IPV4 + if (!ip_current_is_v6()) { + for_us = ip4_addr_cmp(netif_ip4_addr(inp), ip4_current_dest_addr()); + } +#endif /* LWIP_IPV4 */ + } + + if (for_us) { + LWIP_DEBUGF(UDP_DEBUG | LWIP_DBG_TRACE, ("udp_input: calculating checksum\n")); +#if CHECKSUM_CHECK_UDP + IF__NETIF_CHECKSUM_ENABLED(inp, NETIF_CHECKSUM_CHECK_UDP) { +#if LWIP_UDPLITE + if (ip_current_header_proto() == IP_PROTO_UDPLITE) { + /* Do the UDP Lite checksum */ + u16_t chklen = lwip_ntohs(udphdr->len); + if (chklen < sizeof(struct udp_hdr)) { + if (chklen == 0) { + /* For UDP-Lite, checksum length of 0 means checksum + over the complete packet (See RFC 3828 chap. 3.1) */ + chklen = p->tot_len; + } else { + /* At least the UDP-Lite header must be covered by the + checksum! (Again, see RFC 3828 chap. 3.1) */ + goto chkerr; + } + } + if (ip_chksum_pseudo_partial(p, IP_PROTO_UDPLITE, + p->tot_len, chklen, + ip_current_src_addr(), ip_current_dest_addr()) != 0) { + goto chkerr; + } + } else +#endif /* LWIP_UDPLITE */ + { + if (udphdr->chksum != 0) { + if (ip_chksum_pseudo(p, IP_PROTO_UDP, p->tot_len, + ip_current_src_addr(), + ip_current_dest_addr()) != 0) { + goto chkerr; + } + } + } + } +#endif /* CHECKSUM_CHECK_UDP */ + if (pbuf_remove_header(p, UDP_HLEN)) { + /* Can we cope with this failing? 
Just assert for now */ + LWIP_ASSERT("pbuf_remove_header failed\n", 0); + UDP_STATS_INC(udp.drop); + MIB2_STATS_INC(mib2.udpinerrors); + pbuf_free(p); + goto end; + } + + if (pcb != NULL) { + MIB2_STATS_INC(mib2.udpindatagrams); +#if SO_REUSE && SO_REUSE_RXTOALL + if (ip_get_option(pcb, SOF_REUSEADDR) && + (broadcast || ip_addr_ismulticast(ip_current_dest_addr()))) { + /* pass broadcast- or multicast packets to all multicast pcbs + if SOF_REUSEADDR is set on the first match */ + struct udp_pcb *mpcb; + for (mpcb = udp_pcbs; mpcb != NULL; mpcb = mpcb->next) { + if (mpcb != pcb) { + /* compare PCB local addr+port to UDP destination addr+port */ + if ((mpcb->local_port == dest) && + (udp_input_local_match(mpcb, inp, broadcast) != 0)) { + /* pass a copy of the packet to all local matches */ + if (mpcb->recv != NULL) { + struct pbuf *q; + q = pbuf_clone(PBUF_RAW, PBUF_POOL, p); + if (q != NULL) { + mpcb->recv(mpcb->recv_arg, mpcb, q, ip_current_src_addr(), src); + } + } + } + } + } + } +#endif /* SO_REUSE && SO_REUSE_RXTOALL */ + /* callback */ + if (pcb->recv != NULL) { + /* now the recv function is responsible for freeing p */ + pcb->recv(pcb->recv_arg, pcb, p, ip_current_src_addr(), src); + } else { + /* no recv function registered? then we have to free the pbuf! */ + pbuf_free(p); + goto end; + } + } else { + LWIP_DEBUGF(UDP_DEBUG | LWIP_DBG_TRACE, ("udp_input: not for us.\n")); + +#if LWIP_ICMP || LWIP_ICMP6 + /* No match was found, send ICMP destination port unreachable unless + destination address was broadcast/multicast. */ + if (!broadcast && !ip_addr_ismulticast(ip_current_dest_addr())) { + /* move payload pointer back to ip header */ + pbuf_header_force(p, (s16_t)(ip_current_header_tot_len() + UDP_HLEN)); + icmp_port_unreach(ip_current_is_v6(), p); + } +#endif /* LWIP_ICMP || LWIP_ICMP6 */ + UDP_STATS_INC(udp.proterr); + UDP_STATS_INC(udp.drop); + MIB2_STATS_INC(mib2.udpnoports); + pbuf_free(p); + } + } else { + pbuf_free(p); + } +end: + PERF_STOP("udp_input"); + return; +#if CHECKSUM_CHECK_UDP +chkerr: + LWIP_DEBUGF(UDP_DEBUG | LWIP_DBG_LEVEL_SERIOUS, + ("udp_input: UDP (or UDP Lite) datagram discarded due to failing checksum\n")); + UDP_STATS_INC(udp.chkerr); + UDP_STATS_INC(udp.drop); + MIB2_STATS_INC(mib2.udpinerrors); + pbuf_free(p); + PERF_STOP("udp_input"); +#endif /* CHECKSUM_CHECK_UDP */ +} + +/** + * @ingroup udp_raw + * Sends the pbuf p using UDP. The pbuf is not deallocated. + * + * + * @param pcb UDP PCB used to send the data. + * @param p chain of pbuf's to be sent. + * + * The datagram will be sent to the current remote_ip & remote_port + * stored in pcb. If the pcb is not bound to a port, it will + * automatically be bound to a random port. + * + * @return lwIP error code. + * - ERR_OK. Successful. No error occurred. + * - ERR_MEM. Out of memory. + * - ERR_RTE. Could not find route to destination address. + * - ERR_VAL. No PCB or PCB is dual-stack + * - More errors could be returned by lower protocol layers. 
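A matching send-side sketch using a connected pcb; the destination address, port, and payload are illustrative:

#include <string.h>
#include "lwip/udp.h"
#include "lwip/pbuf.h"

err_t send_hello(struct udp_pcb *upcb)
{
  const char msg[] = "hello";
  ip_addr_t dst;
  struct pbuf *p;
  err_t err;

  ipaddr_aton("192.168.1.10", &dst);
  err = udp_connect(upcb, &dst, 4242);   /* binds to an ephemeral port if not yet bound */
  if (err != ERR_OK) {
    return err;
  }
  p = pbuf_alloc(PBUF_TRANSPORT, sizeof(msg) - 1, PBUF_RAM);
  if (p == NULL) {
    return ERR_MEM;
  }
  pbuf_take(p, msg, sizeof(msg) - 1);
  err = udp_send(upcb, p);               /* udp_send() does not free p */
  pbuf_free(p);
  return err;
}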
+ * + * @see udp_disconnect() udp_sendto() + */ +err_t +udp_send(struct udp_pcb *pcb, struct pbuf *p) +{ + LWIP_ERROR("udp_send: invalid pcb", pcb != NULL, return ERR_ARG); + LWIP_ERROR("udp_send: invalid pbuf", p != NULL, return ERR_ARG); + + if (IP_IS_ANY_TYPE_VAL(pcb->remote_ip)) { + return ERR_VAL; + } + + /* send to the packet using remote ip and port stored in the pcb */ + return udp_sendto(pcb, p, &pcb->remote_ip, pcb->remote_port); +} + +#if LWIP_CHECKSUM_ON_COPY && CHECKSUM_GEN_UDP +/** @ingroup udp_raw + * Same as udp_send() but with checksum + */ +err_t +udp_send_chksum(struct udp_pcb *pcb, struct pbuf *p, + u8_t have_chksum, u16_t chksum) +{ + LWIP_ERROR("udp_send_chksum: invalid pcb", pcb != NULL, return ERR_ARG); + LWIP_ERROR("udp_send_chksum: invalid pbuf", p != NULL, return ERR_ARG); + + if (IP_IS_ANY_TYPE_VAL(pcb->remote_ip)) { + return ERR_VAL; + } + + /* send to the packet using remote ip and port stored in the pcb */ + return udp_sendto_chksum(pcb, p, &pcb->remote_ip, pcb->remote_port, + have_chksum, chksum); +} +#endif /* LWIP_CHECKSUM_ON_COPY && CHECKSUM_GEN_UDP */ + +/** + * @ingroup udp_raw + * Send data to a specified address using UDP. + * + * @param pcb UDP PCB used to send the data. + * @param p chain of pbuf's to be sent. + * @param dst_ip Destination IP address. + * @param dst_port Destination UDP port. + * + * dst_ip & dst_port are expected to be in the same byte order as in the pcb. + * + * If the PCB already has a remote address association, it will + * be restored after the data is sent. + * + * @return lwIP error code (@see udp_send for possible error codes) + * + * @see udp_disconnect() udp_send() + */ +err_t +udp_sendto(struct udp_pcb *pcb, struct pbuf *p, + const ip_addr_t *dst_ip, u16_t dst_port) +{ +#if LWIP_CHECKSUM_ON_COPY && CHECKSUM_GEN_UDP + return udp_sendto_chksum(pcb, p, dst_ip, dst_port, 0, 0); +} + +/** @ingroup udp_raw + * Same as udp_sendto(), but with checksum */ +err_t +udp_sendto_chksum(struct udp_pcb *pcb, struct pbuf *p, const ip_addr_t *dst_ip, + u16_t dst_port, u8_t have_chksum, u16_t chksum) +{ +#endif /* LWIP_CHECKSUM_ON_COPY && CHECKSUM_GEN_UDP */ + struct netif *netif; + + LWIP_ERROR("udp_sendto: invalid pcb", pcb != NULL, return ERR_ARG); + LWIP_ERROR("udp_sendto: invalid pbuf", p != NULL, return ERR_ARG); + LWIP_ERROR("udp_sendto: invalid dst_ip", dst_ip != NULL, return ERR_ARG); + + if (!IP_ADDR_PCB_VERSION_MATCH(pcb, dst_ip)) { + return ERR_VAL; + } + + LWIP_DEBUGF(UDP_DEBUG | LWIP_DBG_TRACE, ("udp_send\n")); + + if (pcb->netif_idx != NETIF_NO_INDEX) { + netif = netif_get_by_index(pcb->netif_idx); + } else { +#if LWIP_MULTICAST_TX_OPTIONS + netif = NULL; + if (ip_addr_ismulticast(dst_ip)) { + /* For IPv6, the interface to use for packets with a multicast destination + * is specified using an interface index. The same approach may be used for + * IPv4 as well, in which case it overrides the IPv4 multicast override + * address below. Here we have to look up the netif by going through the + * list, but by doing so we skip a route lookup. If the interface index has + * gone stale, we fall through and do the regular route lookup after all. */ + if (pcb->mcast_ifindex != NETIF_NO_INDEX) { + netif = netif_get_by_index(pcb->mcast_ifindex); + } +#if LWIP_IPV4 + else +#if LWIP_IPV6 + if (IP_IS_V4(dst_ip)) +#endif /* LWIP_IPV6 */ + { + /* IPv4 does not use source-based routing by default, so we use an + administratively selected interface for multicast by default. 
+ However, this can be overridden by setting an interface address + in pcb->mcast_ip4 that is used for routing. If this routing lookup + fails, we try regular routing as though no override was set. */ + if (!ip4_addr_isany_val(pcb->mcast_ip4) && + !ip4_addr_cmp(&pcb->mcast_ip4, IP4_ADDR_BROADCAST)) { + netif = ip4_route_src(ip_2_ip4(&pcb->local_ip), &pcb->mcast_ip4); + } + } +#endif /* LWIP_IPV4 */ + } + + if (netif == NULL) +#endif /* LWIP_MULTICAST_TX_OPTIONS */ + { + /* find the outgoing network interface for this packet */ + netif = ip_route(&pcb->local_ip, dst_ip); + } + } + + /* no outgoing network interface could be found? */ + if (netif == NULL) { + LWIP_DEBUGF(UDP_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("udp_send: No route to ")); + ip_addr_debug_print(UDP_DEBUG | LWIP_DBG_LEVEL_SERIOUS, dst_ip); + LWIP_DEBUGF(UDP_DEBUG, ("\n")); + UDP_STATS_INC(udp.rterr); + return ERR_RTE; + } +#if LWIP_CHECKSUM_ON_COPY && CHECKSUM_GEN_UDP + return udp_sendto_if_chksum(pcb, p, dst_ip, dst_port, netif, have_chksum, chksum); +#else /* LWIP_CHECKSUM_ON_COPY && CHECKSUM_GEN_UDP */ + return udp_sendto_if(pcb, p, dst_ip, dst_port, netif); +#endif /* LWIP_CHECKSUM_ON_COPY && CHECKSUM_GEN_UDP */ +} + +/** + * @ingroup udp_raw + * Send data to a specified address using UDP. + * The netif used for sending can be specified. + * + * This function exists mainly for DHCP, to be able to send UDP packets + * on a netif that is still down. + * + * @param pcb UDP PCB used to send the data. + * @param p chain of pbuf's to be sent. + * @param dst_ip Destination IP address. + * @param dst_port Destination UDP port. + * @param netif the netif used for sending. + * + * dst_ip & dst_port are expected to be in the same byte order as in the pcb. + * + * @return lwIP error code (@see udp_send for possible error codes) + * + * @see udp_disconnect() udp_send() + */ +err_t +udp_sendto_if(struct udp_pcb *pcb, struct pbuf *p, + const ip_addr_t *dst_ip, u16_t dst_port, struct netif *netif) +{ +#if LWIP_CHECKSUM_ON_COPY && CHECKSUM_GEN_UDP + return udp_sendto_if_chksum(pcb, p, dst_ip, dst_port, netif, 0, 0); +} + +/** Same as udp_sendto_if(), but with checksum */ +err_t +udp_sendto_if_chksum(struct udp_pcb *pcb, struct pbuf *p, const ip_addr_t *dst_ip, + u16_t dst_port, struct netif *netif, u8_t have_chksum, + u16_t chksum) +{ +#endif /* LWIP_CHECKSUM_ON_COPY && CHECKSUM_GEN_UDP */ + const ip_addr_t *src_ip; + + LWIP_ERROR("udp_sendto_if: invalid pcb", pcb != NULL, return ERR_ARG); + LWIP_ERROR("udp_sendto_if: invalid pbuf", p != NULL, return ERR_ARG); + LWIP_ERROR("udp_sendto_if: invalid dst_ip", dst_ip != NULL, return ERR_ARG); + LWIP_ERROR("udp_sendto_if: invalid netif", netif != NULL, return ERR_ARG); + + if (!IP_ADDR_PCB_VERSION_MATCH(pcb, dst_ip)) { + return ERR_VAL; + } + + /* PCB local address is IP_ANY_ADDR or multicast? */ +#if LWIP_IPV6 + if (IP_IS_V6(dst_ip)) { + if (ip6_addr_isany(ip_2_ip6(&pcb->local_ip)) || + ip6_addr_ismulticast(ip_2_ip6(&pcb->local_ip))) { + src_ip = ip6_select_source_address(netif, ip_2_ip6(dst_ip)); + if (src_ip == NULL) { + /* No suitable source address was found. */ + return ERR_RTE; + } + } else { + /* use UDP PCB local IPv6 address as source address, if still valid. */ + if (netif_get_ip6_addr_match(netif, ip_2_ip6(&pcb->local_ip)) < 0) { + /* Address isn't valid anymore. 
*/ + return ERR_RTE; + } + src_ip = &pcb->local_ip; + } + } +#endif /* LWIP_IPV6 */ +#if LWIP_IPV4 && LWIP_IPV6 + else +#endif /* LWIP_IPV4 && LWIP_IPV6 */ +#if LWIP_IPV4 + if (ip4_addr_isany(ip_2_ip4(&pcb->local_ip)) || + ip4_addr_ismulticast(ip_2_ip4(&pcb->local_ip))) { + /* if the local_ip is any or multicast + * use the outgoing network interface IP address as source address */ + src_ip = netif_ip_addr4(netif); + } else { + /* check if UDP PCB local IP address is correct + * this could be an old address if netif->ip_addr has changed */ + if (!ip4_addr_cmp(ip_2_ip4(&(pcb->local_ip)), netif_ip4_addr(netif))) { + /* local_ip doesn't match, drop the packet */ + return ERR_RTE; + } + /* use UDP PCB local IP address as source address */ + src_ip = &pcb->local_ip; + } +#endif /* LWIP_IPV4 */ +#if LWIP_CHECKSUM_ON_COPY && CHECKSUM_GEN_UDP + return udp_sendto_if_src_chksum(pcb, p, dst_ip, dst_port, netif, have_chksum, chksum, src_ip); +#else /* LWIP_CHECKSUM_ON_COPY && CHECKSUM_GEN_UDP */ + return udp_sendto_if_src(pcb, p, dst_ip, dst_port, netif, src_ip); +#endif /* LWIP_CHECKSUM_ON_COPY && CHECKSUM_GEN_UDP */ +} + +/** @ingroup udp_raw + * Same as @ref udp_sendto_if, but with source address */ +err_t +udp_sendto_if_src(struct udp_pcb *pcb, struct pbuf *p, + const ip_addr_t *dst_ip, u16_t dst_port, struct netif *netif, const ip_addr_t *src_ip) +{ +#if LWIP_CHECKSUM_ON_COPY && CHECKSUM_GEN_UDP + return udp_sendto_if_src_chksum(pcb, p, dst_ip, dst_port, netif, 0, 0, src_ip); +} + +/** Same as udp_sendto_if_src(), but with checksum */ +err_t +udp_sendto_if_src_chksum(struct udp_pcb *pcb, struct pbuf *p, const ip_addr_t *dst_ip, + u16_t dst_port, struct netif *netif, u8_t have_chksum, + u16_t chksum, const ip_addr_t *src_ip) +{ +#endif /* LWIP_CHECKSUM_ON_COPY && CHECKSUM_GEN_UDP */ + struct udp_hdr *udphdr; + err_t err; + struct pbuf *q; /* q will be sent down the stack */ + u8_t ip_proto; + u8_t ttl; + + LWIP_ASSERT_CORE_LOCKED(); + + LWIP_ERROR("udp_sendto_if_src: invalid pcb", pcb != NULL, return ERR_ARG); + LWIP_ERROR("udp_sendto_if_src: invalid pbuf", p != NULL, return ERR_ARG); + LWIP_ERROR("udp_sendto_if_src: invalid dst_ip", dst_ip != NULL, return ERR_ARG); + LWIP_ERROR("udp_sendto_if_src: invalid src_ip", src_ip != NULL, return ERR_ARG); + LWIP_ERROR("udp_sendto_if_src: invalid netif", netif != NULL, return ERR_ARG); + + if (!IP_ADDR_PCB_VERSION_MATCH(pcb, src_ip) || + !IP_ADDR_PCB_VERSION_MATCH(pcb, dst_ip)) { + return ERR_VAL; + } + +#if LWIP_IPV4 && IP_SOF_BROADCAST + /* broadcast filter? */ + if (!ip_get_option(pcb, SOF_BROADCAST) && +#if LWIP_IPV6 + IP_IS_V4(dst_ip) && +#endif /* LWIP_IPV6 */ + ip_addr_isbroadcast(dst_ip, netif)) { + LWIP_DEBUGF(UDP_DEBUG | LWIP_DBG_LEVEL_SERIOUS, + ("udp_sendto_if: SOF_BROADCAST not enabled on pcb %p\n", (void *)pcb)); + return ERR_VAL; + } +#endif /* LWIP_IPV4 && IP_SOF_BROADCAST */ + + /* if the PCB is not yet bound to a port, bind it here */ + if (pcb->local_port == 0) { + LWIP_DEBUGF(UDP_DEBUG | LWIP_DBG_TRACE, ("udp_send: not yet bound to a port, binding now\n")); + err = udp_bind(pcb, &pcb->local_ip, pcb->local_port); + if (err != ERR_OK) { + LWIP_DEBUGF(UDP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_SERIOUS, ("udp_send: forced port bind failed\n")); + return err; + } + } + + /* packet too large to add a UDP header without causing an overflow? */ + if ((u16_t)(p->tot_len + UDP_HLEN) < p->tot_len) { + return ERR_MEM; + } + /* not enough space to add an UDP header to first pbuf in given p chain? 
*/ + if (pbuf_add_header(p, UDP_HLEN)) { + /* allocate header in a separate new pbuf */ + q = pbuf_alloc(PBUF_IP, UDP_HLEN, PBUF_RAM); + /* new header pbuf could not be allocated? */ + if (q == NULL) { + LWIP_DEBUGF(UDP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_SERIOUS, ("udp_send: could not allocate header\n")); + return ERR_MEM; + } + if (p->tot_len != 0) { + /* chain header q in front of given pbuf p (only if p contains data) */ + pbuf_chain(q, p); + } + /* first pbuf q points to header pbuf */ + LWIP_DEBUGF(UDP_DEBUG, + ("udp_send: added header pbuf %p before given pbuf %p\n", (void *)q, (void *)p)); + } else { + /* adding space for header within p succeeded */ + /* first pbuf q equals given pbuf */ + q = p; + LWIP_DEBUGF(UDP_DEBUG, ("udp_send: added header in given pbuf %p\n", (void *)p)); + } + LWIP_ASSERT("check that first pbuf can hold struct udp_hdr", + (q->len >= sizeof(struct udp_hdr))); + /* q now represents the packet to be sent */ + udphdr = (struct udp_hdr *)q->payload; + udphdr->src = lwip_htons(pcb->local_port); + udphdr->dest = lwip_htons(dst_port); + /* in UDP, 0 checksum means 'no checksum' */ + udphdr->chksum = 0x0000; + + /* Multicast Loop? */ +#if LWIP_MULTICAST_TX_OPTIONS + if (((pcb->flags & UDP_FLAGS_MULTICAST_LOOP) != 0) && ip_addr_ismulticast(dst_ip)) { + q->flags |= PBUF_FLAG_MCASTLOOP; + } +#endif /* LWIP_MULTICAST_TX_OPTIONS */ + + LWIP_DEBUGF(UDP_DEBUG, ("udp_send: sending datagram of length %"U16_F"\n", q->tot_len)); + +#if LWIP_UDPLITE + /* UDP Lite protocol? */ + if (pcb->flags & UDP_FLAGS_UDPLITE) { + u16_t chklen, chklen_hdr; + LWIP_DEBUGF(UDP_DEBUG, ("udp_send: UDP LITE packet length %"U16_F"\n", q->tot_len)); + /* set UDP message length in UDP header */ + chklen_hdr = chklen = pcb->chksum_len_tx; + if ((chklen < sizeof(struct udp_hdr)) || (chklen > q->tot_len)) { + if (chklen != 0) { + LWIP_DEBUGF(UDP_DEBUG, ("udp_send: UDP LITE pcb->chksum_len is illegal: %"U16_F"\n", chklen)); + } + /* For UDP-Lite, checksum length of 0 means checksum + over the complete packet. (See RFC 3828 chap. 3.1) + At least the UDP-Lite header must be covered by the + checksum, therefore, if chksum_len has an illegal + value, we generate the checksum over the complete + packet to be safe. */ + chklen_hdr = 0; + chklen = q->tot_len; + } + udphdr->len = lwip_htons(chklen_hdr); + /* calculate checksum */ +#if CHECKSUM_GEN_UDP + IF__NETIF_CHECKSUM_ENABLED(netif, NETIF_CHECKSUM_GEN_UDP) { +#if LWIP_CHECKSUM_ON_COPY + if (have_chksum) { + chklen = UDP_HLEN; + } +#endif /* LWIP_CHECKSUM_ON_COPY */ + udphdr->chksum = ip_chksum_pseudo_partial(q, IP_PROTO_UDPLITE, + q->tot_len, chklen, src_ip, dst_ip); +#if LWIP_CHECKSUM_ON_COPY + if (have_chksum) { + u32_t acc; + acc = udphdr->chksum + (u16_t)~(chksum); + udphdr->chksum = FOLD_U32T(acc); + } +#endif /* LWIP_CHECKSUM_ON_COPY */ + + /* chksum zero must become 0xffff, as zero means 'no checksum' */ + if (udphdr->chksum == 0x0000) { + udphdr->chksum = 0xffff; + } + } +#endif /* CHECKSUM_GEN_UDP */ + + ip_proto = IP_PROTO_UDPLITE; + } else +#endif /* LWIP_UDPLITE */ + { /* UDP */ + LWIP_DEBUGF(UDP_DEBUG, ("udp_send: UDP packet length %"U16_F"\n", q->tot_len)); + udphdr->len = lwip_htons(q->tot_len); + /* calculate checksum */ +#if CHECKSUM_GEN_UDP + IF__NETIF_CHECKSUM_ENABLED(netif, NETIF_CHECKSUM_GEN_UDP) { + /* Checksum is mandatory over IPv6. 
*/ + if (IP_IS_V6(dst_ip) || (pcb->flags & UDP_FLAGS_NOCHKSUM) == 0) { + u16_t udpchksum; +#if LWIP_CHECKSUM_ON_COPY + if (have_chksum) { + u32_t acc; + udpchksum = ip_chksum_pseudo_partial(q, IP_PROTO_UDP, + q->tot_len, UDP_HLEN, src_ip, dst_ip); + acc = udpchksum + (u16_t)~(chksum); + udpchksum = FOLD_U32T(acc); + } else +#endif /* LWIP_CHECKSUM_ON_COPY */ + { + udpchksum = ip_chksum_pseudo(q, IP_PROTO_UDP, q->tot_len, + src_ip, dst_ip); + } + + /* chksum zero must become 0xffff, as zero means 'no checksum' */ + if (udpchksum == 0x0000) { + udpchksum = 0xffff; + } + udphdr->chksum = udpchksum; + } + } +#endif /* CHECKSUM_GEN_UDP */ + ip_proto = IP_PROTO_UDP; + } + + /* Determine TTL to use */ +#if LWIP_MULTICAST_TX_OPTIONS + ttl = (ip_addr_ismulticast(dst_ip) ? udp_get_multicast_ttl(pcb) : pcb->ttl); +#else /* LWIP_MULTICAST_TX_OPTIONS */ + ttl = pcb->ttl; +#endif /* LWIP_MULTICAST_TX_OPTIONS */ + + LWIP_DEBUGF(UDP_DEBUG, ("udp_send: UDP checksum 0x%04"X16_F"\n", udphdr->chksum)); + LWIP_DEBUGF(UDP_DEBUG, ("udp_send: ip_output_if (,,,,0x%02"X16_F",)\n", (u16_t)ip_proto)); + /* output to IP */ + NETIF_SET_HINTS(netif, &(pcb->netif_hints)); + err = ip_output_if_src(q, src_ip, dst_ip, ttl, pcb->tos, ip_proto, netif); + NETIF_RESET_HINTS(netif); + + /* @todo: must this be increased even if error occurred? */ + MIB2_STATS_INC(mib2.udpoutdatagrams); + + /* did we chain a separate header pbuf earlier? */ + if (q != p) { + /* free the header pbuf */ + pbuf_free(q); + q = NULL; + /* p is still referenced by the caller, and will live on */ + } + + UDP_STATS_INC(udp.xmit); + return err; +} + +/** + * @ingroup udp_raw + * Bind an UDP PCB. + * + * @param pcb UDP PCB to be bound with a local address ipaddr and port. + * @param ipaddr local IP address to bind with. Use IP_ANY_TYPE to + * bind to all local interfaces. + * @param port local UDP port to bind with. Use 0 to automatically bind + * to a random port between UDP_LOCAL_PORT_RANGE_START and + * UDP_LOCAL_PORT_RANGE_END. + * + * ipaddr & port are expected to be in the same byte order as in the pcb. + * + * @return lwIP error code. + * - ERR_OK. Successful. No error occurred. + * - ERR_USE. The specified ipaddr and port are already bound to by + * another UDP PCB. + * + * @see udp_disconnect() + */ +err_t +udp_bind(struct udp_pcb *pcb, const ip_addr_t *ipaddr, u16_t port) +{ + struct udp_pcb *ipcb; + u8_t rebind; +#if LWIP_IPV6 && LWIP_IPV6_SCOPES + ip_addr_t zoned_ipaddr; +#endif /* LWIP_IPV6 && LWIP_IPV6_SCOPES */ + + LWIP_ASSERT_CORE_LOCKED(); + +#if LWIP_IPV4 + /* Don't propagate NULL pointer (IPv4 ANY) to subsequent functions */ + if (ipaddr == NULL) { + ipaddr = IP4_ADDR_ANY; + } +#else /* LWIP_IPV4 */ + LWIP_ERROR("udp_bind: invalid ipaddr", ipaddr != NULL, return ERR_ARG); +#endif /* LWIP_IPV4 */ + + LWIP_ERROR("udp_bind: invalid pcb", pcb != NULL, return ERR_ARG); + + LWIP_DEBUGF(UDP_DEBUG | LWIP_DBG_TRACE, ("udp_bind(ipaddr = ")); + ip_addr_debug_print(UDP_DEBUG | LWIP_DBG_TRACE, ipaddr); + LWIP_DEBUGF(UDP_DEBUG | LWIP_DBG_TRACE, (", port = %"U16_F")\n", port)); + + rebind = 0; + /* Check for double bind and rebind of the same pcb */ + for (ipcb = udp_pcbs; ipcb != NULL; ipcb = ipcb->next) { + /* is this UDP PCB already on active list? */ + if (pcb == ipcb) { + rebind = 1; + break; + } + } + +#if LWIP_IPV6 && LWIP_IPV6_SCOPES + /* If the given IP address should have a zone but doesn't, assign one now. + * This is legacy support: scope-aware callers should always provide properly + * zoned source addresses. 
Do the zone selection before the address-in-use + * check below; as such we have to make a temporary copy of the address. */ + if (IP_IS_V6(ipaddr) && ip6_addr_lacks_zone(ip_2_ip6(ipaddr), IP6_UNKNOWN)) { + ip_addr_copy(zoned_ipaddr, *ipaddr); + ip6_addr_select_zone(ip_2_ip6(&zoned_ipaddr), ip_2_ip6(&zoned_ipaddr)); + ipaddr = &zoned_ipaddr; + } +#endif /* LWIP_IPV6 && LWIP_IPV6_SCOPES */ + + /* no port specified? */ + if (port == 0) { + port = udp_new_port(); + if (port == 0) { + /* no more ports available in local range */ + LWIP_DEBUGF(UDP_DEBUG, ("udp_bind: out of free UDP ports\n")); + return ERR_USE; + } + } else { + for (ipcb = udp_pcbs; ipcb != NULL; ipcb = ipcb->next) { + if (pcb != ipcb) { + /* By default, we don't allow to bind to a port that any other udp + PCB is already bound to, unless *all* PCBs with that port have tha + REUSEADDR flag set. */ +#if SO_REUSE + if (!ip_get_option(pcb, SOF_REUSEADDR) || + !ip_get_option(ipcb, SOF_REUSEADDR)) +#endif /* SO_REUSE */ + { + /* port matches that of PCB in list and REUSEADDR not set -> reject */ + if ((ipcb->local_port == port) && + /* IP address matches or any IP used? */ + (ip_addr_cmp(&ipcb->local_ip, ipaddr) || ip_addr_isany(ipaddr) || + ip_addr_isany(&ipcb->local_ip))) { + /* other PCB already binds to this local IP and port */ + LWIP_DEBUGF(UDP_DEBUG, + ("udp_bind: local port %"U16_F" already bound by another pcb\n", port)); + return ERR_USE; + } + } + } + } + } + + ip_addr_set_ipaddr(&pcb->local_ip, ipaddr); + + pcb->local_port = port; + mib2_udp_bind(pcb); + /* pcb not active yet? */ + if (rebind == 0) { + /* place the PCB on the active list if not already there */ + pcb->next = udp_pcbs; + udp_pcbs = pcb; + } + LWIP_DEBUGF(UDP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("udp_bind: bound to ")); + ip_addr_debug_print_val(UDP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, pcb->local_ip); + LWIP_DEBUGF(UDP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, (", port %"U16_F")\n", pcb->local_port)); + return ERR_OK; +} + +/** + * @ingroup udp_raw + * Bind an UDP PCB to a specific netif. + * After calling this function, all packets received via this PCB + * are guaranteed to have come in via the specified netif, and all + * outgoing packets will go out via the specified netif. + * + * @param pcb UDP PCB to be bound. + * @param netif netif to bind udp pcb to. Can be NULL. + * + * @see udp_disconnect() + */ +void +udp_bind_netif(struct udp_pcb *pcb, const struct netif *netif) +{ + LWIP_ASSERT_CORE_LOCKED(); + + if (netif != NULL) { + pcb->netif_idx = netif_get_index(netif); + } else { + pcb->netif_idx = NETIF_NO_INDEX; + } +} + +/** + * @ingroup udp_raw + * Sets the remote end of the pcb. This function does not generate any + * network traffic, but only sets the remote address of the pcb. + * + * @param pcb UDP PCB to be connected with remote address ipaddr and port. + * @param ipaddr remote IP address to connect with. + * @param port remote UDP port to connect with. + * + * @return lwIP error code + * + * ipaddr & port are expected to be in the same byte order as in the pcb. + * + * The udp pcb is bound to a random local port if not already bound. 
+ * + * @see udp_disconnect() + */ +err_t +udp_connect(struct udp_pcb *pcb, const ip_addr_t *ipaddr, u16_t port) +{ + struct udp_pcb *ipcb; + + LWIP_ASSERT_CORE_LOCKED(); + + LWIP_ERROR("udp_connect: invalid pcb", pcb != NULL, return ERR_ARG); + LWIP_ERROR("udp_connect: invalid ipaddr", ipaddr != NULL, return ERR_ARG); + + if (pcb->local_port == 0) { + err_t err = udp_bind(pcb, &pcb->local_ip, pcb->local_port); + if (err != ERR_OK) { + return err; + } + } + + ip_addr_set_ipaddr(&pcb->remote_ip, ipaddr); +#if LWIP_IPV6 && LWIP_IPV6_SCOPES + /* If the given IP address should have a zone but doesn't, assign one now, + * using the bound address to make a more informed decision when possible. */ + if (IP_IS_V6(&pcb->remote_ip) && + ip6_addr_lacks_zone(ip_2_ip6(&pcb->remote_ip), IP6_UNKNOWN)) { + ip6_addr_select_zone(ip_2_ip6(&pcb->remote_ip), ip_2_ip6(&pcb->local_ip)); + } +#endif /* LWIP_IPV6 && LWIP_IPV6_SCOPES */ + + pcb->remote_port = port; + pcb->flags |= UDP_FLAGS_CONNECTED; + + LWIP_DEBUGF(UDP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("udp_connect: connected to ")); + ip_addr_debug_print_val(UDP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, + pcb->remote_ip); + LWIP_DEBUGF(UDP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, (", port %"U16_F")\n", pcb->remote_port)); + + /* Insert UDP PCB into the list of active UDP PCBs. */ + for (ipcb = udp_pcbs; ipcb != NULL; ipcb = ipcb->next) { + if (pcb == ipcb) { + /* already on the list, just return */ + return ERR_OK; + } + } + /* PCB not yet on the list, add PCB now */ + pcb->next = udp_pcbs; + udp_pcbs = pcb; + return ERR_OK; +} + +/** + * @ingroup udp_raw + * Remove the remote end of the pcb. This function does not generate + * any network traffic, but only removes the remote address of the pcb. + * + * @param pcb the udp pcb to disconnect. + */ +void +udp_disconnect(struct udp_pcb *pcb) +{ + LWIP_ASSERT_CORE_LOCKED(); + + LWIP_ERROR("udp_disconnect: invalid pcb", pcb != NULL, return); + + /* reset remote address association */ +#if LWIP_IPV4 && LWIP_IPV6 + if (IP_IS_ANY_TYPE_VAL(pcb->local_ip)) { + ip_addr_copy(pcb->remote_ip, *IP_ANY_TYPE); + } else { +#endif + ip_addr_set_any(IP_IS_V6_VAL(pcb->remote_ip), &pcb->remote_ip); +#if LWIP_IPV4 && LWIP_IPV6 + } +#endif + pcb->remote_port = 0; + pcb->netif_idx = NETIF_NO_INDEX; + /* mark PCB as unconnected */ + udp_clear_flags(pcb, UDP_FLAGS_CONNECTED); +} + +/** + * @ingroup udp_raw + * Set a receive callback for a UDP PCB. + * This callback will be called when receiving a datagram for the pcb. + * + * @param pcb the pcb for which to set the recv callback + * @param recv function pointer of the callback function + * @param recv_arg additional argument to pass to the callback function + */ +void +udp_recv(struct udp_pcb *pcb, udp_recv_fn recv, void *recv_arg) +{ + LWIP_ASSERT_CORE_LOCKED(); + + LWIP_ERROR("udp_recv: invalid pcb", pcb != NULL, return); + + /* remember recv() callback and user data */ + pcb->recv = recv; + pcb->recv_arg = recv_arg; +} + +/** + * @ingroup udp_raw + * Removes and deallocates the pcb. + * + * @param pcb UDP PCB to be removed. The PCB is removed from the list of + * UDP PCB's and the data structure is freed from memory. + * + * @see udp_new() + */ +void +udp_remove(struct udp_pcb *pcb) +{ + struct udp_pcb *pcb2; + + LWIP_ASSERT_CORE_LOCKED(); + + LWIP_ERROR("udp_remove: invalid pcb", pcb != NULL, return); + + mib2_udp_unbind(pcb); + /* pcb to be removed is first in list? 
*/ + if (udp_pcbs == pcb) { + /* make list start at 2nd pcb */ + udp_pcbs = udp_pcbs->next; + /* pcb not 1st in list */ + } else { + for (pcb2 = udp_pcbs; pcb2 != NULL; pcb2 = pcb2->next) { + /* find pcb in udp_pcbs list */ + if (pcb2->next != NULL && pcb2->next == pcb) { + /* remove pcb from list */ + pcb2->next = pcb->next; + break; + } + } + } + memp_free(MEMP_UDP_PCB, pcb); +} + +/** + * @ingroup udp_raw + * Creates a new UDP pcb which can be used for UDP communication. The + * pcb is not active until it has either been bound to a local address + * or connected to a remote address. + * + * @return The UDP PCB which was created. NULL if the PCB data structure + * could not be allocated. + * + * @see udp_remove() + */ +struct udp_pcb * +udp_new(void) +{ + struct udp_pcb *pcb; + + LWIP_ASSERT_CORE_LOCKED(); + + pcb = (struct udp_pcb *)memp_malloc(MEMP_UDP_PCB); + /* could allocate UDP PCB? */ + if (pcb != NULL) { + /* UDP Lite: by initializing to all zeroes, chksum_len is set to 0 + * which means checksum is generated over the whole datagram per default + * (recommended as default by RFC 3828). */ + /* initialize PCB to all zeroes */ + memset(pcb, 0, sizeof(struct udp_pcb)); + pcb->ttl = UDP_TTL; +#if LWIP_MULTICAST_TX_OPTIONS + udp_set_multicast_ttl(pcb, UDP_TTL); +#endif /* LWIP_MULTICAST_TX_OPTIONS */ + } + return pcb; +} + +/** + * @ingroup udp_raw + * Create a UDP PCB for specific IP type. + * The pcb is not active until it has either been bound to a local address + * or connected to a remote address. + * + * @param type IP address type, see @ref lwip_ip_addr_type definitions. + * If you want to listen to IPv4 and IPv6 (dual-stack) packets, + * supply @ref IPADDR_TYPE_ANY as argument and bind to @ref IP_ANY_TYPE. + * @return The UDP PCB which was created. NULL if the PCB data structure + * could not be allocated. + * + * @see udp_remove() + */ +struct udp_pcb * +udp_new_ip_type(u8_t type) +{ + struct udp_pcb *pcb; + + LWIP_ASSERT_CORE_LOCKED(); + + pcb = udp_new(); +#if LWIP_IPV4 && LWIP_IPV6 + if (pcb != NULL) { + IP_SET_TYPE_VAL(pcb->local_ip, type); + IP_SET_TYPE_VAL(pcb->remote_ip, type); + } +#else + LWIP_UNUSED_ARG(type); +#endif /* LWIP_IPV4 && LWIP_IPV6 */ + return pcb; +} + +/** This function is called from netif.c when address is changed + * + * @param old_addr IP address of the netif before change + * @param new_addr IP address of the netif after change + */ +void udp_netif_ip_addr_changed(const ip_addr_t *old_addr, const ip_addr_t *new_addr) +{ + struct udp_pcb *upcb; + + if (!ip_addr_isany(old_addr) && !ip_addr_isany(new_addr)) { + for (upcb = udp_pcbs; upcb != NULL; upcb = upcb->next) { + /* PCB bound to current local interface address? */ + if (ip_addr_cmp(&upcb->local_ip, old_addr)) { + /* The PCB is bound to the old ipaddr and + * is set to bound to the new one instead */ + ip_addr_copy(upcb->local_ip, *new_addr); + } + } + } +} + +#if UDP_DEBUG +/** + * Print UDP header information for debug purposes. + * + * @param udphdr pointer to the udp header in memory. 
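For reference, a minimal sketch of how an application might drive the raw UDP API collected above (udp_new / udp_bind / udp_recv / udp_sendto). It assumes an lwIP build with LWIP_UDP enabled and that it is called from the tcpip_thread context (or with the core locked); echo_recv and echo_init are placeholder names used for illustration only, not part of the lwIP sources.

#include "lwip/udp.h"
#include "lwip/pbuf.h"

/* Placeholder callback/init names; illustrative sketch only, not part of lwIP. */
static void
echo_recv(void *arg, struct udp_pcb *pcb, struct pbuf *p,
          const ip_addr_t *addr, u16_t port)
{
  LWIP_UNUSED_ARG(arg);
  if (p != NULL) {
    /* echo the datagram back to its sender */
    udp_sendto(pcb, p, addr, port);
    /* the recv callback takes ownership of the pbuf and must free it */
    pbuf_free(p);
  }
}

static void
echo_init(void)
{
  struct udp_pcb *pcb = udp_new();          /* allocate a UDP PCB */
  if (pcb != NULL) {
    udp_bind(pcb, IP_ANY_TYPE, 7);          /* port 7 on all local interfaces */
    udp_recv(pcb, echo_recv, NULL);         /* register the receive callback */
  }
}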
+ */ +void +udp_debug_print(struct udp_hdr *udphdr) +{ + LWIP_DEBUGF(UDP_DEBUG, ("UDP header:\n")); + LWIP_DEBUGF(UDP_DEBUG, ("+-------------------------------+\n")); + LWIP_DEBUGF(UDP_DEBUG, ("| %5"U16_F" | %5"U16_F" | (src port, dest port)\n", + lwip_ntohs(udphdr->src), lwip_ntohs(udphdr->dest))); + LWIP_DEBUGF(UDP_DEBUG, ("+-------------------------------+\n")); + LWIP_DEBUGF(UDP_DEBUG, ("| %5"U16_F" | 0x%04"X16_F" | (len, chksum)\n", + lwip_ntohs(udphdr->len), lwip_ntohs(udphdr->chksum))); + LWIP_DEBUGF(UDP_DEBUG, ("+-------------------------------+\n")); +} +#endif /* UDP_DEBUG */ + +#endif /* LWIP_UDP */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/compat/posix/arpa/inet.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/compat/posix/arpa/inet.h new file mode 100644 index 0000000..68267e5 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/compat/posix/arpa/inet.h @@ -0,0 +1,33 @@ +/** + * @file + * This file is a posix wrapper for lwip/sockets.h. + */ + +/* + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + */ + +#include "lwip/sockets.h" diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/compat/posix/net/if.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/compat/posix/net/if.h new file mode 100644 index 0000000..a17e8ad --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/compat/posix/net/if.h @@ -0,0 +1,36 @@ +/** + * @file + * This file is a posix wrapper for lwip/if_api.h. + */ + +/* + * Copyright (c) 2017 Joel Cunningham, Garmin International, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. 
+ * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + */ + +#include "lwip/if_api.h" diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/compat/posix/netdb.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/compat/posix/netdb.h new file mode 100644 index 0000000..29abbaf --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/compat/posix/netdb.h @@ -0,0 +1,33 @@ +/** + * @file + * This file is a posix wrapper for lwip/netdb.h. + */ + +/* + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
+ * + */ + +#include "lwip/netdb.h" diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/compat/posix/sys/socket.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/compat/posix/sys/socket.h new file mode 100644 index 0000000..68267e5 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/compat/posix/sys/socket.h @@ -0,0 +1,33 @@ +/** + * @file + * This file is a posix wrapper for lwip/sockets.h. + */ + +/* + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + */ + +#include "lwip/sockets.h" diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/compat/stdc/errno.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/compat/stdc/errno.h new file mode 100644 index 0000000..a3bbfd2 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/compat/stdc/errno.h @@ -0,0 +1,33 @@ +/** + * @file + * This file is a posix/stdc wrapper for lwip/errno.h. + */ + +/* + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + */ + +#include "lwip/errno.h" diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/altcp.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/altcp.h new file mode 100644 index 0000000..08bb8d5 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/altcp.h @@ -0,0 +1,201 @@ +/** + * @file + * Application layered TCP connection API (to be used from TCPIP thread)\n + * + * This file contains the generic API. + * For more details see @ref altcp_api. + */ + +/* + * Copyright (c) 2017 Simon Goldschmidt + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
+ * + * Author: Simon Goldschmidt + * + */ +#ifndef LWIP_HDR_ALTCP_H +#define LWIP_HDR_ALTCP_H + +#include "lwip/opt.h" + +#if LWIP_ALTCP /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/tcpbase.h" +#include "lwip/err.h" +#include "lwip/pbuf.h" +#include "lwip/ip_addr.h" + +#ifdef __cplusplus +extern "C" { +#endif + +struct altcp_pcb; +struct altcp_functions; + +typedef err_t (*altcp_accept_fn)(void *arg, struct altcp_pcb *new_conn, err_t err); +typedef err_t (*altcp_connected_fn)(void *arg, struct altcp_pcb *conn, err_t err); +typedef err_t (*altcp_recv_fn)(void *arg, struct altcp_pcb *conn, struct pbuf *p, err_t err); +typedef err_t (*altcp_sent_fn)(void *arg, struct altcp_pcb *conn, u16_t len); +typedef err_t (*altcp_poll_fn)(void *arg, struct altcp_pcb *conn); +typedef void (*altcp_err_fn)(void *arg, err_t err); + +typedef struct altcp_pcb* (*altcp_new_fn)(void *arg, u8_t ip_type); + +struct altcp_pcb { + const struct altcp_functions *fns; + struct altcp_pcb *inner_conn; + void *arg; + void *state; + /* application callbacks */ + altcp_accept_fn accept; + altcp_connected_fn connected; + altcp_recv_fn recv; + altcp_sent_fn sent; + altcp_poll_fn poll; + altcp_err_fn err; + u8_t pollinterval; +}; + +/** @ingroup altcp */ +typedef struct altcp_allocator_s { + /** Allocator function */ + altcp_new_fn alloc; + /** Argument to allocator function */ + void *arg; +} altcp_allocator_t; + +struct altcp_pcb *altcp_new(altcp_allocator_t *allocator); +struct altcp_pcb *altcp_new_ip6(altcp_allocator_t *allocator); +struct altcp_pcb *altcp_new_ip_type(altcp_allocator_t *allocator, u8_t ip_type); + +void altcp_arg(struct altcp_pcb *conn, void *arg); +void altcp_accept(struct altcp_pcb *conn, altcp_accept_fn accept); +void altcp_recv(struct altcp_pcb *conn, altcp_recv_fn recv); +void altcp_sent(struct altcp_pcb *conn, altcp_sent_fn sent); +void altcp_poll(struct altcp_pcb *conn, altcp_poll_fn poll, u8_t interval); +void altcp_err(struct altcp_pcb *conn, altcp_err_fn err); + +void altcp_recved(struct altcp_pcb *conn, u16_t len); +err_t altcp_bind(struct altcp_pcb *conn, const ip_addr_t *ipaddr, u16_t port); +err_t altcp_connect(struct altcp_pcb *conn, const ip_addr_t *ipaddr, u16_t port, altcp_connected_fn connected); + +/* return conn for source code compatibility to tcp callback API only */ +struct altcp_pcb *altcp_listen_with_backlog_and_err(struct altcp_pcb *conn, u8_t backlog, err_t *err); +#define altcp_listen_with_backlog(conn, backlog) altcp_listen_with_backlog_and_err(conn, backlog, NULL) +/** @ingroup altcp */ +#define altcp_listen(conn) altcp_listen_with_backlog_and_err(conn, TCP_DEFAULT_LISTEN_BACKLOG, NULL) + +void altcp_abort(struct altcp_pcb *conn); +err_t altcp_close(struct altcp_pcb *conn); +err_t altcp_shutdown(struct altcp_pcb *conn, int shut_rx, int shut_tx); + +err_t altcp_write(struct altcp_pcb *conn, const void *dataptr, u16_t len, u8_t apiflags); +err_t altcp_output(struct altcp_pcb *conn); + +u16_t altcp_mss(struct altcp_pcb *conn); +u16_t altcp_sndbuf(struct altcp_pcb *conn); +u16_t altcp_sndqueuelen(struct altcp_pcb *conn); +void altcp_nagle_disable(struct altcp_pcb *conn); +void altcp_nagle_enable(struct altcp_pcb *conn); +int altcp_nagle_disabled(struct altcp_pcb *conn); + +void altcp_setprio(struct altcp_pcb *conn, u8_t prio); + +err_t altcp_get_tcp_addrinfo(struct altcp_pcb *conn, int local, ip_addr_t *addr, u16_t *port); +ip_addr_t *altcp_get_ip(struct altcp_pcb *conn, int local); +u16_t altcp_get_port(struct altcp_pcb *conn, int local); + 
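For reference, a sketch of how an application could open a plain-TCP connection through the altcp layer declared above, assuming LWIP_ALTCP is enabled in lwipopts.h. It relies on altcp_tcp_alloc() from altcp_tcp.h (added further below in this diff) and on TCP_WRITE_FLAG_COPY from lwip/tcp.h; client_recv, client_connected and client_start are placeholder names, and the code is assumed to run in the tcpip_thread context.

#include "lwip/altcp.h"
#include "lwip/altcp_tcp.h"
#include "lwip/tcp.h"   /* for TCP_WRITE_FLAG_COPY */

/* Placeholder names; illustrative sketch only. */
static err_t
client_recv(void *arg, struct altcp_pcb *conn, struct pbuf *p, err_t err)
{
  LWIP_UNUSED_ARG(arg);
  LWIP_UNUSED_ARG(err);
  if (p == NULL) {                    /* remote end closed the connection */
    return altcp_close(conn);
  }
  altcp_recved(conn, p->tot_len);     /* re-open the receive window */
  pbuf_free(p);
  return ERR_OK;
}

static err_t
client_connected(void *arg, struct altcp_pcb *conn, err_t err)
{
  const char req[] = "hello\r\n";
  LWIP_UNUSED_ARG(arg);
  LWIP_UNUSED_ARG(err);
  altcp_recv(conn, client_recv);
  return altcp_write(conn, req, sizeof(req) - 1, TCP_WRITE_FLAG_COPY);
}

static err_t
client_start(const ip_addr_t *server_ip)
{
  altcp_allocator_t allocator = { altcp_tcp_alloc, NULL };  /* plain TCP inner pcb */
  struct altcp_pcb *conn = altcp_new(&allocator);
  if (conn == NULL) {
    return ERR_MEM;
  }
  return altcp_connect(conn, server_ip, 80, client_connected);
}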
+#ifdef LWIP_DEBUG +enum tcp_state altcp_dbg_get_tcp_state(struct altcp_pcb *conn); +#endif + +#ifdef __cplusplus +} +#endif + +#else /* LWIP_ALTCP */ + +/* ALTCP disabled, define everything to link against tcp callback API (e.g. to get a small non-ssl httpd) */ + +#include "lwip/tcp.h" + +#define altcp_accept_fn tcp_accept_fn +#define altcp_connected_fn tcp_connected_fn +#define altcp_recv_fn tcp_recv_fn +#define altcp_sent_fn tcp_sent_fn +#define altcp_poll_fn tcp_poll_fn +#define altcp_err_fn tcp_err_fn + +#define altcp_pcb tcp_pcb +#define altcp_tcp_new_ip_type tcp_new_ip_type +#define altcp_tcp_new tcp_new +#define altcp_tcp_new_ip6 tcp_new_ip6 + +#define altcp_new(allocator) tcp_new() +#define altcp_new_ip6(allocator) tcp_new_ip6() +#define altcp_new_ip_type(allocator, ip_type) tcp_new_ip_type(ip_type) + +#define altcp_arg tcp_arg +#define altcp_accept tcp_accept +#define altcp_recv tcp_recv +#define altcp_sent tcp_sent +#define altcp_poll tcp_poll +#define altcp_err tcp_err + +#define altcp_recved tcp_recved +#define altcp_bind tcp_bind +#define altcp_connect tcp_connect + +#define altcp_listen_with_backlog_and_err tcp_listen_with_backlog_and_err +#define altcp_listen_with_backlog tcp_listen_with_backlog +#define altcp_listen tcp_listen + +#define altcp_abort tcp_abort +#define altcp_close tcp_close +#define altcp_shutdown tcp_shutdown + +#define altcp_write tcp_write +#define altcp_output tcp_output + +#define altcp_mss tcp_mss +#define altcp_sndbuf tcp_sndbuf +#define altcp_sndqueuelen tcp_sndqueuelen +#define altcp_nagle_disable tcp_nagle_disable +#define altcp_nagle_enable tcp_nagle_enable +#define altcp_nagle_disabled tcp_nagle_disabled +#define altcp_setprio tcp_setprio + +#define altcp_get_tcp_addrinfo tcp_get_tcp_addrinfo +#define altcp_get_ip(pcb, local) ((local) ? (&(pcb)->local_ip) : (&(pcb)->remote_ip)) + +#ifdef LWIP_DEBUG +#define altcp_dbg_get_tcp_state tcp_dbg_get_tcp_state +#endif + +#endif /* LWIP_ALTCP */ + +#endif /* LWIP_HDR_ALTCP_H */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/altcp_tcp.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/altcp_tcp.h new file mode 100644 index 0000000..53550c7 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/altcp_tcp.h @@ -0,0 +1,72 @@ +/** + * @file + * Application layered TCP connection API (to be used from TCPIP thread)\n + * This interface mimics the tcp callback API to the application while preventing + * direct linking (much like virtual functions). + * This way, an application can make use of other application layer protocols + * on top of TCP without knowing the details (e.g. TLS, proxy connection). + * + * This file contains the base implementation calling into tcp. + */ + +/* + * Copyright (c) 2017 Simon Goldschmidt + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. 
The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Simon Goldschmidt + * + */ +#ifndef LWIP_HDR_ALTCP_TCP_H +#define LWIP_HDR_ALTCP_TCP_H + +#include "lwip/opt.h" + +#if LWIP_ALTCP /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/altcp.h" + +#ifdef __cplusplus +extern "C" { +#endif + +struct altcp_pcb *altcp_tcp_new_ip_type(u8_t ip_type); + +#define altcp_tcp_new() altcp_tcp_new_ip_type(IPADDR_TYPE_V4) +#define altcp_tcp_new_ip6() altcp_tcp_new_ip_type(IPADDR_TYPE_V6) + +struct altcp_pcb *altcp_tcp_alloc(void *arg, u8_t ip_type); + +struct tcp_pcb; +struct altcp_pcb *altcp_tcp_wrap(struct tcp_pcb *tpcb); + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_ALTCP */ + +#endif /* LWIP_HDR_ALTCP_TCP_H */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/altcp_tls.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/altcp_tls.h new file mode 100644 index 0000000..c5a6820 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/altcp_tls.h @@ -0,0 +1,117 @@ +/** + * @file + * Application layered TCP/TLS connection API (to be used from TCPIP thread) + * + * @defgroup altcp_tls TLS layer + * @ingroup altcp + * This file contains function prototypes for a TLS layer. + * A port to ARM mbedtls is provided in the apps/ tree + * (LWIP_ALTCP_TLS_MBEDTLS option). + */ + +/* + * Copyright (c) 2017 Simon Goldschmidt + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Simon Goldschmidt + * + */ +#ifndef LWIP_HDR_ALTCP_TLS_H +#define LWIP_HDR_ALTCP_TLS_H + +#include "lwip/opt.h" + +#if LWIP_ALTCP /* don't build if not configured for use in lwipopts.h */ + +#if LWIP_ALTCP_TLS + +#include "lwip/altcp.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/** @ingroup altcp_tls + * ALTCP_TLS configuration handle, content depends on port (e.g. mbedtls) + */ +struct altcp_tls_config; + +/** @ingroup altcp_tls + * Create an ALTCP_TLS server configuration handle + */ +struct altcp_tls_config *altcp_tls_create_config_server_privkey_cert(const u8_t *privkey, size_t privkey_len, + const u8_t *privkey_pass, size_t privkey_pass_len, + const u8_t *cert, size_t cert_len); + +/** @ingroup altcp_tls + * Create an ALTCP_TLS client configuration handle + */ +struct altcp_tls_config *altcp_tls_create_config_client(const u8_t *cert, size_t cert_len); + +/** @ingroup altcp_tls + * Create an ALTCP_TLS client configuration handle with two-way server/client authentication + */ +struct altcp_tls_config *altcp_tls_create_config_client_2wayauth(const u8_t *ca, size_t ca_len, const u8_t *privkey, size_t privkey_len, + const u8_t *privkey_pass, size_t privkey_pass_len, + const u8_t *cert, size_t cert_len); + +/** @ingroup altcp_tls + * Free an ALTCP_TLS configuration handle + */ +void altcp_tls_free_config(struct altcp_tls_config *conf); + +/** @ingroup altcp_tls + * Create new ALTCP_TLS layer wrapping an existing pcb as inner connection (e.g. TLS over TCP) + */ +struct altcp_pcb *altcp_tls_wrap(struct altcp_tls_config *config, struct altcp_pcb *inner_pcb); + +/** @ingroup altcp_tls + * Create new ALTCP_TLS pcb and its inner tcp pcb + */ +struct altcp_pcb *altcp_tls_new(struct altcp_tls_config *config, u8_t ip_type); + +/** @ingroup altcp_tls + * Create new ALTCP_TLS layer pcb and its inner tcp pcb. + * Same as @ref altcp_tls_new but this allocator function fits to + * @ref altcp_allocator_t / @ref altcp_new.\n + 'arg' must contain a struct altcp_tls_config *. + */ +struct altcp_pcb *altcp_tls_alloc(void *arg, u8_t ip_type); + +/** @ingroup altcp_tls + * Return pointer to internal TLS context so application can tweak it. + * Real type depends on port (e.g. mbedtls) + */ +void *altcp_tls_context(struct altcp_pcb *conn); + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_ALTCP_TLS */ +#endif /* LWIP_ALTCP */ +#endif /* LWIP_HDR_ALTCP_TLS_H */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/api.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/api.h new file mode 100644 index 0000000..eca6ac2 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/api.h @@ -0,0 +1,431 @@ +/** + * @file + * netconn API (to be used from non-TCPIP threads) + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. 
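As a companion sketch, creating a TLS client configuration and a layered pcb with the altcp_tls API above. It assumes an enabled TLS port (e.g. LWIP_ALTCP_TLS_MBEDTLS) and that ca_cert/ca_cert_len point to a CA certificate supplied by the application; tls_client_pcb is a placeholder name.

#include "lwip/altcp.h"
#include "lwip/altcp_tls.h"

/* Placeholder helper; illustrative sketch only. Assumes LWIP_ALTCP_TLS is enabled. */
static struct altcp_pcb *
tls_client_pcb(const u8_t *ca_cert, size_t ca_cert_len)
{
  struct altcp_tls_config *cfg = altcp_tls_create_config_client(ca_cert, ca_cert_len);
  if (cfg == NULL) {
    return NULL;
  }
  /* altcp_tls_new() allocates the inner TCP pcb and wraps it with TLS */
  return altcp_tls_new(cfg, IPADDR_TYPE_V4);
}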
+ * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ +#ifndef LWIP_HDR_API_H +#define LWIP_HDR_API_H + +#include "lwip/opt.h" + +#if LWIP_NETCONN || LWIP_SOCKET /* don't build if not configured for use in lwipopts.h */ +/* Note: Netconn API is always available when sockets are enabled - + * sockets are implemented on top of them */ + +#include "lwip/arch.h" +#include "lwip/netbuf.h" +#include "lwip/sys.h" +#include "lwip/ip_addr.h" +#include "lwip/err.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* Throughout this file, IP addresses and port numbers are expected to be in + * the same byte order as in the corresponding pcb. + */ + +/* Flags for netconn_write (u8_t) */ +#define NETCONN_NOFLAG 0x00 +#define NETCONN_NOCOPY 0x00 /* Only for source code compatibility */ +#define NETCONN_COPY 0x01 +#define NETCONN_MORE 0x02 +#define NETCONN_DONTBLOCK 0x04 +#define NETCONN_NOAUTORCVD 0x08 /* prevent netconn_recv_data_tcp() from updating the tcp window - must be done manually via netconn_tcp_recvd() */ +#define NETCONN_NOFIN 0x10 /* upper layer already received data, leave FIN in queue until called again */ + +/* Flags for struct netconn.flags (u8_t) */ +/** This netconn had an error, don't block on recvmbox/acceptmbox any more */ +#define NETCONN_FLAG_MBOXCLOSED 0x01 +/** Should this netconn avoid blocking? */ +#define NETCONN_FLAG_NON_BLOCKING 0x02 +/** Was the last connect action a non-blocking one? */ +#define NETCONN_FLAG_IN_NONBLOCKING_CONNECT 0x04 +#if LWIP_NETCONN_FULLDUPLEX + /** The mbox of this netconn is being deallocated, don't use it anymore */ +#define NETCONN_FLAG_MBOXINVALID 0x08 +#endif /* LWIP_NETCONN_FULLDUPLEX */ +/** If a nonblocking write has been rejected before, poll_tcp needs to + check if the netconn is writable again */ +#define NETCONN_FLAG_CHECK_WRITESPACE 0x10 +#if LWIP_IPV6 +/** If this flag is set then only IPv6 communication is allowed on the + netconn. As per RFC#3493 this features defaults to OFF allowing + dual-stack usage by default. 
*/ +#define NETCONN_FLAG_IPV6_V6ONLY 0x20 +#endif /* LWIP_IPV6 */ +#if LWIP_NETBUF_RECVINFO +/** Received packet info will be recorded for this netconn */ +#define NETCONN_FLAG_PKTINFO 0x40 +#endif /* LWIP_NETBUF_RECVINFO */ +/** A FIN has been received but not passed to the application yet */ +#define NETCONN_FIN_RX_PENDING 0x80 + +/* Helpers to process several netconn_types by the same code */ +#define NETCONNTYPE_GROUP(t) ((t)&0xF0) +#define NETCONNTYPE_DATAGRAM(t) ((t)&0xE0) +#if LWIP_IPV6 +#define NETCONN_TYPE_IPV6 0x08 +#define NETCONNTYPE_ISIPV6(t) (((t)&NETCONN_TYPE_IPV6) != 0) +#define NETCONNTYPE_ISUDPLITE(t) (((t)&0xF3) == NETCONN_UDPLITE) +#define NETCONNTYPE_ISUDPNOCHKSUM(t) (((t)&0xF3) == NETCONN_UDPNOCHKSUM) +#else /* LWIP_IPV6 */ +#define NETCONNTYPE_ISIPV6(t) (0) +#define NETCONNTYPE_ISUDPLITE(t) ((t) == NETCONN_UDPLITE) +#define NETCONNTYPE_ISUDPNOCHKSUM(t) ((t) == NETCONN_UDPNOCHKSUM) +#endif /* LWIP_IPV6 */ + +/** @ingroup netconn_common + * Protocol family and type of the netconn + */ +enum netconn_type { + NETCONN_INVALID = 0, + /** TCP IPv4 */ + NETCONN_TCP = 0x10, +#if LWIP_IPV6 + /** TCP IPv6 */ + NETCONN_TCP_IPV6 = NETCONN_TCP | NETCONN_TYPE_IPV6 /* 0x18 */, +#endif /* LWIP_IPV6 */ + /** UDP IPv4 */ + NETCONN_UDP = 0x20, + /** UDP IPv4 lite */ + NETCONN_UDPLITE = 0x21, + /** UDP IPv4 no checksum */ + NETCONN_UDPNOCHKSUM = 0x22, + +#if LWIP_IPV6 + /** UDP IPv6 (dual-stack by default, unless you call @ref netconn_set_ipv6only) */ + NETCONN_UDP_IPV6 = NETCONN_UDP | NETCONN_TYPE_IPV6 /* 0x28 */, + /** UDP IPv6 lite (dual-stack by default, unless you call @ref netconn_set_ipv6only) */ + NETCONN_UDPLITE_IPV6 = NETCONN_UDPLITE | NETCONN_TYPE_IPV6 /* 0x29 */, + /** UDP IPv6 no checksum (dual-stack by default, unless you call @ref netconn_set_ipv6only) */ + NETCONN_UDPNOCHKSUM_IPV6 = NETCONN_UDPNOCHKSUM | NETCONN_TYPE_IPV6 /* 0x2a */, +#endif /* LWIP_IPV6 */ + + /** Raw connection IPv4 */ + NETCONN_RAW = 0x40 +#if LWIP_IPV6 + /** Raw connection IPv6 (dual-stack by default, unless you call @ref netconn_set_ipv6only) */ + , NETCONN_RAW_IPV6 = NETCONN_RAW | NETCONN_TYPE_IPV6 /* 0x48 */ +#endif /* LWIP_IPV6 */ +}; + +/** Current state of the netconn. Non-TCP netconns are always + * in state NETCONN_NONE! */ +enum netconn_state { + NETCONN_NONE, + NETCONN_WRITE, + NETCONN_LISTEN, + NETCONN_CONNECT, + NETCONN_CLOSE +}; + +/** Used to inform the callback function about changes + * + * Event explanation: + * + * In the netconn implementation, there are three ways to block a client: + * + * - accept mbox (sys_arch_mbox_fetch(&conn->acceptmbox, &accept_ptr, 0); in netconn_accept()) + * - receive mbox (sys_arch_mbox_fetch(&conn->recvmbox, &buf, 0); in netconn_recv_data()) + * - send queue is full (sys_arch_sem_wait(LWIP_API_MSG_SEM(msg), 0); in lwip_netconn_do_write()) + * + * The events have to be seen as events signaling the state of these mboxes/semaphores. For non-blocking + * connections, you need to know in advance whether a call to a netconn function call would block or not, + * and these events tell you about that. + * + * RCVPLUS events say: Safe to perform a potentially blocking call call once more. + * They are counted in sockets - three RCVPLUS events for accept mbox means you are safe + * to call netconn_accept 3 times without being blocked. + * Same thing for receive mbox. + * + * RCVMINUS events say: Your call to to a possibly blocking function is "acknowledged". + * Socket implementation decrements the counter. 
+ * + * For TX, there is no need to count, its merely a flag. SENDPLUS means you may send something. + * SENDPLUS occurs when enough data was delivered to peer so netconn_send() can be called again. + * A SENDMINUS event occurs when the next call to a netconn_send() would be blocking. + */ +enum netconn_evt { + NETCONN_EVT_RCVPLUS, + NETCONN_EVT_RCVMINUS, + NETCONN_EVT_SENDPLUS, + NETCONN_EVT_SENDMINUS, + NETCONN_EVT_ERROR +}; + +#if LWIP_IGMP || (LWIP_IPV6 && LWIP_IPV6_MLD) +/** Used for netconn_join_leave_group() */ +enum netconn_igmp { + NETCONN_JOIN, + NETCONN_LEAVE +}; +#endif /* LWIP_IGMP || (LWIP_IPV6 && LWIP_IPV6_MLD) */ + +#if LWIP_DNS +/* Used for netconn_gethostbyname_addrtype(), these should match the DNS_ADDRTYPE defines in dns.h */ +#define NETCONN_DNS_DEFAULT NETCONN_DNS_IPV4_IPV6 +#define NETCONN_DNS_IPV4 0 +#define NETCONN_DNS_IPV6 1 +#define NETCONN_DNS_IPV4_IPV6 2 /* try to resolve IPv4 first, try IPv6 if IPv4 fails only */ +#define NETCONN_DNS_IPV6_IPV4 3 /* try to resolve IPv6 first, try IPv4 if IPv6 fails only */ +#endif /* LWIP_DNS */ + +/* forward-declare some structs to avoid to include their headers */ +struct ip_pcb; +struct tcp_pcb; +struct udp_pcb; +struct raw_pcb; +struct netconn; +struct api_msg; + +/** A callback prototype to inform about events for a netconn */ +typedef void (* netconn_callback)(struct netconn *, enum netconn_evt, u16_t len); + +/** A netconn descriptor */ +struct netconn { + /** type of the netconn (TCP, UDP or RAW) */ + enum netconn_type type; + /** current state of the netconn */ + enum netconn_state state; + /** the lwIP internal protocol control block */ + union { + struct ip_pcb *ip; + struct tcp_pcb *tcp; + struct udp_pcb *udp; + struct raw_pcb *raw; + } pcb; + /** the last asynchronous unreported error this netconn had */ + err_t pending_err; +#if !LWIP_NETCONN_SEM_PER_THREAD + /** sem that is used to synchronously execute functions in the core context */ + sys_sem_t op_completed; +#endif + /** mbox where received packets are stored until they are fetched + by the netconn application thread (can grow quite big) */ + sys_mbox_t recvmbox; +#if LWIP_TCP + /** mbox where new connections are stored until processed + by the application thread */ + sys_mbox_t acceptmbox; +#endif /* LWIP_TCP */ +#if LWIP_NETCONN_FULLDUPLEX + /** number of threads waiting on an mbox. This is required to unblock + all threads when closing while threads are waiting. */ + int mbox_threads_waiting; +#endif + /** only used for socket layer */ +#if LWIP_SOCKET + int socket; +#endif /* LWIP_SOCKET */ +#if LWIP_SO_SNDTIMEO + /** timeout to wait for sending data (which means enqueueing data for sending + in internal buffers) in milliseconds */ + s32_t send_timeout; +#endif /* LWIP_SO_RCVTIMEO */ +#if LWIP_SO_RCVTIMEO + /** timeout in milliseconds to wait for new data to be received + (or connections to arrive for listening netconns) */ + u32_t recv_timeout; +#endif /* LWIP_SO_RCVTIMEO */ +#if LWIP_SO_RCVBUF + /** maximum amount of bytes queued in recvmbox + not used for TCP: adjust TCP_WND instead! 
*/ + int recv_bufsize; + /** number of bytes currently in recvmbox to be received, + tested against recv_bufsize to limit bytes on recvmbox + for UDP and RAW, used for FIONREAD */ + int recv_avail; +#endif /* LWIP_SO_RCVBUF */ +#if LWIP_SO_LINGER + /** values <0 mean linger is disabled, values > 0 are seconds to linger */ + s16_t linger; +#endif /* LWIP_SO_LINGER */ + /** flags holding more netconn-internal state, see NETCONN_FLAG_* defines */ + u8_t flags; +#if LWIP_TCP + /** TCP: when data passed to netconn_write doesn't fit into the send buffer, + this temporarily stores the message. + Also used during connect and close. */ + struct api_msg *current_msg; +#endif /* LWIP_TCP */ + /** A callback function that is informed about events for this netconn */ + netconn_callback callback; +}; + +/** This vector type is passed to @ref netconn_write_vectors_partly to send + * multiple buffers at once. + * ATTENTION: This type has to directly map struct iovec since one is casted + * into the other! + */ +struct netvector { + /** pointer to the application buffer that contains the data to send */ + const void *ptr; + /** size of the application data to send */ + size_t len; +}; + +/** Register an Network connection event */ +#define API_EVENT(c,e,l) if (c->callback) { \ + (*c->callback)(c, e, l); \ + } + +/* Network connection functions: */ + +/** @ingroup netconn_common + * Create new netconn connection + * @param t @ref netconn_type */ +#define netconn_new(t) netconn_new_with_proto_and_callback(t, 0, NULL) +#define netconn_new_with_callback(t, c) netconn_new_with_proto_and_callback(t, 0, c) +struct netconn *netconn_new_with_proto_and_callback(enum netconn_type t, u8_t proto, + netconn_callback callback); +err_t netconn_prepare_delete(struct netconn *conn); +err_t netconn_delete(struct netconn *conn); +/** Get the type of a netconn (as enum netconn_type). 
*/ +#define netconn_type(conn) (conn->type) + +err_t netconn_getaddr(struct netconn *conn, ip_addr_t *addr, + u16_t *port, u8_t local); +/** @ingroup netconn_common */ +#define netconn_peer(c,i,p) netconn_getaddr(c,i,p,0) +/** @ingroup netconn_common */ +#define netconn_addr(c,i,p) netconn_getaddr(c,i,p,1) + +err_t netconn_bind(struct netconn *conn, const ip_addr_t *addr, u16_t port); +err_t netconn_bind_if(struct netconn *conn, u8_t if_idx); +err_t netconn_connect(struct netconn *conn, const ip_addr_t *addr, u16_t port); +err_t netconn_disconnect (struct netconn *conn); +err_t netconn_listen_with_backlog(struct netconn *conn, u8_t backlog); +/** @ingroup netconn_tcp */ +#define netconn_listen(conn) netconn_listen_with_backlog(conn, TCP_DEFAULT_LISTEN_BACKLOG) +err_t netconn_accept(struct netconn *conn, struct netconn **new_conn); +err_t netconn_recv(struct netconn *conn, struct netbuf **new_buf); +err_t netconn_recv_udp_raw_netbuf(struct netconn *conn, struct netbuf **new_buf); +err_t netconn_recv_udp_raw_netbuf_flags(struct netconn *conn, struct netbuf **new_buf, u8_t apiflags); +err_t netconn_recv_tcp_pbuf(struct netconn *conn, struct pbuf **new_buf); +err_t netconn_recv_tcp_pbuf_flags(struct netconn *conn, struct pbuf **new_buf, u8_t apiflags); +err_t netconn_tcp_recvd(struct netconn *conn, size_t len); +err_t netconn_sendto(struct netconn *conn, struct netbuf *buf, + const ip_addr_t *addr, u16_t port); +err_t netconn_send(struct netconn *conn, struct netbuf *buf); +err_t netconn_write_partly(struct netconn *conn, const void *dataptr, size_t size, + u8_t apiflags, size_t *bytes_written); +err_t netconn_write_vectors_partly(struct netconn *conn, struct netvector *vectors, u16_t vectorcnt, + u8_t apiflags, size_t *bytes_written); +/** @ingroup netconn_tcp */ +#define netconn_write(conn, dataptr, size, apiflags) \ + netconn_write_partly(conn, dataptr, size, apiflags, NULL) +err_t netconn_close(struct netconn *conn); +err_t netconn_shutdown(struct netconn *conn, u8_t shut_rx, u8_t shut_tx); + +#if LWIP_IGMP || (LWIP_IPV6 && LWIP_IPV6_MLD) +err_t netconn_join_leave_group(struct netconn *conn, const ip_addr_t *multiaddr, + const ip_addr_t *netif_addr, enum netconn_igmp join_or_leave); +err_t netconn_join_leave_group_netif(struct netconn *conn, const ip_addr_t *multiaddr, + u8_t if_idx, enum netconn_igmp join_or_leave); +#endif /* LWIP_IGMP || (LWIP_IPV6 && LWIP_IPV6_MLD) */ +#if LWIP_DNS +#if LWIP_IPV4 && LWIP_IPV6 +err_t netconn_gethostbyname_addrtype(const char *name, ip_addr_t *addr, u8_t dns_addrtype); +#define netconn_gethostbyname(name, addr) netconn_gethostbyname_addrtype(name, addr, NETCONN_DNS_DEFAULT) +#else /* LWIP_IPV4 && LWIP_IPV6 */ +err_t netconn_gethostbyname(const char *name, ip_addr_t *addr); +#define netconn_gethostbyname_addrtype(name, addr, dns_addrtype) netconn_gethostbyname(name, addr) +#endif /* LWIP_IPV4 && LWIP_IPV6 */ +#endif /* LWIP_DNS */ + +err_t netconn_err(struct netconn *conn); +#define netconn_recv_bufsize(conn) ((conn)->recv_bufsize) + +#define netconn_set_flags(conn, set_flags) do { (conn)->flags = (u8_t)((conn)->flags | (set_flags)); } while(0) +#define netconn_clear_flags(conn, clr_flags) do { (conn)->flags = (u8_t)((conn)->flags & (u8_t)(~(clr_flags) & 0xff)); } while(0) +#define netconn_is_flag_set(conn, flag) (((conn)->flags & (flag)) != 0) + +/** Set the blocking status of netconn calls (@todo: write/send is missing) */ +#define netconn_set_nonblocking(conn, val) do { if(val) { \ + netconn_set_flags(conn, NETCONN_FLAG_NON_BLOCKING); \ +} else { \ + 
netconn_clear_flags(conn, NETCONN_FLAG_NON_BLOCKING); }} while(0) +/** Get the blocking status of netconn calls (@todo: write/send is missing) */ +#define netconn_is_nonblocking(conn) (((conn)->flags & NETCONN_FLAG_NON_BLOCKING) != 0) + +#if LWIP_IPV6 +/** @ingroup netconn_common + * TCP: Set the IPv6 ONLY status of netconn calls (see NETCONN_FLAG_IPV6_V6ONLY) + */ +#define netconn_set_ipv6only(conn, val) do { if(val) { \ + netconn_set_flags(conn, NETCONN_FLAG_IPV6_V6ONLY); \ +} else { \ + netconn_clear_flags(conn, NETCONN_FLAG_IPV6_V6ONLY); }} while(0) +/** @ingroup netconn_common + * TCP: Get the IPv6 ONLY status of netconn calls (see NETCONN_FLAG_IPV6_V6ONLY) + */ +#define netconn_get_ipv6only(conn) (((conn)->flags & NETCONN_FLAG_IPV6_V6ONLY) != 0) +#endif /* LWIP_IPV6 */ + +#if LWIP_SO_SNDTIMEO +/** Set the send timeout in milliseconds */ +#define netconn_set_sendtimeout(conn, timeout) ((conn)->send_timeout = (timeout)) +/** Get the send timeout in milliseconds */ +#define netconn_get_sendtimeout(conn) ((conn)->send_timeout) +#endif /* LWIP_SO_SNDTIMEO */ +#if LWIP_SO_RCVTIMEO +/** Set the receive timeout in milliseconds */ +#define netconn_set_recvtimeout(conn, timeout) ((conn)->recv_timeout = (timeout)) +/** Get the receive timeout in milliseconds */ +#define netconn_get_recvtimeout(conn) ((conn)->recv_timeout) +#endif /* LWIP_SO_RCVTIMEO */ +#if LWIP_SO_RCVBUF +/** Set the receive buffer in bytes */ +#define netconn_set_recvbufsize(conn, recvbufsize) ((conn)->recv_bufsize = (recvbufsize)) +/** Get the receive buffer in bytes */ +#define netconn_get_recvbufsize(conn) ((conn)->recv_bufsize) +#endif /* LWIP_SO_RCVBUF*/ + +#if LWIP_NETCONN_SEM_PER_THREAD +void netconn_thread_init(void); +void netconn_thread_cleanup(void); +#else /* LWIP_NETCONN_SEM_PER_THREAD */ +#define netconn_thread_init() +#define netconn_thread_cleanup() +#endif /* LWIP_NETCONN_SEM_PER_THREAD */ + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_NETCONN || LWIP_SOCKET */ + +#endif /* LWIP_HDR_API_H */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/apps/altcp_proxyconnect.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/apps/altcp_proxyconnect.h new file mode 100644 index 0000000..a7c3527 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/apps/altcp_proxyconnect.h @@ -0,0 +1,79 @@ +/** + * @file + * Application layered TCP connection API that executes a proxy-connect. + * + * This file provides a starting layer that executes a proxy-connect e.g. to + * set up TLS connections through a http proxy. + */ + +/* + * Copyright (c) 2018 Simon Goldschmidt + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. 
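For reference, the same kind of UDP exchange expressed with the sequential netconn API from api.h above. Unlike the raw API, this is intended to be called from an ordinary application thread rather than the tcpip_thread; remote_ip, the port numbers and netconn_udp_demo are placeholder values and names.

#include "lwip/api.h"

/* Placeholder demo; illustrative sketch only. */
static void
netconn_udp_demo(const ip_addr_t *remote_ip)
{
  struct netconn *conn = netconn_new(NETCONN_UDP);
  struct netbuf *buf;
  const char msg[] = "ping";

  if (conn == NULL) {
    return;
  }
  if (netconn_bind(conn, IP_ADDR_ANY, 0) == ERR_OK) {    /* any local port */
    buf = netbuf_new();
    if (buf != NULL) {
      netbuf_ref(buf, msg, sizeof(msg) - 1);   /* reference payload, no copy */
      netconn_sendto(conn, buf, remote_ip, 7);
      netbuf_delete(buf);
    }
    if (netconn_recv(conn, &buf) == ERR_OK) {  /* block for a reply */
      netbuf_delete(buf);
    }
  }
  netconn_delete(conn);
}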
+ * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Simon Goldschmidt + * + */ + +#ifndef LWIP_HDR_APPS_ALTCP_PROXYCONNECT_H +#define LWIP_HDR_APPS_ALTCP_PROXYCONNECT_H + +#include "lwip/opt.h" + +#if LWIP_ALTCP /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/ip_addr.h" + +#ifdef __cplusplus +extern "C" { +#endif + +struct altcp_proxyconnect_config { + ip_addr_t proxy_addr; + u16_t proxy_port; +}; + + +struct altcp_pcb *altcp_proxyconnect_new(struct altcp_proxyconnect_config *config, struct altcp_pcb *inner_pcb); +struct altcp_pcb *altcp_proxyconnect_new_tcp(struct altcp_proxyconnect_config *config, u8_t ip_type); + +struct altcp_pcb *altcp_proxyconnect_alloc(void *arg, u8_t ip_type); + +#if LWIP_ALTCP_TLS +struct altcp_proxyconnect_tls_config { + struct altcp_proxyconnect_config proxy; + struct altcp_tls_config *tls_config; +}; + +struct altcp_pcb *altcp_proxyconnect_tls_alloc(void *arg, u8_t ip_type); +#endif /* LWIP_ALTCP_TLS */ + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_ALTCP */ +#endif /* LWIP_HDR_APPS_ALTCP_PROXYCONNECT_H */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/apps/altcp_tls_mbedtls_opts.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/apps/altcp_tls_mbedtls_opts.h new file mode 100644 index 0000000..63f72e3 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/apps/altcp_tls_mbedtls_opts.h @@ -0,0 +1,67 @@ +/** + * @file + * Application layered TCP/TLS connection API (to be used from TCPIP thread) + * + * This file contains options for an mbedtls port of the TLS layer. + */ + +/* + * Copyright (c) 2017 Simon Goldschmidt + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Simon Goldschmidt + * + */ +#ifndef LWIP_HDR_ALTCP_TLS_OPTS_H +#define LWIP_HDR_ALTCP_TLS_OPTS_H + +#include "lwip/opt.h" + +#if LWIP_ALTCP /* don't build if not configured for use in lwipopts.h */ + +/** LWIP_ALTCP_TLS_MBEDTLS==1: use mbedTLS for TLS support for altcp API + * mbedtls include directory must be reachable via include search path + */ +#ifndef LWIP_ALTCP_TLS_MBEDTLS +#define LWIP_ALTCP_TLS_MBEDTLS 0 +#endif + +/** Configure debug level of this file */ +#ifndef ALTCP_MBEDTLS_DEBUG +#define ALTCP_MBEDTLS_DEBUG LWIP_DBG_OFF +#endif + +/** Set a session timeout in seconds for the basic session cache + * ATTENTION: Using a session cache can lower security by reusing keys! + */ +#ifndef ALTCP_MBEDTLS_SESSION_CACHE_TIMEOUT_SECONDS +#define ALTCP_MBEDTLS_SESSION_CACHE_TIMEOUT_SECONDS 0 +#endif + +#endif /* LWIP_ALTCP */ + +#endif /* LWIP_HDR_ALTCP_TLS_OPTS_H */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/apps/fs.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/apps/fs.h new file mode 100644 index 0000000..02bde1f --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/apps/fs.h @@ -0,0 +1,126 @@ +/* + * Copyright (c) 2001-2003 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
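The altcp_proxyconnect layer added above is intended to tunnel a connection (typically TLS) through an HTTP proxy via CONNECT. A hedged sketch, assuming LWIP_ALTCP is enabled; the proxy address/port are placeholders:

#include "lwip/altcp.h"
#include "lwip/apps/altcp_proxyconnect.h"

static struct altcp_proxyconnect_config proxy_cfg;

static struct altcp_pcb *new_proxied_pcb(void)
{
  IP_ADDR4(&proxy_cfg.proxy_addr, 192, 168, 0, 1); /* proxy IP: assumption */
  proxy_cfg.proxy_port = 8080;                     /* proxy port: assumption */
  /* The returned pcb is used like any other altcp pcb; on altcp_connect()
   * the layer is expected to contact the proxy first and issue the CONNECT. */
  return altcp_proxyconnect_new_tcp(&proxy_cfg, IPADDR_TYPE_V4);
}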
+ * + * Author: Adam Dunkels + * + */ +#ifndef LWIP_HDR_APPS_FS_H +#define LWIP_HDR_APPS_FS_H + +#include "httpd_opts.h" +#include "lwip/err.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#define FS_READ_EOF -1 +#define FS_READ_DELAYED -2 + +#if HTTPD_PRECALCULATED_CHECKSUM +struct fsdata_chksum { + u32_t offset; + u16_t chksum; + u16_t len; +}; +#endif /* HTTPD_PRECALCULATED_CHECKSUM */ + +#define FS_FILE_FLAGS_HEADER_INCLUDED 0x01 +#define FS_FILE_FLAGS_HEADER_PERSISTENT 0x02 +#define FS_FILE_FLAGS_HEADER_HTTPVER_1_1 0x04 +#define FS_FILE_FLAGS_SSI 0x08 + +/** Define FS_FILE_EXTENSION_T_DEFINED if you have typedef'ed to your private + * pointer type (defaults to 'void' so the default usage is 'void*') + */ +#ifndef FS_FILE_EXTENSION_T_DEFINED +typedef void fs_file_extension; +#endif + +struct fs_file { + const char *data; + int len; + int index; + /* pextension is free for implementations to hold private (extensional) + arbitrary data, e.g. holding some file state or file system handle */ + fs_file_extension *pextension; +#if HTTPD_PRECALCULATED_CHECKSUM + const struct fsdata_chksum *chksum; + u16_t chksum_count; +#endif /* HTTPD_PRECALCULATED_CHECKSUM */ + u8_t flags; +#if LWIP_HTTPD_CUSTOM_FILES + u8_t is_custom_file; +#endif /* LWIP_HTTPD_CUSTOM_FILES */ +#if LWIP_HTTPD_FILE_STATE + void *state; +#endif /* LWIP_HTTPD_FILE_STATE */ +}; + +#if LWIP_HTTPD_FS_ASYNC_READ +typedef void (*fs_wait_cb)(void *arg); +#endif /* LWIP_HTTPD_FS_ASYNC_READ */ + +err_t fs_open(struct fs_file *file, const char *name); +void fs_close(struct fs_file *file); +#if LWIP_HTTPD_DYNAMIC_FILE_READ +#if LWIP_HTTPD_FS_ASYNC_READ +int fs_read_async(struct fs_file *file, char *buffer, int count, fs_wait_cb callback_fn, void *callback_arg); +#else /* LWIP_HTTPD_FS_ASYNC_READ */ +int fs_read(struct fs_file *file, char *buffer, int count); +#endif /* LWIP_HTTPD_FS_ASYNC_READ */ +#endif /* LWIP_HTTPD_DYNAMIC_FILE_READ */ +#if LWIP_HTTPD_FS_ASYNC_READ +int fs_is_file_ready(struct fs_file *file, fs_wait_cb callback_fn, void *callback_arg); +#endif /* LWIP_HTTPD_FS_ASYNC_READ */ +int fs_bytes_left(struct fs_file *file); + +#if LWIP_HTTPD_FILE_STATE +/** This user-defined function is called when a file is opened. */ +void *fs_state_init(struct fs_file *file, const char *name); +/** This user-defined function is called when a file is closed. */ +void fs_state_free(struct fs_file *file, void *state); +#endif /* #if LWIP_HTTPD_FILE_STATE */ + +struct fsdata_file { + const struct fsdata_file *next; + const unsigned char *name; + const unsigned char *data; + int len; + u8_t flags; +#if HTTPD_PRECALCULATED_CHECKSUM + u16_t chksum_count; + const struct fsdata_chksum *chksum; +#endif /* HTTPD_PRECALCULATED_CHECKSUM */ +}; + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_APPS_FS_H */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/apps/http_client.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/apps/http_client.h new file mode 100644 index 0000000..f4754b3 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/apps/http_client.h @@ -0,0 +1,160 @@ +/** + * @file + * HTTP client + */ + +/* + * Copyright (c) 2018 Simon Goldschmidt + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. 
Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Simon Goldschmidt + * + */ + +#ifndef LWIP_HDR_APPS_HTTP_CLIENT_H +#define LWIP_HDR_APPS_HTTP_CLIENT_H + +#include "lwip/opt.h" +#include "lwip/ip_addr.h" +#include "lwip/err.h" +#include "lwip/altcp.h" +#include "lwip/prot/iana.h" +#include "lwip/pbuf.h" + +#if LWIP_TCP && LWIP_CALLBACK_API + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * @ingroup httpc + * HTTPC_HAVE_FILE_IO: define this to 1 to have functions dowloading directly + * to disk via fopen/fwrite. + * These functions are example implementations of the interface only. 
+ */ +#ifndef LWIP_HTTPC_HAVE_FILE_IO +#define LWIP_HTTPC_HAVE_FILE_IO 0 +#endif + +/** + * @ingroup httpc + * The default TCP port used for HTTP + */ +#define HTTP_DEFAULT_PORT LWIP_IANA_PORT_HTTP + +/** + * @ingroup httpc + * HTTP client result codes + */ +typedef enum ehttpc_result { + /** File successfully received */ + HTTPC_RESULT_OK = 0, + /** Unknown error */ + HTTPC_RESULT_ERR_UNKNOWN = 1, + /** Connection to server failed */ + HTTPC_RESULT_ERR_CONNECT = 2, + /** Failed to resolve server hostname */ + HTTPC_RESULT_ERR_HOSTNAME = 3, + /** Connection unexpectedly closed by remote server */ + HTTPC_RESULT_ERR_CLOSED = 4, + /** Connection timed out (server didn't respond in time) */ + HTTPC_RESULT_ERR_TIMEOUT = 5, + /** Server responded with an error code */ + HTTPC_RESULT_ERR_SVR_RESP = 6, + /** Local memory error */ + HTTPC_RESULT_ERR_MEM = 7, + /** Local abort */ + HTTPC_RESULT_LOCAL_ABORT = 8, + /** Content length mismatch */ + HTTPC_RESULT_ERR_CONTENT_LEN = 9 +} httpc_result_t; + +typedef struct _httpc_state httpc_state_t; + +/** + * @ingroup httpc + * Prototype of a http client callback function + * + * @param arg argument specified when initiating the request + * @param httpc_result result of the http transfer (see enum httpc_result_t) + * @param rx_content_len number of bytes received (without headers) + * @param srv_res this contains the http status code received (if any) + * @param err an error returned by internal lwip functions, can help to specify + * the source of the error but must not necessarily be != ERR_OK + */ +typedef void (*httpc_result_fn)(void *arg, httpc_result_t httpc_result, u32_t rx_content_len, u32_t srv_res, err_t err); + +/** + * @ingroup httpc + * Prototype of http client callback: called when the headers are received + * + * @param connection http client connection + * @param arg argument specified when initiating the request + * @param hdr header pbuf(s) (may contain data also) + * @param hdr_len length of the heders in 'hdr' + * @param content_len content length as received in the headers (-1 if not received) + * @return if != ERR_OK is returned, the connection is aborted + */ +typedef err_t (*httpc_headers_done_fn)(httpc_state_t *connection, void *arg, struct pbuf *hdr, u16_t hdr_len, u32_t content_len); + +typedef struct _httpc_connection { + ip_addr_t proxy_addr; + u16_t proxy_port; + u8_t use_proxy; + /* @todo: add username:pass? 
*/ + +#if LWIP_ALTCP + altcp_allocator_t *altcp_allocator; +#endif + + /* this callback is called when the transfer is finished (or aborted) */ + httpc_result_fn result_fn; + /* this callback is called after receiving the http headers + It can abort the connection by returning != ERR_OK */ + httpc_headers_done_fn headers_done_fn; +} httpc_connection_t; + +err_t httpc_get_file(const ip_addr_t* server_addr, u16_t port, const char* uri, const httpc_connection_t *settings, + altcp_recv_fn recv_fn, void* callback_arg, httpc_state_t **connection); +err_t httpc_get_file_dns(const char* server_name, u16_t port, const char* uri, const httpc_connection_t *settings, + altcp_recv_fn recv_fn, void* callback_arg, httpc_state_t **connection); + +#if LWIP_HTTPC_HAVE_FILE_IO +err_t httpc_get_file_to_disk(const ip_addr_t* server_addr, u16_t port, const char* uri, const httpc_connection_t *settings, + void* callback_arg, const char* local_file_name, httpc_state_t **connection); +err_t httpc_get_file_dns_to_disk(const char* server_name, u16_t port, const char* uri, const httpc_connection_t *settings, + void* callback_arg, const char* local_file_name, httpc_state_t **connection); +#endif /* LWIP_HTTPC_HAVE_FILE_IO */ + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_TCP && LWIP_CALLBACK_API */ + +#endif /* LWIP_HDR_APPS_HTTP_CLIENT_H */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/apps/httpd.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/apps/httpd.h new file mode 100644 index 0000000..615261e --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/apps/httpd.h @@ -0,0 +1,255 @@ +/** + * @file + * HTTP server + */ + +/* + * Copyright (c) 2001-2003 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
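A hedged usage sketch for the httpc API declared above: fetch a file by hostname and report the outcome. The host name and URI are assumptions; assumes LWIP_TCP, LWIP_CALLBACK_API and DNS are enabled.

#include <stdio.h>
#include "lwip/apps/http_client.h"

static void dl_result(void *arg, httpc_result_t res, u32_t rx_len, u32_t srv_res, err_t err)
{
  LWIP_UNUSED_ARG(arg);
  printf("transfer done: result=%d http=%lu bytes=%lu err=%d\n",
         (int)res, (unsigned long)srv_res, (unsigned long)rx_len, (int)err);
}

static err_t dl_recv(void *arg, struct altcp_pcb *pcb, struct pbuf *p, err_t err)
{
  LWIP_UNUSED_ARG(arg);
  LWIP_UNUSED_ARG(err);
  if (p != NULL) {
    altcp_recved(pcb, p->tot_len); /* acknowledge the data so the window reopens */
    pbuf_free(p);
  }
  return ERR_OK;
}

static void start_download(void)
{
  static httpc_connection_t settings; /* zero-initialized: no proxy, no headers callback */
  static httpc_state_t *conn;
  settings.result_fn = dl_result;
  httpc_get_file_dns("example.org", HTTP_DEFAULT_PORT, "/index.html",
                     &settings, dl_recv, NULL, &conn);
}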
+ * + * Author: Adam Dunkels + * + * This version of the file has been modified by Texas Instruments to offer + * simple server-side-include (SSI) and Common Gateway Interface (CGI) + * capability. + */ + +#ifndef LWIP_HDR_APPS_HTTPD_H +#define LWIP_HDR_APPS_HTTPD_H + +#include "httpd_opts.h" +#include "lwip/err.h" +#include "lwip/pbuf.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#if LWIP_HTTPD_CGI + +/** + * @ingroup httpd + * Function pointer for a CGI script handler. + * + * This function is called each time the HTTPD server is asked for a file + * whose name was previously registered as a CGI function using a call to + * http_set_cgi_handlers. The iIndex parameter provides the index of the + * CGI within the cgis array passed to http_set_cgi_handlers. Parameters + * pcParam and pcValue provide access to the parameters provided along with + * the URI. iNumParams provides a count of the entries in the pcParam and + * pcValue arrays. Each entry in the pcParam array contains the name of a + * parameter with the corresponding entry in the pcValue array containing the + * value for that parameter. Note that pcParam may contain multiple elements + * with the same name if, for example, a multi-selection list control is used + * in the form generating the data. + * + * The function should return a pointer to a character string which is the + * path and filename of the response that is to be sent to the connected + * browser, for example "/thanks.htm" or "/response/error.ssi". + * + * The maximum number of parameters that will be passed to this function via + * iNumParams is defined by LWIP_HTTPD_MAX_CGI_PARAMETERS. Any parameters in + * the incoming HTTP request above this number will be discarded. + * + * Requests intended for use by this CGI mechanism must be sent using the GET + * method (which encodes all parameters within the URI rather than in a block + * later in the request). Attempts to use the POST method will result in the + * request being ignored. + * + */ +typedef const char *(*tCGIHandler)(int iIndex, int iNumParams, char *pcParam[], + char *pcValue[]); + +/** + * @ingroup httpd + * Structure defining the base filename (URL) of a CGI and the associated + * function which is to be called when that URL is requested. + */ +typedef struct +{ + const char *pcCGIName; + tCGIHandler pfnCGIHandler; +} tCGI; + +void http_set_cgi_handlers(const tCGI *pCGIs, int iNumHandlers); + +#endif /* LWIP_HTTPD_CGI */ + +#if LWIP_HTTPD_CGI || LWIP_HTTPD_CGI_SSI + +#if LWIP_HTTPD_CGI_SSI +/* we have to prototype this struct here to make it available for the handler */ +struct fs_file; + +/** Define this generic CGI handler in your application. + * It is called once for every URI with parameters. + * The parameters can be stored to the object passed as connection_state, which + * is allocated to file->state via fs_state_init() from fs_open() or fs_open_custom(). + * Content creation via SSI or complete dynamic files can retrieve the CGI params from there. + */ +extern void httpd_cgi_handler(struct fs_file *file, const char* uri, int iNumParams, + char **pcParam, char **pcValue +#if defined(LWIP_HTTPD_FILE_STATE) && LWIP_HTTPD_FILE_STATE + , void *connection_state +#endif /* LWIP_HTTPD_FILE_STATE */ + ); +#endif /* LWIP_HTTPD_CGI_SSI */ + +#endif /* LWIP_HTTPD_CGI || LWIP_HTTPD_CGI_SSI */ + +#if LWIP_HTTPD_SSI + +/** + * @ingroup httpd + * Function pointer for the SSI tag handler callback. 
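Before the SSI handler description continues below, a minimal sketch of the CGI registration described above (requires LWIP_HTTPD_CGI == 1). The "/leds.cgi" URI and the "led" parameter are assumptions for illustration:

#include <string.h>
#include "lwip/apps/httpd.h"

static const char *leds_cgi_handler(int iIndex, int iNumParams,
                                    char *pcParam[], char *pcValue[])
{
  int i;
  LWIP_UNUSED_ARG(iIndex);
  for (i = 0; i < iNumParams; i++) {
    if (strcmp(pcParam[i], "led") == 0) {
      /* act on pcValue[i], e.g. toggle an LED */
    }
  }
  return "/index.html"; /* page sent back to the browser */
}

static const tCGI cgi_handlers[] = {
  { "/leds.cgi", leds_cgi_handler },
};

void my_cgi_init(void)
{
  http_set_cgi_handlers(cgi_handlers, 1); /* 1 = number of entries in cgi_handlers */
}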
+ * + * This function will be called each time the HTTPD server detects a tag of the + * form in files with extensions mentioned in the g_pcSSIExtensions + * array (currently .shtml, .shtm, .ssi, .xml, .json) where "name" appears as + * one of the tags supplied to http_set_ssi_handler in the tags array. The + * returned insert string, which will be appended after the the string + * "" in file sent back to the client, should be written to pointer + * pcInsert. iInsertLen contains the size of the buffer pointed to by + * pcInsert. The iIndex parameter provides the zero-based index of the tag as + * found in the tags array and identifies the tag that is to be processed. + * + * The handler returns the number of characters written to pcInsert excluding + * any terminating NULL or HTTPD_SSI_TAG_UNKNOWN when tag is not recognized. + * + * Note that the behavior of this SSI mechanism is somewhat different from the + * "normal" SSI processing as found in, for example, the Apache web server. In + * this case, the inserted text is appended following the SSI tag rather than + * replacing the tag entirely. This allows for an implementation that does not + * require significant additional buffering of output data yet which will still + * offer usable SSI functionality. One downside to this approach is when + * attempting to use SSI within JavaScript. The SSI tag is structured to + * resemble an HTML comment but this syntax does not constitute a comment + * within JavaScript and, hence, leaving the tag in place will result in + * problems in these cases. In order to avoid these problems, define + * LWIP_HTTPD_SSI_INCLUDE_TAG as zero in your lwip options file, or use JavaScript + * style block comments in the form / * # name * / (without the spaces). + */ +typedef u16_t (*tSSIHandler)( +#if LWIP_HTTPD_SSI_RAW + const char* ssi_tag_name, +#else /* LWIP_HTTPD_SSI_RAW */ + int iIndex, +#endif /* LWIP_HTTPD_SSI_RAW */ + char *pcInsert, int iInsertLen +#if LWIP_HTTPD_SSI_MULTIPART + , u16_t current_tag_part, u16_t *next_tag_part +#endif /* LWIP_HTTPD_SSI_MULTIPART */ +#if defined(LWIP_HTTPD_FILE_STATE) && LWIP_HTTPD_FILE_STATE + , void *connection_state +#endif /* LWIP_HTTPD_FILE_STATE */ + ); + +/** Set the SSI handler function + * (if LWIP_HTTPD_SSI_RAW==1, only the first argument is used) + */ +void http_set_ssi_handler(tSSIHandler pfnSSIHandler, + const char **ppcTags, int iNumTags); + +/** For LWIP_HTTPD_SSI_RAW==1, return this to indicate the tag is unknown. + * In this case, the webserver writes a warning into the page. + * You can also just return 0 to write nothing for unknown tags. + */ +#define HTTPD_SSI_TAG_UNKNOWN 0xFFFF + +#endif /* LWIP_HTTPD_SSI */ + +#if LWIP_HTTPD_SUPPORT_POST + +/* These functions must be implemented by the application */ + +/** + * @ingroup httpd + * Called when a POST request has been received. The application can decide + * whether to accept it or not. + * + * @param connection Unique connection identifier, valid until httpd_post_end + * is called. + * @param uri The HTTP header URI receiving the POST request. + * @param http_request The raw HTTP request (the first packet, normally). + * @param http_request_len Size of 'http_request'. + * @param content_len Content-Length from HTTP header. + * @param response_uri Filename of response file, to be filled when denying the + * request + * @param response_uri_len Size of the 'response_uri' buffer. 
+ * @param post_auto_wnd Set this to 0 to let the callback code handle window + * updates by calling 'httpd_post_data_recved' (to throttle rx speed) + * default is 1 (httpd handles window updates automatically) + * @return ERR_OK: Accept the POST request, data may be passed in + * another err_t: Deny the POST request, send back 'bad request'. + */ +err_t httpd_post_begin(void *connection, const char *uri, const char *http_request, + u16_t http_request_len, int content_len, char *response_uri, + u16_t response_uri_len, u8_t *post_auto_wnd); + +/** + * @ingroup httpd + * Called for each pbuf of data that has been received for a POST. + * ATTENTION: The application is responsible for freeing the pbufs passed in! + * + * @param connection Unique connection identifier. + * @param p Received data. + * @return ERR_OK: Data accepted. + * another err_t: Data denied, http_post_get_response_uri will be called. + */ +err_t httpd_post_receive_data(void *connection, struct pbuf *p); + +/** + * @ingroup httpd + * Called when all data is received or when the connection is closed. + * The application must return the filename/URI of a file to send in response + * to this POST request. If the response_uri buffer is untouched, a 404 + * response is returned. + * + * @param connection Unique connection identifier. + * @param response_uri Filename of response file, to be filled when denying the request + * @param response_uri_len Size of the 'response_uri' buffer. + */ +void httpd_post_finished(void *connection, char *response_uri, u16_t response_uri_len); + +#if LWIP_HTTPD_POST_MANUAL_WND +void httpd_post_data_recved(void *connection, u16_t recved_len); +#endif /* LWIP_HTTPD_POST_MANUAL_WND */ + +#endif /* LWIP_HTTPD_SUPPORT_POST */ + +void httpd_init(void); + +#if HTTPD_ENABLE_HTTPS +struct altcp_tls_config; +void httpd_inits(struct altcp_tls_config *conf); +#endif + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_APPS_HTTPD_H */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/apps/httpd_opts.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/apps/httpd_opts.h new file mode 100644 index 0000000..733e117 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/apps/httpd_opts.h @@ -0,0 +1,396 @@ +/** + * @file + * HTTP server options list + */ + +/* + * Copyright (c) 2001-2003 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + * This version of the file has been modified by Texas Instruments to offer + * simple server-side-include (SSI) and Common Gateway Interface (CGI) + * capability. + */ + +#ifndef LWIP_HDR_APPS_HTTPD_OPTS_H +#define LWIP_HDR_APPS_HTTPD_OPTS_H + +#include "lwip/opt.h" +#include "lwip/prot/iana.h" + +/** + * @defgroup httpd_opts Options + * @ingroup httpd + * @{ + */ + +/** Set this to 1 to support CGI (old style). + * + * This old style CGI support works by registering an array of URLs and + * associated CGI handler functions (@ref http_set_cgi_handlers). + * This list is scanned just before fs_open is called from request handling. + * The handler can return a new URL that is used internally by the httpd to + * load the returned page (passed to fs_open). + * + * Use this CGI type e.g. to execute specific actions and return a page that + * does not depend on the CGI parameters. + */ +#if !defined LWIP_HTTPD_CGI || defined __DOXYGEN__ +#define LWIP_HTTPD_CGI 0 +#endif + +/** Set this to 1 to support CGI (new style). + * + * This new style CGI support works by calling a global function + * (@ref tCGIHandler) for all URLs that are found. fs_open is called first + * and the URL can not be written by the CGI handler. Instead, this handler gets + * passed the http file state, an object where it can store information derived + * from the CGI URL or parameters. This file state is later passed to SSI, so + * the SSI code can return data depending on CGI input. + * + * Use this CGI handler if you want CGI information passed on to SSI. + */ +#if !defined LWIP_HTTPD_CGI_SSI || defined __DOXYGEN__ +#define LWIP_HTTPD_CGI_SSI 0 +#endif + +/** Set this to 1 to support SSI (Server-Side-Includes) + * + * In contrast to other http servers, this only calls a preregistered callback + * function (@see http_set_ssi_handler) for each tag (in the format of + * ) encountered in SSI-enabled pages. + * SSI-enabled pages must have one of the predefined SSI-enabled file extensions. + * All files with one of these extensions are parsed when sent. + * + * A downside of the current SSI implementation is that persistent connections + * don't work, as the file length is not known in advance (and httpd currently + * relies on the Content-Length header for persistent connections). + * + * To save memory, the maximum tag length is limited (@see LWIP_HTTPD_MAX_TAG_NAME_LEN). + * To save memory, the maximum insertion string length is limited (@see + * LWIP_HTTPD_MAX_TAG_INSERT_LEN). If this is not enought, @ref LWIP_HTTPD_SSI_MULTIPART + * can be used. + */ +#if !defined LWIP_HTTPD_SSI || defined __DOXYGEN__ +#define LWIP_HTTPD_SSI 0 +#endif + +/** Set this to 1 to implement an SSI tag handler callback that gets a const char* + * to the tag (instead of an index into a pre-registered array of known tags) + * If this is 0, the SSI handler callback function is only called pre-registered tags. 
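A hedged sketch of the SSI hook declared in httpd.h above, assuming the default option values (LWIP_HTTPD_SSI_RAW == 0, LWIP_HTTPD_SSI_MULTIPART == 0, LWIP_HTTPD_FILE_STATE == 0) and LWIP_HTTPD_SSI enabled in lwipopts.h. The "uptime" tag is an assumption; an SSI-enabled page would then contain the tag text <!--#uptime-->:

#include <stdio.h>
#include "lwip/apps/httpd.h"
#include "lwip/sys.h"

static const char *ssi_tags[] = { "uptime" }; /* max 8 chars per tag by default */

static u16_t ssi_handler(int iIndex, char *pcInsert, int iInsertLen)
{
  if (iIndex == 0) { /* "uptime" */
    /* insert string is appended right after the tag in the served page */
    return (u16_t)snprintf(pcInsert, (size_t)iInsertLen, "%lu ms",
                           (unsigned long)sys_now());
  }
  return 0; /* unknown tag: insert nothing */
}

void my_ssi_init(void)
{
  http_set_ssi_handler(ssi_handler, ssi_tags, 1);
}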
+ */ +#if !defined LWIP_HTTPD_SSI_RAW || defined __DOXYGEN__ +#define LWIP_HTTPD_SSI_RAW 0 +#endif + +/** Set this to 0 to prevent parsing the file extension at runtime to decide + * if a file should be scanned for SSI tags or not. + * Default is 1 (file extensions are checked using the g_pcSSIExtensions array) + * Set to 2 to override this runtime test function. + * + * This is enabled by default, but if you only use a newer version of makefsdata + * supporting the "-ssi" option, this info is already present in + */ +#if !defined LWIP_HTTPD_SSI_BY_FILE_EXTENSION || defined __DOXYGEN__ +#define LWIP_HTTPD_SSI_BY_FILE_EXTENSION 1 +#endif + +/** Set this to 1 to support HTTP POST */ +#if !defined LWIP_HTTPD_SUPPORT_POST || defined __DOXYGEN__ +#define LWIP_HTTPD_SUPPORT_POST 0 +#endif + +/* The maximum number of parameters that the CGI handler can be sent. */ +#if !defined LWIP_HTTPD_MAX_CGI_PARAMETERS || defined __DOXYGEN__ +#define LWIP_HTTPD_MAX_CGI_PARAMETERS 16 +#endif + +/** LWIP_HTTPD_SSI_MULTIPART==1: SSI handler function is called with 2 more + * arguments indicating a counter for insert string that are too long to be + * inserted at once: the SSI handler function must then set 'next_tag_part' + * which will be passed back to it in the next call. */ +#if !defined LWIP_HTTPD_SSI_MULTIPART || defined __DOXYGEN__ +#define LWIP_HTTPD_SSI_MULTIPART 0 +#endif + +/* The maximum length of the string comprising the SSI tag name + * ATTENTION: tags longer than this are ignored, not truncated! + */ +#if !defined LWIP_HTTPD_MAX_TAG_NAME_LEN || defined __DOXYGEN__ +#define LWIP_HTTPD_MAX_TAG_NAME_LEN 8 +#endif + +/* The maximum length of string that can be returned to replace any given tag + * If this buffer is not long enough, use LWIP_HTTPD_SSI_MULTIPART. + */ +#if !defined LWIP_HTTPD_MAX_TAG_INSERT_LEN || defined __DOXYGEN__ +#define LWIP_HTTPD_MAX_TAG_INSERT_LEN 192 +#endif + +#if !defined LWIP_HTTPD_POST_MANUAL_WND || defined __DOXYGEN__ +#define LWIP_HTTPD_POST_MANUAL_WND 0 +#endif + +/** This string is passed in the HTTP header as "Server: " */ +#if !defined HTTPD_SERVER_AGENT || defined __DOXYGEN__ +#define HTTPD_SERVER_AGENT "lwIP/" LWIP_VERSION_STRING " (http://savannah.nongnu.org/projects/lwip)" +#endif + +/** Set this to 1 if you want to include code that creates HTTP headers + * at runtime. Default is off: HTTP headers are then created statically + * by the makefsdata tool. Static headers mean smaller code size, but + * the (readonly) fsdata will grow a bit as every file includes the HTTP + * header. */ +#if !defined LWIP_HTTPD_DYNAMIC_HEADERS || defined __DOXYGEN__ +#define LWIP_HTTPD_DYNAMIC_HEADERS 0 +#endif + +#if !defined HTTPD_DEBUG || defined __DOXYGEN__ +#define HTTPD_DEBUG LWIP_DBG_OFF +#endif + +/** Set this to 1 to use a memp pool for allocating + * struct http_state instead of the heap. + * If enabled, you'll need to define MEMP_NUM_PARALLEL_HTTPD_CONNS + * (and MEMP_NUM_PARALLEL_HTTPD_SSI_CONNS for SSI) to set the size of + * the pool(s). + */ +#if !defined HTTPD_USE_MEM_POOL || defined __DOXYGEN__ +#define HTTPD_USE_MEM_POOL 0 +#endif + +/** The server port for HTTPD to use */ +#if !defined HTTPD_SERVER_PORT || defined __DOXYGEN__ +#define HTTPD_SERVER_PORT LWIP_IANA_PORT_HTTP +#endif + +/** The https server port for HTTPD to use */ +#if !defined HTTPD_SERVER_PORT_HTTPS || defined __DOXYGEN__ +#define HTTPD_SERVER_PORT_HTTPS LWIP_IANA_PORT_HTTPS +#endif + +/** Enable https support? 
*/ +#if !defined HTTPD_ENABLE_HTTPS || defined __DOXYGEN__ +#define HTTPD_ENABLE_HTTPS 0 +#endif + +/** Maximum retries before the connection is aborted/closed. + * - number of times pcb->poll is called -> default is 4*500ms = 2s; + * - reset when pcb->sent is called + */ +#if !defined HTTPD_MAX_RETRIES || defined __DOXYGEN__ +#define HTTPD_MAX_RETRIES 4 +#endif + +/** The poll delay is X*500ms */ +#if !defined HTTPD_POLL_INTERVAL || defined __DOXYGEN__ +#define HTTPD_POLL_INTERVAL 4 +#endif + +/** Priority for tcp pcbs created by HTTPD (very low by default). + * Lower priorities get killed first when running out of memory. + */ +#if !defined HTTPD_TCP_PRIO || defined __DOXYGEN__ +#define HTTPD_TCP_PRIO TCP_PRIO_MIN +#endif + +/** Set this to 1 to enable timing each file sent */ +#if !defined LWIP_HTTPD_TIMING || defined __DOXYGEN__ +#define LWIP_HTTPD_TIMING 0 +#endif +/** Set this to 1 to enable timing each file sent */ +#if !defined HTTPD_DEBUG_TIMING || defined __DOXYGEN__ +#define HTTPD_DEBUG_TIMING LWIP_DBG_OFF +#endif + +/** Set this to one to show error pages when parsing a request fails instead + of simply closing the connection. */ +#if !defined LWIP_HTTPD_SUPPORT_EXTSTATUS || defined __DOXYGEN__ +#define LWIP_HTTPD_SUPPORT_EXTSTATUS 0 +#endif + +/** Set this to 0 to drop support for HTTP/0.9 clients (to save some bytes) */ +#if !defined LWIP_HTTPD_SUPPORT_V09 || defined __DOXYGEN__ +#define LWIP_HTTPD_SUPPORT_V09 1 +#endif + +/** Set this to 1 to enable HTTP/1.1 persistent connections. + * ATTENTION: If the generated file system includes HTTP headers, these must + * include the "Connection: keep-alive" header (pass argument "-11" to makefsdata). + */ +#if !defined LWIP_HTTPD_SUPPORT_11_KEEPALIVE || defined __DOXYGEN__ +#define LWIP_HTTPD_SUPPORT_11_KEEPALIVE 0 +#endif + +/** Set this to 1 to support HTTP request coming in in multiple packets/pbufs */ +#if !defined LWIP_HTTPD_SUPPORT_REQUESTLIST || defined __DOXYGEN__ +#define LWIP_HTTPD_SUPPORT_REQUESTLIST 1 +#endif + +#if LWIP_HTTPD_SUPPORT_REQUESTLIST +/** Number of rx pbufs to enqueue to parse an incoming request (up to the first + newline) */ +#if !defined LWIP_HTTPD_REQ_QUEUELEN || defined __DOXYGEN__ +#define LWIP_HTTPD_REQ_QUEUELEN 5 +#endif + +/** Number of (TCP payload-) bytes (in pbufs) to enqueue to parse and incoming + request (up to the first double-newline) */ +#if !defined LWIP_HTTPD_REQ_BUFSIZE || defined __DOXYGEN__ +#define LWIP_HTTPD_REQ_BUFSIZE LWIP_HTTPD_MAX_REQ_LENGTH +#endif + +/** Defines the maximum length of a HTTP request line (up to the first CRLF, + copied from pbuf into this a global buffer when pbuf- or packet-queues + are received - otherwise the input pbuf is used directly) */ +#if !defined LWIP_HTTPD_MAX_REQ_LENGTH || defined __DOXYGEN__ +#define LWIP_HTTPD_MAX_REQ_LENGTH LWIP_MIN(1023, (LWIP_HTTPD_REQ_QUEUELEN * PBUF_POOL_BUFSIZE)) +#endif +#endif /* LWIP_HTTPD_SUPPORT_REQUESTLIST */ + +/** This is the size of a static buffer used when URIs end with '/'. + * In this buffer, the directory requested is concatenated with all the + * configured default file names. + * Set to 0 to disable checking default filenames on non-root directories. + */ +#if !defined LWIP_HTTPD_MAX_REQUEST_URI_LEN || defined __DOXYGEN__ +#define LWIP_HTTPD_MAX_REQUEST_URI_LEN 63 +#endif + +/** Maximum length of the filename to send as response to a POST request, + * filled in by the application when a POST is finished. 
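A minimal sketch of the three POST callbacks documented in httpd.h above (requires LWIP_HTTPD_SUPPORT_POST == 1). The "/form" URI and "/done.html" response page are assumptions; error handling is reduced to the minimum:

#include <stdio.h>
#include <string.h>
#include "lwip/apps/httpd.h"

static void *cur_conn;

err_t httpd_post_begin(void *connection, const char *uri, const char *http_request,
                       u16_t http_request_len, int content_len, char *response_uri,
                       u16_t response_uri_len, u8_t *post_auto_wnd)
{
  LWIP_UNUSED_ARG(http_request); LWIP_UNUSED_ARG(http_request_len);
  LWIP_UNUSED_ARG(content_len);
  LWIP_UNUSED_ARG(response_uri); LWIP_UNUSED_ARG(response_uri_len);
  *post_auto_wnd = 1;              /* let httpd manage the TCP receive window */
  if (strcmp(uri, "/form") == 0) {
    cur_conn = connection;
    return ERR_OK;                 /* accept the POST */
  }
  return ERR_VAL;                  /* anything else: reply "bad request" */
}

err_t httpd_post_receive_data(void *connection, struct pbuf *p)
{
  LWIP_UNUSED_ARG(connection);
  /* parse p->payload here ... */
  pbuf_free(p);                    /* the application must free the pbuf */
  return ERR_OK;
}

void httpd_post_finished(void *connection, char *response_uri, u16_t response_uri_len)
{
  if (connection == cur_conn) {
    snprintf(response_uri, response_uri_len, "/done.html");
  }
}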
+ */ +#if !defined LWIP_HTTPD_POST_MAX_RESPONSE_URI_LEN || defined __DOXYGEN__ +#define LWIP_HTTPD_POST_MAX_RESPONSE_URI_LEN 63 +#endif + +/** Set this to 0 to not send the SSI tag (default is on, so the tag will + * be sent in the HTML page */ +#if !defined LWIP_HTTPD_SSI_INCLUDE_TAG || defined __DOXYGEN__ +#define LWIP_HTTPD_SSI_INCLUDE_TAG 1 +#endif + +/** Set this to 1 to call tcp_abort when tcp_close fails with memory error. + * This can be used to prevent consuming all memory in situations where the + * HTTP server has low priority compared to other communication. */ +#if !defined LWIP_HTTPD_ABORT_ON_CLOSE_MEM_ERROR || defined __DOXYGEN__ +#define LWIP_HTTPD_ABORT_ON_CLOSE_MEM_ERROR 0 +#endif + +/** Set this to 1 to kill the oldest connection when running out of + * memory for 'struct http_state' or 'struct http_ssi_state'. + * ATTENTION: This puts all connections on a linked list, so may be kind of slow. + */ +#if !defined LWIP_HTTPD_KILL_OLD_ON_CONNECTIONS_EXCEEDED || defined __DOXYGEN__ +#define LWIP_HTTPD_KILL_OLD_ON_CONNECTIONS_EXCEEDED 0 +#endif + +/** Set this to 1 to send URIs without extension without headers + * (who uses this at all??) */ +#if !defined LWIP_HTTPD_OMIT_HEADER_FOR_EXTENSIONLESS_URI || defined __DOXYGEN__ +#define LWIP_HTTPD_OMIT_HEADER_FOR_EXTENSIONLESS_URI 0 +#endif + +/** Default: Tags are sent from struct http_state and are therefore volatile */ +#if !defined HTTP_IS_TAG_VOLATILE || defined __DOXYGEN__ +#define HTTP_IS_TAG_VOLATILE(ptr) TCP_WRITE_FLAG_COPY +#endif + +/* By default, the httpd is limited to send 2*pcb->mss to keep resource usage low + when http is not an important protocol in the device. */ +#if !defined HTTPD_LIMIT_SENDING_TO_2MSS || defined __DOXYGEN__ +#define HTTPD_LIMIT_SENDING_TO_2MSS 1 +#endif + +/* Define this to a function that returns the maximum amount of data to enqueue. + The function have this signature: u16_t fn(struct altcp_pcb* pcb); + The best place to define this is the hooks file (@see LWIP_HOOK_FILENAME) */ +#if !defined HTTPD_MAX_WRITE_LEN || defined __DOXYGEN__ +#if HTTPD_LIMIT_SENDING_TO_2MSS +#define HTTPD_MAX_WRITE_LEN(pcb) ((u16_t)(2 * altcp_mss(pcb))) +#endif +#endif + +/*------------------- FS OPTIONS -------------------*/ + +/** Set this to 1 and provide the functions: + * - "int fs_open_custom(struct fs_file *file, const char *name)" + * Called first for every opened file to allow opening files + * that are not included in fsdata(_custom).c + * - "void fs_close_custom(struct fs_file *file)" + * Called to free resources allocated by fs_open_custom(). + */ +#if !defined LWIP_HTTPD_CUSTOM_FILES || defined __DOXYGEN__ +#define LWIP_HTTPD_CUSTOM_FILES 0 +#endif + +/** Set this to 1 to support fs_read() to dynamically read file data. + * Without this (default=off), only one-block files are supported, + * and the contents must be ready after fs_open(). + */ +#if !defined LWIP_HTTPD_DYNAMIC_FILE_READ || defined __DOXYGEN__ +#define LWIP_HTTPD_DYNAMIC_FILE_READ 0 +#endif + +/** Set this to 1 to include an application state argument per file + * that is opened. This allows to keep a state per connection/file. + */ +#if !defined LWIP_HTTPD_FILE_STATE || defined __DOXYGEN__ +#define LWIP_HTTPD_FILE_STATE 0 +#endif + +/** HTTPD_PRECALCULATED_CHECKSUM==1: include precompiled checksums for + * predefined (MSS-sized) chunks of the files to prevent having to calculate + * the checksums at runtime. 
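A hedged sketch of the custom-file hooks described above (LWIP_HTTPD_CUSTOM_FILES == 1), using the fs.h structures added earlier in this patch. A single generated page "/status.json" is served from a RAM buffer; the name and content are assumptions:

#include <stdio.h>
#include <string.h>
#include "lwip/apps/fs.h"
#include "lwip/mem.h"

int fs_open_custom(struct fs_file *file, const char *name)
{
  if (strcmp(name, "/status.json") == 0) {
    char *buf = (char *)mem_malloc(64);
    if (buf == NULL) {
      return 0;
    }
    file->len = snprintf(buf, 64, "{\"up\":true}");
    file->data = buf;
    file->index = file->len;  /* whole file available immediately */
    file->flags = 0;          /* no header in the data; enable LWIP_HTTPD_DYNAMIC_HEADERS
                                 so httpd generates the HTTP header */
    file->pextension = buf;   /* remember the allocation for fs_close_custom() */
    return 1;                 /* handled here */
  }
  return 0;                   /* fall back to the generated fsdata */
}

void fs_close_custom(struct fs_file *file)
{
  if (file->pextension != NULL) {
    mem_free(file->pextension);
    file->pextension = NULL;
  }
}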
*/ +#if !defined HTTPD_PRECALCULATED_CHECKSUM || defined __DOXYGEN__ +#define HTTPD_PRECALCULATED_CHECKSUM 0 +#endif + +/** LWIP_HTTPD_FS_ASYNC_READ==1: support asynchronous read operations + * (fs_read_async returns FS_READ_DELAYED and calls a callback when finished). + */ +#if !defined LWIP_HTTPD_FS_ASYNC_READ || defined __DOXYGEN__ +#define LWIP_HTTPD_FS_ASYNC_READ 0 +#endif + +/** Filename (including path) to use as FS data file */ +#if !defined HTTPD_FSDATA_FILE || defined __DOXYGEN__ +/* HTTPD_USE_CUSTOM_FSDATA: Compatibility with deprecated lwIP option */ +#if defined(HTTPD_USE_CUSTOM_FSDATA) && (HTTPD_USE_CUSTOM_FSDATA != 0) +#define HTTPD_FSDATA_FILE "fsdata_custom.c" +#else +#define HTTPD_FSDATA_FILE "fsdata.c" +#endif +#endif + +/** + * @} + */ + +#endif /* LWIP_HDR_APPS_HTTPD_OPTS_H */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/apps/lwiperf.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/apps/lwiperf.h new file mode 100644 index 0000000..ff4d6f7 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/apps/lwiperf.h @@ -0,0 +1,100 @@ +/** + * @file + * lwIP iPerf server implementation + */ + +/* + * Copyright (c) 2014 Simon Goldschmidt + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
+ * + * Author: Simon Goldschmidt + * + */ +#ifndef LWIP_HDR_APPS_LWIPERF_H +#define LWIP_HDR_APPS_LWIPERF_H + +#include "lwip/opt.h" +#include "lwip/ip_addr.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#define LWIPERF_TCP_PORT_DEFAULT 5001 + +/** lwIPerf test results */ +enum lwiperf_report_type +{ + /** The server side test is done */ + LWIPERF_TCP_DONE_SERVER, + /** The client side test is done */ + LWIPERF_TCP_DONE_CLIENT, + /** Local error lead to test abort */ + LWIPERF_TCP_ABORTED_LOCAL, + /** Data check error lead to test abort */ + LWIPERF_TCP_ABORTED_LOCAL_DATAERROR, + /** Transmit error lead to test abort */ + LWIPERF_TCP_ABORTED_LOCAL_TXERROR, + /** Remote side aborted the test */ + LWIPERF_TCP_ABORTED_REMOTE +}; + +/** Control */ +enum lwiperf_client_type +{ + /** Unidirectional tx only test */ + LWIPERF_CLIENT, + /** Do a bidirectional test simultaneously */ + LWIPERF_DUAL, + /** Do a bidirectional test individually */ + LWIPERF_TRADEOFF +}; + +/** Prototype of a report function that is called when a session is finished. + This report function can show the test results. + @param report_type contains the test result */ +typedef void (*lwiperf_report_fn)(void *arg, enum lwiperf_report_type report_type, + const ip_addr_t* local_addr, u16_t local_port, const ip_addr_t* remote_addr, u16_t remote_port, + u32_t bytes_transferred, u32_t ms_duration, u32_t bandwidth_kbitpsec); + +void* lwiperf_start_tcp_server(const ip_addr_t* local_addr, u16_t local_port, + lwiperf_report_fn report_fn, void* report_arg); +void* lwiperf_start_tcp_server_default(lwiperf_report_fn report_fn, void* report_arg); +void* lwiperf_start_tcp_client(const ip_addr_t* remote_addr, u16_t remote_port, + enum lwiperf_client_type type, + lwiperf_report_fn report_fn, void* report_arg); +void* lwiperf_start_tcp_client_default(const ip_addr_t* remote_addr, + lwiperf_report_fn report_fn, void* report_arg); + +void lwiperf_abort(void* lwiperf_session); + + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_APPS_LWIPERF_H */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/apps/mdns.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/apps/mdns.h new file mode 100644 index 0000000..3c3d054 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/apps/mdns.h @@ -0,0 +1,105 @@ +/** + * @file + * MDNS responder + */ + + /* + * Copyright (c) 2015 Verisure Innovation AB + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Erik Ekman + * + */ + +#ifndef LWIP_HDR_APPS_MDNS_H +#define LWIP_HDR_APPS_MDNS_H + +#include "lwip/apps/mdns_opts.h" +#include "lwip/netif.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#if LWIP_MDNS_RESPONDER + +enum mdns_sd_proto { + DNSSD_PROTO_UDP = 0, + DNSSD_PROTO_TCP = 1 +}; + +#define MDNS_PROBING_CONFLICT 0 +#define MDNS_PROBING_SUCCESSFUL 1 + +#define MDNS_LABEL_MAXLEN 63 + +struct mdns_host; +struct mdns_service; + +/** Callback function to add text to a reply, called when generating the reply */ +typedef void (*service_get_txt_fn_t)(struct mdns_service *service, void *txt_userdata); + +/** Callback function to let application know the result of probing network for name + * uniqueness, called with result MDNS_PROBING_SUCCESSFUL if no other node claimed + * use for the name for the netif or a service and is safe to use, or MDNS_PROBING_CONFLICT + * if another node is already using it and mdns is disabled on this interface */ +typedef void (*mdns_name_result_cb_t)(struct netif* netif, u8_t result); + +void mdns_resp_init(void); + +void mdns_resp_register_name_result_cb(mdns_name_result_cb_t cb); + +err_t mdns_resp_add_netif(struct netif *netif, const char *hostname, u32_t dns_ttl); +err_t mdns_resp_remove_netif(struct netif *netif); +err_t mdns_resp_rename_netif(struct netif *netif, const char *hostname); + +s8_t mdns_resp_add_service(struct netif *netif, const char *name, const char *service, enum mdns_sd_proto proto, u16_t port, u32_t dns_ttl, service_get_txt_fn_t txt_fn, void *txt_userdata); +err_t mdns_resp_del_service(struct netif *netif, s8_t slot); +err_t mdns_resp_rename_service(struct netif *netif, s8_t slot, const char *name); + +err_t mdns_resp_add_service_txtitem(struct mdns_service *service, const char *txt, u8_t txt_len); + +void mdns_resp_restart(struct netif *netif); +void mdns_resp_announce(struct netif *netif); + +/** + * @ingroup mdns + * Announce IP settings have changed on netif. + * Call this in your callback registered by netif_set_status_callback(). + * No need to call this function when LWIP_NETIF_EXT_STATUS_CALLBACK==1, + * this handled automatically for you. + * @param netif The network interface where settings have changed. + */ +#define mdns_resp_netif_settings_changed(netif) mdns_resp_announce(netif) + +#endif /* LWIP_MDNS_RESPONDER */ + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_APPS_MDNS_H */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/apps/mdns_opts.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/apps/mdns_opts.h new file mode 100644 index 0000000..74d3f41 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/apps/mdns_opts.h @@ -0,0 +1,81 @@ +/** + * @file + * MDNS responder + */ + + /* + * Copyright (c) 2015 Verisure Innovation AB + * All rights reserved. 
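A short sketch of the iperf server API declared above (lwiperf.h): start a TCP iperf server on the default port 5001 and log each finished session from the report callback.

#include <stdio.h>
#include "lwip/apps/lwiperf.h"

static void iperf_report(void *arg, enum lwiperf_report_type report_type,
                         const ip_addr_t *local_addr, u16_t local_port,
                         const ip_addr_t *remote_addr, u16_t remote_port,
                         u32_t bytes_transferred, u32_t ms_duration,
                         u32_t bandwidth_kbitpsec)
{
  LWIP_UNUSED_ARG(arg);
  LWIP_UNUSED_ARG(local_addr);  LWIP_UNUSED_ARG(local_port);
  LWIP_UNUSED_ARG(remote_addr); LWIP_UNUSED_ARG(remote_port);
  printf("iperf done: type=%d bytes=%lu time=%lu ms rate=%lu kbit/s\n",
         (int)report_type, (unsigned long)bytes_transferred,
         (unsigned long)ms_duration, (unsigned long)bandwidth_kbitpsec);
}

void my_iperf_init(void)
{
  /* lwiperf uses the raw API, so call this from tcpip_thread context,
   * e.g. via tcpip_callback() */
  (void)lwiperf_start_tcp_server_default(iperf_report, NULL);
}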
+ * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Erik Ekman + * + */ + +#ifndef LWIP_HDR_APPS_MDNS_OPTS_H +#define LWIP_HDR_APPS_MDNS_OPTS_H + +#include "lwip/opt.h" + +/** + * @defgroup mdns_opts Options + * @ingroup mdns + * @{ + */ + +/** + * LWIP_MDNS_RESPONDER==1: Turn on multicast DNS module. UDP must be available for MDNS + * transport. IGMP is needed for IPv4 multicast. + */ +#ifndef LWIP_MDNS_RESPONDER +#define LWIP_MDNS_RESPONDER 0 +#endif /* LWIP_MDNS_RESPONDER */ + +/** The maximum number of services per netif */ +#ifndef MDNS_MAX_SERVICES +#define MDNS_MAX_SERVICES 1 +#endif + +/** MDNS_RESP_USENETIF_EXTCALLBACK==1: register an ext_callback on the netif + * to automatically restart probing/announcing on status or address change. + */ +#ifndef MDNS_RESP_USENETIF_EXTCALLBACK +#define MDNS_RESP_USENETIF_EXTCALLBACK LWIP_NETIF_EXT_STATUS_CALLBACK +#endif + +/** + * MDNS_DEBUG: Enable debugging for multicast DNS. + */ +#ifndef MDNS_DEBUG +#define MDNS_DEBUG LWIP_DBG_OFF +#endif + +/** + * @} + */ + +#endif /* LWIP_HDR_APPS_MDNS_OPTS_H */ + diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/apps/mdns_priv.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/apps/mdns_priv.h new file mode 100644 index 0000000..2394157 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/apps/mdns_priv.h @@ -0,0 +1,74 @@ +/** + * @file + * MDNS responder private definitions + */ + + /* + * Copyright (c) 2015 Verisure Innovation AB + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Erik Ekman + * + */ +#ifndef LWIP_HDR_MDNS_PRIV_H +#define LWIP_HDR_MDNS_PRIV_H + +#include "lwip/apps/mdns_opts.h" +#include "lwip/pbuf.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#if LWIP_MDNS_RESPONDER + +/* Domain struct and methods - visible for unit tests */ + +#define MDNS_DOMAIN_MAXLEN 256 +#define MDNS_READNAME_ERROR 0xFFFF + +struct mdns_domain { + /* Encoded domain name */ + u8_t name[MDNS_DOMAIN_MAXLEN]; + /* Total length of domain name, including zero */ + u16_t length; + /* Set if compression of this domain is not allowed */ + u8_t skip_compression; +}; + +err_t mdns_domain_add_label(struct mdns_domain *domain, const char *label, u8_t len); +u16_t mdns_readname(struct pbuf *p, u16_t offset, struct mdns_domain *domain); +int mdns_domain_eq(struct mdns_domain *a, struct mdns_domain *b); +u16_t mdns_compress_domain(struct pbuf *pbuf, u16_t *offset, struct mdns_domain *domain); + +#endif /* LWIP_MDNS_RESPONDER */ + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_MDNS_PRIV_H */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/apps/mqtt.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/apps/mqtt.h new file mode 100644 index 0000000..e796ae1 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/apps/mqtt.h @@ -0,0 +1,205 @@ +/** + * @file + * MQTT client + */ + +/* + * Copyright (c) 2016 Erik Andersson + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. 
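A hedged sketch of the mDNS responder API added above (mdns.h): announce the board on the local network with one HTTP service. Requires LWIP_MDNS_RESPONDER (plus IGMP for IPv4 multicast); the hostname, service name and TXT record are assumptions:

#include "lwip/apps/mdns.h"

static void srv_txt(struct mdns_service *service, void *txt_userdata)
{
  LWIP_UNUSED_ARG(txt_userdata);
  (void)mdns_resp_add_service_txtitem(service, "path=/", 6);
}

void my_mdns_init(struct netif *netif)
{
  mdns_resp_init();
  mdns_resp_add_netif(netif, "f767zi", 3600);            /* answers f767zi.local */
  mdns_resp_add_service(netif, "f767zi-web", "_http",
                        DNSSD_PROTO_TCP, 80, 3600, srv_txt, NULL);
}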
+ * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Erik Andersson + * + */ +#ifndef LWIP_HDR_APPS_MQTT_CLIENT_H +#define LWIP_HDR_APPS_MQTT_CLIENT_H + +#include "lwip/apps/mqtt_opts.h" +#include "lwip/err.h" +#include "lwip/ip_addr.h" +#include "lwip/prot/iana.h" + +#ifdef __cplusplus +extern "C" { +#endif + +typedef struct mqtt_client_s mqtt_client_t; + +#if LWIP_ALTCP && LWIP_ALTCP_TLS +struct altcp_tls_config; +#endif + +/** @ingroup mqtt + * Default MQTT port (non-TLS) */ +#define MQTT_PORT LWIP_IANA_PORT_MQTT +/** @ingroup mqtt + * Default MQTT TLS port */ +#define MQTT_TLS_PORT LWIP_IANA_PORT_SECURE_MQTT + +/*---------------------------------------------------------------------------------------------- */ +/* Connection with server */ + +/** + * @ingroup mqtt + * Client information and connection parameters */ +struct mqtt_connect_client_info_t { + /** Client identifier, must be set by caller */ + const char *client_id; + /** User name, set to NULL if not used */ + const char* client_user; + /** Password, set to NULL if not used */ + const char* client_pass; + /** keep alive time in seconds, 0 to disable keep alive functionality*/ + u16_t keep_alive; + /** will topic, set to NULL if will is not to be used, + will_msg, will_qos and will retain are then ignored */ + const char* will_topic; + /** will_msg, see will_topic */ + const char* will_msg; + /** will_qos, see will_topic */ + u8_t will_qos; + /** will_retain, see will_topic */ + u8_t will_retain; +#if LWIP_ALTCP && LWIP_ALTCP_TLS + /** TLS configuration for secure connections */ + struct altcp_tls_config *tls_config; +#endif +}; + +/** + * @ingroup mqtt + * Connection status codes */ +typedef enum +{ + /** Accepted */ + MQTT_CONNECT_ACCEPTED = 0, + /** Refused protocol version */ + MQTT_CONNECT_REFUSED_PROTOCOL_VERSION = 1, + /** Refused identifier */ + MQTT_CONNECT_REFUSED_IDENTIFIER = 2, + /** Refused server */ + MQTT_CONNECT_REFUSED_SERVER = 3, + /** Refused user credentials */ + MQTT_CONNECT_REFUSED_USERNAME_PASS = 4, + /** Refused not authorized */ + MQTT_CONNECT_REFUSED_NOT_AUTHORIZED_ = 5, + /** Disconnected */ + MQTT_CONNECT_DISCONNECTED = 256, + /** Timeout */ + MQTT_CONNECT_TIMEOUT = 257 +} mqtt_connection_status_t; + +/** + * @ingroup mqtt + * Function prototype for mqtt connection status callback. 
Called when + * client has connected to the server after initiating a mqtt connection attempt by + * calling mqtt_client_connect() or when connection is closed by server or an error + * + * @param client MQTT client itself + * @param arg Additional argument to pass to the callback function + * @param status Connect result code or disconnection notification @see mqtt_connection_status_t + * + */ +typedef void (*mqtt_connection_cb_t)(mqtt_client_t *client, void *arg, mqtt_connection_status_t status); + + +/** + * @ingroup mqtt + * Data callback flags */ +enum { + /** Flag set when last fragment of data arrives in data callback */ + MQTT_DATA_FLAG_LAST = 1 +}; + +/** + * @ingroup mqtt + * Function prototype for MQTT incoming publish data callback function. Called when data + * arrives to a subscribed topic @see mqtt_subscribe + * + * @param arg Additional argument to pass to the callback function + * @param data User data, pointed object, data may not be referenced after callback return, + NULL is passed when all publish data are delivered + * @param len Length of publish data fragment + * @param flags MQTT_DATA_FLAG_LAST set when this call contains the last part of data from publish message + * + */ +typedef void (*mqtt_incoming_data_cb_t)(void *arg, const u8_t *data, u16_t len, u8_t flags); + + +/** + * @ingroup mqtt + * Function prototype for MQTT incoming publish function. Called when an incoming publish + * arrives to a subscribed topic @see mqtt_subscribe + * + * @param arg Additional argument to pass to the callback function + * @param topic Zero terminated Topic text string, topic may not be referenced after callback return + * @param tot_len Total length of publish data, if set to 0 (no publish payload) data callback will not be invoked + */ +typedef void (*mqtt_incoming_publish_cb_t)(void *arg, const char *topic, u32_t tot_len); + + +/** + * @ingroup mqtt + * Function prototype for mqtt request callback. 
Called when a subscribe, unsubscribe + * or publish request has completed + * @param arg Pointer to user data supplied when invoking request + * @param err ERR_OK on success + * ERR_TIMEOUT if no response was received within timeout, + * ERR_ABRT if (un)subscribe was denied + */ +typedef void (*mqtt_request_cb_t)(void *arg, err_t err); + + +err_t mqtt_client_connect(mqtt_client_t *client, const ip_addr_t *ipaddr, u16_t port, mqtt_connection_cb_t cb, void *arg, + const struct mqtt_connect_client_info_t *client_info); + +void mqtt_disconnect(mqtt_client_t *client); + +mqtt_client_t *mqtt_client_new(void); +void mqtt_client_free(mqtt_client_t* client); + +u8_t mqtt_client_is_connected(mqtt_client_t *client); + +void mqtt_set_inpub_callback(mqtt_client_t *client, mqtt_incoming_publish_cb_t, + mqtt_incoming_data_cb_t data_cb, void *arg); + +err_t mqtt_sub_unsub(mqtt_client_t *client, const char *topic, u8_t qos, mqtt_request_cb_t cb, void *arg, u8_t sub); + +/** @ingroup mqtt + *Subscribe to topic */ +#define mqtt_subscribe(client, topic, qos, cb, arg) mqtt_sub_unsub(client, topic, qos, cb, arg, 1) +/** @ingroup mqtt + * Unsubscribe to topic */ +#define mqtt_unsubscribe(client, topic, cb, arg) mqtt_sub_unsub(client, topic, 0, cb, arg, 0) + +err_t mqtt_publish(mqtt_client_t *client, const char *topic, const void *payload, u16_t payload_length, u8_t qos, u8_t retain, + mqtt_request_cb_t cb, void *arg); + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_APPS_MQTT_CLIENT_H */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/apps/mqtt_opts.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/apps/mqtt_opts.h new file mode 100644 index 0000000..e056130 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/apps/mqtt_opts.h @@ -0,0 +1,103 @@ +/** + * @file + * MQTT client options + */ + +/* + * Copyright (c) 2016 Erik Andersson + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
+ * + * Author: Erik Andersson + * + */ +#ifndef LWIP_HDR_APPS_MQTT_OPTS_H +#define LWIP_HDR_APPS_MQTT_OPTS_H + +#include "lwip/opt.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * @defgroup mqtt_opts Options + * @ingroup mqtt + * @{ + */ + +/** + * Output ring-buffer size, must be able to fit largest outgoing publish message topic+payloads + */ +#ifndef MQTT_OUTPUT_RINGBUF_SIZE +#define MQTT_OUTPUT_RINGBUF_SIZE 256 +#endif + +/** + * Number of bytes in receive buffer, must be at least the size of the longest incoming topic + 8 + * If one wants to avoid fragmented incoming publish, set length to max incoming topic length + max payload length + 8 + */ +#ifndef MQTT_VAR_HEADER_BUFFER_LEN +#define MQTT_VAR_HEADER_BUFFER_LEN 128 +#endif + +/** + * Maximum number of pending subscribe, unsubscribe and publish requests to server . + */ +#ifndef MQTT_REQ_MAX_IN_FLIGHT +#define MQTT_REQ_MAX_IN_FLIGHT 4 +#endif + +/** + * Seconds between each cyclic timer call. + */ +#ifndef MQTT_CYCLIC_TIMER_INTERVAL +#define MQTT_CYCLIC_TIMER_INTERVAL 5 +#endif + +/** + * Publish, subscribe and unsubscribe request timeout in seconds. + */ +#ifndef MQTT_REQ_TIMEOUT +#define MQTT_REQ_TIMEOUT 30 +#endif + +/** + * Seconds for MQTT connect response timeout after sending connect request + */ +#ifndef MQTT_CONNECT_TIMOUT +#define MQTT_CONNECT_TIMOUT 100 +#endif + +/** + * @} + */ + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_APPS_MQTT_OPTS_H */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/apps/mqtt_priv.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/apps/mqtt_priv.h new file mode 100644 index 0000000..04d2336 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/apps/mqtt_priv.h @@ -0,0 +1,104 @@ +/** + * @file + * MQTT client (private interface) + */ + +/* + * Copyright (c) 2016 Erik Andersson + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
+ * + * Author: Erik Andersson + * + */ +#ifndef LWIP_HDR_APPS_MQTT_PRIV_H +#define LWIP_HDR_APPS_MQTT_PRIV_H + +#include "lwip/apps/mqtt.h" +#include "lwip/altcp.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/** Pending request item, binds application callback to pending server requests */ +struct mqtt_request_t +{ + /** Next item in list, NULL means this is the last in chain, + next pointing at itself means request is unallocated */ + struct mqtt_request_t *next; + /** Callback to upper layer */ + mqtt_request_cb_t cb; + void *arg; + /** MQTT packet identifier */ + u16_t pkt_id; + /** Expire time relative to element before this */ + u16_t timeout_diff; +}; + +/** Ring buffer */ +struct mqtt_ringbuf_t { + u16_t put; + u16_t get; + u8_t buf[MQTT_OUTPUT_RINGBUF_SIZE]; +}; + +/** MQTT client */ +struct mqtt_client_s +{ + /** Timers and timeouts */ + u16_t cyclic_tick; + u16_t keep_alive; + u16_t server_watchdog; + /** Packet identifier generator*/ + u16_t pkt_id_seq; + /** Packet identifier of pending incoming publish */ + u16_t inpub_pkt_id; + /** Connection state */ + u8_t conn_state; + struct altcp_pcb *conn; + /** Connection callback */ + void *connect_arg; + mqtt_connection_cb_t connect_cb; + /** Pending requests to server */ + struct mqtt_request_t *pend_req_queue; + struct mqtt_request_t req_list[MQTT_REQ_MAX_IN_FLIGHT]; + void *inpub_arg; + /** Incoming data callback */ + mqtt_incoming_data_cb_t data_cb; + mqtt_incoming_publish_cb_t pub_cb; + /** Input */ + u32_t msg_idx; + u8_t rx_buffer[MQTT_VAR_HEADER_BUFFER_LEN]; + /** Output ring-buffer */ + struct mqtt_ringbuf_t output; +}; + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_APPS_MQTT_PRIV_H */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/apps/netbiosns.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/apps/netbiosns.h new file mode 100644 index 0000000..c5aedb6 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/apps/netbiosns.h @@ -0,0 +1,51 @@ +/** + * @file + * NETBIOS name service responder + */ + +/* + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + */ +#ifndef LWIP_HDR_APPS_NETBIOS_H +#define LWIP_HDR_APPS_NETBIOS_H + +#include "lwip/apps/netbiosns_opts.h" + +#ifdef __cplusplus +extern "C" { +#endif + +void netbiosns_init(void); +#ifndef NETBIOS_LWIP_NAME +void netbiosns_set_name(const char* hostname); +#endif +void netbiosns_stop(void); + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_APPS_NETBIOS_H */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/apps/netbiosns_opts.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/apps/netbiosns_opts.h new file mode 100644 index 0000000..0bab59e --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/apps/netbiosns_opts.h @@ -0,0 +1,66 @@ +/** + * @file + * NETBIOS name service responder options + */ + +/* + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + */ +#ifndef LWIP_HDR_APPS_NETBIOS_OPTS_H +#define LWIP_HDR_APPS_NETBIOS_OPTS_H + +#include "lwip/opt.h" + +/** + * @defgroup netbiosns_opts Options + * @ingroup netbiosns + * @{ + */ + +/** NetBIOS name of lwip device + * This must be uppercase until NETBIOS_STRCMP() is defined to a string + * comparision function that is case insensitive. + * If you want to use the netif's hostname, use this (with LWIP_NETIF_HOSTNAME): + * (ip_current_netif() != NULL ? ip_current_netif()->hostname != NULL ? 
ip_current_netif()->hostname : "" : "") + * + * If this is not defined, netbiosns_set_name() can be called at runtime to change the name. + */ +#ifdef __DOXYGEN__ +#define NETBIOS_LWIP_NAME "NETBIOSLWIPDEV" +#endif + +/** Respond to NetBIOS name queries + * Default is disabled + */ +#if !defined LWIP_NETBIOS_RESPOND_NAME_QUERY || defined __DOXYGEN__ +#define LWIP_NETBIOS_RESPOND_NAME_QUERY 0 +#endif + +/** + * @} + */ + +#endif /* LWIP_HDR_APPS_NETBIOS_OPTS_H */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/apps/smtp.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/apps/smtp.h new file mode 100644 index 0000000..e6076a1 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/apps/smtp.h @@ -0,0 +1,128 @@ +#ifndef LWIP_HDR_APPS_SMTP_H +#define LWIP_HDR_APPS_SMTP_H + +#ifdef __cplusplus +extern "C" { +#endif + +#include "lwip/apps/smtp_opts.h" +#include "lwip/err.h" +#include "lwip/prot/iana.h" + +/** The default TCP port used for SMTP */ +#define SMTP_DEFAULT_PORT LWIP_IANA_PORT_SMTP +/** The default TCP port used for SMTPS */ +#define SMTPS_DEFAULT_PORT LWIP_IANA_PORT_SMTPS + +/** Email successfully sent */ +#define SMTP_RESULT_OK 0 +/** Unknown error */ +#define SMTP_RESULT_ERR_UNKNOWN 1 +/** Connection to server failed */ +#define SMTP_RESULT_ERR_CONNECT 2 +/** Failed to resolve server hostname */ +#define SMTP_RESULT_ERR_HOSTNAME 3 +/** Connection unexpectedly closed by remote server */ +#define SMTP_RESULT_ERR_CLOSED 4 +/** Connection timed out (server didn't respond in time) */ +#define SMTP_RESULT_ERR_TIMEOUT 5 +/** Server responded with an unknown response code */ +#define SMTP_RESULT_ERR_SVR_RESP 6 +/** Out of resources locally */ +#define SMTP_RESULT_ERR_MEM 7 + +/** Prototype of an smtp callback function + * + * @param arg argument specified when initiating the email + * @param smtp_result result of the mail transfer (see defines SMTP_RESULT_*) + * @param srv_err if aborted by the server, this contains the error code received + * @param err an error returned by internal lwip functions, can help to specify + * the source of the error but must not necessarily be != ERR_OK + */ +typedef void (*smtp_result_fn)(void *arg, u8_t smtp_result, u16_t srv_err, err_t err); + +/** This structure is used as argument for smtp_send_mail_int(), + * which in turn can be used with tcpip_callback() to send mail + * from interrupt context, e.g. like this: + * struct smtp_send_request *req; (to be filled) + * tcpip_try_callback(smtp_send_mail_int, (void*)req); + * + * For member description, see parameter description of smtp_send_mail(). + * When using with tcpip_callback, this structure has to stay allocated + * (e.g. using mem_malloc/mem_free) until its 'callback_fn' is called. + */ +struct smtp_send_request { + const char *from; + const char* to; + const char* subject; + const char* body; + smtp_result_fn callback_fn; + void* callback_arg; + /** If this is != 0, data is *not* copied into an extra buffer + * but used from the pointers supplied in this struct. + * This means less memory usage, but data must stay untouched until + * the callback function is called. 
*/ + u8_t static_data; +}; + + +#if SMTP_BODYDH + +#ifndef SMTP_BODYDH_BUFFER_SIZE +#define SMTP_BODYDH_BUFFER_SIZE 256 +#endif /* SMTP_BODYDH_BUFFER_SIZE */ + +struct smtp_bodydh { + u16_t state; + u16_t length; /* Length of content in buffer */ + char buffer[SMTP_BODYDH_BUFFER_SIZE]; /* buffer for generated content */ +#ifdef SMTP_BODYDH_USER_SIZE + u8_t user[SMTP_BODYDH_USER_SIZE]; +#endif /* SMTP_BODYDH_USER_SIZE */ +}; + +enum bdh_retvals_e { + BDH_DONE = 0, + BDH_WORKING +}; + +/** Prototype of an smtp body callback function + * It receives a struct smtp_bodydh, and a buffer to write data, + * must return BDH_WORKING to be called again and BDH_DONE when + * it has finished processing. This one tries to fill one TCP buffer with + * data, your function will be repeatedly called until that happens; so if you + * know you'll be taking too long to serve your request, pause once in a while + * by writing length=0 to avoid hogging system resources + * + * @param arg argument specified when initiating the email + * @param smtp_bodydh state handling + buffer structure + */ +typedef int (*smtp_bodycback_fn)(void *arg, struct smtp_bodydh *bodydh); + +err_t smtp_send_mail_bodycback(const char *from, const char* to, const char* subject, + smtp_bodycback_fn bodycback_fn, smtp_result_fn callback_fn, void* callback_arg); + +#endif /* SMTP_BODYDH */ + + +err_t smtp_set_server_addr(const char* server); +void smtp_set_server_port(u16_t port); +#if LWIP_ALTCP && LWIP_ALTCP_TLS +struct altcp_tls_config; +void smtp_set_tls_config(struct altcp_tls_config *tls_config); +#endif +err_t smtp_set_auth(const char* username, const char* pass); +err_t smtp_send_mail(const char *from, const char* to, const char* subject, const char* body, + smtp_result_fn callback_fn, void* callback_arg); +err_t smtp_send_mail_static(const char *from, const char* to, const char* subject, const char* body, + smtp_result_fn callback_fn, void* callback_arg); +void smtp_send_mail_int(void *arg); +#ifdef LWIP_DEBUG +const char* smtp_result_str(u8_t smtp_result); +#endif + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_APPS_SMTP_H */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/apps/smtp_opts.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/apps/smtp_opts.h new file mode 100644 index 0000000..29c0c08 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/apps/smtp_opts.h @@ -0,0 +1,81 @@ +#ifndef LWIP_HDR_APPS_SMTP_OPTS_H +#define LWIP_HDR_APPS_SMTP_OPTS_H + +#include "lwip/opt.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * @defgroup smtp_opts Options + * @ingroup smtp + * + * @{ + */ + +/** Set this to 1 to enable data handler callback on BODY */ +#ifndef SMTP_BODYDH +#define SMTP_BODYDH 0 +#endif + +/** SMTP_DEBUG: Enable debugging for SNTP. */ +#ifndef SMTP_DEBUG +#define SMTP_DEBUG LWIP_DBG_OFF +#endif + +/** Maximum length reserved for server name including terminating 0 byte */ +#ifndef SMTP_MAX_SERVERNAME_LEN +#define SMTP_MAX_SERVERNAME_LEN 256 +#endif + +/** Maximum length reserved for username */ +#ifndef SMTP_MAX_USERNAME_LEN +#define SMTP_MAX_USERNAME_LEN 32 +#endif + +/** Maximum length reserved for password */ +#ifndef SMTP_MAX_PASS_LEN +#define SMTP_MAX_PASS_LEN 32 +#endif + +/** Set this to 0 if you know the authentication data will not change + * during the smtp session, which saves some heap space. 
*/ +#ifndef SMTP_COPY_AUTHDATA +#define SMTP_COPY_AUTHDATA 1 +#endif + +/** Set this to 0 to save some code space if you know for sure that all data + * passed to this module conforms to the requirements in the SMTP RFC. + * WARNING: use this with care! + */ +#ifndef SMTP_CHECK_DATA +#define SMTP_CHECK_DATA 1 +#endif + +/** Set this to 1 to enable AUTH PLAIN support */ +#ifndef SMTP_SUPPORT_AUTH_PLAIN +#define SMTP_SUPPORT_AUTH_PLAIN 1 +#endif + +/** Set this to 1 to enable AUTH LOGIN support */ +#ifndef SMTP_SUPPORT_AUTH_LOGIN +#define SMTP_SUPPORT_AUTH_LOGIN 1 +#endif + +/* Memory allocation/deallocation can be overridden... */ +#ifndef SMTP_STATE_MALLOC +#define SMTP_STATE_MALLOC(size) mem_malloc(size) +#define SMTP_STATE_FREE(ptr) mem_free(ptr) +#endif + +/** + * @} + */ + +#ifdef __cplusplus +} +#endif + +#endif /* SMTP_OPTS_H */ + diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmp.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmp.h new file mode 100644 index 0000000..15b154e --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmp.h @@ -0,0 +1,135 @@ +/** + * @file + * SNMP server main API - start and basic configuration + */ + +/* + * Copyright (c) 2001, 2002 Leon Woestenberg + * Copyright (c) 2001, 2002 Axon Digital Design B.V., The Netherlands. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
+ * + * Author: Leon Woestenberg + * Martin Hentschel + * + */ +#ifndef LWIP_HDR_APPS_SNMP_H +#define LWIP_HDR_APPS_SNMP_H + +#include "lwip/apps/snmp_opts.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#if LWIP_SNMP /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/err.h" +#include "lwip/apps/snmp_core.h" + +/** SNMP variable binding descriptor (publically needed for traps) */ +struct snmp_varbind +{ + /** pointer to next varbind, NULL for last in list */ + struct snmp_varbind *next; + /** pointer to previous varbind, NULL for first in list */ + struct snmp_varbind *prev; + + /** object identifier */ + struct snmp_obj_id oid; + + /** value ASN1 type */ + u8_t type; + /** object value length */ + u16_t value_len; + /** object value */ + void *value; +}; + +/** + * @ingroup snmp_core + * Agent setup, start listening to port 161. + */ +void snmp_init(void); +void snmp_set_mibs(const struct snmp_mib **mibs, u8_t num_mibs); + +void snmp_set_device_enterprise_oid(const struct snmp_obj_id* device_enterprise_oid); +const struct snmp_obj_id* snmp_get_device_enterprise_oid(void); + +void snmp_trap_dst_enable(u8_t dst_idx, u8_t enable); +void snmp_trap_dst_ip_set(u8_t dst_idx, const ip_addr_t *dst); + +/** Generic trap: cold start */ +#define SNMP_GENTRAP_COLDSTART 0 +/** Generic trap: warm start */ +#define SNMP_GENTRAP_WARMSTART 1 +/** Generic trap: link down */ +#define SNMP_GENTRAP_LINKDOWN 2 +/** Generic trap: link up */ +#define SNMP_GENTRAP_LINKUP 3 +/** Generic trap: authentication failure */ +#define SNMP_GENTRAP_AUTH_FAILURE 4 +/** Generic trap: EGP neighbor lost */ +#define SNMP_GENTRAP_EGP_NEIGHBOR_LOSS 5 +/** Generic trap: enterprise specific */ +#define SNMP_GENTRAP_ENTERPRISE_SPECIFIC 6 + +err_t snmp_send_trap_generic(s32_t generic_trap); +err_t snmp_send_trap_specific(s32_t specific_trap, struct snmp_varbind *varbinds); +err_t snmp_send_trap(const struct snmp_obj_id* oid, s32_t generic_trap, s32_t specific_trap, struct snmp_varbind *varbinds); + +#define SNMP_AUTH_TRAPS_DISABLED 0 +#define SNMP_AUTH_TRAPS_ENABLED 1 +void snmp_set_auth_traps_enabled(u8_t enable); +u8_t snmp_get_auth_traps_enabled(void); + +u8_t snmp_v1_enabled(void); +u8_t snmp_v2c_enabled(void); +u8_t snmp_v3_enabled(void); +void snmp_v1_enable(u8_t enable); +void snmp_v2c_enable(u8_t enable); +void snmp_v3_enable(u8_t enable); + +const char * snmp_get_community(void); +const char * snmp_get_community_write(void); +const char * snmp_get_community_trap(void); +void snmp_set_community(const char * const community); +void snmp_set_community_write(const char * const community); +void snmp_set_community_trap(const char * const community); + +void snmp_coldstart_trap(void); +void snmp_authfail_trap(void); + +typedef void (*snmp_write_callback_fct)(const u32_t* oid, u8_t oid_len, void* callback_arg); +void snmp_set_write_callback(snmp_write_callback_fct write_callback, void* callback_arg); + +#endif /* LWIP_SNMP */ + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_APPS_SNMP_H */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmp_core.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmp_core.h new file mode 100644 index 0000000..4748df6 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmp_core.h @@ -0,0 +1,377 @@ +/** + * @file + * SNMP core API for implementing MIBs + */ + +/* + * Copyright 
(c) 2006 Axon Digital Design B.V., The Netherlands. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * Author: Christiaan Simons + * Martin Hentschel + */ + +#ifndef LWIP_HDR_APPS_SNMP_CORE_H +#define LWIP_HDR_APPS_SNMP_CORE_H + +#include "lwip/apps/snmp_opts.h" + +#if LWIP_SNMP /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/ip_addr.h" +#include "lwip/err.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* basic ASN1 defines */ +#define SNMP_ASN1_CLASS_UNIVERSAL 0x00 +#define SNMP_ASN1_CLASS_APPLICATION 0x40 +#define SNMP_ASN1_CLASS_CONTEXT 0x80 +#define SNMP_ASN1_CLASS_PRIVATE 0xC0 + +#define SNMP_ASN1_CONTENTTYPE_PRIMITIVE 0x00 +#define SNMP_ASN1_CONTENTTYPE_CONSTRUCTED 0x20 + +/* universal tags (from ASN.1 spec.) 
*/ +#define SNMP_ASN1_UNIVERSAL_END_OF_CONTENT 0 +#define SNMP_ASN1_UNIVERSAL_INTEGER 2 +#define SNMP_ASN1_UNIVERSAL_OCTET_STRING 4 +#define SNMP_ASN1_UNIVERSAL_NULL 5 +#define SNMP_ASN1_UNIVERSAL_OBJECT_ID 6 +#define SNMP_ASN1_UNIVERSAL_SEQUENCE_OF 16 + +/* application specific (SNMP) tags (from SNMPv2-SMI) */ +#define SNMP_ASN1_APPLICATION_IPADDR 0 /* [APPLICATION 0] IMPLICIT OCTET STRING (SIZE (4)) */ +#define SNMP_ASN1_APPLICATION_COUNTER 1 /* [APPLICATION 1] IMPLICIT INTEGER (0..4294967295) => u32_t */ +#define SNMP_ASN1_APPLICATION_GAUGE 2 /* [APPLICATION 2] IMPLICIT INTEGER (0..4294967295) => u32_t */ +#define SNMP_ASN1_APPLICATION_TIMETICKS 3 /* [APPLICATION 3] IMPLICIT INTEGER (0..4294967295) => u32_t */ +#define SNMP_ASN1_APPLICATION_OPAQUE 4 /* [APPLICATION 4] IMPLICIT OCTET STRING */ +#define SNMP_ASN1_APPLICATION_COUNTER64 6 /* [APPLICATION 6] IMPLICIT INTEGER (0..18446744073709551615) */ + +/* context specific (SNMP) tags (from RFC 1905) */ +#define SNMP_ASN1_CONTEXT_VARBIND_NO_SUCH_INSTANCE 1 + +/* full ASN1 type defines */ +#define SNMP_ASN1_TYPE_END_OF_CONTENT (SNMP_ASN1_CLASS_UNIVERSAL | SNMP_ASN1_CONTENTTYPE_PRIMITIVE | SNMP_ASN1_UNIVERSAL_END_OF_CONTENT) +#define SNMP_ASN1_TYPE_INTEGER (SNMP_ASN1_CLASS_UNIVERSAL | SNMP_ASN1_CONTENTTYPE_PRIMITIVE | SNMP_ASN1_UNIVERSAL_INTEGER) +#define SNMP_ASN1_TYPE_OCTET_STRING (SNMP_ASN1_CLASS_UNIVERSAL | SNMP_ASN1_CONTENTTYPE_PRIMITIVE | SNMP_ASN1_UNIVERSAL_OCTET_STRING) +#define SNMP_ASN1_TYPE_NULL (SNMP_ASN1_CLASS_UNIVERSAL | SNMP_ASN1_CONTENTTYPE_PRIMITIVE | SNMP_ASN1_UNIVERSAL_NULL) +#define SNMP_ASN1_TYPE_OBJECT_ID (SNMP_ASN1_CLASS_UNIVERSAL | SNMP_ASN1_CONTENTTYPE_PRIMITIVE | SNMP_ASN1_UNIVERSAL_OBJECT_ID) +#define SNMP_ASN1_TYPE_SEQUENCE (SNMP_ASN1_CLASS_UNIVERSAL | SNMP_ASN1_CONTENTTYPE_CONSTRUCTED | SNMP_ASN1_UNIVERSAL_SEQUENCE_OF) +#define SNMP_ASN1_TYPE_IPADDR (SNMP_ASN1_CLASS_APPLICATION | SNMP_ASN1_CONTENTTYPE_PRIMITIVE | SNMP_ASN1_APPLICATION_IPADDR) +#define SNMP_ASN1_TYPE_IPADDRESS SNMP_ASN1_TYPE_IPADDR +#define SNMP_ASN1_TYPE_COUNTER (SNMP_ASN1_CLASS_APPLICATION | SNMP_ASN1_CONTENTTYPE_PRIMITIVE | SNMP_ASN1_APPLICATION_COUNTER) +#define SNMP_ASN1_TYPE_COUNTER32 SNMP_ASN1_TYPE_COUNTER +#define SNMP_ASN1_TYPE_GAUGE (SNMP_ASN1_CLASS_APPLICATION | SNMP_ASN1_CONTENTTYPE_PRIMITIVE | SNMP_ASN1_APPLICATION_GAUGE) +#define SNMP_ASN1_TYPE_GAUGE32 SNMP_ASN1_TYPE_GAUGE +#define SNMP_ASN1_TYPE_UNSIGNED32 SNMP_ASN1_TYPE_GAUGE +#define SNMP_ASN1_TYPE_TIMETICKS (SNMP_ASN1_CLASS_APPLICATION | SNMP_ASN1_CONTENTTYPE_PRIMITIVE | SNMP_ASN1_APPLICATION_TIMETICKS) +#define SNMP_ASN1_TYPE_OPAQUE (SNMP_ASN1_CLASS_APPLICATION | SNMP_ASN1_CONTENTTYPE_PRIMITIVE | SNMP_ASN1_APPLICATION_OPAQUE) +#if LWIP_HAVE_INT64 +#define SNMP_ASN1_TYPE_COUNTER64 (SNMP_ASN1_CLASS_APPLICATION | SNMP_ASN1_CONTENTTYPE_PRIMITIVE | SNMP_ASN1_APPLICATION_COUNTER64) +#endif + +#define SNMP_VARBIND_EXCEPTION_OFFSET 0xF0 +#define SNMP_VARBIND_EXCEPTION_MASK 0x0F + +/** error codes predefined by SNMP prot. */ +typedef enum { + SNMP_ERR_NOERROR = 0, +/* +outdated v1 error codes. do not use anmore! 
+#define SNMP_ERR_NOSUCHNAME 2 use SNMP_ERR_NOSUCHINSTANCE instead +#define SNMP_ERR_BADVALUE 3 use SNMP_ERR_WRONGTYPE,SNMP_ERR_WRONGLENGTH,SNMP_ERR_WRONGENCODING or SNMP_ERR_WRONGVALUE instead +#define SNMP_ERR_READONLY 4 use SNMP_ERR_NOTWRITABLE instead +*/ + SNMP_ERR_GENERROR = 5, + SNMP_ERR_NOACCESS = 6, + SNMP_ERR_WRONGTYPE = 7, + SNMP_ERR_WRONGLENGTH = 8, + SNMP_ERR_WRONGENCODING = 9, + SNMP_ERR_WRONGVALUE = 10, + SNMP_ERR_NOCREATION = 11, + SNMP_ERR_INCONSISTENTVALUE = 12, + SNMP_ERR_RESOURCEUNAVAILABLE = 13, + SNMP_ERR_COMMITFAILED = 14, + SNMP_ERR_UNDOFAILED = 15, + SNMP_ERR_NOTWRITABLE = 17, + SNMP_ERR_INCONSISTENTNAME = 18, + + SNMP_ERR_NOSUCHINSTANCE = SNMP_VARBIND_EXCEPTION_OFFSET + SNMP_ASN1_CONTEXT_VARBIND_NO_SUCH_INSTANCE +} snmp_err_t; + +/** internal object identifier representation */ +struct snmp_obj_id +{ + u8_t len; + u32_t id[SNMP_MAX_OBJ_ID_LEN]; +}; + +struct snmp_obj_id_const_ref +{ + u8_t len; + const u32_t* id; +}; + +extern const struct snmp_obj_id_const_ref snmp_zero_dot_zero; /* administrative identifier from SNMPv2-SMI */ + +/** SNMP variant value, used as reference in struct snmp_node_instance and table implementation */ +union snmp_variant_value +{ + void* ptr; + const void* const_ptr; + u32_t u32; + s32_t s32; +#if LWIP_HAVE_INT64 + u64_t u64; +#endif +}; + + +/** +SNMP MIB node types + tree node is the only node the stack can process in order to walk the tree, + all other nodes are assumed to be leaf nodes. + This cannot be an enum because users may want to define their own node types. +*/ +#define SNMP_NODE_TREE 0x00 +/* predefined leaf node types */ +#define SNMP_NODE_SCALAR 0x01 +#define SNMP_NODE_SCALAR_ARRAY 0x02 +#define SNMP_NODE_TABLE 0x03 +#define SNMP_NODE_THREADSYNC 0x04 + +/** node "base class" layout, the mandatory fields for a node */ +struct snmp_node +{ + /** one out of SNMP_NODE_TREE or any leaf node type (like SNMP_NODE_SCALAR) */ + u8_t node_type; + /** the number assigned to this node which used as part of the full OID */ + u32_t oid; +}; + +/** SNMP node instance access types */ +typedef enum { + SNMP_NODE_INSTANCE_ACCESS_READ = 1, + SNMP_NODE_INSTANCE_ACCESS_WRITE = 2, + SNMP_NODE_INSTANCE_READ_ONLY = SNMP_NODE_INSTANCE_ACCESS_READ, + SNMP_NODE_INSTANCE_READ_WRITE = (SNMP_NODE_INSTANCE_ACCESS_READ | SNMP_NODE_INSTANCE_ACCESS_WRITE), + SNMP_NODE_INSTANCE_WRITE_ONLY = SNMP_NODE_INSTANCE_ACCESS_WRITE, + SNMP_NODE_INSTANCE_NOT_ACCESSIBLE = 0 +} snmp_access_t; + +struct snmp_node_instance; + +typedef s16_t (*node_instance_get_value_method)(struct snmp_node_instance*, void*); +typedef snmp_err_t (*node_instance_set_test_method)(struct snmp_node_instance*, u16_t, void*); +typedef snmp_err_t (*node_instance_set_value_method)(struct snmp_node_instance*, u16_t, void*); +typedef void (*node_instance_release_method)(struct snmp_node_instance*); + +#define SNMP_GET_VALUE_RAW_DATA 0x4000 /* do not use 0x8000 because return value of node_instance_get_value_method is signed16 and 0x8000 would be the signed bit */ + +/** SNMP node instance */ +struct snmp_node_instance +{ + /** prefilled with the node, get_instance() is called on; may be changed by user to any value to pass an arbitrary node between calls to get_instance() and get_value/test_value/set_value */ + const struct snmp_node* node; + /** prefilled with the instance id requested; for get_instance() this is the exact oid requested; for get_next_instance() this is the relative starting point, stack expects relative oid of next node here */ + struct snmp_obj_id instance_oid; + + /** ASN type 
for this object (see snmp_asn1.h for definitions) */ + u8_t asn1_type; + /** one out of instance access types defined above (SNMP_NODE_INSTANCE_READ_ONLY,...) */ + snmp_access_t access; + + /** returns object value for the given object identifier. Return values <0 to indicate an error */ + node_instance_get_value_method get_value; + /** tests length and/or range BEFORE setting */ + node_instance_set_test_method set_test; + /** sets object value, only called when set_test() was successful */ + node_instance_set_value_method set_value; + /** called in any case when the instance is not required anymore by stack (useful for freeing memory allocated in get_instance/get_next_instance methods) */ + node_instance_release_method release_instance; + + /** reference to pass arbitrary value between calls to get_instance() and get_value/test_value/set_value */ + union snmp_variant_value reference; + /** see reference (if reference is a pointer, the length of underlying data may be stored here or anything else) */ + u32_t reference_len; +}; + + +/** SNMP tree node */ +struct snmp_tree_node +{ + /** inherited "base class" members */ + struct snmp_node node; + u16_t subnode_count; + const struct snmp_node* const *subnodes; +}; + +#define SNMP_CREATE_TREE_NODE(oid, subnodes) \ + {{ SNMP_NODE_TREE, (oid) }, \ + (u16_t)LWIP_ARRAYSIZE(subnodes), (subnodes) } + +#define SNMP_CREATE_EMPTY_TREE_NODE(oid) \ + {{ SNMP_NODE_TREE, (oid) }, \ + 0, NULL } + +/** SNMP leaf node */ +struct snmp_leaf_node +{ + /** inherited "base class" members */ + struct snmp_node node; + snmp_err_t (*get_instance)(const u32_t *root_oid, u8_t root_oid_len, struct snmp_node_instance* instance); + snmp_err_t (*get_next_instance)(const u32_t *root_oid, u8_t root_oid_len, struct snmp_node_instance* instance); +}; + +/** represents a single mib with its base oid and root node */ +struct snmp_mib +{ + const u32_t *base_oid; + u8_t base_oid_len; + const struct snmp_node *root_node; +}; + +#define SNMP_MIB_CREATE(oid_list, root_node) { (oid_list), (u8_t)LWIP_ARRAYSIZE(oid_list), root_node } + +/** OID range structure */ +struct snmp_oid_range +{ + u32_t min; + u32_t max; +}; + +/** checks if incoming OID length and values are in allowed ranges */ +u8_t snmp_oid_in_range(const u32_t *oid_in, u8_t oid_len, const struct snmp_oid_range *oid_ranges, u8_t oid_ranges_len); + +typedef enum { + SNMP_NEXT_OID_STATUS_SUCCESS, + SNMP_NEXT_OID_STATUS_NO_MATCH, + SNMP_NEXT_OID_STATUS_BUF_TO_SMALL +} snmp_next_oid_status_t; + +/** state for next_oid_init / next_oid_check functions */ +struct snmp_next_oid_state +{ + const u32_t* start_oid; + u8_t start_oid_len; + + u32_t* next_oid; + u8_t next_oid_len; + u8_t next_oid_max_len; + + snmp_next_oid_status_t status; + void* reference; +}; + +void snmp_next_oid_init(struct snmp_next_oid_state *state, + const u32_t *start_oid, u8_t start_oid_len, + u32_t *next_oid_buf, u8_t next_oid_max_len); +u8_t snmp_next_oid_precheck(struct snmp_next_oid_state *state, const u32_t *oid, u8_t oid_len); +u8_t snmp_next_oid_check(struct snmp_next_oid_state *state, const u32_t *oid, u8_t oid_len, void* reference); + +void snmp_oid_assign(struct snmp_obj_id* target, const u32_t *oid, u8_t oid_len); +void snmp_oid_combine(struct snmp_obj_id* target, const u32_t *oid1, u8_t oid1_len, const u32_t *oid2, u8_t oid2_len); +void snmp_oid_prefix(struct snmp_obj_id* target, const u32_t *oid, u8_t oid_len); +void snmp_oid_append(struct snmp_obj_id* target, const u32_t *oid, u8_t oid_len); +u8_t snmp_oid_equal(const u32_t *oid1, u8_t oid1_len, 
const u32_t *oid2, u8_t oid2_len); +s8_t snmp_oid_compare(const u32_t *oid1, u8_t oid1_len, const u32_t *oid2, u8_t oid2_len); + +#if LWIP_IPV4 +u8_t snmp_oid_to_ip4(const u32_t *oid, ip4_addr_t *ip); +void snmp_ip4_to_oid(const ip4_addr_t *ip, u32_t *oid); +#endif /* LWIP_IPV4 */ +#if LWIP_IPV6 +u8_t snmp_oid_to_ip6(const u32_t *oid, ip6_addr_t *ip); +void snmp_ip6_to_oid(const ip6_addr_t *ip, u32_t *oid); +#endif /* LWIP_IPV6 */ +#if LWIP_IPV4 || LWIP_IPV6 +u8_t snmp_ip_to_oid(const ip_addr_t *ip, u32_t *oid); +u8_t snmp_ip_port_to_oid(const ip_addr_t *ip, u16_t port, u32_t *oid); + +u8_t snmp_oid_to_ip(const u32_t *oid, u8_t oid_len, ip_addr_t *ip); +u8_t snmp_oid_to_ip_port(const u32_t *oid, u8_t oid_len, ip_addr_t *ip, u16_t *port); +#endif /* LWIP_IPV4 || LWIP_IPV6 */ + +struct netif; +u8_t netif_to_num(const struct netif *netif); + +snmp_err_t snmp_set_test_ok(struct snmp_node_instance* instance, u16_t value_len, void* value); /* generic function which can be used if test is always successful */ + +err_t snmp_decode_bits(const u8_t *buf, u32_t buf_len, u32_t *bit_value); +err_t snmp_decode_truthvalue(const s32_t *asn1_value, u8_t *bool_value); +u8_t snmp_encode_bits(u8_t *buf, u32_t buf_len, u32_t bit_value, u8_t bit_count); +u8_t snmp_encode_truthvalue(s32_t *asn1_value, u32_t bool_value); + +struct snmp_statistics +{ + u32_t inpkts; + u32_t outpkts; + u32_t inbadversions; + u32_t inbadcommunitynames; + u32_t inbadcommunityuses; + u32_t inasnparseerrs; + u32_t intoobigs; + u32_t innosuchnames; + u32_t inbadvalues; + u32_t inreadonlys; + u32_t ingenerrs; + u32_t intotalreqvars; + u32_t intotalsetvars; + u32_t ingetrequests; + u32_t ingetnexts; + u32_t insetrequests; + u32_t ingetresponses; + u32_t intraps; + u32_t outtoobigs; + u32_t outnosuchnames; + u32_t outbadvalues; + u32_t outgenerrs; + u32_t outgetrequests; + u32_t outgetnexts; + u32_t outsetrequests; + u32_t outgetresponses; + u32_t outtraps; +#if LWIP_SNMP_V3 + u32_t unsupportedseclevels; + u32_t notintimewindows; + u32_t unknownusernames; + u32_t unknownengineids; + u32_t wrongdigests; + u32_t decryptionerrors; +#endif +}; + +extern struct snmp_statistics snmp_stats; + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_SNMP */ + +#endif /* LWIP_HDR_APPS_SNMP_CORE_H */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmp_mib2.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmp_mib2.h new file mode 100644 index 0000000..392ee25 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmp_mib2.h @@ -0,0 +1,78 @@ +/** + * @file + * SNMP MIB2 API + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Dirk Ziegelmeier + * + */ +#ifndef LWIP_HDR_APPS_SNMP_MIB2_H +#define LWIP_HDR_APPS_SNMP_MIB2_H + +#include "lwip/apps/snmp_opts.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#if LWIP_SNMP /* don't build if not configured for use in lwipopts.h */ +#if SNMP_LWIP_MIB2 + +#include "lwip/apps/snmp_core.h" + +extern const struct snmp_mib mib2; + +#if SNMP_USE_NETCONN +#include "lwip/apps/snmp_threadsync.h" +void snmp_mib2_lwip_synchronizer(snmp_threadsync_called_fn fn, void* arg); +extern struct snmp_threadsync_instance snmp_mib2_lwip_locks; +#endif + +#ifndef SNMP_SYSSERVICES +#define SNMP_SYSSERVICES ((1 << 6) | (1 << 3) | ((IP_FORWARD) << 2)) +#endif + +void snmp_mib2_set_sysdescr(const u8_t* str, const u16_t* len); /* read-only be defintion */ +void snmp_mib2_set_syscontact(u8_t *ocstr, u16_t *ocstrlen, u16_t bufsize); +void snmp_mib2_set_syscontact_readonly(const u8_t *ocstr, const u16_t *ocstrlen); +void snmp_mib2_set_sysname(u8_t *ocstr, u16_t *ocstrlen, u16_t bufsize); +void snmp_mib2_set_sysname_readonly(const u8_t *ocstr, const u16_t *ocstrlen); +void snmp_mib2_set_syslocation(u8_t *ocstr, u16_t *ocstrlen, u16_t bufsize); +void snmp_mib2_set_syslocation_readonly(const u8_t *ocstr, const u16_t *ocstrlen); + +#endif /* SNMP_LWIP_MIB2 */ +#endif /* LWIP_SNMP */ + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_APPS_SNMP_MIB2_H */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmp_opts.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmp_opts.h new file mode 100644 index 0000000..1c63504 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmp_opts.h @@ -0,0 +1,297 @@ +/** + * @file + * SNMP server options list + */ + +/* + * Copyright (c) 2015 Dirk Ziegelmeier + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Dirk Ziegelmeier + * + */ +#ifndef LWIP_HDR_SNMP_OPTS_H +#define LWIP_HDR_SNMP_OPTS_H + +#include "lwip/opt.h" + +/** + * @defgroup snmp_opts Options + * @ingroup snmp + * @{ + */ + +/** + * LWIP_SNMP==1: This enables the lwIP SNMP agent. UDP must be available + * for SNMP transport. + * If you want to use your own SNMP agent, leave this disabled. + * To integrate MIB2 of an external agent, you need to enable + * LWIP_MIB2_CALLBACKS and MIB2_STATS. This will give you the callbacks + * and statistics counters you need to get MIB2 working. + */ +#if !defined LWIP_SNMP || defined __DOXYGEN__ +#define LWIP_SNMP 0 +#endif + +/** + * SNMP_USE_NETCONN: Use netconn API instead of raw API. + * Makes SNMP agent run in a worker thread, so blocking operations + * can be done in MIB calls. + */ +#if !defined SNMP_USE_NETCONN || defined __DOXYGEN__ +#define SNMP_USE_NETCONN 0 +#endif + +/** + * SNMP_USE_RAW: Use raw API. + * SNMP agent does not run in a worker thread, so blocking operations + * should not be done in MIB calls. + */ +#if !defined SNMP_USE_RAW || defined __DOXYGEN__ +#define SNMP_USE_RAW 1 +#endif + +#if SNMP_USE_NETCONN && SNMP_USE_RAW +#error SNMP stack can use only one of the APIs {raw, netconn} +#endif + +#if LWIP_SNMP && !SNMP_USE_NETCONN && !SNMP_USE_RAW +#error SNMP stack needs a receive API and UDP {raw, netconn} +#endif + +#if SNMP_USE_NETCONN +/** + * SNMP_STACK_SIZE: Stack size of SNMP netconn worker thread + */ +#if !defined SNMP_STACK_SIZE || defined __DOXYGEN__ +#define SNMP_STACK_SIZE DEFAULT_THREAD_STACKSIZE +#endif + +/** + * SNMP_THREAD_PRIO: SNMP netconn worker thread priority + */ +#if !defined SNMP_THREAD_PRIO || defined __DOXYGEN__ +#define SNMP_THREAD_PRIO DEFAULT_THREAD_PRIO +#endif +#endif /* SNMP_USE_NETCONN */ + +/** + * SNMP_TRAP_DESTINATIONS: Number of trap destinations. At least one trap + * destination is required + */ +#if !defined SNMP_TRAP_DESTINATIONS || defined __DOXYGEN__ +#define SNMP_TRAP_DESTINATIONS 1 +#endif + +/** + * Only allow SNMP write actions that are 'safe' (e.g. disabling netifs is not + * a safe action and disabled when SNMP_SAFE_REQUESTS = 1). + * Unsafe requests are disabled by default! + */ +#if !defined SNMP_SAFE_REQUESTS || defined __DOXYGEN__ +#define SNMP_SAFE_REQUESTS 1 +#endif + +/** + * The maximum length of strings used. + */ +#if !defined SNMP_MAX_OCTET_STRING_LEN || defined __DOXYGEN__ +#define SNMP_MAX_OCTET_STRING_LEN 127 +#endif + +/** + * The maximum number of Sub ID's inside an object identifier. + * Indirectly this also limits the maximum depth of SNMP tree. 
+ */ +#if !defined SNMP_MAX_OBJ_ID_LEN || defined __DOXYGEN__ +#define SNMP_MAX_OBJ_ID_LEN 50 +#endif + +#if !defined SNMP_MAX_VALUE_SIZE || defined __DOXYGEN__ +/** + * The minimum size of a value. + */ +#define SNMP_MIN_VALUE_SIZE (2 * sizeof(u32_t*)) /* size required to store the basic types (8 bytes for counter64) */ +/** + * The maximum size of a value. + */ +#define SNMP_MAX_VALUE_SIZE LWIP_MAX(LWIP_MAX((SNMP_MAX_OCTET_STRING_LEN), sizeof(u32_t)*(SNMP_MAX_OBJ_ID_LEN)), SNMP_MIN_VALUE_SIZE) +#endif + +/** + * The snmp read-access community. Used for write-access and traps, too + * unless SNMP_COMMUNITY_WRITE or SNMP_COMMUNITY_TRAP are enabled, respectively. + */ +#if !defined SNMP_COMMUNITY || defined __DOXYGEN__ +#define SNMP_COMMUNITY "public" +#endif + +/** + * The snmp write-access community. + * Set this community to "" in order to disallow any write access. + */ +#if !defined SNMP_COMMUNITY_WRITE || defined __DOXYGEN__ +#define SNMP_COMMUNITY_WRITE "private" +#endif + +/** + * The snmp community used for sending traps. + */ +#if !defined SNMP_COMMUNITY_TRAP || defined __DOXYGEN__ +#define SNMP_COMMUNITY_TRAP "public" +#endif + +/** + * The maximum length of community string. + * If community names shall be adjusted at runtime via snmp_set_community() calls, + * enter here the possible maximum length (+1 for terminating null character). + */ +#if !defined SNMP_MAX_COMMUNITY_STR_LEN || defined __DOXYGEN__ +#define SNMP_MAX_COMMUNITY_STR_LEN LWIP_MAX(LWIP_MAX(sizeof(SNMP_COMMUNITY), sizeof(SNMP_COMMUNITY_WRITE)), sizeof(SNMP_COMMUNITY_TRAP)) +#endif + +/** + * The OID identifiying the device. This may be the enterprise OID itself or any OID located below it in tree. + */ +#if !defined SNMP_DEVICE_ENTERPRISE_OID || defined __DOXYGEN__ +#define SNMP_LWIP_ENTERPRISE_OID 26381 +/** + * IANA assigned enterprise ID for lwIP is 26381 + * @see http://www.iana.org/assignments/enterprise-numbers + * + * @note this enterprise ID is assigned to the lwIP project, + * all object identifiers living under this ID are assigned + * by the lwIP maintainers! + * @note don't change this define, use snmp_set_device_enterprise_oid() + * + * If you need to create your own private MIB you'll need + * to apply for your own enterprise ID with IANA: + * http://www.iana.org/numbers.html + */ +#define SNMP_DEVICE_ENTERPRISE_OID {1, 3, 6, 1, 4, 1, SNMP_LWIP_ENTERPRISE_OID} +/** + * Length of SNMP_DEVICE_ENTERPRISE_OID + */ +#define SNMP_DEVICE_ENTERPRISE_OID_LEN 7 +#endif + +/** + * SNMP_DEBUG: Enable debugging for SNMP messages. + */ +#if !defined SNMP_DEBUG || defined __DOXYGEN__ +#define SNMP_DEBUG LWIP_DBG_OFF +#endif + +/** + * SNMP_MIB_DEBUG: Enable debugging for SNMP MIBs. + */ +#if !defined SNMP_MIB_DEBUG || defined __DOXYGEN__ +#define SNMP_MIB_DEBUG LWIP_DBG_OFF +#endif + +/** + * Indicates if the MIB2 implementation of LWIP SNMP stack is used. + */ +#if !defined SNMP_LWIP_MIB2 || defined __DOXYGEN__ +#define SNMP_LWIP_MIB2 LWIP_SNMP +#endif + +/** + * Value return for sysDesc field of MIB2. + */ +#if !defined SNMP_LWIP_MIB2_SYSDESC || defined __DOXYGEN__ +#define SNMP_LWIP_MIB2_SYSDESC "lwIP" +#endif + +/** + * Value return for sysName field of MIB2. + * To make sysName field settable, call snmp_mib2_set_sysname() to provide the necessary buffers. + */ +#if !defined SNMP_LWIP_MIB2_SYSNAME || defined __DOXYGEN__ +#define SNMP_LWIP_MIB2_SYSNAME "FQDN-unk" +#endif + +/** + * Value return for sysContact field of MIB2. 
+ * To make sysContact field settable, call snmp_mib2_set_syscontact() to provide the necessary buffers. + */ +#if !defined SNMP_LWIP_MIB2_SYSCONTACT || defined __DOXYGEN__ +#define SNMP_LWIP_MIB2_SYSCONTACT "" +#endif + +/** + * Value return for sysLocation field of MIB2. + * To make sysLocation field settable, call snmp_mib2_set_syslocation() to provide the necessary buffers. + */ +#if !defined SNMP_LWIP_MIB2_SYSLOCATION || defined __DOXYGEN__ +#define SNMP_LWIP_MIB2_SYSLOCATION "" +#endif + +/** + * This value is used to limit the repetitions processed in GetBulk requests (value == 0 means no limitation). + * This may be useful to limit the load for a single request. + * According to SNMP RFC 1905 it is allowed to not return all requested variables from a GetBulk request if system load would be too high. + * so the effect is that the client will do more requests to gather all data. + * For the stack this could be useful in case that SNMP processing is done in TCP/IP thread. In this situation a request with many + * repetitions could block the thread for a longer time. Setting limit here will keep the stack more responsive. + */ +#if !defined SNMP_LWIP_GETBULK_MAX_REPETITIONS || defined __DOXYGEN__ +#define SNMP_LWIP_GETBULK_MAX_REPETITIONS 0 +#endif + +/** + * @} + */ + +/* + ------------------------------------ + ---------- SNMPv3 options ---------- + ------------------------------------ +*/ + +/** + * LWIP_SNMP_V3==1: This enables EXPERIMENTAL SNMPv3 support. LWIP_SNMP must + * also be enabled. + * THIS IS UNDER DEVELOPMENT AND SHOULD NOT BE ENABLED IN PRODUCTS. + */ +#ifndef LWIP_SNMP_V3 +#define LWIP_SNMP_V3 0 +#endif + +#ifndef LWIP_SNMP_V3_MBEDTLS +#define LWIP_SNMP_V3_MBEDTLS LWIP_SNMP_V3 +#endif + +#ifndef LWIP_SNMP_V3_CRYPTO +#define LWIP_SNMP_V3_CRYPTO LWIP_SNMP_V3_MBEDTLS +#endif + +#ifndef LWIP_SNMP_CONFIGURE_VERSIONS +#define LWIP_SNMP_CONFIGURE_VERSIONS 0 +#endif + +#endif /* LWIP_HDR_SNMP_OPTS_H */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmp_scalar.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmp_scalar.h new file mode 100644 index 0000000..99bcdf9 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmp_scalar.h @@ -0,0 +1,113 @@ +/** + * @file + * SNMP server MIB API to implement scalar nodes + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Martin Hentschel + * + */ + +#ifndef LWIP_HDR_APPS_SNMP_SCALAR_H +#define LWIP_HDR_APPS_SNMP_SCALAR_H + +#include "lwip/apps/snmp_opts.h" +#include "lwip/apps/snmp_core.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#if LWIP_SNMP /* don't build if not configured for use in lwipopts.h */ + +/** basic scalar node */ +struct snmp_scalar_node +{ + /** inherited "base class" members */ + struct snmp_leaf_node node; + u8_t asn1_type; + snmp_access_t access; + node_instance_get_value_method get_value; + node_instance_set_test_method set_test; + node_instance_set_value_method set_value; +}; + + +snmp_err_t snmp_scalar_get_instance(const u32_t *root_oid, u8_t root_oid_len, struct snmp_node_instance* instance); +snmp_err_t snmp_scalar_get_next_instance(const u32_t *root_oid, u8_t root_oid_len, struct snmp_node_instance* instance); + +#define SNMP_SCALAR_CREATE_NODE(oid, access, asn1_type, get_value_method, set_test_method, set_value_method) \ + {{{ SNMP_NODE_SCALAR, (oid) }, \ + snmp_scalar_get_instance, \ + snmp_scalar_get_next_instance }, \ + (asn1_type), (access), (get_value_method), (set_test_method), (set_value_method) } + +#define SNMP_SCALAR_CREATE_NODE_READONLY(oid, asn1_type, get_value_method) SNMP_SCALAR_CREATE_NODE(oid, SNMP_NODE_INSTANCE_READ_ONLY, asn1_type, get_value_method, NULL, NULL) + +/** scalar array node - a tree node which contains scalars only as children */ +struct snmp_scalar_array_node_def +{ + u32_t oid; + u8_t asn1_type; + snmp_access_t access; +}; + +typedef s16_t (*snmp_scalar_array_get_value_method)(const struct snmp_scalar_array_node_def*, void*); +typedef snmp_err_t (*snmp_scalar_array_set_test_method)(const struct snmp_scalar_array_node_def*, u16_t, void*); +typedef snmp_err_t (*snmp_scalar_array_set_value_method)(const struct snmp_scalar_array_node_def*, u16_t, void*); + +/** basic scalar array node */ +struct snmp_scalar_array_node +{ + /** inherited "base class" members */ + struct snmp_leaf_node node; + u16_t array_node_count; + const struct snmp_scalar_array_node_def* array_nodes; + snmp_scalar_array_get_value_method get_value; + snmp_scalar_array_set_test_method set_test; + snmp_scalar_array_set_value_method set_value; +}; + +snmp_err_t snmp_scalar_array_get_instance(const u32_t *root_oid, u8_t root_oid_len, struct snmp_node_instance* instance); +snmp_err_t snmp_scalar_array_get_next_instance(const u32_t *root_oid, u8_t root_oid_len, struct snmp_node_instance* instance); + +#define SNMP_SCALAR_CREATE_ARRAY_NODE(oid, array_nodes, get_value_method, set_test_method, set_value_method) \ + {{{ SNMP_NODE_SCALAR_ARRAY, (oid) }, \ + snmp_scalar_array_get_instance, \ + snmp_scalar_array_get_next_instance }, \ + (u16_t)LWIP_ARRAYSIZE(array_nodes), (array_nodes), (get_value_method), (set_test_method), (set_value_method) } + +#endif /* LWIP_SNMP */ + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_APPS_SNMP_SCALAR_H */ diff --git 
a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmp_snmpv2_framework.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmp_snmpv2_framework.h new file mode 100644 index 0000000..9f4b41c --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmp_snmpv2_framework.h @@ -0,0 +1,32 @@ +/* +Generated by LwipMibCompiler +*/ + +#ifndef LWIP_HDR_APPS_SNMP_FRAMEWORK_MIB_H +#define LWIP_HDR_APPS_SNMP_FRAMEWORK_MIB_H + +#include "lwip/apps/snmp_opts.h" +#if LWIP_SNMP + +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ + +#include "lwip/apps/snmp_core.h" + +extern const struct snmp_obj_id usmNoAuthProtocol; +extern const struct snmp_obj_id usmHMACMD5AuthProtocol; +extern const struct snmp_obj_id usmHMACSHAAuthProtocol; + +extern const struct snmp_obj_id usmNoPrivProtocol; +extern const struct snmp_obj_id usmDESPrivProtocol; +extern const struct snmp_obj_id usmAESPrivProtocol; + +extern const struct snmp_mib snmpframeworkmib; + +#ifdef __cplusplus +} +#endif /* __cplusplus */ + +#endif /* LWIP_SNMP */ +#endif /* LWIP_HDR_APPS_SNMP_FRAMEWORK_MIB_H */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmp_snmpv2_usm.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmp_snmpv2_usm.h new file mode 100644 index 0000000..ae09baa --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmp_snmpv2_usm.h @@ -0,0 +1,24 @@ +/* +Generated by LwipMibCompiler +*/ + +#ifndef LWIP_HDR_APPS_SNMP_USER_BASED_SM_MIB_H +#define LWIP_HDR_APPS_SNMP_USER_BASED_SM_MIB_H + +#include "lwip/apps/snmp_opts.h" +#if LWIP_SNMP + +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ + +#include "lwip/apps/snmp_core.h" + +extern const struct snmp_mib snmpusmmib; + +#ifdef __cplusplus +} +#endif /* __cplusplus */ + +#endif /* LWIP_SNMP */ +#endif /* LWIP_HDR_APPS_SNMP_USER_BASED_SM_MIB_H */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmp_table.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmp_table.h new file mode 100644 index 0000000..6930d0b --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmp_table.h @@ -0,0 +1,134 @@ +/** + * @file + * SNMP server MIB API to implement table nodes + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. 
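As a hedged sketch of how the scalar node macros from snmp_scalar.h above are typically used, the following exposes a single read-only counter; the OID value (42), the node name and the counter variable are invented for the example and are not part of this patch:

/* Hypothetical read-only scalar node built with SNMP_SCALAR_CREATE_NODE_READONLY */
#include "lwip/apps/snmp_scalar.h"

static u32_t example_counter;

static s16_t example_get_value(struct snmp_node_instance *instance, void *value)
{
  LWIP_UNUSED_ARG(instance);
  *(u32_t *)value = example_counter;  /* copy the current value into the response buffer */
  return sizeof(u32_t);               /* return the encoded length in bytes */
}

static const struct snmp_scalar_node example_scalar =
  SNMP_SCALAR_CREATE_NODE_READONLY(42, SNMP_ASN1_TYPE_COUNTER, example_get_value);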
+ * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Martin Hentschel + * + */ + +#ifndef LWIP_HDR_APPS_SNMP_TABLE_H +#define LWIP_HDR_APPS_SNMP_TABLE_H + +#include "lwip/apps/snmp_opts.h" +#include "lwip/apps/snmp_core.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#if LWIP_SNMP /* don't build if not configured for use in lwipopts.h */ + +/** default (customizable) read/write table */ +struct snmp_table_col_def +{ + u32_t index; + u8_t asn1_type; + snmp_access_t access; +}; + +/** table node */ +struct snmp_table_node +{ + /** inherited "base class" members */ + struct snmp_leaf_node node; + u16_t column_count; + const struct snmp_table_col_def* columns; + snmp_err_t (*get_cell_instance)(const u32_t* column, const u32_t* row_oid, u8_t row_oid_len, struct snmp_node_instance* cell_instance); + snmp_err_t (*get_next_cell_instance)(const u32_t* column, struct snmp_obj_id* row_oid, struct snmp_node_instance* cell_instance); + /** returns object value for the given object identifier */ + node_instance_get_value_method get_value; + /** tests length and/or range BEFORE setting */ + node_instance_set_test_method set_test; + /** sets object value, only called when set_test() was successful */ + node_instance_set_value_method set_value; +}; + +snmp_err_t snmp_table_get_instance(const u32_t *root_oid, u8_t root_oid_len, struct snmp_node_instance* instance); +snmp_err_t snmp_table_get_next_instance(const u32_t *root_oid, u8_t root_oid_len, struct snmp_node_instance* instance); + +#define SNMP_TABLE_CREATE(oid, columns, get_cell_instance_method, get_next_cell_instance_method, get_value_method, set_test_method, set_value_method) \ + {{{ SNMP_NODE_TABLE, (oid) }, \ + snmp_table_get_instance, \ + snmp_table_get_next_instance }, \ + (u16_t)LWIP_ARRAYSIZE(columns), (columns), \ + (get_cell_instance_method), (get_next_cell_instance_method), \ + (get_value_method), (set_test_method), (set_value_method)} + +#define SNMP_TABLE_GET_COLUMN_FROM_OID(oid) ((oid)[1]) /* first array value is (fixed) row entry (fixed to 1) and 2nd value is column, follow3ed by instance */ + + +/** simple read-only table */ +typedef enum { + SNMP_VARIANT_VALUE_TYPE_U32, + SNMP_VARIANT_VALUE_TYPE_S32, + SNMP_VARIANT_VALUE_TYPE_PTR, + SNMP_VARIANT_VALUE_TYPE_CONST_PTR +} snmp_table_column_data_type_t; + +struct snmp_table_simple_col_def +{ + u32_t index; + u8_t asn1_type; + snmp_table_column_data_type_t data_type; /* depending of what union member is used to store the value*/ +}; + +/** simple read-only table node */ +struct snmp_table_simple_node +{ + /* inherited "base class" members */ + struct snmp_leaf_node node; + u16_t column_count; + const struct snmp_table_simple_col_def* columns; + snmp_err_t (*get_cell_value)(const u32_t* column, const u32_t* row_oid, u8_t row_oid_len, union 
snmp_variant_value* value, u32_t* value_len); + snmp_err_t (*get_next_cell_instance_and_value)(const u32_t* column, struct snmp_obj_id* row_oid, union snmp_variant_value* value, u32_t* value_len); +}; + +snmp_err_t snmp_table_simple_get_instance(const u32_t *root_oid, u8_t root_oid_len, struct snmp_node_instance* instance); +snmp_err_t snmp_table_simple_get_next_instance(const u32_t *root_oid, u8_t root_oid_len, struct snmp_node_instance* instance); + +#define SNMP_TABLE_CREATE_SIMPLE(oid, columns, get_cell_value_method, get_next_cell_instance_and_value_method) \ + {{{ SNMP_NODE_TABLE, (oid) }, \ + snmp_table_simple_get_instance, \ + snmp_table_simple_get_next_instance }, \ + (u16_t)LWIP_ARRAYSIZE(columns), (columns), (get_cell_value_method), (get_next_cell_instance_and_value_method) } + +s16_t snmp_table_extract_value_from_s32ref(struct snmp_node_instance* instance, void* value); +s16_t snmp_table_extract_value_from_u32ref(struct snmp_node_instance* instance, void* value); +s16_t snmp_table_extract_value_from_refconstptr(struct snmp_node_instance* instance, void* value); + +#endif /* LWIP_SNMP */ + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_APPS_SNMP_TABLE_H */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmp_threadsync.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmp_threadsync.h new file mode 100644 index 0000000..5eb3538 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmp_threadsync.h @@ -0,0 +1,114 @@ +/** + * @file + * SNMP server MIB API to implement thread synchronization + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
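To illustrate the "simple read-only table" API above, a one-column, single-row table might be wired up as sketched below; the OID (7), the column layout and the cell contents are assumptions made for the example:

/* Hypothetical simple table built with SNMP_TABLE_CREATE_SIMPLE */
#include "lwip/apps/snmp_table.h"

static const struct snmp_table_simple_col_def example_cols[] = {
  { 1, SNMP_ASN1_TYPE_GAUGE, SNMP_VARIANT_VALUE_TYPE_U32 }  /* column 1: unsigned gauge */
};

static snmp_err_t example_get_cell_value(const u32_t *column, const u32_t *row_oid,
                                         u8_t row_oid_len, union snmp_variant_value *value,
                                         u32_t *value_len)
{
  LWIP_UNUSED_ARG(row_oid); LWIP_UNUSED_ARG(row_oid_len); LWIP_UNUSED_ARG(value_len);
  if (*column == 1) {
    value->u32 = 1234;                 /* made-up cell content */
    return SNMP_ERR_NOERROR;
  }
  return SNMP_ERR_NOSUCHINSTANCE;
}

static snmp_err_t example_get_next_cell(const u32_t *column, struct snmp_obj_id *row_oid,
                                        union snmp_variant_value *value, u32_t *value_len)
{
  LWIP_UNUSED_ARG(column); LWIP_UNUSED_ARG(row_oid);
  LWIP_UNUSED_ARG(value); LWIP_UNUSED_ARG(value_len);
  return SNMP_ERR_NOSUCHINSTANCE;      /* single fixed row: no GetNext support in this sketch */
}

static const struct snmp_table_simple_node example_table =
  SNMP_TABLE_CREATE_SIMPLE(7, example_cols, example_get_cell_value, example_get_next_cell);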
+ * + * Author: Dirk Ziegelmeier + * + */ + +#ifndef LWIP_HDR_APPS_SNMP_THREADSYNC_H +#define LWIP_HDR_APPS_SNMP_THREADSYNC_H + +#include "lwip/apps/snmp_opts.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#if LWIP_SNMP /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/apps/snmp_core.h" +#include "lwip/sys.h" + +typedef void (*snmp_threadsync_called_fn)(void* arg); +typedef void (*snmp_threadsync_synchronizer_fn)(snmp_threadsync_called_fn fn, void* arg); + + +/** Thread sync runtime data. For internal usage only. */ +struct threadsync_data +{ + union { + snmp_err_t err; + s16_t s16; + } retval; + union { + const u32_t *root_oid; + void *value; + } arg1; + union { + u8_t root_oid_len; + u16_t len; + } arg2; + const struct snmp_threadsync_node *threadsync_node; + struct snmp_node_instance proxy_instance; +}; + +/** Thread sync instance. Needed EXCATLY once for every thread to be synced into. */ +struct snmp_threadsync_instance +{ + sys_sem_t sem; + sys_mutex_t sem_usage_mutex; + snmp_threadsync_synchronizer_fn sync_fn; + struct threadsync_data data; +}; + +/** SNMP thread sync proxy leaf node */ +struct snmp_threadsync_node +{ + /* inherited "base class" members */ + struct snmp_leaf_node node; + + const struct snmp_leaf_node *target; + struct snmp_threadsync_instance *instance; +}; + +snmp_err_t snmp_threadsync_get_instance(const u32_t *root_oid, u8_t root_oid_len, struct snmp_node_instance* instance); +snmp_err_t snmp_threadsync_get_next_instance(const u32_t *root_oid, u8_t root_oid_len, struct snmp_node_instance* instance); + +/** Create thread sync proxy node */ +#define SNMP_CREATE_THREAD_SYNC_NODE(oid, target_leaf_node, threadsync_instance) \ + {{{ SNMP_NODE_THREADSYNC, (oid) }, \ + snmp_threadsync_get_instance, \ + snmp_threadsync_get_next_instance }, \ + (target_leaf_node), \ + (threadsync_instance) } + +/** Create thread sync instance data */ +void snmp_threadsync_init(struct snmp_threadsync_instance *instance, snmp_threadsync_synchronizer_fn sync_fn); + +#endif /* LWIP_SNMP */ + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_APPS_SNMP_THREADSYNC_H */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmpv3.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmpv3.h new file mode 100644 index 0000000..bd7d314 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmpv3.h @@ -0,0 +1,114 @@ +/** + * @file + * Additional SNMPv3 functionality RFC3414 and RFC3826. + */ + +/* + * Copyright (c) 2016 Elias Oenal. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. 
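The thread-sync proxy declared in snmp_threadsync.h above lets MIB code that must run in the tcpip_thread be queried safely from the SNMP agent's own thread. A sketch of the usual wiring, assuming an RTOS build where the sequential API (tcpip_callback) is available:

#include "lwip/apps/snmp_threadsync.h"
#include "lwip/tcpip.h"

static struct snmp_threadsync_instance example_sync;

/* run the proxied MIB access in the tcpip_thread context */
static void example_synchronizer(snmp_threadsync_called_fn fn, void *arg)
{
  tcpip_callback(fn, arg);
}

void example_snmp_threadsync_setup(void)
{
  snmp_threadsync_init(&example_sync, example_synchronizer);
  /* example_sync can now be referenced by SNMP_CREATE_THREAD_SYNC_NODE(...) entries */
}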
+ * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * Author: Elias Oenal + */ + +#ifndef LWIP_HDR_APPS_SNMP_V3_H +#define LWIP_HDR_APPS_SNMP_V3_H + +#include "lwip/apps/snmp_opts.h" +#include "lwip/err.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#if LWIP_SNMP && LWIP_SNMP_V3 + +typedef enum +{ + SNMP_V3_AUTH_ALGO_INVAL = 0, + SNMP_V3_AUTH_ALGO_MD5 = 1, + SNMP_V3_AUTH_ALGO_SHA = 2 +} snmpv3_auth_algo_t; + +typedef enum +{ + SNMP_V3_PRIV_ALGO_INVAL = 0, + SNMP_V3_PRIV_ALGO_DES = 1, + SNMP_V3_PRIV_ALGO_AES = 2 +} snmpv3_priv_algo_t; + +typedef enum +{ + SNMP_V3_USER_STORAGETYPE_OTHER = 1, + SNMP_V3_USER_STORAGETYPE_VOLATILE = 2, + SNMP_V3_USER_STORAGETYPE_NONVOLATILE = 3, + SNMP_V3_USER_STORAGETYPE_PERMANENT = 4, + SNMP_V3_USER_STORAGETYPE_READONLY = 5 +} snmpv3_user_storagetype_t; + +/* + * The following callback functions must be implemented by the application. + * There is a dummy implementation in snmpv3_dummy.c. + */ + +void snmpv3_get_engine_id(const char **id, u8_t *len); +err_t snmpv3_set_engine_id(const char* id, u8_t len); + +u32_t snmpv3_get_engine_boots(void); +void snmpv3_set_engine_boots(u32_t boots); + +u32_t snmpv3_get_engine_time(void); +void snmpv3_reset_engine_time(void); + +err_t snmpv3_get_user(const char* username, snmpv3_auth_algo_t *auth_algo, u8_t *auth_key, snmpv3_priv_algo_t *priv_algo, u8_t *priv_key); +u8_t snmpv3_get_amount_of_users(void); +err_t snmpv3_get_user_storagetype(const char *username, snmpv3_user_storagetype_t *storagetype); +err_t snmpv3_get_username(char *username, u8_t index); + +/* The following functions are provided by the SNMPv3 agent */ + +void snmpv3_engine_id_changed(void); +s32_t snmpv3_get_engine_time_internal(void); + +void snmpv3_password_to_key_md5( + const u8_t *password, /* IN */ + size_t passwordlen, /* IN */ + const u8_t *engineID, /* IN - pointer to snmpEngineID */ + u8_t engineLength, /* IN - length of snmpEngineID */ + u8_t *key); /* OUT - pointer to caller 16-octet buffer */ + +void snmpv3_password_to_key_sha( + const u8_t *password, /* IN */ + size_t passwordlen, /* IN */ + const u8_t *engineID, /* IN - pointer to snmpEngineID */ + u8_t engineLength, /* IN - length of snmpEngineID */ + u8_t *key); /* OUT - pointer to caller 20-octet buffer */ + +#endif + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_APPS_SNMP_V3_H */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/apps/sntp.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/apps/sntp.h new file mode 100644 index 0000000..00135b4 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/apps/sntp.h @@ -0,0 +1,80 @@ +/** + * @file + * SNTP client API + */ + +/* + * Copyright (c) 2007-2009 Frédéric Bernon, 
Simon Goldschmidt + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Frédéric Bernon, Simon Goldschmidt + * + */ +#ifndef LWIP_HDR_APPS_SNTP_H +#define LWIP_HDR_APPS_SNTP_H + +#include "lwip/apps/sntp_opts.h" +#include "lwip/ip_addr.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* SNTP operating modes: default is to poll using unicast. + The mode has to be set before calling sntp_init(). */ +#define SNTP_OPMODE_POLL 0 +#define SNTP_OPMODE_LISTENONLY 1 +void sntp_setoperatingmode(u8_t operating_mode); +u8_t sntp_getoperatingmode(void); + +void sntp_init(void); +void sntp_stop(void); +u8_t sntp_enabled(void); + +void sntp_setserver(u8_t idx, const ip_addr_t *addr); +const ip_addr_t* sntp_getserver(u8_t idx); + +#if SNTP_MONITOR_SERVER_REACHABILITY +u8_t sntp_getreachability(u8_t idx); +#endif /* SNTP_MONITOR_SERVER_REACHABILITY */ + +#if SNTP_SERVER_DNS +void sntp_setservername(u8_t idx, const char *server); +const char *sntp_getservername(u8_t idx); +#endif /* SNTP_SERVER_DNS */ + +#if SNTP_GET_SERVERS_FROM_DHCP +void sntp_servermode_dhcp(int set_servers_from_dhcp); +#else /* SNTP_GET_SERVERS_FROM_DHCP */ +#define sntp_servermode_dhcp(x) +#endif /* SNTP_GET_SERVERS_FROM_DHCP */ + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_APPS_SNTP_H */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/apps/sntp_opts.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/apps/sntp_opts.h new file mode 100644 index 0000000..c25a386 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/apps/sntp_opts.h @@ -0,0 +1,209 @@ +/** + * @file + * SNTP client options list + */ + +/* + * Copyright (c) 2007-2009 Frédéric Bernon, Simon Goldschmidt + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. 
Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Frédéric Bernon, Simon Goldschmidt + * + */ +#ifndef LWIP_HDR_APPS_SNTP_OPTS_H +#define LWIP_HDR_APPS_SNTP_OPTS_H + +#include "lwip/opt.h" +#include "lwip/prot/iana.h" + +/** + * @defgroup sntp_opts Options + * @ingroup sntp + * @{ + */ + +/** SNTP macro to change system time in seconds + * Define SNTP_SET_SYSTEM_TIME_US(sec, us) to set the time in microseconds + * instead of this one if you need the additional precision. Alternatively, + * define SNTP_SET_SYSTEM_TIME_NTP(sec, frac) in order to work with native + * NTP timestamps instead. + */ +#if !defined SNTP_SET_SYSTEM_TIME || defined __DOXYGEN__ +#define SNTP_SET_SYSTEM_TIME(sec) LWIP_UNUSED_ARG(sec) +#endif + +/** The maximum number of SNTP servers that can be set */ +#if !defined SNTP_MAX_SERVERS || defined __DOXYGEN__ +#define SNTP_MAX_SERVERS LWIP_DHCP_MAX_NTP_SERVERS +#endif + +/** Set this to 1 to implement the callback function called by dhcp when + * NTP servers are received. */ +#if !defined SNTP_GET_SERVERS_FROM_DHCP || defined __DOXYGEN__ +#define SNTP_GET_SERVERS_FROM_DHCP LWIP_DHCP_GET_NTP_SRV +#endif + +/** Set this to 1 to support DNS names (or IP address strings) to set sntp servers + * One server address/name can be defined as default if SNTP_SERVER_DNS == 1: + * \#define SNTP_SERVER_ADDRESS "pool.ntp.org" + */ +#if !defined SNTP_SERVER_DNS || defined __DOXYGEN__ +#define SNTP_SERVER_DNS 0 +#endif + +/** + * SNTP_DEBUG: Enable debugging for SNTP. + */ +#if !defined SNTP_DEBUG || defined __DOXYGEN__ +#define SNTP_DEBUG LWIP_DBG_OFF +#endif + +/** SNTP server port */ +#if !defined SNTP_PORT || defined __DOXYGEN__ +#define SNTP_PORT LWIP_IANA_PORT_SNTP +#endif + +/** Sanity check: + * Define this to + * - 0 to turn off sanity checks (default; smaller code) + * - >= 1 to check address and port of the response packet to ensure the + * response comes from the server we sent the request to. + * - >= 2 to check returned Originate Timestamp against Transmit Timestamp + * sent to the server (to ensure response to older request). + * - >= 3 @todo: discard reply if any of the VN, Stratum, or Transmit Timestamp + * fields is 0 or the Mode field is not 4 (unicast) or 5 (broadcast). 
+ * - >= 4 @todo: to check that the Root Delay and Root Dispersion fields are each + * greater than or equal to 0 and less than infinity, where infinity is + * currently a cozy number like one second. This check avoids using a + * server whose synchronization source has expired for a very long time. + */ +#if !defined SNTP_CHECK_RESPONSE || defined __DOXYGEN__ +#define SNTP_CHECK_RESPONSE 0 +#endif + +/** Enable round-trip delay compensation. + * Compensate for the round-trip delay by calculating the clock offset from + * the originate, receive, transmit and destination timestamps, as per RFC. + * + * The calculation requires compiler support for 64-bit integers. Also, either + * SNTP_SET_SYSTEM_TIME_US or SNTP_SET_SYSTEM_TIME_NTP has to be implemented + * for setting the system clock with sub-second precision. Likewise, either + * SNTP_GET_SYSTEM_TIME or SNTP_GET_SYSTEM_TIME_NTP needs to be implemented + * with sub-second precision. + * + * Although not strictly required, it makes sense to combine this option with + * SNTP_CHECK_RESPONSE >= 2 for sanity-checking of the received timestamps. + * Also, in order for the round-trip calculation to work, the difference + * between the local clock and the NTP server clock must not be larger than + * about 34 years. If that limit is exceeded, the implementation will fall back + * to setting the clock without compensation. In order to ensure that the local + * clock is always within the permitted range for compensation, even at first + * try, it may be necessary to store at least the current year in non-volatile + * memory. + */ +#if !defined SNTP_COMP_ROUNDTRIP || defined __DOXYGEN__ +#define SNTP_COMP_ROUNDTRIP 0 +#endif + +/** According to the RFC, this shall be a random delay + * between 1 and 5 minutes (in milliseconds) to prevent load peaks. + * This can be defined to a random generation function, + * which must return the delay in milliseconds as u32_t. + * Turned off by default. + */ +#if !defined SNTP_STARTUP_DELAY || defined __DOXYGEN__ +#ifdef LWIP_RAND +#define SNTP_STARTUP_DELAY 1 +#else +#define SNTP_STARTUP_DELAY 0 +#endif +#endif + +/** If you want the startup delay to be a function, define this + * to a function (including the brackets) and define SNTP_STARTUP_DELAY to 1. + */ +#if !defined SNTP_STARTUP_DELAY_FUNC || defined __DOXYGEN__ +#define SNTP_STARTUP_DELAY_FUNC (LWIP_RAND() % 5000) +#endif + +/** SNTP receive timeout - in milliseconds + * Also used as retry timeout - this shouldn't be too low. + * Default is 15 seconds. Must not be beolw 15 seconds by specification (i.e. 15000) + */ +#if !defined SNTP_RECV_TIMEOUT || defined __DOXYGEN__ +#define SNTP_RECV_TIMEOUT 15000 +#endif + +/** SNTP update delay - in milliseconds + * Default is 1 hour. Must not be beolw 60 seconds by specification (i.e. 60000) + */ +#if !defined SNTP_UPDATE_DELAY || defined __DOXYGEN__ +#define SNTP_UPDATE_DELAY 3600000 +#endif + +/** SNTP macro to get system time, used with SNTP_CHECK_RESPONSE >= 2 + * to send in request and compare in response. Also used for round-trip + * delay compensation if SNTP_COMP_ROUNDTRIP != 0. + * Alternatively, define SNTP_GET_SYSTEM_TIME_NTP(sec, frac) in order to + * work with native NTP timestamps instead. + */ +#if !defined SNTP_GET_SYSTEM_TIME || defined __DOXYGEN__ +#define SNTP_GET_SYSTEM_TIME(sec, us) do { (sec) = 0; (us) = 0; } while(0) +#endif + +/** Default retry timeout (in milliseconds) if the response + * received is invalid. + * This is doubled with each retry until SNTP_RETRY_TIMEOUT_MAX is reached. 
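Putting the SNTP API and options together, application start-up typically looks like the sketch below; the server name/address shown is a placeholder and the option choices are assumptions, not part of this patch:

#include "lwip/apps/sntp.h"

void example_sntp_start(void)
{
  sntp_setoperatingmode(SNTP_OPMODE_POLL);  /* mode must be set before sntp_init() */
#if SNTP_SERVER_DNS
  sntp_setservername(0, "pool.ntp.org");    /* requires SNTP_SERVER_DNS == 1 */
#else
  ip_addr_t ntp;
  IP_ADDR4(&ntp, 192, 168, 0, 1);           /* placeholder NTP server address */
  sntp_setserver(0, &ntp);
#endif
  sntp_init();
}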
+ */ +#if !defined SNTP_RETRY_TIMEOUT || defined __DOXYGEN__ +#define SNTP_RETRY_TIMEOUT SNTP_RECV_TIMEOUT +#endif + +/** Maximum retry timeout (in milliseconds). */ +#if !defined SNTP_RETRY_TIMEOUT_MAX || defined __DOXYGEN__ +#define SNTP_RETRY_TIMEOUT_MAX (SNTP_RETRY_TIMEOUT * 10) +#endif + +/** Increase retry timeout with every retry sent + * Default is on to conform to RFC. + */ +#if !defined SNTP_RETRY_TIMEOUT_EXP || defined __DOXYGEN__ +#define SNTP_RETRY_TIMEOUT_EXP 1 +#endif + +/** Keep a reachability shift register per server + * Default is on to conform to RFC. + */ +#if !defined SNTP_MONITOR_SERVER_REACHABILITY || defined __DOXYGEN__ +#define SNTP_MONITOR_SERVER_REACHABILITY 1 +#endif + +/** + * @} + */ + +#endif /* LWIP_HDR_APPS_SNTP_OPTS_H */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/apps/tftp_opts.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/apps/tftp_opts.h new file mode 100644 index 0000000..d187b82 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/apps/tftp_opts.h @@ -0,0 +1,106 @@ +/** + * + * @file tftp_opts.h + * + * @author Logan Gunthorpe + * + * @brief Trivial File Transfer Protocol (RFC 1350) implementation options + * + * Copyright (c) Deltatee Enterprises Ltd. 2013 + * All rights reserved. + * + */ + +/* + * Redistribution and use in source and binary forms, with or without + * modification,are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO + * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED + * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * Author: Logan Gunthorpe + * + */ + +#ifndef LWIP_HDR_APPS_TFTP_OPTS_H +#define LWIP_HDR_APPS_TFTP_OPTS_H + +#include "lwip/opt.h" +#include "lwip/prot/iana.h" + +/** + * @defgroup tftp_opts Options + * @ingroup tftp + * @{ + */ + +/** + * Enable TFTP debug messages + */ +#if !defined TFTP_DEBUG || defined __DOXYGEN__ +#define TFTP_DEBUG LWIP_DBG_OFF +#endif + +/** + * TFTP server port + */ +#if !defined TFTP_PORT || defined __DOXYGEN__ +#define TFTP_PORT LWIP_IANA_PORT_TFTP +#endif + +/** + * TFTP timeout + */ +#if !defined TFTP_TIMEOUT_MSECS || defined __DOXYGEN__ +#define TFTP_TIMEOUT_MSECS 10000 +#endif + +/** + * Max. 
number of retries when a file is read from server + */ +#if !defined TFTP_MAX_RETRIES || defined __DOXYGEN__ +#define TFTP_MAX_RETRIES 5 +#endif + +/** + * TFTP timer cyclic interval + */ +#if !defined TFTP_TIMER_MSECS || defined __DOXYGEN__ +#define TFTP_TIMER_MSECS (TFTP_TIMEOUT_MSECS / 10) +#endif + +/** + * Max. length of TFTP filename + */ +#if !defined TFTP_MAX_FILENAME_LEN || defined __DOXYGEN__ +#define TFTP_MAX_FILENAME_LEN 20 +#endif + +/** + * Max. length of TFTP mode + */ +#if !defined TFTP_MAX_MODE_LEN || defined __DOXYGEN__ +#define TFTP_MAX_MODE_LEN 7 +#endif + +/** + * @} + */ + +#endif /* LWIP_HDR_APPS_TFTP_OPTS_H */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/apps/tftp_server.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/apps/tftp_server.h new file mode 100644 index 0000000..3ce5e40 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/apps/tftp_server.h @@ -0,0 +1,95 @@ +/** + * + * @file tftp_server.h + * + * @author Logan Gunthorpe + * + * @brief Trivial File Transfer Protocol (RFC 1350) + * + * Copyright (c) Deltatee Enterprises Ltd. 2013 + * All rights reserved. + * + */ + +/* + * Redistribution and use in source and binary forms, with or without + * modification,are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO + * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED + * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * Author: Logan Gunthorpe + * + */ + +#ifndef LWIP_HDR_APPS_TFTP_SERVER_H +#define LWIP_HDR_APPS_TFTP_SERVER_H + +#include "lwip/apps/tftp_opts.h" +#include "lwip/err.h" +#include "lwip/pbuf.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/** @ingroup tftp + * TFTP context containing callback functions for TFTP transfers + */ +struct tftp_context { + /** + * Open file for read/write. 
+ * @param fname Filename + * @param mode Mode string from TFTP RFC 1350 (netascii, octet, mail) + * @param write Flag indicating read (0) or write (!= 0) access + * @returns File handle supplied to other functions + */ + void* (*open)(const char* fname, const char* mode, u8_t write); + /** + * Close file handle + * @param handle File handle returned by open() + */ + void (*close)(void* handle); + /** + * Read from file + * @param handle File handle returned by open() + * @param buf Target buffer to copy read data to + * @param bytes Number of bytes to copy to buf + * @returns >= 0: Success; < 0: Error + */ + int (*read)(void* handle, void* buf, int bytes); + /** + * Write to file + * @param handle File handle returned by open() + * @param pbuf PBUF adjusted such that payload pointer points + * to the beginning of write data. In other words, + * TFTP headers are stripped off. + * @returns >= 0: Success; < 0: Error + */ + int (*write)(void* handle, struct pbuf* p); +}; + +err_t tftp_init(const struct tftp_context* ctx); +void tftp_cleanup(void); + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_APPS_TFTP_SERVER_H */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/arch.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/arch.h new file mode 100644 index 0000000..d281da7 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/arch.h @@ -0,0 +1,393 @@ +/** + * @file + * Support for different processor and compiler architectures + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
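The tftp_context callbacks above are the only porting surface of the TFTP server. As a hedged sketch, the version below backs them with a small RAM buffer purely for illustration; a real port would use a filesystem or flash driver, and the buffer size is arbitrary:

#include <string.h>
#include "lwip/apps/tftp_server.h"

static u8_t ram_file[2048];
static int  ram_pos;

static void *example_tftp_open(const char *fname, const char *mode, u8_t write)
{
  LWIP_UNUSED_ARG(fname); LWIP_UNUSED_ARG(mode); LWIP_UNUSED_ARG(write);
  ram_pos = 0;
  return ram_file;                     /* any non-NULL value serves as the handle */
}

static void example_tftp_close(void *handle)
{
  LWIP_UNUSED_ARG(handle);
}

static int example_tftp_read(void *handle, void *buf, int bytes)
{
  LWIP_UNUSED_ARG(handle);
  if (ram_pos + bytes > (int)sizeof(ram_file)) {
    bytes = (int)sizeof(ram_file) - ram_pos;
  }
  memcpy(buf, &ram_file[ram_pos], (size_t)bytes);
  ram_pos += bytes;
  return bytes;                        /* >= 0 signals success */
}

static int example_tftp_write(void *handle, struct pbuf *p)
{
  LWIP_UNUSED_ARG(handle);
  if (ram_pos + (int)p->tot_len > (int)sizeof(ram_file)) {
    return -1;                         /* no space left */
  }
  pbuf_copy_partial(p, &ram_file[ram_pos], p->tot_len, 0);
  ram_pos += p->tot_len;
  return 0;
}

static const struct tftp_context example_tftp_ctx = {
  example_tftp_open, example_tftp_close, example_tftp_read, example_tftp_write
};

/* call once after lwIP is up: tftp_init(&example_tftp_ctx); */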
+ * + * Author: Adam Dunkels + * + */ +#ifndef LWIP_HDR_ARCH_H +#define LWIP_HDR_ARCH_H + +#ifndef LITTLE_ENDIAN +#define LITTLE_ENDIAN 1234 +#endif + +#ifndef BIG_ENDIAN +#define BIG_ENDIAN 4321 +#endif + +#include "arch/cc.h" + +/** + * @defgroup compiler_abstraction Compiler/platform abstraction + * @ingroup sys_layer + * All defines related to this section must not be placed in lwipopts.h, + * but in arch/cc.h! + * If the compiler does not provide memset() this file must include a + * definition of it, or include a file which defines it. + * These options cannot be \#defined in lwipopts.h since they are not options + * of lwIP itself, but options of the lwIP port to your system. + * @{ + */ + +/** Define the byte order of the system. + * Needed for conversion of network data to host byte order. + * Allowed values: LITTLE_ENDIAN and BIG_ENDIAN + */ +#ifndef BYTE_ORDER +#define BYTE_ORDER LITTLE_ENDIAN +#endif + +/** Define random number generator function of your system */ +#ifdef __DOXYGEN__ +#define LWIP_RAND() ((u32_t)rand()) +#endif + +/** Platform specific diagnostic output.\n + * Note the default implementation pulls in printf, which may + * in turn pull in a lot of standard libary code. In resource-constrained + * systems, this should be defined to something less resource-consuming. + */ +#ifndef LWIP_PLATFORM_DIAG +#define LWIP_PLATFORM_DIAG(x) do {printf x;} while(0) +#include +#include +#endif + +/** Platform specific assertion handling.\n + * Note the default implementation pulls in printf, fflush and abort, which may + * in turn pull in a lot of standard libary code. In resource-constrained + * systems, this should be defined to something less resource-consuming. + */ +#ifndef LWIP_PLATFORM_ASSERT +#define LWIP_PLATFORM_ASSERT(x) do {printf("Assertion \"%s\" failed at line %d in %s\n", \ + x, __LINE__, __FILE__); fflush(NULL); abort();} while(0) +#include +#include +#endif + +/** Define this to 1 in arch/cc.h of your port if you do not want to + * include stddef.h header to get size_t. You need to typedef size_t + * by yourself in this case. + */ +#ifndef LWIP_NO_STDDEF_H +#define LWIP_NO_STDDEF_H 0 +#endif + +#if !LWIP_NO_STDDEF_H +#include /* for size_t */ +#endif + +/** Define this to 1 in arch/cc.h of your port if your compiler does not provide + * the stdint.h header. You need to typedef the generic types listed in + * lwip/arch.h yourself in this case (u8_t, u16_t...). + */ +#ifndef LWIP_NO_STDINT_H +#define LWIP_NO_STDINT_H 0 +#endif + +/* Define generic types used in lwIP */ +#if !LWIP_NO_STDINT_H +#include +/* stdint.h is C99 which should also provide support for 64-bit integers */ +#if !defined(LWIP_HAVE_INT64) && defined(UINT64_MAX) +#define LWIP_HAVE_INT64 1 +#endif +typedef uint8_t u8_t; +typedef int8_t s8_t; +typedef uint16_t u16_t; +typedef int16_t s16_t; +typedef uint32_t u32_t; +typedef int32_t s32_t; +#if LWIP_HAVE_INT64 +typedef uint64_t u64_t; +typedef int64_t s64_t; +#endif +typedef uintptr_t mem_ptr_t; +#endif + +/** Define this to 1 in arch/cc.h of your port if your compiler does not provide + * the inttypes.h header. You need to define the format strings listed in + * lwip/arch.h yourself in this case (X8_F, U16_F...). 
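The hooks documented above (byte order, LWIP_RAND, diagnostic and assert output) live in the port's arch/cc.h. A minimal sketch for a little-endian GCC target is shown below; the names and choices are assumptions and do not reproduce the ST/CubeMX port shipped with this project:

/* arch/cc.h sketch -- illustrative only */
#ifndef CC_H
#define CC_H

#include <stdio.h>
#include <stdlib.h>

#define BYTE_ORDER              LITTLE_ENDIAN
#define LWIP_RAND()             ((u32_t)rand())
#define LWIP_PLATFORM_DIAG(x)   do { printf x; } while (0)
#define LWIP_PLATFORM_ASSERT(x) do { printf("Assert \"%s\" failed\n", x); for (;;) {} } while (0)

#endif /* CC_H */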
+ */ +#ifndef LWIP_NO_INTTYPES_H +#define LWIP_NO_INTTYPES_H 0 +#endif + +/* Define (sn)printf formatters for these lwIP types */ +#if !LWIP_NO_INTTYPES_H +#include +#ifndef X8_F +#define X8_F "02" PRIx8 +#endif +#ifndef U16_F +#define U16_F PRIu16 +#endif +#ifndef S16_F +#define S16_F PRId16 +#endif +#ifndef X16_F +#define X16_F PRIx16 +#endif +#ifndef U32_F +#define U32_F PRIu32 +#endif +#ifndef S32_F +#define S32_F PRId32 +#endif +#ifndef X32_F +#define X32_F PRIx32 +#endif +#ifndef SZT_F +#define SZT_F PRIuPTR +#endif +#endif + +/** Define this to 1 in arch/cc.h of your port if your compiler does not provide + * the limits.h header. You need to define the type limits yourself in this case + * (e.g. INT_MAX, SSIZE_MAX). + */ +#ifndef LWIP_NO_LIMITS_H +#define LWIP_NO_LIMITS_H 0 +#endif + +/* Include limits.h? */ +#if !LWIP_NO_LIMITS_H +#include +#endif + +/* Do we need to define ssize_t? This is a compatibility hack: + * Unfortunately, this type seems to be unavailable on some systems (even if + * sys/types or unistd.h are available). + * Being like that, we define it to 'int' if SSIZE_MAX is not defined. + */ +#ifdef SSIZE_MAX +/* If SSIZE_MAX is defined, unistd.h should provide the type as well */ +#ifndef LWIP_NO_UNISTD_H +#define LWIP_NO_UNISTD_H 0 +#endif +#if !LWIP_NO_UNISTD_H +#include +#endif +#else /* SSIZE_MAX */ +typedef int ssize_t; +#define SSIZE_MAX INT_MAX +#endif /* SSIZE_MAX */ + +/* some maximum values needed in lwip code */ +#define LWIP_UINT32_MAX 0xffffffff + +/** Define this to 1 in arch/cc.h of your port if your compiler does not provide + * the ctype.h header. If ctype.h is available, a few character functions + * are mapped to the appropriate functions (lwip_islower, lwip_isdigit...), if + * not, a private implementation is provided. + */ +#ifndef LWIP_NO_CTYPE_H +#define LWIP_NO_CTYPE_H 0 +#endif + +#if LWIP_NO_CTYPE_H +#define lwip_in_range(c, lo, up) ((u8_t)(c) >= (lo) && (u8_t)(c) <= (up)) +#define lwip_isdigit(c) lwip_in_range((c), '0', '9') +#define lwip_isxdigit(c) (lwip_isdigit(c) || lwip_in_range((c), 'a', 'f') || lwip_in_range((c), 'A', 'F')) +#define lwip_islower(c) lwip_in_range((c), 'a', 'z') +#define lwip_isspace(c) ((c) == ' ' || (c) == '\f' || (c) == '\n' || (c) == '\r' || (c) == '\t' || (c) == '\v') +#define lwip_isupper(c) lwip_in_range((c), 'A', 'Z') +#define lwip_tolower(c) (lwip_isupper(c) ? (c) - 'A' + 'a' : c) +#define lwip_toupper(c) (lwip_islower(c) ? (c) - 'a' + 'A' : c) +#else +#include +#define lwip_isdigit(c) isdigit((unsigned char)(c)) +#define lwip_isxdigit(c) isxdigit((unsigned char)(c)) +#define lwip_islower(c) islower((unsigned char)(c)) +#define lwip_isspace(c) isspace((unsigned char)(c)) +#define lwip_isupper(c) isupper((unsigned char)(c)) +#define lwip_tolower(c) tolower((unsigned char)(c)) +#define lwip_toupper(c) toupper((unsigned char)(c)) +#endif + +/** C++ const_cast(val) equivalent to remove constness from a value (GCC -Wcast-qual) */ +#ifndef LWIP_CONST_CAST +#define LWIP_CONST_CAST(target_type, val) ((target_type)((ptrdiff_t)val)) +#endif + +/** Get rid of alignment cast warnings (GCC -Wcast-align) */ +#ifndef LWIP_ALIGNMENT_CAST +#define LWIP_ALIGNMENT_CAST(target_type, val) LWIP_CONST_CAST(target_type, val) +#endif + +/** Get rid of warnings related to pointer-to-numeric and vice-versa casts, + * e.g. 
"conversion from 'u8_t' to 'void *' of greater size" + */ +#ifndef LWIP_PTR_NUMERIC_CAST +#define LWIP_PTR_NUMERIC_CAST(target_type, val) LWIP_CONST_CAST(target_type, val) +#endif + +/** Avoid warnings/errors related to implicitly casting away packed attributes by doing a explicit cast */ +#ifndef LWIP_PACKED_CAST +#define LWIP_PACKED_CAST(target_type, val) LWIP_CONST_CAST(target_type, val) +#endif + +/** Allocates a memory buffer of specified size that is of sufficient size to align + * its start address using LWIP_MEM_ALIGN. + * You can declare your own version here e.g. to enforce alignment without adding + * trailing padding bytes (see LWIP_MEM_ALIGN_BUFFER) or your own section placement + * requirements.\n + * e.g. if you use gcc and need 32 bit alignment:\n + * \#define LWIP_DECLARE_MEMORY_ALIGNED(variable_name, size) u8_t variable_name[size] \_\_attribute\_\_((aligned(4)))\n + * or more portable:\n + * \#define LWIP_DECLARE_MEMORY_ALIGNED(variable_name, size) u32_t variable_name[(size + sizeof(u32_t) - 1) / sizeof(u32_t)] + */ +#ifndef LWIP_DECLARE_MEMORY_ALIGNED +#define LWIP_DECLARE_MEMORY_ALIGNED(variable_name, size) u8_t variable_name[LWIP_MEM_ALIGN_BUFFER(size)] +#endif + +/** Calculate memory size for an aligned buffer - returns the next highest + * multiple of MEM_ALIGNMENT (e.g. LWIP_MEM_ALIGN_SIZE(3) and + * LWIP_MEM_ALIGN_SIZE(4) will both yield 4 for MEM_ALIGNMENT == 4). + */ +#ifndef LWIP_MEM_ALIGN_SIZE +#define LWIP_MEM_ALIGN_SIZE(size) (((size) + MEM_ALIGNMENT - 1U) & ~(MEM_ALIGNMENT-1U)) +#endif + +/** Calculate safe memory size for an aligned buffer when using an unaligned + * type as storage. This includes a safety-margin on (MEM_ALIGNMENT - 1) at the + * start (e.g. if buffer is u8_t[] and actual data will be u32_t*) + */ +#ifndef LWIP_MEM_ALIGN_BUFFER +#define LWIP_MEM_ALIGN_BUFFER(size) (((size) + MEM_ALIGNMENT - 1U)) +#endif + +/** Align a memory pointer to the alignment defined by MEM_ALIGNMENT + * so that ADDR % MEM_ALIGNMENT == 0 + */ +#ifndef LWIP_MEM_ALIGN +#define LWIP_MEM_ALIGN(addr) ((void *)(((mem_ptr_t)(addr) + MEM_ALIGNMENT - 1) & ~(mem_ptr_t)(MEM_ALIGNMENT-1))) +#endif + +#ifdef __cplusplus +extern "C" { +#endif + +/** Packed structs support. + * Placed BEFORE declaration of a packed struct.\n + * For examples of packed struct declarations, see include/lwip/prot/ subfolder.\n + * A port to GCC/clang is included in lwIP, if you use these compilers there is nothing to do here. + */ +#ifndef PACK_STRUCT_BEGIN +#define PACK_STRUCT_BEGIN +#endif /* PACK_STRUCT_BEGIN */ + +/** Packed structs support. + * Placed AFTER declaration of a packed struct.\n + * For examples of packed struct declarations, see include/lwip/prot/ subfolder.\n + * A port to GCC/clang is included in lwIP, if you use these compilers there is nothing to do here. + */ +#ifndef PACK_STRUCT_END +#define PACK_STRUCT_END +#endif /* PACK_STRUCT_END */ + +/** Packed structs support. + * Placed between end of declaration of a packed struct and trailing semicolon.\n + * For examples of packed struct declarations, see include/lwip/prot/ subfolder.\n + * A port to GCC/clang is included in lwIP, if you use these compilers there is nothing to do here. + */ +#ifndef PACK_STRUCT_STRUCT +#if defined(__GNUC__) || defined(__clang__) +#define PACK_STRUCT_STRUCT __attribute__((packed)) +#else +#define PACK_STRUCT_STRUCT +#endif +#endif /* PACK_STRUCT_STRUCT */ + +/** Packed structs support. 
+ * Wraps u32_t and u16_t members.\n + * For examples of packed struct declarations, see include/lwip/prot/ subfolder.\n + * A port to GCC/clang is included in lwIP, if you use these compilers there is nothing to do here. + */ +#ifndef PACK_STRUCT_FIELD +#define PACK_STRUCT_FIELD(x) x +#endif /* PACK_STRUCT_FIELD */ + +/** Packed structs support. + * Wraps u8_t members, where some compilers warn that packing is not necessary.\n + * For examples of packed struct declarations, see include/lwip/prot/ subfolder.\n + * A port to GCC/clang is included in lwIP, if you use these compilers there is nothing to do here. + */ +#ifndef PACK_STRUCT_FLD_8 +#define PACK_STRUCT_FLD_8(x) PACK_STRUCT_FIELD(x) +#endif /* PACK_STRUCT_FLD_8 */ + +/** Packed structs support. + * Wraps members that are packed structs themselves, where some compilers warn that packing is not necessary.\n + * For examples of packed struct declarations, see include/lwip/prot/ subfolder.\n + * A port to GCC/clang is included in lwIP, if you use these compilers there is nothing to do here. + */ +#ifndef PACK_STRUCT_FLD_S +#define PACK_STRUCT_FLD_S(x) PACK_STRUCT_FIELD(x) +#endif /* PACK_STRUCT_FLD_S */ + +/** PACK_STRUCT_USE_INCLUDES==1: Packed structs support using \#include files before and after struct to be packed.\n + * The file included BEFORE the struct is "arch/bpstruct.h".\n + * The file included AFTER the struct is "arch/epstruct.h".\n + * This can be used to implement struct packing on MS Visual C compilers, see + * the Win32 port in the lwIP contrib repository for reference. + * For examples of packed struct declarations, see include/lwip/prot/ subfolder.\n + * A port to GCC/clang is included in lwIP, if you use these compilers there is nothing to do here. + */ +#ifdef __DOXYGEN__ +#define PACK_STRUCT_USE_INCLUDES +#endif + +/** Eliminates compiler warning about unused arguments (GCC -Wextra -Wunused). */ +#ifndef LWIP_UNUSED_ARG +#define LWIP_UNUSED_ARG(x) (void)x +#endif /* LWIP_UNUSED_ARG */ + +/** LWIP_PROVIDE_ERRNO==1: Let lwIP provide ERRNO values and the 'errno' variable. + * If this is disabled, cc.h must either define 'errno', include , + * define LWIP_ERRNO_STDINCLUDE to get included or + * define LWIP_ERRNO_INCLUDE to or equivalent. + */ +#if defined __DOXYGEN__ +#define LWIP_PROVIDE_ERRNO +#endif + +/** + * @} + */ + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_ARCH_H */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/autoip.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/autoip.h new file mode 100644 index 0000000..5c6d13e --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/autoip.h @@ -0,0 +1,99 @@ +/** + * @file + * + * AutoIP Automatic LinkLocal IP Configuration + */ + +/* + * + * Copyright (c) 2007 Dominik Spies + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. 
The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * Author: Dominik Spies + * + * This is a AutoIP implementation for the lwIP TCP/IP stack. It aims to conform + * with RFC 3927. + * + */ + +#ifndef LWIP_HDR_AUTOIP_H +#define LWIP_HDR_AUTOIP_H + +#include "lwip/opt.h" + +#if LWIP_IPV4 && LWIP_AUTOIP /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/netif.h" +/* #include "lwip/udp.h" */ +#include "lwip/etharp.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/** AutoIP Timing */ +#define AUTOIP_TMR_INTERVAL 100 +#define AUTOIP_TICKS_PER_SECOND (1000 / AUTOIP_TMR_INTERVAL) + +/** AutoIP state information per netif */ +struct autoip +{ + /** the currently selected, probed, announced or used LL IP-Address */ + ip4_addr_t llipaddr; + /** current AutoIP state machine state */ + u8_t state; + /** sent number of probes or announces, dependent on state */ + u8_t sent_num; + /** ticks to wait, tick is AUTOIP_TMR_INTERVAL long */ + u16_t ttw; + /** ticks until a conflict can be solved by defending */ + u8_t lastconflict; + /** total number of probed/used Link Local IP-Addresses */ + u8_t tried_llipaddr; +}; + + +void autoip_set_struct(struct netif *netif, struct autoip *autoip); +/** Remove a struct autoip previously set to the netif using autoip_set_struct() */ +#define autoip_remove_struct(netif) do { (netif)->autoip = NULL; } while (0) +err_t autoip_start(struct netif *netif); +err_t autoip_stop(struct netif *netif); +void autoip_arp_reply(struct netif *netif, struct etharp_hdr *hdr); +void autoip_tmr(void); +void autoip_network_changed(struct netif *netif); +u8_t autoip_supplied_address(const struct netif *netif); + +/* for lwIP internal use by ip4.c */ +u8_t autoip_accept_packet(struct netif *netif, const ip4_addr_t *addr); + +#define netif_autoip_data(netif) ((struct autoip*)netif_get_client_data(netif, LWIP_NETIF_CLIENT_DATA_INDEX_AUTOIP)) + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_IPV4 && LWIP_AUTOIP */ + +#endif /* LWIP_HDR_AUTOIP_H */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/debug.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/debug.h new file mode 100644 index 0000000..346ac47 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/debug.h @@ -0,0 +1,161 @@ +/** + * @file + * Debug messages infrastructure + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. 
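Using the AutoIP API declared above, an application with LWIP_AUTOIP enabled can request RFC 3927 link-local (169.254/16) configuration roughly as sketched here; the static storage and function name are illustrative assumptions:

#include "lwip/autoip.h"

static struct autoip example_autoip;

void example_autoip_start(struct netif *netif)
{
  autoip_set_struct(netif, &example_autoip);  /* provide static storage instead of heap allocation */
  autoip_start(netif);                        /* begin probing for a 169.254/16 address */
}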
+ * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ +#ifndef LWIP_HDR_DEBUG_H +#define LWIP_HDR_DEBUG_H + +#include "lwip/arch.h" +#include "lwip/opt.h" + +/** + * @defgroup debugging_levels LWIP_DBG_MIN_LEVEL and LWIP_DBG_TYPES_ON values + * @ingroup lwip_opts_debugmsg + * @{ + */ + +/** @name Debug level (LWIP_DBG_MIN_LEVEL) + * @{ + */ +/** Debug level: ALL messages*/ +#define LWIP_DBG_LEVEL_ALL 0x00 +/** Debug level: Warnings. bad checksums, dropped packets, ... */ +#define LWIP_DBG_LEVEL_WARNING 0x01 +/** Debug level: Serious. memory allocation failures, ... */ +#define LWIP_DBG_LEVEL_SERIOUS 0x02 +/** Debug level: Severe */ +#define LWIP_DBG_LEVEL_SEVERE 0x03 +/** + * @} + */ + +#define LWIP_DBG_MASK_LEVEL 0x03 +/* compatibility define only */ +#define LWIP_DBG_LEVEL_OFF LWIP_DBG_LEVEL_ALL + +/** @name Enable/disable debug messages completely (LWIP_DBG_TYPES_ON) + * @{ + */ +/** flag for LWIP_DEBUGF to enable that debug message */ +#define LWIP_DBG_ON 0x80U +/** flag for LWIP_DEBUGF to disable that debug message */ +#define LWIP_DBG_OFF 0x00U +/** + * @} + */ + +/** @name Debug message types (LWIP_DBG_TYPES_ON) + * @{ + */ +/** flag for LWIP_DEBUGF indicating a tracing message (to follow program flow) */ +#define LWIP_DBG_TRACE 0x40U +/** flag for LWIP_DEBUGF indicating a state debug message (to follow module states) */ +#define LWIP_DBG_STATE 0x20U +/** flag for LWIP_DEBUGF indicating newly added code, not thoroughly tested yet */ +#define LWIP_DBG_FRESH 0x10U +/** flag for LWIP_DEBUGF to halt after printing this debug message */ +#define LWIP_DBG_HALT 0x08U +/** + * @} + */ + +/** + * @} + */ + +/** + * @defgroup lwip_assertions Assertion handling + * @ingroup lwip_opts_debug + * @{ + */ +/** + * LWIP_NOASSERT: Disable LWIP_ASSERT checks: + * To disable assertions define LWIP_NOASSERT in arch/cc.h. 
+ */ +#ifdef __DOXYGEN__ +#define LWIP_NOASSERT +#undef LWIP_NOASSERT +#endif +/** + * @} + */ + +#ifndef LWIP_NOASSERT +#define LWIP_ASSERT(message, assertion) do { if (!(assertion)) { \ + LWIP_PLATFORM_ASSERT(message); }} while(0) +#else /* LWIP_NOASSERT */ +#define LWIP_ASSERT(message, assertion) +#endif /* LWIP_NOASSERT */ + +#ifndef LWIP_ERROR +#ifndef LWIP_NOASSERT +#define LWIP_PLATFORM_ERROR(message) LWIP_PLATFORM_ASSERT(message) +#elif defined LWIP_DEBUG +#define LWIP_PLATFORM_ERROR(message) LWIP_PLATFORM_DIAG((message)) +#else +#define LWIP_PLATFORM_ERROR(message) +#endif + +/* if "expression" isn't true, then print "message" and execute "handler" expression */ +#define LWIP_ERROR(message, expression, handler) do { if (!(expression)) { \ + LWIP_PLATFORM_ERROR(message); handler;}} while(0) +#endif /* LWIP_ERROR */ + +/** Enable debug message printing, but only if debug message type is enabled + * AND is of correct type AND is at least LWIP_DBG_LEVEL. + */ +#ifdef __DOXYGEN__ +#define LWIP_DEBUG +#undef LWIP_DEBUG +#endif + +#ifdef LWIP_DEBUG +#define LWIP_DEBUGF(debug, message) do { \ + if ( \ + ((debug) & LWIP_DBG_ON) && \ + ((debug) & LWIP_DBG_TYPES_ON) && \ + ((s16_t)((debug) & LWIP_DBG_MASK_LEVEL) >= LWIP_DBG_MIN_LEVEL)) { \ + LWIP_PLATFORM_DIAG(message); \ + if ((debug) & LWIP_DBG_HALT) { \ + while(1); \ + } \ + } \ + } while(0) + +#else /* LWIP_DEBUG */ +#define LWIP_DEBUGF(debug, message) +#endif /* LWIP_DEBUG */ + +#endif /* LWIP_HDR_DEBUG_H */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/def.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/def.h new file mode 100644 index 0000000..10b5ac2 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/def.h @@ -0,0 +1,152 @@ +/** + * @file + * various utility macros + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
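A short illustration of how the debug hooks above are typically wired up, assuming a module-specific MY_APP_DEBUG switch defined in lwipopts.h; a sketch only, not part of the patch.

/* In lwipopts.h (assumed names, for illustration):
 *   #define LWIP_DEBUG
 *   #define LWIP_DBG_MIN_LEVEL  LWIP_DBG_LEVEL_WARNING
 *   #define LWIP_DBG_TYPES_ON   LWIP_DBG_ON
 *   #define MY_APP_DEBUG        (LWIP_DBG_ON | LWIP_DBG_TRACE)
 */
#include "lwip/debug.h"

void my_app_rx(int len)                        /* assumed function */
{
  LWIP_ASSERT("my_app_rx: negative length", len >= 0);
  /* note: the message is a parenthesized printf-style argument list */
  LWIP_DEBUGF(MY_APP_DEBUG | LWIP_DBG_TRACE, ("my_app_rx: %d bytes\n", len));
  if (len == 0) {
    LWIP_DEBUGF(MY_APP_DEBUG | LWIP_DBG_LEVEL_WARNING, ("my_app_rx: empty frame\n"));
  }
}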
+ * + * Author: Adam Dunkels + * + */ + +/** + * @defgroup perf Performance measurement + * @ingroup sys_layer + * All defines related to this section must not be placed in lwipopts.h, + * but in arch/perf.h! + * Measurement calls made throughout lwip, these can be defined to nothing. + * - PERF_START: start measuring something. + * - PERF_STOP(x): stop measuring something, and record the result. + */ + +#ifndef LWIP_HDR_DEF_H +#define LWIP_HDR_DEF_H + +/* arch.h might define NULL already */ +#include "lwip/arch.h" +#include "lwip/opt.h" +#if LWIP_PERF +#include "arch/perf.h" +#else /* LWIP_PERF */ +#define PERF_START /* null definition */ +#define PERF_STOP(x) /* null definition */ +#endif /* LWIP_PERF */ + +#ifdef __cplusplus +extern "C" { +#endif + +#define LWIP_MAX(x , y) (((x) > (y)) ? (x) : (y)) +#define LWIP_MIN(x , y) (((x) < (y)) ? (x) : (y)) + +/* Get the number of entries in an array ('x' must NOT be a pointer!) */ +#define LWIP_ARRAYSIZE(x) (sizeof(x)/sizeof((x)[0])) + +/** Create u32_t value from bytes */ +#define LWIP_MAKEU32(a,b,c,d) (((u32_t)((a) & 0xff) << 24) | \ + ((u32_t)((b) & 0xff) << 16) | \ + ((u32_t)((c) & 0xff) << 8) | \ + (u32_t)((d) & 0xff)) + +#ifndef NULL +#ifdef __cplusplus +#define NULL 0 +#else +#define NULL ((void *)0) +#endif +#endif + +#if BYTE_ORDER == BIG_ENDIAN +#define lwip_htons(x) ((u16_t)(x)) +#define lwip_ntohs(x) ((u16_t)(x)) +#define lwip_htonl(x) ((u32_t)(x)) +#define lwip_ntohl(x) ((u32_t)(x)) +#define PP_HTONS(x) ((u16_t)(x)) +#define PP_NTOHS(x) ((u16_t)(x)) +#define PP_HTONL(x) ((u32_t)(x)) +#define PP_NTOHL(x) ((u32_t)(x)) +#else /* BYTE_ORDER != BIG_ENDIAN */ +#ifndef lwip_htons +u16_t lwip_htons(u16_t x); +#endif +#define lwip_ntohs(x) lwip_htons(x) + +#ifndef lwip_htonl +u32_t lwip_htonl(u32_t x); +#endif +#define lwip_ntohl(x) lwip_htonl(x) + +/* These macros should be calculated by the preprocessor and are used + with compile-time constants only (so that there is no little-endian + overhead at runtime). */ +#define PP_HTONS(x) ((u16_t)((((x) & (u16_t)0x00ffU) << 8) | (((x) & (u16_t)0xff00U) >> 8))) +#define PP_NTOHS(x) PP_HTONS(x) +#define PP_HTONL(x) ((((x) & (u32_t)0x000000ffUL) << 24) | \ + (((x) & (u32_t)0x0000ff00UL) << 8) | \ + (((x) & (u32_t)0x00ff0000UL) >> 8) | \ + (((x) & (u32_t)0xff000000UL) >> 24)) +#define PP_NTOHL(x) PP_HTONL(x) +#endif /* BYTE_ORDER == BIG_ENDIAN */ + +/* Provide usual function names as macros for users, but this can be turned off */ +#ifndef LWIP_DONT_PROVIDE_BYTEORDER_FUNCTIONS +#define htons(x) lwip_htons(x) +#define ntohs(x) lwip_ntohs(x) +#define htonl(x) lwip_htonl(x) +#define ntohl(x) lwip_ntohl(x) +#endif + +/* Functions that are not available as standard implementations. + * In cc.h, you can #define these to implementations available on + * your platform to save some code bytes if you use these functions + * in your application, too. 
+ */ + +#ifndef lwip_itoa +/* This can be #defined to itoa() or snprintf(result, bufsize, "%d", number) depending on your platform */ +void lwip_itoa(char* result, size_t bufsize, int number); +#endif +#ifndef lwip_strnicmp +/* This can be #defined to strnicmp() or strncasecmp() depending on your platform */ +int lwip_strnicmp(const char* str1, const char* str2, size_t len); +#endif +#ifndef lwip_stricmp +/* This can be #defined to stricmp() or strcasecmp() depending on your platform */ +int lwip_stricmp(const char* str1, const char* str2); +#endif +#ifndef lwip_strnstr +/* This can be #defined to strnstr() depending on your platform */ +char* lwip_strnstr(const char* buffer, const char* token, size_t n); +#endif + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_DEF_H */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/dhcp.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/dhcp.h new file mode 100644 index 0000000..a965efd --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/dhcp.h @@ -0,0 +1,139 @@ +/** + * @file + * DHCP client API + */ + +/* + * Copyright (c) 2001-2004 Leon Woestenberg + * Copyright (c) 2001-2004 Axon Digital Design B.V., The Netherlands. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
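The byte-order helpers above differ in when they are evaluated: lwip_htons()/lwip_htonl() are runtime calls, while PP_HTONS()/PP_HTONL() are meant for compile-time constants such as static initializers. A small sketch, illustrative only (names are assumptions):

#include "lwip/def.h"

/* compile-time constant: use the preprocessor variant, usable in initializers */
#define HTTP_PORT_NBO   PP_HTONS(80)

u16_t port_to_network(u16_t host_port)
{
  /* runtime value: use the function (a no-op on big-endian builds) */
  return lwip_htons(host_port);
}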
+ * + * Author: Leon Woestenberg + * + */ +#ifndef LWIP_HDR_DHCP_H +#define LWIP_HDR_DHCP_H + +#include "lwip/opt.h" + +#if LWIP_DHCP /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/netif.h" +#include "lwip/udp.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/** period (in seconds) of the application calling dhcp_coarse_tmr() */ +#define DHCP_COARSE_TIMER_SECS 60 +/** period (in milliseconds) of the application calling dhcp_coarse_tmr() */ +#define DHCP_COARSE_TIMER_MSECS (DHCP_COARSE_TIMER_SECS * 1000UL) +/** period (in milliseconds) of the application calling dhcp_fine_tmr() */ +#define DHCP_FINE_TIMER_MSECS 500 + +#define DHCP_BOOT_FILE_LEN 128U + +/* AutoIP cooperation flags (struct dhcp.autoip_coop_state) */ +typedef enum { + DHCP_AUTOIP_COOP_STATE_OFF = 0, + DHCP_AUTOIP_COOP_STATE_ON = 1 +} dhcp_autoip_coop_state_enum_t; + +struct dhcp +{ + /** transaction identifier of last sent request */ + u32_t xid; + /** track PCB allocation state */ + u8_t pcb_allocated; + /** current DHCP state machine state */ + u8_t state; + /** retries of current request */ + u8_t tries; +#if LWIP_DHCP_AUTOIP_COOP + u8_t autoip_coop_state; +#endif + u8_t subnet_mask_given; + + u16_t request_timeout; /* #ticks with period DHCP_FINE_TIMER_SECS for request timeout */ + u16_t t1_timeout; /* #ticks with period DHCP_COARSE_TIMER_SECS for renewal time */ + u16_t t2_timeout; /* #ticks with period DHCP_COARSE_TIMER_SECS for rebind time */ + u16_t t1_renew_time; /* #ticks with period DHCP_COARSE_TIMER_SECS until next renew try */ + u16_t t2_rebind_time; /* #ticks with period DHCP_COARSE_TIMER_SECS until next rebind try */ + u16_t lease_used; /* #ticks with period DHCP_COARSE_TIMER_SECS since last received DHCP ack */ + u16_t t0_timeout; /* #ticks with period DHCP_COARSE_TIMER_SECS for lease time */ + ip_addr_t server_ip_addr; /* dhcp server address that offered this lease (ip_addr_t because passed to UDP) */ + ip4_addr_t offered_ip_addr; + ip4_addr_t offered_sn_mask; + ip4_addr_t offered_gw_addr; + + u32_t offered_t0_lease; /* lease period (in seconds) */ + u32_t offered_t1_renew; /* recommended renew time (usually 50% of lease period) */ + u32_t offered_t2_rebind; /* recommended rebind time (usually 87.5 of lease period) */ +#if LWIP_DHCP_BOOTP_FILE + ip4_addr_t offered_si_addr; + char boot_file_name[DHCP_BOOT_FILE_LEN]; +#endif /* LWIP_DHCP_BOOTPFILE */ +}; + + +void dhcp_set_struct(struct netif *netif, struct dhcp *dhcp); +/** Remove a struct dhcp previously set to the netif using dhcp_set_struct() */ +#define dhcp_remove_struct(netif) netif_set_client_data(netif, LWIP_NETIF_CLIENT_DATA_INDEX_DHCP, NULL) +void dhcp_cleanup(struct netif *netif); +err_t dhcp_start(struct netif *netif); +err_t dhcp_renew(struct netif *netif); +err_t dhcp_release(struct netif *netif); +void dhcp_stop(struct netif *netif); +void dhcp_release_and_stop(struct netif *netif); +void dhcp_inform(struct netif *netif); +void dhcp_network_changed(struct netif *netif); +#if DHCP_DOES_ARP_CHECK +void dhcp_arp_reply(struct netif *netif, const ip4_addr_t *addr); +#endif +u8_t dhcp_supplied_address(const struct netif *netif); +/* to be called every minute */ +void dhcp_coarse_tmr(void); +/* to be called every half second */ +void dhcp_fine_tmr(void); + +#if LWIP_DHCP_GET_NTP_SRV +/** This function must exist, in other to add offered NTP servers to + * the NTP (or SNTP) engine. 
+ * See LWIP_DHCP_MAX_NTP_SERVERS */ +extern void dhcp_set_ntp_servers(u8_t num_ntp_servers, const ip4_addr_t* ntp_server_addrs); +#endif /* LWIP_DHCP_GET_NTP_SRV */ + +#define netif_dhcp_data(netif) ((struct dhcp*)netif_get_client_data(netif, LWIP_NETIF_CLIENT_DATA_INDEX_DHCP)) + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_DHCP */ + +#endif /*LWIP_HDR_DHCP_H*/ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/dhcp6.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/dhcp6.h new file mode 100644 index 0000000..61ca4c4 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/dhcp6.h @@ -0,0 +1,104 @@ +/** + * @file + * + * DHCPv6 client: IPv6 address autoconfiguration as per + * RFC 3315 (stateful DHCPv6) and + * RFC 3736 (stateless DHCPv6). + */ + +/* + * Copyright (c) 2018 Simon Goldschmidt + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
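A minimal sketch of driving the DHCP client declared above from application code, assuming the netif has already been added and the link is up; with the threaded (tcpip_thread) configuration used in this repository the coarse/fine timers are serviced by lwIP itself, so only dhcp_start() and the address check are needed. Helper names are assumptions.

#include "lwip/dhcp.h"
#include "lwip/netif.h"

err_t start_dhcp(struct netif *netif)
{
  return dhcp_start(netif);          /* (re)starts DHCP; e.g. ERR_MEM on failure */
}

int dhcp_bound(struct netif *netif)
{
  /* non-zero once a lease was obtained and the address is in use */
  return dhcp_supplied_address(netif);
}

void stop_dhcp(struct netif *netif)
{
  dhcp_release_and_stop(netif);      /* release the lease, then stop the client */
}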
+ * + * Author: Simon Goldschmidt + */ + +#ifndef LWIP_HDR_IP6_DHCP6_H +#define LWIP_HDR_IP6_DHCP6_H + +#include "lwip/opt.h" + +#if LWIP_IPV6_DHCP6 /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/err.h" +#include "lwip/netif.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/** period (in milliseconds) of the application calling dhcp6_tmr() */ +#define DHCP6_TIMER_MSECS 500 + +struct dhcp6 +{ + /** transaction identifier of last sent request */ + u32_t xid; + /** track PCB allocation state */ + u8_t pcb_allocated; + /** current DHCPv6 state machine state */ + u8_t state; + /** retries of current request */ + u8_t tries; + /** if request config is triggered while another action is active, this keeps track of it */ + u8_t request_config_pending; + /** #ticks with period DHCP6_TIMER_MSECS for request timeout */ + u16_t request_timeout; +#if LWIP_IPV6_DHCP6_STATEFUL + /* @todo: add more members here to keep track of stateful DHCPv6 data, like lease times */ +#endif /* LWIP_IPV6_DHCP6_STATEFUL */ +}; + +void dhcp6_set_struct(struct netif *netif, struct dhcp6 *dhcp6); +/** Remove a struct dhcp6 previously set to the netif using dhcp6_set_struct() */ +#define dhcp6_remove_struct(netif) netif_set_client_data(netif, LWIP_NETIF_CLIENT_DATA_INDEX_DHCP6, NULL) +void dhcp6_cleanup(struct netif *netif); + +err_t dhcp6_enable_stateful(struct netif *netif); +err_t dhcp6_enable_stateless(struct netif *netif); +void dhcp6_disable(struct netif *netif); + +void dhcp6_tmr(void); + +void dhcp6_nd6_ra_trigger(struct netif *netif, u8_t managed_addr_config, u8_t other_config); + +#if LWIP_DHCP6_GET_NTP_SRV +/** This function must exist, in other to add offered NTP servers to + * the NTP (or SNTP) engine. + * See LWIP_DHCP6_MAX_NTP_SERVERS */ +extern void dhcp6_set_ntp_servers(u8_t num_ntp_servers, const ip_addr_t* ntp_server_addrs); +#endif /* LWIP_DHCP6_GET_NTP_SRV */ + +#define netif_dhcp6_data(netif) ((struct dhcp6*)netif_get_client_data(netif, LWIP_NETIF_CLIENT_DATA_INDEX_DHCP6)) + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_IPV6_DHCP6 */ + +#endif /* LWIP_HDR_IP6_DHCP6_H */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/dns.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/dns.h new file mode 100644 index 0000000..eed6825 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/dns.h @@ -0,0 +1,131 @@ +/** + * @file + * DNS API + */ + +/** + * lwip DNS resolver header file. + + * Author: Jim Pettinato + * April 2007 + + * ported from uIP resolv.c Copyright (c) 2002-2003, Adam Dunkels. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote + * products derived from this software without specific prior + * written permission. 
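For the DHCPv6 header above, the common embedded use is stateless mode (addresses come from SLAAC; DHCPv6 only supplies extra configuration such as DNS servers). A hedged sketch, assuming LWIP_IPV6 and LWIP_IPV6_DHCP6 are enabled; the function name is an assumption.

#include "lwip/dhcp6.h"

void start_dhcp6(struct netif *netif)
{
  /* stateless DHCPv6: request other-configuration only (RFC 3736) */
  if (dhcp6_enable_stateless(netif) != ERR_OK) {
    /* handle allocation/argument errors here */
  }
  /* dhcp6_tmr() runs every DHCP6_TIMER_MSECS; the timeouts module
   * normally schedules it, so nothing else is needed in a threaded port. */
}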
+ * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS + * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE + * GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef LWIP_HDR_DNS_H +#define LWIP_HDR_DNS_H + +#include "lwip/opt.h" + +#if LWIP_DNS + +#include "lwip/ip_addr.h" +#include "lwip/err.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/** DNS timer period */ +#define DNS_TMR_INTERVAL 1000 + +/* DNS resolve types: */ +#define LWIP_DNS_ADDRTYPE_IPV4 0 +#define LWIP_DNS_ADDRTYPE_IPV6 1 +#define LWIP_DNS_ADDRTYPE_IPV4_IPV6 2 /* try to resolve IPv4 first, try IPv6 if IPv4 fails only */ +#define LWIP_DNS_ADDRTYPE_IPV6_IPV4 3 /* try to resolve IPv6 first, try IPv4 if IPv6 fails only */ +#if LWIP_IPV4 && LWIP_IPV6 +#ifndef LWIP_DNS_ADDRTYPE_DEFAULT +#define LWIP_DNS_ADDRTYPE_DEFAULT LWIP_DNS_ADDRTYPE_IPV4_IPV6 +#endif +#elif LWIP_IPV4 +#define LWIP_DNS_ADDRTYPE_DEFAULT LWIP_DNS_ADDRTYPE_IPV4 +#else +#define LWIP_DNS_ADDRTYPE_DEFAULT LWIP_DNS_ADDRTYPE_IPV6 +#endif + +#if DNS_LOCAL_HOSTLIST +/** struct used for local host-list */ +struct local_hostlist_entry { + /** static hostname */ + const char *name; + /** static host address in network byteorder */ + ip_addr_t addr; + struct local_hostlist_entry *next; +}; +#define DNS_LOCAL_HOSTLIST_ELEM(name, addr_init) {name, addr_init, NULL} +#if DNS_LOCAL_HOSTLIST_IS_DYNAMIC +#ifndef DNS_LOCAL_HOSTLIST_MAX_NAMELEN +#define DNS_LOCAL_HOSTLIST_MAX_NAMELEN DNS_MAX_NAME_LENGTH +#endif +#define LOCALHOSTLIST_ELEM_SIZE ((sizeof(struct local_hostlist_entry) + DNS_LOCAL_HOSTLIST_MAX_NAMELEN + 1)) +#endif /* DNS_LOCAL_HOSTLIST_IS_DYNAMIC */ +#endif /* DNS_LOCAL_HOSTLIST */ + +#if LWIP_IPV4 +extern const ip_addr_t dns_mquery_v4group; +#endif /* LWIP_IPV4 */ +#if LWIP_IPV6 +extern const ip_addr_t dns_mquery_v6group; +#endif /* LWIP_IPV6 */ + +/** Callback which is invoked when a hostname is found. + * A function of this type must be implemented by the application using the DNS resolver. + * @param name pointer to the name that was looked up. + * @param ipaddr pointer to an ip_addr_t containing the IP address of the hostname, + * or NULL if the name could not be found (or on any other error). 
+ * @param callback_arg a user-specified callback argument passed to dns_gethostbyname +*/ +typedef void (*dns_found_callback)(const char *name, const ip_addr_t *ipaddr, void *callback_arg); + +void dns_init(void); +void dns_tmr(void); +void dns_setserver(u8_t numdns, const ip_addr_t *dnsserver); +const ip_addr_t* dns_getserver(u8_t numdns); +err_t dns_gethostbyname(const char *hostname, ip_addr_t *addr, + dns_found_callback found, void *callback_arg); +err_t dns_gethostbyname_addrtype(const char *hostname, ip_addr_t *addr, + dns_found_callback found, void *callback_arg, + u8_t dns_addrtype); + + +#if DNS_LOCAL_HOSTLIST +size_t dns_local_iterate(dns_found_callback iterator_fn, void *iterator_arg); +err_t dns_local_lookup(const char *hostname, ip_addr_t *addr, u8_t dns_addrtype); +#if DNS_LOCAL_HOSTLIST_IS_DYNAMIC +int dns_local_removehost(const char *hostname, const ip_addr_t *addr); +err_t dns_local_addhost(const char *hostname, const ip_addr_t *addr); +#endif /* DNS_LOCAL_HOSTLIST_IS_DYNAMIC */ +#endif /* DNS_LOCAL_HOSTLIST */ + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_DNS */ + +#endif /* LWIP_HDR_DNS_H */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/err.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/err.h new file mode 100644 index 0000000..63c138a --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/err.h @@ -0,0 +1,117 @@ +/** + * @file + * lwIP Error codes + */ +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ +#ifndef LWIP_HDR_ERR_H +#define LWIP_HDR_ERR_H + +#include "lwip/opt.h" +#include "lwip/arch.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * @defgroup infrastructure_errors Error codes + * @ingroup infrastructure + * @{ + */ + +/** Definitions for error constants. */ +typedef enum { +/** No error, everything OK. */ + ERR_OK = 0, +/** Out of memory error. 
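A sketch of an asynchronous lookup with the resolver API above; the callback fires later from the tcpip thread if the answer was not already cached. The hostname and helper names are placeholders.

#include "lwip/dns.h"
#include "lwip/ip_addr.h"

static void on_dns_found(const char *name, const ip_addr_t *ipaddr, void *arg)
{
  LWIP_UNUSED_ARG(arg);
  if (ipaddr != NULL) {
    /* resolved: ipaddr is only valid during this callback, copy if needed */
  } else {
    /* lookup for `name` failed */
  }
}

void resolve_example(void)
{
  ip_addr_t addr;
  err_t err = dns_gethostbyname("example.local", &addr, on_dns_found, NULL);
  if (err == ERR_OK) {
    /* answer was cached: `addr` already holds the result, no callback */
  } else if (err == ERR_INPROGRESS) {
    /* query sent: on_dns_found() will be called when it completes */
  }
}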
*/ + ERR_MEM = -1, +/** Buffer error. */ + ERR_BUF = -2, +/** Timeout. */ + ERR_TIMEOUT = -3, +/** Routing problem. */ + ERR_RTE = -4, +/** Operation in progress */ + ERR_INPROGRESS = -5, +/** Illegal value. */ + ERR_VAL = -6, +/** Operation would block. */ + ERR_WOULDBLOCK = -7, +/** Address in use. */ + ERR_USE = -8, +/** Already connecting. */ + ERR_ALREADY = -9, +/** Conn already established.*/ + ERR_ISCONN = -10, +/** Not connected. */ + ERR_CONN = -11, +/** Low-level netif error */ + ERR_IF = -12, + +/** Connection aborted. */ + ERR_ABRT = -13, +/** Connection reset. */ + ERR_RST = -14, +/** Connection closed. */ + ERR_CLSD = -15, +/** Illegal argument. */ + ERR_ARG = -16 +} err_enum_t; + +/** Define LWIP_ERR_T in cc.h if you want to use + * a different type for your platform (must be signed). */ +#ifdef LWIP_ERR_T +typedef LWIP_ERR_T err_t; +#else /* LWIP_ERR_T */ +typedef s8_t err_t; +#endif /* LWIP_ERR_T*/ + +/** + * @} + */ + +#ifdef LWIP_DEBUG +extern const char *lwip_strerr(err_t err); +#else +#define lwip_strerr(x) "" +#endif /* LWIP_DEBUG */ + +#if !NO_SYS +int err_to_errno(err_t err); +#endif /* !NO_SYS */ + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_ERR_H */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/errno.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/errno.h new file mode 100644 index 0000000..eae5d6e --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/errno.h @@ -0,0 +1,198 @@ +/** + * @file + * Posix Errno defines + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
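The error codes above are used by every API in this patch; a small propagation pattern follows (lwip_strerr() only yields text when LWIP_DEBUG is defined, otherwise it expands to an empty string). Sketch only, helper name assumed.

#include "lwip/err.h"
#include "lwip/debug.h"

err_t checked(err_t err, const char *what)
{
  if (err != ERR_OK) {
    LWIP_DEBUGF(LWIP_DBG_ON, ("%s: %s (err=%d)\n", what, lwip_strerr(err), (int)err));
  }
  return err;
}

/* usage: checked(dhcp_start(netif), "dhcp_start"); */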
+ * + * Author: Adam Dunkels + * + */ +#ifndef LWIP_HDR_ERRNO_H +#define LWIP_HDR_ERRNO_H + +#include "lwip/opt.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#ifdef LWIP_PROVIDE_ERRNO + +#define EPERM 1 /* Operation not permitted */ +#define ENOENT 2 /* No such file or directory */ +#define ESRCH 3 /* No such process */ +#define EINTR 4 /* Interrupted system call */ +#define EIO 5 /* I/O error */ +#define ENXIO 6 /* No such device or address */ +#define E2BIG 7 /* Arg list too long */ +#define ENOEXEC 8 /* Exec format error */ +#define EBADF 9 /* Bad file number */ +#define ECHILD 10 /* No child processes */ +#define EAGAIN 11 /* Try again */ +#define ENOMEM 12 /* Out of memory */ +#define EACCES 13 /* Permission denied */ +#define EFAULT 14 /* Bad address */ +#define ENOTBLK 15 /* Block device required */ +#define EBUSY 16 /* Device or resource busy */ +#define EEXIST 17 /* File exists */ +#define EXDEV 18 /* Cross-device link */ +#define ENODEV 19 /* No such device */ +#define ENOTDIR 20 /* Not a directory */ +#define EISDIR 21 /* Is a directory */ +#define EINVAL 22 /* Invalid argument */ +#define ENFILE 23 /* File table overflow */ +#define EMFILE 24 /* Too many open files */ +#define ENOTTY 25 /* Not a typewriter */ +#define ETXTBSY 26 /* Text file busy */ +#define EFBIG 27 /* File too large */ +#define ENOSPC 28 /* No space left on device */ +#define ESPIPE 29 /* Illegal seek */ +#define EROFS 30 /* Read-only file system */ +#define EMLINK 31 /* Too many links */ +#define EPIPE 32 /* Broken pipe */ +#define EDOM 33 /* Math argument out of domain of func */ +#define ERANGE 34 /* Math result not representable */ +#define EDEADLK 35 /* Resource deadlock would occur */ +#define ENAMETOOLONG 36 /* File name too long */ +#define ENOLCK 37 /* No record locks available */ +#define ENOSYS 38 /* Function not implemented */ +#define ENOTEMPTY 39 /* Directory not empty */ +#define ELOOP 40 /* Too many symbolic links encountered */ +#define EWOULDBLOCK EAGAIN /* Operation would block */ +#define ENOMSG 42 /* No message of desired type */ +#define EIDRM 43 /* Identifier removed */ +#define ECHRNG 44 /* Channel number out of range */ +#define EL2NSYNC 45 /* Level 2 not synchronized */ +#define EL3HLT 46 /* Level 3 halted */ +#define EL3RST 47 /* Level 3 reset */ +#define ELNRNG 48 /* Link number out of range */ +#define EUNATCH 49 /* Protocol driver not attached */ +#define ENOCSI 50 /* No CSI structure available */ +#define EL2HLT 51 /* Level 2 halted */ +#define EBADE 52 /* Invalid exchange */ +#define EBADR 53 /* Invalid request descriptor */ +#define EXFULL 54 /* Exchange full */ +#define ENOANO 55 /* No anode */ +#define EBADRQC 56 /* Invalid request code */ +#define EBADSLT 57 /* Invalid slot */ + +#define EDEADLOCK EDEADLK + +#define EBFONT 59 /* Bad font file format */ +#define ENOSTR 60 /* Device not a stream */ +#define ENODATA 61 /* No data available */ +#define ETIME 62 /* Timer expired */ +#define ENOSR 63 /* Out of streams resources */ +#define ENONET 64 /* Machine is not on the network */ +#define ENOPKG 65 /* Package not installed */ +#define EREMOTE 66 /* Object is remote */ +#define ENOLINK 67 /* Link has been severed */ +#define EADV 68 /* Advertise error */ +#define ESRMNT 69 /* Srmount error */ +#define ECOMM 70 /* Communication error on send */ +#define EPROTO 71 /* Protocol error */ +#define EMULTIHOP 72 /* Multihop attempted */ +#define EDOTDOT 73 /* RFS specific error */ +#define EBADMSG 74 /* Not a data message */ +#define EOVERFLOW 75 /* Value too large for defined 
data type */ +#define ENOTUNIQ 76 /* Name not unique on network */ +#define EBADFD 77 /* File descriptor in bad state */ +#define EREMCHG 78 /* Remote address changed */ +#define ELIBACC 79 /* Can not access a needed shared library */ +#define ELIBBAD 80 /* Accessing a corrupted shared library */ +#define ELIBSCN 81 /* .lib section in a.out corrupted */ +#define ELIBMAX 82 /* Attempting to link in too many shared libraries */ +#define ELIBEXEC 83 /* Cannot exec a shared library directly */ +#define EILSEQ 84 /* Illegal byte sequence */ +#define ERESTART 85 /* Interrupted system call should be restarted */ +#define ESTRPIPE 86 /* Streams pipe error */ +#define EUSERS 87 /* Too many users */ +#define ENOTSOCK 88 /* Socket operation on non-socket */ +#define EDESTADDRREQ 89 /* Destination address required */ +#define EMSGSIZE 90 /* Message too long */ +#define EPROTOTYPE 91 /* Protocol wrong type for socket */ +#define ENOPROTOOPT 92 /* Protocol not available */ +#define EPROTONOSUPPORT 93 /* Protocol not supported */ +#define ESOCKTNOSUPPORT 94 /* Socket type not supported */ +#define EOPNOTSUPP 95 /* Operation not supported on transport endpoint */ +#define EPFNOSUPPORT 96 /* Protocol family not supported */ +#define EAFNOSUPPORT 97 /* Address family not supported by protocol */ +#define EADDRINUSE 98 /* Address already in use */ +#define EADDRNOTAVAIL 99 /* Cannot assign requested address */ +#define ENETDOWN 100 /* Network is down */ +#define ENETUNREACH 101 /* Network is unreachable */ +#define ENETRESET 102 /* Network dropped connection because of reset */ +#define ECONNABORTED 103 /* Software caused connection abort */ +#define ECONNRESET 104 /* Connection reset by peer */ +#define ENOBUFS 105 /* No buffer space available */ +#define EISCONN 106 /* Transport endpoint is already connected */ +#define ENOTCONN 107 /* Transport endpoint is not connected */ +#define ESHUTDOWN 108 /* Cannot send after transport endpoint shutdown */ +#define ETOOMANYREFS 109 /* Too many references: cannot splice */ +#define ETIMEDOUT 110 /* Connection timed out */ +#define ECONNREFUSED 111 /* Connection refused */ +#define EHOSTDOWN 112 /* Host is down */ +#define EHOSTUNREACH 113 /* No route to host */ +#define EALREADY 114 /* Operation already in progress */ +#define EINPROGRESS 115 /* Operation now in progress */ +#define ESTALE 116 /* Stale NFS file handle */ +#define EUCLEAN 117 /* Structure needs cleaning */ +#define ENOTNAM 118 /* Not a XENIX named type file */ +#define ENAVAIL 119 /* No XENIX semaphores available */ +#define EISNAM 120 /* Is a named type file */ +#define EREMOTEIO 121 /* Remote I/O error */ +#define EDQUOT 122 /* Quota exceeded */ + +#define ENOMEDIUM 123 /* No medium found */ +#define EMEDIUMTYPE 124 /* Wrong medium type */ + +#ifndef errno +extern int errno; +#endif + +#else /* LWIP_PROVIDE_ERRNO */ + +/* Define LWIP_ERRNO_STDINCLUDE if you want to include here */ +#ifdef LWIP_ERRNO_STDINCLUDE +#include +#else /* LWIP_ERRNO_STDINCLUDE */ +/* Define LWIP_ERRNO_INCLUDE to an equivalent of to include the error defines here */ +#ifdef LWIP_ERRNO_INCLUDE +#include LWIP_ERRNO_INCLUDE +#endif /* LWIP_ERRNO_INCLUDE */ +#endif /* LWIP_ERRNO_STDINCLUDE */ + +#endif /* LWIP_PROVIDE_ERRNO */ + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_ERRNO_H */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/etharp.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/etharp.h new file 
mode 100644 index 0000000..b018f07 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/etharp.h @@ -0,0 +1,105 @@ +/** + * @file + * Ethernet output function - handles OUTGOING ethernet level traffic, implements + * ARP resolving. + * To be used in most low-level netif implementations + */ + +/* + * Copyright (c) 2001-2003 Swedish Institute of Computer Science. + * Copyright (c) 2003-2004 Leon Woestenberg + * Copyright (c) 2003-2004 Axon Digital Design B.V., The Netherlands. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ + +#ifndef LWIP_HDR_NETIF_ETHARP_H +#define LWIP_HDR_NETIF_ETHARP_H + +#include "lwip/opt.h" + +#if LWIP_ARP || LWIP_ETHERNET /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/pbuf.h" +#include "lwip/ip4_addr.h" +#include "lwip/netif.h" +#include "lwip/ip4.h" +#include "lwip/prot/ethernet.h" + +#if LWIP_IPV4 && LWIP_ARP /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/prot/etharp.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/** 1 seconds period */ +#define ARP_TMR_INTERVAL 1000 + +#if ARP_QUEUEING +/** struct for queueing outgoing packets for unknown address + * defined here to be accessed by memp.h + */ +struct etharp_q_entry { + struct etharp_q_entry *next; + struct pbuf *p; +}; +#endif /* ARP_QUEUEING */ + +#define etharp_init() /* Compatibility define, no init needed. 
*/ +void etharp_tmr(void); +ssize_t etharp_find_addr(struct netif *netif, const ip4_addr_t *ipaddr, + struct eth_addr **eth_ret, const ip4_addr_t **ip_ret); +int etharp_get_entry(size_t i, ip4_addr_t **ipaddr, struct netif **netif, struct eth_addr **eth_ret); +err_t etharp_output(struct netif *netif, struct pbuf *q, const ip4_addr_t *ipaddr); +err_t etharp_query(struct netif *netif, const ip4_addr_t *ipaddr, struct pbuf *q); +err_t etharp_request(struct netif *netif, const ip4_addr_t *ipaddr); +/** For Ethernet network interfaces, we might want to send "gratuitous ARP"; + * this is an ARP packet sent by a node in order to spontaneously cause other + * nodes to update an entry in their ARP cache. + * From RFC 3220 "IP Mobility Support for IPv4" section 4.6. */ +#define etharp_gratuitous(netif) etharp_request((netif), netif_ip4_addr(netif)) +void etharp_cleanup_netif(struct netif *netif); + +#if ETHARP_SUPPORT_STATIC_ENTRIES +err_t etharp_add_static_entry(const ip4_addr_t *ipaddr, struct eth_addr *ethaddr); +err_t etharp_remove_static_entry(const ip4_addr_t *ipaddr); +#endif /* ETHARP_SUPPORT_STATIC_ENTRIES */ + +void etharp_input(struct pbuf *p, struct netif *netif); + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_IPV4 && LWIP_ARP */ +#endif /* LWIP_ARP || LWIP_ETHERNET */ + +#endif /* LWIP_HDR_NETIF_ETHARP_H */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/ethip6.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/ethip6.h new file mode 100644 index 0000000..f393c9c --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/ethip6.h @@ -0,0 +1,68 @@ +/** + * @file + * + * Ethernet output for IPv6. Uses ND tables for link-layer addressing. + */ + +/* + * Copyright (c) 2010 Inico Technologies Ltd. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
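For the ARP interface above, an Ethernet netif driver normally routes its IPv4 output through etharp_output() and may announce its address with a gratuitous ARP once the interface is up. A sketch of that wiring, loosely following the usual ethernetif template; the init function name is an assumption, not code from this patch.

#include "lwip/etharp.h"
#include "lwip/netif.h"

/* inside the netif init callback of an Ethernet driver (assumed name) */
err_t my_ethernetif_init(struct netif *netif)
{
  netif->output = etharp_output;     /* IPv4 output goes through ARP resolution */
  /* netif->linkoutput = ...;           driver-specific raw frame transmit */
  netif->flags |= NETIF_FLAG_ETHARP | NETIF_FLAG_BROADCAST;
  return ERR_OK;
}

/* after the interface has an address and the link is up */
void announce_address(struct netif *netif)
{
  etharp_gratuitous(netif);          /* etharp_request(netif, netif_ip4_addr(netif)) */
}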
+ * + * Author: Ivan Delamer + * + * + * Please coordinate changes and requests with Ivan Delamer + * + */ + +#ifndef LWIP_HDR_ETHIP6_H +#define LWIP_HDR_ETHIP6_H + +#include "lwip/opt.h" + +#if LWIP_IPV6 && LWIP_ETHERNET /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/pbuf.h" +#include "lwip/ip6.h" +#include "lwip/ip6_addr.h" +#include "lwip/netif.h" + + +#ifdef __cplusplus +extern "C" { +#endif + + +err_t ethip6_output(struct netif *netif, struct pbuf *q, const ip6_addr_t *ip6addr); + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_IPV6 && LWIP_ETHERNET */ + +#endif /* LWIP_HDR_ETHIP6_H */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/icmp.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/icmp.h new file mode 100644 index 0000000..d0d67b6 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/icmp.h @@ -0,0 +1,110 @@ +/** + * @file + * ICMP API + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
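ethip6.h above is the IPv6 counterpart: the same driver init hook points the IPv6 output at ethip6_output(), which consults the ND tables for the link-layer address. A sketch under the same assumptions as the ARP example; netif_create_ip6_linklocal_address() comes from netif.h (not shown in this hunk).

#include "lwip/ethip6.h"
#include "lwip/netif.h"

void wire_ipv6_output(struct netif *netif)     /* assumed helper name */
{
#if LWIP_IPV6
  netif->output_ip6 = ethip6_output;            /* ND-based neighbor resolution */
  netif_create_ip6_linklocal_address(netif, 1); /* derive fe80::/64 addr from MAC */
#endif
}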
+ * + * Author: Adam Dunkels + * + */ +#ifndef LWIP_HDR_ICMP_H +#define LWIP_HDR_ICMP_H + +#include "lwip/opt.h" +#include "lwip/pbuf.h" +#include "lwip/ip_addr.h" +#include "lwip/netif.h" +#include "lwip/prot/icmp.h" + +#if LWIP_IPV6 && LWIP_ICMP6 +#include "lwip/icmp6.h" +#endif + +#ifdef __cplusplus +extern "C" { +#endif + +/** ICMP destination unreachable codes */ +enum icmp_dur_type { + /** net unreachable */ + ICMP_DUR_NET = 0, + /** host unreachable */ + ICMP_DUR_HOST = 1, + /** protocol unreachable */ + ICMP_DUR_PROTO = 2, + /** port unreachable */ + ICMP_DUR_PORT = 3, + /** fragmentation needed and DF set */ + ICMP_DUR_FRAG = 4, + /** source route failed */ + ICMP_DUR_SR = 5 +}; + +/** ICMP time exceeded codes */ +enum icmp_te_type { + /** time to live exceeded in transit */ + ICMP_TE_TTL = 0, + /** fragment reassembly time exceeded */ + ICMP_TE_FRAG = 1 +}; + +#if LWIP_IPV4 && LWIP_ICMP /* don't build if not configured for use in lwipopts.h */ + +void icmp_input(struct pbuf *p, struct netif *inp); +void icmp_dest_unreach(struct pbuf *p, enum icmp_dur_type t); +void icmp_time_exceeded(struct pbuf *p, enum icmp_te_type t); + +#endif /* LWIP_IPV4 && LWIP_ICMP */ + +#if LWIP_IPV4 && LWIP_IPV6 +#if LWIP_ICMP && LWIP_ICMP6 +#define icmp_port_unreach(isipv6, pbuf) ((isipv6) ? \ + icmp6_dest_unreach(pbuf, ICMP6_DUR_PORT) : \ + icmp_dest_unreach(pbuf, ICMP_DUR_PORT)) +#elif LWIP_ICMP +#define icmp_port_unreach(isipv6, pbuf) do{ if(!(isipv6)) { icmp_dest_unreach(pbuf, ICMP_DUR_PORT);}}while(0) +#elif LWIP_ICMP6 +#define icmp_port_unreach(isipv6, pbuf) do{ if(isipv6) { icmp6_dest_unreach(pbuf, ICMP6_DUR_PORT);}}while(0) +#else +#define icmp_port_unreach(isipv6, pbuf) +#endif +#elif LWIP_IPV6 && LWIP_ICMP6 +#define icmp_port_unreach(isipv6, pbuf) icmp6_dest_unreach(pbuf, ICMP6_DUR_PORT) +#elif LWIP_IPV4 && LWIP_ICMP +#define icmp_port_unreach(isipv6, pbuf) icmp_dest_unreach(pbuf, ICMP_DUR_PORT) +#else /* (LWIP_IPV6 && LWIP_ICMP6) || (LWIP_IPV4 && LWIP_ICMP) */ +#define icmp_port_unreach(isipv6, pbuf) +#endif /* (LWIP_IPV6 && LWIP_ICMP6) || (LWIP_IPV4 && LWIP_ICMP) LWIP_IPV4*/ + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_ICMP_H */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/icmp6.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/icmp6.h new file mode 100644 index 0000000..cb383d9 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/icmp6.h @@ -0,0 +1,72 @@ +/** + * @file + * + * IPv6 version of ICMP, as per RFC 4443. + */ + +/* + * Copyright (c) 2010 Inico Technologies Ltd. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. 
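The ICMP functions above are invoked by the stack core rather than by applications; for instance, when a UDP datagram arrives on a port with no listener, the core answers with a port-unreachable via the icmp_port_unreach() helper. A hedged illustration of that internal pattern (not code from this patch):

#include "lwip/icmp.h"
#include "lwip/pbuf.h"

/* called with a pbuf still pointing at the offending IP header */
static void reject_datagram(struct pbuf *p, int is_ipv6)
{
  /* picks icmp_dest_unreach() or icmp6_dest_unreach() depending on the build */
  icmp_port_unreach(is_ipv6, p);
}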
+ * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Ivan Delamer + * + * + * Please coordinate changes and requests with Ivan Delamer + * + */ +#ifndef LWIP_HDR_ICMP6_H +#define LWIP_HDR_ICMP6_H + +#include "lwip/opt.h" +#include "lwip/pbuf.h" +#include "lwip/ip6_addr.h" +#include "lwip/netif.h" +#include "lwip/prot/icmp6.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#if LWIP_ICMP6 && LWIP_IPV6 /* don't build if not configured for use in lwipopts.h */ + +void icmp6_input(struct pbuf *p, struct netif *inp); +void icmp6_dest_unreach(struct pbuf *p, enum icmp6_dur_code c); +void icmp6_packet_too_big(struct pbuf *p, u32_t mtu); +void icmp6_time_exceeded(struct pbuf *p, enum icmp6_te_code c); +void icmp6_time_exceeded_with_addrs(struct pbuf *p, enum icmp6_te_code c, + const ip6_addr_t *src_addr, const ip6_addr_t *dest_addr); +void icmp6_param_problem(struct pbuf *p, enum icmp6_pp_code c, const void *pointer); + +#endif /* LWIP_ICMP6 && LWIP_IPV6 */ + + +#ifdef __cplusplus +} +#endif + + +#endif /* LWIP_HDR_ICMP6_H */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/if_api.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/if_api.h new file mode 100644 index 0000000..d8261f7 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/if_api.h @@ -0,0 +1,68 @@ +/** + * @file + * Interface Identification APIs from: + * RFC 3493: Basic Socket Interface Extensions for IPv6 + * Section 4: Interface Identification + */ + +/* + * Copyright (c) 2017 Joel Cunningham, Garmin International, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Joel Cunningham + * + */ +#ifndef LWIP_HDR_IF_H +#define LWIP_HDR_IF_H + +#include "lwip/opt.h" + +#if LWIP_SOCKET /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/netif.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#define IF_NAMESIZE NETIF_NAMESIZE + +char * lwip_if_indextoname(unsigned int ifindex, char *ifname); +unsigned int lwip_if_nametoindex(const char *ifname); + +#if LWIP_COMPAT_SOCKETS +#define if_indextoname(ifindex, ifname) lwip_if_indextoname(ifindex,ifname) +#define if_nametoindex(ifname) lwip_if_nametoindex(ifname) +#endif /* LWIP_COMPAT_SOCKETS */ + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_SOCKET */ + +#endif /* LWIP_HDR_IF_H */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/igmp.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/igmp.h new file mode 100644 index 0000000..ca52a2d --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/igmp.h @@ -0,0 +1,115 @@ +/** + * @file + * IGMP API + */ + +/* + * Copyright (c) 2002 CITEL Technologies Ltd. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of CITEL Technologies Ltd nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY CITEL TECHNOLOGIES AND CONTRIBUTORS ``AS IS'' + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL CITEL TECHNOLOGIES OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * This file is a contribution to the lwIP TCP/IP stack. + * The Swedish Institute of Computer Science and Adam Dunkels + * are specifically granted permission to redistribute this + * source code. 
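A small sketch for the interface-name helpers above (RFC 3493 section 4), assuming LWIP_SOCKET is enabled; the name buffer must hold at least IF_NAMESIZE bytes, and the function name here is an assumption.

#include "lwip/if_api.h"
#include <string.h>

void print_netif_name_of_index(unsigned int idx)
{
  char name[IF_NAMESIZE];
  memset(name, 0, sizeof(name));
  if (lwip_if_indextoname(idx, name) != NULL) {
    /* `name` now holds the two-letter netif name plus number */
    unsigned int back = lwip_if_nametoindex(name);
    (void)back;   /* round-trips to `idx` when the interface exists */
  }
}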
+*/ + +#ifndef LWIP_HDR_IGMP_H +#define LWIP_HDR_IGMP_H + +#include "lwip/opt.h" +#include "lwip/ip_addr.h" +#include "lwip/netif.h" +#include "lwip/pbuf.h" + +#if LWIP_IPV4 && LWIP_IGMP /* don't build if not configured for use in lwipopts.h */ + +#ifdef __cplusplus +extern "C" { +#endif + +/* IGMP timer */ +#define IGMP_TMR_INTERVAL 100 /* Milliseconds */ +#define IGMP_V1_DELAYING_MEMBER_TMR (1000/IGMP_TMR_INTERVAL) +#define IGMP_JOIN_DELAYING_MEMBER_TMR (500 /IGMP_TMR_INTERVAL) + +/* Compatibility defines (don't use for new code) */ +#define IGMP_DEL_MAC_FILTER NETIF_DEL_MAC_FILTER +#define IGMP_ADD_MAC_FILTER NETIF_ADD_MAC_FILTER + +/** + * igmp group structure - there is + * a list of groups for each interface + * these should really be linked from the interface, but + * if we keep them separate we will not affect the lwip original code + * too much + * + * There will be a group for the all systems group address but this + * will not run the state machine as it is used to kick off reports + * from all the other groups + */ +struct igmp_group { + /** next link */ + struct igmp_group *next; + /** multicast address */ + ip4_addr_t group_address; + /** signifies we were the last person to report */ + u8_t last_reporter_flag; + /** current state of the group */ + u8_t group_state; + /** timer for reporting, negative is OFF */ + u16_t timer; + /** counter of simultaneous uses */ + u8_t use; +}; + +/* Prototypes */ +void igmp_init(void); +err_t igmp_start(struct netif *netif); +err_t igmp_stop(struct netif *netif); +void igmp_report_groups(struct netif *netif); +struct igmp_group *igmp_lookfor_group(struct netif *ifp, const ip4_addr_t *addr); +void igmp_input(struct pbuf *p, struct netif *inp, const ip4_addr_t *dest); +err_t igmp_joingroup(const ip4_addr_t *ifaddr, const ip4_addr_t *groupaddr); +err_t igmp_joingroup_netif(struct netif *netif, const ip4_addr_t *groupaddr); +err_t igmp_leavegroup(const ip4_addr_t *ifaddr, const ip4_addr_t *groupaddr); +err_t igmp_leavegroup_netif(struct netif *netif, const ip4_addr_t *groupaddr); +void igmp_tmr(void); + +/** @ingroup igmp + * Get list head of IGMP groups for netif. + * Note: The allsystems group IP is contained in the list as first entry. + * @see @ref netif_set_igmp_mac_filter() + */ +#define netif_igmp_data(netif) ((struct igmp_group *)netif_get_client_data(netif, LWIP_NETIF_CLIENT_DATA_INDEX_IGMP)) + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_IPV4 && LWIP_IGMP */ + +#endif /* LWIP_HDR_IGMP_H */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/inet.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/inet.h new file mode 100644 index 0000000..36f6ec7 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/inet.h @@ -0,0 +1,169 @@ +/** + * @file + * This file (together with sockets.h) aims to provide structs and functions from + * - arpa/inet.h + * - netinet/in.h + * + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ +#ifndef LWIP_HDR_INET_H +#define LWIP_HDR_INET_H + +#include "lwip/opt.h" +#include "lwip/def.h" +#include "lwip/ip_addr.h" +#include "lwip/ip6_addr.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* If your port already typedef's in_addr_t, define IN_ADDR_T_DEFINED + to prevent this code from redefining it. */ +#if !defined(in_addr_t) && !defined(IN_ADDR_T_DEFINED) +typedef u32_t in_addr_t; +#endif + +struct in_addr { + in_addr_t s_addr; +}; + +struct in6_addr { + union { + u32_t u32_addr[4]; + u8_t u8_addr[16]; + } un; +#define s6_addr un.u8_addr +}; + +/** 255.255.255.255 */ +#define INADDR_NONE IPADDR_NONE +/** 127.0.0.1 */ +#define INADDR_LOOPBACK IPADDR_LOOPBACK +/** 0.0.0.0 */ +#define INADDR_ANY IPADDR_ANY +/** 255.255.255.255 */ +#define INADDR_BROADCAST IPADDR_BROADCAST + +/** This macro can be used to initialize a variable of type struct in6_addr + to the IPv6 wildcard address. */ +#define IN6ADDR_ANY_INIT {{{0,0,0,0}}} +/** This macro can be used to initialize a variable of type struct in6_addr + to the IPv6 loopback address. */ +#define IN6ADDR_LOOPBACK_INIT {{{0,0,0,PP_HTONL(1)}}} +/** This variable is initialized by the system to contain the wildcard IPv6 address. */ +extern const struct in6_addr in6addr_any; + +/* Definitions of the bits in an (IPv4) Internet address integer. + + On subnets, host and network parts are found according to + the subnet mask, not these masks. */ +#define IN_CLASSA(a) IP_CLASSA(a) +#define IN_CLASSA_NET IP_CLASSA_NET +#define IN_CLASSA_NSHIFT IP_CLASSA_NSHIFT +#define IN_CLASSA_HOST IP_CLASSA_HOST +#define IN_CLASSA_MAX IP_CLASSA_MAX + +#define IN_CLASSB(b) IP_CLASSB(b) +#define IN_CLASSB_NET IP_CLASSB_NET +#define IN_CLASSB_NSHIFT IP_CLASSB_NSHIFT +#define IN_CLASSB_HOST IP_CLASSB_HOST +#define IN_CLASSB_MAX IP_CLASSB_MAX + +#define IN_CLASSC(c) IP_CLASSC(c) +#define IN_CLASSC_NET IP_CLASSC_NET +#define IN_CLASSC_NSHIFT IP_CLASSC_NSHIFT +#define IN_CLASSC_HOST IP_CLASSC_HOST +#define IN_CLASSC_MAX IP_CLASSC_MAX + +#define IN_CLASSD(d) IP_CLASSD(d) +#define IN_CLASSD_NET IP_CLASSD_NET /* These ones aren't really */ +#define IN_CLASSD_NSHIFT IP_CLASSD_NSHIFT /* net and host fields, but */ +#define IN_CLASSD_HOST IP_CLASSD_HOST /* routing needn't know. 
*/ +#define IN_CLASSD_MAX IP_CLASSD_MAX + +#define IN_MULTICAST(a) IP_MULTICAST(a) + +#define IN_EXPERIMENTAL(a) IP_EXPERIMENTAL(a) +#define IN_BADCLASS(a) IP_BADCLASS(a) + +#define IN_LOOPBACKNET IP_LOOPBACKNET + + +#ifndef INET_ADDRSTRLEN +#define INET_ADDRSTRLEN IP4ADDR_STRLEN_MAX +#endif +#if LWIP_IPV6 +#ifndef INET6_ADDRSTRLEN +#define INET6_ADDRSTRLEN IP6ADDR_STRLEN_MAX +#endif +#endif + +#if LWIP_IPV4 + +#define inet_addr_from_ip4addr(target_inaddr, source_ipaddr) ((target_inaddr)->s_addr = ip4_addr_get_u32(source_ipaddr)) +#define inet_addr_to_ip4addr(target_ipaddr, source_inaddr) (ip4_addr_set_u32(target_ipaddr, (source_inaddr)->s_addr)) + +/* directly map this to the lwip internal functions */ +#define inet_addr(cp) ipaddr_addr(cp) +#define inet_aton(cp, addr) ip4addr_aton(cp, (ip4_addr_t*)addr) +#define inet_ntoa(addr) ip4addr_ntoa((const ip4_addr_t*)&(addr)) +#define inet_ntoa_r(addr, buf, buflen) ip4addr_ntoa_r((const ip4_addr_t*)&(addr), buf, buflen) + +#endif /* LWIP_IPV4 */ + +#if LWIP_IPV6 +#define inet6_addr_from_ip6addr(target_in6addr, source_ip6addr) {(target_in6addr)->un.u32_addr[0] = (source_ip6addr)->addr[0]; \ + (target_in6addr)->un.u32_addr[1] = (source_ip6addr)->addr[1]; \ + (target_in6addr)->un.u32_addr[2] = (source_ip6addr)->addr[2]; \ + (target_in6addr)->un.u32_addr[3] = (source_ip6addr)->addr[3];} +#define inet6_addr_to_ip6addr(target_ip6addr, source_in6addr) {(target_ip6addr)->addr[0] = (source_in6addr)->un.u32_addr[0]; \ + (target_ip6addr)->addr[1] = (source_in6addr)->un.u32_addr[1]; \ + (target_ip6addr)->addr[2] = (source_in6addr)->un.u32_addr[2]; \ + (target_ip6addr)->addr[3] = (source_in6addr)->un.u32_addr[3]; \ + ip6_addr_clear_zone(target_ip6addr);} + +/* directly map this to the lwip internal functions */ +#define inet6_aton(cp, addr) ip6addr_aton(cp, (ip6_addr_t*)addr) +#define inet6_ntoa(addr) ip6addr_ntoa((const ip6_addr_t*)&(addr)) +#define inet6_ntoa_r(addr, buf, buflen) ip6addr_ntoa_r((const ip6_addr_t*)&(addr), buf, buflen) + +#endif /* LWIP_IPV6 */ + + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_INET_H */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/inet_chksum.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/inet_chksum.h new file mode 100644 index 0000000..46ec2ae --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/inet_chksum.h @@ -0,0 +1,105 @@ +/** + * @file + * IP checksum calculation functions + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ +#ifndef LWIP_HDR_INET_CHKSUM_H +#define LWIP_HDR_INET_CHKSUM_H + +#include "lwip/opt.h" + +#include "lwip/pbuf.h" +#include "lwip/ip_addr.h" + +/** Swap the bytes in an u16_t: much like lwip_htons() for little-endian */ +#ifndef SWAP_BYTES_IN_WORD +#define SWAP_BYTES_IN_WORD(w) (((w) & 0xff) << 8) | (((w) & 0xff00) >> 8) +#endif /* SWAP_BYTES_IN_WORD */ + +/** Split an u32_t in two u16_ts and add them up */ +#ifndef FOLD_U32T +#define FOLD_U32T(u) ((u32_t)(((u) >> 16) + ((u) & 0x0000ffffUL))) +#endif + +#if LWIP_CHECKSUM_ON_COPY +/** Function-like macro: same as MEMCPY but returns the checksum of copied data + as u16_t */ +# ifndef LWIP_CHKSUM_COPY +# define LWIP_CHKSUM_COPY(dst, src, len) lwip_chksum_copy(dst, src, len) +# ifndef LWIP_CHKSUM_COPY_ALGORITHM +# define LWIP_CHKSUM_COPY_ALGORITHM 1 +# endif /* LWIP_CHKSUM_COPY_ALGORITHM */ +# else /* LWIP_CHKSUM_COPY */ +# define LWIP_CHKSUM_COPY_ALGORITHM 0 +# endif /* LWIP_CHKSUM_COPY */ +#else /* LWIP_CHECKSUM_ON_COPY */ +# define LWIP_CHKSUM_COPY_ALGORITHM 0 +#endif /* LWIP_CHECKSUM_ON_COPY */ + +#ifdef __cplusplus +extern "C" { +#endif + +u16_t inet_chksum(const void *dataptr, u16_t len); +u16_t inet_chksum_pbuf(struct pbuf *p); +#if LWIP_CHKSUM_COPY_ALGORITHM +u16_t lwip_chksum_copy(void *dst, const void *src, u16_t len); +#endif /* LWIP_CHKSUM_COPY_ALGORITHM */ + +#if LWIP_IPV4 +u16_t inet_chksum_pseudo(struct pbuf *p, u8_t proto, u16_t proto_len, + const ip4_addr_t *src, const ip4_addr_t *dest); +u16_t inet_chksum_pseudo_partial(struct pbuf *p, u8_t proto, + u16_t proto_len, u16_t chksum_len, const ip4_addr_t *src, const ip4_addr_t *dest); +#endif /* LWIP_IPV4 */ + +#if LWIP_IPV6 +u16_t ip6_chksum_pseudo(struct pbuf *p, u8_t proto, u16_t proto_len, + const ip6_addr_t *src, const ip6_addr_t *dest); +u16_t ip6_chksum_pseudo_partial(struct pbuf *p, u8_t proto, u16_t proto_len, + u16_t chksum_len, const ip6_addr_t *src, const ip6_addr_t *dest); +#endif /* LWIP_IPV6 */ + + +u16_t ip_chksum_pseudo(struct pbuf *p, u8_t proto, u16_t proto_len, + const ip_addr_t *src, const ip_addr_t *dest); +u16_t ip_chksum_pseudo_partial(struct pbuf *p, u8_t proto, u16_t proto_len, + u16_t chksum_len, const ip_addr_t *src, const ip_addr_t *dest); + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_INET_H */ + diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/init.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/init.h new file mode 100644 index 0000000..48c256b --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/init.h @@ -0,0 +1,100 @@ +/** + * @file + * 
lwIP initialization API + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ +#ifndef LWIP_HDR_INIT_H +#define LWIP_HDR_INIT_H + +#include "lwip/opt.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * @defgroup lwip_version Version + * @ingroup lwip + * @{ + */ + +/** X.x.x: Major version of the stack */ +#define LWIP_VERSION_MAJOR 2 +/** x.X.x: Minor version of the stack */ +#define LWIP_VERSION_MINOR 1 +/** x.x.X: Revision of the stack */ +#define LWIP_VERSION_REVISION 2 +/** For release candidates, this is set to 1..254 + * For official releases, this is set to 255 (LWIP_RC_RELEASE) + * For development versions (Git), this is set to 0 (LWIP_RC_DEVELOPMENT) */ +#define LWIP_VERSION_RC LWIP_RC_RELEASE + +/** LWIP_VERSION_RC is set to LWIP_RC_RELEASE for official releases */ +#define LWIP_RC_RELEASE 255 +/** LWIP_VERSION_RC is set to LWIP_RC_DEVELOPMENT for Git versions */ +#define LWIP_RC_DEVELOPMENT 0 + +#define LWIP_VERSION_IS_RELEASE (LWIP_VERSION_RC == LWIP_RC_RELEASE) +#define LWIP_VERSION_IS_DEVELOPMENT (LWIP_VERSION_RC == LWIP_RC_DEVELOPMENT) +#define LWIP_VERSION_IS_RC ((LWIP_VERSION_RC != LWIP_RC_RELEASE) && (LWIP_VERSION_RC != LWIP_RC_DEVELOPMENT)) + +/* Some helper defines to get a version string */ +#define LWIP_VERSTR2(x) #x +#define LWIP_VERSTR(x) LWIP_VERSTR2(x) +#if LWIP_VERSION_IS_RELEASE +#define LWIP_VERSION_STRING_SUFFIX "" +#elif LWIP_VERSION_IS_DEVELOPMENT +#define LWIP_VERSION_STRING_SUFFIX "d" +#else +#define LWIP_VERSION_STRING_SUFFIX "rc" LWIP_VERSTR(LWIP_VERSION_RC) +#endif + +/** Provides the version of the stack */ +#define LWIP_VERSION ((LWIP_VERSION_MAJOR) << 24 | (LWIP_VERSION_MINOR) << 16 | \ + (LWIP_VERSION_REVISION) << 8 | (LWIP_VERSION_RC)) +/** Provides the version of the stack as string */ +#define LWIP_VERSION_STRING LWIP_VERSTR(LWIP_VERSION_MAJOR) "." LWIP_VERSTR(LWIP_VERSION_MINOR) "." 
LWIP_VERSTR(LWIP_VERSION_REVISION) LWIP_VERSION_STRING_SUFFIX + +/** + * @} + */ + +/* Modules initialization */ +void lwip_init(void); + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_INIT_H */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/ip.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/ip.h new file mode 100644 index 0000000..28889d5 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/ip.h @@ -0,0 +1,330 @@ +/** + * @file + * IP API + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ +#ifndef LWIP_HDR_IP_H +#define LWIP_HDR_IP_H + +#include "lwip/opt.h" + +#include "lwip/def.h" +#include "lwip/pbuf.h" +#include "lwip/ip_addr.h" +#include "lwip/err.h" +#include "lwip/netif.h" +#include "lwip/ip4.h" +#include "lwip/ip6.h" +#include "lwip/prot/ip.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* This is passed as the destination address to ip_output_if (not + to ip_output), meaning that an IP header already is constructed + in the pbuf. This is used when TCP retransmits. */ +#define LWIP_IP_HDRINCL NULL + +/** pbufs passed to IP must have a ref-count of 1 as their payload pointer + gets altered as the packet is passed down the stack */ +#ifndef LWIP_IP_CHECK_PBUF_REF_COUNT_FOR_TX +#define LWIP_IP_CHECK_PBUF_REF_COUNT_FOR_TX(p) LWIP_ASSERT("p->ref == 1", (p)->ref == 1) +#endif + +#if LWIP_NETIF_USE_HINTS +#define IP_PCB_NETIFHINT ;struct netif_hint netif_hints +#else /* LWIP_NETIF_USE_HINTS */ +#define IP_PCB_NETIFHINT +#endif /* LWIP_NETIF_USE_HINTS */ + +/** This is the common part of all PCB types. It needs to be at the + beginning of a PCB type definition. It is located here so that + changes to this common part are made in one location instead of + having to change all PCB structs. 
*/ +#define IP_PCB \ + /* ip addresses in network byte order */ \ + ip_addr_t local_ip; \ + ip_addr_t remote_ip; \ + /* Bound netif index */ \ + u8_t netif_idx; \ + /* Socket options */ \ + u8_t so_options; \ + /* Type Of Service */ \ + u8_t tos; \ + /* Time To Live */ \ + u8_t ttl \ + /* link layer address resolution hint */ \ + IP_PCB_NETIFHINT + +struct ip_pcb { + /* Common members of all PCB types */ + IP_PCB; +}; + +/* + * Option flags per-socket. These are the same like SO_XXX in sockets.h + */ +#define SOF_REUSEADDR 0x04U /* allow local address reuse */ +#define SOF_KEEPALIVE 0x08U /* keep connections alive */ +#define SOF_BROADCAST 0x20U /* permit to send and to receive broadcast messages (see IP_SOF_BROADCAST option) */ + +/* These flags are inherited (e.g. from a listen-pcb to a connection-pcb): */ +#define SOF_INHERITED (SOF_REUSEADDR|SOF_KEEPALIVE) + +/** Global variables of this module, kept in a struct for efficient access using base+index. */ +struct ip_globals +{ + /** The interface that accepted the packet for the current callback invocation. */ + struct netif *current_netif; + /** The interface that received the packet for the current callback invocation. */ + struct netif *current_input_netif; +#if LWIP_IPV4 + /** Header of the input packet currently being processed. */ + const struct ip_hdr *current_ip4_header; +#endif /* LWIP_IPV4 */ +#if LWIP_IPV6 + /** Header of the input IPv6 packet currently being processed. */ + struct ip6_hdr *current_ip6_header; +#endif /* LWIP_IPV6 */ + /** Total header length of current_ip4/6_header (i.e. after this, the UDP/TCP header starts) */ + u16_t current_ip_header_tot_len; + /** Source IP address of current_header */ + ip_addr_t current_iphdr_src; + /** Destination IP address of current_header */ + ip_addr_t current_iphdr_dest; +}; +extern struct ip_globals ip_data; + + +/** Get the interface that accepted the current packet. + * This may or may not be the receiving netif, depending on your netif/network setup. + * This function must only be called from a receive callback (udp_recv, + * raw_recv, tcp_accept). It will return NULL otherwise. */ +#define ip_current_netif() (ip_data.current_netif) +/** Get the interface that received the current packet. + * This function must only be called from a receive callback (udp_recv, + * raw_recv, tcp_accept). It will return NULL otherwise. */ +#define ip_current_input_netif() (ip_data.current_input_netif) +/** Total header length of ip(6)_current_header() (i.e. after this, the UDP/TCP header starts) */ +#define ip_current_header_tot_len() (ip_data.current_ip_header_tot_len) +/** Source IP address of current_header */ +#define ip_current_src_addr() (&ip_data.current_iphdr_src) +/** Destination IP address of current_header */ +#define ip_current_dest_addr() (&ip_data.current_iphdr_dest) + +#if LWIP_IPV4 && LWIP_IPV6 +/** Get the IPv4 header of the current packet. + * This function must only be called from a receive callback (udp_recv, + * raw_recv, tcp_accept). It will return NULL otherwise. */ +#define ip4_current_header() ip_data.current_ip4_header +/** Get the IPv6 header of the current packet. + * This function must only be called from a receive callback (udp_recv, + * raw_recv, tcp_accept). It will return NULL otherwise. 
*/ +#define ip6_current_header() ((const struct ip6_hdr*)(ip_data.current_ip6_header)) +/** Returns TRUE if the current IP input packet is IPv6, FALSE if it is IPv4 */ +#define ip_current_is_v6() (ip6_current_header() != NULL) +/** Source IPv6 address of current_header */ +#define ip6_current_src_addr() (ip_2_ip6(&ip_data.current_iphdr_src)) +/** Destination IPv6 address of current_header */ +#define ip6_current_dest_addr() (ip_2_ip6(&ip_data.current_iphdr_dest)) +/** Get the transport layer protocol */ +#define ip_current_header_proto() (ip_current_is_v6() ? \ + IP6H_NEXTH(ip6_current_header()) :\ + IPH_PROTO(ip4_current_header())) +/** Get the transport layer header */ +#define ip_next_header_ptr() ((const void*)((ip_current_is_v6() ? \ + (const u8_t*)ip6_current_header() : (const u8_t*)ip4_current_header()) + ip_current_header_tot_len())) + +/** Source IP4 address of current_header */ +#define ip4_current_src_addr() (ip_2_ip4(&ip_data.current_iphdr_src)) +/** Destination IP4 address of current_header */ +#define ip4_current_dest_addr() (ip_2_ip4(&ip_data.current_iphdr_dest)) + +#elif LWIP_IPV4 /* LWIP_IPV4 && LWIP_IPV6 */ + +/** Get the IPv4 header of the current packet. + * This function must only be called from a receive callback (udp_recv, + * raw_recv, tcp_accept). It will return NULL otherwise. */ +#define ip4_current_header() ip_data.current_ip4_header +/** Always returns FALSE when only supporting IPv4 only */ +#define ip_current_is_v6() 0 +/** Get the transport layer protocol */ +#define ip_current_header_proto() IPH_PROTO(ip4_current_header()) +/** Get the transport layer header */ +#define ip_next_header_ptr() ((const void*)((const u8_t*)ip4_current_header() + ip_current_header_tot_len())) +/** Source IP4 address of current_header */ +#define ip4_current_src_addr() (&ip_data.current_iphdr_src) +/** Destination IP4 address of current_header */ +#define ip4_current_dest_addr() (&ip_data.current_iphdr_dest) + +#elif LWIP_IPV6 /* LWIP_IPV4 && LWIP_IPV6 */ + +/** Get the IPv6 header of the current packet. + * This function must only be called from a receive callback (udp_recv, + * raw_recv, tcp_accept). It will return NULL otherwise. 
*/ +#define ip6_current_header() ((const struct ip6_hdr*)(ip_data.current_ip6_header)) +/** Always returns TRUE when only supporting IPv6 only */ +#define ip_current_is_v6() 1 +/** Get the transport layer protocol */ +#define ip_current_header_proto() IP6H_NEXTH(ip6_current_header()) +/** Get the transport layer header */ +#define ip_next_header_ptr() ((const void*)(((const u8_t*)ip6_current_header()) + ip_current_header_tot_len())) +/** Source IP6 address of current_header */ +#define ip6_current_src_addr() (&ip_data.current_iphdr_src) +/** Destination IP6 address of current_header */ +#define ip6_current_dest_addr() (&ip_data.current_iphdr_dest) + +#endif /* LWIP_IPV6 */ + +/** Union source address of current_header */ +#define ip_current_src_addr() (&ip_data.current_iphdr_src) +/** Union destination address of current_header */ +#define ip_current_dest_addr() (&ip_data.current_iphdr_dest) + +/** Gets an IP pcb option (SOF_* flags) */ +#define ip_get_option(pcb, opt) ((pcb)->so_options & (opt)) +/** Sets an IP pcb option (SOF_* flags) */ +#define ip_set_option(pcb, opt) ((pcb)->so_options = (u8_t)((pcb)->so_options | (opt))) +/** Resets an IP pcb option (SOF_* flags) */ +#define ip_reset_option(pcb, opt) ((pcb)->so_options = (u8_t)((pcb)->so_options & ~(opt))) + +#if LWIP_IPV4 && LWIP_IPV6 +/** + * @ingroup ip + * Output IP packet, netif is selected by source address + */ +#define ip_output(p, src, dest, ttl, tos, proto) \ + (IP_IS_V6(dest) ? \ + ip6_output(p, ip_2_ip6(src), ip_2_ip6(dest), ttl, tos, proto) : \ + ip4_output(p, ip_2_ip4(src), ip_2_ip4(dest), ttl, tos, proto)) +/** + * @ingroup ip + * Output IP packet to specified interface + */ +#define ip_output_if(p, src, dest, ttl, tos, proto, netif) \ + (IP_IS_V6(dest) ? \ + ip6_output_if(p, ip_2_ip6(src), ip_2_ip6(dest), ttl, tos, proto, netif) : \ + ip4_output_if(p, ip_2_ip4(src), ip_2_ip4(dest), ttl, tos, proto, netif)) +/** + * @ingroup ip + * Output IP packet to interface specifying source address + */ +#define ip_output_if_src(p, src, dest, ttl, tos, proto, netif) \ + (IP_IS_V6(dest) ? \ + ip6_output_if_src(p, ip_2_ip6(src), ip_2_ip6(dest), ttl, tos, proto, netif) : \ + ip4_output_if_src(p, ip_2_ip4(src), ip_2_ip4(dest), ttl, tos, proto, netif)) +/** Output IP packet that already includes an IP header. */ +#define ip_output_if_hdrincl(p, src, dest, netif) \ + (IP_IS_V6(dest) ? \ + ip6_output_if(p, ip_2_ip6(src), LWIP_IP_HDRINCL, 0, 0, 0, netif) : \ + ip4_output_if(p, ip_2_ip4(src), LWIP_IP_HDRINCL, 0, 0, 0, netif)) +/** Output IP packet with netif_hint */ +#define ip_output_hinted(p, src, dest, ttl, tos, proto, netif_hint) \ + (IP_IS_V6(dest) ? \ + ip6_output_hinted(p, ip_2_ip6(src), ip_2_ip6(dest), ttl, tos, proto, netif_hint) : \ + ip4_output_hinted(p, ip_2_ip4(src), ip_2_ip4(dest), ttl, tos, proto, netif_hint)) +/** + * @ingroup ip + * Get netif for address combination. See \ref ip6_route and \ref ip4_route + */ +#define ip_route(src, dest) \ + (IP_IS_V6(dest) ? \ + ip6_route(ip_2_ip6(src), ip_2_ip6(dest)) : \ + ip4_route_src(ip_2_ip4(src), ip_2_ip4(dest))) +/** + * @ingroup ip + * Get netif for IP. + */ +#define ip_netif_get_local_ip(netif, dest) (IP_IS_V6(dest) ? \ + ip6_netif_get_local_ip(netif, ip_2_ip6(dest)) : \ + ip4_netif_get_local_ip(netif)) +#define ip_debug_print(is_ipv6, p) ((is_ipv6) ? 
ip6_debug_print(p) : ip4_debug_print(p)) + +err_t ip_input(struct pbuf *p, struct netif *inp); + +#elif LWIP_IPV4 /* LWIP_IPV4 && LWIP_IPV6 */ + +#define ip_output(p, src, dest, ttl, tos, proto) \ + ip4_output(p, src, dest, ttl, tos, proto) +#define ip_output_if(p, src, dest, ttl, tos, proto, netif) \ + ip4_output_if(p, src, dest, ttl, tos, proto, netif) +#define ip_output_if_src(p, src, dest, ttl, tos, proto, netif) \ + ip4_output_if_src(p, src, dest, ttl, tos, proto, netif) +#define ip_output_hinted(p, src, dest, ttl, tos, proto, netif_hint) \ + ip4_output_hinted(p, src, dest, ttl, tos, proto, netif_hint) +#define ip_output_if_hdrincl(p, src, dest, netif) \ + ip4_output_if(p, src, LWIP_IP_HDRINCL, 0, 0, 0, netif) +#define ip_route(src, dest) \ + ip4_route_src(src, dest) +#define ip_netif_get_local_ip(netif, dest) \ + ip4_netif_get_local_ip(netif) +#define ip_debug_print(is_ipv6, p) ip4_debug_print(p) + +#define ip_input ip4_input + +#elif LWIP_IPV6 /* LWIP_IPV4 && LWIP_IPV6 */ + +#define ip_output(p, src, dest, ttl, tos, proto) \ + ip6_output(p, src, dest, ttl, tos, proto) +#define ip_output_if(p, src, dest, ttl, tos, proto, netif) \ + ip6_output_if(p, src, dest, ttl, tos, proto, netif) +#define ip_output_if_src(p, src, dest, ttl, tos, proto, netif) \ + ip6_output_if_src(p, src, dest, ttl, tos, proto, netif) +#define ip_output_hinted(p, src, dest, ttl, tos, proto, netif_hint) \ + ip6_output_hinted(p, src, dest, ttl, tos, proto, netif_hint) +#define ip_output_if_hdrincl(p, src, dest, netif) \ + ip6_output_if(p, src, LWIP_IP_HDRINCL, 0, 0, 0, netif) +#define ip_route(src, dest) \ + ip6_route(src, dest) +#define ip_netif_get_local_ip(netif, dest) \ + ip6_netif_get_local_ip(netif, dest) +#define ip_debug_print(is_ipv6, p) ip6_debug_print(p) + +#define ip_input ip6_input + +#endif /* LWIP_IPV6 */ + +#define ip_route_get_local_ip(src, dest, netif, ipaddr) do { \ + (netif) = ip_route(src, dest); \ + (ipaddr) = ip_netif_get_local_ip(netif, dest); \ +}while(0) + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_IP_H */ + + diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/ip4.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/ip4.h new file mode 100644 index 0000000..6409c4b --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/ip4.h @@ -0,0 +1,111 @@ +/** + * @file + * IPv4 API + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ +#ifndef LWIP_HDR_IP4_H +#define LWIP_HDR_IP4_H + +#include "lwip/opt.h" + +#if LWIP_IPV4 + +#include "lwip/def.h" +#include "lwip/pbuf.h" +#include "lwip/ip4_addr.h" +#include "lwip/err.h" +#include "lwip/netif.h" +#include "lwip/prot/ip4.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#ifdef LWIP_HOOK_IP4_ROUTE_SRC +#define LWIP_IPV4_SRC_ROUTING 1 +#else +#define LWIP_IPV4_SRC_ROUTING 0 +#endif + +/** Currently, the function ip_output_if_opt() is only used with IGMP */ +#define IP_OPTIONS_SEND (LWIP_IPV4 && LWIP_IGMP) + +#define ip_init() /* Compatibility define, no init needed. */ +struct netif *ip4_route(const ip4_addr_t *dest); +#if LWIP_IPV4_SRC_ROUTING +struct netif *ip4_route_src(const ip4_addr_t *src, const ip4_addr_t *dest); +#else /* LWIP_IPV4_SRC_ROUTING */ +#define ip4_route_src(src, dest) ip4_route(dest) +#endif /* LWIP_IPV4_SRC_ROUTING */ +err_t ip4_input(struct pbuf *p, struct netif *inp); +err_t ip4_output(struct pbuf *p, const ip4_addr_t *src, const ip4_addr_t *dest, + u8_t ttl, u8_t tos, u8_t proto); +err_t ip4_output_if(struct pbuf *p, const ip4_addr_t *src, const ip4_addr_t *dest, + u8_t ttl, u8_t tos, u8_t proto, struct netif *netif); +err_t ip4_output_if_src(struct pbuf *p, const ip4_addr_t *src, const ip4_addr_t *dest, + u8_t ttl, u8_t tos, u8_t proto, struct netif *netif); +#if LWIP_NETIF_USE_HINTS +err_t ip4_output_hinted(struct pbuf *p, const ip4_addr_t *src, const ip4_addr_t *dest, + u8_t ttl, u8_t tos, u8_t proto, struct netif_hint *netif_hint); +#endif /* LWIP_NETIF_USE_HINTS */ +#if IP_OPTIONS_SEND +err_t ip4_output_if_opt(struct pbuf *p, const ip4_addr_t *src, const ip4_addr_t *dest, + u8_t ttl, u8_t tos, u8_t proto, struct netif *netif, void *ip_options, + u16_t optlen); +err_t ip4_output_if_opt_src(struct pbuf *p, const ip4_addr_t *src, const ip4_addr_t *dest, + u8_t ttl, u8_t tos, u8_t proto, struct netif *netif, void *ip_options, + u16_t optlen); +#endif /* IP_OPTIONS_SEND */ + +#if LWIP_MULTICAST_TX_OPTIONS +void ip4_set_default_multicast_netif(struct netif* default_multicast_netif); +#endif /* LWIP_MULTICAST_TX_OPTIONS */ + +#define ip4_netif_get_local_ip(netif) (((netif) != NULL) ? netif_ip_addr4(netif) : NULL) + +#if IP_DEBUG +void ip4_debug_print(struct pbuf *p); +#else +#define ip4_debug_print(p) +#endif /* IP_DEBUG */ + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_IPV4 */ + +#endif /* LWIP_HDR_IP_H */ + + diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/ip4_addr.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/ip4_addr.h new file mode 100644 index 0000000..9990756 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/ip4_addr.h @@ -0,0 +1,216 @@ +/** + * @file + * IPv4 address API + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. 
+ * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ +#ifndef LWIP_HDR_IP4_ADDR_H +#define LWIP_HDR_IP4_ADDR_H + +#include "lwip/opt.h" +#include "lwip/def.h" + +#if LWIP_IPV4 + +#ifdef __cplusplus +extern "C" { +#endif + +/** This is the aligned version of ip4_addr_t, + used as local variable, on the stack, etc. */ +struct ip4_addr { + u32_t addr; +}; + +/** ip4_addr_t uses a struct for convenience only, so that the same defines can + * operate both on ip4_addr_t as well as on ip4_addr_p_t. */ +typedef struct ip4_addr ip4_addr_t; + +/* Forward declaration to not include netif.h */ +struct netif; + +/** 255.255.255.255 */ +#define IPADDR_NONE ((u32_t)0xffffffffUL) +/** 127.0.0.1 */ +#define IPADDR_LOOPBACK ((u32_t)0x7f000001UL) +/** 0.0.0.0 */ +#define IPADDR_ANY ((u32_t)0x00000000UL) +/** 255.255.255.255 */ +#define IPADDR_BROADCAST ((u32_t)0xffffffffUL) + +/* Definitions of the bits in an Internet address integer. + + On subnets, host and network parts are found according to + the subnet mask, not these masks. */ +#define IP_CLASSA(a) ((((u32_t)(a)) & 0x80000000UL) == 0) +#define IP_CLASSA_NET 0xff000000 +#define IP_CLASSA_NSHIFT 24 +#define IP_CLASSA_HOST (0xffffffff & ~IP_CLASSA_NET) +#define IP_CLASSA_MAX 128 + +#define IP_CLASSB(a) ((((u32_t)(a)) & 0xc0000000UL) == 0x80000000UL) +#define IP_CLASSB_NET 0xffff0000 +#define IP_CLASSB_NSHIFT 16 +#define IP_CLASSB_HOST (0xffffffff & ~IP_CLASSB_NET) +#define IP_CLASSB_MAX 65536 + +#define IP_CLASSC(a) ((((u32_t)(a)) & 0xe0000000UL) == 0xc0000000UL) +#define IP_CLASSC_NET 0xffffff00 +#define IP_CLASSC_NSHIFT 8 +#define IP_CLASSC_HOST (0xffffffff & ~IP_CLASSC_NET) + +#define IP_CLASSD(a) (((u32_t)(a) & 0xf0000000UL) == 0xe0000000UL) +#define IP_CLASSD_NET 0xf0000000 /* These ones aren't really */ +#define IP_CLASSD_NSHIFT 28 /* net and host fields, but */ +#define IP_CLASSD_HOST 0x0fffffff /* routing needn't know. 
*/ +#define IP_MULTICAST(a) IP_CLASSD(a) + +#define IP_EXPERIMENTAL(a) (((u32_t)(a) & 0xf0000000UL) == 0xf0000000UL) +#define IP_BADCLASS(a) (((u32_t)(a) & 0xf0000000UL) == 0xf0000000UL) + +#define IP_LOOPBACKNET 127 /* official! */ + +/** Set an IP address given by the four byte-parts */ +#define IP4_ADDR(ipaddr, a,b,c,d) (ipaddr)->addr = PP_HTONL(LWIP_MAKEU32(a,b,c,d)) + +/** Copy IP address - faster than ip4_addr_set: no NULL check */ +#define ip4_addr_copy(dest, src) ((dest).addr = (src).addr) +/** Safely copy one IP address to another (src may be NULL) */ +#define ip4_addr_set(dest, src) ((dest)->addr = \ + ((src) == NULL ? 0 : \ + (src)->addr)) +/** Set complete address to zero */ +#define ip4_addr_set_zero(ipaddr) ((ipaddr)->addr = 0) +/** Set address to IPADDR_ANY (no need for lwip_htonl()) */ +#define ip4_addr_set_any(ipaddr) ((ipaddr)->addr = IPADDR_ANY) +/** Set address to loopback address */ +#define ip4_addr_set_loopback(ipaddr) ((ipaddr)->addr = PP_HTONL(IPADDR_LOOPBACK)) +/** Check if an address is in the loopback region */ +#define ip4_addr_isloopback(ipaddr) (((ipaddr)->addr & PP_HTONL(IP_CLASSA_NET)) == PP_HTONL(((u32_t)IP_LOOPBACKNET) << 24)) +/** Safely copy one IP address to another and change byte order + * from host- to network-order. */ +#define ip4_addr_set_hton(dest, src) ((dest)->addr = \ + ((src) == NULL ? 0:\ + lwip_htonl((src)->addr))) +/** IPv4 only: set the IP address given as an u32_t */ +#define ip4_addr_set_u32(dest_ipaddr, src_u32) ((dest_ipaddr)->addr = (src_u32)) +/** IPv4 only: get the IP address as an u32_t */ +#define ip4_addr_get_u32(src_ipaddr) ((src_ipaddr)->addr) + +/** Get the network address by combining host address with netmask */ +#define ip4_addr_get_network(target, host, netmask) do { ((target)->addr = ((host)->addr) & ((netmask)->addr)); } while(0) + +/** + * Determine if two address are on the same network. + * + * @arg addr1 IP address 1 + * @arg addr2 IP address 2 + * @arg mask network identifier mask + * @return !0 if the network identifiers of both address match + */ +#define ip4_addr_netcmp(addr1, addr2, mask) (((addr1)->addr & \ + (mask)->addr) == \ + ((addr2)->addr & \ + (mask)->addr)) +#define ip4_addr_cmp(addr1, addr2) ((addr1)->addr == (addr2)->addr) + +#define ip4_addr_isany_val(addr1) ((addr1).addr == IPADDR_ANY) +#define ip4_addr_isany(addr1) ((addr1) == NULL || ip4_addr_isany_val(*(addr1))) + +#define ip4_addr_isbroadcast(addr1, netif) ip4_addr_isbroadcast_u32((addr1)->addr, netif) +u8_t ip4_addr_isbroadcast_u32(u32_t addr, const struct netif *netif); + +#define ip_addr_netmask_valid(netmask) ip4_addr_netmask_valid((netmask)->addr) +u8_t ip4_addr_netmask_valid(u32_t netmask); + +#define ip4_addr_ismulticast(addr1) (((addr1)->addr & PP_HTONL(0xf0000000UL)) == PP_HTONL(0xe0000000UL)) + +#define ip4_addr_islinklocal(addr1) (((addr1)->addr & PP_HTONL(0xffff0000UL)) == PP_HTONL(0xa9fe0000UL)) + +#define ip4_addr_debug_print_parts(debug, a, b, c, d) \ + LWIP_DEBUGF(debug, ("%" U16_F ".%" U16_F ".%" U16_F ".%" U16_F, a, b, c, d)) +#define ip4_addr_debug_print(debug, ipaddr) \ + ip4_addr_debug_print_parts(debug, \ + (u16_t)((ipaddr) != NULL ? ip4_addr1_16(ipaddr) : 0), \ + (u16_t)((ipaddr) != NULL ? ip4_addr2_16(ipaddr) : 0), \ + (u16_t)((ipaddr) != NULL ? ip4_addr3_16(ipaddr) : 0), \ + (u16_t)((ipaddr) != NULL ? 
ip4_addr4_16(ipaddr) : 0)) +#define ip4_addr_debug_print_val(debug, ipaddr) \ + ip4_addr_debug_print_parts(debug, \ + ip4_addr1_16_val(ipaddr), \ + ip4_addr2_16_val(ipaddr), \ + ip4_addr3_16_val(ipaddr), \ + ip4_addr4_16_val(ipaddr)) + +/* Get one byte from the 4-byte address */ +#define ip4_addr_get_byte(ipaddr, idx) (((const u8_t*)(&(ipaddr)->addr))[idx]) +#define ip4_addr1(ipaddr) ip4_addr_get_byte(ipaddr, 0) +#define ip4_addr2(ipaddr) ip4_addr_get_byte(ipaddr, 1) +#define ip4_addr3(ipaddr) ip4_addr_get_byte(ipaddr, 2) +#define ip4_addr4(ipaddr) ip4_addr_get_byte(ipaddr, 3) +/* Get one byte from the 4-byte address, but argument is 'ip4_addr_t', + * not a pointer */ +#define ip4_addr_get_byte_val(ipaddr, idx) ((u8_t)(((ipaddr).addr >> (idx * 8)) & 0xff)) +#define ip4_addr1_val(ipaddr) ip4_addr_get_byte_val(ipaddr, 0) +#define ip4_addr2_val(ipaddr) ip4_addr_get_byte_val(ipaddr, 1) +#define ip4_addr3_val(ipaddr) ip4_addr_get_byte_val(ipaddr, 2) +#define ip4_addr4_val(ipaddr) ip4_addr_get_byte_val(ipaddr, 3) +/* These are cast to u16_t, with the intent that they are often arguments + * to printf using the U16_F format from cc.h. */ +#define ip4_addr1_16(ipaddr) ((u16_t)ip4_addr1(ipaddr)) +#define ip4_addr2_16(ipaddr) ((u16_t)ip4_addr2(ipaddr)) +#define ip4_addr3_16(ipaddr) ((u16_t)ip4_addr3(ipaddr)) +#define ip4_addr4_16(ipaddr) ((u16_t)ip4_addr4(ipaddr)) +#define ip4_addr1_16_val(ipaddr) ((u16_t)ip4_addr1_val(ipaddr)) +#define ip4_addr2_16_val(ipaddr) ((u16_t)ip4_addr2_val(ipaddr)) +#define ip4_addr3_16_val(ipaddr) ((u16_t)ip4_addr3_val(ipaddr)) +#define ip4_addr4_16_val(ipaddr) ((u16_t)ip4_addr4_val(ipaddr)) + +#define IP4ADDR_STRLEN_MAX 16 + +/** For backwards compatibility */ +#define ip_ntoa(ipaddr) ipaddr_ntoa(ipaddr) + +u32_t ipaddr_addr(const char *cp); +int ip4addr_aton(const char *cp, ip4_addr_t *addr); +/** returns ptr to static buffer; not reentrant! */ +char *ip4addr_ntoa(const ip4_addr_t *addr); +char *ip4addr_ntoa_r(const ip4_addr_t *addr, char *buf, int buflen); + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_IPV4 */ + +#endif /* LWIP_HDR_IP_ADDR_H */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/ip4_frag.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/ip4_frag.h new file mode 100644 index 0000000..18cd2d0 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/ip4_frag.h @@ -0,0 +1,100 @@ +/** + * @file + * IP fragmentation/reassembly + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Jani Monoses + * + */ + +#ifndef LWIP_HDR_IP4_FRAG_H +#define LWIP_HDR_IP4_FRAG_H + +#include "lwip/opt.h" +#include "lwip/err.h" +#include "lwip/pbuf.h" +#include "lwip/netif.h" +#include "lwip/ip_addr.h" +#include "lwip/ip.h" + +#if LWIP_IPV4 + +#ifdef __cplusplus +extern "C" { +#endif + +#if IP_REASSEMBLY +/* The IP reassembly timer interval in milliseconds. */ +#define IP_TMR_INTERVAL 1000 + +/** IP reassembly helper struct. + * This is exported because memp needs to know the size. + */ +struct ip_reassdata { + struct ip_reassdata *next; + struct pbuf *p; + struct ip_hdr iphdr; + u16_t datagram_len; + u8_t flags; + u8_t timer; +}; + +void ip_reass_init(void); +void ip_reass_tmr(void); +struct pbuf * ip4_reass(struct pbuf *p); +#endif /* IP_REASSEMBLY */ + +#if IP_FRAG +#if !LWIP_NETIF_TX_SINGLE_PBUF +#ifndef LWIP_PBUF_CUSTOM_REF_DEFINED +#define LWIP_PBUF_CUSTOM_REF_DEFINED +/** A custom pbuf that holds a reference to another pbuf, which is freed + * when this custom pbuf is freed. This is used to create a custom PBUF_REF + * that points into the original pbuf. */ +struct pbuf_custom_ref { + /** 'base class' */ + struct pbuf_custom pc; + /** pointer to the original pbuf that is referenced */ + struct pbuf *original; +}; +#endif /* LWIP_PBUF_CUSTOM_REF_DEFINED */ +#endif /* !LWIP_NETIF_TX_SINGLE_PBUF */ + +err_t ip4_frag(struct pbuf *p, struct netif *netif, const ip4_addr_t *dest); +#endif /* IP_FRAG */ + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_IPV4 */ + +#endif /* LWIP_HDR_IP4_FRAG_H */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/ip6.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/ip6.h new file mode 100644 index 0000000..706f66d --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/ip6.h @@ -0,0 +1,93 @@ +/** + * @file + * + * IPv6 layer. + */ + +/* + * Copyright (c) 2010 Inico Technologies Ltd. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Ivan Delamer + * + * + * Please coordinate changes and requests with Ivan Delamer + * + */ +#ifndef LWIP_HDR_IP6_H +#define LWIP_HDR_IP6_H + +#include "lwip/opt.h" + +#if LWIP_IPV6 /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/ip6_addr.h" +#include "lwip/prot/ip6.h" +#include "lwip/def.h" +#include "lwip/pbuf.h" +#include "lwip/netif.h" + +#include "lwip/err.h" + +#ifdef __cplusplus +extern "C" { +#endif + +struct netif *ip6_route(const ip6_addr_t *src, const ip6_addr_t *dest); +const ip_addr_t *ip6_select_source_address(struct netif *netif, const ip6_addr_t * dest); +err_t ip6_input(struct pbuf *p, struct netif *inp); +err_t ip6_output(struct pbuf *p, const ip6_addr_t *src, const ip6_addr_t *dest, + u8_t hl, u8_t tc, u8_t nexth); +err_t ip6_output_if(struct pbuf *p, const ip6_addr_t *src, const ip6_addr_t *dest, + u8_t hl, u8_t tc, u8_t nexth, struct netif *netif); +err_t ip6_output_if_src(struct pbuf *p, const ip6_addr_t *src, const ip6_addr_t *dest, + u8_t hl, u8_t tc, u8_t nexth, struct netif *netif); +#if LWIP_NETIF_USE_HINTS +err_t ip6_output_hinted(struct pbuf *p, const ip6_addr_t *src, const ip6_addr_t *dest, + u8_t hl, u8_t tc, u8_t nexth, struct netif_hint *netif_hint); +#endif /* LWIP_NETIF_USE_HINTS */ +#if LWIP_IPV6_MLD +err_t ip6_options_add_hbh_ra(struct pbuf * p, u8_t nexth, u8_t value); +#endif /* LWIP_IPV6_MLD */ + +#define ip6_netif_get_local_ip(netif, dest) (((netif) != NULL) ? \ + ip6_select_source_address(netif, dest) : NULL) + +#if IP6_DEBUG +void ip6_debug_print(struct pbuf *p); +#else +#define ip6_debug_print(p) +#endif /* IP6_DEBUG */ + + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_IPV6 */ + +#endif /* LWIP_HDR_IP6_H */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/ip6_addr.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/ip6_addr.h new file mode 100644 index 0000000..7c1f913 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/ip6_addr.h @@ -0,0 +1,352 @@ +/** + * @file + * + * IPv6 addresses. + */ + +/* + * Copyright (c) 2010 Inico Technologies Ltd. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Ivan Delamer + * + * Structs and macros for handling IPv6 addresses. + * + * Please coordinate changes and requests with Ivan Delamer + * + */ +#ifndef LWIP_HDR_IP6_ADDR_H +#define LWIP_HDR_IP6_ADDR_H + +#include "lwip/opt.h" +#include "def.h" + +#if LWIP_IPV6 /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/ip6_zone.h" + +#ifdef __cplusplus +extern "C" { +#endif + + +/** This is the aligned version of ip6_addr_t, + used as local variable, on the stack, etc. */ +struct ip6_addr { + u32_t addr[4]; +#if LWIP_IPV6_SCOPES + u8_t zone; +#endif /* LWIP_IPV6_SCOPES */ +}; + +/** IPv6 address */ +typedef struct ip6_addr ip6_addr_t; + +/** Set an IPv6 partial address given by byte-parts */ +#define IP6_ADDR_PART(ip6addr, index, a,b,c,d) \ + (ip6addr)->addr[index] = PP_HTONL(LWIP_MAKEU32(a,b,c,d)) + +/** Set a full IPv6 address by passing the 4 u32_t indices in network byte order + (use PP_HTONL() for constants) */ +#define IP6_ADDR(ip6addr, idx0, idx1, idx2, idx3) do { \ + (ip6addr)->addr[0] = idx0; \ + (ip6addr)->addr[1] = idx1; \ + (ip6addr)->addr[2] = idx2; \ + (ip6addr)->addr[3] = idx3; \ + ip6_addr_clear_zone(ip6addr); } while(0) + +/** Access address in 16-bit block */ +#define IP6_ADDR_BLOCK1(ip6addr) ((u16_t)((lwip_htonl((ip6addr)->addr[0]) >> 16) & 0xffff)) +/** Access address in 16-bit block */ +#define IP6_ADDR_BLOCK2(ip6addr) ((u16_t)((lwip_htonl((ip6addr)->addr[0])) & 0xffff)) +/** Access address in 16-bit block */ +#define IP6_ADDR_BLOCK3(ip6addr) ((u16_t)((lwip_htonl((ip6addr)->addr[1]) >> 16) & 0xffff)) +/** Access address in 16-bit block */ +#define IP6_ADDR_BLOCK4(ip6addr) ((u16_t)((lwip_htonl((ip6addr)->addr[1])) & 0xffff)) +/** Access address in 16-bit block */ +#define IP6_ADDR_BLOCK5(ip6addr) ((u16_t)((lwip_htonl((ip6addr)->addr[2]) >> 16) & 0xffff)) +/** Access address in 16-bit block */ +#define IP6_ADDR_BLOCK6(ip6addr) ((u16_t)((lwip_htonl((ip6addr)->addr[2])) & 0xffff)) +/** Access address in 16-bit block */ +#define IP6_ADDR_BLOCK7(ip6addr) ((u16_t)((lwip_htonl((ip6addr)->addr[3]) >> 16) & 0xffff)) +/** Access address in 16-bit block */ +#define IP6_ADDR_BLOCK8(ip6addr) ((u16_t)((lwip_htonl((ip6addr)->addr[3])) & 0xffff)) + +/** Copy IPv6 address - faster than ip6_addr_set: no NULL check */ +#define ip6_addr_copy(dest, src) do{(dest).addr[0] = (src).addr[0]; \ + (dest).addr[1] = 
(src).addr[1]; \ + (dest).addr[2] = (src).addr[2]; \ + (dest).addr[3] = (src).addr[3]; \ + ip6_addr_copy_zone((dest), (src)); }while(0) +/** Safely copy one IPv6 address to another (src may be NULL) */ +#define ip6_addr_set(dest, src) do{(dest)->addr[0] = (src) == NULL ? 0 : (src)->addr[0]; \ + (dest)->addr[1] = (src) == NULL ? 0 : (src)->addr[1]; \ + (dest)->addr[2] = (src) == NULL ? 0 : (src)->addr[2]; \ + (dest)->addr[3] = (src) == NULL ? 0 : (src)->addr[3]; \ + ip6_addr_set_zone((dest), (src) == NULL ? IP6_NO_ZONE : ip6_addr_zone(src)); }while(0) + +/** Copy packed IPv6 address to unpacked IPv6 address; zone is not set */ +#define ip6_addr_copy_from_packed(dest, src) do{(dest).addr[0] = (src).addr[0]; \ + (dest).addr[1] = (src).addr[1]; \ + (dest).addr[2] = (src).addr[2]; \ + (dest).addr[3] = (src).addr[3]; \ + ip6_addr_clear_zone(&dest); }while(0) + +/** Copy unpacked IPv6 address to packed IPv6 address; zone is lost */ +#define ip6_addr_copy_to_packed(dest, src) do{(dest).addr[0] = (src).addr[0]; \ + (dest).addr[1] = (src).addr[1]; \ + (dest).addr[2] = (src).addr[2]; \ + (dest).addr[3] = (src).addr[3]; }while(0) + +/** Set complete address to zero */ +#define ip6_addr_set_zero(ip6addr) do{(ip6addr)->addr[0] = 0; \ + (ip6addr)->addr[1] = 0; \ + (ip6addr)->addr[2] = 0; \ + (ip6addr)->addr[3] = 0; \ + ip6_addr_clear_zone(ip6addr);}while(0) + +/** Set address to ipv6 'any' (no need for lwip_htonl()) */ +#define ip6_addr_set_any(ip6addr) ip6_addr_set_zero(ip6addr) +/** Set address to ipv6 loopback address */ +#define ip6_addr_set_loopback(ip6addr) do{(ip6addr)->addr[0] = 0; \ + (ip6addr)->addr[1] = 0; \ + (ip6addr)->addr[2] = 0; \ + (ip6addr)->addr[3] = PP_HTONL(0x00000001UL); \ + ip6_addr_clear_zone(ip6addr);}while(0) +/** Safely copy one IPv6 address to another and change byte order + * from host- to network-order. */ +#define ip6_addr_set_hton(dest, src) do{(dest)->addr[0] = (src) == NULL ? 0 : lwip_htonl((src)->addr[0]); \ + (dest)->addr[1] = (src) == NULL ? 0 : lwip_htonl((src)->addr[1]); \ + (dest)->addr[2] = (src) == NULL ? 0 : lwip_htonl((src)->addr[2]); \ + (dest)->addr[3] = (src) == NULL ? 0 : lwip_htonl((src)->addr[3]); \ + ip6_addr_set_zone((dest), (src) == NULL ? IP6_NO_ZONE : ip6_addr_zone(src));}while(0) + + +/** Compare IPv6 networks, ignoring zone information. To be used sparingly! */ +#define ip6_addr_netcmp_zoneless(addr1, addr2) (((addr1)->addr[0] == (addr2)->addr[0]) && \ + ((addr1)->addr[1] == (addr2)->addr[1])) + +/** + * Determine if two IPv6 address are on the same network. + * + * @param addr1 IPv6 address 1 + * @param addr2 IPv6 address 2 + * @return 1 if the network identifiers of both address match, 0 if not + */ +#define ip6_addr_netcmp(addr1, addr2) (ip6_addr_netcmp_zoneless((addr1), (addr2)) && \ + ip6_addr_cmp_zone((addr1), (addr2))) + +/* Exact-host comparison *after* ip6_addr_netcmp() succeeded, for efficiency. */ +#define ip6_addr_nethostcmp(addr1, addr2) (((addr1)->addr[2] == (addr2)->addr[2]) && \ + ((addr1)->addr[3] == (addr2)->addr[3])) + +/** Compare IPv6 addresses, ignoring zone information. To be used sparingly! */ +#define ip6_addr_cmp_zoneless(addr1, addr2) (((addr1)->addr[0] == (addr2)->addr[0]) && \ + ((addr1)->addr[1] == (addr2)->addr[1]) && \ + ((addr1)->addr[2] == (addr2)->addr[2]) && \ + ((addr1)->addr[3] == (addr2)->addr[3])) +/** + * Determine if two IPv6 addresses are the same. In particular, the address + * part of both must be the same, and the zone must be compatible. 
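
To make the construction and comparison macros above concrete, here is a minimal sketch (the function name is hypothetical, and it assumes an application source file compiled with LWIP_IPV6 enabled in lwipopts.h) that builds fe80::1 twice and compares the results:

#include "lwip/ip6_addr.h"

/* Illustrative only: build fe80::1 twice and compare the two addresses. */
static int ip6_addr_demo(void)
{
  ip6_addr_t a, b;

  /* Four 32-bit words in network byte order; IP6_ADDR() also clears the zone. */
  IP6_ADDR(&a, PP_HTONL(0xfe800000UL), PP_HTONL(0x00000000UL),
               PP_HTONL(0x00000000UL), PP_HTONL(0x00000001UL));
  ip6_addr_copy(b, a);           /* word-for-word copy, zone included */

  return ip6_addr_cmp(&a, &b);   /* 1: identical words and compatible zones */
}

With LWIP_IPV6_SCOPES enabled the zone fields are compared as well; both were cleared by IP6_ADDR() and carried over by ip6_addr_copy() here, so the comparison still yields 1.
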
+ * + * @param addr1 IPv6 address 1 + * @param addr2 IPv6 address 2 + * @return 1 if the addresses are considered equal, 0 if not + */ +#define ip6_addr_cmp(addr1, addr2) (ip6_addr_cmp_zoneless((addr1), (addr2)) && \ + ip6_addr_cmp_zone((addr1), (addr2))) + +/** Compare IPv6 address to packed address and zone */ +#define ip6_addr_cmp_packed(ip6addr, paddr, zone_idx) (((ip6addr)->addr[0] == (paddr)->addr[0]) && \ + ((ip6addr)->addr[1] == (paddr)->addr[1]) && \ + ((ip6addr)->addr[2] == (paddr)->addr[2]) && \ + ((ip6addr)->addr[3] == (paddr)->addr[3]) && \ + ip6_addr_equals_zone((ip6addr), (zone_idx))) + +#define ip6_get_subnet_id(ip6addr) (lwip_htonl((ip6addr)->addr[2]) & 0x0000ffffUL) + +#define ip6_addr_isany_val(ip6addr) (((ip6addr).addr[0] == 0) && \ + ((ip6addr).addr[1] == 0) && \ + ((ip6addr).addr[2] == 0) && \ + ((ip6addr).addr[3] == 0)) +#define ip6_addr_isany(ip6addr) (((ip6addr) == NULL) || ip6_addr_isany_val(*(ip6addr))) + +#define ip6_addr_isloopback(ip6addr) (((ip6addr)->addr[0] == 0UL) && \ + ((ip6addr)->addr[1] == 0UL) && \ + ((ip6addr)->addr[2] == 0UL) && \ + ((ip6addr)->addr[3] == PP_HTONL(0x00000001UL))) + +#define ip6_addr_isglobal(ip6addr) (((ip6addr)->addr[0] & PP_HTONL(0xe0000000UL)) == PP_HTONL(0x20000000UL)) + +#define ip6_addr_islinklocal(ip6addr) (((ip6addr)->addr[0] & PP_HTONL(0xffc00000UL)) == PP_HTONL(0xfe800000UL)) + +#define ip6_addr_issitelocal(ip6addr) (((ip6addr)->addr[0] & PP_HTONL(0xffc00000UL)) == PP_HTONL(0xfec00000UL)) + +#define ip6_addr_isuniquelocal(ip6addr) (((ip6addr)->addr[0] & PP_HTONL(0xfe000000UL)) == PP_HTONL(0xfc000000UL)) + +#define ip6_addr_isipv4mappedipv6(ip6addr) (((ip6addr)->addr[0] == 0) && ((ip6addr)->addr[1] == 0) && (((ip6addr)->addr[2]) == PP_HTONL(0x0000FFFFUL))) + +#define ip6_addr_ismulticast(ip6addr) (((ip6addr)->addr[0] & PP_HTONL(0xff000000UL)) == PP_HTONL(0xff000000UL)) +#define ip6_addr_multicast_transient_flag(ip6addr) ((ip6addr)->addr[0] & PP_HTONL(0x00100000UL)) +#define ip6_addr_multicast_prefix_flag(ip6addr) ((ip6addr)->addr[0] & PP_HTONL(0x00200000UL)) +#define ip6_addr_multicast_rendezvous_flag(ip6addr) ((ip6addr)->addr[0] & PP_HTONL(0x00400000UL)) +#define ip6_addr_multicast_scope(ip6addr) ((lwip_htonl((ip6addr)->addr[0]) >> 16) & 0xf) +#define IP6_MULTICAST_SCOPE_RESERVED 0x0 +#define IP6_MULTICAST_SCOPE_RESERVED0 0x0 +#define IP6_MULTICAST_SCOPE_INTERFACE_LOCAL 0x1 +#define IP6_MULTICAST_SCOPE_LINK_LOCAL 0x2 +#define IP6_MULTICAST_SCOPE_RESERVED3 0x3 +#define IP6_MULTICAST_SCOPE_ADMIN_LOCAL 0x4 +#define IP6_MULTICAST_SCOPE_SITE_LOCAL 0x5 +#define IP6_MULTICAST_SCOPE_ORGANIZATION_LOCAL 0x8 +#define IP6_MULTICAST_SCOPE_GLOBAL 0xe +#define IP6_MULTICAST_SCOPE_RESERVEDF 0xf +#define ip6_addr_ismulticast_iflocal(ip6addr) (((ip6addr)->addr[0] & PP_HTONL(0xff8f0000UL)) == PP_HTONL(0xff010000UL)) +#define ip6_addr_ismulticast_linklocal(ip6addr) (((ip6addr)->addr[0] & PP_HTONL(0xff8f0000UL)) == PP_HTONL(0xff020000UL)) +#define ip6_addr_ismulticast_adminlocal(ip6addr) (((ip6addr)->addr[0] & PP_HTONL(0xff8f0000UL)) == PP_HTONL(0xff040000UL)) +#define ip6_addr_ismulticast_sitelocal(ip6addr) (((ip6addr)->addr[0] & PP_HTONL(0xff8f0000UL)) == PP_HTONL(0xff050000UL)) +#define ip6_addr_ismulticast_orglocal(ip6addr) (((ip6addr)->addr[0] & PP_HTONL(0xff8f0000UL)) == PP_HTONL(0xff080000UL)) +#define ip6_addr_ismulticast_global(ip6addr) (((ip6addr)->addr[0] & PP_HTONL(0xff8f0000UL)) == PP_HTONL(0xff0e0000UL)) + +/* Scoping note: while interface-local and link-local multicast addresses do + * have a scope (i.e., they are meaningful 
only in the context of a particular + * interface), the following functions are not assigning or comparing zone + * indices. The reason for this is backward compatibility. Any call site that + * produces a non-global multicast address must assign a multicast address as + * appropriate itself. */ + +#define ip6_addr_isallnodes_iflocal(ip6addr) (((ip6addr)->addr[0] == PP_HTONL(0xff010000UL)) && \ + ((ip6addr)->addr[1] == 0UL) && \ + ((ip6addr)->addr[2] == 0UL) && \ + ((ip6addr)->addr[3] == PP_HTONL(0x00000001UL))) + +#define ip6_addr_isallnodes_linklocal(ip6addr) (((ip6addr)->addr[0] == PP_HTONL(0xff020000UL)) && \ + ((ip6addr)->addr[1] == 0UL) && \ + ((ip6addr)->addr[2] == 0UL) && \ + ((ip6addr)->addr[3] == PP_HTONL(0x00000001UL))) +#define ip6_addr_set_allnodes_linklocal(ip6addr) do{(ip6addr)->addr[0] = PP_HTONL(0xff020000UL); \ + (ip6addr)->addr[1] = 0; \ + (ip6addr)->addr[2] = 0; \ + (ip6addr)->addr[3] = PP_HTONL(0x00000001UL); \ + ip6_addr_clear_zone(ip6addr); }while(0) + +#define ip6_addr_isallrouters_linklocal(ip6addr) (((ip6addr)->addr[0] == PP_HTONL(0xff020000UL)) && \ + ((ip6addr)->addr[1] == 0UL) && \ + ((ip6addr)->addr[2] == 0UL) && \ + ((ip6addr)->addr[3] == PP_HTONL(0x00000002UL))) +#define ip6_addr_set_allrouters_linklocal(ip6addr) do{(ip6addr)->addr[0] = PP_HTONL(0xff020000UL); \ + (ip6addr)->addr[1] = 0; \ + (ip6addr)->addr[2] = 0; \ + (ip6addr)->addr[3] = PP_HTONL(0x00000002UL); \ + ip6_addr_clear_zone(ip6addr); }while(0) + +#define ip6_addr_issolicitednode(ip6addr) ( ((ip6addr)->addr[0] == PP_HTONL(0xff020000UL)) && \ + ((ip6addr)->addr[2] == PP_HTONL(0x00000001UL)) && \ + (((ip6addr)->addr[3] & PP_HTONL(0xff000000UL)) == PP_HTONL(0xff000000UL)) ) + +#define ip6_addr_set_solicitednode(ip6addr, if_id) do{(ip6addr)->addr[0] = PP_HTONL(0xff020000UL); \ + (ip6addr)->addr[1] = 0; \ + (ip6addr)->addr[2] = PP_HTONL(0x00000001UL); \ + (ip6addr)->addr[3] = (PP_HTONL(0xff000000UL) | (if_id)); \ + ip6_addr_clear_zone(ip6addr); }while(0) + +#define ip6_addr_cmp_solicitednode(ip6addr, sn_addr) (((ip6addr)->addr[0] == PP_HTONL(0xff020000UL)) && \ + ((ip6addr)->addr[1] == 0) && \ + ((ip6addr)->addr[2] == PP_HTONL(0x00000001UL)) && \ + ((ip6addr)->addr[3] == (PP_HTONL(0xff000000UL) | (sn_addr)->addr[3]))) + +/* IPv6 address states. */ +#define IP6_ADDR_INVALID 0x00 +#define IP6_ADDR_TENTATIVE 0x08 +#define IP6_ADDR_TENTATIVE_1 0x09 /* 1 probe sent */ +#define IP6_ADDR_TENTATIVE_2 0x0a /* 2 probes sent */ +#define IP6_ADDR_TENTATIVE_3 0x0b /* 3 probes sent */ +#define IP6_ADDR_TENTATIVE_4 0x0c /* 4 probes sent */ +#define IP6_ADDR_TENTATIVE_5 0x0d /* 5 probes sent */ +#define IP6_ADDR_TENTATIVE_6 0x0e /* 6 probes sent */ +#define IP6_ADDR_TENTATIVE_7 0x0f /* 7 probes sent */ +#define IP6_ADDR_VALID 0x10 /* This bit marks an address as valid (preferred or deprecated) */ +#define IP6_ADDR_PREFERRED 0x30 +#define IP6_ADDR_DEPRECATED 0x10 /* Same as VALID (valid but not preferred) */ +#define IP6_ADDR_DUPLICATED 0x40 /* Failed DAD test, not valid */ + +#define IP6_ADDR_TENTATIVE_COUNT_MASK 0x07 /* 1-7 probes sent */ + +#define ip6_addr_isinvalid(addr_state) (addr_state == IP6_ADDR_INVALID) +#define ip6_addr_istentative(addr_state) (addr_state & IP6_ADDR_TENTATIVE) +#define ip6_addr_isvalid(addr_state) (addr_state & IP6_ADDR_VALID) /* Include valid, preferred, and deprecated. 
*/ +#define ip6_addr_ispreferred(addr_state) (addr_state == IP6_ADDR_PREFERRED) +#define ip6_addr_isdeprecated(addr_state) (addr_state == IP6_ADDR_DEPRECATED) +#define ip6_addr_isduplicated(addr_state) (addr_state == IP6_ADDR_DUPLICATED) + +#if LWIP_IPV6_ADDRESS_LIFETIMES +#define IP6_ADDR_LIFE_STATIC (0) +#define IP6_ADDR_LIFE_INFINITE (0xffffffffUL) +#define ip6_addr_life_isstatic(addr_life) ((addr_life) == IP6_ADDR_LIFE_STATIC) +#define ip6_addr_life_isinfinite(addr_life) ((addr_life) == IP6_ADDR_LIFE_INFINITE) +#endif /* LWIP_IPV6_ADDRESS_LIFETIMES */ + +#define ip6_addr_debug_print_parts(debug, a, b, c, d, e, f, g, h) \ + LWIP_DEBUGF(debug, ("%" X16_F ":%" X16_F ":%" X16_F ":%" X16_F ":%" X16_F ":%" X16_F ":%" X16_F ":%" X16_F, \ + a, b, c, d, e, f, g, h)) +#define ip6_addr_debug_print(debug, ipaddr) \ + ip6_addr_debug_print_parts(debug, \ + (u16_t)((ipaddr) != NULL ? IP6_ADDR_BLOCK1(ipaddr) : 0), \ + (u16_t)((ipaddr) != NULL ? IP6_ADDR_BLOCK2(ipaddr) : 0), \ + (u16_t)((ipaddr) != NULL ? IP6_ADDR_BLOCK3(ipaddr) : 0), \ + (u16_t)((ipaddr) != NULL ? IP6_ADDR_BLOCK4(ipaddr) : 0), \ + (u16_t)((ipaddr) != NULL ? IP6_ADDR_BLOCK5(ipaddr) : 0), \ + (u16_t)((ipaddr) != NULL ? IP6_ADDR_BLOCK6(ipaddr) : 0), \ + (u16_t)((ipaddr) != NULL ? IP6_ADDR_BLOCK7(ipaddr) : 0), \ + (u16_t)((ipaddr) != NULL ? IP6_ADDR_BLOCK8(ipaddr) : 0)) +#define ip6_addr_debug_print_val(debug, ipaddr) \ + ip6_addr_debug_print_parts(debug, \ + IP6_ADDR_BLOCK1(&(ipaddr)), \ + IP6_ADDR_BLOCK2(&(ipaddr)), \ + IP6_ADDR_BLOCK3(&(ipaddr)), \ + IP6_ADDR_BLOCK4(&(ipaddr)), \ + IP6_ADDR_BLOCK5(&(ipaddr)), \ + IP6_ADDR_BLOCK6(&(ipaddr)), \ + IP6_ADDR_BLOCK7(&(ipaddr)), \ + IP6_ADDR_BLOCK8(&(ipaddr))) + +#define IP6ADDR_STRLEN_MAX 46 + +int ip6addr_aton(const char *cp, ip6_addr_t *addr); +/** returns ptr to static buffer; not reentrant! */ +char *ip6addr_ntoa(const ip6_addr_t *addr); +char *ip6addr_ntoa_r(const ip6_addr_t *addr, char *buf, int buflen); + + + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_IPV6 */ + +#endif /* LWIP_HDR_IP6_ADDR_H */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/ip6_frag.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/ip6_frag.h new file mode 100644 index 0000000..71940dd --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/ip6_frag.h @@ -0,0 +1,144 @@ +/** + * @file + * + * IPv6 fragmentation and reassembly. + */ + +/* + * Copyright (c) 2010 Inico Technologies Ltd. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Ivan Delamer + * + * + * Please coordinate changes and requests with Ivan Delamer + * + */ +#ifndef LWIP_HDR_IP6_FRAG_H +#define LWIP_HDR_IP6_FRAG_H + +#include "lwip/opt.h" +#include "lwip/pbuf.h" +#include "lwip/ip6_addr.h" +#include "lwip/ip6.h" +#include "lwip/netif.h" + +#ifdef __cplusplus +extern "C" { +#endif + + +#if LWIP_IPV6 && LWIP_IPV6_REASS /* don't build if not configured for use in lwipopts.h */ + +/** The IPv6 reassembly timer interval in milliseconds. */ +#define IP6_REASS_TMR_INTERVAL 1000 + +/** IP6_FRAG_COPYHEADER==1: for platforms where sizeof(void*) > 4, "struct + * ip6_reass_helper" is too large to be stored in the IPv6 fragment header, and + * will bleed into the header before it, which may be the IPv6 header or an + * extension header. This means that for each first fragment packet, we need to + * 1) make a copy of some IPv6 header fields (src+dest) that we need later on, + * just in case we do overwrite part of the IPv6 header, and 2) make a copy of + * the header data that we overwrote, so that we can restore it before either + * completing reassembly or sending an ICMPv6 reply. The last part is true even + * if this setting is disabled, but if it is enabled, we need to save a bit + * more data (up to the size of a pointer) because we overwrite more. */ +#ifndef IPV6_FRAG_COPYHEADER +#define IPV6_FRAG_COPYHEADER 0 +#endif + +/* With IPV6_FRAG_COPYHEADER==1, a helper structure may (or, depending on the + * presence of extensions, may not) overwrite part of the IP header. Therefore, + * we copy the fields that we need from the IP header for as long as the helper + * structure may still be in place. This is easier than temporarily restoring + * those fields in the IP header each time we need to perform checks on them. */ +#if IPV6_FRAG_COPYHEADER +#define IPV6_FRAG_SRC(ipr) ((ipr)->src) +#define IPV6_FRAG_DEST(ipr) ((ipr)->dest) +#else /* IPV6_FRAG_COPYHEADER */ +#define IPV6_FRAG_SRC(ipr) ((ipr)->iphdr->src) +#define IPV6_FRAG_DEST(ipr) ((ipr)->iphdr->dest) +#endif /* IPV6_FRAG_COPYHEADER */ + +/** IPv6 reassembly helper struct. + * This is exported because memp needs to know the size. + */ +struct ip6_reassdata { + struct ip6_reassdata *next; + struct pbuf *p; + struct ip6_hdr *iphdr; /* pointer to the first (original) IPv6 header */ +#if IPV6_FRAG_COPYHEADER + ip6_addr_p_t src; /* copy of the source address in the IP header */ + ip6_addr_p_t dest; /* copy of the destination address in the IP header */ + /* This buffer (for the part of the original header that we overwrite) will + * be slightly oversized, but we cannot compute the exact size from here. */ + u8_t orig_hdr[sizeof(struct ip6_frag_hdr) + sizeof(void*)]; +#else /* IPV6_FRAG_COPYHEADER */ + /* In this case we still need the buffer, for sending ICMPv6 replies. 
*/ + u8_t orig_hdr[sizeof(struct ip6_frag_hdr)]; +#endif /* IPV6_FRAG_COPYHEADER */ + u32_t identification; + u16_t datagram_len; + u8_t nexth; + u8_t timer; +#if LWIP_IPV6_SCOPES + u8_t src_zone; /* zone of original packet's source address */ + u8_t dest_zone; /* zone of original packet's destination address */ +#endif /* LWIP_IPV6_SCOPES */ +}; + +#define ip6_reass_init() /* Compatibility define */ +void ip6_reass_tmr(void); +struct pbuf *ip6_reass(struct pbuf *p); + +#endif /* LWIP_IPV6 && LWIP_IPV6_REASS */ + +#if LWIP_IPV6 && LWIP_IPV6_FRAG /* don't build if not configured for use in lwipopts.h */ + +#ifndef LWIP_PBUF_CUSTOM_REF_DEFINED +#define LWIP_PBUF_CUSTOM_REF_DEFINED +/** A custom pbuf that holds a reference to another pbuf, which is freed + * when this custom pbuf is freed. This is used to create a custom PBUF_REF + * that points into the original pbuf. */ +struct pbuf_custom_ref { + /** 'base class' */ + struct pbuf_custom pc; + /** pointer to the original pbuf that is referenced */ + struct pbuf *original; +}; +#endif /* LWIP_PBUF_CUSTOM_REF_DEFINED */ + +err_t ip6_frag(struct pbuf *p, struct netif *netif, const ip6_addr_t *dest); + +#endif /* LWIP_IPV6 && LWIP_IPV6_FRAG */ + + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_IP6_FRAG_H */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/ip6_zone.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/ip6_zone.h new file mode 100644 index 0000000..235141e --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/ip6_zone.h @@ -0,0 +1,304 @@ +/** + * @file + * + * IPv6 address scopes, zones, and scoping policy. + * + * This header provides the means to implement support for IPv6 address scopes, + * as per RFC 4007. An address scope can be either global or more constrained. + * In lwIP, we say that an address "has a scope" or "is scoped" when its scope + * is constrained, in which case the address is meaningful only in a specific + * "zone." For unicast addresses, only link-local addresses have a scope; in + * that case, the scope is the link. For multicast addresses, there are various + * scopes defined by RFC 4007 and others. For any constrained scope, a system + * must establish a (potentially one-to-many) mapping between zones and local + * interfaces. For example, a link-local address is valid on only one link (its + * zone). That link may be attached to one or more local interfaces. The + * decisions on which scopes are constrained and the mapping between zones and + * interfaces is together what we refer to as the "scoping policy" - more on + * this in a bit. + * + * In lwIP, each IPv6 address has an associated zone index. This zone index may + * be set to "no zone" (IP6_NO_ZONE, 0) or an actual zone. We say that an + * address "has a zone" or "is zoned" when its zone index is *not* set to "no + * zone." In lwIP, in principle, each address should be "properly zoned," which + * means that if the address has a zone if and only if has a scope. As such, it + * is a rule that an unscoped (e.g., global) address must never have a zone. + * Even though one could argue that there is always one zone even for global + * scopes, this rule exists for implementation simplicity. Violation of the + * rule will trigger assertions or otherwise result in undesired behavior. 
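
As one way to picture the zoning rules described above, the sketch below (illustrative names, assuming a build with both LWIP_IPV6 and LWIP_IPV6_SCOPES enabled in lwipopts.h) tags a link-local destination with the zone of the interface it will be used on, using the macros defined later in this header:

#include "lwip/ip6_addr.h"
#include "lwip/netif.h"

/* Illustrative only: make a link-local destination "properly zoned" for nif. */
static void zone_demo(struct netif *nif)
{
  ip6_addr_t dest;

  /* fe80::1 - link-local, so it is only meaningful together with a zone. */
  IP6_ADDR(&dest, PP_HTONL(0xfe800000UL), 0, 0, PP_HTONL(0x00000001UL));

  if (ip6_addr_lacks_zone(&dest, IP6_UNICAST)) {
    /* Default policy: the zone of a link-local address is the netif index. */
    ip6_addr_assign_zone(&dest, IP6_UNICAST, nif);
  }
  /* dest can now be handed down the stack as a properly zoned address. */
}
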
+ * + * Backward compatibility prevents us from requiring that applications always + * provide properly zoned addresses. We do enforce the rule that the in the + * lwIP link layer (everything below netif->output_ip6() and in particular ND6) + * *all* addresses are properly zoned. Thus, on the output paths down the + * stack, various places deal with the case of addresses that lack a zone. + * Some of them are best-effort for efficiency (e.g. the PCB bind and connect + * API calls' attempts to add missing zones); ultimately the IPv6 output + * handler (@ref ip6_output_if_src) will set a zone if necessary. + * + * Aside from dealing with scoped addresses lacking a zone, a proper IPv6 + * implementation must also ensure that a packet with a scoped source and/or + * destination address does not leave its zone. This is currently implemented + * in the input and forward functions. However, for output, these checks are + * deliberately omitted in order to keep the implementation lightweight. The + * routing algorithm in @ref ip6_route will take decisions such that it will + * not cause zone violations unless the application sets bad addresses, though. + * + * In terms of scoping policy, lwIP implements the default policy from RFC 4007 + * using macros in this file. This policy considers link-local unicast + * addresses and (only) interface-local and link-local multicast addresses as + * having a scope. For all these addresses, the zone is equal to the interface. + * As shown below in this file, it is possible to implement a custom policy. + */ + +/* + * Copyright (c) 2017 The MINIX 3 Project. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: David van Moolenbroek + * + */ +#ifndef LWIP_HDR_IP6_ZONE_H +#define LWIP_HDR_IP6_ZONE_H + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * @defgroup ip6_zones IPv6 Zones + * @ingroup ip6 + * @{ + */ + +#if LWIP_IPV6 /* don't build if not configured for use in lwipopts.h */ + +/** Identifier for "no zone". 
*/ +#define IP6_NO_ZONE 0 + +#if LWIP_IPV6_SCOPES + +/** Zone initializer for static IPv6 address initialization, including comma. */ +#define IPADDR6_ZONE_INIT , IP6_NO_ZONE + +/** Return the zone index of the given IPv6 address; possibly "no zone". */ +#define ip6_addr_zone(ip6addr) ((ip6addr)->zone) + +/** Does the given IPv6 address have a zone set? (0/1) */ +#define ip6_addr_has_zone(ip6addr) (ip6_addr_zone(ip6addr) != IP6_NO_ZONE) + +/** Set the zone field of an IPv6 address to a particular value. */ +#define ip6_addr_set_zone(ip6addr, zone_idx) ((ip6addr)->zone = (zone_idx)) + +/** Clear the zone field of an IPv6 address, setting it to "no zone". */ +#define ip6_addr_clear_zone(ip6addr) ((ip6addr)->zone = IP6_NO_ZONE) + +/** Copy the zone field from the second IPv6 address to the first one. */ +#define ip6_addr_copy_zone(ip6addr1, ip6addr2) ((ip6addr1).zone = (ip6addr2).zone) + +/** Is the zone field of the given IPv6 address equal to the given zone index? (0/1) */ +#define ip6_addr_equals_zone(ip6addr, zone_idx) ((ip6addr)->zone == (zone_idx)) + +/** Are the zone fields of the given IPv6 addresses equal? (0/1) + * This macro must only be used on IPv6 addresses of the same scope. */ +#define ip6_addr_cmp_zone(ip6addr1, ip6addr2) ((ip6addr1)->zone == (ip6addr2)->zone) + +/** Symbolic constants for the 'type' parameters in some of the macros. + * These exist for efficiency only, allowing the macros to avoid certain tests + * when the address is known not to be of a certain type. Dead code elimination + * will do the rest. IP6_MULTICAST is supported but currently not optimized. + * @see ip6_addr_has_scope, ip6_addr_assign_zone, ip6_addr_lacks_zone. + */ +enum lwip_ipv6_scope_type +{ + /** Unknown */ + IP6_UNKNOWN = 0, + /** Unicast */ + IP6_UNICAST = 1, + /** Multicast */ + IP6_MULTICAST = 2 +}; + +/** IPV6_CUSTOM_SCOPES: together, the following three macro definitions, + * @ref ip6_addr_has_scope, @ref ip6_addr_assign_zone, and + * @ref ip6_addr_test_zone, completely define the lwIP scoping policy. + * The definitions below implement the default policy from RFC 4007 Sec. 6. + * Should an implementation desire to implement a different policy, it can + * define IPV6_CUSTOM_SCOPES to 1 and supply its own definitions for the three + * macros instead. + */ +#ifndef IPV6_CUSTOM_SCOPES +#define IPV6_CUSTOM_SCOPES 0 +#endif /* !IPV6_CUSTOM_SCOPES */ + +#if !IPV6_CUSTOM_SCOPES + +/** + * Determine whether an IPv6 address has a constrained scope, and as such is + * meaningful only if accompanied by a zone index to identify the scope's zone. + * The given address type may be used to eliminate at compile time certain + * checks that will evaluate to false at run time anyway. + * + * This default implementation follows the default model of RFC 4007, where + * only interface-local and link-local scopes are defined. + * + * Even though the unicast loopback address does have an implied link-local + * scope, in this implementation it does not have an explicitly assigned zone + * index. As such it should not be tested for in this macro. + * + * @param ip6addr the IPv6 address (const); only its address part is examined. + * @param type address type; see @ref lwip_ipv6_scope_type. + * @return 1 if the address has a constrained scope, 0 if it does not. 
+ */ +#define ip6_addr_has_scope(ip6addr, type) \ + (ip6_addr_islinklocal(ip6addr) || (((type) != IP6_UNICAST) && \ + (ip6_addr_ismulticast_iflocal(ip6addr) || \ + ip6_addr_ismulticast_linklocal(ip6addr)))) + +/** + * Assign a zone index to an IPv6 address, based on a network interface. If the + * given address has a scope, the assigned zone index is that scope's zone of + * the given netif; otherwise, the assigned zone index is "no zone". + * + * This default implementation follows the default model of RFC 4007, where + * only interface-local and link-local scopes are defined, and the zone index + * of both of those scopes always equals the index of the network interface. + * As such, this default implementation need not distinguish between different + * constrained scopes when assigning the zone. + * + * @param ip6addr the IPv6 address; its address part is examined, and its zone + * index is assigned. + * @param type address type; see @ref lwip_ipv6_scope_type. + * @param netif the network interface (const). + */ +#define ip6_addr_assign_zone(ip6addr, type, netif) \ + (ip6_addr_set_zone((ip6addr), \ + ip6_addr_has_scope((ip6addr), (type)) ? netif_get_index(netif) : 0)) + +/** + * Test whether an IPv6 address is "zone-compatible" with a network interface. + * That is, test whether the network interface is part of the zone associated + * with the address. For efficiency, this macro is only ever called if the + * given address is either scoped or zoned, and thus, it need not test this. + * If an address is scoped but not zoned, or zoned and not scoped, it is + * considered not zone-compatible with any netif. + * + * This default implementation follows the default model of RFC 4007, where + * only interface-local and link-local scopes are defined, and the zone index + * of both of those scopes always equals the index of the network interface. + * As such, there is always only one matching netif for a specific zone index, + * but all call sites of this macro currently support multiple matching netifs + * as well (at no additional expense in the common case). + * + * @param ip6addr the IPv6 address (const). + * @param netif the network interface (const). + * @return 1 if the address is scope-compatible with the netif, 0 if not. + */ +#define ip6_addr_test_zone(ip6addr, netif) \ + (ip6_addr_equals_zone((ip6addr), netif_get_index(netif))) + +#endif /* !IPV6_CUSTOM_SCOPES */ + +/** Does the given IPv6 address have a scope, and as such should also have a + * zone to be meaningful, but does not actually have a zone? (0/1) */ +#define ip6_addr_lacks_zone(ip6addr, type) \ + (!ip6_addr_has_zone(ip6addr) && ip6_addr_has_scope((ip6addr), (type))) + +/** + * Try to select a zone for a scoped address that does not yet have a zone. + * Called from PCB bind and connect routines, for two reasons: 1) to save on + * this (relatively expensive) selection for every individual packet route + * operation and 2) to allow the application to obtain the selected zone from + * the PCB as is customary for e.g. getsockname/getpeername BSD socket calls. + * + * Ideally, callers would always supply a properly zoned address, in which case + * this function would not be needed. It exists both for compatibility with the + * BSD socket API (which accepts zoneless destination addresses) and for + * backward compatibility with pre-scoping lwIP code. + * + * It may be impossible to select a zone, e.g. if there are no netifs. In that + * case, the address's zone field will be left as is. 
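
A minimal sketch of how a caller might use this selection macro, assuming LWIP_IPV6_SCOPES is enabled and the call is made from the lwIP core (tcpip) context, since it performs a route lookup; the helper name is hypothetical:

#include "lwip/ip6.h"   /* ip6_route(); also pulls in ip6_addr.h and netif.h */

/* Illustrative helper: mimic what the PCB bind/connect paths do for a
 * user-supplied destination that is scoped but not yet zoned. */
static void prepare_dest(ip6_addr_t *dest, const ip6_addr_t *src)
{
  if (ip6_addr_lacks_zone(dest, IP6_UNKNOWN)) {
    ip6_addr_select_zone(dest, src);   /* routes once, then assigns that netif's zone */
  }
}
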
+ * + * @param dest the IPv6 address for which to select and set a zone. + * @param src source IPv6 address (const); may be equal to dest. + */ +#define ip6_addr_select_zone(dest, src) do { struct netif *selected_netif; \ + selected_netif = ip6_route((src), (dest)); \ + if (selected_netif != NULL) { \ + ip6_addr_assign_zone((dest), IP6_UNKNOWN, selected_netif); \ + } } while (0) + +/** + * @} + */ + +#else /* LWIP_IPV6_SCOPES */ + +#define IPADDR6_ZONE_INIT +#define ip6_addr_zone(ip6addr) (IP6_NO_ZONE) +#define ip6_addr_has_zone(ip6addr) (0) +#define ip6_addr_set_zone(ip6addr, zone_idx) +#define ip6_addr_clear_zone(ip6addr) +#define ip6_addr_copy_zone(ip6addr1, ip6addr2) +#define ip6_addr_equals_zone(ip6addr, zone_idx) (1) +#define ip6_addr_cmp_zone(ip6addr1, ip6addr2) (1) +#define IPV6_CUSTOM_SCOPES 0 +#define ip6_addr_has_scope(ip6addr, type) (0) +#define ip6_addr_assign_zone(ip6addr, type, netif) +#define ip6_addr_test_zone(ip6addr, netif) (1) +#define ip6_addr_lacks_zone(ip6addr, type) (0) +#define ip6_addr_select_zone(ip6addr, src) + +#endif /* LWIP_IPV6_SCOPES */ + +#if LWIP_IPV6_SCOPES && LWIP_IPV6_SCOPES_DEBUG + +/** Verify that the given IPv6 address is properly zoned. */ +#define IP6_ADDR_ZONECHECK(ip6addr) LWIP_ASSERT("IPv6 zone check failed", \ + ip6_addr_has_scope(ip6addr, IP6_UNKNOWN) == ip6_addr_has_zone(ip6addr)) + +/** Verify that the given IPv6 address is properly zoned for the given netif. */ +#define IP6_ADDR_ZONECHECK_NETIF(ip6addr, netif) LWIP_ASSERT("IPv6 netif zone check failed", \ + ip6_addr_has_scope(ip6addr, IP6_UNKNOWN) ? \ + (ip6_addr_has_zone(ip6addr) && \ + (((netif) == NULL) || ip6_addr_test_zone((ip6addr), (netif)))) : \ + !ip6_addr_has_zone(ip6addr)) + +#else /* LWIP_IPV6_SCOPES && LWIP_IPV6_SCOPES_DEBUG */ + +#define IP6_ADDR_ZONECHECK(ip6addr) +#define IP6_ADDR_ZONECHECK_NETIF(ip6addr, netif) + +#endif /* LWIP_IPV6_SCOPES && LWIP_IPV6_SCOPES_DEBUG */ + +#endif /* LWIP_IPV6 */ + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_IP6_ZONE_H */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/ip_addr.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/ip_addr.h new file mode 100644 index 0000000..f98fa8a --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/ip_addr.h @@ -0,0 +1,438 @@ +/** + * @file + * IP address API (common IPv4 and IPv6) + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ +#ifndef LWIP_HDR_IP_ADDR_H +#define LWIP_HDR_IP_ADDR_H + +#include "lwip/opt.h" +#include "lwip/def.h" + +#include "lwip/ip4_addr.h" +#include "lwip/ip6_addr.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/** @ingroup ipaddr + * IP address types for use in ip_addr_t.type member. + * @see tcp_new_ip_type(), udp_new_ip_type(), raw_new_ip_type(). + */ +enum lwip_ip_addr_type { + /** IPv4 */ + IPADDR_TYPE_V4 = 0U, + /** IPv6 */ + IPADDR_TYPE_V6 = 6U, + /** IPv4+IPv6 ("dual-stack") */ + IPADDR_TYPE_ANY = 46U +}; + +#if LWIP_IPV4 && LWIP_IPV6 +/** + * @ingroup ipaddr + * A union struct for both IP version's addresses. + * ATTENTION: watch out for its size when adding IPv6 address scope! + */ +typedef struct ip_addr { + union { + ip6_addr_t ip6; + ip4_addr_t ip4; + } u_addr; + /** @ref lwip_ip_addr_type */ + u8_t type; +} ip_addr_t; + +extern const ip_addr_t ip_addr_any_type; + +/** @ingroup ip4addr */ +#define IPADDR4_INIT(u32val) { { { { u32val, 0ul, 0ul, 0ul } IPADDR6_ZONE_INIT } }, IPADDR_TYPE_V4 } +/** @ingroup ip4addr */ +#define IPADDR4_INIT_BYTES(a,b,c,d) IPADDR4_INIT(PP_HTONL(LWIP_MAKEU32(a,b,c,d))) + +/** @ingroup ip6addr */ +#define IPADDR6_INIT(a, b, c, d) { { { { a, b, c, d } IPADDR6_ZONE_INIT } }, IPADDR_TYPE_V6 } +/** @ingroup ip6addr */ +#define IPADDR6_INIT_HOST(a, b, c, d) { { { { PP_HTONL(a), PP_HTONL(b), PP_HTONL(c), PP_HTONL(d) } IPADDR6_ZONE_INIT } }, IPADDR_TYPE_V6 } + +/** @ingroup ipaddr */ +#define IP_IS_ANY_TYPE_VAL(ipaddr) (IP_GET_TYPE(&ipaddr) == IPADDR_TYPE_ANY) +/** @ingroup ipaddr */ +#define IPADDR_ANY_TYPE_INIT { { { { 0ul, 0ul, 0ul, 0ul } IPADDR6_ZONE_INIT } }, IPADDR_TYPE_ANY } + +/** @ingroup ip4addr */ +#define IP_IS_V4_VAL(ipaddr) (IP_GET_TYPE(&ipaddr) == IPADDR_TYPE_V4) +/** @ingroup ip6addr */ +#define IP_IS_V6_VAL(ipaddr) (IP_GET_TYPE(&ipaddr) == IPADDR_TYPE_V6) +/** @ingroup ip4addr */ +#define IP_IS_V4(ipaddr) (((ipaddr) == NULL) || IP_IS_V4_VAL(*(ipaddr))) +/** @ingroup ip6addr */ +#define IP_IS_V6(ipaddr) (((ipaddr) != NULL) && IP_IS_V6_VAL(*(ipaddr))) + +#define IP_SET_TYPE_VAL(ipaddr, iptype) do { (ipaddr).type = (iptype); }while(0) +#define IP_SET_TYPE(ipaddr, iptype) do { if((ipaddr) != NULL) { IP_SET_TYPE_VAL(*(ipaddr), iptype); }}while(0) +#define IP_GET_TYPE(ipaddr) ((ipaddr)->type) + +#define IP_ADDR_RAW_SIZE(ipaddr) (IP_GET_TYPE(&ipaddr) == IPADDR_TYPE_V4 ? 
sizeof(ip4_addr_t) : sizeof(ip6_addr_t)) + +#define IP_ADDR_PCB_VERSION_MATCH_EXACT(pcb, ipaddr) (IP_GET_TYPE(&pcb->local_ip) == IP_GET_TYPE(ipaddr)) +#define IP_ADDR_PCB_VERSION_MATCH(pcb, ipaddr) (IP_IS_ANY_TYPE_VAL(pcb->local_ip) || IP_ADDR_PCB_VERSION_MATCH_EXACT(pcb, ipaddr)) + +/** @ingroup ip6addr + * Convert generic ip address to specific protocol version + */ +#define ip_2_ip6(ipaddr) (&((ipaddr)->u_addr.ip6)) +/** @ingroup ip4addr + * Convert generic ip address to specific protocol version + */ +#define ip_2_ip4(ipaddr) (&((ipaddr)->u_addr.ip4)) + +/** @ingroup ip4addr */ +#define IP_ADDR4(ipaddr,a,b,c,d) do { IP4_ADDR(ip_2_ip4(ipaddr),a,b,c,d); \ + IP_SET_TYPE_VAL(*(ipaddr), IPADDR_TYPE_V4); } while(0) +/** @ingroup ip6addr */ +#define IP_ADDR6(ipaddr,i0,i1,i2,i3) do { IP6_ADDR(ip_2_ip6(ipaddr),i0,i1,i2,i3); \ + IP_SET_TYPE_VAL(*(ipaddr), IPADDR_TYPE_V6); } while(0) +/** @ingroup ip6addr */ +#define IP_ADDR6_HOST(ipaddr,i0,i1,i2,i3) IP_ADDR6(ipaddr,PP_HTONL(i0),PP_HTONL(i1),PP_HTONL(i2),PP_HTONL(i3)) + +#define ip_clear_no4(ipaddr) do { ip_2_ip6(ipaddr)->addr[1] = \ + ip_2_ip6(ipaddr)->addr[2] = \ + ip_2_ip6(ipaddr)->addr[3] = 0; \ + ip6_addr_clear_zone(ip_2_ip6(ipaddr)); }while(0) + +/** @ingroup ipaddr */ +#define ip_addr_copy(dest, src) do{ IP_SET_TYPE_VAL(dest, IP_GET_TYPE(&src)); if(IP_IS_V6_VAL(src)){ \ + ip6_addr_copy(*ip_2_ip6(&(dest)), *ip_2_ip6(&(src))); }else{ \ + ip4_addr_copy(*ip_2_ip4(&(dest)), *ip_2_ip4(&(src))); ip_clear_no4(&dest); }}while(0) +/** @ingroup ip6addr */ +#define ip_addr_copy_from_ip6(dest, src) do{ \ + ip6_addr_copy(*ip_2_ip6(&(dest)), src); IP_SET_TYPE_VAL(dest, IPADDR_TYPE_V6); }while(0) +/** @ingroup ip6addr */ +#define ip_addr_copy_from_ip6_packed(dest, src) do{ \ + ip6_addr_copy_from_packed(*ip_2_ip6(&(dest)), src); IP_SET_TYPE_VAL(dest, IPADDR_TYPE_V6); }while(0) +/** @ingroup ip4addr */ +#define ip_addr_copy_from_ip4(dest, src) do{ \ + ip4_addr_copy(*ip_2_ip4(&(dest)), src); IP_SET_TYPE_VAL(dest, IPADDR_TYPE_V4); ip_clear_no4(&dest); }while(0) +/** @ingroup ip4addr */ +#define ip_addr_set_ip4_u32(ipaddr, val) do{if(ipaddr){ip4_addr_set_u32(ip_2_ip4(ipaddr), val); \ + IP_SET_TYPE(ipaddr, IPADDR_TYPE_V4); ip_clear_no4(ipaddr); }}while(0) +/** @ingroup ip4addr */ +#define ip_addr_set_ip4_u32_val(ipaddr, val) do{ ip4_addr_set_u32(ip_2_ip4(&(ipaddr)), val); \ + IP_SET_TYPE_VAL(ipaddr, IPADDR_TYPE_V4); ip_clear_no4(&ipaddr); }while(0) +/** @ingroup ip4addr */ +#define ip_addr_get_ip4_u32(ipaddr) (((ipaddr) && IP_IS_V4(ipaddr)) ? 
\ + ip4_addr_get_u32(ip_2_ip4(ipaddr)) : 0) +/** @ingroup ipaddr */ +#define ip_addr_set(dest, src) do{ IP_SET_TYPE(dest, IP_GET_TYPE(src)); if(IP_IS_V6(src)){ \ + ip6_addr_set(ip_2_ip6(dest), ip_2_ip6(src)); }else{ \ + ip4_addr_set(ip_2_ip4(dest), ip_2_ip4(src)); ip_clear_no4(dest); }}while(0) +/** @ingroup ipaddr */ +#define ip_addr_set_ipaddr(dest, src) ip_addr_set(dest, src) +/** @ingroup ipaddr */ +#define ip_addr_set_zero(ipaddr) do{ \ + ip6_addr_set_zero(ip_2_ip6(ipaddr)); IP_SET_TYPE(ipaddr, 0); }while(0) +/** @ingroup ip5addr */ +#define ip_addr_set_zero_ip4(ipaddr) do{ \ + ip6_addr_set_zero(ip_2_ip6(ipaddr)); IP_SET_TYPE(ipaddr, IPADDR_TYPE_V4); }while(0) +/** @ingroup ip6addr */ +#define ip_addr_set_zero_ip6(ipaddr) do{ \ + ip6_addr_set_zero(ip_2_ip6(ipaddr)); IP_SET_TYPE(ipaddr, IPADDR_TYPE_V6); }while(0) +/** @ingroup ipaddr */ +#define ip_addr_set_any(is_ipv6, ipaddr) do{if(is_ipv6){ \ + ip6_addr_set_any(ip_2_ip6(ipaddr)); IP_SET_TYPE(ipaddr, IPADDR_TYPE_V6); }else{ \ + ip4_addr_set_any(ip_2_ip4(ipaddr)); IP_SET_TYPE(ipaddr, IPADDR_TYPE_V4); ip_clear_no4(ipaddr); }}while(0) +/** @ingroup ipaddr */ +#define ip_addr_set_any_val(is_ipv6, ipaddr) do{if(is_ipv6){ \ + ip6_addr_set_any(ip_2_ip6(&(ipaddr))); IP_SET_TYPE_VAL(ipaddr, IPADDR_TYPE_V6); }else{ \ + ip4_addr_set_any(ip_2_ip4(&(ipaddr))); IP_SET_TYPE_VAL(ipaddr, IPADDR_TYPE_V4); ip_clear_no4(&ipaddr); }}while(0) +/** @ingroup ipaddr */ +#define ip_addr_set_loopback(is_ipv6, ipaddr) do{if(is_ipv6){ \ + ip6_addr_set_loopback(ip_2_ip6(ipaddr)); IP_SET_TYPE(ipaddr, IPADDR_TYPE_V6); }else{ \ + ip4_addr_set_loopback(ip_2_ip4(ipaddr)); IP_SET_TYPE(ipaddr, IPADDR_TYPE_V4); ip_clear_no4(ipaddr); }}while(0) +/** @ingroup ipaddr */ +#define ip_addr_set_loopback_val(is_ipv6, ipaddr) do{if(is_ipv6){ \ + ip6_addr_set_loopback(ip_2_ip6(&(ipaddr))); IP_SET_TYPE_VAL(ipaddr, IPADDR_TYPE_V6); }else{ \ + ip4_addr_set_loopback(ip_2_ip4(&(ipaddr))); IP_SET_TYPE_VAL(ipaddr, IPADDR_TYPE_V4); ip_clear_no4(&ipaddr); }}while(0) +/** @ingroup ipaddr */ +#define ip_addr_set_hton(dest, src) do{if(IP_IS_V6(src)){ \ + ip6_addr_set_hton(ip_2_ip6(dest), ip_2_ip6(src)); IP_SET_TYPE(dest, IPADDR_TYPE_V6); }else{ \ + ip4_addr_set_hton(ip_2_ip4(dest), ip_2_ip4(src)); IP_SET_TYPE(dest, IPADDR_TYPE_V4); ip_clear_no4(ipaddr); }}while(0) +/** @ingroup ipaddr */ +#define ip_addr_get_network(target, host, netmask) do{if(IP_IS_V6(host)){ \ + ip4_addr_set_zero(ip_2_ip4(target)); IP_SET_TYPE(target, IPADDR_TYPE_V6); } else { \ + ip4_addr_get_network(ip_2_ip4(target), ip_2_ip4(host), ip_2_ip4(netmask)); IP_SET_TYPE(target, IPADDR_TYPE_V4); }}while(0) +/** @ingroup ipaddr */ +#define ip_addr_netcmp(addr1, addr2, mask) ((IP_IS_V6(addr1) && IP_IS_V6(addr2)) ? \ + 0 : \ + ip4_addr_netcmp(ip_2_ip4(addr1), ip_2_ip4(addr2), mask)) +/** @ingroup ipaddr */ +#define ip_addr_cmp(addr1, addr2) ((IP_GET_TYPE(addr1) != IP_GET_TYPE(addr2)) ? 0 : (IP_IS_V6_VAL(*(addr1)) ? \ + ip6_addr_cmp(ip_2_ip6(addr1), ip_2_ip6(addr2)) : \ + ip4_addr_cmp(ip_2_ip4(addr1), ip_2_ip4(addr2)))) +/** @ingroup ipaddr */ +#define ip_addr_cmp_zoneless(addr1, addr2) ((IP_GET_TYPE(addr1) != IP_GET_TYPE(addr2)) ? 0 : (IP_IS_V6_VAL(*(addr1)) ? \ + ip6_addr_cmp_zoneless(ip_2_ip6(addr1), ip_2_ip6(addr2)) : \ + ip4_addr_cmp(ip_2_ip4(addr1), ip_2_ip4(addr2)))) +/** @ingroup ipaddr */ +#define ip_addr_isany(ipaddr) (((ipaddr) == NULL) ? 1 : ((IP_IS_V6(ipaddr)) ? 
\ + ip6_addr_isany(ip_2_ip6(ipaddr)) : \ + ip4_addr_isany(ip_2_ip4(ipaddr)))) +/** @ingroup ipaddr */ +#define ip_addr_isany_val(ipaddr) ((IP_IS_V6_VAL(ipaddr)) ? \ + ip6_addr_isany_val(*ip_2_ip6(&(ipaddr))) : \ + ip4_addr_isany_val(*ip_2_ip4(&(ipaddr)))) +/** @ingroup ipaddr */ +#define ip_addr_isbroadcast(ipaddr, netif) ((IP_IS_V6(ipaddr)) ? \ + 0 : \ + ip4_addr_isbroadcast(ip_2_ip4(ipaddr), netif)) +/** @ingroup ipaddr */ +#define ip_addr_ismulticast(ipaddr) ((IP_IS_V6(ipaddr)) ? \ + ip6_addr_ismulticast(ip_2_ip6(ipaddr)) : \ + ip4_addr_ismulticast(ip_2_ip4(ipaddr))) +/** @ingroup ipaddr */ +#define ip_addr_isloopback(ipaddr) ((IP_IS_V6(ipaddr)) ? \ + ip6_addr_isloopback(ip_2_ip6(ipaddr)) : \ + ip4_addr_isloopback(ip_2_ip4(ipaddr))) +/** @ingroup ipaddr */ +#define ip_addr_islinklocal(ipaddr) ((IP_IS_V6(ipaddr)) ? \ + ip6_addr_islinklocal(ip_2_ip6(ipaddr)) : \ + ip4_addr_islinklocal(ip_2_ip4(ipaddr))) +#define ip_addr_debug_print(debug, ipaddr) do { if(IP_IS_V6(ipaddr)) { \ + ip6_addr_debug_print(debug, ip_2_ip6(ipaddr)); } else { \ + ip4_addr_debug_print(debug, ip_2_ip4(ipaddr)); }}while(0) +#define ip_addr_debug_print_val(debug, ipaddr) do { if(IP_IS_V6_VAL(ipaddr)) { \ + ip6_addr_debug_print_val(debug, *ip_2_ip6(&(ipaddr))); } else { \ + ip4_addr_debug_print_val(debug, *ip_2_ip4(&(ipaddr))); }}while(0) +char *ipaddr_ntoa(const ip_addr_t *addr); +char *ipaddr_ntoa_r(const ip_addr_t *addr, char *buf, int buflen); +int ipaddr_aton(const char *cp, ip_addr_t *addr); + +/** @ingroup ipaddr */ +#define IPADDR_STRLEN_MAX IP6ADDR_STRLEN_MAX + +/** @ingroup ipaddr */ +#define ip4_2_ipv4_mapped_ipv6(ip6addr, ip4addr) do { \ + (ip6addr)->addr[3] = (ip4addr)->addr; \ + (ip6addr)->addr[2] = PP_HTONL(0x0000FFFFUL); \ + (ip6addr)->addr[1] = 0; \ + (ip6addr)->addr[0] = 0; \ + ip6_addr_clear_zone(ip6addr); } while(0); + +/** @ingroup ipaddr */ +#define unmap_ipv4_mapped_ipv6(ip4addr, ip6addr) \ + (ip4addr)->addr = (ip6addr)->addr[3]; + +#define IP46_ADDR_ANY(type) (((type) == IPADDR_TYPE_V6)? 
IP6_ADDR_ANY : IP4_ADDR_ANY) + +#else /* LWIP_IPV4 && LWIP_IPV6 */ + +#define IP_ADDR_PCB_VERSION_MATCH(addr, pcb) 1 +#define IP_ADDR_PCB_VERSION_MATCH_EXACT(pcb, ipaddr) 1 + +#define ip_addr_set_any_val(is_ipv6, ipaddr) ip_addr_set_any(is_ipv6, &(ipaddr)) +#define ip_addr_set_loopback_val(is_ipv6, ipaddr) ip_addr_set_loopback(is_ipv6, &(ipaddr)) + +#if LWIP_IPV4 + +typedef ip4_addr_t ip_addr_t; +#define IPADDR4_INIT(u32val) { u32val } +#define IPADDR4_INIT_BYTES(a,b,c,d) IPADDR4_INIT(PP_HTONL(LWIP_MAKEU32(a,b,c,d))) +#define IP_IS_V4_VAL(ipaddr) 1 +#define IP_IS_V6_VAL(ipaddr) 0 +#define IP_IS_V4(ipaddr) 1 +#define IP_IS_V6(ipaddr) 0 +#define IP_IS_ANY_TYPE_VAL(ipaddr) 0 +#define IP_SET_TYPE_VAL(ipaddr, iptype) +#define IP_SET_TYPE(ipaddr, iptype) +#define IP_GET_TYPE(ipaddr) IPADDR_TYPE_V4 +#define IP_ADDR_RAW_SIZE(ipaddr) sizeof(ip4_addr_t) +#define ip_2_ip4(ipaddr) (ipaddr) +#define IP_ADDR4(ipaddr,a,b,c,d) IP4_ADDR(ipaddr,a,b,c,d) + +#define ip_addr_copy(dest, src) ip4_addr_copy(dest, src) +#define ip_addr_copy_from_ip4(dest, src) ip4_addr_copy(dest, src) +#define ip_addr_set_ip4_u32(ipaddr, val) ip4_addr_set_u32(ip_2_ip4(ipaddr), val) +#define ip_addr_set_ip4_u32_val(ipaddr, val) ip_addr_set_ip4_u32(&(ipaddr), val) +#define ip_addr_get_ip4_u32(ipaddr) ip4_addr_get_u32(ip_2_ip4(ipaddr)) +#define ip_addr_set(dest, src) ip4_addr_set(dest, src) +#define ip_addr_set_ipaddr(dest, src) ip4_addr_set(dest, src) +#define ip_addr_set_zero(ipaddr) ip4_addr_set_zero(ipaddr) +#define ip_addr_set_zero_ip4(ipaddr) ip4_addr_set_zero(ipaddr) +#define ip_addr_set_any(is_ipv6, ipaddr) ip4_addr_set_any(ipaddr) +#define ip_addr_set_loopback(is_ipv6, ipaddr) ip4_addr_set_loopback(ipaddr) +#define ip_addr_set_hton(dest, src) ip4_addr_set_hton(dest, src) +#define ip_addr_get_network(target, host, mask) ip4_addr_get_network(target, host, mask) +#define ip_addr_netcmp(addr1, addr2, mask) ip4_addr_netcmp(addr1, addr2, mask) +#define ip_addr_cmp(addr1, addr2) ip4_addr_cmp(addr1, addr2) +#define ip_addr_isany(ipaddr) ip4_addr_isany(ipaddr) +#define ip_addr_isany_val(ipaddr) ip4_addr_isany_val(ipaddr) +#define ip_addr_isloopback(ipaddr) ip4_addr_isloopback(ipaddr) +#define ip_addr_islinklocal(ipaddr) ip4_addr_islinklocal(ipaddr) +#define ip_addr_isbroadcast(addr, netif) ip4_addr_isbroadcast(addr, netif) +#define ip_addr_ismulticast(ipaddr) ip4_addr_ismulticast(ipaddr) +#define ip_addr_debug_print(debug, ipaddr) ip4_addr_debug_print(debug, ipaddr) +#define ip_addr_debug_print_val(debug, ipaddr) ip4_addr_debug_print_val(debug, ipaddr) +#define ipaddr_ntoa(ipaddr) ip4addr_ntoa(ipaddr) +#define ipaddr_ntoa_r(ipaddr, buf, buflen) ip4addr_ntoa_r(ipaddr, buf, buflen) +#define ipaddr_aton(cp, addr) ip4addr_aton(cp, addr) + +#define IPADDR_STRLEN_MAX IP4ADDR_STRLEN_MAX + +#define IP46_ADDR_ANY(type) (IP4_ADDR_ANY) + +#else /* LWIP_IPV4 */ + +typedef ip6_addr_t ip_addr_t; +#define IPADDR6_INIT(a, b, c, d) { { a, b, c, d } IPADDR6_ZONE_INIT } +#define IPADDR6_INIT_HOST(a, b, c, d) { { PP_HTONL(a), PP_HTONL(b), PP_HTONL(c), PP_HTONL(d) } IPADDR6_ZONE_INIT } +#define IP_IS_V4_VAL(ipaddr) 0 +#define IP_IS_V6_VAL(ipaddr) 1 +#define IP_IS_V4(ipaddr) 0 +#define IP_IS_V6(ipaddr) 1 +#define IP_IS_ANY_TYPE_VAL(ipaddr) 0 +#define IP_SET_TYPE_VAL(ipaddr, iptype) +#define IP_SET_TYPE(ipaddr, iptype) +#define IP_GET_TYPE(ipaddr) IPADDR_TYPE_V6 +#define IP_ADDR_RAW_SIZE(ipaddr) sizeof(ip6_addr_t) +#define ip_2_ip6(ipaddr) (ipaddr) +#define IP_ADDR6(ipaddr,i0,i1,i2,i3) IP6_ADDR(ipaddr,i0,i1,i2,i3) +#define IP_ADDR6_HOST(ipaddr,i0,i1,i2,i3) 
IP_ADDR6(ipaddr,PP_HTONL(i0),PP_HTONL(i1),PP_HTONL(i2),PP_HTONL(i3)) + +#define ip_addr_copy(dest, src) ip6_addr_copy(dest, src) +#define ip_addr_copy_from_ip6(dest, src) ip6_addr_copy(dest, src) +#define ip_addr_copy_from_ip6_packed(dest, src) ip6_addr_copy_from_packed(dest, src) +#define ip_addr_set(dest, src) ip6_addr_set(dest, src) +#define ip_addr_set_ipaddr(dest, src) ip6_addr_set(dest, src) +#define ip_addr_set_zero(ipaddr) ip6_addr_set_zero(ipaddr) +#define ip_addr_set_zero_ip6(ipaddr) ip6_addr_set_zero(ipaddr) +#define ip_addr_set_any(is_ipv6, ipaddr) ip6_addr_set_any(ipaddr) +#define ip_addr_set_loopback(is_ipv6, ipaddr) ip6_addr_set_loopback(ipaddr) +#define ip_addr_set_hton(dest, src) ip6_addr_set_hton(dest, src) +#define ip_addr_get_network(target, host, mask) ip6_addr_set_zero(target) +#define ip_addr_netcmp(addr1, addr2, mask) 0 +#define ip_addr_cmp(addr1, addr2) ip6_addr_cmp(addr1, addr2) +#define ip_addr_cmp_zoneless(addr1, addr2) ip6_addr_cmp_zoneless(addr1, addr2) +#define ip_addr_isany(ipaddr) ip6_addr_isany(ipaddr) +#define ip_addr_isany_val(ipaddr) ip6_addr_isany_val(ipaddr) +#define ip_addr_isloopback(ipaddr) ip6_addr_isloopback(ipaddr) +#define ip_addr_islinklocal(ipaddr) ip6_addr_islinklocal(ipaddr) +#define ip_addr_isbroadcast(addr, netif) 0 +#define ip_addr_ismulticast(ipaddr) ip6_addr_ismulticast(ipaddr) +#define ip_addr_debug_print(debug, ipaddr) ip6_addr_debug_print(debug, ipaddr) +#define ip_addr_debug_print_val(debug, ipaddr) ip6_addr_debug_print_val(debug, ipaddr) +#define ipaddr_ntoa(ipaddr) ip6addr_ntoa(ipaddr) +#define ipaddr_ntoa_r(ipaddr, buf, buflen) ip6addr_ntoa_r(ipaddr, buf, buflen) +#define ipaddr_aton(cp, addr) ip6addr_aton(cp, addr) + +#define IPADDR_STRLEN_MAX IP6ADDR_STRLEN_MAX + +#define IP46_ADDR_ANY(type) (IP6_ADDR_ANY) + +#endif /* LWIP_IPV4 */ +#endif /* LWIP_IPV4 && LWIP_IPV6 */ + +#if LWIP_IPV4 + +extern const ip_addr_t ip_addr_any; +extern const ip_addr_t ip_addr_broadcast; + +/** + * @ingroup ip4addr + * Can be used as a fixed/const ip_addr_t + * for the IP wildcard. + * Defined to @ref IP4_ADDR_ANY when IPv4 is enabled. + * Defined to @ref IP6_ADDR_ANY in IPv6 only systems. + * Use this if you can handle IPv4 _AND_ IPv6 addresses. + * Use @ref IP4_ADDR_ANY or @ref IP6_ADDR_ANY when the IP + * type matters. 
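
To illustrate the generic ip_addr_t API declared above, the following sketch (hypothetical function name; the dual-stack behaviour described assumes both LWIP_IPV4 and LWIP_IPV6 are enabled) parses a textual address and branches on its stored type:

#include "lwip/ip_addr.h"

/* Illustrative only: parse a literal into the generic ip_addr_t and
 * dispatch on the address type it ends up holding. */
static void classify(const char *text)
{
  ip_addr_t addr;

  /* In a dual-stack build this accepts "192.168.1.10" as well as "fe80::1". */
  if (ipaddr_aton(text, &addr) != 0) {
    if (IP_IS_V6(&addr)) {
      /* IPv6 handling, e.g. a PCB created with tcp_new_ip_type(IPADDR_TYPE_V6) */
    } else {
      /* IPv4 handling */
    }
  }
}
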
+ */ +#define IP_ADDR_ANY IP4_ADDR_ANY +/** + * @ingroup ip4addr + * Can be used as a fixed/const ip_addr_t + * for the IPv4 wildcard and the broadcast address + */ +#define IP4_ADDR_ANY (&ip_addr_any) +/** + * @ingroup ip4addr + * Can be used as a fixed/const ip4_addr_t + * for the wildcard and the broadcast address + */ +#define IP4_ADDR_ANY4 (ip_2_ip4(&ip_addr_any)) + +/** @ingroup ip4addr */ +#define IP_ADDR_BROADCAST (&ip_addr_broadcast) +/** @ingroup ip4addr */ +#define IP4_ADDR_BROADCAST (ip_2_ip4(&ip_addr_broadcast)) + +#endif /* LWIP_IPV4*/ + +#if LWIP_IPV6 + +extern const ip_addr_t ip6_addr_any; + +/** + * @ingroup ip6addr + * IP6_ADDR_ANY can be used as a fixed ip_addr_t + * for the IPv6 wildcard address + */ +#define IP6_ADDR_ANY (&ip6_addr_any) +/** + * @ingroup ip6addr + * IP6_ADDR_ANY6 can be used as a fixed ip6_addr_t + * for the IPv6 wildcard address + */ +#define IP6_ADDR_ANY6 (ip_2_ip6(&ip6_addr_any)) + +#if !LWIP_IPV4 +/** IPv6-only configurations */ +#define IP_ADDR_ANY IP6_ADDR_ANY +#endif /* !LWIP_IPV4 */ + +#endif + +#if LWIP_IPV4 && LWIP_IPV6 +/** @ingroup ipaddr */ +#define IP_ANY_TYPE (&ip_addr_any_type) +#else +#define IP_ANY_TYPE IP_ADDR_ANY +#endif + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_IP_ADDR_H */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/mem.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/mem.h new file mode 100644 index 0000000..424de91 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/mem.h @@ -0,0 +1,82 @@ +/** + * @file + * Heap API + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
+ * + * Author: Adam Dunkels + * + */ +#ifndef LWIP_HDR_MEM_H +#define LWIP_HDR_MEM_H + +#include "lwip/opt.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#if MEM_LIBC_MALLOC + +#include "lwip/arch.h" + +typedef size_t mem_size_t; +#define MEM_SIZE_F SZT_F + +#elif MEM_USE_POOLS + +typedef u16_t mem_size_t; +#define MEM_SIZE_F U16_F + +#else + +/* MEM_SIZE would have to be aligned, but using 64000 here instead of + * 65535 leaves some room for alignment... + */ +#if MEM_SIZE > 64000L +typedef u32_t mem_size_t; +#define MEM_SIZE_F U32_F +#else +typedef u16_t mem_size_t; +#define MEM_SIZE_F U16_F +#endif /* MEM_SIZE > 64000 */ +#endif + +void mem_init(void); +void *mem_trim(void *mem, mem_size_t size); +void *mem_malloc(mem_size_t size); +void *mem_calloc(mem_size_t count, mem_size_t size); +void mem_free(void *mem); + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_MEM_H */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/memp.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/memp.h new file mode 100644 index 0000000..74ac818 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/memp.h @@ -0,0 +1,155 @@ +/** + * @file + * Memory pool API + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ + +#ifndef LWIP_HDR_MEMP_H +#define LWIP_HDR_MEMP_H + +#include "lwip/opt.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* run once with empty definition to handle all custom includes in lwippools.h */ +#define LWIP_MEMPOOL(name,num,size,desc) +#include "lwip/priv/memp_std.h" + +/** Create the list of all memory pools managed by memp. 
MEMP_MAX represents a NULL pool at the end */ +typedef enum { +#define LWIP_MEMPOOL(name,num,size,desc) MEMP_##name, +#include "lwip/priv/memp_std.h" + MEMP_MAX +} memp_t; + +#include "lwip/priv/memp_priv.h" +#include "lwip/stats.h" + +extern const struct memp_desc* const memp_pools[MEMP_MAX]; + +/** + * @ingroup mempool + * Declare prototype for private memory pool if it is used in multiple files + */ +#define LWIP_MEMPOOL_PROTOTYPE(name) extern const struct memp_desc memp_ ## name + +#if MEMP_MEM_MALLOC + +#define LWIP_MEMPOOL_DECLARE(name,num,size,desc) \ + LWIP_MEMPOOL_DECLARE_STATS_INSTANCE(memp_stats_ ## name) \ + const struct memp_desc memp_ ## name = { \ + DECLARE_LWIP_MEMPOOL_DESC(desc) \ + LWIP_MEMPOOL_DECLARE_STATS_REFERENCE(memp_stats_ ## name) \ + LWIP_MEM_ALIGN_SIZE(size) \ + }; + +#else /* MEMP_MEM_MALLOC */ + +/** + * @ingroup mempool + * Declare a private memory pool + * Private mempools example: + * .h: only when pool is used in multiple .c files: LWIP_MEMPOOL_PROTOTYPE(my_private_pool); + * .c: + * - in global variables section: LWIP_MEMPOOL_DECLARE(my_private_pool, 10, sizeof(foo), "Some description") + * - call ONCE before using pool (e.g. in some init() function): LWIP_MEMPOOL_INIT(my_private_pool); + * - allocate: void* my_new_mem = LWIP_MEMPOOL_ALLOC(my_private_pool); + * - free: LWIP_MEMPOOL_FREE(my_private_pool, my_new_mem); + * + * To relocate a pool, declare it as extern in cc.h. Example for GCC: + * extern u8_t \_\_attribute\_\_((section(".onchip_mem"))) memp_memory_my_private_pool_base[]; + */ +#define LWIP_MEMPOOL_DECLARE(name,num,size,desc) \ + LWIP_DECLARE_MEMORY_ALIGNED(memp_memory_ ## name ## _base, ((num) * (MEMP_SIZE + MEMP_ALIGN_SIZE(size)))); \ + \ + LWIP_MEMPOOL_DECLARE_STATS_INSTANCE(memp_stats_ ## name) \ + \ + static struct memp *memp_tab_ ## name; \ + \ + const struct memp_desc memp_ ## name = { \ + DECLARE_LWIP_MEMPOOL_DESC(desc) \ + LWIP_MEMPOOL_DECLARE_STATS_REFERENCE(memp_stats_ ## name) \ + LWIP_MEM_ALIGN_SIZE(size), \ + (num), \ + memp_memory_ ## name ## _base, \ + &memp_tab_ ## name \ + }; + +#endif /* MEMP_MEM_MALLOC */ + +/** + * @ingroup mempool + * Initialize a private memory pool + */ +#define LWIP_MEMPOOL_INIT(name) memp_init_pool(&memp_ ## name) +/** + * @ingroup mempool + * Allocate from a private memory pool + */ +#define LWIP_MEMPOOL_ALLOC(name) memp_malloc_pool(&memp_ ## name) +/** + * @ingroup mempool + * Free element from a private memory pool + */ +#define LWIP_MEMPOOL_FREE(name, x) memp_free_pool(&memp_ ## name, (x)) + +#if MEM_USE_POOLS +/** This structure is used to save the pool one element came from. + * This has to be defined here as it is required for pool size calculation. 
*/ +struct memp_malloc_helper +{ + memp_t poolnr; +#if MEMP_OVERFLOW_CHECK || (LWIP_STATS && MEM_STATS) + u16_t size; +#endif /* MEMP_OVERFLOW_CHECK || (LWIP_STATS && MEM_STATS) */ +}; +#endif /* MEM_USE_POOLS */ + +void memp_init(void); + +#if MEMP_OVERFLOW_CHECK +void *memp_malloc_fn(memp_t type, const char* file, const int line); +#define memp_malloc(t) memp_malloc_fn((t), __FILE__, __LINE__) +#else +void *memp_malloc(memp_t type); +#endif +void memp_free(memp_t type, void *mem); + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_MEMP_H */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/mld6.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/mld6.h new file mode 100644 index 0000000..f8c99d9 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/mld6.h @@ -0,0 +1,99 @@ +/** + * @file + * + * Multicast listener discovery for IPv6. Aims to be compliant with RFC 2710. + * No support for MLDv2. + */ + +/* + * Copyright (c) 2010 Inico Technologies Ltd. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
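The private-pool macros documented in memp.h above are used roughly as in the following sketch; the pool name, element type and element count are made up for illustration.

    #include "lwip/memp.h"

    struct my_item {                      /* hypothetical element type */
      u32_t id;
      void *payload;
    };

    /* Room for 10 elements; the description string is only used for stats/debugging. */
    LWIP_MEMPOOL_DECLARE(my_private_pool, 10, sizeof(struct my_item), "example pool")

    static void pool_example(void)
    {
      struct my_item *item;

      LWIP_MEMPOOL_INIT(my_private_pool);   /* call once before first use */
      item = (struct my_item *)LWIP_MEMPOOL_ALLOC(my_private_pool);
      if (item != NULL) {
        item->id = 1U;
        LWIP_MEMPOOL_FREE(my_private_pool, item);
      }
    }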
+ * + * Author: Ivan Delamer + * + * + * Please coordinate changes and requests with Ivan Delamer + * + */ + +#ifndef LWIP_HDR_MLD6_H +#define LWIP_HDR_MLD6_H + +#include "lwip/opt.h" + +#if LWIP_IPV6_MLD && LWIP_IPV6 /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/pbuf.h" +#include "lwip/netif.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/** MLD group */ +struct mld_group { + /** next link */ + struct mld_group *next; + /** multicast address */ + ip6_addr_t group_address; + /** signifies we were the last person to report */ + u8_t last_reporter_flag; + /** current state of the group */ + u8_t group_state; + /** timer for reporting */ + u16_t timer; + /** counter of simultaneous uses */ + u8_t use; +}; + +#define MLD6_TMR_INTERVAL 100 /* Milliseconds */ + +err_t mld6_stop(struct netif *netif); +void mld6_report_groups(struct netif *netif); +void mld6_tmr(void); +struct mld_group *mld6_lookfor_group(struct netif *ifp, const ip6_addr_t *addr); +void mld6_input(struct pbuf *p, struct netif *inp); +err_t mld6_joingroup(const ip6_addr_t *srcaddr, const ip6_addr_t *groupaddr); +err_t mld6_joingroup_netif(struct netif *netif, const ip6_addr_t *groupaddr); +err_t mld6_leavegroup(const ip6_addr_t *srcaddr, const ip6_addr_t *groupaddr); +err_t mld6_leavegroup_netif(struct netif *netif, const ip6_addr_t *groupaddr); + +/** @ingroup mld6 + * Get list head of MLD6 groups for netif. + * Note: The allnodes group IP is NOT in the list, since it must always + * be received for correct IPv6 operation. + * @see @ref netif_set_mld_mac_filter() + */ +#define netif_mld6_data(netif) ((struct mld_group *)netif_get_client_data(netif, LWIP_NETIF_CLIENT_DATA_INDEX_MLD6)) + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_IPV6_MLD && LWIP_IPV6 */ + +#endif /* LWIP_HDR_MLD6_H */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/nd6.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/nd6.h new file mode 100644 index 0000000..89137d5 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/nd6.h @@ -0,0 +1,90 @@ +/** + * @file + * + * Neighbor discovery and stateless address autoconfiguration for IPv6. + * Aims to be compliant with RFC 4861 (Neighbor discovery) and RFC 4862 + * (Address autoconfiguration). + */ + +/* + * Copyright (c) 2010 Inico Technologies Ltd. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
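For the MLD6 API above, a netif receives IPv6 multicast other than the always-on all-nodes group only after the application joins the group. A sketch, assuming LWIP_IPV6 and LWIP_IPV6_MLD are enabled and using ff02::fb (mDNS) purely as an example group address:

    #include "lwip/mld6.h"
    #include "lwip/ip6_addr.h"
    #include "lwip/def.h"

    static err_t join_example_group(struct netif *netif)
    {
      ip6_addr_t group;

      /* ff02::fb expressed as four 32-bit words in network byte order */
      IP6_ADDR(&group, PP_HTONL(0xFF020000UL), 0, 0, PP_HTONL(0x000000FBUL));
      return mld6_joingroup_netif(netif, &group);
    }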
IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Ivan Delamer + * + * + * Please coordinate changes and requests with Ivan Delamer + * + */ + +#ifndef LWIP_HDR_ND6_H +#define LWIP_HDR_ND6_H + +#include "lwip/opt.h" + +#if LWIP_IPV6 /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/ip6_addr.h" +#include "lwip/err.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/** 1 second period */ +#define ND6_TMR_INTERVAL 1000 + +/** Router solicitations are sent in 4 second intervals (see RFC 4861, ch. 6.3.7) */ +#ifndef ND6_RTR_SOLICITATION_INTERVAL +#define ND6_RTR_SOLICITATION_INTERVAL 4000 +#endif + +struct pbuf; +struct netif; + +void nd6_tmr(void); +void nd6_input(struct pbuf *p, struct netif *inp); +void nd6_clear_destination_cache(void); +struct netif *nd6_find_route(const ip6_addr_t *ip6addr); +err_t nd6_get_next_hop_addr_or_queue(struct netif *netif, struct pbuf *q, const ip6_addr_t *ip6addr, const u8_t **hwaddrp); +u16_t nd6_get_destination_mtu(const ip6_addr_t *ip6addr, struct netif *netif); +#if LWIP_ND6_TCP_REACHABILITY_HINTS +void nd6_reachability_hint(const ip6_addr_t *ip6addr); +#endif /* LWIP_ND6_TCP_REACHABILITY_HINTS */ +void nd6_cleanup_netif(struct netif *netif); +#if LWIP_IPV6_MLD +void nd6_adjust_mld_membership(struct netif *netif, s8_t addr_idx, u8_t new_state); +#endif /* LWIP_IPV6_MLD */ +void nd6_restart_netif(struct netif *netif); + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_IPV6 */ + +#endif /* LWIP_HDR_ND6_H */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/netbuf.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/netbuf.h new file mode 100644 index 0000000..e92c934 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/netbuf.h @@ -0,0 +1,116 @@ +/** + * @file + * netbuf API (for netconn API) + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ +#ifndef LWIP_HDR_NETBUF_H +#define LWIP_HDR_NETBUF_H + +#include "lwip/opt.h" + +#if LWIP_NETCONN || LWIP_SOCKET /* don't build if not configured for use in lwipopts.h */ +/* Note: Netconn API is always available when sockets are enabled - + * sockets are implemented on top of them */ + +#include "lwip/pbuf.h" +#include "lwip/ip_addr.h" +#include "lwip/ip6_addr.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/** This netbuf has dest-addr/port set */ +#define NETBUF_FLAG_DESTADDR 0x01 +/** This netbuf includes a checksum */ +#define NETBUF_FLAG_CHKSUM 0x02 + +/** "Network buffer" - contains data and addressing info */ +struct netbuf { + struct pbuf *p, *ptr; + ip_addr_t addr; + u16_t port; +#if LWIP_NETBUF_RECVINFO || LWIP_CHECKSUM_ON_COPY + u8_t flags; + u16_t toport_chksum; +#if LWIP_NETBUF_RECVINFO + ip_addr_t toaddr; +#endif /* LWIP_NETBUF_RECVINFO */ +#endif /* LWIP_NETBUF_RECVINFO || LWIP_CHECKSUM_ON_COPY */ +}; + +/* Network buffer functions: */ +struct netbuf * netbuf_new (void); +void netbuf_delete (struct netbuf *buf); +void * netbuf_alloc (struct netbuf *buf, u16_t size); +void netbuf_free (struct netbuf *buf); +err_t netbuf_ref (struct netbuf *buf, + const void *dataptr, u16_t size); +void netbuf_chain (struct netbuf *head, struct netbuf *tail); + +err_t netbuf_data (struct netbuf *buf, + void **dataptr, u16_t *len); +s8_t netbuf_next (struct netbuf *buf); +void netbuf_first (struct netbuf *buf); + + +#define netbuf_copy_partial(buf, dataptr, len, offset) \ + pbuf_copy_partial((buf)->p, (dataptr), (len), (offset)) +#define netbuf_copy(buf,dataptr,len) netbuf_copy_partial(buf, dataptr, len, 0) +#define netbuf_take(buf, dataptr, len) pbuf_take((buf)->p, dataptr, len) +#define netbuf_len(buf) ((buf)->p->tot_len) +#define netbuf_fromaddr(buf) (&((buf)->addr)) +#define netbuf_set_fromaddr(buf, fromaddr) ip_addr_set(&((buf)->addr), fromaddr) +#define netbuf_fromport(buf) ((buf)->port) +#if LWIP_NETBUF_RECVINFO +#define netbuf_destaddr(buf) (&((buf)->toaddr)) +#define netbuf_set_destaddr(buf, destaddr) ip_addr_set(&((buf)->toaddr), destaddr) +#if LWIP_CHECKSUM_ON_COPY +#define netbuf_destport(buf) (((buf)->flags & NETBUF_FLAG_DESTADDR) ? 
(buf)->toport_chksum : 0) +#else /* LWIP_CHECKSUM_ON_COPY */ +#define netbuf_destport(buf) ((buf)->toport_chksum) +#endif /* LWIP_CHECKSUM_ON_COPY */ +#endif /* LWIP_NETBUF_RECVINFO */ +#if LWIP_CHECKSUM_ON_COPY +#define netbuf_set_chksum(buf, chksum) do { (buf)->flags = NETBUF_FLAG_CHKSUM; \ + (buf)->toport_chksum = chksum; } while(0) +#endif /* LWIP_CHECKSUM_ON_COPY */ + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_NETCONN || LWIP_SOCKET */ + +#endif /* LWIP_HDR_NETBUF_H */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/netdb.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/netdb.h new file mode 100644 index 0000000..6aae914 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/netdb.h @@ -0,0 +1,150 @@ +/** + * @file + * NETDB API (sockets) + */ + +/* + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
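The netbuf calls above wrap a pbuf chain for use with the sequential API. A minimal allocate/copy/iterate sketch that is independent of any particular connection; the payload is a placeholder:

    #include "lwip/netbuf.h"

    static void netbuf_example(void)
    {
      static const char msg[] = "hello";  /* illustrative payload */
      struct netbuf *buf;
      void *data;
      u16_t len;

      buf = netbuf_new();
      if (buf == NULL) {
        return;
      }
      /* Reserve space inside the netbuf, then copy the payload into it. */
      if (netbuf_alloc(buf, sizeof(msg)) != NULL) {
        netbuf_take(buf, msg, sizeof(msg));
        /* Walk the (possibly chained) fragments of the buffer. */
        do {
          netbuf_data(buf, &data, &len);
          /* ... process 'len' bytes at 'data' ... */
        } while (netbuf_next(buf) >= 0);
      }
      netbuf_delete(buf);
    }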
+ * + * Author: Simon Goldschmidt + * + */ +#ifndef LWIP_HDR_NETDB_H +#define LWIP_HDR_NETDB_H + +#include "lwip/opt.h" + +#if LWIP_DNS && LWIP_SOCKET + +#include "lwip/arch.h" +#include "lwip/inet.h" +#include "lwip/sockets.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* some rarely used options */ +#ifndef LWIP_DNS_API_DECLARE_H_ERRNO +#define LWIP_DNS_API_DECLARE_H_ERRNO 1 +#endif + +#ifndef LWIP_DNS_API_DEFINE_ERRORS +#define LWIP_DNS_API_DEFINE_ERRORS 1 +#endif + +#ifndef LWIP_DNS_API_DEFINE_FLAGS +#define LWIP_DNS_API_DEFINE_FLAGS 1 +#endif + +#ifndef LWIP_DNS_API_DECLARE_STRUCTS +#define LWIP_DNS_API_DECLARE_STRUCTS 1 +#endif + +#if LWIP_DNS_API_DEFINE_ERRORS +/** Errors used by the DNS API functions, h_errno can be one of them */ +#define EAI_NONAME 200 +#define EAI_SERVICE 201 +#define EAI_FAIL 202 +#define EAI_MEMORY 203 +#define EAI_FAMILY 204 + +#define HOST_NOT_FOUND 210 +#define NO_DATA 211 +#define NO_RECOVERY 212 +#define TRY_AGAIN 213 +#endif /* LWIP_DNS_API_DEFINE_ERRORS */ + +#if LWIP_DNS_API_DEFINE_FLAGS +/* input flags for struct addrinfo */ +#define AI_PASSIVE 0x01 +#define AI_CANONNAME 0x02 +#define AI_NUMERICHOST 0x04 +#define AI_NUMERICSERV 0x08 +#define AI_V4MAPPED 0x10 +#define AI_ALL 0x20 +#define AI_ADDRCONFIG 0x40 +#endif /* LWIP_DNS_API_DEFINE_FLAGS */ + +#if LWIP_DNS_API_DECLARE_STRUCTS +struct hostent { + char *h_name; /* Official name of the host. */ + char **h_aliases; /* A pointer to an array of pointers to alternative host names, + terminated by a null pointer. */ + int h_addrtype; /* Address type. */ + int h_length; /* The length, in bytes, of the address. */ + char **h_addr_list; /* A pointer to an array of pointers to network addresses (in + network byte order) for the host, terminated by a null pointer. */ +#define h_addr h_addr_list[0] /* for backward compatibility */ +}; + +struct addrinfo { + int ai_flags; /* Input flags. */ + int ai_family; /* Address family of socket. */ + int ai_socktype; /* Socket type. */ + int ai_protocol; /* Protocol of socket. */ + socklen_t ai_addrlen; /* Length of socket address. */ + struct sockaddr *ai_addr; /* Socket address of socket. */ + char *ai_canonname; /* Canonical name of service location. */ + struct addrinfo *ai_next; /* Pointer to next in list. 
*/ +}; +#endif /* LWIP_DNS_API_DECLARE_STRUCTS */ + +#define NETDB_ELEM_SIZE (sizeof(struct addrinfo) + sizeof(struct sockaddr_storage) + DNS_MAX_NAME_LENGTH + 1) + +#if LWIP_DNS_API_DECLARE_H_ERRNO +/* application accessible error code set by the DNS API functions */ +extern int h_errno; +#endif /* LWIP_DNS_API_DECLARE_H_ERRNO*/ + +struct hostent *lwip_gethostbyname(const char *name); +int lwip_gethostbyname_r(const char *name, struct hostent *ret, char *buf, + size_t buflen, struct hostent **result, int *h_errnop); +void lwip_freeaddrinfo(struct addrinfo *ai); +int lwip_getaddrinfo(const char *nodename, + const char *servname, + const struct addrinfo *hints, + struct addrinfo **res); + +#if LWIP_COMPAT_SOCKETS +/** @ingroup netdbapi */ +#define gethostbyname(name) lwip_gethostbyname(name) +/** @ingroup netdbapi */ +#define gethostbyname_r(name, ret, buf, buflen, result, h_errnop) \ + lwip_gethostbyname_r(name, ret, buf, buflen, result, h_errnop) +/** @ingroup netdbapi */ +#define freeaddrinfo(addrinfo) lwip_freeaddrinfo(addrinfo) +/** @ingroup netdbapi */ +#define getaddrinfo(nodname, servname, hints, res) \ + lwip_getaddrinfo(nodname, servname, hints, res) +#endif /* LWIP_COMPAT_SOCKETS */ + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_DNS && LWIP_SOCKET */ + +#endif /* LWIP_HDR_NETDB_H */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/netif.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/netif.h new file mode 100644 index 0000000..d3ec064 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/netif.h @@ -0,0 +1,669 @@ +/** + * @file + * netif API (to be used from TCPIP thread) + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
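The resolver API above follows the BSD getaddrinfo pattern. A sketch of one lookup and the mandatory free; the hostname and service strings are placeholders, and this header is only built when LWIP_DNS and LWIP_SOCKET are enabled:

    #include "lwip/netdb.h"
    #include <string.h>

    static int resolve_example(void)
    {
      struct addrinfo hints;
      struct addrinfo *res = NULL;
      int ret;

      memset(&hints, 0, sizeof(hints));
      hints.ai_family = AF_INET;      /* IPv4 only, to keep the sketch short */
      hints.ai_socktype = SOCK_DGRAM;

      ret = lwip_getaddrinfo("host.example", "7400", &hints, &res);
      if (ret == 0) {
        /* res->ai_addr can now be passed to connect()/sendto(); always free it. */
        lwip_freeaddrinfo(res);
      }
      return ret;
    }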
+ * + * Author: Adam Dunkels + * + */ +#ifndef LWIP_HDR_NETIF_H +#define LWIP_HDR_NETIF_H + +#include "lwip/opt.h" + +#define ENABLE_LOOPBACK (LWIP_NETIF_LOOPBACK || LWIP_HAVE_LOOPIF) + +#include "lwip/err.h" + +#include "lwip/ip_addr.h" + +#include "lwip/def.h" +#include "lwip/pbuf.h" +#include "lwip/stats.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* Throughout this file, IP addresses are expected to be in + * the same byte order as in IP_PCB. */ + +/** Must be the maximum of all used hardware address lengths + across all types of interfaces in use. + This does not have to be changed, normally. */ +#ifndef NETIF_MAX_HWADDR_LEN +#define NETIF_MAX_HWADDR_LEN 6U +#endif + +/** The size of a fully constructed netif name which the + * netif can be identified by in APIs. Composed of + * 2 chars, 3 (max) digits, and 1 \0 + */ +#define NETIF_NAMESIZE 6 + +/** + * @defgroup netif_flags Flags + * @ingroup netif + * @{ + */ + +/** Whether the network interface is 'up'. This is + * a software flag used to control whether this network + * interface is enabled and processes traffic. + * It must be set by the startup code before this netif can be used + * (also for dhcp/autoip). + */ +#define NETIF_FLAG_UP 0x01U +/** If set, the netif has broadcast capability. + * Set by the netif driver in its init function. */ +#define NETIF_FLAG_BROADCAST 0x02U +/** If set, the interface has an active link + * (set by the network interface driver). + * Either set by the netif driver in its init function (if the link + * is up at that time) or at a later point once the link comes up + * (if link detection is supported by the hardware). */ +#define NETIF_FLAG_LINK_UP 0x04U +/** If set, the netif is an ethernet device using ARP. + * Set by the netif driver in its init function. + * Used to check input packet types and use of DHCP. */ +#define NETIF_FLAG_ETHARP 0x08U +/** If set, the netif is an ethernet device. It might not use + * ARP or TCP/IP if it is used for PPPoE only. + */ +#define NETIF_FLAG_ETHERNET 0x10U +/** If set, the netif has IGMP capability. + * Set by the netif driver in its init function. */ +#define NETIF_FLAG_IGMP 0x20U +/** If set, the netif has MLD6 capability. + * Set by the netif driver in its init function. */ +#define NETIF_FLAG_MLD6 0x40U + +/** + * @} + */ + +enum lwip_internal_netif_client_data_index +{ +#if LWIP_IPV4 +#if LWIP_DHCP + LWIP_NETIF_CLIENT_DATA_INDEX_DHCP, +#endif +#if LWIP_AUTOIP + LWIP_NETIF_CLIENT_DATA_INDEX_AUTOIP, +#endif +#if LWIP_IGMP + LWIP_NETIF_CLIENT_DATA_INDEX_IGMP, +#endif +#endif /* LWIP_IPV4 */ +#if LWIP_IPV6 +#if LWIP_IPV6_DHCP6 + LWIP_NETIF_CLIENT_DATA_INDEX_DHCP6, +#endif +#if LWIP_IPV6_MLD + LWIP_NETIF_CLIENT_DATA_INDEX_MLD6, +#endif +#endif /* LWIP_IPV6 */ + LWIP_NETIF_CLIENT_DATA_INDEX_MAX +}; + +#if LWIP_CHECKSUM_CTRL_PER_NETIF +#define NETIF_CHECKSUM_GEN_IP 0x0001 +#define NETIF_CHECKSUM_GEN_UDP 0x0002 +#define NETIF_CHECKSUM_GEN_TCP 0x0004 +#define NETIF_CHECKSUM_GEN_ICMP 0x0008 +#define NETIF_CHECKSUM_GEN_ICMP6 0x0010 +#define NETIF_CHECKSUM_CHECK_IP 0x0100 +#define NETIF_CHECKSUM_CHECK_UDP 0x0200 +#define NETIF_CHECKSUM_CHECK_TCP 0x0400 +#define NETIF_CHECKSUM_CHECK_ICMP 0x0800 +#define NETIF_CHECKSUM_CHECK_ICMP6 0x1000 +#define NETIF_CHECKSUM_ENABLE_ALL 0xFFFF +#define NETIF_CHECKSUM_DISABLE_ALL 0x0000 +#endif /* LWIP_CHECKSUM_CTRL_PER_NETIF */ + +struct netif; + +/** MAC Filter Actions, these are passed to a netif's igmp_mac_filter or + * mld_mac_filter callback function. 
*/ +enum netif_mac_filter_action { + /** Delete a filter entry */ + NETIF_DEL_MAC_FILTER = 0, + /** Add a filter entry */ + NETIF_ADD_MAC_FILTER = 1 +}; + +/** Function prototype for netif init functions. Set up flags and output/linkoutput + * callback functions in this function. + * + * @param netif The netif to initialize + */ +typedef err_t (*netif_init_fn)(struct netif *netif); +/** Function prototype for netif->input functions. This function is saved as 'input' + * callback function in the netif struct. Call it when a packet has been received. + * + * @param p The received packet, copied into a pbuf + * @param inp The netif which received the packet + * @return ERR_OK if the packet was handled + * != ERR_OK is the packet was NOT handled, in this case, the caller has + * to free the pbuf + */ +typedef err_t (*netif_input_fn)(struct pbuf *p, struct netif *inp); + +#if LWIP_IPV4 +/** Function prototype for netif->output functions. Called by lwIP when a packet + * shall be sent. For ethernet netif, set this to 'etharp_output' and set + * 'linkoutput'. + * + * @param netif The netif which shall send a packet + * @param p The packet to send (p->payload points to IP header) + * @param ipaddr The IP address to which the packet shall be sent + */ +typedef err_t (*netif_output_fn)(struct netif *netif, struct pbuf *p, + const ip4_addr_t *ipaddr); +#endif /* LWIP_IPV4*/ + +#if LWIP_IPV6 +/** Function prototype for netif->output_ip6 functions. Called by lwIP when a packet + * shall be sent. For ethernet netif, set this to 'ethip6_output' and set + * 'linkoutput'. + * + * @param netif The netif which shall send a packet + * @param p The packet to send (p->payload points to IP header) + * @param ipaddr The IPv6 address to which the packet shall be sent + */ +typedef err_t (*netif_output_ip6_fn)(struct netif *netif, struct pbuf *p, + const ip6_addr_t *ipaddr); +#endif /* LWIP_IPV6 */ + +/** Function prototype for netif->linkoutput functions. Only used for ethernet + * netifs. This function is called by ARP when a packet shall be sent. + * + * @param netif The netif which shall send a packet + * @param p The packet to send (raw ethernet packet) + */ +typedef err_t (*netif_linkoutput_fn)(struct netif *netif, struct pbuf *p); +/** Function prototype for netif status- or link-callback functions. */ +typedef void (*netif_status_callback_fn)(struct netif *netif); +#if LWIP_IPV4 && LWIP_IGMP +/** Function prototype for netif igmp_mac_filter functions */ +typedef err_t (*netif_igmp_mac_filter_fn)(struct netif *netif, + const ip4_addr_t *group, enum netif_mac_filter_action action); +#endif /* LWIP_IPV4 && LWIP_IGMP */ +#if LWIP_IPV6 && LWIP_IPV6_MLD +/** Function prototype for netif mld_mac_filter functions */ +typedef err_t (*netif_mld_mac_filter_fn)(struct netif *netif, + const ip6_addr_t *group, enum netif_mac_filter_action action); +#endif /* LWIP_IPV6 && LWIP_IPV6_MLD */ + +#if LWIP_DHCP || LWIP_AUTOIP || LWIP_IGMP || LWIP_IPV6_MLD || LWIP_IPV6_DHCP6 || (LWIP_NUM_NETIF_CLIENT_DATA > 0) +#if LWIP_NUM_NETIF_CLIENT_DATA > 0 +u8_t netif_alloc_client_data_id(void); +#endif +/** @ingroup netif_cd + * Set client data. Obtain ID from netif_alloc_client_data_id(). + */ +#define netif_set_client_data(netif, id, data) netif_get_client_data(netif, id) = (data) +/** @ingroup netif_cd + * Get client data. Obtain ID from netif_alloc_client_data_id(). 
+ */ +#define netif_get_client_data(netif, id) (netif)->client_data[(id)] +#endif + +#if (LWIP_IPV4 && LWIP_ARP && (ARP_TABLE_SIZE > 0x7f)) || (LWIP_IPV6 && (LWIP_ND6_NUM_DESTINATIONS > 0x7f)) +typedef u16_t netif_addr_idx_t; +#define NETIF_ADDR_IDX_MAX 0x7FFF +#else +typedef u8_t netif_addr_idx_t; +#define NETIF_ADDR_IDX_MAX 0x7F +#endif + +#if LWIP_NETIF_HWADDRHINT +#define LWIP_NETIF_USE_HINTS 1 +struct netif_hint { + netif_addr_idx_t addr_hint; +}; +#else /* LWIP_NETIF_HWADDRHINT */ +#define LWIP_NETIF_USE_HINTS 0 +#endif /* LWIP_NETIF_HWADDRHINT */ + +/** Generic data structure used for all lwIP network interfaces. + * The following fields should be filled in by the initialization + * function for the device driver: hwaddr_len, hwaddr[], mtu, flags */ +struct netif { +#if !LWIP_SINGLE_NETIF + /** pointer to next in linked list */ + struct netif *next; +#endif + +#if LWIP_IPV4 + /** IP address configuration in network byte order */ + ip_addr_t ip_addr; + ip_addr_t netmask; + ip_addr_t gw; +#endif /* LWIP_IPV4 */ +#if LWIP_IPV6 + /** Array of IPv6 addresses for this netif. */ + ip_addr_t ip6_addr[LWIP_IPV6_NUM_ADDRESSES]; + /** The state of each IPv6 address (Tentative, Preferred, etc). + * @see ip6_addr.h */ + u8_t ip6_addr_state[LWIP_IPV6_NUM_ADDRESSES]; +#if LWIP_IPV6_ADDRESS_LIFETIMES + /** Remaining valid and preferred lifetime of each IPv6 address, in seconds. + * For valid lifetimes, the special value of IP6_ADDR_LIFE_STATIC (0) + * indicates the address is static and has no lifetimes. */ + u32_t ip6_addr_valid_life[LWIP_IPV6_NUM_ADDRESSES]; + u32_t ip6_addr_pref_life[LWIP_IPV6_NUM_ADDRESSES]; +#endif /* LWIP_IPV6_ADDRESS_LIFETIMES */ +#endif /* LWIP_IPV6 */ + /** This function is called by the network device driver + * to pass a packet up the TCP/IP stack. */ + netif_input_fn input; +#if LWIP_IPV4 + /** This function is called by the IP module when it wants + * to send a packet on the interface. This function typically + * first resolves the hardware address, then sends the packet. + * For ethernet physical layer, this is usually etharp_output() */ + netif_output_fn output; +#endif /* LWIP_IPV4 */ + /** This function is called by ethernet_output() when it wants + * to send a packet on the interface. This function outputs + * the pbuf as-is on the link medium. */ + netif_linkoutput_fn linkoutput; +#if LWIP_IPV6 + /** This function is called by the IPv6 module when it wants + * to send a packet on the interface. This function typically + * first resolves the hardware address, then sends the packet. + * For ethernet physical layer, this is usually ethip6_output() */ + netif_output_ip6_fn output_ip6; +#endif /* LWIP_IPV6 */ +#if LWIP_NETIF_STATUS_CALLBACK + /** This function is called when the netif state is set to up or down + */ + netif_status_callback_fn status_callback; +#endif /* LWIP_NETIF_STATUS_CALLBACK */ +#if LWIP_NETIF_LINK_CALLBACK + /** This function is called when the netif link is set to up or down + */ + netif_status_callback_fn link_callback; +#endif /* LWIP_NETIF_LINK_CALLBACK */ +#if LWIP_NETIF_REMOVE_CALLBACK + /** This function is called when the netif has been removed */ + netif_status_callback_fn remove_callback; +#endif /* LWIP_NETIF_REMOVE_CALLBACK */ + /** This field can be set by the device driver and could point + * to state information for the device. 
*/ + void *state; +#ifdef netif_get_client_data + void* client_data[LWIP_NETIF_CLIENT_DATA_INDEX_MAX + LWIP_NUM_NETIF_CLIENT_DATA]; +#endif +#if LWIP_NETIF_HOSTNAME + /* the hostname for this netif, NULL is a valid value */ + const char* hostname; +#endif /* LWIP_NETIF_HOSTNAME */ +#if LWIP_CHECKSUM_CTRL_PER_NETIF + u16_t chksum_flags; +#endif /* LWIP_CHECKSUM_CTRL_PER_NETIF*/ + /** maximum transfer unit (in bytes) */ + u16_t mtu; +#if LWIP_IPV6 && LWIP_ND6_ALLOW_RA_UPDATES + /** maximum transfer unit (in bytes), updated by RA */ + u16_t mtu6; +#endif /* LWIP_IPV6 && LWIP_ND6_ALLOW_RA_UPDATES */ + /** link level hardware address of this interface */ + u8_t hwaddr[NETIF_MAX_HWADDR_LEN]; + /** number of bytes used in hwaddr */ + u8_t hwaddr_len; + /** flags (@see @ref netif_flags) */ + u8_t flags; + /** descriptive abbreviation */ + char name[2]; + /** number of this interface. Used for @ref if_api and @ref netifapi_netif, + * as well as for IPv6 zones */ + u8_t num; +#if LWIP_IPV6_AUTOCONFIG + /** is this netif enabled for IPv6 autoconfiguration */ + u8_t ip6_autoconfig_enabled; +#endif /* LWIP_IPV6_AUTOCONFIG */ +#if LWIP_IPV6_SEND_ROUTER_SOLICIT + /** Number of Router Solicitation messages that remain to be sent. */ + u8_t rs_count; +#endif /* LWIP_IPV6_SEND_ROUTER_SOLICIT */ +#if MIB2_STATS + /** link type (from "snmp_ifType" enum from snmp_mib2.h) */ + u8_t link_type; + /** (estimate) link speed */ + u32_t link_speed; + /** timestamp at last change made (up/down) */ + u32_t ts; + /** counters */ + struct stats_mib2_netif_ctrs mib2_counters; +#endif /* MIB2_STATS */ +#if LWIP_IPV4 && LWIP_IGMP + /** This function could be called to add or delete an entry in the multicast + filter table of the ethernet MAC.*/ + netif_igmp_mac_filter_fn igmp_mac_filter; +#endif /* LWIP_IPV4 && LWIP_IGMP */ +#if LWIP_IPV6 && LWIP_IPV6_MLD + /** This function could be called to add or delete an entry in the IPv6 multicast + filter table of the ethernet MAC. */ + netif_mld_mac_filter_fn mld_mac_filter; +#endif /* LWIP_IPV6 && LWIP_IPV6_MLD */ +#if LWIP_NETIF_USE_HINTS + struct netif_hint *hints; +#endif /* LWIP_NETIF_USE_HINTS */ +#if ENABLE_LOOPBACK + /* List of packets to be queued for ourselves. */ + struct pbuf *loop_first; + struct pbuf *loop_last; +#if LWIP_LOOPBACK_MAX_PBUFS + u16_t loop_cnt_current; +#endif /* LWIP_LOOPBACK_MAX_PBUFS */ +#endif /* ENABLE_LOOPBACK */ +}; + +#if LWIP_CHECKSUM_CTRL_PER_NETIF +#define NETIF_SET_CHECKSUM_CTRL(netif, chksumflags) do { \ + (netif)->chksum_flags = chksumflags; } while(0) +#define IF__NETIF_CHECKSUM_ENABLED(netif, chksumflag) if (((netif) == NULL) || (((netif)->chksum_flags & (chksumflag)) != 0)) +#else /* LWIP_CHECKSUM_CTRL_PER_NETIF */ +#define NETIF_SET_CHECKSUM_CTRL(netif, chksumflags) +#define IF__NETIF_CHECKSUM_ENABLED(netif, chksumflag) +#endif /* LWIP_CHECKSUM_CTRL_PER_NETIF */ + +#if LWIP_SINGLE_NETIF +#define NETIF_FOREACH(netif) if (((netif) = netif_default) != NULL) +#else /* LWIP_SINGLE_NETIF */ +/** The list of network interfaces. */ +extern struct netif *netif_list; +#define NETIF_FOREACH(netif) for ((netif) = netif_list; (netif) != NULL; (netif) = (netif)->next) +#endif /* LWIP_SINGLE_NETIF */ +/** The default network interface. 
*/ +extern struct netif *netif_default; + +void netif_init(void); + +struct netif *netif_add_noaddr(struct netif *netif, void *state, netif_init_fn init, netif_input_fn input); + +#if LWIP_IPV4 +struct netif *netif_add(struct netif *netif, + const ip4_addr_t *ipaddr, const ip4_addr_t *netmask, const ip4_addr_t *gw, + void *state, netif_init_fn init, netif_input_fn input); +void netif_set_addr(struct netif *netif, const ip4_addr_t *ipaddr, const ip4_addr_t *netmask, + const ip4_addr_t *gw); +#else /* LWIP_IPV4 */ +struct netif *netif_add(struct netif *netif, void *state, netif_init_fn init, netif_input_fn input); +#endif /* LWIP_IPV4 */ +void netif_remove(struct netif * netif); + +/* Returns a network interface given its name. The name is of the form + "et0", where the first two letters are the "name" field in the + netif structure, and the digit is in the num field in the same + structure. */ +struct netif *netif_find(const char *name); + +void netif_set_default(struct netif *netif); + +#if LWIP_IPV4 +void netif_set_ipaddr(struct netif *netif, const ip4_addr_t *ipaddr); +void netif_set_netmask(struct netif *netif, const ip4_addr_t *netmask); +void netif_set_gw(struct netif *netif, const ip4_addr_t *gw); +/** @ingroup netif_ip4 */ +#define netif_ip4_addr(netif) ((const ip4_addr_t*)ip_2_ip4(&((netif)->ip_addr))) +/** @ingroup netif_ip4 */ +#define netif_ip4_netmask(netif) ((const ip4_addr_t*)ip_2_ip4(&((netif)->netmask))) +/** @ingroup netif_ip4 */ +#define netif_ip4_gw(netif) ((const ip4_addr_t*)ip_2_ip4(&((netif)->gw))) +/** @ingroup netif_ip4 */ +#define netif_ip_addr4(netif) ((const ip_addr_t*)&((netif)->ip_addr)) +/** @ingroup netif_ip4 */ +#define netif_ip_netmask4(netif) ((const ip_addr_t*)&((netif)->netmask)) +/** @ingroup netif_ip4 */ +#define netif_ip_gw4(netif) ((const ip_addr_t*)&((netif)->gw)) +#endif /* LWIP_IPV4 */ + +#define netif_set_flags(netif, set_flags) do { (netif)->flags = (u8_t)((netif)->flags | (set_flags)); } while(0) +#define netif_clear_flags(netif, clr_flags) do { (netif)->flags = (u8_t)((netif)->flags & (u8_t)(~(clr_flags) & 0xff)); } while(0) +#define netif_is_flag_set(nefif, flag) (((netif)->flags & (flag)) != 0) + +void netif_set_up(struct netif *netif); +void netif_set_down(struct netif *netif); +/** @ingroup netif + * Ask if an interface is up + */ +#define netif_is_up(netif) (((netif)->flags & NETIF_FLAG_UP) ? (u8_t)1 : (u8_t)0) + +#if LWIP_NETIF_STATUS_CALLBACK +void netif_set_status_callback(struct netif *netif, netif_status_callback_fn status_callback); +#endif /* LWIP_NETIF_STATUS_CALLBACK */ +#if LWIP_NETIF_REMOVE_CALLBACK +void netif_set_remove_callback(struct netif *netif, netif_status_callback_fn remove_callback); +#endif /* LWIP_NETIF_REMOVE_CALLBACK */ + +void netif_set_link_up(struct netif *netif); +void netif_set_link_down(struct netif *netif); +/** Ask if a link is up */ +#define netif_is_link_up(netif) (((netif)->flags & NETIF_FLAG_LINK_UP) ? (u8_t)1 : (u8_t)0) + +#if LWIP_NETIF_LINK_CALLBACK +void netif_set_link_callback(struct netif *netif, netif_status_callback_fn link_callback); +#endif /* LWIP_NETIF_LINK_CALLBACK */ + +#if LWIP_NETIF_HOSTNAME +/** @ingroup netif */ +#define netif_set_hostname(netif, name) do { if((netif) != NULL) { (netif)->hostname = name; }}while(0) +/** @ingroup netif */ +#define netif_get_hostname(netif) (((netif) != NULL) ? 
((netif)->hostname) : NULL) +#endif /* LWIP_NETIF_HOSTNAME */ + +#if LWIP_IGMP +/** @ingroup netif */ +#define netif_set_igmp_mac_filter(netif, function) do { if((netif) != NULL) { (netif)->igmp_mac_filter = function; }}while(0) +#define netif_get_igmp_mac_filter(netif) (((netif) != NULL) ? ((netif)->igmp_mac_filter) : NULL) +#endif /* LWIP_IGMP */ + +#if LWIP_IPV6 && LWIP_IPV6_MLD +/** @ingroup netif */ +#define netif_set_mld_mac_filter(netif, function) do { if((netif) != NULL) { (netif)->mld_mac_filter = function; }}while(0) +#define netif_get_mld_mac_filter(netif) (((netif) != NULL) ? ((netif)->mld_mac_filter) : NULL) +#define netif_mld_mac_filter(netif, addr, action) do { if((netif) && (netif)->mld_mac_filter) { (netif)->mld_mac_filter((netif), (addr), (action)); }}while(0) +#endif /* LWIP_IPV6 && LWIP_IPV6_MLD */ + +#if ENABLE_LOOPBACK +err_t netif_loop_output(struct netif *netif, struct pbuf *p); +void netif_poll(struct netif *netif); +#if !LWIP_NETIF_LOOPBACK_MULTITHREADING +void netif_poll_all(void); +#endif /* !LWIP_NETIF_LOOPBACK_MULTITHREADING */ +#endif /* ENABLE_LOOPBACK */ + +err_t netif_input(struct pbuf *p, struct netif *inp); + +#if LWIP_IPV6 +/** @ingroup netif_ip6 */ +#define netif_ip_addr6(netif, i) ((const ip_addr_t*)(&((netif)->ip6_addr[i]))) +/** @ingroup netif_ip6 */ +#define netif_ip6_addr(netif, i) ((const ip6_addr_t*)ip_2_ip6(&((netif)->ip6_addr[i]))) +void netif_ip6_addr_set(struct netif *netif, s8_t addr_idx, const ip6_addr_t *addr6); +void netif_ip6_addr_set_parts(struct netif *netif, s8_t addr_idx, u32_t i0, u32_t i1, u32_t i2, u32_t i3); +#define netif_ip6_addr_state(netif, i) ((netif)->ip6_addr_state[i]) +void netif_ip6_addr_set_state(struct netif* netif, s8_t addr_idx, u8_t state); +s8_t netif_get_ip6_addr_match(struct netif *netif, const ip6_addr_t *ip6addr); +void netif_create_ip6_linklocal_address(struct netif *netif, u8_t from_mac_48bit); +err_t netif_add_ip6_address(struct netif *netif, const ip6_addr_t *ip6addr, s8_t *chosen_idx); +#define netif_set_ip6_autoconfig_enabled(netif, action) do { if(netif) { (netif)->ip6_autoconfig_enabled = (action); }}while(0) +#if LWIP_IPV6_ADDRESS_LIFETIMES +#define netif_ip6_addr_valid_life(netif, i) \ + (((netif) != NULL) ? ((netif)->ip6_addr_valid_life[i]) : IP6_ADDR_LIFE_STATIC) +#define netif_ip6_addr_set_valid_life(netif, i, secs) \ + do { if (netif != NULL) { (netif)->ip6_addr_valid_life[i] = (secs); }} while (0) +#define netif_ip6_addr_pref_life(netif, i) \ + (((netif) != NULL) ? 
((netif)->ip6_addr_pref_life[i]) : IP6_ADDR_LIFE_STATIC) +#define netif_ip6_addr_set_pref_life(netif, i, secs) \ + do { if (netif != NULL) { (netif)->ip6_addr_pref_life[i] = (secs); }} while (0) +#define netif_ip6_addr_isstatic(netif, i) \ + (netif_ip6_addr_valid_life((netif), (i)) == IP6_ADDR_LIFE_STATIC) +#else /* !LWIP_IPV6_ADDRESS_LIFETIMES */ +#define netif_ip6_addr_isstatic(netif, i) (1) /* all addresses are static */ +#endif /* !LWIP_IPV6_ADDRESS_LIFETIMES */ +#if LWIP_ND6_ALLOW_RA_UPDATES +#define netif_mtu6(netif) ((netif)->mtu6) +#else /* LWIP_ND6_ALLOW_RA_UPDATES */ +#define netif_mtu6(netif) ((netif)->mtu) +#endif /* LWIP_ND6_ALLOW_RA_UPDATES */ +#endif /* LWIP_IPV6 */ + +#if LWIP_NETIF_USE_HINTS +#define NETIF_SET_HINTS(netif, netifhint) (netif)->hints = (netifhint) +#define NETIF_RESET_HINTS(netif) (netif)->hints = NULL +#else /* LWIP_NETIF_USE_HINTS */ +#define NETIF_SET_HINTS(netif, netifhint) +#define NETIF_RESET_HINTS(netif) +#endif /* LWIP_NETIF_USE_HINTS */ + +u8_t netif_name_to_index(const char *name); +char * netif_index_to_name(u8_t idx, char *name); +struct netif* netif_get_by_index(u8_t idx); + +/* Interface indexes always start at 1 per RFC 3493, section 4, num starts at 0 (internal index is 0..254)*/ +#define netif_get_index(netif) ((u8_t)((netif)->num + 1)) +#define NETIF_NO_INDEX (0) + +/** + * @ingroup netif + * Extended netif status callback (NSC) reasons flags. + * May be extended in the future! + */ +typedef u16_t netif_nsc_reason_t; + +/* used for initialization only */ +#define LWIP_NSC_NONE 0x0000 +/** netif was added. arg: NULL. Called AFTER netif was added. */ +#define LWIP_NSC_NETIF_ADDED 0x0001 +/** netif was removed. arg: NULL. Called BEFORE netif is removed. */ +#define LWIP_NSC_NETIF_REMOVED 0x0002 +/** link changed */ +#define LWIP_NSC_LINK_CHANGED 0x0004 +/** netif administrative status changed.\n + * up is called AFTER netif is set up.\n + * down is called BEFORE the netif is actually set down. */ +#define LWIP_NSC_STATUS_CHANGED 0x0008 +/** IPv4 address has changed */ +#define LWIP_NSC_IPV4_ADDRESS_CHANGED 0x0010 +/** IPv4 gateway has changed */ +#define LWIP_NSC_IPV4_GATEWAY_CHANGED 0x0020 +/** IPv4 netmask has changed */ +#define LWIP_NSC_IPV4_NETMASK_CHANGED 0x0040 +/** called AFTER IPv4 address/gateway/netmask changes have been applied */ +#define LWIP_NSC_IPV4_SETTINGS_CHANGED 0x0080 +/** IPv6 address was added */ +#define LWIP_NSC_IPV6_SET 0x0100 +/** IPv6 address state has changed */ +#define LWIP_NSC_IPV6_ADDR_STATE_CHANGED 0x0200 + +/** @ingroup netif + * Argument supplied to netif_ext_callback_fn. 
+ */ +typedef union +{ + /** Args to LWIP_NSC_LINK_CHANGED callback */ + struct link_changed_s + { + /** 1: up; 0: down */ + u8_t state; + } link_changed; + /** Args to LWIP_NSC_STATUS_CHANGED callback */ + struct status_changed_s + { + /** 1: up; 0: down */ + u8_t state; + } status_changed; + /** Args to LWIP_NSC_IPV4_ADDRESS_CHANGED|LWIP_NSC_IPV4_GATEWAY_CHANGED|LWIP_NSC_IPV4_NETMASK_CHANGED|LWIP_NSC_IPV4_SETTINGS_CHANGED callback */ + struct ipv4_changed_s + { + /** Old IPv4 address */ + const ip_addr_t* old_address; + const ip_addr_t* old_netmask; + const ip_addr_t* old_gw; + } ipv4_changed; + /** Args to LWIP_NSC_IPV6_SET callback */ + struct ipv6_set_s + { + /** Index of changed IPv6 address */ + s8_t addr_index; + /** Old IPv6 address */ + const ip_addr_t* old_address; + } ipv6_set; + /** Args to LWIP_NSC_IPV6_ADDR_STATE_CHANGED callback */ + struct ipv6_addr_state_changed_s + { + /** Index of affected IPv6 address */ + s8_t addr_index; + /** Old IPv6 address state */ + u8_t old_state; + /** Affected IPv6 address */ + const ip_addr_t* address; + } ipv6_addr_state_changed; +} netif_ext_callback_args_t; + +/** + * @ingroup netif + * Function used for extended netif status callbacks + * Note: When parsing reason argument, keep in mind that more reasons may be added in the future! + * @param netif netif that is affected by change + * @param reason change reason + * @param args depends on reason, see reason description + */ +typedef void (*netif_ext_callback_fn)(struct netif* netif, netif_nsc_reason_t reason, const netif_ext_callback_args_t* args); + +#if LWIP_NETIF_EXT_STATUS_CALLBACK +struct netif_ext_callback; +typedef struct netif_ext_callback +{ + netif_ext_callback_fn callback_fn; + struct netif_ext_callback* next; +} netif_ext_callback_t; + +#define NETIF_DECLARE_EXT_CALLBACK(name) static netif_ext_callback_t name; +void netif_add_ext_callback(netif_ext_callback_t* callback, netif_ext_callback_fn fn); +void netif_remove_ext_callback(netif_ext_callback_t* callback); +void netif_invoke_ext_callback(struct netif* netif, netif_nsc_reason_t reason, const netif_ext_callback_args_t* args); +#else +#define NETIF_DECLARE_EXT_CALLBACK(name) +#define netif_add_ext_callback(callback, fn) +#define netif_remove_ext_callback(callback) +#define netif_invoke_ext_callback(netif, reason, args) +#endif + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_NETIF_H */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/netifapi.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/netifapi.h new file mode 100644 index 0000000..5bbd8f8 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/netifapi.h @@ -0,0 +1,161 @@ +/** + * @file + * netif API (to be used from non-TCPIP threads) + */ + +/* + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. 
The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + */ +#ifndef LWIP_HDR_NETIFAPI_H +#define LWIP_HDR_NETIFAPI_H + +#include "lwip/opt.h" + +#if LWIP_NETIF_API /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/sys.h" +#include "lwip/netif.h" +#include "lwip/dhcp.h" +#include "lwip/autoip.h" +#include "lwip/priv/tcpip_priv.h" +#include "lwip/priv/api_msg.h" +#include "lwip/prot/ethernet.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* API for application */ +#if LWIP_ARP && LWIP_IPV4 +/* Used for netfiapi_arp_* APIs */ +enum netifapi_arp_entry { + NETIFAPI_ARP_PERM /* Permanent entry */ + /* Other entry types can be added here */ +}; + +/** @ingroup netifapi_arp */ +err_t netifapi_arp_add(const ip4_addr_t *ipaddr, struct eth_addr *ethaddr, enum netifapi_arp_entry type); +/** @ingroup netifapi_arp */ +err_t netifapi_arp_remove(const ip4_addr_t *ipaddr, enum netifapi_arp_entry type); +#endif /* LWIP_ARP && LWIP_IPV4 */ + +err_t netifapi_netif_add(struct netif *netif, +#if LWIP_IPV4 + const ip4_addr_t *ipaddr, const ip4_addr_t *netmask, const ip4_addr_t *gw, +#endif /* LWIP_IPV4 */ + void *state, netif_init_fn init, netif_input_fn input); + +#if LWIP_IPV4 +err_t netifapi_netif_set_addr(struct netif *netif, const ip4_addr_t *ipaddr, + const ip4_addr_t *netmask, const ip4_addr_t *gw); +#endif /* LWIP_IPV4*/ + +err_t netifapi_netif_common(struct netif *netif, netifapi_void_fn voidfunc, + netifapi_errt_fn errtfunc); + +/** @ingroup netifapi_netif */ +err_t netifapi_netif_name_to_index(const char *name, u8_t *index); +/** @ingroup netifapi_netif */ +err_t netifapi_netif_index_to_name(u8_t index, char *name); + +/** @ingroup netifapi_netif + * @see netif_remove() + */ +#define netifapi_netif_remove(n) netifapi_netif_common(n, netif_remove, NULL) +/** @ingroup netifapi_netif + * @see netif_set_up() + */ +#define netifapi_netif_set_up(n) netifapi_netif_common(n, netif_set_up, NULL) +/** @ingroup netifapi_netif + * @see netif_set_down() + */ +#define netifapi_netif_set_down(n) netifapi_netif_common(n, netif_set_down, NULL) +/** @ingroup netifapi_netif + * @see netif_set_default() + */ +#define netifapi_netif_set_default(n) netifapi_netif_common(n, netif_set_default, NULL) +/** @ingroup netifapi_netif + * @see netif_set_link_up() + */ +#define netifapi_netif_set_link_up(n) netifapi_netif_common(n, netif_set_link_up, NULL) +/** @ingroup netifapi_netif + * @see netif_set_link_down() + */ +#define netifapi_netif_set_link_down(n) netifapi_netif_common(n, netif_set_link_down, NULL) + +/** + * @defgroup netifapi_dhcp4 DHCPv4 + * @ingroup netifapi + * To be called from non-TCPIP threads + */ +/** 
@ingroup netifapi_dhcp4 + * @see dhcp_start() + */ +#define netifapi_dhcp_start(n) netifapi_netif_common(n, NULL, dhcp_start) +/** + * @ingroup netifapi_dhcp4 + * @deprecated Use netifapi_dhcp_release_and_stop() instead. + */ +#define netifapi_dhcp_stop(n) netifapi_netif_common(n, dhcp_stop, NULL) +/** @ingroup netifapi_dhcp4 + * @see dhcp_inform() + */ +#define netifapi_dhcp_inform(n) netifapi_netif_common(n, dhcp_inform, NULL) +/** @ingroup netifapi_dhcp4 + * @see dhcp_renew() + */ +#define netifapi_dhcp_renew(n) netifapi_netif_common(n, NULL, dhcp_renew) +/** + * @ingroup netifapi_dhcp4 + * @deprecated Use netifapi_dhcp_release_and_stop() instead. + */ +#define netifapi_dhcp_release(n) netifapi_netif_common(n, NULL, dhcp_release) +/** @ingroup netifapi_dhcp4 + * @see dhcp_release_and_stop() + */ +#define netifapi_dhcp_release_and_stop(n) netifapi_netif_common(n, dhcp_release_and_stop, NULL) + +/** + * @defgroup netifapi_autoip AUTOIP + * @ingroup netifapi + * To be called from non-TCPIP threads + */ +/** @ingroup netifapi_autoip + * @see autoip_start() + */ +#define netifapi_autoip_start(n) netifapi_netif_common(n, NULL, autoip_start) +/** @ingroup netifapi_autoip + * @see autoip_stop() + */ +#define netifapi_autoip_stop(n) netifapi_netif_common(n, NULL, autoip_stop) + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_NETIF_API */ + +#endif /* LWIP_HDR_NETIFAPI_H */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/opt.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/opt.h new file mode 100644 index 0000000..bb6c33d --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/opt.h @@ -0,0 +1,3519 @@ +/** + * @file + * + * lwIP Options Configuration + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
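The netifapi wrappers above forward each call into the TCPIP thread, so they are safe to use from application threads. A sketch of adding an interface and starting DHCP that way, assuming LWIP_DHCP is enabled; gnetif and ethernetif_init are assumptions standing in for the project's netif instance and its driver init function:

    #include "lwip/netifapi.h"
    #include "lwip/tcpip.h"

    extern struct netif gnetif;                        /* assumed netif instance from the glue code */
    extern err_t ethernetif_init(struct netif *netif); /* assumed driver init function */

    static void bring_up_with_dhcp(void)
    {
      ip4_addr_t zero;
      ip4_addr_set_zero(&zero);

      /* Each call below posts a message to (or locks) the TCPIP thread internally. */
      netifapi_netif_add(&gnetif, &zero, &zero, &zero,
                         NULL, ethernetif_init, tcpip_input);
      netifapi_netif_set_default(&gnetif);
      netifapi_netif_set_up(&gnetif);
      netifapi_dhcp_start(&gnetif);
    }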
+ * + * Author: Adam Dunkels + * + */ + +/* + * NOTE: || defined __DOXYGEN__ is a workaround for doxygen bug - + * without this, doxygen does not see the actual #define + */ + +#if !defined LWIP_HDR_OPT_H +#define LWIP_HDR_OPT_H + +/* + * Include user defined options first. Anything not defined in these files + * will be set to standard values. Override anything you don't like! + */ +#include "lwipopts.h" +#include "lwip/debug.h" + +/** + * @defgroup lwip_opts Options (lwipopts.h) + * @ingroup lwip + * + * @defgroup lwip_opts_debug Debugging + * @ingroup lwip_opts + * + * @defgroup lwip_opts_infrastructure Infrastructure + * @ingroup lwip_opts + * + * @defgroup lwip_opts_callback Callback-style APIs + * @ingroup lwip_opts + * + * @defgroup lwip_opts_threadsafe_apis Thread-safe APIs + * @ingroup lwip_opts + */ + + /* + ------------------------------------ + -------------- NO SYS -------------- + ------------------------------------ +*/ +/** + * @defgroup lwip_opts_nosys NO_SYS + * @ingroup lwip_opts_infrastructure + * @{ + */ +/** + * NO_SYS==1: Use lwIP without OS-awareness (no thread, semaphores, mutexes or + * mboxes). This means threaded APIs cannot be used (socket, netconn, + * i.e. everything in the 'api' folder), only the callback-style raw API is + * available (and you have to watch out for yourself that you don't access + * lwIP functions/structures from more than one context at a time!) + */ +#if !defined NO_SYS || defined __DOXYGEN__ +#define NO_SYS 0 +#endif +/** + * @} + */ + +/** + * @defgroup lwip_opts_timers Timers + * @ingroup lwip_opts_infrastructure + * @{ + */ +/** + * LWIP_TIMERS==0: Drop support for sys_timeout and lwip-internal cyclic timers. + * (the array of lwip-internal cyclic timers is still provided) + * (check NO_SYS_NO_TIMERS for compatibility to old versions) + */ +#if !defined LWIP_TIMERS || defined __DOXYGEN__ +#ifdef NO_SYS_NO_TIMERS +#define LWIP_TIMERS (!NO_SYS || (NO_SYS && !NO_SYS_NO_TIMERS)) +#else +#define LWIP_TIMERS 1 +#endif +#endif + +/** + * LWIP_TIMERS_CUSTOM==1: Provide your own timer implementation. + * Function prototypes in timeouts.h and the array of lwip-internal cyclic timers + * are still included, but the implementation is not. The following functions + * will be required: sys_timeouts_init(), sys_timeout(), sys_untimeout(), + * sys_timeouts_mbox_fetch() + */ +#if !defined LWIP_TIMERS_CUSTOM || defined __DOXYGEN__ +#define LWIP_TIMERS_CUSTOM 0 +#endif +/** + * @} + */ + +/** + * @defgroup lwip_opts_memcpy memcpy + * @ingroup lwip_opts_infrastructure + * @{ + */ +/** + * MEMCPY: override this if you have a faster implementation at hand than the + * one included in your C library + */ +#if !defined MEMCPY || defined __DOXYGEN__ +#define MEMCPY(dst,src,len) memcpy(dst,src,len) +#endif + +/** + * SMEMCPY: override this with care! Some compilers (e.g. gcc) can inline a + * call to memcpy() if the length is known at compile time and is small. + */ +#if !defined SMEMCPY || defined __DOXYGEN__ +#define SMEMCPY(dst,src,len) memcpy(dst,src,len) +#endif + +/** + * MEMMOVE: override this if you have a faster implementation at hand than the + * one included in your C library. lwIP currently uses MEMMOVE only when IPv6 + * fragmentation support is enabled. 
+ */ +#if !defined MEMMOVE || defined __DOXYGEN__ +#define MEMMOVE(dst,src,len) memmove(dst,src,len) +#endif +/** + * @} + */ + +/* + ------------------------------------ + ----------- Core locking ----------- + ------------------------------------ +*/ +/** + * @defgroup lwip_opts_lock Core locking and MPU + * @ingroup lwip_opts_infrastructure + * @{ + */ +/** + * LWIP_MPU_COMPATIBLE: enables special memory management mechanism + * which makes lwip able to work on MPU (Memory Protection Unit) system + * by not passing stack-pointers to other threads + * (this decreases performance as memory is allocated from pools instead + * of keeping it on the stack) + */ +#if !defined LWIP_MPU_COMPATIBLE || defined __DOXYGEN__ +#define LWIP_MPU_COMPATIBLE 0 +#endif + +/** + * LWIP_TCPIP_CORE_LOCKING + * Creates a global mutex that is held during TCPIP thread operations. + * Can be locked by client code to perform lwIP operations without changing + * into TCPIP thread using callbacks. See LOCK_TCPIP_CORE() and + * UNLOCK_TCPIP_CORE(). + * Your system should provide mutexes supporting priority inversion to use this. + */ +#if !defined LWIP_TCPIP_CORE_LOCKING || defined __DOXYGEN__ +#define LWIP_TCPIP_CORE_LOCKING 1 +#endif + +/** + * LWIP_TCPIP_CORE_LOCKING_INPUT: when LWIP_TCPIP_CORE_LOCKING is enabled, + * this lets tcpip_input() grab the mutex for input packets as well, + * instead of allocating a message and passing it to tcpip_thread. + * + * ATTENTION: this does not work when tcpip_input() is called from + * interrupt context! + */ +#if !defined LWIP_TCPIP_CORE_LOCKING_INPUT || defined __DOXYGEN__ +#define LWIP_TCPIP_CORE_LOCKING_INPUT 0 +#endif + +/** + * SYS_LIGHTWEIGHT_PROT==1: enable inter-task protection (and task-vs-interrupt + * protection) for certain critical regions during buffer allocation, deallocation + * and memory allocation and deallocation. + * ATTENTION: This is required when using lwIP from more than one context! If + * you disable this, you must be sure what you are doing! + */ +#if !defined SYS_LIGHTWEIGHT_PROT || defined __DOXYGEN__ +#define SYS_LIGHTWEIGHT_PROT 1 +#endif + +/** + * Macro/function to check whether lwIP's threading/locking + * requirements are satisfied during current function call. + * This macro usually calls a function that is implemented in the OS-dependent + * sys layer and performs the following checks: + * - Not in ISR (this should be checked for NO_SYS==1, too!) + * - If @ref LWIP_TCPIP_CORE_LOCKING = 1: TCPIP core lock is held + * - If @ref LWIP_TCPIP_CORE_LOCKING = 0: function is called from TCPIP thread + * @see @ref multithreading + */ +#if !defined LWIP_ASSERT_CORE_LOCKED || defined __DOXYGEN__ +#define LWIP_ASSERT_CORE_LOCKED() +#endif + +/** + * Called as first thing in the lwIP TCPIP thread. Can be used in conjunction + * with @ref LWIP_ASSERT_CORE_LOCKED to check core locking. + * @see @ref multithreading + */ +#if !defined LWIP_MARK_TCPIP_THREAD || defined __DOXYGEN__ +#define LWIP_MARK_TCPIP_THREAD() +#endif +/** + * @} + */ + +/* + ------------------------------------ + ---------- Memory options ---------- + ------------------------------------ +*/ +/** + * @defgroup lwip_opts_mem Heap and memory pools + * @ingroup lwip_opts_infrastructure + * @{ + */ +/** + * MEM_LIBC_MALLOC==1: Use malloc/free/realloc provided by your C-library + * instead of the lwip internal allocator. Can save code size if you + * already use it. 
+ */ +#if !defined MEM_LIBC_MALLOC || defined __DOXYGEN__ +#define MEM_LIBC_MALLOC 0 +#endif + +/** + * MEMP_MEM_MALLOC==1: Use mem_malloc/mem_free instead of the lwip pool allocator. + * Especially useful with MEM_LIBC_MALLOC but handle with care regarding execution + * speed (heap alloc can be much slower than pool alloc) and usage from interrupts + * (especially if your netif driver allocates PBUF_POOL pbufs for received frames + * from interrupt)! + * ATTENTION: Currently, this uses the heap for ALL pools (also for private pools, + * not only for internal pools defined in memp_std.h)! + */ +#if !defined MEMP_MEM_MALLOC || defined __DOXYGEN__ +#define MEMP_MEM_MALLOC 0 +#endif + +/** + * MEMP_MEM_INIT==1: Force use of memset to initialize pool memory. + * Useful if pool are moved in uninitialized section of memory. This will ensure + * default values in pcbs struct are well initialized in all conditions. + */ +#if !defined MEMP_MEM_INIT || defined __DOXYGEN__ +#define MEMP_MEM_INIT 0 +#endif + +/** + * MEM_ALIGNMENT: should be set to the alignment of the CPU + * 4 byte alignment -> \#define MEM_ALIGNMENT 4 + * 2 byte alignment -> \#define MEM_ALIGNMENT 2 + */ +#if !defined MEM_ALIGNMENT || defined __DOXYGEN__ +#define MEM_ALIGNMENT 1 +#endif + +/** + * MEM_SIZE: the size of the heap memory. If the application will send + * a lot of data that needs to be copied, this should be set high. + */ +#if !defined MEM_SIZE || defined __DOXYGEN__ +#define MEM_SIZE 1600 +#endif + +/** + * MEMP_OVERFLOW_CHECK: memp overflow protection reserves a configurable + * amount of bytes before and after each memp element in every pool and fills + * it with a prominent default value. + * MEMP_OVERFLOW_CHECK == 0 no checking + * MEMP_OVERFLOW_CHECK == 1 checks each element when it is freed + * MEMP_OVERFLOW_CHECK >= 2 checks each element in every pool every time + * memp_malloc() or memp_free() is called (useful but slow!) + */ +#if !defined MEMP_OVERFLOW_CHECK || defined __DOXYGEN__ +#define MEMP_OVERFLOW_CHECK 0 +#endif + +/** + * MEMP_SANITY_CHECK==1: run a sanity check after each memp_free() to make + * sure that there are no cycles in the linked lists. + */ +#if !defined MEMP_SANITY_CHECK || defined __DOXYGEN__ +#define MEMP_SANITY_CHECK 0 +#endif + +/** + * MEM_OVERFLOW_CHECK: mem overflow protection reserves a configurable + * amount of bytes before and after each heap allocation chunk and fills + * it with a prominent default value. + * MEM_OVERFLOW_CHECK == 0 no checking + * MEM_OVERFLOW_CHECK == 1 checks each element when it is freed + * MEM_OVERFLOW_CHECK >= 2 checks all heap elements every time + * mem_malloc() or mem_free() is called (useful but slow!) + */ +#if !defined MEM_OVERFLOW_CHECK || defined __DOXYGEN__ +#define MEM_OVERFLOW_CHECK 0 +#endif + +/** + * MEM_SANITY_CHECK==1: run a sanity check after each mem_free() to make + * sure that the linked list of heap elements is not corrupted. + */ +#if !defined MEM_SANITY_CHECK || defined __DOXYGEN__ +#define MEM_SANITY_CHECK 0 +#endif + +/** + * MEM_USE_POOLS==1: Use an alternative to malloc() by allocating from a set + * of memory pools of various sizes. When mem_malloc is called, an element of + * the smallest pool that can provide the length needed is returned. + * To use this, MEMP_USE_CUSTOM_POOLS also has to be enabled. 
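+ * With both options enabled, the malloc pools are described in a user-provided
+ * lwippools.h; a minimal sketch (element counts and sizes are illustrative
+ * only) might look like:
+ *   LWIP_MALLOC_MEMPOOL_START
+ *   LWIP_MALLOC_MEMPOOL(20, 256)
+ *   LWIP_MALLOC_MEMPOOL(5, 1512)
+ *   LWIP_MALLOC_MEMPOOL_END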
+ */ +#if !defined MEM_USE_POOLS || defined __DOXYGEN__ +#define MEM_USE_POOLS 0 +#endif + +/** + * MEM_USE_POOLS_TRY_BIGGER_POOL==1: if one malloc-pool is empty, try the next + * bigger pool - WARNING: THIS MIGHT WASTE MEMORY but it can make a system more + * reliable. */ +#if !defined MEM_USE_POOLS_TRY_BIGGER_POOL || defined __DOXYGEN__ +#define MEM_USE_POOLS_TRY_BIGGER_POOL 0 +#endif + +/** + * MEMP_USE_CUSTOM_POOLS==1: whether to include a user file lwippools.h + * that defines additional pools beyond the "standard" ones required + * by lwIP. If you set this to 1, you must have lwippools.h in your + * include path somewhere. + */ +#if !defined MEMP_USE_CUSTOM_POOLS || defined __DOXYGEN__ +#define MEMP_USE_CUSTOM_POOLS 0 +#endif + +/** + * Set this to 1 if you want to free PBUF_RAM pbufs (or call mem_free()) from + * interrupt context (or another context that doesn't allow waiting for a + * semaphore). + * If set to 1, mem_malloc will be protected by a semaphore and SYS_ARCH_PROTECT, + * while mem_free will only use SYS_ARCH_PROTECT. mem_malloc SYS_ARCH_UNPROTECTs + * with each loop so that mem_free can run. + * + * ATTENTION: As you can see from the above description, this leads to dis-/ + * enabling interrupts often, which can be slow! Also, on low memory, mem_malloc + * can need longer. + * + * If you don't want that, at least for NO_SYS=0, you can still use the following + * functions to enqueue a deallocation call which then runs in the tcpip_thread + * context: + * - pbuf_free_callback(p); + * - mem_free_callback(m); + */ +#if !defined LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT || defined __DOXYGEN__ +#define LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT 0 +#endif +/** + * @} + */ + +/* + ------------------------------------------------ + ---------- Internal Memory Pool Sizes ---------- + ------------------------------------------------ +*/ +/** + * @defgroup lwip_opts_memp Internal memory pools + * @ingroup lwip_opts_infrastructure + * @{ + */ +/** + * MEMP_NUM_PBUF: the number of memp struct pbufs (used for PBUF_ROM and PBUF_REF). + * If the application sends a lot of data out of ROM (or other static memory), + * this should be set high. + */ +#if !defined MEMP_NUM_PBUF || defined __DOXYGEN__ +#define MEMP_NUM_PBUF 16 +#endif + +/** + * MEMP_NUM_RAW_PCB: Number of raw connection PCBs + * (requires the LWIP_RAW option) + */ +#if !defined MEMP_NUM_RAW_PCB || defined __DOXYGEN__ +#define MEMP_NUM_RAW_PCB 4 +#endif + +/** + * MEMP_NUM_UDP_PCB: the number of UDP protocol control blocks. One + * per active UDP "connection". + * (requires the LWIP_UDP option) + */ +#if !defined MEMP_NUM_UDP_PCB || defined __DOXYGEN__ +#define MEMP_NUM_UDP_PCB 4 +#endif + +/** + * MEMP_NUM_TCP_PCB: the number of simultaneously active TCP connections. + * (requires the LWIP_TCP option) + */ +#if !defined MEMP_NUM_TCP_PCB || defined __DOXYGEN__ +#define MEMP_NUM_TCP_PCB 5 +#endif + +/** + * MEMP_NUM_TCP_PCB_LISTEN: the number of listening TCP connections. + * (requires the LWIP_TCP option) + */ +#if !defined MEMP_NUM_TCP_PCB_LISTEN || defined __DOXYGEN__ +#define MEMP_NUM_TCP_PCB_LISTEN 8 +#endif + +/** + * MEMP_NUM_TCP_SEG: the number of simultaneously queued TCP segments. + * (requires the LWIP_TCP option) + */ +#if !defined MEMP_NUM_TCP_SEG || defined __DOXYGEN__ +#define MEMP_NUM_TCP_SEG 16 +#endif + +/** + * MEMP_NUM_ALTCP_PCB: the number of simultaneously active altcp layer pcbs. + * (requires the LWIP_ALTCP option) + * Connections with multiple layers require more than one altcp_pcb (e.g. 
TLS + * over TCP requires 2 altcp_pcbs, one for TLS and one for TCP). + */ +#if !defined MEMP_NUM_ALTCP_PCB || defined __DOXYGEN__ +#define MEMP_NUM_ALTCP_PCB MEMP_NUM_TCP_PCB +#endif + +/** + * MEMP_NUM_REASSDATA: the number of IP packets simultaneously queued for + * reassembly (whole packets, not fragments!) + */ +#if !defined MEMP_NUM_REASSDATA || defined __DOXYGEN__ +#define MEMP_NUM_REASSDATA 5 +#endif + +/** + * MEMP_NUM_FRAG_PBUF: the number of IP fragments simultaneously sent + * (fragments, not whole packets!). + * This is only used with LWIP_NETIF_TX_SINGLE_PBUF==0 and only has to be > 1 + * with DMA-enabled MACs where the packet is not yet sent when netif->output + * returns. + */ +#if !defined MEMP_NUM_FRAG_PBUF || defined __DOXYGEN__ +#define MEMP_NUM_FRAG_PBUF 15 +#endif + +/** + * MEMP_NUM_ARP_QUEUE: the number of simultaneously queued outgoing + * packets (pbufs) that are waiting for an ARP request (to resolve + * their destination address) to finish. + * (requires the ARP_QUEUEING option) + */ +#if !defined MEMP_NUM_ARP_QUEUE || defined __DOXYGEN__ +#define MEMP_NUM_ARP_QUEUE 30 +#endif + +/** + * MEMP_NUM_IGMP_GROUP: The number of multicast groups whose network interfaces + * can be members at the same time (one per netif - allsystems group -, plus one + * per netif membership). + * (requires the LWIP_IGMP option) + */ +#if !defined MEMP_NUM_IGMP_GROUP || defined __DOXYGEN__ +#define MEMP_NUM_IGMP_GROUP 8 +#endif + +/** + * The number of sys timeouts used by the core stack (not apps) + * The default number of timeouts is calculated here for all enabled modules. + */ +#define LWIP_NUM_SYS_TIMEOUT_INTERNAL (LWIP_TCP + IP_REASSEMBLY + LWIP_ARP + (2*LWIP_DHCP) + LWIP_AUTOIP + LWIP_IGMP + LWIP_DNS + PPP_NUM_TIMEOUTS + (LWIP_IPV6 * (1 + LWIP_IPV6_REASS + LWIP_IPV6_MLD))) + +/** + * MEMP_NUM_SYS_TIMEOUT: the number of simultaneously active timeouts. + * The default number of timeouts is calculated here for all enabled modules. + * The formula expects settings to be either '0' or '1'. + */ +#if !defined MEMP_NUM_SYS_TIMEOUT || defined __DOXYGEN__ +#define MEMP_NUM_SYS_TIMEOUT LWIP_NUM_SYS_TIMEOUT_INTERNAL +#endif + +/** + * MEMP_NUM_NETBUF: the number of struct netbufs. + * (only needed if you use the sequential API, like api_lib.c) + */ +#if !defined MEMP_NUM_NETBUF || defined __DOXYGEN__ +#define MEMP_NUM_NETBUF 2 +#endif + +/** + * MEMP_NUM_NETCONN: the number of struct netconns. + * (only needed if you use the sequential API, like api_lib.c) + */ +#if !defined MEMP_NUM_NETCONN || defined __DOXYGEN__ +#define MEMP_NUM_NETCONN 4 +#endif + +/** + * MEMP_NUM_SELECT_CB: the number of struct lwip_select_cb. + * (Only needed if you have LWIP_MPU_COMPATIBLE==1 and use the socket API. + * In that case, you need one per thread calling lwip_select.) + */ +#if !defined MEMP_NUM_SELECT_CB || defined __DOXYGEN__ +#define MEMP_NUM_SELECT_CB 4 +#endif + +/** + * MEMP_NUM_TCPIP_MSG_API: the number of struct tcpip_msg, which are used + * for callback/timeout API communication. + * (only needed if you use tcpip.c) + */ +#if !defined MEMP_NUM_TCPIP_MSG_API || defined __DOXYGEN__ +#define MEMP_NUM_TCPIP_MSG_API 8 +#endif + +/** + * MEMP_NUM_TCPIP_MSG_INPKT: the number of struct tcpip_msg, which are used + * for incoming packets. 
+ * (only needed if you use tcpip.c) + */ +#if !defined MEMP_NUM_TCPIP_MSG_INPKT || defined __DOXYGEN__ +#define MEMP_NUM_TCPIP_MSG_INPKT 8 +#endif + +/** + * MEMP_NUM_NETDB: the number of concurrently running lwip_addrinfo() calls + * (before freeing the corresponding memory using lwip_freeaddrinfo()). + */ +#if !defined MEMP_NUM_NETDB || defined __DOXYGEN__ +#define MEMP_NUM_NETDB 1 +#endif + +/** + * MEMP_NUM_LOCALHOSTLIST: the number of host entries in the local host list + * if DNS_LOCAL_HOSTLIST_IS_DYNAMIC==1. + */ +#if !defined MEMP_NUM_LOCALHOSTLIST || defined __DOXYGEN__ +#define MEMP_NUM_LOCALHOSTLIST 1 +#endif + +/** + * PBUF_POOL_SIZE: the number of buffers in the pbuf pool. + */ +#if !defined PBUF_POOL_SIZE || defined __DOXYGEN__ +#define PBUF_POOL_SIZE 16 +#endif + +/** MEMP_NUM_API_MSG: the number of concurrently active calls to various + * socket, netconn, and tcpip functions + */ +#if !defined MEMP_NUM_API_MSG || defined __DOXYGEN__ +#define MEMP_NUM_API_MSG MEMP_NUM_TCPIP_MSG_API +#endif + +/** MEMP_NUM_DNS_API_MSG: the number of concurrently active calls to netconn_gethostbyname + */ +#if !defined MEMP_NUM_DNS_API_MSG || defined __DOXYGEN__ +#define MEMP_NUM_DNS_API_MSG MEMP_NUM_TCPIP_MSG_API +#endif + +/** MEMP_NUM_SOCKET_SETGETSOCKOPT_DATA: the number of concurrently active calls + * to getsockopt/setsockopt + */ +#if !defined MEMP_NUM_SOCKET_SETGETSOCKOPT_DATA || defined __DOXYGEN__ +#define MEMP_NUM_SOCKET_SETGETSOCKOPT_DATA MEMP_NUM_TCPIP_MSG_API +#endif + +/** MEMP_NUM_NETIFAPI_MSG: the number of concurrently active calls to the + * netifapi functions + */ +#if !defined MEMP_NUM_NETIFAPI_MSG || defined __DOXYGEN__ +#define MEMP_NUM_NETIFAPI_MSG MEMP_NUM_TCPIP_MSG_API +#endif +/** + * @} + */ + +/* + --------------------------------- + ---------- ARP options ---------- + --------------------------------- +*/ +/** + * @defgroup lwip_opts_arp ARP + * @ingroup lwip_opts_ipv4 + * @{ + */ +/** + * LWIP_ARP==1: Enable ARP functionality. + */ +#if !defined LWIP_ARP || defined __DOXYGEN__ +#define LWIP_ARP 1 +#endif + +/** + * ARP_TABLE_SIZE: Number of active MAC-IP address pairs cached. + */ +#if !defined ARP_TABLE_SIZE || defined __DOXYGEN__ +#define ARP_TABLE_SIZE 10 +#endif + +/** the time an ARP entry stays valid after its last update, + * for ARP_TMR_INTERVAL = 1000, this is + * (60 * 5) seconds = 5 minutes. + */ +#if !defined ARP_MAXAGE || defined __DOXYGEN__ +#define ARP_MAXAGE 300 +#endif + +/** + * ARP_QUEUEING==1: Multiple outgoing packets are queued during hardware address + * resolution. By default, only the most recent packet is queued per IP address. + * This is sufficient for most protocols and mainly reduces TCP connection + * startup time. Set this to 1 if you know your application sends more than one + * packet in a row to an IP address that is not in the ARP cache. + */ +#if !defined ARP_QUEUEING || defined __DOXYGEN__ +#define ARP_QUEUEING 0 +#endif + +/** The maximum number of packets which may be queued for each + * unresolved address by other network layers. Defaults to 3, 0 means disabled. + * Old packets are dropped, new packets are queued. + */ +#if !defined ARP_QUEUE_LEN || defined __DOXYGEN__ +#define ARP_QUEUE_LEN 3 +#endif + +/** + * ETHARP_SUPPORT_VLAN==1: support receiving and sending ethernet packets with + * VLAN header. See the description of LWIP_HOOK_VLAN_CHECK and + * LWIP_HOOK_VLAN_SET hooks to check/set VLAN headers. + * Additionally, you can define ETHARP_VLAN_CHECK to an u16_t VLAN ID to check. 
+ * If ETHARP_VLAN_CHECK is defined, only VLAN-traffic for this VLAN is accepted. + * If ETHARP_VLAN_CHECK is not defined, all traffic is accepted. + * Alternatively, define a function/define ETHARP_VLAN_CHECK_FN(eth_hdr, vlan) + * that returns 1 to accept a packet or 0 to drop a packet. + */ +#if !defined ETHARP_SUPPORT_VLAN || defined __DOXYGEN__ +#define ETHARP_SUPPORT_VLAN 0 +#endif + +/** LWIP_ETHERNET==1: enable ethernet support even though ARP might be disabled + */ +#if !defined LWIP_ETHERNET || defined __DOXYGEN__ +#define LWIP_ETHERNET LWIP_ARP +#endif + +/** ETH_PAD_SIZE: number of bytes added before the ethernet header to ensure + * alignment of payload after that header. Since the header is 14 bytes long, + * without this padding e.g. addresses in the IP header will not be aligned + * on a 32-bit boundary, so setting this to 2 can speed up 32-bit-platforms. + */ +#if !defined ETH_PAD_SIZE || defined __DOXYGEN__ +#define ETH_PAD_SIZE 0 +#endif + +/** ETHARP_SUPPORT_STATIC_ENTRIES==1: enable code to support static ARP table + * entries (using etharp_add_static_entry/etharp_remove_static_entry). + */ +#if !defined ETHARP_SUPPORT_STATIC_ENTRIES || defined __DOXYGEN__ +#define ETHARP_SUPPORT_STATIC_ENTRIES 0 +#endif + +/** ETHARP_TABLE_MATCH_NETIF==1: Match netif for ARP table entries. + * If disabled, duplicate IP address on multiple netifs are not supported + * (but this should only occur for AutoIP). + */ +#if !defined ETHARP_TABLE_MATCH_NETIF || defined __DOXYGEN__ +#define ETHARP_TABLE_MATCH_NETIF !LWIP_SINGLE_NETIF +#endif +/** + * @} + */ + +/* + -------------------------------- + ---------- IP options ---------- + -------------------------------- +*/ +/** + * @defgroup lwip_opts_ipv4 IPv4 + * @ingroup lwip_opts + * @{ + */ +/** + * LWIP_IPV4==1: Enable IPv4 + */ +#if !defined LWIP_IPV4 || defined __DOXYGEN__ +#define LWIP_IPV4 1 +#endif + +/** + * IP_FORWARD==1: Enables the ability to forward IP packets across network + * interfaces. If you are going to run lwIP on a device with only one network + * interface, define this to 0. + */ +#if !defined IP_FORWARD || defined __DOXYGEN__ +#define IP_FORWARD 0 +#endif + +/** + * IP_REASSEMBLY==1: Reassemble incoming fragmented IP packets. Note that + * this option does not affect outgoing packet sizes, which can be controlled + * via IP_FRAG. + */ +#if !defined IP_REASSEMBLY || defined __DOXYGEN__ +#define IP_REASSEMBLY 1 +#endif + +/** + * IP_FRAG==1: Fragment outgoing IP packets if their size exceeds MTU. Note + * that this option does not affect incoming packet sizes, which can be + * controlled via IP_REASSEMBLY. + */ +#if !defined IP_FRAG || defined __DOXYGEN__ +#define IP_FRAG 1 +#endif + +#if !LWIP_IPV4 +/* disable IPv4 extensions when IPv4 is disabled */ +#undef IP_FORWARD +#define IP_FORWARD 0 +#undef IP_REASSEMBLY +#define IP_REASSEMBLY 0 +#undef IP_FRAG +#define IP_FRAG 0 +#endif /* !LWIP_IPV4 */ + +/** + * IP_OPTIONS_ALLOWED: Defines the behavior for IP options. + * IP_OPTIONS_ALLOWED==0: All packets with IP options are dropped. + * IP_OPTIONS_ALLOWED==1: IP options are allowed (but not parsed). + */ +#if !defined IP_OPTIONS_ALLOWED || defined __DOXYGEN__ +#define IP_OPTIONS_ALLOWED 1 +#endif + +/** + * IP_REASS_MAXAGE: Maximum time (in multiples of IP_TMR_INTERVAL - so seconds, normally) + * a fragmented IP packet waits for all fragments to arrive. If not all fragments arrived + * in this time, the whole packet is discarded. 
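+ * With the usual IP_TMR_INTERVAL of 1000 ms, the default of 15 therefore
+ * drops an incomplete datagram after roughly 15 seconds.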
+ */ +#if !defined IP_REASS_MAXAGE || defined __DOXYGEN__ +#define IP_REASS_MAXAGE 15 +#endif + +/** + * IP_REASS_MAX_PBUFS: Total maximum amount of pbufs waiting to be reassembled. + * Since the received pbufs are enqueued, be sure to configure + * PBUF_POOL_SIZE > IP_REASS_MAX_PBUFS so that the stack is still able to receive + * packets even if the maximum amount of fragments is enqueued for reassembly! + * When IPv4 *and* IPv6 are enabled, this even changes to + * (PBUF_POOL_SIZE > 2 * IP_REASS_MAX_PBUFS)! + */ +#if !defined IP_REASS_MAX_PBUFS || defined __DOXYGEN__ +#define IP_REASS_MAX_PBUFS 10 +#endif + +/** + * IP_DEFAULT_TTL: Default value for Time-To-Live used by transport layers. + */ +#if !defined IP_DEFAULT_TTL || defined __DOXYGEN__ +#define IP_DEFAULT_TTL 255 +#endif + +/** + * IP_SOF_BROADCAST=1: Use the SOF_BROADCAST field to enable broadcast + * filter per pcb on udp and raw send operations. To enable broadcast filter + * on recv operations, you also have to set IP_SOF_BROADCAST_RECV=1. + */ +#if !defined IP_SOF_BROADCAST || defined __DOXYGEN__ +#define IP_SOF_BROADCAST 0 +#endif + +/** + * IP_SOF_BROADCAST_RECV (requires IP_SOF_BROADCAST=1) enable the broadcast + * filter on recv operations. + */ +#if !defined IP_SOF_BROADCAST_RECV || defined __DOXYGEN__ +#define IP_SOF_BROADCAST_RECV 0 +#endif + +/** + * IP_FORWARD_ALLOW_TX_ON_RX_NETIF==1: allow ip_forward() to send packets back + * out on the netif where it was received. This should only be used for + * wireless networks. + * ATTENTION: When this is 1, make sure your netif driver correctly marks incoming + * link-layer-broadcast/multicast packets as such using the corresponding pbuf flags! + */ +#if !defined IP_FORWARD_ALLOW_TX_ON_RX_NETIF || defined __DOXYGEN__ +#define IP_FORWARD_ALLOW_TX_ON_RX_NETIF 0 +#endif +/** + * @} + */ + +/* + ---------------------------------- + ---------- ICMP options ---------- + ---------------------------------- +*/ +/** + * @defgroup lwip_opts_icmp ICMP + * @ingroup lwip_opts_ipv4 + * @{ + */ +/** + * LWIP_ICMP==1: Enable ICMP module inside the IP stack. + * Be careful, disable that make your product non-compliant to RFC1122 + */ +#if !defined LWIP_ICMP || defined __DOXYGEN__ +#define LWIP_ICMP 1 +#endif + +/** + * ICMP_TTL: Default value for Time-To-Live used by ICMP packets. + */ +#if !defined ICMP_TTL || defined __DOXYGEN__ +#define ICMP_TTL IP_DEFAULT_TTL +#endif + +/** + * LWIP_BROADCAST_PING==1: respond to broadcast pings (default is unicast only) + */ +#if !defined LWIP_BROADCAST_PING || defined __DOXYGEN__ +#define LWIP_BROADCAST_PING 0 +#endif + +/** + * LWIP_MULTICAST_PING==1: respond to multicast pings (default is unicast only) + */ +#if !defined LWIP_MULTICAST_PING || defined __DOXYGEN__ +#define LWIP_MULTICAST_PING 0 +#endif +/** + * @} + */ + +/* + --------------------------------- + ---------- RAW options ---------- + --------------------------------- +*/ +/** + * @defgroup lwip_opts_raw RAW + * @ingroup lwip_opts_callback + * @{ + */ +/** + * LWIP_RAW==1: Enable application layer to hook into the IP layer itself. + */ +#if !defined LWIP_RAW || defined __DOXYGEN__ +#define LWIP_RAW 0 +#endif + +/** + * LWIP_RAW==1: Enable application layer to hook into the IP layer itself. 
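+ * (RAW_TTL below is the default Time-To-Live value assigned to pcbs created
+ * through the raw API; like ICMP_TTL, UDP_TTL and TCP_TTL it falls back to
+ * IP_DEFAULT_TTL.)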
+ */ +#if !defined RAW_TTL || defined __DOXYGEN__ +#define RAW_TTL IP_DEFAULT_TTL +#endif +/** + * @} + */ + +/* + ---------------------------------- + ---------- DHCP options ---------- + ---------------------------------- +*/ +/** + * @defgroup lwip_opts_dhcp DHCP + * @ingroup lwip_opts_ipv4 + * @{ + */ +/** + * LWIP_DHCP==1: Enable DHCP module. + */ +#if !defined LWIP_DHCP || defined __DOXYGEN__ +#define LWIP_DHCP 0 +#endif +#if !LWIP_IPV4 +/* disable DHCP when IPv4 is disabled */ +#undef LWIP_DHCP +#define LWIP_DHCP 0 +#endif /* !LWIP_IPV4 */ + +/** + * DHCP_DOES_ARP_CHECK==1: Do an ARP check on the offered address. + */ +#if !defined DHCP_DOES_ARP_CHECK || defined __DOXYGEN__ +#define DHCP_DOES_ARP_CHECK (LWIP_DHCP && LWIP_ARP) +#endif + +/** + * LWIP_DHCP_BOOTP_FILE==1: Store offered_si_addr and boot_file_name. + */ +#if !defined LWIP_DHCP_BOOTP_FILE || defined __DOXYGEN__ +#define LWIP_DHCP_BOOTP_FILE 0 +#endif + +/** + * LWIP_DHCP_GETS_NTP==1: Request NTP servers with discover/select. For each + * response packet, an callback is called, which has to be provided by the port: + * void dhcp_set_ntp_servers(u8_t num_ntp_servers, ip_addr_t* ntp_server_addrs); +*/ +#if !defined LWIP_DHCP_GET_NTP_SRV || defined __DOXYGEN__ +#define LWIP_DHCP_GET_NTP_SRV 0 +#endif + +/** + * The maximum of NTP servers requested + */ +#if !defined LWIP_DHCP_MAX_NTP_SERVERS || defined __DOXYGEN__ +#define LWIP_DHCP_MAX_NTP_SERVERS 1 +#endif + +/** + * LWIP_DHCP_MAX_DNS_SERVERS > 0: Request DNS servers with discover/select. + * DNS servers received in the response are passed to DNS via @ref dns_setserver() + * (up to the maximum limit defined here). + */ +#if !defined LWIP_DHCP_MAX_DNS_SERVERS || defined __DOXYGEN__ +#define LWIP_DHCP_MAX_DNS_SERVERS DNS_MAX_SERVERS +#endif +/** + * @} + */ + +/* + ------------------------------------ + ---------- AUTOIP options ---------- + ------------------------------------ +*/ +/** + * @defgroup lwip_opts_autoip AUTOIP + * @ingroup lwip_opts_ipv4 + * @{ + */ +/** + * LWIP_AUTOIP==1: Enable AUTOIP module. + */ +#if !defined LWIP_AUTOIP || defined __DOXYGEN__ +#define LWIP_AUTOIP 0 +#endif +#if !LWIP_IPV4 +/* disable AUTOIP when IPv4 is disabled */ +#undef LWIP_AUTOIP +#define LWIP_AUTOIP 0 +#endif /* !LWIP_IPV4 */ + +/** + * LWIP_DHCP_AUTOIP_COOP==1: Allow DHCP and AUTOIP to be both enabled on + * the same interface at the same time. + */ +#if !defined LWIP_DHCP_AUTOIP_COOP || defined __DOXYGEN__ +#define LWIP_DHCP_AUTOIP_COOP 0 +#endif + +/** + * LWIP_DHCP_AUTOIP_COOP_TRIES: Set to the number of DHCP DISCOVER probes + * that should be sent before falling back on AUTOIP (the DHCP client keeps + * running in this case). This can be set as low as 1 to get an AutoIP address + * very quickly, but you should be prepared to handle a changing IP address + * when DHCP overrides AutoIP. + */ +#if !defined LWIP_DHCP_AUTOIP_COOP_TRIES || defined __DOXYGEN__ +#define LWIP_DHCP_AUTOIP_COOP_TRIES 9 +#endif +/** + * @} + */ + +/* + ---------------------------------- + ----- SNMP MIB2 support ----- + ---------------------------------- +*/ +/** + * @defgroup lwip_opts_mib2 SNMP MIB2 callbacks + * @ingroup lwip_opts_infrastructure + * @{ + */ +/** + * LWIP_MIB2_CALLBACKS==1: Turn on SNMP MIB2 callbacks. + * Turn this on to get callbacks needed to implement MIB2. + * Usually MIB2_STATS should be enabled, too. 
+ */ +#if !defined LWIP_MIB2_CALLBACKS || defined __DOXYGEN__ +#define LWIP_MIB2_CALLBACKS 0 +#endif +/** + * @} + */ + +/* + ---------------------------------- + -------- Multicast options ------- + ---------------------------------- +*/ +/** + * @defgroup lwip_opts_multicast Multicast + * @ingroup lwip_opts_infrastructure + * @{ + */ +/** + * LWIP_MULTICAST_TX_OPTIONS==1: Enable multicast TX support like the socket options + * IP_MULTICAST_TTL/IP_MULTICAST_IF/IP_MULTICAST_LOOP, as well as (currently only) + * core support for the corresponding IPv6 options. + */ +#if !defined LWIP_MULTICAST_TX_OPTIONS || defined __DOXYGEN__ +#define LWIP_MULTICAST_TX_OPTIONS ((LWIP_IGMP || LWIP_IPV6_MLD) && (LWIP_UDP || LWIP_RAW)) +#endif +/** + * @} + */ + +/* + ---------------------------------- + ---------- IGMP options ---------- + ---------------------------------- +*/ +/** + * @defgroup lwip_opts_igmp IGMP + * @ingroup lwip_opts_ipv4 + * @{ + */ +/** + * LWIP_IGMP==1: Turn on IGMP module. + */ +#if !defined LWIP_IGMP || defined __DOXYGEN__ +#define LWIP_IGMP 0 +#endif +#if !LWIP_IPV4 +#undef LWIP_IGMP +#define LWIP_IGMP 0 +#endif +/** + * @} + */ + +/* + ---------------------------------- + ---------- DNS options ----------- + ---------------------------------- +*/ +/** + * @defgroup lwip_opts_dns DNS + * @ingroup lwip_opts_callback + * @{ + */ +/** + * LWIP_DNS==1: Turn on DNS module. UDP must be available for DNS + * transport. + */ +#if !defined LWIP_DNS || defined __DOXYGEN__ +#define LWIP_DNS 0 +#endif + +/** DNS maximum number of entries to maintain locally. */ +#if !defined DNS_TABLE_SIZE || defined __DOXYGEN__ +#define DNS_TABLE_SIZE 4 +#endif + +/** DNS maximum host name length supported in the name table. */ +#if !defined DNS_MAX_NAME_LENGTH || defined __DOXYGEN__ +#define DNS_MAX_NAME_LENGTH 256 +#endif + +/** The maximum of DNS servers + * The first server can be initialized automatically by defining + * DNS_SERVER_ADDRESS(ipaddr), where 'ipaddr' is an 'ip_addr_t*' + */ +#if !defined DNS_MAX_SERVERS || defined __DOXYGEN__ +#define DNS_MAX_SERVERS 2 +#endif + +/** DNS maximum number of retries when asking for a name, before "timeout". */ +#if !defined DNS_MAX_RETRIES || defined __DOXYGEN__ +#define DNS_MAX_RETRIES 4 +#endif + +/** DNS do a name checking between the query and the response. */ +#if !defined DNS_DOES_NAME_CHECK || defined __DOXYGEN__ +#define DNS_DOES_NAME_CHECK 1 +#endif + +/** LWIP_DNS_SECURE: controls the security level of the DNS implementation + * Use all DNS security features by default. + * This is overridable but should only be needed by very small targets + * or when using against non standard DNS servers. */ +#if !defined LWIP_DNS_SECURE || defined __DOXYGEN__ +#define LWIP_DNS_SECURE (LWIP_DNS_SECURE_RAND_XID | LWIP_DNS_SECURE_NO_MULTIPLE_OUTSTANDING | LWIP_DNS_SECURE_RAND_SRC_PORT) +#endif + +/* A list of DNS security features follows */ +#define LWIP_DNS_SECURE_RAND_XID 1 +#define LWIP_DNS_SECURE_NO_MULTIPLE_OUTSTANDING 2 +#define LWIP_DNS_SECURE_RAND_SRC_PORT 4 + +/** DNS_LOCAL_HOSTLIST: Implements a local host-to-address list. 
If enabled, you have to define an initializer: + * \#define DNS_LOCAL_HOSTLIST_INIT {DNS_LOCAL_HOSTLIST_ELEM("host_ip4", IPADDR4_INIT_BYTES(1,2,3,4)), \ + * DNS_LOCAL_HOSTLIST_ELEM("host_ip6", IPADDR6_INIT_HOST(123, 234, 345, 456)} + * + * Instead, you can also use an external function: + * \#define DNS_LOOKUP_LOCAL_EXTERN(x) extern err_t my_lookup_function(const char *name, ip_addr_t *addr, u8_t dns_addrtype) + * that looks up the IP address and returns ERR_OK if found (LWIP_DNS_ADDRTYPE_xxx is passed in dns_addrtype). + */ +#if !defined DNS_LOCAL_HOSTLIST || defined __DOXYGEN__ +#define DNS_LOCAL_HOSTLIST 0 +#endif /* DNS_LOCAL_HOSTLIST */ + +/** If this is turned on, the local host-list can be dynamically changed + * at runtime. */ +#if !defined DNS_LOCAL_HOSTLIST_IS_DYNAMIC || defined __DOXYGEN__ +#define DNS_LOCAL_HOSTLIST_IS_DYNAMIC 0 +#endif /* DNS_LOCAL_HOSTLIST_IS_DYNAMIC */ + +/** Set this to 1 to enable querying ".local" names via mDNS + * using a One-Shot Multicast DNS Query */ +#if !defined LWIP_DNS_SUPPORT_MDNS_QUERIES || defined __DOXYGEN__ +#define LWIP_DNS_SUPPORT_MDNS_QUERIES 0 +#endif +/** + * @} + */ + +/* + --------------------------------- + ---------- UDP options ---------- + --------------------------------- +*/ +/** + * @defgroup lwip_opts_udp UDP + * @ingroup lwip_opts_callback + * @{ + */ +/** + * LWIP_UDP==1: Turn on UDP. + */ +#if !defined LWIP_UDP || defined __DOXYGEN__ +#define LWIP_UDP 1 +#endif + +/** + * LWIP_UDPLITE==1: Turn on UDP-Lite. (Requires LWIP_UDP) + */ +#if !defined LWIP_UDPLITE || defined __DOXYGEN__ +#define LWIP_UDPLITE 0 +#endif + +/** + * UDP_TTL: Default Time-To-Live value. + */ +#if !defined UDP_TTL || defined __DOXYGEN__ +#define UDP_TTL IP_DEFAULT_TTL +#endif + +/** + * LWIP_NETBUF_RECVINFO==1: append destination addr and port to every netbuf. + */ +#if !defined LWIP_NETBUF_RECVINFO || defined __DOXYGEN__ +#define LWIP_NETBUF_RECVINFO 0 +#endif +/** + * @} + */ + +/* + --------------------------------- + ---------- TCP options ---------- + --------------------------------- +*/ +/** + * @defgroup lwip_opts_tcp TCP + * @ingroup lwip_opts_callback + * @{ + */ +/** + * LWIP_TCP==1: Turn on TCP. + */ +#if !defined LWIP_TCP || defined __DOXYGEN__ +#define LWIP_TCP 1 +#endif + +/** + * TCP_TTL: Default Time-To-Live value. + */ +#if !defined TCP_TTL || defined __DOXYGEN__ +#define TCP_TTL IP_DEFAULT_TTL +#endif + +/** + * TCP_WND: The size of a TCP window. This must be at least + * (2 * TCP_MSS) for things to work well. + * ATTENTION: when using TCP_RCV_SCALE, TCP_WND is the total size + * with scaling applied. Maximum window value in the TCP header + * will be TCP_WND >> TCP_RCV_SCALE + */ +#if !defined TCP_WND || defined __DOXYGEN__ +#define TCP_WND (4 * TCP_MSS) +#endif + +/** + * TCP_MAXRTX: Maximum number of retransmissions of data segments. + */ +#if !defined TCP_MAXRTX || defined __DOXYGEN__ +#define TCP_MAXRTX 12 +#endif + +/** + * TCP_SYNMAXRTX: Maximum number of retransmissions of SYN segments. + */ +#if !defined TCP_SYNMAXRTX || defined __DOXYGEN__ +#define TCP_SYNMAXRTX 6 +#endif + +/** + * TCP_QUEUE_OOSEQ==1: TCP will queue segments that arrive out of order. + * Define to 0 if your device is low on memory. + */ +#if !defined TCP_QUEUE_OOSEQ || defined __DOXYGEN__ +#define TCP_QUEUE_OOSEQ LWIP_TCP +#endif + +/** + * LWIP_TCP_SACK_OUT==1: TCP will support sending selective acknowledgements (SACKs). 
+ */ +#if !defined LWIP_TCP_SACK_OUT || defined __DOXYGEN__ +#define LWIP_TCP_SACK_OUT 0 +#endif + +/** + * LWIP_TCP_MAX_SACK_NUM: The maximum number of SACK values to include in TCP segments. + * Must be at least 1, but is only used if LWIP_TCP_SACK_OUT is enabled. + * NOTE: Even though we never send more than 3 or 4 SACK ranges in a single segment + * (depending on other options), setting this option to values greater than 4 is not pointless. + * This is basically the max number of SACK ranges we want to keep track of. + * As new data is delivered, some of the SACK ranges may be removed or merged. + * In that case some of those older SACK ranges may be used again. + * The amount of memory used to store SACK ranges is LWIP_TCP_MAX_SACK_NUM * 8 bytes for each TCP PCB. + */ +#if !defined LWIP_TCP_MAX_SACK_NUM || defined __DOXYGEN__ +#define LWIP_TCP_MAX_SACK_NUM 4 +#endif + +/** + * TCP_MSS: TCP Maximum segment size. (default is 536, a conservative default, + * you might want to increase this.) + * For the receive side, this MSS is advertised to the remote side + * when opening a connection. For the transmit size, this MSS sets + * an upper limit on the MSS advertised by the remote host. + */ +#if !defined TCP_MSS || defined __DOXYGEN__ +#define TCP_MSS 536 +#endif + +/** + * TCP_CALCULATE_EFF_SEND_MSS: "The maximum size of a segment that TCP really + * sends, the 'effective send MSS,' MUST be the smaller of the send MSS (which + * reflects the available reassembly buffer size at the remote host) and the + * largest size permitted by the IP layer" (RFC 1122) + * Setting this to 1 enables code that checks TCP_MSS against the MTU of the + * netif used for a connection and limits the MSS if it would be too big otherwise. + */ +#if !defined TCP_CALCULATE_EFF_SEND_MSS || defined __DOXYGEN__ +#define TCP_CALCULATE_EFF_SEND_MSS 1 +#endif + + +/** + * TCP_SND_BUF: TCP sender buffer space (bytes). + * To achieve good performance, this should be at least 2 * TCP_MSS. + */ +#if !defined TCP_SND_BUF || defined __DOXYGEN__ +#define TCP_SND_BUF (2 * TCP_MSS) +#endif + +/** + * TCP_SND_QUEUELEN: TCP sender buffer space (pbufs). This must be at least + * as much as (2 * TCP_SND_BUF/TCP_MSS) for things to work. + */ +#if !defined TCP_SND_QUEUELEN || defined __DOXYGEN__ +#define TCP_SND_QUEUELEN ((4 * (TCP_SND_BUF) + (TCP_MSS - 1))/(TCP_MSS)) +#endif + +/** + * TCP_SNDLOWAT: TCP writable space (bytes). This must be less than + * TCP_SND_BUF. It is the amount of space which must be available in the + * TCP snd_buf for select to return writable (combined with TCP_SNDQUEUELOWAT). + */ +#if !defined TCP_SNDLOWAT || defined __DOXYGEN__ +#define TCP_SNDLOWAT LWIP_MIN(LWIP_MAX(((TCP_SND_BUF)/2), (2 * TCP_MSS) + 1), (TCP_SND_BUF) - 1) +#endif + +/** + * TCP_SNDQUEUELOWAT: TCP writable bufs (pbuf count). This must be less + * than TCP_SND_QUEUELEN. If the number of pbufs queued on a pcb drops below + * this number, select returns writable (combined with TCP_SNDLOWAT). + */ +#if !defined TCP_SNDQUEUELOWAT || defined __DOXYGEN__ +#define TCP_SNDQUEUELOWAT LWIP_MAX(((TCP_SND_QUEUELEN)/2), 5) +#endif + +/** + * TCP_OOSEQ_MAX_BYTES: The default maximum number of bytes queued on ooseq per + * pcb if TCP_OOSEQ_BYTES_LIMIT is not defined. Default is 0 (no limit). + * Only valid for TCP_QUEUE_OOSEQ==1. + */ +#if !defined TCP_OOSEQ_MAX_BYTES || defined __DOXYGEN__ +#define TCP_OOSEQ_MAX_BYTES 0 +#endif + +/** + * TCP_OOSEQ_BYTES_LIMIT(pcb): Return the maximum number of bytes to be queued + * on ooseq per pcb, given the pcb. 
Only valid for TCP_QUEUE_OOSEQ==1 && + * TCP_OOSEQ_MAX_BYTES==1. + * Use this to override TCP_OOSEQ_MAX_BYTES to a dynamic value per pcb. + */ +#if !defined TCP_OOSEQ_BYTES_LIMIT +#if TCP_OOSEQ_MAX_BYTES +#define TCP_OOSEQ_BYTES_LIMIT(pcb) TCP_OOSEQ_MAX_BYTES +#elif defined __DOXYGEN__ +#define TCP_OOSEQ_BYTES_LIMIT(pcb) +#endif +#endif + +/** + * TCP_OOSEQ_MAX_PBUFS: The default maximum number of pbufs queued on ooseq per + * pcb if TCP_OOSEQ_BYTES_LIMIT is not defined. Default is 0 (no limit). + * Only valid for TCP_QUEUE_OOSEQ==1. + */ +#if !defined TCP_OOSEQ_MAX_PBUFS || defined __DOXYGEN__ +#define TCP_OOSEQ_MAX_PBUFS 0 +#endif + +/** + * TCP_OOSEQ_PBUFS_LIMIT(pcb): Return the maximum number of pbufs to be queued + * on ooseq per pcb, given the pcb. Only valid for TCP_QUEUE_OOSEQ==1 && + * TCP_OOSEQ_MAX_PBUFS==1. + * Use this to override TCP_OOSEQ_MAX_PBUFS to a dynamic value per pcb. + */ +#if !defined TCP_OOSEQ_PBUFS_LIMIT +#if TCP_OOSEQ_MAX_PBUFS +#define TCP_OOSEQ_PBUFS_LIMIT(pcb) TCP_OOSEQ_MAX_PBUFS +#elif defined __DOXYGEN__ +#define TCP_OOSEQ_PBUFS_LIMIT(pcb) +#endif +#endif + +/** + * TCP_LISTEN_BACKLOG: Enable the backlog option for tcp listen pcb. + */ +#if !defined TCP_LISTEN_BACKLOG || defined __DOXYGEN__ +#define TCP_LISTEN_BACKLOG 0 +#endif + +/** + * The maximum allowed backlog for TCP listen netconns. + * This backlog is used unless another is explicitly specified. + * 0xff is the maximum (u8_t). + */ +#if !defined TCP_DEFAULT_LISTEN_BACKLOG || defined __DOXYGEN__ +#define TCP_DEFAULT_LISTEN_BACKLOG 0xff +#endif + +/** + * TCP_OVERSIZE: The maximum number of bytes that tcp_write may + * allocate ahead of time in an attempt to create shorter pbuf chains + * for transmission. The meaningful range is 0 to TCP_MSS. Some + * suggested values are: + * + * 0: Disable oversized allocation. Each tcp_write() allocates a new + pbuf (old behaviour). + * 1: Allocate size-aligned pbufs with minimal excess. Use this if your + * scatter-gather DMA requires aligned fragments. + * 128: Limit the pbuf/memory overhead to 20%. + * TCP_MSS: Try to create unfragmented TCP packets. + * TCP_MSS/4: Try to create 4 fragments or less per TCP packet. + */ +#if !defined TCP_OVERSIZE || defined __DOXYGEN__ +#define TCP_OVERSIZE TCP_MSS +#endif + +/** + * LWIP_TCP_TIMESTAMPS==1: support the TCP timestamp option. + * The timestamp option is currently only used to help remote hosts, it is not + * really used locally. Therefore, it is only enabled when a TS option is + * received in the initial SYN packet from a remote host. + */ +#if !defined LWIP_TCP_TIMESTAMPS || defined __DOXYGEN__ +#define LWIP_TCP_TIMESTAMPS 0 +#endif + +/** + * TCP_WND_UPDATE_THRESHOLD: difference in window to trigger an + * explicit window update + */ +#if !defined TCP_WND_UPDATE_THRESHOLD || defined __DOXYGEN__ +#define TCP_WND_UPDATE_THRESHOLD LWIP_MIN((TCP_WND / 4), (TCP_MSS * 4)) +#endif + +/** + * LWIP_EVENT_API and LWIP_CALLBACK_API: Only one of these should be set to 1. + * LWIP_EVENT_API==1: The user defines lwip_tcp_event() to receive all + * events (accept, sent, etc) that happen in the system. + * LWIP_CALLBACK_API==1: The PCB callback function is called directly + * for the event. This is the default. 
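+ * In other words, with LWIP_EVENT_API==1 the single lwip_tcp_event() function
+ * (declared in tcp.h) takes the place of the per-pcb callbacks that would
+ * otherwise be registered with tcp_recv(), tcp_sent(), tcp_err() and friends.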
+ */ +#if !defined(LWIP_EVENT_API) && !defined(LWIP_CALLBACK_API) || defined __DOXYGEN__ +#define LWIP_EVENT_API 0 +#define LWIP_CALLBACK_API 1 +#else +#ifndef LWIP_EVENT_API +#define LWIP_EVENT_API 0 +#endif +#ifndef LWIP_CALLBACK_API +#define LWIP_CALLBACK_API 0 +#endif +#endif + +/** + * LWIP_WND_SCALE and TCP_RCV_SCALE: + * Set LWIP_WND_SCALE to 1 to enable window scaling. + * Set TCP_RCV_SCALE to the desired scaling factor (shift count in the + * range of [0..14]). + * When LWIP_WND_SCALE is enabled but TCP_RCV_SCALE is 0, we can use a large + * send window while having a small receive window only. + */ +#if !defined LWIP_WND_SCALE || defined __DOXYGEN__ +#define LWIP_WND_SCALE 0 +#define TCP_RCV_SCALE 0 +#endif + +/** + * LWIP_TCP_PCB_NUM_EXT_ARGS: + * When this is > 0, every tcp pcb (including listen pcb) includes a number of + * additional argument entries in an array (see tcp_ext_arg_alloc_id) + */ +#if !defined LWIP_TCP_PCB_NUM_EXT_ARGS || defined __DOXYGEN__ +#define LWIP_TCP_PCB_NUM_EXT_ARGS 0 +#endif + +/** LWIP_ALTCP==1: enable the altcp API. + * altcp is an abstraction layer that prevents applications linking against the + * tcp.h functions but provides the same functionality. It is used to e.g. add + * SSL/TLS or proxy-connect support to an application written for the tcp callback + * API without that application knowing the protocol details. + * + * With LWIP_ALTCP==0, applications written against the altcp API can still be + * compiled but are directly linked against the tcp.h callback API and then + * cannot use layered protocols. + * + * See @ref altcp_api + */ +#if !defined LWIP_ALTCP || defined __DOXYGEN__ +#define LWIP_ALTCP 0 +#endif + +/** LWIP_ALTCP_TLS==1: enable TLS support for altcp API. + * This needs a port of the functions in altcp_tls.h to a TLS library. + * A port to ARM mbedtls is provided with lwIP, see apps/altcp_tls/ directory + * and LWIP_ALTCP_TLS_MBEDTLS option. + */ +#if !defined LWIP_ALTCP_TLS || defined __DOXYGEN__ +#define LWIP_ALTCP_TLS 0 +#endif + +/** + * @} + */ + +/* + ---------------------------------- + ---------- Pbuf options ---------- + ---------------------------------- +*/ +/** + * @defgroup lwip_opts_pbuf PBUF + * @ingroup lwip_opts + * @{ + */ +/** + * PBUF_LINK_HLEN: the number of bytes that should be allocated for a + * link level header. The default is 14, the standard value for + * Ethernet. + */ +#if !defined PBUF_LINK_HLEN || defined __DOXYGEN__ +#if defined LWIP_HOOK_VLAN_SET && !defined __DOXYGEN__ +#define PBUF_LINK_HLEN (18 + ETH_PAD_SIZE) +#else /* LWIP_HOOK_VLAN_SET */ +#define PBUF_LINK_HLEN (14 + ETH_PAD_SIZE) +#endif /* LWIP_HOOK_VLAN_SET */ +#endif + +/** + * PBUF_LINK_ENCAPSULATION_HLEN: the number of bytes that should be allocated + * for an additional encapsulation header before ethernet headers (e.g. 802.11) + */ +#if !defined PBUF_LINK_ENCAPSULATION_HLEN || defined __DOXYGEN__ +#define PBUF_LINK_ENCAPSULATION_HLEN 0 +#endif + +/** + * PBUF_POOL_BUFSIZE: the size of each pbuf in the pbuf pool. The default is + * designed to accommodate single full size TCP frame in one pbuf, including + * TCP_MSS, IP header, and link header. + */ +#if !defined PBUF_POOL_BUFSIZE || defined __DOXYGEN__ +#define PBUF_POOL_BUFSIZE LWIP_MEM_ALIGN_SIZE(TCP_MSS+40+PBUF_LINK_ENCAPSULATION_HLEN+PBUF_LINK_HLEN) +#endif + +/** + * LWIP_PBUF_REF_T: Refcount type in pbuf. + * Default width of u8_t can be increased if 255 refs are not enough for you. 
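+ * For example, \#define LWIP_PBUF_REF_T u16_t in lwipopts.h raises the limit
+ * to 65535 references per pbuf.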
+ */ +#if !defined LWIP_PBUF_REF_T || defined __DOXYGEN__ +#define LWIP_PBUF_REF_T u8_t +#endif +/** + * @} + */ + +/* + ------------------------------------------------ + ---------- Network Interfaces options ---------- + ------------------------------------------------ +*/ +/** + * @defgroup lwip_opts_netif NETIF + * @ingroup lwip_opts + * @{ + */ +/** + * LWIP_SINGLE_NETIF==1: use a single netif only. This is the common case for + * small real-life targets. Some code like routing etc. can be left out. + */ +#if !defined LWIP_SINGLE_NETIF || defined __DOXYGEN__ +#define LWIP_SINGLE_NETIF 0 +#endif + +/** + * LWIP_NETIF_HOSTNAME==1: use DHCP_OPTION_HOSTNAME with netif's hostname + * field. + */ +#if !defined LWIP_NETIF_HOSTNAME || defined __DOXYGEN__ +#define LWIP_NETIF_HOSTNAME 0 +#endif + +/** + * LWIP_NETIF_API==1: Support netif api (in netifapi.c) + */ +#if !defined LWIP_NETIF_API || defined __DOXYGEN__ +#define LWIP_NETIF_API 0 +#endif + +/** + * LWIP_NETIF_STATUS_CALLBACK==1: Support a callback function whenever an interface + * changes its up/down status (i.e., due to DHCP IP acquisition) + */ +#if !defined LWIP_NETIF_STATUS_CALLBACK || defined __DOXYGEN__ +#define LWIP_NETIF_STATUS_CALLBACK 0 +#endif + +/** + * LWIP_NETIF_EXT_STATUS_CALLBACK==1: Support an extended callback function + * for several netif related event that supports multiple subscribers. + * @see netif_ext_status_callback + */ +#if !defined LWIP_NETIF_EXT_STATUS_CALLBACK || defined __DOXYGEN__ +#define LWIP_NETIF_EXT_STATUS_CALLBACK 0 +#endif + +/** + * LWIP_NETIF_LINK_CALLBACK==1: Support a callback function from an interface + * whenever the link changes (i.e., link down) + */ +#if !defined LWIP_NETIF_LINK_CALLBACK || defined __DOXYGEN__ +#define LWIP_NETIF_LINK_CALLBACK 0 +#endif + +/** + * LWIP_NETIF_REMOVE_CALLBACK==1: Support a callback function that is called + * when a netif has been removed + */ +#if !defined LWIP_NETIF_REMOVE_CALLBACK || defined __DOXYGEN__ +#define LWIP_NETIF_REMOVE_CALLBACK 0 +#endif + +/** + * LWIP_NETIF_HWADDRHINT==1: Cache link-layer-address hints (e.g. table + * indices) in struct netif. TCP and UDP can make use of this to prevent + * scanning the ARP table for every sent packet. While this is faster for big + * ARP tables or many concurrent connections, it might be counterproductive + * if you have a tiny ARP table or if there never are concurrent connections. + */ +#if !defined LWIP_NETIF_HWADDRHINT || defined __DOXYGEN__ +#define LWIP_NETIF_HWADDRHINT 0 +#endif + +/** + * LWIP_NETIF_TX_SINGLE_PBUF: if this is set to 1, lwIP *tries* to put all data + * to be sent into one single pbuf. This is for compatibility with DMA-enabled + * MACs that do not support scatter-gather. + * Beware that this might involve CPU-memcpy before transmitting that would not + * be needed without this flag! Use this only if you need to! + * + * ATTENTION: a driver should *NOT* rely on getting single pbufs but check TX + * pbufs for being in one piece. If not, @ref pbuf_clone can be used to get + * a single pbuf: + * if (p->next != NULL) { + * struct pbuf *q = pbuf_clone(PBUF_RAW, PBUF_RAM, p); + * if (q == NULL) { + * return ERR_MEM; + * } + * p = q; ATTENTION: do NOT free the old 'p' as the ref belongs to the caller! + * } + */ +#if !defined LWIP_NETIF_TX_SINGLE_PBUF || defined __DOXYGEN__ +#define LWIP_NETIF_TX_SINGLE_PBUF 0 +#endif /* LWIP_NETIF_TX_SINGLE_PBUF */ + +/** + * LWIP_NUM_NETIF_CLIENT_DATA: Number of clients that may store + * data in client_data member array of struct netif (max. 256). 
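+ * Clients typically reserve a slot at runtime with netif_alloc_client_data_id()
+ * and then access it through the netif_set_client_data()/netif_get_client_data()
+ * macros.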
+ */ +#if !defined LWIP_NUM_NETIF_CLIENT_DATA || defined __DOXYGEN__ +#define LWIP_NUM_NETIF_CLIENT_DATA 0 +#endif +/** + * @} + */ + +/* + ------------------------------------ + ---------- LOOPIF options ---------- + ------------------------------------ +*/ +/** + * @defgroup lwip_opts_loop Loopback interface + * @ingroup lwip_opts_netif + * @{ + */ +/** + * LWIP_HAVE_LOOPIF==1: Support loop interface (127.0.0.1). + * This is only needed when no real netifs are available. If at least one other + * netif is available, loopback traffic uses this netif. + */ +#if !defined LWIP_HAVE_LOOPIF || defined __DOXYGEN__ +#define LWIP_HAVE_LOOPIF (LWIP_NETIF_LOOPBACK && !LWIP_SINGLE_NETIF) +#endif + +/** + * LWIP_LOOPIF_MULTICAST==1: Support multicast/IGMP on loop interface (127.0.0.1). + */ +#if !defined LWIP_LOOPIF_MULTICAST || defined __DOXYGEN__ +#define LWIP_LOOPIF_MULTICAST 0 +#endif + +/** + * LWIP_NETIF_LOOPBACK==1: Support sending packets with a destination IP + * address equal to the netif IP address, looping them back up the stack. + */ +#if !defined LWIP_NETIF_LOOPBACK || defined __DOXYGEN__ +#define LWIP_NETIF_LOOPBACK 0 +#endif + +/** + * LWIP_LOOPBACK_MAX_PBUFS: Maximum number of pbufs on queue for loopback + * sending for each netif (0 = disabled) + */ +#if !defined LWIP_LOOPBACK_MAX_PBUFS || defined __DOXYGEN__ +#define LWIP_LOOPBACK_MAX_PBUFS 0 +#endif + +/** + * LWIP_NETIF_LOOPBACK_MULTITHREADING: Indicates whether threading is enabled in + * the system, as netifs must change how they behave depending on this setting + * for the LWIP_NETIF_LOOPBACK option to work. + * Setting this is needed to avoid reentering non-reentrant functions like + * tcp_input(). + * LWIP_NETIF_LOOPBACK_MULTITHREADING==1: Indicates that the user is using a + * multithreaded environment like tcpip.c. In this case, netif->input() + * is called directly. + * LWIP_NETIF_LOOPBACK_MULTITHREADING==0: Indicates a polling (or NO_SYS) setup. + * The packets are put on a list and netif_poll() must be called in + * the main application loop. + */ +#if !defined LWIP_NETIF_LOOPBACK_MULTITHREADING || defined __DOXYGEN__ +#define LWIP_NETIF_LOOPBACK_MULTITHREADING (!NO_SYS) +#endif +/** + * @} + */ + +/* + ------------------------------------ + ---------- Thread options ---------- + ------------------------------------ +*/ +/** + * @defgroup lwip_opts_thread Threading + * @ingroup lwip_opts_infrastructure + * @{ + */ +/** + * TCPIP_THREAD_NAME: The name assigned to the main tcpip thread. + */ +#if !defined TCPIP_THREAD_NAME || defined __DOXYGEN__ +#define TCPIP_THREAD_NAME "tcpip_thread" +#endif + +/** + * TCPIP_THREAD_STACKSIZE: The stack size used by the main tcpip thread. + * The stack size value itself is platform-dependent, but is passed to + * sys_thread_new() when the thread is created. + */ +#if !defined TCPIP_THREAD_STACKSIZE || defined __DOXYGEN__ +#define TCPIP_THREAD_STACKSIZE 0 +#endif + +/** + * TCPIP_THREAD_PRIO: The priority assigned to the main tcpip thread. + * The priority value itself is platform-dependent, but is passed to + * sys_thread_new() when the thread is created. + */ +#if !defined TCPIP_THREAD_PRIO || defined __DOXYGEN__ +#define TCPIP_THREAD_PRIO 1 +#endif + +/** + * TCPIP_MBOX_SIZE: The mailbox size for the tcpip thread messages + * The queue size value itself is platform-dependent, but is passed to + * sys_mbox_new() when tcpip_init is called. 
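+ * A value of 0 is simply passed through to sys_mbox_new(), so its meaning is
+ * up to the port; an RTOS-based lwipopts.h might instead pick an explicit
+ * size such as \#define TCPIP_MBOX_SIZE 8 (illustrative value).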
+ */ +#if !defined TCPIP_MBOX_SIZE || defined __DOXYGEN__ +#define TCPIP_MBOX_SIZE 0 +#endif + +/** + * Define this to something that triggers a watchdog. This is called from + * tcpip_thread after processing a message. + */ +#if !defined LWIP_TCPIP_THREAD_ALIVE || defined __DOXYGEN__ +#define LWIP_TCPIP_THREAD_ALIVE() +#endif + +/** + * SLIPIF_THREAD_NAME: The name assigned to the slipif_loop thread. + */ +#if !defined SLIPIF_THREAD_NAME || defined __DOXYGEN__ +#define SLIPIF_THREAD_NAME "slipif_loop" +#endif + +/** + * SLIP_THREAD_STACKSIZE: The stack size used by the slipif_loop thread. + * The stack size value itself is platform-dependent, but is passed to + * sys_thread_new() when the thread is created. + */ +#if !defined SLIPIF_THREAD_STACKSIZE || defined __DOXYGEN__ +#define SLIPIF_THREAD_STACKSIZE 0 +#endif + +/** + * SLIPIF_THREAD_PRIO: The priority assigned to the slipif_loop thread. + * The priority value itself is platform-dependent, but is passed to + * sys_thread_new() when the thread is created. + */ +#if !defined SLIPIF_THREAD_PRIO || defined __DOXYGEN__ +#define SLIPIF_THREAD_PRIO 1 +#endif + +/** + * DEFAULT_THREAD_NAME: The name assigned to any other lwIP thread. + */ +#if !defined DEFAULT_THREAD_NAME || defined __DOXYGEN__ +#define DEFAULT_THREAD_NAME "lwIP" +#endif + +/** + * DEFAULT_THREAD_STACKSIZE: The stack size used by any other lwIP thread. + * The stack size value itself is platform-dependent, but is passed to + * sys_thread_new() when the thread is created. + */ +#if !defined DEFAULT_THREAD_STACKSIZE || defined __DOXYGEN__ +#define DEFAULT_THREAD_STACKSIZE 0 +#endif + +/** + * DEFAULT_THREAD_PRIO: The priority assigned to any other lwIP thread. + * The priority value itself is platform-dependent, but is passed to + * sys_thread_new() when the thread is created. + */ +#if !defined DEFAULT_THREAD_PRIO || defined __DOXYGEN__ +#define DEFAULT_THREAD_PRIO 1 +#endif + +/** + * DEFAULT_RAW_RECVMBOX_SIZE: The mailbox size for the incoming packets on a + * NETCONN_RAW. The queue size value itself is platform-dependent, but is passed + * to sys_mbox_new() when the recvmbox is created. + */ +#if !defined DEFAULT_RAW_RECVMBOX_SIZE || defined __DOXYGEN__ +#define DEFAULT_RAW_RECVMBOX_SIZE 0 +#endif + +/** + * DEFAULT_UDP_RECVMBOX_SIZE: The mailbox size for the incoming packets on a + * NETCONN_UDP. The queue size value itself is platform-dependent, but is passed + * to sys_mbox_new() when the recvmbox is created. + */ +#if !defined DEFAULT_UDP_RECVMBOX_SIZE || defined __DOXYGEN__ +#define DEFAULT_UDP_RECVMBOX_SIZE 0 +#endif + +/** + * DEFAULT_TCP_RECVMBOX_SIZE: The mailbox size for the incoming packets on a + * NETCONN_TCP. The queue size value itself is platform-dependent, but is passed + * to sys_mbox_new() when the recvmbox is created. + */ +#if !defined DEFAULT_TCP_RECVMBOX_SIZE || defined __DOXYGEN__ +#define DEFAULT_TCP_RECVMBOX_SIZE 0 +#endif + +/** + * DEFAULT_ACCEPTMBOX_SIZE: The mailbox size for the incoming connections. + * The queue size value itself is platform-dependent, but is passed to + * sys_mbox_new() when the acceptmbox is created. 
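+ * As with the recvmbox sizes above, an application could choose explicit
+ * values in lwipopts.h, e.g. \#define DEFAULT_TCP_RECVMBOX_SIZE 8 and
+ * \#define DEFAULT_ACCEPTMBOX_SIZE 4 (illustrative values only); these too are
+ * handed to sys_mbox_new() and interpreted by the port.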
+ */ +#if !defined DEFAULT_ACCEPTMBOX_SIZE || defined __DOXYGEN__ +#define DEFAULT_ACCEPTMBOX_SIZE 0 +#endif +/** + * @} + */ + +/* + ---------------------------------------------- + ---------- Sequential layer options ---------- + ---------------------------------------------- +*/ +/** + * @defgroup lwip_opts_netconn Netconn + * @ingroup lwip_opts_threadsafe_apis + * @{ + */ +/** + * LWIP_NETCONN==1: Enable Netconn API (require to use api_lib.c) + */ +#if !defined LWIP_NETCONN || defined __DOXYGEN__ +#define LWIP_NETCONN 1 +#endif + +/** LWIP_TCPIP_TIMEOUT==1: Enable tcpip_timeout/tcpip_untimeout to create + * timers running in tcpip_thread from another thread. + */ +#if !defined LWIP_TCPIP_TIMEOUT || defined __DOXYGEN__ +#define LWIP_TCPIP_TIMEOUT 0 +#endif + +/** LWIP_NETCONN_SEM_PER_THREAD==1: Use one (thread-local) semaphore per + * thread calling socket/netconn functions instead of allocating one + * semaphore per netconn (and per select etc.) + * ATTENTION: a thread-local semaphore for API calls is needed: + * - LWIP_NETCONN_THREAD_SEM_GET() returning a sys_sem_t* + * - LWIP_NETCONN_THREAD_SEM_ALLOC() creating the semaphore + * - LWIP_NETCONN_THREAD_SEM_FREE() freeing the semaphore + * The latter 2 can be invoked up by calling netconn_thread_init()/netconn_thread_cleanup(). + * Ports may call these for threads created with sys_thread_new(). + */ +#if !defined LWIP_NETCONN_SEM_PER_THREAD || defined __DOXYGEN__ +#define LWIP_NETCONN_SEM_PER_THREAD 0 +#endif + +/** LWIP_NETCONN_FULLDUPLEX==1: Enable code that allows reading from one thread, + * writing from a 2nd thread and closing from a 3rd thread at the same time. + * ATTENTION: This is currently really alpha! Some requirements: + * - LWIP_NETCONN_SEM_PER_THREAD==1 is required to use one socket/netconn from + * multiple threads at once + * - sys_mbox_free() has to unblock receive tasks waiting on recvmbox/acceptmbox + * and prevent a task pending on this during/after deletion + */ +#if !defined LWIP_NETCONN_FULLDUPLEX || defined __DOXYGEN__ +#define LWIP_NETCONN_FULLDUPLEX 0 +#endif +/** + * @} + */ + +/* + ------------------------------------ + ---------- Socket options ---------- + ------------------------------------ +*/ +/** + * @defgroup lwip_opts_socket Sockets + * @ingroup lwip_opts_threadsafe_apis + * @{ + */ +/** + * LWIP_SOCKET==1: Enable Socket API (require to use sockets.c) + */ +#if !defined LWIP_SOCKET || defined __DOXYGEN__ +#define LWIP_SOCKET 1 +#endif + +/** + * LWIP_COMPAT_SOCKETS==1: Enable BSD-style sockets functions names through defines. + * LWIP_COMPAT_SOCKETS==2: Same as ==1 but correctly named functions are created. + * While this helps code completion, it might conflict with existing libraries. + * (only used if you use sockets.c) + */ +#if !defined LWIP_COMPAT_SOCKETS || defined __DOXYGEN__ +#define LWIP_COMPAT_SOCKETS 1 +#endif + +/** + * LWIP_POSIX_SOCKETS_IO_NAMES==1: Enable POSIX-style sockets functions names. + * Disable this option if you use a POSIX operating system that uses the same + * names (read, write & close). (only used if you use sockets.c) + */ +#if !defined LWIP_POSIX_SOCKETS_IO_NAMES || defined __DOXYGEN__ +#define LWIP_POSIX_SOCKETS_IO_NAMES 1 +#endif + +/** + * LWIP_SOCKET_OFFSET==n: Increases the file descriptor number created by LwIP with n. + * This can be useful when there are multiple APIs which create file descriptors. 
+ * When they all start with a different offset and you won't make them overlap you can + * re implement read/write/close/ioctl/fnctl to send the requested action to the right + * library (sharing select will need more work though). + */ +#if !defined LWIP_SOCKET_OFFSET || defined __DOXYGEN__ +#define LWIP_SOCKET_OFFSET 0 +#endif + +/** + * LWIP_TCP_KEEPALIVE==1: Enable TCP_KEEPIDLE, TCP_KEEPINTVL and TCP_KEEPCNT + * options processing. Note that TCP_KEEPIDLE and TCP_KEEPINTVL have to be set + * in seconds. (does not require sockets.c, and will affect tcp.c) + */ +#if !defined LWIP_TCP_KEEPALIVE || defined __DOXYGEN__ +#define LWIP_TCP_KEEPALIVE 0 +#endif + +/** + * LWIP_SO_SNDTIMEO==1: Enable send timeout for sockets/netconns and + * SO_SNDTIMEO processing. + */ +#if !defined LWIP_SO_SNDTIMEO || defined __DOXYGEN__ +#define LWIP_SO_SNDTIMEO 0 +#endif + +/** + * LWIP_SO_RCVTIMEO==1: Enable receive timeout for sockets/netconns and + * SO_RCVTIMEO processing. + */ +#if !defined LWIP_SO_RCVTIMEO || defined __DOXYGEN__ +#define LWIP_SO_RCVTIMEO 0 +#endif + +/** + * LWIP_SO_SNDRCVTIMEO_NONSTANDARD==1: SO_RCVTIMEO/SO_SNDTIMEO take an int + * (milliseconds, much like winsock does) instead of a struct timeval (default). + */ +#if !defined LWIP_SO_SNDRCVTIMEO_NONSTANDARD || defined __DOXYGEN__ +#define LWIP_SO_SNDRCVTIMEO_NONSTANDARD 0 +#endif + +/** + * LWIP_SO_RCVBUF==1: Enable SO_RCVBUF processing. + */ +#if !defined LWIP_SO_RCVBUF || defined __DOXYGEN__ +#define LWIP_SO_RCVBUF 0 +#endif + +/** + * LWIP_SO_LINGER==1: Enable SO_LINGER processing. + */ +#if !defined LWIP_SO_LINGER || defined __DOXYGEN__ +#define LWIP_SO_LINGER 0 +#endif + +/** + * If LWIP_SO_RCVBUF is used, this is the default value for recv_bufsize. + */ +#if !defined RECV_BUFSIZE_DEFAULT || defined __DOXYGEN__ +#define RECV_BUFSIZE_DEFAULT INT_MAX +#endif + +/** + * By default, TCP socket/netconn close waits 20 seconds max to send the FIN + */ +#if !defined LWIP_TCP_CLOSE_TIMEOUT_MS_DEFAULT || defined __DOXYGEN__ +#define LWIP_TCP_CLOSE_TIMEOUT_MS_DEFAULT 20000 +#endif + +/** + * SO_REUSE==1: Enable SO_REUSEADDR option. + */ +#if !defined SO_REUSE || defined __DOXYGEN__ +#define SO_REUSE 0 +#endif + +/** + * SO_REUSE_RXTOALL==1: Pass a copy of incoming broadcast/multicast packets + * to all local matches if SO_REUSEADDR is turned on. + * WARNING: Adds a memcpy for every packet if passing to more than one pcb! + */ +#if !defined SO_REUSE_RXTOALL || defined __DOXYGEN__ +#define SO_REUSE_RXTOALL 0 +#endif + +/** + * LWIP_FIONREAD_LINUXMODE==0 (default): ioctl/FIONREAD returns the amount of + * pending data in the network buffer. This is the way windows does it. It's + * the default for lwIP since it is smaller. + * LWIP_FIONREAD_LINUXMODE==1: ioctl/FIONREAD returns the size of the next + * pending datagram in bytes. This is the way linux does it. This code is only + * here for compatibility. + */ +#if !defined LWIP_FIONREAD_LINUXMODE || defined __DOXYGEN__ +#define LWIP_FIONREAD_LINUXMODE 0 +#endif + +/** + * LWIP_SOCKET_SELECT==1 (default): enable select() for sockets (uses a netconn + * callback to keep track of events). + * This saves RAM (counters per socket) and code (netconn event callback), which + * should improve performance a bit). 
+ */ +#if !defined LWIP_SOCKET_SELECT || defined __DOXYGEN__ +#define LWIP_SOCKET_SELECT 1 +#endif + +/** + * LWIP_SOCKET_POLL==1 (default): enable poll() for sockets (including + * struct pollfd, nfds_t, and constants) + */ +#if !defined LWIP_SOCKET_POLL || defined __DOXYGEN__ +#define LWIP_SOCKET_POLL 1 +#endif +/** + * @} + */ + +/* + ---------------------------------------- + ---------- Statistics options ---------- + ---------------------------------------- +*/ +/** + * @defgroup lwip_opts_stats Statistics + * @ingroup lwip_opts_debug + * @{ + */ +/** + * LWIP_STATS==1: Enable statistics collection in lwip_stats. + */ +#if !defined LWIP_STATS || defined __DOXYGEN__ +#define LWIP_STATS 1 +#endif + +#if LWIP_STATS + +/** + * LWIP_STATS_DISPLAY==1: Compile in the statistics output functions. + */ +#if !defined LWIP_STATS_DISPLAY || defined __DOXYGEN__ +#define LWIP_STATS_DISPLAY 0 +#endif + +/** + * LINK_STATS==1: Enable link stats. + */ +#if !defined LINK_STATS || defined __DOXYGEN__ +#define LINK_STATS 1 +#endif + +/** + * ETHARP_STATS==1: Enable etharp stats. + */ +#if !defined ETHARP_STATS || defined __DOXYGEN__ +#define ETHARP_STATS (LWIP_ARP) +#endif + +/** + * IP_STATS==1: Enable IP stats. + */ +#if !defined IP_STATS || defined __DOXYGEN__ +#define IP_STATS 1 +#endif + +/** + * IPFRAG_STATS==1: Enable IP fragmentation stats. Default is + * on if using either frag or reass. + */ +#if !defined IPFRAG_STATS || defined __DOXYGEN__ +#define IPFRAG_STATS (IP_REASSEMBLY || IP_FRAG) +#endif + +/** + * ICMP_STATS==1: Enable ICMP stats. + */ +#if !defined ICMP_STATS || defined __DOXYGEN__ +#define ICMP_STATS 1 +#endif + +/** + * IGMP_STATS==1: Enable IGMP stats. + */ +#if !defined IGMP_STATS || defined __DOXYGEN__ +#define IGMP_STATS (LWIP_IGMP) +#endif + +/** + * UDP_STATS==1: Enable UDP stats. Default is on if + * UDP enabled, otherwise off. + */ +#if !defined UDP_STATS || defined __DOXYGEN__ +#define UDP_STATS (LWIP_UDP) +#endif + +/** + * TCP_STATS==1: Enable TCP stats. Default is on if TCP + * enabled, otherwise off. + */ +#if !defined TCP_STATS || defined __DOXYGEN__ +#define TCP_STATS (LWIP_TCP) +#endif + +/** + * MEM_STATS==1: Enable mem.c stats. + */ +#if !defined MEM_STATS || defined __DOXYGEN__ +#define MEM_STATS ((MEM_LIBC_MALLOC == 0) && (MEM_USE_POOLS == 0)) +#endif + +/** + * MEMP_STATS==1: Enable memp.c pool stats. + */ +#if !defined MEMP_STATS || defined __DOXYGEN__ +#define MEMP_STATS (MEMP_MEM_MALLOC == 0) +#endif + +/** + * SYS_STATS==1: Enable system stats (sem and mbox counts, etc). + */ +#if !defined SYS_STATS || defined __DOXYGEN__ +#define SYS_STATS (NO_SYS == 0) +#endif + +/** + * IP6_STATS==1: Enable IPv6 stats. + */ +#if !defined IP6_STATS || defined __DOXYGEN__ +#define IP6_STATS (LWIP_IPV6) +#endif + +/** + * ICMP6_STATS==1: Enable ICMP for IPv6 stats. + */ +#if !defined ICMP6_STATS || defined __DOXYGEN__ +#define ICMP6_STATS (LWIP_IPV6 && LWIP_ICMP6) +#endif + +/** + * IP6_FRAG_STATS==1: Enable IPv6 fragmentation stats. + */ +#if !defined IP6_FRAG_STATS || defined __DOXYGEN__ +#define IP6_FRAG_STATS (LWIP_IPV6 && (LWIP_IPV6_FRAG || LWIP_IPV6_REASS)) +#endif + +/** + * MLD6_STATS==1: Enable MLD for IPv6 stats. + */ +#if !defined MLD6_STATS || defined __DOXYGEN__ +#define MLD6_STATS (LWIP_IPV6 && LWIP_IPV6_MLD) +#endif + +/** + * ND6_STATS==1: Enable Neighbor discovery for IPv6 stats. + */ +#if !defined ND6_STATS || defined __DOXYGEN__ +#define ND6_STATS (LWIP_IPV6) +#endif + +/** + * MIB2_STATS==1: Stats for SNMP MIB2. 
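+ * When LWIP_STATS_DISPLAY==1 is also set, the collected counters can be
+ * dumped from application code; a minimal sketch (output goes through
+ * LWIP_PLATFORM_DIAG):
+ * \code{.c}
+ * #include "lwip/stats.h"
+ *
+ * void my_print_lwip_stats(void)
+ * {
+ *   stats_display(); // prints all stats groups enabled above
+ * }
+ * \endcode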
+ */ +#if !defined MIB2_STATS || defined __DOXYGEN__ +#define MIB2_STATS 0 +#endif + +#else + +#define LINK_STATS 0 +#define ETHARP_STATS 0 +#define IP_STATS 0 +#define IPFRAG_STATS 0 +#define ICMP_STATS 0 +#define IGMP_STATS 0 +#define UDP_STATS 0 +#define TCP_STATS 0 +#define MEM_STATS 0 +#define MEMP_STATS 0 +#define SYS_STATS 0 +#define LWIP_STATS_DISPLAY 0 +#define IP6_STATS 0 +#define ICMP6_STATS 0 +#define IP6_FRAG_STATS 0 +#define MLD6_STATS 0 +#define ND6_STATS 0 +#define MIB2_STATS 0 + +#endif /* LWIP_STATS */ +/** + * @} + */ + +/* + -------------------------------------- + ---------- Checksum options ---------- + -------------------------------------- +*/ +/** + * @defgroup lwip_opts_checksum Checksum + * @ingroup lwip_opts_infrastructure + * @{ + */ +/** + * LWIP_CHECKSUM_CTRL_PER_NETIF==1: Checksum generation/check can be enabled/disabled + * per netif. + * ATTENTION: if enabled, the CHECKSUM_GEN_* and CHECKSUM_CHECK_* defines must be enabled! + */ +#if !defined LWIP_CHECKSUM_CTRL_PER_NETIF || defined __DOXYGEN__ +#define LWIP_CHECKSUM_CTRL_PER_NETIF 0 +#endif + +/** + * CHECKSUM_GEN_IP==1: Generate checksums in software for outgoing IP packets. + */ +#if !defined CHECKSUM_GEN_IP || defined __DOXYGEN__ +#define CHECKSUM_GEN_IP 1 +#endif + +/** + * CHECKSUM_GEN_UDP==1: Generate checksums in software for outgoing UDP packets. + */ +#if !defined CHECKSUM_GEN_UDP || defined __DOXYGEN__ +#define CHECKSUM_GEN_UDP 1 +#endif + +/** + * CHECKSUM_GEN_TCP==1: Generate checksums in software for outgoing TCP packets. + */ +#if !defined CHECKSUM_GEN_TCP || defined __DOXYGEN__ +#define CHECKSUM_GEN_TCP 1 +#endif + +/** + * CHECKSUM_GEN_ICMP==1: Generate checksums in software for outgoing ICMP packets. + */ +#if !defined CHECKSUM_GEN_ICMP || defined __DOXYGEN__ +#define CHECKSUM_GEN_ICMP 1 +#endif + +/** + * CHECKSUM_GEN_ICMP6==1: Generate checksums in software for outgoing ICMP6 packets. + */ +#if !defined CHECKSUM_GEN_ICMP6 || defined __DOXYGEN__ +#define CHECKSUM_GEN_ICMP6 1 +#endif + +/** + * CHECKSUM_CHECK_IP==1: Check checksums in software for incoming IP packets. + */ +#if !defined CHECKSUM_CHECK_IP || defined __DOXYGEN__ +#define CHECKSUM_CHECK_IP 1 +#endif + +/** + * CHECKSUM_CHECK_UDP==1: Check checksums in software for incoming UDP packets. + */ +#if !defined CHECKSUM_CHECK_UDP || defined __DOXYGEN__ +#define CHECKSUM_CHECK_UDP 1 +#endif + +/** + * CHECKSUM_CHECK_TCP==1: Check checksums in software for incoming TCP packets. + */ +#if !defined CHECKSUM_CHECK_TCP || defined __DOXYGEN__ +#define CHECKSUM_CHECK_TCP 1 +#endif + +/** + * CHECKSUM_CHECK_ICMP==1: Check checksums in software for incoming ICMP packets. + */ +#if !defined CHECKSUM_CHECK_ICMP || defined __DOXYGEN__ +#define CHECKSUM_CHECK_ICMP 1 +#endif + +/** + * CHECKSUM_CHECK_ICMP6==1: Check checksums in software for incoming ICMPv6 packets + */ +#if !defined CHECKSUM_CHECK_ICMP6 || defined __DOXYGEN__ +#define CHECKSUM_CHECK_ICMP6 1 +#endif + +/** + * LWIP_CHECKSUM_ON_COPY==1: Calculate checksum when copying data from + * application buffers to pbufs. 
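+ * As an illustration of the CHECKSUM_GEN_*/CHECKSUM_CHECK_* options above: a
+ * port whose MAC computes IP/UDP/TCP checksums in hardware might disable the
+ * software paths in its lwipopts.h (sketch, assuming full hardware offload):
+ * \code{.c}
+ * #define CHECKSUM_GEN_IP     0
+ * #define CHECKSUM_GEN_UDP    0
+ * #define CHECKSUM_GEN_TCP    0
+ * #define CHECKSUM_CHECK_IP   0
+ * #define CHECKSUM_CHECK_UDP  0
+ * #define CHECKSUM_CHECK_TCP  0
+ * \endcode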
+ */ +#if !defined LWIP_CHECKSUM_ON_COPY || defined __DOXYGEN__ +#define LWIP_CHECKSUM_ON_COPY 0 +#endif +/** + * @} + */ + +/* + --------------------------------------- + ---------- IPv6 options --------------- + --------------------------------------- +*/ +/** + * @defgroup lwip_opts_ipv6 IPv6 + * @ingroup lwip_opts + * @{ + */ +/** + * LWIP_IPV6==1: Enable IPv6 + */ +#if !defined LWIP_IPV6 || defined __DOXYGEN__ +#define LWIP_IPV6 0 +#endif + +/** + * IPV6_REASS_MAXAGE: Maximum time (in multiples of IP6_REASS_TMR_INTERVAL - so seconds, normally) + * a fragmented IP packet waits for all fragments to arrive. If not all fragments arrived + * in this time, the whole packet is discarded. + */ +#if !defined IPV6_REASS_MAXAGE || defined __DOXYGEN__ +#define IPV6_REASS_MAXAGE 60 +#endif + +/** + * LWIP_IPV6_SCOPES==1: Enable support for IPv6 address scopes, ensuring that + * e.g. link-local addresses are really treated as link-local. Disable this + * setting only for single-interface configurations. + * All addresses that have a scope according to the default policy (link-local + * unicast addresses, interface-local and link-local multicast addresses) should + * now have a zone set on them before being passed to the core API, although + * lwIP will currently attempt to select a zone on the caller's behalf when + * necessary. Applications that directly assign IPv6 addresses to interfaces + * (which is NOT recommended) must now ensure that link-local addresses carry + * the netif's zone. See the new ip6_zone.h header file for more information and + * relevant macros. For now it is still possible to turn off scopes support + * through the new LWIP_IPV6_SCOPES option. When upgrading an implementation that + * uses the core API directly, it is highly recommended to enable + * LWIP_IPV6_SCOPES_DEBUG at least for a while, to ensure e.g. proper address + * initialization. + */ +#if !defined LWIP_IPV6_SCOPES || defined __DOXYGEN__ +#define LWIP_IPV6_SCOPES (LWIP_IPV6 && !LWIP_SINGLE_NETIF) +#endif + +/** + * LWIP_IPV6_SCOPES_DEBUG==1: Perform run-time checks to verify that addresses + * are properly zoned (see ip6_zone.h on what that means) where it matters. + * Enabling this setting is highly recommended when upgrading from an existing + * installation that is not yet scope-aware; otherwise it may be too expensive. + */ +#if !defined LWIP_IPV6_SCOPES_DEBUG || defined __DOXYGEN__ +#define LWIP_IPV6_SCOPES_DEBUG 0 +#endif + +/** + * LWIP_IPV6_NUM_ADDRESSES: Number of IPv6 addresses per netif. + */ +#if !defined LWIP_IPV6_NUM_ADDRESSES || defined __DOXYGEN__ +#define LWIP_IPV6_NUM_ADDRESSES 3 +#endif + +/** + * LWIP_IPV6_FORWARD==1: Forward IPv6 packets across netifs + */ +#if !defined LWIP_IPV6_FORWARD || defined __DOXYGEN__ +#define LWIP_IPV6_FORWARD 0 +#endif + +/** + * LWIP_IPV6_FRAG==1: Fragment outgoing IPv6 packets that are too big. + */ +#if !defined LWIP_IPV6_FRAG || defined __DOXYGEN__ +#define LWIP_IPV6_FRAG 1 +#endif + +/** + * LWIP_IPV6_REASS==1: reassemble incoming IPv6 packets that fragmented + */ +#if !defined LWIP_IPV6_REASS || defined __DOXYGEN__ +#define LWIP_IPV6_REASS LWIP_IPV6 +#endif + +/** + * LWIP_IPV6_SEND_ROUTER_SOLICIT==1: Send router solicitation messages during + * network startup. + */ +#if !defined LWIP_IPV6_SEND_ROUTER_SOLICIT || defined __DOXYGEN__ +#define LWIP_IPV6_SEND_ROUTER_SOLICIT 1 +#endif + +/** + * LWIP_IPV6_AUTOCONFIG==1: Enable stateless address autoconfiguration as per RFC 4862. 
+ */ +#if !defined LWIP_IPV6_AUTOCONFIG || defined __DOXYGEN__ +#define LWIP_IPV6_AUTOCONFIG LWIP_IPV6 +#endif + +/** + * LWIP_IPV6_ADDRESS_LIFETIMES==1: Keep valid and preferred lifetimes for each + * IPv6 address. Required for LWIP_IPV6_AUTOCONFIG. May still be enabled + * otherwise, in which case the application may assign address lifetimes with + * the appropriate macros. Addresses with no lifetime are assumed to be static. + * If this option is disabled, all addresses are assumed to be static. + */ +#if !defined LWIP_IPV6_ADDRESS_LIFETIMES || defined __DOXYGEN__ +#define LWIP_IPV6_ADDRESS_LIFETIMES LWIP_IPV6_AUTOCONFIG +#endif + +/** + * LWIP_IPV6_DUP_DETECT_ATTEMPTS=[0..7]: Number of duplicate address detection attempts. + */ +#if !defined LWIP_IPV6_DUP_DETECT_ATTEMPTS || defined __DOXYGEN__ +#define LWIP_IPV6_DUP_DETECT_ATTEMPTS 1 +#endif +/** + * @} + */ + +/** + * @defgroup lwip_opts_icmp6 ICMP6 + * @ingroup lwip_opts_ipv6 + * @{ + */ +/** + * LWIP_ICMP6==1: Enable ICMPv6 (mandatory per RFC) + */ +#if !defined LWIP_ICMP6 || defined __DOXYGEN__ +#define LWIP_ICMP6 LWIP_IPV6 +#endif + +/** + * LWIP_ICMP6_DATASIZE: bytes from original packet to send back in + * ICMPv6 error messages. + */ +#if !defined LWIP_ICMP6_DATASIZE || defined __DOXYGEN__ +#define LWIP_ICMP6_DATASIZE 8 +#endif + +/** + * LWIP_ICMP6_HL: default hop limit for ICMPv6 messages + */ +#if !defined LWIP_ICMP6_HL || defined __DOXYGEN__ +#define LWIP_ICMP6_HL 255 +#endif +/** + * @} + */ + +/** + * @defgroup lwip_opts_mld6 Multicast listener discovery + * @ingroup lwip_opts_ipv6 + * @{ + */ +/** + * LWIP_IPV6_MLD==1: Enable multicast listener discovery protocol. + * If LWIP_IPV6 is enabled but this setting is disabled, the MAC layer must + * indiscriminately pass all inbound IPv6 multicast traffic to lwIP. + */ +#if !defined LWIP_IPV6_MLD || defined __DOXYGEN__ +#define LWIP_IPV6_MLD LWIP_IPV6 +#endif + +/** + * MEMP_NUM_MLD6_GROUP: Max number of IPv6 multicast groups that can be joined. + * There must be enough groups so that each netif can join the solicited-node + * multicast group for each of its local addresses, plus one for MDNS if + * applicable, plus any number of groups to be joined on UDP sockets. + */ +#if !defined MEMP_NUM_MLD6_GROUP || defined __DOXYGEN__ +#define MEMP_NUM_MLD6_GROUP 4 +#endif +/** + * @} + */ + +/** + * @defgroup lwip_opts_nd6 Neighbor discovery + * @ingroup lwip_opts_ipv6 + * @{ + */ +/** + * LWIP_ND6_QUEUEING==1: queue outgoing IPv6 packets while MAC address + * is being resolved. + */ +#if !defined LWIP_ND6_QUEUEING || defined __DOXYGEN__ +#define LWIP_ND6_QUEUEING LWIP_IPV6 +#endif + +/** + * MEMP_NUM_ND6_QUEUE: Max number of IPv6 packets to queue during MAC resolution. 
+ */ +#if !defined MEMP_NUM_ND6_QUEUE || defined __DOXYGEN__ +#define MEMP_NUM_ND6_QUEUE 20 +#endif + +/** + * LWIP_ND6_NUM_NEIGHBORS: Number of entries in IPv6 neighbor cache + */ +#if !defined LWIP_ND6_NUM_NEIGHBORS || defined __DOXYGEN__ +#define LWIP_ND6_NUM_NEIGHBORS 10 +#endif + +/** + * LWIP_ND6_NUM_DESTINATIONS: number of entries in IPv6 destination cache + */ +#if !defined LWIP_ND6_NUM_DESTINATIONS || defined __DOXYGEN__ +#define LWIP_ND6_NUM_DESTINATIONS 10 +#endif + +/** + * LWIP_ND6_NUM_PREFIXES: number of entries in IPv6 on-link prefixes cache + */ +#if !defined LWIP_ND6_NUM_PREFIXES || defined __DOXYGEN__ +#define LWIP_ND6_NUM_PREFIXES 5 +#endif + +/** + * LWIP_ND6_NUM_ROUTERS: number of entries in IPv6 default router cache + */ +#if !defined LWIP_ND6_NUM_ROUTERS || defined __DOXYGEN__ +#define LWIP_ND6_NUM_ROUTERS 3 +#endif + +/** + * LWIP_ND6_MAX_MULTICAST_SOLICIT: max number of multicast solicit messages to send + * (neighbor solicit and router solicit) + */ +#if !defined LWIP_ND6_MAX_MULTICAST_SOLICIT || defined __DOXYGEN__ +#define LWIP_ND6_MAX_MULTICAST_SOLICIT 3 +#endif + +/** + * LWIP_ND6_MAX_UNICAST_SOLICIT: max number of unicast neighbor solicitation messages + * to send during neighbor reachability detection. + */ +#if !defined LWIP_ND6_MAX_UNICAST_SOLICIT || defined __DOXYGEN__ +#define LWIP_ND6_MAX_UNICAST_SOLICIT 3 +#endif + +/** + * Unused: See ND RFC (time in milliseconds). + */ +#if !defined LWIP_ND6_MAX_ANYCAST_DELAY_TIME || defined __DOXYGEN__ +#define LWIP_ND6_MAX_ANYCAST_DELAY_TIME 1000 +#endif + +/** + * Unused: See ND RFC + */ +#if !defined LWIP_ND6_MAX_NEIGHBOR_ADVERTISEMENT || defined __DOXYGEN__ +#define LWIP_ND6_MAX_NEIGHBOR_ADVERTISEMENT 3 +#endif + +/** + * LWIP_ND6_REACHABLE_TIME: default neighbor reachable time (in milliseconds). + * May be updated by router advertisement messages. + */ +#if !defined LWIP_ND6_REACHABLE_TIME || defined __DOXYGEN__ +#define LWIP_ND6_REACHABLE_TIME 30000 +#endif + +/** + * LWIP_ND6_RETRANS_TIMER: default retransmission timer for solicitation messages + */ +#if !defined LWIP_ND6_RETRANS_TIMER || defined __DOXYGEN__ +#define LWIP_ND6_RETRANS_TIMER 1000 +#endif + +/** + * LWIP_ND6_DELAY_FIRST_PROBE_TIME: Delay before first unicast neighbor solicitation + * message is sent, during neighbor reachability detection. + */ +#if !defined LWIP_ND6_DELAY_FIRST_PROBE_TIME || defined __DOXYGEN__ +#define LWIP_ND6_DELAY_FIRST_PROBE_TIME 5000 +#endif + +/** + * LWIP_ND6_ALLOW_RA_UPDATES==1: Allow Router Advertisement messages to update + * Reachable time and retransmission timers, and netif MTU. + */ +#if !defined LWIP_ND6_ALLOW_RA_UPDATES || defined __DOXYGEN__ +#define LWIP_ND6_ALLOW_RA_UPDATES 1 +#endif + +/** + * LWIP_ND6_TCP_REACHABILITY_HINTS==1: Allow TCP to provide Neighbor Discovery + * with reachability hints for connected destinations. This helps avoid sending + * unicast neighbor solicitation messages. + */ +#if !defined LWIP_ND6_TCP_REACHABILITY_HINTS || defined __DOXYGEN__ +#define LWIP_ND6_TCP_REACHABILITY_HINTS 1 +#endif + +/** + * LWIP_ND6_RDNSS_MAX_DNS_SERVERS > 0: Use IPv6 Router Advertisement Recursive + * DNS Server Option (as per RFC 6106) to copy a defined maximum number of DNS + * servers to the DNS module. 
+ */ +#if !defined LWIP_ND6_RDNSS_MAX_DNS_SERVERS || defined __DOXYGEN__ +#define LWIP_ND6_RDNSS_MAX_DNS_SERVERS 0 +#endif +/** + * @} + */ + +/** + * @defgroup lwip_opts_dhcpv6 DHCPv6 + * @ingroup lwip_opts_ipv6 + * @{ + */ +/** + * LWIP_IPV6_DHCP6==1: enable DHCPv6 stateful/stateless address autoconfiguration. + */ +#if !defined LWIP_IPV6_DHCP6 || defined __DOXYGEN__ +#define LWIP_IPV6_DHCP6 0 +#endif + +/** + * LWIP_IPV6_DHCP6_STATEFUL==1: enable DHCPv6 stateful address autoconfiguration. + * (not supported, yet!) + */ +#if !defined LWIP_IPV6_DHCP6_STATEFUL || defined __DOXYGEN__ +#define LWIP_IPV6_DHCP6_STATEFUL 0 +#endif + +/** + * LWIP_IPV6_DHCP6_STATELESS==1: enable DHCPv6 stateless address autoconfiguration. + */ +#if !defined LWIP_IPV6_DHCP6_STATELESS || defined __DOXYGEN__ +#define LWIP_IPV6_DHCP6_STATELESS LWIP_IPV6_DHCP6 +#endif + +/** + * LWIP_DHCP6_GETS_NTP==1: Request NTP servers via DHCPv6. For each + * response packet, a callback is called, which has to be provided by the port: + * void dhcp6_set_ntp_servers(u8_t num_ntp_servers, ip_addr_t* ntp_server_addrs); +*/ +#if !defined LWIP_DHCP6_GET_NTP_SRV || defined __DOXYGEN__ +#define LWIP_DHCP6_GET_NTP_SRV 0 +#endif + +/** + * The maximum of NTP servers requested + */ +#if !defined LWIP_DHCP6_MAX_NTP_SERVERS || defined __DOXYGEN__ +#define LWIP_DHCP6_MAX_NTP_SERVERS 1 +#endif + +/** + * LWIP_DHCP6_MAX_DNS_SERVERS > 0: Request DNS servers via DHCPv6. + * DNS servers received in the response are passed to DNS via @ref dns_setserver() + * (up to the maximum limit defined here). + */ +#if !defined LWIP_DHCP6_MAX_DNS_SERVERS || defined __DOXYGEN__ +#define LWIP_DHCP6_MAX_DNS_SERVERS DNS_MAX_SERVERS +#endif +/** + * @} + */ + +/* + --------------------------------------- + ---------- Hook options --------------- + --------------------------------------- +*/ + +/** + * @defgroup lwip_opts_hooks Hooks + * @ingroup lwip_opts_infrastructure + * Hooks are undefined by default, define them to a function if you need them. + * @{ + */ + +/** + * LWIP_HOOK_FILENAME: Custom filename to \#include in files that provide hooks. + * Declare your hook function prototypes in there, you may also \#include all headers + * providing data types that are need in this file. + */ +#ifdef __DOXYGEN__ +#define LWIP_HOOK_FILENAME "path/to/my/lwip_hooks.h" +#endif + +/** + * LWIP_HOOK_TCP_ISN: + * Hook for generation of the Initial Sequence Number (ISN) for a new TCP + * connection. The default lwIP ISN generation algorithm is very basic and may + * allow for TCP spoofing attacks. This hook provides the means to implement + * the standardized ISN generation algorithm from RFC 6528 (see contrib/adons/tcp_isn), + * or any other desired algorithm as a replacement. 
+ * Called from tcp_connect() and tcp_listen_input() when an ISN is needed for + * a new TCP connection, if TCP support (@ref LWIP_TCP) is enabled.\n + * Signature:\code{.c} + * u32_t my_hook_tcp_isn(const ip_addr_t* local_ip, u16_t local_port, const ip_addr_t* remote_ip, u16_t remote_port); + * \endcode + * - it may be necessary to use "struct ip_addr" (ip4_addr, ip6_addr) instead of "ip_addr_t" in function declarations\n + * Arguments: + * - local_ip: pointer to the local IP address of the connection + * - local_port: local port number of the connection (host-byte order) + * - remote_ip: pointer to the remote IP address of the connection + * - remote_port: remote port number of the connection (host-byte order)\n + * Return value: + * - the 32-bit Initial Sequence Number to use for the new TCP connection. + */ +#ifdef __DOXYGEN__ +#define LWIP_HOOK_TCP_ISN(local_ip, local_port, remote_ip, remote_port) +#endif + +/** + * LWIP_HOOK_TCP_INPACKET_PCB: + * Hook for intercepting incoming packets before they are passed to a pcb. This + * allows updating some state or even dropping a packet. + * Signature:\code{.c} + * err_t my_hook_tcp_inpkt(struct tcp_pcb *pcb, struct tcp_hdr *hdr, u16_t optlen, u16_t opt1len, u8_t *opt2, struct pbuf *p); + * \endcode + * Arguments: + * - pcb: tcp_pcb selected for input of this packet (ATTENTION: this may be + * struct tcp_pcb_listen if pcb->state == LISTEN) + * - hdr: pointer to tcp header (ATTENTION: tcp options may not be in one piece!) + * - optlen: tcp option length + * - opt1len: tcp option length 1st part + * - opt2: if this is != NULL, tcp options are split among 2 pbufs. In that case, + * options start at right after the tcp header ('(u8_t*)(hdr + 1)') for + * the first 'opt1len' bytes and the rest starts at 'opt2'. opt2len can + * be simply calculated: 'opt2len = optlen - opt1len;' + * - p: input packet, p->payload points to application data (that's why tcp hdr + * and options are passed in seperately) + * Return value: + * - ERR_OK: continue input of this packet as normal + * - != ERR_OK: drop this packet for input (don't continue input processing) + * + * ATTENTION: don't call any tcp api functions that might change tcp state (pcb + * state or any pcb lists) from this callback! + */ +#ifdef __DOXYGEN__ +#define LWIP_HOOK_TCP_INPACKET_PCB(pcb, hdr, optlen, opt1len, opt2, p) +#endif + +/** + * LWIP_HOOK_TCP_OUT_TCPOPT_LENGTH: + * Hook for increasing the size of the options allocated with a tcp header. + * Together with LWIP_HOOK_TCP_OUT_ADD_TCPOPTS, this can be used to add custom + * options to outgoing tcp segments. + * Signature:\code{.c} + * u8_t my_hook_tcp_out_tcpopt_length(const struct tcp_pcb *pcb, u8_t internal_option_length); + * \endcode + * Arguments: + * - pcb: tcp_pcb that transmits (ATTENTION: this may be NULL or + * struct tcp_pcb_listen if pcb->state == LISTEN) + * - internal_option_length: tcp option length used by the stack internally + * Return value: + * - a number of bytes to allocate for tcp options (internal_option_length <= ret <= 40) + * + * ATTENTION: don't call any tcp api functions that might change tcp state (pcb + * state or any pcb lists) from this callback! + */ +#ifdef __DOXYGEN__ +#define LWIP_HOOK_TCP_OUT_TCPOPT_LENGTH(pcb, internal_len) +#endif + +/** + * LWIP_HOOK_TCP_OUT_ADD_TCPOPTS: + * Hook for adding custom options to outgoing tcp segments. + * Space for these custom options has to be reserved via LWIP_HOOK_TCP_OUT_TCPOPT_LENGTH. 
+ * Signature:\code{.c} + * u32_t *my_hook_tcp_out_add_tcpopts(struct pbuf *p, struct tcp_hdr *hdr, const struct tcp_pcb *pcb, u32_t *opts); + * \endcode + * Arguments: + * - p: output packet, p->payload pointing to tcp header, data follows + * - hdr: tcp header + * - pcb: tcp_pcb that transmits (ATTENTION: this may be NULL or + * struct tcp_pcb_listen if pcb->state == LISTEN) + * - opts: pointer where to add the custom options (there may already be options + * between the header and these) + * Return value: + * - pointer pointing directly after the inserted options + * + * ATTENTION: don't call any tcp api functions that might change tcp state (pcb + * state or any pcb lists) from this callback! + */ +#ifdef __DOXYGEN__ +#define LWIP_HOOK_TCP_OUT_ADD_TCPOPTS(p, hdr, pcb, opts) +#endif + +/** + * LWIP_HOOK_IP4_INPUT(pbuf, input_netif): + * Called from ip_input() (IPv4) + * Signature:\code{.c} + * int my_hook(struct pbuf *pbuf, struct netif *input_netif); + * \endcode + * Arguments: + * - pbuf: received struct pbuf passed to ip_input() + * - input_netif: struct netif on which the packet has been received + * Return values: + * - 0: Hook has not consumed the packet, packet is processed as normal + * - != 0: Hook has consumed the packet. + * If the hook consumed the packet, 'pbuf' is in the responsibility of the hook + * (i.e. free it when done). + */ +#ifdef __DOXYGEN__ +#define LWIP_HOOK_IP4_INPUT(pbuf, input_netif) +#endif + +/** + * LWIP_HOOK_IP4_ROUTE(dest): + * Called from ip_route() (IPv4) + * Signature:\code{.c} + * struct netif *my_hook(const ip4_addr_t *dest); + * \endcode + * Arguments: + * - dest: destination IPv4 address + * Returns values: + * - the destination netif + * - NULL if no destination netif is found. In that case, ip_route() continues as normal. + */ +#ifdef __DOXYGEN__ +#define LWIP_HOOK_IP4_ROUTE() +#endif + +/** + * LWIP_HOOK_IP4_ROUTE_SRC(src, dest): + * Source-based routing for IPv4 - called from ip_route() (IPv4) + * Signature:\code{.c} + * struct netif *my_hook(const ip4_addr_t *src, const ip4_addr_t *dest); + * \endcode + * Arguments: + * - src: local/source IPv4 address + * - dest: destination IPv4 address + * Returns values: + * - the destination netif + * - NULL if no destination netif is found. In that case, ip_route() continues as normal. + */ +#ifdef __DOXYGEN__ +#define LWIP_HOOK_IP4_ROUTE_SRC(src, dest) +#endif + +/** + * LWIP_HOOK_IP4_CANFORWARD(src, dest): + * Check if an IPv4 can be forwarded - called from: + * ip4_input() -> ip4_forward() -> ip4_canforward() (IPv4) + * - source address is available via ip4_current_src_addr() + * - calling an output function in this context (e.g. multicast router) is allowed + * Signature:\code{.c} + * int my_hook(struct pbuf *p, u32_t dest_addr_hostorder); + * \endcode + * Arguments: + * - p: packet to forward + * - dest: destination IPv4 address + * Returns values: + * - 1: forward + * - 0: don't forward + * - -1: no decision. In that case, ip4_canforward() continues as normal. 
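+ * A minimal sketch of a hook with the signature above (the subnet is an
+ * assumed, application-chosen example):
+ * \code{.c}
+ * int my_canforward_hook(struct pbuf *p, u32_t dest_addr_hostorder)
+ * {
+ *   LWIP_UNUSED_ARG(p);
+ *   if ((dest_addr_hostorder & 0xFFFFFF00UL) == 0xC0A86400UL) {
+ *     return 0;  // never forward into 192.168.100.0/24
+ *   }
+ *   return -1;   // no decision, let ip4_canforward() decide as usual
+ * }
+ * \endcode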
+ */ +#ifdef __DOXYGEN__ +#define LWIP_HOOK_IP4_CANFORWARD(src, dest) +#endif + +/** + * LWIP_HOOK_ETHARP_GET_GW(netif, dest): + * Called from etharp_output() (IPv4) + * Signature:\code{.c} + * const ip4_addr_t *my_hook(struct netif *netif, const ip4_addr_t *dest); + * \endcode + * Arguments: + * - netif: the netif used for sending + * - dest: the destination IPv4 address + * Return values: + * - the IPv4 address of the gateway to handle the specified destination IPv4 address + * - NULL, in which case the netif's default gateway is used + * + * The returned address MUST be directly reachable on the specified netif! + * This function is meant to implement advanced IPv4 routing together with + * LWIP_HOOK_IP4_ROUTE(). The actual routing/gateway table implementation is + * not part of lwIP but can e.g. be hidden in the netif's state argument. +*/ +#ifdef __DOXYGEN__ +#define LWIP_HOOK_ETHARP_GET_GW(netif, dest) +#endif + +/** + * LWIP_HOOK_IP6_INPUT(pbuf, input_netif): + * Called from ip6_input() (IPv6) + * Signature:\code{.c} + * int my_hook(struct pbuf *pbuf, struct netif *input_netif); + * \endcode + * Arguments: + * - pbuf: received struct pbuf passed to ip6_input() + * - input_netif: struct netif on which the packet has been received + * Return values: + * - 0: Hook has not consumed the packet, packet is processed as normal + * - != 0: Hook has consumed the packet. + * If the hook consumed the packet, 'pbuf' is in the responsibility of the hook + * (i.e. free it when done). + */ +#ifdef __DOXYGEN__ +#define LWIP_HOOK_IP6_INPUT(pbuf, input_netif) +#endif + +/** + * LWIP_HOOK_IP6_ROUTE(src, dest): + * Called from ip_route() (IPv6) + * Signature:\code{.c} + * struct netif *my_hook(const ip6_addr_t *dest, const ip6_addr_t *src); + * \endcode + * Arguments: + * - src: source IPv6 address + * - dest: destination IPv6 address + * Return values: + * - the destination netif + * - NULL if no destination netif is found. In that case, ip6_route() continues as normal. + */ +#ifdef __DOXYGEN__ +#define LWIP_HOOK_IP6_ROUTE(src, dest) +#endif + +/** + * LWIP_HOOK_ND6_GET_GW(netif, dest): + * Called from nd6_get_next_hop_entry() (IPv6) + * Signature:\code{.c} + * const ip6_addr_t *my_hook(struct netif *netif, const ip6_addr_t *dest); + * \endcode + * Arguments: + * - netif: the netif used for sending + * - dest: the destination IPv6 address + * Return values: + * - the IPv6 address of the next hop to handle the specified destination IPv6 address + * - NULL, in which case a NDP-discovered router is used instead + * + * The returned address MUST be directly reachable on the specified netif! + * This function is meant to implement advanced IPv6 routing together with + * LWIP_HOOK_IP6_ROUTE(). The actual routing/gateway table implementation is + * not part of lwIP but can e.g. be hidden in the netif's state argument. +*/ +#ifdef __DOXYGEN__ +#define LWIP_HOOK_ND6_GET_GW(netif, dest) +#endif + +/** + * LWIP_HOOK_VLAN_CHECK(netif, eth_hdr, vlan_hdr): + * Called from ethernet_input() if VLAN support is enabled + * Signature:\code{.c} + * int my_hook(struct netif *netif, struct eth_hdr *eth_hdr, struct eth_vlan_hdr *vlan_hdr); + * \endcode + * Arguments: + * - netif: struct netif on which the packet has been received + * - eth_hdr: struct eth_hdr of the packet + * - vlan_hdr: struct eth_vlan_hdr of the packet + * Return values: + * - 0: Packet must be dropped. + * - != 0: Packet must be accepted. 
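+ * A minimal sketch of a hook with the signature above that accepts only one
+ * (assumed, application-defined) VLAN id:
+ * \code{.c}
+ * #define MY_VLAN_ID 100 // assumed example value
+ *
+ * int my_vlan_check(struct netif *netif, struct eth_hdr *eth_hdr, struct eth_vlan_hdr *vlan_hdr)
+ * {
+ *   LWIP_UNUSED_ARG(netif);
+ *   LWIP_UNUSED_ARG(eth_hdr);
+ *   // VLAN id is the low 12 bits of the prio_vid field (network byte order)
+ *   return ((lwip_ntohs(vlan_hdr->prio_vid) & 0x0FFF) == MY_VLAN_ID);
+ * }
+ * \endcode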
+ */ +#ifdef __DOXYGEN__ +#define LWIP_HOOK_VLAN_CHECK(netif, eth_hdr, vlan_hdr) +#endif + +/** + * LWIP_HOOK_VLAN_SET: + * Hook can be used to set prio_vid field of vlan_hdr. If you need to store data + * on per-netif basis to implement this callback, see @ref netif_cd. + * Called from ethernet_output() if VLAN support (@ref ETHARP_SUPPORT_VLAN) is enabled.\n + * Signature:\code{.c} + * s32_t my_hook_vlan_set(struct netif* netif, struct pbuf* pbuf, const struct eth_addr* src, const struct eth_addr* dst, u16_t eth_type);\n + * \endcode + * Arguments: + * - netif: struct netif that the packet will be sent through + * - p: struct pbuf packet to be sent + * - src: source eth address + * - dst: destination eth address + * - eth_type: ethernet type to packet to be sent\n + * + * + * Return values: + * - <0: Packet shall not contain VLAN header. + * - 0 <= return value <= 0xFFFF: Packet shall contain VLAN header. Return value is prio_vid in host byte order. + */ +#ifdef __DOXYGEN__ +#define LWIP_HOOK_VLAN_SET(netif, p, src, dst, eth_type) +#endif + +/** + * LWIP_HOOK_MEMP_AVAILABLE(memp_t_type): + * Called from memp_free() when a memp pool was empty and an item is now available + * Signature:\code{.c} + * void my_hook(memp_t type); + * \endcode + */ +#ifdef __DOXYGEN__ +#define LWIP_HOOK_MEMP_AVAILABLE(memp_t_type) +#endif + +/** + * LWIP_HOOK_UNKNOWN_ETH_PROTOCOL(pbuf, netif): + * Called from ethernet_input() when an unknown eth type is encountered. + * Signature:\code{.c} + * err_t my_hook(struct pbuf* pbuf, struct netif* netif); + * \endcode + * Arguments: + * - p: rx packet with unknown eth type + * - netif: netif on which the packet has been received + * Return values: + * - ERR_OK if packet is accepted (hook function now owns the pbuf) + * - any error code otherwise (pbuf is freed) + * + * Payload points to ethernet header! + */ +#ifdef __DOXYGEN__ +#define LWIP_HOOK_UNKNOWN_ETH_PROTOCOL(pbuf, netif) +#endif + +/** + * LWIP_HOOK_DHCP_APPEND_OPTIONS(netif, dhcp, state, msg, msg_type, options_len_ptr): + * Called from various dhcp functions when sending a DHCP message. + * This hook is called just before the DHCP message trailer is added, so the + * options are at the end of a DHCP message. + * Signature:\code{.c} + * void my_hook(struct netif *netif, struct dhcp *dhcp, u8_t state, struct dhcp_msg *msg, + * u8_t msg_type, u16_t *options_len_ptr); + * \endcode + * Arguments: + * - netif: struct netif that the packet will be sent through + * - dhcp: struct dhcp on that netif + * - state: current dhcp state (dhcp_state_enum_t as an u8_t) + * - msg: struct dhcp_msg that will be sent + * - msg_type: dhcp message type to be sent (u8_t) + * - options_len_ptr: pointer to the current length of options in the dhcp_msg "msg" + * (must be increased when options are added!) + * + * Options need to appended like this: + * LWIP_ASSERT("dhcp option overflow", *options_len_ptr + option_len + 2 <= DHCP_OPTIONS_LEN); + * msg->options[(*options_len_ptr)++] = <option_number>; + * msg->options[(*options_len_ptr)++] = <option_len>; + * msg->options[(*options_len_ptr)++] = <option_bytes>; + * [...] + */ +#ifdef __DOXYGEN__ +#define LWIP_HOOK_DHCP_APPEND_OPTIONS(netif, dhcp, state, msg, msg_type, options_len_ptr) +#endif + +/** + * LWIP_HOOK_DHCP_PARSE_OPTION(netif, dhcp, state, msg, msg_type, option, len, pbuf, option_value_offset): + * Called from dhcp_parse_reply when receiving a DHCP message. + * This hook is called for every option in the received message that is not handled internally. 
+ * Signature:\code{.c} + * void my_hook(struct netif *netif, struct dhcp *dhcp, u8_t state, struct dhcp_msg *msg, + * u8_t msg_type, u8_t option, u8_t option_len, struct pbuf *pbuf, u16_t option_value_offset); + * \endcode + * Arguments: + * - netif: struct netif that the packet will be sent through + * - dhcp: struct dhcp on that netif + * - state: current dhcp state (dhcp_state_enum_t as an u8_t) + * - msg: struct dhcp_msg that was received + * - msg_type: dhcp message type received (u8_t, ATTENTION: only valid after + * the message type option has been parsed!) + * - option: option value (u8_t) + * - len: option data length (u8_t) + * - pbuf: pbuf where option data is contained + * - option_value_offset: offset in pbuf where option data begins + * + * A nice way to get the option contents is pbuf_get_contiguous(): + * u8_t buf[32]; + * u8_t *ptr = (u8_t*)pbuf_get_contiguous(p, buf, sizeof(buf), LWIP_MIN(option_len, sizeof(buf)), offset); + */ +#ifdef __DOXYGEN__ +#define LWIP_HOOK_DHCP_PARSE_OPTION(netif, dhcp, state, msg, msg_type, option, len, pbuf, offset) +#endif + +/** + * LWIP_HOOK_DHCP6_APPEND_OPTIONS(netif, dhcp6, state, msg, msg_type, options_len_ptr, max_len): + * Called from various dhcp6 functions when sending a DHCP6 message. + * This hook is called just before the DHCP6 message is sent, so the + * options are at the end of a DHCP6 message. + * Signature:\code{.c} + * void my_hook(struct netif *netif, struct dhcp6 *dhcp, u8_t state, struct dhcp6_msg *msg, + * u8_t msg_type, u16_t *options_len_ptr); + * \endcode + * Arguments: + * - netif: struct netif that the packet will be sent through + * - dhcp6: struct dhcp6 on that netif + * - state: current dhcp6 state (dhcp6_state_enum_t as an u8_t) + * - msg: struct dhcp6_msg that will be sent + * - msg_type: dhcp6 message type to be sent (u8_t) + * - options_len_ptr: pointer to the current length of options in the dhcp6_msg "msg" + * (must be increased when options are added!) + * + * Options need to appended like this: + * u8_t *options = (u8_t *)(msg + 1); + * LWIP_ASSERT("dhcp option overflow", sizeof(struct dhcp6_msg) + *options_len_ptr + newoptlen <= max_len); + * options[(*options_len_ptr)++] = <option_data>; + * [...] + */ +#ifdef __DOXYGEN__ +#define LWIP_HOOK_DHCP6_APPEND_OPTIONS(netif, dhcp6, state, msg, msg_type, options_len_ptr, max_len) +#endif + +/** + * LWIP_HOOK_SOCKETS_SETSOCKOPT(s, sock, level, optname, optval, optlen, err) + * Called from socket API to implement setsockopt() for options not provided by lwIP. + * Core lock is held when this hook is called. + * Signature:\code{.c} + * int my_hook(int s, struct lwip_sock *sock, int level, int optname, const void *optval, socklen_t optlen, int *err) + * \endcode + * Arguments: + * - s: socket file descriptor + * - sock: internal socket descriptor (see lwip/priv/sockets_priv.h) + * - level: protocol level at which the option resides + * - optname: option to set + * - optval: value to set + * - optlen: size of optval + * - err: output error + * Return values: + * - 0: Hook has not consumed the option, code continues as normal (to internal options) + * - != 0: Hook has consumed the option, 'err' is returned + */ +#ifdef __DOXYGEN__ +#define LWIP_HOOK_SOCKETS_SETSOCKOPT(s, sock, level, optname, optval, optlen, err) +#endif + +/** + * LWIP_HOOK_SOCKETS_GETSOCKOPT(s, sock, level, optname, optval, optlen, err) + * Called from socket API to implement getsockopt() for options not provided by lwIP. + * Core lock is held when this hook is called. 
+ * Signature:\code{.c} + * int my_hook(int s, struct lwip_sock *sock, int level, int optname, void *optval, socklen_t *optlen, int *err) + * \endcode + * Arguments: + * - s: socket file descriptor + * - sock: internal socket descriptor (see lwip/priv/sockets_priv.h) + * - level: protocol level at which the option resides + * - optname: option to get + * - optval: value to get + * - optlen: size of optval + * - err: output error + * Return values: + * - 0: Hook has not consumed the option, code continues as normal (to internal options) + * - != 0: Hook has consumed the option, 'err' is returned + */ +#ifdef __DOXYGEN__ +#define LWIP_HOOK_SOCKETS_GETSOCKOPT(s, sock, level, optname, optval, optlen, err) +#endif + +/** + * LWIP_HOOK_NETCONN_EXTERNAL_RESOLVE(name, addr, addrtype, err) + * Called from netconn APIs (not usable with callback apps) allowing an + * external DNS resolver (which uses sequential API) to handle the query. + * Signature:\code{.c} + * int my_hook(const char *name, ip_addr_t *addr, u8_t addrtype, err_t *err) + * \endcode + * Arguments: + * - name: hostname to resolve + * - addr: output host address + * - addrtype: type of address to query + * - err: output error + * Return values: + * - 0: Hook has not consumed hostname query, query continues into DNS module + * - != 0: Hook has consumed the query + * + * err must also be checked to determine if the hook consumed the query, but + * the query failed + */ +#ifdef __DOXYGEN__ +#define LWIP_HOOK_NETCONN_EXTERNAL_RESOLVE(name, addr, addrtype, err) +#endif +/** + * @} + */ + +/* + --------------------------------------- + ---------- Debugging options ---------- + --------------------------------------- +*/ +/** + * @defgroup lwip_opts_debugmsg Debug messages + * @ingroup lwip_opts_debug + * @{ + */ +/** + * LWIP_DBG_MIN_LEVEL: After masking, the value of the debug is + * compared against this value. If it is smaller, then debugging + * messages are written. + * @see debugging_levels + */ +#if !defined LWIP_DBG_MIN_LEVEL || defined __DOXYGEN__ +#define LWIP_DBG_MIN_LEVEL LWIP_DBG_LEVEL_ALL +#endif + +/** + * LWIP_DBG_TYPES_ON: A mask that can be used to globally enable/disable + * debug messages of certain types. + * @see debugging_levels + */ +#if !defined LWIP_DBG_TYPES_ON || defined __DOXYGEN__ +#define LWIP_DBG_TYPES_ON LWIP_DBG_ON +#endif + +/** + * ETHARP_DEBUG: Enable debugging in etharp.c. + */ +#if !defined ETHARP_DEBUG || defined __DOXYGEN__ +#define ETHARP_DEBUG LWIP_DBG_OFF +#endif + +/** + * NETIF_DEBUG: Enable debugging in netif.c. + */ +#if !defined NETIF_DEBUG || defined __DOXYGEN__ +#define NETIF_DEBUG LWIP_DBG_OFF +#endif + +/** + * PBUF_DEBUG: Enable debugging in pbuf.c. + */ +#if !defined PBUF_DEBUG || defined __DOXYGEN__ +#define PBUF_DEBUG LWIP_DBG_OFF +#endif + +/** + * API_LIB_DEBUG: Enable debugging in api_lib.c. + */ +#if !defined API_LIB_DEBUG || defined __DOXYGEN__ +#define API_LIB_DEBUG LWIP_DBG_OFF +#endif + +/** + * API_MSG_DEBUG: Enable debugging in api_msg.c. + */ +#if !defined API_MSG_DEBUG || defined __DOXYGEN__ +#define API_MSG_DEBUG LWIP_DBG_OFF +#endif + +/** + * SOCKETS_DEBUG: Enable debugging in sockets.c. + */ +#if !defined SOCKETS_DEBUG || defined __DOXYGEN__ +#define SOCKETS_DEBUG LWIP_DBG_OFF +#endif + +/** + * ICMP_DEBUG: Enable debugging in icmp.c. + */ +#if !defined ICMP_DEBUG || defined __DOXYGEN__ +#define ICMP_DEBUG LWIP_DBG_OFF +#endif + +/** + * IGMP_DEBUG: Enable debugging in igmp.c. 
+ */ +#if !defined IGMP_DEBUG || defined __DOXYGEN__ +#define IGMP_DEBUG LWIP_DBG_OFF +#endif + +/** + * INET_DEBUG: Enable debugging in inet.c. + */ +#if !defined INET_DEBUG || defined __DOXYGEN__ +#define INET_DEBUG LWIP_DBG_OFF +#endif + +/** + * IP_DEBUG: Enable debugging for IP. + */ +#if !defined IP_DEBUG || defined __DOXYGEN__ +#define IP_DEBUG LWIP_DBG_OFF +#endif + +/** + * IP_REASS_DEBUG: Enable debugging in ip_frag.c for both frag & reass. + */ +#if !defined IP_REASS_DEBUG || defined __DOXYGEN__ +#define IP_REASS_DEBUG LWIP_DBG_OFF +#endif + +/** + * RAW_DEBUG: Enable debugging in raw.c. + */ +#if !defined RAW_DEBUG || defined __DOXYGEN__ +#define RAW_DEBUG LWIP_DBG_OFF +#endif + +/** + * MEM_DEBUG: Enable debugging in mem.c. + */ +#if !defined MEM_DEBUG || defined __DOXYGEN__ +#define MEM_DEBUG LWIP_DBG_OFF +#endif + +/** + * MEMP_DEBUG: Enable debugging in memp.c. + */ +#if !defined MEMP_DEBUG || defined __DOXYGEN__ +#define MEMP_DEBUG LWIP_DBG_OFF +#endif + +/** + * SYS_DEBUG: Enable debugging in sys.c. + */ +#if !defined SYS_DEBUG || defined __DOXYGEN__ +#define SYS_DEBUG LWIP_DBG_OFF +#endif + +/** + * TIMERS_DEBUG: Enable debugging in timers.c. + */ +#if !defined TIMERS_DEBUG || defined __DOXYGEN__ +#define TIMERS_DEBUG LWIP_DBG_OFF +#endif + +/** + * TCP_DEBUG: Enable debugging for TCP. + */ +#if !defined TCP_DEBUG || defined __DOXYGEN__ +#define TCP_DEBUG LWIP_DBG_OFF +#endif + +/** + * TCP_INPUT_DEBUG: Enable debugging in tcp_in.c for incoming debug. + */ +#if !defined TCP_INPUT_DEBUG || defined __DOXYGEN__ +#define TCP_INPUT_DEBUG LWIP_DBG_OFF +#endif + +/** + * TCP_FR_DEBUG: Enable debugging in tcp_in.c for fast retransmit. + */ +#if !defined TCP_FR_DEBUG || defined __DOXYGEN__ +#define TCP_FR_DEBUG LWIP_DBG_OFF +#endif + +/** + * TCP_RTO_DEBUG: Enable debugging in TCP for retransmit + * timeout. + */ +#if !defined TCP_RTO_DEBUG || defined __DOXYGEN__ +#define TCP_RTO_DEBUG LWIP_DBG_OFF +#endif + +/** + * TCP_CWND_DEBUG: Enable debugging for TCP congestion window. + */ +#if !defined TCP_CWND_DEBUG || defined __DOXYGEN__ +#define TCP_CWND_DEBUG LWIP_DBG_OFF +#endif + +/** + * TCP_WND_DEBUG: Enable debugging in tcp_in.c for window updating. + */ +#if !defined TCP_WND_DEBUG || defined __DOXYGEN__ +#define TCP_WND_DEBUG LWIP_DBG_OFF +#endif + +/** + * TCP_OUTPUT_DEBUG: Enable debugging in tcp_out.c output functions. + */ +#if !defined TCP_OUTPUT_DEBUG || defined __DOXYGEN__ +#define TCP_OUTPUT_DEBUG LWIP_DBG_OFF +#endif + +/** + * TCP_RST_DEBUG: Enable debugging for TCP with the RST message. + */ +#if !defined TCP_RST_DEBUG || defined __DOXYGEN__ +#define TCP_RST_DEBUG LWIP_DBG_OFF +#endif + +/** + * TCP_QLEN_DEBUG: Enable debugging for TCP queue lengths. + */ +#if !defined TCP_QLEN_DEBUG || defined __DOXYGEN__ +#define TCP_QLEN_DEBUG LWIP_DBG_OFF +#endif + +/** + * UDP_DEBUG: Enable debugging in UDP. + */ +#if !defined UDP_DEBUG || defined __DOXYGEN__ +#define UDP_DEBUG LWIP_DBG_OFF +#endif + +/** + * TCPIP_DEBUG: Enable debugging in tcpip.c. + */ +#if !defined TCPIP_DEBUG || defined __DOXYGEN__ +#define TCPIP_DEBUG LWIP_DBG_OFF +#endif + +/** + * SLIP_DEBUG: Enable debugging in slipif.c. + */ +#if !defined SLIP_DEBUG || defined __DOXYGEN__ +#define SLIP_DEBUG LWIP_DBG_OFF +#endif + +/** + * DHCP_DEBUG: Enable debugging in dhcp.c. + */ +#if !defined DHCP_DEBUG || defined __DOXYGEN__ +#define DHCP_DEBUG LWIP_DBG_OFF +#endif + +/** + * AUTOIP_DEBUG: Enable debugging in autoip.c. 
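+ * As an illustration of these *_DEBUG switches: a port's lwipopts.h can turn
+ * on output for selected modules (LWIP_DEBUG must be defined as well, and
+ * LWIP_PLATFORM_DIAG must be routed to a console/UART by the port):
+ * \code{.c}
+ * #define LWIP_DEBUG
+ * #define DHCP_DEBUG    LWIP_DBG_ON
+ * #define TCPIP_DEBUG   LWIP_DBG_ON
+ * \endcode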
+ */ +#if !defined AUTOIP_DEBUG || defined __DOXYGEN__ +#define AUTOIP_DEBUG LWIP_DBG_OFF +#endif + +/** + * DNS_DEBUG: Enable debugging for DNS. + */ +#if !defined DNS_DEBUG || defined __DOXYGEN__ +#define DNS_DEBUG LWIP_DBG_OFF +#endif + +/** + * IP6_DEBUG: Enable debugging for IPv6. + */ +#if !defined IP6_DEBUG || defined __DOXYGEN__ +#define IP6_DEBUG LWIP_DBG_OFF +#endif + +/** + * DHCP6_DEBUG: Enable debugging in dhcp6.c. + */ +#if !defined DHCP6_DEBUG || defined __DOXYGEN__ +#define DHCP6_DEBUG LWIP_DBG_OFF +#endif +/** + * @} + */ + +/** + * LWIP_TESTMODE: Changes to make unit test possible + */ +#if !defined LWIP_TESTMODE +#define LWIP_TESTMODE 0 +#endif + +/* + -------------------------------------------------- + ---------- Performance tracking options ---------- + -------------------------------------------------- +*/ +/** + * @defgroup lwip_opts_perf Performance + * @ingroup lwip_opts_debug + * @{ + */ +/** + * LWIP_PERF: Enable performance testing for lwIP + * (if enabled, arch/perf.h is included) + */ +#if !defined LWIP_PERF || defined __DOXYGEN__ +#define LWIP_PERF 0 +#endif +/** + * @} + */ + +#endif /* LWIP_HDR_OPT_H */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/pbuf.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/pbuf.h new file mode 100644 index 0000000..328c772 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/pbuf.h @@ -0,0 +1,322 @@ +/** + * @file + * pbuf API + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
+ * + * Author: Adam Dunkels + * + */ + +#ifndef LWIP_HDR_PBUF_H +#define LWIP_HDR_PBUF_H + +#include "lwip/opt.h" +#include "lwip/err.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/** LWIP_SUPPORT_CUSTOM_PBUF==1: Custom pbufs behave much like their pbuf type + * but they are allocated by external code (initialised by calling + * pbuf_alloced_custom()) and when pbuf_free gives up their last reference, they + * are freed by calling pbuf_custom->custom_free_function(). + * Currently, the pbuf_custom code is only needed for one specific configuration + * of IP_FRAG, unless required by external driver/application code. */ +#ifndef LWIP_SUPPORT_CUSTOM_PBUF +#define LWIP_SUPPORT_CUSTOM_PBUF ((IP_FRAG && !LWIP_NETIF_TX_SINGLE_PBUF) || (LWIP_IPV6 && LWIP_IPV6_FRAG)) +#endif + +/** @ingroup pbuf + * PBUF_NEEDS_COPY(p): return a boolean value indicating whether the given + * pbuf needs to be copied in order to be kept around beyond the current call + * stack without risking being corrupted. The default setting provides safety: + * it will make a copy iof any pbuf chain that does not consist entirely of + * PBUF_ROM type pbufs. For setups with zero-copy support, it may be redefined + * to evaluate to true in all cases, for example. However, doing so also has an + * effect on the application side: any buffers that are *not* copied must also + * *not* be reused by the application after passing them to lwIP. For example, + * when setting PBUF_NEEDS_COPY to (0), after using udp_send() with a PBUF_RAM + * pbuf, the application must free the pbuf immediately, rather than reusing it + * for other purposes. For more background information on this, see tasks #6735 + * and #7896, and bugs #11400 and #49914. */ +#ifndef PBUF_NEEDS_COPY +#define PBUF_NEEDS_COPY(p) ((p)->type_internal & PBUF_TYPE_FLAG_DATA_VOLATILE) +#endif /* PBUF_NEEDS_COPY */ + +/* @todo: We need a mechanism to prevent wasting memory in every pbuf + (TCP vs. UDP, IPv4 vs. IPv6: UDP/IPv4 packets may waste up to 28 bytes) */ + +#define PBUF_TRANSPORT_HLEN 20 +#if LWIP_IPV6 +#define PBUF_IP_HLEN 40 +#else +#define PBUF_IP_HLEN 20 +#endif + +/** + * @ingroup pbuf + * Enumeration of pbuf layers + */ +typedef enum { + /** Includes spare room for transport layer header, e.g. UDP header. + * Use this if you intend to pass the pbuf to functions like udp_send(). + */ + PBUF_TRANSPORT = PBUF_LINK_ENCAPSULATION_HLEN + PBUF_LINK_HLEN + PBUF_IP_HLEN + PBUF_TRANSPORT_HLEN, + /** Includes spare room for IP header. + * Use this if you intend to pass the pbuf to functions like raw_send(). + */ + PBUF_IP = PBUF_LINK_ENCAPSULATION_HLEN + PBUF_LINK_HLEN + PBUF_IP_HLEN, + /** Includes spare room for link layer header (ethernet header). + * Use this if you intend to pass the pbuf to functions like ethernet_output(). + * @see PBUF_LINK_HLEN + */ + PBUF_LINK = PBUF_LINK_ENCAPSULATION_HLEN + PBUF_LINK_HLEN, + /** Includes spare room for additional encapsulation header before ethernet + * headers (e.g. 802.11). + * Use this if you intend to pass the pbuf to functions like netif->linkoutput(). + * @see PBUF_LINK_ENCAPSULATION_HLEN + */ + PBUF_RAW_TX = PBUF_LINK_ENCAPSULATION_HLEN, + /** Use this for input packets in a netif driver when calling netif->input() + * in the most common case - ethernet-layer netif driver. */ + PBUF_RAW = 0 +} pbuf_layer; + + +/* Base flags for pbuf_type definitions: */ + +/** Indicates that the payload directly follows the struct pbuf. + * This makes @ref pbuf_header work in both directions. 
*/ +#define PBUF_TYPE_FLAG_STRUCT_DATA_CONTIGUOUS 0x80 +/** Indicates the data stored in this pbuf can change. If this pbuf needs + * to be queued, it must be copied/duplicated. */ +#define PBUF_TYPE_FLAG_DATA_VOLATILE 0x40 +/** 4 bits are reserved for 16 allocation sources (e.g. heap, pool1, pool2, etc) + * Internally, we use: 0=heap, 1=MEMP_PBUF, 2=MEMP_PBUF_POOL -> 13 types free*/ +#define PBUF_TYPE_ALLOC_SRC_MASK 0x0F +/** Indicates this pbuf is used for RX (if not set, indicates use for TX). + * This information can be used to keep some spare RX buffers e.g. for + * receiving TCP ACKs to unblock a connection) */ +#define PBUF_ALLOC_FLAG_RX 0x0100 +/** Indicates the application needs the pbuf payload to be in one piece */ +#define PBUF_ALLOC_FLAG_DATA_CONTIGUOUS 0x0200 + +#define PBUF_TYPE_ALLOC_SRC_MASK_STD_HEAP 0x00 +#define PBUF_TYPE_ALLOC_SRC_MASK_STD_MEMP_PBUF 0x01 +#define PBUF_TYPE_ALLOC_SRC_MASK_STD_MEMP_PBUF_POOL 0x02 +/** First pbuf allocation type for applications */ +#define PBUF_TYPE_ALLOC_SRC_MASK_APP_MIN 0x03 +/** Last pbuf allocation type for applications */ +#define PBUF_TYPE_ALLOC_SRC_MASK_APP_MAX PBUF_TYPE_ALLOC_SRC_MASK + +/** + * @ingroup pbuf + * Enumeration of pbuf types + */ +typedef enum { + /** pbuf data is stored in RAM, used for TX mostly, struct pbuf and its payload + are allocated in one piece of contiguous memory (so the first payload byte + can be calculated from struct pbuf). + pbuf_alloc() allocates PBUF_RAM pbufs as unchained pbufs (although that might + change in future versions). + This should be used for all OUTGOING packets (TX).*/ + PBUF_RAM = (PBUF_ALLOC_FLAG_DATA_CONTIGUOUS | PBUF_TYPE_FLAG_STRUCT_DATA_CONTIGUOUS | PBUF_TYPE_ALLOC_SRC_MASK_STD_HEAP), + /** pbuf data is stored in ROM, i.e. struct pbuf and its payload are located in + totally different memory areas. Since it points to ROM, payload does not + have to be copied when queued for transmission. */ + PBUF_ROM = PBUF_TYPE_ALLOC_SRC_MASK_STD_MEMP_PBUF, + /** pbuf comes from the pbuf pool. Much like PBUF_ROM but payload might change + so it has to be duplicated when queued before transmitting, depending on + who has a 'ref' to it. */ + PBUF_REF = (PBUF_TYPE_FLAG_DATA_VOLATILE | PBUF_TYPE_ALLOC_SRC_MASK_STD_MEMP_PBUF), + /** pbuf payload refers to RAM. This one comes from a pool and should be used + for RX. Payload can be chained (scatter-gather RX) but like PBUF_RAM, struct + pbuf and its payload are allocated in one piece of contiguous memory (so + the first payload byte can be calculated from struct pbuf). + Don't use this for TX, if the pool becomes empty e.g. because of TCP queuing, + you are unable to receive TCP acks! 
*/ + PBUF_POOL = (PBUF_ALLOC_FLAG_RX | PBUF_TYPE_FLAG_STRUCT_DATA_CONTIGUOUS | PBUF_TYPE_ALLOC_SRC_MASK_STD_MEMP_PBUF_POOL) +} pbuf_type; + + +/** indicates this packet's data should be immediately passed to the application */ +#define PBUF_FLAG_PUSH 0x01U +/** indicates this is a custom pbuf: pbuf_free calls pbuf_custom->custom_free_function() + when the last reference is released (plus custom PBUF_RAM cannot be trimmed) */ +#define PBUF_FLAG_IS_CUSTOM 0x02U +/** indicates this pbuf is UDP multicast to be looped back */ +#define PBUF_FLAG_MCASTLOOP 0x04U +/** indicates this pbuf was received as link-level broadcast */ +#define PBUF_FLAG_LLBCAST 0x08U +/** indicates this pbuf was received as link-level multicast */ +#define PBUF_FLAG_LLMCAST 0x10U +/** indicates this pbuf includes a TCP FIN flag */ +#define PBUF_FLAG_TCP_FIN 0x20U + +/** Main packet buffer struct */ +struct pbuf { + /** next pbuf in singly linked pbuf chain */ + struct pbuf *next; + + /** pointer to the actual data in the buffer */ + void *payload; + + /** + * total length of this buffer and all next buffers in chain + * belonging to the same packet. + * + * For non-queue packet chains this is the invariant: + * p->tot_len == p->len + (p->next? p->next->tot_len: 0) + */ + u16_t tot_len; + + /** length of this buffer */ + u16_t len; + + /** a bit field indicating pbuf type and allocation sources + (see PBUF_TYPE_FLAG_*, PBUF_ALLOC_FLAG_* and PBUF_TYPE_ALLOC_SRC_MASK) + */ + u8_t type_internal; + + /** misc flags */ + u8_t flags; + + /** + * the reference count always equals the number of pointers + * that refer to this pbuf. This can be pointers from an application, + * the stack itself, or pbuf->next pointers from a chain. + */ + LWIP_PBUF_REF_T ref; + + /** For incoming packets, this contains the input netif's index */ + u8_t if_idx; +}; + + +/** Helper struct for const-correctness only. + * The only meaning of this one is to provide a const payload pointer + * for PBUF_ROM type. + */ +struct pbuf_rom { + /** next pbuf in singly linked pbuf chain */ + struct pbuf *next; + + /** pointer to the actual data in the buffer */ + const void *payload; +}; + +#if LWIP_SUPPORT_CUSTOM_PBUF +/** Prototype for a function to free a custom pbuf */ +typedef void (*pbuf_free_custom_fn)(struct pbuf *p); + +/** A custom pbuf: like a pbuf, but following a function pointer to free it. */ +struct pbuf_custom { + /** The actual pbuf */ + struct pbuf pbuf; + /** This function is called when pbuf_free deallocates this pbuf(_custom) */ + pbuf_free_custom_fn custom_free_function; +}; +#endif /* LWIP_SUPPORT_CUSTOM_PBUF */ + +/** Define this to 0 to prevent freeing ooseq pbufs when the PBUF_POOL is empty */ +#ifndef PBUF_POOL_FREE_OOSEQ +#define PBUF_POOL_FREE_OOSEQ 1 +#endif /* PBUF_POOL_FREE_OOSEQ */ +#if LWIP_TCP && TCP_QUEUE_OOSEQ && NO_SYS && PBUF_POOL_FREE_OOSEQ +extern volatile u8_t pbuf_free_ooseq_pending; +void pbuf_free_ooseq(void); +/** When not using sys_check_timeouts(), call PBUF_CHECK_FREE_OOSEQ() + at regular intervals from main level to check if ooseq pbufs need to be + freed! 
*/ +#define PBUF_CHECK_FREE_OOSEQ() do { if(pbuf_free_ooseq_pending) { \ + /* pbuf_alloc() reported PBUF_POOL to be empty -> try to free some \ + ooseq queued pbufs now */ \ + pbuf_free_ooseq(); }}while(0) +#else /* LWIP_TCP && TCP_QUEUE_OOSEQ && NO_SYS && PBUF_POOL_FREE_OOSEQ */ + /* Otherwise declare an empty PBUF_CHECK_FREE_OOSEQ */ + #define PBUF_CHECK_FREE_OOSEQ() +#endif /* LWIP_TCP && TCP_QUEUE_OOSEQ && NO_SYS && PBUF_POOL_FREE_OOSEQ*/ + +/* Initializes the pbuf module. This call is empty for now, but may not be in future. */ +#define pbuf_init() + +struct pbuf *pbuf_alloc(pbuf_layer l, u16_t length, pbuf_type type); +struct pbuf *pbuf_alloc_reference(void *payload, u16_t length, pbuf_type type); +#if LWIP_SUPPORT_CUSTOM_PBUF +struct pbuf *pbuf_alloced_custom(pbuf_layer l, u16_t length, pbuf_type type, + struct pbuf_custom *p, void *payload_mem, + u16_t payload_mem_len); +#endif /* LWIP_SUPPORT_CUSTOM_PBUF */ +void pbuf_realloc(struct pbuf *p, u16_t size); +#define pbuf_get_allocsrc(p) ((p)->type_internal & PBUF_TYPE_ALLOC_SRC_MASK) +#define pbuf_match_allocsrc(p, type) (pbuf_get_allocsrc(p) == ((type) & PBUF_TYPE_ALLOC_SRC_MASK)) +#define pbuf_match_type(p, type) pbuf_match_allocsrc(p, type) +u8_t pbuf_header(struct pbuf *p, s16_t header_size); +u8_t pbuf_header_force(struct pbuf *p, s16_t header_size); +u8_t pbuf_add_header(struct pbuf *p, size_t header_size_increment); +u8_t pbuf_add_header_force(struct pbuf *p, size_t header_size_increment); +u8_t pbuf_remove_header(struct pbuf *p, size_t header_size); +struct pbuf *pbuf_free_header(struct pbuf *q, u16_t size); +void pbuf_ref(struct pbuf *p); +u8_t pbuf_free(struct pbuf *p); +u16_t pbuf_clen(const struct pbuf *p); +void pbuf_cat(struct pbuf *head, struct pbuf *tail); +void pbuf_chain(struct pbuf *head, struct pbuf *tail); +struct pbuf *pbuf_dechain(struct pbuf *p); +err_t pbuf_copy(struct pbuf *p_to, const struct pbuf *p_from); +u16_t pbuf_copy_partial(const struct pbuf *p, void *dataptr, u16_t len, u16_t offset); +void *pbuf_get_contiguous(const struct pbuf *p, void *buffer, size_t bufsize, u16_t len, u16_t offset); +err_t pbuf_take(struct pbuf *buf, const void *dataptr, u16_t len); +err_t pbuf_take_at(struct pbuf *buf, const void *dataptr, u16_t len, u16_t offset); +struct pbuf *pbuf_skip(struct pbuf* in, u16_t in_offset, u16_t* out_offset); +struct pbuf *pbuf_coalesce(struct pbuf *p, pbuf_layer layer); +struct pbuf *pbuf_clone(pbuf_layer l, pbuf_type type, struct pbuf *p); +#if LWIP_CHECKSUM_ON_COPY +err_t pbuf_fill_chksum(struct pbuf *p, u16_t start_offset, const void *dataptr, + u16_t len, u16_t *chksum); +#endif /* LWIP_CHECKSUM_ON_COPY */ +#if LWIP_TCP && TCP_QUEUE_OOSEQ && LWIP_WND_SCALE +void pbuf_split_64k(struct pbuf *p, struct pbuf **rest); +#endif /* LWIP_TCP && TCP_QUEUE_OOSEQ && LWIP_WND_SCALE */ + +u8_t pbuf_get_at(const struct pbuf* p, u16_t offset); +int pbuf_try_get_at(const struct pbuf* p, u16_t offset); +void pbuf_put_at(struct pbuf* p, u16_t offset, u8_t data); +u16_t pbuf_memcmp(const struct pbuf* p, u16_t offset, const void* s2, u16_t n); +u16_t pbuf_memfind(const struct pbuf* p, const void* mem, u16_t mem_len, u16_t start_offset); +u16_t pbuf_strstr(const struct pbuf* p, const char* substr); + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_PBUF_H */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/priv/altcp_priv.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/priv/altcp_priv.h new 
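The pbuf type flags and API prototypes above are easiest to read next to a short usage sketch (illustrative only, not part of the patch): allocate a contiguous PBUF_RAM buffer for transmit, copy application data in with pbuf_take(), and release the reference with pbuf_free().

/* --- illustrative sketch, not part of the patch: basic PBUF_RAM usage --- */
#include "lwip/pbuf.h"

/* Allocate a contiguous PBUF_RAM buffer at the transport layer for TX,
 * copy application data into it, and drop the reference again.
 * pbuf_free() only releases the memory once the reference count is zero. */
static void pbuf_tx_sketch(void)
{
  const char msg[] = "hello";
  struct pbuf *p = pbuf_alloc(PBUF_TRANSPORT, sizeof(msg), PBUF_RAM);
  if (p != NULL) {
    pbuf_take(p, msg, sizeof(msg));  /* copy data across the pbuf chain */
    /* ... hand p to e.g. udp_send() or tcp_write() here ... */
    pbuf_free(p);                    /* release our reference */
  }
}
/* --- end of sketch --- */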
file mode 100644 index 0000000..381fc96 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/priv/altcp_priv.h @@ -0,0 +1,146 @@ +/** + * @file + * Application layered TCP connection API (to be used from TCPIP thread)\n + * This interface mimics the tcp callback API to the application while preventing + * direct linking (much like virtual functions). + * This way, an application can make use of other application layer protocols + * on top of TCP without knowing the details (e.g. TLS, proxy connection). + */ + +/* + * Copyright (c) 2017 Simon Goldschmidt + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
+ * + * Author: Simon Goldschmidt + * + */ +#ifndef LWIP_HDR_ALTCP_PRIV_H +#define LWIP_HDR_ALTCP_PRIV_H + +#include "lwip/opt.h" + +#if LWIP_ALTCP /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/altcp.h" +#include "lwip/ip_addr.h" + +#ifdef __cplusplus +extern "C" { +#endif + +struct altcp_pcb *altcp_alloc(void); +void altcp_free(struct altcp_pcb *conn); + +/* Function prototypes for application layers */ +typedef void (*altcp_set_poll_fn)(struct altcp_pcb *conn, u8_t interval); +typedef void (*altcp_recved_fn)(struct altcp_pcb *conn, u16_t len); +typedef err_t (*altcp_bind_fn)(struct altcp_pcb *conn, const ip_addr_t *ipaddr, u16_t port); +typedef err_t (*altcp_connect_fn)(struct altcp_pcb *conn, const ip_addr_t *ipaddr, u16_t port, altcp_connected_fn connected); + +typedef struct altcp_pcb *(*altcp_listen_fn)(struct altcp_pcb *conn, u8_t backlog, err_t *err); + +typedef void (*altcp_abort_fn)(struct altcp_pcb *conn); +typedef err_t (*altcp_close_fn)(struct altcp_pcb *conn); +typedef err_t (*altcp_shutdown_fn)(struct altcp_pcb *conn, int shut_rx, int shut_tx); + +typedef err_t (*altcp_write_fn)(struct altcp_pcb *conn, const void *dataptr, u16_t len, u8_t apiflags); +typedef err_t (*altcp_output_fn)(struct altcp_pcb *conn); + +typedef u16_t (*altcp_mss_fn)(struct altcp_pcb *conn); +typedef u16_t (*altcp_sndbuf_fn)(struct altcp_pcb *conn); +typedef u16_t (*altcp_sndqueuelen_fn)(struct altcp_pcb *conn); +typedef void (*altcp_nagle_disable_fn)(struct altcp_pcb *conn); +typedef void (*altcp_nagle_enable_fn)(struct altcp_pcb *conn); +typedef int (*altcp_nagle_disabled_fn)(struct altcp_pcb *conn); + +typedef void (*altcp_setprio_fn)(struct altcp_pcb *conn, u8_t prio); + +typedef void (*altcp_dealloc_fn)(struct altcp_pcb *conn); + +typedef err_t (*altcp_get_tcp_addrinfo_fn)(struct altcp_pcb *conn, int local, ip_addr_t *addr, u16_t *port); +typedef ip_addr_t *(*altcp_get_ip_fn)(struct altcp_pcb *conn, int local); +typedef u16_t (*altcp_get_port_fn)(struct altcp_pcb *conn, int local); + +#ifdef LWIP_DEBUG +typedef enum tcp_state (*altcp_dbg_get_tcp_state_fn)(struct altcp_pcb *conn); +#endif + +struct altcp_functions { + altcp_set_poll_fn set_poll; + altcp_recved_fn recved; + altcp_bind_fn bind; + altcp_connect_fn connect; + altcp_listen_fn listen; + altcp_abort_fn abort; + altcp_close_fn close; + altcp_shutdown_fn shutdown; + altcp_write_fn write; + altcp_output_fn output; + altcp_mss_fn mss; + altcp_sndbuf_fn sndbuf; + altcp_sndqueuelen_fn sndqueuelen; + altcp_nagle_disable_fn nagle_disable; + altcp_nagle_enable_fn nagle_enable; + altcp_nagle_disabled_fn nagle_disabled; + altcp_setprio_fn setprio; + altcp_dealloc_fn dealloc; + altcp_get_tcp_addrinfo_fn addrinfo; + altcp_get_ip_fn getip; + altcp_get_port_fn getport; +#ifdef LWIP_DEBUG + altcp_dbg_get_tcp_state_fn dbg_get_tcp_state; +#endif +}; + +void altcp_default_set_poll(struct altcp_pcb *conn, u8_t interval); +void altcp_default_recved(struct altcp_pcb *conn, u16_t len); +err_t altcp_default_bind(struct altcp_pcb *conn, const ip_addr_t *ipaddr, u16_t port); +err_t altcp_default_shutdown(struct altcp_pcb *conn, int shut_rx, int shut_tx); +err_t altcp_default_write(struct altcp_pcb *conn, const void *dataptr, u16_t len, u8_t apiflags); +err_t altcp_default_output(struct altcp_pcb *conn); +u16_t altcp_default_mss(struct altcp_pcb *conn); +u16_t altcp_default_sndbuf(struct altcp_pcb *conn); +u16_t altcp_default_sndqueuelen(struct altcp_pcb *conn); +void altcp_default_nagle_disable(struct altcp_pcb *conn); +void 
altcp_default_nagle_enable(struct altcp_pcb *conn); +int altcp_default_nagle_disabled(struct altcp_pcb *conn); +void altcp_default_setprio(struct altcp_pcb *conn, u8_t prio); +void altcp_default_dealloc(struct altcp_pcb *conn); +err_t altcp_default_get_tcp_addrinfo(struct altcp_pcb *conn, int local, ip_addr_t *addr, u16_t *port); +ip_addr_t *altcp_default_get_ip(struct altcp_pcb *conn, int local); +u16_t altcp_default_get_port(struct altcp_pcb *conn, int local); +#ifdef LWIP_DEBUG +enum tcp_state altcp_default_dbg_get_tcp_state(struct altcp_pcb *conn); +#endif + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_ALTCP */ + +#endif /* LWIP_HDR_ALTCP_PRIV_H */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/priv/api_msg.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/priv/api_msg.h new file mode 100644 index 0000000..64c8763 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/priv/api_msg.h @@ -0,0 +1,272 @@ +/** + * @file + * netconn API lwIP internal implementations (do not use in application code) + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
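altcp_priv.h above behaves like a vtable: each application layer (plain TCP, TLS, a proxy) fills a struct altcp_functions, and the altcp_default_* handlers can back any operation the layer does not override. A sketch of such a table, assuming the layer provides connection setup/teardown handlers elsewhere (names illustrative, not part of the patch):

/* --- illustrative sketch, not part of the patch: altcp dispatch table --- */
#include "lwip/opt.h"
#if LWIP_ALTCP
#include "lwip/priv/altcp_priv.h"

/* Hypothetical layer that forwards these operations to the default
 * handlers declared above; members left out (connect, listen, close, ...)
 * stay NULL and must be supplied by the layer itself. */
static const struct altcp_functions example_layer_functions = {
  .set_poll    = altcp_default_set_poll,
  .recved      = altcp_default_recved,
  .bind        = altcp_default_bind,
  .shutdown    = altcp_default_shutdown,
  .write       = altcp_default_write,
  .output      = altcp_default_output,
  .mss         = altcp_default_mss,
  .sndbuf      = altcp_default_sndbuf,
  .sndqueuelen = altcp_default_sndqueuelen,
  .dealloc     = altcp_default_dealloc,
  .addrinfo    = altcp_default_get_tcp_addrinfo,
  .getip       = altcp_default_get_ip,
  .getport     = altcp_default_get_port,
};
#endif /* LWIP_ALTCP */
/* --- end of sketch --- */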
+ * + * Author: Adam Dunkels + * + */ +#ifndef LWIP_HDR_API_MSG_H +#define LWIP_HDR_API_MSG_H + +#include "lwip/opt.h" + +#include "lwip/arch.h" +#include "lwip/ip_addr.h" +#include "lwip/err.h" +#include "lwip/sys.h" +#include "lwip/igmp.h" +#include "lwip/api.h" +#include "lwip/priv/tcpip_priv.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#if LWIP_NETCONN || LWIP_SOCKET /* don't build if not configured for use in lwipopts.h */ +/* Note: Netconn API is always available when sockets are enabled - + * sockets are implemented on top of them */ + +#if LWIP_MPU_COMPATIBLE +#if LWIP_NETCONN_SEM_PER_THREAD +#define API_MSG_M_DEF_SEM(m) *m +#else +#define API_MSG_M_DEF_SEM(m) API_MSG_M_DEF(m) +#endif +#else /* LWIP_MPU_COMPATIBLE */ +#define API_MSG_M_DEF_SEM(m) API_MSG_M_DEF(m) +#endif /* LWIP_MPU_COMPATIBLE */ + +/* For the netconn API, these values are use as a bitmask! */ +#define NETCONN_SHUT_RD 1 +#define NETCONN_SHUT_WR 2 +#define NETCONN_SHUT_RDWR (NETCONN_SHUT_RD | NETCONN_SHUT_WR) + +/* IP addresses and port numbers are expected to be in + * the same byte order as in the corresponding pcb. + */ +/** This struct includes everything that is necessary to execute a function + for a netconn in another thread context (mainly used to process netconns + in the tcpip_thread context to be thread safe). */ +struct api_msg { + /** The netconn which to process - always needed: it includes the semaphore + which is used to block the application thread until the function finished. */ + struct netconn *conn; + /** The return value of the function executed in tcpip_thread. */ + err_t err; + /** Depending on the executed function, one of these union members is used */ + union { + /** used for lwip_netconn_do_send */ + struct netbuf *b; + /** used for lwip_netconn_do_newconn */ + struct { + u8_t proto; + } n; + /** used for lwip_netconn_do_bind and lwip_netconn_do_connect */ + struct { + API_MSG_M_DEF_C(ip_addr_t, ipaddr); + u16_t port; + u8_t if_idx; + } bc; + /** used for lwip_netconn_do_getaddr */ + struct { + ip_addr_t API_MSG_M_DEF(ipaddr); + u16_t API_MSG_M_DEF(port); + u8_t local; + } ad; + /** used for lwip_netconn_do_write */ + struct { + /** current vector to write */ + const struct netvector *vector; + /** number of unwritten vectors */ + u16_t vector_cnt; + /** offset into current vector */ + size_t vector_off; + /** total length across vectors */ + size_t len; + /** offset into total length/output of bytes written when err == ERR_OK */ + size_t offset; + u8_t apiflags; +#if LWIP_SO_SNDTIMEO + u32_t time_started; +#endif /* LWIP_SO_SNDTIMEO */ + } w; + /** used for lwip_netconn_do_recv */ + struct { + size_t len; + } r; +#if LWIP_TCP + /** used for lwip_netconn_do_close (/shutdown) */ + struct { + u8_t shut; +#if LWIP_SO_SNDTIMEO || LWIP_SO_LINGER + u32_t time_started; +#else /* LWIP_SO_SNDTIMEO || LWIP_SO_LINGER */ + u8_t polls_left; +#endif /* LWIP_SO_SNDTIMEO || LWIP_SO_LINGER */ + } sd; +#endif /* LWIP_TCP */ +#if LWIP_IGMP || (LWIP_IPV6 && LWIP_IPV6_MLD) + /** used for lwip_netconn_do_join_leave_group */ + struct { + API_MSG_M_DEF_C(ip_addr_t, multiaddr); + API_MSG_M_DEF_C(ip_addr_t, netif_addr); + u8_t if_idx; + enum netconn_igmp join_or_leave; + } jl; +#endif /* LWIP_IGMP || (LWIP_IPV6 && LWIP_IPV6_MLD) */ +#if TCP_LISTEN_BACKLOG + struct { + u8_t backlog; + } lb; +#endif /* TCP_LISTEN_BACKLOG */ + } msg; +#if LWIP_NETCONN_SEM_PER_THREAD + sys_sem_t* op_completed_sem; +#endif /* LWIP_NETCONN_SEM_PER_THREAD */ +}; + +#if LWIP_NETCONN_SEM_PER_THREAD +#define LWIP_API_MSG_SEM(msg) 
((msg)->op_completed_sem) +#else /* LWIP_NETCONN_SEM_PER_THREAD */ +#define LWIP_API_MSG_SEM(msg) (&(msg)->conn->op_completed) +#endif /* LWIP_NETCONN_SEM_PER_THREAD */ + + +#if LWIP_DNS +/** As lwip_netconn_do_gethostbyname requires more arguments but doesn't require a netconn, + it has its own struct (to avoid struct api_msg getting bigger than necessary). + lwip_netconn_do_gethostbyname must be called using tcpip_callback instead of tcpip_apimsg + (see netconn_gethostbyname). */ +struct dns_api_msg { + /** Hostname to query or dotted IP address string */ +#if LWIP_MPU_COMPATIBLE + char name[DNS_MAX_NAME_LENGTH]; +#else /* LWIP_MPU_COMPATIBLE */ + const char *name; +#endif /* LWIP_MPU_COMPATIBLE */ + /** The resolved address is stored here */ + ip_addr_t API_MSG_M_DEF(addr); +#if LWIP_IPV4 && LWIP_IPV6 + /** Type of resolve call */ + u8_t dns_addrtype; +#endif /* LWIP_IPV4 && LWIP_IPV6 */ + /** This semaphore is posted when the name is resolved, the application thread + should wait on it. */ + sys_sem_t API_MSG_M_DEF_SEM(sem); + /** Errors are given back here */ + err_t API_MSG_M_DEF(err); +}; +#endif /* LWIP_DNS */ + +#if LWIP_NETCONN_FULLDUPLEX +int lwip_netconn_is_deallocated_msg(void *msg); +#endif +int lwip_netconn_is_err_msg(void *msg, err_t *err); +void lwip_netconn_do_newconn (void *m); +void lwip_netconn_do_delconn (void *m); +void lwip_netconn_do_bind (void *m); +void lwip_netconn_do_bind_if (void *m); +void lwip_netconn_do_connect (void *m); +void lwip_netconn_do_disconnect (void *m); +void lwip_netconn_do_listen (void *m); +void lwip_netconn_do_send (void *m); +void lwip_netconn_do_recv (void *m); +#if TCP_LISTEN_BACKLOG +void lwip_netconn_do_accepted (void *m); +#endif /* TCP_LISTEN_BACKLOG */ +void lwip_netconn_do_write (void *m); +void lwip_netconn_do_getaddr (void *m); +void lwip_netconn_do_close (void *m); +void lwip_netconn_do_shutdown (void *m); +#if LWIP_IGMP || (LWIP_IPV6 && LWIP_IPV6_MLD) +void lwip_netconn_do_join_leave_group(void *m); +void lwip_netconn_do_join_leave_group_netif(void *m); +#endif /* LWIP_IGMP || (LWIP_IPV6 && LWIP_IPV6_MLD) */ + +#if LWIP_DNS +void lwip_netconn_do_gethostbyname(void *arg); +#endif /* LWIP_DNS */ + +struct netconn* netconn_alloc(enum netconn_type t, netconn_callback callback); +void netconn_free(struct netconn *conn); + +#endif /* LWIP_NETCONN || LWIP_SOCKET */ + +#if LWIP_NETIF_API /* don't build if not configured for use in lwipopts.h */ + +/* netifapi related lwIP internal definitions */ + +#if LWIP_MPU_COMPATIBLE +#define NETIFAPI_IPADDR_DEF(type, m) type m +#else /* LWIP_MPU_COMPATIBLE */ +#define NETIFAPI_IPADDR_DEF(type, m) const type * m +#endif /* LWIP_MPU_COMPATIBLE */ + +typedef void (*netifapi_void_fn)(struct netif *netif); +typedef err_t (*netifapi_errt_fn)(struct netif *netif); + +struct netifapi_msg { + struct tcpip_api_call_data call; + struct netif *netif; + union { + struct { +#if LWIP_IPV4 + NETIFAPI_IPADDR_DEF(ip4_addr_t, ipaddr); + NETIFAPI_IPADDR_DEF(ip4_addr_t, netmask); + NETIFAPI_IPADDR_DEF(ip4_addr_t, gw); +#endif /* LWIP_IPV4 */ + void *state; + netif_init_fn init; + netif_input_fn input; + } add; + struct { + netifapi_void_fn voidfunc; + netifapi_errt_fn errtfunc; + } common; + struct { +#if LWIP_MPU_COMPATIBLE + char name[NETIF_NAMESIZE]; +#else /* LWIP_MPU_COMPATIBLE */ + char *name; +#endif /* LWIP_MPU_COMPATIBLE */ + u8_t index; + } ifs; + } msg; +}; + +#endif /* LWIP_NETIF_API */ + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_API_MSG_H */ diff --git 
a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/priv/mem_priv.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/priv/mem_priv.h new file mode 100644 index 0000000..5abd9b5 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/priv/mem_priv.h @@ -0,0 +1,84 @@ +/** + * @file + * lwIP internal memory implementations (do not use in application code) + */ + +/* + * Copyright (c) 2018 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Simon Goldschmidt + * + */ + +#ifndef LWIP_HDR_MEM_PRIV_H +#define LWIP_HDR_MEM_PRIV_H + +#include "lwip/opt.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#include "lwip/mem.h" + +#if MEM_OVERFLOW_CHECK || MEMP_OVERFLOW_CHECK +/* if MEM_OVERFLOW_CHECK or MEMP_OVERFLOW_CHECK is turned on, we reserve some + * bytes at the beginning and at the end of each element, initialize them as + * 0xcd and check them later. + * If MEM(P)_OVERFLOW_CHECK is >= 2, on every call to mem(p)_malloc or mem(p)_free, + * every single element in each pool/heap is checked! + * This is VERY SLOW but also very helpful. + * MEM_SANITY_REGION_BEFORE and MEM_SANITY_REGION_AFTER can be overridden in + * lwipopts.h to change the amount reserved for checking. 
*/ +#ifndef MEM_SANITY_REGION_BEFORE +#define MEM_SANITY_REGION_BEFORE 16 +#endif /* MEM_SANITY_REGION_BEFORE*/ +#if MEM_SANITY_REGION_BEFORE > 0 +#define MEM_SANITY_REGION_BEFORE_ALIGNED LWIP_MEM_ALIGN_SIZE(MEM_SANITY_REGION_BEFORE) +#else +#define MEM_SANITY_REGION_BEFORE_ALIGNED 0 +#endif /* MEM_SANITY_REGION_BEFORE*/ +#ifndef MEM_SANITY_REGION_AFTER +#define MEM_SANITY_REGION_AFTER 16 +#endif /* MEM_SANITY_REGION_AFTER*/ +#if MEM_SANITY_REGION_AFTER > 0 +#define MEM_SANITY_REGION_AFTER_ALIGNED LWIP_MEM_ALIGN_SIZE(MEM_SANITY_REGION_AFTER) +#else +#define MEM_SANITY_REGION_AFTER_ALIGNED 0 +#endif /* MEM_SANITY_REGION_AFTER*/ + +void mem_overflow_init_raw(void *p, size_t size); +void mem_overflow_check_raw(void *p, size_t size, const char *descr1, const char *descr2); + +#endif /* MEM_OVERFLOW_CHECK || MEMP_OVERFLOW_CHECK */ + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_MEMP_PRIV_H */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/priv/memp_priv.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/priv/memp_priv.h new file mode 100644 index 0000000..f9930fd --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/priv/memp_priv.h @@ -0,0 +1,161 @@ +/** + * @file + * memory pools lwIP internal implementations (do not use in application code) + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
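The sanity-region defines above only take effect when overflow checking is enabled in lwipopts.h; a minimal configuration fragment (values illustrative, not part of the patch) that reserves the default 16-byte 0xcd-filled guard regions around every heap and pool element:

/* --- illustrative lwipopts.h fragment, not part of the patch --- */
#define MEM_OVERFLOW_CHECK       1   /* guard regions around mem_malloc() blocks */
#define MEMP_OVERFLOW_CHECK      1   /* guard regions around pool elements */
#define MEM_SANITY_REGION_BEFORE 16  /* bytes filled with 0xcd before each element */
#define MEM_SANITY_REGION_AFTER  16  /* bytes filled with 0xcd after each element */
/* Setting either *_OVERFLOW_CHECK to 2 validates every element on every
 * alloc/free -- very slow, but useful when hunting memory corruption. */
/* --- end of fragment --- */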
+ * + * Author: Adam Dunkels + * + */ + +#ifndef LWIP_HDR_MEMP_PRIV_H +#define LWIP_HDR_MEMP_PRIV_H + +#include "lwip/opt.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#include "lwip/mem.h" +#include "lwip/priv/mem_priv.h" + +#if MEMP_OVERFLOW_CHECK + + +/* MEMP_SIZE: save space for struct memp and for sanity check */ +#define MEMP_SIZE (LWIP_MEM_ALIGN_SIZE(sizeof(struct memp)) + MEM_SANITY_REGION_BEFORE_ALIGNED) +#define MEMP_ALIGN_SIZE(x) (LWIP_MEM_ALIGN_SIZE(x) + MEM_SANITY_REGION_AFTER_ALIGNED) + +#else /* MEMP_OVERFLOW_CHECK */ + +/* No sanity checks + * We don't need to preserve the struct memp while not allocated, so we + * can save a little space and set MEMP_SIZE to 0. + */ +#define MEMP_SIZE 0 +#define MEMP_ALIGN_SIZE(x) (LWIP_MEM_ALIGN_SIZE(x)) + +#endif /* MEMP_OVERFLOW_CHECK */ + +#if !MEMP_MEM_MALLOC || MEMP_OVERFLOW_CHECK +struct memp { + struct memp *next; +#if MEMP_OVERFLOW_CHECK + const char *file; + int line; +#endif /* MEMP_OVERFLOW_CHECK */ +}; +#endif /* !MEMP_MEM_MALLOC || MEMP_OVERFLOW_CHECK */ + +#if MEM_USE_POOLS && MEMP_USE_CUSTOM_POOLS +/* Use a helper type to get the start and end of the user "memory pools" for mem_malloc */ +typedef enum { + /* Get the first (via: + MEMP_POOL_HELPER_START = ((u8_t) 1*MEMP_POOL_A + 0*MEMP_POOL_B + 0*MEMP_POOL_C + 0)*/ + MEMP_POOL_HELPER_FIRST = ((u8_t) +#define LWIP_MEMPOOL(name,num,size,desc) +#define LWIP_MALLOC_MEMPOOL_START 1 +#define LWIP_MALLOC_MEMPOOL(num, size) * MEMP_POOL_##size + 0 +#define LWIP_MALLOC_MEMPOOL_END +#include "lwip/priv/memp_std.h" + ) , + /* Get the last (via: + MEMP_POOL_HELPER_END = ((u8_t) 0 + MEMP_POOL_A*0 + MEMP_POOL_B*0 + MEMP_POOL_C*1) */ + MEMP_POOL_HELPER_LAST = ((u8_t) +#define LWIP_MEMPOOL(name,num,size,desc) +#define LWIP_MALLOC_MEMPOOL_START +#define LWIP_MALLOC_MEMPOOL(num, size) 0 + MEMP_POOL_##size * +#define LWIP_MALLOC_MEMPOOL_END 1 +#include "lwip/priv/memp_std.h" + ) +} memp_pool_helper_t; + +/* The actual start and stop values are here (cast them over) + We use this helper type and these defines so we can avoid using const memp_t values */ +#define MEMP_POOL_FIRST ((memp_t) MEMP_POOL_HELPER_FIRST) +#define MEMP_POOL_LAST ((memp_t) MEMP_POOL_HELPER_LAST) +#endif /* MEM_USE_POOLS && MEMP_USE_CUSTOM_POOLS */ + +/** Memory pool descriptor */ +struct memp_desc { +#if defined(LWIP_DEBUG) || MEMP_OVERFLOW_CHECK || LWIP_STATS_DISPLAY + /** Textual description */ + const char *desc; +#endif /* LWIP_DEBUG || MEMP_OVERFLOW_CHECK || LWIP_STATS_DISPLAY */ +#if MEMP_STATS + /** Statistics */ + struct stats_mem *stats; +#endif + + /** Element size */ + u16_t size; + +#if !MEMP_MEM_MALLOC + /** Number of elements */ + u16_t num; + + /** Base address */ + u8_t *base; + + /** First free element of each pool. Elements form a linked list. 
*/ + struct memp **tab; +#endif /* MEMP_MEM_MALLOC */ +}; + +#if defined(LWIP_DEBUG) || MEMP_OVERFLOW_CHECK || LWIP_STATS_DISPLAY +#define DECLARE_LWIP_MEMPOOL_DESC(desc) (desc), +#else +#define DECLARE_LWIP_MEMPOOL_DESC(desc) +#endif + +#if MEMP_STATS +#define LWIP_MEMPOOL_DECLARE_STATS_INSTANCE(name) static struct stats_mem name; +#define LWIP_MEMPOOL_DECLARE_STATS_REFERENCE(name) &name, +#else +#define LWIP_MEMPOOL_DECLARE_STATS_INSTANCE(name) +#define LWIP_MEMPOOL_DECLARE_STATS_REFERENCE(name) +#endif + +void memp_init_pool(const struct memp_desc *desc); + +#if MEMP_OVERFLOW_CHECK +void *memp_malloc_pool_fn(const struct memp_desc* desc, const char* file, const int line); +#define memp_malloc_pool(d) memp_malloc_pool_fn((d), __FILE__, __LINE__) +#else +void *memp_malloc_pool(const struct memp_desc *desc); +#endif +void memp_free_pool(const struct memp_desc* desc, void *mem); + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_MEMP_PRIV_H */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/priv/memp_std.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/priv/memp_std.h new file mode 100644 index 0000000..4d3b192 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/priv/memp_std.h @@ -0,0 +1,153 @@ +/** + * @file + * lwIP internal memory pools (do not use in application code) + * This file is deliberately included multiple times: once with empty + * definition of LWIP_MEMPOOL() to handle all includes and multiple times + * to build up various lists of mem pools. + */ + +/* + * SETUP: Make sure we define everything we will need. + * + * We have create three types of pools: + * 1) MEMPOOL - standard pools + * 2) MALLOC_MEMPOOL - to be used by mem_malloc in mem.c + * 3) PBUF_MEMPOOL - a mempool of pbuf's, so include space for the pbuf struct + * + * If the include'r doesn't require any special treatment of each of the types + * above, then will declare #2 & #3 to be just standard mempools. + */ +#ifndef LWIP_MALLOC_MEMPOOL +/* This treats "malloc pools" just like any other pool. + The pools are a little bigger to provide 'size' as the amount of user data. */ +#define LWIP_MALLOC_MEMPOOL(num, size) LWIP_MEMPOOL(POOL_##size, num, (size + LWIP_MEM_ALIGN_SIZE(sizeof(struct memp_malloc_helper))), "MALLOC_"#size) +#define LWIP_MALLOC_MEMPOOL_START +#define LWIP_MALLOC_MEMPOOL_END +#endif /* LWIP_MALLOC_MEMPOOL */ + +#ifndef LWIP_PBUF_MEMPOOL +/* This treats "pbuf pools" just like any other pool. + * Allocates buffers for a pbuf struct AND a payload size */ +#define LWIP_PBUF_MEMPOOL(name, num, payload, desc) LWIP_MEMPOOL(name, num, (LWIP_MEM_ALIGN_SIZE(sizeof(struct pbuf)) + LWIP_MEM_ALIGN_SIZE(payload)), desc) +#endif /* LWIP_PBUF_MEMPOOL */ + + +/* + * A list of internal pools used by LWIP. + * + * LWIP_MEMPOOL(pool_name, number_elements, element_size, pool_description) + * creates a pool name MEMP_pool_name. 
description is used in stats.c + */ +#if LWIP_RAW +LWIP_MEMPOOL(RAW_PCB, MEMP_NUM_RAW_PCB, sizeof(struct raw_pcb), "RAW_PCB") +#endif /* LWIP_RAW */ + +#if LWIP_UDP +LWIP_MEMPOOL(UDP_PCB, MEMP_NUM_UDP_PCB, sizeof(struct udp_pcb), "UDP_PCB") +#endif /* LWIP_UDP */ + +#if LWIP_TCP +LWIP_MEMPOOL(TCP_PCB, MEMP_NUM_TCP_PCB, sizeof(struct tcp_pcb), "TCP_PCB") +LWIP_MEMPOOL(TCP_PCB_LISTEN, MEMP_NUM_TCP_PCB_LISTEN, sizeof(struct tcp_pcb_listen), "TCP_PCB_LISTEN") +LWIP_MEMPOOL(TCP_SEG, MEMP_NUM_TCP_SEG, sizeof(struct tcp_seg), "TCP_SEG") +#endif /* LWIP_TCP */ + +#if LWIP_ALTCP && LWIP_TCP +LWIP_MEMPOOL(ALTCP_PCB, MEMP_NUM_ALTCP_PCB, sizeof(struct altcp_pcb), "ALTCP_PCB") +#endif /* LWIP_ALTCP && LWIP_TCP */ + +#if LWIP_IPV4 && IP_REASSEMBLY +LWIP_MEMPOOL(REASSDATA, MEMP_NUM_REASSDATA, sizeof(struct ip_reassdata), "REASSDATA") +#endif /* LWIP_IPV4 && IP_REASSEMBLY */ +#if (IP_FRAG && !LWIP_NETIF_TX_SINGLE_PBUF) || (LWIP_IPV6 && LWIP_IPV6_FRAG) +LWIP_MEMPOOL(FRAG_PBUF, MEMP_NUM_FRAG_PBUF, sizeof(struct pbuf_custom_ref),"FRAG_PBUF") +#endif /* IP_FRAG && !LWIP_NETIF_TX_SINGLE_PBUF || (LWIP_IPV6 && LWIP_IPV6_FRAG) */ + +#if LWIP_NETCONN || LWIP_SOCKET +LWIP_MEMPOOL(NETBUF, MEMP_NUM_NETBUF, sizeof(struct netbuf), "NETBUF") +LWIP_MEMPOOL(NETCONN, MEMP_NUM_NETCONN, sizeof(struct netconn), "NETCONN") +#endif /* LWIP_NETCONN || LWIP_SOCKET */ + +#if NO_SYS==0 +LWIP_MEMPOOL(TCPIP_MSG_API, MEMP_NUM_TCPIP_MSG_API, sizeof(struct tcpip_msg), "TCPIP_MSG_API") +#if LWIP_MPU_COMPATIBLE +LWIP_MEMPOOL(API_MSG, MEMP_NUM_API_MSG, sizeof(struct api_msg), "API_MSG") +#if LWIP_DNS +LWIP_MEMPOOL(DNS_API_MSG, MEMP_NUM_DNS_API_MSG, sizeof(struct dns_api_msg), "DNS_API_MSG") +#endif +#if LWIP_SOCKET && !LWIP_TCPIP_CORE_LOCKING +LWIP_MEMPOOL(SOCKET_SETGETSOCKOPT_DATA, MEMP_NUM_SOCKET_SETGETSOCKOPT_DATA, sizeof(struct lwip_setgetsockopt_data), "SOCKET_SETGETSOCKOPT_DATA") +#endif +#if LWIP_SOCKET && (LWIP_SOCKET_SELECT || LWIP_SOCKET_POLL) +LWIP_MEMPOOL(SELECT_CB, MEMP_NUM_SELECT_CB, sizeof(struct lwip_select_cb), "SELECT_CB") +#endif /* LWIP_SOCKET && (LWIP_SOCKET_SELECT || LWIP_SOCKET_POLL) */ +#if LWIP_NETIF_API +LWIP_MEMPOOL(NETIFAPI_MSG, MEMP_NUM_NETIFAPI_MSG, sizeof(struct netifapi_msg), "NETIFAPI_MSG") +#endif +#endif /* LWIP_MPU_COMPATIBLE */ +#if !LWIP_TCPIP_CORE_LOCKING_INPUT +LWIP_MEMPOOL(TCPIP_MSG_INPKT,MEMP_NUM_TCPIP_MSG_INPKT, sizeof(struct tcpip_msg), "TCPIP_MSG_INPKT") +#endif /* !LWIP_TCPIP_CORE_LOCKING_INPUT */ +#endif /* NO_SYS==0 */ + +#if LWIP_IPV4 && LWIP_ARP && ARP_QUEUEING +LWIP_MEMPOOL(ARP_QUEUE, MEMP_NUM_ARP_QUEUE, sizeof(struct etharp_q_entry), "ARP_QUEUE") +#endif /* LWIP_IPV4 && LWIP_ARP && ARP_QUEUEING */ + +#if LWIP_IGMP +LWIP_MEMPOOL(IGMP_GROUP, MEMP_NUM_IGMP_GROUP, sizeof(struct igmp_group), "IGMP_GROUP") +#endif /* LWIP_IGMP */ + +#if LWIP_TIMERS && !LWIP_TIMERS_CUSTOM +LWIP_MEMPOOL(SYS_TIMEOUT, MEMP_NUM_SYS_TIMEOUT, sizeof(struct sys_timeo), "SYS_TIMEOUT") +#endif /* LWIP_TIMERS && !LWIP_TIMERS_CUSTOM */ + +#if LWIP_DNS && LWIP_SOCKET +LWIP_MEMPOOL(NETDB, MEMP_NUM_NETDB, NETDB_ELEM_SIZE, "NETDB") +#endif /* LWIP_DNS && LWIP_SOCKET */ +#if LWIP_DNS && DNS_LOCAL_HOSTLIST && DNS_LOCAL_HOSTLIST_IS_DYNAMIC +LWIP_MEMPOOL(LOCALHOSTLIST, MEMP_NUM_LOCALHOSTLIST, LOCALHOSTLIST_ELEM_SIZE, "LOCALHOSTLIST") +#endif /* LWIP_DNS && DNS_LOCAL_HOSTLIST && DNS_LOCAL_HOSTLIST_IS_DYNAMIC */ + +#if LWIP_IPV6 && LWIP_ND6_QUEUEING +LWIP_MEMPOOL(ND6_QUEUE, MEMP_NUM_ND6_QUEUE, sizeof(struct nd6_q_entry), "ND6_QUEUE") +#endif /* LWIP_IPV6 && LWIP_ND6_QUEUEING */ + +#if LWIP_IPV6 && LWIP_IPV6_REASS 
+LWIP_MEMPOOL(IP6_REASSDATA, MEMP_NUM_REASSDATA, sizeof(struct ip6_reassdata), "IP6_REASSDATA") +#endif /* LWIP_IPV6 && LWIP_IPV6_REASS */ + +#if LWIP_IPV6 && LWIP_IPV6_MLD +LWIP_MEMPOOL(MLD6_GROUP, MEMP_NUM_MLD6_GROUP, sizeof(struct mld_group), "MLD6_GROUP") +#endif /* LWIP_IPV6 && LWIP_IPV6_MLD */ + + +/* + * A list of pools of pbuf's used by LWIP. + * + * LWIP_PBUF_MEMPOOL(pool_name, number_elements, pbuf_payload_size, pool_description) + * creates a pool name MEMP_pool_name. description is used in stats.c + * This allocates enough space for the pbuf struct and a payload. + * (Example: pbuf_payload_size=0 allocates only size for the struct) + */ +LWIP_MEMPOOL(PBUF, MEMP_NUM_PBUF, sizeof(struct pbuf), "PBUF_REF/ROM") +LWIP_PBUF_MEMPOOL(PBUF_POOL, PBUF_POOL_SIZE, PBUF_POOL_BUFSIZE, "PBUF_POOL") + + +/* + * Allow for user-defined pools; this must be explicitly set in lwipopts.h + * since the default is to NOT look for lwippools.h + */ +#if MEMP_USE_CUSTOM_POOLS +#include "lwippools.h" +#endif /* MEMP_USE_CUSTOM_POOLS */ + +/* + * REQUIRED CLEANUP: Clear up so we don't get "multiply defined" error later + * (#undef is ignored for something that is not defined) + */ +#undef LWIP_MEMPOOL +#undef LWIP_MALLOC_MEMPOOL +#undef LWIP_MALLOC_MEMPOOL_START +#undef LWIP_MALLOC_MEMPOOL_END +#undef LWIP_PBUF_MEMPOOL diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/priv/nd6_priv.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/priv/nd6_priv.h new file mode 100644 index 0000000..61c0b73 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/priv/nd6_priv.h @@ -0,0 +1,142 @@ +/** + * @file + * + * Neighbor discovery and stateless address autoconfiguration for IPv6. + * Aims to be compliant with RFC 4861 (Neighbor discovery) and RFC 4862 + * (Address autoconfiguration). + */ + +/* + * Copyright (c) 2010 Inico Technologies Ltd. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
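memp_std.h above is an X-macro list: it is included several times with different definitions of LWIP_MEMPOOL() to generate the pool IDs, descriptors and statistics. Applications can reuse the same memp_desc machinery for private pools through the public helpers in lwip/memp.h; a hypothetical sketch (names and sizes illustrative, not part of the patch):

/* --- illustrative sketch, not part of the patch: a private memp pool --- */
#include <stdint.h>
#include "lwip/memp.h"

struct my_msg {            /* hypothetical fixed-size element */
  uint32_t id;
  uint8_t  payload[32];
};

/* Declares a private pool descriptor named memp_MY_MSG_POOL with 8 elements. */
LWIP_MEMPOOL_DECLARE(MY_MSG_POOL, 8, sizeof(struct my_msg), "MY_MSG_POOL")

static void my_pool_example(void)
{
  struct my_msg *m;

  LWIP_MEMPOOL_INIT(MY_MSG_POOL);                         /* wraps memp_init_pool()   */
  m = (struct my_msg *)LWIP_MEMPOOL_ALLOC(MY_MSG_POOL);   /* wraps memp_malloc_pool() */
  if (m != NULL) {
    m->id = 1;
    LWIP_MEMPOOL_FREE(MY_MSG_POOL, m);                    /* wraps memp_free_pool()   */
  }
}
/* --- end of sketch --- */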
+ * + * Author: Ivan Delamer + * + * + * Please coordinate changes and requests with Ivan Delamer + * + */ + +#ifndef LWIP_HDR_ND6_PRIV_H +#define LWIP_HDR_ND6_PRIV_H + +#include "lwip/opt.h" + +#if LWIP_IPV6 /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/pbuf.h" +#include "lwip/ip6_addr.h" +#include "lwip/netif.h" + + +#ifdef __cplusplus +extern "C" { +#endif + +#if LWIP_ND6_QUEUEING +/** struct for queueing outgoing packets for unknown address + * defined here to be accessed by memp.h + */ +struct nd6_q_entry { + struct nd6_q_entry *next; + struct pbuf *p; +}; +#endif /* LWIP_ND6_QUEUEING */ + +/** Struct for tables. */ +struct nd6_neighbor_cache_entry { + ip6_addr_t next_hop_address; + struct netif *netif; + u8_t lladdr[NETIF_MAX_HWADDR_LEN]; + /*u32_t pmtu;*/ +#if LWIP_ND6_QUEUEING + /** Pointer to queue of pending outgoing packets on this entry. */ + struct nd6_q_entry *q; +#else /* LWIP_ND6_QUEUEING */ + /** Pointer to a single pending outgoing packet on this entry. */ + struct pbuf *q; +#endif /* LWIP_ND6_QUEUEING */ + u8_t state; + u8_t isrouter; + union { + u32_t reachable_time; /* in seconds */ + u32_t delay_time; /* ticks (ND6_TMR_INTERVAL) */ + u32_t probes_sent; + u32_t stale_time; /* ticks (ND6_TMR_INTERVAL) */ + } counter; +}; + +struct nd6_destination_cache_entry { + ip6_addr_t destination_addr; + ip6_addr_t next_hop_addr; + u16_t pmtu; + u32_t age; +}; + +struct nd6_prefix_list_entry { + ip6_addr_t prefix; + struct netif *netif; + u32_t invalidation_timer; /* in seconds */ +}; + +struct nd6_router_list_entry { + struct nd6_neighbor_cache_entry *neighbor_entry; + u32_t invalidation_timer; /* in seconds */ + u8_t flags; +}; + +enum nd6_neighbor_cache_entry_state { + ND6_NO_ENTRY = 0, + ND6_INCOMPLETE, + ND6_REACHABLE, + ND6_STALE, + ND6_DELAY, + ND6_PROBE +}; + +#define ND6_HOPLIM 255 /* maximum hop limit, required in all ND packets */ + +#define ND6_2HRS 7200 /* two hours, expressed in number of seconds */ + +/* Router tables. */ +/* @todo make these static? and entries accessible through API? */ +extern struct nd6_neighbor_cache_entry neighbor_cache[]; +extern struct nd6_destination_cache_entry destination_cache[]; +extern struct nd6_prefix_list_entry prefix_list[]; +extern struct nd6_router_list_entry default_router_list[]; + +/* Default values, can be updated by a RA message. */ +extern u32_t reachable_time; +extern u32_t retrans_timer; + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_IPV6 */ + +#endif /* LWIP_HDR_ND6_PRIV_H */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/priv/raw_priv.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/priv/raw_priv.h new file mode 100644 index 0000000..0835422 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/priv/raw_priv.h @@ -0,0 +1,69 @@ +/** + * @file + * raw API internal implementations (do not use in application code) + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ +#ifndef LWIP_HDR_RAW_PRIV_H +#define LWIP_HDR_RAW_PRIV_H + +#include "lwip/opt.h" + +#if LWIP_RAW /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/raw.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/** return codes for raw_input */ +typedef enum raw_input_state +{ + RAW_INPUT_NONE = 0, /* pbuf did not match any pcbs */ + RAW_INPUT_EATEN, /* pbuf handed off and delivered to pcb */ + RAW_INPUT_DELIVERED /* pbuf only delivered to pcb (pbuf can still be referenced) */ +} raw_input_state_t; + +/* The following functions are the lower layer interface to RAW. */ +raw_input_state_t raw_input(struct pbuf *p, struct netif *inp); + +void raw_netif_ip_addr_changed(const ip_addr_t* old_addr, const ip_addr_t* new_addr); + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_RAW */ + +#endif /* LWIP_HDR_RAW_PRIV_H */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/priv/sockets_priv.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/priv/sockets_priv.h new file mode 100644 index 0000000..e481cd1 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/priv/sockets_priv.h @@ -0,0 +1,175 @@ +/** + * @file + * Sockets API internal implementations (do not use in application code) + */ + +/* + * Copyright (c) 2017 Joel Cunningham, Garmin International, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Joel Cunningham + * + */ +#ifndef LWIP_HDR_SOCKETS_PRIV_H +#define LWIP_HDR_SOCKETS_PRIV_H + +#include "lwip/opt.h" + +#if LWIP_SOCKET /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/err.h" +#include "lwip/sockets.h" +#include "lwip/sys.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#define NUM_SOCKETS MEMP_NUM_NETCONN + +/** This is overridable for the rare case where more than 255 threads + * select on the same socket... + */ +#ifndef SELWAIT_T +#define SELWAIT_T u8_t +#endif + +union lwip_sock_lastdata { + struct netbuf *netbuf; + struct pbuf *pbuf; +}; + +/** Contains all internal pointers and states used for a socket */ +struct lwip_sock { + /** sockets currently are built on netconns, each socket has one netconn */ + struct netconn *conn; + /** data that was left from the previous read */ + union lwip_sock_lastdata lastdata; +#if LWIP_SOCKET_SELECT || LWIP_SOCKET_POLL + /** number of times data was received, set by event_callback(), + tested by the receive and select functions */ + s16_t rcvevent; + /** number of times data was ACKed (free send buffer), set by event_callback(), + tested by select */ + u16_t sendevent; + /** error happened for this socket, set by event_callback(), tested by select */ + u16_t errevent; + /** counter of how many threads are waiting for this socket using select */ + SELWAIT_T select_waiting; +#endif /* LWIP_SOCKET_SELECT || LWIP_SOCKET_POLL */ +#if LWIP_NETCONN_FULLDUPLEX + /* counter of how many threads are using a struct lwip_sock (not the 'int') */ + u8_t fd_used; + /* status of pending close/delete actions */ + u8_t fd_free_pending; +#define LWIP_SOCK_FD_FREE_TCP 1 +#define LWIP_SOCK_FD_FREE_FREE 2 +#endif +}; + +#ifndef set_errno +#define set_errno(err) do { if (err) { errno = (err); } } while(0) +#endif + +#if !LWIP_TCPIP_CORE_LOCKING +/** Maximum optlen used by setsockopt/getsockopt */ +#define LWIP_SETGETSOCKOPT_MAXOPTLEN LWIP_MAX(16, sizeof(struct ifreq)) + +/** This struct is used to pass data to the set/getsockopt_internal + * functions running in tcpip_thread context (only a void* is allowed) */ +struct lwip_setgetsockopt_data { + /** socket index for which to change options */ + int s; + /** level of the option to process */ + int level; + /** name of the option to process */ + int optname; + /** set: value to set the option to + * get: value of the option is stored here */ +#if LWIP_MPU_COMPATIBLE + u8_t optval[LWIP_SETGETSOCKOPT_MAXOPTLEN]; +#else + union { + void *p; + const void *pc; + } optval; +#endif + /** size of *optval */ + socklen_t optlen; + /** if an error occurs, it is temporarily stored here */ + int err; + /** semaphore to wake up the calling task */ + void* completed_sem; +}; +#endif /* 
!LWIP_TCPIP_CORE_LOCKING */ + +#ifdef __cplusplus +} +#endif + +struct lwip_sock* lwip_socket_dbg_get_socket(int fd); + +#if LWIP_SOCKET_SELECT || LWIP_SOCKET_POLL + +#if LWIP_NETCONN_SEM_PER_THREAD +#define SELECT_SEM_T sys_sem_t* +#define SELECT_SEM_PTR(sem) (sem) +#else /* LWIP_NETCONN_SEM_PER_THREAD */ +#define SELECT_SEM_T sys_sem_t +#define SELECT_SEM_PTR(sem) (&(sem)) +#endif /* LWIP_NETCONN_SEM_PER_THREAD */ + +/** Description for a task waiting in select */ +struct lwip_select_cb { + /** Pointer to the next waiting task */ + struct lwip_select_cb *next; + /** Pointer to the previous waiting task */ + struct lwip_select_cb *prev; +#if LWIP_SOCKET_SELECT + /** readset passed to select */ + fd_set *readset; + /** writeset passed to select */ + fd_set *writeset; + /** unimplemented: exceptset passed to select */ + fd_set *exceptset; +#endif /* LWIP_SOCKET_SELECT */ +#if LWIP_SOCKET_POLL + /** fds passed to poll; NULL if select */ + struct pollfd *poll_fds; + /** nfds passed to poll; 0 if select */ + nfds_t poll_nfds; +#endif /* LWIP_SOCKET_POLL */ + /** don't signal the same semaphore twice: set to 1 when signalled */ + int sem_signalled; + /** semaphore to wake up a task waiting for select */ + SELECT_SEM_T sem; +}; +#endif /* LWIP_SOCKET_SELECT || LWIP_SOCKET_POLL */ + +#endif /* LWIP_SOCKET */ + +#endif /* LWIP_HDR_SOCKETS_PRIV_H */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/priv/tcp_priv.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/priv/tcp_priv.h new file mode 100644 index 0000000..96e07e3 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/priv/tcp_priv.h @@ -0,0 +1,523 @@ +/** + * @file + * TCP internal implementations (do not use in application code) + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
+ * + * Author: Adam Dunkels + * + */ +#ifndef LWIP_HDR_TCP_PRIV_H +#define LWIP_HDR_TCP_PRIV_H + +#include "lwip/opt.h" + +#if LWIP_TCP /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/tcp.h" +#include "lwip/mem.h" +#include "lwip/pbuf.h" +#include "lwip/ip.h" +#include "lwip/icmp.h" +#include "lwip/err.h" +#include "lwip/ip6.h" +#include "lwip/ip6_addr.h" +#include "lwip/prot/tcp.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* Functions for interfacing with TCP: */ + +/* Lower layer interface to TCP: */ +void tcp_init (void); /* Initialize this module. */ +void tcp_tmr (void); /* Must be called every + TCP_TMR_INTERVAL + ms. (Typically 250 ms). */ +/* It is also possible to call these two functions at the right + intervals (instead of calling tcp_tmr()). */ +void tcp_slowtmr (void); +void tcp_fasttmr (void); + +/* Call this from a netif driver (watch out for threading issues!) that has + returned a memory error on transmit and now has free buffers to send more. + This iterates all active pcbs that had an error and tries to call + tcp_output, so use this with care as it might slow down the system. */ +void tcp_txnow (void); + +/* Only used by IP to pass a TCP segment to TCP: */ +void tcp_input (struct pbuf *p, struct netif *inp); +/* Used within the TCP code only: */ +struct tcp_pcb * tcp_alloc (u8_t prio); +void tcp_free (struct tcp_pcb *pcb); +void tcp_abandon (struct tcp_pcb *pcb, int reset); +err_t tcp_send_empty_ack(struct tcp_pcb *pcb); +err_t tcp_rexmit (struct tcp_pcb *pcb); +err_t tcp_rexmit_rto_prepare(struct tcp_pcb *pcb); +void tcp_rexmit_rto_commit(struct tcp_pcb *pcb); +void tcp_rexmit_rto (struct tcp_pcb *pcb); +void tcp_rexmit_fast (struct tcp_pcb *pcb); +u32_t tcp_update_rcv_ann_wnd(struct tcp_pcb *pcb); +err_t tcp_process_refused_data(struct tcp_pcb *pcb); + +/** + * This is the Nagle algorithm: try to combine user data to send as few TCP + * segments as possible. Only send if + * - no previously transmitted data on the connection remains unacknowledged or + * - the TF_NODELAY flag is set (nagle algorithm turned off for this pcb) or + * - the only unsent segment is at least pcb->mss bytes long (or there is more + * than one unsent segment - with lwIP, this can happen although unsent->len < mss) + * - or if we are in fast-retransmit (TF_INFR) + */ +#define tcp_do_output_nagle(tpcb) ((((tpcb)->unacked == NULL) || \ + ((tpcb)->flags & (TF_NODELAY | TF_INFR)) || \ + (((tpcb)->unsent != NULL) && (((tpcb)->unsent->next != NULL) || \ + ((tpcb)->unsent->len >= (tpcb)->mss))) || \ + ((tcp_sndbuf(tpcb) == 0) || (tcp_sndqueuelen(tpcb) >= TCP_SND_QUEUELEN)) \ + ) ? 1 : 0) +#define tcp_output_nagle(tpcb) (tcp_do_output_nagle(tpcb) ? tcp_output(tpcb) : ERR_OK) + + +#define TCP_SEQ_LT(a,b) ((s32_t)((u32_t)(a) - (u32_t)(b)) < 0) +#define TCP_SEQ_LEQ(a,b) ((s32_t)((u32_t)(a) - (u32_t)(b)) <= 0) +#define TCP_SEQ_GT(a,b) ((s32_t)((u32_t)(a) - (u32_t)(b)) > 0) +#define TCP_SEQ_GEQ(a,b) ((s32_t)((u32_t)(a) - (u32_t)(b)) >= 0) +/* is b<=a<=c? */ +#if 0 /* see bug #10548 */ +#define TCP_SEQ_BETWEEN(a,b,c) ((c)-(b) >= (a)-(b)) +#endif +#define TCP_SEQ_BETWEEN(a,b,c) (TCP_SEQ_GEQ(a,b) && TCP_SEQ_LEQ(a,c)) + +#ifndef TCP_TMR_INTERVAL +#define TCP_TMR_INTERVAL 250 /* The TCP timer interval in milliseconds. 
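The TCP_SEQ_* comparison macros above rely on unsigned wraparound so that sequence numbers still compare correctly across the 2^32 boundary; a small self-contained check of that arithmetic (illustrative, not part of the patch):

/* --- illustrative check, not part of the patch: sequence-number wraparound --- */
#include <assert.h>
#include <stdint.h>

/* Same arithmetic as TCP_SEQ_LT above: "a is before b" in modulo-2^32
 * sequence space, evaluated via a signed interpretation of (a - b). */
#define SEQ_LT(a, b) ((int32_t)((uint32_t)(a) - (uint32_t)(b)) < 0)

int main(void)
{
  /* 0xFFFFFFF0 precedes 0x00000010 once the 32-bit sequence space wraps. */
  assert(SEQ_LT(0xFFFFFFF0u, 0x00000010u));
  assert(!SEQ_LT(0x00000010u, 0xFFFFFFF0u));
  return 0;
}
/* --- end of check --- */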
*/ +#endif /* TCP_TMR_INTERVAL */ + +#ifndef TCP_FAST_INTERVAL +#define TCP_FAST_INTERVAL TCP_TMR_INTERVAL /* the fine grained timeout in milliseconds */ +#endif /* TCP_FAST_INTERVAL */ + +#ifndef TCP_SLOW_INTERVAL +#define TCP_SLOW_INTERVAL (2*TCP_TMR_INTERVAL) /* the coarse grained timeout in milliseconds */ +#endif /* TCP_SLOW_INTERVAL */ + +#define TCP_FIN_WAIT_TIMEOUT 20000 /* milliseconds */ +#define TCP_SYN_RCVD_TIMEOUT 20000 /* milliseconds */ + +#define TCP_OOSEQ_TIMEOUT 6U /* x RTO */ + +#ifndef TCP_MSL +#define TCP_MSL 60000UL /* The maximum segment lifetime in milliseconds */ +#endif + +/* Keepalive values, compliant with RFC 1122. Don't change this unless you know what you're doing */ +#ifndef TCP_KEEPIDLE_DEFAULT +#define TCP_KEEPIDLE_DEFAULT 7200000UL /* Default KEEPALIVE timer in milliseconds */ +#endif + +#ifndef TCP_KEEPINTVL_DEFAULT +#define TCP_KEEPINTVL_DEFAULT 75000UL /* Default Time between KEEPALIVE probes in milliseconds */ +#endif + +#ifndef TCP_KEEPCNT_DEFAULT +#define TCP_KEEPCNT_DEFAULT 9U /* Default Counter for KEEPALIVE probes */ +#endif + +#define TCP_MAXIDLE TCP_KEEPCNT_DEFAULT * TCP_KEEPINTVL_DEFAULT /* Maximum KEEPALIVE probe time */ + +#define TCP_TCPLEN(seg) ((seg)->len + (((TCPH_FLAGS((seg)->tcphdr) & (TCP_FIN | TCP_SYN)) != 0) ? 1U : 0U)) + +/** Flags used on input processing, not on pcb->flags +*/ +#define TF_RESET (u8_t)0x08U /* Connection was reset. */ +#define TF_CLOSED (u8_t)0x10U /* Connection was successfully closed. */ +#define TF_GOT_FIN (u8_t)0x20U /* Connection was closed by the remote end. */ + + +#if LWIP_EVENT_API + +#define TCP_EVENT_ACCEPT(lpcb,pcb,arg,err,ret) ret = lwip_tcp_event(arg, (pcb),\ + LWIP_EVENT_ACCEPT, NULL, 0, err) +#define TCP_EVENT_SENT(pcb,space,ret) ret = lwip_tcp_event((pcb)->callback_arg, (pcb),\ + LWIP_EVENT_SENT, NULL, space, ERR_OK) +#define TCP_EVENT_RECV(pcb,p,err,ret) ret = lwip_tcp_event((pcb)->callback_arg, (pcb),\ + LWIP_EVENT_RECV, (p), 0, (err)) +#define TCP_EVENT_CLOSED(pcb,ret) ret = lwip_tcp_event((pcb)->callback_arg, (pcb),\ + LWIP_EVENT_RECV, NULL, 0, ERR_OK) +#define TCP_EVENT_CONNECTED(pcb,err,ret) ret = lwip_tcp_event((pcb)->callback_arg, (pcb),\ + LWIP_EVENT_CONNECTED, NULL, 0, (err)) +#define TCP_EVENT_POLL(pcb,ret) do { if ((pcb)->state != SYN_RCVD) { \ + ret = lwip_tcp_event((pcb)->callback_arg, (pcb), LWIP_EVENT_POLL, NULL, 0, ERR_OK); \ + } else { \ + ret = ERR_ARG; } } while(0) +/* For event API, last state SYN_RCVD must be excluded here: the application + has not seen this pcb, yet! 
*/ +#define TCP_EVENT_ERR(last_state,errf,arg,err) do { if (last_state != SYN_RCVD) { \ + lwip_tcp_event((arg), NULL, LWIP_EVENT_ERR, NULL, 0, (err)); } } while(0) + +#else /* LWIP_EVENT_API */ + +#define TCP_EVENT_ACCEPT(lpcb,pcb,arg,err,ret) \ + do { \ + if((lpcb)->accept != NULL) \ + (ret) = (lpcb)->accept((arg),(pcb),(err)); \ + else (ret) = ERR_ARG; \ + } while (0) + +#define TCP_EVENT_SENT(pcb,space,ret) \ + do { \ + if((pcb)->sent != NULL) \ + (ret) = (pcb)->sent((pcb)->callback_arg,(pcb),(space)); \ + else (ret) = ERR_OK; \ + } while (0) + +#define TCP_EVENT_RECV(pcb,p,err,ret) \ + do { \ + if((pcb)->recv != NULL) { \ + (ret) = (pcb)->recv((pcb)->callback_arg,(pcb),(p),(err));\ + } else { \ + (ret) = tcp_recv_null(NULL, (pcb), (p), (err)); \ + } \ + } while (0) + +#define TCP_EVENT_CLOSED(pcb,ret) \ + do { \ + if(((pcb)->recv != NULL)) { \ + (ret) = (pcb)->recv((pcb)->callback_arg,(pcb),NULL,ERR_OK);\ + } else { \ + (ret) = ERR_OK; \ + } \ + } while (0) + +#define TCP_EVENT_CONNECTED(pcb,err,ret) \ + do { \ + if((pcb)->connected != NULL) \ + (ret) = (pcb)->connected((pcb)->callback_arg,(pcb),(err)); \ + else (ret) = ERR_OK; \ + } while (0) + +#define TCP_EVENT_POLL(pcb,ret) \ + do { \ + if((pcb)->poll != NULL) \ + (ret) = (pcb)->poll((pcb)->callback_arg,(pcb)); \ + else (ret) = ERR_OK; \ + } while (0) + +#define TCP_EVENT_ERR(last_state,errf,arg,err) \ + do { \ + LWIP_UNUSED_ARG(last_state); \ + if((errf) != NULL) \ + (errf)((arg),(err)); \ + } while (0) + +#endif /* LWIP_EVENT_API */ + +/** Enabled extra-check for TCP_OVERSIZE if LWIP_DEBUG is enabled */ +#if TCP_OVERSIZE && defined(LWIP_DEBUG) +#define TCP_OVERSIZE_DBGCHECK 1 +#else +#define TCP_OVERSIZE_DBGCHECK 0 +#endif + +/** Don't generate checksum on copy if CHECKSUM_GEN_TCP is disabled */ +#define TCP_CHECKSUM_ON_COPY (LWIP_CHECKSUM_ON_COPY && CHECKSUM_GEN_TCP) + +/* This structure represents a TCP segment on the unsent, unacked and ooseq queues */ +struct tcp_seg { + struct tcp_seg *next; /* used when putting segments on a queue */ + struct pbuf *p; /* buffer containing data + TCP header */ + u16_t len; /* the TCP length of this segment */ +#if TCP_OVERSIZE_DBGCHECK + u16_t oversize_left; /* Extra bytes available at the end of the last + pbuf in unsent (used for asserting vs. + tcp_pcb.unsent_oversize only) */ +#endif /* TCP_OVERSIZE_DBGCHECK */ +#if TCP_CHECKSUM_ON_COPY + u16_t chksum; + u8_t chksum_swapped; +#endif /* TCP_CHECKSUM_ON_COPY */ + u8_t flags; +#define TF_SEG_OPTS_MSS (u8_t)0x01U /* Include MSS option (only used in SYN segments) */ +#define TF_SEG_OPTS_TS (u8_t)0x02U /* Include timestamp option. 
*/ +#define TF_SEG_DATA_CHECKSUMMED (u8_t)0x04U /* ALL data (not the header) is + checksummed into 'chksum' */ +#define TF_SEG_OPTS_WND_SCALE (u8_t)0x08U /* Include WND SCALE option (only used in SYN segments) */ +#define TF_SEG_OPTS_SACK_PERM (u8_t)0x10U /* Include SACK Permitted option (only used in SYN segments) */ + struct tcp_hdr *tcphdr; /* the TCP header */ +}; + +#define LWIP_TCP_OPT_EOL 0 +#define LWIP_TCP_OPT_NOP 1 +#define LWIP_TCP_OPT_MSS 2 +#define LWIP_TCP_OPT_WS 3 +#define LWIP_TCP_OPT_SACK_PERM 4 +#define LWIP_TCP_OPT_TS 8 + +#define LWIP_TCP_OPT_LEN_MSS 4 +#if LWIP_TCP_TIMESTAMPS +#define LWIP_TCP_OPT_LEN_TS 10 +#define LWIP_TCP_OPT_LEN_TS_OUT 12 /* aligned for output (includes NOP padding) */ +#else +#define LWIP_TCP_OPT_LEN_TS_OUT 0 +#endif +#if LWIP_WND_SCALE +#define LWIP_TCP_OPT_LEN_WS 3 +#define LWIP_TCP_OPT_LEN_WS_OUT 4 /* aligned for output (includes NOP padding) */ +#else +#define LWIP_TCP_OPT_LEN_WS_OUT 0 +#endif + +#if LWIP_TCP_SACK_OUT +#define LWIP_TCP_OPT_LEN_SACK_PERM 2 +#define LWIP_TCP_OPT_LEN_SACK_PERM_OUT 4 /* aligned for output (includes NOP padding) */ +#else +#define LWIP_TCP_OPT_LEN_SACK_PERM_OUT 0 +#endif + +#define LWIP_TCP_OPT_LENGTH(flags) \ + ((flags) & TF_SEG_OPTS_MSS ? LWIP_TCP_OPT_LEN_MSS : 0) + \ + ((flags) & TF_SEG_OPTS_TS ? LWIP_TCP_OPT_LEN_TS_OUT : 0) + \ + ((flags) & TF_SEG_OPTS_WND_SCALE ? LWIP_TCP_OPT_LEN_WS_OUT : 0) + \ + ((flags) & TF_SEG_OPTS_SACK_PERM ? LWIP_TCP_OPT_LEN_SACK_PERM_OUT : 0) + +/** This returns a TCP header option for MSS in an u32_t */ +#define TCP_BUILD_MSS_OPTION(mss) lwip_htonl(0x02040000 | ((mss) & 0xFFFF)) + +#if LWIP_WND_SCALE +#define TCPWNDSIZE_F U32_F +#define TCPWND_MAX 0xFFFFFFFFU +#define TCPWND_CHECK16(x) LWIP_ASSERT("window size > 0xFFFF", (x) <= 0xFFFF) +#define TCPWND_MIN16(x) ((u16_t)LWIP_MIN((x), 0xFFFF)) +#else /* LWIP_WND_SCALE */ +#define TCPWNDSIZE_F U16_F +#define TCPWND_MAX 0xFFFFU +#define TCPWND_CHECK16(x) +#define TCPWND_MIN16(x) x +#endif /* LWIP_WND_SCALE */ + +/* Global variables: */ +extern struct tcp_pcb *tcp_input_pcb; +extern u32_t tcp_ticks; +extern u8_t tcp_active_pcbs_changed; + +/* The TCP PCB lists. */ +union tcp_listen_pcbs_t { /* List of all TCP PCBs in LISTEN state. */ + struct tcp_pcb_listen *listen_pcbs; + struct tcp_pcb *pcbs; +}; +extern struct tcp_pcb *tcp_bound_pcbs; +extern union tcp_listen_pcbs_t tcp_listen_pcbs; +extern struct tcp_pcb *tcp_active_pcbs; /* List of all TCP PCBs that are in a + state in which they accept or send + data. */ +extern struct tcp_pcb *tcp_tw_pcbs; /* List of all TCP PCBs in TIME-WAIT. */ + +#define NUM_TCP_PCB_LISTS_NO_TIME_WAIT 3 +#define NUM_TCP_PCB_LISTS 4 +extern struct tcp_pcb ** const tcp_pcb_lists[NUM_TCP_PCB_LISTS]; + +/* Axioms about the above lists: + 1) Every TCP PCB that is not CLOSED is in one of the lists. + 2) A PCB is only in one of the lists. + 3) All PCBs in the tcp_listen_pcbs list is in LISTEN state. + 4) All PCBs in the tcp_tw_pcbs list is in TIME-WAIT state. +*/ +/* Define two macros, TCP_REG and TCP_RMV that registers a TCP PCB + with a PCB list or removes a PCB from a list, respectively. 
*/ +#ifndef TCP_DEBUG_PCB_LISTS +#define TCP_DEBUG_PCB_LISTS 0 +#endif +#if TCP_DEBUG_PCB_LISTS +#define TCP_REG(pcbs, npcb) do {\ + struct tcp_pcb *tcp_tmp_pcb; \ + LWIP_DEBUGF(TCP_DEBUG, ("TCP_REG %p local port %"U16_F"\n", (void *)(npcb), (npcb)->local_port)); \ + for (tcp_tmp_pcb = *(pcbs); \ + tcp_tmp_pcb != NULL; \ + tcp_tmp_pcb = tcp_tmp_pcb->next) { \ + LWIP_ASSERT("TCP_REG: already registered\n", tcp_tmp_pcb != (npcb)); \ + } \ + LWIP_ASSERT("TCP_REG: pcb->state != CLOSED", ((pcbs) == &tcp_bound_pcbs) || ((npcb)->state != CLOSED)); \ + (npcb)->next = *(pcbs); \ + LWIP_ASSERT("TCP_REG: npcb->next != npcb", (npcb)->next != (npcb)); \ + *(pcbs) = (npcb); \ + LWIP_ASSERT("TCP_REG: tcp_pcbs sane", tcp_pcbs_sane()); \ + tcp_timer_needed(); \ + } while(0) +#define TCP_RMV(pcbs, npcb) do { \ + struct tcp_pcb *tcp_tmp_pcb; \ + LWIP_ASSERT("TCP_RMV: pcbs != NULL", *(pcbs) != NULL); \ + LWIP_DEBUGF(TCP_DEBUG, ("TCP_RMV: removing %p from %p\n", (void *)(npcb), (void *)(*(pcbs)))); \ + if(*(pcbs) == (npcb)) { \ + *(pcbs) = (*pcbs)->next; \ + } else for (tcp_tmp_pcb = *(pcbs); tcp_tmp_pcb != NULL; tcp_tmp_pcb = tcp_tmp_pcb->next) { \ + if(tcp_tmp_pcb->next == (npcb)) { \ + tcp_tmp_pcb->next = (npcb)->next; \ + break; \ + } \ + } \ + (npcb)->next = NULL; \ + LWIP_ASSERT("TCP_RMV: tcp_pcbs sane", tcp_pcbs_sane()); \ + LWIP_DEBUGF(TCP_DEBUG, ("TCP_RMV: removed %p from %p\n", (void *)(npcb), (void *)(*(pcbs)))); \ + } while(0) + +#else /* LWIP_DEBUG */ + +#define TCP_REG(pcbs, npcb) \ + do { \ + (npcb)->next = *pcbs; \ + *(pcbs) = (npcb); \ + tcp_timer_needed(); \ + } while (0) + +#define TCP_RMV(pcbs, npcb) \ + do { \ + if(*(pcbs) == (npcb)) { \ + (*(pcbs)) = (*pcbs)->next; \ + } \ + else { \ + struct tcp_pcb *tcp_tmp_pcb; \ + for (tcp_tmp_pcb = *pcbs; \ + tcp_tmp_pcb != NULL; \ + tcp_tmp_pcb = tcp_tmp_pcb->next) { \ + if(tcp_tmp_pcb->next == (npcb)) { \ + tcp_tmp_pcb->next = (npcb)->next; \ + break; \ + } \ + } \ + } \ + (npcb)->next = NULL; \ + } while(0) + +#endif /* LWIP_DEBUG */ + +#define TCP_REG_ACTIVE(npcb) \ + do { \ + TCP_REG(&tcp_active_pcbs, npcb); \ + tcp_active_pcbs_changed = 1; \ + } while (0) + +#define TCP_RMV_ACTIVE(npcb) \ + do { \ + TCP_RMV(&tcp_active_pcbs, npcb); \ + tcp_active_pcbs_changed = 1; \ + } while (0) + +#define TCP_PCB_REMOVE_ACTIVE(pcb) \ + do { \ + tcp_pcb_remove(&tcp_active_pcbs, pcb); \ + tcp_active_pcbs_changed = 1; \ + } while (0) + + +/* Internal functions: */ +struct tcp_pcb *tcp_pcb_copy(struct tcp_pcb *pcb); +void tcp_pcb_purge(struct tcp_pcb *pcb); +void tcp_pcb_remove(struct tcp_pcb **pcblist, struct tcp_pcb *pcb); + +void tcp_segs_free(struct tcp_seg *seg); +void tcp_seg_free(struct tcp_seg *seg); +struct tcp_seg *tcp_seg_copy(struct tcp_seg *seg); + +#define tcp_ack(pcb) \ + do { \ + if((pcb)->flags & TF_ACK_DELAY) { \ + tcp_clear_flags(pcb, TF_ACK_DELAY); \ + tcp_ack_now(pcb); \ + } \ + else { \ + tcp_set_flags(pcb, TF_ACK_DELAY); \ + } \ + } while (0) + +#define tcp_ack_now(pcb) \ + tcp_set_flags(pcb, TF_ACK_NOW) + +err_t tcp_send_fin(struct tcp_pcb *pcb); +err_t tcp_enqueue_flags(struct tcp_pcb *pcb, u8_t flags); + +void tcp_rexmit_seg(struct tcp_pcb *pcb, struct tcp_seg *seg); + +void tcp_rst(const struct tcp_pcb* pcb, u32_t seqno, u32_t ackno, + const ip_addr_t *local_ip, const ip_addr_t *remote_ip, + u16_t local_port, u16_t remote_port); + +u32_t tcp_next_iss(struct tcp_pcb *pcb); + +err_t tcp_keepalive(struct tcp_pcb *pcb); +err_t tcp_split_unsent_seg(struct tcp_pcb *pcb, u16_t split); +err_t tcp_zero_window_probe(struct tcp_pcb *pcb); +void 
tcp_trigger_input_pcb_close(void); + +#if TCP_CALCULATE_EFF_SEND_MSS +u16_t tcp_eff_send_mss_netif(u16_t sendmss, struct netif *outif, + const ip_addr_t *dest); +#define tcp_eff_send_mss(sendmss, src, dest) \ + tcp_eff_send_mss_netif(sendmss, ip_route(src, dest), dest) +#endif /* TCP_CALCULATE_EFF_SEND_MSS */ + +#if LWIP_CALLBACK_API +err_t tcp_recv_null(void *arg, struct tcp_pcb *pcb, struct pbuf *p, err_t err); +#endif /* LWIP_CALLBACK_API */ + +#if TCP_DEBUG || TCP_INPUT_DEBUG || TCP_OUTPUT_DEBUG +void tcp_debug_print(struct tcp_hdr *tcphdr); +void tcp_debug_print_flags(u8_t flags); +void tcp_debug_print_state(enum tcp_state s); +void tcp_debug_print_pcbs(void); +s16_t tcp_pcbs_sane(void); +#else +# define tcp_debug_print(tcphdr) +# define tcp_debug_print_flags(flags) +# define tcp_debug_print_state(s) +# define tcp_debug_print_pcbs() +# define tcp_pcbs_sane() 1 +#endif /* TCP_DEBUG */ + +/** External function (implemented in timers.c), called when TCP detects + * that a timer is needed (i.e. active- or time-wait-pcb found). */ +void tcp_timer_needed(void); + +void tcp_netif_ip_addr_changed(const ip_addr_t* old_addr, const ip_addr_t* new_addr); + +#if TCP_QUEUE_OOSEQ +void tcp_free_ooseq(struct tcp_pcb *pcb); +#endif + +#if LWIP_TCP_PCB_NUM_EXT_ARGS +err_t tcp_ext_arg_invoke_callbacks_passive_open(struct tcp_pcb_listen *lpcb, struct tcp_pcb *cpcb); +#endif + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_TCP */ + +#endif /* LWIP_HDR_TCP_PRIV_H */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/priv/tcpip_priv.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/priv/tcpip_priv.h new file mode 100644 index 0000000..327673b --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/priv/tcpip_priv.h @@ -0,0 +1,170 @@ +/** + * @file + * TCPIP API internal implementations (do not use in application code) + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. 
+ * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ +#ifndef LWIP_HDR_TCPIP_PRIV_H +#define LWIP_HDR_TCPIP_PRIV_H + +#include "lwip/opt.h" + +#if !NO_SYS /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/tcpip.h" +#include "lwip/sys.h" +#include "lwip/timeouts.h" + +#ifdef __cplusplus +extern "C" { +#endif + +struct pbuf; +struct netif; + +#if LWIP_MPU_COMPATIBLE +#define API_VAR_REF(name) (*(name)) +#define API_VAR_DECLARE(type, name) type * name +#define API_VAR_ALLOC_EXT(type, pool, name, errorblock) do { \ + name = (type *)memp_malloc(pool); \ + if (name == NULL) { \ + errorblock; \ + } \ + } while(0) +#define API_VAR_ALLOC(type, pool, name, errorval) API_VAR_ALLOC_EXT(type, pool, name, return errorval) +#define API_VAR_ALLOC_POOL(type, pool, name, errorval) do { \ + name = (type *)LWIP_MEMPOOL_ALLOC(pool); \ + if (name == NULL) { \ + return errorval; \ + } \ + } while(0) +#define API_VAR_FREE(pool, name) memp_free(pool, name) +#define API_VAR_FREE_POOL(pool, name) LWIP_MEMPOOL_FREE(pool, name) +#define API_EXPR_REF(expr) (&(expr)) +#if LWIP_NETCONN_SEM_PER_THREAD +#define API_EXPR_REF_SEM(expr) (expr) +#else +#define API_EXPR_REF_SEM(expr) API_EXPR_REF(expr) +#endif +#define API_EXPR_DEREF(expr) expr +#define API_MSG_M_DEF(m) m +#define API_MSG_M_DEF_C(t, m) t m +#else /* LWIP_MPU_COMPATIBLE */ +#define API_VAR_REF(name) name +#define API_VAR_DECLARE(type, name) type name +#define API_VAR_ALLOC_EXT(type, pool, name, errorblock) +#define API_VAR_ALLOC(type, pool, name, errorval) +#define API_VAR_ALLOC_POOL(type, pool, name, errorval) +#define API_VAR_FREE(pool, name) +#define API_VAR_FREE_POOL(pool, name) +#define API_EXPR_REF(expr) expr +#define API_EXPR_REF_SEM(expr) API_EXPR_REF(expr) +#define API_EXPR_DEREF(expr) (*(expr)) +#define API_MSG_M_DEF(m) *m +#define API_MSG_M_DEF_C(t, m) const t * m +#endif /* LWIP_MPU_COMPATIBLE */ + +err_t tcpip_send_msg_wait_sem(tcpip_callback_fn fn, void *apimsg, sys_sem_t* sem); + +struct tcpip_api_call_data +{ +#if !LWIP_TCPIP_CORE_LOCKING + err_t err; +#if !LWIP_NETCONN_SEM_PER_THREAD + sys_sem_t sem; +#endif /* LWIP_NETCONN_SEM_PER_THREAD */ +#else /* !LWIP_TCPIP_CORE_LOCKING */ + u8_t dummy; /* avoid empty struct :-( */ +#endif /* !LWIP_TCPIP_CORE_LOCKING */ +}; +typedef err_t (*tcpip_api_call_fn)(struct tcpip_api_call_data* call); +err_t tcpip_api_call(tcpip_api_call_fn fn, struct tcpip_api_call_data *call); + +enum tcpip_msg_type { +#if !LWIP_TCPIP_CORE_LOCKING + TCPIP_MSG_API, + TCPIP_MSG_API_CALL, +#endif /* !LWIP_TCPIP_CORE_LOCKING */ +#if !LWIP_TCPIP_CORE_LOCKING_INPUT + TCPIP_MSG_INPKT, +#endif /* !LWIP_TCPIP_CORE_LOCKING_INPUT */ +#if LWIP_TCPIP_TIMEOUT && LWIP_TIMERS + TCPIP_MSG_TIMEOUT, + TCPIP_MSG_UNTIMEOUT, +#endif /* LWIP_TCPIP_TIMEOUT && LWIP_TIMERS */ + TCPIP_MSG_CALLBACK, + TCPIP_MSG_CALLBACK_STATIC +}; + +struct tcpip_msg { + enum tcpip_msg_type type; + union { +#if !LWIP_TCPIP_CORE_LOCKING + struct { + tcpip_callback_fn function; + void* msg; + } api_msg; + struct { + tcpip_api_call_fn function; + struct tcpip_api_call_data *arg; + sys_sem_t *sem; + } api_call; +#endif /* LWIP_TCPIP_CORE_LOCKING */ +#if !LWIP_TCPIP_CORE_LOCKING_INPUT + struct { + struct pbuf *p; + struct netif *netif; + netif_input_fn input_fn; + } inp; +#endif /* !LWIP_TCPIP_CORE_LOCKING_INPUT */ + struct { + tcpip_callback_fn function; + void *ctx; + } cb; +#if LWIP_TCPIP_TIMEOUT && LWIP_TIMERS + struct { + u32_t msecs; + sys_timeout_handler h; + void *arg; + } tmo; +#endif /* LWIP_TCPIP_TIMEOUT 
&& LWIP_TIMERS */ + } msg; +}; + +#ifdef __cplusplus +} +#endif + +#endif /* !NO_SYS */ + +#endif /* LWIP_HDR_TCPIP_PRIV_H */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/prot/autoip.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/prot/autoip.h new file mode 100644 index 0000000..3fe1532 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/prot/autoip.h @@ -0,0 +1,78 @@ +/** + * @file + * AutoIP protocol definitions + */ + +/* + * + * Copyright (c) 2007 Dominik Spies + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * Author: Dominik Spies + * + * This is a AutoIP implementation for the lwIP TCP/IP stack. It aims to conform + * with RFC 3927. + * + */ + +#ifndef LWIP_HDR_PROT_AUTOIP_H +#define LWIP_HDR_PROT_AUTOIP_H + +#ifdef __cplusplus +extern "C" { +#endif + +/* 169.254.0.0 */ +#define AUTOIP_NET 0xA9FE0000 +/* 169.254.1.0 */ +#define AUTOIP_RANGE_START (AUTOIP_NET | 0x0100) +/* 169.254.254.255 */ +#define AUTOIP_RANGE_END (AUTOIP_NET | 0xFEFF) + +/* RFC 3927 Constants */ +#define PROBE_WAIT 1 /* second (initial random delay) */ +#define PROBE_MIN 1 /* second (minimum delay till repeated probe) */ +#define PROBE_MAX 2 /* seconds (maximum delay till repeated probe) */ +#define PROBE_NUM 3 /* (number of probe packets) */ +#define ANNOUNCE_NUM 2 /* (number of announcement packets) */ +#define ANNOUNCE_INTERVAL 2 /* seconds (time between announcement packets) */ +#define ANNOUNCE_WAIT 2 /* seconds (delay before announcing) */ +#define MAX_CONFLICTS 10 /* (max conflicts before rate limiting) */ +#define RATE_LIMIT_INTERVAL 60 /* seconds (delay between successive attempts) */ +#define DEFEND_INTERVAL 10 /* seconds (min. 
wait between defensive ARPs) */ + +/* AutoIP client states */ +typedef enum { + AUTOIP_STATE_OFF = 0, + AUTOIP_STATE_PROBING = 1, + AUTOIP_STATE_ANNOUNCING = 2, + AUTOIP_STATE_BOUND = 3 +} autoip_state_enum_t; + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_PROT_AUTOIP_H */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/prot/dhcp.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/prot/dhcp.h new file mode 100644 index 0000000..6d16ce3 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/prot/dhcp.h @@ -0,0 +1,178 @@ +/** + * @file + * DHCP protocol definitions + */ + +/* + * Copyright (c) 2001-2004 Leon Woestenberg + * Copyright (c) 2001-2004 Axon Digital Design B.V., The Netherlands. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
+ * + * Author: Leon Woestenberg + * + */ +#ifndef LWIP_HDR_PROT_DHCP_H +#define LWIP_HDR_PROT_DHCP_H + +#include "lwip/opt.h" +#include "lwip/arch.h" +#include "lwip/prot/ip4.h" + +#ifdef __cplusplus +extern "C" { +#endif + + /* DHCP message item offsets and length */ +#define DHCP_CHADDR_LEN 16U +#define DHCP_SNAME_OFS 44U +#define DHCP_SNAME_LEN 64U +#define DHCP_FILE_OFS 108U +#define DHCP_FILE_LEN 128U +#define DHCP_MSG_LEN 236U +#define DHCP_OPTIONS_OFS (DHCP_MSG_LEN + 4U) /* 4 byte: cookie */ + +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +/** minimum set of fields of any DHCP message */ +struct dhcp_msg +{ + PACK_STRUCT_FLD_8(u8_t op); + PACK_STRUCT_FLD_8(u8_t htype); + PACK_STRUCT_FLD_8(u8_t hlen); + PACK_STRUCT_FLD_8(u8_t hops); + PACK_STRUCT_FIELD(u32_t xid); + PACK_STRUCT_FIELD(u16_t secs); + PACK_STRUCT_FIELD(u16_t flags); + PACK_STRUCT_FLD_S(ip4_addr_p_t ciaddr); + PACK_STRUCT_FLD_S(ip4_addr_p_t yiaddr); + PACK_STRUCT_FLD_S(ip4_addr_p_t siaddr); + PACK_STRUCT_FLD_S(ip4_addr_p_t giaddr); + PACK_STRUCT_FLD_8(u8_t chaddr[DHCP_CHADDR_LEN]); + PACK_STRUCT_FLD_8(u8_t sname[DHCP_SNAME_LEN]); + PACK_STRUCT_FLD_8(u8_t file[DHCP_FILE_LEN]); + PACK_STRUCT_FIELD(u32_t cookie); +#define DHCP_MIN_OPTIONS_LEN 68U +/** make sure user does not configure this too small */ +#if ((defined(DHCP_OPTIONS_LEN)) && (DHCP_OPTIONS_LEN < DHCP_MIN_OPTIONS_LEN)) +# undef DHCP_OPTIONS_LEN +#endif +/** allow this to be configured in lwipopts.h, but not too small */ +#if (!defined(DHCP_OPTIONS_LEN)) +/** set this to be sufficient for your options in outgoing DHCP msgs */ +# define DHCP_OPTIONS_LEN DHCP_MIN_OPTIONS_LEN +#endif + PACK_STRUCT_FLD_8(u8_t options[DHCP_OPTIONS_LEN]); +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif + + +/* DHCP client states */ +typedef enum { + DHCP_STATE_OFF = 0, + DHCP_STATE_REQUESTING = 1, + DHCP_STATE_INIT = 2, + DHCP_STATE_REBOOTING = 3, + DHCP_STATE_REBINDING = 4, + DHCP_STATE_RENEWING = 5, + DHCP_STATE_SELECTING = 6, + DHCP_STATE_INFORMING = 7, + DHCP_STATE_CHECKING = 8, + DHCP_STATE_PERMANENT = 9, /* not yet implemented */ + DHCP_STATE_BOUND = 10, + DHCP_STATE_RELEASING = 11, /* not yet implemented */ + DHCP_STATE_BACKING_OFF = 12 +} dhcp_state_enum_t; + +/* DHCP op codes */ +#define DHCP_BOOTREQUEST 1 +#define DHCP_BOOTREPLY 2 + +/* DHCP message types */ +#define DHCP_DISCOVER 1 +#define DHCP_OFFER 2 +#define DHCP_REQUEST 3 +#define DHCP_DECLINE 4 +#define DHCP_ACK 5 +#define DHCP_NAK 6 +#define DHCP_RELEASE 7 +#define DHCP_INFORM 8 + +#define DHCP_MAGIC_COOKIE 0x63825363UL + +/* This is a list of options for BOOTP and DHCP, see RFC 2132 for descriptions */ + +/* BootP options */ +#define DHCP_OPTION_PAD 0 +#define DHCP_OPTION_SUBNET_MASK 1 /* RFC 2132 3.3 */ +#define DHCP_OPTION_ROUTER 3 +#define DHCP_OPTION_DNS_SERVER 6 +#define DHCP_OPTION_HOSTNAME 12 +#define DHCP_OPTION_IP_TTL 23 +#define DHCP_OPTION_MTU 26 +#define DHCP_OPTION_BROADCAST 28 +#define DHCP_OPTION_TCP_TTL 37 +#define DHCP_OPTION_NTP 42 +#define DHCP_OPTION_END 255 + +/* DHCP options */ +#define DHCP_OPTION_REQUESTED_IP 50 /* RFC 2132 9.1, requested IP address */ +#define DHCP_OPTION_LEASE_TIME 51 /* RFC 2132 9.2, time in seconds, in 4 bytes */ +#define DHCP_OPTION_OVERLOAD 52 /* RFC2132 9.3, use file and/or sname field for options */ + +#define DHCP_OPTION_MESSAGE_TYPE 53 /* RFC 2132 9.6, important for DHCP */ +#define DHCP_OPTION_MESSAGE_TYPE_LEN 1 + +#define DHCP_OPTION_SERVER_ID 54 /* RFC 2132 
9.7, server IP address */ +#define DHCP_OPTION_PARAMETER_REQUEST_LIST 55 /* RFC 2132 9.8, requested option types */ + +#define DHCP_OPTION_MAX_MSG_SIZE 57 /* RFC 2132 9.10, message size accepted >= 576 */ +#define DHCP_OPTION_MAX_MSG_SIZE_LEN 2 + +#define DHCP_OPTION_T1 58 /* T1 renewal time */ +#define DHCP_OPTION_T2 59 /* T2 rebinding time */ +#define DHCP_OPTION_US 60 +#define DHCP_OPTION_CLIENT_ID 61 +#define DHCP_OPTION_TFTP_SERVERNAME 66 +#define DHCP_OPTION_BOOTFILE 67 + +/* possible combinations of overloading the file and sname fields with options */ +#define DHCP_OVERLOAD_NONE 0 +#define DHCP_OVERLOAD_FILE 1 +#define DHCP_OVERLOAD_SNAME 2 +#define DHCP_OVERLOAD_SNAME_FILE 3 + + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_PROT_DHCP_H */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/prot/dhcp6.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/prot/dhcp6.h new file mode 100644 index 0000000..1be5f84 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/prot/dhcp6.h @@ -0,0 +1,138 @@ +/** + * @file + * DHCPv6 protocol definitions + */ + +/* + * Copyright (c) 2017 Simon Goldschmidt + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
+ * + * Author: Simon Goldschmidt + * + */ +#ifndef LWIP_HDR_PROT_DHCP6_H +#define LWIP_HDR_PROT_DHCP6_H + +#include "lwip/opt.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#define DHCP6_CLIENT_PORT 546 +#define DHCP6_SERVER_PORT 547 + + + /* DHCPv6 message item offsets and length */ +#define DHCP6_TRANSACTION_ID_LEN 3 + +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +/** minimum set of fields of any DHCPv6 message */ +struct dhcp6_msg +{ + PACK_STRUCT_FLD_8(u8_t msgtype); + PACK_STRUCT_FLD_8(u8_t transaction_id[DHCP6_TRANSACTION_ID_LEN]); + /* options follow */ +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif + + +/* DHCP6 client states */ +typedef enum { + DHCP6_STATE_OFF = 0, + DHCP6_STATE_STATELESS_IDLE = 1, + DHCP6_STATE_REQUESTING_CONFIG = 2 +} dhcp6_state_enum_t; + +/* DHCPv6 message types */ +#define DHCP6_SOLICIT 1 +#define DHCP6_ADVERTISE 2 +#define DHCP6_REQUEST 3 +#define DHCP6_CONFIRM 4 +#define DHCP6_RENEW 5 +#define DHCP6_REBIND 6 +#define DHCP6_REPLY 7 +#define DHCP6_RELEASE 8 +#define DHCP6_DECLINE 9 +#define DHCP6_RECONFIGURE 10 +#define DHCP6_INFOREQUEST 11 +#define DHCP6_RELAYFORW 12 +#define DHCP6_RELAYREPL 13 +/* More message types see https://www.iana.org/assignments/dhcpv6-parameters/dhcpv6-parameters.xhtml */ + +/** DHCPv6 status codes */ +#define DHCP6_STATUS_SUCCESS 0 /* Success. */ +#define DHCP6_STATUS_UNSPECFAIL 1 /* Failure, reason unspecified; this status code is sent by either a client or a server to indicate a failure not explicitly specified in this document. */ +#define DHCP6_STATUS_NOADDRSAVAIL 2 /* Server has no addresses available to assign to the IA(s). */ +#define DHCP6_STATUS_NOBINDING 3 /* Client record (binding) unavailable. */ +#define DHCP6_STATUS_NOTONLINK 4 /* The prefix for the address is not appropriate for the link to which the client is attached. */ +#define DHCP6_STATUS_USEMULTICAST 5 /* Sent by a server to a client to force the client to send messages to the server using the All_DHCP_Relay_Agents_and_Servers address. 
*/ +/* More status codes see https://www.iana.org/assignments/dhcpv6-parameters/dhcpv6-parameters.xhtml */ + +/** DHCPv6 DUID types */ +#define DHCP6_DUID_LLT 1 /* LLT: Link-layer Address Plus Time */ +#define DHCP6_DUID_EN 2 /* EN: Enterprise number */ +#define DHCP6_DUID_LL 3 /* LL: Link-layer Address */ +#define DHCP6_DUID_UUID 4 /* UUID (RFC 6355) */ + +/* DHCPv6 options */ +#define DHCP6_OPTION_CLIENTID 1 +#define DHCP6_OPTION_SERVERID 2 +#define DHCP6_OPTION_IA_NA 3 +#define DHCP6_OPTION_IA_TA 4 +#define DHCP6_OPTION_IAADDR 5 +#define DHCP6_OPTION_ORO 6 +#define DHCP6_OPTION_PREFERENCE 7 +#define DHCP6_OPTION_ELAPSED_TIME 8 +#define DHCP6_OPTION_RELAY_MSG 9 +#define DHCP6_OPTION_AUTH 11 +#define DHCP6_OPTION_UNICAST 12 +#define DHCP6_OPTION_STATUS_CODE 13 +#define DHCP6_OPTION_RAPID_COMMIT 14 +#define DHCP6_OPTION_USER_CLASS 15 +#define DHCP6_OPTION_VENDOR_CLASS 16 +#define DHCP6_OPTION_VENDOR_OPTS 17 +#define DHCP6_OPTION_INTERFACE_ID 18 +#define DHCP6_OPTION_RECONF_MSG 19 +#define DHCP6_OPTION_RECONF_ACCEPT 20 +/* More options see https://www.iana.org/assignments/dhcpv6-parameters/dhcpv6-parameters.xhtml */ +#define DHCP6_OPTION_DNS_SERVERS 23 /* RFC 3646 */ +#define DHCP6_OPTION_DOMAIN_LIST 24 /* RFC 3646 */ +#define DHCP6_OPTION_SNTP_SERVERS 31 /* RFC 4075 */ + + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_PROT_DHCP6_H */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/prot/dns.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/prot/dns.h new file mode 100644 index 0000000..022a32c --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/prot/dns.h @@ -0,0 +1,140 @@ +/** + * @file + * DNS - host name to IP address resolver. + */ + +/* + * Port to lwIP from uIP + * by Jim Pettinato April 2007 + * + * security fixes and more by Simon Goldschmidt + * + * uIP version Copyright (c) 2002-2003, Adam Dunkels. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote + * products derived from this software without specific prior + * written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS + * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE + * GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef LWIP_HDR_PROT_DNS_H +#define LWIP_HDR_PROT_DNS_H + +#include "lwip/arch.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/** DNS server port address */ +#ifndef DNS_SERVER_PORT +#define DNS_SERVER_PORT 53 +#endif + +/* DNS field TYPE used for "Resource Records" */ +#define DNS_RRTYPE_A 1 /* a host address */ +#define DNS_RRTYPE_NS 2 /* an authoritative name server */ +#define DNS_RRTYPE_MD 3 /* a mail destination (Obsolete - use MX) */ +#define DNS_RRTYPE_MF 4 /* a mail forwarder (Obsolete - use MX) */ +#define DNS_RRTYPE_CNAME 5 /* the canonical name for an alias */ +#define DNS_RRTYPE_SOA 6 /* marks the start of a zone of authority */ +#define DNS_RRTYPE_MB 7 /* a mailbox domain name (EXPERIMENTAL) */ +#define DNS_RRTYPE_MG 8 /* a mail group member (EXPERIMENTAL) */ +#define DNS_RRTYPE_MR 9 /* a mail rename domain name (EXPERIMENTAL) */ +#define DNS_RRTYPE_NULL 10 /* a null RR (EXPERIMENTAL) */ +#define DNS_RRTYPE_WKS 11 /* a well known service description */ +#define DNS_RRTYPE_PTR 12 /* a domain name pointer */ +#define DNS_RRTYPE_HINFO 13 /* host information */ +#define DNS_RRTYPE_MINFO 14 /* mailbox or mail list information */ +#define DNS_RRTYPE_MX 15 /* mail exchange */ +#define DNS_RRTYPE_TXT 16 /* text strings */ +#define DNS_RRTYPE_AAAA 28 /* IPv6 address */ +#define DNS_RRTYPE_SRV 33 /* service location */ +#define DNS_RRTYPE_ANY 255 /* any type */ + +/* DNS field CLASS used for "Resource Records" */ +#define DNS_RRCLASS_IN 1 /* the Internet */ +#define DNS_RRCLASS_CS 2 /* the CSNET class (Obsolete - used only for examples in some obsolete RFCs) */ +#define DNS_RRCLASS_CH 3 /* the CHAOS class */ +#define DNS_RRCLASS_HS 4 /* Hesiod [Dyer 87] */ +#define DNS_RRCLASS_ANY 255 /* any class */ +#define DNS_RRCLASS_FLUSH 0x800 /* Flush bit */ + +/* DNS protocol flags */ +#define DNS_FLAG1_RESPONSE 0x80 +#define DNS_FLAG1_OPCODE_STATUS 0x10 +#define DNS_FLAG1_OPCODE_INVERSE 0x08 +#define DNS_FLAG1_OPCODE_STANDARD 0x00 +#define DNS_FLAG1_AUTHORATIVE 0x04 +#define DNS_FLAG1_TRUNC 0x02 +#define DNS_FLAG1_RD 0x01 +#define DNS_FLAG2_RA 0x80 +#define DNS_FLAG2_ERR_MASK 0x0f +#define DNS_FLAG2_ERR_NONE 0x00 +#define DNS_FLAG2_ERR_NAME 0x03 + +#define DNS_HDR_GET_OPCODE(hdr) ((((hdr)->flags1) >> 3) & 0xF) + +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +/** DNS message header */ +struct dns_hdr { + PACK_STRUCT_FIELD(u16_t id); + PACK_STRUCT_FLD_8(u8_t flags1); + PACK_STRUCT_FLD_8(u8_t flags2); + PACK_STRUCT_FIELD(u16_t numquestions); + PACK_STRUCT_FIELD(u16_t numanswers); + PACK_STRUCT_FIELD(u16_t numauthrr); + PACK_STRUCT_FIELD(u16_t numextrarr); +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif +#define SIZEOF_DNS_HDR 12 + + +/* Multicast DNS definitions */ + +/** UDP port for multicast DNS queries */ +#ifndef DNS_MQUERY_PORT +#define DNS_MQUERY_PORT 5353 +#endif + +/* IPv4 group for multicast DNS queries: 224.0.0.251 */ +#ifndef DNS_MQUERY_IPV4_GROUP_INIT +#define DNS_MQUERY_IPV4_GROUP_INIT IPADDR4_INIT_BYTES(224,0,0,251) +#endif + +/* IPv6 group for multicast DNS queries: FF02::FB */ +#ifndef DNS_MQUERY_IPV6_GROUP_INIT +#define DNS_MQUERY_IPV6_GROUP_INIT IPADDR6_INIT_HOST(0xFF020000,0,0,0xFB) +#endif + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_PROT_DNS_H */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/prot/etharp.h 
b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/prot/etharp.h new file mode 100644 index 0000000..9e4b02c --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/prot/etharp.h @@ -0,0 +1,114 @@ +/** + * @file + * ARP protocol definitions + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ +#ifndef LWIP_HDR_PROT_ETHARP_H +#define LWIP_HDR_PROT_ETHARP_H + +#include "lwip/arch.h" +#include "lwip/prot/ethernet.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#ifndef ETHARP_HWADDR_LEN +#define ETHARP_HWADDR_LEN ETH_HWADDR_LEN +#endif + +/** + * struct ip4_addr_wordaligned is used in the definition of the ARP packet format in + * order to support compilers that don't have structure packing. + */ +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +struct ip4_addr_wordaligned { + PACK_STRUCT_FIELD(u16_t addrw[2]); +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif + +/** MEMCPY-like copying of IP addresses where addresses are known to be + * 16-bit-aligned if the port is correctly configured (so a port could define + * this to copying 2 u16_t's) - no NULL-pointer-checking needed. */ +#ifndef IPADDR_WORDALIGNED_COPY_TO_IP4_ADDR_T +#define IPADDR_WORDALIGNED_COPY_TO_IP4_ADDR_T(dest, src) SMEMCPY(dest, src, sizeof(ip4_addr_t)) +#endif + + /** MEMCPY-like copying of IP addresses where addresses are known to be + * 16-bit-aligned if the port is correctly configured (so a port could define + * this to copying 2 u16_t's) - no NULL-pointer-checking needed. 
*/ +#ifndef IPADDR_WORDALIGNED_COPY_FROM_IP4_ADDR_T +#define IPADDR_WORDALIGNED_COPY_FROM_IP4_ADDR_T(dest, src) SMEMCPY(dest, src, sizeof(ip4_addr_t)) +#endif + +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +/** the ARP message, see RFC 826 ("Packet format") */ +struct etharp_hdr { + PACK_STRUCT_FIELD(u16_t hwtype); + PACK_STRUCT_FIELD(u16_t proto); + PACK_STRUCT_FLD_8(u8_t hwlen); + PACK_STRUCT_FLD_8(u8_t protolen); + PACK_STRUCT_FIELD(u16_t opcode); + PACK_STRUCT_FLD_S(struct eth_addr shwaddr); + PACK_STRUCT_FLD_S(struct ip4_addr_wordaligned sipaddr); + PACK_STRUCT_FLD_S(struct eth_addr dhwaddr); + PACK_STRUCT_FLD_S(struct ip4_addr_wordaligned dipaddr); +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif + +#define SIZEOF_ETHARP_HDR 28 + +/* ARP message types (opcodes) */ +enum etharp_opcode { + ARP_REQUEST = 1, + ARP_REPLY = 2 +}; + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_PROT_ETHARP_H */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/prot/ethernet.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/prot/ethernet.h new file mode 100644 index 0000000..fef49e2 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/prot/ethernet.h @@ -0,0 +1,125 @@ +/** + * @file + * Ethernet protocol definitions + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
+ * + * Author: Adam Dunkels + * + */ +#ifndef LWIP_HDR_PROT_ETHERNET_H +#define LWIP_HDR_PROT_ETHERNET_H + +#include "lwip/arch.h" +#include "lwip/prot/ieee.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#ifndef ETH_HWADDR_LEN +#ifdef ETHARP_HWADDR_LEN +#define ETH_HWADDR_LEN ETHARP_HWADDR_LEN /* compatibility mode */ +#else +#define ETH_HWADDR_LEN 6 +#endif +#endif + +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +/** An Ethernet MAC address */ +struct eth_addr { + PACK_STRUCT_FLD_8(u8_t addr[ETH_HWADDR_LEN]); +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif + +/** Initialize a struct eth_addr with its 6 bytes (takes care of correct braces) */ +#define ETH_ADDR(b0, b1, b2, b3, b4, b5) {{b0, b1, b2, b3, b4, b5}} + +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +/** Ethernet header */ +struct eth_hdr { +#if ETH_PAD_SIZE + PACK_STRUCT_FLD_8(u8_t padding[ETH_PAD_SIZE]); +#endif + PACK_STRUCT_FLD_S(struct eth_addr dest); + PACK_STRUCT_FLD_S(struct eth_addr src); + PACK_STRUCT_FIELD(u16_t type); +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif + +#define SIZEOF_ETH_HDR (14 + ETH_PAD_SIZE) + +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +/** VLAN header inserted between ethernet header and payload + * if 'type' in ethernet header is ETHTYPE_VLAN. + * See IEEE802.Q */ +struct eth_vlan_hdr { + PACK_STRUCT_FIELD(u16_t prio_vid); + PACK_STRUCT_FIELD(u16_t tpid); +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif + +#define SIZEOF_VLAN_HDR 4 +#define VLAN_ID(vlan_hdr) (lwip_htons((vlan_hdr)->prio_vid) & 0xFFF) + +/** The 24-bit IANA IPv4-multicast OUI is 01-00-5e: */ +#define LL_IP4_MULTICAST_ADDR_0 0x01 +#define LL_IP4_MULTICAST_ADDR_1 0x00 +#define LL_IP4_MULTICAST_ADDR_2 0x5e + +/** IPv6 multicast uses this prefix */ +#define LL_IP6_MULTICAST_ADDR_0 0x33 +#define LL_IP6_MULTICAST_ADDR_1 0x33 + +#define eth_addr_cmp(addr1, addr2) (memcmp((addr1)->addr, (addr2)->addr, ETH_HWADDR_LEN) == 0) + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_PROT_ETHERNET_H */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/prot/iana.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/prot/iana.h new file mode 100644 index 0000000..859d238 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/prot/iana.h @@ -0,0 +1,97 @@ +/** + * @file + * IANA assigned numbers (RFC 1700 and successors) + * + * @defgroup iana IANA assigned numbers + * @ingroup infrastructure + */ + +/* + * Copyright (c) 2017 Dirk Ziegelmeier. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. 
The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Dirk Ziegelmeier + * + */ + +#ifndef LWIP_HDR_PROT_IANA_H +#define LWIP_HDR_PROT_IANA_H + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * @ingroup iana + * Hardware types + */ +enum lwip_iana_hwtype { + /** Ethernet */ + LWIP_IANA_HWTYPE_ETHERNET = 1 +}; + +/** + * @ingroup iana + * Port numbers + * https://www.iana.org/assignments/service-names-port-numbers/service-names-port-numbers.txt + */ +enum lwip_iana_port_number { + /** SMTP */ + LWIP_IANA_PORT_SMTP = 25, + /** DHCP server */ + LWIP_IANA_PORT_DHCP_SERVER = 67, + /** DHCP client */ + LWIP_IANA_PORT_DHCP_CLIENT = 68, + /** TFTP */ + LWIP_IANA_PORT_TFTP = 69, + /** HTTP */ + LWIP_IANA_PORT_HTTP = 80, + /** SNTP */ + LWIP_IANA_PORT_SNTP = 123, + /** NETBIOS */ + LWIP_IANA_PORT_NETBIOS = 137, + /** SNMP */ + LWIP_IANA_PORT_SNMP = 161, + /** SNMP traps */ + LWIP_IANA_PORT_SNMP_TRAP = 162, + /** HTTPS */ + LWIP_IANA_PORT_HTTPS = 443, + /** SMTPS */ + LWIP_IANA_PORT_SMTPS = 465, + /** MQTT */ + LWIP_IANA_PORT_MQTT = 1883, + /** MDNS */ + LWIP_IANA_PORT_MDNS = 5353, + /** Secure MQTT */ + LWIP_IANA_PORT_SECURE_MQTT = 8883 +}; + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_PROT_IANA_H */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/prot/icmp.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/prot/icmp.h new file mode 100644 index 0000000..d8183fd --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/prot/icmp.h @@ -0,0 +1,91 @@ +/** + * @file + * ICMP protocol definitions + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ +#ifndef LWIP_HDR_PROT_ICMP_H +#define LWIP_HDR_PROT_ICMP_H + +#include "lwip/arch.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#define ICMP_ER 0 /* echo reply */ +#define ICMP_DUR 3 /* destination unreachable */ +#define ICMP_SQ 4 /* source quench */ +#define ICMP_RD 5 /* redirect */ +#define ICMP_ECHO 8 /* echo */ +#define ICMP_TE 11 /* time exceeded */ +#define ICMP_PP 12 /* parameter problem */ +#define ICMP_TS 13 /* timestamp */ +#define ICMP_TSR 14 /* timestamp reply */ +#define ICMP_IRQ 15 /* information request */ +#define ICMP_IR 16 /* information reply */ +#define ICMP_AM 17 /* address mask request */ +#define ICMP_AMR 18 /* address mask reply */ + +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +/** This is the standard ICMP header only that the u32_t data + * is split to two u16_t like ICMP echo needs it. + * This header is also used for other ICMP types that do not + * use the data part. + */ +PACK_STRUCT_BEGIN +struct icmp_echo_hdr { + PACK_STRUCT_FLD_8(u8_t type); + PACK_STRUCT_FLD_8(u8_t code); + PACK_STRUCT_FIELD(u16_t chksum); + PACK_STRUCT_FIELD(u16_t id); + PACK_STRUCT_FIELD(u16_t seqno); +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif + +/* Compatibility defines, old versions used to combine type and code to an u16_t */ +#define ICMPH_TYPE(hdr) ((hdr)->type) +#define ICMPH_CODE(hdr) ((hdr)->code) +#define ICMPH_TYPE_SET(hdr, t) ((hdr)->type = (t)) +#define ICMPH_CODE_SET(hdr, c) ((hdr)->code = (c)) + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_PROT_ICMP_H */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/prot/icmp6.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/prot/icmp6.h new file mode 100644 index 0000000..b1c6d56 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/prot/icmp6.h @@ -0,0 +1,170 @@ +/** + * @file + * ICMP6 protocol definitions + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. 
The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ +#ifndef LWIP_HDR_PROT_ICMP6_H +#define LWIP_HDR_PROT_ICMP6_H + +#include "lwip/arch.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/** ICMP type */ +enum icmp6_type { + /** Destination unreachable */ + ICMP6_TYPE_DUR = 1, + /** Packet too big */ + ICMP6_TYPE_PTB = 2, + /** Time exceeded */ + ICMP6_TYPE_TE = 3, + /** Parameter problem */ + ICMP6_TYPE_PP = 4, + /** Private experimentation */ + ICMP6_TYPE_PE1 = 100, + /** Private experimentation */ + ICMP6_TYPE_PE2 = 101, + /** Reserved for expansion of error messages */ + ICMP6_TYPE_RSV_ERR = 127, + + /** Echo request */ + ICMP6_TYPE_EREQ = 128, + /** Echo reply */ + ICMP6_TYPE_EREP = 129, + /** Multicast listener query */ + ICMP6_TYPE_MLQ = 130, + /** Multicast listener report */ + ICMP6_TYPE_MLR = 131, + /** Multicast listener done */ + ICMP6_TYPE_MLD = 132, + /** Router solicitation */ + ICMP6_TYPE_RS = 133, + /** Router advertisement */ + ICMP6_TYPE_RA = 134, + /** Neighbor solicitation */ + ICMP6_TYPE_NS = 135, + /** Neighbor advertisement */ + ICMP6_TYPE_NA = 136, + /** Redirect */ + ICMP6_TYPE_RD = 137, + /** Multicast router advertisement */ + ICMP6_TYPE_MRA = 151, + /** Multicast router solicitation */ + ICMP6_TYPE_MRS = 152, + /** Multicast router termination */ + ICMP6_TYPE_MRT = 153, + /** Private experimentation */ + ICMP6_TYPE_PE3 = 200, + /** Private experimentation */ + ICMP6_TYPE_PE4 = 201, + /** Reserved for expansion of informational messages */ + ICMP6_TYPE_RSV_INF = 255 +}; + +/** ICMP destination unreachable codes */ +enum icmp6_dur_code { + /** No route to destination */ + ICMP6_DUR_NO_ROUTE = 0, + /** Communication with destination administratively prohibited */ + ICMP6_DUR_PROHIBITED = 1, + /** Beyond scope of source address */ + ICMP6_DUR_SCOPE = 2, + /** Address unreachable */ + ICMP6_DUR_ADDRESS = 3, + /** Port unreachable */ + ICMP6_DUR_PORT = 4, + /** Source address failed ingress/egress policy */ + ICMP6_DUR_POLICY = 5, + /** Reject route to destination */ + ICMP6_DUR_REJECT_ROUTE = 6 +}; + +/** ICMP time exceeded codes */ +enum icmp6_te_code { + /** Hop limit exceeded in transit */ + ICMP6_TE_HL = 0, + /** Fragment reassembly time exceeded */ + ICMP6_TE_FRAG = 1 +}; + +/** ICMP parameter code */ +enum icmp6_pp_code { + /** Erroneous header field encountered */ + ICMP6_PP_FIELD = 0, + /** Unrecognized next header type encountered */ + ICMP6_PP_HEADER = 1, + /** Unrecognized IPv6 option encountered */ + ICMP6_PP_OPTION = 2 +}; + +/** This is the standard ICMP6 header. 
*/ +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +struct icmp6_hdr { + PACK_STRUCT_FLD_8(u8_t type); + PACK_STRUCT_FLD_8(u8_t code); + PACK_STRUCT_FIELD(u16_t chksum); + PACK_STRUCT_FIELD(u32_t data); +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif + +/** This is the ICMP6 header adapted for echo req/resp. */ +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +struct icmp6_echo_hdr { + PACK_STRUCT_FLD_8(u8_t type); + PACK_STRUCT_FLD_8(u8_t code); + PACK_STRUCT_FIELD(u16_t chksum); + PACK_STRUCT_FIELD(u16_t id); + PACK_STRUCT_FIELD(u16_t seqno); +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_PROT_ICMP6_H */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/prot/ieee.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/prot/ieee.h new file mode 100644 index 0000000..ef651b1 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/prot/ieee.h @@ -0,0 +1,91 @@ +/** + * @file + * IEEE assigned numbers + * + * @defgroup ieee IEEE assigned numbers + * @ingroup infrastructure + */ + +/* + * Copyright (c) 2017 Dirk Ziegelmeier. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Dirk Ziegelmeier + * + */ + +#ifndef LWIP_HDR_PROT_IEEE_H +#define LWIP_HDR_PROT_IEEE_H + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * @ingroup ieee + * A list of often ethtypes (although lwIP does not use all of them). 
+ */ +enum lwip_ieee_eth_type { + /** Internet protocol v4 */ + ETHTYPE_IP = 0x0800U, + /** Address resolution protocol */ + ETHTYPE_ARP = 0x0806U, + /** Wake on lan */ + ETHTYPE_WOL = 0x0842U, + /** RARP */ + ETHTYPE_RARP = 0x8035U, + /** Virtual local area network */ + ETHTYPE_VLAN = 0x8100U, + /** Internet protocol v6 */ + ETHTYPE_IPV6 = 0x86DDU, + /** PPP Over Ethernet Discovery Stage */ + ETHTYPE_PPPOEDISC = 0x8863U, + /** PPP Over Ethernet Session Stage */ + ETHTYPE_PPPOE = 0x8864U, + /** Jumbo Frames */ + ETHTYPE_JUMBO = 0x8870U, + /** Process field network */ + ETHTYPE_PROFINET = 0x8892U, + /** Ethernet for control automation technology */ + ETHTYPE_ETHERCAT = 0x88A4U, + /** Link layer discovery protocol */ + ETHTYPE_LLDP = 0x88CCU, + /** Serial real-time communication system */ + ETHTYPE_SERCOS = 0x88CDU, + /** Media redundancy protocol */ + ETHTYPE_MRP = 0x88E3U, + /** Precision time protocol */ + ETHTYPE_PTP = 0x88F7U, + /** Q-in-Q, 802.1ad */ + ETHTYPE_QINQ = 0x9100U +}; + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_PROT_IEEE_H */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/prot/igmp.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/prot/igmp.h new file mode 100644 index 0000000..41e4e67 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/prot/igmp.h @@ -0,0 +1,90 @@ +/** + * @file + * IGMP protocol definitions + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ +#ifndef LWIP_HDR_PROT_IGMP_H +#define LWIP_HDR_PROT_IGMP_H + +#include "lwip/arch.h" +#include "lwip/prot/ip4.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * IGMP constants + */ +#define IGMP_TTL 1 +#define IGMP_MINLEN 8 +#define ROUTER_ALERT 0x9404U +#define ROUTER_ALERTLEN 4 + +/* + * IGMP message types, including version number. 
+ */ +#define IGMP_MEMB_QUERY 0x11 /* Membership query */ +#define IGMP_V1_MEMB_REPORT 0x12 /* Ver. 1 membership report */ +#define IGMP_V2_MEMB_REPORT 0x16 /* Ver. 2 membership report */ +#define IGMP_LEAVE_GROUP 0x17 /* Leave-group message */ + +/* Group membership states */ +#define IGMP_GROUP_NON_MEMBER 0 +#define IGMP_GROUP_DELAYING_MEMBER 1 +#define IGMP_GROUP_IDLE_MEMBER 2 + +/** + * IGMP packet format. + */ +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +struct igmp_msg { + PACK_STRUCT_FLD_8(u8_t igmp_msgtype); + PACK_STRUCT_FLD_8(u8_t igmp_maxresp); + PACK_STRUCT_FIELD(u16_t igmp_checksum); + PACK_STRUCT_FLD_S(ip4_addr_p_t igmp_group_address); +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_PROT_IGMP_H */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/prot/ip.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/prot/ip.h new file mode 100644 index 0000000..3ce496c --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/prot/ip.h @@ -0,0 +1,59 @@ +/** + * @file + * IP protocol definitions + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
+ * + * Author: Adam Dunkels + * + */ +#ifndef LWIP_HDR_PROT_IP_H +#define LWIP_HDR_PROT_IP_H + +#include "lwip/arch.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#define IP_PROTO_ICMP 1 +#define IP_PROTO_IGMP 2 +#define IP_PROTO_UDP 17 +#define IP_PROTO_UDPLITE 136 +#define IP_PROTO_TCP 6 + +/** This operates on a void* by loading the first byte */ +#define IP_HDR_GET_VERSION(ptr) ((*(u8_t*)(ptr)) >> 4) + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_PROT_IP_H */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/prot/ip4.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/prot/ip4.h new file mode 100644 index 0000000..4635a7a --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/prot/ip4.h @@ -0,0 +1,131 @@ +/** + * @file + * IPv4 protocol definitions + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ +#ifndef LWIP_HDR_PROT_IP4_H +#define LWIP_HDR_PROT_IP4_H + +#include "lwip/arch.h" +#include "lwip/ip4_addr.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/** This is the packed version of ip4_addr_t, + used in network headers that are itself packed */ +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +struct ip4_addr_packed { + PACK_STRUCT_FIELD(u32_t addr); +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif + +typedef struct ip4_addr_packed ip4_addr_p_t; + +/* Size of the IPv4 header. Same as 'sizeof(struct ip_hdr)'. */ +#define IP_HLEN 20 +/* Maximum size of the IPv4 header with options. 
*/ +#define IP_HLEN_MAX 60 + +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +/* The IPv4 header */ +struct ip_hdr { + /* version / header length */ + PACK_STRUCT_FLD_8(u8_t _v_hl); + /* type of service */ + PACK_STRUCT_FLD_8(u8_t _tos); + /* total length */ + PACK_STRUCT_FIELD(u16_t _len); + /* identification */ + PACK_STRUCT_FIELD(u16_t _id); + /* fragment offset field */ + PACK_STRUCT_FIELD(u16_t _offset); +#define IP_RF 0x8000U /* reserved fragment flag */ +#define IP_DF 0x4000U /* don't fragment flag */ +#define IP_MF 0x2000U /* more fragments flag */ +#define IP_OFFMASK 0x1fffU /* mask for fragmenting bits */ + /* time to live */ + PACK_STRUCT_FLD_8(u8_t _ttl); + /* protocol*/ + PACK_STRUCT_FLD_8(u8_t _proto); + /* checksum */ + PACK_STRUCT_FIELD(u16_t _chksum); + /* source and destination IP addresses */ + PACK_STRUCT_FLD_S(ip4_addr_p_t src); + PACK_STRUCT_FLD_S(ip4_addr_p_t dest); +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif + +/* Macros to get struct ip_hdr fields: */ +#define IPH_V(hdr) ((hdr)->_v_hl >> 4) +#define IPH_HL(hdr) ((hdr)->_v_hl & 0x0f) +#define IPH_HL_BYTES(hdr) ((u8_t)(IPH_HL(hdr) * 4)) +#define IPH_TOS(hdr) ((hdr)->_tos) +#define IPH_LEN(hdr) ((hdr)->_len) +#define IPH_ID(hdr) ((hdr)->_id) +#define IPH_OFFSET(hdr) ((hdr)->_offset) +#define IPH_OFFSET_BYTES(hdr) ((u16_t)((lwip_ntohs(IPH_OFFSET(hdr)) & IP_OFFMASK) * 8U)) +#define IPH_TTL(hdr) ((hdr)->_ttl) +#define IPH_PROTO(hdr) ((hdr)->_proto) +#define IPH_CHKSUM(hdr) ((hdr)->_chksum) + +/* Macros to set struct ip_hdr fields: */ +#define IPH_VHL_SET(hdr, v, hl) (hdr)->_v_hl = (u8_t)((((v) << 4) | (hl))) +#define IPH_TOS_SET(hdr, tos) (hdr)->_tos = (tos) +#define IPH_LEN_SET(hdr, len) (hdr)->_len = (len) +#define IPH_ID_SET(hdr, id) (hdr)->_id = (id) +#define IPH_OFFSET_SET(hdr, off) (hdr)->_offset = (off) +#define IPH_TTL_SET(hdr, ttl) (hdr)->_ttl = (u8_t)(ttl) +#define IPH_PROTO_SET(hdr, proto) (hdr)->_proto = (u8_t)(proto) +#define IPH_CHKSUM_SET(hdr, chksum) (hdr)->_chksum = (chksum) + + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_PROT_IP4_H */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/prot/ip6.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/prot/ip6.h new file mode 100644 index 0000000..272d6ce --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/prot/ip6.h @@ -0,0 +1,233 @@ +/** + * @file + * IPv6 protocol definitions + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. 
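/* --- Editor's illustrative sketch; not part of the lwIP sources or of this
 * diff. It shows the intended use of the IPH_* accessors defined just above:
 * every multi-byte field of struct ip_hdr stays in network byte order, so it
 * is converted with lwip_ntohs() when read. The helper name is_udp_ipv4 is
 * made up for this example. --- */
#include "lwip/def.h"
#include "lwip/prot/ip.h"
#include "lwip/prot/ip4.h"

static int is_udp_ipv4(const struct ip_hdr *iphdr)
{
  if (IPH_V(iphdr) != 4) {
    return 0;                               /* not an IPv4 header */
  }
  u16_t total_len  = lwip_ntohs(IPH_LEN(iphdr));
  u8_t  header_len = IPH_HL_BYTES(iphdr);   /* 20..60 bytes (IP_HLEN..IP_HLEN_MAX) */
  if ((header_len < IP_HLEN) || (header_len > total_len)) {
    return 0;                               /* malformed header */
  }
  return IPH_PROTO(iphdr) == IP_PROTO_UDP;  /* 17, from lwip/prot/ip.h */
}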
+ * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ +#ifndef LWIP_HDR_PROT_IP6_H +#define LWIP_HDR_PROT_IP6_H + +#include "lwip/arch.h" +#include "lwip/ip6_addr.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/** This is the packed version of ip6_addr_t, + used in network headers that are itself packed */ +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +struct ip6_addr_packed { + PACK_STRUCT_FIELD(u32_t addr[4]); +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif +typedef struct ip6_addr_packed ip6_addr_p_t; + +#define IP6_HLEN 40 + +#define IP6_NEXTH_HOPBYHOP 0 +#define IP6_NEXTH_TCP 6 +#define IP6_NEXTH_UDP 17 +#define IP6_NEXTH_ENCAPS 41 +#define IP6_NEXTH_ROUTING 43 +#define IP6_NEXTH_FRAGMENT 44 +#define IP6_NEXTH_ICMP6 58 +#define IP6_NEXTH_NONE 59 +#define IP6_NEXTH_DESTOPTS 60 +#define IP6_NEXTH_UDPLITE 136 + +/** The IPv6 header. */ +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +struct ip6_hdr { + /** version / traffic class / flow label */ + PACK_STRUCT_FIELD(u32_t _v_tc_fl); + /** payload length */ + PACK_STRUCT_FIELD(u16_t _plen); + /** next header */ + PACK_STRUCT_FLD_8(u8_t _nexth); + /** hop limit */ + PACK_STRUCT_FLD_8(u8_t _hoplim); + /** source and destination IP addresses */ + PACK_STRUCT_FLD_S(ip6_addr_p_t src); + PACK_STRUCT_FLD_S(ip6_addr_p_t dest); +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif +#define IP6H_V(hdr) ((lwip_ntohl((hdr)->_v_tc_fl) >> 28) & 0x0f) +#define IP6H_TC(hdr) ((lwip_ntohl((hdr)->_v_tc_fl) >> 20) & 0xff) +#define IP6H_FL(hdr) (lwip_ntohl((hdr)->_v_tc_fl) & 0x000fffff) +#define IP6H_PLEN(hdr) (lwip_ntohs((hdr)->_plen)) +#define IP6H_NEXTH(hdr) ((hdr)->_nexth) +#define IP6H_NEXTH_P(hdr) ((u8_t *)(hdr) + 6) +#define IP6H_HOPLIM(hdr) ((hdr)->_hoplim) +#define IP6H_VTCFL_SET(hdr, v, tc, fl) (hdr)->_v_tc_fl = (lwip_htonl((((u32_t)(v)) << 28) | (((u32_t)(tc)) << 20) | (fl))) +#define IP6H_PLEN_SET(hdr, plen) (hdr)->_plen = lwip_htons(plen) +#define IP6H_NEXTH_SET(hdr, nexth) (hdr)->_nexth = (nexth) +#define IP6H_HOPLIM_SET(hdr, hl) (hdr)->_hoplim = (u8_t)(hl) + +/* ipv6 extended options header */ +#define IP6_PAD1_OPTION 0 +#define IP6_PADN_OPTION 1 +#define IP6_ROUTER_ALERT_OPTION 5 +#define IP6_JUMBO_OPTION 194 +#define IP6_HOME_ADDRESS_OPTION 201 +#define IP6_ROUTER_ALERT_DLEN 2 +#define IP6_ROUTER_ALERT_VALUE_MLD 0 + +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +struct ip6_opt_hdr { + /* router alert option type */ + PACK_STRUCT_FLD_8(u8_t _opt_type); + /* router alert option data len */ + PACK_STRUCT_FLD_8(u8_t _opt_dlen); +} 
PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif +#define IP6_OPT_HLEN 2 +#define IP6_OPT_TYPE_ACTION(hdr) ((((hdr)->_opt_type) >> 6) & 0x3) +#define IP6_OPT_TYPE_CHANGE(hdr) ((((hdr)->_opt_type) >> 5) & 0x1) +#define IP6_OPT_TYPE(hdr) ((hdr)->_opt_type) +#define IP6_OPT_DLEN(hdr) ((hdr)->_opt_dlen) + +/* Hop-by-Hop header. */ +#define IP6_HBH_HLEN 2 + +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +struct ip6_hbh_hdr { + /* next header */ + PACK_STRUCT_FLD_8(u8_t _nexth); + /* header length in 8-octet units */ + PACK_STRUCT_FLD_8(u8_t _hlen); +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif +#define IP6_HBH_NEXTH(hdr) ((hdr)->_nexth) + +/* Destination header. */ +#define IP6_DEST_HLEN 2 + +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +struct ip6_dest_hdr { + /* next header */ + PACK_STRUCT_FLD_8(u8_t _nexth); + /* header length in 8-octet units */ + PACK_STRUCT_FLD_8(u8_t _hlen); +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif +#define IP6_DEST_NEXTH(hdr) ((hdr)->_nexth) + +/* Routing header */ +#define IP6_ROUT_TYPE2 2 +#define IP6_ROUT_RPL 3 + +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +struct ip6_rout_hdr { + /* next header */ + PACK_STRUCT_FLD_8(u8_t _nexth); + /* reserved */ + PACK_STRUCT_FLD_8(u8_t _hlen); + /* fragment offset */ + PACK_STRUCT_FIELD(u8_t _routing_type); + /* fragmented packet identification */ + PACK_STRUCT_FIELD(u8_t _segments_left); +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif +#define IP6_ROUT_NEXTH(hdr) ((hdr)->_nexth) +#define IP6_ROUT_TYPE(hdr) ((hdr)->_routing_type) +#define IP6_ROUT_SEG_LEFT(hdr) ((hdr)->_segments_left) + +/* Fragment header. */ +#define IP6_FRAG_HLEN 8 +#define IP6_FRAG_OFFSET_MASK 0xfff8 +#define IP6_FRAG_MORE_FLAG 0x0001 + +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +struct ip6_frag_hdr { + /* next header */ + PACK_STRUCT_FLD_8(u8_t _nexth); + /* reserved */ + PACK_STRUCT_FLD_8(u8_t reserved); + /* fragment offset */ + PACK_STRUCT_FIELD(u16_t _fragment_offset); + /* fragmented packet identification */ + PACK_STRUCT_FIELD(u32_t _identification); +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif +#define IP6_FRAG_NEXTH(hdr) ((hdr)->_nexth) +#define IP6_FRAG_MBIT(hdr) (lwip_ntohs((hdr)->_fragment_offset) & 0x1) +#define IP6_FRAG_ID(hdr) (lwip_ntohl((hdr)->_identification)) + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_PROT_IP6_H */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/prot/mld6.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/prot/mld6.h new file mode 100644 index 0000000..27a63bf --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/prot/mld6.h @@ -0,0 +1,71 @@ +/** + * @file + * MLD6 protocol definitions + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. 
Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ +#ifndef LWIP_HDR_PROT_MLD6_H +#define LWIP_HDR_PROT_MLD6_H + +#include "lwip/arch.h" +#include "lwip/prot/ip6.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#define MLD6_HBH_HLEN 8 +/** Multicast listener report/query/done message header. */ +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +struct mld_header { + PACK_STRUCT_FLD_8(u8_t type); + PACK_STRUCT_FLD_8(u8_t code); + PACK_STRUCT_FIELD(u16_t chksum); + PACK_STRUCT_FIELD(u16_t max_resp_delay); + PACK_STRUCT_FIELD(u16_t reserved); + PACK_STRUCT_FLD_S(ip6_addr_p_t multicast_address); + /* Options follow. */ +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_PROT_MLD6_H */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/prot/nd6.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/prot/nd6.h new file mode 100644 index 0000000..af87e29 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/prot/nd6.h @@ -0,0 +1,274 @@ +/** + * @file + * ND6 protocol definitions + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ +#ifndef LWIP_HDR_PROT_ND6_H +#define LWIP_HDR_PROT_ND6_H + +#include "lwip/arch.h" +#include "lwip/ip6_addr.h" +#include "lwip/prot/ip6.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/** Neighbor solicitation message header. */ +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +struct ns_header { + PACK_STRUCT_FLD_8(u8_t type); + PACK_STRUCT_FLD_8(u8_t code); + PACK_STRUCT_FIELD(u16_t chksum); + PACK_STRUCT_FIELD(u32_t reserved); + PACK_STRUCT_FLD_S(ip6_addr_p_t target_address); + /* Options follow. */ +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif + +/** Neighbor advertisement message header. */ +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +struct na_header { + PACK_STRUCT_FLD_8(u8_t type); + PACK_STRUCT_FLD_8(u8_t code); + PACK_STRUCT_FIELD(u16_t chksum); + PACK_STRUCT_FLD_8(u8_t flags); + PACK_STRUCT_FLD_8(u8_t reserved[3]); + PACK_STRUCT_FLD_S(ip6_addr_p_t target_address); + /* Options follow. */ +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif +#define ND6_FLAG_ROUTER (0x80) +#define ND6_FLAG_SOLICITED (0x40) +#define ND6_FLAG_OVERRIDE (0x20) + +/** Router solicitation message header. */ +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +struct rs_header { + PACK_STRUCT_FLD_8(u8_t type); + PACK_STRUCT_FLD_8(u8_t code); + PACK_STRUCT_FIELD(u16_t chksum); + PACK_STRUCT_FIELD(u32_t reserved); + /* Options follow. */ +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif + +/** Router advertisement message header. */ +#define ND6_RA_FLAG_MANAGED_ADDR_CONFIG (0x80) +#define ND6_RA_FLAG_OTHER_CONFIG (0x40) +#define ND6_RA_FLAG_HOME_AGENT (0x20) +#define ND6_RA_PREFERENCE_MASK (0x18) +#define ND6_RA_PREFERENCE_HIGH (0x08) +#define ND6_RA_PREFERENCE_MEDIUM (0x00) +#define ND6_RA_PREFERENCE_LOW (0x18) +#define ND6_RA_PREFERENCE_DISABLED (0x10) +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +struct ra_header { + PACK_STRUCT_FLD_8(u8_t type); + PACK_STRUCT_FLD_8(u8_t code); + PACK_STRUCT_FIELD(u16_t chksum); + PACK_STRUCT_FLD_8(u8_t current_hop_limit); + PACK_STRUCT_FLD_8(u8_t flags); + PACK_STRUCT_FIELD(u16_t router_lifetime); + PACK_STRUCT_FIELD(u32_t reachable_time); + PACK_STRUCT_FIELD(u32_t retrans_timer); + /* Options follow. */ +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif + +/** Redirect message header. 
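/* --- Editor's illustrative sketch; not part of the lwIP sources or of this
 * diff. It reads a Router Advertisement using the ra_header layout and the
 * ND6_RA_* flag masks defined above; the 16-bit router_lifetime field is in
 * network byte order. The helper name ra_wants_dhcpv6 is made up for this
 * example. --- */
#include "lwip/def.h"
#include "lwip/prot/nd6.h"

static int ra_wants_dhcpv6(const struct ra_header *ra, u16_t *lifetime_s)
{
  *lifetime_s = lwip_ntohs(ra->router_lifetime);  /* seconds; 0 = not a default router */
  /* "Managed" asks hosts to use DHCPv6 for addresses, "Other" asks for
     DHCPv6 only for other configuration such as DNS. */
  return (ra->flags & (ND6_RA_FLAG_MANAGED_ADDR_CONFIG | ND6_RA_FLAG_OTHER_CONFIG)) != 0;
}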
*/ +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +struct redirect_header { + PACK_STRUCT_FLD_8(u8_t type); + PACK_STRUCT_FLD_8(u8_t code); + PACK_STRUCT_FIELD(u16_t chksum); + PACK_STRUCT_FIELD(u32_t reserved); + PACK_STRUCT_FLD_S(ip6_addr_p_t target_address); + PACK_STRUCT_FLD_S(ip6_addr_p_t destination_address); + /* Options follow. */ +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif + +/** Link-layer address option. */ +#define ND6_OPTION_TYPE_SOURCE_LLADDR (0x01) +#define ND6_OPTION_TYPE_TARGET_LLADDR (0x02) +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +struct lladdr_option { + PACK_STRUCT_FLD_8(u8_t type); + PACK_STRUCT_FLD_8(u8_t length); + PACK_STRUCT_FLD_8(u8_t addr[NETIF_MAX_HWADDR_LEN]); +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif + +/** Prefix information option. */ +#define ND6_OPTION_TYPE_PREFIX_INFO (0x03) +#define ND6_PREFIX_FLAG_ON_LINK (0x80) +#define ND6_PREFIX_FLAG_AUTONOMOUS (0x40) +#define ND6_PREFIX_FLAG_ROUTER_ADDRESS (0x20) +#define ND6_PREFIX_FLAG_SITE_PREFIX (0x10) +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +struct prefix_option { + PACK_STRUCT_FLD_8(u8_t type); + PACK_STRUCT_FLD_8(u8_t length); + PACK_STRUCT_FLD_8(u8_t prefix_length); + PACK_STRUCT_FLD_8(u8_t flags); + PACK_STRUCT_FIELD(u32_t valid_lifetime); + PACK_STRUCT_FIELD(u32_t preferred_lifetime); + PACK_STRUCT_FLD_8(u8_t reserved2[3]); + PACK_STRUCT_FLD_8(u8_t site_prefix_length); + PACK_STRUCT_FLD_S(ip6_addr_p_t prefix); +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif + +/** Redirected header option. */ +#define ND6_OPTION_TYPE_REDIR_HDR (0x04) +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +struct redirected_header_option { + PACK_STRUCT_FLD_8(u8_t type); + PACK_STRUCT_FLD_8(u8_t length); + PACK_STRUCT_FLD_8(u8_t reserved[6]); + /* Portion of redirected packet follows. */ + /* PACK_STRUCT_FLD_8(u8_t redirected[8]); */ +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif + +/** MTU option. */ +#define ND6_OPTION_TYPE_MTU (0x05) +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +struct mtu_option { + PACK_STRUCT_FLD_8(u8_t type); + PACK_STRUCT_FLD_8(u8_t length); + PACK_STRUCT_FIELD(u16_t reserved); + PACK_STRUCT_FIELD(u32_t mtu); +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif + +/** Route information option. */ +#define ND6_OPTION_TYPE_ROUTE_INFO (24) +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +struct route_option { + PACK_STRUCT_FLD_8(u8_t type); + PACK_STRUCT_FLD_8(u8_t length); + PACK_STRUCT_FLD_8(u8_t prefix_length); + PACK_STRUCT_FLD_8(u8_t preference); + PACK_STRUCT_FIELD(u32_t route_lifetime); + PACK_STRUCT_FLD_S(ip6_addr_p_t prefix); +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif + +/** Recursive DNS Server Option. 
*/ +#define ND6_OPTION_TYPE_RDNSS (25) +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +struct rdnss_option { + PACK_STRUCT_FLD_8(u8_t type); + PACK_STRUCT_FLD_8(u8_t length); + PACK_STRUCT_FIELD(u16_t reserved); + PACK_STRUCT_FIELD(u32_t lifetime); + PACK_STRUCT_FLD_S(ip6_addr_p_t rdnss_address[1]); +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif + +#define SIZEOF_RDNSS_OPTION_BASE 8 /* size without addresses */ + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_PROT_ND6_H */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/prot/tcp.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/prot/tcp.h new file mode 100644 index 0000000..f5a3dc8 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/prot/tcp.h @@ -0,0 +1,100 @@ +/** + * @file + * TCP protocol definitions + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ +#ifndef LWIP_HDR_PROT_TCP_H +#define LWIP_HDR_PROT_TCP_H + +#include "lwip/arch.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* Length of the TCP header, excluding options. */ +#define TCP_HLEN 20 + +/* Fields are (of course) in network byte order. + * Some fields are converted to host byte order in tcp_input(). 
+ */ +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +struct tcp_hdr { + PACK_STRUCT_FIELD(u16_t src); + PACK_STRUCT_FIELD(u16_t dest); + PACK_STRUCT_FIELD(u32_t seqno); + PACK_STRUCT_FIELD(u32_t ackno); + PACK_STRUCT_FIELD(u16_t _hdrlen_rsvd_flags); + PACK_STRUCT_FIELD(u16_t wnd); + PACK_STRUCT_FIELD(u16_t chksum); + PACK_STRUCT_FIELD(u16_t urgp); +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif + +/* TCP header flags bits */ +#define TCP_FIN 0x01U +#define TCP_SYN 0x02U +#define TCP_RST 0x04U +#define TCP_PSH 0x08U +#define TCP_ACK 0x10U +#define TCP_URG 0x20U +#define TCP_ECE 0x40U +#define TCP_CWR 0x80U +/* Valid TCP header flags */ +#define TCP_FLAGS 0x3fU + +#define TCP_MAX_OPTION_BYTES 40 + +#define TCPH_HDRLEN(phdr) ((u16_t)(lwip_ntohs((phdr)->_hdrlen_rsvd_flags) >> 12)) +#define TCPH_HDRLEN_BYTES(phdr) ((u8_t)(TCPH_HDRLEN(phdr) << 2)) +#define TCPH_FLAGS(phdr) ((u8_t)((lwip_ntohs((phdr)->_hdrlen_rsvd_flags) & TCP_FLAGS))) + +#define TCPH_HDRLEN_SET(phdr, len) (phdr)->_hdrlen_rsvd_flags = lwip_htons(((len) << 12) | TCPH_FLAGS(phdr)) +#define TCPH_FLAGS_SET(phdr, flags) (phdr)->_hdrlen_rsvd_flags = (((phdr)->_hdrlen_rsvd_flags & PP_HTONS(~TCP_FLAGS)) | lwip_htons(flags)) +#define TCPH_HDRLEN_FLAGS_SET(phdr, len, flags) (phdr)->_hdrlen_rsvd_flags = (u16_t)(lwip_htons((u16_t)((len) << 12) | (flags))) + +#define TCPH_SET_FLAG(phdr, flags ) (phdr)->_hdrlen_rsvd_flags = ((phdr)->_hdrlen_rsvd_flags | lwip_htons(flags)) +#define TCPH_UNSET_FLAG(phdr, flags) (phdr)->_hdrlen_rsvd_flags = ((phdr)->_hdrlen_rsvd_flags & ~lwip_htons(flags)) + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_PROT_TCP_H */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/prot/udp.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/prot/udp.h new file mode 100644 index 0000000..4904b7e --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/prot/udp.h @@ -0,0 +1,68 @@ +/** + * @file + * UDP protocol definitions + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
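/* --- Editor's illustrative sketch; not part of the lwIP sources or of this
 * diff. It decodes the combined header-length/flags word of struct tcp_hdr
 * with the TCPH_* macros defined just above; the word is stored in network
 * byte order and the macros apply lwip_ntohs() internally. The helper name
 * is_syn_only is made up for this example. --- */
#include "lwip/def.h"
#include "lwip/prot/tcp.h"

static int is_syn_only(const struct tcp_hdr *tcphdr)
{
  u8_t flags   = TCPH_FLAGS(tcphdr);
  u8_t hdr_len = TCPH_HDRLEN_BYTES(tcphdr);   /* 20..60 bytes, options included */
  if (hdr_len < TCP_HLEN) {
    return 0;                                 /* malformed header */
  }
  /* A connection attempt carries SYN without ACK/RST/FIN. */
  return (flags & (TCP_SYN | TCP_ACK | TCP_RST | TCP_FIN)) == TCP_SYN;
}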
IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ +#ifndef LWIP_HDR_PROT_UDP_H +#define LWIP_HDR_PROT_UDP_H + +#include "lwip/arch.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#define UDP_HLEN 8 + +/* Fields are (of course) in network byte order. */ +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +struct udp_hdr { + PACK_STRUCT_FIELD(u16_t src); + PACK_STRUCT_FIELD(u16_t dest); /* src/dest UDP ports */ + PACK_STRUCT_FIELD(u16_t len); + PACK_STRUCT_FIELD(u16_t chksum); +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_PROT_UDP_H */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/raw.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/raw.h new file mode 100644 index 0000000..1be1273 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/raw.h @@ -0,0 +1,143 @@ +/** + * @file + * raw API (to be used from TCPIP thread)\n + * See also @ref raw_raw + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
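/* --- Editor's illustrative sketch; not part of the lwIP sources or of this
 * diff. As the comment above struct udp_hdr notes, its fields are kept in
 * network byte order, so ports and length are read through lwip_ntohs().
 * The helper name udp_hdr_ports is made up for this example. --- */
#include "lwip/def.h"
#include "lwip/prot/udp.h"

static void udp_hdr_ports(const struct udp_hdr *udphdr, u16_t *src, u16_t *dest)
{
  *src  = lwip_ntohs(udphdr->src);
  *dest = lwip_ntohs(udphdr->dest);
  /* lwip_ntohs(udphdr->len) would give the datagram length incl. the 8-byte header. */
}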
+ * + * Author: Adam Dunkels + * + */ +#ifndef LWIP_HDR_RAW_H +#define LWIP_HDR_RAW_H + +#include "lwip/opt.h" + +#if LWIP_RAW /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/pbuf.h" +#include "lwip/def.h" +#include "lwip/ip.h" +#include "lwip/ip_addr.h" +#include "lwip/ip6_addr.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#define RAW_FLAGS_CONNECTED 0x01U +#define RAW_FLAGS_HDRINCL 0x02U +#define RAW_FLAGS_MULTICAST_LOOP 0x04U + +struct raw_pcb; + +/** Function prototype for raw pcb receive callback functions. + * @param arg user supplied argument (raw_pcb.recv_arg) + * @param pcb the raw_pcb which received data + * @param p the packet buffer that was received + * @param addr the remote IP address from which the packet was received + * @return 1 if the packet was 'eaten' (aka. deleted), + * 0 if the packet lives on + * If returning 1, the callback is responsible for freeing the pbuf + * if it's not used any more. + */ +typedef u8_t (*raw_recv_fn)(void *arg, struct raw_pcb *pcb, struct pbuf *p, + const ip_addr_t *addr); + +/** the RAW protocol control block */ +struct raw_pcb { + /* Common members of all PCB types */ + IP_PCB; + + struct raw_pcb *next; + + u8_t protocol; + u8_t flags; + +#if LWIP_MULTICAST_TX_OPTIONS + /** outgoing network interface for multicast packets, by interface index (if nonzero) */ + u8_t mcast_ifindex; + /** TTL for outgoing multicast packets */ + u8_t mcast_ttl; +#endif /* LWIP_MULTICAST_TX_OPTIONS */ + + /** receive callback function */ + raw_recv_fn recv; + /* user-supplied argument for the recv callback */ + void *recv_arg; +#if LWIP_IPV6 + /* fields for handling checksum computations as per RFC3542. */ + u16_t chksum_offset; + u8_t chksum_reqd; +#endif +}; + +/* The following functions is the application layer interface to the + RAW code. */ +struct raw_pcb * raw_new (u8_t proto); +struct raw_pcb * raw_new_ip_type(u8_t type, u8_t proto); +void raw_remove (struct raw_pcb *pcb); +err_t raw_bind (struct raw_pcb *pcb, const ip_addr_t *ipaddr); +void raw_bind_netif (struct raw_pcb *pcb, const struct netif *netif); +err_t raw_connect (struct raw_pcb *pcb, const ip_addr_t *ipaddr); +void raw_disconnect (struct raw_pcb *pcb); + +err_t raw_sendto (struct raw_pcb *pcb, struct pbuf *p, const ip_addr_t *ipaddr); +err_t raw_sendto_if_src(struct raw_pcb *pcb, struct pbuf *p, const ip_addr_t *dst_ip, struct netif *netif, const ip_addr_t *src_ip); +err_t raw_send (struct raw_pcb *pcb, struct pbuf *p); + +void raw_recv (struct raw_pcb *pcb, raw_recv_fn recv, void *recv_arg); + +#define raw_flags(pcb) ((pcb)->flags) +#define raw_setflags(pcb,f) ((pcb)->flags = (f)) + +#define raw_set_flags(pcb, set_flags) do { (pcb)->flags = (u8_t)((pcb)->flags | (set_flags)); } while(0) +#define raw_clear_flags(pcb, clr_flags) do { (pcb)->flags = (u8_t)((pcb)->flags & (u8_t)(~(clr_flags) & 0xff)); } while(0) +#define raw_is_flag_set(pcb, flag) (((pcb)->flags & (flag)) != 0) + +#define raw_init() /* Compatibility define, no init needed. 
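/* --- Editor's illustrative sketch; not part of the lwIP sources or of this
 * diff. Minimal use of the raw API declared above: open a PCB for ICMP, bind
 * it to any local address and install a receive callback. As the raw.h file
 * comment says, this API is meant to be used from the TCPIP thread. The names
 * icmp_recv_cb and open_icmp_pcb are made up for this example. --- */
#include "lwip/raw.h"
#include "lwip/prot/ip.h"

static u8_t icmp_recv_cb(void *arg, struct raw_pcb *pcb, struct pbuf *p,
                         const ip_addr_t *addr)
{
  LWIP_UNUSED_ARG(arg);
  LWIP_UNUSED_ARG(pcb);
  LWIP_UNUSED_ARG(addr);
  pbuf_free(p);   /* we consumed the packet ...                              */
  return 1;       /* ... so return 1 ("eaten"), as documented for raw_recv_fn */
}

static struct raw_pcb *open_icmp_pcb(void)
{
  struct raw_pcb *pcb = raw_new(IP_PROTO_ICMP);
  if (pcb != NULL) {
    raw_bind(pcb, IP_ADDR_ANY);
    raw_recv(pcb, icmp_recv_cb, NULL);
  }
  return pcb;
}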
*/ + +/* for compatibility with older implementation */ +#define raw_new_ip6(proto) raw_new_ip_type(IPADDR_TYPE_V6, proto) + +#if LWIP_MULTICAST_TX_OPTIONS +#define raw_set_multicast_netif_index(pcb, idx) ((pcb)->mcast_ifindex = (idx)) +#define raw_get_multicast_netif_index(pcb) ((pcb)->mcast_ifindex) +#define raw_set_multicast_ttl(pcb, value) ((pcb)->mcast_ttl = (value)) +#define raw_get_multicast_ttl(pcb) ((pcb)->mcast_ttl) +#endif /* LWIP_MULTICAST_TX_OPTIONS */ + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_RAW */ + +#endif /* LWIP_HDR_RAW_H */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/sio.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/sio.h new file mode 100644 index 0000000..efa8ade --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/sio.h @@ -0,0 +1,142 @@ +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + */ + +/* + * This is the interface to the platform specific serial IO module + * It needs to be implemented by those platforms which need SLIP or PPP + */ + +#ifndef SIO_H +#define SIO_H + +#include "lwip/arch.h" +#include "lwip/opt.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* If you want to define sio_fd_t elsewhere or differently, + define this in your cc.h file. */ +#ifndef __sio_fd_t_defined +typedef void * sio_fd_t; +#endif + +/* The following functions can be defined to something else in your cc.h file + or be implemented in your custom sio.c file. */ + +#ifndef sio_open +/** + * Opens a serial device for communication. + * + * @param devnum device number + * @return handle to serial device if successful, NULL otherwise + */ +sio_fd_t sio_open(u8_t devnum); +#endif + +#ifndef sio_send +/** + * Sends a single character to the serial device. + * + * @param c character to send + * @param fd serial device handle + * + * @note This function will block until the character can be sent. 
+ */ +void sio_send(u8_t c, sio_fd_t fd); +#endif + +#ifndef sio_recv +/** + * Receives a single character from the serial device. + * + * @param fd serial device handle + * + * @note This function will block until a character is received. + */ +u8_t sio_recv(sio_fd_t fd); +#endif + +#ifndef sio_read +/** + * Reads from the serial device. + * + * @param fd serial device handle + * @param data pointer to data buffer for receiving + * @param len maximum length (in bytes) of data to receive + * @return number of bytes actually received - may be 0 if aborted by sio_read_abort + * + * @note This function will block until data can be received. The blocking + * can be cancelled by calling sio_read_abort(). + */ +u32_t sio_read(sio_fd_t fd, u8_t *data, u32_t len); +#endif + +#ifndef sio_tryread +/** + * Tries to read from the serial device. Same as sio_read but returns + * immediately if no data is available and never blocks. + * + * @param fd serial device handle + * @param data pointer to data buffer for receiving + * @param len maximum length (in bytes) of data to receive + * @return number of bytes actually received + */ +u32_t sio_tryread(sio_fd_t fd, u8_t *data, u32_t len); +#endif + +#ifndef sio_write +/** + * Writes to the serial device. + * + * @param fd serial device handle + * @param data pointer to data to send + * @param len length (in bytes) of data to send + * @return number of bytes actually sent + * + * @note This function will block until all data can be sent. + */ +u32_t sio_write(sio_fd_t fd, u8_t *data, u32_t len); +#endif + +#ifndef sio_read_abort +/** + * Aborts a blocking sio_read() call. + * + * @param fd serial device handle + */ +void sio_read_abort(sio_fd_t fd); +#endif + +#ifdef __cplusplus +} +#endif + +#endif /* SIO_H */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/snmp.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/snmp.h new file mode 100644 index 0000000..80e6e57 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/snmp.h @@ -0,0 +1,213 @@ +/** + * @file + * SNMP support API for implementing netifs and statitics for MIB2 + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Dirk Ziegelmeier + * + */ +#ifndef LWIP_HDR_SNMP_H +#define LWIP_HDR_SNMP_H + +#include "lwip/opt.h" +#include "lwip/ip_addr.h" + +#ifdef __cplusplus +extern "C" { +#endif + +struct udp_pcb; +struct netif; + +/** + * @defgroup netif_mib2 MIB2 statistics + * @ingroup netif + */ + +/* MIB2 statistics functions */ +#if MIB2_STATS /* don't build if not configured for use in lwipopts.h */ +/** + * @ingroup netif_mib2 + * @see RFC1213, "MIB-II, 6. Definitions" + */ +enum snmp_ifType { + snmp_ifType_other=1, /* none of the following */ + snmp_ifType_regular1822, + snmp_ifType_hdh1822, + snmp_ifType_ddn_x25, + snmp_ifType_rfc877_x25, + snmp_ifType_ethernet_csmacd, + snmp_ifType_iso88023_csmacd, + snmp_ifType_iso88024_tokenBus, + snmp_ifType_iso88025_tokenRing, + snmp_ifType_iso88026_man, + snmp_ifType_starLan, + snmp_ifType_proteon_10Mbit, + snmp_ifType_proteon_80Mbit, + snmp_ifType_hyperchannel, + snmp_ifType_fddi, + snmp_ifType_lapb, + snmp_ifType_sdlc, + snmp_ifType_ds1, /* T-1 */ + snmp_ifType_e1, /* european equiv. of T-1 */ + snmp_ifType_basicISDN, + snmp_ifType_primaryISDN, /* proprietary serial */ + snmp_ifType_propPointToPointSerial, + snmp_ifType_ppp, + snmp_ifType_softwareLoopback, + snmp_ifType_eon, /* CLNP over IP [11] */ + snmp_ifType_ethernet_3Mbit, + snmp_ifType_nsip, /* XNS over IP */ + snmp_ifType_slip, /* generic SLIP */ + snmp_ifType_ultra, /* ULTRA technologies */ + snmp_ifType_ds3, /* T-3 */ + snmp_ifType_sip, /* SMDS */ + snmp_ifType_frame_relay +}; + +/** This macro has a precision of ~49 days because sys_now returns u32_t. \#define your own if you want ~490 days. 
*/ +#ifndef MIB2_COPY_SYSUPTIME_TO +#define MIB2_COPY_SYSUPTIME_TO(ptrToVal) (*(ptrToVal) = (sys_now() / 10)) +#endif + +/** + * @ingroup netif_mib2 + * Increment stats member for SNMP MIB2 stats (struct stats_mib2_netif_ctrs) + */ +#define MIB2_STATS_NETIF_INC(n, x) do { ++(n)->mib2_counters.x; } while(0) +/** + * @ingroup netif_mib2 + * Add value to stats member for SNMP MIB2 stats (struct stats_mib2_netif_ctrs) + */ +#define MIB2_STATS_NETIF_ADD(n, x, val) do { (n)->mib2_counters.x += (val); } while(0) + +/** + * @ingroup netif_mib2 + * Init MIB2 statistic counters in netif + * @param netif Netif to init + * @param type one of enum @ref snmp_ifType + * @param speed your link speed here (units: bits per second) + */ +#define MIB2_INIT_NETIF(netif, type, speed) do { \ + (netif)->link_type = (type); \ + (netif)->link_speed = (speed);\ + (netif)->ts = 0; \ + (netif)->mib2_counters.ifinoctets = 0; \ + (netif)->mib2_counters.ifinucastpkts = 0; \ + (netif)->mib2_counters.ifinnucastpkts = 0; \ + (netif)->mib2_counters.ifindiscards = 0; \ + (netif)->mib2_counters.ifinerrors = 0; \ + (netif)->mib2_counters.ifinunknownprotos = 0; \ + (netif)->mib2_counters.ifoutoctets = 0; \ + (netif)->mib2_counters.ifoutucastpkts = 0; \ + (netif)->mib2_counters.ifoutnucastpkts = 0; \ + (netif)->mib2_counters.ifoutdiscards = 0; \ + (netif)->mib2_counters.ifouterrors = 0; } while(0) +#else /* MIB2_STATS */ +#ifndef MIB2_COPY_SYSUPTIME_TO +#define MIB2_COPY_SYSUPTIME_TO(ptrToVal) +#endif +#define MIB2_INIT_NETIF(netif, type, speed) +#define MIB2_STATS_NETIF_INC(n, x) +#define MIB2_STATS_NETIF_ADD(n, x, val) +#endif /* MIB2_STATS */ + +/* LWIP MIB2 callbacks */ +#if LWIP_MIB2_CALLBACKS /* don't build if not configured for use in lwipopts.h */ +/* network interface */ +void mib2_netif_added(struct netif *ni); +void mib2_netif_removed(struct netif *ni); + +#if LWIP_IPV4 && LWIP_ARP +/* ARP (for atTable and ipNetToMediaTable) */ +void mib2_add_arp_entry(struct netif *ni, ip4_addr_t *ip); +void mib2_remove_arp_entry(struct netif *ni, ip4_addr_t *ip); +#else /* LWIP_IPV4 && LWIP_ARP */ +#define mib2_add_arp_entry(ni,ip) +#define mib2_remove_arp_entry(ni,ip) +#endif /* LWIP_IPV4 && LWIP_ARP */ + +/* IP */ +#if LWIP_IPV4 +void mib2_add_ip4(struct netif *ni); +void mib2_remove_ip4(struct netif *ni); +void mib2_add_route_ip4(u8_t dflt, struct netif *ni); +void mib2_remove_route_ip4(u8_t dflt, struct netif *ni); +#endif /* LWIP_IPV4 */ + +/* UDP */ +#if LWIP_UDP +void mib2_udp_bind(struct udp_pcb *pcb); +void mib2_udp_unbind(struct udp_pcb *pcb); +#endif /* LWIP_UDP */ + +#else /* LWIP_MIB2_CALLBACKS */ +/* LWIP_MIB2_CALLBACKS support not available */ +/* define everything to be empty */ + +/* network interface */ +#define mib2_netif_added(ni) +#define mib2_netif_removed(ni) + +/* ARP */ +#define mib2_add_arp_entry(ni,ip) +#define mib2_remove_arp_entry(ni,ip) + +/* IP */ +#define mib2_add_ip4(ni) +#define mib2_remove_ip4(ni) +#define mib2_add_route_ip4(dflt, ni) +#define mib2_remove_route_ip4(dflt, ni) + +/* UDP */ +#define mib2_udp_bind(pcb) +#define mib2_udp_unbind(pcb) +#endif /* LWIP_MIB2_CALLBACKS */ + +/* for source-code compatibility reasons only, can be removed (not used internally) */ +#define NETIF_INIT_SNMP MIB2_INIT_NETIF +#define snmp_add_ifinoctets(ni,value) MIB2_STATS_NETIF_ADD(ni, ifinoctets, value) +#define snmp_inc_ifinucastpkts(ni) MIB2_STATS_NETIF_INC(ni, ifinucastpkts) +#define snmp_inc_ifinnucastpkts(ni) MIB2_STATS_NETIF_INC(ni, ifinnucastpkts) +#define snmp_inc_ifindiscards(ni) MIB2_STATS_NETIF_INC(ni, 
ifindiscards) +#define snmp_inc_ifinerrors(ni) MIB2_STATS_NETIF_INC(ni, ifinerrors) +#define snmp_inc_ifinunknownprotos(ni) MIB2_STATS_NETIF_INC(ni, ifinunknownprotos) +#define snmp_add_ifoutoctets(ni,value) MIB2_STATS_NETIF_ADD(ni, ifoutoctets, value) +#define snmp_inc_ifoutucastpkts(ni) MIB2_STATS_NETIF_INC(ni, ifoutucastpkts) +#define snmp_inc_ifoutnucastpkts(ni) MIB2_STATS_NETIF_INC(ni, ifoutnucastpkts) +#define snmp_inc_ifoutdiscards(ni) MIB2_STATS_NETIF_INC(ni, ifoutdiscards) +#define snmp_inc_ifouterrors(ni) MIB2_STATS_NETIF_INC(ni, ifouterrors) + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_SNMP_H */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/sockets.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/sockets.h new file mode 100644 index 0000000..f448591 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/sockets.h @@ -0,0 +1,688 @@ +/** + * @file + * Socket API (to be used from non-TCPIP threads) + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ + + +#ifndef LWIP_HDR_SOCKETS_H +#define LWIP_HDR_SOCKETS_H + +#include "lwip/opt.h" + +#if LWIP_SOCKET /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/ip_addr.h" +#include "lwip/netif.h" +#include "lwip/err.h" +#include "lwip/inet.h" +#include "lwip/errno.h" + +#include + +#ifdef __cplusplus +extern "C" { +#endif + +/* If your port already typedef's sa_family_t, define SA_FAMILY_T_DEFINED + to prevent this code from redefining it. */ +#if !defined(sa_family_t) && !defined(SA_FAMILY_T_DEFINED) +typedef u8_t sa_family_t; +#endif +/* If your port already typedef's in_port_t, define IN_PORT_T_DEFINED + to prevent this code from redefining it. 
*/ +#if !defined(in_port_t) && !defined(IN_PORT_T_DEFINED) +typedef u16_t in_port_t; +#endif + +#if LWIP_IPV4 +/* members are in network byte order */ +struct sockaddr_in { + u8_t sin_len; + sa_family_t sin_family; + in_port_t sin_port; + struct in_addr sin_addr; +#define SIN_ZERO_LEN 8 + char sin_zero[SIN_ZERO_LEN]; +}; +#endif /* LWIP_IPV4 */ + +#if LWIP_IPV6 +struct sockaddr_in6 { + u8_t sin6_len; /* length of this structure */ + sa_family_t sin6_family; /* AF_INET6 */ + in_port_t sin6_port; /* Transport layer port # */ + u32_t sin6_flowinfo; /* IPv6 flow information */ + struct in6_addr sin6_addr; /* IPv6 address */ + u32_t sin6_scope_id; /* Set of interfaces for scope */ +}; +#endif /* LWIP_IPV6 */ + +struct sockaddr { + u8_t sa_len; + sa_family_t sa_family; + char sa_data[14]; +}; + +struct sockaddr_storage { + u8_t s2_len; + sa_family_t ss_family; + char s2_data1[2]; + u32_t s2_data2[3]; +#if LWIP_IPV6 + u32_t s2_data3[3]; +#endif /* LWIP_IPV6 */ +}; + +/* If your port already typedef's socklen_t, define SOCKLEN_T_DEFINED + to prevent this code from redefining it. */ +#if !defined(socklen_t) && !defined(SOCKLEN_T_DEFINED) +typedef u32_t socklen_t; +#endif + +#if !defined IOV_MAX +#define IOV_MAX 0xFFFF +#elif IOV_MAX > 0xFFFF +#error "IOV_MAX larger than supported by LwIP" +#endif /* IOV_MAX */ + +#if !defined(iovec) +struct iovec { + void *iov_base; + size_t iov_len; +}; +#endif + +struct msghdr { + void *msg_name; + socklen_t msg_namelen; + struct iovec *msg_iov; + int msg_iovlen; + void *msg_control; + socklen_t msg_controllen; + int msg_flags; +}; + +/* struct msghdr->msg_flags bit field values */ +#define MSG_TRUNC 0x04 +#define MSG_CTRUNC 0x08 + +/* RFC 3542, Section 20: Ancillary Data */ +struct cmsghdr { + socklen_t cmsg_len; /* number of bytes, including header */ + int cmsg_level; /* originating protocol */ + int cmsg_type; /* protocol-specific type */ +}; +/* Data section follows header and possible padding, typically referred to as + unsigned char cmsg_data[]; */ + +/* cmsg header/data alignment. NOTE: we align to native word size (double word +size on 16-bit arch) so structures are not placed at an unaligned address. +16-bit arch needs double word to ensure 32-bit alignment because socklen_t +could be 32 bits. If we ever have cmsg data with a 64-bit variable, alignment +will need to increase long long */ +#define ALIGN_H(size) (((size) + sizeof(long) - 1U) & ~(sizeof(long)-1U)) +#define ALIGN_D(size) ALIGN_H(size) + +#define CMSG_FIRSTHDR(mhdr) \ + ((mhdr)->msg_controllen >= sizeof(struct cmsghdr) ? \ + (struct cmsghdr *)(mhdr)->msg_control : \ + (struct cmsghdr *)NULL) + +#define CMSG_NXTHDR(mhdr, cmsg) \ + (((cmsg) == NULL) ? CMSG_FIRSTHDR(mhdr) : \ + (((u8_t *)(cmsg) + ALIGN_H((cmsg)->cmsg_len) \ + + ALIGN_D(sizeof(struct cmsghdr)) > \ + (u8_t *)((mhdr)->msg_control) + (mhdr)->msg_controllen) ? \ + (struct cmsghdr *)NULL : \ + (struct cmsghdr *)((void*)((u8_t *)(cmsg) + \ + ALIGN_H((cmsg)->cmsg_len))))) + +#define CMSG_DATA(cmsg) ((void*)((u8_t *)(cmsg) + \ + ALIGN_D(sizeof(struct cmsghdr)))) + +#define CMSG_SPACE(length) (ALIGN_D(sizeof(struct cmsghdr)) + \ + ALIGN_H(length)) + +#define CMSG_LEN(length) (ALIGN_D(sizeof(struct cmsghdr)) + \ + length) + +/* Set socket options argument */ +#define IFNAMSIZ NETIF_NAMESIZE +struct ifreq { + char ifr_name[IFNAMSIZ]; /* Interface name */ +}; + +/* Socket protocol types (TCP/UDP/RAW) */ +#define SOCK_STREAM 1 +#define SOCK_DGRAM 2 +#define SOCK_RAW 3 + +/* + * Option flags per-socket. 
These must match the SOF_ flags in ip.h (checked in init.c) + */ +#define SO_REUSEADDR 0x0004 /* Allow local address reuse */ +#define SO_KEEPALIVE 0x0008 /* keep connections alive */ +#define SO_BROADCAST 0x0020 /* permit to send and to receive broadcast messages (see IP_SOF_BROADCAST option) */ + + +/* + * Additional options, not kept in so_options. + */ +#define SO_DEBUG 0x0001 /* Unimplemented: turn on debugging info recording */ +#define SO_ACCEPTCONN 0x0002 /* socket has had listen() */ +#define SO_DONTROUTE 0x0010 /* Unimplemented: just use interface addresses */ +#define SO_USELOOPBACK 0x0040 /* Unimplemented: bypass hardware when possible */ +#define SO_LINGER 0x0080 /* linger on close if data present */ +#define SO_DONTLINGER ((int)(~SO_LINGER)) +#define SO_OOBINLINE 0x0100 /* Unimplemented: leave received OOB data in line */ +#define SO_REUSEPORT 0x0200 /* Unimplemented: allow local address & port reuse */ +#define SO_SNDBUF 0x1001 /* Unimplemented: send buffer size */ +#define SO_RCVBUF 0x1002 /* receive buffer size */ +#define SO_SNDLOWAT 0x1003 /* Unimplemented: send low-water mark */ +#define SO_RCVLOWAT 0x1004 /* Unimplemented: receive low-water mark */ +#define SO_SNDTIMEO 0x1005 /* send timeout */ +#define SO_RCVTIMEO 0x1006 /* receive timeout */ +#define SO_ERROR 0x1007 /* get error status and clear */ +#define SO_TYPE 0x1008 /* get socket type */ +#define SO_CONTIMEO 0x1009 /* Unimplemented: connect timeout */ +#define SO_NO_CHECK 0x100a /* don't create UDP checksum */ +#define SO_BINDTODEVICE 0x100b /* bind to device */ + +/* + * Structure used for manipulating linger option. + */ +struct linger { + int l_onoff; /* option on/off */ + int l_linger; /* linger time in seconds */ +}; + +/* + * Level number for (get/set)sockopt() to apply to socket itself. + */ +#define SOL_SOCKET 0xfff /* options for socket level */ + + +#define AF_UNSPEC 0 +#define AF_INET 2 +#if LWIP_IPV6 +#define AF_INET6 10 +#else /* LWIP_IPV6 */ +#define AF_INET6 AF_UNSPEC +#endif /* LWIP_IPV6 */ +#define PF_INET AF_INET +#define PF_INET6 AF_INET6 +#define PF_UNSPEC AF_UNSPEC + +#define IPPROTO_IP 0 +#define IPPROTO_ICMP 1 +#define IPPROTO_TCP 6 +#define IPPROTO_UDP 17 +#if LWIP_IPV6 +#define IPPROTO_IPV6 41 +#define IPPROTO_ICMPV6 58 +#endif /* LWIP_IPV6 */ +#define IPPROTO_UDPLITE 136 +#define IPPROTO_RAW 255 + +/* Flags we can use with send and recv. */ +#define MSG_PEEK 0x01 /* Peeks at an incoming message */ +#define MSG_WAITALL 0x02 /* Unimplemented: Requests that the function block until the full amount of data requested can be returned */ +#define MSG_OOB 0x04 /* Unimplemented: Requests out-of-band data. The significance and semantics of out-of-band data are protocol-specific */ +#define MSG_DONTWAIT 0x08 /* Nonblocking i/o for this operation only */ +#define MSG_MORE 0x10 /* Sender will send more */ +#define MSG_NOSIGNAL 0x20 /* Uninmplemented: Requests not to send the SIGPIPE signal if an attempt to send is made on a stream-oriented socket that is no longer connected. 
*/ + + +/* + * Options for level IPPROTO_IP + */ +#define IP_TOS 1 +#define IP_TTL 2 +#define IP_PKTINFO 8 + +#if LWIP_TCP +/* + * Options for level IPPROTO_TCP + */ +#define TCP_NODELAY 0x01 /* don't delay send to coalesce packets */ +#define TCP_KEEPALIVE 0x02 /* send KEEPALIVE probes when idle for pcb->keep_idle milliseconds */ +#define TCP_KEEPIDLE 0x03 /* set pcb->keep_idle - Same as TCP_KEEPALIVE, but use seconds for get/setsockopt */ +#define TCP_KEEPINTVL 0x04 /* set pcb->keep_intvl - Use seconds for get/setsockopt */ +#define TCP_KEEPCNT 0x05 /* set pcb->keep_cnt - Use number of probes sent for get/setsockopt */ +#endif /* LWIP_TCP */ + +#if LWIP_IPV6 +/* + * Options for level IPPROTO_IPV6 + */ +#define IPV6_CHECKSUM 7 /* RFC3542: calculate and insert the ICMPv6 checksum for raw sockets. */ +#define IPV6_V6ONLY 27 /* RFC3493: boolean control to restrict AF_INET6 sockets to IPv6 communications only. */ +#endif /* LWIP_IPV6 */ + +#if LWIP_UDP && LWIP_UDPLITE +/* + * Options for level IPPROTO_UDPLITE + */ +#define UDPLITE_SEND_CSCOV 0x01 /* sender checksum coverage */ +#define UDPLITE_RECV_CSCOV 0x02 /* minimal receiver checksum coverage */ +#endif /* LWIP_UDP && LWIP_UDPLITE*/ + + +#if LWIP_MULTICAST_TX_OPTIONS +/* + * Options and types for UDP multicast traffic handling + */ +#define IP_MULTICAST_TTL 5 +#define IP_MULTICAST_IF 6 +#define IP_MULTICAST_LOOP 7 +#endif /* LWIP_MULTICAST_TX_OPTIONS */ + +#if LWIP_IGMP +/* + * Options and types related to multicast membership + */ +#define IP_ADD_MEMBERSHIP 3 +#define IP_DROP_MEMBERSHIP 4 + +typedef struct ip_mreq { + struct in_addr imr_multiaddr; /* IP multicast address of group */ + struct in_addr imr_interface; /* local IP address of interface */ +} ip_mreq; +#endif /* LWIP_IGMP */ + +#if LWIP_IPV4 +struct in_pktinfo { + unsigned int ipi_ifindex; /* Interface index */ + struct in_addr ipi_addr; /* Destination (from header) address */ +}; +#endif /* LWIP_IPV4 */ + +#if LWIP_IPV6_MLD +/* + * Options and types related to IPv6 multicast membership + */ +#define IPV6_JOIN_GROUP 12 +#define IPV6_ADD_MEMBERSHIP IPV6_JOIN_GROUP +#define IPV6_LEAVE_GROUP 13 +#define IPV6_DROP_MEMBERSHIP IPV6_LEAVE_GROUP + +typedef struct ipv6_mreq { + struct in6_addr ipv6mr_multiaddr; /* IPv6 multicast addr */ + unsigned int ipv6mr_interface; /* interface index, or 0 */ +} ipv6_mreq; +#endif /* LWIP_IPV6_MLD */ + +/* + * The Type of Service provides an indication of the abstract + * parameters of the quality of service desired. These parameters are + * to be used to guide the selection of the actual service parameters + * when transmitting a datagram through a particular network. Several + * networks offer service precedence, which somehow treats high + * precedence traffic as more important than other traffic (generally + * by accepting only traffic above a certain precedence at time of high + * load). The major choice is a three way tradeoff between low-delay, + * high-reliability, and high-throughput. + * The use of the Delay, Throughput, and Reliability indications may + * increase the cost (in some sense) of the service. In many networks + * better performance for one of these parameters is coupled with worse + * performance on another. Except for very unusual cases at most two + * of these three indications should be set. 
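+ *
+ * As a sketch (error handling omitted, 's' being an already created socket),
+ * an application could request the low-delay class through the socket API
+ * with the IPTOS_LOWDELAY value defined below:
+ *
+ *   int tos = IPTOS_LOWDELAY;
+ *   setsockopt(s, IPPROTO_IP, IP_TOS, &tos, sizeof(tos));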
+ */ +#define IPTOS_TOS_MASK 0x1E +#define IPTOS_TOS(tos) ((tos) & IPTOS_TOS_MASK) +#define IPTOS_LOWDELAY 0x10 +#define IPTOS_THROUGHPUT 0x08 +#define IPTOS_RELIABILITY 0x04 +#define IPTOS_LOWCOST 0x02 +#define IPTOS_MINCOST IPTOS_LOWCOST + +/* + * The Network Control precedence designation is intended to be used + * within a network only. The actual use and control of that + * designation is up to each network. The Internetwork Control + * designation is intended for use by gateway control originators only. + * If the actual use of these precedence designations is of concern to + * a particular network, it is the responsibility of that network to + * control the access to, and use of, those precedence designations. + */ +#define IPTOS_PREC_MASK 0xe0 +#define IPTOS_PREC(tos) ((tos) & IPTOS_PREC_MASK) +#define IPTOS_PREC_NETCONTROL 0xe0 +#define IPTOS_PREC_INTERNETCONTROL 0xc0 +#define IPTOS_PREC_CRITIC_ECP 0xa0 +#define IPTOS_PREC_FLASHOVERRIDE 0x80 +#define IPTOS_PREC_FLASH 0x60 +#define IPTOS_PREC_IMMEDIATE 0x40 +#define IPTOS_PREC_PRIORITY 0x20 +#define IPTOS_PREC_ROUTINE 0x00 + + +/* + * Commands for ioctlsocket(), taken from the BSD file fcntl.h. + * lwip_ioctl only supports FIONREAD and FIONBIO, for now + * + * Ioctl's have the command encoded in the lower word, + * and the size of any in or out parameters in the upper + * word. The high 2 bits of the upper word are used + * to encode the in/out status of the parameter; for now + * we restrict parameters to at most 128 bytes. + */ +#if !defined(FIONREAD) || !defined(FIONBIO) +#define IOCPARM_MASK 0x7fU /* parameters must be < 128 bytes */ +#define IOC_VOID 0x20000000UL /* no parameters */ +#define IOC_OUT 0x40000000UL /* copy out parameters */ +#define IOC_IN 0x80000000UL /* copy in parameters */ +#define IOC_INOUT (IOC_IN|IOC_OUT) + /* 0x20000000 distinguishes new & + old ioctl's */ +#define _IO(x,y) ((long)(IOC_VOID|((x)<<8)|(y))) + +#define _IOR(x,y,t) ((long)(IOC_OUT|((sizeof(t)&IOCPARM_MASK)<<16)|((x)<<8)|(y))) + +#define _IOW(x,y,t) ((long)(IOC_IN|((sizeof(t)&IOCPARM_MASK)<<16)|((x)<<8)|(y))) +#endif /* !defined(FIONREAD) || !defined(FIONBIO) */ + +#ifndef FIONREAD +#define FIONREAD _IOR('f', 127, unsigned long) /* get # bytes to read */ +#endif +#ifndef FIONBIO +#define FIONBIO _IOW('f', 126, unsigned long) /* set/clear non-blocking i/o */ +#endif + +/* Socket I/O Controls: unimplemented */ +#ifndef SIOCSHIWAT +#define SIOCSHIWAT _IOW('s', 0, unsigned long) /* set high watermark */ +#define SIOCGHIWAT _IOR('s', 1, unsigned long) /* get high watermark */ +#define SIOCSLOWAT _IOW('s', 2, unsigned long) /* set low watermark */ +#define SIOCGLOWAT _IOR('s', 3, unsigned long) /* get low watermark */ +#define SIOCATMARK _IOR('s', 7, unsigned long) /* at oob mark? */ +#endif + +/* commands for fnctl */ +#ifndef F_GETFL +#define F_GETFL 3 +#endif +#ifndef F_SETFL +#define F_SETFL 4 +#endif + +/* File status flags and file access modes for fnctl, + these are bits in an int. 
*/ +#ifndef O_NONBLOCK +#define O_NONBLOCK 1 /* nonblocking I/O */ +#endif +#ifndef O_NDELAY +#define O_NDELAY O_NONBLOCK /* same as O_NONBLOCK, for compatibility */ +#endif +#ifndef O_RDONLY +#define O_RDONLY 2 +#endif +#ifndef O_WRONLY +#define O_WRONLY 4 +#endif +#ifndef O_RDWR +#define O_RDWR (O_RDONLY|O_WRONLY) +#endif + +#ifndef SHUT_RD + #define SHUT_RD 0 + #define SHUT_WR 1 + #define SHUT_RDWR 2 +#endif + +/* FD_SET used for lwip_select */ +#ifndef FD_SET +#undef FD_SETSIZE +/* Make FD_SETSIZE match NUM_SOCKETS in socket.c */ +#define FD_SETSIZE MEMP_NUM_NETCONN +#define LWIP_SELECT_MAXNFDS (FD_SETSIZE + LWIP_SOCKET_OFFSET) +#define FDSETSAFESET(n, code) do { \ + if (((n) - LWIP_SOCKET_OFFSET < MEMP_NUM_NETCONN) && (((int)(n) - LWIP_SOCKET_OFFSET) >= 0)) { \ + code; }} while(0) +#define FDSETSAFEGET(n, code) (((n) - LWIP_SOCKET_OFFSET < MEMP_NUM_NETCONN) && (((int)(n) - LWIP_SOCKET_OFFSET) >= 0) ?\ + (code) : 0) +#define FD_SET(n, p) FDSETSAFESET(n, (p)->fd_bits[((n)-LWIP_SOCKET_OFFSET)/8] = (u8_t)((p)->fd_bits[((n)-LWIP_SOCKET_OFFSET)/8] | (1 << (((n)-LWIP_SOCKET_OFFSET) & 7)))) +#define FD_CLR(n, p) FDSETSAFESET(n, (p)->fd_bits[((n)-LWIP_SOCKET_OFFSET)/8] = (u8_t)((p)->fd_bits[((n)-LWIP_SOCKET_OFFSET)/8] & ~(1 << (((n)-LWIP_SOCKET_OFFSET) & 7)))) +#define FD_ISSET(n,p) FDSETSAFEGET(n, (p)->fd_bits[((n)-LWIP_SOCKET_OFFSET)/8] & (1 << (((n)-LWIP_SOCKET_OFFSET) & 7))) +#define FD_ZERO(p) memset((void*)(p), 0, sizeof(*(p))) + +typedef struct fd_set +{ + unsigned char fd_bits [(FD_SETSIZE+7)/8]; +} fd_set; + +#elif FD_SETSIZE < (LWIP_SOCKET_OFFSET + MEMP_NUM_NETCONN) +#error "external FD_SETSIZE too small for number of sockets" +#else +#define LWIP_SELECT_MAXNFDS FD_SETSIZE +#endif /* FD_SET */ + +/* poll-related defines and types */ +/* @todo: find a better way to guard the definition of these defines and types if already defined */ +#if !defined(POLLIN) && !defined(POLLOUT) +#define POLLIN 0x1 +#define POLLOUT 0x2 +#define POLLERR 0x4 +#define POLLNVAL 0x8 +/* Below values are unimplemented */ +#define POLLRDNORM 0x10 +#define POLLRDBAND 0x20 +#define POLLPRI 0x40 +#define POLLWRNORM 0x80 +#define POLLWRBAND 0x100 +#define POLLHUP 0x200 +typedef unsigned int nfds_t; +struct pollfd +{ + int fd; + short events; + short revents; +}; +#endif + +/** LWIP_TIMEVAL_PRIVATE: if you want to use the struct timeval provided + * by your system, set this to 0 and include in cc.h */ +#ifndef LWIP_TIMEVAL_PRIVATE +#define LWIP_TIMEVAL_PRIVATE 1 +#endif + +#if LWIP_TIMEVAL_PRIVATE +struct timeval { + long tv_sec; /* seconds */ + long tv_usec; /* and microseconds */ +}; +#endif /* LWIP_TIMEVAL_PRIVATE */ + +#define lwip_socket_init() /* Compatibility define, no init needed. 
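+   With LWIP_COMPAT_SOCKETS enabled a thread can therefore create and use a
+   socket directly (otherwise use the lwip_-prefixed names declared below).
+   A sketch with an arbitrary example port, error handling omitted:
+     int s = socket(AF_INET, SOCK_DGRAM, IPPROTO_UDP);
+     struct sockaddr_in local = {0};
+     local.sin_family = AF_INET;
+     local.sin_port = htons(7400);
+     local.sin_addr.s_addr = INADDR_ANY;
+     bind(s, (struct sockaddr *)&local, sizeof(local));
+     char buf[64];
+     recvfrom(s, buf, sizeof(buf), 0, NULL, NULL);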
*/ +void lwip_socket_thread_init(void); /* LWIP_NETCONN_SEM_PER_THREAD==1: initialize thread-local semaphore */ +void lwip_socket_thread_cleanup(void); /* LWIP_NETCONN_SEM_PER_THREAD==1: destroy thread-local semaphore */ + +#if LWIP_COMPAT_SOCKETS == 2 +/* This helps code parsers/code completion by not having the COMPAT functions as defines */ +#define lwip_accept accept +#define lwip_bind bind +#define lwip_shutdown shutdown +#define lwip_getpeername getpeername +#define lwip_getsockname getsockname +#define lwip_setsockopt setsockopt +#define lwip_getsockopt getsockopt +#define lwip_close closesocket +#define lwip_connect connect +#define lwip_listen listen +#define lwip_recv recv +#define lwip_recvmsg recvmsg +#define lwip_recvfrom recvfrom +#define lwip_send send +#define lwip_sendmsg sendmsg +#define lwip_sendto sendto +#define lwip_socket socket +#if LWIP_SOCKET_SELECT +#define lwip_select select +#endif +#if LWIP_SOCKET_POLL +#define lwip_poll poll +#endif +#define lwip_ioctl ioctlsocket +#define lwip_inet_ntop inet_ntop +#define lwip_inet_pton inet_pton + +#if LWIP_POSIX_SOCKETS_IO_NAMES +#define lwip_read read +#define lwip_readv readv +#define lwip_write write +#define lwip_writev writev +#undef lwip_close +#define lwip_close close +#define closesocket(s) close(s) +int fcntl(int s, int cmd, ...); +#undef lwip_ioctl +#define lwip_ioctl ioctl +#define ioctlsocket ioctl +#endif /* LWIP_POSIX_SOCKETS_IO_NAMES */ +#endif /* LWIP_COMPAT_SOCKETS == 2 */ + +int lwip_accept(int s, struct sockaddr *addr, socklen_t *addrlen); +int lwip_bind(int s, const struct sockaddr *name, socklen_t namelen); +int lwip_shutdown(int s, int how); +int lwip_getpeername (int s, struct sockaddr *name, socklen_t *namelen); +int lwip_getsockname (int s, struct sockaddr *name, socklen_t *namelen); +int lwip_getsockopt (int s, int level, int optname, void *optval, socklen_t *optlen); +int lwip_setsockopt (int s, int level, int optname, const void *optval, socklen_t optlen); + int lwip_close(int s); +int lwip_connect(int s, const struct sockaddr *name, socklen_t namelen); +int lwip_listen(int s, int backlog); +ssize_t lwip_recv(int s, void *mem, size_t len, int flags); +ssize_t lwip_read(int s, void *mem, size_t len); +ssize_t lwip_readv(int s, const struct iovec *iov, int iovcnt); +ssize_t lwip_recvfrom(int s, void *mem, size_t len, int flags, + struct sockaddr *from, socklen_t *fromlen); +ssize_t lwip_recvmsg(int s, struct msghdr *message, int flags); +ssize_t lwip_send(int s, const void *dataptr, size_t size, int flags); +ssize_t lwip_sendmsg(int s, const struct msghdr *message, int flags); +ssize_t lwip_sendto(int s, const void *dataptr, size_t size, int flags, + const struct sockaddr *to, socklen_t tolen); +int lwip_socket(int domain, int type, int protocol); +ssize_t lwip_write(int s, const void *dataptr, size_t size); +ssize_t lwip_writev(int s, const struct iovec *iov, int iovcnt); +#if LWIP_SOCKET_SELECT +int lwip_select(int maxfdp1, fd_set *readset, fd_set *writeset, fd_set *exceptset, + struct timeval *timeout); +#endif +#if LWIP_SOCKET_POLL +int lwip_poll(struct pollfd *fds, nfds_t nfds, int timeout); +#endif +int lwip_ioctl(int s, long cmd, void *argp); +int lwip_fcntl(int s, int cmd, int val); +const char *lwip_inet_ntop(int af, const void *src, char *dst, socklen_t size); +int lwip_inet_pton(int af, const char *src, void *dst); + +#if LWIP_COMPAT_SOCKETS +#if LWIP_COMPAT_SOCKETS != 2 +/** @ingroup socket */ +#define accept(s,addr,addrlen) lwip_accept(s,addr,addrlen) +/** @ingroup socket */ +#define 
bind(s,name,namelen) lwip_bind(s,name,namelen) +/** @ingroup socket */ +#define shutdown(s,how) lwip_shutdown(s,how) +/** @ingroup socket */ +#define getpeername(s,name,namelen) lwip_getpeername(s,name,namelen) +/** @ingroup socket */ +#define getsockname(s,name,namelen) lwip_getsockname(s,name,namelen) +/** @ingroup socket */ +#define setsockopt(s,level,optname,opval,optlen) lwip_setsockopt(s,level,optname,opval,optlen) +/** @ingroup socket */ +#define getsockopt(s,level,optname,opval,optlen) lwip_getsockopt(s,level,optname,opval,optlen) +/** @ingroup socket */ +#define closesocket(s) lwip_close(s) +/** @ingroup socket */ +#define connect(s,name,namelen) lwip_connect(s,name,namelen) +/** @ingroup socket */ +#define listen(s,backlog) lwip_listen(s,backlog) +/** @ingroup socket */ +#define recv(s,mem,len,flags) lwip_recv(s,mem,len,flags) +/** @ingroup socket */ +#define recvmsg(s,message,flags) lwip_recvmsg(s,message,flags) +/** @ingroup socket */ +#define recvfrom(s,mem,len,flags,from,fromlen) lwip_recvfrom(s,mem,len,flags,from,fromlen) +/** @ingroup socket */ +#define send(s,dataptr,size,flags) lwip_send(s,dataptr,size,flags) +/** @ingroup socket */ +#define sendmsg(s,message,flags) lwip_sendmsg(s,message,flags) +/** @ingroup socket */ +#define sendto(s,dataptr,size,flags,to,tolen) lwip_sendto(s,dataptr,size,flags,to,tolen) +/** @ingroup socket */ +#define socket(domain,type,protocol) lwip_socket(domain,type,protocol) +#if LWIP_SOCKET_SELECT +/** @ingroup socket */ +#define select(maxfdp1,readset,writeset,exceptset,timeout) lwip_select(maxfdp1,readset,writeset,exceptset,timeout) +#endif +#if LWIP_SOCKET_POLL +/** @ingroup socket */ +#define poll(fds,nfds,timeout) lwip_poll(fds,nfds,timeout) +#endif +/** @ingroup socket */ +#define ioctlsocket(s,cmd,argp) lwip_ioctl(s,cmd,argp) +/** @ingroup socket */ +#define inet_ntop(af,src,dst,size) lwip_inet_ntop(af,src,dst,size) +/** @ingroup socket */ +#define inet_pton(af,src,dst) lwip_inet_pton(af,src,dst) + +#if LWIP_POSIX_SOCKETS_IO_NAMES +/** @ingroup socket */ +#define read(s,mem,len) lwip_read(s,mem,len) +/** @ingroup socket */ +#define readv(s,iov,iovcnt) lwip_readv(s,iov,iovcnt) +/** @ingroup socket */ +#define write(s,dataptr,len) lwip_write(s,dataptr,len) +/** @ingroup socket */ +#define writev(s,iov,iovcnt) lwip_writev(s,iov,iovcnt) +/** @ingroup socket */ +#define close(s) lwip_close(s) +/** @ingroup socket */ +#define fcntl(s,cmd,val) lwip_fcntl(s,cmd,val) +/** @ingroup socket */ +#define ioctl(s,cmd,argp) lwip_ioctl(s,cmd,argp) +#endif /* LWIP_POSIX_SOCKETS_IO_NAMES */ +#endif /* LWIP_COMPAT_SOCKETS != 2 */ + +#endif /* LWIP_COMPAT_SOCKETS */ + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_SOCKET */ + +#endif /* LWIP_HDR_SOCKETS_H */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/stats.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/stats.h new file mode 100644 index 0000000..1cf4e59 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/stats.h @@ -0,0 +1,491 @@ +/** + * @file + * Statistics API (to be used from TCPIP thread) + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. 
Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ +#ifndef LWIP_HDR_STATS_H +#define LWIP_HDR_STATS_H + +#include "lwip/opt.h" + +#include "lwip/mem.h" +#include "lwip/memp.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#if LWIP_STATS + +#ifndef LWIP_STATS_LARGE +#define LWIP_STATS_LARGE 0 +#endif + +#if LWIP_STATS_LARGE +#define STAT_COUNTER u32_t +#define STAT_COUNTER_F U32_F +#else +#define STAT_COUNTER u16_t +#define STAT_COUNTER_F U16_F +#endif + +/** Protocol related stats */ +struct stats_proto { + STAT_COUNTER xmit; /* Transmitted packets. */ + STAT_COUNTER recv; /* Received packets. */ + STAT_COUNTER fw; /* Forwarded packets. */ + STAT_COUNTER drop; /* Dropped packets. */ + STAT_COUNTER chkerr; /* Checksum error. */ + STAT_COUNTER lenerr; /* Invalid length error. */ + STAT_COUNTER memerr; /* Out of memory error. */ + STAT_COUNTER rterr; /* Routing error. */ + STAT_COUNTER proterr; /* Protocol error. */ + STAT_COUNTER opterr; /* Error in options. */ + STAT_COUNTER err; /* Misc error. */ + STAT_COUNTER cachehit; +}; + +/** IGMP stats */ +struct stats_igmp { + STAT_COUNTER xmit; /* Transmitted packets. */ + STAT_COUNTER recv; /* Received packets. */ + STAT_COUNTER drop; /* Dropped packets. */ + STAT_COUNTER chkerr; /* Checksum error. */ + STAT_COUNTER lenerr; /* Invalid length error. */ + STAT_COUNTER memerr; /* Out of memory error. */ + STAT_COUNTER proterr; /* Protocol error. */ + STAT_COUNTER rx_v1; /* Received v1 frames. */ + STAT_COUNTER rx_group; /* Received group-specific queries. */ + STAT_COUNTER rx_general; /* Received general queries. */ + STAT_COUNTER rx_report; /* Received reports. */ + STAT_COUNTER tx_join; /* Sent joins. */ + STAT_COUNTER tx_leave; /* Sent leaves. */ + STAT_COUNTER tx_report; /* Sent reports. 
*/ +}; + +/** Memory stats */ +struct stats_mem { +#if defined(LWIP_DEBUG) || LWIP_STATS_DISPLAY + const char *name; +#endif /* defined(LWIP_DEBUG) || LWIP_STATS_DISPLAY */ + STAT_COUNTER err; + mem_size_t avail; + mem_size_t used; + mem_size_t max; + STAT_COUNTER illegal; +}; + +/** System element stats */ +struct stats_syselem { + STAT_COUNTER used; + STAT_COUNTER max; + STAT_COUNTER err; +}; + +/** System stats */ +struct stats_sys { + struct stats_syselem sem; + struct stats_syselem mutex; + struct stats_syselem mbox; +}; + +/** SNMP MIB2 stats */ +struct stats_mib2 { + /* IP */ + u32_t ipinhdrerrors; + u32_t ipinaddrerrors; + u32_t ipinunknownprotos; + u32_t ipindiscards; + u32_t ipindelivers; + u32_t ipoutrequests; + u32_t ipoutdiscards; + u32_t ipoutnoroutes; + u32_t ipreasmoks; + u32_t ipreasmfails; + u32_t ipfragoks; + u32_t ipfragfails; + u32_t ipfragcreates; + u32_t ipreasmreqds; + u32_t ipforwdatagrams; + u32_t ipinreceives; + + /* TCP */ + u32_t tcpactiveopens; + u32_t tcppassiveopens; + u32_t tcpattemptfails; + u32_t tcpestabresets; + u32_t tcpoutsegs; + u32_t tcpretranssegs; + u32_t tcpinsegs; + u32_t tcpinerrs; + u32_t tcpoutrsts; + + /* UDP */ + u32_t udpindatagrams; + u32_t udpnoports; + u32_t udpinerrors; + u32_t udpoutdatagrams; + + /* ICMP */ + u32_t icmpinmsgs; + u32_t icmpinerrors; + u32_t icmpindestunreachs; + u32_t icmpintimeexcds; + u32_t icmpinparmprobs; + u32_t icmpinsrcquenchs; + u32_t icmpinredirects; + u32_t icmpinechos; + u32_t icmpinechoreps; + u32_t icmpintimestamps; + u32_t icmpintimestampreps; + u32_t icmpinaddrmasks; + u32_t icmpinaddrmaskreps; + u32_t icmpoutmsgs; + u32_t icmpouterrors; + u32_t icmpoutdestunreachs; + u32_t icmpouttimeexcds; + u32_t icmpoutechos; /* can be incremented by user application ('ping') */ + u32_t icmpoutechoreps; +}; + +/** + * @ingroup netif_mib2 + * SNMP MIB2 interface stats + */ +struct stats_mib2_netif_ctrs { + /** The total number of octets received on the interface, including framing characters */ + u32_t ifinoctets; + /** The number of packets, delivered by this sub-layer to a higher (sub-)layer, which were + * not addressed to a multicast or broadcast address at this sub-layer */ + u32_t ifinucastpkts; + /** The number of packets, delivered by this sub-layer to a higher (sub-)layer, which were + * addressed to a multicast or broadcast address at this sub-layer */ + u32_t ifinnucastpkts; + /** The number of inbound packets which were chosen to be discarded even though no errors had + * been detected to prevent their being deliverable to a higher-layer protocol. One possible + * reason for discarding such a packet could be to free up buffer space */ + u32_t ifindiscards; + /** For packet-oriented interfaces, the number of inbound packets that contained errors + * preventing them from being deliverable to a higher-layer protocol. For character- + * oriented or fixed-length interfaces, the number of inbound transmission units that + * contained errors preventing them from being deliverable to a higher-layer protocol. */ + u32_t ifinerrors; + /** For packet-oriented interfaces, the number of packets received via the interface which + * were discarded because of an unknown or unsupported protocol. For character-oriented + * or fixed-length interfaces that support protocol multiplexing the number of transmission + * units received via the interface which were discarded because of an unknown or unsupported + * protocol. 
For any interface that does not support protocol multiplexing, this counter will + * always be 0 */ + u32_t ifinunknownprotos; + /** The total number of octets transmitted out of the interface, including framing characters. */ + u32_t ifoutoctets; + /** The total number of packets that higher-level protocols requested be transmitted, and + * which were not addressed to a multicast or broadcast address at this sub-layer, including + * those that were discarded or not sent. */ + u32_t ifoutucastpkts; + /** The total number of packets that higher-level protocols requested be transmitted, and which + * were addressed to a multicast or broadcast address at this sub-layer, including + * those that were discarded or not sent. */ + u32_t ifoutnucastpkts; + /** The number of outbound packets which were chosen to be discarded even though no errors had + * been detected to prevent their being transmitted. One possible reason for discarding + * such a packet could be to free up buffer space. */ + u32_t ifoutdiscards; + /** For packet-oriented interfaces, the number of outbound packets that could not be transmitted + * because of errors. For character-oriented or fixed-length interfaces, the number of outbound + * transmission units that could not be transmitted because of errors. */ + u32_t ifouterrors; +}; + +/** lwIP stats container */ +struct stats_ { +#if LINK_STATS + /** Link level */ + struct stats_proto link; +#endif +#if ETHARP_STATS + /** ARP */ + struct stats_proto etharp; +#endif +#if IPFRAG_STATS + /** Fragmentation */ + struct stats_proto ip_frag; +#endif +#if IP_STATS + /** IP */ + struct stats_proto ip; +#endif +#if ICMP_STATS + /** ICMP */ + struct stats_proto icmp; +#endif +#if IGMP_STATS + /** IGMP */ + struct stats_igmp igmp; +#endif +#if UDP_STATS + /** UDP */ + struct stats_proto udp; +#endif +#if TCP_STATS + /** TCP */ + struct stats_proto tcp; +#endif +#if MEM_STATS + /** Heap */ + struct stats_mem mem; +#endif +#if MEMP_STATS + /** Internal memory pools */ + struct stats_mem *memp[MEMP_MAX]; +#endif +#if SYS_STATS + /** System */ + struct stats_sys sys; +#endif +#if IP6_STATS + /** IPv6 */ + struct stats_proto ip6; +#endif +#if ICMP6_STATS + /** ICMP6 */ + struct stats_proto icmp6; +#endif +#if IP6_FRAG_STATS + /** IPv6 fragmentation */ + struct stats_proto ip6_frag; +#endif +#if MLD6_STATS + /** Multicast listener discovery */ + struct stats_igmp mld6; +#endif +#if ND6_STATS + /** Neighbor discovery */ + struct stats_proto nd6; +#endif +#if MIB2_STATS + /** SNMP MIB2 */ + struct stats_mib2 mib2; +#endif +}; + +/** Global variable containing lwIP internal statistics. Add this to your debugger's watchlist. 
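+ *
+ * A debugging sketch, assuming LWIP_STATS, TCP_STATS and LWIP_STATS_DISPLAY
+ * are enabled in lwipopts.h (the printf call stands in for any platform
+ * diagnostic output):
+ * @code
+ *   printf("TCP segments received: %u\n", (unsigned)lwip_stats.tcp.recv);
+ *   stats_display();   // dump all enabled counter groups
+ * @endcode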
*/ +extern struct stats_ lwip_stats; + +/** Init statistics */ +void stats_init(void); + +#define STATS_INC(x) ++lwip_stats.x +#define STATS_DEC(x) --lwip_stats.x +#define STATS_INC_USED(x, y, type) do { lwip_stats.x.used = (type)(lwip_stats.x.used + y); \ + if (lwip_stats.x.max < lwip_stats.x.used) { \ + lwip_stats.x.max = lwip_stats.x.used; \ + } \ + } while(0) +#define STATS_GET(x) lwip_stats.x +#else /* LWIP_STATS */ +#define stats_init() +#define STATS_INC(x) +#define STATS_DEC(x) +#define STATS_INC_USED(x, y, type) +#endif /* LWIP_STATS */ + +#if TCP_STATS +#define TCP_STATS_INC(x) STATS_INC(x) +#define TCP_STATS_DISPLAY() stats_display_proto(&lwip_stats.tcp, "TCP") +#else +#define TCP_STATS_INC(x) +#define TCP_STATS_DISPLAY() +#endif + +#if UDP_STATS +#define UDP_STATS_INC(x) STATS_INC(x) +#define UDP_STATS_DISPLAY() stats_display_proto(&lwip_stats.udp, "UDP") +#else +#define UDP_STATS_INC(x) +#define UDP_STATS_DISPLAY() +#endif + +#if ICMP_STATS +#define ICMP_STATS_INC(x) STATS_INC(x) +#define ICMP_STATS_DISPLAY() stats_display_proto(&lwip_stats.icmp, "ICMP") +#else +#define ICMP_STATS_INC(x) +#define ICMP_STATS_DISPLAY() +#endif + +#if IGMP_STATS +#define IGMP_STATS_INC(x) STATS_INC(x) +#define IGMP_STATS_DISPLAY() stats_display_igmp(&lwip_stats.igmp, "IGMP") +#else +#define IGMP_STATS_INC(x) +#define IGMP_STATS_DISPLAY() +#endif + +#if IP_STATS +#define IP_STATS_INC(x) STATS_INC(x) +#define IP_STATS_DISPLAY() stats_display_proto(&lwip_stats.ip, "IP") +#else +#define IP_STATS_INC(x) +#define IP_STATS_DISPLAY() +#endif + +#if IPFRAG_STATS +#define IPFRAG_STATS_INC(x) STATS_INC(x) +#define IPFRAG_STATS_DISPLAY() stats_display_proto(&lwip_stats.ip_frag, "IP_FRAG") +#else +#define IPFRAG_STATS_INC(x) +#define IPFRAG_STATS_DISPLAY() +#endif + +#if ETHARP_STATS +#define ETHARP_STATS_INC(x) STATS_INC(x) +#define ETHARP_STATS_DISPLAY() stats_display_proto(&lwip_stats.etharp, "ETHARP") +#else +#define ETHARP_STATS_INC(x) +#define ETHARP_STATS_DISPLAY() +#endif + +#if LINK_STATS +#define LINK_STATS_INC(x) STATS_INC(x) +#define LINK_STATS_DISPLAY() stats_display_proto(&lwip_stats.link, "LINK") +#else +#define LINK_STATS_INC(x) +#define LINK_STATS_DISPLAY() +#endif + +#if MEM_STATS +#define MEM_STATS_AVAIL(x, y) lwip_stats.mem.x = y +#define MEM_STATS_INC(x) STATS_INC(mem.x) +#define MEM_STATS_INC_USED(x, y) STATS_INC_USED(mem, y, mem_size_t) +#define MEM_STATS_DEC_USED(x, y) lwip_stats.mem.x = (mem_size_t)((lwip_stats.mem.x) - (y)) +#define MEM_STATS_DISPLAY() stats_display_mem(&lwip_stats.mem, "HEAP") +#else +#define MEM_STATS_AVAIL(x, y) +#define MEM_STATS_INC(x) +#define MEM_STATS_INC_USED(x, y) +#define MEM_STATS_DEC_USED(x, y) +#define MEM_STATS_DISPLAY() +#endif + + #if MEMP_STATS +#define MEMP_STATS_DEC(x, i) STATS_DEC(memp[i]->x) +#define MEMP_STATS_DISPLAY(i) stats_display_memp(lwip_stats.memp[i], i) +#define MEMP_STATS_GET(x, i) STATS_GET(memp[i]->x) + #else +#define MEMP_STATS_DEC(x, i) +#define MEMP_STATS_DISPLAY(i) +#define MEMP_STATS_GET(x, i) 0 +#endif + +#if SYS_STATS +#define SYS_STATS_INC(x) STATS_INC(sys.x) +#define SYS_STATS_DEC(x) STATS_DEC(sys.x) +#define SYS_STATS_INC_USED(x) STATS_INC_USED(sys.x, 1, STAT_COUNTER) +#define SYS_STATS_DISPLAY() stats_display_sys(&lwip_stats.sys) +#else +#define SYS_STATS_INC(x) +#define SYS_STATS_DEC(x) +#define SYS_STATS_INC_USED(x) +#define SYS_STATS_DISPLAY() +#endif + +#if IP6_STATS +#define IP6_STATS_INC(x) STATS_INC(x) +#define IP6_STATS_DISPLAY() stats_display_proto(&lwip_stats.ip6, "IPv6") +#else +#define IP6_STATS_INC(x) 
+#define IP6_STATS_DISPLAY() +#endif + +#if ICMP6_STATS +#define ICMP6_STATS_INC(x) STATS_INC(x) +#define ICMP6_STATS_DISPLAY() stats_display_proto(&lwip_stats.icmp6, "ICMPv6") +#else +#define ICMP6_STATS_INC(x) +#define ICMP6_STATS_DISPLAY() +#endif + +#if IP6_FRAG_STATS +#define IP6_FRAG_STATS_INC(x) STATS_INC(x) +#define IP6_FRAG_STATS_DISPLAY() stats_display_proto(&lwip_stats.ip6_frag, "IPv6 FRAG") +#else +#define IP6_FRAG_STATS_INC(x) +#define IP6_FRAG_STATS_DISPLAY() +#endif + +#if MLD6_STATS +#define MLD6_STATS_INC(x) STATS_INC(x) +#define MLD6_STATS_DISPLAY() stats_display_igmp(&lwip_stats.mld6, "MLDv1") +#else +#define MLD6_STATS_INC(x) +#define MLD6_STATS_DISPLAY() +#endif + +#if ND6_STATS +#define ND6_STATS_INC(x) STATS_INC(x) +#define ND6_STATS_DISPLAY() stats_display_proto(&lwip_stats.nd6, "ND") +#else +#define ND6_STATS_INC(x) +#define ND6_STATS_DISPLAY() +#endif + +#if MIB2_STATS +#define MIB2_STATS_INC(x) STATS_INC(x) +#else +#define MIB2_STATS_INC(x) +#endif + +/* Display of statistics */ +#if LWIP_STATS_DISPLAY +void stats_display(void); +void stats_display_proto(struct stats_proto *proto, const char *name); +void stats_display_igmp(struct stats_igmp *igmp, const char *name); +void stats_display_mem(struct stats_mem *mem, const char *name); +void stats_display_memp(struct stats_mem *mem, int index); +void stats_display_sys(struct stats_sys *sys); +#else /* LWIP_STATS_DISPLAY */ +#define stats_display() +#define stats_display_proto(proto, name) +#define stats_display_igmp(igmp, name) +#define stats_display_mem(mem, name) +#define stats_display_memp(mem, index) +#define stats_display_sys(sys) +#endif /* LWIP_STATS_DISPLAY */ + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_STATS_H */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/sys.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/sys.h new file mode 100644 index 0000000..cd83470 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/sys.h @@ -0,0 +1,560 @@ +/** + * @file + * OS abstraction layer + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + */ + +#ifndef LWIP_HDR_SYS_H +#define LWIP_HDR_SYS_H + +#include "lwip/opt.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#if NO_SYS + +/* For a totally minimal and standalone system, we provide null + definitions of the sys_ functions. */ +typedef u8_t sys_sem_t; +typedef u8_t sys_mutex_t; +typedef u8_t sys_mbox_t; + +#define sys_sem_new(s, c) ERR_OK +#define sys_sem_signal(s) +#define sys_sem_wait(s) +#define sys_arch_sem_wait(s,t) +#define sys_sem_free(s) +#define sys_sem_valid(s) 0 +#define sys_sem_valid_val(s) 0 +#define sys_sem_set_invalid(s) +#define sys_sem_set_invalid_val(s) +#define sys_mutex_new(mu) ERR_OK +#define sys_mutex_lock(mu) +#define sys_mutex_unlock(mu) +#define sys_mutex_free(mu) +#define sys_mutex_valid(mu) 0 +#define sys_mutex_set_invalid(mu) +#define sys_mbox_new(m, s) ERR_OK +#define sys_mbox_fetch(m,d) +#define sys_mbox_tryfetch(m,d) +#define sys_mbox_post(m,d) +#define sys_mbox_trypost(m,d) +#define sys_mbox_free(m) +#define sys_mbox_valid(m) +#define sys_mbox_valid_val(m) +#define sys_mbox_set_invalid(m) +#define sys_mbox_set_invalid_val(m) + +#define sys_thread_new(n,t,a,s,p) + +#define sys_msleep(t) + +#else /* NO_SYS */ + +/** Return code for timeouts from sys_arch_mbox_fetch and sys_arch_sem_wait */ +#define SYS_ARCH_TIMEOUT 0xffffffffUL + +/** sys_mbox_tryfetch() returns SYS_MBOX_EMPTY if appropriate. + * For now we use the same magic value, but we allow this to change in future. + */ +#define SYS_MBOX_EMPTY SYS_ARCH_TIMEOUT + +#include "lwip/err.h" +#include "arch/sys_arch.h" + +/** Function prototype for thread functions */ +typedef void (*lwip_thread_fn)(void *arg); + +/* Function prototypes for functions to be implemented by platform ports + (in sys_arch.c) */ + +/* Mutex functions: */ + +/** Define LWIP_COMPAT_MUTEX if the port has no mutexes and binary semaphores + should be used instead */ +#ifndef LWIP_COMPAT_MUTEX +#define LWIP_COMPAT_MUTEX 0 +#endif + +#if LWIP_COMPAT_MUTEX +/* for old ports that don't have mutexes: define them to binary semaphores */ +#define sys_mutex_t sys_sem_t +#define sys_mutex_new(mutex) sys_sem_new(mutex, 1) +#define sys_mutex_lock(mutex) sys_sem_wait(mutex) +#define sys_mutex_unlock(mutex) sys_sem_signal(mutex) +#define sys_mutex_free(mutex) sys_sem_free(mutex) +#define sys_mutex_valid(mutex) sys_sem_valid(mutex) +#define sys_mutex_set_invalid(mutex) sys_sem_set_invalid(mutex) + +#else /* LWIP_COMPAT_MUTEX */ + +/** + * @ingroup sys_mutex + * Create a new mutex. + * Note that mutexes are expected to not be taken recursively by the lwIP code, + * so both implementation types (recursive or non-recursive) should work. + * The mutex is allocated to the memory that 'mutex' + * points to (which can be both a pointer or the actual OS structure). + * If the mutex has been created, ERR_OK should be returned. 
Returning any + * other error will provide a hint what went wrong, but except for assertions, + * no real error handling is implemented. + * + * @param mutex pointer to the mutex to create + * @return ERR_OK if successful, another err_t otherwise + */ +err_t sys_mutex_new(sys_mutex_t *mutex); +/** + * @ingroup sys_mutex + * Blocks the thread until the mutex can be grabbed. + * @param mutex the mutex to lock + */ +void sys_mutex_lock(sys_mutex_t *mutex); +/** + * @ingroup sys_mutex + * Releases the mutex previously locked through 'sys_mutex_lock()'. + * @param mutex the mutex to unlock + */ +void sys_mutex_unlock(sys_mutex_t *mutex); +/** + * @ingroup sys_mutex + * Deallocates a mutex. + * @param mutex the mutex to delete + */ +void sys_mutex_free(sys_mutex_t *mutex); +#ifndef sys_mutex_valid +/** + * @ingroup sys_mutex + * Returns 1 if the mutes is valid, 0 if it is not valid. + * When using pointers, a simple way is to check the pointer for != NULL. + * When directly using OS structures, implementing this may be more complex. + * This may also be a define, in which case the function is not prototyped. + */ +int sys_mutex_valid(sys_mutex_t *mutex); +#endif +#ifndef sys_mutex_set_invalid +/** + * @ingroup sys_mutex + * Invalidate a mutex so that sys_mutex_valid() returns 0. + * ATTENTION: This does NOT mean that the mutex shall be deallocated: + * sys_mutex_free() is always called before calling this function! + * This may also be a define, in which case the function is not prototyped. + */ +void sys_mutex_set_invalid(sys_mutex_t *mutex); +#endif +#endif /* LWIP_COMPAT_MUTEX */ + +/* Semaphore functions: */ + +/** + * @ingroup sys_sem + * Create a new semaphore + * Creates a new semaphore. The semaphore is allocated to the memory that 'sem' + * points to (which can be both a pointer or the actual OS structure). + * The "count" argument specifies the initial state of the semaphore (which is + * either 0 or 1). + * If the semaphore has been created, ERR_OK should be returned. Returning any + * other error will provide a hint what went wrong, but except for assertions, + * no real error handling is implemented. + * + * @param sem pointer to the semaphore to create + * @param count initial count of the semaphore + * @return ERR_OK if successful, another err_t otherwise + */ +err_t sys_sem_new(sys_sem_t *sem, u8_t count); +/** + * @ingroup sys_sem + * Signals a semaphore + * @param sem the semaphore to signal + */ +void sys_sem_signal(sys_sem_t *sem); +/** + * @ingroup sys_sem + * Blocks the thread while waiting for the semaphore to be signaled. If the + * "timeout" argument is non-zero, the thread should only be blocked for the + * specified time (measured in milliseconds). If the "timeout" argument is zero, + * the thread should be blocked until the semaphore is signalled. + * + * The return value is SYS_ARCH_TIMEOUT if the semaphore wasn't signaled within + * the specified time or any other value if it was signaled (with or without + * waiting). + * Notice that lwIP implements a function with a similar name, + * sys_sem_wait(), that uses the sys_arch_sem_wait() function. + * + * @param sem the semaphore to wait for + * @param timeout timeout in milliseconds to wait (0 = wait forever) + * @return SYS_ARCH_TIMEOUT on timeout, any other value on success + */ +u32_t sys_arch_sem_wait(sys_sem_t *sem, u32_t timeout); +/** + * @ingroup sys_sem + * Deallocates a semaphore. 
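+ *
+ * A typical lifecycle using these semaphore functions (the initial count of 0
+ * and the 100 ms timeout are arbitrary example values):
+ * @code
+ *   sys_sem_t sem;
+ *   if (sys_sem_new(&sem, 0) == ERR_OK) {
+ *     // ... some other context eventually calls sys_sem_signal(&sem) ...
+ *     if (sys_arch_sem_wait(&sem, 100) != SYS_ARCH_TIMEOUT) {
+ *       // signalled within 100 ms
+ *     }
+ *     sys_sem_free(&sem);
+ *   }
+ * @endcode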
+ * @param sem semaphore to delete + */ +void sys_sem_free(sys_sem_t *sem); +/** Wait for a semaphore - forever/no timeout */ +#define sys_sem_wait(sem) sys_arch_sem_wait(sem, 0) +#ifndef sys_sem_valid +/** + * @ingroup sys_sem + * Returns 1 if the semaphore is valid, 0 if it is not valid. + * When using pointers, a simple way is to check the pointer for != NULL. + * When directly using OS structures, implementing this may be more complex. + * This may also be a define, in which case the function is not prototyped. + */ +int sys_sem_valid(sys_sem_t *sem); +#endif +#ifndef sys_sem_set_invalid +/** + * @ingroup sys_sem + * Invalidate a semaphore so that sys_sem_valid() returns 0. + * ATTENTION: This does NOT mean that the semaphore shall be deallocated: + * sys_sem_free() is always called before calling this function! + * This may also be a define, in which case the function is not prototyped. + */ +void sys_sem_set_invalid(sys_sem_t *sem); +#endif +#ifndef sys_sem_valid_val +/** + * Same as sys_sem_valid() but taking a value, not a pointer + */ +#define sys_sem_valid_val(sem) sys_sem_valid(&(sem)) +#endif +#ifndef sys_sem_set_invalid_val +/** + * Same as sys_sem_set_invalid() but taking a value, not a pointer + */ +#define sys_sem_set_invalid_val(sem) sys_sem_set_invalid(&(sem)) +#endif + +#ifndef sys_msleep +/** + * @ingroup sys_misc + * Sleep for specified number of ms + */ +void sys_msleep(u32_t ms); /* only has a (close to) 1 ms resolution. */ +#endif + +/* Mailbox functions. */ + +/** + * @ingroup sys_mbox + * Creates an empty mailbox for maximum "size" elements. Elements stored + * in mailboxes are pointers. You have to define macros "_MBOX_SIZE" + * in your lwipopts.h, or ignore this parameter in your implementation + * and use a default size. + * If the mailbox has been created, ERR_OK should be returned. Returning any + * other error will provide a hint what went wrong, but except for assertions, + * no real error handling is implemented. + * + * @param mbox pointer to the mbox to create + * @param size (minimum) number of messages in this mbox + * @return ERR_OK if successful, another err_t otherwise + */ +err_t sys_mbox_new(sys_mbox_t *mbox, int size); +/** + * @ingroup sys_mbox + * Post a message to an mbox - may not fail + * -> blocks if full, only to be used from tasks NOT from ISR! + * + * @param mbox mbox to posts the message + * @param msg message to post (ATTENTION: can be NULL) + */ +void sys_mbox_post(sys_mbox_t *mbox, void *msg); +/** + * @ingroup sys_mbox + * Try to post a message to an mbox - may fail if full. + * Can be used from ISR (if the sys arch layer allows this). + * Returns ERR_MEM if it is full, else, ERR_OK if the "msg" is posted. + * + * @param mbox mbox to posts the message + * @param msg message to post (ATTENTION: can be NULL) + */ +err_t sys_mbox_trypost(sys_mbox_t *mbox, void *msg); +/** + * @ingroup sys_mbox + * Try to post a message to an mbox - may fail if full. + * To be be used from ISR. + * Returns ERR_MEM if it is full, else, ERR_OK if the "msg" is posted. + * + * @param mbox mbox to posts the message + * @param msg message to post (ATTENTION: can be NULL) + */ +err_t sys_mbox_trypost_fromisr(sys_mbox_t *mbox, void *msg); +/** + * @ingroup sys_mbox + * Blocks the thread until a message arrives in the mailbox, but does + * not block the thread longer than "timeout" milliseconds (similar to + * the sys_arch_sem_wait() function). If "timeout" is 0, the thread should + * be blocked until a message arrives. 
The "msg" argument is a result + * parameter that is set by the function (i.e., by doing "*msg = + * ptr"). The "msg" parameter maybe NULL to indicate that the message + * should be dropped. + * The return values are the same as for the sys_arch_sem_wait() function: + * SYS_ARCH_TIMEOUT if there was a timeout, any other value if a messages + * is received. + * + * Note that a function with a similar name, sys_mbox_fetch(), is + * implemented by lwIP. + * + * @param mbox mbox to get a message from + * @param msg pointer where the message is stored + * @param timeout maximum time (in milliseconds) to wait for a message (0 = wait forever) + * @return SYS_ARCH_TIMEOUT on timeout, any other value if a message has been received + */ +u32_t sys_arch_mbox_fetch(sys_mbox_t *mbox, void **msg, u32_t timeout); +/* Allow port to override with a macro, e.g. special timeout for sys_arch_mbox_fetch() */ +#ifndef sys_arch_mbox_tryfetch +/** + * @ingroup sys_mbox + * This is similar to sys_arch_mbox_fetch, however if a message is not + * present in the mailbox, it immediately returns with the code + * SYS_MBOX_EMPTY. On success 0 is returned. + * To allow for efficient implementations, this can be defined as a + * function-like macro in sys_arch.h instead of a normal function. For + * example, a naive implementation could be: + * \#define sys_arch_mbox_tryfetch(mbox,msg) sys_arch_mbox_fetch(mbox,msg,1) + * although this would introduce unnecessary delays. + * + * @param mbox mbox to get a message from + * @param msg pointer where the message is stored + * @return 0 (milliseconds) if a message has been received + * or SYS_MBOX_EMPTY if the mailbox is empty + */ +u32_t sys_arch_mbox_tryfetch(sys_mbox_t *mbox, void **msg); +#endif +/** + * For now, we map straight to sys_arch implementation. + */ +#define sys_mbox_tryfetch(mbox, msg) sys_arch_mbox_tryfetch(mbox, msg) +/** + * @ingroup sys_mbox + * Deallocates a mailbox. If there are messages still present in the + * mailbox when the mailbox is deallocated, it is an indication of a + * programming error in lwIP and the developer should be notified. + * + * @param mbox mbox to delete + */ +void sys_mbox_free(sys_mbox_t *mbox); +#define sys_mbox_fetch(mbox, msg) sys_arch_mbox_fetch(mbox, msg, 0) +#ifndef sys_mbox_valid +/** + * @ingroup sys_mbox + * Returns 1 if the mailbox is valid, 0 if it is not valid. + * When using pointers, a simple way is to check the pointer for != NULL. + * When directly using OS structures, implementing this may be more complex. + * This may also be a define, in which case the function is not prototyped. + */ +int sys_mbox_valid(sys_mbox_t *mbox); +#endif +#ifndef sys_mbox_set_invalid +/** + * @ingroup sys_mbox + * Invalidate a mailbox so that sys_mbox_valid() returns 0. + * ATTENTION: This does NOT mean that the mailbox shall be deallocated: + * sys_mbox_free() is always called before calling this function! + * This may also be a define, in which case the function is not prototyped. 
+ */ +void sys_mbox_set_invalid(sys_mbox_t *mbox); +#endif +#ifndef sys_mbox_valid_val +/** + * Same as sys_mbox_valid() but taking a value, not a pointer + */ +#define sys_mbox_valid_val(mbox) sys_mbox_valid(&(mbox)) +#endif +#ifndef sys_mbox_set_invalid_val +/** + * Same as sys_mbox_set_invalid() but taking a value, not a pointer + */ +#define sys_mbox_set_invalid_val(mbox) sys_mbox_set_invalid(&(mbox)) +#endif + + +/** + * @ingroup sys_misc + * The only thread function: + * Starts a new thread named "name" with priority "prio" that will begin its + * execution in the function "thread()". The "arg" argument will be passed as an + * argument to the thread() function. The stack size to used for this thread is + * the "stacksize" parameter. The id of the new thread is returned. Both the id + * and the priority are system dependent. + * ATTENTION: although this function returns a value, it MUST NOT FAIL (ports have to assert this!) + * + * @param name human-readable name for the thread (used for debugging purposes) + * @param thread thread-function + * @param arg parameter passed to 'thread' + * @param stacksize stack size in bytes for the new thread (may be ignored by ports) + * @param prio priority of the new thread (may be ignored by ports) */ +sys_thread_t sys_thread_new(const char *name, lwip_thread_fn thread, void *arg, int stacksize, int prio); + +#endif /* NO_SYS */ + +/** + * @ingroup sys_misc + * sys_init() must be called before anything else. + * Initialize the sys_arch layer. + */ +void sys_init(void); + +#ifndef sys_jiffies +/** + * Ticks/jiffies since power up. + */ +u32_t sys_jiffies(void); +#endif + +/** + * @ingroup sys_time + * Returns the current time in milliseconds, + * may be the same as sys_jiffies or at least based on it. + * Don't care for wraparound, this is only used for time diffs. + * Not implementing this function means you cannot use some modules (e.g. TCP + * timestamps, internal timeouts for NO_SYS==1). + */ +u32_t sys_now(void); + +/* Critical Region Protection */ +/* These functions must be implemented in the sys_arch.c file. + In some implementations they can provide a more light-weight protection + mechanism than using semaphores. Otherwise semaphores can be used for + implementation */ +#ifndef SYS_ARCH_PROTECT +/** SYS_LIGHTWEIGHT_PROT + * define SYS_LIGHTWEIGHT_PROT in lwipopts.h if you want inter-task protection + * for certain critical regions during buffer allocation, deallocation and memory + * allocation and deallocation. + */ +#if SYS_LIGHTWEIGHT_PROT + +/** + * @ingroup sys_prot + * SYS_ARCH_DECL_PROTECT + * declare a protection variable. This macro will default to defining a variable of + * type sys_prot_t. If a particular port needs a different implementation, then + * this macro may be defined in sys_arch.h. + */ +#define SYS_ARCH_DECL_PROTECT(lev) sys_prot_t lev +/** + * @ingroup sys_prot + * SYS_ARCH_PROTECT + * Perform a "fast" protect. This could be implemented by + * disabling interrupts for an embedded system or by using a semaphore or + * mutex. The implementation should allow calling SYS_ARCH_PROTECT when + * already protected. The old protection level is returned in the variable + * "lev". This macro will default to calling the sys_arch_protect() function + * which should be implemented in sys_arch.c. 
If a particular port needs a + * different implementation, then this macro may be defined in sys_arch.h + */ +#define SYS_ARCH_PROTECT(lev) lev = sys_arch_protect() +/** + * @ingroup sys_prot + * SYS_ARCH_UNPROTECT + * Perform a "fast" set of the protection level to "lev". This could be + * implemented by setting the interrupt level to "lev" within the MACRO or by + * using a semaphore or mutex. This macro will default to calling the + * sys_arch_unprotect() function which should be implemented in + * sys_arch.c. If a particular port needs a different implementation, then + * this macro may be defined in sys_arch.h + */ +#define SYS_ARCH_UNPROTECT(lev) sys_arch_unprotect(lev) +sys_prot_t sys_arch_protect(void); +void sys_arch_unprotect(sys_prot_t pval); + +#else + +#define SYS_ARCH_DECL_PROTECT(lev) +#define SYS_ARCH_PROTECT(lev) +#define SYS_ARCH_UNPROTECT(lev) + +#endif /* SYS_LIGHTWEIGHT_PROT */ + +#endif /* SYS_ARCH_PROTECT */ + +/* + * Macros to set/get and increase/decrease variables in a thread-safe way. + * Use these for accessing variable that are used from more than one thread. + */ + +#ifndef SYS_ARCH_INC +#define SYS_ARCH_INC(var, val) do { \ + SYS_ARCH_DECL_PROTECT(old_level); \ + SYS_ARCH_PROTECT(old_level); \ + var += val; \ + SYS_ARCH_UNPROTECT(old_level); \ + } while(0) +#endif /* SYS_ARCH_INC */ + +#ifndef SYS_ARCH_DEC +#define SYS_ARCH_DEC(var, val) do { \ + SYS_ARCH_DECL_PROTECT(old_level); \ + SYS_ARCH_PROTECT(old_level); \ + var -= val; \ + SYS_ARCH_UNPROTECT(old_level); \ + } while(0) +#endif /* SYS_ARCH_DEC */ + +#ifndef SYS_ARCH_GET +#define SYS_ARCH_GET(var, ret) do { \ + SYS_ARCH_DECL_PROTECT(old_level); \ + SYS_ARCH_PROTECT(old_level); \ + ret = var; \ + SYS_ARCH_UNPROTECT(old_level); \ + } while(0) +#endif /* SYS_ARCH_GET */ + +#ifndef SYS_ARCH_SET +#define SYS_ARCH_SET(var, val) do { \ + SYS_ARCH_DECL_PROTECT(old_level); \ + SYS_ARCH_PROTECT(old_level); \ + var = val; \ + SYS_ARCH_UNPROTECT(old_level); \ + } while(0) +#endif /* SYS_ARCH_SET */ + +#ifndef SYS_ARCH_LOCKED +#define SYS_ARCH_LOCKED(code) do { \ + SYS_ARCH_DECL_PROTECT(old_level); \ + SYS_ARCH_PROTECT(old_level); \ + code; \ + SYS_ARCH_UNPROTECT(old_level); \ + } while(0) +#endif /* SYS_ARCH_LOCKED */ + + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_SYS_H */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/tcp.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/tcp.h new file mode 100644 index 0000000..8c9ba98 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/tcp.h @@ -0,0 +1,500 @@ +/** + * @file + * TCP API (to be used from TCPIP thread)\n + * See also @ref tcp_raw + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. 
The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ +#ifndef LWIP_HDR_TCP_H +#define LWIP_HDR_TCP_H + +#include "lwip/opt.h" + +#if LWIP_TCP /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/tcpbase.h" +#include "lwip/mem.h" +#include "lwip/pbuf.h" +#include "lwip/ip.h" +#include "lwip/icmp.h" +#include "lwip/err.h" +#include "lwip/ip6.h" +#include "lwip/ip6_addr.h" + +#ifdef __cplusplus +extern "C" { +#endif + +struct tcp_pcb; +struct tcp_pcb_listen; + +/** Function prototype for tcp accept callback functions. Called when a new + * connection can be accepted on a listening pcb. + * + * @param arg Additional argument to pass to the callback function (@see tcp_arg()) + * @param newpcb The new connection pcb + * @param err An error code if there has been an error accepting. + * Only return ERR_ABRT if you have called tcp_abort from within the + * callback function! + */ +typedef err_t (*tcp_accept_fn)(void *arg, struct tcp_pcb *newpcb, err_t err); + +/** Function prototype for tcp receive callback functions. Called when data has + * been received. + * + * @param arg Additional argument to pass to the callback function (@see tcp_arg()) + * @param tpcb The connection pcb which received data + * @param p The received data (or NULL when the connection has been closed!) + * @param err An error code if there has been an error receiving + * Only return ERR_ABRT if you have called tcp_abort from within the + * callback function! + */ +typedef err_t (*tcp_recv_fn)(void *arg, struct tcp_pcb *tpcb, + struct pbuf *p, err_t err); + +/** Function prototype for tcp sent callback functions. Called when sent data has + * been acknowledged by the remote side. Use it to free corresponding resources. + * This also means that the pcb has now space available to send new data. + * + * @param arg Additional argument to pass to the callback function (@see tcp_arg()) + * @param tpcb The connection pcb for which data has been acknowledged + * @param len The amount of bytes acknowledged + * @return ERR_OK: try to send some data by calling tcp_output + * Only return ERR_ABRT if you have called tcp_abort from within the + * callback function! + */ +typedef err_t (*tcp_sent_fn)(void *arg, struct tcp_pcb *tpcb, + u16_t len); + +/** Function prototype for tcp poll callback functions. Called periodically as + * specified by @see tcp_poll. 
+ * + * @param arg Additional argument to pass to the callback function (@see tcp_arg()) + * @param tpcb tcp pcb + * @return ERR_OK: try to send some data by calling tcp_output + * Only return ERR_ABRT if you have called tcp_abort from within the + * callback function! + */ +typedef err_t (*tcp_poll_fn)(void *arg, struct tcp_pcb *tpcb); + +/** Function prototype for tcp error callback functions. Called when the pcb + * receives a RST or is unexpectedly closed for any other reason. + * + * @note The corresponding pcb is already freed when this callback is called! + * + * @param arg Additional argument to pass to the callback function (@see tcp_arg()) + * @param err Error code to indicate why the pcb has been closed + * ERR_ABRT: aborted through tcp_abort or by a TCP timer + * ERR_RST: the connection was reset by the remote host + */ +typedef void (*tcp_err_fn)(void *arg, err_t err); + +/** Function prototype for tcp connected callback functions. Called when a pcb + * is connected to the remote side after initiating a connection attempt by + * calling tcp_connect(). + * + * @param arg Additional argument to pass to the callback function (@see tcp_arg()) + * @param tpcb The connection pcb which is connected + * @param err An unused error code, always ERR_OK currently ;-) @todo! + * Only return ERR_ABRT if you have called tcp_abort from within the + * callback function! + * + * @note When a connection attempt fails, the error callback is currently called! + */ +typedef err_t (*tcp_connected_fn)(void *arg, struct tcp_pcb *tpcb, err_t err); + +#if LWIP_WND_SCALE +#define RCV_WND_SCALE(pcb, wnd) (((wnd) >> (pcb)->rcv_scale)) +#define SND_WND_SCALE(pcb, wnd) (((wnd) << (pcb)->snd_scale)) +#define TCPWND16(x) ((u16_t)LWIP_MIN((x), 0xFFFF)) +#define TCP_WND_MAX(pcb) ((tcpwnd_size_t)(((pcb)->flags & TF_WND_SCALE) ? TCP_WND : TCPWND16(TCP_WND))) +#else +#define RCV_WND_SCALE(pcb, wnd) (wnd) +#define SND_WND_SCALE(pcb, wnd) (wnd) +#define TCPWND16(x) (x) +#define TCP_WND_MAX(pcb) TCP_WND +#endif +/* Increments a tcpwnd_size_t and holds at max value rather than rollover */ +#define TCP_WND_INC(wnd, inc) do { \ + if ((tcpwnd_size_t)(wnd + inc) >= wnd) { \ + wnd = (tcpwnd_size_t)(wnd + inc); \ + } else { \ + wnd = (tcpwnd_size_t)-1; \ + } \ + } while(0) + +#if LWIP_TCP_SACK_OUT +/** SACK ranges to include in ACK packets. + * SACK entry is invalid if left==right. */ +struct tcp_sack_range { + /** Left edge of the SACK: the first acknowledged sequence number. */ + u32_t left; + /** Right edge of the SACK: the last acknowledged sequence number +1 (so first NOT acknowledged). */ + u32_t right; +}; +#endif /* LWIP_TCP_SACK_OUT */ + +/** Function prototype for deallocation of arguments. Called *just before* the + * pcb is freed, so don't expect to be able to do anything with this pcb! 
+ * + * @param id ext arg id (allocated via @ref tcp_ext_arg_alloc_id) + * @param data pointer to the data (set via @ref tcp_ext_arg_set before) + */ +typedef void (*tcp_extarg_callback_pcb_destroyed_fn)(u8_t id, void *data); + +/** Function prototype to transition arguments from a listening pcb to an accepted pcb + * + * @param id ext arg id (allocated via @ref tcp_ext_arg_alloc_id) + * @param lpcb the listening pcb accepting a connection + * @param cpcb the newly allocated connection pcb + * @return ERR_OK if OK, any error if connection should be dropped + */ +typedef err_t (*tcp_extarg_callback_passive_open_fn)(u8_t id, struct tcp_pcb_listen *lpcb, struct tcp_pcb *cpcb); + +/** A table of callback functions that is invoked for ext arguments */ +struct tcp_ext_arg_callbacks { + /** @ref tcp_extarg_callback_pcb_destroyed_fn */ + tcp_extarg_callback_pcb_destroyed_fn destroy; + /** @ref tcp_extarg_callback_passive_open_fn */ + tcp_extarg_callback_passive_open_fn passive_open; +}; + +#define LWIP_TCP_PCB_NUM_EXT_ARG_ID_INVALID 0xFF + +#if LWIP_TCP_PCB_NUM_EXT_ARGS +/* This is the structure for ext args in tcp pcbs (used as array) */ +struct tcp_pcb_ext_args { + const struct tcp_ext_arg_callbacks *callbacks; + void *data; +}; +/* This is a helper define to prevent zero size arrays if disabled */ +#define TCP_PCB_EXTARGS struct tcp_pcb_ext_args ext_args[LWIP_TCP_PCB_NUM_EXT_ARGS]; +#else +#define TCP_PCB_EXTARGS +#endif + +typedef u16_t tcpflags_t; +#define TCP_ALLFLAGS 0xffffU + +/** + * members common to struct tcp_pcb and struct tcp_listen_pcb + */ +#define TCP_PCB_COMMON(type) \ + type *next; /* for the linked list */ \ + void *callback_arg; \ + TCP_PCB_EXTARGS \ + enum tcp_state state; /* TCP state */ \ + u8_t prio; \ + /* ports are in host byte order */ \ + u16_t local_port + + +/** the TCP protocol control block for listening pcbs */ +struct tcp_pcb_listen { +/** Common members of all PCB types */ + IP_PCB; +/** Protocol specific PCB members */ + TCP_PCB_COMMON(struct tcp_pcb_listen); + +#if LWIP_CALLBACK_API + /* Function to call when a listener has been connected. */ + tcp_accept_fn accept; +#endif /* LWIP_CALLBACK_API */ + +#if TCP_LISTEN_BACKLOG + u8_t backlog; + u8_t accepts_pending; +#endif /* TCP_LISTEN_BACKLOG */ +}; + + +/** the TCP protocol control block */ +struct tcp_pcb { +/** common PCB members */ + IP_PCB; +/** protocol specific PCB members */ + TCP_PCB_COMMON(struct tcp_pcb); + + /* ports are in host byte order */ + u16_t remote_port; + + tcpflags_t flags; +#define TF_ACK_DELAY 0x01U /* Delayed ACK. */ +#define TF_ACK_NOW 0x02U /* Immediate ACK. */ +#define TF_INFR 0x04U /* In fast recovery. */ +#define TF_CLOSEPEND 0x08U /* If this is set, tcp_close failed to enqueue the FIN (retried in tcp_tmr) */ +#define TF_RXCLOSED 0x10U /* rx closed by tcp_shutdown */ +#define TF_FIN 0x20U /* Connection was closed locally (FIN segment enqueued). 
*/ +#define TF_NODELAY 0x40U /* Disable Nagle algorithm */ +#define TF_NAGLEMEMERR 0x80U /* nagle enabled, memerr, try to output to prevent delayed ACK to happen */ +#if LWIP_WND_SCALE +#define TF_WND_SCALE 0x0100U /* Window Scale option enabled */ +#endif +#if TCP_LISTEN_BACKLOG +#define TF_BACKLOGPEND 0x0200U /* If this is set, a connection pcb has increased the backlog on its listener */ +#endif +#if LWIP_TCP_TIMESTAMPS +#define TF_TIMESTAMP 0x0400U /* Timestamp option enabled */ +#endif +#define TF_RTO 0x0800U /* RTO timer has fired, in-flight data moved to unsent and being retransmitted */ +#if LWIP_TCP_SACK_OUT +#define TF_SACK 0x1000U /* Selective ACKs enabled */ +#endif + + /* the rest of the fields are in host byte order + as we have to do some math with them */ + + /* Timers */ + u8_t polltmr, pollinterval; + u8_t last_timer; + u32_t tmr; + + /* receiver variables */ + u32_t rcv_nxt; /* next seqno expected */ + tcpwnd_size_t rcv_wnd; /* receiver window available */ + tcpwnd_size_t rcv_ann_wnd; /* receiver window to announce */ + u32_t rcv_ann_right_edge; /* announced right edge of window */ + +#if LWIP_TCP_SACK_OUT + /* SACK ranges to include in ACK packets (entry is invalid if left==right) */ + struct tcp_sack_range rcv_sacks[LWIP_TCP_MAX_SACK_NUM]; +#define LWIP_TCP_SACK_VALID(pcb, idx) ((pcb)->rcv_sacks[idx].left != (pcb)->rcv_sacks[idx].right) +#endif /* LWIP_TCP_SACK_OUT */ + + /* Retransmission timer. */ + s16_t rtime; + + u16_t mss; /* maximum segment size */ + + /* RTT (round trip time) estimation variables */ + u32_t rttest; /* RTT estimate in 500ms ticks */ + u32_t rtseq; /* sequence number being timed */ + s16_t sa, sv; /* @see "Congestion Avoidance and Control" by Van Jacobson and Karels */ + + s16_t rto; /* retransmission time-out (in ticks of TCP_SLOW_INTERVAL) */ + u8_t nrtx; /* number of retransmissions */ + + /* fast retransmit/recovery */ + u8_t dupacks; + u32_t lastack; /* Highest acknowledged seqno. */ + + /* congestion avoidance/control variables */ + tcpwnd_size_t cwnd; + tcpwnd_size_t ssthresh; + + /* first byte following last rto byte */ + u32_t rto_end; + + /* sender variables */ + u32_t snd_nxt; /* next new seqno to be sent */ + u32_t snd_wl1, snd_wl2; /* Sequence and acknowledgement numbers of last + window update. */ + u32_t snd_lbb; /* Sequence number of next byte to be buffered. */ + tcpwnd_size_t snd_wnd; /* sender window */ + tcpwnd_size_t snd_wnd_max; /* the maximum sender window announced by the remote host */ + + tcpwnd_size_t snd_buf; /* Available buffer space for sending (in bytes). */ +#define TCP_SNDQUEUELEN_OVERFLOW (0xffffU-3) + u16_t snd_queuelen; /* Number of pbufs currently in the send buffer. */ + +#if TCP_OVERSIZE + /* Extra bytes available at the end of the last pbuf in unsent. */ + u16_t unsent_oversize; +#endif /* TCP_OVERSIZE */ + + tcpwnd_size_t bytes_acked; + + /* These are ordered by sequence number: */ + struct tcp_seg *unsent; /* Unsent (queued) segments. */ + struct tcp_seg *unacked; /* Sent but unacknowledged segments. */ +#if TCP_QUEUE_OOSEQ + struct tcp_seg *ooseq; /* Received out of sequence segments. */ +#endif /* TCP_QUEUE_OOSEQ */ + + struct pbuf *refused_data; /* Data previously received but not yet taken by upper layer */ + +#if LWIP_CALLBACK_API || TCP_LISTEN_BACKLOG + struct tcp_pcb_listen* listener; +#endif /* LWIP_CALLBACK_API || TCP_LISTEN_BACKLOG */ + +#if LWIP_CALLBACK_API + /* Function to be called when more send buffer space is available. 
*/ + tcp_sent_fn sent; + /* Function to be called when (in-sequence) data has arrived. */ + tcp_recv_fn recv; + /* Function to be called when a connection has been set up. */ + tcp_connected_fn connected; + /* Function which is called periodically. */ + tcp_poll_fn poll; + /* Function to be called whenever a fatal error occurs. */ + tcp_err_fn errf; +#endif /* LWIP_CALLBACK_API */ + +#if LWIP_TCP_TIMESTAMPS + u32_t ts_lastacksent; + u32_t ts_recent; +#endif /* LWIP_TCP_TIMESTAMPS */ + + /* idle time before KEEPALIVE is sent */ + u32_t keep_idle; +#if LWIP_TCP_KEEPALIVE + u32_t keep_intvl; + u32_t keep_cnt; +#endif /* LWIP_TCP_KEEPALIVE */ + + /* Persist timer counter */ + u8_t persist_cnt; + /* Persist timer back-off */ + u8_t persist_backoff; + /* Number of persist probes */ + u8_t persist_probe; + + /* KEEPALIVE counter */ + u8_t keep_cnt_sent; + +#if LWIP_WND_SCALE + u8_t snd_scale; + u8_t rcv_scale; +#endif +}; + +#if LWIP_EVENT_API + +enum lwip_event { + LWIP_EVENT_ACCEPT, + LWIP_EVENT_SENT, + LWIP_EVENT_RECV, + LWIP_EVENT_CONNECTED, + LWIP_EVENT_POLL, + LWIP_EVENT_ERR +}; + +err_t lwip_tcp_event(void *arg, struct tcp_pcb *pcb, + enum lwip_event, + struct pbuf *p, + u16_t size, + err_t err); + +#endif /* LWIP_EVENT_API */ + +/* Application program's interface: */ +struct tcp_pcb * tcp_new (void); +struct tcp_pcb * tcp_new_ip_type (u8_t type); + +void tcp_arg (struct tcp_pcb *pcb, void *arg); +#if LWIP_CALLBACK_API +void tcp_recv (struct tcp_pcb *pcb, tcp_recv_fn recv); +void tcp_sent (struct tcp_pcb *pcb, tcp_sent_fn sent); +void tcp_err (struct tcp_pcb *pcb, tcp_err_fn err); +void tcp_accept (struct tcp_pcb *pcb, tcp_accept_fn accept); +#endif /* LWIP_CALLBACK_API */ +void tcp_poll (struct tcp_pcb *pcb, tcp_poll_fn poll, u8_t interval); + +#define tcp_set_flags(pcb, set_flags) do { (pcb)->flags = (tcpflags_t)((pcb)->flags | (set_flags)); } while(0) +#define tcp_clear_flags(pcb, clr_flags) do { (pcb)->flags = (tcpflags_t)((pcb)->flags & (tcpflags_t)(~(clr_flags) & TCP_ALLFLAGS)); } while(0) +#define tcp_is_flag_set(pcb, flag) (((pcb)->flags & (flag)) != 0) + +#if LWIP_TCP_TIMESTAMPS +#define tcp_mss(pcb) (((pcb)->flags & TF_TIMESTAMP) ? ((pcb)->mss - 12) : (pcb)->mss) +#else /* LWIP_TCP_TIMESTAMPS */ +/** @ingroup tcp_raw */ +#define tcp_mss(pcb) ((pcb)->mss) +#endif /* LWIP_TCP_TIMESTAMPS */ +/** @ingroup tcp_raw */ +#define tcp_sndbuf(pcb) (TCPWND16((pcb)->snd_buf)) +/** @ingroup tcp_raw */ +#define tcp_sndqueuelen(pcb) ((pcb)->snd_queuelen) +/** @ingroup tcp_raw */ +#define tcp_nagle_disable(pcb) tcp_set_flags(pcb, TF_NODELAY) +/** @ingroup tcp_raw */ +#define tcp_nagle_enable(pcb) tcp_clear_flags(pcb, TF_NODELAY) +/** @ingroup tcp_raw */ +#define tcp_nagle_disabled(pcb) tcp_is_flag_set(pcb, TF_NODELAY) + +#if TCP_LISTEN_BACKLOG +#define tcp_backlog_set(pcb, new_backlog) do { \ + LWIP_ASSERT("pcb->state == LISTEN (called for wrong pcb?)", (pcb)->state == LISTEN); \ + ((struct tcp_pcb_listen *)(pcb))->backlog = ((new_backlog) ? 
(new_backlog) : 1); } while(0) +void tcp_backlog_delayed(struct tcp_pcb* pcb); +void tcp_backlog_accepted(struct tcp_pcb* pcb); +#else /* TCP_LISTEN_BACKLOG */ +#define tcp_backlog_set(pcb, new_backlog) +#define tcp_backlog_delayed(pcb) +#define tcp_backlog_accepted(pcb) +#endif /* TCP_LISTEN_BACKLOG */ +#define tcp_accepted(pcb) do { LWIP_UNUSED_ARG(pcb); } while(0) /* compatibility define, not needed any more */ + +void tcp_recved (struct tcp_pcb *pcb, u16_t len); +err_t tcp_bind (struct tcp_pcb *pcb, const ip_addr_t *ipaddr, + u16_t port); +void tcp_bind_netif(struct tcp_pcb *pcb, const struct netif *netif); +err_t tcp_connect (struct tcp_pcb *pcb, const ip_addr_t *ipaddr, + u16_t port, tcp_connected_fn connected); + +struct tcp_pcb * tcp_listen_with_backlog_and_err(struct tcp_pcb *pcb, u8_t backlog, err_t *err); +struct tcp_pcb * tcp_listen_with_backlog(struct tcp_pcb *pcb, u8_t backlog); +/** @ingroup tcp_raw */ +#define tcp_listen(pcb) tcp_listen_with_backlog(pcb, TCP_DEFAULT_LISTEN_BACKLOG) + +void tcp_abort (struct tcp_pcb *pcb); +err_t tcp_close (struct tcp_pcb *pcb); +err_t tcp_shutdown(struct tcp_pcb *pcb, int shut_rx, int shut_tx); + +err_t tcp_write (struct tcp_pcb *pcb, const void *dataptr, u16_t len, + u8_t apiflags); + +void tcp_setprio (struct tcp_pcb *pcb, u8_t prio); + +err_t tcp_output (struct tcp_pcb *pcb); + +err_t tcp_tcp_get_tcp_addrinfo(struct tcp_pcb *pcb, int local, ip_addr_t *addr, u16_t *port); + +#define tcp_dbg_get_tcp_state(pcb) ((pcb)->state) + +/* for compatibility with older implementation */ +#define tcp_new_ip6() tcp_new_ip_type(IPADDR_TYPE_V6) + +#if LWIP_TCP_PCB_NUM_EXT_ARGS +u8_t tcp_ext_arg_alloc_id(void); +void tcp_ext_arg_set_callbacks(struct tcp_pcb *pcb, uint8_t id, const struct tcp_ext_arg_callbacks * const callbacks); +void tcp_ext_arg_set(struct tcp_pcb *pcb, uint8_t id, void *arg); +void *tcp_ext_arg_get(const struct tcp_pcb *pcb, uint8_t id); +#endif + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_TCP */ + +#endif /* LWIP_HDR_TCP_H */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/tcpbase.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/tcpbase.h new file mode 100644 index 0000000..6614b80 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/tcpbase.h @@ -0,0 +1,88 @@ +/** + * @file + * Base TCP API definitions shared by TCP and ALTCP\n + * See also @ref tcp_raw + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
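/*
 * Illustrative sketch, not part of this diff: minimal wiring of the raw TCP
 * API declared above (listen, accept, receive). It must run in tcpip-thread
 * context; the function names and port number are placeholders.
 */
#include "lwip/tcp.h"

static err_t example_recv(void *arg, struct tcp_pcb *tpcb, struct pbuf *p, err_t err)
{
  LWIP_UNUSED_ARG(arg);
  LWIP_UNUSED_ARG(err);
  if (p == NULL) {                /* remote side closed the connection */
    return tcp_close(tpcb);
  }
  tcp_recved(tpcb, p->tot_len);   /* re-open the receive window */
  pbuf_free(p);                   /* we own the pbuf when returning ERR_OK */
  return ERR_OK;
}

static err_t example_accept(void *arg, struct tcp_pcb *newpcb, err_t err)
{
  LWIP_UNUSED_ARG(arg);
  LWIP_UNUSED_ARG(err);
  tcp_arg(newpcb, NULL);
  tcp_recv(newpcb, example_recv);
  return ERR_OK;
}

void example_tcp_listen(void)
{
  struct tcp_pcb *pcb = tcp_new();
  if (pcb == NULL) {
    return;
  }
  if (tcp_bind(pcb, IP_ADDR_ANY, 7) != ERR_OK) {  /* port 7 is arbitrary */
    tcp_close(pcb);
    return;
  }
  pcb = tcp_listen_with_backlog(pcb, 1);  /* returns a smaller listen pcb */
  if (pcb != NULL) {
    tcp_accept(pcb, example_accept);
  }
}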
IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ +#ifndef LWIP_HDR_TCPBASE_H +#define LWIP_HDR_TCPBASE_H + +#include "lwip/opt.h" + +#if LWIP_TCP /* don't build if not configured for use in lwipopts.h */ + +#ifdef __cplusplus +extern "C" { +#endif + + +#if LWIP_WND_SCALE +typedef u32_t tcpwnd_size_t; +#else +typedef u16_t tcpwnd_size_t; +#endif + +enum tcp_state { + CLOSED = 0, + LISTEN = 1, + SYN_SENT = 2, + SYN_RCVD = 3, + ESTABLISHED = 4, + FIN_WAIT_1 = 5, + FIN_WAIT_2 = 6, + CLOSE_WAIT = 7, + CLOSING = 8, + LAST_ACK = 9, + TIME_WAIT = 10 +}; +/* ATTENTION: this depends on state number ordering! */ +#define TCP_STATE_IS_CLOSING(state) ((state) >= FIN_WAIT_1) + +/* Flags for "apiflags" parameter in tcp_write */ +#define TCP_WRITE_FLAG_COPY 0x01 +#define TCP_WRITE_FLAG_MORE 0x02 + +#define TCP_PRIO_MIN 1 +#define TCP_PRIO_NORMAL 64 +#define TCP_PRIO_MAX 127 + +const char* tcp_debug_state_str(enum tcp_state s); + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_TCP */ + +#endif /* LWIP_HDR_TCPBASE_H */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/tcpip.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/tcpip.h new file mode 100644 index 0000000..ec14701 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/tcpip.h @@ -0,0 +1,113 @@ +/** + * @file + * Functions to sync with TCPIP thread + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
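/*
 * Illustrative sketch, not part of this diff: enqueueing data with tcp_write()
 * and the TCP_WRITE_FLAG_COPY flag defined above, bounded by the space that
 * tcp_sndbuf() currently reports. The helper name is a placeholder.
 */
#include "lwip/tcp.h"

static err_t example_send(struct tcp_pcb *pcb, const void *data, u16_t len)
{
  err_t err;

  if (len > tcp_sndbuf(pcb)) {
    len = tcp_sndbuf(pcb);        /* only enqueue what currently fits */
  }
  /* COPY: lwIP copies the payload, so 'data' may live on the caller's stack. */
  err = tcp_write(pcb, data, len, TCP_WRITE_FLAG_COPY);
  if (err == ERR_OK) {
    err = tcp_output(pcb);        /* push the enqueued segments out now */
  }
  return err;                     /* ERR_MEM: retry later, e.g. from the sent callback */
}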
IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ +#ifndef LWIP_HDR_TCPIP_H +#define LWIP_HDR_TCPIP_H + +#include "lwip/opt.h" + +#if !NO_SYS /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/err.h" +#include "lwip/timeouts.h" +#include "lwip/netif.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#if LWIP_TCPIP_CORE_LOCKING +/** The global semaphore to lock the stack. */ +extern sys_mutex_t lock_tcpip_core; +#if !defined LOCK_TCPIP_CORE || defined __DOXYGEN__ +/** Lock lwIP core mutex (needs @ref LWIP_TCPIP_CORE_LOCKING 1) */ +#define LOCK_TCPIP_CORE() sys_mutex_lock(&lock_tcpip_core) +/** Unlock lwIP core mutex (needs @ref LWIP_TCPIP_CORE_LOCKING 1) */ +#define UNLOCK_TCPIP_CORE() sys_mutex_unlock(&lock_tcpip_core) +#endif /* LOCK_TCPIP_CORE */ +#else /* LWIP_TCPIP_CORE_LOCKING */ +#define LOCK_TCPIP_CORE() +#define UNLOCK_TCPIP_CORE() +#endif /* LWIP_TCPIP_CORE_LOCKING */ + +struct pbuf; +struct netif; + +/** Function prototype for the init_done function passed to tcpip_init */ +typedef void (*tcpip_init_done_fn)(void *arg); +/** Function prototype for functions passed to tcpip_callback() */ +typedef void (*tcpip_callback_fn)(void *ctx); + +/* Forward declarations */ +struct tcpip_callback_msg; + +void tcpip_init(tcpip_init_done_fn tcpip_init_done, void *arg); + +err_t tcpip_inpkt(struct pbuf *p, struct netif *inp, netif_input_fn input_fn); +err_t tcpip_input(struct pbuf *p, struct netif *inp); + +err_t tcpip_try_callback(tcpip_callback_fn function, void *ctx); +err_t tcpip_callback(tcpip_callback_fn function, void *ctx); +/** @ingroup lwip_os + * @deprecated use tcpip_try_callback() or tcpip_callback() instead + */ +#define tcpip_callback_with_block(function, ctx, block) ((block != 0)? 
tcpip_callback(function, ctx) : tcpip_try_callback(function, ctx)) + +struct tcpip_callback_msg* tcpip_callbackmsg_new(tcpip_callback_fn function, void *ctx); +void tcpip_callbackmsg_delete(struct tcpip_callback_msg* msg); +err_t tcpip_callbackmsg_trycallback(struct tcpip_callback_msg* msg); +err_t tcpip_callbackmsg_trycallback_fromisr(struct tcpip_callback_msg* msg); + +/* free pbufs or heap memory from another context without blocking */ +err_t pbuf_free_callback(struct pbuf *p); +err_t mem_free_callback(void *m); + +#if LWIP_TCPIP_TIMEOUT && LWIP_TIMERS +err_t tcpip_timeout(u32_t msecs, sys_timeout_handler h, void *arg); +err_t tcpip_untimeout(sys_timeout_handler h, void *arg); +#endif /* LWIP_TCPIP_TIMEOUT && LWIP_TIMERS */ + +#ifdef TCPIP_THREAD_TEST +int tcpip_thread_poll_one(void); +#endif + +#ifdef __cplusplus +} +#endif + +#endif /* !NO_SYS */ + +#endif /* LWIP_HDR_TCPIP_H */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/timeouts.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/timeouts.h new file mode 100644 index 0000000..3ab3c0e --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/timeouts.h @@ -0,0 +1,128 @@ +/** + * @file + * Timer implementations + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * Simon Goldschmidt + * + */ +#ifndef LWIP_HDR_TIMEOUTS_H +#define LWIP_HDR_TIMEOUTS_H + +#include "lwip/opt.h" +#include "lwip/err.h" +#if !NO_SYS +#include "lwip/sys.h" +#endif + +#ifdef __cplusplus +extern "C" { +#endif + +#ifndef LWIP_DEBUG_TIMERNAMES +#ifdef LWIP_DEBUG +#define LWIP_DEBUG_TIMERNAMES SYS_DEBUG +#else /* LWIP_DEBUG */ +#define LWIP_DEBUG_TIMERNAMES 0 +#endif /* LWIP_DEBUG*/ +#endif + +/** Returned by sys_timeouts_sleeptime() to indicate there is no timer, so we + * can sleep forever. 
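/*
 * Illustrative sketch, not part of this diff: code running outside the tcpip
 * thread defers a raw-API call into it with tcpip_callback(), declared above.
 * Function names are placeholders.
 */
#include "lwip/tcpip.h"
#include "lwip/tcp.h"

static void close_in_tcpip_thread(void *ctx)
{
  /* Executed inside the tcpip thread, where raw-API calls are safe. */
  (void)tcp_close((struct tcp_pcb *)ctx);
}

void example_request_close(struct tcp_pcb *pcb)
{
  /* Posts a message to the tcpip mailbox; ERR_OK means it was queued. */
  if (tcpip_callback(close_in_tcpip_thread, pcb) != ERR_OK) {
    /* mailbox unavailable: the caller decides whether and how to retry */
  }
}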
+ */ +#define SYS_TIMEOUTS_SLEEPTIME_INFINITE 0xFFFFFFFF + +/** Function prototype for a stack-internal timer function that has to be + * called at a defined interval */ +typedef void (* lwip_cyclic_timer_handler)(void); + +/** This struct contains information about a stack-internal timer function + that has to be called at a defined interval */ +struct lwip_cyclic_timer { + u32_t interval_ms; + lwip_cyclic_timer_handler handler; +#if LWIP_DEBUG_TIMERNAMES + const char* handler_name; +#endif /* LWIP_DEBUG_TIMERNAMES */ +}; + +/** This array contains all stack-internal cyclic timers. To get the number of + * timers, use lwip_num_cyclic_timers */ +extern const struct lwip_cyclic_timer lwip_cyclic_timers[]; +/** Array size of lwip_cyclic_timers[] */ +extern const int lwip_num_cyclic_timers; + +#if LWIP_TIMERS + +/** Function prototype for a timeout callback function. Register such a function + * using sys_timeout(). + * + * @param arg Additional argument to pass to the function - set up by sys_timeout() + */ +typedef void (* sys_timeout_handler)(void *arg); + +struct sys_timeo { + struct sys_timeo *next; + u32_t time; + sys_timeout_handler h; + void *arg; +#if LWIP_DEBUG_TIMERNAMES + const char* handler_name; +#endif /* LWIP_DEBUG_TIMERNAMES */ +}; + +void sys_timeouts_init(void); + +#if LWIP_DEBUG_TIMERNAMES +void sys_timeout_debug(u32_t msecs, sys_timeout_handler handler, void *arg, const char* handler_name); +#define sys_timeout(msecs, handler, arg) sys_timeout_debug(msecs, handler, arg, #handler) +#else /* LWIP_DEBUG_TIMERNAMES */ +void sys_timeout(u32_t msecs, sys_timeout_handler handler, void *arg); +#endif /* LWIP_DEBUG_TIMERNAMES */ + +void sys_untimeout(sys_timeout_handler handler, void *arg); +void sys_restart_timeouts(void); +void sys_check_timeouts(void); +u32_t sys_timeouts_sleeptime(void); + +#if LWIP_TESTMODE +struct sys_timeo** sys_timeouts_get_next_timeout(void); +void lwip_cyclic_timer(void *arg); +#endif + +#endif /* LWIP_TIMERS */ + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_TIMEOUTS_H */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/udp.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/udp.h new file mode 100644 index 0000000..3794af4 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/udp.h @@ -0,0 +1,195 @@ +/** + * @file + * UDP API (to be used from TCPIP thread)\n + * See also @ref udp_raw + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
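/*
 * Illustrative sketch, not part of this diff: a self re-arming application
 * timer built on sys_timeout()/sys_check_timeouts() declared above. The
 * callback name and the 500 ms period are placeholders.
 */
#include "lwip/timeouts.h"

static void example_periodic(void *arg)
{
  /* periodic work goes here (runs in the tcpip thread, or from the main
     loop when NO_SYS==1) */
  sys_timeout(500, example_periodic, arg);   /* re-arm for the next period */
}

void example_start_timer(void)
{
  sys_timeout(500, example_periodic, NULL);
}

/* With NO_SYS==1 the main loop must call sys_check_timeouts() regularly,
 * e.g.  for (;;) { sys_check_timeouts(); ... poll the driver ... }
 */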
IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ +#ifndef LWIP_HDR_UDP_H +#define LWIP_HDR_UDP_H + +#include "lwip/opt.h" + +#if LWIP_UDP /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/pbuf.h" +#include "lwip/netif.h" +#include "lwip/ip_addr.h" +#include "lwip/ip.h" +#include "lwip/ip6_addr.h" +#include "lwip/prot/udp.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#define UDP_FLAGS_NOCHKSUM 0x01U +#define UDP_FLAGS_UDPLITE 0x02U +#define UDP_FLAGS_CONNECTED 0x04U +#define UDP_FLAGS_MULTICAST_LOOP 0x08U + +struct udp_pcb; + +/** Function prototype for udp pcb receive callback functions + * addr and port are in same byte order as in the pcb + * The callback is responsible for freeing the pbuf + * if it's not used any more. + * + * ATTENTION: Be aware that 'addr' might point into the pbuf 'p' so freeing this pbuf + * can make 'addr' invalid, too. + * + * @param arg user supplied argument (udp_pcb.recv_arg) + * @param pcb the udp_pcb which received data + * @param p the packet buffer that was received + * @param addr the remote IP address from which the packet was received + * @param port the remote port from which the packet was received + */ +typedef void (*udp_recv_fn)(void *arg, struct udp_pcb *pcb, struct pbuf *p, + const ip_addr_t *addr, u16_t port); + +/** the UDP protocol control block */ +struct udp_pcb { +/** Common members of all PCB types */ + IP_PCB; + +/* Protocol specific PCB members */ + + struct udp_pcb *next; + + u8_t flags; + /** ports are in host byte order */ + u16_t local_port, remote_port; + +#if LWIP_MULTICAST_TX_OPTIONS +#if LWIP_IPV4 + /** outgoing network interface for multicast packets, by IPv4 address (if not 'any') */ + ip4_addr_t mcast_ip4; +#endif /* LWIP_IPV4 */ + /** outgoing network interface for multicast packets, by interface index (if nonzero) */ + u8_t mcast_ifindex; + /** TTL for outgoing multicast packets */ + u8_t mcast_ttl; +#endif /* LWIP_MULTICAST_TX_OPTIONS */ + +#if LWIP_UDPLITE + /** used for UDP_LITE only */ + u16_t chksum_len_rx, chksum_len_tx; +#endif /* LWIP_UDPLITE */ + + /** receive callback function */ + udp_recv_fn recv; + /** user-supplied argument for the recv callback */ + void *recv_arg; +}; +/* udp_pcbs export for external reference (e.g. SNMP agent) */ +extern struct udp_pcb *udp_pcbs; + +/* The following functions is the application layer interface to the + UDP code. 
*/ +struct udp_pcb * udp_new (void); +struct udp_pcb * udp_new_ip_type(u8_t type); +void udp_remove (struct udp_pcb *pcb); +err_t udp_bind (struct udp_pcb *pcb, const ip_addr_t *ipaddr, + u16_t port); +void udp_bind_netif (struct udp_pcb *pcb, const struct netif* netif); +err_t udp_connect (struct udp_pcb *pcb, const ip_addr_t *ipaddr, + u16_t port); +void udp_disconnect (struct udp_pcb *pcb); +void udp_recv (struct udp_pcb *pcb, udp_recv_fn recv, + void *recv_arg); +err_t udp_sendto_if (struct udp_pcb *pcb, struct pbuf *p, + const ip_addr_t *dst_ip, u16_t dst_port, + struct netif *netif); +err_t udp_sendto_if_src(struct udp_pcb *pcb, struct pbuf *p, + const ip_addr_t *dst_ip, u16_t dst_port, + struct netif *netif, const ip_addr_t *src_ip); +err_t udp_sendto (struct udp_pcb *pcb, struct pbuf *p, + const ip_addr_t *dst_ip, u16_t dst_port); +err_t udp_send (struct udp_pcb *pcb, struct pbuf *p); + +#if LWIP_CHECKSUM_ON_COPY && CHECKSUM_GEN_UDP +err_t udp_sendto_if_chksum(struct udp_pcb *pcb, struct pbuf *p, + const ip_addr_t *dst_ip, u16_t dst_port, + struct netif *netif, u8_t have_chksum, + u16_t chksum); +err_t udp_sendto_chksum(struct udp_pcb *pcb, struct pbuf *p, + const ip_addr_t *dst_ip, u16_t dst_port, + u8_t have_chksum, u16_t chksum); +err_t udp_send_chksum(struct udp_pcb *pcb, struct pbuf *p, + u8_t have_chksum, u16_t chksum); +err_t udp_sendto_if_src_chksum(struct udp_pcb *pcb, struct pbuf *p, + const ip_addr_t *dst_ip, u16_t dst_port, struct netif *netif, + u8_t have_chksum, u16_t chksum, const ip_addr_t *src_ip); +#endif /* LWIP_CHECKSUM_ON_COPY && CHECKSUM_GEN_UDP */ + +#define udp_flags(pcb) ((pcb)->flags) +#define udp_setflags(pcb, f) ((pcb)->flags = (f)) + +#define udp_set_flags(pcb, set_flags) do { (pcb)->flags = (u8_t)((pcb)->flags | (set_flags)); } while(0) +#define udp_clear_flags(pcb, clr_flags) do { (pcb)->flags = (u8_t)((pcb)->flags & (u8_t)(~(clr_flags) & 0xff)); } while(0) +#define udp_is_flag_set(pcb, flag) (((pcb)->flags & (flag)) != 0) + +/* The following functions are the lower layer interface to UDP. 
*/ +void udp_input (struct pbuf *p, struct netif *inp); + +void udp_init (void); + +/* for compatibility with older implementation */ +#define udp_new_ip6() udp_new_ip_type(IPADDR_TYPE_V6) + +#if LWIP_MULTICAST_TX_OPTIONS +#if LWIP_IPV4 +#define udp_set_multicast_netif_addr(pcb, ip4addr) ip4_addr_copy((pcb)->mcast_ip4, *(ip4addr)) +#define udp_get_multicast_netif_addr(pcb) (&(pcb)->mcast_ip4) +#endif /* LWIP_IPV4 */ +#define udp_set_multicast_netif_index(pcb, idx) ((pcb)->mcast_ifindex = (idx)) +#define udp_get_multicast_netif_index(pcb) ((pcb)->mcast_ifindex) +#define udp_set_multicast_ttl(pcb, value) ((pcb)->mcast_ttl = (value)) +#define udp_get_multicast_ttl(pcb) ((pcb)->mcast_ttl) +#endif /* LWIP_MULTICAST_TX_OPTIONS */ + +#if UDP_DEBUG +void udp_debug_print(struct udp_hdr *udphdr); +#else +#define udp_debug_print(udphdr) +#endif + +void udp_netif_ip_addr_changed(const ip_addr_t* old_addr, const ip_addr_t* new_addr); + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_UDP */ + +#endif /* LWIP_HDR_UDP_H */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/netif/bridgeif.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/netif/bridgeif.h new file mode 100644 index 0000000..927c619 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/netif/bridgeif.h @@ -0,0 +1,127 @@ +/** + * @file + * lwIP netif implementing an IEEE 802.1D MAC Bridge + */ + +/* + * Copyright (c) 2017 Simon Goldschmidt. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
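/*
 * Illustrative sketch, not part of this diff: a minimal UDP echo using the raw
 * API declared above. The receive callback owns the pbuf and must free it;
 * port 7 and the function names are placeholders.
 */
#include "lwip/udp.h"

static void example_udp_recv(void *arg, struct udp_pcb *pcb, struct pbuf *p,
                             const ip_addr_t *addr, u16_t port)
{
  LWIP_UNUSED_ARG(arg);
  if (p != NULL) {
    udp_sendto(pcb, p, addr, port);  /* echo back to the sender */
    pbuf_free(p);                    /* free after use: 'addr' may point into 'p' */
  }
}

void example_udp_echo_init(void)
{
  struct udp_pcb *pcb = udp_new();
  if (pcb == NULL) {
    return;
  }
  if (udp_bind(pcb, IP_ADDR_ANY, 7) != ERR_OK) {
    udp_remove(pcb);
    return;
  }
  udp_recv(pcb, example_udp_recv, NULL);
}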
+ * + * Author: Simon Goldschmidt + * + */ +#ifndef LWIP_HDR_NETIF_BRIDGEIF_H +#define LWIP_HDR_NETIF_BRIDGEIF_H + +#include "netif/bridgeif_opts.h" + +#include "lwip/err.h" +#include "lwip/prot/ethernet.h" + +#ifdef __cplusplus +extern "C" { +#endif + +struct netif; + +#if (BRIDGEIF_MAX_PORTS < 0) || (BRIDGEIF_MAX_PORTS >= 64) +#error BRIDGEIF_MAX_PORTS must be [1..63] +#elif BRIDGEIF_MAX_PORTS < 8 +typedef u8_t bridgeif_portmask_t; +#elif BRIDGEIF_MAX_PORTS < 16 +typedef u16_t bridgeif_portmask_t; +#elif BRIDGEIF_MAX_PORTS < 32 +typedef u32_t bridgeif_portmask_t; +#elif BRIDGEIF_MAX_PORTS < 64 +typedef u64_t bridgeif_portmask_t; +#endif + +#define BR_FLOOD ((bridgeif_portmask_t)-1) + +/** @ingroup bridgeif + * Initialisation data for @ref bridgeif_init. + * An instance of this type must be passed as parameter 'state' to @ref netif_add + * when the bridge is added. + */ +typedef struct bridgeif_initdata_s { + /** MAC address of the bridge (cannot use the netif's addresses) */ + struct eth_addr ethaddr; + /** Maximum number of ports in the bridge (ports are stored in an array, this + influences memory allocated for netif->state of the bridge netif). */ + u8_t max_ports; + /** Maximum number of dynamic/learning entries in the bridge's forwarding database. + In the default implementation, this controls memory consumption only. */ + u16_t max_fdb_dynamic_entries; + /** Maximum number of static forwarding entries. Influences memory consumption! */ + u16_t max_fdb_static_entries; +} bridgeif_initdata_t; + +/** @ingroup bridgeif + * Use this for constant initialization of a bridgeif_initdat_t + * (ethaddr must be passed as ETH_ADDR()) + */ +#define BRIDGEIF_INITDATA1(max_ports, max_fdb_dynamic_entries, max_fdb_static_entries, ethaddr) {ethaddr, max_ports, max_fdb_dynamic_entries, max_fdb_static_entries} +/** @ingroup bridgeif + * Use this for constant initialization of a bridgeif_initdat_t + * (each byte of ethaddr must be passed) + */ +#define BRIDGEIF_INITDATA2(max_ports, max_fdb_dynamic_entries, max_fdb_static_entries, e0, e1, e2, e3, e4, e5) {{e0, e1, e2, e3, e4, e5}, max_ports, max_fdb_dynamic_entries, max_fdb_static_entries} + +err_t bridgeif_init(struct netif *netif); +err_t bridgeif_add_port(struct netif *bridgeif, struct netif *portif); +err_t bridgeif_fdb_add(struct netif *bridgeif, const struct eth_addr *addr, bridgeif_portmask_t ports); +err_t bridgeif_fdb_remove(struct netif *bridgeif, const struct eth_addr *addr); + +/* FDB interface, can be replaced by own implementation */ +void bridgeif_fdb_update_src(void *fdb_ptr, struct eth_addr *src_addr, u8_t port_idx); +bridgeif_portmask_t bridgeif_fdb_get_dst_ports(void *fdb_ptr, struct eth_addr *dst_addr); +void* bridgeif_fdb_init(u16_t max_fdb_entries); + +#if BRIDGEIF_PORT_NETIFS_OUTPUT_DIRECT +#ifndef BRIDGEIF_DECL_PROTECT +/* define bridgeif protection to sys_arch_protect... 
*/ +#include "lwip/sys.h" +#define BRIDGEIF_DECL_PROTECT(lev) SYS_ARCH_DECL_PROTECT(lev) +#define BRIDGEIF_READ_PROTECT(lev) SYS_ARCH_PROTECT(lev) +#define BRIDGEIF_READ_UNPROTECT(lev) SYS_ARCH_UNPROTECT(lev) +#define BRIDGEIF_WRITE_PROTECT(lev) +#define BRIDGEIF_WRITE_UNPROTECT(lev) +#endif +#else /* BRIDGEIF_PORT_NETIFS_OUTPUT_DIRECT */ +#include "lwip/tcpip.h" +#define BRIDGEIF_DECL_PROTECT(lev) +#define BRIDGEIF_READ_PROTECT(lev) +#define BRIDGEIF_READ_UNPROTECT(lev) +#define BRIDGEIF_WRITE_PROTECT(lev) +#define BRIDGEIF_WRITE_UNPROTECT(lev) +#endif /* BRIDGEIF_PORT_NETIFS_OUTPUT_DIRECT */ + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_NETIF_BRIDGEIF_H */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/netif/bridgeif_opts.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/netif/bridgeif_opts.h new file mode 100644 index 0000000..5866410 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/netif/bridgeif_opts.h @@ -0,0 +1,90 @@ +/** + * @file + * lwIP netif implementing an IEEE 802.1D MAC Bridge + */ + +/* + * Copyright (c) 2017 Simon Goldschmidt. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Simon Goldschmidt + * + */ + +#ifndef LWIP_HDR_NETIF_BRIDGEIF_OPTS_H +#define LWIP_HDR_NETIF_BRIDGEIF_OPTS_H + +#include "lwip/opt.h" + +/** + * @defgroup bridgeif_opts Options + * @ingroup bridgeif + * @{ + */ + +/** BRIDGEIF_PORT_NETIFS_OUTPUT_DIRECT==1: set port netif's 'input' function + * to call directly into bridgeif code and on top of that, directly call into + * the selected forwarding port's 'linkoutput' function. + * This means that the bridgeif input/output path is protected from concurrent access + * but as well, *all* bridge port netif's drivers must correctly handle concurrent access! 
+ * == 0: get into tcpip_thread for every input packet (no multithreading) + * ATTENTION: as ==0 relies on tcpip.h, the default depends on NO_SYS setting + */ +#ifndef BRIDGEIF_PORT_NETIFS_OUTPUT_DIRECT +#define BRIDGEIF_PORT_NETIFS_OUTPUT_DIRECT NO_SYS +#endif + +/** BRIDGEIF_MAX_PORTS: this is used to create a typedef used for forwarding + * bit-fields: the number of bits required is this + 1 (for the internal/cpu port) + * (63 is the maximum, resulting in an u64_t for the bit mask) + * ATTENTION: this controls the maximum number of the implementation only! + * The max. number of ports per bridge must still be passed via netif_add parameter! + */ +#ifndef BRIDGEIF_MAX_PORTS +#define BRIDGEIF_MAX_PORTS 7 +#endif + +/** BRIDGEIF_DEBUG: Enable generic debugging in bridgeif.c. */ +#ifndef BRIDGEIF_DEBUG +#define BRIDGEIF_DEBUG LWIP_DBG_OFF +#endif + +/** BRIDGEIF_DEBUG: Enable FDB debugging in bridgeif.c. */ +#ifndef BRIDGEIF_FDB_DEBUG +#define BRIDGEIF_FDB_DEBUG LWIP_DBG_OFF +#endif + +/** BRIDGEIF_DEBUG: Enable forwarding debugging in bridgeif.c. */ +#ifndef BRIDGEIF_FW_DEBUG +#define BRIDGEIF_FW_DEBUG LWIP_DBG_OFF +#endif + +/** + * @} + */ + +#endif /* LWIP_HDR_NETIF_BRIDGEIF_OPTS_H */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/netif/etharp.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/netif/etharp.h new file mode 100644 index 0000000..c00de04 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/netif/etharp.h @@ -0,0 +1,3 @@ +/* ARP has been moved to core/ipv4, provide this #include for compatibility only */ +#include "lwip/etharp.h" +#include "netif/ethernet.h" diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/netif/ethernet.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/netif/ethernet.h new file mode 100644 index 0000000..6e94e16 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/netif/ethernet.h @@ -0,0 +1,77 @@ +/** + * @file + * Ethernet input function - handles INCOMING ethernet level traffic + * To be used in most low-level netif implementations + */ + +/* + * Copyright (c) 2001-2003 Swedish Institute of Computer Science. + * Copyright (c) 2003-2004 Leon Woestenberg + * Copyright (c) 2003-2004 Axon Digital Design B.V., The Netherlands. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
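/*
 * Illustrative sketch, not part of this diff: creating a two-port bridge with
 * the bridgeif API declared above (assuming an IPv4-enabled, OS-based build).
 * The bridge MAC address, FDB sizes and port netifs are placeholders.
 */
#include "lwip/netif.h"
#include "lwip/tcpip.h"
#include "netif/bridgeif.h"

static struct netif bridge_netif;

void example_bridge_init(struct netif *port1, struct netif *port2)
{
  /* 2 ports, 128 dynamic + 16 static FDB entries, locally administered MAC */
  bridgeif_initdata_t bridge_data =
      BRIDGEIF_INITDATA2(2, 128, 16, 0x02, 0x00, 0x00, 0x00, 0x00, 0x01);

  netif_add(&bridge_netif, IP4_ADDR_ANY4, IP4_ADDR_ANY4, IP4_ADDR_ANY4,
            &bridge_data, bridgeif_init, tcpip_input);
  bridgeif_add_port(&bridge_netif, port1);
  bridgeif_add_port(&bridge_netif, port2);
  netif_set_up(&bridge_netif);
}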
IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ + +#ifndef LWIP_HDR_NETIF_ETHERNET_H +#define LWIP_HDR_NETIF_ETHERNET_H + +#include "lwip/opt.h" + +#include "lwip/pbuf.h" +#include "lwip/netif.h" +#include "lwip/prot/ethernet.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#if LWIP_ARP || LWIP_ETHERNET + +/** Define this to 1 and define LWIP_ARP_FILTER_NETIF_FN(pbuf, netif, type) + * to a filter function that returns the correct netif when using multiple + * netifs on one hardware interface where the netif's low-level receive + * routine cannot decide for the correct netif (e.g. when mapping multiple + * IP addresses to one hardware interface). + */ +#ifndef LWIP_ARP_FILTER_NETIF +#define LWIP_ARP_FILTER_NETIF 0 +#endif + +err_t ethernet_input(struct pbuf *p, struct netif *netif); +err_t ethernet_output(struct netif* netif, struct pbuf* p, const struct eth_addr* src, const struct eth_addr* dst, u16_t eth_type); + +extern const struct eth_addr ethbroadcast, ethzero; + +#endif /* LWIP_ARP || LWIP_ETHERNET */ + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_NETIF_ETHERNET_H */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/netif/ieee802154.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/netif/ieee802154.h new file mode 100644 index 0000000..0a11a2b --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/netif/ieee802154.h @@ -0,0 +1,112 @@ +/** + * @file + * Definitions for IEEE 802.15.4 MAC frames + */ + +/* + * Copyright (c) 2018 Simon Goldschmidt. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
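/*
 * Illustrative sketch, not part of this diff: how a low-level driver hands a
 * received frame to the stack. netif->input is ethernet_input() for a NO_SYS
 * build or tcpip_input() with an OS, as chosen in netif_add(); the function
 * name here is a placeholder.
 */
#include "lwip/netif.h"
#include "netif/ethernet.h"

void example_driver_rx(struct netif *netif, struct pbuf *p)
{
  if (netif->input(p, netif) != ERR_OK) {
    pbuf_free(p);   /* the stack did not take ownership of the pbuf */
  }
}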
IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Simon Goldschmidt + * + */ +#ifndef LWIP_HDR_NETIF_IEEE802154_H +#define LWIP_HDR_NETIF_IEEE802154_H + +#include "lwip/opt.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +/** General MAC frame format + * This shows the full featured header, mainly for documentation. + * Some fields are omitted or shortened to achieve frame compression. + */ +struct ieee_802154_hdr { + /** See IEEE_802154_FC_* defines */ + PACK_STRUCT_FIELD(u16_t frame_control); + /** Sequence number is omitted if IEEE_802154_FC_SEQNO_SUPPR is set in frame_control */ + PACK_STRUCT_FLD_8(u8_t sequence_number); + /** Destination PAN ID is omitted if Destination Addressing Mode is 0 */ + PACK_STRUCT_FIELD(u16_t destination_pan_id); + /** Destination Address is omitted if Destination Addressing Mode is 0 */ + PACK_STRUCT_FLD_8(u8_t destination_address[8]); + /** Source PAN ID is omitted if Source Addressing Mode is 0 + or if IEEE_802154_FC_PANID_COMPR is set in frame control*/ + PACK_STRUCT_FIELD(u16_t source_pan_id); + /** Source Address is omitted if Source Addressing Mode is 0 */ + PACK_STRUCT_FLD_8(u8_t source_address[8]); + /* The rest is variable */ +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif + +/* Addressing modes (2 bits) */ +#define IEEE_802154_ADDR_MODE_NO_ADDR 0x00 /* PAN ID and address fields are not present */ +#define IEEE_802154_ADDR_MODE_RESERVED 0x01 /* Reserved */ +#define IEEE_802154_ADDR_MODE_SHORT 0x02 /* Address field contains a short address (16 bit) */ +#define IEEE_802154_ADDR_MODE_EXT 0x03 /* Address field contains an extended address (64 bit) */ + +/* IEEE 802.15.4 Frame Control definitions (2 bytes; see IEEE 802.15.4-2015 ch. 
7.2.1) */ +#define IEEE_802154_FC_FT_MASK 0x0007 /* bits 0..2: Frame Type */ +#define IEEE_802154_FC_FT_BEACON 0x00 +#define IEEE_802154_FC_FT_DATA 0x01 +#define IEEE_802154_FC_FT_ACK 0x02 +#define IEEE_802154_FC_FT_MAC_CMD 0x03 +#define IEEE_802154_FC_FT_RESERVED 0x04 +#define IEEE_802154_FC_FT_MULTIPURPOSE 0x05 +#define IEEE_802154_FC_FT_FRAG 0x06 +#define IEEE_802154_FC_FT_EXT 0x07 +#define IEEE_802154_FC_SEC_EN 0x0008 /* bit 3: Security Enabled */ +#define IEEE_802154_FC_FRAME_PEND 0x0010 /* bit 4: Frame Pending */ +#define IEEE_802154_FC_ACK_REQ 0x0020 /* bit 5: AR (ACK required) */ +#define IEEE_802154_FC_PANID_COMPR 0x0040 /* bit 6: PAN ID Compression (src and dst are equal, src PAN ID omitted) */ +#define IEEE_802154_FC_RESERVED 0x0080 +#define IEEE_802154_FC_SEQNO_SUPPR 0x0100 /* bit 8: Sequence Number Suppression */ +#define IEEE_802154_FC_IE_PRESENT 0x0200 /* bit 9: IE Present */ +#define IEEE_802154_FC_DST_ADDR_MODE_MASK 0x0c00 /* bits 10..11: Destination Addressing Mode */ +#define IEEE_802154_FC_DST_ADDR_MODE_NO_ADDR (IEEE_802154_ADDR_MODE_NO_ADDR << 10) +#define IEEE_802154_FC_DST_ADDR_MODE_SHORT (IEEE_802154_ADDR_MODE_SHORT << 10) +#define IEEE_802154_FC_DST_ADDR_MODE_EXT (IEEE_802154_ADDR_MODE_EXT << 10) +#define IEEE_802154_FC_FRAME_VERSION_MASK 0x3000 /* bits 12..13: Frame Version */ +#define IEEE_802154_FC_FRAME_VERSION_GET(x) (((x) & IEEE_802154_FC_FRAME_VERSION_MASK) >> 12) +#define IEEE_802154_FC_SRC_ADDR_MODE_MASK 0xc000 /* bits 14..15: Source Addressing Mode */ +#define IEEE_802154_FC_SRC_ADDR_MODE_SHORT (IEEE_802154_ADDR_MODE_SHORT << 14) +#define IEEE_802154_FC_SRC_ADDR_MODE_EXT (IEEE_802154_ADDR_MODE_EXT << 14) + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_NETIF_IEEE802154_H */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/netif/lowpan6.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/netif/lowpan6.h new file mode 100644 index 0000000..b84b838 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/netif/lowpan6.h @@ -0,0 +1,89 @@ +/** + * @file + * + * 6LowPAN output for IPv6. Uses ND tables for link-layer addressing. Fragments packets to 6LowPAN units. + */ + +/* + * Copyright (c) 2015 Inico Technologies Ltd. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
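/*
 * A small sketch showing how the frame-control masks above combine when
 * classifying a received header; frame_control is assumed to already be in
 * host byte order (on air the field is transmitted little-endian).
 */
#include "netif/ieee802154.h"

static int is_short_addressed_data_frame(u16_t frame_control)
{
  int is_data   = (frame_control & IEEE_802154_FC_FT_MASK) == IEEE_802154_FC_FT_DATA;
  int dst_short = (frame_control & IEEE_802154_FC_DST_ADDR_MODE_MASK) ==
                  IEEE_802154_FC_DST_ADDR_MODE_SHORT;
  int src_short = (frame_control & IEEE_802154_FC_SRC_ADDR_MODE_MASK) ==
                  IEEE_802154_FC_SRC_ADDR_MODE_SHORT;
  return is_data && dst_short && src_short;
}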
IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Ivan Delamer + * + * + * Please coordinate changes and requests with Ivan Delamer + * + */ + +#ifndef LWIP_HDR_LOWPAN6_H +#define LWIP_HDR_LOWPAN6_H + +#include "netif/lowpan6_opts.h" + +#if LWIP_IPV6 + +#include "netif/lowpan6_common.h" +#include "lwip/pbuf.h" +#include "lwip/ip.h" +#include "lwip/ip_addr.h" +#include "lwip/netif.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/** 1 second period for reassembly */ +#define LOWPAN6_TMR_INTERVAL 1000 + +void lowpan6_tmr(void); + +err_t lowpan6_set_context(u8_t idx, const ip6_addr_t * context); +err_t lowpan6_set_short_addr(u8_t addr_high, u8_t addr_low); + +#if LWIP_IPV4 +err_t lowpan4_output(struct netif *netif, struct pbuf *q, const ip4_addr_t *ipaddr); +#endif /* LWIP_IPV4 */ +err_t lowpan6_output(struct netif *netif, struct pbuf *q, const ip6_addr_t *ip6addr); +err_t lowpan6_input(struct pbuf * p, struct netif *netif); +err_t lowpan6_if_init(struct netif *netif); + +/* pan_id in network byte order. */ +err_t lowpan6_set_pan_id(u16_t pan_id); + +u16_t lowpan6_calc_crc(const void *buf, u16_t len); + +#if !NO_SYS +err_t tcpip_6lowpan_input(struct pbuf *p, struct netif *inp); +#endif /* !NO_SYS */ + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_IPV6 */ + +#endif /* LWIP_HDR_LOWPAN6_H */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/netif/lowpan6_ble.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/netif/lowpan6_ble.h new file mode 100644 index 0000000..46a5f2e --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/netif/lowpan6_ble.h @@ -0,0 +1,78 @@ +/** + * @file + * 6LowPAN over BLE for IPv6 (RFC7668). + */ + +/* + * Copyright (c) 2017 Benjamin Aigner + * Copyright (c) 2015 Inico Technologies Ltd. , Author: Ivan Delamer + * + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
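/*
 * A minimal bring-up sketch for the 6LoWPAN declarations above. The PAN ID
 * 0xABCD and short address 0x0001 are arbitrary; creating the netif and
 * attaching the IEEE 802.15.4 driver are out of scope here, and the
 * reassembly timer is assumed to be driven either by lwIP's cyclic timeouts
 * or by calling lowpan6_tmr() every LOWPAN6_TMR_INTERVAL milliseconds.
 */
#include "lwip/def.h"
#include "netif/lowpan6.h"

static void lowpan6_demo_setup(void)
{
  /* The declaration above documents pan_id as network byte order. */
  lowpan6_set_pan_id(lwip_htons(0xABCD));
  /* 16-bit short source address 0x0001, split into high/low bytes. */
  lowpan6_set_short_addr(0x00, 0x01);
}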
IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * Author: Benjamin Aigner + * + * Based on the original 6lowpan implementation of lwIP ( @see 6lowpan.c) + */ + +#ifndef LWIP_HDR_LOWPAN6_BLE_H +#define LWIP_HDR_LOWPAN6_BLE_H + +#include "netif/lowpan6_opts.h" + +#if LWIP_IPV6 /* don't build if not configured for use in lwipopts.h */ + +#include "netif/lowpan6_common.h" +#include "lwip/pbuf.h" +#include "lwip/ip.h" +#include "lwip/ip_addr.h" +#include "lwip/netif.h" + +#ifdef __cplusplus +extern "C" { +#endif + +err_t rfc7668_output(struct netif *netif, struct pbuf *q, const ip6_addr_t *ip6addr); +err_t rfc7668_input(struct pbuf * p, struct netif *netif); +err_t rfc7668_set_local_addr_eui64(struct netif *netif, const u8_t *local_addr, size_t local_addr_len); +err_t rfc7668_set_local_addr_mac48(struct netif *netif, const u8_t *local_addr, size_t local_addr_len, int is_public_addr); +err_t rfc7668_set_peer_addr_eui64(struct netif *netif, const u8_t *peer_addr, size_t peer_addr_len); +err_t rfc7668_set_peer_addr_mac48(struct netif *netif, const u8_t *peer_addr, size_t peer_addr_len, int is_public_addr); +err_t rfc7668_set_context(u8_t index, const ip6_addr_t * context); +err_t rfc7668_if_init(struct netif *netif); + +#if !NO_SYS +err_t tcpip_rfc7668_input(struct pbuf *p, struct netif *inp); +#endif + +void ble_addr_to_eui64(uint8_t *dst, const uint8_t *src, int public_addr); +void eui64_to_ble_addr(uint8_t *dst, const uint8_t *src); + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_IPV6 */ + +#endif /* LWIP_HDR_LOWPAN6_BLE_H */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/netif/lowpan6_common.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/netif/lowpan6_common.h new file mode 100644 index 0000000..d80eef9 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/netif/lowpan6_common.h @@ -0,0 +1,82 @@ +/** + * @file + * + * Common 6LowPAN routines for IPv6. Uses ND tables for link-layer addressing. Fragments packets to 6LowPAN units. + */ + +/* + * Copyright (c) 2015 Inico Technologies Ltd. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. 
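/*
 * A sketch of binding a BLE connection's 48-bit device addresses to an
 * RFC 7668 netif via the setters declared above. The two addresses and the
 * "public" flags are placeholders; error handling is omitted.
 */
#include "netif/lowpan6_ble.h"

static void rfc7668_demo_bind(struct netif *netif)
{
  static const u8_t own_addr[6]  = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
  static const u8_t peer_addr[6] = { 0x66, 0x77, 0x88, 0x99, 0xAA, 0xBB };

  rfc7668_set_local_addr_mac48(netif, own_addr, sizeof(own_addr), 1 /* public */);
  rfc7668_set_peer_addr_mac48(netif, peer_addr, sizeof(peer_addr), 1 /* public */);
}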
+ * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Ivan Delamer + * + * + * Please coordinate changes and requests with Ivan Delamer + * + */ + +#ifndef LWIP_HDR_LOWPAN6_COMMON_H +#define LWIP_HDR_LOWPAN6_COMMON_H + +#include "netif/lowpan6_opts.h" + +#if LWIP_IPV6 /* don't build if IPv6 is disabled in lwipopts.h */ + +#include "lwip/pbuf.h" +#include "lwip/ip.h" +#include "lwip/ip6_addr.h" +#include "lwip/netif.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/** Helper define for a link layer address, which can be encoded as 0, 2 or 8 bytes */ +struct lowpan6_link_addr { + /* encoded length of the address */ + u8_t addr_len; + /* address bytes */ + u8_t addr[8]; +}; + +s8_t lowpan6_get_address_mode(const ip6_addr_t *ip6addr, const struct lowpan6_link_addr *mac_addr); + +#if LWIP_6LOWPAN_IPHC +err_t lowpan6_compress_headers(struct netif *netif, u8_t *inbuf, size_t inbuf_size, u8_t *outbuf, size_t outbuf_size, + u8_t *lowpan6_header_len_out, u8_t *hidden_header_len_out, ip6_addr_t *lowpan6_contexts, + const struct lowpan6_link_addr *src, const struct lowpan6_link_addr *dst); +struct pbuf *lowpan6_decompress(struct pbuf *p, u16_t datagram_size, ip6_addr_t *lowpan6_contexts, + struct lowpan6_link_addr *src, struct lowpan6_link_addr *dest); +#endif /* LWIP_6LOWPAN_IPHC */ + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_IPV6 */ + +#endif /* LWIP_HDR_LOWPAN6_COMMON_H */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/netif/lowpan6_opts.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/netif/lowpan6_opts.h new file mode 100644 index 0000000..5303c15 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/netif/lowpan6_opts.h @@ -0,0 +1,122 @@ +/** + * @file + * 6LowPAN options list + */ + +/* + * Copyright (c) 2015 Inico Technologies Ltd. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. 
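/*
 * A small helper sketch for struct lowpan6_link_addr above: addr_len selects
 * between the 0-, 2- and 8-byte encodings, so a 16-bit short address uses the
 * first two bytes of addr[]. The helper name and byte ordering are illustrative.
 */
#include <string.h>
#include "netif/lowpan6_common.h"

static void lowpan6_make_short_addr(struct lowpan6_link_addr *a, u8_t hi, u8_t lo)
{
  memset(a, 0, sizeof(*a));
  a->addr_len = 2;       /* short (16-bit) link-layer address */
  a->addr[0]  = hi;
  a->addr[1]  = lo;
}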
+ * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Ivan Delamer + * + * + * Please coordinate changes and requests with Ivan Delamer + * + */ + +#ifndef LWIP_HDR_LOWPAN6_OPTS_H +#define LWIP_HDR_LOWPAN6_OPTS_H + +#include "lwip/opt.h" + +/** LWIP_6LOWPAN_NUM_CONTEXTS: define the number of compression + * contexts per netif type + */ +#ifndef LWIP_6LOWPAN_NUM_CONTEXTS +#define LWIP_6LOWPAN_NUM_CONTEXTS 10 +#endif + +/** LWIP_6LOWPAN_INFER_SHORT_ADDRESS: set this to 0 to disable creating + * short addresses for matching addresses (debug only) + */ +#ifndef LWIP_6LOWPAN_INFER_SHORT_ADDRESS +#define LWIP_6LOWPAN_INFER_SHORT_ADDRESS 1 +#endif + +/** LWIP_6LOWPAN_IPHC: set this to 0 to disable IP header compression as per + * RFC 6282 (which is mandatory for BLE) + */ +#ifndef LWIP_6LOWPAN_IPHC +#define LWIP_6LOWPAN_IPHC 1 +#endif + +/** Set this to 1 if your IEEE 802.15.4 interface can calculate and check the + * CRC in hardware. This means TX packets get 2 zero bytes added on transmission + * which are to be filled with the CRC. + */ +#ifndef LWIP_6LOWPAN_802154_HW_CRC +#define LWIP_6LOWPAN_802154_HW_CRC 0 +#endif + +/** If LWIP_6LOWPAN_802154_HW_CRC==0, this can override the default slow + * implementation of the CRC used for 6LoWPAN over IEEE 802.15.4 (which uses + * a shift register). + */ +#ifndef LWIP_6LOWPAN_CALC_CRC +#define LWIP_6LOWPAN_CALC_CRC(buf, len) lowpan6_calc_crc(buf, len) +#endif + +/** Debug level for 6LoWPAN in general */ +#ifndef LWIP_LOWPAN6_DEBUG +#define LWIP_LOWPAN6_DEBUG LWIP_DBG_OFF +#endif + +/** Debug level for 6LoWPAN over IEEE 802.15.4 */ +#ifndef LWIP_LOWPAN6_802154_DEBUG +#define LWIP_LOWPAN6_802154_DEBUG LWIP_DBG_OFF +#endif + +/** LWIP_LOWPAN6_IP_COMPRESSED_DEBUG: enable compressed IP frame + * output debugging + */ +#ifndef LWIP_LOWPAN6_IP_COMPRESSED_DEBUG +#define LWIP_LOWPAN6_IP_COMPRESSED_DEBUG LWIP_DBG_OFF +#endif + +/** LWIP_LOWPAN6_DECOMPRESSION_DEBUG: enable decompression debug output + */ +#ifndef LWIP_LOWPAN6_DECOMPRESSION_DEBUG +#define LWIP_LOWPAN6_DECOMPRESSION_DEBUG LWIP_DBG_OFF +#endif + +/** LWIP_RFC7668_IP_UNCOMPRESSED_DEBUG: enable decompressed IP frame + * output debugging */ +#ifndef LWIP_RFC7668_IP_UNCOMPRESSED_DEBUG +#define LWIP_RFC7668_IP_UNCOMPRESSED_DEBUG LWIP_DBG_OFF +#endif + +/** LWIP_RFC7668_LINUX_WORKAROUND_PUBLIC_ADDRESS: + * Currently, the linux kernel driver for 6lowpan sets/clears a bit in + * the address, depending on the BD address (either public or not). 
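/*
 * Because each option above is wrapped in #ifndef, a project can override it
 * from its own lwipopts.h. The excerpt below is a hypothetical configuration,
 * with arbitrary values, for a radio that computes the FCS in hardware.
 */
#define LWIP_6LOWPAN_NUM_CONTEXTS    4            /* fewer compression contexts      */
#define LWIP_6LOWPAN_802154_HW_CRC   1            /* FCS handled by the transceiver  */
#define LWIP_LOWPAN6_DEBUG           LWIP_DBG_ON  /* verbose 6LoWPAN debug output    */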
+ * Might not be RFC7668 conform, so you may select to do that (=1) or + * not (=0) */ +#ifndef LWIP_RFC7668_LINUX_WORKAROUND_PUBLIC_ADDRESS +#define LWIP_RFC7668_LINUX_WORKAROUND_PUBLIC_ADDRESS 1 +#endif + + +#endif /* LWIP_HDR_LOWPAN6_OPTS_H */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/netif/ppp/ccp.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/netif/ppp/ccp.h new file mode 100644 index 0000000..c35336f --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/netif/ppp/ccp.h @@ -0,0 +1,164 @@ +/* + * ccp.h - Definitions for PPP Compression Control Protocol. + * + * Copyright (c) 1994-2002 Paul Mackerras. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. The name(s) of the authors of this software must not be used to + * endorse or promote products derived from this software without + * prior written permission. + * + * 3. Redistributions of any form whatsoever must retain the following + * acknowledgment: + * "This product includes software developed by Paul Mackerras + * ". + * + * THE AUTHORS OF THIS SOFTWARE DISCLAIM ALL WARRANTIES WITH REGARD TO + * THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY + * AND FITNESS, IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY + * SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN + * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING + * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + * + * $Id: ccp.h,v 1.12 2004/11/04 10:02:26 paulus Exp $ + */ + +#include "netif/ppp/ppp_opts.h" +#if PPP_SUPPORT && CCP_SUPPORT /* don't build if not configured for use in lwipopts.h */ + +#ifndef CCP_H +#define CCP_H + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * CCP codes. + */ + +#define CCP_CONFREQ 1 +#define CCP_CONFACK 2 +#define CCP_TERMREQ 5 +#define CCP_TERMACK 6 +#define CCP_RESETREQ 14 +#define CCP_RESETACK 15 + +/* + * Max # bytes for a CCP option + */ + +#define CCP_MAX_OPTION_LENGTH 32 + +/* + * Parts of a CCP packet. + */ + +#define CCP_CODE(dp) ((dp)[0]) +#define CCP_ID(dp) ((dp)[1]) +#define CCP_LENGTH(dp) (((dp)[2] << 8) + (dp)[3]) +#define CCP_HDRLEN 4 + +#define CCP_OPT_CODE(dp) ((dp)[0]) +#define CCP_OPT_LENGTH(dp) ((dp)[1]) +#define CCP_OPT_MINLEN 2 + +#if BSDCOMPRESS_SUPPORT +/* + * Definitions for BSD-Compress. + */ + +#define CI_BSD_COMPRESS 21 /* config. option for BSD-Compress */ +#define CILEN_BSD_COMPRESS 3 /* length of config. option */ + +/* Macros for handling the 3rd byte of the BSD-Compress config option. */ +#define BSD_NBITS(x) ((x) & 0x1F) /* number of bits requested */ +#define BSD_VERSION(x) ((x) >> 5) /* version of option format */ +#define BSD_CURRENT_VERSION 1 /* current version number */ +#define BSD_MAKE_OPT(v, n) (((v) << 5) | (n)) + +#define BSD_MIN_BITS 9 /* smallest code size supported */ +#define BSD_MAX_BITS 15 /* largest code size supported */ +#endif /* BSDCOMPRESS_SUPPORT */ + +#if DEFLATE_SUPPORT +/* + * Definitions for Deflate. 
+ */ + +#define CI_DEFLATE 26 /* config option for Deflate */ +#define CI_DEFLATE_DRAFT 24 /* value used in original draft RFC */ +#define CILEN_DEFLATE 4 /* length of its config option */ + +#define DEFLATE_MIN_SIZE 9 +#define DEFLATE_MAX_SIZE 15 +#define DEFLATE_METHOD_VAL 8 +#define DEFLATE_SIZE(x) (((x) >> 4) + 8) +#define DEFLATE_METHOD(x) ((x) & 0x0F) +#define DEFLATE_MAKE_OPT(w) ((((w) - 8) << 4) + DEFLATE_METHOD_VAL) +#define DEFLATE_CHK_SEQUENCE 0 +#endif /* DEFLATE_SUPPORT */ + +#if MPPE_SUPPORT +/* + * Definitions for MPPE. + */ + +#define CI_MPPE 18 /* config option for MPPE */ +#define CILEN_MPPE 6 /* length of config option */ +#endif /* MPPE_SUPPORT */ + +#if PREDICTOR_SUPPORT +/* + * Definitions for other, as yet unsupported, compression methods. + */ + +#define CI_PREDICTOR_1 1 /* config option for Predictor-1 */ +#define CILEN_PREDICTOR_1 2 /* length of its config option */ +#define CI_PREDICTOR_2 2 /* config option for Predictor-2 */ +#define CILEN_PREDICTOR_2 2 /* length of its config option */ +#endif /* PREDICTOR_SUPPORT */ + +typedef struct ccp_options { +#if DEFLATE_SUPPORT + unsigned int deflate :1; /* do Deflate? */ + unsigned int deflate_correct :1; /* use correct code for deflate? */ + unsigned int deflate_draft :1; /* use draft RFC code for deflate? */ +#endif /* DEFLATE_SUPPORT */ +#if BSDCOMPRESS_SUPPORT + unsigned int bsd_compress :1; /* do BSD Compress? */ +#endif /* BSDCOMPRESS_SUPPORT */ +#if PREDICTOR_SUPPORT + unsigned int predictor_1 :1; /* do Predictor-1? */ + unsigned int predictor_2 :1; /* do Predictor-2? */ +#endif /* PREDICTOR_SUPPORT */ + +#if MPPE_SUPPORT + u8_t mppe; /* MPPE bitfield */ +#endif /* MPPE_SUPPORT */ +#if BSDCOMPRESS_SUPPORT + u_short bsd_bits; /* # bits/code for BSD Compress */ +#endif /* BSDCOMPRESS_SUPPORT */ +#if DEFLATE_SUPPORT + u_short deflate_size; /* lg(window size) for Deflate */ +#endif /* DEFLATE_SUPPORT */ + u8_t method; /* code for chosen compression method */ +} ccp_options; + +extern const struct protent ccp_protent; + +void ccp_resetrequest(ppp_pcb *pcb); /* Issue a reset-request. */ + +#ifdef __cplusplus +} +#endif + +#endif /* CCP_H */ +#endif /* PPP_SUPPORT && CCP_SUPPORT */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/netif/ppp/chap-md5.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/netif/ppp/chap-md5.h new file mode 100644 index 0000000..1f58dfe --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/netif/ppp/chap-md5.h @@ -0,0 +1,36 @@ +/* + * chap-md5.h - New CHAP/MD5 implementation. + * + * Copyright (c) 2003 Paul Mackerras. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. The name(s) of the authors of this software must not be used to + * endorse or promote products derived from this software without + * prior written permission. + * + * 3. Redistributions of any form whatsoever must retain the following + * acknowledgment: + * "This product includes software developed by Paul Mackerras + * ". 
+ * + * THE AUTHORS OF THIS SOFTWARE DISCLAIM ALL WARRANTIES WITH REGARD TO + * THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY + * AND FITNESS, IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY + * SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN + * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING + * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#include "netif/ppp/ppp_opts.h" +#if PPP_SUPPORT && CHAP_SUPPORT /* don't build if not configured for use in lwipopts.h */ + +extern const struct chap_digest_type md5_digest; + +#endif /* PPP_SUPPORT && CHAP_SUPPORT */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/netif/ppp/chap-new.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/netif/ppp/chap-new.h new file mode 100644 index 0000000..dadd9c2 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/netif/ppp/chap-new.h @@ -0,0 +1,200 @@ +/* + * chap-new.c - New CHAP implementation. + * + * Copyright (c) 2003 Paul Mackerras. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. The name(s) of the authors of this software must not be used to + * endorse or promote products derived from this software without + * prior written permission. + * + * 3. Redistributions of any form whatsoever must retain the following + * acknowledgment: + * "This product includes software developed by Paul Mackerras + * ". + * + * THE AUTHORS OF THIS SOFTWARE DISCLAIM ALL WARRANTIES WITH REGARD TO + * THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY + * AND FITNESS, IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY + * SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN + * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING + * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#include "netif/ppp/ppp_opts.h" +#if PPP_SUPPORT && CHAP_SUPPORT /* don't build if not configured for use in lwipopts.h */ + +#ifndef CHAP_H +#define CHAP_H + +#include "ppp.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * CHAP packets begin with a standard header with code, id, len (2 bytes). + */ +#define CHAP_HDRLEN 4 + +/* + * Values for the code field. + */ +#define CHAP_CHALLENGE 1 +#define CHAP_RESPONSE 2 +#define CHAP_SUCCESS 3 +#define CHAP_FAILURE 4 + +/* + * CHAP digest codes. + */ +#define CHAP_MD5 5 +#if MSCHAP_SUPPORT +#define CHAP_MICROSOFT 0x80 +#define CHAP_MICROSOFT_V2 0x81 +#endif /* MSCHAP_SUPPORT */ + +/* + * Semi-arbitrary limits on challenge and response fields. + */ +#define MAX_CHALLENGE_LEN 64 +#define MAX_RESPONSE_LEN 64 + +/* + * These limits apply to challenge and response packets we send. + * The +4 is the +1 that we actually need rounded up. 
+ */ +#define CHAL_MAX_PKTLEN (PPP_HDRLEN + CHAP_HDRLEN + 4 + MAX_CHALLENGE_LEN + MAXNAMELEN) +#define RESP_MAX_PKTLEN (PPP_HDRLEN + CHAP_HDRLEN + 4 + MAX_RESPONSE_LEN + MAXNAMELEN) + +/* bitmask of supported algorithms */ +#if MSCHAP_SUPPORT +#define MDTYPE_MICROSOFT_V2 0x1 +#define MDTYPE_MICROSOFT 0x2 +#endif /* MSCHAP_SUPPORT */ +#define MDTYPE_MD5 0x4 +#define MDTYPE_NONE 0 + +#if MSCHAP_SUPPORT +/* Return the digest alg. ID for the most preferred digest type. */ +#define CHAP_DIGEST(mdtype) \ + ((mdtype) & MDTYPE_MD5)? CHAP_MD5: \ + ((mdtype) & MDTYPE_MICROSOFT_V2)? CHAP_MICROSOFT_V2: \ + ((mdtype) & MDTYPE_MICROSOFT)? CHAP_MICROSOFT: \ + 0 +#else /* !MSCHAP_SUPPORT */ +#define CHAP_DIGEST(mdtype) \ + ((mdtype) & MDTYPE_MD5)? CHAP_MD5: \ + 0 +#endif /* MSCHAP_SUPPORT */ + +/* Return the bit flag (lsb set) for our most preferred digest type. */ +#define CHAP_MDTYPE(mdtype) ((mdtype) ^ ((mdtype) - 1)) & (mdtype) + +/* Return the bit flag for a given digest algorithm ID. */ +#if MSCHAP_SUPPORT +#define CHAP_MDTYPE_D(digest) \ + ((digest) == CHAP_MICROSOFT_V2)? MDTYPE_MICROSOFT_V2: \ + ((digest) == CHAP_MICROSOFT)? MDTYPE_MICROSOFT: \ + ((digest) == CHAP_MD5)? MDTYPE_MD5: \ + 0 +#else /* !MSCHAP_SUPPORT */ +#define CHAP_MDTYPE_D(digest) \ + ((digest) == CHAP_MD5)? MDTYPE_MD5: \ + 0 +#endif /* MSCHAP_SUPPORT */ + +/* Can we do the requested digest? */ +#if MSCHAP_SUPPORT +#define CHAP_CANDIGEST(mdtype, digest) \ + ((digest) == CHAP_MICROSOFT_V2)? (mdtype) & MDTYPE_MICROSOFT_V2: \ + ((digest) == CHAP_MICROSOFT)? (mdtype) & MDTYPE_MICROSOFT: \ + ((digest) == CHAP_MD5)? (mdtype) & MDTYPE_MD5: \ + 0 +#else /* !MSCHAP_SUPPORT */ +#define CHAP_CANDIGEST(mdtype, digest) \ + ((digest) == CHAP_MD5)? (mdtype) & MDTYPE_MD5: \ + 0 +#endif /* MSCHAP_SUPPORT */ + +/* + * The code for each digest type has to supply one of these. + */ +struct chap_digest_type { + int code; + +#if PPP_SERVER + /* + * Note: challenge and response arguments below are formatted as + * a length byte followed by the actual challenge/response data. + */ + void (*generate_challenge)(ppp_pcb *pcb, unsigned char *challenge); + int (*verify_response)(ppp_pcb *pcb, int id, const char *name, + const unsigned char *secret, int secret_len, + const unsigned char *challenge, const unsigned char *response, + char *message, int message_space); +#endif /* PPP_SERVER */ + void (*make_response)(ppp_pcb *pcb, unsigned char *response, int id, const char *our_name, + const unsigned char *challenge, const char *secret, int secret_len, + unsigned char *priv); + int (*check_success)(ppp_pcb *pcb, unsigned char *pkt, int len, unsigned char *priv); + void (*handle_failure)(ppp_pcb *pcb, unsigned char *pkt, int len); +}; + +/* + * Each interface is described by chap structure. 
+ */ +#if CHAP_SUPPORT +typedef struct chap_client_state { + u8_t flags; + const char *name; + const struct chap_digest_type *digest; + unsigned char priv[64]; /* private area for digest's use */ +} chap_client_state; + +#if PPP_SERVER +typedef struct chap_server_state { + u8_t flags; + u8_t id; + const char *name; + const struct chap_digest_type *digest; + int challenge_xmits; + int challenge_pktlen; + unsigned char challenge[CHAL_MAX_PKTLEN]; +} chap_server_state; +#endif /* PPP_SERVER */ +#endif /* CHAP_SUPPORT */ + +#if 0 /* UNUSED */ +/* Hook for a plugin to validate CHAP challenge */ +extern int (*chap_verify_hook)(char *name, char *ourname, int id, + const struct chap_digest_type *digest, + unsigned char *challenge, unsigned char *response, + char *message, int message_space); +#endif /* UNUSED */ + +#if PPP_SERVER +/* Called by authentication code to start authenticating the peer. */ +extern void chap_auth_peer(ppp_pcb *pcb, const char *our_name, int digest_code); +#endif /* PPP_SERVER */ + +/* Called by auth. code to start authenticating us to the peer. */ +extern void chap_auth_with_peer(ppp_pcb *pcb, const char *our_name, int digest_code); + +/* Represents the CHAP protocol to the main pppd code */ +extern const struct protent chap_protent; + +#ifdef __cplusplus +} +#endif + +#endif /* CHAP_H */ +#endif /* PPP_SUPPORT && CHAP_SUPPORT */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/netif/ppp/chap_ms.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/netif/ppp/chap_ms.h new file mode 100644 index 0000000..ed52b40 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/netif/ppp/chap_ms.h @@ -0,0 +1,44 @@ +/* + * chap_ms.h - Challenge Handshake Authentication Protocol definitions. + * + * Copyright (c) 1995 Eric Rosenquist. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * 3. The name(s) of the authors of this software must not be used to + * endorse or promote products derived from this software without + * prior written permission. + * + * THE AUTHORS OF THIS SOFTWARE DISCLAIM ALL WARRANTIES WITH REGARD TO + * THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY + * AND FITNESS, IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY + * SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN + * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING + * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ * + * $Id: chap_ms.h,v 1.13 2004/11/15 22:13:26 paulus Exp $ + */ + +#include "netif/ppp/ppp_opts.h" +#if PPP_SUPPORT && MSCHAP_SUPPORT /* don't build if not configured for use in lwipopts.h */ + +#ifndef CHAPMS_INCLUDE +#define CHAPMS_INCLUDE + +extern const struct chap_digest_type chapms_digest; +extern const struct chap_digest_type chapms2_digest; + +#endif /* CHAPMS_INCLUDE */ + +#endif /* PPP_SUPPORT && MSCHAP_SUPPORT */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/netif/ppp/eap.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/netif/ppp/eap.h new file mode 100644 index 0000000..61efdaf --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/netif/ppp/eap.h @@ -0,0 +1,169 @@ +/* + * eap.h - Extensible Authentication Protocol for PPP (RFC 2284) + * + * Copyright (c) 2001 by Sun Microsystems, Inc. + * All rights reserved. + * + * Non-exclusive rights to redistribute, modify, translate, and use + * this software in source and binary forms, in whole or in part, is + * hereby granted, provided that the above copyright notice is + * duplicated in any source form, and that neither the name of the + * copyright holder nor the author is used to endorse or promote + * products derived from this software. + * + * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED + * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. + * + * Original version by James Carlson + * + * $Id: eap.h,v 1.2 2003/06/11 23:56:26 paulus Exp $ + */ + +#include "netif/ppp/ppp_opts.h" +#if PPP_SUPPORT && EAP_SUPPORT /* don't build if not configured for use in lwipopts.h */ + +#ifndef PPP_EAP_H +#define PPP_EAP_H + +#include "ppp.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * Packet header = Code, id, length. + */ +#define EAP_HEADERLEN 4 + + +/* EAP message codes. */ +#define EAP_REQUEST 1 +#define EAP_RESPONSE 2 +#define EAP_SUCCESS 3 +#define EAP_FAILURE 4 + +/* EAP types */ +#define EAPT_IDENTITY 1 +#define EAPT_NOTIFICATION 2 +#define EAPT_NAK 3 /* (response only) */ +#define EAPT_MD5CHAP 4 +#define EAPT_OTP 5 /* One-Time Password; RFC 1938 */ +#define EAPT_TOKEN 6 /* Generic Token Card */ +/* 7 and 8 are unassigned. 
*/ +#define EAPT_RSA 9 /* RSA Public Key Authentication */ +#define EAPT_DSS 10 /* DSS Unilateral */ +#define EAPT_KEA 11 /* KEA */ +#define EAPT_KEA_VALIDATE 12 /* KEA-VALIDATE */ +#define EAPT_TLS 13 /* EAP-TLS */ +#define EAPT_DEFENDER 14 /* Defender Token (AXENT) */ +#define EAPT_W2K 15 /* Windows 2000 EAP */ +#define EAPT_ARCOT 16 /* Arcot Systems */ +#define EAPT_CISCOWIRELESS 17 /* Cisco Wireless */ +#define EAPT_NOKIACARD 18 /* Nokia IP smart card */ +#define EAPT_SRP 19 /* Secure Remote Password */ +/* 20 is deprecated */ + +/* EAP SRP-SHA1 Subtypes */ +#define EAPSRP_CHALLENGE 1 /* Request 1 - Challenge */ +#define EAPSRP_CKEY 1 /* Response 1 - Client Key */ +#define EAPSRP_SKEY 2 /* Request 2 - Server Key */ +#define EAPSRP_CVALIDATOR 2 /* Response 2 - Client Validator */ +#define EAPSRP_SVALIDATOR 3 /* Request 3 - Server Validator */ +#define EAPSRP_ACK 3 /* Response 3 - final ack */ +#define EAPSRP_LWRECHALLENGE 4 /* Req/resp 4 - Lightweight rechal */ + +#define SRPVAL_EBIT 0x00000001 /* Use shared key for ECP */ + +#define SRP_PSEUDO_ID "pseudo_" +#define SRP_PSEUDO_LEN 7 + +#define MD5_SIGNATURE_SIZE 16 +#define EAP_MIN_CHALLENGE_LENGTH 17 +#define EAP_MAX_CHALLENGE_LENGTH 24 +#define EAP_MIN_MAX_POWER_OF_TWO_CHALLENGE_LENGTH 3 /* 2^3-1 = 7, 17+7 = 24 */ + +#define EAP_STATES \ + "Initial", "Pending", "Closed", "Listen", "Identify", \ + "SRP1", "SRP2", "SRP3", "MD5Chall", "Open", "SRP4", "BadAuth" + +#define eap_client_active(pcb) ((pcb)->eap.es_client.ea_state == eapListen) +#if PPP_SERVER +#define eap_server_active(pcb) \ + ((pcb)->eap.es_server.ea_state >= eapIdentify && \ + (pcb)->eap.es_server.ea_state <= eapMD5Chall) +#endif /* PPP_SERVER */ + +/* + * Complete EAP state for one PPP session. + */ +enum eap_state_code { + eapInitial = 0, /* No EAP authentication yet requested */ + eapPending, /* Waiting for LCP (no timer) */ + eapClosed, /* Authentication not in use */ + eapListen, /* Client ready (and timer running) */ + eapIdentify, /* EAP Identify sent */ + eapSRP1, /* Sent EAP SRP-SHA1 Subtype 1 */ + eapSRP2, /* Sent EAP SRP-SHA1 Subtype 2 */ + eapSRP3, /* Sent EAP SRP-SHA1 Subtype 3 */ + eapMD5Chall, /* Sent MD5-Challenge */ + eapOpen, /* Completed authentication */ + eapSRP4, /* Sent EAP SRP-SHA1 Subtype 4 */ + eapBadAuth /* Failed authentication */ +}; + +struct eap_auth { + const char *ea_name; /* Our name */ + char ea_peer[MAXNAMELEN +1]; /* Peer's name */ + void *ea_session; /* Authentication library linkage */ + u_char *ea_skey; /* Shared encryption key */ + u_short ea_namelen; /* Length of our name */ + u_short ea_peerlen; /* Length of peer's name */ + enum eap_state_code ea_state; + u_char ea_id; /* Current id */ + u_char ea_requests; /* Number of Requests sent/received */ + u_char ea_responses; /* Number of Responses */ + u_char ea_type; /* One of EAPT_* */ + u32_t ea_keyflags; /* SRP shared key usage flags */ +}; + +#ifndef EAP_MAX_CHALLENGE_LENGTH +#define EAP_MAX_CHALLENGE_LENGTH 24 +#endif +typedef struct eap_state { + struct eap_auth es_client; /* Client (authenticatee) data */ +#if PPP_SERVER + struct eap_auth es_server; /* Server (authenticator) data */ +#endif /* PPP_SERVER */ + int es_savedtime; /* Saved timeout */ + int es_rechallenge; /* EAP rechallenge interval */ + int es_lwrechallenge; /* SRP lightweight rechallenge inter */ + u8_t es_usepseudo; /* Use SRP Pseudonym if offered one */ + int es_usedpseudo; /* Set if we already sent PN */ + int es_challen; /* Length of challenge string */ + u_char es_challenge[EAP_MAX_CHALLENGE_LENGTH]; +} 
eap_state; + +/* + * Timeouts. + */ +#if 0 /* moved to ppp_opts.h */ +#define EAP_DEFTIMEOUT 3 /* Timeout (seconds) for rexmit */ +#define EAP_DEFTRANSMITS 10 /* max # times to transmit */ +#define EAP_DEFREQTIME 20 /* Time to wait for peer request */ +#define EAP_DEFALLOWREQ 20 /* max # times to accept requests */ +#endif /* moved to ppp_opts.h */ + +void eap_authwithpeer(ppp_pcb *pcb, const char *localname); +void eap_authpeer(ppp_pcb *pcb, const char *localname); + +extern const struct protent eap_protent; + +#ifdef __cplusplus +} +#endif + +#endif /* PPP_EAP_H */ + +#endif /* PPP_SUPPORT && EAP_SUPPORT */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/netif/ppp/ecp.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/netif/ppp/ecp.h new file mode 100644 index 0000000..8be1872 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/netif/ppp/ecp.h @@ -0,0 +1,62 @@ +/* + * ecp.h - Definitions for PPP Encryption Control Protocol. + * + * Copyright (c) 2002 Google, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * 3. The name(s) of the authors of this software must not be used to + * endorse or promote products derived from this software without + * prior written permission. + * + * THE AUTHORS OF THIS SOFTWARE DISCLAIM ALL WARRANTIES WITH REGARD TO + * THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY + * AND FITNESS, IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY + * SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN + * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING + * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + * + * $Id: ecp.h,v 1.2 2003/01/10 07:12:36 fcusack Exp $ + */ + +#include "netif/ppp/ppp_opts.h" +#if PPP_SUPPORT && ECP_SUPPORT /* don't build if not configured for use in lwipopts.h */ + +#ifndef ECP_H +#define ECP_H + +#ifdef __cplusplus +extern "C" { +#endif + +typedef struct ecp_options { + bool required; /* Is ECP required? */ + unsigned enctype; /* Encryption type */ +} ecp_options; + +extern fsm ecp_fsm[]; +extern ecp_options ecp_wantoptions[]; +extern ecp_options ecp_gotoptions[]; +extern ecp_options ecp_allowoptions[]; +extern ecp_options ecp_hisoptions[]; + +extern const struct protent ecp_protent; + +#ifdef __cplusplus +} +#endif + +#endif /* ECP_H */ +#endif /* PPP_SUPPORT && ECP_SUPPORT */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/netif/ppp/eui64.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/netif/ppp/eui64.h new file mode 100644 index 0000000..ad199ea --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/netif/ppp/eui64.h @@ -0,0 +1,102 @@ +/* + * eui64.h - EUI64 routines for IPv6CP. 
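/*
 * A sketch using EAP_STATES above: the strings are listed in the same order
 * as enum eap_state_code, so they can seed a name table for logging. Bounds
 * checking is omitted, and eap.h only expands when PPP_SUPPORT and
 * EAP_SUPPORT are enabled.
 */
#include "netif/ppp/ppp.h"
#include "netif/ppp/eap.h"

static const char * const eap_state_names[] = { EAP_STATES };

static const char *eap_state_name(enum eap_state_code s)
{
  return eap_state_names[s];   /* eapInitial..eapBadAuth map 1:1 onto EAP_STATES */
}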
+ * + * Copyright (c) 1999 Tommi Komulainen. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * 3. The name(s) of the authors of this software must not be used to + * endorse or promote products derived from this software without + * prior written permission. + * + * 4. Redistributions of any form whatsoever must retain the following + * acknowledgment: + * "This product includes software developed by Tommi Komulainen + * ". + * + * THE AUTHORS OF THIS SOFTWARE DISCLAIM ALL WARRANTIES WITH REGARD TO + * THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY + * AND FITNESS, IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY + * SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN + * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING + * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + * + * $Id: eui64.h,v 1.6 2002/12/04 23:03:32 paulus Exp $ +*/ + +#include "netif/ppp/ppp_opts.h" +#if PPP_SUPPORT && PPP_IPV6_SUPPORT /* don't build if not configured for use in lwipopts.h */ + +#ifndef EUI64_H +#define EUI64_H + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * @todo: + * + * Maybe this should be done by processing struct in6_addr directly... + */ +typedef union +{ + u8_t e8[8]; + u16_t e16[4]; + u32_t e32[2]; +} eui64_t; + +#define eui64_iszero(e) (((e).e32[0] | (e).e32[1]) == 0) +#define eui64_equals(e, o) (((e).e32[0] == (o).e32[0]) && \ + ((e).e32[1] == (o).e32[1])) +#define eui64_zero(e) (e).e32[0] = (e).e32[1] = 0; + +#define eui64_copy(s, d) memcpy(&(d), &(s), sizeof(eui64_t)) + +#define eui64_magic(e) do { \ + (e).e32[0] = magic(); \ + (e).e32[1] = magic(); \ + (e).e8[0] &= ~2; \ + } while (0) +#define eui64_magic_nz(x) do { \ + eui64_magic(x); \ + } while (eui64_iszero(x)) +#define eui64_magic_ne(x, y) do { \ + eui64_magic(x); \ + } while (eui64_equals(x, y)) + +#define eui64_get(ll, cp) do { \ + eui64_copy((*cp), (ll)); \ + (cp) += sizeof(eui64_t); \ + } while (0) + +#define eui64_put(ll, cp) do { \ + eui64_copy((ll), (*cp)); \ + (cp) += sizeof(eui64_t); \ + } while (0) + +#define eui64_set32(e, l) do { \ + (e).e32[0] = 0; \ + (e).e32[1] = lwip_htonl(l); \ + } while (0) +#define eui64_setlo32(e, l) eui64_set32(e, l) + +char *eui64_ntoa(eui64_t); /* Returns ascii representation of id */ + +#ifdef __cplusplus +} +#endif + +#endif /* EUI64_H */ +#endif /* PPP_SUPPORT && PPP_IPV6_SUPPORT */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/netif/ppp/fsm.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/netif/ppp/fsm.h new file mode 100644 index 0000000..e91536d --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/netif/ppp/fsm.h @@ -0,0 +1,182 @@ +/* + * fsm.h - {Link, IP} Control Protocol Finite State Machine definitions. + * + * Copyright (c) 1984-2000 Carnegie Mellon University. All rights reserved. 
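/*
 * A short sketch of the eui64_t helpers above, building an interface
 * identifier from a 32-bit value; 0x00000002 is an arbitrary example and the
 * helper name is illustrative. Requires PPP_SUPPORT and PPP_IPV6_SUPPORT for
 * eui64.h to expand.
 */
#include "lwip/def.h"
#include "netif/ppp/ppp.h"
#include "netif/ppp/eui64.h"

static int eui64_demo(void)
{
  eui64_t id;

  eui64_zero(id);                /* clear both 32-bit halves                */
  eui64_set32(id, 0x00000002);   /* upper half 0, lower half = htonl(2)     */
  return !eui64_iszero(id);      /* 1: id now holds a usable identifier     */
}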
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * 3. The name "Carnegie Mellon University" must not be used to + * endorse or promote products derived from this software without + * prior written permission. For permission or any legal + * details, please contact + * Office of Technology Transfer + * Carnegie Mellon University + * 5000 Forbes Avenue + * Pittsburgh, PA 15213-3890 + * (412) 268-4387, fax: (412) 268-7395 + * tech-transfer@andrew.cmu.edu + * + * 4. Redistributions of any form whatsoever must retain the following + * acknowledgment: + * "This product includes software developed by Computing Services + * at Carnegie Mellon University (http://www.cmu.edu/computing/)." + * + * CARNEGIE MELLON UNIVERSITY DISCLAIMS ALL WARRANTIES WITH REGARD TO + * THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY + * AND FITNESS, IN NO EVENT SHALL CARNEGIE MELLON UNIVERSITY BE LIABLE + * FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN + * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING + * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + * + * $Id: fsm.h,v 1.10 2004/11/13 02:28:15 paulus Exp $ + */ + +#include "netif/ppp/ppp_opts.h" +#if PPP_SUPPORT /* don't build if not configured for use in lwipopts.h */ + +#ifndef FSM_H +#define FSM_H + +#include "ppp.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * Packet header = Code, id, length. + */ +#define HEADERLEN 4 + + +/* + * CP (LCP, IPCP, etc.) codes. + */ +#define CONFREQ 1 /* Configuration Request */ +#define CONFACK 2 /* Configuration Ack */ +#define CONFNAK 3 /* Configuration Nak */ +#define CONFREJ 4 /* Configuration Reject */ +#define TERMREQ 5 /* Termination Request */ +#define TERMACK 6 /* Termination Ack */ +#define CODEREJ 7 /* Code Reject */ + + +/* + * Each FSM is described by an fsm structure and fsm callbacks. 
+ */ +typedef struct fsm { + ppp_pcb *pcb; /* PPP Interface */ + const struct fsm_callbacks *callbacks; /* Callback routines */ + const char *term_reason; /* Reason for closing protocol */ + u8_t seen_ack; /* Have received valid Ack/Nak/Rej to Req */ + /* -- This is our only flag, we might use u_int :1 if we have more flags */ + u16_t protocol; /* Data Link Layer Protocol field value */ + u8_t state; /* State */ + u8_t flags; /* Contains option bits */ + u8_t id; /* Current id */ + u8_t reqid; /* Current request id */ + u8_t retransmits; /* Number of retransmissions left */ + u8_t nakloops; /* Number of nak loops since last ack */ + u8_t rnakloops; /* Number of naks received */ + u8_t maxnakloops; /* Maximum number of nak loops tolerated + (necessary because IPCP require a custom large max nak loops value) */ + u8_t term_reason_len; /* Length of term_reason */ +} fsm; + + +typedef struct fsm_callbacks { + void (*resetci) /* Reset our Configuration Information */ + (fsm *); + int (*cilen) /* Length of our Configuration Information */ + (fsm *); + void (*addci) /* Add our Configuration Information */ + (fsm *, u_char *, int *); + int (*ackci) /* ACK our Configuration Information */ + (fsm *, u_char *, int); + int (*nakci) /* NAK our Configuration Information */ + (fsm *, u_char *, int, int); + int (*rejci) /* Reject our Configuration Information */ + (fsm *, u_char *, int); + int (*reqci) /* Request peer's Configuration Information */ + (fsm *, u_char *, int *, int); + void (*up) /* Called when fsm reaches PPP_FSM_OPENED state */ + (fsm *); + void (*down) /* Called when fsm leaves PPP_FSM_OPENED state */ + (fsm *); + void (*starting) /* Called when we want the lower layer */ + (fsm *); + void (*finished) /* Called when we don't want the lower layer */ + (fsm *); + void (*protreject) /* Called when Protocol-Reject received */ + (int); + void (*retransmit) /* Retransmission is necessary */ + (fsm *); + int (*extcode) /* Called when unknown code received */ + (fsm *, int, int, u_char *, int); + const char *proto_name; /* String name for protocol (for messages) */ +} fsm_callbacks; + + +/* + * Link states. + */ +#define PPP_FSM_INITIAL 0 /* Down, hasn't been opened */ +#define PPP_FSM_STARTING 1 /* Down, been opened */ +#define PPP_FSM_CLOSED 2 /* Up, hasn't been opened */ +#define PPP_FSM_STOPPED 3 /* Open, waiting for down event */ +#define PPP_FSM_CLOSING 4 /* Terminating the connection, not open */ +#define PPP_FSM_STOPPING 5 /* Terminating, but open */ +#define PPP_FSM_REQSENT 6 /* We've sent a Config Request */ +#define PPP_FSM_ACKRCVD 7 /* We've received a Config Ack */ +#define PPP_FSM_ACKSENT 8 /* We've sent a Config Ack */ +#define PPP_FSM_OPENED 9 /* Connection available */ + + +/* + * Flags - indicate options controlling FSM operation + */ +#define OPT_PASSIVE 1 /* Don't die if we don't get a response */ +#define OPT_RESTART 2 /* Treat 2nd OPEN as DOWN, UP */ +#define OPT_SILENT 4 /* Wait for peer to speak first */ + + +/* + * Timeouts. 
+ */ +#if 0 /* moved to ppp_opts.h */ +#define DEFTIMEOUT 3 /* Timeout time in seconds */ +#define DEFMAXTERMREQS 2 /* Maximum Terminate-Request transmissions */ +#define DEFMAXCONFREQS 10 /* Maximum Configure-Request transmissions */ +#define DEFMAXNAKLOOPS 5 /* Maximum number of nak loops */ +#endif /* moved to ppp_opts.h */ + + +/* + * Prototypes + */ +void fsm_init(fsm *f); +void fsm_lowerup(fsm *f); +void fsm_lowerdown(fsm *f); +void fsm_open(fsm *f); +void fsm_close(fsm *f, const char *reason); +void fsm_input(fsm *f, u_char *inpacket, int l); +void fsm_protreject(fsm *f); +void fsm_sdata(fsm *f, u_char code, u_char id, const u_char *data, int datalen); + +#ifdef __cplusplus +} +#endif + +#endif /* FSM_H */ +#endif /* PPP_SUPPORT */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/netif/ppp/ipcp.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/netif/ppp/ipcp.h new file mode 100644 index 0000000..1735158 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/netif/ppp/ipcp.h @@ -0,0 +1,134 @@ +/* + * ipcp.h - IP Control Protocol definitions. + * + * Copyright (c) 1984-2000 Carnegie Mellon University. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * 3. The name "Carnegie Mellon University" must not be used to + * endorse or promote products derived from this software without + * prior written permission. For permission or any legal + * details, please contact + * Office of Technology Transfer + * Carnegie Mellon University + * 5000 Forbes Avenue + * Pittsburgh, PA 15213-3890 + * (412) 268-4387, fax: (412) 268-7395 + * tech-transfer@andrew.cmu.edu + * + * 4. Redistributions of any form whatsoever must retain the following + * acknowledgment: + * "This product includes software developed by Computing Services + * at Carnegie Mellon University (http://www.cmu.edu/computing/)." + * + * CARNEGIE MELLON UNIVERSITY DISCLAIMS ALL WARRANTIES WITH REGARD TO + * THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY + * AND FITNESS, IN NO EVENT SHALL CARNEGIE MELLON UNIVERSITY BE LIABLE + * FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN + * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING + * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + * + * $Id: ipcp.h,v 1.14 2002/12/04 23:03:32 paulus Exp $ + */ + +#include "netif/ppp/ppp_opts.h" +#if PPP_SUPPORT && PPP_IPV4_SUPPORT /* don't build if not configured for use in lwipopts.h */ + +#ifndef IPCP_H +#define IPCP_H + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * Options. 
+ */ +#define CI_ADDRS 1 /* IP Addresses */ +#if VJ_SUPPORT +#define CI_COMPRESSTYPE 2 /* Compression Type */ +#endif /* VJ_SUPPORT */ +#define CI_ADDR 3 + +#if LWIP_DNS +#define CI_MS_DNS1 129 /* Primary DNS value */ +#define CI_MS_DNS2 131 /* Secondary DNS value */ +#endif /* LWIP_DNS */ +#if 0 /* UNUSED - WINS */ +#define CI_MS_WINS1 130 /* Primary WINS value */ +#define CI_MS_WINS2 132 /* Secondary WINS value */ +#endif /* UNUSED - WINS */ + +#if VJ_SUPPORT +#define MAX_STATES 16 /* from slcompress.h */ + +#define IPCP_VJMODE_OLD 1 /* "old" mode (option # = 0x0037) */ +#define IPCP_VJMODE_RFC1172 2 /* "old-rfc"mode (option # = 0x002d) */ +#define IPCP_VJMODE_RFC1332 3 /* "new-rfc"mode (option # = 0x002d, */ + /* maxslot and slot number compression) */ + +#define IPCP_VJ_COMP 0x002d /* current value for VJ compression option*/ +#define IPCP_VJ_COMP_OLD 0x0037 /* "old" (i.e, broken) value for VJ */ + /* compression option*/ +#endif /* VJ_SUPPORT */ + +typedef struct ipcp_options { + unsigned int neg_addr :1; /* Negotiate IP Address? */ + unsigned int old_addrs :1; /* Use old (IP-Addresses) option? */ + unsigned int req_addr :1; /* Ask peer to send IP address? */ +#if 0 /* UNUSED */ + unsigned int default_route :1; /* Assign default route through interface? */ + unsigned int replace_default_route :1; /* Replace default route through interface? */ +#endif /* UNUSED */ +#if 0 /* UNUSED - PROXY ARP */ + unsigned int proxy_arp :1; /* Make proxy ARP entry for peer? */ +#endif /* UNUSED - PROXY ARP */ +#if VJ_SUPPORT + unsigned int neg_vj :1; /* Van Jacobson Compression? */ + unsigned int old_vj :1; /* use old (short) form of VJ option? */ + unsigned int cflag :1; +#endif /* VJ_SUPPORT */ + unsigned int accept_local :1; /* accept peer's value for ouraddr */ + unsigned int accept_remote :1; /* accept peer's value for hisaddr */ +#if LWIP_DNS + unsigned int req_dns1 :1; /* Ask peer to send primary DNS address? */ + unsigned int req_dns2 :1; /* Ask peer to send secondary DNS address? */ +#endif /* LWIP_DNS */ + + u32_t ouraddr, hisaddr; /* Addresses in NETWORK BYTE ORDER */ +#if LWIP_DNS + u32_t dnsaddr[2]; /* Primary and secondary MS DNS entries */ +#endif /* LWIP_DNS */ +#if 0 /* UNUSED - WINS */ + u32_t winsaddr[2]; /* Primary and secondary MS WINS entries */ +#endif /* UNUSED - WINS */ + +#if VJ_SUPPORT + u16_t vj_protocol; /* protocol value to use in VJ option */ + u8_t maxslotindex; /* values for RFC1332 VJ compression neg. */ +#endif /* VJ_SUPPORT */ +} ipcp_options; + +#if 0 /* UNUSED, already defined by lwIP */ +char *ip_ntoa (u32_t); +#endif /* UNUSED, already defined by lwIP */ + +extern const struct protent ipcp_protent; + +#ifdef __cplusplus +} +#endif + +#endif /* IPCP_H */ +#endif /* PPP_SUPPORT && PPP_IPV4_SUPPORT */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/netif/ppp/ipv6cp.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/netif/ppp/ipv6cp.h new file mode 100644 index 0000000..48b0715 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/netif/ppp/ipv6cp.h @@ -0,0 +1,191 @@ +/* + * ipv6cp.h - PPP IPV6 Control Protocol. + * + * Copyright (c) 1999 Tommi Komulainen. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. 
Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * 3. The name(s) of the authors of this software must not be used to + * endorse or promote products derived from this software without + * prior written permission. + * + * 4. Redistributions of any form whatsoever must retain the following + * acknowledgment: + * "This product includes software developed by Tommi Komulainen + * ". + * + * THE AUTHORS OF THIS SOFTWARE DISCLAIM ALL WARRANTIES WITH REGARD TO + * THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY + * AND FITNESS, IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY + * SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN + * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING + * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + * + */ + +/* Original version, based on RFC2023 : + + Copyright (c) 1995, 1996, 1997 Francis.Dupont@inria.fr, INRIA Rocquencourt, + Alain.Durand@imag.fr, IMAG, + Jean-Luc.Richier@imag.fr, IMAG-LSR. + + Copyright (c) 1998, 1999 Francis.Dupont@inria.fr, GIE DYADE, + Alain.Durand@imag.fr, IMAG, + Jean-Luc.Richier@imag.fr, IMAG-LSR. + + Ce travail a été fait au sein du GIE DYADE (Groupement d'Intérêt + Économique ayant pour membres BULL S.A. et l'INRIA). + + Ce logiciel informatique est disponible aux conditions + usuelles dans la recherche, c'est-à-dire qu'il peut + être utilisé, copié, modifié, distribué à l'unique + condition que ce texte soit conservé afin que + l'origine de ce logiciel soit reconnue. + + Le nom de l'Institut National de Recherche en Informatique + et en Automatique (INRIA), de l'IMAG, ou d'une personne morale + ou physique ayant participé à l'élaboration de ce logiciel ne peut + être utilisé sans son accord préalable explicite. + + Ce logiciel est fourni tel quel sans aucune garantie, + support ou responsabilité d'aucune sorte. + Ce logiciel est dérivé de sources d'origine + "University of California at Berkeley" et + "Digital Equipment Corporation" couvertes par des copyrights. + + L'Institut d'Informatique et de Mathématiques Appliquées de Grenoble (IMAG) + est une fédération d'unités mixtes de recherche du CNRS, de l'Institut National + Polytechnique de Grenoble et de l'Université Joseph Fourier regroupant + sept laboratoires dont le laboratoire Logiciels, Systèmes, Réseaux (LSR). + + This work has been done in the context of GIE DYADE (joint R & D venture + between BULL S.A. and INRIA). + + This software is available with usual "research" terms + with the aim of retain credits of the software. + Permission to use, copy, modify and distribute this software for any + purpose and without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies, + and the name of INRIA, IMAG, or any contributor not be used in advertising + or publicity pertaining to this material without the prior explicit + permission. The software is provided "as is" without any + warranties, support or liabilities of any kind. + This software is derived from source code from + "University of California at Berkeley" and + "Digital Equipment Corporation" protected by copyrights. 
+ + Grenoble's Institute of Computer Science and Applied Mathematics (IMAG) + is a federation of seven research units funded by the CNRS, National + Polytechnic Institute of Grenoble and University Joseph Fourier. + The research unit in Software, Systems, Networks (LSR) is member of IMAG. +*/ + +/* + * Derived from : + * + * + * ipcp.h - IP Control Protocol definitions. + * + * Copyright (c) 1984-2000 Carnegie Mellon University. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * 3. The name "Carnegie Mellon University" must not be used to + * endorse or promote products derived from this software without + * prior written permission. For permission or any legal + * details, please contact + * Office of Technology Transfer + * Carnegie Mellon University + * 5000 Forbes Avenue + * Pittsburgh, PA 15213-3890 + * (412) 268-4387, fax: (412) 268-7395 + * tech-transfer@andrew.cmu.edu + * + * 4. Redistributions of any form whatsoever must retain the following + * acknowledgment: + * "This product includes software developed by Computing Services + * at Carnegie Mellon University (http://www.cmu.edu/computing/)." + * + * CARNEGIE MELLON UNIVERSITY DISCLAIMS ALL WARRANTIES WITH REGARD TO + * THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY + * AND FITNESS, IN NO EVENT SHALL CARNEGIE MELLON UNIVERSITY BE LIABLE + * FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN + * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING + * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + * + * $Id: ipv6cp.h,v 1.7 2002/12/04 23:03:32 paulus Exp $ + */ + +#include "netif/ppp/ppp_opts.h" +#if PPP_SUPPORT && PPP_IPV6_SUPPORT /* don't build if not configured for use in lwipopts.h */ + +#ifndef IPV6CP_H +#define IPV6CP_H + +#include "eui64.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * Options. + */ +#define CI_IFACEID 1 /* Interface Identifier */ +#ifdef IPV6CP_COMP +#define CI_COMPRESSTYPE 2 /* Compression Type */ +#endif /* IPV6CP_COMP */ + +/* No compression types yet defined. + *#define IPV6CP_COMP 0x004f + */ +typedef struct ipv6cp_options { + unsigned int neg_ifaceid :1; /* Negotiate interface identifier? */ + unsigned int req_ifaceid :1; /* Ask peer to send interface identifier? */ + unsigned int accept_local :1; /* accept peer's value for iface id? */ + unsigned int opt_local :1; /* ourtoken set by option */ + unsigned int opt_remote :1; /* histoken set by option */ + unsigned int use_ip :1; /* use IP as interface identifier */ +#if 0 + unsigned int use_persistent :1; /* use uniquely persistent value for address */ +#endif +#ifdef IPV6CP_COMP + unsigned int neg_vj :1; /* Van Jacobson Compression? 
*/ +#endif /* IPV6CP_COMP */ + +#ifdef IPV6CP_COMP + u_short vj_protocol; /* protocol value to use in VJ option */ +#endif /* IPV6CP_COMP */ + eui64_t ourid, hisid; /* Interface identifiers */ +} ipv6cp_options; + +extern const struct protent ipv6cp_protent; + +#ifdef __cplusplus +} +#endif + +#endif /* IPV6CP_H */ +#endif /* PPP_SUPPORT && PPP_IPV6_SUPPORT */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/netif/ppp/lcp.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/netif/ppp/lcp.h new file mode 100644 index 0000000..37d0548 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/netif/ppp/lcp.h @@ -0,0 +1,179 @@ +/* + * lcp.h - Link Control Protocol definitions. + * + * Copyright (c) 1984-2000 Carnegie Mellon University. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * 3. The name "Carnegie Mellon University" must not be used to + * endorse or promote products derived from this software without + * prior written permission. For permission or any legal + * details, please contact + * Office of Technology Transfer + * Carnegie Mellon University + * 5000 Forbes Avenue + * Pittsburgh, PA 15213-3890 + * (412) 268-4387, fax: (412) 268-7395 + * tech-transfer@andrew.cmu.edu + * + * 4. Redistributions of any form whatsoever must retain the following + * acknowledgment: + * "This product includes software developed by Computing Services + * at Carnegie Mellon University (http://www.cmu.edu/computing/)." + * + * CARNEGIE MELLON UNIVERSITY DISCLAIMS ALL WARRANTIES WITH REGARD TO + * THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY + * AND FITNESS, IN NO EVENT SHALL CARNEGIE MELLON UNIVERSITY BE LIABLE + * FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN + * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING + * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + * + * $Id: lcp.h,v 1.20 2004/11/14 22:53:42 carlsonj Exp $ + */ + +#include "netif/ppp/ppp_opts.h" +#if PPP_SUPPORT /* don't build if not configured for use in lwipopts.h */ + +#ifndef LCP_H +#define LCP_H + +#include "ppp.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * Options. 
+ */ +#define CI_VENDOR 0 /* Vendor Specific */ +#define CI_MRU 1 /* Maximum Receive Unit */ +#define CI_ASYNCMAP 2 /* Async Control Character Map */ +#define CI_AUTHTYPE 3 /* Authentication Type */ +#define CI_QUALITY 4 /* Quality Protocol */ +#define CI_MAGICNUMBER 5 /* Magic Number */ +#define CI_PCOMPRESSION 7 /* Protocol Field Compression */ +#define CI_ACCOMPRESSION 8 /* Address/Control Field Compression */ +#define CI_FCSALTERN 9 /* FCS-Alternatives */ +#define CI_SDP 10 /* Self-Describing-Pad */ +#define CI_NUMBERED 11 /* Numbered-Mode */ +#define CI_CALLBACK 13 /* callback */ +#define CI_MRRU 17 /* max reconstructed receive unit; multilink */ +#define CI_SSNHF 18 /* short sequence numbers for multilink */ +#define CI_EPDISC 19 /* endpoint discriminator */ +#define CI_MPPLUS 22 /* Multi-Link-Plus-Procedure */ +#define CI_LDISC 23 /* Link-Discriminator */ +#define CI_LCPAUTH 24 /* LCP Authentication */ +#define CI_COBS 25 /* Consistent Overhead Byte Stuffing */ +#define CI_PREFELIS 26 /* Prefix Elision */ +#define CI_MPHDRFMT 27 /* MP Header Format */ +#define CI_I18N 28 /* Internationalization */ +#define CI_SDL 29 /* Simple Data Link */ + +/* + * LCP-specific packet types (code numbers). + */ +#define PROTREJ 8 /* Protocol Reject */ +#define ECHOREQ 9 /* Echo Request */ +#define ECHOREP 10 /* Echo Reply */ +#define DISCREQ 11 /* Discard Request */ +#define IDENTIF 12 /* Identification */ +#define TIMEREM 13 /* Time Remaining */ + +/* Value used as data for CI_CALLBACK option */ +#define CBCP_OPT 6 /* Use callback control protocol */ + +#if 0 /* moved to ppp_opts.h */ +#define DEFMRU 1500 /* Try for this */ +#define MINMRU 128 /* No MRUs below this */ +#define MAXMRU 16384 /* Normally limit MRU to this */ +#endif /* moved to ppp_opts.h */ + +/* An endpoint discriminator, used with multilink. */ +#define MAX_ENDP_LEN 20 /* maximum length of discriminator value */ +struct epdisc { + unsigned char class_; /* -- The word "class" is reserved in C++. */ + unsigned char length; + unsigned char value[MAX_ENDP_LEN]; +}; + +/* + * The state of options is described by an lcp_options structure. + */ +typedef struct lcp_options { + unsigned int passive :1; /* Don't die if we don't get a response */ + unsigned int silent :1; /* Wait for the other end to start first */ +#if 0 /* UNUSED */ + unsigned int restart :1; /* Restart vs. exit after close */ +#endif /* UNUSED */ + unsigned int neg_mru :1; /* Negotiate the MRU? */ + unsigned int neg_asyncmap :1; /* Negotiate the async map? */ +#if PAP_SUPPORT + unsigned int neg_upap :1; /* Ask for UPAP authentication? */ +#endif /* PAP_SUPPORT */ +#if CHAP_SUPPORT + unsigned int neg_chap :1; /* Ask for CHAP authentication? */ +#endif /* CHAP_SUPPORT */ +#if EAP_SUPPORT + unsigned int neg_eap :1; /* Ask for EAP authentication? */ +#endif /* EAP_SUPPORT */ + unsigned int neg_magicnumber :1; /* Ask for magic number? */ + unsigned int neg_pcompression :1; /* HDLC Protocol Field Compression? */ + unsigned int neg_accompression :1; /* HDLC Address/Control Field Compression? 
*/ +#if LQR_SUPPORT + unsigned int neg_lqr :1; /* Negotiate use of Link Quality Reports */ +#endif /* LQR_SUPPORT */ + unsigned int neg_cbcp :1; /* Negotiate use of CBCP */ +#ifdef HAVE_MULTILINK + unsigned int neg_mrru :1; /* negotiate multilink MRRU */ +#endif /* HAVE_MULTILINK */ + unsigned int neg_ssnhf :1; /* negotiate short sequence numbers */ + unsigned int neg_endpoint :1; /* negotiate endpoint discriminator */ + + u16_t mru; /* Value of MRU */ +#ifdef HAVE_MULTILINK + u16_t mrru; /* Value of MRRU, and multilink enable */ +#endif /* MULTILINK */ +#if CHAP_SUPPORT + u8_t chap_mdtype; /* which MD types (hashing algorithm) */ +#endif /* CHAP_SUPPORT */ + u32_t asyncmap; /* Value of async map */ + u32_t magicnumber; + u8_t numloops; /* Number of loops during magic number neg. */ +#if LQR_SUPPORT + u32_t lqr_period; /* Reporting period for LQR 1/100ths second */ +#endif /* LQR_SUPPORT */ + struct epdisc endpoint; /* endpoint discriminator */ +} lcp_options; + +void lcp_open(ppp_pcb *pcb); +void lcp_close(ppp_pcb *pcb, const char *reason); +void lcp_lowerup(ppp_pcb *pcb); +void lcp_lowerdown(ppp_pcb *pcb); +void lcp_sprotrej(ppp_pcb *pcb, u_char *p, int len); /* send protocol reject */ + +extern const struct protent lcp_protent; + +#if 0 /* moved to ppp_opts.h */ +/* Default number of times we receive our magic number from the peer + before deciding the link is looped-back. */ +#define DEFLOOPBACKFAIL 10 +#endif /* moved to ppp_opts.h */ + +#ifdef __cplusplus +} +#endif + +#endif /* LCP_H */ +#endif /* PPP_SUPPORT */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/netif/ppp/magic.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/netif/ppp/magic.h new file mode 100644 index 0000000..ffbd073 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/netif/ppp/magic.h @@ -0,0 +1,130 @@ +/* + * magic.h - PPP Magic Number definitions. + * + * Copyright (c) 1984-2000 Carnegie Mellon University. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * 3. The name "Carnegie Mellon University" must not be used to + * endorse or promote products derived from this software without + * prior written permission. For permission or any legal + * details, please contact + * Office of Technology Transfer + * Carnegie Mellon University + * 5000 Forbes Avenue + * Pittsburgh, PA 15213-3890 + * (412) 268-4387, fax: (412) 268-7395 + * tech-transfer@andrew.cmu.edu + * + * 4. Redistributions of any form whatsoever must retain the following + * acknowledgment: + * "This product includes software developed by Computing Services + * at Carnegie Mellon University (http://www.cmu.edu/computing/)." 
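As an illustration of how the lcp_options struct added above is consumed (not part of this patch): once a session is established, the negotiation result is available in the lcp_gotoptions and peer_mru members of the ppp_pcb defined further down in ppp.h. The helper name below is hypothetical, and the 1500 fallback is simply the RFC 1661 default MRU that applies when the option was not negotiated.

#include "netif/ppp/ppp.h"   /* also pulls in lcp.h */

/* Hypothetical helper: dump what LCP actually negotiated for an established pcb. */
static void lcp_result_sketch(ppp_pcb *pcb)
{
  lcp_options *go = &pcb->lcp_gotoptions;   /* options the peer ack'd for us */

  LWIP_DEBUGF(PPP_DEBUG, ("our MRU: %u, peer MRU: %u\n",
              (unsigned)(go->neg_mru ? go->mru : 1500),  /* 1500 = default when MRU was not negotiated */
              (unsigned)pcb->peer_mru));
  LWIP_DEBUGF(PPP_DEBUG, ("our magic number: 0x%08x\n", (unsigned)go->magicnumber));
}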
+ * + * CARNEGIE MELLON UNIVERSITY DISCLAIMS ALL WARRANTIES WITH REGARD TO + * THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY + * AND FITNESS, IN NO EVENT SHALL CARNEGIE MELLON UNIVERSITY BE LIABLE + * FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN + * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING + * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + * + * $Id: magic.h,v 1.5 2003/06/11 23:56:26 paulus Exp $ + */ +/***************************************************************************** +* randm.h - Random number generator header file. +* +* Copyright (c) 2003 by Marc Boucher, Services Informatiques (MBSI) inc. +* Copyright (c) 1998 Global Election Systems Inc. +* +* The authors hereby grant permission to use, copy, modify, distribute, +* and license this software and its documentation for any purpose, provided +* that existing copyright notices are retained in all copies and that this +* notice and the following disclaimer are included verbatim in any +* distributions. No written agreement, license, or royalty fee is required +* for any of the authorized uses. +* +* THIS SOFTWARE IS PROVIDED BY THE CONTRIBUTORS *AS IS* AND ANY EXPRESS OR +* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES +* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +* IN NO EVENT SHALL THE CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT +* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF +* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +* +****************************************************************************** +* REVISION HISTORY +* +* 03-01-01 Marc Boucher +* Ported to lwIP. +* 98-05-29 Guy Lancaster , Global Election Systems Inc. +* Extracted from avos. +*****************************************************************************/ + +#include "netif/ppp/ppp_opts.h" +#if PPP_SUPPORT /* don't build if not configured for use in lwipopts.h */ + +#ifndef MAGIC_H +#define MAGIC_H + +#ifdef __cplusplus +extern "C" { +#endif + +/*********************** +*** PUBLIC FUNCTIONS *** +***********************/ + +/* + * Initialize the random number generator. + */ +void magic_init(void); + +/* + * Randomize our random seed value. To be called for truely random events + * such as user operations and network traffic. + */ +void magic_randomize(void); + +/* + * Return a new random number. + */ +u32_t magic(void); /* Returns the next magic number */ + +/* + * Fill buffer with random bytes + * + * Use the random pool to generate random data. This degrades to pseudo + * random when used faster than randomness is supplied using magic_churnrand(). + * Thus it's important to make sure that the results of this are not + * published directly because one could predict the next result to at + * least some degree. Also, it's important to get a good seed before + * the first use. + */ +void magic_random_bytes(unsigned char *buf, u32_t buf_len); + +/* + * Return a new random number between 0 and (2^pow)-1 included. 
+ */ +u32_t magic_pow(u8_t pow); + +#ifdef __cplusplus +} +#endif + +#endif /* MAGIC_H */ + +#endif /* PPP_SUPPORT */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/netif/ppp/mppe.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/netif/ppp/mppe.h new file mode 100644 index 0000000..4f504fb --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/netif/ppp/mppe.h @@ -0,0 +1,181 @@ +/* + * mppe.h - Definitions for MPPE + * + * Copyright (c) 2008 Paul Mackerras. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * 3. The name(s) of the authors of this software must not be used to + * endorse or promote products derived from this software without + * prior written permission. + * + * 4. Redistributions of any form whatsoever must retain the following + * acknowledgment: + * "This product includes software developed by Paul Mackerras + * ". + * + * THE AUTHORS OF THIS SOFTWARE DISCLAIM ALL WARRANTIES WITH REGARD TO + * THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY + * AND FITNESS, IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY + * SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN + * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING + * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#include "netif/ppp/ppp_opts.h" +#if PPP_SUPPORT && MPPE_SUPPORT /* don't build if not configured for use in lwipopts.h */ + +#ifndef MPPE_H +#define MPPE_H + +#include "netif/ppp/pppcrypt.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#define MPPE_PAD 4 /* MPPE growth per frame */ +#define MPPE_MAX_KEY_LEN 16 /* largest key length (128-bit) */ + +/* option bits for ccp_options.mppe */ +#define MPPE_OPT_40 0x01 /* 40 bit */ +#define MPPE_OPT_128 0x02 /* 128 bit */ +#define MPPE_OPT_STATEFUL 0x04 /* stateful mode */ +/* unsupported opts */ +#define MPPE_OPT_56 0x08 /* 56 bit */ +#define MPPE_OPT_MPPC 0x10 /* MPPC compression */ +#define MPPE_OPT_D 0x20 /* Unknown */ +#define MPPE_OPT_UNSUPPORTED (MPPE_OPT_56|MPPE_OPT_MPPC|MPPE_OPT_D) +#define MPPE_OPT_UNKNOWN 0x40 /* Bits !defined in RFC 3078 were set */ + +/* + * This is not nice ... the alternative is a bitfield struct though. + * And unfortunately, we cannot share the same bits for the option + * names above since C and H are the same bit. We could do a u_int32 + * but then we have to do a lwip_htonl() all the time and/or we still need + * to know which octet is which. + */ +#define MPPE_C_BIT 0x01 /* MPPC */ +#define MPPE_D_BIT 0x10 /* Obsolete, usage unknown */ +#define MPPE_L_BIT 0x20 /* 40-bit */ +#define MPPE_S_BIT 0x40 /* 128-bit */ +#define MPPE_M_BIT 0x80 /* 56-bit, not supported */ +#define MPPE_H_BIT 0x01 /* Stateless (in a different byte) */ + +/* Does not include H bit; used for least significant octet only. 
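The magic.h API above is small enough that a usage illustration fits in a few lines. The sketch below is not part of the patch and its function name is hypothetical; it assumes magic_init() has already been called once at startup, and shows the three typical uses: an LCP magic number, a bounded random value, and a buffer of random bytes such as a CHAP challenge.

#include "lwip/arch.h"
#include "netif/ppp/magic.h"   /* includes ppp_opts.h itself */

/* Hypothetical illustration of the magic.h random number API. */
static void magic_usage_sketch(void)
{
  u8_t challenge[16];
  u32_t m, bounded;

  m = magic();                                      /* a fresh 32-bit magic number           */
  bounded = magic_pow(10);                          /* uniform in [0, 2^10 - 1], i.e. 0..1023 */
  magic_random_bytes(challenge, sizeof(challenge)); /* e.g. a CHAP challenge                 */
  LWIP_UNUSED_ARG(m);
  LWIP_UNUSED_ARG(bounded);
}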
*/ +#define MPPE_ALL_BITS (MPPE_D_BIT|MPPE_L_BIT|MPPE_S_BIT|MPPE_M_BIT|MPPE_H_BIT) + +/* Build a CI from mppe opts (see RFC 3078) */ +#define MPPE_OPTS_TO_CI(opts, ci) \ + do { \ + u_char *ptr = ci; /* u_char[4] */ \ + \ + /* H bit */ \ + if (opts & MPPE_OPT_STATEFUL) \ + *ptr++ = 0x0; \ + else \ + *ptr++ = MPPE_H_BIT; \ + *ptr++ = 0; \ + *ptr++ = 0; \ + \ + /* S,L bits */ \ + *ptr = 0; \ + if (opts & MPPE_OPT_128) \ + *ptr |= MPPE_S_BIT; \ + if (opts & MPPE_OPT_40) \ + *ptr |= MPPE_L_BIT; \ + /* M,D,C bits not supported */ \ + } while (/* CONSTCOND */ 0) + +/* The reverse of the above */ +#define MPPE_CI_TO_OPTS(ci, opts) \ + do { \ + const u_char *ptr = ci; /* u_char[4] */ \ + \ + opts = 0; \ + \ + /* H bit */ \ + if (!(ptr[0] & MPPE_H_BIT)) \ + opts |= MPPE_OPT_STATEFUL; \ + \ + /* S,L bits */ \ + if (ptr[3] & MPPE_S_BIT) \ + opts |= MPPE_OPT_128; \ + if (ptr[3] & MPPE_L_BIT) \ + opts |= MPPE_OPT_40; \ + \ + /* M,D,C bits */ \ + if (ptr[3] & MPPE_M_BIT) \ + opts |= MPPE_OPT_56; \ + if (ptr[3] & MPPE_D_BIT) \ + opts |= MPPE_OPT_D; \ + if (ptr[3] & MPPE_C_BIT) \ + opts |= MPPE_OPT_MPPC; \ + \ + /* Other bits */ \ + if (ptr[0] & ~MPPE_H_BIT) \ + opts |= MPPE_OPT_UNKNOWN; \ + if (ptr[1] || ptr[2]) \ + opts |= MPPE_OPT_UNKNOWN; \ + if (ptr[3] & ~MPPE_ALL_BITS) \ + opts |= MPPE_OPT_UNKNOWN; \ + } while (/* CONSTCOND */ 0) + +/* Shared MPPE padding between MSCHAP and MPPE */ +#define SHA1_PAD_SIZE 40 + +static const u8_t mppe_sha1_pad1[SHA1_PAD_SIZE] = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 +}; +static const u8_t mppe_sha1_pad2[SHA1_PAD_SIZE] = { + 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, + 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, + 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, + 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0xf2 +}; + +/* + * State for an MPPE (de)compressor. + */ +typedef struct ppp_mppe_state { + lwip_arc4_context arc4; + u8_t master_key[MPPE_MAX_KEY_LEN]; + u8_t session_key[MPPE_MAX_KEY_LEN]; + u8_t keylen; /* key length in bytes */ + /* NB: 128-bit == 16, 40-bit == 8! 
+ * If we want to support 56-bit, the unit has to change to bits + */ + u8_t bits; /* MPPE control bits */ + u16_t ccount; /* 12-bit coherency count (seqno) */ + u16_t sanity_errors; /* take down LCP if too many */ + unsigned int stateful :1; /* stateful mode flag */ + unsigned int discard :1; /* stateful mode packet loss flag */ +} ppp_mppe_state; + +void mppe_set_key(ppp_pcb *pcb, ppp_mppe_state *state, u8_t *key); +void mppe_init(ppp_pcb *pcb, ppp_mppe_state *state, u8_t options); +void mppe_comp_reset(ppp_pcb *pcb, ppp_mppe_state *state); +err_t mppe_compress(ppp_pcb *pcb, ppp_mppe_state *state, struct pbuf **pb, u16_t protocol); +void mppe_decomp_reset(ppp_pcb *pcb, ppp_mppe_state *state); +err_t mppe_decompress(ppp_pcb *pcb, ppp_mppe_state *state, struct pbuf **pb); + +#ifdef __cplusplus +} +#endif + +#endif /* MPPE_H */ +#endif /* PPP_SUPPORT && MPPE_SUPPORT */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/netif/ppp/ppp.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/netif/ppp/ppp.h new file mode 100644 index 0000000..3595b7b --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/netif/ppp/ppp.h @@ -0,0 +1,698 @@ +/***************************************************************************** +* ppp.h - Network Point to Point Protocol header file. +* +* Copyright (c) 2003 by Marc Boucher, Services Informatiques (MBSI) inc. +* portions Copyright (c) 1997 Global Election Systems Inc. +* +* The authors hereby grant permission to use, copy, modify, distribute, +* and license this software and its documentation for any purpose, provided +* that existing copyright notices are retained in all copies and that this +* notice and the following disclaimer are included verbatim in any +* distributions. No written agreement, license, or royalty fee is required +* for any of the authorized uses. +* +* THIS SOFTWARE IS PROVIDED BY THE CONTRIBUTORS *AS IS* AND ANY EXPRESS OR +* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES +* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +* IN NO EVENT SHALL THE CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT +* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF +* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +* +****************************************************************************** +* REVISION HISTORY +* +* 03-01-01 Marc Boucher +* Ported to lwIP. +* 97-11-05 Guy Lancaster , Global Election Systems Inc. +* Original derived from BSD codes. 
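The MPPE_OPTS_TO_CI/MPPE_CI_TO_OPTS macros in mppe.h above encode and decode the 4-octet CCP configuration-option value from RFC 3078, and a round trip makes the bit layout concrete. The sketch below is an illustration only (its function name is a placeholder) and assumes MPPE_SUPPORT is enabled in ppp_opts.h so that ppp.h pulls mppe.h in.

#include "netif/ppp/ppp.h"   /* brings in mppe.h and the u_char typedef when MPPE_SUPPORT is set */

/* Hypothetical illustration: stateless 128-bit MPPE, encoded and decoded again. */
static void mppe_ci_sketch(void)
{
  u_char ci[4];
  u8_t opts = MPPE_OPT_128;    /* stateless (no MPPE_OPT_STATEFUL), 128-bit keys */
  u8_t parsed;

  MPPE_OPTS_TO_CI(opts, ci);   /* ci = { MPPE_H_BIT, 0x00, 0x00, MPPE_S_BIT }    */
  MPPE_CI_TO_OPTS(ci, parsed); /* parsed == MPPE_OPT_128 again                   */
  LWIP_UNUSED_ARG(parsed);
}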
+*****************************************************************************/ + +#include "netif/ppp/ppp_opts.h" +#if PPP_SUPPORT /* don't build if not configured for use in lwipopts.h */ + +#ifndef PPP_H +#define PPP_H + +#include "lwip/def.h" +#include "lwip/stats.h" +#include "lwip/mem.h" +#include "lwip/netif.h" +#include "lwip/sys.h" +#include "lwip/timeouts.h" +#if PPP_IPV6_SUPPORT +#include "lwip/ip6_addr.h" +#endif /* PPP_IPV6_SUPPORT */ + +#ifdef __cplusplus +extern "C" { +#endif + +/* Disable non-working or rarely used PPP feature, so rarely that we don't want to bloat ppp_opts.h with them */ +#ifndef PPP_OPTIONS +#define PPP_OPTIONS 0 +#endif + +#ifndef PPP_NOTIFY +#define PPP_NOTIFY 0 +#endif + +#ifndef PPP_REMOTENAME +#define PPP_REMOTENAME 0 +#endif + +#ifndef PPP_IDLETIMELIMIT +#define PPP_IDLETIMELIMIT 0 +#endif + +#ifndef PPP_LCP_ADAPTIVE +#define PPP_LCP_ADAPTIVE 0 +#endif + +#ifndef PPP_MAXCONNECT +#define PPP_MAXCONNECT 0 +#endif + +#ifndef PPP_ALLOWED_ADDRS +#define PPP_ALLOWED_ADDRS 0 +#endif + +#ifndef PPP_PROTOCOLNAME +#define PPP_PROTOCOLNAME 0 +#endif + +#ifndef PPP_STATS_SUPPORT +#define PPP_STATS_SUPPORT 0 +#endif + +#ifndef DEFLATE_SUPPORT +#define DEFLATE_SUPPORT 0 +#endif + +#ifndef BSDCOMPRESS_SUPPORT +#define BSDCOMPRESS_SUPPORT 0 +#endif + +#ifndef PREDICTOR_SUPPORT +#define PREDICTOR_SUPPORT 0 +#endif + +/************************* +*** PUBLIC DEFINITIONS *** +*************************/ + +/* + * The basic PPP frame. + */ +#define PPP_HDRLEN 4 /* octets for standard ppp header */ +#define PPP_FCSLEN 2 /* octets for FCS */ + +/* + * Values for phase. + */ +#define PPP_PHASE_DEAD 0 +#define PPP_PHASE_MASTER 1 +#define PPP_PHASE_HOLDOFF 2 +#define PPP_PHASE_INITIALIZE 3 +#define PPP_PHASE_SERIALCONN 4 +#define PPP_PHASE_DORMANT 5 +#define PPP_PHASE_ESTABLISH 6 +#define PPP_PHASE_AUTHENTICATE 7 +#define PPP_PHASE_CALLBACK 8 +#define PPP_PHASE_NETWORK 9 +#define PPP_PHASE_RUNNING 10 +#define PPP_PHASE_TERMINATE 11 +#define PPP_PHASE_DISCONNECT 12 + +/* Error codes. */ +#define PPPERR_NONE 0 /* No error. */ +#define PPPERR_PARAM 1 /* Invalid parameter. */ +#define PPPERR_OPEN 2 /* Unable to open PPP session. */ +#define PPPERR_DEVICE 3 /* Invalid I/O device for PPP. */ +#define PPPERR_ALLOC 4 /* Unable to allocate resources. */ +#define PPPERR_USER 5 /* User interrupt. */ +#define PPPERR_CONNECT 6 /* Connection lost. */ +#define PPPERR_AUTHFAIL 7 /* Failed authentication challenge. */ +#define PPPERR_PROTOCOL 8 /* Failed to meet protocol. */ +#define PPPERR_PEERDEAD 9 /* Connection timeout */ +#define PPPERR_IDLETIMEOUT 10 /* Idle Timeout */ +#define PPPERR_CONNECTTIME 11 /* Max connect time reached */ +#define PPPERR_LOOPBACK 12 /* Loopback detected */ + +/* Whether auth support is enabled at all */ +#define PPP_AUTH_SUPPORT (PAP_SUPPORT || CHAP_SUPPORT || EAP_SUPPORT) + +/************************ +*** PUBLIC DATA TYPES *** +************************/ + +/* + * Other headers require ppp_pcb definition for prototypes, but ppp_pcb + * require some structure definition from other headers as well, we are + * fixing the dependency loop here by declaring the ppp_pcb type then + * by including headers containing necessary struct definition for ppp_pcb + */ +typedef struct ppp_pcb_s ppp_pcb; + +/* Type definitions for BSD code. 
*/ +#ifndef __u_char_defined +typedef unsigned long u_long; +typedef unsigned int u_int; +typedef unsigned short u_short; +typedef unsigned char u_char; +#endif + +#include "fsm.h" +#include "lcp.h" +#if CCP_SUPPORT +#include "ccp.h" +#endif /* CCP_SUPPORT */ +#if MPPE_SUPPORT +#include "mppe.h" +#endif /* MPPE_SUPPORT */ +#if PPP_IPV4_SUPPORT +#include "ipcp.h" +#endif /* PPP_IPV4_SUPPORT */ +#if PPP_IPV6_SUPPORT +#include "ipv6cp.h" +#endif /* PPP_IPV6_SUPPORT */ +#if PAP_SUPPORT +#include "upap.h" +#endif /* PAP_SUPPORT */ +#if CHAP_SUPPORT +#include "chap-new.h" +#endif /* CHAP_SUPPORT */ +#if EAP_SUPPORT +#include "eap.h" +#endif /* EAP_SUPPORT */ +#if VJ_SUPPORT +#include "vj.h" +#endif /* VJ_SUPPORT */ + +/* Link status callback function prototype */ +typedef void (*ppp_link_status_cb_fn)(ppp_pcb *pcb, int err_code, void *ctx); + +/* + * PPP configuration. + */ +typedef struct ppp_settings_s { + +#if PPP_SERVER && PPP_AUTH_SUPPORT + unsigned int auth_required :1; /* Peer is required to authenticate */ + unsigned int null_login :1; /* Username of "" and a password of "" are acceptable */ +#endif /* PPP_SERVER && PPP_AUTH_SUPPORT */ +#if PPP_REMOTENAME + unsigned int explicit_remote :1; /* remote_name specified with remotename opt */ +#endif /* PPP_REMOTENAME */ +#if PAP_SUPPORT + unsigned int refuse_pap :1; /* Don't proceed auth. with PAP */ +#endif /* PAP_SUPPORT */ +#if CHAP_SUPPORT + unsigned int refuse_chap :1; /* Don't proceed auth. with CHAP */ +#endif /* CHAP_SUPPORT */ +#if MSCHAP_SUPPORT + unsigned int refuse_mschap :1; /* Don't proceed auth. with MS-CHAP */ + unsigned int refuse_mschap_v2 :1; /* Don't proceed auth. with MS-CHAPv2 */ +#endif /* MSCHAP_SUPPORT */ +#if EAP_SUPPORT + unsigned int refuse_eap :1; /* Don't proceed auth. with EAP */ +#endif /* EAP_SUPPORT */ +#if LWIP_DNS + unsigned int usepeerdns :1; /* Ask peer for DNS adds */ +#endif /* LWIP_DNS */ + unsigned int persist :1; /* Persist mode, always try to open the connection */ +#if PRINTPKT_SUPPORT + unsigned int hide_password :1; /* Hide password in dumped packets */ +#endif /* PRINTPKT_SUPPORT */ + unsigned int noremoteip :1; /* Let him have no IP address */ + unsigned int lax_recv :1; /* accept control chars in asyncmap */ + unsigned int noendpoint :1; /* don't send/accept endpoint discriminator */ +#if PPP_LCP_ADAPTIVE + unsigned int lcp_echo_adaptive :1; /* request echo only if the link was idle */ +#endif /* PPP_LCP_ADAPTIVE */ +#if MPPE_SUPPORT + unsigned int require_mppe :1; /* Require MPPE (Microsoft Point to Point Encryption) */ + unsigned int refuse_mppe_40 :1; /* Allow MPPE 40-bit mode? */ + unsigned int refuse_mppe_128 :1; /* Allow MPPE 128-bit mode? */ + unsigned int refuse_mppe_stateful :1; /* Allow MPPE stateful mode? */ +#endif /* MPPE_SUPPORT */ + + u16_t listen_time; /* time to listen first (ms), waiting for peer to send LCP packet */ + +#if PPP_IDLETIMELIMIT + u16_t idle_time_limit; /* Disconnect if idle for this many seconds */ +#endif /* PPP_IDLETIMELIMIT */ +#if PPP_MAXCONNECT + u32_t maxconnect; /* Maximum connect time (seconds) */ +#endif /* PPP_MAXCONNECT */ + +#if PPP_AUTH_SUPPORT + /* auth data */ + const char *user; /* Username for PAP */ + const char *passwd; /* Password for PAP, secret for CHAP */ +#if PPP_REMOTENAME + char remote_name[MAXNAMELEN + 1]; /* Peer's name for authentication */ +#endif /* PPP_REMOTENAME */ + +#if PAP_SUPPORT + u8_t pap_timeout_time; /* Timeout (seconds) for auth-req retrans. 
*/ + u8_t pap_max_transmits; /* Number of auth-reqs sent */ +#if PPP_SERVER + u8_t pap_req_timeout; /* Time to wait for auth-req from peer */ +#endif /* PPP_SERVER */ +#endif /* PAP_SUPPPORT */ + +#if CHAP_SUPPORT + u8_t chap_timeout_time; /* Timeout (seconds) for retransmitting req */ + u8_t chap_max_transmits; /* max # times to send challenge */ +#if PPP_SERVER + u8_t chap_rechallenge_time; /* Time to wait for auth-req from peer */ +#endif /* PPP_SERVER */ +#endif /* CHAP_SUPPPORT */ + +#if EAP_SUPPORT + u8_t eap_req_time; /* Time to wait (for retransmit/fail) */ + u8_t eap_allow_req; /* Max Requests allowed */ +#if PPP_SERVER + u8_t eap_timeout_time; /* Time to wait (for retransmit/fail) */ + u8_t eap_max_transmits; /* Max Requests allowed */ +#endif /* PPP_SERVER */ +#endif /* EAP_SUPPORT */ + +#endif /* PPP_AUTH_SUPPORT */ + + u8_t fsm_timeout_time; /* Timeout time in seconds */ + u8_t fsm_max_conf_req_transmits; /* Maximum Configure-Request transmissions */ + u8_t fsm_max_term_transmits; /* Maximum Terminate-Request transmissions */ + u8_t fsm_max_nak_loops; /* Maximum number of nak loops tolerated */ + + u8_t lcp_loopbackfail; /* Number of times we receive our magic number from the peer + before deciding the link is looped-back. */ + u8_t lcp_echo_interval; /* Interval between LCP echo-requests */ + u8_t lcp_echo_fails; /* Tolerance to unanswered echo-requests */ + +} ppp_settings; + +#if PPP_SERVER +struct ppp_addrs { +#if PPP_IPV4_SUPPORT + ip4_addr_t our_ipaddr, his_ipaddr, netmask; +#if LWIP_DNS + ip4_addr_t dns1, dns2; +#endif /* LWIP_DNS */ +#endif /* PPP_IPV4_SUPPORT */ +#if PPP_IPV6_SUPPORT + ip6_addr_t our6_ipaddr, his6_ipaddr; +#endif /* PPP_IPV6_SUPPORT */ +}; +#endif /* PPP_SERVER */ + +/* + * PPP interface control block. + */ +struct ppp_pcb_s { + ppp_settings settings; + const struct link_callbacks *link_cb; + void *link_ctx_cb; + void (*link_status_cb)(ppp_pcb *pcb, int err_code, void *ctx); /* Status change callback */ +#if PPP_NOTIFY_PHASE + void (*notify_phase_cb)(ppp_pcb *pcb, u8_t phase, void *ctx); /* Notify phase callback */ +#endif /* PPP_NOTIFY_PHASE */ + void *ctx_cb; /* Callbacks optional pointer */ + struct netif *netif; /* PPP interface */ + u8_t phase; /* where the link is at */ + u8_t err_code; /* Code indicating why interface is down. */ + + /* flags */ +#if PPP_IPV4_SUPPORT + unsigned int ask_for_local :1; /* request our address from peer */ + unsigned int ipcp_is_open :1; /* haven't called np_finished() */ + unsigned int ipcp_is_up :1; /* have called ipcp_up() */ + unsigned int if4_up :1; /* True when the IPv4 interface is up. */ +#if 0 /* UNUSED - PROXY ARP */ + unsigned int proxy_arp_set :1; /* Have created proxy arp entry */ +#endif /* UNUSED - PROXY ARP */ +#endif /* PPP_IPV4_SUPPORT */ +#if PPP_IPV6_SUPPORT + unsigned int ipv6cp_is_up :1; /* have called ip6cp_up() */ + unsigned int if6_up :1; /* True when the IPv6 interface is up. */ +#endif /* PPP_IPV6_SUPPORT */ + unsigned int lcp_echo_timer_running :1; /* set if a timer is running */ +#if VJ_SUPPORT + unsigned int vj_enabled :1; /* Flag indicating VJ compression enabled. */ +#endif /* VJ_SUPPORT */ +#if CCP_SUPPORT + unsigned int ccp_all_rejected :1; /* we rejected all peer's options */ +#endif /* CCP_SUPPORT */ +#if MPPE_SUPPORT + unsigned int mppe_keys_set :1; /* Have the MPPE keys been set? 
*/ +#endif /* MPPE_SUPPORT */ + +#if PPP_AUTH_SUPPORT + /* auth data */ +#if PPP_SERVER && defined(HAVE_MULTILINK) + char peer_authname[MAXNAMELEN + 1]; /* The name by which the peer authenticated itself to us. */ +#endif /* PPP_SERVER && defined(HAVE_MULTILINK) */ + u16_t auth_pending; /* Records which authentication operations haven't completed yet. */ + u16_t auth_done; /* Records which authentication operations have been completed. */ + +#if PAP_SUPPORT + upap_state upap; /* PAP data */ +#endif /* PAP_SUPPORT */ + +#if CHAP_SUPPORT + chap_client_state chap_client; /* CHAP client data */ +#if PPP_SERVER + chap_server_state chap_server; /* CHAP server data */ +#endif /* PPP_SERVER */ +#endif /* CHAP_SUPPORT */ + +#if EAP_SUPPORT + eap_state eap; /* EAP data */ +#endif /* EAP_SUPPORT */ +#endif /* PPP_AUTH_SUPPORT */ + + fsm lcp_fsm; /* LCP fsm structure */ + lcp_options lcp_wantoptions; /* Options that we want to request */ + lcp_options lcp_gotoptions; /* Options that peer ack'd */ + lcp_options lcp_allowoptions; /* Options we allow peer to request */ + lcp_options lcp_hisoptions; /* Options that we ack'd */ + u16_t peer_mru; /* currently negotiated peer MRU */ + u8_t lcp_echos_pending; /* Number of outstanding echo msgs */ + u8_t lcp_echo_number; /* ID number of next echo frame */ + + u8_t num_np_open; /* Number of network protocols which we have opened. */ + u8_t num_np_up; /* Number of network protocols which have come up. */ + +#if VJ_SUPPORT + struct vjcompress vj_comp; /* Van Jacobson compression header. */ +#endif /* VJ_SUPPORT */ + +#if CCP_SUPPORT + fsm ccp_fsm; /* CCP fsm structure */ + ccp_options ccp_wantoptions; /* what to request the peer to use */ + ccp_options ccp_gotoptions; /* what the peer agreed to do */ + ccp_options ccp_allowoptions; /* what we'll agree to do */ + ccp_options ccp_hisoptions; /* what we agreed to do */ + u8_t ccp_localstate; /* Local state (mainly for handling reset-reqs and reset-acks). */ + u8_t ccp_receive_method; /* Method chosen on receive path */ + u8_t ccp_transmit_method; /* Method chosen on transmit path */ +#if MPPE_SUPPORT + ppp_mppe_state mppe_comp; /* MPPE "compressor" structure */ + ppp_mppe_state mppe_decomp; /* MPPE "decompressor" structure */ +#endif /* MPPE_SUPPORT */ +#endif /* CCP_SUPPORT */ + +#if PPP_IPV4_SUPPORT + fsm ipcp_fsm; /* IPCP fsm structure */ + ipcp_options ipcp_wantoptions; /* Options that we want to request */ + ipcp_options ipcp_gotoptions; /* Options that peer ack'd */ + ipcp_options ipcp_allowoptions; /* Options we allow peer to request */ + ipcp_options ipcp_hisoptions; /* Options that we ack'd */ +#endif /* PPP_IPV4_SUPPORT */ + +#if PPP_IPV6_SUPPORT + fsm ipv6cp_fsm; /* IPV6CP fsm structure */ + ipv6cp_options ipv6cp_wantoptions; /* Options that we want to request */ + ipv6cp_options ipv6cp_gotoptions; /* Options that peer ack'd */ + ipv6cp_options ipv6cp_allowoptions; /* Options we allow peer to request */ + ipv6cp_options ipv6cp_hisoptions; /* Options that we ack'd */ +#endif /* PPP_IPV6_SUPPORT */ +}; + +/************************ + *** PUBLIC FUNCTIONS *** + ************************/ + +/* + * WARNING: For multi-threads environment, all ppp_set_* functions most + * only be called while the PPP is in the dead phase (i.e. disconnected). + */ + +#if PPP_AUTH_SUPPORT +/* + * Set PPP authentication. + * + * Warning: Using PPPAUTHTYPE_ANY might have security consequences. 
+ * RFC 1994 says: + * + * In practice, within or associated with each PPP server, there is a + * database which associates "user" names with authentication + * information ("secrets"). It is not anticipated that a particular + * named user would be authenticated by multiple methods. This would + * make the user vulnerable to attacks which negotiate the least secure + * method from among a set (such as PAP rather than CHAP). If the same + * secret was used, PAP would reveal the secret to be used later with + * CHAP. + * + * Instead, for each user name there should be an indication of exactly + * one method used to authenticate that user name. If a user needs to + * make use of different authentication methods under different + * circumstances, then distinct user names SHOULD be employed, each of + * which identifies exactly one authentication method. + * + * Default is none auth type, unset (NULL) user and passwd. + */ +#define PPPAUTHTYPE_NONE 0x00 +#define PPPAUTHTYPE_PAP 0x01 +#define PPPAUTHTYPE_CHAP 0x02 +#define PPPAUTHTYPE_MSCHAP 0x04 +#define PPPAUTHTYPE_MSCHAP_V2 0x08 +#define PPPAUTHTYPE_EAP 0x10 +#define PPPAUTHTYPE_ANY 0xff +void ppp_set_auth(ppp_pcb *pcb, u8_t authtype, const char *user, const char *passwd); + +/* + * If set, peer is required to authenticate. This is mostly necessary for PPP server support. + * + * Default is false. + */ +#define ppp_set_auth_required(ppp, boolval) (ppp->settings.auth_required = boolval) +#endif /* PPP_AUTH_SUPPORT */ + +#if PPP_IPV4_SUPPORT +/* + * Set PPP interface "our" and "his" IPv4 addresses. This is mostly necessary for PPP server + * support but it can also be used on a PPP link where each side choose its own IP address. + * + * Default is unset (0.0.0.0). + */ +#define ppp_set_ipcp_ouraddr(ppp, addr) do { ppp->ipcp_wantoptions.ouraddr = ip4_addr_get_u32(addr); \ + ppp->ask_for_local = ppp->ipcp_wantoptions.ouraddr != 0; } while(0) +#define ppp_set_ipcp_hisaddr(ppp, addr) (ppp->ipcp_wantoptions.hisaddr = ip4_addr_get_u32(addr)) +#if LWIP_DNS +/* + * Set DNS server addresses that are sent if the peer asks for them. This is mostly necessary + * for PPP server support. + * + * Default is unset (0.0.0.0). + */ +#define ppp_set_ipcp_dnsaddr(ppp, index, addr) (ppp->ipcp_allowoptions.dnsaddr[index] = ip4_addr_get_u32(addr)) + +/* + * If set, we ask the peer for up to 2 DNS server addresses. Received DNS server addresses are + * registered using the dns_setserver() function. + * + * Default is false. + */ +#define ppp_set_usepeerdns(ppp, boolval) (ppp->settings.usepeerdns = boolval) +#endif /* LWIP_DNS */ +#endif /* PPP_IPV4_SUPPORT */ + +#if MPPE_SUPPORT +/* Disable MPPE (Microsoft Point to Point Encryption). This parameter is exclusive. */ +#define PPP_MPPE_DISABLE 0x00 +/* Require the use of MPPE (Microsoft Point to Point Encryption). */ +#define PPP_MPPE_ENABLE 0x01 +/* Allow MPPE to use stateful mode. Stateless mode is still attempted first. */ +#define PPP_MPPE_ALLOW_STATEFUL 0x02 +/* Refuse the use of MPPE with 40-bit encryption. Conflict with PPP_MPPE_REFUSE_128. */ +#define PPP_MPPE_REFUSE_40 0x04 +/* Refuse the use of MPPE with 128-bit encryption. Conflict with PPP_MPPE_REFUSE_40. */ +#define PPP_MPPE_REFUSE_128 0x08 +/* + * Set MPPE configuration + * + * Default is disabled. + */ +void ppp_set_mppe(ppp_pcb *pcb, u8_t flags); +#endif /* MPPE_SUPPORT */ + +/* + * Wait for up to intval milliseconds for a valid PPP packet from the peer. 
+ * At the end of this time, or when a valid PPP packet is received from the + * peer, we commence negotiation by sending our first LCP packet. + * + * Default is 0. + */ +#define ppp_set_listen_time(ppp, intval) (ppp->settings.listen_time = intval) + +/* + * If set, we will attempt to initiate a connection but if no reply is received from + * the peer, we will then just wait passively for a valid LCP packet from the peer. + * + * Default is false. + */ +#define ppp_set_passive(ppp, boolval) (ppp->lcp_wantoptions.passive = boolval) + +/* + * If set, we will not transmit LCP packets to initiate a connection until a valid + * LCP packet is received from the peer. This is what we usually call the server mode. + * + * Default is false. + */ +#define ppp_set_silent(ppp, boolval) (ppp->lcp_wantoptions.silent = boolval) + +/* + * If set, enable protocol field compression negotiation in both the receive and + * the transmit direction. + * + * Default is true. + */ +#define ppp_set_neg_pcomp(ppp, boolval) (ppp->lcp_wantoptions.neg_pcompression = \ + ppp->lcp_allowoptions.neg_pcompression = boolval) + +/* + * If set, enable Address/Control compression in both the receive and the transmit + * direction. + * + * Default is true. + */ +#define ppp_set_neg_accomp(ppp, boolval) (ppp->lcp_wantoptions.neg_accompression = \ + ppp->lcp_allowoptions.neg_accompression = boolval) + +/* + * If set, enable asyncmap negotiation. Otherwise forcing all control characters to + * be escaped for both the transmit and the receive direction. + * + * Default is true. + */ +#define ppp_set_neg_asyncmap(ppp, boolval) (ppp->lcp_wantoptions.neg_asyncmap = \ + ppp->lcp_allowoptions.neg_asyncmap = boolval) + +/* + * This option sets the Async-Control-Character-Map (ACCM) for this end of the link. + * The ACCM is a set of 32 bits, one for each of the ASCII control characters with + * values from 0 to 31, where a 1 bit indicates that the corresponding control + * character should not be used in PPP packets sent to this system. The map is + * an unsigned 32 bits integer where the least significant bit (00000001) represents + * character 0 and the most significant bit (80000000) represents character 31. + * We will then ask the peer to send these characters as a 2-byte escape sequence. + * + * Default is 0. + */ +#define ppp_set_asyncmap(ppp, intval) (ppp->lcp_wantoptions.asyncmap = intval) + +/* + * Set a PPP interface as the default network interface + * (used to output all packets for which no specific route is found). + */ +#define ppp_set_default(ppp) netif_set_default(ppp->netif) + +#if PPP_NOTIFY_PHASE +/* + * Set a PPP notify phase callback. + * + * This can be used for example to set a LED pattern depending on the + * current phase of the PPP session. + */ +typedef void (*ppp_notify_phase_cb_fn)(ppp_pcb *pcb, u8_t phase, void *ctx); +void ppp_set_notify_phase_callback(ppp_pcb *pcb, ppp_notify_phase_cb_fn notify_phase_cb); +#endif /* PPP_NOTIFY_PHASE */ + +/* + * Initiate a PPP connection. + * + * This can only be called if PPP is in the dead phase. + * + * Holdoff is the time to wait (in seconds) before initiating + * the connection. + * + * If this port connects to a modem, the modem connection must be + * established before calling this. + */ +err_t ppp_connect(ppp_pcb *pcb, u16_t holdoff); + +#if PPP_SERVER +/* + * Listen for an incoming PPP connection. + * + * This can only be called if PPP is in the dead phase. + * + * If this port connects to a modem, the modem connection must be + * established before calling this. 
+ */ +err_t ppp_listen(ppp_pcb *pcb); +#endif /* PPP_SERVER */ + +/* + * Initiate the end of a PPP connection. + * Any outstanding packets in the queues are dropped. + * + * Setting nocarrier to 1 close the PPP connection without initiating the + * shutdown procedure. Always using nocarrier = 0 is still recommended, + * this is going to take a little longer time if your link is down, but + * is a safer choice for the PPP state machine. + * + * Return 0 on success, an error code on failure. + */ +err_t ppp_close(ppp_pcb *pcb, u8_t nocarrier); + +/* + * Release the control block. + * + * This can only be called if PPP is in the dead phase. + * + * You must use ppp_close() before if you wish to terminate + * an established PPP session. + * + * Return 0 on success, an error code on failure. + */ +err_t ppp_free(ppp_pcb *pcb); + +/* + * PPP IOCTL commands. + * + * Get the up status - 0 for down, non-zero for up. The argument must + * point to an int. + */ +#define PPPCTLG_UPSTATUS 0 + +/* + * Get the PPP error code. The argument must point to an int. + * Returns a PPPERR_* value. + */ +#define PPPCTLG_ERRCODE 1 + +/* + * Get the fd associated with a PPP over serial + */ +#define PPPCTLG_FD 2 + +/* + * Get and set parameters for the given connection. + * Return 0 on success, an error code on failure. + */ +err_t ppp_ioctl(ppp_pcb *pcb, u8_t cmd, void *arg); + +/* Get the PPP netif interface */ +#define ppp_netif(ppp) (ppp->netif) + +/* Set an lwIP-style status-callback for the selected PPP device */ +#define ppp_set_netif_statuscallback(ppp, status_cb) \ + netif_set_status_callback(ppp->netif, status_cb); + +/* Set an lwIP-style link-callback for the selected PPP device */ +#define ppp_set_netif_linkcallback(ppp, link_cb) \ + netif_set_link_callback(ppp->netif, link_cb); + +#ifdef __cplusplus +} +#endif + +#endif /* PPP_H */ + +#endif /* PPP_SUPPORT */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/netif/ppp/ppp_impl.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/netif/ppp/ppp_impl.h new file mode 100644 index 0000000..280ff00 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/netif/ppp/ppp_impl.h @@ -0,0 +1,722 @@ +/***************************************************************************** +* ppp.h - Network Point to Point Protocol header file. +* +* Copyright (c) 2003 by Marc Boucher, Services Informatiques (MBSI) inc. +* portions Copyright (c) 1997 Global Election Systems Inc. +* +* The authors hereby grant permission to use, copy, modify, distribute, +* and license this software and its documentation for any purpose, provided +* that existing copyright notices are retained in all copies and that this +* notice and the following disclaimer are included verbatim in any +* distributions. No written agreement, license, or royalty fee is required +* for any of the authorized uses. +* +* THIS SOFTWARE IS PROVIDED BY THE CONTRIBUTORS *AS IS* AND ANY EXPRESS OR +* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES +* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
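ppp.h above is the public client API of this PPP port, so a minimal PPP-over-serial client sketch makes the intended call sequence concrete. It is an illustration, not part of the patch: pppos_create() and pppos_input_tcpip() come from pppos.h elsewhere in the lwIP tree rather than from this hunk, uart_write() and the other my-side names are placeholders for this board's UART glue, and it assumes LWIP_DNS is enabled. The error handling in the status callback follows the pattern documented by lwIP itself (retry with a holdoff, free only after a user-requested close).

#include "netif/ppp/ppp.h"
#include "netif/ppp/pppos.h"      /* pppos_create()/pppos_input_tcpip(), not shown in this hunk */

u32_t uart_write(u8_t *data, u32_t len);   /* hypothetical UART TX routine (placeholder) */

static struct netif pppos_netif;
static ppp_pcb *ppp;

/* Placeholder UART transmit hook: the stack calls this with each serialized frame. */
static u32_t ppp_output_cb(ppp_pcb *pcb, u8_t *data, u32_t len, void *ctx)
{
  LWIP_UNUSED_ARG(pcb);
  LWIP_UNUSED_ARG(ctx);
  return uart_write(data, len);
}

static void ppp_status_cb(ppp_pcb *pcb, int err_code, void *ctx)
{
  LWIP_UNUSED_ARG(ctx);
  if (err_code == PPPERR_NONE) {
    return;                       /* up: addresses (and DNS, if requested) are assigned */
  }
  if (err_code == PPPERR_USER) {
    ppp_free(pcb);                /* we asked for the shutdown via ppp_close()          */
    return;
  }
  ppp_connect(pcb, 30);           /* otherwise retry after a 30 s holdoff               */
}

static void ppp_client_start(void)
{
  ppp = pppos_create(&pppos_netif, ppp_output_cb, ppp_status_cb, NULL);
  ppp_set_auth(ppp, PPPAUTHTYPE_PAP, "user", "password");
  ppp_set_usepeerdns(ppp, 1);     /* peer-supplied DNS servers go to dns_setserver() */
  ppp_set_default(ppp);
  ppp_connect(ppp, 0);            /* start negotiating immediately */
}

/* Received UART bytes are handed to the stack with
 * pppos_input_tcpip(ppp, buf, len), since this project runs lwIP on an OS. */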
+* IN NO EVENT SHALL THE CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT +* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF +* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +* +****************************************************************************** +* REVISION HISTORY +* +* 03-01-01 Marc Boucher +* Ported to lwIP. +* 97-11-05 Guy Lancaster , Global Election Systems Inc. +* Original derived from BSD codes. +*****************************************************************************/ +#ifndef LWIP_HDR_PPP_IMPL_H +#define LWIP_HDR_PPP_IMPL_H + +#include "netif/ppp/ppp_opts.h" + +#if PPP_SUPPORT /* don't build if not configured for use in lwipopts.h */ + +#ifdef PPP_INCLUDE_SETTINGS_HEADER +#include "ppp_settings.h" +#endif + +#include /* formats */ +#include +#include +#include /* strtol() */ + +#include "lwip/netif.h" +#include "lwip/def.h" +#include "lwip/timeouts.h" + +#include "ppp.h" +#include "pppdebug.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * Memory used for control packets. + * + * PPP_CTRL_PBUF_MAX_SIZE is the amount of memory we allocate when we + * cannot figure out how much we are going to use before filling the buffer. + */ +#if PPP_USE_PBUF_RAM +#define PPP_CTRL_PBUF_TYPE PBUF_RAM +#define PPP_CTRL_PBUF_MAX_SIZE 512 +#else /* PPP_USE_PBUF_RAM */ +#define PPP_CTRL_PBUF_TYPE PBUF_POOL +#define PPP_CTRL_PBUF_MAX_SIZE PBUF_POOL_BUFSIZE +#endif /* PPP_USE_PBUF_RAM */ + +/* + * The basic PPP frame. + */ +#define PPP_ADDRESS(p) (((u_char *)(p))[0]) +#define PPP_CONTROL(p) (((u_char *)(p))[1]) +#define PPP_PROTOCOL(p) ((((u_char *)(p))[2] << 8) + ((u_char *)(p))[3]) + +/* + * Significant octet values. + */ +#define PPP_ALLSTATIONS 0xff /* All-Stations broadcast address */ +#define PPP_UI 0x03 /* Unnumbered Information */ +#define PPP_FLAG 0x7e /* Flag Sequence */ +#define PPP_ESCAPE 0x7d /* Asynchronous Control Escape */ +#define PPP_TRANS 0x20 /* Asynchronous transparency modifier */ + +/* + * Protocol field values. 
+ */ +#define PPP_IP 0x21 /* Internet Protocol */ +#if 0 /* UNUSED */ +#define PPP_AT 0x29 /* AppleTalk Protocol */ +#define PPP_IPX 0x2b /* IPX protocol */ +#endif /* UNUSED */ +#if VJ_SUPPORT +#define PPP_VJC_COMP 0x2d /* VJ compressed TCP */ +#define PPP_VJC_UNCOMP 0x2f /* VJ uncompressed TCP */ +#endif /* VJ_SUPPORT */ +#if PPP_IPV6_SUPPORT +#define PPP_IPV6 0x57 /* Internet Protocol Version 6 */ +#endif /* PPP_IPV6_SUPPORT */ +#if CCP_SUPPORT +#define PPP_COMP 0xfd /* compressed packet */ +#endif /* CCP_SUPPORT */ +#define PPP_IPCP 0x8021 /* IP Control Protocol */ +#if 0 /* UNUSED */ +#define PPP_ATCP 0x8029 /* AppleTalk Control Protocol */ +#define PPP_IPXCP 0x802b /* IPX Control Protocol */ +#endif /* UNUSED */ +#if PPP_IPV6_SUPPORT +#define PPP_IPV6CP 0x8057 /* IPv6 Control Protocol */ +#endif /* PPP_IPV6_SUPPORT */ +#if CCP_SUPPORT +#define PPP_CCP 0x80fd /* Compression Control Protocol */ +#endif /* CCP_SUPPORT */ +#if ECP_SUPPORT +#define PPP_ECP 0x8053 /* Encryption Control Protocol */ +#endif /* ECP_SUPPORT */ +#define PPP_LCP 0xc021 /* Link Control Protocol */ +#if PAP_SUPPORT +#define PPP_PAP 0xc023 /* Password Authentication Protocol */ +#endif /* PAP_SUPPORT */ +#if LQR_SUPPORT +#define PPP_LQR 0xc025 /* Link Quality Report protocol */ +#endif /* LQR_SUPPORT */ +#if CHAP_SUPPORT +#define PPP_CHAP 0xc223 /* Cryptographic Handshake Auth. Protocol */ +#endif /* CHAP_SUPPORT */ +#if CBCP_SUPPORT +#define PPP_CBCP 0xc029 /* Callback Control Protocol */ +#endif /* CBCP_SUPPORT */ +#if EAP_SUPPORT +#define PPP_EAP 0xc227 /* Extensible Authentication Protocol */ +#endif /* EAP_SUPPORT */ + +/* + * The following struct gives the addresses of procedures to call + * for a particular lower link level protocol. + */ +struct link_callbacks { + /* Start a connection (e.g. Initiate discovery phase) */ + void (*connect) (ppp_pcb *pcb, void *ctx); +#if PPP_SERVER + /* Listen for an incoming connection (Passive mode) */ + void (*listen) (ppp_pcb *pcb, void *ctx); +#endif /* PPP_SERVER */ + /* End a connection (i.e. initiate disconnect phase) */ + void (*disconnect) (ppp_pcb *pcb, void *ctx); + /* Free lower protocol control block */ + err_t (*free) (ppp_pcb *pcb, void *ctx); + /* Write a pbuf to a ppp link, only used from PPP functions to send PPP packets. */ + err_t (*write)(ppp_pcb *pcb, void *ctx, struct pbuf *p); + /* Send a packet from lwIP core (IPv4 or IPv6) */ + err_t (*netif_output)(ppp_pcb *pcb, void *ctx, struct pbuf *p, u_short protocol); + /* configure the transmit-side characteristics of the PPP interface */ + void (*send_config)(ppp_pcb *pcb, void *ctx, u32_t accm, int pcomp, int accomp); + /* confire the receive-side characteristics of the PPP interface */ + void (*recv_config)(ppp_pcb *pcb, void *ctx, u32_t accm, int pcomp, int accomp); +}; + +/* + * What to do with network protocol (NP) packets. + */ +enum NPmode { + NPMODE_PASS, /* pass the packet through */ + NPMODE_DROP, /* silently drop the packet */ + NPMODE_ERROR, /* return an error */ + NPMODE_QUEUE /* save it up for later. */ +}; + +/* + * Statistics. 
+ */ +#if PPP_STATS_SUPPORT +struct pppstat { + unsigned int ppp_ibytes; /* bytes received */ + unsigned int ppp_ipackets; /* packets received */ + unsigned int ppp_ierrors; /* receive errors */ + unsigned int ppp_obytes; /* bytes sent */ + unsigned int ppp_opackets; /* packets sent */ + unsigned int ppp_oerrors; /* transmit errors */ +}; + +#if VJ_SUPPORT +struct vjstat { + unsigned int vjs_packets; /* outbound packets */ + unsigned int vjs_compressed; /* outbound compressed packets */ + unsigned int vjs_searches; /* searches for connection state */ + unsigned int vjs_misses; /* times couldn't find conn. state */ + unsigned int vjs_uncompressedin; /* inbound uncompressed packets */ + unsigned int vjs_compressedin; /* inbound compressed packets */ + unsigned int vjs_errorin; /* inbound unknown type packets */ + unsigned int vjs_tossed; /* inbound packets tossed because of error */ +}; +#endif /* VJ_SUPPORT */ + +struct ppp_stats { + struct pppstat p; /* basic PPP statistics */ +#if VJ_SUPPORT + struct vjstat vj; /* VJ header compression statistics */ +#endif /* VJ_SUPPORT */ +}; + +#if CCP_SUPPORT +struct compstat { + unsigned int unc_bytes; /* total uncompressed bytes */ + unsigned int unc_packets; /* total uncompressed packets */ + unsigned int comp_bytes; /* compressed bytes */ + unsigned int comp_packets; /* compressed packets */ + unsigned int inc_bytes; /* incompressible bytes */ + unsigned int inc_packets; /* incompressible packets */ + unsigned int ratio; /* recent compression ratio << 8 */ +}; + +struct ppp_comp_stats { + struct compstat c; /* packet compression statistics */ + struct compstat d; /* packet decompression statistics */ +}; +#endif /* CCP_SUPPORT */ + +#endif /* PPP_STATS_SUPPORT */ + +#if PPP_IDLETIMELIMIT +/* + * The following structure records the time in seconds since + * the last NP packet was sent or received. + */ +struct ppp_idle { + time_t xmit_idle; /* time since last NP packet sent */ + time_t recv_idle; /* time since last NP packet received */ +}; +#endif /* PPP_IDLETIMELIMIT */ + +/* values for epdisc.class */ +#define EPD_NULL 0 /* null discriminator, no data */ +#define EPD_LOCAL 1 +#define EPD_IP 2 +#define EPD_MAC 3 +#define EPD_MAGIC 4 +#define EPD_PHONENUM 5 + +/* + * Global variables. + */ +#ifdef HAVE_MULTILINK +extern u8_t multilink; /* enable multilink operation */ +extern u8_t doing_multilink; +extern u8_t multilink_master; +extern u8_t bundle_eof; +extern u8_t bundle_terminating; +#endif + +#ifdef MAXOCTETS +extern unsigned int maxoctets; /* Maximum octetes per session (in bytes) */ +extern int maxoctets_dir; /* Direction : + 0 - in+out (default) + 1 - in + 2 - out + 3 - max(in,out) */ +extern int maxoctets_timeout; /* Timeout for check of octets limit */ +#define PPP_OCTETS_DIRECTION_SUM 0 +#define PPP_OCTETS_DIRECTION_IN 1 +#define PPP_OCTETS_DIRECTION_OUT 2 +#define PPP_OCTETS_DIRECTION_MAXOVERAL 3 +/* same as previos, but little different on RADIUS side */ +#define PPP_OCTETS_DIRECTION_MAXSESSION 4 +#endif + +/* Data input may be used by CCP and ECP, remove this entry + * from struct protent to save some flash + */ +#define PPP_DATAINPUT 0 + +/* + * The following struct gives the addresses of procedures to call + * for a particular protocol. 
+ */ +struct protent { + u_short protocol; /* PPP protocol number */ + /* Initialization procedure */ + void (*init) (ppp_pcb *pcb); + /* Process a received packet */ + void (*input) (ppp_pcb *pcb, u_char *pkt, int len); + /* Process a received protocol-reject */ + void (*protrej) (ppp_pcb *pcb); + /* Lower layer has come up */ + void (*lowerup) (ppp_pcb *pcb); + /* Lower layer has gone down */ + void (*lowerdown) (ppp_pcb *pcb); + /* Open the protocol */ + void (*open) (ppp_pcb *pcb); + /* Close the protocol */ + void (*close) (ppp_pcb *pcb, const char *reason); +#if PRINTPKT_SUPPORT + /* Print a packet in readable form */ + int (*printpkt) (const u_char *pkt, int len, + void (*printer) (void *, const char *, ...), + void *arg); +#endif /* PRINTPKT_SUPPORT */ +#if PPP_DATAINPUT + /* Process a received data packet */ + void (*datainput) (ppp_pcb *pcb, u_char *pkt, int len); +#endif /* PPP_DATAINPUT */ +#if PRINTPKT_SUPPORT + const char *name; /* Text name of protocol */ + const char *data_name; /* Text name of corresponding data protocol */ +#endif /* PRINTPKT_SUPPORT */ +#if PPP_OPTIONS + option_t *options; /* List of command-line options */ + /* Check requested options, assign defaults */ + void (*check_options) (void); +#endif /* PPP_OPTIONS */ +#if DEMAND_SUPPORT + /* Configure interface for demand-dial */ + int (*demand_conf) (int unit); + /* Say whether to bring up link for this pkt */ + int (*active_pkt) (u_char *pkt, int len); +#endif /* DEMAND_SUPPORT */ +}; + +/* Table of pointers to supported protocols */ +extern const struct protent* const protocols[]; + + +/* Values for auth_pending, auth_done */ +#if PAP_SUPPORT +#define PAP_WITHPEER 0x1 +#define PAP_PEER 0x2 +#endif /* PAP_SUPPORT */ +#if CHAP_SUPPORT +#define CHAP_WITHPEER 0x4 +#define CHAP_PEER 0x8 +#endif /* CHAP_SUPPORT */ +#if EAP_SUPPORT +#define EAP_WITHPEER 0x10 +#define EAP_PEER 0x20 +#endif /* EAP_SUPPORT */ + +/* Values for auth_done only */ +#if CHAP_SUPPORT +#define CHAP_MD5_WITHPEER 0x40 +#define CHAP_MD5_PEER 0x80 +#if MSCHAP_SUPPORT +#define CHAP_MS_SHIFT 8 /* LSB position for MS auths */ +#define CHAP_MS_WITHPEER 0x100 +#define CHAP_MS_PEER 0x200 +#define CHAP_MS2_WITHPEER 0x400 +#define CHAP_MS2_PEER 0x800 +#endif /* MSCHAP_SUPPORT */ +#endif /* CHAP_SUPPORT */ + +/* Supported CHAP protocols */ +#if CHAP_SUPPORT + +#if MSCHAP_SUPPORT +#define CHAP_MDTYPE_SUPPORTED (MDTYPE_MICROSOFT_V2 | MDTYPE_MICROSOFT | MDTYPE_MD5) +#else /* MSCHAP_SUPPORT */ +#define CHAP_MDTYPE_SUPPORTED (MDTYPE_MD5) +#endif /* MSCHAP_SUPPORT */ + +#else /* CHAP_SUPPORT */ +#define CHAP_MDTYPE_SUPPORTED (MDTYPE_NONE) +#endif /* CHAP_SUPPORT */ + +#if PPP_STATS_SUPPORT +/* + * PPP statistics structure + */ +struct pppd_stats { + unsigned int bytes_in; + unsigned int bytes_out; + unsigned int pkts_in; + unsigned int pkts_out; +}; +#endif /* PPP_STATS_SUPPORT */ + + +/* + * PPP private functions + */ + + +/* + * Functions called from lwIP core. + */ + +/* initialize the PPP subsystem */ +int ppp_init(void); + +/* + * Functions called from PPP link protocols. + */ + +/* Create a new PPP control block */ +ppp_pcb *ppp_new(struct netif *pppif, const struct link_callbacks *callbacks, void *link_ctx_cb, + ppp_link_status_cb_fn link_status_cb, void *ctx_cb); + +/* Initiate LCP open request */ +void ppp_start(ppp_pcb *pcb); + +/* Called when link failed to setup */ +void ppp_link_failed(ppp_pcb *pcb); + +/* Called when link is normally down (i.e. 
it was asked to end) */ +void ppp_link_end(ppp_pcb *pcb); + +/* function called to process input packet */ +void ppp_input(ppp_pcb *pcb, struct pbuf *pb); + + +/* + * Functions called by PPP protocols. + */ + +/* function called by all PPP subsystems to send packets */ +err_t ppp_write(ppp_pcb *pcb, struct pbuf *p); + +/* functions called by auth.c link_terminated() */ +void ppp_link_terminated(ppp_pcb *pcb); + +void new_phase(ppp_pcb *pcb, int p); + +int ppp_send_config(ppp_pcb *pcb, int mtu, u32_t accm, int pcomp, int accomp); +int ppp_recv_config(ppp_pcb *pcb, int mru, u32_t accm, int pcomp, int accomp); + +#if PPP_IPV4_SUPPORT +int sifaddr(ppp_pcb *pcb, u32_t our_adr, u32_t his_adr, u32_t netmask); +int cifaddr(ppp_pcb *pcb, u32_t our_adr, u32_t his_adr); +#if 0 /* UNUSED - PROXY ARP */ +int sifproxyarp(ppp_pcb *pcb, u32_t his_adr); +int cifproxyarp(ppp_pcb *pcb, u32_t his_adr); +#endif /* UNUSED - PROXY ARP */ +#if LWIP_DNS +int sdns(ppp_pcb *pcb, u32_t ns1, u32_t ns2); +int cdns(ppp_pcb *pcb, u32_t ns1, u32_t ns2); +#endif /* LWIP_DNS */ +#if VJ_SUPPORT +int sifvjcomp(ppp_pcb *pcb, int vjcomp, int cidcomp, int maxcid); +#endif /* VJ_SUPPORT */ +int sifup(ppp_pcb *pcb); +int sifdown (ppp_pcb *pcb); +u32_t get_mask(u32_t addr); +#endif /* PPP_IPV4_SUPPORT */ + +#if PPP_IPV6_SUPPORT +int sif6addr(ppp_pcb *pcb, eui64_t our_eui64, eui64_t his_eui64); +int cif6addr(ppp_pcb *pcb, eui64_t our_eui64, eui64_t his_eui64); +int sif6up(ppp_pcb *pcb); +int sif6down (ppp_pcb *pcb); +#endif /* PPP_IPV6_SUPPORT */ + +#if DEMAND_SUPPORT +int sifnpmode(ppp_pcb *pcb, int proto, enum NPmode mode); +#endif /* DEMAND_SUPPORt */ + +void netif_set_mtu(ppp_pcb *pcb, int mtu); +int netif_get_mtu(ppp_pcb *pcb); + +#if CCP_SUPPORT +#if 0 /* unused */ +int ccp_test(ppp_pcb *pcb, u_char *opt_ptr, int opt_len, int for_transmit); +#endif /* unused */ +void ccp_set(ppp_pcb *pcb, u8_t isopen, u8_t isup, u8_t receive_method, u8_t transmit_method); +void ccp_reset_comp(ppp_pcb *pcb); +void ccp_reset_decomp(ppp_pcb *pcb); +#if 0 /* unused */ +int ccp_fatal_error(ppp_pcb *pcb); +#endif /* unused */ +#endif /* CCP_SUPPORT */ + +#if PPP_IDLETIMELIMIT +int get_idle_time(ppp_pcb *pcb, struct ppp_idle *ip); +#endif /* PPP_IDLETIMELIMIT */ + +#if DEMAND_SUPPORT +int get_loop_output(void); +#endif /* DEMAND_SUPPORT */ + +/* Optional protocol names list, to make our messages a little more informative. */ +#if PPP_PROTOCOLNAME +const char * protocol_name(int proto); +#endif /* PPP_PROTOCOLNAME */ + +/* Optional stats support, to get some statistics on the PPP interface */ +#if PPP_STATS_SUPPORT +void print_link_stats(void); /* Print stats, if available */ +void reset_link_stats(int u); /* Reset (init) stats when link goes up */ +void update_link_stats(int u); /* Get stats at link termination */ +#endif /* PPP_STATS_SUPPORT */ + + + +/* + * Inline versions of get/put char/short/long. + * Pointer is advanced; we assume that both arguments + * are lvalues and will already be in registers. + * cp MUST be u_char *. 
+ */ +#define GETCHAR(c, cp) { \ + (c) = *(cp)++; \ +} +#define PUTCHAR(c, cp) { \ + *(cp)++ = (u_char) (c); \ +} +#define GETSHORT(s, cp) { \ + (s) = *(cp)++ << 8; \ + (s) |= *(cp)++; \ +} +#define PUTSHORT(s, cp) { \ + *(cp)++ = (u_char) ((s) >> 8); \ + *(cp)++ = (u_char) (s); \ +} +#define GETLONG(l, cp) { \ + (l) = *(cp)++ << 8; \ + (l) |= *(cp)++; (l) <<= 8; \ + (l) |= *(cp)++; (l) <<= 8; \ + (l) |= *(cp)++; \ +} +#define PUTLONG(l, cp) { \ + *(cp)++ = (u_char) ((l) >> 24); \ + *(cp)++ = (u_char) ((l) >> 16); \ + *(cp)++ = (u_char) ((l) >> 8); \ + *(cp)++ = (u_char) (l); \ +} + +#define INCPTR(n, cp) ((cp) += (n)) +#define DECPTR(n, cp) ((cp) -= (n)) + +/* + * System dependent definitions for user-level 4.3BSD UNIX implementation. + */ +#define TIMEOUT(f, a, t) do { sys_untimeout((f), (a)); sys_timeout((t)*1000, (f), (a)); } while(0) +#define TIMEOUTMS(f, a, t) do { sys_untimeout((f), (a)); sys_timeout((t), (f), (a)); } while(0) +#define UNTIMEOUT(f, a) sys_untimeout((f), (a)) + +#define BZERO(s, n) memset(s, 0, n) +#define BCMP(s1, s2, l) memcmp(s1, s2, l) + +#define PRINTMSG(m, l) { ppp_info("Remote message: %0.*v", l, m); } + +/* + * MAKEHEADER - Add Header fields to a packet. + */ +#define MAKEHEADER(p, t) { \ + PUTCHAR(PPP_ALLSTATIONS, p); \ + PUTCHAR(PPP_UI, p); \ + PUTSHORT(t, p); } + +/* Procedures exported from auth.c */ +void link_required(ppp_pcb *pcb); /* we are starting to use the link */ +void link_terminated(ppp_pcb *pcb); /* we are finished with the link */ +void link_down(ppp_pcb *pcb); /* the LCP layer has left the Opened state */ +void upper_layers_down(ppp_pcb *pcb); /* take all NCPs down */ +void link_established(ppp_pcb *pcb); /* the link is up; authenticate now */ +void start_networks(ppp_pcb *pcb); /* start all the network control protos */ +void continue_networks(ppp_pcb *pcb); /* start network [ip, etc] control protos */ +#if PPP_AUTH_SUPPORT +#if PPP_SERVER +int auth_check_passwd(ppp_pcb *pcb, char *auser, int userlen, char *apasswd, int passwdlen, const char **msg, int *msglen); + /* check the user name and passwd against configuration */ +void auth_peer_fail(ppp_pcb *pcb, int protocol); + /* peer failed to authenticate itself */ +void auth_peer_success(ppp_pcb *pcb, int protocol, int prot_flavor, const char *name, int namelen); + /* peer successfully authenticated itself */ +#endif /* PPP_SERVER */ +void auth_withpeer_fail(ppp_pcb *pcb, int protocol); + /* we failed to authenticate ourselves */ +void auth_withpeer_success(ppp_pcb *pcb, int protocol, int prot_flavor); + /* we successfully authenticated ourselves */ +#endif /* PPP_AUTH_SUPPORT */ +void np_up(ppp_pcb *pcb, int proto); /* a network protocol has come up */ +void np_down(ppp_pcb *pcb, int proto); /* a network protocol has gone down */ +void np_finished(ppp_pcb *pcb, int proto); /* a network protocol no longer needs link */ +#if PPP_AUTH_SUPPORT +int get_secret(ppp_pcb *pcb, const char *client, const char *server, char *secret, int *secret_len, int am_server); + /* get "secret" for chap */ +#endif /* PPP_AUTH_SUPPORT */ + +/* Procedures exported from ipcp.c */ +/* int parse_dotted_ip (char *, u32_t *); */ + +/* Procedures exported from demand.c */ +#if DEMAND_SUPPORT +void demand_conf (void); /* config interface(s) for demand-dial */ +void demand_block (void); /* set all NPs to queue up packets */ +void demand_unblock (void); /* set all NPs to pass packets */ +void demand_discard (void); /* set all NPs to discard packets */ +void demand_rexmit (int, u32_t); /* retransmit saved frames for an NP*/ 
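/*
 * Illustrative sketch accompanying this patch; it is not part of the
 * upstream lwIP ppp_impl.h content being added above. It only shows how the
 * byte-access macros defined earlier in this header (MAKEHEADER, PUTCHAR,
 * PUTSHORT) are typically combined to serialize a PPP control-packet
 * header. The function name, identifier and length values are made up
 * for the example.
 */
#if 0 /* example only, never compiled into the stack */
static int example_build_lcp_header(u_char *buf, u_short body_len)
{
  u_char *p = buf;
  MAKEHEADER(p, PPP_LCP);  /* writes 0xff 0x03 0xc0 0x21 and advances p by 4 */
  PUTCHAR(1, p);           /* LCP code 1 = Configure-Request */
  PUTCHAR(0x2a, p);        /* arbitrary identifier for the exchange */
  PUTSHORT(body_len, p);   /* LCP length: code + id + length + options */
  return (int)(p - buf);   /* total bytes written so far (8) */
}
#endif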
+int loop_chars (unsigned char *, int); /* process chars from loopback */ +int loop_frame (unsigned char *, int); /* should we bring link up? */ +#endif /* DEMAND_SUPPORT */ + +/* Procedures exported from multilink.c */ +#ifdef HAVE_MULTILINK +void mp_check_options (void); /* Check multilink-related options */ +int mp_join_bundle (void); /* join our link to an appropriate bundle */ +void mp_exit_bundle (void); /* have disconnected our link from bundle */ +void mp_bundle_terminated (void); +char *epdisc_to_str (struct epdisc *); /* string from endpoint discrim. */ +int str_to_epdisc (struct epdisc *, char *); /* endpt disc. from str */ +#else +#define mp_bundle_terminated() /* nothing */ +#define mp_exit_bundle() /* nothing */ +#define doing_multilink 0 +#define multilink_master 0 +#endif + +/* Procedures exported from utils.c. */ +void ppp_print_string(const u_char *p, int len, void (*printer) (void *, const char *, ...), void *arg); /* Format a string for output */ +int ppp_slprintf(char *buf, int buflen, const char *fmt, ...); /* sprintf++ */ +int ppp_vslprintf(char *buf, int buflen, const char *fmt, va_list args); /* vsprintf++ */ +size_t ppp_strlcpy(char *dest, const char *src, size_t len); /* safe strcpy */ +size_t ppp_strlcat(char *dest, const char *src, size_t len); /* safe strncpy */ +void ppp_dbglog(const char *fmt, ...); /* log a debug message */ +void ppp_info(const char *fmt, ...); /* log an informational message */ +void ppp_notice(const char *fmt, ...); /* log a notice-level message */ +void ppp_warn(const char *fmt, ...); /* log a warning message */ +void ppp_error(const char *fmt, ...); /* log an error message */ +void ppp_fatal(const char *fmt, ...); /* log an error message and die(1) */ +#if PRINTPKT_SUPPORT +void ppp_dump_packet(ppp_pcb *pcb, const char *tag, unsigned char *p, int len); + /* dump packet to debug log if interesting */ +#endif /* PRINTPKT_SUPPORT */ + +/* + * Number of necessary timers analysis. + * + * PPP use at least one timer per each of its protocol, but not all protocols are + * active at the same time, thus the number of necessary timeouts is actually + * lower than enabled protocols. Here is the actual necessary timeouts based + * on code analysis. + * + * Note that many features analysed here are not working at all and are only + * there for a comprehensive analysis of necessary timers in order to prevent + * having to redo that each time we add a feature. + * + * Timer list + * + * | holdoff timeout + * | low level protocol timeout (PPPoE or PPPoL2P) + * | LCP delayed UP + * | LCP retransmit (FSM) + * | LCP Echo timer + * .| PAP or CHAP or EAP authentication + * . | ECP retransmit (FSM) + * . | CCP retransmit (FSM) when MPPE is enabled + * . | CCP retransmit (FSM) when MPPE is NOT enabled + * . | IPCP retransmit (FSM) + * . .| IP6CP retransmit (FSM) + * . . | Idle time limit + * . . | Max connect time + * . . | Max octets + * . . | CCP RACK timeout + * . . . + * PPP_PHASE_DEAD + * PPP_PHASE_HOLDOFF + * | . . . + * PPP_PHASE_INITIALIZE + * | . . . + * PPP_PHASE_ESTABLISH + * | . . . + * |. . . + * | . . + * PPP_PHASE_AUTHENTICATE + * | . . + * || . . + * PPP_PHASE_NETWORK + * | || . . + * | ||| . + * PPP_PHASE_RUNNING + * | .||||| + * | . |||| + * PPP_PHASE_TERMINATE + * | . |||| + * PPP_PHASE_NETWORK + * |. . + * PPP_PHASE_ESTABLISH + * PPP_PHASE_DISCONNECT + * PPP_PHASE_DEAD + * + * Alright, PPP basic retransmission and LCP Echo consume one timer. + * 1 + * + * If authentication is enabled one timer is necessary during authentication. 
+ * 1 + PPP_AUTH_SUPPORT + * + * If ECP is enabled one timer is necessary before IPCP and/or IP6CP, one more + * is necessary if CCP is enabled (only with MPPE support but we don't care much + * up to this detail level). + * 1 + ECP_SUPPORT + CCP_SUPPORT + * + * If CCP is enabled it might consume a timer during IPCP or IP6CP, thus + * we might use IPCP, IP6CP and CCP timers simultaneously. + * 1 + PPP_IPV4_SUPPORT + PPP_IPV6_SUPPORT + CCP_SUPPORT + * + * When entering running phase, IPCP or IP6CP is still running. If idle time limit + * is enabled one more timer is necessary. Same for max connect time and max + * octets features. Furthermore CCP RACK might be used past this point. + * 1 + PPP_IPV4_SUPPORT + PPP_IPV6_SUPPORT -1 + PPP_IDLETIMELIMIT + PPP_MAXCONNECT + MAXOCTETS + CCP_SUPPORT + * + * IPv4 or IPv6 must be enabled, therefore we don't need to take care the authentication + * and the CCP + ECP case, thus reducing overall complexity. + * 1 + LWIP_MAX(PPP_IPV4_SUPPORT + PPP_IPV6_SUPPORT + CCP_SUPPORT, PPP_IPV4_SUPPORT + PPP_IPV6_SUPPORT -1 + PPP_IDLETIMELIMIT + PPP_MAXCONNECT + MAXOCTETS + CCP_SUPPORT) + * + * We don't support PPP_IDLETIMELIMIT + PPP_MAXCONNECT + MAXOCTETS features + * and adding those defines to ppp_opts.h just for having the value always + * defined to 0 isn't worth it. + * 1 + LWIP_MAX(PPP_IPV4_SUPPORT + PPP_IPV6_SUPPORT + CCP_SUPPORT, PPP_IPV4_SUPPORT + PPP_IPV6_SUPPORT -1 + CCP_SUPPORT) + * + * Thus, the following is enough for now. + * 1 + PPP_IPV4_SUPPORT + PPP_IPV6_SUPPORT + CCP_SUPPORT + */ + +#ifdef __cplusplus +} +#endif + +#endif /* PPP_SUPPORT */ +#endif /* LWIP_HDR_PPP_IMPL_H */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/netif/ppp/ppp_opts.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/netif/ppp/ppp_opts.h new file mode 100644 index 0000000..502a2cc --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/netif/ppp/ppp_opts.h @@ -0,0 +1,610 @@ +/* + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
+ * + */ + +#ifndef LWIP_PPP_OPTS_H +#define LWIP_PPP_OPTS_H + +#include "lwip/opt.h" + +/** + * PPP_SUPPORT==1: Enable PPP. + */ +#ifndef PPP_SUPPORT +#define PPP_SUPPORT 0 +#endif + +/** + * PPPOE_SUPPORT==1: Enable PPP Over Ethernet + */ +#ifndef PPPOE_SUPPORT +#define PPPOE_SUPPORT 0 +#endif + +/** + * PPPOL2TP_SUPPORT==1: Enable PPP Over L2TP + */ +#ifndef PPPOL2TP_SUPPORT +#define PPPOL2TP_SUPPORT 0 +#endif + +/** + * PPPOL2TP_AUTH_SUPPORT==1: Enable PPP Over L2TP Auth (enable MD5 support) + */ +#ifndef PPPOL2TP_AUTH_SUPPORT +#define PPPOL2TP_AUTH_SUPPORT PPPOL2TP_SUPPORT +#endif + +/** + * PPPOS_SUPPORT==1: Enable PPP Over Serial + */ +#ifndef PPPOS_SUPPORT +#define PPPOS_SUPPORT PPP_SUPPORT +#endif + +/** + * LWIP_PPP_API==1: Enable PPP API (in pppapi.c) + */ +#ifndef LWIP_PPP_API +#define LWIP_PPP_API (PPP_SUPPORT && (NO_SYS == 0)) +#endif + +#if PPP_SUPPORT + +/** + * MEMP_NUM_PPP_PCB: the number of simultaneously active PPP + * connections (requires the PPP_SUPPORT option) + */ +#ifndef MEMP_NUM_PPP_PCB +#define MEMP_NUM_PPP_PCB 1 +#endif + +/** + * PPP_NUM_TIMEOUTS_PER_PCB: the number of sys_timeouts running in parallel per + * ppp_pcb. See the detailed explanation at the end of ppp_impl.h about simultaneous + * timers analysis. + */ +#ifndef PPP_NUM_TIMEOUTS_PER_PCB +#define PPP_NUM_TIMEOUTS_PER_PCB (1 + PPP_IPV4_SUPPORT + PPP_IPV6_SUPPORT + CCP_SUPPORT) +#endif + +/* The number of sys_timeouts required for the PPP module */ +#define PPP_NUM_TIMEOUTS (PPP_SUPPORT * PPP_NUM_TIMEOUTS_PER_PCB * MEMP_NUM_PPP_PCB) + +/** + * MEMP_NUM_PPPOS_INTERFACES: the number of concurrently active PPPoS + * interfaces (only used with PPPOS_SUPPORT==1) + */ +#ifndef MEMP_NUM_PPPOS_INTERFACES +#define MEMP_NUM_PPPOS_INTERFACES MEMP_NUM_PPP_PCB +#endif + +/** + * MEMP_NUM_PPPOE_INTERFACES: the number of concurrently active PPPoE + * interfaces (only used with PPPOE_SUPPORT==1) + */ +#ifndef MEMP_NUM_PPPOE_INTERFACES +#define MEMP_NUM_PPPOE_INTERFACES 1 +#endif + +/** + * MEMP_NUM_PPPOL2TP_INTERFACES: the number of concurrently active PPPoL2TP + * interfaces (only used with PPPOL2TP_SUPPORT==1) + */ +#ifndef MEMP_NUM_PPPOL2TP_INTERFACES +#define MEMP_NUM_PPPOL2TP_INTERFACES 1 +#endif + +/** + * MEMP_NUM_PPP_API_MSG: Number of concurrent PPP API messages (in pppapi.c) + */ +#ifndef MEMP_NUM_PPP_API_MSG +#define MEMP_NUM_PPP_API_MSG 5 +#endif + +/** + * PPP_DEBUG: Enable debugging for PPP. + */ +#ifndef PPP_DEBUG +#define PPP_DEBUG LWIP_DBG_OFF +#endif + +/** + * PPP_INPROC_IRQ_SAFE==1 call pppos_input() using tcpip_callback(). + * + * Please read the "PPPoS input path" chapter in the PPP documentation about this option. + */ +#ifndef PPP_INPROC_IRQ_SAFE +#define PPP_INPROC_IRQ_SAFE 0 +#endif + +/** + * PRINTPKT_SUPPORT==1: Enable PPP print packet support + * + * Mandatory for debugging, it displays exchanged packet content in debug trace. + */ +#ifndef PRINTPKT_SUPPORT +#define PRINTPKT_SUPPORT 0 +#endif + +/** + * PPP_IPV4_SUPPORT==1: Enable PPP IPv4 support + */ +#ifndef PPP_IPV4_SUPPORT +#define PPP_IPV4_SUPPORT (LWIP_IPV4) +#endif + +/** + * PPP_IPV6_SUPPORT==1: Enable PPP IPv6 support + */ +#ifndef PPP_IPV6_SUPPORT +#define PPP_IPV6_SUPPORT (LWIP_IPV6) +#endif + +/** + * PPP_NOTIFY_PHASE==1: Support PPP notify phase support + * + * PPP notify phase support allows you to set a callback which is + * called on change of the internal PPP state machine. + * + * This can be used for example to set a LED pattern depending on the + * current phase of the PPP session. 
+ */ +#ifndef PPP_NOTIFY_PHASE +#define PPP_NOTIFY_PHASE 0 +#endif + +/** + * pbuf_type PPP is using for LCP, PAP, CHAP, EAP, CCP, IPCP and IP6CP packets. + * + * Memory allocated must be single buffered for PPP to works, it requires pbuf + * that are not going to be chained when allocated. This requires setting + * PBUF_POOL_BUFSIZE to at least 512 bytes, which is quite huge for small systems. + * + * Setting PPP_USE_PBUF_RAM to 1 makes PPP use memory from heap where continuous + * buffers are required, allowing you to use a smaller PBUF_POOL_BUFSIZE. + */ +#ifndef PPP_USE_PBUF_RAM +#define PPP_USE_PBUF_RAM 0 +#endif + +/** + * PPP_FCS_TABLE: Keep a 256*2 byte table to speed up FCS calculation for PPPoS + */ +#ifndef PPP_FCS_TABLE +#define PPP_FCS_TABLE 1 +#endif + +/** + * PAP_SUPPORT==1: Support PAP. + */ +#ifndef PAP_SUPPORT +#define PAP_SUPPORT 0 +#endif + +/** + * CHAP_SUPPORT==1: Support CHAP. + */ +#ifndef CHAP_SUPPORT +#define CHAP_SUPPORT 0 +#endif + +/** + * MSCHAP_SUPPORT==1: Support MSCHAP. + */ +#ifndef MSCHAP_SUPPORT +#define MSCHAP_SUPPORT 0 +#endif +#if MSCHAP_SUPPORT +/* MSCHAP requires CHAP support */ +#undef CHAP_SUPPORT +#define CHAP_SUPPORT 1 +#endif /* MSCHAP_SUPPORT */ + +/** + * EAP_SUPPORT==1: Support EAP. + */ +#ifndef EAP_SUPPORT +#define EAP_SUPPORT 0 +#endif + +/** + * CCP_SUPPORT==1: Support CCP. + */ +#ifndef CCP_SUPPORT +#define CCP_SUPPORT 0 +#endif + +/** + * MPPE_SUPPORT==1: Support MPPE. + */ +#ifndef MPPE_SUPPORT +#define MPPE_SUPPORT 0 +#endif +#if MPPE_SUPPORT +/* MPPE requires CCP support */ +#undef CCP_SUPPORT +#define CCP_SUPPORT 1 +/* MPPE requires MSCHAP support */ +#undef MSCHAP_SUPPORT +#define MSCHAP_SUPPORT 1 +/* MSCHAP requires CHAP support */ +#undef CHAP_SUPPORT +#define CHAP_SUPPORT 1 +#endif /* MPPE_SUPPORT */ + +/** + * CBCP_SUPPORT==1: Support CBCP. CURRENTLY NOT SUPPORTED! DO NOT SET! + */ +#ifndef CBCP_SUPPORT +#define CBCP_SUPPORT 0 +#endif + +/** + * ECP_SUPPORT==1: Support ECP. CURRENTLY NOT SUPPORTED! DO NOT SET! + */ +#ifndef ECP_SUPPORT +#define ECP_SUPPORT 0 +#endif + +/** + * DEMAND_SUPPORT==1: Support dial on demand. CURRENTLY NOT SUPPORTED! DO NOT SET! + */ +#ifndef DEMAND_SUPPORT +#define DEMAND_SUPPORT 0 +#endif + +/** + * LQR_SUPPORT==1: Support Link Quality Report. Do nothing except exchanging some LCP packets. + */ +#ifndef LQR_SUPPORT +#define LQR_SUPPORT 0 +#endif + +/** + * PPP_SERVER==1: Enable PPP server support (waiting for incoming PPP session). + * + * Currently only supported for PPPoS. + */ +#ifndef PPP_SERVER +#define PPP_SERVER 0 +#endif + +#if PPP_SERVER +/* + * PPP_OUR_NAME: Our name for authentication purposes + */ +#ifndef PPP_OUR_NAME +#define PPP_OUR_NAME "lwIP" +#endif +#endif /* PPP_SERVER */ + +/** + * VJ_SUPPORT==1: Support VJ header compression. + */ +#ifndef VJ_SUPPORT +#define VJ_SUPPORT 1 +#endif +/* VJ compression is only supported for TCP over IPv4 over PPPoS. */ +#if !PPPOS_SUPPORT || !PPP_IPV4_SUPPORT || !LWIP_TCP +#undef VJ_SUPPORT +#define VJ_SUPPORT 0 +#endif /* !PPPOS_SUPPORT */ + +/** + * PPP_MD5_RANDM==1: Use MD5 for better randomness. + * Enabled by default if CHAP, EAP, or L2TP AUTH support is enabled. + */ +#ifndef PPP_MD5_RANDM +#define PPP_MD5_RANDM (CHAP_SUPPORT || EAP_SUPPORT || PPPOL2TP_AUTH_SUPPORT) +#endif + +/** + * PolarSSL embedded library + * + * + * lwIP contains some files fetched from the latest BSD release of + * the PolarSSL project (PolarSSL 0.10.1-bsd) for ciphers and encryption + * methods we need for lwIP PPP support. 
+ * + * The PolarSSL files were cleaned to contain only the necessary struct + * fields and functions needed for lwIP. + * + * The PolarSSL API was not changed at all, so if you are already using + * PolarSSL you can choose to skip the compilation of the included PolarSSL + * library into lwIP. + * + * If you are not using the embedded copy you must include external + * libraries into your arch/cc.h port file. + * + * Beware of the stack requirements which can be a lot larger if you are not + * using our cleaned PolarSSL library. + */ + +/** + * LWIP_USE_EXTERNAL_POLARSSL: Use external PolarSSL library + */ +#ifndef LWIP_USE_EXTERNAL_POLARSSL +#define LWIP_USE_EXTERNAL_POLARSSL 0 +#endif + +/** + * LWIP_USE_EXTERNAL_MBEDTLS: Use external mbed TLS library + */ +#ifndef LWIP_USE_EXTERNAL_MBEDTLS +#define LWIP_USE_EXTERNAL_MBEDTLS 0 +#endif + +/* + * PPP Timeouts + */ + +/** + * FSM_DEFTIMEOUT: Timeout time in seconds + */ +#ifndef FSM_DEFTIMEOUT +#define FSM_DEFTIMEOUT 6 +#endif + +/** + * FSM_DEFMAXTERMREQS: Maximum Terminate-Request transmissions + */ +#ifndef FSM_DEFMAXTERMREQS +#define FSM_DEFMAXTERMREQS 2 +#endif + +/** + * FSM_DEFMAXCONFREQS: Maximum Configure-Request transmissions + */ +#ifndef FSM_DEFMAXCONFREQS +#define FSM_DEFMAXCONFREQS 10 +#endif + +/** + * FSM_DEFMAXNAKLOOPS: Maximum number of nak loops + */ +#ifndef FSM_DEFMAXNAKLOOPS +#define FSM_DEFMAXNAKLOOPS 5 +#endif + +/** + * UPAP_DEFTIMEOUT: Timeout (seconds) for retransmitting req + */ +#ifndef UPAP_DEFTIMEOUT +#define UPAP_DEFTIMEOUT 6 +#endif + +/** + * UPAP_DEFTRANSMITS: Maximum number of auth-reqs to send + */ +#ifndef UPAP_DEFTRANSMITS +#define UPAP_DEFTRANSMITS 10 +#endif + +#if PPP_SERVER +/** + * UPAP_DEFREQTIME: Time to wait for auth-req from peer + */ +#ifndef UPAP_DEFREQTIME +#define UPAP_DEFREQTIME 30 +#endif +#endif /* PPP_SERVER */ + +/** + * CHAP_DEFTIMEOUT: Timeout (seconds) for retransmitting req + */ +#ifndef CHAP_DEFTIMEOUT +#define CHAP_DEFTIMEOUT 6 +#endif + +/** + * CHAP_DEFTRANSMITS: max # times to send challenge + */ +#ifndef CHAP_DEFTRANSMITS +#define CHAP_DEFTRANSMITS 10 +#endif + +#if PPP_SERVER +/** + * CHAP_DEFRECHALLENGETIME: If this option is > 0, rechallenge the peer every n seconds + */ +#ifndef CHAP_DEFRECHALLENGETIME +#define CHAP_DEFRECHALLENGETIME 0 +#endif +#endif /* PPP_SERVER */ + +/** + * EAP_DEFREQTIME: Time to wait for peer request + */ +#ifndef EAP_DEFREQTIME +#define EAP_DEFREQTIME 6 +#endif + +/** + * EAP_DEFALLOWREQ: max # times to accept requests + */ +#ifndef EAP_DEFALLOWREQ +#define EAP_DEFALLOWREQ 10 +#endif + +#if PPP_SERVER +/** + * EAP_DEFTIMEOUT: Timeout (seconds) for rexmit + */ +#ifndef EAP_DEFTIMEOUT +#define EAP_DEFTIMEOUT 6 +#endif + +/** + * EAP_DEFTRANSMITS: max # times to transmit + */ +#ifndef EAP_DEFTRANSMITS +#define EAP_DEFTRANSMITS 10 +#endif +#endif /* PPP_SERVER */ + +/** + * LCP_DEFLOOPBACKFAIL: Default number of times we receive our magic number from the peer + * before deciding the link is looped-back. + */ +#ifndef LCP_DEFLOOPBACKFAIL +#define LCP_DEFLOOPBACKFAIL 10 +#endif + +/** + * LCP_ECHOINTERVAL: Interval in seconds between keepalive echo requests, 0 to disable. + */ +#ifndef LCP_ECHOINTERVAL +#define LCP_ECHOINTERVAL 0 +#endif + +/** + * LCP_MAXECHOFAILS: Number of unanswered echo requests before failure. + */ +#ifndef LCP_MAXECHOFAILS +#define LCP_MAXECHOFAILS 3 +#endif + +/** + * PPP_MAXIDLEFLAG: Max Xmit idle time (in ms) before resend flag char. 
+ */ +#ifndef PPP_MAXIDLEFLAG +#define PPP_MAXIDLEFLAG 100 +#endif + +/** + * PPP Packet sizes + */ + +/** + * PPP_MRU: Default MRU + */ +#ifndef PPP_MRU +#define PPP_MRU 1500 +#endif + +/** + * PPP_DEFMRU: Default MRU to try + */ +#ifndef PPP_DEFMRU +#define PPP_DEFMRU 1500 +#endif + +/** + * PPP_MAXMRU: Normally limit MRU to this (pppd default = 16384) + */ +#ifndef PPP_MAXMRU +#define PPP_MAXMRU 1500 +#endif + +/** + * PPP_MINMRU: No MRUs below this + */ +#ifndef PPP_MINMRU +#define PPP_MINMRU 128 +#endif + +/** + * PPPOL2TP_DEFMRU: Default MTU and MRU for L2TP + * Default = 1500 - PPPoE(6) - PPP Protocol(2) - IPv4 header(20) - UDP Header(8) + * - L2TP Header(6) - HDLC Header(2) - PPP Protocol(2) - MPPE Header(2) - PPP Protocol(2) + */ +#if PPPOL2TP_SUPPORT +#ifndef PPPOL2TP_DEFMRU +#define PPPOL2TP_DEFMRU 1450 +#endif +#endif /* PPPOL2TP_SUPPORT */ + +/** + * MAXNAMELEN: max length of hostname or name for auth + */ +#ifndef MAXNAMELEN +#define MAXNAMELEN 256 +#endif + +/** + * MAXSECRETLEN: max length of password or secret + */ +#ifndef MAXSECRETLEN +#define MAXSECRETLEN 256 +#endif + +/* ------------------------------------------------------------------------- */ + +/* + * Build triggers for embedded PolarSSL + */ +#if !LWIP_USE_EXTERNAL_POLARSSL && !LWIP_USE_EXTERNAL_MBEDTLS + +/* CHAP, EAP, L2TP AUTH and MD5 Random require MD5 support */ +#if CHAP_SUPPORT || EAP_SUPPORT || PPPOL2TP_AUTH_SUPPORT || PPP_MD5_RANDM +#define LWIP_INCLUDED_POLARSSL_MD5 1 +#endif /* CHAP_SUPPORT || EAP_SUPPORT || PPPOL2TP_AUTH_SUPPORT || PPP_MD5_RANDM */ + +#if MSCHAP_SUPPORT + +/* MSCHAP require MD4 support */ +#define LWIP_INCLUDED_POLARSSL_MD4 1 +/* MSCHAP require SHA1 support */ +#define LWIP_INCLUDED_POLARSSL_SHA1 1 +/* MSCHAP require DES support */ +#define LWIP_INCLUDED_POLARSSL_DES 1 + +/* MS-CHAP support is required for MPPE */ +#if MPPE_SUPPORT +/* MPPE require ARC4 support */ +#define LWIP_INCLUDED_POLARSSL_ARC4 1 +#endif /* MPPE_SUPPORT */ + +#endif /* MSCHAP_SUPPORT */ + +#endif /* !LWIP_USE_EXTERNAL_POLARSSL && !LWIP_USE_EXTERNAL_MBEDTLS */ + +/* Default value if unset */ +#ifndef LWIP_INCLUDED_POLARSSL_MD4 +#define LWIP_INCLUDED_POLARSSL_MD4 0 +#endif /* LWIP_INCLUDED_POLARSSL_MD4 */ +#ifndef LWIP_INCLUDED_POLARSSL_MD5 +#define LWIP_INCLUDED_POLARSSL_MD5 0 +#endif /* LWIP_INCLUDED_POLARSSL_MD5 */ +#ifndef LWIP_INCLUDED_POLARSSL_SHA1 +#define LWIP_INCLUDED_POLARSSL_SHA1 0 +#endif /* LWIP_INCLUDED_POLARSSL_SHA1 */ +#ifndef LWIP_INCLUDED_POLARSSL_DES +#define LWIP_INCLUDED_POLARSSL_DES 0 +#endif /* LWIP_INCLUDED_POLARSSL_DES */ +#ifndef LWIP_INCLUDED_POLARSSL_ARC4 +#define LWIP_INCLUDED_POLARSSL_ARC4 0 +#endif /* LWIP_INCLUDED_POLARSSL_ARC4 */ + +#endif /* PPP_SUPPORT */ + +/* Default value if unset */ +#ifndef PPP_NUM_TIMEOUTS +#define PPP_NUM_TIMEOUTS 0 +#endif /* PPP_NUM_TIMEOUTS */ + +#endif /* LWIP_PPP_OPTS_H */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/netif/ppp/pppapi.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/netif/ppp/pppapi.h new file mode 100644 index 0000000..70c262a --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/netif/ppp/pppapi.h @@ -0,0 +1,137 @@ +/* + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. 
Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + */ + +#ifndef LWIP_PPPAPI_H +#define LWIP_PPPAPI_H + +#include "netif/ppp/ppp_opts.h" + +#if LWIP_PPP_API /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/sys.h" +#include "lwip/netif.h" +#include "lwip/priv/tcpip_priv.h" +#include "netif/ppp/ppp.h" +#if PPPOS_SUPPORT +#include "netif/ppp/pppos.h" +#endif /* PPPOS_SUPPORT */ + +#ifdef __cplusplus +extern "C" { +#endif + +struct pppapi_msg_msg { + ppp_pcb *ppp; + union { +#if PPP_NOTIFY_PHASE + struct { + ppp_notify_phase_cb_fn notify_phase_cb; + } setnotifyphasecb; +#endif /* PPP_NOTIFY_PHASE */ +#if PPPOS_SUPPORT + struct { + struct netif *pppif; + pppos_output_cb_fn output_cb; + ppp_link_status_cb_fn link_status_cb; + void *ctx_cb; + } serialcreate; +#endif /* PPPOS_SUPPORT */ +#if PPPOE_SUPPORT + struct { + struct netif *pppif; + struct netif *ethif; + const char *service_name; + const char *concentrator_name; + ppp_link_status_cb_fn link_status_cb; + void *ctx_cb; + } ethernetcreate; +#endif /* PPPOE_SUPPORT */ +#if PPPOL2TP_SUPPORT + struct { + struct netif *pppif; + struct netif *netif; + API_MSG_M_DEF_C(ip_addr_t, ipaddr); + u16_t port; +#if PPPOL2TP_AUTH_SUPPORT + const u8_t *secret; + u8_t secret_len; +#endif /* PPPOL2TP_AUTH_SUPPORT */ + ppp_link_status_cb_fn link_status_cb; + void *ctx_cb; + } l2tpcreate; +#endif /* PPPOL2TP_SUPPORT */ + struct { + u16_t holdoff; + } connect; + struct { + u8_t nocarrier; + } close; + struct { + u8_t cmd; + void *arg; + } ioctl; + } msg; +}; + +struct pppapi_msg { + struct tcpip_api_call_data call; + struct pppapi_msg_msg msg; +}; + +/* API for application */ +err_t pppapi_set_default(ppp_pcb *pcb); +#if PPP_NOTIFY_PHASE +err_t pppapi_set_notify_phase_callback(ppp_pcb *pcb, ppp_notify_phase_cb_fn notify_phase_cb); +#endif /* PPP_NOTIFY_PHASE */ +#if PPPOS_SUPPORT +ppp_pcb *pppapi_pppos_create(struct netif *pppif, pppos_output_cb_fn output_cb, ppp_link_status_cb_fn link_status_cb, void *ctx_cb); +#endif /* PPPOS_SUPPORT */ +#if PPPOE_SUPPORT +ppp_pcb *pppapi_pppoe_create(struct netif *pppif, struct netif *ethif, const char *service_name, + const char *concentrator_name, ppp_link_status_cb_fn link_status_cb, + void *ctx_cb); +#endif /* PPPOE_SUPPORT */ +#if PPPOL2TP_SUPPORT +ppp_pcb 
*pppapi_pppol2tp_create(struct netif *pppif, struct netif *netif, ip_addr_t *ipaddr, u16_t port, + const u8_t *secret, u8_t secret_len, + ppp_link_status_cb_fn link_status_cb, void *ctx_cb); +#endif /* PPPOL2TP_SUPPORT */ +err_t pppapi_connect(ppp_pcb *pcb, u16_t holdoff); +#if PPP_SERVER +err_t pppapi_listen(ppp_pcb *pcb); +#endif /* PPP_SERVER */ +err_t pppapi_close(ppp_pcb *pcb, u8_t nocarrier); +err_t pppapi_free(ppp_pcb *pcb); +err_t pppapi_ioctl(ppp_pcb *pcb, u8_t cmd, void *arg); + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_PPP_API */ + +#endif /* LWIP_PPPAPI_H */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/netif/ppp/pppcrypt.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/netif/ppp/pppcrypt.h new file mode 100644 index 0000000..968e532 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/netif/ppp/pppcrypt.h @@ -0,0 +1,144 @@ +/* + * pppcrypt.c - PPP/DES linkage for MS-CHAP and EAP SRP-SHA1 + * + * Extracted from chap_ms.c by James Carlson. + * + * Copyright (c) 1995 Eric Rosenquist. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * 3. The name(s) of the authors of this software must not be used to + * endorse or promote products derived from this software without + * prior written permission. + * + * THE AUTHORS OF THIS SOFTWARE DISCLAIM ALL WARRANTIES WITH REGARD TO + * THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY + * AND FITNESS, IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY + * SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN + * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING + * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#include "netif/ppp/ppp_opts.h" +#if PPP_SUPPORT /* don't build if not configured for use in lwipopts.h */ + +/* This header file is included in all PPP modules needing hashes and/or ciphers */ + +#ifndef PPPCRYPT_H +#define PPPCRYPT_H + +/* + * If included PolarSSL copy is not used, user is expected to include + * external libraries in arch/cc.h (which is included by lwip/arch.h). 
+ */ +#include "lwip/arch.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * Map hashes and ciphers functions to PolarSSL + */ +#if !LWIP_USE_EXTERNAL_MBEDTLS + +#include "netif/ppp/polarssl/md4.h" +#define lwip_md4_context md4_context +#define lwip_md4_init(context) +#define lwip_md4_starts md4_starts +#define lwip_md4_update md4_update +#define lwip_md4_finish md4_finish +#define lwip_md4_free(context) + +#include "netif/ppp/polarssl/md5.h" +#define lwip_md5_context md5_context +#define lwip_md5_init(context) +#define lwip_md5_starts md5_starts +#define lwip_md5_update md5_update +#define lwip_md5_finish md5_finish +#define lwip_md5_free(context) + +#include "netif/ppp/polarssl/sha1.h" +#define lwip_sha1_context sha1_context +#define lwip_sha1_init(context) +#define lwip_sha1_starts sha1_starts +#define lwip_sha1_update sha1_update +#define lwip_sha1_finish sha1_finish +#define lwip_sha1_free(context) + +#include "netif/ppp/polarssl/des.h" +#define lwip_des_context des_context +#define lwip_des_init(context) +#define lwip_des_setkey_enc des_setkey_enc +#define lwip_des_crypt_ecb des_crypt_ecb +#define lwip_des_free(context) + +#include "netif/ppp/polarssl/arc4.h" +#define lwip_arc4_context arc4_context +#define lwip_arc4_init(context) +#define lwip_arc4_setup arc4_setup +#define lwip_arc4_crypt arc4_crypt +#define lwip_arc4_free(context) + +#endif /* !LWIP_USE_EXTERNAL_MBEDTLS */ + +/* + * Map hashes and ciphers functions to mbed TLS + */ +#if LWIP_USE_EXTERNAL_MBEDTLS + +#define lwip_md4_context mbedtls_md4_context +#define lwip_md4_init mbedtls_md4_init +#define lwip_md4_starts mbedtls_md4_starts +#define lwip_md4_update mbedtls_md4_update +#define lwip_md4_finish mbedtls_md4_finish +#define lwip_md4_free mbedtls_md4_free + +#define lwip_md5_context mbedtls_md5_context +#define lwip_md5_init mbedtls_md5_init +#define lwip_md5_starts mbedtls_md5_starts +#define lwip_md5_update mbedtls_md5_update +#define lwip_md5_finish mbedtls_md5_finish +#define lwip_md5_free mbedtls_md5_free + +#define lwip_sha1_context mbedtls_sha1_context +#define lwip_sha1_init mbedtls_sha1_init +#define lwip_sha1_starts mbedtls_sha1_starts +#define lwip_sha1_update mbedtls_sha1_update +#define lwip_sha1_finish mbedtls_sha1_finish +#define lwip_sha1_free mbedtls_sha1_free + +#define lwip_des_context mbedtls_des_context +#define lwip_des_init mbedtls_des_init +#define lwip_des_setkey_enc mbedtls_des_setkey_enc +#define lwip_des_crypt_ecb mbedtls_des_crypt_ecb +#define lwip_des_free mbedtls_des_free + +#define lwip_arc4_context mbedtls_arc4_context +#define lwip_arc4_init mbedtls_arc4_init +#define lwip_arc4_setup mbedtls_arc4_setup +#define lwip_arc4_crypt(context, buffer, length) mbedtls_arc4_crypt(context, length, buffer, buffer) +#define lwip_arc4_free mbedtls_arc4_free + +#endif /* LWIP_USE_EXTERNAL_MBEDTLS */ + +void pppcrypt_56_to_64_bit_key(u_char *key, u_char *des_key); + +#ifdef __cplusplus +} +#endif + +#endif /* PPPCRYPT_H */ + +#endif /* PPP_SUPPORT */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/netif/ppp/pppdebug.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/netif/ppp/pppdebug.h new file mode 100644 index 0000000..391b7c6 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/netif/ppp/pppdebug.h @@ -0,0 +1,88 @@ +/***************************************************************************** +* pppdebug.h - System debugging 
utilities. +* +* Copyright (c) 2003 by Marc Boucher, Services Informatiques (MBSI) inc. +* portions Copyright (c) 1998 Global Election Systems Inc. +* portions Copyright (c) 2001 by Cognizant Pty Ltd. +* +* The authors hereby grant permission to use, copy, modify, distribute, +* and license this software and its documentation for any purpose, provided +* that existing copyright notices are retained in all copies and that this +* notice and the following disclaimer are included verbatim in any +* distributions. No written agreement, license, or royalty fee is required +* for any of the authorized uses. +* +* THIS SOFTWARE IS PROVIDED BY THE CONTRIBUTORS *AS IS* AND ANY EXPRESS OR +* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES +* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +* IN NO EVENT SHALL THE CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT +* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF +* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +* +****************************************************************************** +* REVISION HISTORY (please don't use tabs!) +* +* 03-01-01 Marc Boucher +* Ported to lwIP. +* 98-07-29 Guy Lancaster , Global Election Systems Inc. +* Original. +* +***************************************************************************** +*/ + +#include "netif/ppp/ppp_opts.h" +#if PPP_SUPPORT /* don't build if not configured for use in lwipopts.h */ + +#ifndef PPPDEBUG_H +#define PPPDEBUG_H + +#ifdef __cplusplus +extern "C" { +#endif + +/* Trace levels. 
*/ +#define LOG_CRITICAL (PPP_DEBUG | LWIP_DBG_LEVEL_SEVERE) +#define LOG_ERR (PPP_DEBUG | LWIP_DBG_LEVEL_SEVERE) +#define LOG_NOTICE (PPP_DEBUG | LWIP_DBG_LEVEL_WARNING) +#define LOG_WARNING (PPP_DEBUG | LWIP_DBG_LEVEL_WARNING) +#define LOG_INFO (PPP_DEBUG) +#define LOG_DETAIL (PPP_DEBUG) +#define LOG_DEBUG (PPP_DEBUG) + +#if PPP_DEBUG + +#define MAINDEBUG(a) LWIP_DEBUGF(LWIP_DBG_LEVEL_WARNING, a) +#define SYSDEBUG(a) LWIP_DEBUGF(LWIP_DBG_LEVEL_WARNING, a) +#define FSMDEBUG(a) LWIP_DEBUGF(LWIP_DBG_LEVEL_WARNING, a) +#define LCPDEBUG(a) LWIP_DEBUGF(LWIP_DBG_LEVEL_WARNING, a) +#define IPCPDEBUG(a) LWIP_DEBUGF(LWIP_DBG_LEVEL_WARNING, a) +#define IPV6CPDEBUG(a) LWIP_DEBUGF(LWIP_DBG_LEVEL_WARNING, a) +#define UPAPDEBUG(a) LWIP_DEBUGF(LWIP_DBG_LEVEL_WARNING, a) +#define CHAPDEBUG(a) LWIP_DEBUGF(LWIP_DBG_LEVEL_WARNING, a) +#define PPPDEBUG(a, b) LWIP_DEBUGF(a, b) + +#else /* PPP_DEBUG */ + +#define MAINDEBUG(a) +#define SYSDEBUG(a) +#define FSMDEBUG(a) +#define LCPDEBUG(a) +#define IPCPDEBUG(a) +#define IPV6CPDEBUG(a) +#define UPAPDEBUG(a) +#define CHAPDEBUG(a) +#define PPPDEBUG(a, b) + +#endif /* PPP_DEBUG */ + +#ifdef __cplusplus +} +#endif + +#endif /* PPPDEBUG_H */ + +#endif /* PPP_SUPPORT */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/netif/ppp/pppoe.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/netif/ppp/pppoe.h new file mode 100644 index 0000000..93acd4c --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/netif/ppp/pppoe.h @@ -0,0 +1,187 @@ +/***************************************************************************** +* pppoe.h - PPP Over Ethernet implementation for lwIP. +* +* Copyright (c) 2006 by Marc Boucher, Services Informatiques (MBSI) inc. +* +* The authors hereby grant permission to use, copy, modify, distribute, +* and license this software and its documentation for any purpose, provided +* that existing copyright notices are retained in all copies and that this +* notice and the following disclaimer are included verbatim in any +* distributions. No written agreement, license, or royalty fee is required +* for any of the authorized uses. +* +* THIS SOFTWARE IS PROVIDED BY THE CONTRIBUTORS *AS IS* AND ANY EXPRESS OR +* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES +* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +* IN NO EVENT SHALL THE CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT +* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF +* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +* +****************************************************************************** +* REVISION HISTORY +* +* 06-01-01 Marc Boucher +* Ported to lwIP. +*****************************************************************************/ + + + +/* based on NetBSD: if_pppoe.c,v 1.64 2006/01/31 23:50:15 martin Exp */ + +/*- + * Copyright (c) 2002 The NetBSD Foundation, Inc. + * All rights reserved. + * + * This code is derived from software contributed to The NetBSD Foundation + * by Martin Husemann . 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the NetBSD + * Foundation, Inc. and its contributors. + * 4. Neither the name of The NetBSD Foundation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS + * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ +#include "netif/ppp/ppp_opts.h" +#if PPP_SUPPORT && PPPOE_SUPPORT /* don't build if not configured for use in lwipopts.h */ + +#ifndef PPP_OE_H +#define PPP_OE_H + +#include "ppp.h" +#include "lwip/etharp.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +struct pppoehdr { + PACK_STRUCT_FLD_8(u8_t vertype); + PACK_STRUCT_FLD_8(u8_t code); + PACK_STRUCT_FIELD(u16_t session); + PACK_STRUCT_FIELD(u16_t plen); +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif + +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +struct pppoetag { + PACK_STRUCT_FIELD(u16_t tag); + PACK_STRUCT_FIELD(u16_t len); +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif + + +#define PPPOE_STATE_INITIAL 0 +#define PPPOE_STATE_PADI_SENT 1 +#define PPPOE_STATE_PADR_SENT 2 +#define PPPOE_STATE_SESSION 3 +/* passive */ +#define PPPOE_STATE_PADO_SENT 1 + +#define PPPOE_HEADERLEN sizeof(struct pppoehdr) +#define PPPOE_VERTYPE 0x11 /* VER=1, TYPE = 1 */ + +#define PPPOE_TAG_EOL 0x0000 /* end of list */ +#define PPPOE_TAG_SNAME 0x0101 /* service name */ +#define PPPOE_TAG_ACNAME 0x0102 /* access concentrator name */ +#define PPPOE_TAG_HUNIQUE 0x0103 /* host unique */ +#define PPPOE_TAG_ACCOOKIE 0x0104 /* AC cookie */ +#define PPPOE_TAG_VENDOR 0x0105 /* vendor specific */ +#define PPPOE_TAG_RELAYSID 0x0110 /* relay session id */ +#define PPPOE_TAG_SNAME_ERR 0x0201 /* service name error */ +#define PPPOE_TAG_ACSYS_ERR 0x0202 /* AC system error */ +#define PPPOE_TAG_GENERIC_ERR 0x0203 /* gerneric error */ + +#define PPPOE_CODE_PADI 0x09 /* Active Discovery Initiation */ 
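/*
 * Illustrative sketch accompanying this patch; it is not part of the
 * upstream lwIP pppoe.h content being added above. It only shows how the
 * discovery header layout declared just above (struct pppoehdr,
 * PPPOE_VERTYPE and PPPOE_CODE_PADI) is meant to be interpreted on a
 * received pbuf. The function name is made up, and error handling is
 * reduced to a simple accept/reject result.
 */
#if 0 /* example only, never compiled into the stack */
static int example_is_pppoe_padi(const struct pbuf *pb)
{
  const struct pppoehdr *ph;

  if (pb->len < PPPOE_HEADERLEN) {
    return 0;                           /* too short to hold the 6-byte header */
  }
  ph = (const struct pppoehdr *)pb->payload;
  if (ph->vertype != PPPOE_VERTYPE) {   /* VER and TYPE must both be 1 */
    return 0;
  }
  /* During discovery the session id is still 0; PADS assigns the real one. */
  return (ph->code == PPPOE_CODE_PADI) && (ph->session == 0);
}
#endif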
+#define PPPOE_CODE_PADO 0x07 /* Active Discovery Offer */ +#define PPPOE_CODE_PADR 0x19 /* Active Discovery Request */ +#define PPPOE_CODE_PADS 0x65 /* Active Discovery Session confirmation */ +#define PPPOE_CODE_PADT 0xA7 /* Active Discovery Terminate */ + +#ifndef PPPOE_MAX_AC_COOKIE_LEN +#define PPPOE_MAX_AC_COOKIE_LEN 64 +#endif + +struct pppoe_softc { + struct pppoe_softc *next; + struct netif *sc_ethif; /* ethernet interface we are using */ + ppp_pcb *pcb; /* PPP PCB */ + + struct eth_addr sc_dest; /* hardware address of concentrator */ + u16_t sc_session; /* PPPoE session id */ + u8_t sc_state; /* discovery phase or session connected */ + +#ifdef PPPOE_TODO + u8_t *sc_service_name; /* if != NULL: requested name of service */ + u8_t *sc_concentrator_name; /* if != NULL: requested concentrator id */ +#endif /* PPPOE_TODO */ + u8_t sc_ac_cookie[PPPOE_MAX_AC_COOKIE_LEN]; /* content of AC cookie we must echo back */ + u8_t sc_ac_cookie_len; /* length of cookie data */ +#ifdef PPPOE_SERVER + u8_t *sc_hunique; /* content of host unique we must echo back */ + u8_t sc_hunique_len; /* length of host unique */ +#endif + u8_t sc_padi_retried; /* number of PADI retries already done */ + u8_t sc_padr_retried; /* number of PADR retries already done */ +}; + + +#define pppoe_init() /* compatibility define, no initialization needed */ + +ppp_pcb *pppoe_create(struct netif *pppif, + struct netif *ethif, + const char *service_name, const char *concentrator_name, + ppp_link_status_cb_fn link_status_cb, void *ctx_cb); + +/* + * Functions called from lwIP + * DO NOT CALL FROM lwIP USER APPLICATION. + */ +void pppoe_disc_input(struct netif *netif, struct pbuf *p); +void pppoe_data_input(struct netif *netif, struct pbuf *p); + +#ifdef __cplusplus +} +#endif + +#endif /* PPP_OE_H */ + +#endif /* PPP_SUPPORT && PPPOE_SUPPORT */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/netif/ppp/pppol2tp.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/netif/ppp/pppol2tp.h new file mode 100644 index 0000000..e09d6e1 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/netif/ppp/pppol2tp.h @@ -0,0 +1,209 @@ +/** + * @file + * Network Point to Point Protocol over Layer 2 Tunneling Protocol header file. + * + */ + +/* + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + */ + +#include "netif/ppp/ppp_opts.h" +#if PPP_SUPPORT && PPPOL2TP_SUPPORT /* don't build if not configured for use in lwipopts.h */ + +#ifndef PPPOL2TP_H +#define PPPOL2TP_H + +#include "ppp.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* Timeout */ +#define PPPOL2TP_CONTROL_TIMEOUT (5*1000) /* base for quick timeout calculation */ +#define PPPOL2TP_SLOW_RETRY (60*1000) /* persistent retry interval */ + +#define PPPOL2TP_MAXSCCRQ 4 /* retry SCCRQ four times (quickly) */ +#define PPPOL2TP_MAXICRQ 4 /* retry IRCQ four times */ +#define PPPOL2TP_MAXICCN 4 /* retry ICCN four times */ + +/* L2TP header flags */ +#define PPPOL2TP_HEADERFLAG_CONTROL 0x8000 +#define PPPOL2TP_HEADERFLAG_LENGTH 0x4000 +#define PPPOL2TP_HEADERFLAG_SEQUENCE 0x0800 +#define PPPOL2TP_HEADERFLAG_OFFSET 0x0200 +#define PPPOL2TP_HEADERFLAG_PRIORITY 0x0100 +#define PPPOL2TP_HEADERFLAG_VERSION 0x0002 + +/* Mandatory bits for control: Control, Length, Sequence, Version 2 */ +#define PPPOL2TP_HEADERFLAG_CONTROL_MANDATORY (PPPOL2TP_HEADERFLAG_CONTROL|PPPOL2TP_HEADERFLAG_LENGTH|PPPOL2TP_HEADERFLAG_SEQUENCE|PPPOL2TP_HEADERFLAG_VERSION) +/* Forbidden bits for control: Offset, Priority */ +#define PPPOL2TP_HEADERFLAG_CONTROL_FORBIDDEN (PPPOL2TP_HEADERFLAG_OFFSET|PPPOL2TP_HEADERFLAG_PRIORITY) + +/* Mandatory bits for data: Version 2 */ +#define PPPOL2TP_HEADERFLAG_DATA_MANDATORY (PPPOL2TP_HEADERFLAG_VERSION) + +/* AVP (Attribute Value Pair) header */ +#define PPPOL2TP_AVPHEADERFLAG_MANDATORY 0x8000 +#define PPPOL2TP_AVPHEADERFLAG_HIDDEN 0x4000 +#define PPPOL2TP_AVPHEADERFLAG_LENGTHMASK 0x03ff + +/* -- AVP - Message type */ +#define PPPOL2TP_AVPTYPE_MESSAGE 0 /* Message type */ + +/* Control Connection Management */ +#define PPPOL2TP_MESSAGETYPE_SCCRQ 1 /* Start Control Connection Request */ +#define PPPOL2TP_MESSAGETYPE_SCCRP 2 /* Start Control Connection Reply */ +#define PPPOL2TP_MESSAGETYPE_SCCCN 3 /* Start Control Connection Connected */ +#define PPPOL2TP_MESSAGETYPE_STOPCCN 4 /* Stop Control Connection Notification */ +#define PPPOL2TP_MESSAGETYPE_HELLO 6 /* Hello */ +/* Call Management */ +#define PPPOL2TP_MESSAGETYPE_OCRQ 7 /* Outgoing Call Request */ +#define PPPOL2TP_MESSAGETYPE_OCRP 8 /* Outgoing Call Reply */ +#define PPPOL2TP_MESSAGETYPE_OCCN 9 /* Outgoing Call Connected */ +#define PPPOL2TP_MESSAGETYPE_ICRQ 10 /* Incoming Call Request */ +#define PPPOL2TP_MESSAGETYPE_ICRP 11 /* Incoming Call Reply */ +#define PPPOL2TP_MESSAGETYPE_ICCN 12 /* Incoming Call Connected */ +#define PPPOL2TP_MESSAGETYPE_CDN 14 /* Call Disconnect Notify */ +/* Error reporting */ +#define PPPOL2TP_MESSAGETYPE_WEN 15 /* WAN Error Notify */ +/* PPP Session Control */ +#define PPPOL2TP_MESSAGETYPE_SLI 16 /* Set Link Info */ + +/* -- AVP - Result code */ +#define PPPOL2TP_AVPTYPE_RESULTCODE 1 /* Result code */ +#define PPPOL2TP_RESULTCODE 1 /* General request to clear control connection */ + +/* -- AVP - Protocol version (!= L2TP Header version) */ +#define 
PPPOL2TP_AVPTYPE_VERSION 2 +#define PPPOL2TP_VERSION 0x0100 /* L2TP Protocol version 1, revision 0 */ + +/* -- AVP - Framing capabilities */ +#define PPPOL2TP_AVPTYPE_FRAMINGCAPABILITIES 3 /* Bearer capabilities */ +#define PPPOL2TP_FRAMINGCAPABILITIES 0x00000003 /* Async + Sync framing */ + +/* -- AVP - Bearer capabilities */ +#define PPPOL2TP_AVPTYPE_BEARERCAPABILITIES 4 /* Bearer capabilities */ +#define PPPOL2TP_BEARERCAPABILITIES 0x00000003 /* Analog + Digital Access */ + +/* -- AVP - Tie breaker */ +#define PPPOL2TP_AVPTYPE_TIEBREAKER 5 + +/* -- AVP - Host name */ +#define PPPOL2TP_AVPTYPE_HOSTNAME 7 /* Host name */ +#define PPPOL2TP_HOSTNAME "lwIP" /* FIXME: make it configurable */ + +/* -- AVP - Vendor name */ +#define PPPOL2TP_AVPTYPE_VENDORNAME 8 /* Vendor name */ +#define PPPOL2TP_VENDORNAME "lwIP" /* FIXME: make it configurable */ + +/* -- AVP - Assign tunnel ID */ +#define PPPOL2TP_AVPTYPE_TUNNELID 9 /* Assign Tunnel ID */ + +/* -- AVP - Receive window size */ +#define PPPOL2TP_AVPTYPE_RECEIVEWINDOWSIZE 10 /* Receive window size */ +#define PPPOL2TP_RECEIVEWINDOWSIZE 8 /* FIXME: make it configurable */ + +/* -- AVP - Challenge */ +#define PPPOL2TP_AVPTYPE_CHALLENGE 11 /* Challenge */ + +/* -- AVP - Cause code */ +#define PPPOL2TP_AVPTYPE_CAUSECODE 12 /* Cause code*/ + +/* -- AVP - Challenge response */ +#define PPPOL2TP_AVPTYPE_CHALLENGERESPONSE 13 /* Challenge response */ +#define PPPOL2TP_AVPTYPE_CHALLENGERESPONSE_SIZE 16 + +/* -- AVP - Assign session ID */ +#define PPPOL2TP_AVPTYPE_SESSIONID 14 /* Assign Session ID */ + +/* -- AVP - Call serial number */ +#define PPPOL2TP_AVPTYPE_CALLSERIALNUMBER 15 /* Call Serial Number */ + +/* -- AVP - Framing type */ +#define PPPOL2TP_AVPTYPE_FRAMINGTYPE 19 /* Framing Type */ +#define PPPOL2TP_FRAMINGTYPE 0x00000001 /* Sync framing */ + +/* -- AVP - TX Connect Speed */ +#define PPPOL2TP_AVPTYPE_TXCONNECTSPEED 24 /* TX Connect Speed */ +#define PPPOL2TP_TXCONNECTSPEED 100000000 /* Connect speed: 100 Mbits/s */ + +/* L2TP Session state */ +#define PPPOL2TP_STATE_INITIAL 0 +#define PPPOL2TP_STATE_SCCRQ_SENT 1 +#define PPPOL2TP_STATE_ICRQ_SENT 2 +#define PPPOL2TP_STATE_ICCN_SENT 3 +#define PPPOL2TP_STATE_DATA 4 + +#define PPPOL2TP_OUTPUT_DATA_HEADER_LEN 6 /* Our data header len */ + +/* + * PPPoL2TP interface control block. 
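+ *
+ * For orientation, such a session is normally created by the application via
+ * pppol2tp_create() (declared below) and then started with ppp_connect() from
+ * ppp.h. A hypothetical sketch only, where 'ppp_netif', 'lns_addr' and
+ * 'link_status_cb' are application-side placeholders:
+ *
+ *   ppp_pcb *ppp = pppol2tp_create(&ppp_netif, NULL, &lns_addr, 1701,
+ *                                  NULL, 0, link_status_cb, NULL);
+ *   ppp_connect(ppp, 0);   /* start LCP/IPCP once the tunnel is up */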
+ */ +typedef struct pppol2tp_pcb_s pppol2tp_pcb; +struct pppol2tp_pcb_s { + ppp_pcb *ppp; /* PPP PCB */ + u8_t phase; /* L2TP phase */ + struct udp_pcb *udp; /* UDP L2TP Socket */ + struct netif *netif; /* Output interface, used as a default route */ + ip_addr_t remote_ip; /* LNS IP Address */ + u16_t remote_port; /* LNS port */ +#if PPPOL2TP_AUTH_SUPPORT + const u8_t *secret; /* Secret string */ + u8_t secret_len; /* Secret string length */ + u8_t secret_rv[16]; /* Random vector */ + u8_t challenge_hash[16]; /* Challenge response */ + u8_t send_challenge; /* Boolean whether the next sent packet should contains a challenge response */ +#endif /* PPPOL2TP_AUTH_SUPPORT */ + + u16_t tunnel_port; /* Tunnel port */ + u16_t our_ns; /* NS to peer */ + u16_t peer_nr; /* NR from peer */ + u16_t peer_ns; /* Expected NS from peer */ + u16_t source_tunnel_id; /* Tunnel ID assigned by peer */ + u16_t remote_tunnel_id; /* Tunnel ID assigned to peer */ + u16_t source_session_id; /* Session ID assigned by peer */ + u16_t remote_session_id; /* Session ID assigned to peer */ + + u8_t sccrq_retried; /* number of SCCRQ retries already done */ + u8_t icrq_retried; /* number of ICRQ retries already done */ + u8_t iccn_retried; /* number of ICCN retries already done */ +}; + + +/* Create a new L2TP session. */ +ppp_pcb *pppol2tp_create(struct netif *pppif, + struct netif *netif, const ip_addr_t *ipaddr, u16_t port, + const u8_t *secret, u8_t secret_len, + ppp_link_status_cb_fn link_status_cb, void *ctx_cb); + +#ifdef __cplusplus +} +#endif + +#endif /* PPPOL2TP_H */ +#endif /* PPP_SUPPORT && PPPOL2TP_SUPPORT */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/netif/ppp/pppos.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/netif/ppp/pppos.h new file mode 100644 index 0000000..acaedd7 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/netif/ppp/pppos.h @@ -0,0 +1,126 @@ +/** + * @file + * Network Point to Point Protocol over Serial header file. + * + */ + +/* + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. 
+ * + * This file is part of the lwIP TCP/IP stack. + * + */ + +#include "netif/ppp/ppp_opts.h" +#if PPP_SUPPORT && PPPOS_SUPPORT /* don't build if not configured for use in lwipopts.h */ + +#ifndef PPPOS_H +#define PPPOS_H + +#include "lwip/sys.h" + +#include "ppp.h" +#include "vj.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* PPP packet parser states. Current state indicates operation yet to be + * completed. */ +enum { + PDIDLE = 0, /* Idle state - waiting. */ + PDSTART, /* Process start flag. */ + PDADDRESS, /* Process address field. */ + PDCONTROL, /* Process control field. */ + PDPROTOCOL1, /* Process protocol field 1. */ + PDPROTOCOL2, /* Process protocol field 2. */ + PDDATA /* Process data byte. */ +}; + +/* PPPoS serial output callback function prototype */ +typedef u32_t (*pppos_output_cb_fn)(ppp_pcb *pcb, u8_t *data, u32_t len, void *ctx); + +/* + * Extended asyncmap - allows any character to be escaped. + */ +typedef u8_t ext_accm[32]; + +/* + * PPPoS interface control block. + */ +typedef struct pppos_pcb_s pppos_pcb; +struct pppos_pcb_s { + /* -- below are data that will NOT be cleared between two sessions */ + ppp_pcb *ppp; /* PPP PCB */ + pppos_output_cb_fn output_cb; /* PPP serial output callback */ + + /* -- below are data that will be cleared between two sessions + * + * last_xmit must be the first member of cleared members, because it is + * used to know which part must not be cleared. + */ + u32_t last_xmit; /* Time of last transmission. */ + ext_accm out_accm; /* Async-Ctl-Char-Map for output. */ + + /* flags */ + unsigned int open :1; /* Set if PPPoS is open */ + unsigned int pcomp :1; /* Does peer accept protocol compression? */ + unsigned int accomp :1; /* Does peer accept addr/ctl compression? */ + + /* PPPoS rx */ + ext_accm in_accm; /* Async-Ctl-Char-Map for input. */ + struct pbuf *in_head, *in_tail; /* The input packet. */ + u16_t in_protocol; /* The input protocol code. */ + u16_t in_fcs; /* Input Frame Check Sequence value. */ + u8_t in_state; /* The input process state. */ + u8_t in_escaped; /* Escape next character. */ +}; + +/* Create a new PPPoS session. */ +ppp_pcb *pppos_create(struct netif *pppif, pppos_output_cb_fn output_cb, + ppp_link_status_cb_fn link_status_cb, void *ctx_cb); + +#if !NO_SYS && !PPP_INPROC_IRQ_SAFE +/* Pass received raw characters to PPPoS to be decoded through lwIP TCPIP thread. */ +err_t pppos_input_tcpip(ppp_pcb *ppp, u8_t *s, int l); +#endif /* !NO_SYS && !PPP_INPROC_IRQ_SAFE */ + +/* PPP over Serial: this is the input function to be called for received data. */ +void pppos_input(ppp_pcb *ppp, u8_t* data, int len); + + +/* + * Functions called from lwIP + * DO NOT CALL FROM lwIP USER APPLICATION. + */ +#if !NO_SYS && !PPP_INPROC_IRQ_SAFE +err_t pppos_input_sys(struct pbuf *p, struct netif *inp); +#endif /* !NO_SYS && !PPP_INPROC_IRQ_SAFE */ + +#ifdef __cplusplus +} +#endif + +#endif /* PPPOS_H */ +#endif /* PPP_SUPPORT && PPPOL2TP_SUPPORT */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/netif/ppp/upap.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/netif/ppp/upap.h new file mode 100644 index 0000000..8d90e35 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/netif/ppp/upap.h @@ -0,0 +1,131 @@ +/* + * upap.h - User/Password Authentication Protocol definitions. + * + * Copyright (c) 1984-2000 Carnegie Mellon University. All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * 3. The name "Carnegie Mellon University" must not be used to + * endorse or promote products derived from this software without + * prior written permission. For permission or any legal + * details, please contact + * Office of Technology Transfer + * Carnegie Mellon University + * 5000 Forbes Avenue + * Pittsburgh, PA 15213-3890 + * (412) 268-4387, fax: (412) 268-7395 + * tech-transfer@andrew.cmu.edu + * + * 4. Redistributions of any form whatsoever must retain the following + * acknowledgment: + * "This product includes software developed by Computing Services + * at Carnegie Mellon University (http://www.cmu.edu/computing/)." + * + * CARNEGIE MELLON UNIVERSITY DISCLAIMS ALL WARRANTIES WITH REGARD TO + * THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY + * AND FITNESS, IN NO EVENT SHALL CARNEGIE MELLON UNIVERSITY BE LIABLE + * FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN + * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING + * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + * + * $Id: upap.h,v 1.8 2002/12/04 23:03:33 paulus Exp $ + */ + +#include "netif/ppp/ppp_opts.h" +#if PPP_SUPPORT && PAP_SUPPORT /* don't build if not configured for use in lwipopts.h */ + +#ifndef UPAP_H +#define UPAP_H + +#include "ppp.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * Packet header = Code, id, length. + */ +#define UPAP_HEADERLEN 4 + + +/* + * UPAP codes. + */ +#define UPAP_AUTHREQ 1 /* Authenticate-Request */ +#define UPAP_AUTHACK 2 /* Authenticate-Ack */ +#define UPAP_AUTHNAK 3 /* Authenticate-Nak */ + + +/* + * Client states. + */ +#define UPAPCS_INITIAL 0 /* Connection down */ +#define UPAPCS_CLOSED 1 /* Connection up, haven't requested auth */ +#define UPAPCS_PENDING 2 /* Connection down, have requested auth */ +#define UPAPCS_AUTHREQ 3 /* We've sent an Authenticate-Request */ +#define UPAPCS_OPEN 4 /* We've received an Ack */ +#define UPAPCS_BADAUTH 5 /* We've received a Nak */ + +/* + * Server states. + */ +#define UPAPSS_INITIAL 0 /* Connection down */ +#define UPAPSS_CLOSED 1 /* Connection up, haven't requested auth */ +#define UPAPSS_PENDING 2 /* Connection down, have requested auth */ +#define UPAPSS_LISTEN 3 /* Listening for an Authenticate */ +#define UPAPSS_OPEN 4 /* We've sent an Ack */ +#define UPAPSS_BADAUTH 5 /* We've sent a Nak */ + + +/* + * Timeouts. + */ +#if 0 /* moved to ppp_opts.h */ +#define UPAP_DEFTIMEOUT 3 /* Timeout (seconds) for retransmitting req */ +#define UPAP_DEFREQTIME 30 /* Time to wait for auth-req from peer */ +#endif /* moved to ppp_opts.h */ + +/* + * Each interface is described by upap structure. 
+ */ +#if PAP_SUPPORT +typedef struct upap_state { + const char *us_user; /* User */ + u8_t us_userlen; /* User length */ + const char *us_passwd; /* Password */ + u8_t us_passwdlen; /* Password length */ + u8_t us_clientstate; /* Client state */ +#if PPP_SERVER + u8_t us_serverstate; /* Server state */ +#endif /* PPP_SERVER */ + u8_t us_id; /* Current id */ + u8_t us_transmits; /* Number of auth-reqs sent */ +} upap_state; +#endif /* PAP_SUPPORT */ + + +void upap_authwithpeer(ppp_pcb *pcb, const char *user, const char *password); +#if PPP_SERVER +void upap_authpeer(ppp_pcb *pcb); +#endif /* PPP_SERVER */ + +extern const struct protent pap_protent; + +#ifdef __cplusplus +} +#endif + +#endif /* UPAP_H */ +#endif /* PPP_SUPPORT && PAP_SUPPORT */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/netif/ppp/vj.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/netif/ppp/vj.h new file mode 100644 index 0000000..1d91f25 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/netif/ppp/vj.h @@ -0,0 +1,169 @@ +/* + * Definitions for tcp compression routines. + * + * $Id: vj.h,v 1.7 2010/02/22 17:52:09 goldsimon Exp $ + * + * Copyright (c) 1989 Regents of the University of California. + * All rights reserved. + * + * Redistribution and use in source and binary forms are permitted + * provided that the above copyright notice and this paragraph are + * duplicated in all such forms and that any documentation, + * advertising materials, and other materials related to such + * distribution and use acknowledge that the software was developed + * by the University of California, Berkeley. The name of the + * University may not be used to endorse or promote products derived + * from this software without specific prior written permission. + * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED + * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. + * + * Van Jacobson (van@helios.ee.lbl.gov), Dec 31, 1989: + * - Initial distribution. + */ + +#include "netif/ppp/ppp_opts.h" +#if PPP_SUPPORT && VJ_SUPPORT /* don't build if not configured for use in lwipopts.h */ + +#ifndef VJ_H +#define VJ_H + +#include "lwip/ip.h" +#include "lwip/priv/tcp_priv.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#define MAX_SLOTS 16 /* must be > 2 and < 256 */ +#define MAX_HDR 128 + +/* + * Compressed packet format: + * + * The first octet contains the packet type (top 3 bits), TCP + * 'push' bit, and flags that indicate which of the 4 TCP sequence + * numbers have changed (bottom 5 bits). The next octet is a + * conversation number that associates a saved IP/TCP header with + * the compressed packet. The next two octets are the TCP checksum + * from the original datagram. The next 0 to 15 octets are + * sequence number changes, one change per bit set in the header + * (there may be no changes and there are two special cases where + * the receiver implicitly knows what changed -- see below). + * + * There are 5 numbers which can change (they are always inserted + * in the following order): TCP urgent pointer, window, + * acknowlegement, sequence number and IP ID. (The urgent pointer + * is different from the others in that its value is sent, not the + * change in value.) 
Since typical use of SLIP links is biased + * toward small packets (see comments on MTU/MSS below), changes + * use a variable length coding with one octet for numbers in the + * range 1 - 255 and 3 octets (0, MSB, LSB) for numbers in the + * range 256 - 65535 or 0. (If the change in sequence number or + * ack is more than 65535, an uncompressed packet is sent.) + */ + +/* + * Packet types (must not conflict with IP protocol version) + * + * The top nibble of the first octet is the packet type. There are + * three possible types: IP (not proto TCP or tcp with one of the + * control flags set); uncompressed TCP (a normal IP/TCP packet but + * with the 8-bit protocol field replaced by an 8-bit connection id -- + * this type of packet syncs the sender & receiver); and compressed + * TCP (described above). + * + * LSB of 4-bit field is TCP "PUSH" bit (a worthless anachronism) and + * is logically part of the 4-bit "changes" field that follows. Top + * three bits are actual packet type. For backward compatibility + * and in the interest of conserving bits, numbers are chosen so the + * IP protocol version number (4) which normally appears in this nibble + * means "IP packet". + */ + +/* packet types */ +#define TYPE_IP 0x40 +#define TYPE_UNCOMPRESSED_TCP 0x70 +#define TYPE_COMPRESSED_TCP 0x80 +#define TYPE_ERROR 0x00 + +/* Bits in first octet of compressed packet */ +#define NEW_C 0x40 /* flag bits for what changed in a packet */ +#define NEW_I 0x20 +#define NEW_S 0x08 +#define NEW_A 0x04 +#define NEW_W 0x02 +#define NEW_U 0x01 + +/* reserved, special-case values of above */ +#define SPECIAL_I (NEW_S|NEW_W|NEW_U) /* echoed interactive traffic */ +#define SPECIAL_D (NEW_S|NEW_A|NEW_W|NEW_U) /* unidirectional data */ +#define SPECIALS_MASK (NEW_S|NEW_A|NEW_W|NEW_U) + +#define TCP_PUSH_BIT 0x10 + + +/* + * "state" data for each active tcp conversation on the wire. This is + * basically a copy of the entire IP/TCP header from the last packet + * we saw from the conversation together with a small identifier + * the transmit & receive ends of the line use to locate saved header. + */ +struct cstate { + struct cstate *cs_next; /* next most recently used state (xmit only) */ + u16_t cs_hlen; /* size of hdr (receive only) */ + u8_t cs_id; /* connection # associated with this state */ + u8_t cs_filler; + union { + char csu_hdr[MAX_HDR]; + struct ip_hdr csu_ip; /* ip/tcp hdr from most recent packet */ + } vjcs_u; +}; +#define cs_ip vjcs_u.csu_ip +#define cs_hdr vjcs_u.csu_hdr + + +struct vjstat { + u32_t vjs_packets; /* outbound packets */ + u32_t vjs_compressed; /* outbound compressed packets */ + u32_t vjs_searches; /* searches for connection state */ + u32_t vjs_misses; /* times couldn't find conn. state */ + u32_t vjs_uncompressedin; /* inbound uncompressed packets */ + u32_t vjs_compressedin; /* inbound compressed packets */ + u32_t vjs_errorin; /* inbound unknown type packets */ + u32_t vjs_tossed; /* inbound packets tossed because of error */ +}; + +/* + * all the state data for one serial line (we need one of these per line). + */ +struct vjcompress { + struct cstate *last_cs; /* most recently used tstate */ + u8_t last_recv; /* last rcvd conn. id */ + u8_t last_xmit; /* last sent conn. id */ + u16_t flags; + u8_t maxSlotIndex; + u8_t compressSlot; /* Flag indicating OK to compress slot ID. 
*/ +#if LINK_STATS + struct vjstat stats; +#endif + struct cstate tstate[MAX_SLOTS]; /* xmit connection states */ + struct cstate rstate[MAX_SLOTS]; /* receive connection states */ +}; + +/* flag values */ +#define VJF_TOSS 1U /* tossing rcvd frames because of input err */ + +extern void vj_compress_init (struct vjcompress *comp); +extern u8_t vj_compress_tcp (struct vjcompress *comp, struct pbuf **pb); +extern void vj_uncompress_err (struct vjcompress *comp); +extern int vj_uncompress_uncomp(struct pbuf *nb, struct vjcompress *comp); +extern int vj_uncompress_tcp (struct pbuf **nb, struct vjcompress *comp); + +#ifdef __cplusplus +} +#endif + +#endif /* VJ_H */ + +#endif /* PPP_SUPPORT && VJ_SUPPORT */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/netif/slipif.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/netif/slipif.h new file mode 100644 index 0000000..4e68c94 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/netif/slipif.h @@ -0,0 +1,87 @@ +/** + * @file + * + * SLIP netif API + */ + +/* + * Copyright (c) 2001, Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of the Institute nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE INSTITUTE AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE INSTITUTE OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ +#ifndef LWIP_HDR_NETIF_SLIPIF_H +#define LWIP_HDR_NETIF_SLIPIF_H + +#include "lwip/opt.h" +#include "lwip/netif.h" + +/** Set this to 1 to start a thread that blocks reading on the serial line + * (using sio_read()). + */ +#ifndef SLIP_USE_RX_THREAD +#define SLIP_USE_RX_THREAD !NO_SYS +#endif + +/** Set this to 1 to enable functions to pass in RX bytes from ISR context. + * If enabled, slipif_received_byte[s]() process incoming bytes and put assembled + * packets on a queue, which is fed into lwIP from slipif_poll(). + * If disabled, slipif_poll() polls the serial line (using sio_tryread()). 
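+ *
+ * As an illustration only (assuming SLIP_RX_FROM_ISR is enabled in lwipopts.h
+ * and 'slip_netif' is a netif that was set up with slipif_init()), a UART
+ * receive interrupt could hand over bytes while the main loop drains the queue:
+ *
+ *   void my_uart_rx_isr(u8_t byte)              /* hypothetical ISR hook */
+ *   {
+ *     slipif_received_byte(&slip_netif, byte);  /* assemble packets in ISR context */
+ *   }
+ *
+ *   /* periodically, from the lwIP / main-loop context: */
+ *   slipif_process_rxqueue(&slip_netif);        /* feed queued packets into lwIP */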
+ */ +#ifndef SLIP_RX_FROM_ISR +#define SLIP_RX_FROM_ISR 0 +#endif + +/** Set this to 1 (default for SLIP_RX_FROM_ISR) to queue incoming packets + * received by slipif_received_byte[s]() as long as PBUF_POOL pbufs are available. + * If disabled, packets will be dropped if more than one packet is received. + */ +#ifndef SLIP_RX_QUEUE +#define SLIP_RX_QUEUE SLIP_RX_FROM_ISR +#endif + +#ifdef __cplusplus +extern "C" { +#endif + +err_t slipif_init(struct netif * netif); +void slipif_poll(struct netif *netif); +#if SLIP_RX_FROM_ISR +void slipif_process_rxqueue(struct netif *netif); +void slipif_received_byte(struct netif *netif, u8_t data); +void slipif_received_bytes(struct netif *netif, u8_t *data, u8_t len); +#endif /* SLIP_RX_FROM_ISR */ + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_NETIF_SLIPIF_H */ + diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/netif/zepif.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/netif/zepif.h new file mode 100644 index 0000000..059c8b0 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/netif/zepif.h @@ -0,0 +1,81 @@ +/** + * @file + * + * A netif implementing the ZigBee Eencapsulation Protocol (ZEP). + * This is used to tunnel 6LowPAN over UDP. + */ + +/* + * Copyright (c) 2018 Simon Goldschmidt + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Simon Goldschmidt + * + */ + +#ifndef LWIP_HDR_ZEPIF_H +#define LWIP_HDR_ZEPIF_H + +#include "lwip/opt.h" +#include "netif/lowpan6.h" + +#if LWIP_IPV6 && LWIP_UDP /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/netif.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#define ZEPIF_DEFAULT_UDP_PORT 17754 + +/** Pass this struct as 'state' to netif_add to control the behaviour + * of this netif. 
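+ * A minimal sketch of filling it in and adding the netif (values are examples
+ * only; a zeroed struct selects the per-field defaults documented below, and the
+ * IPv4 arguments are shown as NULL):
+ *
+ *   static struct zepif_init zep_init;       /* zero-initialized: default ports/addresses */
+ *   static struct netif zep_netif;
+ *   zep_init.zep_src_udp_port = ZEPIF_DEFAULT_UDP_PORT;
+ *   netif_add(&zep_netif, NULL, NULL, NULL, &zep_init, zepif_init, tcpip_input);
+ *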
If NULL is passed, default behaviour is chosen */ +struct zepif_init { + /** The UDP port used to ZEP frames from (0 = default) */ + u16_t zep_src_udp_port; + /** The UDP port used to ZEP frames to (0 = default) */ + u16_t zep_dst_udp_port; + /** The IP address to sed ZEP frames from (NULL = ANY) */ + const ip_addr_t *zep_src_ip_addr; + /** The IP address to sed ZEP frames to (NULL = BROADCAST) */ + const ip_addr_t *zep_dst_ip_addr; + /** If != NULL, the udp pcb is bound to this netif */ + const struct netif *zep_netif; + /** MAC address of the 6LowPAN device */ + u8_t addr[6]; +}; + +err_t zepif_init(struct netif *netif); + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_IPV6 && LWIP_UDP */ + +#endif /* LWIP_HDR_ZEPIF_H */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/netif/bridgeif.c b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/netif/bridgeif.c new file mode 100644 index 0000000..1cdda12 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/netif/bridgeif.c @@ -0,0 +1,563 @@ +/** + * @file + * lwIP netif implementing an IEEE 802.1D MAC Bridge + */ + +/* + * Copyright (c) 2017 Simon Goldschmidt. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Simon Goldschmidt + * + */ + +/** + * @defgroup bridgeif IEEE 802.1D bridge + * @ingroup netifs + * This file implements an IEEE 802.1D bridge by using a multilayer netif approach + * (one hardware-independent netif for the bridge that uses hardware netifs for its ports). + * On transmit, the bridge selects the outgoing port(s). + * On receive, the port netif calls into the bridge (via its netif->input function) and + * the bridge selects the port(s) (and/or its netif->input function) to pass the received pbuf to. + * + * Usage: + * - add the port netifs just like you would when using them as dedicated netif without a bridge + * - only NETIF_FLAG_ETHARP/NETIF_FLAG_ETHERNET netifs are supported as bridge ports + * - add the bridge port netifs without IPv4 addresses (i.e. 
pass 'NULL, NULL, NULL') + * - don't add IPv6 addresses to the port netifs! + * - set up the bridge configuration in a global variable of type 'bridgeif_initdata_t' that contains + * - the MAC address of the bridge + * - some configuration options controlling the memory consumption (maximum number of ports + * and FDB entries) + * - e.g. for a bridge MAC address 00-01-02-03-04-05, 2 bridge ports, 1024 FDB entries + 16 static MAC entries: + * bridgeif_initdata_t mybridge_initdata = BRIDGEIF_INITDATA1(2, 1024, 16, ETH_ADDR(0, 1, 2, 3, 4, 5)); + * - add the bridge netif (with IPv4 config): + * struct netif bridge_netif; + * netif_add(&bridge_netif, &my_ip, &my_netmask, &my_gw, &mybridge_initdata, bridgeif_init, tcpip_input); + * NOTE: the passed 'input' function depends on BRIDGEIF_PORT_NETIFS_OUTPUT_DIRECT setting, + * which controls where the forwarding is done (netif low level input context vs. tcpip_thread) + * - set up all ports netifs and the bridge netif + * + * - When adding a port netif, NETIF_FLAG_ETHARP flag will be removed from a port + * to prevent ETHARP working on that port netif (we only want one IP per bridge not per port). + * - When adding a port netif, its input function is changed to call into the bridge. + * + * + * @todo: + * - compact static FDB entries (instead of walking the whole array) + * - add FDB query/read access + * - add FDB change callback (when learning or dropping auto-learned entries) + * - prefill FDB with MAC classes that should never be forwarded + * - multicast snooping? (and only forward group addresses to interested ports) + * - support removing ports + * - check SNMP integration + * - VLAN handling / trunk ports + * - priority handling? (although that largely depends on TX queue limitations and lwIP doesn't provide tx-done handling) + */ + +#include "netif/bridgeif.h" +#include "lwip/netif.h" +#include "lwip/sys.h" +#include "lwip/etharp.h" +#include "lwip/ethip6.h" +#include "lwip/snmp.h" +#include "lwip/timeouts.h" +#include + +#if LWIP_NUM_NETIF_CLIENT_DATA + +/* Define those to better describe your network interface. */ +#define IFNAME0 'b' +#define IFNAME1 'r' + +struct bridgeif_private_s; +typedef struct bridgeif_port_private_s { + struct bridgeif_private_s *bridge; + struct netif *port_netif; + u8_t port_num; +} bridgeif_port_t; + +typedef struct bridgeif_fdb_static_entry_s { + u8_t used; + bridgeif_portmask_t dst_ports; + struct eth_addr addr; +} bridgeif_fdb_static_entry_t; + +typedef struct bridgeif_private_s { + struct netif *netif; + struct eth_addr ethaddr; + u8_t max_ports; + u8_t num_ports; + bridgeif_port_t *ports; + u16_t max_fdbs_entries; + bridgeif_fdb_static_entry_t *fdbs; + u16_t max_fdbd_entries; + void *fdbd; +} bridgeif_private_t; + +/* netif data index to get the bridge on input */ +u8_t bridgeif_netif_client_id = 0xff; + +/** + * @ingroup bridgeif + * Add a static entry to the forwarding database. + * A static entry marks where frames to a specific eth address (unicast or group address) are + * forwarded. 
+ * bits [0..(BRIDGEIF_MAX_PORTS-1)]: hw ports + * bit [BRIDGEIF_MAX_PORTS]: cpu port + * 0: drop + */ +err_t +bridgeif_fdb_add(struct netif *bridgeif, const struct eth_addr *addr, bridgeif_portmask_t ports) +{ + int i; + bridgeif_private_t *br; + BRIDGEIF_DECL_PROTECT(lev); + LWIP_ASSERT("invalid netif", bridgeif != NULL); + br = (bridgeif_private_t *)bridgeif->state; + LWIP_ASSERT("invalid state", br != NULL); + + BRIDGEIF_READ_PROTECT(lev); + for (i = 0; i < br->max_fdbs_entries; i++) { + if (!br->fdbs[i].used) { + BRIDGEIF_WRITE_PROTECT(lev); + if (!br->fdbs[i].used) { + br->fdbs[i].used = 1; + br->fdbs[i].dst_ports = ports; + memcpy(&br->fdbs[i].addr, addr, sizeof(struct eth_addr)); + BRIDGEIF_WRITE_UNPROTECT(lev); + BRIDGEIF_READ_UNPROTECT(lev); + return ERR_OK; + } + BRIDGEIF_WRITE_UNPROTECT(lev); + } + } + BRIDGEIF_READ_UNPROTECT(lev); + return ERR_MEM; +} + +/** + * @ingroup bridgeif + * Remove a static entry from the forwarding database + */ +err_t +bridgeif_fdb_remove(struct netif *bridgeif, const struct eth_addr *addr) +{ + int i; + bridgeif_private_t *br; + BRIDGEIF_DECL_PROTECT(lev); + LWIP_ASSERT("invalid netif", bridgeif != NULL); + br = (bridgeif_private_t *)bridgeif->state; + LWIP_ASSERT("invalid state", br != NULL); + + BRIDGEIF_READ_PROTECT(lev); + for (i = 0; i < br->max_fdbs_entries; i++) { + if (br->fdbs[i].used && !memcmp(&br->fdbs[i].addr, addr, sizeof(struct eth_addr))) { + BRIDGEIF_WRITE_PROTECT(lev); + if (br->fdbs[i].used && !memcmp(&br->fdbs[i].addr, addr, sizeof(struct eth_addr))) { + memset(&br->fdbs[i], 0, sizeof(bridgeif_fdb_static_entry_t)); + BRIDGEIF_WRITE_UNPROTECT(lev); + BRIDGEIF_READ_UNPROTECT(lev); + return ERR_OK; + } + BRIDGEIF_WRITE_UNPROTECT(lev); + } + } + BRIDGEIF_READ_UNPROTECT(lev); + return ERR_VAL; +} + +/** Get the forwarding port(s) (as bit mask) for the specified destination mac address */ +static bridgeif_portmask_t +bridgeif_find_dst_ports(bridgeif_private_t *br, struct eth_addr *dst_addr) +{ + int i; + BRIDGEIF_DECL_PROTECT(lev); + BRIDGEIF_READ_PROTECT(lev); + /* first check for static entries */ + for (i = 0; i < br->max_fdbs_entries; i++) { + if (br->fdbs[i].used) { + if (!memcmp(&br->fdbs[i].addr, dst_addr, sizeof(struct eth_addr))) { + bridgeif_portmask_t ret = br->fdbs[i].dst_ports; + BRIDGEIF_READ_UNPROTECT(lev); + return ret; + } + } + } + if (dst_addr->addr[0] & 1) { + /* no match found: flood remaining group address */ + BRIDGEIF_READ_UNPROTECT(lev); + return BR_FLOOD; + } + BRIDGEIF_READ_UNPROTECT(lev); + /* no match found: check dynamic fdb for port or fall back to flooding */ + return bridgeif_fdb_get_dst_ports(br->fdbd, dst_addr); +} + +/** Helper function to see if a destination mac belongs to the bridge + * (bridge netif or one of the port netifs), in which case the frame + * is sent to the cpu only. 
+ */ +static int +bridgeif_is_local_mac(bridgeif_private_t *br, struct eth_addr *addr) +{ + int i; + BRIDGEIF_DECL_PROTECT(lev); + if (!memcmp(br->netif->hwaddr, addr, sizeof(struct eth_addr))) { + return 1; + } + BRIDGEIF_READ_PROTECT(lev); + for (i = 0; i < br->num_ports; i++) { + struct netif *portif = br->ports[i].port_netif; + if (portif != NULL) { + if (!memcmp(portif->hwaddr, addr, sizeof(struct eth_addr))) { + BRIDGEIF_READ_UNPROTECT(lev); + return 1; + } + } + } + BRIDGEIF_READ_UNPROTECT(lev); + return 0; +} + +/* Output helper function */ +static err_t +bridgeif_send_to_port(bridgeif_private_t *br, struct pbuf *p, u8_t dstport_idx) +{ + if (dstport_idx < BRIDGEIF_MAX_PORTS) { + /* possibly an external port */ + if (dstport_idx < br->max_ports) { + struct netif *portif = br->ports[dstport_idx].port_netif; + if ((portif != NULL) && (portif->linkoutput != NULL)) { + /* prevent sending out to rx port */ + if (netif_get_index(portif) != p->if_idx) { + if (netif_is_link_up(portif)) { + LWIP_DEBUGF(BRIDGEIF_FW_DEBUG, ("br -> flood(%p:%d) -> %d\n", (void *)p, p->if_idx, netif_get_index(portif))); + return portif->linkoutput(portif, p); + } + } + } + } + } else { + LWIP_ASSERT("invalid port index", dstport_idx == BRIDGEIF_MAX_PORTS); + } + return ERR_OK; +} + +/** Helper function to pass a pbuf to all ports marked in 'dstports' + */ +static err_t +bridgeif_send_to_ports(bridgeif_private_t *br, struct pbuf *p, bridgeif_portmask_t dstports) +{ + err_t err, ret_err = ERR_OK; + u8_t i; + bridgeif_portmask_t mask = 1; + BRIDGEIF_DECL_PROTECT(lev); + BRIDGEIF_READ_PROTECT(lev); + for (i = 0; i < BRIDGEIF_MAX_PORTS; i++, mask = (bridgeif_portmask_t)(mask << 1)) { + if (dstports & mask) { + err = bridgeif_send_to_port(br, p, i); + if (err != ERR_OK) { + ret_err = err; + } + } + } + BRIDGEIF_READ_UNPROTECT(lev); + return ret_err; +} + +/** Output function of the application port of the bridge (the one with an ip address). + * The forwarding port(s) where this pbuf is sent on is/are automatically selected + * from the FDB. + */ +static err_t +bridgeif_output(struct netif *netif, struct pbuf *p) +{ + err_t err; + bridgeif_private_t *br = (bridgeif_private_t *)netif->state; + struct eth_addr *dst = (struct eth_addr *)(p->payload); + + bridgeif_portmask_t dstports = bridgeif_find_dst_ports(br, dst); + err = bridgeif_send_to_ports(br, p, dstports); + + MIB2_STATS_NETIF_ADD(netif, ifoutoctets, p->tot_len); + if (((u8_t *)p->payload)[0] & 1) { + /* broadcast or multicast packet*/ + MIB2_STATS_NETIF_INC(netif, ifoutnucastpkts); + } else { + /* unicast packet */ + MIB2_STATS_NETIF_INC(netif, ifoutucastpkts); + } + /* increase ifoutdiscards or ifouterrors on error */ + + LINK_STATS_INC(link.xmit); + + return err; +} + +/** The actual bridge input function. Port netif's input is changed to call + * here. This function decides where the frame is forwarded. 
+ */ +static err_t +bridgeif_input(struct pbuf *p, struct netif *netif) +{ + u8_t rx_idx; + bridgeif_portmask_t dstports; + struct eth_addr *src, *dst; + bridgeif_private_t *br; + bridgeif_port_t *port; + if (p == NULL || netif == NULL) { + return ERR_VAL; + } + port = (bridgeif_port_t *)netif_get_client_data(netif, bridgeif_netif_client_id); + LWIP_ASSERT("port data not set", port != NULL); + if (port == NULL || port->bridge == NULL) { + return ERR_VAL; + } + br = (bridgeif_private_t *)port->bridge; + rx_idx = netif_get_index(netif); + /* store receive index in pbuf */ + p->if_idx = rx_idx; + + dst = (struct eth_addr *)p->payload; + src = (struct eth_addr *)(((u8_t *)p->payload) + sizeof(struct eth_addr)); + + if ((src->addr[0] & 1) == 0) { + /* update src for all non-group addresses */ + bridgeif_fdb_update_src(br->fdbd, src, port->port_num); + } + + if (dst->addr[0] & 1) { + /* group address -> flood + cpu? */ + dstports = bridgeif_find_dst_ports(br, dst); + bridgeif_send_to_ports(br, p, dstports); + if (dstports & (1 << BRIDGEIF_MAX_PORTS)) { + /* we pass the reference to ->input or have to free it */ + LWIP_DEBUGF(BRIDGEIF_FW_DEBUG, ("br -> input(%p)\n", (void *)p)); + if (br->netif->input(p, br->netif) != ERR_OK) { + pbuf_free(p); + } + } else { + /* all references done */ + pbuf_free(p); + } + /* always return ERR_OK here to prevent the caller freeing the pbuf */ + return ERR_OK; + } else { + /* is this for one of the local ports? */ + if (bridgeif_is_local_mac(br, dst)) { + /* yes, send to cpu port only */ + LWIP_DEBUGF(BRIDGEIF_FW_DEBUG, ("br -> input(%p)\n", (void *)p)); + return br->netif->input(p, br->netif); + } + + /* get dst port */ + dstports = bridgeif_find_dst_ports(br, dst); + bridgeif_send_to_ports(br, p, dstports); + /* no need to send to cpu, flooding is for external ports only */ + /* by this, we consumed the pbuf */ + pbuf_free(p); + /* always return ERR_OK here to prevent the caller freeing the pbuf */ + return ERR_OK; + } +} + +#if !BRIDGEIF_PORT_NETIFS_OUTPUT_DIRECT +/** Input function for port netifs used to synchronize into tcpip_thread. + */ +static err_t +bridgeif_tcpip_input(struct pbuf *p, struct netif *netif) +{ + return tcpip_inpkt(p, netif, bridgeif_input); +} +#endif /* BRIDGEIF_PORT_NETIFS_OUTPUT_DIRECT */ + +/** + * @ingroup bridgeif + * Initialization function passed to netif_add(). + * + * ATTENTION: A pointer to a @ref bridgeif_initdata_t must be passed as 'state' + * to @ref netif_add when adding the bridge. I supplies MAC address + * and controls memory allocation (number of ports, FDB size). 
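+ *
+ * A usage sketch (IP addresses and the two port netifs are placeholders; see
+ * also the usage notes in the file header above):
+ *
+ *   bridgeif_initdata_t bridge_initdata =
+ *     BRIDGEIF_INITDATA1(2, 1024, 16, ETH_ADDR(0, 1, 2, 3, 4, 5));
+ *   struct netif bridge_netif;
+ *   netif_add(&bridge_netif, &my_ip, &my_netmask, &my_gw,
+ *             &bridge_initdata, bridgeif_init, tcpip_input);
+ *   bridgeif_add_port(&bridge_netif, &port1_netif);
+ *   bridgeif_add_port(&bridge_netif, &port2_netif);
+ *   netif_set_up(&bridge_netif);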
+ * + * @param netif the lwip network interface structure for this ethernetif + * @return ERR_OK if the loopif is initialized + * ERR_MEM if private data couldn't be allocated + * any other err_t on error + */ +err_t +bridgeif_init(struct netif *netif) +{ + bridgeif_initdata_t *init_data; + bridgeif_private_t *br; + size_t alloc_len_sizet; + mem_size_t alloc_len; + + LWIP_ASSERT("netif != NULL", (netif != NULL)); + LWIP_ASSERT("bridgeif needs an input callback", (netif->input != NULL)); +#if !BRIDGEIF_PORT_NETIFS_OUTPUT_DIRECT + if (netif->input == tcpip_input) { + LWIP_DEBUGF(BRIDGEIF_DEBUG | LWIP_DBG_ON, ("bridgeif does not need tcpip_input, use netif_input/ethernet_input instead")); + } +#endif + + if (bridgeif_netif_client_id == 0xFF) { + bridgeif_netif_client_id = netif_alloc_client_data_id(); + } + + init_data = (bridgeif_initdata_t *)netif->state; + LWIP_ASSERT("init_data != NULL", (init_data != NULL)); + LWIP_ASSERT("init_data->max_ports <= BRIDGEIF_MAX_PORTS", + init_data->max_ports <= BRIDGEIF_MAX_PORTS); + + alloc_len_sizet = sizeof(bridgeif_private_t) + (init_data->max_ports * sizeof(bridgeif_port_t) + (init_data->max_fdb_static_entries * sizeof(bridgeif_fdb_static_entry_t))); + alloc_len = (mem_size_t)alloc_len_sizet; + LWIP_ASSERT("alloc_len == alloc_len_sizet", alloc_len == alloc_len_sizet); + LWIP_DEBUGF(BRIDGEIF_DEBUG, ("bridgeif_init: allocating %d bytes for private data\n", (int)alloc_len)); + br = (bridgeif_private_t *)mem_calloc(1, alloc_len); + if (br == NULL) { + LWIP_DEBUGF(NETIF_DEBUG, ("bridgeif_init: out of memory\n")); + return ERR_MEM; + } + memcpy(&br->ethaddr, &init_data->ethaddr, sizeof(br->ethaddr)); + br->netif = netif; + + br->max_ports = init_data->max_ports; + br->ports = (bridgeif_port_t *)(br + 1); + + br->max_fdbs_entries = init_data->max_fdb_static_entries; + br->fdbs = (bridgeif_fdb_static_entry_t *)(((u8_t *)(br + 1)) + (init_data->max_ports * sizeof(bridgeif_port_t))); + + br->max_fdbd_entries = init_data->max_fdb_dynamic_entries; + br->fdbd = bridgeif_fdb_init(init_data->max_fdb_dynamic_entries); + if (br->fdbd == NULL) { + LWIP_DEBUGF(NETIF_DEBUG, ("bridgeif_init: out of memory in fdb_init\n")); + mem_free(br); + return ERR_MEM; + } + +#if LWIP_NETIF_HOSTNAME + /* Initialize interface hostname */ + netif->hostname = "lwip"; +#endif /* LWIP_NETIF_HOSTNAME */ + + /* + * Initialize the snmp variables and counters inside the struct netif. + * The last argument should be replaced with your link speed, in units + * of bits per second. + */ + MIB2_INIT_NETIF(netif, snmp_ifType_ethernet_csmacd, 0); + + netif->state = br; + netif->name[0] = IFNAME0; + netif->name[1] = IFNAME1; + /* We directly use etharp_output() here to save a function call. + * You can instead declare your own function an call etharp_output() + * from it if you have to do some checks before sending (e.g. if link + * is available...) 
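+ * A hypothetical wrapper of that kind could look like this:
+ *
+ *   static err_t my_bridge_output(struct netif *netif, struct pbuf *q,
+ *                                 const ip4_addr_t *ipaddr)
+ *   {
+ *     if (!netif_is_link_up(netif)) {
+ *       return ERR_IF;                        /* refuse to send while the link is down */
+ *     }
+ *     return etharp_output(netif, q, ipaddr); /* normal ARP/ethernet output path */
+ *   }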
*/ +#if LWIP_IPV4 + netif->output = etharp_output; +#endif /* LWIP_IPV4 */ +#if LWIP_IPV6 + netif->output_ip6 = ethip6_output; +#endif /* LWIP_IPV6 */ + netif->linkoutput = bridgeif_output; + + /* set MAC hardware address length */ + netif->hwaddr_len = ETH_HWADDR_LEN; + + /* set MAC hardware address */ + memcpy(netif->hwaddr, &br->ethaddr, ETH_HWADDR_LEN); + + /* maximum transfer unit */ + netif->mtu = 1500; + + /* device capabilities */ + /* don't set NETIF_FLAG_ETHARP if this device is not an ethernet one */ + netif->flags = NETIF_FLAG_BROADCAST | NETIF_FLAG_ETHARP | NETIF_FLAG_ETHERNET | NETIF_FLAG_IGMP | NETIF_FLAG_MLD6 | NETIF_FLAG_LINK_UP; + +#if LWIP_IPV6 && LWIP_IPV6_MLD + /* + * For hardware/netifs that implement MAC filtering. + * All-nodes link-local is handled by default, so we must let the hardware know + * to allow multicast packets in. + * Should set mld_mac_filter previously. */ + if (netif->mld_mac_filter != NULL) { + ip6_addr_t ip6_allnodes_ll; + ip6_addr_set_allnodes_linklocal(&ip6_allnodes_ll); + netif->mld_mac_filter(netif, &ip6_allnodes_ll, NETIF_ADD_MAC_FILTER); + } +#endif /* LWIP_IPV6 && LWIP_IPV6_MLD */ + + return ERR_OK; +} + +/** + * @ingroup bridgeif + * Add a port to the bridge + */ +err_t +bridgeif_add_port(struct netif *bridgeif, struct netif *portif) +{ + bridgeif_private_t *br; + bridgeif_port_t *port; + + LWIP_ASSERT("bridgeif != NULL", bridgeif != NULL); + LWIP_ASSERT("bridgeif->state != NULL", bridgeif->state != NULL); + LWIP_ASSERT("portif != NULL", portif != NULL); + + if (!(portif->flags & NETIF_FLAG_ETHARP) || !(portif->flags & NETIF_FLAG_ETHERNET)) { + /* can only add ETHERNET/ETHARP interfaces */ + return ERR_VAL; + } + + br = (bridgeif_private_t *)bridgeif->state; + + if (br->num_ports >= br->max_ports) { + return ERR_VAL; + } + port = &br->ports[br->num_ports]; + port->port_netif = portif; + port->port_num = br->num_ports; + port->bridge = br; + br->num_ports++; + + /* let the port call us on input */ +#if BRIDGEIF_PORT_NETIFS_OUTPUT_DIRECT + portif->input = bridgeif_input; +#else + portif->input = bridgeif_tcpip_input; +#endif + /* store pointer to bridge in netif */ + netif_set_client_data(portif, bridgeif_netif_client_id, port); + /* remove ETHARP flag to prevent sending report events on netif-up */ + netif_clear_flags(portif, NETIF_FLAG_ETHARP); + + return ERR_OK; +} + +#endif /* LWIP_NUM_NETIF_CLIENT_DATA */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/netif/bridgeif_fdb.c b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/netif/bridgeif_fdb.c new file mode 100644 index 0000000..fb4b094 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/netif/bridgeif_fdb.c @@ -0,0 +1,212 @@ +/** + * @file + * lwIP netif implementing an FDB for IEEE 802.1D MAC Bridge + */ + +/* + * Copyright (c) 2017 Simon Goldschmidt. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. 
The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Simon Goldschmidt + * + */ + +/** + * @defgroup bridgeif_fdb FDB example code + * @ingroup bridgeif + * This file implements an example for an FDB (Forwarding DataBase) + */ + +#include "netif/bridgeif.h" +#include "lwip/sys.h" +#include "lwip/mem.h" +#include "lwip/timeouts.h" +#include + +#define BRIDGEIF_AGE_TIMER_MS 1000 + +#define BR_FDB_TIMEOUT_SEC (60*5) /* 5 minutes FDB timeout */ + +typedef struct bridgeif_dfdb_entry_s { + u8_t used; + u8_t port; + u32_t ts; + struct eth_addr addr; +} bridgeif_dfdb_entry_t; + +typedef struct bridgeif_dfdb_s { + u16_t max_fdb_entries; + bridgeif_dfdb_entry_t *fdb; +} bridgeif_dfdb_t; + +/** + * @ingroup bridgeif_fdb + * A real simple and slow implementation of an auto-learning forwarding database that + * remembers known src mac addresses to know which port to send frames destined for that + * mac address. 
+ * + * ATTENTION: This is meant as an example only, in real-world use, you should + * provide a better implementation :-) + */ +void +bridgeif_fdb_update_src(void *fdb_ptr, struct eth_addr *src_addr, u8_t port_idx) +{ + int i; + bridgeif_dfdb_t *fdb = (bridgeif_dfdb_t *)fdb_ptr; + BRIDGEIF_DECL_PROTECT(lev); + BRIDGEIF_READ_PROTECT(lev); + for (i = 0; i < fdb->max_fdb_entries; i++) { + bridgeif_dfdb_entry_t *e = &fdb->fdb[i]; + if (e->used && e->ts) { + if (!memcmp(&e->addr, src_addr, sizeof(struct eth_addr))) { + LWIP_DEBUGF(BRIDGEIF_FDB_DEBUG, ("br: update src %02x:%02x:%02x:%02x:%02x:%02x (from %d) @ idx %d\n", + src_addr->addr[0], src_addr->addr[1], src_addr->addr[2], src_addr->addr[3], src_addr->addr[4], src_addr->addr[5], + port_idx, i)); + BRIDGEIF_WRITE_PROTECT(lev); + e->ts = BR_FDB_TIMEOUT_SEC; + e->port = port_idx; + BRIDGEIF_WRITE_UNPROTECT(lev); + BRIDGEIF_READ_UNPROTECT(lev); + return; + } + } + } + /* not found, allocate new entry from free */ + for (i = 0; i < fdb->max_fdb_entries; i++) { + bridgeif_dfdb_entry_t *e = &fdb->fdb[i]; + if (!e->used || !e->ts) { + BRIDGEIF_WRITE_PROTECT(lev); + /* check again when protected */ + if (!e->used || !e->ts) { + LWIP_DEBUGF(BRIDGEIF_FDB_DEBUG, ("br: create src %02x:%02x:%02x:%02x:%02x:%02x (from %d) @ idx %d\n", + src_addr->addr[0], src_addr->addr[1], src_addr->addr[2], src_addr->addr[3], src_addr->addr[4], src_addr->addr[5], + port_idx, i)); + memcpy(&e->addr, src_addr, sizeof(struct eth_addr)); + e->ts = BR_FDB_TIMEOUT_SEC; + e->port = port_idx; + e->used = 1; + BRIDGEIF_WRITE_UNPROTECT(lev); + BRIDGEIF_READ_UNPROTECT(lev); + return; + } + BRIDGEIF_WRITE_UNPROTECT(lev); + } + } + BRIDGEIF_READ_UNPROTECT(lev); + /* not found, no free entry -> flood */ +} + +/** + * @ingroup bridgeif_fdb + * Walk our list of auto-learnt fdb entries and return a port to forward or BR_FLOOD if unknown + */ +bridgeif_portmask_t +bridgeif_fdb_get_dst_ports(void *fdb_ptr, struct eth_addr *dst_addr) +{ + int i; + bridgeif_dfdb_t *fdb = (bridgeif_dfdb_t *)fdb_ptr; + BRIDGEIF_DECL_PROTECT(lev); + BRIDGEIF_READ_PROTECT(lev); + for (i = 0; i < fdb->max_fdb_entries; i++) { + bridgeif_dfdb_entry_t *e = &fdb->fdb[i]; + if (e->used && e->ts) { + if (!memcmp(&e->addr, dst_addr, sizeof(struct eth_addr))) { + bridgeif_portmask_t ret = (bridgeif_portmask_t)(1 << e->port); + BRIDGEIF_READ_UNPROTECT(lev); + return ret; + } + } + } + BRIDGEIF_READ_UNPROTECT(lev); + return BR_FLOOD; +} + +/** + * @ingroup bridgeif_fdb + * Aging implementation of our simple fdb + */ +static void +bridgeif_fdb_age_one_second(void *fdb_ptr) +{ + int i; + bridgeif_dfdb_t *fdb; + BRIDGEIF_DECL_PROTECT(lev); + + fdb = (bridgeif_dfdb_t *)fdb_ptr; + BRIDGEIF_READ_PROTECT(lev); + + for (i = 0; i < fdb->max_fdb_entries; i++) { + bridgeif_dfdb_entry_t *e = &fdb->fdb[i]; + if (e->used && e->ts) { + BRIDGEIF_WRITE_PROTECT(lev); + /* check again when protected */ + if (e->used && e->ts) { + if (--e->ts == 0) { + e->used = 0; + } + } + BRIDGEIF_WRITE_UNPROTECT(lev); + } + } + BRIDGEIF_READ_UNPROTECT(lev); +} + +/** Timer callback for fdb aging, called once per second */ +static void +bridgeif_age_tmr(void *arg) +{ + bridgeif_dfdb_t *fdb = (bridgeif_dfdb_t *)arg; + + LWIP_ASSERT("invalid arg", arg != NULL); + + bridgeif_fdb_age_one_second(fdb); + sys_timeout(BRIDGEIF_AGE_TIMER_MS, bridgeif_age_tmr, arg); +} + +/** + * @ingroup bridgeif_fdb + * Init our simple fdb list + */ +void * +bridgeif_fdb_init(u16_t max_fdb_entries) +{ + bridgeif_dfdb_t *fdb; + size_t alloc_len_sizet = sizeof(bridgeif_dfdb_t) + 
(max_fdb_entries * sizeof(bridgeif_dfdb_entry_t)); + mem_size_t alloc_len = (mem_size_t)alloc_len_sizet; + LWIP_ASSERT("alloc_len == alloc_len_sizet", alloc_len == alloc_len_sizet); + LWIP_DEBUGF(BRIDGEIF_DEBUG, ("bridgeif_fdb_init: allocating %d bytes for private FDB data\n", (int)alloc_len)); + fdb = (bridgeif_dfdb_t *)mem_calloc(1, alloc_len); + if (fdb == NULL) { + return NULL; + } + fdb->max_fdb_entries = max_fdb_entries; + fdb->fdb = (bridgeif_dfdb_entry_t *)(fdb + 1); + + sys_timeout(BRIDGEIF_AGE_TIMER_MS, bridgeif_age_tmr, fdb); + + return fdb; +} diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/netif/ethernet.c b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/netif/ethernet.c new file mode 100644 index 0000000..3c9eb02 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/netif/ethernet.c @@ -0,0 +1,321 @@ +/** + * @file + * Ethernet common functions + * + * @defgroup ethernet Ethernet + * @ingroup callbackstyle_api + */ + +/* + * Copyright (c) 2001-2003 Swedish Institute of Computer Science. + * Copyright (c) 2003-2004 Leon Woestenberg + * Copyright (c) 2003-2004 Axon Digital Design B.V., The Netherlands. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + */ + +#include "lwip/opt.h" + +#if LWIP_ARP || LWIP_ETHERNET + +#include "netif/ethernet.h" +#include "lwip/def.h" +#include "lwip/stats.h" +#include "lwip/etharp.h" +#include "lwip/ip.h" +#include "lwip/snmp.h" + +#include + +#include "netif/ppp/ppp_opts.h" +#if PPPOE_SUPPORT +#include "netif/ppp/pppoe.h" +#endif /* PPPOE_SUPPORT */ + +#ifdef LWIP_HOOK_FILENAME +#include LWIP_HOOK_FILENAME +#endif + +const struct eth_addr ethbroadcast = {{0xff, 0xff, 0xff, 0xff, 0xff, 0xff}}; +const struct eth_addr ethzero = {{0, 0, 0, 0, 0, 0}}; + +/** + * @ingroup lwip_nosys + * Process received ethernet frames. 
Using this function instead of directly + * calling ip_input and passing ARP frames through etharp in ethernetif_input, + * the ARP cache is protected from concurrent access.\n + * Don't call directly, pass to netif_add() and call netif->input(). + * + * @param p the received packet, p->payload pointing to the ethernet header + * @param netif the network interface on which the packet was received + * + * @see LWIP_HOOK_UNKNOWN_ETH_PROTOCOL + * @see ETHARP_SUPPORT_VLAN + * @see LWIP_HOOK_VLAN_CHECK + */ +err_t +ethernet_input(struct pbuf *p, struct netif *netif) +{ + struct eth_hdr *ethhdr; + u16_t type; +#if LWIP_ARP || ETHARP_SUPPORT_VLAN || LWIP_IPV6 + u16_t next_hdr_offset = SIZEOF_ETH_HDR; +#endif /* LWIP_ARP || ETHARP_SUPPORT_VLAN */ + + LWIP_ASSERT_CORE_LOCKED(); + + if (p->len <= SIZEOF_ETH_HDR) { + /* a packet with only an ethernet header (or less) is not valid for us */ + ETHARP_STATS_INC(etharp.proterr); + ETHARP_STATS_INC(etharp.drop); + MIB2_STATS_NETIF_INC(netif, ifinerrors); + goto free_and_return; + } + + if (p->if_idx == NETIF_NO_INDEX) { + p->if_idx = netif_get_index(netif); + } + + /* points to packet payload, which starts with an Ethernet header */ + ethhdr = (struct eth_hdr *)p->payload; + LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, + ("ethernet_input: dest:%"X8_F":%"X8_F":%"X8_F":%"X8_F":%"X8_F":%"X8_F", src:%"X8_F":%"X8_F":%"X8_F":%"X8_F":%"X8_F":%"X8_F", type:%"X16_F"\n", + (unsigned char)ethhdr->dest.addr[0], (unsigned char)ethhdr->dest.addr[1], (unsigned char)ethhdr->dest.addr[2], + (unsigned char)ethhdr->dest.addr[3], (unsigned char)ethhdr->dest.addr[4], (unsigned char)ethhdr->dest.addr[5], + (unsigned char)ethhdr->src.addr[0], (unsigned char)ethhdr->src.addr[1], (unsigned char)ethhdr->src.addr[2], + (unsigned char)ethhdr->src.addr[3], (unsigned char)ethhdr->src.addr[4], (unsigned char)ethhdr->src.addr[5], + lwip_htons(ethhdr->type))); + + type = ethhdr->type; +#if ETHARP_SUPPORT_VLAN + if (type == PP_HTONS(ETHTYPE_VLAN)) { + struct eth_vlan_hdr *vlan = (struct eth_vlan_hdr *)(((char *)ethhdr) + SIZEOF_ETH_HDR); + next_hdr_offset = SIZEOF_ETH_HDR + SIZEOF_VLAN_HDR; + if (p->len <= SIZEOF_ETH_HDR + SIZEOF_VLAN_HDR) { + /* a packet with only an ethernet/vlan header (or less) is not valid for us */ + ETHARP_STATS_INC(etharp.proterr); + ETHARP_STATS_INC(etharp.drop); + MIB2_STATS_NETIF_INC(netif, ifinerrors); + goto free_and_return; + } +#if defined(LWIP_HOOK_VLAN_CHECK) || defined(ETHARP_VLAN_CHECK) || defined(ETHARP_VLAN_CHECK_FN) /* if not, allow all VLANs */ +#ifdef LWIP_HOOK_VLAN_CHECK + if (!LWIP_HOOK_VLAN_CHECK(netif, ethhdr, vlan)) { +#elif defined(ETHARP_VLAN_CHECK_FN) + if (!ETHARP_VLAN_CHECK_FN(ethhdr, vlan)) { +#elif defined(ETHARP_VLAN_CHECK) + if (VLAN_ID(vlan) != ETHARP_VLAN_CHECK) { +#endif + /* silently ignore this packet: not for our VLAN */ + pbuf_free(p); + return ERR_OK; + } +#endif /* defined(LWIP_HOOK_VLAN_CHECK) || defined(ETHARP_VLAN_CHECK) || defined(ETHARP_VLAN_CHECK_FN) */ + type = vlan->tpid; + } +#endif /* ETHARP_SUPPORT_VLAN */ + +#if LWIP_ARP_FILTER_NETIF + netif = LWIP_ARP_FILTER_NETIF_FN(p, netif, lwip_htons(type)); +#endif /* LWIP_ARP_FILTER_NETIF*/ + + if (ethhdr->dest.addr[0] & 1) { + /* this might be a multicast or broadcast packet */ + if (ethhdr->dest.addr[0] == LL_IP4_MULTICAST_ADDR_0) { +#if LWIP_IPV4 + if ((ethhdr->dest.addr[1] == LL_IP4_MULTICAST_ADDR_1) && + (ethhdr->dest.addr[2] == LL_IP4_MULTICAST_ADDR_2)) { + /* mark the pbuf as link-layer multicast */ + p->flags |= PBUF_FLAG_LLMCAST; + } +#endif /* LWIP_IPV4 */ + } +#if 
LWIP_IPV6
+    else if ((ethhdr->dest.addr[0] == LL_IP6_MULTICAST_ADDR_0) &&
+             (ethhdr->dest.addr[1] == LL_IP6_MULTICAST_ADDR_1)) {
+      /* mark the pbuf as link-layer multicast */
+      p->flags |= PBUF_FLAG_LLMCAST;
+    }
+#endif /* LWIP_IPV6 */
+    else if (eth_addr_cmp(&ethhdr->dest, &ethbroadcast)) {
+      /* mark the pbuf as link-layer broadcast */
+      p->flags |= PBUF_FLAG_LLBCAST;
+    }
+  }
+
+  switch (type) {
+#if LWIP_IPV4 && LWIP_ARP
+    /* IP packet? */
+    case PP_HTONS(ETHTYPE_IP):
+      if (!(netif->flags & NETIF_FLAG_ETHARP)) {
+        goto free_and_return;
+      }
+      /* skip Ethernet header (min. size checked above) */
+      if (pbuf_remove_header(p, next_hdr_offset)) {
+        LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_WARNING,
+                    ("ethernet_input: IPv4 packet dropped, too short (%"U16_F"/%"U16_F")\n",
+                     p->tot_len, next_hdr_offset));
+        LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("Can't move over header in packet"));
+        goto free_and_return;
+      } else {
+        /* pass to IP layer */
+        ip4_input(p, netif);
+      }
+      break;
+
+    case PP_HTONS(ETHTYPE_ARP):
+      if (!(netif->flags & NETIF_FLAG_ETHARP)) {
+        goto free_and_return;
+      }
+      /* skip Ethernet header (min. size checked above) */
+      if (pbuf_remove_header(p, next_hdr_offset)) {
+        LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_WARNING,
+                    ("ethernet_input: ARP response packet dropped, too short (%"U16_F"/%"U16_F")\n",
+                     p->tot_len, next_hdr_offset));
+        LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("Can't move over header in packet"));
+        ETHARP_STATS_INC(etharp.lenerr);
+        ETHARP_STATS_INC(etharp.drop);
+        goto free_and_return;
+      } else {
+        /* pass p to ARP module */
+        etharp_input(p, netif);
+      }
+      break;
+#endif /* LWIP_IPV4 && LWIP_ARP */
+#if PPPOE_SUPPORT
+    case PP_HTONS(ETHTYPE_PPPOEDISC): /* PPP Over Ethernet Discovery Stage */
+      pppoe_disc_input(netif, p);
+      break;
+
+    case PP_HTONS(ETHTYPE_PPPOE): /* PPP Over Ethernet Session Stage */
+      pppoe_data_input(netif, p);
+      break;
+#endif /* PPPOE_SUPPORT */
+
+#if LWIP_IPV6
+    case PP_HTONS(ETHTYPE_IPV6): /* IPv6 */
+      /* skip Ethernet header */
+      if ((p->len < next_hdr_offset) || pbuf_remove_header(p, next_hdr_offset)) {
+        LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_WARNING,
+                    ("ethernet_input: IPv6 packet dropped, too short (%"U16_F"/%"U16_F")\n",
+                     p->tot_len, next_hdr_offset));
+        goto free_and_return;
+      } else {
+        /* pass to IPv6 layer */
+        ip6_input(p, netif);
+      }
+      break;
+#endif /* LWIP_IPV6 */
+
+    default:
+#ifdef LWIP_HOOK_UNKNOWN_ETH_PROTOCOL
+      if (LWIP_HOOK_UNKNOWN_ETH_PROTOCOL(p, netif) == ERR_OK) {
+        break;
+      }
+#endif
+      ETHARP_STATS_INC(etharp.proterr);
+      ETHARP_STATS_INC(etharp.drop);
+      MIB2_STATS_NETIF_INC(netif, ifinunknownprotos);
+      goto free_and_return;
+  }
+
+  /* This means the pbuf is freed or consumed,
+     so the caller doesn't have to free it again */
+  return ERR_OK;
+
+free_and_return:
+  pbuf_free(p);
+  return ERR_OK;
+}
+
+/**
+ * @ingroup ethernet
+ * Send an ethernet packet on the network using netif->linkoutput().
+ * The ethernet header is filled in before sending.
+ *
+ * @see LWIP_HOOK_VLAN_SET
+ *
+ * @param netif the lwIP network interface on which to send the packet
+ * @param p the packet to send. pbuf layer must be @ref PBUF_LINK.
+ * @param src the source MAC address to be copied into the ethernet header
+ * @param dst the destination MAC address to be copied into the ethernet header
+ * @param eth_type ethernet type (@ref lwip_ieee_eth_type)
+ * @return ERR_OK if the packet was sent, any other err_t on failure
+ */
+err_t
+ethernet_output(struct netif * netif, struct pbuf * p,
+                const struct eth_addr * src, const struct eth_addr * dst,
+                u16_t eth_type) {
+  struct eth_hdr *ethhdr;
+  u16_t eth_type_be = lwip_htons(eth_type);
+
+#if ETHARP_SUPPORT_VLAN && defined(LWIP_HOOK_VLAN_SET)
+  s32_t vlan_prio_vid = LWIP_HOOK_VLAN_SET(netif, p, src, dst, eth_type);
+  if (vlan_prio_vid >= 0) {
+    struct eth_vlan_hdr *vlanhdr;
+
+    LWIP_ASSERT("prio_vid must be <= 0xFFFF", vlan_prio_vid <= 0xFFFF);
+
+    if (pbuf_add_header(p, SIZEOF_ETH_HDR + SIZEOF_VLAN_HDR) != 0) {
+      goto pbuf_header_failed;
+    }
+    vlanhdr = (struct eth_vlan_hdr *)(((u8_t *)p->payload) + SIZEOF_ETH_HDR);
+    vlanhdr->tpid = eth_type_be;
+    vlanhdr->prio_vid = lwip_htons((u16_t)vlan_prio_vid);
+
+    eth_type_be = PP_HTONS(ETHTYPE_VLAN);
+  } else
+#endif /* ETHARP_SUPPORT_VLAN && defined(LWIP_HOOK_VLAN_SET) */
+  {
+    if (pbuf_add_header(p, SIZEOF_ETH_HDR) != 0) {
+      goto pbuf_header_failed;
+    }
+  }
+
+  LWIP_ASSERT_CORE_LOCKED();
+
+  ethhdr = (struct eth_hdr *)p->payload;
+  ethhdr->type = eth_type_be;
+  SMEMCPY(&ethhdr->dest, dst, ETH_HWADDR_LEN);
+  SMEMCPY(&ethhdr->src, src, ETH_HWADDR_LEN);
+
+  LWIP_ASSERT("netif->hwaddr_len must be 6 for ethernet_output!",
+              (netif->hwaddr_len == ETH_HWADDR_LEN));
+  LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE,
+              ("ethernet_output: sending packet %p\n", (void *)p));
+
+  /* send the packet */
+  return netif->linkoutput(netif, p);
+
+pbuf_header_failed:
+  LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_SERIOUS,
+              ("ethernet_output: could not allocate room for header.\n"));
+  LINK_STATS_INC(link.lenerr);
+  return ERR_BUF;
+}
+
+#endif /* LWIP_ARP || LWIP_ETHERNET */
diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/netif/lowpan6.c b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/netif/lowpan6.c
new file mode 100644
index 0000000..6c26f66
--- /dev/null
+++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/netif/lowpan6.c
@@ -0,0 +1,920 @@
+/**
+ * @file
+ *
+ * 6LowPAN output for IPv6. Uses ND tables for link-layer addressing. Fragments packets to 6LowPAN units.
+ *
+ * This implementation aims to conform to IEEE 802.15.4(-2015), RFC 4944 and RFC 6282.
+ * @todo: RFC 6775.
+ */
+
+/*
+ * Copyright (c) 2015 Inico Technologies Ltd.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without modification,
+ * are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Ivan Delamer + * + * + * Please coordinate changes and requests with Ivan Delamer + * + */ + +/** + * @defgroup sixlowpan 6LoWPAN (RFC4944) + * @ingroup netifs + * 6LowPAN netif implementation + */ + +#include "netif/lowpan6.h" + +#if LWIP_IPV6 + +#include "lwip/ip.h" +#include "lwip/pbuf.h" +#include "lwip/ip_addr.h" +#include "lwip/netif.h" +#include "lwip/nd6.h" +#include "lwip/mem.h" +#include "lwip/udp.h" +#include "lwip/tcpip.h" +#include "lwip/snmp.h" +#include "netif/ieee802154.h" + +#include + +#if LWIP_6LOWPAN_802154_HW_CRC +#define LWIP_6LOWPAN_DO_CALC_CRC(buf, len) 0 +#else +#define LWIP_6LOWPAN_DO_CALC_CRC(buf, len) LWIP_6LOWPAN_CALC_CRC(buf, len) +#endif + +/** This is a helper struct for reassembly of fragments + * (IEEE 802.15.4 limits to 127 bytes) + */ +struct lowpan6_reass_helper { + struct lowpan6_reass_helper *next_packet; + struct pbuf *reass; + struct pbuf *frags; + u8_t timer; + struct lowpan6_link_addr sender_addr; + u16_t datagram_size; + u16_t datagram_tag; +}; + +/** This struct keeps track of per-netif state */ +struct lowpan6_ieee802154_data { + /** fragment reassembly list */ + struct lowpan6_reass_helper *reass_list; +#if LWIP_6LOWPAN_NUM_CONTEXTS > 0 + /** address context for compression */ + ip6_addr_t lowpan6_context[LWIP_6LOWPAN_NUM_CONTEXTS]; +#endif + /** Datagram Tag for fragmentation */ + u16_t tx_datagram_tag; + /** local PAN ID for IEEE 802.15.4 header */ + u16_t ieee_802154_pan_id; + /** Sequence Number for IEEE 802.15.4 transmission */ + u8_t tx_frame_seq_num; +}; + +/* Maximum frame size is 127 bytes minus CRC size */ +#define LOWPAN6_MAX_PAYLOAD (127 - 2) + +/** Currently, this state is global, since there's only one 6LoWPAN netif */ +static struct lowpan6_ieee802154_data lowpan6_data; + +#if LWIP_6LOWPAN_NUM_CONTEXTS > 0 +#define LWIP_6LOWPAN_CONTEXTS(netif) lowpan6_data.lowpan6_context +#else +#define LWIP_6LOWPAN_CONTEXTS(netif) NULL +#endif + +static const struct lowpan6_link_addr ieee_802154_broadcast = {2, {0xff, 0xff}}; + +#if LWIP_6LOWPAN_INFER_SHORT_ADDRESS +static struct lowpan6_link_addr short_mac_addr = {2, {0, 0}}; +#endif /* LWIP_6LOWPAN_INFER_SHORT_ADDRESS */ + +/* IEEE 802.15.4 specific functions: */ + +/** Write the IEEE 802.15.4 header that encapsulates the 6LoWPAN frame. + * Src and dst PAN IDs are filled with the ID set by @ref lowpan6_set_pan_id. 
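+ * PAN ID compression is always set in the frame control field, so only the destination
+ * PAN ID is written and the source PAN ID is elided (src and dst PAN are treated as equal).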
+ * + * Since the length is variable: + * @returns the header length + */ +static u8_t +lowpan6_write_iee802154_header(struct ieee_802154_hdr *hdr, const struct lowpan6_link_addr *src, + const struct lowpan6_link_addr *dst) +{ + u8_t ieee_header_len; + u8_t *buffer; + u8_t i; + u16_t fc; + + fc = IEEE_802154_FC_FT_DATA; /* send data packet (2003 frame version) */ + fc |= IEEE_802154_FC_PANID_COMPR; /* set PAN ID compression, for now src and dst PANs are equal */ + if (dst != &ieee_802154_broadcast) { + fc |= IEEE_802154_FC_ACK_REQ; /* data packet, no broadcast: ack required. */ + } + if (dst->addr_len == 2) { + fc |= IEEE_802154_FC_DST_ADDR_MODE_SHORT; + } else { + LWIP_ASSERT("invalid dst address length", dst->addr_len == 8); + fc |= IEEE_802154_FC_DST_ADDR_MODE_EXT; + } + if (src->addr_len == 2) { + fc |= IEEE_802154_FC_SRC_ADDR_MODE_SHORT; + } else { + LWIP_ASSERT("invalid src address length", src->addr_len == 8); + fc |= IEEE_802154_FC_SRC_ADDR_MODE_EXT; + } + hdr->frame_control = fc; + hdr->sequence_number = lowpan6_data.tx_frame_seq_num++; + hdr->destination_pan_id = lowpan6_data.ieee_802154_pan_id; /* pan id */ + + buffer = (u8_t *)hdr; + ieee_header_len = 5; + i = dst->addr_len; + /* reverse memcpy of dst addr */ + while (i-- > 0) { + buffer[ieee_header_len++] = dst->addr[i]; + } + /* Source PAN ID skipped due to PAN ID Compression */ + i = src->addr_len; + /* reverse memcpy of src addr */ + while (i-- > 0) { + buffer[ieee_header_len++] = src->addr[i]; + } + return ieee_header_len; +} + +/** Parse the IEEE 802.15.4 header from a pbuf. + * If successful, the header is hidden from the pbuf. + * + * PAN IDs and seuqence number are not checked + * + * @param p input pbuf, p->payload pointing at the IEEE 802.15.4 header + * @param src pointer to source address filled from the header + * @param dest pointer to destination address filled from the header + * @returns ERR_OK if successful + */ +static err_t +lowpan6_parse_iee802154_header(struct pbuf *p, struct lowpan6_link_addr *src, + struct lowpan6_link_addr *dest) +{ + u8_t *puc; + s8_t i; + u16_t frame_control, addr_mode; + u16_t datagram_offset; + + /* Parse IEEE 802.15.4 header */ + puc = (u8_t *)p->payload; + frame_control = puc[0] | (puc[1] << 8); + datagram_offset = 2; + if (frame_control & IEEE_802154_FC_SEQNO_SUPPR) { + if (IEEE_802154_FC_FRAME_VERSION_GET(frame_control) <= 1) { + /* sequence number suppressed, this is not valid for versions 0/1 */ + return ERR_VAL; + } + } else { + datagram_offset++; + } + datagram_offset += 2; /* Skip destination PAN ID */ + addr_mode = frame_control & IEEE_802154_FC_DST_ADDR_MODE_MASK; + if (addr_mode == IEEE_802154_FC_DST_ADDR_MODE_EXT) { + /* extended address (64 bit) */ + dest->addr_len = 8; + /* reverse memcpy: */ + for (i = 0; i < 8; i++) { + dest->addr[i] = puc[datagram_offset + 7 - i]; + } + datagram_offset += 8; + } else if (addr_mode == IEEE_802154_FC_DST_ADDR_MODE_SHORT) { + /* short address (16 bit) */ + dest->addr_len = 2; + /* reverse memcpy: */ + dest->addr[0] = puc[datagram_offset + 1]; + dest->addr[1] = puc[datagram_offset]; + datagram_offset += 2; + } else { + /* unsupported address mode (do we need "no address"?) 
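+         Only short (16-bit) and extended (64-bit) destination addressing is handled here;
+         for any other mode the ERR_VAL return below makes the caller drop the frame.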
*/ + return ERR_VAL; + } + + if (!(frame_control & IEEE_802154_FC_PANID_COMPR)) { + /* No PAN ID compression, skip source PAN ID */ + datagram_offset += 2; + } + + addr_mode = frame_control & IEEE_802154_FC_SRC_ADDR_MODE_MASK; + if (addr_mode == IEEE_802154_FC_SRC_ADDR_MODE_EXT) { + /* extended address (64 bit) */ + src->addr_len = 8; + /* reverse memcpy: */ + for (i = 0; i < 8; i++) { + src->addr[i] = puc[datagram_offset + 7 - i]; + } + datagram_offset += 8; + } else if (addr_mode == IEEE_802154_FC_DST_ADDR_MODE_SHORT) { + /* short address (16 bit) */ + src->addr_len = 2; + src->addr[0] = puc[datagram_offset + 1]; + src->addr[1] = puc[datagram_offset]; + datagram_offset += 2; + } else { + /* unsupported address mode (do we need "no address"?) */ + return ERR_VAL; + } + + /* hide IEEE802.15.4 header. */ + if (pbuf_remove_header(p, datagram_offset)) { + return ERR_VAL; + } + return ERR_OK; +} + +/** Calculate the 16-bit CRC as required by IEEE 802.15.4 */ +u16_t +lowpan6_calc_crc(const void* buf, u16_t len) +{ +#define CCITT_POLY_16 0x8408U + u16_t i; + u8_t b; + u16_t crc = 0; + const u8_t* p = (const u8_t*)buf; + + for (i = 0; i < len; i++) { + u8_t data = *p; + for (b = 0U; b < 8U; b++) { + if (((data ^ crc) & 1) != 0) { + crc = (u16_t)((crc >> 1) ^ CCITT_POLY_16); + } else { + crc = (u16_t)(crc >> 1); + } + data = (u8_t)(data >> 1); + } + p++; + } + return crc; +} + +/* Fragmentation specific functions: */ + +static void +free_reass_datagram(struct lowpan6_reass_helper *lrh) +{ + if (lrh->reass) { + pbuf_free(lrh->reass); + } + if (lrh->frags) { + pbuf_free(lrh->frags); + } + mem_free(lrh); +} + +/** + * Removes a datagram from the reassembly queue. + **/ +static void +dequeue_datagram(struct lowpan6_reass_helper *lrh, struct lowpan6_reass_helper *prev) +{ + if (lowpan6_data.reass_list == lrh) { + lowpan6_data.reass_list = lowpan6_data.reass_list->next_packet; + } else { + /* it wasn't the first, so it must have a valid 'prev' */ + LWIP_ASSERT("sanity check linked list", prev != NULL); + prev->next_packet = lrh->next_packet; + } +} + +/** + * Periodic timer for 6LowPAN functions: + * + * - Remove incomplete/old packets + */ +void +lowpan6_tmr(void) +{ + struct lowpan6_reass_helper *lrh, *lrh_next, *lrh_prev = NULL; + + lrh = lowpan6_data.reass_list; + while (lrh != NULL) { + lrh_next = lrh->next_packet; + if ((--lrh->timer) == 0) { + dequeue_datagram(lrh, lrh_prev); + free_reass_datagram(lrh); + } else { + lrh_prev = lrh; + } + lrh = lrh_next; + } +} + +/* + * Encapsulates data into IEEE 802.15.4 frames. + * Fragments an IPv6 datagram into 6LowPAN units, which fit into IEEE 802.15.4 frames. + * If configured, will compress IPv6 and or UDP headers. + * */ +static err_t +lowpan6_frag(struct netif *netif, struct pbuf *p, const struct lowpan6_link_addr *src, const struct lowpan6_link_addr *dst) +{ + struct pbuf *p_frag; + u16_t frag_len, remaining_len, max_data_len; + u8_t *buffer; + u8_t ieee_header_len; + u8_t lowpan6_header_len; + u8_t hidden_header_len; + u16_t crc; + u16_t datagram_offset; + err_t err = ERR_IF; + + LWIP_ASSERT("lowpan6_frag: netif->linkoutput not set", netif->linkoutput != NULL); + + /* We'll use a dedicated pbuf for building 6LowPAN fragments. */ + p_frag = pbuf_alloc(PBUF_RAW, 127, PBUF_RAM); + if (p_frag == NULL) { + MIB2_STATS_NETIF_INC(netif, ifoutdiscards); + return ERR_MEM; + } + LWIP_ASSERT("this needs a pbuf in one piece", p_frag->len == p_frag->tot_len); + + /* Write IEEE 802.15.4 header. 
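+     The header length depends on short vs. extended addressing, so the value returned
+     by lowpan6_write_iee802154_header() is kept in ieee_header_len and used as the
+     write offset for everything that follows in the fragment buffer.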
*/ + buffer = (u8_t *)p_frag->payload; + ieee_header_len = lowpan6_write_iee802154_header((struct ieee_802154_hdr *)buffer, src, dst); + LWIP_ASSERT("ieee_header_len < p_frag->len", ieee_header_len < p_frag->len); + +#if LWIP_6LOWPAN_IPHC + /* Perform 6LowPAN IPv6 header compression according to RFC 6282 */ + /* do the header compression (this does NOT copy any non-compressed data) */ + err = lowpan6_compress_headers(netif, (u8_t *)p->payload, p->len, + &buffer[ieee_header_len], p_frag->len - ieee_header_len, &lowpan6_header_len, + &hidden_header_len, LWIP_6LOWPAN_CONTEXTS(netif), src, dst); + if (err != ERR_OK) { + MIB2_STATS_NETIF_INC(netif, ifoutdiscards); + pbuf_free(p_frag); + return err; + } + pbuf_remove_header(p, hidden_header_len); + +#else /* LWIP_6LOWPAN_IPHC */ + /* Send uncompressed IPv6 header with appropriate dispatch byte. */ + lowpan6_header_len = 1; + buffer[ieee_header_len] = 0x41; /* IPv6 dispatch */ +#endif /* LWIP_6LOWPAN_IPHC */ + + /* Calculate remaining packet length */ + remaining_len = p->tot_len; + + if (remaining_len > 0x7FF) { + MIB2_STATS_NETIF_INC(netif, ifoutdiscards); + /* datagram_size must fit into 11 bit */ + pbuf_free(p_frag); + return ERR_VAL; + } + + /* Fragment, or 1 packet? */ + max_data_len = LOWPAN6_MAX_PAYLOAD - ieee_header_len - lowpan6_header_len; + if (remaining_len > max_data_len) { + u16_t data_len; + /* We must move the 6LowPAN header to make room for the FRAG header. */ + memmove(&buffer[ieee_header_len + 4], &buffer[ieee_header_len], lowpan6_header_len); + + /* Now we need to fragment the packet. FRAG1 header first */ + buffer[ieee_header_len] = 0xc0 | (((p->tot_len + hidden_header_len) >> 8) & 0x7); + buffer[ieee_header_len + 1] = (p->tot_len + hidden_header_len) & 0xff; + + lowpan6_data.tx_datagram_tag++; + buffer[ieee_header_len + 2] = (lowpan6_data.tx_datagram_tag >> 8) & 0xff; + buffer[ieee_header_len + 3] = lowpan6_data.tx_datagram_tag & 0xff; + + /* Fragment follows. */ + data_len = (max_data_len - 4) & 0xf8; + frag_len = data_len + lowpan6_header_len; + + pbuf_copy_partial(p, buffer + ieee_header_len + lowpan6_header_len + 4, frag_len - lowpan6_header_len, 0); + remaining_len -= frag_len - lowpan6_header_len; + /* datagram offset holds the offset before compression */ + datagram_offset = frag_len - lowpan6_header_len + hidden_header_len; + LWIP_ASSERT("datagram offset must be a multiple of 8", (datagram_offset & 7) == 0); + + /* Calculate frame length */ + p_frag->len = p_frag->tot_len = ieee_header_len + 4 + frag_len + 2; /* add 2 bytes for crc*/ + + /* 2 bytes CRC */ + crc = LWIP_6LOWPAN_DO_CALC_CRC(p_frag->payload, p_frag->len - 2); + pbuf_take_at(p_frag, &crc, 2, p_frag->len - 2); + + /* send the packet */ + MIB2_STATS_NETIF_ADD(netif, ifoutoctets, p_frag->tot_len); + LWIP_DEBUGF(LWIP_LOWPAN6_DEBUG | LWIP_DBG_TRACE, ("lowpan6_send: sending packet %p\n", (void *)p)); + err = netif->linkoutput(netif, p_frag); + + while ((remaining_len > 0) && (err == ERR_OK)) { + struct ieee_802154_hdr *hdr = (struct ieee_802154_hdr *)buffer; + /* new frame, new seq num for ACK */ + hdr->sequence_number = lowpan6_data.tx_frame_seq_num++; + + buffer[ieee_header_len] |= 0x20; /* Change FRAG1 to FRAGN */ + + LWIP_ASSERT("datagram offset must be a multiple of 8", (datagram_offset & 7) == 0); + buffer[ieee_header_len + 4] = (u8_t)(datagram_offset >> 3); /* datagram offset in FRAGN header (datagram_offset is max. 
11 bit) */ + + frag_len = (127 - ieee_header_len - 5 - 2) & 0xf8; + if (frag_len > remaining_len) { + frag_len = remaining_len; + } + + pbuf_copy_partial(p, buffer + ieee_header_len + 5, frag_len, p->tot_len - remaining_len); + remaining_len -= frag_len; + datagram_offset += frag_len; + + /* Calculate frame length */ + p_frag->len = p_frag->tot_len = frag_len + 5 + ieee_header_len + 2; + + /* 2 bytes CRC */ + crc = LWIP_6LOWPAN_DO_CALC_CRC(p_frag->payload, p_frag->len - 2); + pbuf_take_at(p_frag, &crc, 2, p_frag->len - 2); + + /* send the packet */ + MIB2_STATS_NETIF_ADD(netif, ifoutoctets, p_frag->tot_len); + LWIP_DEBUGF(LWIP_LOWPAN6_DEBUG | LWIP_DBG_TRACE, ("lowpan6_send: sending packet %p\n", (void *)p)); + err = netif->linkoutput(netif, p_frag); + } + } else { + /* It fits in one frame. */ + frag_len = remaining_len; + + /* Copy IPv6 packet */ + pbuf_copy_partial(p, buffer + ieee_header_len + lowpan6_header_len, frag_len, 0); + remaining_len = 0; + + /* Calculate frame length */ + p_frag->len = p_frag->tot_len = frag_len + lowpan6_header_len + ieee_header_len + 2; + LWIP_ASSERT("", p_frag->len <= 127); + + /* 2 bytes CRC */ + crc = LWIP_6LOWPAN_DO_CALC_CRC(p_frag->payload, p_frag->len - 2); + pbuf_take_at(p_frag, &crc, 2, p_frag->len - 2); + + /* send the packet */ + MIB2_STATS_NETIF_ADD(netif, ifoutoctets, p_frag->tot_len); + LWIP_DEBUGF(LWIP_LOWPAN6_DEBUG | LWIP_DBG_TRACE, ("lowpan6_send: sending packet %p\n", (void *)p)); + err = netif->linkoutput(netif, p_frag); + } + + pbuf_free(p_frag); + + return err; +} + +/** + * @ingroup sixlowpan + * Set context + */ +err_t +lowpan6_set_context(u8_t idx, const ip6_addr_t *context) +{ +#if LWIP_6LOWPAN_NUM_CONTEXTS > 0 + if (idx >= LWIP_6LOWPAN_NUM_CONTEXTS) { + return ERR_ARG; + } + + IP6_ADDR_ZONECHECK(context); + + ip6_addr_set(&lowpan6_data.lowpan6_context[idx], context); + + return ERR_OK; +#else + LWIP_UNUSED_ARG(idx); + LWIP_UNUSED_ARG(context); + return ERR_ARG; +#endif +} + +#if LWIP_6LOWPAN_INFER_SHORT_ADDRESS +/** + * @ingroup sixlowpan + * Set short address + */ +err_t +lowpan6_set_short_addr(u8_t addr_high, u8_t addr_low) +{ + short_mac_addr.addr[0] = addr_high; + short_mac_addr.addr[1] = addr_low; + + return ERR_OK; +} +#endif /* LWIP_6LOWPAN_INFER_SHORT_ADDRESS */ + +/* Create IEEE 802.15.4 address from netif address */ +static err_t +lowpan6_hwaddr_to_addr(struct netif *netif, struct lowpan6_link_addr *addr) +{ + addr->addr_len = 8; + if (netif->hwaddr_len == 8) { + LWIP_ERROR("NETIF_MAX_HWADDR_LEN >= 8 required", sizeof(netif->hwaddr) >= 8, return ERR_VAL;); + SMEMCPY(addr->addr, netif->hwaddr, 8); + } else if (netif->hwaddr_len == 6) { + /* Copy from MAC-48 */ + SMEMCPY(addr->addr, netif->hwaddr, 3); + addr->addr[3] = addr->addr[4] = 0xff; + SMEMCPY(&addr->addr[5], &netif->hwaddr[3], 3); + } else { + /* Invalid address length, don't know how to convert this */ + return ERR_VAL; + } + return ERR_OK; +} + +/** + * @ingroup sixlowpan + * Resolve and fill-in IEEE 802.15.4 address header for outgoing IPv6 packet. + * + * Perform Header Compression and fragment if necessary. + * + * @param netif The lwIP network interface which the IP packet will be sent on. + * @param q The pbuf(s) containing the IP packet to be sent. + * @param ip6addr The IP address of the packet destination. 
+ * + * @return err_t + */ +err_t +lowpan6_output(struct netif *netif, struct pbuf *q, const ip6_addr_t *ip6addr) +{ + err_t result; + const u8_t *hwaddr; + struct lowpan6_link_addr src, dest; +#if LWIP_6LOWPAN_INFER_SHORT_ADDRESS + ip6_addr_t ip6_src; + struct ip6_hdr *ip6_hdr; +#endif /* LWIP_6LOWPAN_INFER_SHORT_ADDRESS */ + +#if LWIP_6LOWPAN_INFER_SHORT_ADDRESS + /* Check if we can compress source address (use aligned copy) */ + ip6_hdr = (struct ip6_hdr *)q->payload; + ip6_addr_copy_from_packed(ip6_src, ip6_hdr->src); + ip6_addr_assign_zone(&ip6_src, IP6_UNICAST, netif); + if (lowpan6_get_address_mode(&ip6_src, &short_mac_addr) == 3) { + src.addr_len = 2; + src.addr[0] = short_mac_addr.addr[0]; + src.addr[1] = short_mac_addr.addr[1]; + } else +#endif /* LWIP_6LOWPAN_INFER_SHORT_ADDRESS */ + { + result = lowpan6_hwaddr_to_addr(netif, &src); + if (result != ERR_OK) { + MIB2_STATS_NETIF_INC(netif, ifoutdiscards); + return result; + } + } + + /* multicast destination IP address? */ + if (ip6_addr_ismulticast(ip6addr)) { + MIB2_STATS_NETIF_INC(netif, ifoutnucastpkts); + /* We need to send to the broadcast address.*/ + return lowpan6_frag(netif, q, &src, &ieee_802154_broadcast); + } + + /* We have a unicast destination IP address */ + /* @todo anycast? */ + +#if LWIP_6LOWPAN_INFER_SHORT_ADDRESS + if (src.addr_len == 2) { + /* If source address was compressable to short_mac_addr, and dest has same subnet and + * is also compressable to 2-bytes, assume we can infer dest as a short address too. */ + dest.addr_len = 2; + dest.addr[0] = ((u8_t *)q->payload)[38]; + dest.addr[1] = ((u8_t *)q->payload)[39]; + if ((src.addr_len == 2) && (ip6_addr_netcmp_zoneless(&ip6_hdr->src, &ip6_hdr->dest)) && + (lowpan6_get_address_mode(ip6addr, &dest) == 3)) { + MIB2_STATS_NETIF_INC(netif, ifoutucastpkts); + return lowpan6_frag(netif, q, &src, &dest); + } + } +#endif /* LWIP_6LOWPAN_INFER_SHORT_ADDRESS */ + + /* Ask ND6 what to do with the packet. */ + result = nd6_get_next_hop_addr_or_queue(netif, q, ip6addr, &hwaddr); + if (result != ERR_OK) { + MIB2_STATS_NETIF_INC(netif, ifoutdiscards); + return result; + } + + /* If no hardware address is returned, nd6 has queued the packet for later. */ + if (hwaddr == NULL) { + return ERR_OK; + } + + /* Send out the packet using the returned hardware address. */ + dest.addr_len = netif->hwaddr_len; + /* XXX: Inferring the length of the source address from the destination address + * is not correct for IEEE 802.15.4, but currently we don't get this information + * from the neighbor cache */ + SMEMCPY(dest.addr, hwaddr, netif->hwaddr_len); + MIB2_STATS_NETIF_INC(netif, ifoutucastpkts); + return lowpan6_frag(netif, q, &src, &dest); +} +/** + * @ingroup sixlowpan + * NETIF input function: don't free the input pbuf when returning != ERR_OK! + */ +err_t +lowpan6_input(struct pbuf *p, struct netif *netif) +{ + u8_t *puc, b; + s8_t i; + struct lowpan6_link_addr src, dest; + u16_t datagram_size = 0; + u16_t datagram_offset, datagram_tag; + struct lowpan6_reass_helper *lrh, *lrh_next, *lrh_prev = NULL; + + if (p == NULL) { + return ERR_OK; + } + + MIB2_STATS_NETIF_ADD(netif, ifinoctets, p->tot_len); + + if (p->len != p->tot_len) { + /* for now, this needs a pbuf in one piece */ + goto lowpan6_input_discard; + } + + if (lowpan6_parse_iee802154_header(p, &src, &dest) != ERR_OK) { + goto lowpan6_input_discard; + } + + /* Check dispatch. */ + puc = (u8_t *)p->payload; + + b = *puc; + if ((b & 0xf8) == 0xc0) { + /* FRAG1 dispatch. add this packet to reassembly list. 
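+       A FRAG1 header (RFC 4944) is 4 bytes: dispatch bits 11000 plus an 11-bit
+       datagram_size, followed by a 16-bit datagram_tag; both fields are parsed below.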
*/ + datagram_size = ((u16_t)(puc[0] & 0x07) << 8) | (u16_t)puc[1]; + datagram_tag = ((u16_t)puc[2] << 8) | (u16_t)puc[3]; + + /* check for duplicate */ + lrh = lowpan6_data.reass_list; + while (lrh != NULL) { + uint8_t discard = 0; + lrh_next = lrh->next_packet; + if ((lrh->sender_addr.addr_len == src.addr_len) && + (memcmp(lrh->sender_addr.addr, src.addr, src.addr_len) == 0)) { + /* address match with packet in reassembly. */ + if ((datagram_tag == lrh->datagram_tag) && (datagram_size == lrh->datagram_size)) { + /* duplicate fragment. */ + goto lowpan6_input_discard; + } else { + /* We are receiving the start of a new datagram. Discard old one (incomplete). */ + discard = 1; + } + } + if (discard) { + dequeue_datagram(lrh, lrh_prev); + free_reass_datagram(lrh); + } else { + lrh_prev = lrh; + } + /* Check next datagram in queue. */ + lrh = lrh_next; + } + + pbuf_remove_header(p, 4); /* hide frag1 dispatch */ + + lrh = (struct lowpan6_reass_helper *) mem_malloc(sizeof(struct lowpan6_reass_helper)); + if (lrh == NULL) { + goto lowpan6_input_discard; + } + + lrh->sender_addr.addr_len = src.addr_len; + for (i = 0; i < src.addr_len; i++) { + lrh->sender_addr.addr[i] = src.addr[i]; + } + lrh->datagram_size = datagram_size; + lrh->datagram_tag = datagram_tag; + lrh->frags = NULL; + if (*(u8_t *)p->payload == 0x41) { + /* This is a complete IPv6 packet, just skip dispatch byte. */ + pbuf_remove_header(p, 1); /* hide dispatch byte. */ + lrh->reass = p; + } else if ((*(u8_t *)p->payload & 0xe0 ) == 0x60) { + lrh->reass = lowpan6_decompress(p, datagram_size, LWIP_6LOWPAN_CONTEXTS(netif), &src, &dest); + if (lrh->reass == NULL) { + /* decompression failed */ + mem_free(lrh); + goto lowpan6_input_discard; + } + } + /* TODO: handle the case where we already have FRAGN received */ + lrh->next_packet = lowpan6_data.reass_list; + lrh->timer = 2; + lowpan6_data.reass_list = lrh; + + return ERR_OK; + } else if ((b & 0xf8) == 0xe0) { + /* FRAGN dispatch, find packet being reassembled. */ + datagram_size = ((u16_t)(puc[0] & 0x07) << 8) | (u16_t)puc[1]; + datagram_tag = ((u16_t)puc[2] << 8) | (u16_t)puc[3]; + datagram_offset = (u16_t)puc[4] << 3; + pbuf_remove_header(p, 4); /* hide frag1 dispatch but keep datagram offset for reassembly */ + + for (lrh = lowpan6_data.reass_list; lrh != NULL; lrh_prev = lrh, lrh = lrh->next_packet) { + if ((lrh->sender_addr.addr_len == src.addr_len) && + (memcmp(lrh->sender_addr.addr, src.addr, src.addr_len) == 0) && + (datagram_tag == lrh->datagram_tag) && + (datagram_size == lrh->datagram_size)) { + break; + } + } + if (lrh == NULL) { + /* rogue fragment */ + goto lowpan6_input_discard; + } + /* Insert new pbuf into list of fragments. Each fragment is a pbuf, + this only works for unchained pbufs. 
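+       Each queued fragment keeps its datagram-offset byte (offset in units of 8 bytes)
+       as the first payload byte, so the insertion and completeness checks below can
+       sort fragments and detect overlaps or duplicates.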
*/ + LWIP_ASSERT("p->next == NULL", p->next == NULL); + if (lrh->reass != NULL) { + /* FRAG1 already received, check this offset against first len */ + if (datagram_offset < lrh->reass->len) { + /* fragment overlap, discard old fragments */ + dequeue_datagram(lrh, lrh_prev); + free_reass_datagram(lrh); + goto lowpan6_input_discard; + } + } + if (lrh->frags == NULL) { + /* first FRAGN */ + lrh->frags = p; + } else { + /* find the correct place to insert */ + struct pbuf *q, *last; + u16_t new_frag_len = p->len - 1; /* p->len includes datagram_offset byte */ + for (q = lrh->frags, last = NULL; q != NULL; last = q, q = q->next) { + u16_t q_datagram_offset = ((u8_t *)q->payload)[0] << 3; + u16_t q_frag_len = q->len - 1; + if (datagram_offset < q_datagram_offset) { + if (datagram_offset + new_frag_len > q_datagram_offset) { + /* overlap, discard old fragments */ + dequeue_datagram(lrh, lrh_prev); + free_reass_datagram(lrh); + goto lowpan6_input_discard; + } + /* insert here */ + break; + } else if (datagram_offset == q_datagram_offset) { + if (q_frag_len != new_frag_len) { + /* fragment mismatch, discard old fragments */ + dequeue_datagram(lrh, lrh_prev); + free_reass_datagram(lrh); + goto lowpan6_input_discard; + } + /* duplicate, ignore */ + pbuf_free(p); + return ERR_OK; + } + } + /* insert fragment */ + if (last == NULL) { + lrh->frags = p; + } else { + last->next = p; + p->next = q; + } + } + /* check if all fragments were received */ + if (lrh->reass) { + u16_t offset = lrh->reass->len; + struct pbuf *q; + for (q = lrh->frags; q != NULL; q = q->next) { + u16_t q_datagram_offset = ((u8_t *)q->payload)[0] << 3; + if (q_datagram_offset != offset) { + /* not complete, wait for more fragments */ + return ERR_OK; + } + offset += q->len - 1; + } + if (offset == datagram_size) { + /* all fragments received, combine pbufs */ + u16_t datagram_left = datagram_size - lrh->reass->len; + for (q = lrh->frags; q != NULL; q = q->next) { + /* hide datagram_offset byte now */ + pbuf_remove_header(q, 1); + q->tot_len = datagram_left; + datagram_left -= q->len; + } + LWIP_ASSERT("datagram_left == 0", datagram_left == 0); + q = lrh->reass; + q->tot_len = datagram_size; + q->next = lrh->frags; + lrh->frags = NULL; + lrh->reass = NULL; + dequeue_datagram(lrh, lrh_prev); + mem_free(lrh); + + /* @todo: distinguish unicast/multicast */ + MIB2_STATS_NETIF_INC(netif, ifinucastpkts); + return ip6_input(q, netif); + } + } + /* pbuf enqueued, waiting for more fragments */ + return ERR_OK; + } else { + if (b == 0x41) { + /* This is a complete IPv6 packet, just skip dispatch byte. */ + pbuf_remove_header(p, 1); /* hide dispatch byte. */ + } else if ((b & 0xe0 ) == 0x60) { + /* IPv6 headers are compressed using IPHC. 
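+         The dispatch pattern 011xxxxx (RFC 6282) marks an IPHC-compressed header;
+         lowpan6_decompress() rebuilds the full IPv6 (and possibly UDP) header before
+         the packet is handed to ip6_input().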
*/ + p = lowpan6_decompress(p, datagram_size, LWIP_6LOWPAN_CONTEXTS(netif), &src, &dest); + if (p == NULL) { + MIB2_STATS_NETIF_INC(netif, ifindiscards); + return ERR_OK; + } + } else { + goto lowpan6_input_discard; + } + + /* @todo: distinguish unicast/multicast */ + MIB2_STATS_NETIF_INC(netif, ifinucastpkts); + + return ip6_input(p, netif); + } +lowpan6_input_discard: + MIB2_STATS_NETIF_INC(netif, ifindiscards); + pbuf_free(p); + /* always return ERR_OK here to prevent the caller freeing the pbuf */ + return ERR_OK; +} + +/** + * @ingroup sixlowpan + */ +err_t +lowpan6_if_init(struct netif *netif) +{ + netif->name[0] = 'L'; + netif->name[1] = '6'; + netif->output_ip6 = lowpan6_output; + + MIB2_INIT_NETIF(netif, snmp_ifType_other, 0); + + /* maximum transfer unit */ + netif->mtu = 1280; + + /* broadcast capability */ + netif->flags = NETIF_FLAG_BROADCAST /* | NETIF_FLAG_LOWPAN6 */; + + return ERR_OK; +} + +/** + * @ingroup sixlowpan + * Set PAN ID + */ +err_t +lowpan6_set_pan_id(u16_t pan_id) +{ + lowpan6_data.ieee_802154_pan_id = pan_id; + + return ERR_OK; +} + +#if !NO_SYS +/** + * @ingroup sixlowpan + * Pass a received packet to tcpip_thread for input processing + * + * @param p the received packet, p->payload pointing to the + * IEEE 802.15.4 header. + * @param inp the network interface on which the packet was received + */ +err_t +tcpip_6lowpan_input(struct pbuf *p, struct netif *inp) +{ + return tcpip_inpkt(p, inp, lowpan6_input); +} +#endif /* !NO_SYS */ + +#endif /* LWIP_IPV6 */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/netif/lowpan6_ble.c b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/netif/lowpan6_ble.c new file mode 100644 index 0000000..37bd4ce --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/netif/lowpan6_ble.c @@ -0,0 +1,447 @@ +/** + * @file + * 6LowPAN over BLE output for IPv6 (RFC7668). +*/ + +/* + * Copyright (c) 2017 Benjamin Aigner + * Copyright (c) 2015 Inico Technologies Ltd. , Author: Ivan Delamer + * + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. 
+ * + * Author: Benjamin Aigner + * + * Based on the original 6lowpan implementation of lwIP ( @see 6lowpan.c) + */ + + +/** + * @defgroup rfc7668if 6LoWPAN over BLE (RFC7668) + * @ingroup netifs + * This file implements a RFC7668 implementation for 6LoWPAN over + * Bluetooth Low Energy. The specification is very similar to 6LoWPAN, + * so most of the code is re-used. + * Compared to 6LoWPAN, much functionality is already implemented in + * lower BLE layers (fragmenting, session management,...). + * + * Usage: + * - add this netif + * - don't add IPv4 addresses (no IPv4 support in RFC7668), pass 'NULL','NULL','NULL' + * - use the BLE to EUI64 conversation util to create an IPv6 link-local address from the BLE MAC (@ref ble_addr_to_eui64) + * - input function: @ref rfc7668_input + * - set the link output function, which transmits output data to an established L2CAP channel + * - If data arrives (HCI event "L2CAP_DATA_PACKET"): + * - allocate a @ref PBUF_RAW buffer + * - let the pbuf struct point to the incoming data or copy it to the buffer + * - call netif->input + * + * @todo: + * - further testing + * - support compression contexts + * - support multiple addresses + * - support multicast + * - support neighbor discovery + */ + + +#include "netif/lowpan6_ble.h" + +#if LWIP_IPV6 + +#include "lwip/ip.h" +#include "lwip/pbuf.h" +#include "lwip/ip_addr.h" +#include "lwip/netif.h" +#include "lwip/nd6.h" +#include "lwip/mem.h" +#include "lwip/udp.h" +#include "lwip/tcpip.h" +#include "lwip/snmp.h" + +#include + +#if LWIP_6LOWPAN_NUM_CONTEXTS > 0 +/** context memory, containing IPv6 addresses */ +static ip6_addr_t rfc7668_context[LWIP_6LOWPAN_NUM_CONTEXTS]; +#else +#define rfc7668_context NULL +#endif + +static struct lowpan6_link_addr rfc7668_local_addr; +static struct lowpan6_link_addr rfc7668_peer_addr; + +/** + * @ingroup rfc7668if + * convert BT address to EUI64 addr + * + * This method converts a Bluetooth MAC address to an EUI64 address, + * which is used within IPv6 communication + * + * @param dst IPv6 destination space + * @param src BLE MAC address source + * @param public_addr If the LWIP_RFC7668_LINUX_WORKAROUND_PUBLIC_ADDRESS + * option is set, bit 0x02 will be set if param=0 (no public addr); cleared otherwise + * + * @see LWIP_RFC7668_LINUX_WORKAROUND_PUBLIC_ADDRESS + */ +void +ble_addr_to_eui64(uint8_t *dst, const uint8_t *src, int public_addr) +{ + /* according to RFC7668 ch 3.2.2. */ + memcpy(dst, src, 3); + dst[3] = 0xFF; + dst[4] = 0xFE; + memcpy(&dst[5], &src[3], 3); +#if LWIP_RFC7668_LINUX_WORKAROUND_PUBLIC_ADDRESS + if(public_addr) { + dst[0] &= ~0x02; + } else { + dst[0] |= 0x02; + } +#else + LWIP_UNUSED_ARG(public_addr); +#endif +} + +/** + * @ingroup rfc7668if + * convert EUI64 address to Bluetooth MAC addr + * + * This method converts an EUI64 address to a Bluetooth MAC address, + * + * @param dst BLE MAC address destination + * @param src IPv6 source + * + */ +void +eui64_to_ble_addr(uint8_t *dst, const uint8_t *src) +{ + /* according to RFC7668 ch 3.2.2. */ + memcpy(dst,src,3); + memcpy(&dst[3],&src[5],3); +} + +/** Set an address used for stateful compression. + * This expects an address of 6 or 8 bytes. 
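+ * A 6-byte address (is_mac_48 set) is first expanded to an EUI-64 via ble_addr_to_eui64();
+ * an 8-byte address is stored unchanged.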
+ */ +static err_t +rfc7668_set_addr(struct lowpan6_link_addr *addr, const u8_t *in_addr, size_t in_addr_len, int is_mac_48, int is_public_addr) +{ + if ((in_addr == NULL) || (addr == NULL)) { + return ERR_VAL; + } + if (is_mac_48) { + if (in_addr_len != 6) { + return ERR_VAL; + } + addr->addr_len = 8; + ble_addr_to_eui64(addr->addr, in_addr, is_public_addr); + } else { + if (in_addr_len != 8) { + return ERR_VAL; + } + addr->addr_len = 8; + memcpy(addr->addr, in_addr, 8); + } + return ERR_OK; +} + + +/** Set the local address used for stateful compression. + * This expects an address of 8 bytes. + */ +err_t +rfc7668_set_local_addr_eui64(struct netif *netif, const u8_t *local_addr, size_t local_addr_len) +{ + /* netif not used for now, the address is stored globally... */ + LWIP_UNUSED_ARG(netif); + return rfc7668_set_addr(&rfc7668_local_addr, local_addr, local_addr_len, 0, 0); +} + +/** Set the local address used for stateful compression. + * This expects an address of 6 bytes. + */ +err_t +rfc7668_set_local_addr_mac48(struct netif *netif, const u8_t *local_addr, size_t local_addr_len, int is_public_addr) +{ + /* netif not used for now, the address is stored globally... */ + LWIP_UNUSED_ARG(netif); + return rfc7668_set_addr(&rfc7668_local_addr, local_addr, local_addr_len, 1, is_public_addr); +} + +/** Set the peer address used for stateful compression. + * This expects an address of 8 bytes. + */ +err_t +rfc7668_set_peer_addr_eui64(struct netif *netif, const u8_t *peer_addr, size_t peer_addr_len) +{ + /* netif not used for now, the address is stored globally... */ + LWIP_UNUSED_ARG(netif); + return rfc7668_set_addr(&rfc7668_peer_addr, peer_addr, peer_addr_len, 0, 0); +} + +/** Set the peer address used for stateful compression. + * This expects an address of 6 bytes. + */ +err_t +rfc7668_set_peer_addr_mac48(struct netif *netif, const u8_t *peer_addr, size_t peer_addr_len, int is_public_addr) +{ + /* netif not used for now, the address is stored globally... */ + LWIP_UNUSED_ARG(netif); + return rfc7668_set_addr(&rfc7668_peer_addr, peer_addr, peer_addr_len, 1, is_public_addr); +} + +/** Encapsulate IPv6 frames for BLE transmission + * + * This method implements the IPv6 header compression: + * *) According to RFC6282 + * *) See Figure 2, contains base format of bit positions + * *) Fragmentation not necessary (done at L2CAP layer of BLE) + * @note Currently the pbuf allocation uses 256 bytes. If longer packets are used (possible due to MTU=1480Bytes), increase it here! + * + * @param p Pbuf struct, containing the payload data + * @param netif Output network interface. Should be of RFC7668 type + * + * @return Same as netif->output. + */ +static err_t +rfc7668_compress(struct netif *netif, struct pbuf *p) +{ + struct pbuf *p_frag; + u16_t remaining_len; + u8_t *buffer; + u8_t lowpan6_header_len; + u8_t hidden_header_len; + err_t err; + + LWIP_ASSERT("lowpan6_frag: netif->linkoutput not set", netif->linkoutput != NULL); + +#if LWIP_6LOWPAN_IPHC + + /* We'll use a dedicated pbuf for building BLE fragments. + * We'll over-allocate it by the bytes saved for header compression. + */ + p_frag = pbuf_alloc(PBUF_RAW, p->tot_len, PBUF_RAM); + if (p_frag == NULL) { + MIB2_STATS_NETIF_INC(netif, ifoutdiscards); + return ERR_MEM; + } + LWIP_ASSERT("this needs a pbuf in one piece", p_frag->len == p_frag->tot_len); + + /* Write IP6 header (with IPHC). 
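+     lowpan6_compress_headers() writes the IPHC header into the fragment buffer and
+     reports in hidden_header_len how many bytes of the original headers it replaced,
+     which is why pbuf_remove_header(p, hidden_header_len) follows before the payload copy.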
*/ + buffer = (u8_t*)p_frag->payload; + + err = lowpan6_compress_headers(netif, (u8_t *)p->payload, p->len, buffer, p_frag->len, + &lowpan6_header_len, &hidden_header_len, rfc7668_context, &rfc7668_local_addr, &rfc7668_peer_addr); + if (err != ERR_OK) { + MIB2_STATS_NETIF_INC(netif, ifoutdiscards); + pbuf_free(p_frag); + return err; + } + pbuf_remove_header(p, hidden_header_len); + + /* Calculate remaining packet length */ + remaining_len = p->tot_len; + + /* Copy IPv6 packet */ + pbuf_copy_partial(p, buffer + lowpan6_header_len, remaining_len, 0); + + /* Calculate frame length */ + p_frag->len = p_frag->tot_len = remaining_len + lowpan6_header_len; + + /* send the packet */ + MIB2_STATS_NETIF_ADD(netif, ifoutoctets, p_frag->tot_len); + LWIP_DEBUGF(LWIP_LOWPAN6_DEBUG|LWIP_DBG_TRACE, ("rfc7668_output: sending packet %p\n", (void *)p)); + err = netif->linkoutput(netif, p_frag); + + pbuf_free(p_frag); + + return err; +#else /* LWIP_6LOWPAN_IPHC */ + /* 6LoWPAN over BLE requires IPHC! */ + return ERR_IF; +#endif/* LWIP_6LOWPAN_IPHC */ +} + +/** + * @ingroup rfc7668if + * Set context id IPv6 address + * + * Store one IPv6 address to a given context id. + * + * @param idx Context id + * @param context IPv6 addr for this context + * + * @return ERR_OK (if everything is fine), ERR_ARG (if the context id is out of range), ERR_VAL (if contexts disabled) + */ +err_t +rfc7668_set_context(u8_t idx, const ip6_addr_t *context) +{ +#if LWIP_6LOWPAN_NUM_CONTEXTS > 0 + /* check if the ID is possible */ + if (idx >= LWIP_6LOWPAN_NUM_CONTEXTS) { + return ERR_ARG; + } + /* copy IPv6 address to context storage */ + ip6_addr_set(&rfc7668_context[idx], context); + return ERR_OK; +#else + LWIP_UNUSED_ARG(idx); + LWIP_UNUSED_ARG(context); + return ERR_VAL; +#endif +} + +/** + * @ingroup rfc7668if + * Compress outgoing IPv6 packet and pass it on to netif->linkoutput + * + * @param netif The lwIP network interface which the IP packet will be sent on. + * @param q The pbuf(s) containing the IP packet to be sent. + * @param ip6addr The IP address of the packet destination. + * + * @return See rfc7668_compress + */ +err_t +rfc7668_output(struct netif *netif, struct pbuf *q, const ip6_addr_t *ip6addr) +{ + /* dst ip6addr is not used here, we only have one peer */ + LWIP_UNUSED_ARG(ip6addr); + + return rfc7668_compress(netif, q); +} + +/** + * @ingroup rfc7668if + * Process a received raw payload from an L2CAP channel + * + * @param p the received packet, p->payload pointing to the + * IPv6 header (maybe compressed) + * @param netif the network interface on which the packet was received + * + * @return ERR_OK if everything was fine + */ +err_t +rfc7668_input(struct pbuf * p, struct netif *netif) +{ + u8_t * puc; + + MIB2_STATS_NETIF_ADD(netif, ifinoctets, p->tot_len); + + /* Load first header byte */ + puc = (u8_t*)p->payload; + + /* no IP header compression */ + if (*puc == 0x41) { + LWIP_DEBUGF(LWIP_LOWPAN6_DECOMPRESSION_DEBUG, ("Completed packet, removing dispatch: 0x%2x \n", *puc)); + /* This is a complete IPv6 packet, just skip header byte. */ + pbuf_remove_header(p, 1); + /* IPHC header compression */ + } else if ((*puc & 0xe0 )== 0x60) { + LWIP_DEBUGF(LWIP_LOWPAN6_DECOMPRESSION_DEBUG, ("Completed packet, decompress dispatch: 0x%2x \n", *puc)); + /* IPv6 headers are compressed using IPHC. 
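+       lowpan6_decompress() is called with datagram_size 0 here since fragmentation is
+       handled by the lower BLE (L2CAP) layers; if it fails, the frame is counted as an
+       input discard and ERR_OK is returned to the caller.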
*/ + p = lowpan6_decompress(p, 0, rfc7668_context, &rfc7668_peer_addr, &rfc7668_local_addr); + /* if no pbuf is returned, handle as discarded packet */ + if (p == NULL) { + MIB2_STATS_NETIF_INC(netif, ifindiscards); + return ERR_OK; + } + /* invalid header byte, discard */ + } else { + LWIP_DEBUGF(LWIP_LOWPAN6_DECOMPRESSION_DEBUG, ("Completed packet, discarding: 0x%2x \n", *puc)); + MIB2_STATS_NETIF_INC(netif, ifindiscards); + pbuf_free(p); + return ERR_OK; + } + /* @todo: distinguish unicast/multicast */ + MIB2_STATS_NETIF_INC(netif, ifinucastpkts); + +#if LWIP_RFC7668_IP_UNCOMPRESSED_DEBUG + { + u16_t i; + LWIP_DEBUGF(LWIP_RFC7668_IP_UNCOMPRESSED_DEBUG, ("IPv6 payload:\n")); + for (i = 0; i < p->len; i++) { + if ((i%4)==0) { + LWIP_DEBUGF(LWIP_RFC7668_IP_UNCOMPRESSED_DEBUG, ("\n")); + } + LWIP_DEBUGF(LWIP_RFC7668_IP_UNCOMPRESSED_DEBUG, ("%2X ", *((uint8_t *)p->payload+i))); + } + LWIP_DEBUGF(LWIP_RFC7668_IP_UNCOMPRESSED_DEBUG, ("\np->len: %d\n", p->len)); + } +#endif + /* pass data to ip6_input */ + return ip6_input(p, netif); +} + +/** + * @ingroup rfc7668if + * Initialize the netif + * + * No flags are used (broadcast not possible, not ethernet, ...) + * The shortname for this netif is "BT" + * + * @param netif the network interface to be initialized as RFC7668 netif + * + * @return ERR_OK if everything went fine + */ +err_t +rfc7668_if_init(struct netif *netif) +{ + netif->name[0] = 'b'; + netif->name[1] = 't'; + /* local function as IPv6 output */ + netif->output_ip6 = rfc7668_output; + + MIB2_INIT_NETIF(netif, snmp_ifType_other, 0); + + /* maximum transfer unit, set according to RFC7668 ch2.4 */ + netif->mtu = 1280; + + /* no flags set (no broadcast, ethernet,...)*/ + netif->flags = 0; + + /* everything fine */ + return ERR_OK; +} + +#if !NO_SYS +/** + * Pass a received packet to tcpip_thread for input processing + * + * @param p the received packet, p->payload pointing to the + * IEEE 802.15.4 header. + * @param inp the network interface on which the packet was received + * + * @return see @ref tcpip_inpkt, same return values + */ +err_t +tcpip_rfc7668_input(struct pbuf *p, struct netif *inp) +{ + /* send data to upper layer, return the result */ + return tcpip_inpkt(p, inp, rfc7668_input); +} +#endif /* !NO_SYS */ + +#endif /* LWIP_IPV6 */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/netif/lowpan6_common.c b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/netif/lowpan6_common.c new file mode 100644 index 0000000..4aba4a3 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/netif/lowpan6_common.c @@ -0,0 +1,841 @@ +/** + * @file + * + * Common 6LowPAN routines for IPv6. Uses ND tables for link-layer addressing. Fragments packets to 6LowPAN units. + * + * This implementation aims to conform to IEEE 802.15.4(-2015), RFC 4944 and RFC 6282. + * @todo: RFC 6775. + */ + +/* + * Copyright (c) 2015 Inico Technologies Ltd. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. 
+ * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Ivan Delamer + * + * + * Please coordinate changes and requests with Ivan Delamer + * + */ + +/** + * @defgroup sixlowpan 6LoWPAN (RFC4944) + * @ingroup netifs + * 6LowPAN netif implementation + */ + +#include "netif/lowpan6_common.h" + +#if LWIP_IPV6 + +#include "lwip/ip.h" +#include "lwip/pbuf.h" +#include "lwip/ip_addr.h" +#include "lwip/netif.h" +#include "lwip/udp.h" + +#include + +/* Determine compression mode for unicast address. */ +s8_t +lowpan6_get_address_mode(const ip6_addr_t *ip6addr, const struct lowpan6_link_addr *mac_addr) +{ + if (mac_addr->addr_len == 2) { + if ((ip6addr->addr[2] == (u32_t)PP_HTONL(0x000000ff)) && + ((ip6addr->addr[3] & PP_HTONL(0xffff0000)) == PP_NTOHL(0xfe000000))) { + if ((ip6addr->addr[3] & PP_HTONL(0x0000ffff)) == lwip_ntohl((mac_addr->addr[0] << 8) | mac_addr->addr[1])) { + return 3; + } + } + } else if (mac_addr->addr_len == 8) { + if ((ip6addr->addr[2] == lwip_ntohl(((mac_addr->addr[0] ^ 2) << 24) | (mac_addr->addr[1] << 16) | mac_addr->addr[2] << 8 | mac_addr->addr[3])) && + (ip6addr->addr[3] == lwip_ntohl((mac_addr->addr[4] << 24) | (mac_addr->addr[5] << 16) | mac_addr->addr[6] << 8 | mac_addr->addr[7]))) { + return 3; + } + } + + if ((ip6addr->addr[2] == PP_HTONL(0x000000ffUL)) && + ((ip6addr->addr[3] & PP_HTONL(0xffff0000)) == PP_NTOHL(0xfe000000UL))) { + return 2; + } + + return 1; +} + +#if LWIP_6LOWPAN_IPHC + +/* Determine compression mode for multicast address. */ +static s8_t +lowpan6_get_address_mode_mc(const ip6_addr_t *ip6addr) +{ + if ((ip6addr->addr[0] == PP_HTONL(0xff020000)) && + (ip6addr->addr[1] == 0) && + (ip6addr->addr[2] == 0) && + ((ip6addr->addr[3] & PP_HTONL(0xffffff00)) == 0)) { + return 3; + } else if (((ip6addr->addr[0] & PP_HTONL(0xff00ffff)) == PP_HTONL(0xff000000)) && + (ip6addr->addr[1] == 0)) { + if ((ip6addr->addr[2] == 0) && + ((ip6addr->addr[3] & PP_HTONL(0xff000000)) == 0)) { + return 2; + } else if ((ip6addr->addr[2] & PP_HTONL(0xffffff00)) == 0) { + return 1; + } + } + + return 0; +} + +#if LWIP_6LOWPAN_NUM_CONTEXTS > 0 +static s8_t +lowpan6_context_lookup(const ip6_addr_t *lowpan6_contexts, const ip6_addr_t *ip6addr) +{ + s8_t i; + + for (i = 0; i < LWIP_6LOWPAN_NUM_CONTEXTS; i++) { + if (ip6_addr_netcmp(&lowpan6_contexts[i], ip6addr)) { + return i; + } + } + return -1; +} +#endif /* LWIP_6LOWPAN_NUM_CONTEXTS > 0 */ + +/* + * Compress IPv6 and/or UDP headers. 
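+ * The compressed header bytes are written to outbuf; *lowpan6_header_len_out receives
+ * their length and *hidden_header_len_out the number of input bytes they replace
+ * (IP6_HLEN, plus UDP_HLEN when the UDP header is compressed as well).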
+ * */ +err_t +lowpan6_compress_headers(struct netif *netif, u8_t *inbuf, size_t inbuf_size, u8_t *outbuf, size_t outbuf_size, + u8_t *lowpan6_header_len_out, u8_t *hidden_header_len_out, ip6_addr_t *lowpan6_contexts, + const struct lowpan6_link_addr *src, const struct lowpan6_link_addr *dst) +{ + u8_t *buffer, *inptr; + u8_t lowpan6_header_len; + u8_t hidden_header_len = 0; + s8_t i; + struct ip6_hdr *ip6hdr; + ip_addr_t ip6src, ip6dst; + + LWIP_ASSERT("netif != NULL", netif != NULL); + LWIP_ASSERT("inbuf != NULL", inbuf != NULL); + LWIP_ASSERT("outbuf != NULL", outbuf != NULL); + LWIP_ASSERT("lowpan6_header_len_out != NULL", lowpan6_header_len_out != NULL); + LWIP_ASSERT("hidden_header_len_out != NULL", hidden_header_len_out != NULL); + + /* Perform 6LowPAN IPv6 header compression according to RFC 6282 */ + buffer = outbuf; + inptr = inbuf; + + if (inbuf_size < IP6_HLEN) { + /* input buffer too short */ + return ERR_VAL; + } + if (outbuf_size < IP6_HLEN) { + /* output buffer too short for worst case */ + return ERR_MEM; + } + + /* Point to ip6 header and align copies of src/dest addresses. */ + ip6hdr = (struct ip6_hdr *)inptr; + ip_addr_copy_from_ip6_packed(ip6dst, ip6hdr->dest); + ip6_addr_assign_zone(ip_2_ip6(&ip6dst), IP6_UNKNOWN, netif); + ip_addr_copy_from_ip6_packed(ip6src, ip6hdr->src); + ip6_addr_assign_zone(ip_2_ip6(&ip6src), IP6_UNKNOWN, netif); + + /* Basic length of 6LowPAN header, set dispatch and clear fields. */ + lowpan6_header_len = 2; + buffer[0] = 0x60; + buffer[1] = 0; + + /* Determine whether there will be a Context Identifier Extension byte or not. + * If so, set it already. */ +#if LWIP_6LOWPAN_NUM_CONTEXTS > 0 + buffer[2] = 0; + + i = lowpan6_context_lookup(lowpan6_contexts, ip_2_ip6(&ip6src)); + if (i >= 0) { + /* Stateful source address compression. */ + buffer[1] |= 0x40; + buffer[2] |= (i & 0x0f) << 4; + } + + i = lowpan6_context_lookup(lowpan6_contexts, ip_2_ip6(&ip6dst)); + if (i >= 0) { + /* Stateful destination address compression. */ + buffer[1] |= 0x04; + buffer[2] |= i & 0x0f; + } + + if (buffer[2] != 0x00) { + /* Context identifier extension byte is appended. */ + buffer[1] |= 0x80; + lowpan6_header_len++; + } +#else /* LWIP_6LOWPAN_NUM_CONTEXTS > 0 */ + LWIP_UNUSED_ARG(lowpan6_contexts); +#endif /* LWIP_6LOWPAN_NUM_CONTEXTS > 0 */ + + /* Determine TF field: Traffic Class, Flow Label */ + if (IP6H_FL(ip6hdr) == 0) { + /* Flow label is elided. */ + buffer[0] |= 0x10; + if (IP6H_TC(ip6hdr) == 0) { + /* Traffic class (ECN+DSCP) elided too. */ + buffer[0] |= 0x08; + } else { + /* Traffic class (ECN+DSCP) appended. */ + buffer[lowpan6_header_len++] = IP6H_TC(ip6hdr); + } + } else { + if (((IP6H_TC(ip6hdr) & 0x3f) == 0)) { + /* DSCP portion of Traffic Class is elided, ECN and FL are appended (3 bytes) */ + buffer[0] |= 0x08; + + buffer[lowpan6_header_len] = IP6H_TC(ip6hdr) & 0xc0; + buffer[lowpan6_header_len++] |= (IP6H_FL(ip6hdr) >> 16) & 0x0f; + buffer[lowpan6_header_len++] = (IP6H_FL(ip6hdr) >> 8) & 0xff; + buffer[lowpan6_header_len++] = IP6H_FL(ip6hdr) & 0xff; + } else { + /* Traffic class and flow label are appended (4 bytes) */ + buffer[lowpan6_header_len++] = IP6H_TC(ip6hdr); + buffer[lowpan6_header_len++] = (IP6H_FL(ip6hdr) >> 16) & 0x0f; + buffer[lowpan6_header_len++] = (IP6H_FL(ip6hdr) >> 8) & 0xff; + buffer[lowpan6_header_len++] = IP6H_FL(ip6hdr) & 0xff; + } + } + + /* Compress NH? + * Only if UDP for now. @todo support other NH compression. */ + if (IP6H_NEXTH(ip6hdr) == IP6_NEXTH_UDP) { + buffer[0] |= 0x04; + } else { + /* append nexth. 
*/ + buffer[lowpan6_header_len++] = IP6H_NEXTH(ip6hdr); + } + + /* Compress hop limit? */ + if (IP6H_HOPLIM(ip6hdr) == 255) { + buffer[0] |= 0x03; + } else if (IP6H_HOPLIM(ip6hdr) == 64) { + buffer[0] |= 0x02; + } else if (IP6H_HOPLIM(ip6hdr) == 1) { + buffer[0] |= 0x01; + } else { + /* append hop limit */ + buffer[lowpan6_header_len++] = IP6H_HOPLIM(ip6hdr); + } + + /* Compress source address */ + if (((buffer[1] & 0x40) != 0) || + (ip6_addr_islinklocal(ip_2_ip6(&ip6src)))) { + /* Context-based or link-local source address compression. */ + i = lowpan6_get_address_mode(ip_2_ip6(&ip6src), src); + buffer[1] |= (i & 0x03) << 4; + if (i == 1) { + MEMCPY(buffer + lowpan6_header_len, inptr + 16, 8); + lowpan6_header_len += 8; + } else if (i == 2) { + MEMCPY(buffer + lowpan6_header_len, inptr + 22, 2); + lowpan6_header_len += 2; + } + } else if (ip6_addr_isany(ip_2_ip6(&ip6src))) { + /* Special case: mark SAC and leave SAM=0 */ + buffer[1] |= 0x40; + } else { + /* Append full address. */ + MEMCPY(buffer + lowpan6_header_len, inptr + 8, 16); + lowpan6_header_len += 16; + } + + /* Compress destination address */ + if (ip6_addr_ismulticast(ip_2_ip6(&ip6dst))) { + /* @todo support stateful multicast address compression */ + + buffer[1] |= 0x08; + + i = lowpan6_get_address_mode_mc(ip_2_ip6(&ip6dst)); + buffer[1] |= i & 0x03; + if (i == 0) { + MEMCPY(buffer + lowpan6_header_len, inptr + 24, 16); + lowpan6_header_len += 16; + } else if (i == 1) { + buffer[lowpan6_header_len++] = inptr[25]; + MEMCPY(buffer + lowpan6_header_len, inptr + 35, 5); + lowpan6_header_len += 5; + } else if (i == 2) { + buffer[lowpan6_header_len++] = inptr[25]; + MEMCPY(buffer + lowpan6_header_len, inptr + 37, 3); + lowpan6_header_len += 3; + } else if (i == 3) { + buffer[lowpan6_header_len++] = (inptr)[39]; + } + } else if (((buffer[1] & 0x04) != 0) || + (ip6_addr_islinklocal(ip_2_ip6(&ip6dst)))) { + /* Context-based or link-local destination address compression. */ + i = lowpan6_get_address_mode(ip_2_ip6(&ip6dst), dst); + buffer[1] |= i & 0x03; + if (i == 1) { + MEMCPY(buffer + lowpan6_header_len, inptr + 32, 8); + lowpan6_header_len += 8; + } else if (i == 2) { + MEMCPY(buffer + lowpan6_header_len, inptr + 38, 2); + lowpan6_header_len += 2; + } + } else { + /* Append full address. */ + MEMCPY(buffer + lowpan6_header_len, inptr + 24, 16); + lowpan6_header_len += 16; + } + + /* Move to payload. */ + inptr += IP6_HLEN; + hidden_header_len += IP6_HLEN; + +#if LWIP_UDP + /* Compress UDP header? */ + if (IP6H_NEXTH(ip6hdr) == IP6_NEXTH_UDP) { + /* @todo support optional checksum compression */ + + if (inbuf_size < IP6_HLEN + UDP_HLEN) { + /* input buffer too short */ + return ERR_VAL; + } + if (outbuf_size < (size_t)(hidden_header_len + 7)) { + /* output buffer too short for worst case */ + return ERR_MEM; + } + + buffer[lowpan6_header_len] = 0xf0; + + /* determine port compression mode. */ + if ((inptr[0] == 0xf0) && ((inptr[1] & 0xf0) == 0xb0) && + (inptr[2] == 0xf0) && ((inptr[3] & 0xf0) == 0xb0)) { + /* Compress source and dest ports. */ + buffer[lowpan6_header_len++] |= 0x03; + buffer[lowpan6_header_len++] = ((inptr[1] & 0x0f) << 4) | (inptr[3] & 0x0f); + } else if (inptr[0] == 0xf0) { + /* Compress source port. */ + buffer[lowpan6_header_len++] |= 0x02; + buffer[lowpan6_header_len++] = inptr[1]; + buffer[lowpan6_header_len++] = inptr[2]; + buffer[lowpan6_header_len++] = inptr[3]; + } else if (inptr[2] == 0xf0) { + /* Compress dest port. 
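+          Per RFC 6282 UDP header compression, a destination port of the form 0xF0xx is
+          carried in a single byte while the full source port stays inline; that is what
+          this branch writes.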
*/ + buffer[lowpan6_header_len++] |= 0x01; + buffer[lowpan6_header_len++] = inptr[0]; + buffer[lowpan6_header_len++] = inptr[1]; + buffer[lowpan6_header_len++] = inptr[3]; + } else { + /* append full ports. */ + lowpan6_header_len++; + buffer[lowpan6_header_len++] = inptr[0]; + buffer[lowpan6_header_len++] = inptr[1]; + buffer[lowpan6_header_len++] = inptr[2]; + buffer[lowpan6_header_len++] = inptr[3]; + } + + /* elide length and copy checksum */ + buffer[lowpan6_header_len++] = inptr[6]; + buffer[lowpan6_header_len++] = inptr[7]; + + hidden_header_len += UDP_HLEN; + } +#endif /* LWIP_UDP */ + + *lowpan6_header_len_out = lowpan6_header_len; + *hidden_header_len_out = hidden_header_len; + + return ERR_OK; +} + +/** Decompress IPv6 and UDP headers compressed according to RFC 6282 + * + * @param lowpan6_buffer compressed headers, first byte is the dispatch byte + * @param lowpan6_bufsize size of lowpan6_buffer (may include data after headers) + * @param decomp_buffer buffer where the decompressed headers are stored + * @param decomp_bufsize size of decomp_buffer + * @param hdr_size_comp returns the size of the compressed headers (skip to get to data) + * @param hdr_size_decomp returns the size of the decompressed headers (IPv6 + UDP) + * @param datagram_size datagram size from fragments or 0 if unfragmented + * @param compressed_size compressed datagram size (for unfragmented rx) + * @param lowpan6_contexts context addresses + * @param src source address of the outer layer, used for address compression + * @param dest destination address of the outer layer, used for address compression + * @return ERR_OK if decompression succeeded, an error otherwise + */ +static err_t +lowpan6_decompress_hdr(u8_t *lowpan6_buffer, size_t lowpan6_bufsize, + u8_t *decomp_buffer, size_t decomp_bufsize, + u16_t *hdr_size_comp, u16_t *hdr_size_decomp, + u16_t datagram_size, u16_t compressed_size, + ip6_addr_t *lowpan6_contexts, + struct lowpan6_link_addr *src, struct lowpan6_link_addr *dest) +{ + u16_t lowpan6_offset; + struct ip6_hdr *ip6hdr; + s8_t i; + u32_t header_temp; + u16_t ip6_offset = IP6_HLEN; + + LWIP_ASSERT("lowpan6_buffer != NULL", lowpan6_buffer != NULL); + LWIP_ASSERT("decomp_buffer != NULL", decomp_buffer != NULL); + LWIP_ASSERT("src != NULL", src != NULL); + LWIP_ASSERT("dest != NULL", dest != NULL); + LWIP_ASSERT("hdr_size_comp != NULL", hdr_size_comp != NULL); + LWIP_ASSERT("dehdr_size_decompst != NULL", hdr_size_decomp != NULL); + + ip6hdr = (struct ip6_hdr *)decomp_buffer; + if (decomp_bufsize < IP6_HLEN) { + return ERR_MEM; + } + + /* output the full compressed packet, if set in @see lowpan6_opts.h */ +#if LWIP_LOWPAN6_IP_COMPRESSED_DEBUG + { + u16_t j; + LWIP_DEBUGF(LWIP_LOWPAN6_IP_COMPRESSED_DEBUG, ("lowpan6_decompress_hdr: IP6 payload (compressed): \n")); + for (j = 0; j < lowpan6_bufsize; j++) { + if ((j % 4) == 0) { + LWIP_DEBUGF(LWIP_LOWPAN6_IP_COMPRESSED_DEBUG, ("\n")); + } + LWIP_DEBUGF(LWIP_LOWPAN6_IP_COMPRESSED_DEBUG, ("%2X ", lowpan6_buffer[j])); + } + LWIP_DEBUGF(LWIP_LOWPAN6_IP_COMPRESSED_DEBUG, ("\np->len: %d", lowpan6_bufsize)); + } +#endif + + /* offset for inline IP headers (RFC 6282 ch3)*/ + lowpan6_offset = 2; + /* if CID is set (context identifier), the context byte + * follows immediately after the header, so other IPHC fields are @+3 */ + if (lowpan6_buffer[1] & 0x80) { + lowpan6_offset++; + } + + /* Set IPv6 version, traffic class and flow label. 
(RFC6282, ch 3.1.1.)*/ + if ((lowpan6_buffer[0] & 0x18) == 0x00) { + header_temp = ((lowpan6_buffer[lowpan6_offset+1] & 0x0f) << 16) | \ + (lowpan6_buffer[lowpan6_offset + 2] << 8) | lowpan6_buffer[lowpan6_offset+3]; + LWIP_DEBUGF(LWIP_LOWPAN6_DECOMPRESSION_DEBUG, ("TF: 00, ECN: 0x%2x, Flowlabel+DSCP: 0x%8X\n", \ + lowpan6_buffer[lowpan6_offset],header_temp)); + IP6H_VTCFL_SET(ip6hdr, 6, lowpan6_buffer[lowpan6_offset], header_temp); + /* increase offset, processed 4 bytes here: + * TF=00: ECN + DSCP + 4-bit Pad + Flow Label (4 bytes)*/ + lowpan6_offset += 4; + } else if ((lowpan6_buffer[0] & 0x18) == 0x08) { + header_temp = ((lowpan6_buffer[lowpan6_offset] & 0x0f) << 16) | (lowpan6_buffer[lowpan6_offset + 1] << 8) | lowpan6_buffer[lowpan6_offset+2]; + LWIP_DEBUGF(LWIP_LOWPAN6_DECOMPRESSION_DEBUG, ("TF: 01, ECN: 0x%2x, Flowlabel: 0x%2X, DSCP ignored\n", \ + lowpan6_buffer[lowpan6_offset] & 0xc0,header_temp)); + IP6H_VTCFL_SET(ip6hdr, 6, lowpan6_buffer[lowpan6_offset] & 0xc0, header_temp); + /* increase offset, processed 3 bytes here: + * TF=01: ECN + 2-bit Pad + Flow Label (3 bytes), DSCP is elided.*/ + lowpan6_offset += 3; + } else if ((lowpan6_buffer[0] & 0x18) == 0x10) { + LWIP_DEBUGF(LWIP_LOWPAN6_DECOMPRESSION_DEBUG, ("TF: 10, DCSP+ECN: 0x%2x, Flowlabel ignored\n", lowpan6_buffer[lowpan6_offset])); + IP6H_VTCFL_SET(ip6hdr, 6, lowpan6_buffer[lowpan6_offset],0); + /* increase offset, processed 1 byte here: + * ECN + DSCP (1 byte), Flow Label is elided.*/ + lowpan6_offset += 1; + } else if ((lowpan6_buffer[0] & 0x18) == 0x18) { + LWIP_DEBUGF(LWIP_LOWPAN6_DECOMPRESSION_DEBUG, ("TF: 11, DCSP/ECN & Flowlabel ignored\n")); + /* don't increase offset, no bytes processed here */ + IP6H_VTCFL_SET(ip6hdr, 6, 0, 0); + } + + /* Set Next Header (NH) */ + if ((lowpan6_buffer[0] & 0x04) == 0x00) { + /* 0: full next header byte carried inline (increase offset)*/ + LWIP_DEBUGF(LWIP_LOWPAN6_DECOMPRESSION_DEBUG, ("NH: 0x%2X\n", lowpan6_buffer[lowpan6_offset+1])); + IP6H_NEXTH_SET(ip6hdr, lowpan6_buffer[lowpan6_offset++]); + } else { + /* 1: NH compression, LOWPAN_NHC (RFC6282, ch 4.1) */ + /* We should fill this later with NHC decoding */ + LWIP_DEBUGF(LWIP_LOWPAN6_DECOMPRESSION_DEBUG, ("NH: skipped, later done with NHC\n")); + IP6H_NEXTH_SET(ip6hdr, 0); + } + + /* Set Hop Limit, either carried inline or 3 different hops (1,64,255) */ + if ((lowpan6_buffer[0] & 0x03) == 0x00) { + LWIP_DEBUGF(LWIP_LOWPAN6_DECOMPRESSION_DEBUG, ("Hops: full value: %d\n", lowpan6_buffer[lowpan6_offset+1])); + IP6H_HOPLIM_SET(ip6hdr, lowpan6_buffer[lowpan6_offset++]); + } else if ((lowpan6_buffer[0] & 0x03) == 0x01) { + LWIP_DEBUGF(LWIP_LOWPAN6_DECOMPRESSION_DEBUG, ("Hops: compressed: 1\n")); + IP6H_HOPLIM_SET(ip6hdr, 1); + } else if ((lowpan6_buffer[0] & 0x03) == 0x02) { + LWIP_DEBUGF(LWIP_LOWPAN6_DECOMPRESSION_DEBUG, ("Hops: compressed: 64\n")); + IP6H_HOPLIM_SET(ip6hdr, 64); + } else if ((lowpan6_buffer[0] & 0x03) == 0x03) { + LWIP_DEBUGF(LWIP_LOWPAN6_DECOMPRESSION_DEBUG, ("Hops: compressed: 255\n")); + IP6H_HOPLIM_SET(ip6hdr, 255); + } + + /* Source address decoding. 
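SAC (bit 0x40 of the second IPHC byte) selects stateless vs. context-based compression; SAM (bits 0x30) tells how much of the address follows inline: the full address, 64 bits, 16 bits or nothing.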
*/ + if ((lowpan6_buffer[1] & 0x40) == 0x00) { + /* Source address compression (SAC) = 0 -> stateless compression */ + LWIP_DEBUGF(LWIP_LOWPAN6_DECOMPRESSION_DEBUG, ("SAC == 0, no context byte\n")); + /* Stateless compression */ + if ((lowpan6_buffer[1] & 0x30) == 0x00) { + LWIP_DEBUGF(LWIP_LOWPAN6_DECOMPRESSION_DEBUG, ("SAM == 00, no src compression, fetching 128bits inline\n")); + /* copy full address, increase offset by 16 Bytes */ + MEMCPY(&ip6hdr->src.addr[0], lowpan6_buffer + lowpan6_offset, 16); + lowpan6_offset += 16; + } else if ((lowpan6_buffer[1] & 0x30) == 0x10) { + LWIP_DEBUGF(LWIP_LOWPAN6_DECOMPRESSION_DEBUG, ("SAM == 01, src compression, 64bits inline\n")); + /* set 64 bits to link local */ + ip6hdr->src.addr[0] = PP_HTONL(0xfe800000UL); + ip6hdr->src.addr[1] = 0; + /* copy 8 Bytes, increase offset */ + MEMCPY(&ip6hdr->src.addr[2], lowpan6_buffer + lowpan6_offset, 8); + lowpan6_offset += 8; + } else if ((lowpan6_buffer[1] & 0x30) == 0x20) { + LWIP_DEBUGF(LWIP_LOWPAN6_DECOMPRESSION_DEBUG, ("SAM == 10, src compression, 16bits inline\n")); + /* set 96 bits to link local */ + ip6hdr->src.addr[0] = PP_HTONL(0xfe800000UL); + ip6hdr->src.addr[1] = 0; + ip6hdr->src.addr[2] = PP_HTONL(0x000000ffUL); + /* extract remaining 16bits from inline bytes, increase offset */ + ip6hdr->src.addr[3] = lwip_htonl(0xfe000000UL | (lowpan6_buffer[lowpan6_offset] << 8) | + lowpan6_buffer[lowpan6_offset + 1]); + lowpan6_offset += 2; + } else if ((lowpan6_buffer[1] & 0x30) == 0x30) { + LWIP_DEBUGF(LWIP_LOWPAN6_DECOMPRESSION_DEBUG, ("SAM == 11, src compression, 0bits inline, using other headers\n")); + /* no information avalaible, using other layers, see RFC6282 ch 3.2.2 */ + ip6hdr->src.addr[0] = PP_HTONL(0xfe800000UL); + ip6hdr->src.addr[1] = 0; + if (src->addr_len == 2) { + ip6hdr->src.addr[2] = PP_HTONL(0x000000ffUL); + ip6hdr->src.addr[3] = lwip_htonl(0xfe000000UL | (src->addr[0] << 8) | src->addr[1]); + } else if (src->addr_len == 8) { + ip6hdr->src.addr[2] = lwip_htonl(((src->addr[0] ^ 2) << 24) | (src->addr[1] << 16) | + (src->addr[2] << 8) | src->addr[3]); + ip6hdr->src.addr[3] = lwip_htonl((src->addr[4] << 24) | (src->addr[5] << 16) | + (src->addr[6] << 8) | src->addr[7]); + } else { + /* invalid source address length */ + LWIP_DEBUGF(LWIP_LOWPAN6_DECOMPRESSION_DEBUG, ("Invalid source address length\n")); + return ERR_VAL; + } + } + } else { + /* Source address compression (SAC) = 1 -> stateful/context-based compression */ + LWIP_DEBUGF(LWIP_LOWPAN6_DECOMPRESSION_DEBUG, ("SAC == 1, additional context byte\n")); + if ((lowpan6_buffer[1] & 0x30) == 0x00) { + /* SAM=00, address=> :: (ANY) */ + ip6hdr->src.addr[0] = 0; + ip6hdr->src.addr[1] = 0; + ip6hdr->src.addr[2] = 0; + ip6hdr->src.addr[3] = 0; + LWIP_DEBUGF(LWIP_LOWPAN6_DECOMPRESSION_DEBUG, ("SAM == 00, context compression, ANY (::)\n")); + } else { + /* Set prefix from context info */ + if (lowpan6_buffer[1] & 0x80) { + i = (lowpan6_buffer[2] >> 4) & 0x0f; + } else { + i = 0; + } + if (i >= LWIP_6LOWPAN_NUM_CONTEXTS) { + /* Error */ + return ERR_VAL; + } +#if LWIP_6LOWPAN_NUM_CONTEXTS > 0 + ip6hdr->src.addr[0] = lowpan6_contexts[i].addr[0]; + ip6hdr->src.addr[1] = lowpan6_contexts[i].addr[1]; + LWIP_DEBUGF(LWIP_LOWPAN6_DECOMPRESSION_DEBUG, ("SAM == xx, context compression found @%d: %8X, %8X\n", (int)i, ip6hdr->src.addr[0], ip6hdr->src.addr[1])); +#else + LWIP_UNUSED_ARG(lowpan6_contexts); +#endif + } + + /* determine further address bits */ + if ((lowpan6_buffer[1] & 0x30) == 0x10) { + /* SAM=01, load additional 64bits */ + 
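/* The context prefix copied above stays in addr[0..1]; the 64 inline bits supply the interface identifier. */ +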
MEMCPY(&ip6hdr->src.addr[2], lowpan6_buffer + lowpan6_offset, 8); + LWIP_DEBUGF(LWIP_LOWPAN6_DECOMPRESSION_DEBUG, ("SAM == 01, context compression, 64bits inline\n")); + lowpan6_offset += 8; + } else if ((lowpan6_buffer[1] & 0x30) == 0x20) { + /* SAM=01, load additional 16bits */ + ip6hdr->src.addr[2] = PP_HTONL(0x000000ffUL); + ip6hdr->src.addr[3] = lwip_htonl(0xfe000000UL | (lowpan6_buffer[lowpan6_offset] << 8) | lowpan6_buffer[lowpan6_offset + 1]); + LWIP_DEBUGF(LWIP_LOWPAN6_DECOMPRESSION_DEBUG, ("SAM == 10, context compression, 16bits inline\n")); + lowpan6_offset += 2; + } else if ((lowpan6_buffer[1] & 0x30) == 0x30) { + /* SAM=11, address is fully elided, load from other layers */ + LWIP_DEBUGF(LWIP_LOWPAN6_DECOMPRESSION_DEBUG, ("SAM == 11, context compression, 0bits inline, using other headers\n")); + if (src->addr_len == 2) { + ip6hdr->src.addr[2] = PP_HTONL(0x000000ffUL); + ip6hdr->src.addr[3] = lwip_htonl(0xfe000000UL | (src->addr[0] << 8) | src->addr[1]); + } else if (src->addr_len == 8) { + ip6hdr->src.addr[2] = lwip_htonl(((src->addr[0] ^ 2) << 24) | (src->addr[1] << 16) | (src->addr[2] << 8) | src->addr[3]); + ip6hdr->src.addr[3] = lwip_htonl((src->addr[4] << 24) | (src->addr[5] << 16) | (src->addr[6] << 8) | src->addr[7]); + } else { + /* invalid source address length */ + LWIP_DEBUGF(LWIP_LOWPAN6_DECOMPRESSION_DEBUG, ("Invalid source address length\n")); + return ERR_VAL; + } + } + } + + /* Destination address decoding. */ + if (lowpan6_buffer[1] & 0x08) { + LWIP_DEBUGF(LWIP_LOWPAN6_DECOMPRESSION_DEBUG, ("M=1: multicast\n")); + /* Multicast destination */ + if (lowpan6_buffer[1] & 0x04) { + LWIP_DEBUGF(LWIP_DBG_ON,("DAC == 1, context multicast: unsupported!!!\n")); + /* @todo support stateful multicast addressing */ + return ERR_VAL; + } + + if ((lowpan6_buffer[1] & 0x03) == 0x00) { + /* DAM = 00, copy full address (128bits) */ + LWIP_DEBUGF(LWIP_LOWPAN6_DECOMPRESSION_DEBUG, ("DAM == 00, no dst compression, fetching 128bits inline\n")); + MEMCPY(&ip6hdr->dest.addr[0], lowpan6_buffer + lowpan6_offset, 16); + lowpan6_offset += 16; + } else if ((lowpan6_buffer[1] & 0x03) == 0x01) { + /* DAM = 01, copy 4 bytes (32bits) */ + LWIP_DEBUGF(LWIP_LOWPAN6_DECOMPRESSION_DEBUG, ("DAM == 01, dst address form (48bits): ffXX::00XX:XXXX:XXXX\n")); + ip6hdr->dest.addr[0] = lwip_htonl(0xff000000UL | (lowpan6_buffer[lowpan6_offset++] << 16)); + ip6hdr->dest.addr[1] = 0; + ip6hdr->dest.addr[2] = lwip_htonl(lowpan6_buffer[lowpan6_offset++]); + ip6hdr->dest.addr[3] = lwip_htonl((lowpan6_buffer[lowpan6_offset] << 24) | (lowpan6_buffer[lowpan6_offset + 1] << 16) | (lowpan6_buffer[lowpan6_offset + 2] << 8) | lowpan6_buffer[lowpan6_offset + 3]); + lowpan6_offset += 4; + } else if ((lowpan6_buffer[1] & 0x03) == 0x02) { + /* DAM = 10, copy 3 bytes (24bits) */ + LWIP_DEBUGF(LWIP_LOWPAN6_DECOMPRESSION_DEBUG, ("DAM == 10, dst address form (32bits): ffXX::00XX:XXXX\n")); + ip6hdr->dest.addr[0] = lwip_htonl(0xff000000UL | (lowpan6_buffer[lowpan6_offset++] << 16)); + ip6hdr->dest.addr[1] = 0; + ip6hdr->dest.addr[2] = 0; + ip6hdr->dest.addr[3] = lwip_htonl((lowpan6_buffer[lowpan6_offset] << 16) | (lowpan6_buffer[lowpan6_offset + 1] << 8) | lowpan6_buffer[lowpan6_offset + 2]); + lowpan6_offset += 3; + } else if ((lowpan6_buffer[1] & 0x03) == 0x03) { + /* DAM = 11, copy 1 byte (8bits) */ + LWIP_DEBUGF(LWIP_LOWPAN6_DECOMPRESSION_DEBUG, ("DAM == 11, dst address form (8bits): ff02::00XX\n")); + ip6hdr->dest.addr[0] = PP_HTONL(0xff020000UL); + ip6hdr->dest.addr[1] = 0; + ip6hdr->dest.addr[2] = 0; + 
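/* A single inline octet completes the ff02:: group address, e.g. 0x02 expands to ff02::2. */ +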
ip6hdr->dest.addr[3] = lwip_htonl(lowpan6_buffer[lowpan6_offset++]); + } + + } else { + /* no Multicast (M=0) */ + if (lowpan6_buffer[1] & 0x04) { + LWIP_DEBUGF(LWIP_LOWPAN6_DECOMPRESSION_DEBUG, ("DAC == 1, stateful compression\n")); + /* Stateful destination compression */ + /* Set prefix from context info */ + if (lowpan6_buffer[1] & 0x80) { + i = lowpan6_buffer[2] & 0x0f; + } else { + i = 0; + } + if (i >= LWIP_6LOWPAN_NUM_CONTEXTS) { + /* Error */ + return ERR_VAL; + } +#if LWIP_6LOWPAN_NUM_CONTEXTS > 0 + ip6hdr->dest.addr[0] = lowpan6_contexts[i].addr[0]; + ip6hdr->dest.addr[1] = lowpan6_contexts[i].addr[1]; +#endif + } else { + LWIP_DEBUGF(LWIP_LOWPAN6_DECOMPRESSION_DEBUG, ("DAC == 0, stateless compression, setting link local prefix\n")); + /* Link local address compression */ + ip6hdr->dest.addr[0] = PP_HTONL(0xfe800000UL); + ip6hdr->dest.addr[1] = 0; + } + + /* M=0, DAC=0, determining destination address length via DAM=xx */ + if ((lowpan6_buffer[1] & 0x03) == 0x00) { + LWIP_DEBUGF(LWIP_LOWPAN6_DECOMPRESSION_DEBUG, ("DAM == 00, no dst compression, fetching 128bits inline")); + /* DAM=00, copy full address */ + MEMCPY(&ip6hdr->dest.addr[0], lowpan6_buffer + lowpan6_offset, 16); + lowpan6_offset += 16; + } else if ((lowpan6_buffer[1] & 0x03) == 0x01) { + LWIP_DEBUGF(LWIP_LOWPAN6_DECOMPRESSION_DEBUG, ("DAM == 01, dst compression, 64bits inline\n")); + /* DAM=01, copy 64 inline bits, increase offset */ + MEMCPY(&ip6hdr->dest.addr[2], lowpan6_buffer + lowpan6_offset, 8); + lowpan6_offset += 8; + } else if ((lowpan6_buffer[1] & 0x03) == 0x02) { + LWIP_DEBUGF(LWIP_LOWPAN6_DECOMPRESSION_DEBUG, ("DAM == 01, dst compression, 16bits inline\n")); + /* DAM=10, copy 16 inline bits, increase offset */ + ip6hdr->dest.addr[2] = PP_HTONL(0x000000ffUL); + ip6hdr->dest.addr[3] = lwip_htonl(0xfe000000UL | (lowpan6_buffer[lowpan6_offset] << 8) | lowpan6_buffer[lowpan6_offset + 1]); + lowpan6_offset += 2; + } else if ((lowpan6_buffer[1] & 0x03) == 0x03) { + /* DAM=11, no bits available, use other headers (not done here) */ + LWIP_DEBUGF(LWIP_LOWPAN6_DECOMPRESSION_DEBUG,("DAM == 01, dst compression, 0bits inline, using other headers\n")); + if (dest->addr_len == 2) { + ip6hdr->dest.addr[2] = PP_HTONL(0x000000ffUL); + ip6hdr->dest.addr[3] = lwip_htonl(0xfe000000UL | (dest->addr[0] << 8) | dest->addr[1]); + } else if (dest->addr_len == 8) { + ip6hdr->dest.addr[2] = lwip_htonl(((dest->addr[0] ^ 2) << 24) | (dest->addr[1] << 16) | dest->addr[2] << 8 | dest->addr[3]); + ip6hdr->dest.addr[3] = lwip_htonl((dest->addr[4] << 24) | (dest->addr[5] << 16) | dest->addr[6] << 8 | dest->addr[7]); + } else { + /* invalid destination address length */ + LWIP_DEBUGF(LWIP_LOWPAN6_DECOMPRESSION_DEBUG, ("Invalid destination address length\n")); + return ERR_VAL; + } + } + } + + + /* Next Header Compression (NHC) decoding? 
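Only the UDP LOWPAN_NHC encoding (first NHC byte 11110xxx) is handled below; any other NHC type is rejected with ERR_VAL.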
*/ + if (lowpan6_buffer[0] & 0x04) { + LWIP_DEBUGF(LWIP_LOWPAN6_DECOMPRESSION_DEBUG, ("NHC decoding\n")); +#if LWIP_UDP + if ((lowpan6_buffer[lowpan6_offset] & 0xf8) == 0xf0) { + /* NHC: UDP */ + struct udp_hdr *udphdr; + LWIP_DEBUGF(LWIP_LOWPAN6_DECOMPRESSION_DEBUG, ("NHC: UDP\n")); + + /* UDP compression */ + IP6H_NEXTH_SET(ip6hdr, IP6_NEXTH_UDP); + udphdr = (struct udp_hdr *)((u8_t *)decomp_buffer + ip6_offset); + if (decomp_bufsize < IP6_HLEN + UDP_HLEN) { + return ERR_MEM; + } + + /* Checksum decompression */ + if (lowpan6_buffer[lowpan6_offset] & 0x04) { + /* @todo support checksum decompress */ + LWIP_DEBUGF(LWIP_DBG_ON, ("NHC: UDP chechsum decompression UNSUPPORTED\n")); + return ERR_VAL; + } + + /* Decompress ports, according to RFC4944 */ + i = lowpan6_buffer[lowpan6_offset++] & 0x03; + if (i == 0) { + udphdr->src = lwip_htons(lowpan6_buffer[lowpan6_offset] << 8 | lowpan6_buffer[lowpan6_offset + 1]); + udphdr->dest = lwip_htons(lowpan6_buffer[lowpan6_offset + 2] << 8 | lowpan6_buffer[lowpan6_offset + 3]); + lowpan6_offset += 4; + } else if (i == 0x01) { + udphdr->src = lwip_htons(lowpan6_buffer[lowpan6_offset] << 8 | lowpan6_buffer[lowpan6_offset + 1]); + udphdr->dest = lwip_htons(0xf000 | lowpan6_buffer[lowpan6_offset + 2]); + lowpan6_offset += 3; + } else if (i == 0x02) { + udphdr->src = lwip_htons(0xf000 | lowpan6_buffer[lowpan6_offset]); + udphdr->dest = lwip_htons(lowpan6_buffer[lowpan6_offset + 1] << 8 | lowpan6_buffer[lowpan6_offset + 2]); + lowpan6_offset += 3; + } else if (i == 0x03) { + udphdr->src = lwip_htons(0xf0b0 | ((lowpan6_buffer[lowpan6_offset] >> 4) & 0x0f)); + udphdr->dest = lwip_htons(0xf0b0 | (lowpan6_buffer[lowpan6_offset] & 0x0f)); + lowpan6_offset += 1; + } + + udphdr->chksum = lwip_htons(lowpan6_buffer[lowpan6_offset] << 8 | lowpan6_buffer[lowpan6_offset + 1]); + lowpan6_offset += 2; + ip6_offset += UDP_HLEN; + if (datagram_size == 0) { + datagram_size = compressed_size - lowpan6_offset + ip6_offset; + } + udphdr->len = lwip_htons(datagram_size - IP6_HLEN); + + } else +#endif /* LWIP_UDP */ + { + LWIP_DEBUGF(LWIP_DBG_ON,("NHC: unsupported protocol!\n")); + /* @todo support NHC other than UDP */ + return ERR_VAL; + } + } + if (datagram_size == 0) { + datagram_size = compressed_size - lowpan6_offset + ip6_offset; + } + /* Infer IPv6 payload length for header */ + IP6H_PLEN_SET(ip6hdr, datagram_size - IP6_HLEN); + + if (lowpan6_offset > lowpan6_bufsize) { + /* input buffer overflow */ + return ERR_VAL; + } + *hdr_size_comp = lowpan6_offset; + *hdr_size_decomp = ip6_offset; + + return ERR_OK; +} + +struct pbuf * +lowpan6_decompress(struct pbuf *p, u16_t datagram_size, ip6_addr_t *lowpan6_contexts, + struct lowpan6_link_addr *src, struct lowpan6_link_addr *dest) +{ + struct pbuf *q; + u16_t lowpan6_offset, ip6_offset; + err_t err; + +#if LWIP_UDP +#define UDP_HLEN_ALLOC UDP_HLEN +#else +#define UDP_HLEN_ALLOC 0 +#endif + + /* Allocate a buffer for decompression. This buffer will be too big and will be + trimmed once the final size is known. 
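The worst case is the compressed payload plus a full IPv6 header and, when UDP support is compiled in, a full UDP header.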
*/ + q = pbuf_alloc(PBUF_IP, p->len + IP6_HLEN + UDP_HLEN_ALLOC, PBUF_POOL); + if (q == NULL) { + pbuf_free(p); + return NULL; + } + if (q->len < IP6_HLEN + UDP_HLEN_ALLOC) { + /* The headers need to fit into the first pbuf */ + pbuf_free(p); + pbuf_free(q); + return NULL; + } + + /* Decompress the IPv6 (and possibly UDP) header(s) into the new pbuf */ + err = lowpan6_decompress_hdr((u8_t *)p->payload, p->len, (u8_t *)q->payload, q->len, + &lowpan6_offset, &ip6_offset, datagram_size, p->tot_len, lowpan6_contexts, src, dest); + if (err != ERR_OK) { + pbuf_free(p); + pbuf_free(q); + return NULL; + } + + /* Now we copy leftover contents from p to q, so we have all L2 and L3 headers + (and L4?) in a single pbuf: */ + + /* Hide the compressed headers in p */ + pbuf_remove_header(p, lowpan6_offset); + /* Temporarily hide the headers in q... */ + pbuf_remove_header(q, ip6_offset); + /* ... copy the rest of p into q... */ + pbuf_copy(q, p); + /* ... and reveal the headers again... */ + pbuf_add_header_force(q, ip6_offset); + /* ... trim the pbuf to its correct size... */ + pbuf_realloc(q, ip6_offset + p->len); + /* ... and cat possibly remaining (data-only) pbufs */ + if (p->next != NULL) { + pbuf_cat(q, p->next); + } + /* the original (first) pbuf can now be freed */ + p->next = NULL; + pbuf_free(p); + + /* all done */ + return q; +} + +#endif /* LWIP_6LOWPAN_IPHC */ +#endif /* LWIP_IPV6 */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/netif/ppp/auth.c b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/netif/ppp/auth.c new file mode 100644 index 0000000..fee01ae --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/netif/ppp/auth.c @@ -0,0 +1,2510 @@ +/* + * auth.c - PPP authentication and phase control. + * + * Copyright (c) 1993-2002 Paul Mackerras. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. The name(s) of the authors of this software must not be used to + * endorse or promote products derived from this software without + * prior written permission. + * + * 3. Redistributions of any form whatsoever must retain the following + * acknowledgment: + * "This product includes software developed by Paul Mackerras + * ". + * + * THE AUTHORS OF THIS SOFTWARE DISCLAIM ALL WARRANTIES WITH REGARD TO + * THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY + * AND FITNESS, IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY + * SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN + * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING + * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + * + * Derived from main.c, which is: + * + * Copyright (c) 1984-2000 Carnegie Mellon University. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * 3. The name "Carnegie Mellon University" must not be used to + * endorse or promote products derived from this software without + * prior written permission. For permission or any legal + * details, please contact + * Office of Technology Transfer + * Carnegie Mellon University + * 5000 Forbes Avenue + * Pittsburgh, PA 15213-3890 + * (412) 268-4387, fax: (412) 268-7395 + * tech-transfer@andrew.cmu.edu + * + * 4. Redistributions of any form whatsoever must retain the following + * acknowledgment: + * "This product includes software developed by Computing Services + * at Carnegie Mellon University (http://www.cmu.edu/computing/)." + * + * CARNEGIE MELLON UNIVERSITY DISCLAIMS ALL WARRANTIES WITH REGARD TO + * THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY + * AND FITNESS, IN NO EVENT SHALL CARNEGIE MELLON UNIVERSITY BE LIABLE + * FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN + * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING + * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#include "netif/ppp/ppp_opts.h" +#if PPP_SUPPORT /* don't build if not configured for use in lwipopts.h */ + +#if 0 /* UNUSED */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#if defined(_PATH_LASTLOG) && defined(__linux__) +#include +#endif + +#include +#include +#include + +#ifdef HAS_SHADOW +#include +#ifndef PW_PPP +#define PW_PPP PW_LOGIN +#endif +#endif + +#include +#endif /* UNUSED */ + +#include "netif/ppp/ppp_impl.h" + +#include "netif/ppp/fsm.h" +#include "netif/ppp/lcp.h" +#if CCP_SUPPORT +#include "netif/ppp/ccp.h" +#endif /* CCP_SUPPORT */ +#if ECP_SUPPORT +#include "netif/ppp/ecp.h" +#endif /* ECP_SUPPORT */ +#include "netif/ppp/ipcp.h" +#if PAP_SUPPORT +#include "netif/ppp/upap.h" +#endif /* PAP_SUPPORT */ +#if CHAP_SUPPORT +#include "netif/ppp/chap-new.h" +#endif /* CHAP_SUPPORT */ +#if EAP_SUPPORT +#include "netif/ppp/eap.h" +#endif /* EAP_SUPPORT */ +#if CBCP_SUPPORT +#include "netif/ppp/cbcp.h" +#endif + +#if 0 /* UNUSED */ +#include "session.h" +#endif /* UNUSED */ + +#if 0 /* UNUSED */ +/* Bits in scan_authfile return value */ +#define NONWILD_SERVER 1 +#define NONWILD_CLIENT 2 + +#define ISWILD(word) (word[0] == '*' && word[1] == 0) +#endif /* UNUSED */ + +#if 0 /* UNUSED */ +/* List of addresses which the peer may use. */ +static struct permitted_ip *addresses[NUM_PPP]; + +/* Wordlist giving addresses which the peer may use + without authenticating itself. */ +static struct wordlist *noauth_addrs; + +/* Remote telephone number, if available */ +char remote_number[MAXNAMELEN]; + +/* Wordlist giving remote telephone numbers which may connect. */ +static struct wordlist *permitted_numbers; + +/* Extra options to apply, from the secrets file entry for the peer. */ +static struct wordlist *extra_options; +#endif /* UNUSED */ + +#if 0 /* UNUSED */ +/* Set if we require authentication only because we have a default route. 
*/ +static bool default_auth; + +/* Hook to enable a plugin to control the idle time limit */ +int (*idle_time_hook) (struct ppp_idle *) = NULL; + +/* Hook for a plugin to say whether we can possibly authenticate any peer */ +int (*pap_check_hook) (void) = NULL; + +/* Hook for a plugin to check the PAP user and password */ +int (*pap_auth_hook) (char *user, char *passwd, char **msgp, + struct wordlist **paddrs, + struct wordlist **popts) = NULL; + +/* Hook for a plugin to know about the PAP user logout */ +void (*pap_logout_hook) (void) = NULL; + +/* Hook for a plugin to get the PAP password for authenticating us */ +int (*pap_passwd_hook) (char *user, char *passwd) = NULL; + +/* Hook for a plugin to say if we can possibly authenticate a peer using CHAP */ +int (*chap_check_hook) (void) = NULL; + +/* Hook for a plugin to get the CHAP password for authenticating us */ +int (*chap_passwd_hook) (char *user, char *passwd) = NULL; + +/* Hook for a plugin to say whether it is OK if the peer + refuses to authenticate. */ +int (*null_auth_hook) (struct wordlist **paddrs, + struct wordlist **popts) = NULL; + +int (*allowed_address_hook) (u32_t addr) = NULL; +#endif /* UNUSED */ + +#ifdef HAVE_MULTILINK +/* Hook for plugin to hear when an interface joins a multilink bundle */ +void (*multilink_join_hook) (void) = NULL; +#endif + +#if PPP_NOTIFY +/* A notifier for when the peer has authenticated itself, + and we are proceeding to the network phase. */ +struct notifier *auth_up_notifier = NULL; + +/* A notifier for when the link goes down. */ +struct notifier *link_down_notifier = NULL; +#endif /* PPP_NOTIFY */ + +/* + * Option variables. + */ +#if 0 /* MOVED TO ppp_settings */ +bool uselogin = 0; /* Use /etc/passwd for checking PAP */ +bool session_mgmt = 0; /* Do session management (login records) */ +bool cryptpap = 0; /* Passwords in pap-secrets are encrypted */ +bool refuse_pap = 0; /* Don't wanna auth. ourselves with PAP */ +bool refuse_chap = 0; /* Don't wanna auth. ourselves with CHAP */ +bool refuse_eap = 0; /* Don't wanna auth. ourselves with EAP */ +#if MSCHAP_SUPPORT +bool refuse_mschap = 0; /* Don't wanna auth. ourselves with MS-CHAP */ +bool refuse_mschap_v2 = 0; /* Don't wanna auth. ourselves with MS-CHAPv2 */ +#else /* MSCHAP_SUPPORT */ +bool refuse_mschap = 1; /* Don't wanna auth. ourselves with MS-CHAP */ +bool refuse_mschap_v2 = 1; /* Don't wanna auth. ourselves with MS-CHAPv2 */ +#endif /* MSCHAP_SUPPORT */ +bool usehostname = 0; /* Use hostname for our_name */ +bool auth_required = 0; /* Always require authentication from peer */ +bool allow_any_ip = 0; /* Allow peer to use any IP address */ +bool explicit_remote = 0; /* User specified explicit remote name */ +bool explicit_user = 0; /* Set if "user" option supplied */ +bool explicit_passwd = 0; /* Set if "password" option supplied */ +char remote_name[MAXNAMELEN]; /* Peer's name for authentication */ +static char *uafname; /* name of most recent +ua file */ + +extern char *crypt (const char *, const char *); +#endif /* UNUSED */ +/* Prototypes for procedures local to this file. 
*/ + +static void network_phase(ppp_pcb *pcb); +#if PPP_IDLETIMELIMIT +static void check_idle(void *arg); +#endif /* PPP_IDLETIMELIMIT */ +#if PPP_MAXCONNECT +static void connect_time_expired(void *arg); +#endif /* PPP_MAXCONNECT */ +#if 0 /* UNUSED */ +static int null_login (int); +/* static int get_pap_passwd (char *); */ +static int have_pap_secret (int *); +static int have_chap_secret (char *, char *, int, int *); +static int have_srp_secret (char *client, char *server, int need_ip, + int *lacks_ipp); +static int ip_addr_check (u32_t, struct permitted_ip *); +static int scan_authfile (FILE *, char *, char *, char *, + struct wordlist **, struct wordlist **, + char *, int); +static void free_wordlist (struct wordlist *); +static void set_allowed_addrs (int, struct wordlist *, struct wordlist *); +static int some_ip_ok (struct wordlist *); +static int setupapfile (char **); +static int privgroup (char **); +static int set_noauth_addr (char **); +static int set_permitted_number (char **); +static void check_access (FILE *, char *); +static int wordlist_count (struct wordlist *); +#endif /* UNUSED */ + +#ifdef MAXOCTETS +static void check_maxoctets (void *); +#endif + +#if PPP_OPTIONS +/* + * Authentication-related options. + */ +option_t auth_options[] = { + { "auth", o_bool, &auth_required, + "Require authentication from peer", OPT_PRIO | 1 }, + { "noauth", o_bool, &auth_required, + "Don't require peer to authenticate", OPT_PRIOSUB | OPT_PRIV, + &allow_any_ip }, + { "require-pap", o_bool, &lcp_wantoptions[0].neg_upap, + "Require PAP authentication from peer", + OPT_PRIOSUB | 1, &auth_required }, + { "+pap", o_bool, &lcp_wantoptions[0].neg_upap, + "Require PAP authentication from peer", + OPT_ALIAS | OPT_PRIOSUB | 1, &auth_required }, + { "require-chap", o_bool, &auth_required, + "Require CHAP authentication from peer", + OPT_PRIOSUB | OPT_A2OR | MDTYPE_MD5, + &lcp_wantoptions[0].chap_mdtype }, + { "+chap", o_bool, &auth_required, + "Require CHAP authentication from peer", + OPT_ALIAS | OPT_PRIOSUB | OPT_A2OR | MDTYPE_MD5, + &lcp_wantoptions[0].chap_mdtype }, +#if MSCHAP_SUPPORT + { "require-mschap", o_bool, &auth_required, + "Require MS-CHAP authentication from peer", + OPT_PRIOSUB | OPT_A2OR | MDTYPE_MICROSOFT, + &lcp_wantoptions[0].chap_mdtype }, + { "+mschap", o_bool, &auth_required, + "Require MS-CHAP authentication from peer", + OPT_ALIAS | OPT_PRIOSUB | OPT_A2OR | MDTYPE_MICROSOFT, + &lcp_wantoptions[0].chap_mdtype }, + { "require-mschap-v2", o_bool, &auth_required, + "Require MS-CHAPv2 authentication from peer", + OPT_PRIOSUB | OPT_A2OR | MDTYPE_MICROSOFT_V2, + &lcp_wantoptions[0].chap_mdtype }, + { "+mschap-v2", o_bool, &auth_required, + "Require MS-CHAPv2 authentication from peer", + OPT_ALIAS | OPT_PRIOSUB | OPT_A2OR | MDTYPE_MICROSOFT_V2, + &lcp_wantoptions[0].chap_mdtype }, +#endif /* MSCHAP_SUPPORT */ +#if 0 + { "refuse-pap", o_bool, &refuse_pap, + "Don't agree to auth to peer with PAP", 1 }, + { "-pap", o_bool, &refuse_pap, + "Don't allow PAP authentication with peer", OPT_ALIAS | 1 }, + { "refuse-chap", o_bool, &refuse_chap, + "Don't agree to auth to peer with CHAP", + OPT_A2CLRB | MDTYPE_MD5, + &lcp_allowoptions[0].chap_mdtype }, + { "-chap", o_bool, &refuse_chap, + "Don't allow CHAP authentication with peer", + OPT_ALIAS | OPT_A2CLRB | MDTYPE_MD5, + &lcp_allowoptions[0].chap_mdtype }, +#endif +#if MSCHAP_SUPPORT +#if 0 + { "refuse-mschap", o_bool, &refuse_mschap, + "Don't agree to auth to peer with MS-CHAP", + OPT_A2CLRB | MDTYPE_MICROSOFT, + 
&lcp_allowoptions[0].chap_mdtype }, + { "-mschap", o_bool, &refuse_mschap, + "Don't allow MS-CHAP authentication with peer", + OPT_ALIAS | OPT_A2CLRB | MDTYPE_MICROSOFT, + &lcp_allowoptions[0].chap_mdtype }, + { "refuse-mschap-v2", o_bool, &refuse_mschap_v2, + "Don't agree to auth to peer with MS-CHAPv2", + OPT_A2CLRB | MDTYPE_MICROSOFT_V2, + &lcp_allowoptions[0].chap_mdtype }, + { "-mschap-v2", o_bool, &refuse_mschap_v2, + "Don't allow MS-CHAPv2 authentication with peer", + OPT_ALIAS | OPT_A2CLRB | MDTYPE_MICROSOFT_V2, + &lcp_allowoptions[0].chap_mdtype }, +#endif +#endif /* MSCHAP_SUPPORT*/ +#if EAP_SUPPORT + { "require-eap", o_bool, &lcp_wantoptions[0].neg_eap, + "Require EAP authentication from peer", OPT_PRIOSUB | 1, + &auth_required }, +#if 0 + { "refuse-eap", o_bool, &refuse_eap, + "Don't agree to authenticate to peer with EAP", 1 }, +#endif +#endif /* EAP_SUPPORT */ + { "name", o_string, our_name, + "Set local name for authentication", + OPT_PRIO | OPT_PRIV | OPT_STATIC, NULL, MAXNAMELEN }, + + { "+ua", o_special, (void *)setupapfile, + "Get PAP user and password from file", + OPT_PRIO | OPT_A2STRVAL, &uafname }, + +#if 0 + { "user", o_string, user, + "Set name for auth with peer", OPT_PRIO | OPT_STATIC, + &explicit_user, MAXNAMELEN }, + + { "password", o_string, passwd, + "Password for authenticating us to the peer", + OPT_PRIO | OPT_STATIC | OPT_HIDE, + &explicit_passwd, MAXSECRETLEN }, +#endif + + { "usehostname", o_bool, &usehostname, + "Must use hostname for authentication", 1 }, + + { "remotename", o_string, remote_name, + "Set remote name for authentication", OPT_PRIO | OPT_STATIC, + &explicit_remote, MAXNAMELEN }, + + { "login", o_bool, &uselogin, + "Use system password database for PAP", OPT_A2COPY | 1 , + &session_mgmt }, + { "enable-session", o_bool, &session_mgmt, + "Enable session accounting for remote peers", OPT_PRIV | 1 }, + + { "papcrypt", o_bool, &cryptpap, + "PAP passwords are encrypted", 1 }, + + { "privgroup", o_special, (void *)privgroup, + "Allow group members to use privileged options", OPT_PRIV | OPT_A2LIST }, + + { "allow-ip", o_special, (void *)set_noauth_addr, + "Set IP address(es) which can be used without authentication", + OPT_PRIV | OPT_A2LIST }, + + { "remotenumber", o_string, remote_number, + "Set remote telephone number for authentication", OPT_PRIO | OPT_STATIC, + NULL, MAXNAMELEN }, + + { "allow-number", o_special, (void *)set_permitted_number, + "Set telephone number(s) which are allowed to connect", + OPT_PRIV | OPT_A2LIST }, + + { NULL } +}; +#endif /* PPP_OPTIONS */ + +#if 0 /* UNUSED */ +/* + * setupapfile - specifies UPAP info for authenticating with peer. 
+ */ +static int +setupapfile(argv) + char **argv; +{ + FILE *ufile; + int l; + uid_t euid; + char u[MAXNAMELEN], p[MAXSECRETLEN]; + char *fname; + + lcp_allowoptions[0].neg_upap = 1; + + /* open user info file */ + fname = strdup(*argv); + if (fname == NULL) + novm("+ua file name"); + euid = geteuid(); + if (seteuid(getuid()) == -1) { + option_error("unable to reset uid before opening %s: %m", fname); + return 0; + } + ufile = fopen(fname, "r"); + if (seteuid(euid) == -1) + fatal("unable to regain privileges: %m"); + if (ufile == NULL) { + option_error("unable to open user login data file %s", fname); + return 0; + } + check_access(ufile, fname); + uafname = fname; + + /* get username */ + if (fgets(u, MAXNAMELEN - 1, ufile) == NULL + || fgets(p, MAXSECRETLEN - 1, ufile) == NULL) { + fclose(ufile); + option_error("unable to read user login data file %s", fname); + return 0; + } + fclose(ufile); + + /* get rid of newlines */ + l = strlen(u); + if (l > 0 && u[l-1] == '\n') + u[l-1] = 0; + l = strlen(p); + if (l > 0 && p[l-1] == '\n') + p[l-1] = 0; + + if (override_value("user", option_priority, fname)) { + strlcpy(ppp_settings.user, u, sizeof(ppp_settings.user)); + explicit_user = 1; + } + if (override_value("passwd", option_priority, fname)) { + strlcpy(ppp_settings.passwd, p, sizeof(ppp_settings.passwd)); + explicit_passwd = 1; + } + + return (1); +} + +/* + * privgroup - allow members of the group to have privileged access. + */ +static int +privgroup(argv) + char **argv; +{ + struct group *g; + int i; + + g = getgrnam(*argv); + if (g == 0) { + option_error("group %s is unknown", *argv); + return 0; + } + for (i = 0; i < ngroups; ++i) { + if (groups[i] == g->gr_gid) { + privileged = 1; + break; + } + } + return 1; +} + + +/* + * set_noauth_addr - set address(es) that can be used without authentication. + * Equivalent to specifying an entry like `"" * "" addr' in pap-secrets. + */ +static int +set_noauth_addr(argv) + char **argv; +{ + char *addr = *argv; + int l = strlen(addr) + 1; + struct wordlist *wp; + + wp = (struct wordlist *) malloc(sizeof(struct wordlist) + l); + if (wp == NULL) + novm("allow-ip argument"); + wp->word = (char *) (wp + 1); + wp->next = noauth_addrs; + MEMCPY(wp->word, addr, l); + noauth_addrs = wp; + return 1; +} + + +/* + * set_permitted_number - set remote telephone number(s) that may connect. + */ +static int +set_permitted_number(argv) + char **argv; +{ + char *number = *argv; + int l = strlen(number) + 1; + struct wordlist *wp; + + wp = (struct wordlist *) malloc(sizeof(struct wordlist) + l); + if (wp == NULL) + novm("allow-number argument"); + wp->word = (char *) (wp + 1); + wp->next = permitted_numbers; + MEMCPY(wp->word, number, l); + permitted_numbers = wp; + return 1; +} +#endif + +/* + * An Open on LCP has requested a change from Dead to Establish phase. + */ +void link_required(ppp_pcb *pcb) { + LWIP_UNUSED_ARG(pcb); +} + +#if 0 +/* + * Bring the link up to the point of being able to do ppp. + */ +void start_link(unit) + int unit; +{ + ppp_pcb *pcb = &ppp_pcb_list[unit]; + char *msg; + + status = EXIT_NEGOTIATION_FAILED; + new_phase(pcb, PPP_PHASE_SERIALCONN); + + hungup = 0; + devfd = the_channel->connect(); + msg = "Connect script failed"; + if (devfd < 0) + goto fail; + + /* set up the serial device as a ppp interface */ + /* + * N.B. we used to do tdb_writelock/tdb_writeunlock around this + * (from establish_ppp to set_ifunit). 
However, we won't be + * doing the set_ifunit in multilink mode, which is the only time + * we need the atomicity that the tdb_writelock/tdb_writeunlock + * gives us. Thus we don't need the tdb_writelock/tdb_writeunlock. + */ + fd_ppp = the_channel->establish_ppp(devfd); + msg = "ppp establishment failed"; + if (fd_ppp < 0) { + status = EXIT_FATAL_ERROR; + goto disconnect; + } + + if (!demand && ifunit >= 0) + set_ifunit(1); + + /* + * Start opening the connection and wait for + * incoming events (reply, timeout, etc.). + */ + if (ifunit >= 0) + ppp_notice("Connect: %s <--> %s", ifname, ppp_devnam); + else + ppp_notice("Starting negotiation on %s", ppp_devnam); + add_fd(fd_ppp); + + new_phase(pcb, PPP_PHASE_ESTABLISH); + + lcp_lowerup(pcb); + return; + + disconnect: + new_phase(pcb, PPP_PHASE_DISCONNECT); + if (the_channel->disconnect) + the_channel->disconnect(); + + fail: + new_phase(pcb, PPP_PHASE_DEAD); + if (the_channel->cleanup) + (*the_channel->cleanup)(); +} +#endif + +/* + * LCP has terminated the link; go to the Dead phase and take the + * physical layer down. + */ +void link_terminated(ppp_pcb *pcb) { + if (pcb->phase == PPP_PHASE_DEAD +#ifdef HAVE_MULTILINK + || pcb->phase == PPP_PHASE_MASTER +#endif /* HAVE_MULTILINK */ + ) + return; + new_phase(pcb, PPP_PHASE_DISCONNECT); + +#if 0 /* UNUSED */ + if (pap_logout_hook) { + pap_logout_hook(); + } + session_end(devnam); +#endif /* UNUSED */ + + if (!doing_multilink) { + ppp_notice("Connection terminated."); +#if PPP_STATS_SUPPORT + print_link_stats(); +#endif /* PPP_STATS_SUPPORT */ + } else + ppp_notice("Link terminated."); + + lcp_lowerdown(pcb); + + ppp_link_terminated(pcb); +#if 0 + /* + * Delete pid files before disestablishing ppp. Otherwise it + * can happen that another pppd gets the same unit and then + * we delete its pid file. + */ + if (!doing_multilink && !demand) + remove_pidfiles(); + + /* + * If we may want to bring the link up again, transfer + * the ppp unit back to the loopback. Set the + * real serial device back to its normal mode of operation. + */ + if (fd_ppp >= 0) { + remove_fd(fd_ppp); + clean_check(); + the_channel->disestablish_ppp(devfd); + if (doing_multilink) + mp_exit_bundle(); + fd_ppp = -1; + } + if (!hungup) + lcp_lowerdown(pcb); + if (!doing_multilink && !demand) + script_unsetenv("IFNAME"); + + /* + * Run disconnector script, if requested. + * XXX we may not be able to do this if the line has hung up! + */ + if (devfd >= 0 && the_channel->disconnect) { + the_channel->disconnect(); + devfd = -1; + } + if (the_channel->cleanup) + (*the_channel->cleanup)(); + + if (doing_multilink && multilink_master) { + if (!bundle_terminating) + new_phase(pcb, PPP_PHASE_MASTER); + else + mp_bundle_terminated(); + } else + new_phase(pcb, PPP_PHASE_DEAD); +#endif +} + +/* + * LCP has gone down; it will either die or try to re-establish. 
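+ * For a single link this takes the upper layers down and, unless the link is already dead, returns the phase to ESTABLISH.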
+ */ +void link_down(ppp_pcb *pcb) { +#if PPP_NOTIFY + notify(link_down_notifier, 0); +#endif /* PPP_NOTIFY */ + + if (!doing_multilink) { + upper_layers_down(pcb); + if (pcb->phase != PPP_PHASE_DEAD +#ifdef HAVE_MULTILINK + && pcb->phase != PPP_PHASE_MASTER +#endif /* HAVE_MULTILINK */ + ) + new_phase(pcb, PPP_PHASE_ESTABLISH); + } + /* XXX if doing_multilink, should do something to stop + network-layer traffic on the link */ +} + +void upper_layers_down(ppp_pcb *pcb) { + int i; + const struct protent *protp; + + for (i = 0; (protp = protocols[i]) != NULL; ++i) { + if (protp->protocol != PPP_LCP && protp->lowerdown != NULL) + (*protp->lowerdown)(pcb); + if (protp->protocol < 0xC000 && protp->close != NULL) + (*protp->close)(pcb, "LCP down"); + } + pcb->num_np_open = 0; + pcb->num_np_up = 0; +} + +/* + * The link is established. + * Proceed to the Dead, Authenticate or Network phase as appropriate. + */ +void link_established(ppp_pcb *pcb) { +#if PPP_AUTH_SUPPORT + int auth; +#if PPP_SERVER +#if PAP_SUPPORT + lcp_options *wo = &pcb->lcp_wantoptions; +#endif /* PAP_SUPPORT */ + lcp_options *go = &pcb->lcp_gotoptions; +#endif /* PPP_SERVER */ + lcp_options *ho = &pcb->lcp_hisoptions; +#endif /* PPP_AUTH_SUPPORT */ + int i; + const struct protent *protp; + + /* + * Tell higher-level protocols that LCP is up. + */ + if (!doing_multilink) { + for (i = 0; (protp = protocols[i]) != NULL; ++i) + if (protp->protocol != PPP_LCP + && protp->lowerup != NULL) + (*protp->lowerup)(pcb); + } + +#if PPP_AUTH_SUPPORT +#if PPP_SERVER +#if PPP_ALLOWED_ADDRS + if (!auth_required && noauth_addrs != NULL) + set_allowed_addrs(unit, NULL, NULL); +#endif /* PPP_ALLOWED_ADDRS */ + + if (pcb->settings.auth_required && !(0 +#if PAP_SUPPORT + || go->neg_upap +#endif /* PAP_SUPPORT */ +#if CHAP_SUPPORT + || go->neg_chap +#endif /* CHAP_SUPPORT */ +#if EAP_SUPPORT + || go->neg_eap +#endif /* EAP_SUPPORT */ + )) { + +#if PPP_ALLOWED_ADDRS + /* + * We wanted the peer to authenticate itself, and it refused: + * if we have some address(es) it can use without auth, fine, + * otherwise treat it as though it authenticated with PAP using + * a username of "" and a password of "". If that's not OK, + * boot it out. 
+ */ + if (noauth_addrs != NULL) { + set_allowed_addrs(unit, NULL, NULL); + } else +#endif /* PPP_ALLOWED_ADDRS */ + if (!pcb->settings.null_login +#if PAP_SUPPORT + || !wo->neg_upap +#endif /* PAP_SUPPORT */ + ) { + ppp_warn("peer refused to authenticate: terminating link"); +#if 0 /* UNUSED */ + status = EXIT_PEER_AUTH_FAILED; +#endif /* UNUSED */ + pcb->err_code = PPPERR_AUTHFAIL; + lcp_close(pcb, "peer refused to authenticate"); + return; + } + } +#endif /* PPP_SERVER */ + + new_phase(pcb, PPP_PHASE_AUTHENTICATE); + auth = 0; +#if PPP_SERVER +#if EAP_SUPPORT + if (go->neg_eap) { + eap_authpeer(pcb, PPP_OUR_NAME); + auth |= EAP_PEER; + } else +#endif /* EAP_SUPPORT */ +#if CHAP_SUPPORT + if (go->neg_chap) { + chap_auth_peer(pcb, PPP_OUR_NAME, CHAP_DIGEST(go->chap_mdtype)); + auth |= CHAP_PEER; + } else +#endif /* CHAP_SUPPORT */ +#if PAP_SUPPORT + if (go->neg_upap) { + upap_authpeer(pcb); + auth |= PAP_PEER; + } else +#endif /* PAP_SUPPORT */ + {} +#endif /* PPP_SERVER */ + +#if EAP_SUPPORT + if (ho->neg_eap) { + eap_authwithpeer(pcb, pcb->settings.user); + auth |= EAP_WITHPEER; + } else +#endif /* EAP_SUPPORT */ +#if CHAP_SUPPORT + if (ho->neg_chap) { + chap_auth_with_peer(pcb, pcb->settings.user, CHAP_DIGEST(ho->chap_mdtype)); + auth |= CHAP_WITHPEER; + } else +#endif /* CHAP_SUPPORT */ +#if PAP_SUPPORT + if (ho->neg_upap) { + upap_authwithpeer(pcb, pcb->settings.user, pcb->settings.passwd); + auth |= PAP_WITHPEER; + } else +#endif /* PAP_SUPPORT */ + {} + + pcb->auth_pending = auth; + pcb->auth_done = 0; + + if (!auth) +#endif /* PPP_AUTH_SUPPORT */ + network_phase(pcb); +} + +/* + * Proceed to the network phase. + */ +static void network_phase(ppp_pcb *pcb) { +#if CBCP_SUPPORT + ppp_pcb *pcb = &ppp_pcb_list[unit]; +#endif +#if 0 /* UNUSED */ + lcp_options *go = &lcp_gotoptions[unit]; +#endif /* UNUSED */ + +#if 0 /* UNUSED */ + /* Log calling number. */ + if (*remote_number) + ppp_notice("peer from calling number %q authorized", remote_number); +#endif /* UNUSED */ + +#if PPP_NOTIFY + /* + * If the peer had to authenticate, notify it now. + */ + if (0 +#if CHAP_SUPPORT + || go->neg_chap +#endif /* CHAP_SUPPORT */ +#if PAP_SUPPORT + || go->neg_upap +#endif /* PAP_SUPPORT */ +#if EAP_SUPPORT + || go->neg_eap +#endif /* EAP_SUPPORT */ + ) { + notify(auth_up_notifier, 0); + } +#endif /* PPP_NOTIFY */ + +#if CBCP_SUPPORT + /* + * If we negotiated callback, do it now. 
+ */ + if (go->neg_cbcp) { + new_phase(pcb, PPP_PHASE_CALLBACK); + (*cbcp_protent.open)(pcb); + return; + } +#endif + +#if PPP_OPTIONS + /* + * Process extra options from the secrets file + */ + if (extra_options) { + options_from_list(extra_options, 1); + free_wordlist(extra_options); + extra_options = 0; + } +#endif /* PPP_OPTIONS */ + start_networks(pcb); +} + +void start_networks(ppp_pcb *pcb) { +#if CCP_SUPPORT || ECP_SUPPORT + int i; + const struct protent *protp; +#endif /* CCP_SUPPORT || ECP_SUPPORT */ + + new_phase(pcb, PPP_PHASE_NETWORK); + +#ifdef HAVE_MULTILINK + if (multilink) { + if (mp_join_bundle()) { + if (multilink_join_hook) + (*multilink_join_hook)(); + if (updetach && !nodetach) + detach(); + return; + } + } +#endif /* HAVE_MULTILINK */ + +#ifdef PPP_FILTER + if (!demand) + set_filters(&pass_filter, &active_filter); +#endif +#if CCP_SUPPORT || ECP_SUPPORT + /* Start CCP and ECP */ + for (i = 0; (protp = protocols[i]) != NULL; ++i) + if ( + (0 +#if ECP_SUPPORT + || protp->protocol == PPP_ECP +#endif /* ECP_SUPPORT */ +#if CCP_SUPPORT + || protp->protocol == PPP_CCP +#endif /* CCP_SUPPORT */ + ) + && protp->open != NULL) + (*protp->open)(pcb); +#endif /* CCP_SUPPORT || ECP_SUPPORT */ + + /* + * Bring up other network protocols iff encryption is not required. + */ + if (1 +#if ECP_SUPPORT + && !ecp_gotoptions[unit].required +#endif /* ECP_SUPPORT */ +#if MPPE_SUPPORT + && !pcb->ccp_gotoptions.mppe +#endif /* MPPE_SUPPORT */ + ) + continue_networks(pcb); +} + +void continue_networks(ppp_pcb *pcb) { + int i; + const struct protent *protp; + + /* + * Start the "real" network protocols. + */ + for (i = 0; (protp = protocols[i]) != NULL; ++i) + if (protp->protocol < 0xC000 +#if CCP_SUPPORT + && protp->protocol != PPP_CCP +#endif /* CCP_SUPPORT */ +#if ECP_SUPPORT + && protp->protocol != PPP_ECP +#endif /* ECP_SUPPORT */ + && protp->open != NULL) { + (*protp->open)(pcb); + ++pcb->num_np_open; + } + + if (pcb->num_np_open == 0) + /* nothing to do */ + lcp_close(pcb, "No network protocols running"); +} + +#if PPP_AUTH_SUPPORT +#if PPP_SERVER +/* + * auth_check_passwd - Check the user name and passwd against configuration. + * + * returns: + * 0: Authentication failed. + * 1: Authentication succeeded. + * In either case, msg points to an appropriate message and msglen to the message len. + */ +int auth_check_passwd(ppp_pcb *pcb, char *auser, int userlen, char *apasswd, int passwdlen, const char **msg, int *msglen) { + int secretuserlen; + int secretpasswdlen; + + if (pcb->settings.user && pcb->settings.passwd) { + secretuserlen = (int)strlen(pcb->settings.user); + secretpasswdlen = (int)strlen(pcb->settings.passwd); + if (secretuserlen == userlen + && secretpasswdlen == passwdlen + && !memcmp(auser, pcb->settings.user, userlen) + && !memcmp(apasswd, pcb->settings.passwd, passwdlen) ) { + *msg = "Login ok"; + *msglen = sizeof("Login ok")-1; + return 1; + } + } + + *msg = "Login incorrect"; + *msglen = sizeof("Login incorrect")-1; + return 0; +} + +/* + * The peer has failed to authenticate himself using `protocol'. + */ +void auth_peer_fail(ppp_pcb *pcb, int protocol) { + LWIP_UNUSED_ARG(protocol); + /* + * Authentication failure: take the link down + */ +#if 0 /* UNUSED */ + status = EXIT_PEER_AUTH_FAILED; +#endif /* UNUSED */ + pcb->err_code = PPPERR_AUTHFAIL; + lcp_close(pcb, "Authentication failed"); +} + +/* + * The peer has been successfully authenticated using `protocol'. 
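+ * The method's bit is recorded in auth_done and cleared from auth_pending; once nothing is pending, the network (or callback) phase is entered.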
+ */ +void auth_peer_success(ppp_pcb *pcb, int protocol, int prot_flavor, const char *name, int namelen) { + int bit; +#ifndef HAVE_MULTILINK + LWIP_UNUSED_ARG(name); + LWIP_UNUSED_ARG(namelen); +#endif /* HAVE_MULTILINK */ + + switch (protocol) { +#if CHAP_SUPPORT + case PPP_CHAP: + bit = CHAP_PEER; + switch (prot_flavor) { + case CHAP_MD5: + bit |= CHAP_MD5_PEER; + break; +#if MSCHAP_SUPPORT + case CHAP_MICROSOFT: + bit |= CHAP_MS_PEER; + break; + case CHAP_MICROSOFT_V2: + bit |= CHAP_MS2_PEER; + break; +#endif /* MSCHAP_SUPPORT */ + default: + break; + } + break; +#endif /* CHAP_SUPPORT */ +#if PAP_SUPPORT + case PPP_PAP: + bit = PAP_PEER; + break; +#endif /* PAP_SUPPORT */ +#if EAP_SUPPORT + case PPP_EAP: + bit = EAP_PEER; + break; +#endif /* EAP_SUPPORT */ + default: + ppp_warn("auth_peer_success: unknown protocol %x", protocol); + return; + } + +#ifdef HAVE_MULTILINK + /* + * Save the authenticated name of the peer for later. + */ + if (namelen > (int)sizeof(pcb->peer_authname) - 1) + namelen = (int)sizeof(pcb->peer_authname) - 1; + MEMCPY(pcb->peer_authname, name, namelen); + pcb->peer_authname[namelen] = 0; +#endif /* HAVE_MULTILINK */ +#if 0 /* UNUSED */ + script_setenv("PEERNAME", , 0); +#endif /* UNUSED */ + + /* Save the authentication method for later. */ + pcb->auth_done |= bit; + + /* + * If there is no more authentication still to be done, + * proceed to the network (or callback) phase. + */ + if ((pcb->auth_pending &= ~bit) == 0) + network_phase(pcb); +} +#endif /* PPP_SERVER */ + +/* + * We have failed to authenticate ourselves to the peer using `protocol'. + */ +void auth_withpeer_fail(ppp_pcb *pcb, int protocol) { + LWIP_UNUSED_ARG(protocol); + /* + * We've failed to authenticate ourselves to our peer. + * + * Some servers keep sending CHAP challenges, but there + * is no point in persisting without any way to get updated + * authentication secrets. + * + * He'll probably take the link down, and there's not much + * we can do except wait for that. + */ + pcb->err_code = PPPERR_AUTHFAIL; + lcp_close(pcb, "Failed to authenticate ourselves to peer"); +} + +/* + * We have successfully authenticated ourselves with the peer using `protocol'. + */ +void auth_withpeer_success(ppp_pcb *pcb, int protocol, int prot_flavor) { + int bit; + const char *prot = ""; + + switch (protocol) { +#if CHAP_SUPPORT + case PPP_CHAP: + bit = CHAP_WITHPEER; + prot = "CHAP"; + switch (prot_flavor) { + case CHAP_MD5: + bit |= CHAP_MD5_WITHPEER; + break; +#if MSCHAP_SUPPORT + case CHAP_MICROSOFT: + bit |= CHAP_MS_WITHPEER; + break; + case CHAP_MICROSOFT_V2: + bit |= CHAP_MS2_WITHPEER; + break; +#endif /* MSCHAP_SUPPORT */ + default: + break; + } + break; +#endif /* CHAP_SUPPORT */ +#if PAP_SUPPORT + case PPP_PAP: + bit = PAP_WITHPEER; + prot = "PAP"; + break; +#endif /* PAP_SUPPORT */ +#if EAP_SUPPORT + case PPP_EAP: + bit = EAP_WITHPEER; + prot = "EAP"; + break; +#endif /* EAP_SUPPORT */ + default: + ppp_warn("auth_withpeer_success: unknown protocol %x", protocol); + bit = 0; + /* no break */ + } + + ppp_notice("%s authentication succeeded", prot); + + /* Save the authentication method for later. */ + pcb->auth_done |= bit; + + /* + * If there is no more authentication still being done, + * proceed to the network (or callback) phase. + */ + if ((pcb->auth_pending &= ~bit) == 0) + network_phase(pcb); +} +#endif /* PPP_AUTH_SUPPORT */ + + +/* + * np_up - a network protocol has come up. 
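+ * The first protocol to come up moves the phase to RUNNING and arms the idle-time and maximum-connect-time timeouts where those are enabled.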
+ */ +void np_up(ppp_pcb *pcb, int proto) { +#if PPP_IDLETIMELIMIT + int tlim; +#endif /* PPP_IDLETIMELIMIT */ + LWIP_UNUSED_ARG(proto); + + if (pcb->num_np_up == 0) { + /* + * At this point we consider that the link has come up successfully. + */ + new_phase(pcb, PPP_PHASE_RUNNING); + +#if PPP_IDLETIMELIMIT +#if 0 /* UNUSED */ + if (idle_time_hook != 0) + tlim = (*idle_time_hook)(NULL); + else +#endif /* UNUSED */ + tlim = pcb->settings.idle_time_limit; + if (tlim > 0) + TIMEOUT(check_idle, (void*)pcb, tlim); +#endif /* PPP_IDLETIMELIMIT */ + +#if PPP_MAXCONNECT + /* + * Set a timeout to close the connection once the maximum + * connect time has expired. + */ + if (pcb->settings.maxconnect > 0) + TIMEOUT(connect_time_expired, (void*)pcb, pcb->settings.maxconnect); +#endif /* PPP_MAXCONNECT */ + +#ifdef MAXOCTETS + if (maxoctets > 0) + TIMEOUT(check_maxoctets, NULL, maxoctets_timeout); +#endif + +#if 0 /* Unused */ + /* + * Detach now, if the updetach option was given. + */ + if (updetach && !nodetach) + detach(); +#endif /* Unused */ + } + ++pcb->num_np_up; +} + +/* + * np_down - a network protocol has gone down. + */ +void np_down(ppp_pcb *pcb, int proto) { + LWIP_UNUSED_ARG(proto); + if (--pcb->num_np_up == 0) { +#if PPP_IDLETIMELIMIT + UNTIMEOUT(check_idle, (void*)pcb); +#endif /* PPP_IDLETIMELIMIT */ +#if PPP_MAXCONNECT + UNTIMEOUT(connect_time_expired, NULL); +#endif /* PPP_MAXCONNECT */ +#ifdef MAXOCTETS + UNTIMEOUT(check_maxoctets, NULL); +#endif + new_phase(pcb, PPP_PHASE_NETWORK); + } +} + +/* + * np_finished - a network protocol has finished using the link. + */ +void np_finished(ppp_pcb *pcb, int proto) { + LWIP_UNUSED_ARG(proto); + if (--pcb->num_np_open <= 0) { + /* no further use for the link: shut up shop. */ + lcp_close(pcb, "No network protocols running"); + } +} + +#ifdef MAXOCTETS +static void +check_maxoctets(arg) + void *arg; +{ +#if PPP_STATS_SUPPORT + unsigned int used; + + update_link_stats(ifunit); + link_stats_valid=0; + + switch(maxoctets_dir) { + case PPP_OCTETS_DIRECTION_IN: + used = link_stats.bytes_in; + break; + case PPP_OCTETS_DIRECTION_OUT: + used = link_stats.bytes_out; + break; + case PPP_OCTETS_DIRECTION_MAXOVERAL: + case PPP_OCTETS_DIRECTION_MAXSESSION: + used = (link_stats.bytes_in > link_stats.bytes_out) ? link_stats.bytes_in : link_stats.bytes_out; + break; + default: + used = link_stats.bytes_in+link_stats.bytes_out; + break; + } + if (used > maxoctets) { + ppp_notice("Traffic limit reached. Limit: %u Used: %u", maxoctets, used); + status = EXIT_TRAFFIC_LIMIT; + lcp_close(pcb, "Traffic limit"); +#if 0 /* UNUSED */ + need_holdoff = 0; +#endif /* UNUSED */ + } else { + TIMEOUT(check_maxoctets, NULL, maxoctets_timeout); + } +#endif /* PPP_STATS_SUPPORT */ +} +#endif /* MAXOCTETS */ + +#if PPP_IDLETIMELIMIT +/* + * check_idle - check whether the link has been idle for long + * enough that we can shut it down. + */ +static void check_idle(void *arg) { + ppp_pcb *pcb = (ppp_pcb*)arg; + struct ppp_idle idle; + time_t itime; + int tlim; + + if (!get_idle_time(pcb, &idle)) + return; +#if 0 /* UNUSED */ + if (idle_time_hook != 0) { + tlim = idle_time_hook(&idle); + } else { +#endif /* UNUSED */ + itime = LWIP_MIN(idle.xmit_idle, idle.recv_idle); + tlim = pcb->settings.idle_time_limit - itime; +#if 0 /* UNUSED */ + } +#endif /* UNUSED */ + if (tlim <= 0) { + /* link is idle: shut it down. 
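A non-positive tlim means both directions have already been idle for at least idle_time_limit.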
*/ + ppp_notice("Terminating connection due to lack of activity."); + pcb->err_code = PPPERR_IDLETIMEOUT; + lcp_close(pcb, "Link inactive"); +#if 0 /* UNUSED */ + need_holdoff = 0; +#endif /* UNUSED */ + } else { + TIMEOUT(check_idle, (void*)pcb, tlim); + } +} +#endif /* PPP_IDLETIMELIMIT */ + +#if PPP_MAXCONNECT +/* + * connect_time_expired - log a message and close the connection. + */ +static void connect_time_expired(void *arg) { + ppp_pcb *pcb = (ppp_pcb*)arg; + ppp_info("Connect time expired"); + pcb->err_code = PPPERR_CONNECTTIME; + lcp_close(pcb, "Connect time expired"); /* Close connection */ +} +#endif /* PPP_MAXCONNECT */ + +#if PPP_OPTIONS +/* + * auth_check_options - called to check authentication options. + */ +void +auth_check_options() +{ + lcp_options *wo = &lcp_wantoptions[0]; + int can_auth; + int lacks_ip; + + /* Default our_name to hostname, and user to our_name */ + if (our_name[0] == 0 || usehostname) + strlcpy(our_name, hostname, sizeof(our_name)); + /* If a blank username was explicitly given as an option, trust + the user and don't use our_name */ + if (ppp_settings.user[0] == 0 && !explicit_user) + strlcpy(ppp_settings.user, our_name, sizeof(ppp_settings.user)); + + /* + * If we have a default route, require the peer to authenticate + * unless the noauth option was given or the real user is root. + */ + if (!auth_required && !allow_any_ip && have_route_to(0) && !privileged) { + auth_required = 1; + default_auth = 1; + } + +#if CHAP_SUPPORT + /* If we selected any CHAP flavors, we should probably negotiate it. :-) */ + if (wo->chap_mdtype) + wo->neg_chap = 1; +#endif /* CHAP_SUPPORT */ + + /* If authentication is required, ask peer for CHAP, PAP, or EAP. */ + if (auth_required) { + allow_any_ip = 0; + if (1 +#if CHAP_SUPPORT + && !wo->neg_chap +#endif /* CHAP_SUPPORT */ +#if PAP_SUPPORT + && !wo->neg_upap +#endif /* PAP_SUPPORT */ +#if EAP_SUPPORT + && !wo->neg_eap +#endif /* EAP_SUPPORT */ + ) { +#if CHAP_SUPPORT + wo->neg_chap = CHAP_MDTYPE_SUPPORTED != MDTYPE_NONE; + wo->chap_mdtype = CHAP_MDTYPE_SUPPORTED; +#endif /* CHAP_SUPPORT */ +#if PAP_SUPPORT + wo->neg_upap = 1; +#endif /* PAP_SUPPORT */ +#if EAP_SUPPORT + wo->neg_eap = 1; +#endif /* EAP_SUPPORT */ + } + } else { +#if CHAP_SUPPORT + wo->neg_chap = 0; + wo->chap_mdtype = MDTYPE_NONE; +#endif /* CHAP_SUPPORT */ +#if PAP_SUPPORT + wo->neg_upap = 0; +#endif /* PAP_SUPPORT */ +#if EAP_SUPPORT + wo->neg_eap = 0; +#endif /* EAP_SUPPORT */ + } + + /* + * Check whether we have appropriate secrets to use + * to authenticate the peer. Note that EAP can authenticate by way + * of a CHAP-like exchanges as well as SRP. + */ + lacks_ip = 0; +#if PAP_SUPPORT + can_auth = wo->neg_upap && (uselogin || have_pap_secret(&lacks_ip)); +#else + can_auth = 0; +#endif /* PAP_SUPPORT */ + if (!can_auth && (0 +#if CHAP_SUPPORT + || wo->neg_chap +#endif /* CHAP_SUPPORT */ +#if EAP_SUPPORT + || wo->neg_eap +#endif /* EAP_SUPPORT */ + )) { +#if CHAP_SUPPORT + can_auth = have_chap_secret((explicit_remote? remote_name: NULL), + our_name, 1, &lacks_ip); +#else + can_auth = 0; +#endif + } + if (!can_auth +#if EAP_SUPPORT + && wo->neg_eap +#endif /* EAP_SUPPORT */ + ) { + can_auth = have_srp_secret((explicit_remote? 
remote_name: NULL), + our_name, 1, &lacks_ip); + } + + if (auth_required && !can_auth && noauth_addrs == NULL) { + if (default_auth) { + option_error( +"By default the remote system is required to authenticate itself"); + option_error( +"(because this system has a default route to the internet)"); + } else if (explicit_remote) + option_error( +"The remote system (%s) is required to authenticate itself", + remote_name); + else + option_error( +"The remote system is required to authenticate itself"); + option_error( +"but I couldn't find any suitable secret (password) for it to use to do so."); + if (lacks_ip) + option_error( +"(None of the available passwords would let it use an IP address.)"); + + exit(1); + } + + /* + * Early check for remote number authorization. + */ + if (!auth_number()) { + ppp_warn("calling number %q is not authorized", remote_number); + exit(EXIT_CNID_AUTH_FAILED); + } +} +#endif /* PPP_OPTIONS */ + +#if 0 /* UNUSED */ +/* + * auth_reset - called when LCP is starting negotiations to recheck + * authentication options, i.e. whether we have appropriate secrets + * to use for authenticating ourselves and/or the peer. + */ +void +auth_reset(unit) + int unit; +{ + lcp_options *go = &lcp_gotoptions[unit]; + lcp_options *ao = &lcp_allowoptions[unit]; + int hadchap; + + hadchap = -1; + ao->neg_upap = !refuse_pap && (passwd[0] != 0 || get_pap_passwd(NULL)); + ao->neg_chap = (!refuse_chap || !refuse_mschap || !refuse_mschap_v2) + && (passwd[0] != 0 || + (hadchap = have_chap_secret(user, (explicit_remote? remote_name: + NULL), 0, NULL))); + ao->neg_eap = !refuse_eap && ( + passwd[0] != 0 || + (hadchap == 1 || (hadchap == -1 && have_chap_secret(user, + (explicit_remote? remote_name: NULL), 0, NULL))) || + have_srp_secret(user, (explicit_remote? remote_name: NULL), 0, NULL)); + + hadchap = -1; + if (go->neg_upap && !uselogin && !have_pap_secret(NULL)) + go->neg_upap = 0; + if (go->neg_chap) { + if (!(hadchap = have_chap_secret((explicit_remote? remote_name: NULL), + our_name, 1, NULL))) + go->neg_chap = 0; + } + if (go->neg_eap && + (hadchap == 0 || (hadchap == -1 && + !have_chap_secret((explicit_remote? remote_name: NULL), our_name, + 1, NULL))) && + !have_srp_secret((explicit_remote? remote_name: NULL), our_name, 1, + NULL)) + go->neg_eap = 0; +} + +/* + * check_passwd - Check the user name and passwd against the PAP secrets + * file. If requested, also check against the system password database, + * and login the user if OK. + * + * returns: + * UPAP_AUTHNAK: Authentication failed. + * UPAP_AUTHACK: Authentication succeeded. + * In either case, msg points to an appropriate message. + */ +int +check_passwd(unit, auser, userlen, apasswd, passwdlen, msg) + int unit; + char *auser; + int userlen; + char *apasswd; + int passwdlen; + char **msg; +{ + return UPAP_AUTHNAK; + int ret; + char *filename; + FILE *f; + struct wordlist *addrs = NULL, *opts = NULL; + char passwd[256], user[256]; + char secret[MAXWORDLEN]; + static int attempts = 0; + + /* + * Make copies of apasswd and auser, then null-terminate them. + * If there are unprintable characters in the password, make + * them visible. + */ + slprintf(ppp_settings.passwd, sizeof(ppp_settings.passwd), "%.*v", passwdlen, apasswd); + slprintf(ppp_settings.user, sizeof(ppp_settings.user), "%.*v", userlen, auser); + *msg = ""; + + /* + * Check if a plugin wants to handle this. 
+ */ + if (pap_auth_hook) { + ret = (*pap_auth_hook)(ppp_settings.user, ppp_settings.passwd, msg, &addrs, &opts); + if (ret >= 0) { + /* note: set_allowed_addrs() saves opts (but not addrs): + don't free it! */ + if (ret) + set_allowed_addrs(unit, addrs, opts); + else if (opts != 0) + free_wordlist(opts); + if (addrs != 0) + free_wordlist(addrs); + BZERO(ppp_settings.passwd, sizeof(ppp_settings.passwd)); + return ret? UPAP_AUTHACK: UPAP_AUTHNAK; + } + } + + /* + * Open the file of pap secrets and scan for a suitable secret + * for authenticating this user. + */ + filename = _PATH_UPAPFILE; + addrs = opts = NULL; + ret = UPAP_AUTHNAK; + f = fopen(filename, "r"); + if (f == NULL) { + ppp_error("Can't open PAP password file %s: %m", filename); + + } else { + check_access(f, filename); + if (scan_authfile(f, ppp_settings.user, our_name, secret, &addrs, &opts, filename, 0) < 0) { + ppp_warn("no PAP secret found for %s", user); + } else { + /* + * If the secret is "@login", it means to check + * the password against the login database. + */ + int login_secret = strcmp(secret, "@login") == 0; + ret = UPAP_AUTHACK; + if (uselogin || login_secret) { + /* login option or secret is @login */ + if (session_full(ppp_settings.user, ppp_settings.passwd, devnam, msg) == 0) { + ret = UPAP_AUTHNAK; + } + } else if (session_mgmt) { + if (session_check(ppp_settings.user, NULL, devnam, NULL) == 0) { + ppp_warn("Peer %q failed PAP Session verification", user); + ret = UPAP_AUTHNAK; + } + } + if (secret[0] != 0 && !login_secret) { + /* password given in pap-secrets - must match */ + if ((cryptpap || strcmp(ppp_settings.passwd, secret) != 0) + && strcmp(crypt(ppp_settings.passwd, secret), secret) != 0) + ret = UPAP_AUTHNAK; + } + } + fclose(f); + } + + if (ret == UPAP_AUTHNAK) { + if (**msg == 0) + *msg = "Login incorrect"; + /* + * XXX can we ever get here more than once?? + * Frustrate passwd stealer programs. + * Allow 10 tries, but start backing off after 3 (stolen from login). + * On 10'th, drop the connection. + */ + if (attempts++ >= 10) { + ppp_warn("%d LOGIN FAILURES ON %s, %s", attempts, devnam, user); + lcp_close(pcb, "login failed"); + } + if (attempts > 3) + sleep((u_int) (attempts - 3) * 5); + if (opts != NULL) + free_wordlist(opts); + + } else { + attempts = 0; /* Reset count */ + if (**msg == 0) + *msg = "Login ok"; + set_allowed_addrs(unit, addrs, opts); + } + + if (addrs != NULL) + free_wordlist(addrs); + BZERO(ppp_settings.passwd, sizeof(ppp_settings.passwd)); + BZERO(secret, sizeof(secret)); + + return ret; +} + +/* + * null_login - Check if a username of "" and a password of "" are + * acceptable, and iff so, set the list of acceptable IP addresses + * and return 1. + */ +static int +null_login(unit) + int unit; +{ + char *filename; + FILE *f; + int i, ret; + struct wordlist *addrs, *opts; + char secret[MAXWORDLEN]; + + /* + * Check if a plugin wants to handle this. + */ + ret = -1; + if (null_auth_hook) + ret = (*null_auth_hook)(&addrs, &opts); + + /* + * Open the file of pap secrets and scan for a suitable secret. 
+ */ + if (ret <= 0) { + filename = _PATH_UPAPFILE; + addrs = NULL; + f = fopen(filename, "r"); + if (f == NULL) + return 0; + check_access(f, filename); + + i = scan_authfile(f, "", our_name, secret, &addrs, &opts, filename, 0); + ret = i >= 0 && secret[0] == 0; + BZERO(secret, sizeof(secret)); + fclose(f); + } + + if (ret) + set_allowed_addrs(unit, addrs, opts); + else if (opts != 0) + free_wordlist(opts); + if (addrs != 0) + free_wordlist(addrs); + + return ret; +} + +/* + * get_pap_passwd - get a password for authenticating ourselves with + * our peer using PAP. Returns 1 on success, 0 if no suitable password + * could be found. + * Assumes passwd points to MAXSECRETLEN bytes of space (if non-null). + */ +static int +get_pap_passwd(passwd) + char *passwd; +{ + char *filename; + FILE *f; + int ret; + char secret[MAXWORDLEN]; + + /* + * Check whether a plugin wants to supply this. + */ + if (pap_passwd_hook) { + ret = (*pap_passwd_hook)(ppp_settings,user, ppp_settings.passwd); + if (ret >= 0) + return ret; + } + + filename = _PATH_UPAPFILE; + f = fopen(filename, "r"); + if (f == NULL) + return 0; + check_access(f, filename); + ret = scan_authfile(f, user, + (remote_name[0]? remote_name: NULL), + secret, NULL, NULL, filename, 0); + fclose(f); + if (ret < 0) + return 0; + if (passwd != NULL) + strlcpy(passwd, secret, MAXSECRETLEN); + BZERO(secret, sizeof(secret)); + return 1; +} + +/* + * have_pap_secret - check whether we have a PAP file with any + * secrets that we could possibly use for authenticating the peer. + */ +static int +have_pap_secret(lacks_ipp) + int *lacks_ipp; +{ + FILE *f; + int ret; + char *filename; + struct wordlist *addrs; + + /* let the plugin decide, if there is one */ + if (pap_check_hook) { + ret = (*pap_check_hook)(); + if (ret >= 0) + return ret; + } + + filename = _PATH_UPAPFILE; + f = fopen(filename, "r"); + if (f == NULL) + return 0; + + ret = scan_authfile(f, (explicit_remote? remote_name: NULL), our_name, + NULL, &addrs, NULL, filename, 0); + fclose(f); + if (ret >= 0 && !some_ip_ok(addrs)) { + if (lacks_ipp != 0) + *lacks_ipp = 1; + ret = -1; + } + if (addrs != 0) + free_wordlist(addrs); + + return ret >= 0; +} + +/* + * have_chap_secret - check whether we have a CHAP file with a + * secret that we could possibly use for authenticating `client' + * on `server'. Either can be the null string, meaning we don't + * know the identity yet. + */ +static int +have_chap_secret(client, server, need_ip, lacks_ipp) + char *client; + char *server; + int need_ip; + int *lacks_ipp; +{ + FILE *f; + int ret; + char *filename; + struct wordlist *addrs; + + if (chap_check_hook) { + ret = (*chap_check_hook)(); + if (ret >= 0) { + return ret; + } + } + + filename = _PATH_CHAPFILE; + f = fopen(filename, "r"); + if (f == NULL) + return 0; + + if (client != NULL && client[0] == 0) + client = NULL; + else if (server != NULL && server[0] == 0) + server = NULL; + + ret = scan_authfile(f, client, server, NULL, &addrs, NULL, filename, 0); + fclose(f); + if (ret >= 0 && need_ip && !some_ip_ok(addrs)) { + if (lacks_ipp != 0) + *lacks_ipp = 1; + ret = -1; + } + if (addrs != 0) + free_wordlist(addrs); + + return ret >= 0; +} + +/* + * have_srp_secret - check whether we have a SRP file with a + * secret that we could possibly use for authenticating `client' + * on `server'. Either can be the null string, meaning we don't + * know the identity yet. 
+ */ +static int +have_srp_secret(client, server, need_ip, lacks_ipp) + char *client; + char *server; + int need_ip; + int *lacks_ipp; +{ + FILE *f; + int ret; + char *filename; + struct wordlist *addrs; + + filename = _PATH_SRPFILE; + f = fopen(filename, "r"); + if (f == NULL) + return 0; + + if (client != NULL && client[0] == 0) + client = NULL; + else if (server != NULL && server[0] == 0) + server = NULL; + + ret = scan_authfile(f, client, server, NULL, &addrs, NULL, filename, 0); + fclose(f); + if (ret >= 0 && need_ip && !some_ip_ok(addrs)) { + if (lacks_ipp != 0) + *lacks_ipp = 1; + ret = -1; + } + if (addrs != 0) + free_wordlist(addrs); + + return ret >= 0; +} +#endif /* UNUSED */ + +#if PPP_AUTH_SUPPORT +/* + * get_secret - open the CHAP secret file and return the secret + * for authenticating the given client on the given server. + * (We could be either client or server). + */ +int get_secret(ppp_pcb *pcb, const char *client, const char *server, char *secret, int *secret_len, int am_server) { + int len; + LWIP_UNUSED_ARG(server); + LWIP_UNUSED_ARG(am_server); + + if (!client || !client[0] || !pcb->settings.user || !pcb->settings.passwd || strcmp(client, pcb->settings.user)) { + return 0; + } + + len = (int)strlen(pcb->settings.passwd); + if (len > MAXSECRETLEN) { + ppp_error("Secret for %s on %s is too long", client, server); + len = MAXSECRETLEN; + } + + MEMCPY(secret, pcb->settings.passwd, len); + *secret_len = len; + return 1; + +#if 0 /* UNUSED */ + FILE *f; + int ret, len; + char *filename; + struct wordlist *addrs, *opts; + char secbuf[MAXWORDLEN]; + struct wordlist *addrs; + addrs = NULL; + + if (!am_server && ppp_settings.passwd[0] != 0) { + strlcpy(secbuf, ppp_settings.passwd, sizeof(secbuf)); + } else if (!am_server && chap_passwd_hook) { + if ( (*chap_passwd_hook)(client, secbuf) < 0) { + ppp_error("Unable to obtain CHAP password for %s on %s from plugin", + client, server); + return 0; + } + } else { + filename = _PATH_CHAPFILE; + addrs = NULL; + secbuf[0] = 0; + + f = fopen(filename, "r"); + if (f == NULL) { + ppp_error("Can't open chap secret file %s: %m", filename); + return 0; + } + check_access(f, filename); + + ret = scan_authfile(f, client, server, secbuf, &addrs, &opts, filename, 0); + fclose(f); + if (ret < 0) + return 0; + + if (am_server) + set_allowed_addrs(unit, addrs, opts); + else if (opts != 0) + free_wordlist(opts); + if (addrs != 0) + free_wordlist(addrs); + } + + len = strlen(secbuf); + if (len > MAXSECRETLEN) { + ppp_error("Secret for %s on %s is too long", client, server); + len = MAXSECRETLEN; + } + MEMCPY(secret, secbuf, len); + BZERO(secbuf, sizeof(secbuf)); + *secret_len = len; + + return 1; +#endif /* UNUSED */ +} +#endif /* PPP_AUTH_SUPPORT */ + + +#if 0 /* UNUSED */ +/* + * get_srp_secret - open the SRP secret file and return the secret + * for authenticating the given client on the given server. + * (We could be either client or server). 
+ */ +int +get_srp_secret(unit, client, server, secret, am_server) + int unit; + char *client; + char *server; + char *secret; + int am_server; +{ + FILE *fp; + int ret; + char *filename; + struct wordlist *addrs, *opts; + + if (!am_server && ppp_settings.passwd[0] != '\0') { + strlcpy(secret, ppp_settings.passwd, MAXWORDLEN); + } else { + filename = _PATH_SRPFILE; + addrs = NULL; + + fp = fopen(filename, "r"); + if (fp == NULL) { + ppp_error("Can't open srp secret file %s: %m", filename); + return 0; + } + check_access(fp, filename); + + secret[0] = '\0'; + ret = scan_authfile(fp, client, server, secret, &addrs, &opts, + filename, am_server); + fclose(fp); + if (ret < 0) + return 0; + + if (am_server) + set_allowed_addrs(unit, addrs, opts); + else if (opts != NULL) + free_wordlist(opts); + if (addrs != NULL) + free_wordlist(addrs); + } + + return 1; +} + +/* + * set_allowed_addrs() - set the list of allowed addresses. + * Also looks for `--' indicating options to apply for this peer + * and leaves the following words in extra_options. + */ +static void +set_allowed_addrs(unit, addrs, opts) + int unit; + struct wordlist *addrs; + struct wordlist *opts; +{ + int n; + struct wordlist *ap, **plink; + struct permitted_ip *ip; + char *ptr_word, *ptr_mask; + struct hostent *hp; + struct netent *np; + u32_t a, mask, ah, offset; + struct ipcp_options *wo = &ipcp_wantoptions[unit]; + u32_t suggested_ip = 0; + + if (addresses[unit] != NULL) + free(addresses[unit]); + addresses[unit] = NULL; + if (extra_options != NULL) + free_wordlist(extra_options); + extra_options = opts; + + /* + * Count the number of IP addresses given. + */ + n = wordlist_count(addrs) + wordlist_count(noauth_addrs); + if (n == 0) + return; + ip = (struct permitted_ip *) malloc((n + 1) * sizeof(struct permitted_ip)); + if (ip == 0) + return; + + /* temporarily append the noauth_addrs list to addrs */ + for (plink = &addrs; *plink != NULL; plink = &(*plink)->next) + ; + *plink = noauth_addrs; + + n = 0; + for (ap = addrs; ap != NULL; ap = ap->next) { + /* "-" means no addresses authorized, "*" means any address allowed */ + ptr_word = ap->word; + if (strcmp(ptr_word, "-") == 0) + break; + if (strcmp(ptr_word, "*") == 0) { + ip[n].permit = 1; + ip[n].base = ip[n].mask = 0; + ++n; + break; + } + + ip[n].permit = 1; + if (*ptr_word == '!') { + ip[n].permit = 0; + ++ptr_word; + } + + mask = ~ (u32_t) 0; + offset = 0; + ptr_mask = strchr (ptr_word, '/'); + if (ptr_mask != NULL) { + int bit_count; + char *endp; + + bit_count = (int) strtol (ptr_mask+1, &endp, 10); + if (bit_count <= 0 || bit_count > 32) { + ppp_warn("invalid address length %v in auth. 
address list", + ptr_mask+1); + continue; + } + bit_count = 32 - bit_count; /* # bits in host part */ + if (*endp == '+') { + offset = ifunit + 1; + ++endp; + } + if (*endp != 0) { + ppp_warn("invalid address length syntax: %v", ptr_mask+1); + continue; + } + *ptr_mask = '\0'; + mask <<= bit_count; + } + + hp = gethostbyname(ptr_word); + if (hp != NULL && hp->h_addrtype == AF_INET) { + a = *(u32_t *)hp->h_addr; + } else { + np = getnetbyname (ptr_word); + if (np != NULL && np->n_addrtype == AF_INET) { + a = lwip_htonl ((u32_t)np->n_net); + if (ptr_mask == NULL) { + /* calculate appropriate mask for net */ + ah = lwip_ntohl(a); + if (IN_CLASSA(ah)) + mask = IN_CLASSA_NET; + else if (IN_CLASSB(ah)) + mask = IN_CLASSB_NET; + else if (IN_CLASSC(ah)) + mask = IN_CLASSC_NET; + } + } else { + a = inet_addr (ptr_word); + } + } + + if (ptr_mask != NULL) + *ptr_mask = '/'; + + if (a == (u32_t)-1L) { + ppp_warn("unknown host %s in auth. address list", ap->word); + continue; + } + if (offset != 0) { + if (offset >= ~mask) { + ppp_warn("interface unit %d too large for subnet %v", + ifunit, ptr_word); + continue; + } + a = lwip_htonl((lwip_ntohl(a) & mask) + offset); + mask = ~(u32_t)0; + } + ip[n].mask = lwip_htonl(mask); + ip[n].base = a & ip[n].mask; + ++n; + if (~mask == 0 && suggested_ip == 0) + suggested_ip = a; + } + *plink = NULL; + + ip[n].permit = 0; /* make the last entry forbid all addresses */ + ip[n].base = 0; /* to terminate the list */ + ip[n].mask = 0; + + addresses[unit] = ip; + + /* + * If the address given for the peer isn't authorized, or if + * the user hasn't given one, AND there is an authorized address + * which is a single host, then use that if we find one. + */ + if (suggested_ip != 0 + && (wo->hisaddr == 0 || !auth_ip_addr(unit, wo->hisaddr))) { + wo->hisaddr = suggested_ip; + /* + * Do we insist on this address? No, if there are other + * addresses authorized than the suggested one. + */ + if (n > 1) + wo->accept_remote = 1; + } +} + +/* + * auth_ip_addr - check whether the peer is authorized to use + * a given IP address. Returns 1 if authorized, 0 otherwise. + */ +int +auth_ip_addr(unit, addr) + int unit; + u32_t addr; +{ + int ok; + + /* don't allow loopback or multicast address */ + if (bad_ip_adrs(addr)) + return 0; + + if (allowed_address_hook) { + ok = allowed_address_hook(addr); + if (ok >= 0) return ok; + } + + if (addresses[unit] != NULL) { + ok = ip_addr_check(addr, addresses[unit]); + if (ok >= 0) + return ok; + } + + if (auth_required) + return 0; /* no addresses authorized */ + return allow_any_ip || privileged || !have_route_to(addr); +} + +static int +ip_addr_check(addr, addrs) + u32_t addr; + struct permitted_ip *addrs; +{ + for (; ; ++addrs) + if ((addr & addrs->mask) == addrs->base) + return addrs->permit; +} + +/* + * bad_ip_adrs - return 1 if the IP address is one we don't want + * to use, such as an address in the loopback net or a multicast address. + * addr is in network byte order. + */ +int +bad_ip_adrs(addr) + u32_t addr; +{ + addr = lwip_ntohl(addr); + return (addr >> IN_CLASSA_NSHIFT) == IN_LOOPBACKNET + || IN_MULTICAST(addr) || IN_BADCLASS(addr); +} + +/* + * some_ip_ok - check a wordlist to see if it authorizes any + * IP address(es). 
+ */ +static int +some_ip_ok(addrs) + struct wordlist *addrs; +{ + for (; addrs != 0; addrs = addrs->next) { + if (addrs->word[0] == '-') + break; + if (addrs->word[0] != '!') + return 1; /* some IP address is allowed */ + } + return 0; +} + +/* + * auth_number - check whether the remote number is allowed to connect. + * Returns 1 if authorized, 0 otherwise. + */ +int +auth_number() +{ + struct wordlist *wp = permitted_numbers; + int l; + + /* Allow all if no authorization list. */ + if (!wp) + return 1; + + /* Allow if we have a match in the authorization list. */ + while (wp) { + /* trailing '*' wildcard */ + l = strlen(wp->word); + if ((wp->word)[l - 1] == '*') + l--; + if (!strncasecmp(wp->word, remote_number, l)) + return 1; + wp = wp->next; + } + + return 0; +} + +/* + * check_access - complain if a secret file has too-liberal permissions. + */ +static void +check_access(f, filename) + FILE *f; + char *filename; +{ + struct stat sbuf; + + if (fstat(fileno(f), &sbuf) < 0) { + ppp_warn("cannot stat secret file %s: %m", filename); + } else if ((sbuf.st_mode & (S_IRWXG | S_IRWXO)) != 0) { + ppp_warn("Warning - secret file %s has world and/or group access", + filename); + } +} + +/* + * scan_authfile - Scan an authorization file for a secret suitable + * for authenticating `client' on `server'. The return value is -1 + * if no secret is found, otherwise >= 0. The return value has + * NONWILD_CLIENT set if the secret didn't have "*" for the client, and + * NONWILD_SERVER set if the secret didn't have "*" for the server. + * Any following words on the line up to a "--" (i.e. address authorization + * info) are placed in a wordlist and returned in *addrs. Any + * following words (extra options) are placed in a wordlist and + * returned in *opts. + * We assume secret is NULL or points to MAXWORDLEN bytes of space. + * Flags are non-zero if we need two colons in the secret in order to + * match. + */ +static int +scan_authfile(f, client, server, secret, addrs, opts, filename, flags) + FILE *f; + char *client; + char *server; + char *secret; + struct wordlist **addrs; + struct wordlist **opts; + char *filename; + int flags; +{ + int newline, xxx; + int got_flag, best_flag; + FILE *sf; + struct wordlist *ap, *addr_list, *alist, **app; + char word[MAXWORDLEN]; + char atfile[MAXWORDLEN]; + char lsecret[MAXWORDLEN]; + char *cp; + + if (addrs != NULL) + *addrs = NULL; + if (opts != NULL) + *opts = NULL; + addr_list = NULL; + if (!getword(f, word, &newline, filename)) + return -1; /* file is empty??? */ + newline = 1; + best_flag = -1; + for (;;) { + /* + * Skip until we find a word at the start of a line. + */ + while (!newline && getword(f, word, &newline, filename)) + ; + if (!newline) + break; /* got to end of file */ + + /* + * Got a client - check if it's a match or a wildcard. + */ + got_flag = 0; + if (client != NULL && strcmp(word, client) != 0 && !ISWILD(word)) { + newline = 0; + continue; + } + if (!ISWILD(word)) + got_flag = NONWILD_CLIENT; + + /* + * Now get a server and check if it matches. + */ + if (!getword(f, word, &newline, filename)) + break; + if (newline) + continue; + if (!ISWILD(word)) { + if (server != NULL && strcmp(word, server) != 0) + continue; + got_flag |= NONWILD_SERVER; + } + + /* + * Got some sort of a match - see if it's better than what + * we have already. + */ + if (got_flag <= best_flag) + continue; + + /* + * Get the secret. 
+ */ + if (!getword(f, word, &newline, filename)) + break; + if (newline) + continue; + + /* + * SRP-SHA1 authenticator should never be reading secrets from + * a file. (Authenticatee may, though.) + */ + if (flags && ((cp = strchr(word, ':')) == NULL || + strchr(cp + 1, ':') == NULL)) + continue; + + if (secret != NULL) { + /* + * Special syntax: @/pathname means read secret from file. + */ + if (word[0] == '@' && word[1] == '/') { + strlcpy(atfile, word+1, sizeof(atfile)); + if ((sf = fopen(atfile, "r")) == NULL) { + ppp_warn("can't open indirect secret file %s", atfile); + continue; + } + check_access(sf, atfile); + if (!getword(sf, word, &xxx, atfile)) { + ppp_warn("no secret in indirect secret file %s", atfile); + fclose(sf); + continue; + } + fclose(sf); + } + strlcpy(lsecret, word, sizeof(lsecret)); + } + + /* + * Now read address authorization info and make a wordlist. + */ + app = &alist; + for (;;) { + if (!getword(f, word, &newline, filename) || newline) + break; + ap = (struct wordlist *) + malloc(sizeof(struct wordlist) + strlen(word) + 1); + if (ap == NULL) + novm("authorized addresses"); + ap->word = (char *) (ap + 1); + strcpy(ap->word, word); + *app = ap; + app = &ap->next; + } + *app = NULL; + + /* + * This is the best so far; remember it. + */ + best_flag = got_flag; + if (addr_list) + free_wordlist(addr_list); + addr_list = alist; + if (secret != NULL) + strlcpy(secret, lsecret, MAXWORDLEN); + + if (!newline) + break; + } + + /* scan for a -- word indicating the start of options */ + for (app = &addr_list; (ap = *app) != NULL; app = &ap->next) + if (strcmp(ap->word, "--") == 0) + break; + /* ap = start of options */ + if (ap != NULL) { + ap = ap->next; /* first option */ + free(*app); /* free the "--" word */ + *app = NULL; /* terminate addr list */ + } + if (opts != NULL) + *opts = ap; + else if (ap != NULL) + free_wordlist(ap); + if (addrs != NULL) + *addrs = addr_list; + else if (addr_list != NULL) + free_wordlist(addr_list); + + return best_flag; +} + +/* + * wordlist_count - return the number of items in a wordlist + */ +static int +wordlist_count(wp) + struct wordlist *wp; +{ + int n; + + for (n = 0; wp != NULL; wp = wp->next) + ++n; + return n; +} + +/* + * free_wordlist - release memory allocated for a wordlist. + */ +static void +free_wordlist(wp) + struct wordlist *wp; +{ + struct wordlist *next; + + while (wp != NULL) { + next = wp->next; + free(wp); + wp = next; + } +} +#endif /* UNUSED */ + +#endif /* PPP_SUPPORT */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/netif/ppp/ccp.c b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/netif/ppp/ccp.c new file mode 100644 index 0000000..b354a3f --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/netif/ppp/ccp.c @@ -0,0 +1,1740 @@ +/* + * ccp.c - PPP Compression Control Protocol. + * + * Copyright (c) 1994-2002 Paul Mackerras. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. The name(s) of the authors of this software must not be used to + * endorse or promote products derived from this software without + * prior written permission. + * + * 3. 
Redistributions of any form whatsoever must retain the following + * acknowledgment: + * "This product includes software developed by Paul Mackerras + * ". + * + * THE AUTHORS OF THIS SOFTWARE DISCLAIM ALL WARRANTIES WITH REGARD TO + * THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY + * AND FITNESS, IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY + * SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN + * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING + * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#include "netif/ppp/ppp_opts.h" +#if PPP_SUPPORT && CCP_SUPPORT /* don't build if not configured for use in lwipopts.h */ + +#include +#include + +#include "netif/ppp/ppp_impl.h" + +#include "netif/ppp/fsm.h" +#include "netif/ppp/ccp.h" + +#if MPPE_SUPPORT +#include "netif/ppp/lcp.h" /* lcp_close(), lcp_fsm */ +#include "netif/ppp/mppe.h" /* mppe_init() */ +#endif /* MPPE_SUPPORT */ + +/* + * Unfortunately there is a bug in zlib which means that using a + * size of 8 (window size = 256) for Deflate compression will cause + * buffer overruns and kernel crashes in the deflate module. + * Until this is fixed we only accept sizes in the range 9 .. 15. + * Thanks to James Carlson for pointing this out. + */ +#define DEFLATE_MIN_WORKS 9 + +/* + * Command-line options. + */ +#if PPP_OPTIONS +static int setbsdcomp (char **); +static int setdeflate (char **); +static char bsd_value[8]; +static char deflate_value[8]; + +/* + * Option variables. + */ +#if MPPE_SUPPORT +bool refuse_mppe_stateful = 1; /* Allow stateful mode? */ +#endif /* MPPE_SUPPORT */ + +static option_t ccp_option_list[] = { + { "noccp", o_bool, &ccp_protent.enabled_flag, + "Disable CCP negotiation" }, + { "-ccp", o_bool, &ccp_protent.enabled_flag, + "Disable CCP negotiation", OPT_ALIAS }, + + { "bsdcomp", o_special, (void *)setbsdcomp, + "Request BSD-Compress packet compression", + OPT_PRIO | OPT_A2STRVAL | OPT_STATIC, bsd_value }, + { "nobsdcomp", o_bool, &ccp_wantoptions[0].bsd_compress, + "don't allow BSD-Compress", OPT_PRIOSUB | OPT_A2CLR, + &ccp_allowoptions[0].bsd_compress }, + { "-bsdcomp", o_bool, &ccp_wantoptions[0].bsd_compress, + "don't allow BSD-Compress", OPT_ALIAS | OPT_PRIOSUB | OPT_A2CLR, + &ccp_allowoptions[0].bsd_compress }, + + { "deflate", o_special, (void *)setdeflate, + "request Deflate compression", + OPT_PRIO | OPT_A2STRVAL | OPT_STATIC, deflate_value }, + { "nodeflate", o_bool, &ccp_wantoptions[0].deflate, + "don't allow Deflate compression", OPT_PRIOSUB | OPT_A2CLR, + &ccp_allowoptions[0].deflate }, + { "-deflate", o_bool, &ccp_wantoptions[0].deflate, + "don't allow Deflate compression", OPT_ALIAS | OPT_PRIOSUB | OPT_A2CLR, + &ccp_allowoptions[0].deflate }, + + { "nodeflatedraft", o_bool, &ccp_wantoptions[0].deflate_draft, + "don't use draft deflate #", OPT_A2COPY, + &ccp_allowoptions[0].deflate_draft }, + + { "predictor1", o_bool, &ccp_wantoptions[0].predictor_1, + "request Predictor-1", OPT_PRIO | 1 }, + { "nopredictor1", o_bool, &ccp_wantoptions[0].predictor_1, + "don't allow Predictor-1", OPT_PRIOSUB | OPT_A2CLR, + &ccp_allowoptions[0].predictor_1 }, + { "-predictor1", o_bool, &ccp_wantoptions[0].predictor_1, + "don't allow Predictor-1", OPT_ALIAS | OPT_PRIOSUB | OPT_A2CLR, + &ccp_allowoptions[0].predictor_1 }, + +#if MPPE_SUPPORT + /* MPPE options are symmetrical ... 
we only set wantoptions here */ + { "require-mppe", o_bool, &ccp_wantoptions[0].mppe, + "require MPPE encryption", + OPT_PRIO | MPPE_OPT_40 | MPPE_OPT_128 }, + { "+mppe", o_bool, &ccp_wantoptions[0].mppe, + "require MPPE encryption", + OPT_ALIAS | OPT_PRIO | MPPE_OPT_40 | MPPE_OPT_128 }, + { "nomppe", o_bool, &ccp_wantoptions[0].mppe, + "don't allow MPPE encryption", OPT_PRIO }, + { "-mppe", o_bool, &ccp_wantoptions[0].mppe, + "don't allow MPPE encryption", OPT_ALIAS | OPT_PRIO }, + + /* We use ccp_allowoptions[0].mppe as a junk var ... it is reset later */ + { "require-mppe-40", o_bool, &ccp_allowoptions[0].mppe, + "require MPPE 40-bit encryption", OPT_PRIO | OPT_A2OR | MPPE_OPT_40, + &ccp_wantoptions[0].mppe }, + { "+mppe-40", o_bool, &ccp_allowoptions[0].mppe, + "require MPPE 40-bit encryption", OPT_PRIO | OPT_A2OR | MPPE_OPT_40, + &ccp_wantoptions[0].mppe }, + { "nomppe-40", o_bool, &ccp_allowoptions[0].mppe, + "don't allow MPPE 40-bit encryption", + OPT_PRIOSUB | OPT_A2CLRB | MPPE_OPT_40, &ccp_wantoptions[0].mppe }, + { "-mppe-40", o_bool, &ccp_allowoptions[0].mppe, + "don't allow MPPE 40-bit encryption", + OPT_ALIAS | OPT_PRIOSUB | OPT_A2CLRB | MPPE_OPT_40, + &ccp_wantoptions[0].mppe }, + + { "require-mppe-128", o_bool, &ccp_allowoptions[0].mppe, + "require MPPE 128-bit encryption", OPT_PRIO | OPT_A2OR | MPPE_OPT_128, + &ccp_wantoptions[0].mppe }, + { "+mppe-128", o_bool, &ccp_allowoptions[0].mppe, + "require MPPE 128-bit encryption", + OPT_ALIAS | OPT_PRIO | OPT_A2OR | MPPE_OPT_128, + &ccp_wantoptions[0].mppe }, + { "nomppe-128", o_bool, &ccp_allowoptions[0].mppe, + "don't allow MPPE 128-bit encryption", + OPT_PRIOSUB | OPT_A2CLRB | MPPE_OPT_128, &ccp_wantoptions[0].mppe }, + { "-mppe-128", o_bool, &ccp_allowoptions[0].mppe, + "don't allow MPPE 128-bit encryption", + OPT_ALIAS | OPT_PRIOSUB | OPT_A2CLRB | MPPE_OPT_128, + &ccp_wantoptions[0].mppe }, + + /* strange one; we always request stateless, but will we allow stateful? */ + { "mppe-stateful", o_bool, &refuse_mppe_stateful, + "allow MPPE stateful mode", OPT_PRIO }, + { "nomppe-stateful", o_bool, &refuse_mppe_stateful, + "disallow MPPE stateful mode", OPT_PRIO | 1 }, +#endif /* MPPE_SUPPORT */ + + { NULL } +}; +#endif /* PPP_OPTIONS */ + +/* + * Protocol entry points from main code. + */ +static void ccp_init(ppp_pcb *pcb); +static void ccp_open(ppp_pcb *pcb); +static void ccp_close(ppp_pcb *pcb, const char *reason); +static void ccp_lowerup(ppp_pcb *pcb); +static void ccp_lowerdown(ppp_pcb *pcb); +static void ccp_input(ppp_pcb *pcb, u_char *pkt, int len); +static void ccp_protrej(ppp_pcb *pcb); +#if PRINTPKT_SUPPORT +static int ccp_printpkt(const u_char *p, int plen, void (*printer) (void *, const char *, ...), void *arg); +#endif /* PRINTPKT_SUPPORT */ +#if PPP_DATAINPUT +static void ccp_datainput(ppp_pcb *pcb, u_char *pkt, int len); +#endif /* PPP_DATAINPUT */ + +const struct protent ccp_protent = { + PPP_CCP, + ccp_init, + ccp_input, + ccp_protrej, + ccp_lowerup, + ccp_lowerdown, + ccp_open, + ccp_close, +#if PRINTPKT_SUPPORT + ccp_printpkt, +#endif /* PRINTPKT_SUPPORT */ +#if PPP_DATAINPUT + ccp_datainput, +#endif /* PPP_DATAINPUT */ +#if PRINTPKT_SUPPORT + "CCP", + "Compressed", +#endif /* PRINTPKT_SUPPORT */ +#if PPP_OPTIONS + ccp_option_list, + NULL, +#endif /* PPP_OPTIONS */ +#if DEMAND_SUPPORT + NULL, + NULL +#endif /* DEMAND_SUPPORT */ +}; + +/* + * Callbacks for fsm code. 
+ */ +static void ccp_resetci (fsm *); +static int ccp_cilen (fsm *); +static void ccp_addci (fsm *, u_char *, int *); +static int ccp_ackci (fsm *, u_char *, int); +static int ccp_nakci (fsm *, u_char *, int, int); +static int ccp_rejci (fsm *, u_char *, int); +static int ccp_reqci (fsm *, u_char *, int *, int); +static void ccp_up (fsm *); +static void ccp_down (fsm *); +static int ccp_extcode (fsm *, int, int, u_char *, int); +static void ccp_rack_timeout (void *); +static const char *method_name (ccp_options *, ccp_options *); + +static const fsm_callbacks ccp_callbacks = { + ccp_resetci, + ccp_cilen, + ccp_addci, + ccp_ackci, + ccp_nakci, + ccp_rejci, + ccp_reqci, + ccp_up, + ccp_down, + NULL, + NULL, + NULL, + NULL, + ccp_extcode, + "CCP" +}; + +/* + * Do we want / did we get any compression? + */ +static int ccp_anycompress(ccp_options *opt) { + return (0 +#if DEFLATE_SUPPORT + || (opt)->deflate +#endif /* DEFLATE_SUPPORT */ +#if BSDCOMPRESS_SUPPORT + || (opt)->bsd_compress +#endif /* BSDCOMPRESS_SUPPORT */ +#if PREDICTOR_SUPPORT + || (opt)->predictor_1 || (opt)->predictor_2 +#endif /* PREDICTOR_SUPPORT */ +#if MPPE_SUPPORT + || (opt)->mppe +#endif /* MPPE_SUPPORT */ + ); +} + +/* + * Local state (mainly for handling reset-reqs and reset-acks). + */ +#define RACK_PENDING 1 /* waiting for reset-ack */ +#define RREQ_REPEAT 2 /* send another reset-req if no reset-ack */ + +#define RACKTIMEOUT 1 /* second */ + +#if PPP_OPTIONS +/* + * Option parsing + */ +static int +setbsdcomp(argv) + char **argv; +{ + int rbits, abits; + char *str, *endp; + + str = *argv; + abits = rbits = strtol(str, &endp, 0); + if (endp != str && *endp == ',') { + str = endp + 1; + abits = strtol(str, &endp, 0); + } + if (*endp != 0 || endp == str) { + option_error("invalid parameter '%s' for bsdcomp option", *argv); + return 0; + } + if ((rbits != 0 && (rbits < BSD_MIN_BITS || rbits > BSD_MAX_BITS)) + || (abits != 0 && (abits < BSD_MIN_BITS || abits > BSD_MAX_BITS))) { + option_error("bsdcomp option values must be 0 or %d .. %d", + BSD_MIN_BITS, BSD_MAX_BITS); + return 0; + } + if (rbits > 0) { + ccp_wantoptions[0].bsd_compress = 1; + ccp_wantoptions[0].bsd_bits = rbits; + } else + ccp_wantoptions[0].bsd_compress = 0; + if (abits > 0) { + ccp_allowoptions[0].bsd_compress = 1; + ccp_allowoptions[0].bsd_bits = abits; + } else + ccp_allowoptions[0].bsd_compress = 0; + ppp_slprintf(bsd_value, sizeof(bsd_value), + rbits == abits? "%d": "%d,%d", rbits, abits); + + return 1; +} + +static int +setdeflate(argv) + char **argv; +{ + int rbits, abits; + char *str, *endp; + + str = *argv; + abits = rbits = strtol(str, &endp, 0); + if (endp != str && *endp == ',') { + str = endp + 1; + abits = strtol(str, &endp, 0); + } + if (*endp != 0 || endp == str) { + option_error("invalid parameter '%s' for deflate option", *argv); + return 0; + } + if ((rbits != 0 && (rbits < DEFLATE_MIN_SIZE || rbits > DEFLATE_MAX_SIZE)) + || (abits != 0 && (abits < DEFLATE_MIN_SIZE + || abits > DEFLATE_MAX_SIZE))) { + option_error("deflate option values must be 0 or %d .. 
%d", + DEFLATE_MIN_SIZE, DEFLATE_MAX_SIZE); + return 0; + } + if (rbits == DEFLATE_MIN_SIZE || abits == DEFLATE_MIN_SIZE) { + if (rbits == DEFLATE_MIN_SIZE) + rbits = DEFLATE_MIN_WORKS; + if (abits == DEFLATE_MIN_SIZE) + abits = DEFLATE_MIN_WORKS; + warn("deflate option value of %d changed to %d to avoid zlib bug", + DEFLATE_MIN_SIZE, DEFLATE_MIN_WORKS); + } + if (rbits > 0) { + ccp_wantoptions[0].deflate = 1; + ccp_wantoptions[0].deflate_size = rbits; + } else + ccp_wantoptions[0].deflate = 0; + if (abits > 0) { + ccp_allowoptions[0].deflate = 1; + ccp_allowoptions[0].deflate_size = abits; + } else + ccp_allowoptions[0].deflate = 0; + ppp_slprintf(deflate_value, sizeof(deflate_value), + rbits == abits? "%d": "%d,%d", rbits, abits); + + return 1; +} +#endif /* PPP_OPTIONS */ + +/* + * ccp_init - initialize CCP. + */ +static void ccp_init(ppp_pcb *pcb) { + fsm *f = &pcb->ccp_fsm; + + f->pcb = pcb; + f->protocol = PPP_CCP; + f->callbacks = &ccp_callbacks; + fsm_init(f); + +#if 0 /* Not necessary, everything is cleared in ppp_new() */ + memset(wo, 0, sizeof(*wo)); + memset(go, 0, sizeof(*go)); + memset(ao, 0, sizeof(*ao)); + memset(ho, 0, sizeof(*ho)); +#endif /* 0 */ + +#if DEFLATE_SUPPORT + wo->deflate = 1; + wo->deflate_size = DEFLATE_MAX_SIZE; + wo->deflate_correct = 1; + wo->deflate_draft = 1; + ao->deflate = 1; + ao->deflate_size = DEFLATE_MAX_SIZE; + ao->deflate_correct = 1; + ao->deflate_draft = 1; +#endif /* DEFLATE_SUPPORT */ + +#if BSDCOMPRESS_SUPPORT + wo->bsd_compress = 1; + wo->bsd_bits = BSD_MAX_BITS; + ao->bsd_compress = 1; + ao->bsd_bits = BSD_MAX_BITS; +#endif /* BSDCOMPRESS_SUPPORT */ + +#if PREDICTOR_SUPPORT + ao->predictor_1 = 1; +#endif /* PREDICTOR_SUPPORT */ +} + +/* + * ccp_open - CCP is allowed to come up. + */ +static void ccp_open(ppp_pcb *pcb) { + fsm *f = &pcb->ccp_fsm; + ccp_options *go = &pcb->ccp_gotoptions; + + if (f->state != PPP_FSM_OPENED) + ccp_set(pcb, 1, 0, 0, 0); + + /* + * Find out which compressors the kernel supports before + * deciding whether to open in silent mode. + */ + ccp_resetci(f); + if (!ccp_anycompress(go)) + f->flags |= OPT_SILENT; + + fsm_open(f); +} + +/* + * ccp_close - Terminate CCP. + */ +static void ccp_close(ppp_pcb *pcb, const char *reason) { + fsm *f = &pcb->ccp_fsm; + ccp_set(pcb, 0, 0, 0, 0); + fsm_close(f, reason); +} + +/* + * ccp_lowerup - we may now transmit CCP packets. + */ +static void ccp_lowerup(ppp_pcb *pcb) { + fsm *f = &pcb->ccp_fsm; + fsm_lowerup(f); +} + +/* + * ccp_lowerdown - we may not transmit CCP packets. + */ +static void ccp_lowerdown(ppp_pcb *pcb) { + fsm *f = &pcb->ccp_fsm; + fsm_lowerdown(f); +} + +/* + * ccp_input - process a received CCP packet. + */ +static void ccp_input(ppp_pcb *pcb, u_char *p, int len) { + fsm *f = &pcb->ccp_fsm; + ccp_options *go = &pcb->ccp_gotoptions; + int oldstate; + + /* + * Check for a terminate-request so we can print a message. + */ + oldstate = f->state; + fsm_input(f, p, len); + if (oldstate == PPP_FSM_OPENED && p[0] == TERMREQ && f->state != PPP_FSM_OPENED) { + ppp_notice("Compression disabled by peer."); +#if MPPE_SUPPORT + if (go->mppe) { + ppp_error("MPPE disabled, closing LCP"); + lcp_close(pcb, "MPPE disabled by peer"); + } +#endif /* MPPE_SUPPORT */ + } + + /* + * If we get a terminate-ack and we're not asking for compression, + * close CCP. + */ + if (oldstate == PPP_FSM_REQSENT && p[0] == TERMACK + && !ccp_anycompress(go)) + ccp_close(pcb, "No compression negotiated"); +} + +/* + * Handle a CCP-specific code. 
+ */ +static int ccp_extcode(fsm *f, int code, int id, u_char *p, int len) { + ppp_pcb *pcb = f->pcb; + LWIP_UNUSED_ARG(p); + LWIP_UNUSED_ARG(len); + + switch (code) { + case CCP_RESETREQ: + if (f->state != PPP_FSM_OPENED) + break; + ccp_reset_comp(pcb); + /* send a reset-ack, which the transmitter will see and + reset its compression state. */ + fsm_sdata(f, CCP_RESETACK, id, NULL, 0); + break; + + case CCP_RESETACK: + if ((pcb->ccp_localstate & RACK_PENDING) && id == f->reqid) { + pcb->ccp_localstate &= ~(RACK_PENDING | RREQ_REPEAT); + UNTIMEOUT(ccp_rack_timeout, f); + ccp_reset_decomp(pcb); + } + break; + + default: + return 0; + } + + return 1; +} + +/* + * ccp_protrej - peer doesn't talk CCP. + */ +static void ccp_protrej(ppp_pcb *pcb) { + fsm *f = &pcb->ccp_fsm; +#if MPPE_SUPPORT + ccp_options *go = &pcb->ccp_gotoptions; +#endif /* MPPE_SUPPORT */ + + ccp_set(pcb, 0, 0, 0, 0); + fsm_lowerdown(f); + +#if MPPE_SUPPORT + if (go->mppe) { + ppp_error("MPPE required but peer negotiation failed"); + lcp_close(pcb, "MPPE required but peer negotiation failed"); + } +#endif /* MPPE_SUPPORT */ + +} + +/* + * ccp_resetci - initialize at start of negotiation. + */ +static void ccp_resetci(fsm *f) { + ppp_pcb *pcb = f->pcb; + ccp_options *go = &pcb->ccp_gotoptions; + ccp_options *wo = &pcb->ccp_wantoptions; +#if MPPE_SUPPORT + ccp_options *ao = &pcb->ccp_allowoptions; +#endif /* MPPE_SUPPORT */ +#if DEFLATE_SUPPORT || BSDCOMPRESS_SUPPORT || PREDICTOR_SUPPORT + u_char opt_buf[CCP_MAX_OPTION_LENGTH]; +#endif /* DEFLATE_SUPPORT || BSDCOMPRESS_SUPPORT || PREDICTOR_SUPPORT */ +#if DEFLATE_SUPPORT || BSDCOMPRESS_SUPPORT + int res; +#endif /* DEFLATE_SUPPORT || BSDCOMPRESS_SUPPORT */ + +#if MPPE_SUPPORT + if (pcb->settings.require_mppe) { + wo->mppe = ao->mppe = + (pcb->settings.refuse_mppe_40 ? 0 : MPPE_OPT_40) + | (pcb->settings.refuse_mppe_128 ? 0 : MPPE_OPT_128); + } +#endif /* MPPE_SUPPORT */ + + *go = *wo; + pcb->ccp_all_rejected = 0; + +#if MPPE_SUPPORT + if (go->mppe) { + int auth_mschap_bits = pcb->auth_done; + int numbits; + + /* + * Start with a basic sanity check: mschap[v2] auth must be in + * exactly one direction. RFC 3079 says that the keys are + * 'derived from the credentials of the peer that initiated the call', + * however the PPP protocol doesn't have such a concept, and pppd + * cannot get this info externally. Instead we do the best we can. + * NB: If MPPE is required, all other compression opts are invalid. + * So, we return right away if we can't do it. + */ + + /* Leave only the mschap auth bits set */ + auth_mschap_bits &= (CHAP_MS_WITHPEER | CHAP_MS_PEER | + CHAP_MS2_WITHPEER | CHAP_MS2_PEER); + /* Count the mschap auths */ + auth_mschap_bits >>= CHAP_MS_SHIFT; + numbits = 0; + do { + numbits += auth_mschap_bits & 1; + auth_mschap_bits >>= 1; + } while (auth_mschap_bits); + if (numbits > 1) { + ppp_error("MPPE required, but auth done in both directions."); + lcp_close(pcb, "MPPE required but not available"); + return; + } + if (!numbits) { + ppp_error("MPPE required, but MS-CHAP[v2] auth not performed."); + lcp_close(pcb, "MPPE required but not available"); + return; + } + + /* A plugin (eg radius) may not have obtained key material. */ + if (!pcb->mppe_keys_set) { + ppp_error("MPPE required, but keys are not available. 
" + "Possible plugin problem?"); + lcp_close(pcb, "MPPE required but not available"); + return; + } + + /* LM auth not supported for MPPE */ + if (pcb->auth_done & (CHAP_MS_WITHPEER | CHAP_MS_PEER)) { + /* This might be noise */ + if (go->mppe & MPPE_OPT_40) { + ppp_notice("Disabling 40-bit MPPE; MS-CHAP LM not supported"); + go->mppe &= ~MPPE_OPT_40; + wo->mppe &= ~MPPE_OPT_40; + } + } + + /* Last check: can we actually negotiate something? */ + if (!(go->mppe & (MPPE_OPT_40 | MPPE_OPT_128))) { + /* Could be misconfig, could be 40-bit disabled above. */ + ppp_error("MPPE required, but both 40-bit and 128-bit disabled."); + lcp_close(pcb, "MPPE required but not available"); + return; + } + + /* sync options */ + ao->mppe = go->mppe; + /* MPPE is not compatible with other compression types */ +#if BSDCOMPRESS_SUPPORT + ao->bsd_compress = go->bsd_compress = 0; +#endif /* BSDCOMPRESS_SUPPORT */ +#if PREDICTOR_SUPPORT + ao->predictor_1 = go->predictor_1 = 0; + ao->predictor_2 = go->predictor_2 = 0; +#endif /* PREDICTOR_SUPPORT */ +#if DEFLATE_SUPPORT + ao->deflate = go->deflate = 0; +#endif /* DEFLATE_SUPPORT */ + } +#endif /* MPPE_SUPPORT */ + + /* + * Check whether the kernel knows about the various + * compression methods we might request. + */ +#if BSDCOMPRESS_SUPPORT + /* FIXME: we don't need to test if BSD compress is available + * if BSDCOMPRESS_SUPPORT is set, it is. + */ + if (go->bsd_compress) { + opt_buf[0] = CI_BSD_COMPRESS; + opt_buf[1] = CILEN_BSD_COMPRESS; + for (;;) { + if (go->bsd_bits < BSD_MIN_BITS) { + go->bsd_compress = 0; + break; + } + opt_buf[2] = BSD_MAKE_OPT(BSD_CURRENT_VERSION, go->bsd_bits); + res = ccp_test(pcb, opt_buf, CILEN_BSD_COMPRESS, 0); + if (res > 0) { + break; + } else if (res < 0) { + go->bsd_compress = 0; + break; + } + go->bsd_bits--; + } + } +#endif /* BSDCOMPRESS_SUPPORT */ +#if DEFLATE_SUPPORT + /* FIXME: we don't need to test if deflate is available + * if DEFLATE_SUPPORT is set, it is. + */ + if (go->deflate) { + if (go->deflate_correct) { + opt_buf[0] = CI_DEFLATE; + opt_buf[1] = CILEN_DEFLATE; + opt_buf[3] = DEFLATE_CHK_SEQUENCE; + for (;;) { + if (go->deflate_size < DEFLATE_MIN_WORKS) { + go->deflate_correct = 0; + break; + } + opt_buf[2] = DEFLATE_MAKE_OPT(go->deflate_size); + res = ccp_test(pcb, opt_buf, CILEN_DEFLATE, 0); + if (res > 0) { + break; + } else if (res < 0) { + go->deflate_correct = 0; + break; + } + go->deflate_size--; + } + } + if (go->deflate_draft) { + opt_buf[0] = CI_DEFLATE_DRAFT; + opt_buf[1] = CILEN_DEFLATE; + opt_buf[3] = DEFLATE_CHK_SEQUENCE; + for (;;) { + if (go->deflate_size < DEFLATE_MIN_WORKS) { + go->deflate_draft = 0; + break; + } + opt_buf[2] = DEFLATE_MAKE_OPT(go->deflate_size); + res = ccp_test(pcb, opt_buf, CILEN_DEFLATE, 0); + if (res > 0) { + break; + } else if (res < 0) { + go->deflate_draft = 0; + break; + } + go->deflate_size--; + } + } + if (!go->deflate_correct && !go->deflate_draft) + go->deflate = 0; + } +#endif /* DEFLATE_SUPPORT */ +#if PREDICTOR_SUPPORT + /* FIXME: we don't need to test if predictor is available, + * if PREDICTOR_SUPPORT is set, it is. 
+ */ + if (go->predictor_1) { + opt_buf[0] = CI_PREDICTOR_1; + opt_buf[1] = CILEN_PREDICTOR_1; + if (ccp_test(pcb, opt_buf, CILEN_PREDICTOR_1, 0) <= 0) + go->predictor_1 = 0; + } + if (go->predictor_2) { + opt_buf[0] = CI_PREDICTOR_2; + opt_buf[1] = CILEN_PREDICTOR_2; + if (ccp_test(pcb, opt_buf, CILEN_PREDICTOR_2, 0) <= 0) + go->predictor_2 = 0; + } +#endif /* PREDICTOR_SUPPORT */ +} + +/* + * ccp_cilen - Return total length of our configuration info. + */ +static int ccp_cilen(fsm *f) { + ppp_pcb *pcb = f->pcb; + ccp_options *go = &pcb->ccp_gotoptions; + + return 0 +#if BSDCOMPRESS_SUPPORT + + (go->bsd_compress? CILEN_BSD_COMPRESS: 0) +#endif /* BSDCOMPRESS_SUPPORT */ +#if DEFLATE_SUPPORT + + (go->deflate && go->deflate_correct? CILEN_DEFLATE: 0) + + (go->deflate && go->deflate_draft? CILEN_DEFLATE: 0) +#endif /* DEFLATE_SUPPORT */ +#if PREDICTOR_SUPPORT + + (go->predictor_1? CILEN_PREDICTOR_1: 0) + + (go->predictor_2? CILEN_PREDICTOR_2: 0) +#endif /* PREDICTOR_SUPPORT */ +#if MPPE_SUPPORT + + (go->mppe? CILEN_MPPE: 0) +#endif /* MPPE_SUPPORT */ + ; +} + +/* + * ccp_addci - put our requests in a packet. + */ +static void ccp_addci(fsm *f, u_char *p, int *lenp) { + ppp_pcb *pcb = f->pcb; + ccp_options *go = &pcb->ccp_gotoptions; + u_char *p0 = p; + + /* + * Add the compression types that we can receive, in decreasing + * preference order. + */ +#if MPPE_SUPPORT + if (go->mppe) { + p[0] = CI_MPPE; + p[1] = CILEN_MPPE; + MPPE_OPTS_TO_CI(go->mppe, &p[2]); + mppe_init(pcb, &pcb->mppe_decomp, go->mppe); + p += CILEN_MPPE; + } +#endif /* MPPE_SUPPORT */ +#if DEFLATE_SUPPORT + if (go->deflate) { + if (go->deflate_correct) { + p[0] = CI_DEFLATE; + p[1] = CILEN_DEFLATE; + p[2] = DEFLATE_MAKE_OPT(go->deflate_size); + p[3] = DEFLATE_CHK_SEQUENCE; + p += CILEN_DEFLATE; + } + if (go->deflate_draft) { + p[0] = CI_DEFLATE_DRAFT; + p[1] = CILEN_DEFLATE; + p[2] = p[2 - CILEN_DEFLATE]; + p[3] = DEFLATE_CHK_SEQUENCE; + p += CILEN_DEFLATE; + } + } +#endif /* DEFLATE_SUPPORT */ +#if BSDCOMPRESS_SUPPORT + if (go->bsd_compress) { + p[0] = CI_BSD_COMPRESS; + p[1] = CILEN_BSD_COMPRESS; + p[2] = BSD_MAKE_OPT(BSD_CURRENT_VERSION, go->bsd_bits); + p += CILEN_BSD_COMPRESS; + } +#endif /* BSDCOMPRESS_SUPPORT */ +#if PREDICTOR_SUPPORT + /* XXX Should Predictor 2 be preferable to Predictor 1? */ + if (go->predictor_1) { + p[0] = CI_PREDICTOR_1; + p[1] = CILEN_PREDICTOR_1; + p += CILEN_PREDICTOR_1; + } + if (go->predictor_2) { + p[0] = CI_PREDICTOR_2; + p[1] = CILEN_PREDICTOR_2; + p += CILEN_PREDICTOR_2; + } +#endif /* PREDICTOR_SUPPORT */ + + go->method = (p > p0)? p0[0]: 0; + + *lenp = p - p0; +} + +/* + * ccp_ackci - process a received configure-ack, and return + * 1 iff the packet was OK. + */ +static int ccp_ackci(fsm *f, u_char *p, int len) { + ppp_pcb *pcb = f->pcb; + ccp_options *go = &pcb->ccp_gotoptions; +#if BSDCOMPRESS_SUPPORT || PREDICTOR_SUPPORT + u_char *p0 = p; +#endif /* BSDCOMPRESS_SUPPORT || PREDICTOR_SUPPORT */ + +#if MPPE_SUPPORT + if (go->mppe) { + u_char opt_buf[CILEN_MPPE]; + + opt_buf[0] = CI_MPPE; + opt_buf[1] = CILEN_MPPE; + MPPE_OPTS_TO_CI(go->mppe, &opt_buf[2]); + if (len < CILEN_MPPE || memcmp(opt_buf, p, CILEN_MPPE)) + return 0; + p += CILEN_MPPE; + len -= CILEN_MPPE; + /* XXX Cope with first/fast ack */ + if (len == 0) + return 1; + } +#endif /* MPPE_SUPPORT */ +#if DEFLATE_SUPPORT + if (go->deflate) { + if (len < CILEN_DEFLATE + || p[0] != (go->deflate_correct? 
CI_DEFLATE: CI_DEFLATE_DRAFT) + || p[1] != CILEN_DEFLATE + || p[2] != DEFLATE_MAKE_OPT(go->deflate_size) + || p[3] != DEFLATE_CHK_SEQUENCE) + return 0; + p += CILEN_DEFLATE; + len -= CILEN_DEFLATE; + /* XXX Cope with first/fast ack */ + if (len == 0) + return 1; + if (go->deflate_correct && go->deflate_draft) { + if (len < CILEN_DEFLATE + || p[0] != CI_DEFLATE_DRAFT + || p[1] != CILEN_DEFLATE + || p[2] != DEFLATE_MAKE_OPT(go->deflate_size) + || p[3] != DEFLATE_CHK_SEQUENCE) + return 0; + p += CILEN_DEFLATE; + len -= CILEN_DEFLATE; + } + } +#endif /* DEFLATE_SUPPORT */ +#if BSDCOMPRESS_SUPPORT + if (go->bsd_compress) { + if (len < CILEN_BSD_COMPRESS + || p[0] != CI_BSD_COMPRESS || p[1] != CILEN_BSD_COMPRESS + || p[2] != BSD_MAKE_OPT(BSD_CURRENT_VERSION, go->bsd_bits)) + return 0; + p += CILEN_BSD_COMPRESS; + len -= CILEN_BSD_COMPRESS; + /* XXX Cope with first/fast ack */ + if (p == p0 && len == 0) + return 1; + } +#endif /* BSDCOMPRESS_SUPPORT */ +#if PREDICTOR_SUPPORT + if (go->predictor_1) { + if (len < CILEN_PREDICTOR_1 + || p[0] != CI_PREDICTOR_1 || p[1] != CILEN_PREDICTOR_1) + return 0; + p += CILEN_PREDICTOR_1; + len -= CILEN_PREDICTOR_1; + /* XXX Cope with first/fast ack */ + if (p == p0 && len == 0) + return 1; + } + if (go->predictor_2) { + if (len < CILEN_PREDICTOR_2 + || p[0] != CI_PREDICTOR_2 || p[1] != CILEN_PREDICTOR_2) + return 0; + p += CILEN_PREDICTOR_2; + len -= CILEN_PREDICTOR_2; + /* XXX Cope with first/fast ack */ + if (p == p0 && len == 0) + return 1; + } +#endif /* PREDICTOR_SUPPORT */ + + if (len != 0) + return 0; + return 1; +} + +/* + * ccp_nakci - process received configure-nak. + * Returns 1 iff the nak was OK. + */ +static int ccp_nakci(fsm *f, u_char *p, int len, int treat_as_reject) { + ppp_pcb *pcb = f->pcb; + ccp_options *go = &pcb->ccp_gotoptions; + ccp_options no; /* options we've seen already */ + ccp_options try_; /* options to ask for next time */ + LWIP_UNUSED_ARG(treat_as_reject); +#if !MPPE_SUPPORT && !DEFLATE_SUPPORT && !BSDCOMPRESS_SUPPORT + LWIP_UNUSED_ARG(p); + LWIP_UNUSED_ARG(len); +#endif /* !MPPE_SUPPORT && !DEFLATE_SUPPORT && !BSDCOMPRESS_SUPPORT */ + + memset(&no, 0, sizeof(no)); + try_ = *go; + +#if MPPE_SUPPORT + if (go->mppe && len >= CILEN_MPPE + && p[0] == CI_MPPE && p[1] == CILEN_MPPE) { + no.mppe = 1; + /* + * Peer wants us to use a different strength or other setting. + * Fail if we aren't willing to use his suggestion. + */ + MPPE_CI_TO_OPTS(&p[2], try_.mppe); + if ((try_.mppe & MPPE_OPT_STATEFUL) && pcb->settings.refuse_mppe_stateful) { + ppp_error("Refusing MPPE stateful mode offered by peer"); + try_.mppe = 0; + } else if (((go->mppe | MPPE_OPT_STATEFUL) & try_.mppe) != try_.mppe) { + /* Peer must have set options we didn't request (suggest) */ + try_.mppe = 0; + } + + if (!try_.mppe) { + ppp_error("MPPE required but peer negotiation failed"); + lcp_close(pcb, "MPPE required but peer negotiation failed"); + } + } +#endif /* MPPE_SUPPORT */ +#if DEFLATE_SUPPORT + if (go->deflate && len >= CILEN_DEFLATE + && p[0] == (go->deflate_correct? CI_DEFLATE: CI_DEFLATE_DRAFT) + && p[1] == CILEN_DEFLATE) { + no.deflate = 1; + /* + * Peer wants us to use a different code size or something. + * Stop asking for Deflate if we don't understand his suggestion. 
+ */ + if (DEFLATE_METHOD(p[2]) != DEFLATE_METHOD_VAL + || DEFLATE_SIZE(p[2]) < DEFLATE_MIN_WORKS + || p[3] != DEFLATE_CHK_SEQUENCE) + try_.deflate = 0; + else if (DEFLATE_SIZE(p[2]) < go->deflate_size) + try_.deflate_size = DEFLATE_SIZE(p[2]); + p += CILEN_DEFLATE; + len -= CILEN_DEFLATE; + if (go->deflate_correct && go->deflate_draft + && len >= CILEN_DEFLATE && p[0] == CI_DEFLATE_DRAFT + && p[1] == CILEN_DEFLATE) { + p += CILEN_DEFLATE; + len -= CILEN_DEFLATE; + } + } +#endif /* DEFLATE_SUPPORT */ +#if BSDCOMPRESS_SUPPORT + if (go->bsd_compress && len >= CILEN_BSD_COMPRESS + && p[0] == CI_BSD_COMPRESS && p[1] == CILEN_BSD_COMPRESS) { + no.bsd_compress = 1; + /* + * Peer wants us to use a different number of bits + * or a different version. + */ + if (BSD_VERSION(p[2]) != BSD_CURRENT_VERSION) + try_.bsd_compress = 0; + else if (BSD_NBITS(p[2]) < go->bsd_bits) + try_.bsd_bits = BSD_NBITS(p[2]); + p += CILEN_BSD_COMPRESS; + len -= CILEN_BSD_COMPRESS; + } +#endif /* BSDCOMPRESS_SUPPORT */ + + /* + * Predictor-1 and 2 have no options, so they can't be Naked. + * + * There may be remaining options but we ignore them. + */ + + if (f->state != PPP_FSM_OPENED) + *go = try_; + return 1; +} + +/* + * ccp_rejci - reject some of our suggested compression methods. + */ +static int ccp_rejci(fsm *f, u_char *p, int len) { + ppp_pcb *pcb = f->pcb; + ccp_options *go = &pcb->ccp_gotoptions; + ccp_options try_; /* options to request next time */ + + try_ = *go; + + /* + * Cope with empty configure-rejects by ceasing to send + * configure-requests. + */ + if (len == 0 && pcb->ccp_all_rejected) + return -1; + +#if MPPE_SUPPORT + if (go->mppe && len >= CILEN_MPPE + && p[0] == CI_MPPE && p[1] == CILEN_MPPE) { + ppp_error("MPPE required but peer refused"); + lcp_close(pcb, "MPPE required but peer refused"); + p += CILEN_MPPE; + len -= CILEN_MPPE; + } +#endif /* MPPE_SUPPORT */ +#if DEFLATE_SUPPORT + if (go->deflate_correct && len >= CILEN_DEFLATE + && p[0] == CI_DEFLATE && p[1] == CILEN_DEFLATE) { + if (p[2] != DEFLATE_MAKE_OPT(go->deflate_size) + || p[3] != DEFLATE_CHK_SEQUENCE) + return 0; /* Rej is bad */ + try_.deflate_correct = 0; + p += CILEN_DEFLATE; + len -= CILEN_DEFLATE; + } + if (go->deflate_draft && len >= CILEN_DEFLATE + && p[0] == CI_DEFLATE_DRAFT && p[1] == CILEN_DEFLATE) { + if (p[2] != DEFLATE_MAKE_OPT(go->deflate_size) + || p[3] != DEFLATE_CHK_SEQUENCE) + return 0; /* Rej is bad */ + try_.deflate_draft = 0; + p += CILEN_DEFLATE; + len -= CILEN_DEFLATE; + } + if (!try_.deflate_correct && !try_.deflate_draft) + try_.deflate = 0; +#endif /* DEFLATE_SUPPORT */ +#if BSDCOMPRESS_SUPPORT + if (go->bsd_compress && len >= CILEN_BSD_COMPRESS + && p[0] == CI_BSD_COMPRESS && p[1] == CILEN_BSD_COMPRESS) { + if (p[2] != BSD_MAKE_OPT(BSD_CURRENT_VERSION, go->bsd_bits)) + return 0; + try_.bsd_compress = 0; + p += CILEN_BSD_COMPRESS; + len -= CILEN_BSD_COMPRESS; + } +#endif /* BSDCOMPRESS_SUPPORT */ +#if PREDICTOR_SUPPORT + if (go->predictor_1 && len >= CILEN_PREDICTOR_1 + && p[0] == CI_PREDICTOR_1 && p[1] == CILEN_PREDICTOR_1) { + try_.predictor_1 = 0; + p += CILEN_PREDICTOR_1; + len -= CILEN_PREDICTOR_1; + } + if (go->predictor_2 && len >= CILEN_PREDICTOR_2 + && p[0] == CI_PREDICTOR_2 && p[1] == CILEN_PREDICTOR_2) { + try_.predictor_2 = 0; + p += CILEN_PREDICTOR_2; + len -= CILEN_PREDICTOR_2; + } +#endif /* PREDICTOR_SUPPORT */ + + if (len != 0) + return 0; + + if (f->state != PPP_FSM_OPENED) + *go = try_; + + return 1; +} + +/* + * ccp_reqci - processed a received configure-request. 
+ * Returns CONFACK, CONFNAK or CONFREJ and the packet modified + * appropriately. + */ +static int ccp_reqci(fsm *f, u_char *p, int *lenp, int dont_nak) { + ppp_pcb *pcb = f->pcb; + ccp_options *ho = &pcb->ccp_hisoptions; + ccp_options *ao = &pcb->ccp_allowoptions; + int ret, newret; +#if DEFLATE_SUPPORT || BSDCOMPRESS_SUPPORT + int res; + int nb; +#endif /* DEFLATE_SUPPORT || BSDCOMPRESS_SUPPORT */ + u_char *p0, *retp; + int len, clen, type; +#if MPPE_SUPPORT + u8_t rej_for_ci_mppe = 1; /* Are we rejecting based on a bad/missing */ + /* CI_MPPE, or due to other options? */ +#endif /* MPPE_SUPPORT */ + + ret = CONFACK; + retp = p0 = p; + len = *lenp; + + memset(ho, 0, sizeof(ccp_options)); + ho->method = (len > 0)? p[0]: 0; + + while (len > 0) { + newret = CONFACK; + if (len < 2 || p[1] < 2 || p[1] > len) { + /* length is bad */ + clen = len; + newret = CONFREJ; + + } else { + type = p[0]; + clen = p[1]; + + switch (type) { +#if MPPE_SUPPORT + case CI_MPPE: + if (!ao->mppe || clen != CILEN_MPPE) { + newret = CONFREJ; + break; + } + MPPE_CI_TO_OPTS(&p[2], ho->mppe); + + /* Nak if anything unsupported or unknown are set. */ + if (ho->mppe & MPPE_OPT_UNSUPPORTED) { + newret = CONFNAK; + ho->mppe &= ~MPPE_OPT_UNSUPPORTED; + } + if (ho->mppe & MPPE_OPT_UNKNOWN) { + newret = CONFNAK; + ho->mppe &= ~MPPE_OPT_UNKNOWN; + } + + /* Check state opt */ + if (ho->mppe & MPPE_OPT_STATEFUL) { + /* + * We can Nak and request stateless, but it's a + * lot easier to just assume the peer will request + * it if he can do it; stateful mode is bad over + * the Internet -- which is where we expect MPPE. + */ + if (pcb->settings.refuse_mppe_stateful) { + ppp_error("Refusing MPPE stateful mode offered by peer"); + newret = CONFREJ; + break; + } + } + + /* Find out which of {S,L} are set. */ + if ((ho->mppe & MPPE_OPT_128) + && (ho->mppe & MPPE_OPT_40)) { + /* Both are set, negotiate the strongest. */ + newret = CONFNAK; + if (ao->mppe & MPPE_OPT_128) + ho->mppe &= ~MPPE_OPT_40; + else if (ao->mppe & MPPE_OPT_40) + ho->mppe &= ~MPPE_OPT_128; + else { + newret = CONFREJ; + break; + } + } else if (ho->mppe & MPPE_OPT_128) { + if (!(ao->mppe & MPPE_OPT_128)) { + newret = CONFREJ; + break; + } + } else if (ho->mppe & MPPE_OPT_40) { + if (!(ao->mppe & MPPE_OPT_40)) { + newret = CONFREJ; + break; + } + } else { + /* Neither are set. */ + /* We cannot accept this. */ + newret = CONFNAK; + /* Give the peer our idea of what can be used, + so it can choose and confirm */ + ho->mppe = ao->mppe; + } + + /* rebuild the opts */ + MPPE_OPTS_TO_CI(ho->mppe, &p[2]); + if (newret == CONFACK) { + int mtu; + + mppe_init(pcb, &pcb->mppe_comp, ho->mppe); + /* + * We need to decrease the interface MTU by MPPE_PAD + * because MPPE frames **grow**. The kernel [must] + * allocate MPPE_PAD extra bytes in xmit buffers. + */ + mtu = netif_get_mtu(pcb); + if (mtu) + netif_set_mtu(pcb, mtu - MPPE_PAD); + else + newret = CONFREJ; + } + + /* + * We have accepted MPPE or are willing to negotiate + * MPPE parameters. A CONFREJ is due to subsequent + * (non-MPPE) processing. 
+ */ + rej_for_ci_mppe = 0; + break; +#endif /* MPPE_SUPPORT */ +#if DEFLATE_SUPPORT + case CI_DEFLATE: + case CI_DEFLATE_DRAFT: + if (!ao->deflate || clen != CILEN_DEFLATE + || (!ao->deflate_correct && type == CI_DEFLATE) + || (!ao->deflate_draft && type == CI_DEFLATE_DRAFT)) { + newret = CONFREJ; + break; + } + + ho->deflate = 1; + ho->deflate_size = nb = DEFLATE_SIZE(p[2]); + if (DEFLATE_METHOD(p[2]) != DEFLATE_METHOD_VAL + || p[3] != DEFLATE_CHK_SEQUENCE + || nb > ao->deflate_size || nb < DEFLATE_MIN_WORKS) { + newret = CONFNAK; + if (!dont_nak) { + p[2] = DEFLATE_MAKE_OPT(ao->deflate_size); + p[3] = DEFLATE_CHK_SEQUENCE; + /* fall through to test this #bits below */ + } else + break; + } + + /* + * Check whether we can do Deflate with the window + * size they want. If the window is too big, reduce + * it until the kernel can cope and nak with that. + * We only check this for the first option. + */ + if (p == p0) { + for (;;) { + res = ccp_test(pcb, p, CILEN_DEFLATE, 1); + if (res > 0) + break; /* it's OK now */ + if (res < 0 || nb == DEFLATE_MIN_WORKS || dont_nak) { + newret = CONFREJ; + p[2] = DEFLATE_MAKE_OPT(ho->deflate_size); + break; + } + newret = CONFNAK; + --nb; + p[2] = DEFLATE_MAKE_OPT(nb); + } + } + break; +#endif /* DEFLATE_SUPPORT */ +#if BSDCOMPRESS_SUPPORT + case CI_BSD_COMPRESS: + if (!ao->bsd_compress || clen != CILEN_BSD_COMPRESS) { + newret = CONFREJ; + break; + } + + ho->bsd_compress = 1; + ho->bsd_bits = nb = BSD_NBITS(p[2]); + if (BSD_VERSION(p[2]) != BSD_CURRENT_VERSION + || nb > ao->bsd_bits || nb < BSD_MIN_BITS) { + newret = CONFNAK; + if (!dont_nak) { + p[2] = BSD_MAKE_OPT(BSD_CURRENT_VERSION, ao->bsd_bits); + /* fall through to test this #bits below */ + } else + break; + } + + /* + * Check whether we can do BSD-Compress with the code + * size they want. If the code size is too big, reduce + * it until the kernel can cope and nak with that. + * We only check this for the first option. 
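The comment above describes the same pattern already used for the Deflate window size: probe the local engine with the requested parameter and, while it cannot cope, shrink the parameter and answer with a nak carrying the reduced value, rejecting only once the minimum is reached. A rough standalone sketch of that loop, with a hypothetical can_handle() probe standing in for ccp_test():

    #include <stdio.h>

    enum { RES_ACK, RES_NAK, RES_REJ };

    /* Hypothetical capability probe: pretend the local engine copes with <= 12 bits. */
    static int can_handle(int bits) { return bits <= 12; }

    /* Start from what the peer asked for and shrink until the probe succeeds,
     * never going below min_bits; mirrors the nak-with-smaller-value idea above. */
    static int negotiate_size(int requested, int min_bits, int *granted)
    {
        int nb = requested;
        for (;;) {
            if (can_handle(nb)) {
                *granted = nb;
                return (nb == requested) ? RES_ACK : RES_NAK;
            }
            if (nb == min_bits)
                return RES_REJ;      /* nothing workable left to offer */
            --nb;
        }
    }

    int main(void)
    {
        int granted = 0;
        int r = negotiate_size(15, 9, &granted);
        printf("result=%d granted=%d\n", r, granted);   /* RES_NAK, 12 */
        return 0;
    }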
+ */ + if (p == p0) { + for (;;) { + res = ccp_test(pcb, p, CILEN_BSD_COMPRESS, 1); + if (res > 0) + break; + if (res < 0 || nb == BSD_MIN_BITS || dont_nak) { + newret = CONFREJ; + p[2] = BSD_MAKE_OPT(BSD_CURRENT_VERSION, + ho->bsd_bits); + break; + } + newret = CONFNAK; + --nb; + p[2] = BSD_MAKE_OPT(BSD_CURRENT_VERSION, nb); + } + } + break; +#endif /* BSDCOMPRESS_SUPPORT */ +#if PREDICTOR_SUPPORT + case CI_PREDICTOR_1: + if (!ao->predictor_1 || clen != CILEN_PREDICTOR_1) { + newret = CONFREJ; + break; + } + + ho->predictor_1 = 1; + if (p == p0 + && ccp_test(pcb, p, CILEN_PREDICTOR_1, 1) <= 0) { + newret = CONFREJ; + } + break; + + case CI_PREDICTOR_2: + if (!ao->predictor_2 || clen != CILEN_PREDICTOR_2) { + newret = CONFREJ; + break; + } + + ho->predictor_2 = 1; + if (p == p0 + && ccp_test(pcb, p, CILEN_PREDICTOR_2, 1) <= 0) { + newret = CONFREJ; + } + break; +#endif /* PREDICTOR_SUPPORT */ + + default: + newret = CONFREJ; + } + } + + if (newret == CONFNAK && dont_nak) + newret = CONFREJ; + if (!(newret == CONFACK || (newret == CONFNAK && ret == CONFREJ))) { + /* we're returning this option */ + if (newret == CONFREJ && ret == CONFNAK) + retp = p0; + ret = newret; + if (p != retp) + MEMCPY(retp, p, clen); + retp += clen; + } + + p += clen; + len -= clen; + } + + if (ret != CONFACK) { + if (ret == CONFREJ && *lenp == retp - p0) + pcb->ccp_all_rejected = 1; + else + *lenp = retp - p0; + } +#if MPPE_SUPPORT + if (ret == CONFREJ && ao->mppe && rej_for_ci_mppe) { + ppp_error("MPPE required but peer negotiation failed"); + lcp_close(pcb, "MPPE required but peer negotiation failed"); + } +#endif /* MPPE_SUPPORT */ + return ret; +} + +/* + * Make a string name for a compression method (or 2). + */ +static const char *method_name(ccp_options *opt, ccp_options *opt2) { + static char result[64]; +#if !DEFLATE_SUPPORT && !BSDCOMPRESS_SUPPORT + LWIP_UNUSED_ARG(opt2); +#endif /* !DEFLATE_SUPPORT && !BSDCOMPRESS_SUPPORT */ + + if (!ccp_anycompress(opt)) + return "(none)"; + switch (opt->method) { +#if MPPE_SUPPORT + case CI_MPPE: + { + char *p = result; + char *q = result + sizeof(result); /* 1 past result */ + + ppp_slprintf(p, q - p, "MPPE "); + p += 5; + if (opt->mppe & MPPE_OPT_128) { + ppp_slprintf(p, q - p, "128-bit "); + p += 8; + } + if (opt->mppe & MPPE_OPT_40) { + ppp_slprintf(p, q - p, "40-bit "); + p += 7; + } + if (opt->mppe & MPPE_OPT_STATEFUL) + ppp_slprintf(p, q - p, "stateful"); + else + ppp_slprintf(p, q - p, "stateless"); + + break; + } +#endif /* MPPE_SUPPORT */ +#if DEFLATE_SUPPORT + case CI_DEFLATE: + case CI_DEFLATE_DRAFT: + if (opt2 != NULL && opt2->deflate_size != opt->deflate_size) + ppp_slprintf(result, sizeof(result), "Deflate%s (%d/%d)", + (opt->method == CI_DEFLATE_DRAFT? "(old#)": ""), + opt->deflate_size, opt2->deflate_size); + else + ppp_slprintf(result, sizeof(result), "Deflate%s (%d)", + (opt->method == CI_DEFLATE_DRAFT? 
"(old#)": ""), + opt->deflate_size); + break; +#endif /* DEFLATE_SUPPORT */ +#if BSDCOMPRESS_SUPPORT + case CI_BSD_COMPRESS: + if (opt2 != NULL && opt2->bsd_bits != opt->bsd_bits) + ppp_slprintf(result, sizeof(result), "BSD-Compress (%d/%d)", + opt->bsd_bits, opt2->bsd_bits); + else + ppp_slprintf(result, sizeof(result), "BSD-Compress (%d)", + opt->bsd_bits); + break; +#endif /* BSDCOMPRESS_SUPPORT */ +#if PREDICTOR_SUPPORT + case CI_PREDICTOR_1: + return "Predictor 1"; + case CI_PREDICTOR_2: + return "Predictor 2"; +#endif /* PREDICTOR_SUPPORT */ + default: + ppp_slprintf(result, sizeof(result), "Method %d", opt->method); + } + return result; +} + +/* + * CCP has come up - inform the kernel driver and log a message. + */ +static void ccp_up(fsm *f) { + ppp_pcb *pcb = f->pcb; + ccp_options *go = &pcb->ccp_gotoptions; + ccp_options *ho = &pcb->ccp_hisoptions; + char method1[64]; + + ccp_set(pcb, 1, 1, go->method, ho->method); + if (ccp_anycompress(go)) { + if (ccp_anycompress(ho)) { + if (go->method == ho->method) { + ppp_notice("%s compression enabled", method_name(go, ho)); + } else { + ppp_strlcpy(method1, method_name(go, NULL), sizeof(method1)); + ppp_notice("%s / %s compression enabled", + method1, method_name(ho, NULL)); + } + } else + ppp_notice("%s receive compression enabled", method_name(go, NULL)); + } else if (ccp_anycompress(ho)) + ppp_notice("%s transmit compression enabled", method_name(ho, NULL)); +#if MPPE_SUPPORT + if (go->mppe) { + continue_networks(pcb); /* Bring up IP et al */ + } +#endif /* MPPE_SUPPORT */ +} + +/* + * CCP has gone down - inform the kernel driver. + */ +static void ccp_down(fsm *f) { + ppp_pcb *pcb = f->pcb; +#if MPPE_SUPPORT + ccp_options *go = &pcb->ccp_gotoptions; +#endif /* MPPE_SUPPORT */ + + if (pcb->ccp_localstate & RACK_PENDING) + UNTIMEOUT(ccp_rack_timeout, f); + pcb->ccp_localstate = 0; + ccp_set(pcb, 1, 0, 0, 0); +#if MPPE_SUPPORT + if (go->mppe) { + go->mppe = 0; + if (pcb->lcp_fsm.state == PPP_FSM_OPENED) { + /* If LCP is not already going down, make sure it does. */ + ppp_error("MPPE disabled"); + lcp_close(pcb, "MPPE disabled"); + } + } +#endif /* MPPE_SUPPORT */ +} + +#if PRINTPKT_SUPPORT +/* + * Print the contents of a CCP packet. + */ +static const char* const ccp_codenames[] = { + "ConfReq", "ConfAck", "ConfNak", "ConfRej", + "TermReq", "TermAck", "CodeRej", + NULL, NULL, NULL, NULL, NULL, NULL, + "ResetReq", "ResetAck", +}; + +static int ccp_printpkt(const u_char *p, int plen, void (*printer) (void *, const char *, ...), void *arg) { + const u_char *p0, *optend; + int code, id, len; + int optlen; + + p0 = p; + if (plen < HEADERLEN) + return 0; + code = p[0]; + id = p[1]; + len = (p[2] << 8) + p[3]; + if (len < HEADERLEN || len > plen) + return 0; + + if (code >= 1 && code <= (int)LWIP_ARRAYSIZE(ccp_codenames) && ccp_codenames[code-1] != NULL) + printer(arg, " %s", ccp_codenames[code-1]); + else + printer(arg, " code=0x%x", code); + printer(arg, " id=0x%x", id); + len -= HEADERLEN; + p += HEADERLEN; + + switch (code) { + case CONFREQ: + case CONFACK: + case CONFNAK: + case CONFREJ: + /* print list of possible compression methods */ + while (len >= 2) { + code = p[0]; + optlen = p[1]; + if (optlen < 2 || optlen > len) + break; + printer(arg, " <"); + len -= optlen; + optend = p + optlen; + switch (code) { +#if MPPE_SUPPORT + case CI_MPPE: + if (optlen >= CILEN_MPPE) { + u_char mppe_opts; + + MPPE_CI_TO_OPTS(&p[2], mppe_opts); + printer(arg, "mppe %s %s %s %s %s %s%s", + (p[2] & MPPE_H_BIT)? "+H": "-H", + (p[5] & MPPE_M_BIT)? 
"+M": "-M", + (p[5] & MPPE_S_BIT)? "+S": "-S", + (p[5] & MPPE_L_BIT)? "+L": "-L", + (p[5] & MPPE_D_BIT)? "+D": "-D", + (p[5] & MPPE_C_BIT)? "+C": "-C", + (mppe_opts & MPPE_OPT_UNKNOWN)? " +U": ""); + if (mppe_opts & MPPE_OPT_UNKNOWN) + printer(arg, " (%.2x %.2x %.2x %.2x)", + p[2], p[3], p[4], p[5]); + p += CILEN_MPPE; + } + break; +#endif /* MPPE_SUPPORT */ +#if DEFLATE_SUPPORT + case CI_DEFLATE: + case CI_DEFLATE_DRAFT: + if (optlen >= CILEN_DEFLATE) { + printer(arg, "deflate%s %d", + (code == CI_DEFLATE_DRAFT? "(old#)": ""), + DEFLATE_SIZE(p[2])); + if (DEFLATE_METHOD(p[2]) != DEFLATE_METHOD_VAL) + printer(arg, " method %d", DEFLATE_METHOD(p[2])); + if (p[3] != DEFLATE_CHK_SEQUENCE) + printer(arg, " check %d", p[3]); + p += CILEN_DEFLATE; + } + break; +#endif /* DEFLATE_SUPPORT */ +#if BSDCOMPRESS_SUPPORT + case CI_BSD_COMPRESS: + if (optlen >= CILEN_BSD_COMPRESS) { + printer(arg, "bsd v%d %d", BSD_VERSION(p[2]), + BSD_NBITS(p[2])); + p += CILEN_BSD_COMPRESS; + } + break; +#endif /* BSDCOMPRESS_SUPPORT */ +#if PREDICTOR_SUPPORT + case CI_PREDICTOR_1: + if (optlen >= CILEN_PREDICTOR_1) { + printer(arg, "predictor 1"); + p += CILEN_PREDICTOR_1; + } + break; + case CI_PREDICTOR_2: + if (optlen >= CILEN_PREDICTOR_2) { + printer(arg, "predictor 2"); + p += CILEN_PREDICTOR_2; + } + break; +#endif /* PREDICTOR_SUPPORT */ + default: + break; + } + while (p < optend) + printer(arg, " %.2x", *p++); + printer(arg, ">"); + } + break; + + case TERMACK: + case TERMREQ: + if (len > 0 && *p >= ' ' && *p < 0x7f) { + ppp_print_string(p, len, printer, arg); + p += len; + len = 0; + } + break; + default: + break; + } + + /* dump out the rest of the packet in hex */ + while (--len >= 0) + printer(arg, " %.2x", *p++); + + return p - p0; +} +#endif /* PRINTPKT_SUPPORT */ + +#if PPP_DATAINPUT +/* + * We have received a packet that the decompressor failed to + * decompress. Here we would expect to issue a reset-request, but + * Motorola has a patent on resetting the compressor as a result of + * detecting an error in the decompressed data after decompression. + * (See US patent 5,130,993; international patent publication number + * WO 91/10289; Australian patent 73296/91.) + * + * So we ask the kernel whether the error was detected after + * decompression; if it was, we take CCP down, thus disabling + * compression :-(, otherwise we issue the reset-request. + */ +static void ccp_datainput(ppp_pcb *pcb, u_char *pkt, int len) { + fsm *f; +#if MPPE_SUPPORT + ccp_options *go = &pcb->ccp_gotoptions; +#endif /* MPPE_SUPPORT */ + LWIP_UNUSED_ARG(pkt); + LWIP_UNUSED_ARG(len); + + f = &pcb->ccp_fsm; + if (f->state == PPP_FSM_OPENED) { + if (ccp_fatal_error(pcb)) { + /* + * Disable compression by taking CCP down. + */ + ppp_error("Lost compression sync: disabling compression"); + ccp_close(pcb, "Lost compression sync"); +#if MPPE_SUPPORT + /* + * If we were doing MPPE, we must also take the link down. + */ + if (go->mppe) { + ppp_error("Too many MPPE errors, closing LCP"); + lcp_close(pcb, "Too many MPPE errors"); + } +#endif /* MPPE_SUPPORT */ + } else { + /* + * Send a reset-request to reset the peer's compressor. + * We don't do that if we are still waiting for an + * acknowledgement to a previous reset-request. 
+ */ + if (!(pcb->ccp_localstate & RACK_PENDING)) { + fsm_sdata(f, CCP_RESETREQ, f->reqid = ++f->id, NULL, 0); + TIMEOUT(ccp_rack_timeout, f, RACKTIMEOUT); + pcb->ccp_localstate |= RACK_PENDING; + } else + pcb->ccp_localstate |= RREQ_REPEAT; + } + } +} +#endif /* PPP_DATAINPUT */ + +/* + * We have received a packet that the decompressor failed to + * decompress. Issue a reset-request. + */ +void ccp_resetrequest(ppp_pcb *pcb) { + fsm *f = &pcb->ccp_fsm; + + if (f->state != PPP_FSM_OPENED) + return; + + /* + * Send a reset-request to reset the peer's compressor. + * We don't do that if we are still waiting for an + * acknowledgement to a previous reset-request. + */ + if (!(pcb->ccp_localstate & RACK_PENDING)) { + fsm_sdata(f, CCP_RESETREQ, f->reqid = ++f->id, NULL, 0); + TIMEOUT(ccp_rack_timeout, f, RACKTIMEOUT); + pcb->ccp_localstate |= RACK_PENDING; + } else + pcb->ccp_localstate |= RREQ_REPEAT; +} + +/* + * Timeout waiting for reset-ack. + */ +static void ccp_rack_timeout(void *arg) { + fsm *f = (fsm*)arg; + ppp_pcb *pcb = f->pcb; + + if (f->state == PPP_FSM_OPENED && (pcb->ccp_localstate & RREQ_REPEAT)) { + fsm_sdata(f, CCP_RESETREQ, f->reqid, NULL, 0); + TIMEOUT(ccp_rack_timeout, f, RACKTIMEOUT); + pcb->ccp_localstate &= ~RREQ_REPEAT; + } else + pcb->ccp_localstate &= ~RACK_PENDING; +} + +#endif /* PPP_SUPPORT && CCP_SUPPORT */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/netif/ppp/chap-md5.c b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/netif/ppp/chap-md5.c new file mode 100644 index 0000000..5a78944 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/netif/ppp/chap-md5.c @@ -0,0 +1,126 @@ +/* + * chap-md5.c - New CHAP/MD5 implementation. + * + * Copyright (c) 2003 Paul Mackerras. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. The name(s) of the authors of this software must not be used to + * endorse or promote products derived from this software without + * prior written permission. + * + * 3. Redistributions of any form whatsoever must retain the following + * acknowledgment: + * "This product includes software developed by Paul Mackerras + * ". + * + * THE AUTHORS OF THIS SOFTWARE DISCLAIM ALL WARRANTIES WITH REGARD TO + * THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY + * AND FITNESS, IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY + * SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN + * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING + * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
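Before the implementation below, it may help to recall that the CHAP/MD5 digest is simply MD5(identifier || secret || challenge) as defined in RFC 1994, and responder and verifier must arrive at the same 16 bytes. A small helper sketch computing that value with the same lwip_md5_* wrappers this file uses (chap_md5_expected is a hypothetical name, and the snippet assumes it is built inside the lwIP PPP tree so "netif/ppp/pppcrypt.h" is available):

    #include "netif/ppp/ppp_opts.h"
    #include "netif/ppp/pppcrypt.h"   /* lwip_md5_* wrappers, as used below */

    /* Compute the 16-byte CHAP/MD5 value for one exchange:
     * hash = MD5(id || secret || challenge). */
    static void chap_md5_expected(unsigned char id,
                                  const unsigned char *secret, int secret_len,
                                  const unsigned char *challenge, int challenge_len,
                                  unsigned char hash[16])
    {
        lwip_md5_context ctx;

        lwip_md5_init(&ctx);
        lwip_md5_starts(&ctx);
        lwip_md5_update(&ctx, &id, 1);
        lwip_md5_update(&ctx, secret, secret_len);
        lwip_md5_update(&ctx, challenge, challenge_len);
        lwip_md5_finish(&ctx, hash);
        lwip_md5_free(&ctx);
    }

    /* The verifier then just memcmp()s this against the 16-byte value carried
     * in the peer's Response packet; equality means access is granted. */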
+ */ + +#include "netif/ppp/ppp_opts.h" +#if PPP_SUPPORT && CHAP_SUPPORT /* don't build if not configured for use in lwipopts.h */ + +#if 0 /* UNUSED */ +#include +#include +#endif /* UNUSED */ + +#include "netif/ppp/ppp_impl.h" + +#include "netif/ppp/chap-new.h" +#include "netif/ppp/chap-md5.h" +#include "netif/ppp/magic.h" +#include "netif/ppp/pppcrypt.h" + +#define MD5_HASH_SIZE 16 +#define MD5_MIN_CHALLENGE 17 +#define MD5_MAX_CHALLENGE 24 +#define MD5_MIN_MAX_POWER_OF_TWO_CHALLENGE 3 /* 2^3-1 = 7, 17+7 = 24 */ + +#if PPP_SERVER +static void chap_md5_generate_challenge(ppp_pcb *pcb, unsigned char *cp) { + int clen; + LWIP_UNUSED_ARG(pcb); + + clen = MD5_MIN_CHALLENGE + magic_pow(MD5_MIN_MAX_POWER_OF_TWO_CHALLENGE); + *cp++ = clen; + magic_random_bytes(cp, clen); +} + +static int chap_md5_verify_response(ppp_pcb *pcb, int id, const char *name, + const unsigned char *secret, int secret_len, + const unsigned char *challenge, const unsigned char *response, + char *message, int message_space) { + lwip_md5_context ctx; + unsigned char idbyte = id; + unsigned char hash[MD5_HASH_SIZE]; + int challenge_len, response_len; + LWIP_UNUSED_ARG(name); + LWIP_UNUSED_ARG(pcb); + + challenge_len = *challenge++; + response_len = *response++; + if (response_len == MD5_HASH_SIZE) { + /* Generate hash of ID, secret, challenge */ + lwip_md5_init(&ctx); + lwip_md5_starts(&ctx); + lwip_md5_update(&ctx, &idbyte, 1); + lwip_md5_update(&ctx, secret, secret_len); + lwip_md5_update(&ctx, challenge, challenge_len); + lwip_md5_finish(&ctx, hash); + lwip_md5_free(&ctx); + + /* Test if our hash matches the peer's response */ + if (memcmp(hash, response, MD5_HASH_SIZE) == 0) { + ppp_slprintf(message, message_space, "Access granted"); + return 1; + } + } + ppp_slprintf(message, message_space, "Access denied"); + return 0; +} +#endif /* PPP_SERVER */ + +static void chap_md5_make_response(ppp_pcb *pcb, unsigned char *response, int id, const char *our_name, + const unsigned char *challenge, const char *secret, int secret_len, + unsigned char *private_) { + lwip_md5_context ctx; + unsigned char idbyte = id; + int challenge_len = *challenge++; + LWIP_UNUSED_ARG(our_name); + LWIP_UNUSED_ARG(private_); + LWIP_UNUSED_ARG(pcb); + + lwip_md5_init(&ctx); + lwip_md5_starts(&ctx); + lwip_md5_update(&ctx, &idbyte, 1); + lwip_md5_update(&ctx, (const u_char *)secret, secret_len); + lwip_md5_update(&ctx, challenge, challenge_len); + lwip_md5_finish(&ctx, &response[1]); + lwip_md5_free(&ctx); + response[0] = MD5_HASH_SIZE; +} + +const struct chap_digest_type md5_digest = { + CHAP_MD5, /* code */ +#if PPP_SERVER + chap_md5_generate_challenge, + chap_md5_verify_response, +#endif /* PPP_SERVER */ + chap_md5_make_response, + NULL, /* check_success */ + NULL, /* handle_failure */ +}; + +#endif /* PPP_SUPPORT && CHAP_SUPPORT */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/netif/ppp/chap-new.c b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/netif/ppp/chap-new.c new file mode 100644 index 0000000..486d7e1 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/netif/ppp/chap-new.c @@ -0,0 +1,677 @@ +/* + * chap-new.c - New CHAP implementation. + * + * Copyright (c) 2003 Paul Mackerras. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. 
Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. The name(s) of the authors of this software must not be used to + * endorse or promote products derived from this software without + * prior written permission. + * + * 3. Redistributions of any form whatsoever must retain the following + * acknowledgment: + * "This product includes software developed by Paul Mackerras + * ". + * + * THE AUTHORS OF THIS SOFTWARE DISCLAIM ALL WARRANTIES WITH REGARD TO + * THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY + * AND FITNESS, IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY + * SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN + * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING + * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#include "netif/ppp/ppp_opts.h" +#if PPP_SUPPORT && CHAP_SUPPORT /* don't build if not configured for use in lwipopts.h */ + +#if 0 /* UNUSED */ +#include +#include +#endif /* UNUSED */ + +#include "netif/ppp/ppp_impl.h" + +#if 0 /* UNUSED */ +#include "session.h" +#endif /* UNUSED */ + +#include "netif/ppp/chap-new.h" +#include "netif/ppp/chap-md5.h" +#if MSCHAP_SUPPORT +#include "netif/ppp/chap_ms.h" +#endif +#include "netif/ppp/magic.h" + +#if 0 /* UNUSED */ +/* Hook for a plugin to validate CHAP challenge */ +int (*chap_verify_hook)(const char *name, const char *ourname, int id, + const struct chap_digest_type *digest, + const unsigned char *challenge, const unsigned char *response, + char *message, int message_space) = NULL; +#endif /* UNUSED */ + +#if PPP_OPTIONS +/* + * Command-line options. + */ +static option_t chap_option_list[] = { + { "chap-restart", o_int, &chap_timeout_time, + "Set timeout for CHAP", OPT_PRIO }, + { "chap-max-challenge", o_int, &pcb->settings.chap_max_transmits, + "Set max #xmits for challenge", OPT_PRIO }, + { "chap-interval", o_int, &pcb->settings.chap_rechallenge_time, + "Set interval for rechallenge", OPT_PRIO }, + { NULL } +}; +#endif /* PPP_OPTIONS */ + + +/* Values for flags in chap_client_state and chap_server_state */ +#define LOWERUP 1 +#define AUTH_STARTED 2 +#define AUTH_DONE 4 +#define AUTH_FAILED 8 +#define TIMEOUT_PENDING 0x10 +#define CHALLENGE_VALID 0x20 + +/* + * Prototypes. 
+ */ +static void chap_init(ppp_pcb *pcb); +static void chap_lowerup(ppp_pcb *pcb); +static void chap_lowerdown(ppp_pcb *pcb); +#if PPP_SERVER +static void chap_timeout(void *arg); +static void chap_generate_challenge(ppp_pcb *pcb); +static void chap_handle_response(ppp_pcb *pcb, int code, + unsigned char *pkt, int len); +static int chap_verify_response(ppp_pcb *pcb, const char *name, const char *ourname, int id, + const struct chap_digest_type *digest, + const unsigned char *challenge, const unsigned char *response, + char *message, int message_space); +#endif /* PPP_SERVER */ +static void chap_respond(ppp_pcb *pcb, int id, + unsigned char *pkt, int len); +static void chap_handle_status(ppp_pcb *pcb, int code, int id, + unsigned char *pkt, int len); +static void chap_protrej(ppp_pcb *pcb); +static void chap_input(ppp_pcb *pcb, unsigned char *pkt, int pktlen); +#if PRINTPKT_SUPPORT +static int chap_print_pkt(const unsigned char *p, int plen, + void (*printer) (void *, const char *, ...), void *arg); +#endif /* PRINTPKT_SUPPORT */ + +/* List of digest types that we know about */ +static const struct chap_digest_type* const chap_digests[] = { + &md5_digest, +#if MSCHAP_SUPPORT + &chapms_digest, + &chapms2_digest, +#endif /* MSCHAP_SUPPORT */ + NULL +}; + +/* + * chap_init - reset to initial state. + */ +static void chap_init(ppp_pcb *pcb) { + LWIP_UNUSED_ARG(pcb); + +#if 0 /* Not necessary, everything is cleared in ppp_new() */ + memset(&pcb->chap_client, 0, sizeof(chap_client_state)); +#if PPP_SERVER + memset(&pcb->chap_server, 0, sizeof(chap_server_state)); +#endif /* PPP_SERVER */ +#endif /* 0 */ +} + +/* + * chap_lowerup - we can start doing stuff now. + */ +static void chap_lowerup(ppp_pcb *pcb) { + + pcb->chap_client.flags |= LOWERUP; +#if PPP_SERVER + pcb->chap_server.flags |= LOWERUP; + if (pcb->chap_server.flags & AUTH_STARTED) + chap_timeout(pcb); +#endif /* PPP_SERVER */ +} + +static void chap_lowerdown(ppp_pcb *pcb) { + + pcb->chap_client.flags = 0; +#if PPP_SERVER + if (pcb->chap_server.flags & TIMEOUT_PENDING) + UNTIMEOUT(chap_timeout, pcb); + pcb->chap_server.flags = 0; +#endif /* PPP_SERVER */ +} + +#if PPP_SERVER +/* + * chap_auth_peer - Start authenticating the peer. + * If the lower layer is already up, we start sending challenges, + * otherwise we wait for the lower layer to come up. + */ +void chap_auth_peer(ppp_pcb *pcb, const char *our_name, int digest_code) { + const struct chap_digest_type *dp; + int i; + + if (pcb->chap_server.flags & AUTH_STARTED) { + ppp_error("CHAP: peer authentication already started!"); + return; + } + for (i = 0; (dp = chap_digests[i]) != NULL; ++i) + if (dp->code == digest_code) + break; + if (dp == NULL) + ppp_fatal("CHAP digest 0x%x requested but not available", + digest_code); + + pcb->chap_server.digest = dp; + pcb->chap_server.name = our_name; + /* Start with a random ID value */ + pcb->chap_server.id = magic(); + pcb->chap_server.flags |= AUTH_STARTED; + if (pcb->chap_server.flags & LOWERUP) + chap_timeout(pcb); +} +#endif /* PPP_SERVER */ + +/* + * chap_auth_with_peer - Prepare to authenticate ourselves to the peer. + * There isn't much to do until we receive a challenge. 
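Both chap_auth_peer() above and chap_auth_with_peer() below select the digest implementation the same way: scan the NULL-terminated chap_digests[] table for a matching algorithm code and treat a miss as a fatal configuration error. A stripped-down sketch of that lookup with a hypothetical descriptor type (not lwIP's struct chap_digest_type; the numeric codes are only examples):

    #include <stdio.h>
    #include <stddef.h>

    /* Hypothetical stand-in for struct chap_digest_type: code + name only. */
    struct digest_desc {
        int         code;
        const char *name;
    };

    static const struct digest_desc md5_desc     = { 5,   "MD5"       };
    static const struct digest_desc mschap2_desc = { 129, "MS-CHAPv2" };

    /* NULL-terminated table, mirroring chap_digests[]. */
    static const struct digest_desc *const digests[] = {
        &md5_desc,
        &mschap2_desc,
        NULL
    };

    static const struct digest_desc *find_digest(int code)
    {
        int i;
        const struct digest_desc *dp;

        for (i = 0; (dp = digests[i]) != NULL; ++i)
            if (dp->code == code)
                return dp;
        return NULL;   /* caller treats this as "requested but not available" */
    }

    int main(void)
    {
        const struct digest_desc *dp = find_digest(5);
        printf("%s\n", dp ? dp->name : "(none)");
        return 0;
    }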
+ */ +void chap_auth_with_peer(ppp_pcb *pcb, const char *our_name, int digest_code) { + const struct chap_digest_type *dp; + int i; + + if(NULL == our_name) + return; + + if (pcb->chap_client.flags & AUTH_STARTED) { + ppp_error("CHAP: authentication with peer already started!"); + return; + } + for (i = 0; (dp = chap_digests[i]) != NULL; ++i) + if (dp->code == digest_code) + break; + + if (dp == NULL) + ppp_fatal("CHAP digest 0x%x requested but not available", + digest_code); + + pcb->chap_client.digest = dp; + pcb->chap_client.name = our_name; + pcb->chap_client.flags |= AUTH_STARTED; +} + +#if PPP_SERVER +/* + * chap_timeout - It's time to send another challenge to the peer. + * This could be either a retransmission of a previous challenge, + * or a new challenge to start re-authentication. + */ +static void chap_timeout(void *arg) { + ppp_pcb *pcb = (ppp_pcb*)arg; + struct pbuf *p; + + pcb->chap_server.flags &= ~TIMEOUT_PENDING; + if ((pcb->chap_server.flags & CHALLENGE_VALID) == 0) { + pcb->chap_server.challenge_xmits = 0; + chap_generate_challenge(pcb); + pcb->chap_server.flags |= CHALLENGE_VALID; + } else if (pcb->chap_server.challenge_xmits >= pcb->settings.chap_max_transmits) { + pcb->chap_server.flags &= ~CHALLENGE_VALID; + pcb->chap_server.flags |= AUTH_DONE | AUTH_FAILED; + auth_peer_fail(pcb, PPP_CHAP); + return; + } + + p = pbuf_alloc(PBUF_RAW, (u16_t)(pcb->chap_server.challenge_pktlen), PPP_CTRL_PBUF_TYPE); + if(NULL == p) + return; + if(p->tot_len != p->len) { + pbuf_free(p); + return; + } + MEMCPY(p->payload, pcb->chap_server.challenge, pcb->chap_server.challenge_pktlen); + ppp_write(pcb, p); + ++pcb->chap_server.challenge_xmits; + pcb->chap_server.flags |= TIMEOUT_PENDING; + TIMEOUT(chap_timeout, arg, pcb->settings.chap_timeout_time); +} + +/* + * chap_generate_challenge - generate a challenge string and format + * the challenge packet in pcb->chap_server.challenge_pkt. + */ +static void chap_generate_challenge(ppp_pcb *pcb) { + int clen = 1, nlen, len; + unsigned char *p; + + p = pcb->chap_server.challenge; + MAKEHEADER(p, PPP_CHAP); + p += CHAP_HDRLEN; + pcb->chap_server.digest->generate_challenge(pcb, p); + clen = *p; + nlen = strlen(pcb->chap_server.name); + memcpy(p + 1 + clen, pcb->chap_server.name, nlen); + + len = CHAP_HDRLEN + 1 + clen + nlen; + pcb->chap_server.challenge_pktlen = PPP_HDRLEN + len; + + p = pcb->chap_server.challenge + PPP_HDRLEN; + p[0] = CHAP_CHALLENGE; + p[1] = ++pcb->chap_server.id; + p[2] = len >> 8; + p[3] = len; +} + +/* + * chap_handle_response - check the response to our challenge. 
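chap_generate_challenge() above lays the packet out as code, identifier, a 2-byte length covering the whole CHAP header, a 1-byte value-size, the challenge value, and finally our name with no terminator; the Response packets handled below reuse the same value-size/value/name layout. A small standalone builder for that framing, using plain buffer arithmetic instead of lwIP's pbuf/MAKEHEADER machinery:

    #include <stdio.h>
    #include <string.h>

    #define CHAP_CHALLENGE_CODE 1
    #define CHAP_HDRLEN         4

    /* Build code|id|len(2)|value_size(1)|value|name into buf.
     * Returns the total packet length, or -1 if it does not fit. */
    static int build_chap_challenge(unsigned char *buf, int bufsize,
                                    unsigned char id,
                                    const unsigned char *value, int value_len,
                                    const char *name)
    {
        int name_len = (int)strlen(name);
        int len = CHAP_HDRLEN + 1 + value_len + name_len;

        if (len > bufsize || value_len > 255)
            return -1;

        buf[0] = CHAP_CHALLENGE_CODE;
        buf[1] = id;
        buf[2] = (unsigned char)(len >> 8);   /* length includes the CHAP header */
        buf[3] = (unsigned char)len;
        buf[4] = (unsigned char)value_len;
        memcpy(buf + 5, value, value_len);
        memcpy(buf + 5 + value_len, name, name_len);
        return len;
    }

    int main(void)
    {
        unsigned char challenge[16] = { 0xde, 0xad, 0xbe, 0xef };  /* example value */
        unsigned char pkt[64];
        int n = build_chap_challenge(pkt, sizeof(pkt), 0x2a, challenge, 16, "router");

        printf("packet length: %d\n", n);   /* 4 + 1 + 16 + 6 = 27 */
        return 0;
    }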
+ */ +static void chap_handle_response(ppp_pcb *pcb, int id, + unsigned char *pkt, int len) { + int response_len, ok, mlen; + const unsigned char *response; + unsigned char *outp; + struct pbuf *p; + const char *name = NULL; /* initialized to shut gcc up */ +#if 0 /* UNUSED */ + int (*verifier)(const char *, const char *, int, const struct chap_digest_type *, + const unsigned char *, const unsigned char *, char *, int); +#endif /* UNUSED */ + char rname[MAXNAMELEN+1]; + char message[256]; + + if ((pcb->chap_server.flags & LOWERUP) == 0) + return; + if (id != pcb->chap_server.challenge[PPP_HDRLEN+1] || len < 2) + return; + if (pcb->chap_server.flags & CHALLENGE_VALID) { + response = pkt; + GETCHAR(response_len, pkt); + len -= response_len + 1; /* length of name */ + name = (char *)pkt + response_len; + if (len < 0) + return; + + if (pcb->chap_server.flags & TIMEOUT_PENDING) { + pcb->chap_server.flags &= ~TIMEOUT_PENDING; + UNTIMEOUT(chap_timeout, pcb); + } +#if PPP_REMOTENAME + if (pcb->settings.explicit_remote) { + name = pcb->remote_name; + } else +#endif /* PPP_REMOTENAME */ + { + /* Null terminate and clean remote name. */ + ppp_slprintf(rname, sizeof(rname), "%.*v", len, name); + name = rname; + } + +#if 0 /* UNUSED */ + if (chap_verify_hook) + verifier = chap_verify_hook; + else + verifier = chap_verify_response; + ok = (*verifier)(name, pcb->chap_server.name, id, pcb->chap_server.digest, + pcb->chap_server.challenge + PPP_HDRLEN + CHAP_HDRLEN, + response, pcb->chap_server.message, sizeof(pcb->chap_server.message)); +#endif /* UNUSED */ + ok = chap_verify_response(pcb, name, pcb->chap_server.name, id, pcb->chap_server.digest, + pcb->chap_server.challenge + PPP_HDRLEN + CHAP_HDRLEN, + response, message, sizeof(message)); +#if 0 /* UNUSED */ + if (!ok || !auth_number()) { +#endif /* UNUSED */ + if (!ok) { + pcb->chap_server.flags |= AUTH_FAILED; + ppp_warn("Peer %q failed CHAP authentication", name); + } + } else if ((pcb->chap_server.flags & AUTH_DONE) == 0) + return; + + /* send the response */ + mlen = strlen(message); + len = CHAP_HDRLEN + mlen; + p = pbuf_alloc(PBUF_RAW, (u16_t)(PPP_HDRLEN +len), PPP_CTRL_PBUF_TYPE); + if(NULL == p) + return; + if(p->tot_len != p->len) { + pbuf_free(p); + return; + } + + outp = (unsigned char *)p->payload; + MAKEHEADER(outp, PPP_CHAP); + + outp[0] = (pcb->chap_server.flags & AUTH_FAILED)? CHAP_FAILURE: CHAP_SUCCESS; + outp[1] = id; + outp[2] = len >> 8; + outp[3] = len; + if (mlen > 0) + memcpy(outp + CHAP_HDRLEN, message, mlen); + ppp_write(pcb, p); + + if (pcb->chap_server.flags & CHALLENGE_VALID) { + pcb->chap_server.flags &= ~CHALLENGE_VALID; + if (!(pcb->chap_server.flags & AUTH_DONE) && !(pcb->chap_server.flags & AUTH_FAILED)) { + +#if 0 /* UNUSED */ + /* + * Auth is OK, so now we need to check session restrictions + * to ensure everything is OK, but only if we used a + * plugin, and only if we're configured to check. This + * allows us to do PAM checks on PPP servers that + * authenticate against ActiveDirectory, and use AD for + * account info (like when using Winbind integrated with + * PAM). 
+ */ + if (session_mgmt && + session_check(name, NULL, devnam, NULL) == 0) { + pcb->chap_server.flags |= AUTH_FAILED; + ppp_warn("Peer %q failed CHAP Session verification", name); + } +#endif /* UNUSED */ + + } + if (pcb->chap_server.flags & AUTH_FAILED) { + auth_peer_fail(pcb, PPP_CHAP); + } else { + if ((pcb->chap_server.flags & AUTH_DONE) == 0) + auth_peer_success(pcb, PPP_CHAP, + pcb->chap_server.digest->code, + name, strlen(name)); + if (pcb->settings.chap_rechallenge_time) { + pcb->chap_server.flags |= TIMEOUT_PENDING; + TIMEOUT(chap_timeout, pcb, + pcb->settings.chap_rechallenge_time); + } + } + pcb->chap_server.flags |= AUTH_DONE; + } +} + +/* + * chap_verify_response - check whether the peer's response matches + * what we think it should be. Returns 1 if it does (authentication + * succeeded), or 0 if it doesn't. + */ +static int chap_verify_response(ppp_pcb *pcb, const char *name, const char *ourname, int id, + const struct chap_digest_type *digest, + const unsigned char *challenge, const unsigned char *response, + char *message, int message_space) { + int ok; + unsigned char secret[MAXSECRETLEN]; + int secret_len; + + /* Get the secret that the peer is supposed to know */ + if (!get_secret(pcb, name, ourname, (char *)secret, &secret_len, 1)) { + ppp_error("No CHAP secret found for authenticating %q", name); + return 0; + } + ok = digest->verify_response(pcb, id, name, secret, secret_len, challenge, + response, message, message_space); + memset(secret, 0, sizeof(secret)); + + return ok; +} +#endif /* PPP_SERVER */ + +/* + * chap_respond - Generate and send a response to a challenge. + */ +static void chap_respond(ppp_pcb *pcb, int id, + unsigned char *pkt, int len) { + int clen, nlen; + int secret_len; + struct pbuf *p; + u_char *outp; + char rname[MAXNAMELEN+1]; + char secret[MAXSECRETLEN+1]; + + p = pbuf_alloc(PBUF_RAW, (u16_t)(RESP_MAX_PKTLEN), PPP_CTRL_PBUF_TYPE); + if(NULL == p) + return; + if(p->tot_len != p->len) { + pbuf_free(p); + return; + } + + if ((pcb->chap_client.flags & (LOWERUP | AUTH_STARTED)) != (LOWERUP | AUTH_STARTED)) + return; /* not ready */ + if (len < 2 || len < pkt[0] + 1) + return; /* too short */ + clen = pkt[0]; + nlen = len - (clen + 1); + + /* Null terminate and clean remote name. 
*/ + ppp_slprintf(rname, sizeof(rname), "%.*v", nlen, pkt + clen + 1); + +#if PPP_REMOTENAME + /* Microsoft doesn't send their name back in the PPP packet */ + if (pcb->settings.explicit_remote || (pcb->settings.remote_name[0] != 0 && rname[0] == 0)) + strlcpy(rname, pcb->settings.remote_name, sizeof(rname)); +#endif /* PPP_REMOTENAME */ + + /* get secret for authenticating ourselves with the specified host */ + if (!get_secret(pcb, pcb->chap_client.name, rname, secret, &secret_len, 0)) { + secret_len = 0; /* assume null secret if can't find one */ + ppp_warn("No CHAP secret found for authenticating us to %q", rname); + } + + outp = (u_char*)p->payload; + MAKEHEADER(outp, PPP_CHAP); + outp += CHAP_HDRLEN; + + pcb->chap_client.digest->make_response(pcb, outp, id, pcb->chap_client.name, pkt, + secret, secret_len, pcb->chap_client.priv); + memset(secret, 0, secret_len); + + clen = *outp; + nlen = strlen(pcb->chap_client.name); + memcpy(outp + clen + 1, pcb->chap_client.name, nlen); + + outp = (u_char*)p->payload + PPP_HDRLEN; + len = CHAP_HDRLEN + clen + 1 + nlen; + outp[0] = CHAP_RESPONSE; + outp[1] = id; + outp[2] = len >> 8; + outp[3] = len; + + pbuf_realloc(p, PPP_HDRLEN + len); + ppp_write(pcb, p); +} + +static void chap_handle_status(ppp_pcb *pcb, int code, int id, + unsigned char *pkt, int len) { + const char *msg = NULL; + LWIP_UNUSED_ARG(id); + + if ((pcb->chap_client.flags & (AUTH_DONE|AUTH_STARTED|LOWERUP)) + != (AUTH_STARTED|LOWERUP)) + return; + pcb->chap_client.flags |= AUTH_DONE; + + if (code == CHAP_SUCCESS) { + /* used for MS-CHAP v2 mutual auth, yuck */ + if (pcb->chap_client.digest->check_success != NULL) { + if (!(*pcb->chap_client.digest->check_success)(pcb, pkt, len, pcb->chap_client.priv)) + code = CHAP_FAILURE; + } else + msg = "CHAP authentication succeeded"; + } else { + if (pcb->chap_client.digest->handle_failure != NULL) + (*pcb->chap_client.digest->handle_failure)(pcb, pkt, len); + else + msg = "CHAP authentication failed"; + } + if (msg) { + if (len > 0) + ppp_info("%s: %.*v", msg, len, pkt); + else + ppp_info("%s", msg); + } + if (code == CHAP_SUCCESS) + auth_withpeer_success(pcb, PPP_CHAP, pcb->chap_client.digest->code); + else { + pcb->chap_client.flags |= AUTH_FAILED; + ppp_error("CHAP authentication failed"); + auth_withpeer_fail(pcb, PPP_CHAP); + } +} + +static void chap_input(ppp_pcb *pcb, unsigned char *pkt, int pktlen) { + unsigned char code, id; + int len; + + if (pktlen < CHAP_HDRLEN) + return; + GETCHAR(code, pkt); + GETCHAR(id, pkt); + GETSHORT(len, pkt); + if (len < CHAP_HDRLEN || len > pktlen) + return; + len -= CHAP_HDRLEN; + + switch (code) { + case CHAP_CHALLENGE: + chap_respond(pcb, id, pkt, len); + break; +#if PPP_SERVER + case CHAP_RESPONSE: + chap_handle_response(pcb, id, pkt, len); + break; +#endif /* PPP_SERVER */ + case CHAP_FAILURE: + case CHAP_SUCCESS: + chap_handle_status(pcb, code, id, pkt, len); + break; + default: + break; + } +} + +static void chap_protrej(ppp_pcb *pcb) { + +#if PPP_SERVER + if (pcb->chap_server.flags & TIMEOUT_PENDING) { + pcb->chap_server.flags &= ~TIMEOUT_PENDING; + UNTIMEOUT(chap_timeout, pcb); + } + if (pcb->chap_server.flags & AUTH_STARTED) { + pcb->chap_server.flags = 0; + auth_peer_fail(pcb, PPP_CHAP); + } +#endif /* PPP_SERVER */ + if ((pcb->chap_client.flags & (AUTH_STARTED|AUTH_DONE)) == AUTH_STARTED) { + pcb->chap_client.flags &= ~AUTH_STARTED; + ppp_error("CHAP authentication failed due to protocol-reject"); + auth_withpeer_fail(pcb, PPP_CHAP); + } +} + +#if PRINTPKT_SUPPORT +/* + * chap_print_pkt - 
print the contents of a CHAP packet. + */ +static const char* const chap_code_names[] = { + "Challenge", "Response", "Success", "Failure" +}; + +static int chap_print_pkt(const unsigned char *p, int plen, + void (*printer) (void *, const char *, ...), void *arg) { + int code, id, len; + int clen, nlen; + unsigned char x; + + if (plen < CHAP_HDRLEN) + return 0; + GETCHAR(code, p); + GETCHAR(id, p); + GETSHORT(len, p); + if (len < CHAP_HDRLEN || len > plen) + return 0; + + if (code >= 1 && code <= (int)LWIP_ARRAYSIZE(chap_code_names)) + printer(arg, " %s", chap_code_names[code-1]); + else + printer(arg, " code=0x%x", code); + printer(arg, " id=0x%x", id); + len -= CHAP_HDRLEN; + switch (code) { + case CHAP_CHALLENGE: + case CHAP_RESPONSE: + if (len < 1) + break; + clen = p[0]; + if (len < clen + 1) + break; + ++p; + nlen = len - clen - 1; + printer(arg, " <"); + for (; clen > 0; --clen) { + GETCHAR(x, p); + printer(arg, "%.2x", x); + } + printer(arg, ">, name = "); + ppp_print_string(p, nlen, printer, arg); + break; + case CHAP_FAILURE: + case CHAP_SUCCESS: + printer(arg, " "); + ppp_print_string(p, len, printer, arg); + break; + default: + for (clen = len; clen > 0; --clen) { + GETCHAR(x, p); + printer(arg, " %.2x", x); + } + /* no break */ + } + + return len + CHAP_HDRLEN; +} +#endif /* PRINTPKT_SUPPORT */ + +const struct protent chap_protent = { + PPP_CHAP, + chap_init, + chap_input, + chap_protrej, + chap_lowerup, + chap_lowerdown, + NULL, /* open */ + NULL, /* close */ +#if PRINTPKT_SUPPORT + chap_print_pkt, +#endif /* PRINTPKT_SUPPORT */ +#if PPP_DATAINPUT + NULL, /* datainput */ +#endif /* PPP_DATAINPUT */ +#if PRINTPKT_SUPPORT + "CHAP", /* name */ + NULL, /* data_name */ +#endif /* PRINTPKT_SUPPORT */ +#if PPP_OPTIONS + chap_option_list, + NULL, /* check_options */ +#endif /* PPP_OPTIONS */ +#if DEMAND_SUPPORT + NULL, + NULL +#endif /* DEMAND_SUPPORT */ +}; + +#endif /* PPP_SUPPORT && CHAP_SUPPORT */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/netif/ppp/chap_ms.c b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/netif/ppp/chap_ms.c new file mode 100644 index 0000000..c1533e0 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/netif/ppp/chap_ms.c @@ -0,0 +1,962 @@ +/* + * chap_ms.c - Microsoft MS-CHAP compatible implementation. + * + * Copyright (c) 1995 Eric Rosenquist. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * 3. The name(s) of the authors of this software must not be used to + * endorse or promote products derived from this software without + * prior written permission. 
+ * + * THE AUTHORS OF THIS SOFTWARE DISCLAIM ALL WARRANTIES WITH REGARD TO + * THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY + * AND FITNESS, IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY + * SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN + * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING + * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +/* + * Modifications by Lauri Pesonen / lpesonen@clinet.fi, april 1997 + * + * Implemented LANManager type password response to MS-CHAP challenges. + * Now pppd provides both NT style and LANMan style blocks, and the + * prefered is set by option "ms-lanman". Default is to use NT. + * The hash text (StdText) was taken from Win95 RASAPI32.DLL. + * + * You should also use DOMAIN\\USERNAME as described in README.MSCHAP80 + */ + +/* + * Modifications by Frank Cusack, frank@google.com, March 2002. + * + * Implemented MS-CHAPv2 functionality, heavily based on sample + * implementation in RFC 2759. Implemented MPPE functionality, + * heavily based on sample implementation in RFC 3079. + * + * Copyright (c) 2002 Google, Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * 3. The name(s) of the authors of this software must not be used to + * endorse or promote products derived from this software without + * prior written permission. + * + * THE AUTHORS OF THIS SOFTWARE DISCLAIM ALL WARRANTIES WITH REGARD TO + * THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY + * AND FITNESS, IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY + * SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN + * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING + * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + * + */ + +#include "netif/ppp/ppp_opts.h" +#if PPP_SUPPORT && MSCHAP_SUPPORT /* don't build if not configured for use in lwipopts.h */ + +#if 0 /* UNUSED */ +#include +#include +#include +#include +#include +#include +#include +#endif /* UNUSED */ + +#include "netif/ppp/ppp_impl.h" + +#include "netif/ppp/chap-new.h" +#include "netif/ppp/chap_ms.h" +#include "netif/ppp/pppcrypt.h" +#include "netif/ppp/magic.h" +#if MPPE_SUPPORT +#include "netif/ppp/mppe.h" /* For mppe_sha1_pad*, mppe_set_key() */ +#endif /* MPPE_SUPPORT */ + +#define SHA1_SIGNATURE_SIZE 20 +#define MD4_SIGNATURE_SIZE 16 /* 16 bytes in a MD4 message digest */ +#define MAX_NT_PASSWORD 256 /* Max (Unicode) chars in an NT pass */ + +#define MS_CHAP_RESPONSE_LEN 49 /* Response length for MS-CHAP */ +#define MS_CHAP2_RESPONSE_LEN 49 /* Response length for MS-CHAPv2 */ +#define MS_AUTH_RESPONSE_LENGTH 40 /* MS-CHAPv2 authenticator response, */ + /* as ASCII */ + +/* Error codes for MS-CHAP failure messages. 
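Further below, the MS_CHAP_* and MS_CHAP2_* offsets pin down a fixed 49-byte response blob; for MS-CHAPv2 it is a 16-byte peer challenge, 8 reserved bytes, the 24-byte NT response and a 1-byte flags field. A tiny accessor sketch using the same numeric layout (constants re-declared locally so the example stands alone):

    #include <stdio.h>
    #include <string.h>

    /* Same layout as the MS_CHAP2_* offsets defined below. */
    #define PEER_CHALLENGE_OFS 0
    #define PEER_CHAL_LEN      16
    #define RESERVED_LEN       8
    #define NTRESP_OFS         24
    #define NTRESP_LEN         24
    #define FLAGS_OFS          48
    #define RESPONSE_LEN       49

    /* Copy the 24-byte NT response out of a 49-byte MS-CHAPv2 response blob. */
    static int get_nt_response(const unsigned char *blob, int blob_len,
                               unsigned char out[NTRESP_LEN])
    {
        if (blob_len != RESPONSE_LEN)
            return -1;                       /* "not even the right length" */
        memcpy(out, blob + NTRESP_OFS, NTRESP_LEN);
        return 0;
    }

    int main(void)
    {
        unsigned char blob[RESPONSE_LEN] = { 0 };
        unsigned char nt[NTRESP_LEN];

        blob[NTRESP_OFS] = 0x42;             /* fake first byte of the NT response */
        if (get_nt_response(blob, sizeof(blob), nt) == 0)
            printf("first NT response byte: 0x%02x, flags: 0x%02x\n",
                   nt[0], blob[FLAGS_OFS]);
        return 0;
    }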
*/ +#define MS_CHAP_ERROR_RESTRICTED_LOGON_HOURS 646 +#define MS_CHAP_ERROR_ACCT_DISABLED 647 +#define MS_CHAP_ERROR_PASSWD_EXPIRED 648 +#define MS_CHAP_ERROR_NO_DIALIN_PERMISSION 649 +#define MS_CHAP_ERROR_AUTHENTICATION_FAILURE 691 +#define MS_CHAP_ERROR_CHANGING_PASSWORD 709 + +/* + * Offsets within the response field for MS-CHAP + */ +#define MS_CHAP_LANMANRESP 0 +#define MS_CHAP_LANMANRESP_LEN 24 +#define MS_CHAP_NTRESP 24 +#define MS_CHAP_NTRESP_LEN 24 +#define MS_CHAP_USENT 48 + +/* + * Offsets within the response field for MS-CHAP2 + */ +#define MS_CHAP2_PEER_CHALLENGE 0 +#define MS_CHAP2_PEER_CHAL_LEN 16 +#define MS_CHAP2_RESERVED_LEN 8 +#define MS_CHAP2_NTRESP 24 +#define MS_CHAP2_NTRESP_LEN 24 +#define MS_CHAP2_FLAGS 48 + +#if MPPE_SUPPORT +#if 0 /* UNUSED */ +/* These values are the RADIUS attribute values--see RFC 2548. */ +#define MPPE_ENC_POL_ENC_ALLOWED 1 +#define MPPE_ENC_POL_ENC_REQUIRED 2 +#define MPPE_ENC_TYPES_RC4_40 2 +#define MPPE_ENC_TYPES_RC4_128 4 + +/* used by plugins (using above values) */ +extern void set_mppe_enc_types(int, int); +#endif /* UNUSED */ +#endif /* MPPE_SUPPORT */ + +/* Are we the authenticator or authenticatee? For MS-CHAPv2 key derivation. */ +#define MS_CHAP2_AUTHENTICATEE 0 +#define MS_CHAP2_AUTHENTICATOR 1 + +static void ascii2unicode (const char[], int, u_char[]); +static void NTPasswordHash (u_char *, int, u_char[MD4_SIGNATURE_SIZE]); +static void ChallengeResponse (const u_char *, const u_char *, u_char[24]); +static void ChallengeHash (const u_char[16], const u_char *, const char *, u_char[8]); +static void ChapMS_NT (const u_char *, const char *, int, u_char[24]); +static void ChapMS2_NT (const u_char *, const u_char[16], const char *, const char *, int, + u_char[24]); +static void GenerateAuthenticatorResponsePlain + (const char*, int, u_char[24], const u_char[16], const u_char *, + const char *, u_char[41]); +#ifdef MSLANMAN +static void ChapMS_LANMan (u_char *, char *, int, u_char *); +#endif + +static void GenerateAuthenticatorResponse(const u_char PasswordHashHash[MD4_SIGNATURE_SIZE], + u_char NTResponse[24], const u_char PeerChallenge[16], + const u_char *rchallenge, const char *username, + u_char authResponse[MS_AUTH_RESPONSE_LENGTH+1]); + +#if MPPE_SUPPORT +static void Set_Start_Key (ppp_pcb *pcb, const u_char *, const char *, int); +static void SetMasterKeys (ppp_pcb *pcb, const char *, int, u_char[24], int); +#endif /* MPPE_SUPPORT */ + +static void ChapMS (ppp_pcb *pcb, const u_char *, const char *, int, u_char *); +static void ChapMS2 (ppp_pcb *pcb, const u_char *, const u_char *, const char *, const char *, int, + u_char *, u_char[MS_AUTH_RESPONSE_LENGTH+1], int); + +#ifdef MSLANMAN +bool ms_lanman = 0; /* Use LanMan password instead of NT */ + /* Has meaning only with MS-CHAP challenges */ +#endif + +#if MPPE_SUPPORT +#ifdef DEBUGMPPEKEY +/* For MPPE debug */ +/* Use "[]|}{?/><,`!2&&(" (sans quotes) for RFC 3079 MS-CHAPv2 test value */ +static char *mschap_challenge = NULL; +/* Use "!@\#$%^&*()_+:3|~" (sans quotes, backslash is to escape #) for ... */ +static char *mschap2_peer_challenge = NULL; +#endif + +#include "netif/ppp/fsm.h" /* Need to poke MPPE options */ +#include "netif/ppp/ccp.h" +#endif /* MPPE_SUPPORT */ + +#if PPP_OPTIONS +/* + * Command-line options. 
+ */ +static option_t chapms_option_list[] = { +#ifdef MSLANMAN + { "ms-lanman", o_bool, &ms_lanman, + "Use LanMan passwd when using MS-CHAP", 1 }, +#endif +#ifdef DEBUGMPPEKEY + { "mschap-challenge", o_string, &mschap_challenge, + "specify CHAP challenge" }, + { "mschap2-peer-challenge", o_string, &mschap2_peer_challenge, + "specify CHAP peer challenge" }, +#endif + { NULL } +}; +#endif /* PPP_OPTIONS */ + +#if PPP_SERVER +/* + * chapms_generate_challenge - generate a challenge for MS-CHAP. + * For MS-CHAP the challenge length is fixed at 8 bytes. + * The length goes in challenge[0] and the actual challenge starts + * at challenge[1]. + */ +static void chapms_generate_challenge(ppp_pcb *pcb, unsigned char *challenge) { + LWIP_UNUSED_ARG(pcb); + + *challenge++ = 8; +#ifdef DEBUGMPPEKEY + if (mschap_challenge && strlen(mschap_challenge) == 8) + memcpy(challenge, mschap_challenge, 8); + else +#endif + magic_random_bytes(challenge, 8); +} + +static void chapms2_generate_challenge(ppp_pcb *pcb, unsigned char *challenge) { + LWIP_UNUSED_ARG(pcb); + + *challenge++ = 16; +#ifdef DEBUGMPPEKEY + if (mschap_challenge && strlen(mschap_challenge) == 16) + memcpy(challenge, mschap_challenge, 16); + else +#endif + magic_random_bytes(challenge, 16); +} + +static int chapms_verify_response(ppp_pcb *pcb, int id, const char *name, + const unsigned char *secret, int secret_len, + const unsigned char *challenge, const unsigned char *response, + char *message, int message_space) { + unsigned char md[MS_CHAP_RESPONSE_LEN]; + int diff; + int challenge_len, response_len; + LWIP_UNUSED_ARG(id); + LWIP_UNUSED_ARG(name); + + challenge_len = *challenge++; /* skip length, is 8 */ + response_len = *response++; + if (response_len != MS_CHAP_RESPONSE_LEN) + goto bad; + +#ifndef MSLANMAN + if (!response[MS_CHAP_USENT]) { + /* Should really propagate this into the error packet. */ + ppp_notice("Peer request for LANMAN auth not supported"); + goto bad; + } +#endif + + /* Generate the expected response. */ + ChapMS(pcb, (const u_char *)challenge, (const char *)secret, secret_len, md); + +#ifdef MSLANMAN + /* Determine which part of response to verify against */ + if (!response[MS_CHAP_USENT]) + diff = memcmp(&response[MS_CHAP_LANMANRESP], + &md[MS_CHAP_LANMANRESP], MS_CHAP_LANMANRESP_LEN); + else +#endif + diff = memcmp(&response[MS_CHAP_NTRESP], &md[MS_CHAP_NTRESP], + MS_CHAP_NTRESP_LEN); + + if (diff == 0) { + ppp_slprintf(message, message_space, "Access granted"); + return 1; + } + + bad: + /* See comments below for MS-CHAP V2 */ + ppp_slprintf(message, message_space, "E=691 R=1 C=%0.*B V=0", + challenge_len, challenge); + return 0; +} + +static int chapms2_verify_response(ppp_pcb *pcb, int id, const char *name, + const unsigned char *secret, int secret_len, + const unsigned char *challenge, const unsigned char *response, + char *message, int message_space) { + unsigned char md[MS_CHAP2_RESPONSE_LEN]; + char saresponse[MS_AUTH_RESPONSE_LENGTH+1]; + int challenge_len, response_len; + LWIP_UNUSED_ARG(id); + + challenge_len = *challenge++; /* skip length, is 16 */ + response_len = *response++; + if (response_len != MS_CHAP2_RESPONSE_LEN) + goto bad; /* not even the right length */ + + /* Generate the expected response and our mutual auth. 
*/ + ChapMS2(pcb, (const u_char*)challenge, (const u_char*)&response[MS_CHAP2_PEER_CHALLENGE], name, + (const char *)secret, secret_len, md, + (unsigned char *)saresponse, MS_CHAP2_AUTHENTICATOR); + + /* compare MDs and send the appropriate status */ + /* + * Per RFC 2759, success message must be formatted as + * "S= M=" + * where + * is the Authenticator Response (mutual auth) + * is a text message + * + * However, some versions of Windows (win98 tested) do not know + * about the M= part (required per RFC 2759) and flag + * it as an error (reported incorrectly as an encryption error + * to the user). Since the RFC requires it, and it can be + * useful information, we supply it if the peer is a conforming + * system. Luckily (?), win98 sets the Flags field to 0x04 + * (contrary to RFC requirements) so we can use that to + * distinguish between conforming and non-conforming systems. + * + * Special thanks to Alex Swiridov for + * help debugging this. + */ + if (memcmp(&md[MS_CHAP2_NTRESP], &response[MS_CHAP2_NTRESP], + MS_CHAP2_NTRESP_LEN) == 0) { + if (response[MS_CHAP2_FLAGS]) + ppp_slprintf(message, message_space, "S=%s", saresponse); + else + ppp_slprintf(message, message_space, "S=%s M=%s", + saresponse, "Access granted"); + return 1; + } + + bad: + /* + * Failure message must be formatted as + * "E=e R=r C=c V=v M=m" + * where + * e = error code (we use 691, ERROR_AUTHENTICATION_FAILURE) + * r = retry (we use 1, ok to retry) + * c = challenge to use for next response, we reuse previous + * v = Change Password version supported, we use 0 + * m = text message + * + * The M=m part is only for MS-CHAPv2. Neither win2k nor + * win98 (others untested) display the message to the user anyway. + * They also both ignore the E=e code. + * + * Note that it's safe to reuse the same challenge as we don't + * actually accept another response based on the error message + * (and no clients try to resend a response anyway). + * + * Basically, this whole bit is useless code, even the small + * implementation here is only because of overspecification. 
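As the comment above spells out, the failure text is a flat key/value string, and the only non-trivial part is hex-encoding the challenge for the C= field; in the real code lwIP's %0.*B printf extension does that expansion. A plain-snprintf sketch of the same formatting:

    #include <stdio.h>

    /* Format "E=691 R=1 C=<hex challenge> V=0 M=<msg>" into buf (buf size = space). */
    static void format_chap2_failure(char *buf, int space,
                                     const unsigned char *challenge, int challenge_len,
                                     const char *msg)
    {
        int n = snprintf(buf, space, "E=691 R=1 C=");
        int i;

        for (i = 0; i < challenge_len && n >= 0 && n + 2 < space; i++)
            n += snprintf(buf + n, space - n, "%02X", challenge[i]);
        if (n >= 0 && n < space)
            snprintf(buf + n, space - n, " V=0 M=%s", msg);
    }

    int main(void)
    {
        unsigned char chal[4] = { 0x01, 0x02, 0xab, 0xcd };
        char msg[128];

        format_chap2_failure(msg, sizeof(msg), chal, 4, "Access denied");
        puts(msg);   /* E=691 R=1 C=0102ABCD V=0 M=Access denied */
        return 0;
    }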
+ */ + ppp_slprintf(message, message_space, "E=691 R=1 C=%0.*B V=0 M=%s", + challenge_len, challenge, "Access denied"); + return 0; +} +#endif /* PPP_SERVER */ + +static void chapms_make_response(ppp_pcb *pcb, unsigned char *response, int id, const char *our_name, + const unsigned char *challenge, const char *secret, int secret_len, + unsigned char *private_) { + LWIP_UNUSED_ARG(id); + LWIP_UNUSED_ARG(our_name); + LWIP_UNUSED_ARG(private_); + challenge++; /* skip length, should be 8 */ + *response++ = MS_CHAP_RESPONSE_LEN; + ChapMS(pcb, challenge, secret, secret_len, response); +} + +static void chapms2_make_response(ppp_pcb *pcb, unsigned char *response, int id, const char *our_name, + const unsigned char *challenge, const char *secret, int secret_len, + unsigned char *private_) { + LWIP_UNUSED_ARG(id); + challenge++; /* skip length, should be 16 */ + *response++ = MS_CHAP2_RESPONSE_LEN; + ChapMS2(pcb, challenge, +#ifdef DEBUGMPPEKEY + mschap2_peer_challenge, +#else + NULL, +#endif + our_name, secret, secret_len, response, private_, + MS_CHAP2_AUTHENTICATEE); +} + +static int chapms2_check_success(ppp_pcb *pcb, unsigned char *msg, int len, unsigned char *private_) { + LWIP_UNUSED_ARG(pcb); + + if ((len < MS_AUTH_RESPONSE_LENGTH + 2) || + strncmp((char *)msg, "S=", 2) != 0) { + /* Packet does not start with "S=" */ + ppp_error("MS-CHAPv2 Success packet is badly formed."); + return 0; + } + msg += 2; + len -= 2; + if (len < MS_AUTH_RESPONSE_LENGTH + || memcmp(msg, private_, MS_AUTH_RESPONSE_LENGTH)) { + /* Authenticator Response did not match expected. */ + ppp_error("MS-CHAPv2 mutual authentication failed."); + return 0; + } + /* Authenticator Response matches. */ + msg += MS_AUTH_RESPONSE_LENGTH; /* Eat it */ + len -= MS_AUTH_RESPONSE_LENGTH; + if ((len >= 3) && !strncmp((char *)msg, " M=", 3)) { + msg += 3; /* Eat the delimiter */ + } else if (len) { + /* Packet has extra text which does not begin " M=" */ + ppp_error("MS-CHAPv2 Success packet is badly formed."); + return 0; + } + return 1; +} + +static void chapms_handle_failure(ppp_pcb *pcb, unsigned char *inp, int len) { + int err; + const char *p; + char msg[64]; + LWIP_UNUSED_ARG(pcb); + + /* We want a null-terminated string for strxxx(). */ + len = LWIP_MIN(len, 63); + MEMCPY(msg, inp, len); + msg[len] = 0; + p = msg; + + /* + * Deal with MS-CHAP formatted failure messages; just print the + * M= part (if any). For MS-CHAP we're not really supposed + * to use M=, but it shouldn't hurt. See + * chapms[2]_verify_response. + */ + if (!strncmp(p, "E=", 2)) + err = strtol(p+2, NULL, 10); /* Remember the error code. */ + else + goto print_msg; /* Message is badly formatted. */ + + if (len && ((p = strstr(p, " M=")) != NULL)) { + /* M= field found. */ + p += 3; + } else { + /* No M=; use the error code. */ + switch (err) { + case MS_CHAP_ERROR_RESTRICTED_LOGON_HOURS: + p = "E=646 Restricted logon hours"; + break; + + case MS_CHAP_ERROR_ACCT_DISABLED: + p = "E=647 Account disabled"; + break; + + case MS_CHAP_ERROR_PASSWD_EXPIRED: + p = "E=648 Password expired"; + break; + + case MS_CHAP_ERROR_NO_DIALIN_PERMISSION: + p = "E=649 No dialin permission"; + break; + + case MS_CHAP_ERROR_AUTHENTICATION_FAILURE: + p = "E=691 Authentication failure"; + break; + + case MS_CHAP_ERROR_CHANGING_PASSWORD: + /* Should never see this, we don't support Change Password. 
*/ + p = "E=709 Error changing password"; + break; + + default: + ppp_error("Unknown MS-CHAP authentication failure: %.*v", + len, inp); + return; + } + } +print_msg: + if (p != NULL) + ppp_error("MS-CHAP authentication failed: %v", p); +} + +static void ChallengeResponse(const u_char *challenge, + const u_char PasswordHash[MD4_SIGNATURE_SIZE], + u_char response[24]) { + u_char ZPasswordHash[21]; + lwip_des_context des; + u_char des_key[8]; + + BZERO(ZPasswordHash, sizeof(ZPasswordHash)); + MEMCPY(ZPasswordHash, PasswordHash, MD4_SIGNATURE_SIZE); + +#if 0 + dbglog("ChallengeResponse - ZPasswordHash %.*B", + sizeof(ZPasswordHash), ZPasswordHash); +#endif + + pppcrypt_56_to_64_bit_key(ZPasswordHash + 0, des_key); + lwip_des_init(&des); + lwip_des_setkey_enc(&des, des_key); + lwip_des_crypt_ecb(&des, challenge, response +0); + lwip_des_free(&des); + + pppcrypt_56_to_64_bit_key(ZPasswordHash + 7, des_key); + lwip_des_init(&des); + lwip_des_setkey_enc(&des, des_key); + lwip_des_crypt_ecb(&des, challenge, response +8); + lwip_des_free(&des); + + pppcrypt_56_to_64_bit_key(ZPasswordHash + 14, des_key); + lwip_des_init(&des); + lwip_des_setkey_enc(&des, des_key); + lwip_des_crypt_ecb(&des, challenge, response +16); + lwip_des_free(&des); + +#if 0 + dbglog("ChallengeResponse - response %.24B", response); +#endif +} + +static void ChallengeHash(const u_char PeerChallenge[16], const u_char *rchallenge, + const char *username, u_char Challenge[8]) { + lwip_sha1_context sha1Context; + u_char sha1Hash[SHA1_SIGNATURE_SIZE]; + const char *user; + + /* remove domain from "domain\username" */ + if ((user = strrchr(username, '\\')) != NULL) + ++user; + else + user = username; + + lwip_sha1_init(&sha1Context); + lwip_sha1_starts(&sha1Context); + lwip_sha1_update(&sha1Context, PeerChallenge, 16); + lwip_sha1_update(&sha1Context, rchallenge, 16); + lwip_sha1_update(&sha1Context, (const unsigned char*)user, strlen(user)); + lwip_sha1_finish(&sha1Context, sha1Hash); + lwip_sha1_free(&sha1Context); + + MEMCPY(Challenge, sha1Hash, 8); +} + +/* + * Convert the ASCII version of the password to Unicode. + * This implicitly supports 8-bit ISO8859/1 characters. + * This gives us the little-endian representation, which + * is assumed by all M$ CHAP RFCs. (Unicode byte ordering + * is machine-dependent.) + */ +static void ascii2unicode(const char ascii[], int ascii_len, u_char unicode[]) { + int i; + + BZERO(unicode, ascii_len * 2); + for (i = 0; i < ascii_len; i++) + unicode[i * 2] = (u_char) ascii[i]; +} + +static void NTPasswordHash(u_char *secret, int secret_len, u_char hash[MD4_SIGNATURE_SIZE]) { + lwip_md4_context md4Context; + + lwip_md4_init(&md4Context); + lwip_md4_starts(&md4Context); + lwip_md4_update(&md4Context, secret, secret_len); + lwip_md4_finish(&md4Context, hash); + lwip_md4_free(&md4Context); +} + +static void ChapMS_NT(const u_char *rchallenge, const char *secret, int secret_len, + u_char NTResponse[24]) { + u_char unicodePassword[MAX_NT_PASSWORD * 2]; + u_char PasswordHash[MD4_SIGNATURE_SIZE]; + + /* Hash the Unicode version of the secret (== password). 
*/ + ascii2unicode(secret, secret_len, unicodePassword); + NTPasswordHash(unicodePassword, secret_len * 2, PasswordHash); + + ChallengeResponse(rchallenge, PasswordHash, NTResponse); +} + +static void ChapMS2_NT(const u_char *rchallenge, const u_char PeerChallenge[16], const char *username, + const char *secret, int secret_len, u_char NTResponse[24]) { + u_char unicodePassword[MAX_NT_PASSWORD * 2]; + u_char PasswordHash[MD4_SIGNATURE_SIZE]; + u_char Challenge[8]; + + ChallengeHash(PeerChallenge, rchallenge, username, Challenge); + + /* Hash the Unicode version of the secret (== password). */ + ascii2unicode(secret, secret_len, unicodePassword); + NTPasswordHash(unicodePassword, secret_len * 2, PasswordHash); + + ChallengeResponse(Challenge, PasswordHash, NTResponse); +} + +#ifdef MSLANMAN +static u_char *StdText = (u_char *)"KGS!@#$%"; /* key from rasapi32.dll */ + +static void ChapMS_LANMan(u_char *rchallenge, char *secret, int secret_len, + unsigned char *response) { + int i; + u_char UcasePassword[MAX_NT_PASSWORD]; /* max is actually 14 */ + u_char PasswordHash[MD4_SIGNATURE_SIZE]; + lwip_des_context des; + u_char des_key[8]; + + /* LANMan password is case insensitive */ + BZERO(UcasePassword, sizeof(UcasePassword)); + for (i = 0; i < secret_len; i++) + UcasePassword[i] = (u_char)toupper(secret[i]); + + pppcrypt_56_to_64_bit_key(UcasePassword +0, des_key); + lwip_des_init(&des); + lwip_des_setkey_enc(&des, des_key); + lwip_des_crypt_ecb(&des, StdText, PasswordHash +0); + lwip_des_free(&des); + + pppcrypt_56_to_64_bit_key(UcasePassword +7, des_key); + lwip_des_init(&des); + lwip_des_setkey_enc(&des, des_key); + lwip_des_crypt_ecb(&des, StdText, PasswordHash +8); + lwip_des_free(&des); + + ChallengeResponse(rchallenge, PasswordHash, &response[MS_CHAP_LANMANRESP]); +} +#endif + + +static void GenerateAuthenticatorResponse(const u_char PasswordHashHash[MD4_SIGNATURE_SIZE], + u_char NTResponse[24], const u_char PeerChallenge[16], + const u_char *rchallenge, const char *username, + u_char authResponse[MS_AUTH_RESPONSE_LENGTH+1]) { + /* + * "Magic" constants used in response generation, from RFC 2759. 
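+ *
+ * Note: RFC 2759 derives the 40-hex-digit Authenticator Response roughly as
+ *   Digest    = SHA1(PasswordHashHash | NT-Response | Magic1)
+ *   Challenge = first 8 octets of SHA1(PeerChallenge | rchallenge | username)
+ *   AuthResp  = SHA1(Digest | Challenge | Magic2), hex-encoded below.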
+ */ + static const u_char Magic1[39] = /* "Magic server to client signing constant" */ + { 0x4D, 0x61, 0x67, 0x69, 0x63, 0x20, 0x73, 0x65, 0x72, 0x76, + 0x65, 0x72, 0x20, 0x74, 0x6F, 0x20, 0x63, 0x6C, 0x69, 0x65, + 0x6E, 0x74, 0x20, 0x73, 0x69, 0x67, 0x6E, 0x69, 0x6E, 0x67, + 0x20, 0x63, 0x6F, 0x6E, 0x73, 0x74, 0x61, 0x6E, 0x74 }; + static const u_char Magic2[41] = /* "Pad to make it do more than one iteration" */ + { 0x50, 0x61, 0x64, 0x20, 0x74, 0x6F, 0x20, 0x6D, 0x61, 0x6B, + 0x65, 0x20, 0x69, 0x74, 0x20, 0x64, 0x6F, 0x20, 0x6D, 0x6F, + 0x72, 0x65, 0x20, 0x74, 0x68, 0x61, 0x6E, 0x20, 0x6F, 0x6E, + 0x65, 0x20, 0x69, 0x74, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6F, + 0x6E }; + + int i; + lwip_sha1_context sha1Context; + u_char Digest[SHA1_SIGNATURE_SIZE]; + u_char Challenge[8]; + + lwip_sha1_init(&sha1Context); + lwip_sha1_starts(&sha1Context); + lwip_sha1_update(&sha1Context, PasswordHashHash, MD4_SIGNATURE_SIZE); + lwip_sha1_update(&sha1Context, NTResponse, 24); + lwip_sha1_update(&sha1Context, Magic1, sizeof(Magic1)); + lwip_sha1_finish(&sha1Context, Digest); + lwip_sha1_free(&sha1Context); + + ChallengeHash(PeerChallenge, rchallenge, username, Challenge); + + lwip_sha1_init(&sha1Context); + lwip_sha1_starts(&sha1Context); + lwip_sha1_update(&sha1Context, Digest, sizeof(Digest)); + lwip_sha1_update(&sha1Context, Challenge, sizeof(Challenge)); + lwip_sha1_update(&sha1Context, Magic2, sizeof(Magic2)); + lwip_sha1_finish(&sha1Context, Digest); + lwip_sha1_free(&sha1Context); + + /* Convert to ASCII hex string. */ + for (i = 0; i < LWIP_MAX((MS_AUTH_RESPONSE_LENGTH / 2), (int)sizeof(Digest)); i++) + sprintf((char *)&authResponse[i * 2], "%02X", Digest[i]); +} + + +static void GenerateAuthenticatorResponsePlain( + const char *secret, int secret_len, + u_char NTResponse[24], const u_char PeerChallenge[16], + const u_char *rchallenge, const char *username, + u_char authResponse[MS_AUTH_RESPONSE_LENGTH+1]) { + u_char unicodePassword[MAX_NT_PASSWORD * 2]; + u_char PasswordHash[MD4_SIGNATURE_SIZE]; + u_char PasswordHashHash[MD4_SIGNATURE_SIZE]; + + /* Hash (x2) the Unicode version of the secret (== password). */ + ascii2unicode(secret, secret_len, unicodePassword); + NTPasswordHash(unicodePassword, secret_len * 2, PasswordHash); + NTPasswordHash(PasswordHash, sizeof(PasswordHash), + PasswordHashHash); + + GenerateAuthenticatorResponse(PasswordHashHash, NTResponse, PeerChallenge, + rchallenge, username, authResponse); +} + + +#if MPPE_SUPPORT +/* + * Set mppe_xxxx_key from MS-CHAP credentials. (see RFC 3079) + */ +static void Set_Start_Key(ppp_pcb *pcb, const u_char *rchallenge, const char *secret, int secret_len) { + u_char unicodePassword[MAX_NT_PASSWORD * 2]; + u_char PasswordHash[MD4_SIGNATURE_SIZE]; + u_char PasswordHashHash[MD4_SIGNATURE_SIZE]; + lwip_sha1_context sha1Context; + u_char Digest[SHA1_SIGNATURE_SIZE]; /* >= MPPE_MAX_KEY_LEN */ + + /* Hash (x2) the Unicode version of the secret (== password). */ + ascii2unicode(secret, secret_len, unicodePassword); + NTPasswordHash(unicodePassword, secret_len * 2, PasswordHash); + NTPasswordHash(PasswordHash, sizeof(PasswordHash), PasswordHashHash); + + lwip_sha1_init(&sha1Context); + lwip_sha1_starts(&sha1Context); + lwip_sha1_update(&sha1Context, PasswordHashHash, MD4_SIGNATURE_SIZE); + lwip_sha1_update(&sha1Context, PasswordHashHash, MD4_SIGNATURE_SIZE); + lwip_sha1_update(&sha1Context, rchallenge, 8); + lwip_sha1_finish(&sha1Context, Digest); + lwip_sha1_free(&sha1Context); + + /* Same key in both directions. 
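+ Note: with MS-CHAPv1 credentials (RFC 3079) the initial MPPE session key
+ is derived from SHA1(PasswordHashHash | PasswordHashHash | Challenge), and
+ the leading octets of that digest seed both the transmit (mppe_comp) and
+ receive (mppe_decomp) directions.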
*/ + mppe_set_key(pcb, &pcb->mppe_comp, Digest); + mppe_set_key(pcb, &pcb->mppe_decomp, Digest); + + pcb->mppe_keys_set = 1; +} + +/* + * Set mppe_xxxx_key from MS-CHAPv2 credentials. (see RFC 3079) + */ +static void SetMasterKeys(ppp_pcb *pcb, const char *secret, int secret_len, u_char NTResponse[24], int IsServer) { + u_char unicodePassword[MAX_NT_PASSWORD * 2]; + u_char PasswordHash[MD4_SIGNATURE_SIZE]; + u_char PasswordHashHash[MD4_SIGNATURE_SIZE]; + lwip_sha1_context sha1Context; + u_char MasterKey[SHA1_SIGNATURE_SIZE]; /* >= MPPE_MAX_KEY_LEN */ + u_char Digest[SHA1_SIGNATURE_SIZE]; /* >= MPPE_MAX_KEY_LEN */ + const u_char *s; + + /* "This is the MPPE Master Key" */ + static const u_char Magic1[27] = + { 0x54, 0x68, 0x69, 0x73, 0x20, 0x69, 0x73, 0x20, 0x74, + 0x68, 0x65, 0x20, 0x4d, 0x50, 0x50, 0x45, 0x20, 0x4d, + 0x61, 0x73, 0x74, 0x65, 0x72, 0x20, 0x4b, 0x65, 0x79 }; + /* "On the client side, this is the send key; " + "on the server side, it is the receive key." */ + static const u_char Magic2[84] = + { 0x4f, 0x6e, 0x20, 0x74, 0x68, 0x65, 0x20, 0x63, 0x6c, 0x69, + 0x65, 0x6e, 0x74, 0x20, 0x73, 0x69, 0x64, 0x65, 0x2c, 0x20, + 0x74, 0x68, 0x69, 0x73, 0x20, 0x69, 0x73, 0x20, 0x74, 0x68, + 0x65, 0x20, 0x73, 0x65, 0x6e, 0x64, 0x20, 0x6b, 0x65, 0x79, + 0x3b, 0x20, 0x6f, 0x6e, 0x20, 0x74, 0x68, 0x65, 0x20, 0x73, + 0x65, 0x72, 0x76, 0x65, 0x72, 0x20, 0x73, 0x69, 0x64, 0x65, + 0x2c, 0x20, 0x69, 0x74, 0x20, 0x69, 0x73, 0x20, 0x74, 0x68, + 0x65, 0x20, 0x72, 0x65, 0x63, 0x65, 0x69, 0x76, 0x65, 0x20, + 0x6b, 0x65, 0x79, 0x2e }; + /* "On the client side, this is the receive key; " + "on the server side, it is the send key." */ + static const u_char Magic3[84] = + { 0x4f, 0x6e, 0x20, 0x74, 0x68, 0x65, 0x20, 0x63, 0x6c, 0x69, + 0x65, 0x6e, 0x74, 0x20, 0x73, 0x69, 0x64, 0x65, 0x2c, 0x20, + 0x74, 0x68, 0x69, 0x73, 0x20, 0x69, 0x73, 0x20, 0x74, 0x68, + 0x65, 0x20, 0x72, 0x65, 0x63, 0x65, 0x69, 0x76, 0x65, 0x20, + 0x6b, 0x65, 0x79, 0x3b, 0x20, 0x6f, 0x6e, 0x20, 0x74, 0x68, + 0x65, 0x20, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x20, 0x73, + 0x69, 0x64, 0x65, 0x2c, 0x20, 0x69, 0x74, 0x20, 0x69, 0x73, + 0x20, 0x74, 0x68, 0x65, 0x20, 0x73, 0x65, 0x6e, 0x64, 0x20, + 0x6b, 0x65, 0x79, 0x2e }; + + /* Hash (x2) the Unicode version of the secret (== password). 
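+ Note: this follows the RFC 3079 MS-CHAPv2 derivation -- MasterKey is the
+ first 16 octets of SHA1(PasswordHashHash | NT-Response | Magic1), and each
+ directional session key is SHA1(MasterKey | SHA1-pad1 | MagicN | SHA1-pad2),
+ with Magic2 and Magic3 swapping roles between client and server below.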
*/ + ascii2unicode(secret, secret_len, unicodePassword); + NTPasswordHash(unicodePassword, secret_len * 2, PasswordHash); + NTPasswordHash(PasswordHash, sizeof(PasswordHash), PasswordHashHash); + + lwip_sha1_init(&sha1Context); + lwip_sha1_starts(&sha1Context); + lwip_sha1_update(&sha1Context, PasswordHashHash, MD4_SIGNATURE_SIZE); + lwip_sha1_update(&sha1Context, NTResponse, 24); + lwip_sha1_update(&sha1Context, Magic1, sizeof(Magic1)); + lwip_sha1_finish(&sha1Context, MasterKey); + lwip_sha1_free(&sha1Context); + + /* + * generate send key + */ + if (IsServer) + s = Magic3; + else + s = Magic2; + lwip_sha1_init(&sha1Context); + lwip_sha1_starts(&sha1Context); + lwip_sha1_update(&sha1Context, MasterKey, 16); + lwip_sha1_update(&sha1Context, mppe_sha1_pad1, SHA1_PAD_SIZE); + lwip_sha1_update(&sha1Context, s, 84); + lwip_sha1_update(&sha1Context, mppe_sha1_pad2, SHA1_PAD_SIZE); + lwip_sha1_finish(&sha1Context, Digest); + lwip_sha1_free(&sha1Context); + + mppe_set_key(pcb, &pcb->mppe_comp, Digest); + + /* + * generate recv key + */ + if (IsServer) + s = Magic2; + else + s = Magic3; + lwip_sha1_init(&sha1Context); + lwip_sha1_starts(&sha1Context); + lwip_sha1_update(&sha1Context, MasterKey, 16); + lwip_sha1_update(&sha1Context, mppe_sha1_pad1, SHA1_PAD_SIZE); + lwip_sha1_update(&sha1Context, s, 84); + lwip_sha1_update(&sha1Context, mppe_sha1_pad2, SHA1_PAD_SIZE); + lwip_sha1_finish(&sha1Context, Digest); + lwip_sha1_free(&sha1Context); + + mppe_set_key(pcb, &pcb->mppe_decomp, Digest); + + pcb->mppe_keys_set = 1; +} + +#endif /* MPPE_SUPPORT */ + + +static void ChapMS(ppp_pcb *pcb, const u_char *rchallenge, const char *secret, int secret_len, + unsigned char *response) { +#if !MPPE_SUPPORT + LWIP_UNUSED_ARG(pcb); +#endif /* !MPPE_SUPPORT */ + BZERO(response, MS_CHAP_RESPONSE_LEN); + + ChapMS_NT(rchallenge, secret, secret_len, &response[MS_CHAP_NTRESP]); + +#ifdef MSLANMAN + ChapMS_LANMan(rchallenge, secret, secret_len, + &response[MS_CHAP_LANMANRESP]); + + /* preferred method is set by option */ + response[MS_CHAP_USENT] = !ms_lanman; +#else + response[MS_CHAP_USENT] = 1; +#endif + +#if MPPE_SUPPORT + Set_Start_Key(pcb, rchallenge, secret, secret_len); +#endif /* MPPE_SUPPORT */ +} + + +/* + * If PeerChallenge is NULL, one is generated and the PeerChallenge + * field of response is filled in. Call this way when generating a response. + * If PeerChallenge is supplied, it is copied into the PeerChallenge field. + * Call this way when verifying a response (or debugging). + * Do not call with PeerChallenge = response. + * + * The PeerChallenge field of response is then used for calculation of the + * Authenticator Response. + */ +static void ChapMS2(ppp_pcb *pcb, const u_char *rchallenge, const u_char *PeerChallenge, + const char *user, const char *secret, int secret_len, unsigned char *response, + u_char authResponse[], int authenticator) { + /* ARGSUSED */ + LWIP_UNUSED_ARG(authenticator); +#if !MPPE_SUPPORT + LWIP_UNUSED_ARG(pcb); +#endif /* !MPPE_SUPPORT */ + + BZERO(response, MS_CHAP2_RESPONSE_LEN); + + /* Generate the Peer-Challenge if requested, or copy it if supplied. */ + if (!PeerChallenge) + magic_random_bytes(&response[MS_CHAP2_PEER_CHALLENGE], MS_CHAP2_PEER_CHAL_LEN); + else + MEMCPY(&response[MS_CHAP2_PEER_CHALLENGE], PeerChallenge, + MS_CHAP2_PEER_CHAL_LEN); + + /* Generate the NT-Response */ + ChapMS2_NT(rchallenge, &response[MS_CHAP2_PEER_CHALLENGE], user, + secret, secret_len, &response[MS_CHAP2_NTRESP]); + + /* Generate the Authenticator Response. 
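+ Note: the 40-hex-digit value written to authResponse is what the
+ authenticator must echo back as "S=..." in its Success packet; on the
+ client side chapms2_check_success() compares the received value against
+ it to complete mutual authentication.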
*/ + GenerateAuthenticatorResponsePlain(secret, secret_len, + &response[MS_CHAP2_NTRESP], + &response[MS_CHAP2_PEER_CHALLENGE], + rchallenge, user, authResponse); + +#if MPPE_SUPPORT + SetMasterKeys(pcb, secret, secret_len, + &response[MS_CHAP2_NTRESP], authenticator); +#endif /* MPPE_SUPPORT */ +} + +#if 0 /* UNUSED */ +#if MPPE_SUPPORT +/* + * Set MPPE options from plugins. + */ +void set_mppe_enc_types(int policy, int types) { + /* Early exit for unknown policies. */ + if (policy != MPPE_ENC_POL_ENC_ALLOWED || + policy != MPPE_ENC_POL_ENC_REQUIRED) + return; + + /* Don't modify MPPE if it's optional and wasn't already configured. */ + if (policy == MPPE_ENC_POL_ENC_ALLOWED && !ccp_wantoptions[0].mppe) + return; + + /* + * Disable undesirable encryption types. Note that we don't ENABLE + * any encryption types, to avoid overriding manual configuration. + */ + switch(types) { + case MPPE_ENC_TYPES_RC4_40: + ccp_wantoptions[0].mppe &= ~MPPE_OPT_128; /* disable 128-bit */ + break; + case MPPE_ENC_TYPES_RC4_128: + ccp_wantoptions[0].mppe &= ~MPPE_OPT_40; /* disable 40-bit */ + break; + default: + break; + } +} +#endif /* MPPE_SUPPORT */ +#endif /* UNUSED */ + +const struct chap_digest_type chapms_digest = { + CHAP_MICROSOFT, /* code */ +#if PPP_SERVER + chapms_generate_challenge, + chapms_verify_response, +#endif /* PPP_SERVER */ + chapms_make_response, + NULL, /* check_success */ + chapms_handle_failure, +}; + +const struct chap_digest_type chapms2_digest = { + CHAP_MICROSOFT_V2, /* code */ +#if PPP_SERVER + chapms2_generate_challenge, + chapms2_verify_response, +#endif /* PPP_SERVER */ + chapms2_make_response, + chapms2_check_success, + chapms_handle_failure, +}; + +#endif /* PPP_SUPPORT && MSCHAP_SUPPORT */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/netif/ppp/demand.c b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/netif/ppp/demand.c new file mode 100644 index 0000000..0bd3598 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/netif/ppp/demand.c @@ -0,0 +1,465 @@ +/* + * demand.c - Support routines for demand-dialling. + * + * Copyright (c) 1996-2002 Paul Mackerras. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. The name(s) of the authors of this software must not be used to + * endorse or promote products derived from this software without + * prior written permission. + * + * 3. Redistributions of any form whatsoever must retain the following + * acknowledgment: + * "This product includes software developed by Paul Mackerras + * ". + * + * THE AUTHORS OF THIS SOFTWARE DISCLAIM ALL WARRANTIES WITH REGARD TO + * THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY + * AND FITNESS, IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY + * SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN + * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING + * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#include "netif/ppp/ppp_opts.h" +#if PPP_SUPPORT && DEMAND_SUPPORT /* don't build if not configured for use in lwipopts.h */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#ifdef PPP_FILTER +#include +#endif + +#include "netif/ppp/ppp_impl.h" + +#include "netif/ppp/fsm.h" +#include "netif/ppp/ipcp.h" +#include "netif/ppp/lcp.h" + +char *frame; +int framelen; +int framemax; +int escape_flag; +int flush_flag; +int fcs; + +struct packet { + int length; + struct packet *next; + unsigned char data[1]; +}; + +struct packet *pend_q; +struct packet *pend_qtail; + +static int active_packet (unsigned char *, int); + +/* + * demand_conf - configure the interface for doing dial-on-demand. + */ +void +demand_conf() +{ + int i; + const struct protent *protp; + +/* framemax = lcp_allowoptions[0].mru; + if (framemax < PPP_MRU) */ + framemax = PPP_MRU; + framemax += PPP_HDRLEN + PPP_FCSLEN; + frame = malloc(framemax); + if (frame == NULL) + novm("demand frame"); + framelen = 0; + pend_q = NULL; + escape_flag = 0; + flush_flag = 0; + fcs = PPP_INITFCS; + + netif_set_mtu(pcb, LWIP_MIN(lcp_allowoptions[0].mru, PPP_MRU)); + if (ppp_send_config(pcb, PPP_MRU, (u32_t) 0, 0, 0) < 0 + || ppp_recv_config(pcb, PPP_MRU, (u32_t) 0, 0, 0) < 0) + fatal("Couldn't set up demand-dialled PPP interface: %m"); + +#ifdef PPP_FILTER + set_filters(&pass_filter, &active_filter); +#endif + + /* + * Call the demand_conf procedure for each protocol that's got one. + */ + for (i = 0; (protp = protocols[i]) != NULL; ++i) + if (protp->demand_conf != NULL) + ((*protp->demand_conf)(pcb)); +/* FIXME: find a way to die() here */ +#if 0 + if (!((*protp->demand_conf)(pcb))) + die(1); +#endif +} + + +/* + * demand_block - set each network protocol to block further packets. + */ +void +demand_block() +{ + int i; + const struct protent *protp; + + for (i = 0; (protp = protocols[i]) != NULL; ++i) + if (protp->demand_conf != NULL) + sifnpmode(pcb, protp->protocol & ~0x8000, NPMODE_QUEUE); + get_loop_output(); +} + +/* + * demand_discard - set each network protocol to discard packets + * with an error. + */ +void +demand_discard() +{ + struct packet *pkt, *nextpkt; + int i; + const struct protent *protp; + + for (i = 0; (protp = protocols[i]) != NULL; ++i) + if (protp->demand_conf != NULL) + sifnpmode(pcb, protp->protocol & ~0x8000, NPMODE_ERROR); + get_loop_output(); + + /* discard all saved packets */ + for (pkt = pend_q; pkt != NULL; pkt = nextpkt) { + nextpkt = pkt->next; + free(pkt); + } + pend_q = NULL; + framelen = 0; + flush_flag = 0; + escape_flag = 0; + fcs = PPP_INITFCS; +} + +/* + * demand_unblock - set each enabled network protocol to pass packets. + */ +void +demand_unblock() +{ + int i; + const struct protent *protp; + + for (i = 0; (protp = protocols[i]) != NULL; ++i) + if (protp->demand_conf != NULL) + sifnpmode(pcb, protp->protocol & ~0x8000, NPMODE_PASS); +} + +/* + * FCS lookup table as calculated by genfcstab. 
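+ *
+ * Note: this is the usual PPP FCS-16 (reflected CRC-16, polynomial
+ * x^16 + x^12 + x^5 + 1). The FCS starts at PPP_INITFCS (0xffff) and is
+ * updated per received byte as fcs = PPP_FCS(fcs, c), i.e.
+ * (fcs >> 8) ^ fcstab[(fcs ^ c) & 0xff]; loop_chars() below accepts a
+ * frame when the running FCS equals PPP_GOODFCS (0xf0b8).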
+ */ +static u_short fcstab[256] = { + 0x0000, 0x1189, 0x2312, 0x329b, 0x4624, 0x57ad, 0x6536, 0x74bf, + 0x8c48, 0x9dc1, 0xaf5a, 0xbed3, 0xca6c, 0xdbe5, 0xe97e, 0xf8f7, + 0x1081, 0x0108, 0x3393, 0x221a, 0x56a5, 0x472c, 0x75b7, 0x643e, + 0x9cc9, 0x8d40, 0xbfdb, 0xae52, 0xdaed, 0xcb64, 0xf9ff, 0xe876, + 0x2102, 0x308b, 0x0210, 0x1399, 0x6726, 0x76af, 0x4434, 0x55bd, + 0xad4a, 0xbcc3, 0x8e58, 0x9fd1, 0xeb6e, 0xfae7, 0xc87c, 0xd9f5, + 0x3183, 0x200a, 0x1291, 0x0318, 0x77a7, 0x662e, 0x54b5, 0x453c, + 0xbdcb, 0xac42, 0x9ed9, 0x8f50, 0xfbef, 0xea66, 0xd8fd, 0xc974, + 0x4204, 0x538d, 0x6116, 0x709f, 0x0420, 0x15a9, 0x2732, 0x36bb, + 0xce4c, 0xdfc5, 0xed5e, 0xfcd7, 0x8868, 0x99e1, 0xab7a, 0xbaf3, + 0x5285, 0x430c, 0x7197, 0x601e, 0x14a1, 0x0528, 0x37b3, 0x263a, + 0xdecd, 0xcf44, 0xfddf, 0xec56, 0x98e9, 0x8960, 0xbbfb, 0xaa72, + 0x6306, 0x728f, 0x4014, 0x519d, 0x2522, 0x34ab, 0x0630, 0x17b9, + 0xef4e, 0xfec7, 0xcc5c, 0xddd5, 0xa96a, 0xb8e3, 0x8a78, 0x9bf1, + 0x7387, 0x620e, 0x5095, 0x411c, 0x35a3, 0x242a, 0x16b1, 0x0738, + 0xffcf, 0xee46, 0xdcdd, 0xcd54, 0xb9eb, 0xa862, 0x9af9, 0x8b70, + 0x8408, 0x9581, 0xa71a, 0xb693, 0xc22c, 0xd3a5, 0xe13e, 0xf0b7, + 0x0840, 0x19c9, 0x2b52, 0x3adb, 0x4e64, 0x5fed, 0x6d76, 0x7cff, + 0x9489, 0x8500, 0xb79b, 0xa612, 0xd2ad, 0xc324, 0xf1bf, 0xe036, + 0x18c1, 0x0948, 0x3bd3, 0x2a5a, 0x5ee5, 0x4f6c, 0x7df7, 0x6c7e, + 0xa50a, 0xb483, 0x8618, 0x9791, 0xe32e, 0xf2a7, 0xc03c, 0xd1b5, + 0x2942, 0x38cb, 0x0a50, 0x1bd9, 0x6f66, 0x7eef, 0x4c74, 0x5dfd, + 0xb58b, 0xa402, 0x9699, 0x8710, 0xf3af, 0xe226, 0xd0bd, 0xc134, + 0x39c3, 0x284a, 0x1ad1, 0x0b58, 0x7fe7, 0x6e6e, 0x5cf5, 0x4d7c, + 0xc60c, 0xd785, 0xe51e, 0xf497, 0x8028, 0x91a1, 0xa33a, 0xb2b3, + 0x4a44, 0x5bcd, 0x6956, 0x78df, 0x0c60, 0x1de9, 0x2f72, 0x3efb, + 0xd68d, 0xc704, 0xf59f, 0xe416, 0x90a9, 0x8120, 0xb3bb, 0xa232, + 0x5ac5, 0x4b4c, 0x79d7, 0x685e, 0x1ce1, 0x0d68, 0x3ff3, 0x2e7a, + 0xe70e, 0xf687, 0xc41c, 0xd595, 0xa12a, 0xb0a3, 0x8238, 0x93b1, + 0x6b46, 0x7acf, 0x4854, 0x59dd, 0x2d62, 0x3ceb, 0x0e70, 0x1ff9, + 0xf78f, 0xe606, 0xd49d, 0xc514, 0xb1ab, 0xa022, 0x92b9, 0x8330, + 0x7bc7, 0x6a4e, 0x58d5, 0x495c, 0x3de3, 0x2c6a, 0x1ef1, 0x0f78 +}; + +/* + * loop_chars - process characters received from the loopback. + * Calls loop_frame when a complete frame has been accumulated. + * Return value is 1 if we need to bring up the link, 0 otherwise. + */ +int +loop_chars(p, n) + unsigned char *p; + int n; +{ + int c, rv; + + rv = 0; + +/* check for synchronous connection... */ + + if ( (p[0] == 0xFF) && (p[1] == 0x03) ) { + rv = loop_frame(p,n); + return rv; + } + + for (; n > 0; --n) { + c = *p++; + if (c == PPP_FLAG) { + if (!escape_flag && !flush_flag + && framelen > 2 && fcs == PPP_GOODFCS) { + framelen -= 2; + if (loop_frame((unsigned char *)frame, framelen)) + rv = 1; + } + framelen = 0; + flush_flag = 0; + escape_flag = 0; + fcs = PPP_INITFCS; + continue; + } + if (flush_flag) + continue; + if (escape_flag) { + c ^= PPP_TRANS; + escape_flag = 0; + } else if (c == PPP_ESCAPE) { + escape_flag = 1; + continue; + } + if (framelen >= framemax) { + flush_flag = 1; + continue; + } + frame[framelen++] = c; + fcs = PPP_FCS(fcs, c); + } + return rv; +} + +/* + * loop_frame - given a frame obtained from the loopback, + * decide whether to bring up the link or not, and, if we want + * to transmit this frame later, put it on the pending queue. + * Return value is 1 if we need to bring up the link, 0 otherwise. + * We assume that the kernel driver has already applied the + * pass_filter, so we won't get packets it rejected. 
+ * We apply the active_filter to see if we want this packet to + * bring up the link. + */ +int +loop_frame(frame, len) + unsigned char *frame; + int len; +{ + struct packet *pkt; + + /* dbglog("from loop: %P", frame, len); */ + if (len < PPP_HDRLEN) + return 0; + if ((PPP_PROTOCOL(frame) & 0x8000) != 0) + return 0; /* shouldn't get any of these anyway */ + if (!active_packet(frame, len)) + return 0; + + pkt = (struct packet *) malloc(sizeof(struct packet) + len); + if (pkt != NULL) { + pkt->length = len; + pkt->next = NULL; + memcpy(pkt->data, frame, len); + if (pend_q == NULL) + pend_q = pkt; + else + pend_qtail->next = pkt; + pend_qtail = pkt; + } + return 1; +} + +/* + * demand_rexmit - Resend all those frames which we got via the + * loopback, now that the real serial link is up. + */ +void +demand_rexmit(proto, newip) + int proto; + u32_t newip; +{ + struct packet *pkt, *prev, *nextpkt; + unsigned short checksum; + unsigned short pkt_checksum = 0; + unsigned iphdr; + struct timeval tv; + char cv = 0; + char ipstr[16]; + + prev = NULL; + pkt = pend_q; + pend_q = NULL; + tv.tv_sec = 1; + tv.tv_usec = 0; + select(0,NULL,NULL,NULL,&tv); /* Sleep for 1 Seconds */ + for (; pkt != NULL; pkt = nextpkt) { + nextpkt = pkt->next; + if (PPP_PROTOCOL(pkt->data) == proto) { + if ( (proto == PPP_IP) && newip ) { + /* Get old checksum */ + + iphdr = (pkt->data[4] & 15) << 2; + checksum = *((unsigned short *) (pkt->data+14)); + if (checksum == 0xFFFF) { + checksum = 0; + } + + + if (pkt->data[13] == 17) { + pkt_checksum = *((unsigned short *) (pkt->data+10+iphdr)); + if (pkt_checksum) { + cv = 1; + if (pkt_checksum == 0xFFFF) { + pkt_checksum = 0; + } + } + else { + cv = 0; + } + } + + if (pkt->data[13] == 6) { + pkt_checksum = *((unsigned short *) (pkt->data+20+iphdr)); + cv = 1; + if (pkt_checksum == 0xFFFF) { + pkt_checksum = 0; + } + } + + /* Delete old Source-IP-Address */ + checksum -= *((unsigned short *) (pkt->data+16)) ^ 0xFFFF; + checksum -= *((unsigned short *) (pkt->data+18)) ^ 0xFFFF; + + pkt_checksum -= *((unsigned short *) (pkt->data+16)) ^ 0xFFFF; + pkt_checksum -= *((unsigned short *) (pkt->data+18)) ^ 0xFFFF; + + /* Change Source-IP-Address */ + * ((u32_t *) (pkt->data + 16)) = newip; + + /* Add new Source-IP-Address */ + checksum += *((unsigned short *) (pkt->data+16)) ^ 0xFFFF; + checksum += *((unsigned short *) (pkt->data+18)) ^ 0xFFFF; + + pkt_checksum += *((unsigned short *) (pkt->data+16)) ^ 0xFFFF; + pkt_checksum += *((unsigned short *) (pkt->data+18)) ^ 0xFFFF; + + /* Write new checksum */ + if (!checksum) { + checksum = 0xFFFF; + } + *((unsigned short *) (pkt->data+14)) = checksum; + if (pkt->data[13] == 6) { + *((unsigned short *) (pkt->data+20+iphdr)) = pkt_checksum; + } + if (cv && (pkt->data[13] == 17) ) { + *((unsigned short *) (pkt->data+10+iphdr)) = pkt_checksum; + } + + /* Log Packet */ + strcpy(ipstr,inet_ntoa(*( (struct in_addr *) (pkt->data+16)))); + if (pkt->data[13] == 1) { + syslog(LOG_INFO,"Open ICMP %s -> %s\n", + ipstr, + inet_ntoa(*( (struct in_addr *) (pkt->data+20)))); + } else { + syslog(LOG_INFO,"Open %s %s:%d -> %s:%d\n", + pkt->data[13] == 6 ? 
"TCP" : "UDP", + ipstr, + ntohs(*( (short *) (pkt->data+iphdr+4))), + inet_ntoa(*( (struct in_addr *) (pkt->data+20))), + ntohs(*( (short *) (pkt->data+iphdr+6)))); + } + } + output(pcb, pkt->data, pkt->length); + free(pkt); + } else { + if (prev == NULL) + pend_q = pkt; + else + prev->next = pkt; + prev = pkt; + } + } + pend_qtail = prev; + if (prev != NULL) + prev->next = NULL; +} + +/* + * Scan a packet to decide whether it is an "active" packet, + * that is, whether it is worth bringing up the link for. + */ +static int +active_packet(p, len) + unsigned char *p; + int len; +{ + int proto, i; + const struct protent *protp; + + if (len < PPP_HDRLEN) + return 0; + proto = PPP_PROTOCOL(p); +#ifdef PPP_FILTER + p[0] = 1; /* outbound packet indicator */ + if ((pass_filter.bf_len != 0 + && bpf_filter(pass_filter.bf_insns, p, len, len) == 0) + || (active_filter.bf_len != 0 + && bpf_filter(active_filter.bf_insns, p, len, len) == 0)) { + p[0] = 0xff; + return 0; + } + p[0] = 0xff; +#endif + for (i = 0; (protp = protocols[i]) != NULL; ++i) { + if (protp->protocol < 0xC000 && (protp->protocol & ~0x8000) == proto) { + if (protp->active_pkt == NULL) + return 1; + return (*protp->active_pkt)(p, len); + } + } + return 0; /* not a supported protocol !!?? */ +} + +#endif /* PPP_SUPPORT && DEMAND_SUPPORT */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/netif/ppp/eap.c b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/netif/ppp/eap.c new file mode 100644 index 0000000..5000e26 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/netif/ppp/eap.c @@ -0,0 +1,2423 @@ +/* + * eap.c - Extensible Authentication Protocol for PPP (RFC 2284) + * + * Copyright (c) 2001 by Sun Microsystems, Inc. + * All rights reserved. + * + * Non-exclusive rights to redistribute, modify, translate, and use + * this software in source and binary forms, in whole or in part, is + * hereby granted, provided that the above copyright notice is + * duplicated in any source form, and that neither the name of the + * copyright holder nor the author is used to endorse or promote + * products derived from this software. + * + * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED + * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. + * + * Original version by James Carlson + * + * This implementation of EAP supports MD5-Challenge and SRP-SHA1 + * authentication styles. Note that support of MD5-Challenge is a + * requirement of RFC 2284, and that it's essentially just a + * reimplementation of regular RFC 1994 CHAP using EAP messages. + * + * As an authenticator ("server"), there are multiple phases for each + * style. In the first phase of each style, the unauthenticated peer + * name is queried using the EAP Identity request type. If the + * "remotename" option is used, then this phase is skipped, because + * the peer's name is presumed to be known. + * + * For MD5-Challenge, there are two phases, and the second phase + * consists of sending the challenge itself and handling the + * associated response. + * + * For SRP-SHA1, there are four phases. The second sends 's', 'N', + * and 'g'. The reply contains 'A'. The third sends 'B', and the + * reply contains 'M1'. The forth sends the 'M2' value. 
+ * + * As an authenticatee ("client"), there's just a single phase -- + * responding to the queries generated by the peer. EAP is an + * authenticator-driven protocol. + * + * Based on draft-ietf-pppext-eap-srp-03.txt. + */ + +#include "netif/ppp/ppp_opts.h" +#if PPP_SUPPORT && EAP_SUPPORT /* don't build if not configured for use in lwipopts.h */ + +#include "netif/ppp/ppp_impl.h" +#include "netif/ppp/eap.h" +#include "netif/ppp/magic.h" +#include "netif/ppp/pppcrypt.h" + +#ifdef USE_SRP +#include +#include +#include +#endif /* USE_SRP */ + +#ifndef SHA_DIGESTSIZE +#define SHA_DIGESTSIZE 20 +#endif + +#ifdef USE_SRP +static char *pn_secret = NULL; /* Pseudonym generating secret */ +#endif + +#if PPP_OPTIONS +/* + * Command-line options. + */ +static option_t eap_option_list[] = { + { "eap-restart", o_int, &eap_states[0].es_server.ea_timeout, + "Set retransmit timeout for EAP Requests (server)" }, + { "eap-max-sreq", o_int, &eap_states[0].es_server.ea_maxrequests, + "Set max number of EAP Requests sent (server)" }, + { "eap-timeout", o_int, &eap_states[0].es_client.ea_timeout, + "Set time limit for peer EAP authentication" }, + { "eap-max-rreq", o_int, &eap_states[0].es_client.ea_maxrequests, + "Set max number of EAP Requests allows (client)" }, + { "eap-interval", o_int, &eap_states[0].es_rechallenge, + "Set interval for EAP rechallenge" }, +#ifdef USE_SRP + { "srp-interval", o_int, &eap_states[0].es_lwrechallenge, + "Set interval for SRP lightweight rechallenge" }, + { "srp-pn-secret", o_string, &pn_secret, + "Long term pseudonym generation secret" }, + { "srp-use-pseudonym", o_bool, &eap_states[0].es_usepseudo, + "Use pseudonym if offered one by server", 1 }, +#endif + { NULL } +}; +#endif /* PPP_OPTIONS */ + +/* + * Protocol entry points. + */ +static void eap_init(ppp_pcb *pcb); +static void eap_input(ppp_pcb *pcb, u_char *inp, int inlen); +static void eap_protrej(ppp_pcb *pcb); +static void eap_lowerup(ppp_pcb *pcb); +static void eap_lowerdown(ppp_pcb *pcb); +#if PRINTPKT_SUPPORT +static int eap_printpkt(const u_char *inp, int inlen, + void (*)(void *arg, const char *fmt, ...), void *arg); +#endif /* PRINTPKT_SUPPORT */ + +const struct protent eap_protent = { + PPP_EAP, /* protocol number */ + eap_init, /* initialization procedure */ + eap_input, /* process a received packet */ + eap_protrej, /* process a received protocol-reject */ + eap_lowerup, /* lower layer has gone up */ + eap_lowerdown, /* lower layer has gone down */ + NULL, /* open the protocol */ + NULL, /* close the protocol */ +#if PRINTPKT_SUPPORT + eap_printpkt, /* print a packet in readable form */ +#endif /* PRINTPKT_SUPPORT */ +#if PPP_DATAINPUT + NULL, /* process a received data packet */ +#endif /* PPP_DATAINPUT */ +#if PRINTPKT_SUPPORT + "EAP", /* text name of protocol */ + NULL, /* text name of corresponding data protocol */ +#endif /* PRINTPKT_SUPPORT */ +#if PPP_OPTIONS + eap_option_list, /* list of command-line options */ + NULL, /* check requested options; assign defaults */ +#endif /* PPP_OPTIONS */ +#if DEMAND_SUPPORT + NULL, /* configure interface for demand-dial */ + NULL /* say whether to bring up link for this pkt */ +#endif /* DEMAND_SUPPORT */ +}; + +#ifdef USE_SRP +/* + * A well-known 2048 bit modulus. 
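+ * These octets appear to be the standard 2048-bit SRP group prime (the
+ * same N published in RFC 5054), used here with generator 2.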
+ */ +static const u_char wkmodulus[] = { + 0xAC, 0x6B, 0xDB, 0x41, 0x32, 0x4A, 0x9A, 0x9B, + 0xF1, 0x66, 0xDE, 0x5E, 0x13, 0x89, 0x58, 0x2F, + 0xAF, 0x72, 0xB6, 0x65, 0x19, 0x87, 0xEE, 0x07, + 0xFC, 0x31, 0x92, 0x94, 0x3D, 0xB5, 0x60, 0x50, + 0xA3, 0x73, 0x29, 0xCB, 0xB4, 0xA0, 0x99, 0xED, + 0x81, 0x93, 0xE0, 0x75, 0x77, 0x67, 0xA1, 0x3D, + 0xD5, 0x23, 0x12, 0xAB, 0x4B, 0x03, 0x31, 0x0D, + 0xCD, 0x7F, 0x48, 0xA9, 0xDA, 0x04, 0xFD, 0x50, + 0xE8, 0x08, 0x39, 0x69, 0xED, 0xB7, 0x67, 0xB0, + 0xCF, 0x60, 0x95, 0x17, 0x9A, 0x16, 0x3A, 0xB3, + 0x66, 0x1A, 0x05, 0xFB, 0xD5, 0xFA, 0xAA, 0xE8, + 0x29, 0x18, 0xA9, 0x96, 0x2F, 0x0B, 0x93, 0xB8, + 0x55, 0xF9, 0x79, 0x93, 0xEC, 0x97, 0x5E, 0xEA, + 0xA8, 0x0D, 0x74, 0x0A, 0xDB, 0xF4, 0xFF, 0x74, + 0x73, 0x59, 0xD0, 0x41, 0xD5, 0xC3, 0x3E, 0xA7, + 0x1D, 0x28, 0x1E, 0x44, 0x6B, 0x14, 0x77, 0x3B, + 0xCA, 0x97, 0xB4, 0x3A, 0x23, 0xFB, 0x80, 0x16, + 0x76, 0xBD, 0x20, 0x7A, 0x43, 0x6C, 0x64, 0x81, + 0xF1, 0xD2, 0xB9, 0x07, 0x87, 0x17, 0x46, 0x1A, + 0x5B, 0x9D, 0x32, 0xE6, 0x88, 0xF8, 0x77, 0x48, + 0x54, 0x45, 0x23, 0xB5, 0x24, 0xB0, 0xD5, 0x7D, + 0x5E, 0xA7, 0x7A, 0x27, 0x75, 0xD2, 0xEC, 0xFA, + 0x03, 0x2C, 0xFB, 0xDB, 0xF5, 0x2F, 0xB3, 0x78, + 0x61, 0x60, 0x27, 0x90, 0x04, 0xE5, 0x7A, 0xE6, + 0xAF, 0x87, 0x4E, 0x73, 0x03, 0xCE, 0x53, 0x29, + 0x9C, 0xCC, 0x04, 0x1C, 0x7B, 0xC3, 0x08, 0xD8, + 0x2A, 0x56, 0x98, 0xF3, 0xA8, 0xD0, 0xC3, 0x82, + 0x71, 0xAE, 0x35, 0xF8, 0xE9, 0xDB, 0xFB, 0xB6, + 0x94, 0xB5, 0xC8, 0x03, 0xD8, 0x9F, 0x7A, 0xE4, + 0x35, 0xDE, 0x23, 0x6D, 0x52, 0x5F, 0x54, 0x75, + 0x9B, 0x65, 0xE3, 0x72, 0xFC, 0xD6, 0x8E, 0xF2, + 0x0F, 0xA7, 0x11, 0x1F, 0x9E, 0x4A, 0xFF, 0x73 +}; +#endif + +#if PPP_SERVER +/* Local forward declarations. */ +static void eap_server_timeout(void *arg); +#endif /* PPP_SERVER */ + +/* + * Convert EAP state code to printable string for debug. + */ +static const char * eap_state_name(enum eap_state_code esc) +{ + static const char *state_names[] = { EAP_STATES }; + + return (state_names[(int)esc]); +} + +/* + * eap_init - Initialize state for an EAP user. This is currently + * called once by main() during start-up. + */ +static void eap_init(ppp_pcb *pcb) { + + BZERO(&pcb->eap, sizeof(eap_state)); +#if PPP_SERVER + pcb->eap.es_server.ea_id = magic(); +#endif /* PPP_SERVER */ +} + +/* + * eap_client_timeout - Give up waiting for the peer to send any + * Request messages. + */ +static void eap_client_timeout(void *arg) { + ppp_pcb *pcb = (ppp_pcb*)arg; + + if (!eap_client_active(pcb)) + return; + + ppp_error("EAP: timeout waiting for Request from peer"); + auth_withpeer_fail(pcb, PPP_EAP); + pcb->eap.es_client.ea_state = eapBadAuth; +} + +/* + * eap_authwithpeer - Authenticate to our peer (behave as client). + * + * Start client state and wait for requests. This is called only + * after eap_lowerup. + */ +void eap_authwithpeer(ppp_pcb *pcb, const char *localname) { + + if(NULL == localname) + return; + + /* Save the peer name we're given */ + pcb->eap.es_client.ea_name = localname; + pcb->eap.es_client.ea_namelen = strlen(localname); + + pcb->eap.es_client.ea_state = eapListen; + + /* + * Start a timer so that if the other end just goes + * silent, we don't sit here waiting forever. + */ + if (pcb->settings.eap_req_time > 0) + TIMEOUT(eap_client_timeout, pcb, + pcb->settings.eap_req_time); +} + +#if PPP_SERVER +/* + * Format a standard EAP Failure message and send it to the peer. 
+ * (Server operation) + */ +static void eap_send_failure(ppp_pcb *pcb) { + struct pbuf *p; + u_char *outp; + + p = pbuf_alloc(PBUF_RAW, (u16_t)(PPP_HDRLEN + EAP_HEADERLEN), PPP_CTRL_PBUF_TYPE); + if(NULL == p) + return; + if(p->tot_len != p->len) { + pbuf_free(p); + return; + } + + outp = (u_char*)p->payload; + + MAKEHEADER(outp, PPP_EAP); + + PUTCHAR(EAP_FAILURE, outp); + pcb->eap.es_server.ea_id++; + PUTCHAR(pcb->eap.es_server.ea_id, outp); + PUTSHORT(EAP_HEADERLEN, outp); + + ppp_write(pcb, p); + + pcb->eap.es_server.ea_state = eapBadAuth; + auth_peer_fail(pcb, PPP_EAP); +} + +/* + * Format a standard EAP Success message and send it to the peer. + * (Server operation) + */ +static void eap_send_success(ppp_pcb *pcb) { + struct pbuf *p; + u_char *outp; + + p = pbuf_alloc(PBUF_RAW, (u16_t)(PPP_HDRLEN + EAP_HEADERLEN), PPP_CTRL_PBUF_TYPE); + if(NULL == p) + return; + if(p->tot_len != p->len) { + pbuf_free(p); + return; + } + + outp = (u_char*)p->payload; + + MAKEHEADER(outp, PPP_EAP); + + PUTCHAR(EAP_SUCCESS, outp); + pcb->eap.es_server.ea_id++; + PUTCHAR(pcb->eap.es_server.ea_id, outp); + PUTSHORT(EAP_HEADERLEN, outp); + + ppp_write(pcb, p); + + auth_peer_success(pcb, PPP_EAP, 0, + pcb->eap.es_server.ea_peer, pcb->eap.es_server.ea_peerlen); +} +#endif /* PPP_SERVER */ + +#ifdef USE_SRP +/* + * Set DES key according to pseudonym-generating secret and current + * date. + */ +static bool +pncrypt_setkey(int timeoffs) +{ + struct tm *tp; + char tbuf[9]; + SHA1_CTX ctxt; + u_char dig[SHA_DIGESTSIZE]; + time_t reftime; + + if (pn_secret == NULL) + return (0); + reftime = time(NULL) + timeoffs; + tp = localtime(&reftime); + SHA1Init(&ctxt); + SHA1Update(&ctxt, pn_secret, strlen(pn_secret)); + strftime(tbuf, sizeof (tbuf), "%Y%m%d", tp); + SHA1Update(&ctxt, tbuf, strlen(tbuf)); + SHA1Final(dig, &ctxt); + /* FIXME: if we want to do SRP, we need to find a way to pass the PolarSSL des_context instead of using static memory */ + return (DesSetkey(dig)); +} + +static char base64[] = +"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"; + +struct b64state { + u32_t bs_bits; + int bs_offs; +}; + +static int +b64enc(bs, inp, inlen, outp) +struct b64state *bs; +u_char *inp; +int inlen; +u_char *outp; +{ + int outlen = 0; + + while (inlen > 0) { + bs->bs_bits = (bs->bs_bits << 8) | *inp++; + inlen--; + bs->bs_offs += 8; + if (bs->bs_offs >= 24) { + *outp++ = base64[(bs->bs_bits >> 18) & 0x3F]; + *outp++ = base64[(bs->bs_bits >> 12) & 0x3F]; + *outp++ = base64[(bs->bs_bits >> 6) & 0x3F]; + *outp++ = base64[bs->bs_bits & 0x3F]; + outlen += 4; + bs->bs_offs = 0; + bs->bs_bits = 0; + } + } + return (outlen); +} + +static int +b64flush(bs, outp) +struct b64state *bs; +u_char *outp; +{ + int outlen = 0; + + if (bs->bs_offs == 8) { + *outp++ = base64[(bs->bs_bits >> 2) & 0x3F]; + *outp++ = base64[(bs->bs_bits << 4) & 0x3F]; + outlen = 2; + } else if (bs->bs_offs == 16) { + *outp++ = base64[(bs->bs_bits >> 10) & 0x3F]; + *outp++ = base64[(bs->bs_bits >> 4) & 0x3F]; + *outp++ = base64[(bs->bs_bits << 2) & 0x3F]; + outlen = 3; + } + bs->bs_offs = 0; + bs->bs_bits = 0; + return (outlen); +} + +static int +b64dec(bs, inp, inlen, outp) +struct b64state *bs; +u_char *inp; +int inlen; +u_char *outp; +{ + int outlen = 0; + char *cp; + + while (inlen > 0) { + if ((cp = strchr(base64, *inp++)) == NULL) + break; + bs->bs_bits = (bs->bs_bits << 6) | (cp - base64); + inlen--; + bs->bs_offs += 6; + if (bs->bs_offs >= 8) { + *outp++ = bs->bs_bits >> (bs->bs_offs - 8); + outlen++; + bs->bs_offs -= 8; + } + } + 
return (outlen); +} +#endif /* USE_SRP */ + +#if PPP_SERVER +/* + * Assume that current waiting server state is complete and figure + * next state to use based on available authentication data. 'status' + * indicates if there was an error in handling the last query. It is + * 0 for success and non-zero for failure. + */ +static void eap_figure_next_state(ppp_pcb *pcb, int status) { +#ifdef USE_SRP + unsigned char secbuf[MAXSECRETLEN], clear[8], *sp, *dp; + struct t_pw tpw; + struct t_confent *tce, mytce; + char *cp, *cp2; + struct t_server *ts; + int id, i, plen, toffs; + u_char vals[2]; + struct b64state bs; +#endif /* USE_SRP */ + + pcb->settings.eap_timeout_time = pcb->eap.es_savedtime; + switch (pcb->eap.es_server.ea_state) { + case eapBadAuth: + return; + + case eapIdentify: +#ifdef USE_SRP + /* Discard any previous session. */ + ts = (struct t_server *)pcb->eap.es_server.ea_session; + if (ts != NULL) { + t_serverclose(ts); + pcb->eap.es_server.ea_session = NULL; + pcb->eap.es_server.ea_skey = NULL; + } +#endif /* USE_SRP */ + if (status != 0) { + pcb->eap.es_server.ea_state = eapBadAuth; + break; + } +#ifdef USE_SRP + /* If we've got a pseudonym, try to decode to real name. */ + if (pcb->eap.es_server.ea_peerlen > SRP_PSEUDO_LEN && + strncmp(pcb->eap.es_server.ea_peer, SRP_PSEUDO_ID, + SRP_PSEUDO_LEN) == 0 && + (pcb->eap.es_server.ea_peerlen - SRP_PSEUDO_LEN) * 3 / 4 < + sizeof (secbuf)) { + BZERO(&bs, sizeof (bs)); + plen = b64dec(&bs, + pcb->eap.es_server.ea_peer + SRP_PSEUDO_LEN, + pcb->eap.es_server.ea_peerlen - SRP_PSEUDO_LEN, + secbuf); + toffs = 0; + for (i = 0; i < 5; i++) { + pncrypt_setkey(toffs); + toffs -= 86400; + /* FIXME: if we want to do SRP, we need to find a way to pass the PolarSSL des_context instead of using static memory */ + if (!DesDecrypt(secbuf, clear)) { + ppp_dbglog("no DES here; cannot decode " + "pseudonym"); + return; + } + id = *(unsigned char *)clear; + if (id + 1 <= plen && id + 9 > plen) + break; + } + if (plen % 8 == 0 && i < 5) { + /* + * Note that this is always shorter than the + * original stored string, so there's no need + * to realloc. + */ + if ((i = plen = *(unsigned char *)clear) > 7) + i = 7; + pcb->eap.es_server.ea_peerlen = plen; + dp = (unsigned char *)pcb->eap.es_server.ea_peer; + MEMCPY(dp, clear + 1, i); + plen -= i; + dp += i; + sp = secbuf + 8; + while (plen > 0) { + /* FIXME: if we want to do SRP, we need to find a way to pass the PolarSSL des_context instead of using static memory */ + (void) DesDecrypt(sp, dp); + sp += 8; + dp += 8; + plen -= 8; + } + pcb->eap.es_server.ea_peer[ + pcb->eap.es_server.ea_peerlen] = '\0'; + ppp_dbglog("decoded pseudonym to \"%.*q\"", + pcb->eap.es_server.ea_peerlen, + pcb->eap.es_server.ea_peer); + } else { + ppp_dbglog("failed to decode real name"); + /* Stay in eapIdentfy state; requery */ + break; + } + } + /* Look up user in secrets database. 
*/ + if (get_srp_secret(pcb->eap.es_unit, pcb->eap.es_server.ea_peer, + pcb->eap.es_server.ea_name, (char *)secbuf, 1) != 0) { + /* Set up default in case SRP entry is bad */ + pcb->eap.es_server.ea_state = eapMD5Chall; + /* Get t_confent based on index in srp-secrets */ + id = strtol((char *)secbuf, &cp, 10); + if (*cp++ != ':' || id < 0) + break; + if (id == 0) { + mytce.index = 0; + mytce.modulus.data = (u_char *)wkmodulus; + mytce.modulus.len = sizeof (wkmodulus); + mytce.generator.data = (u_char *)"\002"; + mytce.generator.len = 1; + tce = &mytce; + } else if ((tce = gettcid(id)) != NULL) { + /* + * Client will have to verify this modulus/ + * generator combination, and that will take + * a while. Lengthen the timeout here. + */ + if (pcb->settings.eap_timeout_time > 0 && + pcb->settings.eap_timeout_time < 30) + pcb->settings.eap_timeout_time = 30; + } else { + break; + } + if ((cp2 = strchr(cp, ':')) == NULL) + break; + *cp2++ = '\0'; + tpw.pebuf.name = pcb->eap.es_server.ea_peer; + tpw.pebuf.password.len = t_fromb64((char *)tpw.pwbuf, + cp); + tpw.pebuf.password.data = tpw.pwbuf; + tpw.pebuf.salt.len = t_fromb64((char *)tpw.saltbuf, + cp2); + tpw.pebuf.salt.data = tpw.saltbuf; + if ((ts = t_serveropenraw(&tpw.pebuf, tce)) == NULL) + break; + pcb->eap.es_server.ea_session = (void *)ts; + pcb->eap.es_server.ea_state = eapSRP1; + vals[0] = pcb->eap.es_server.ea_id + 1; + vals[1] = EAPT_SRP; + t_serveraddexdata(ts, vals, 2); + /* Generate B; must call before t_servergetkey() */ + t_servergenexp(ts); + break; + } +#endif /* USE_SRP */ + pcb->eap.es_server.ea_state = eapMD5Chall; + break; + + case eapSRP1: +#ifdef USE_SRP + ts = (struct t_server *)pcb->eap.es_server.ea_session; + if (ts != NULL && status != 0) { + t_serverclose(ts); + pcb->eap.es_server.ea_session = NULL; + pcb->eap.es_server.ea_skey = NULL; + } +#endif /* USE_SRP */ + if (status == 1) { + pcb->eap.es_server.ea_state = eapMD5Chall; + } else if (status != 0 || pcb->eap.es_server.ea_session == NULL) { + pcb->eap.es_server.ea_state = eapBadAuth; + } else { + pcb->eap.es_server.ea_state = eapSRP2; + } + break; + + case eapSRP2: +#ifdef USE_SRP + ts = (struct t_server *)pcb->eap.es_server.ea_session; + if (ts != NULL && status != 0) { + t_serverclose(ts); + pcb->eap.es_server.ea_session = NULL; + pcb->eap.es_server.ea_skey = NULL; + } +#endif /* USE_SRP */ + if (status != 0 || pcb->eap.es_server.ea_session == NULL) { + pcb->eap.es_server.ea_state = eapBadAuth; + } else { + pcb->eap.es_server.ea_state = eapSRP3; + } + break; + + case eapSRP3: + case eapSRP4: +#ifdef USE_SRP + ts = (struct t_server *)pcb->eap.es_server.ea_session; + if (ts != NULL && status != 0) { + t_serverclose(ts); + pcb->eap.es_server.ea_session = NULL; + pcb->eap.es_server.ea_skey = NULL; + } +#endif /* USE_SRP */ + if (status != 0 || pcb->eap.es_server.ea_session == NULL) { + pcb->eap.es_server.ea_state = eapBadAuth; + } else { + pcb->eap.es_server.ea_state = eapOpen; + } + break; + + case eapMD5Chall: + if (status != 0) { + pcb->eap.es_server.ea_state = eapBadAuth; + } else { + pcb->eap.es_server.ea_state = eapOpen; + } + break; + + default: + pcb->eap.es_server.ea_state = eapBadAuth; + break; + } + if (pcb->eap.es_server.ea_state == eapBadAuth) + eap_send_failure(pcb); +} + +/* + * Format an EAP Request message and send it to the peer. Message + * type depends on current state. 
(Server operation) + */ +static void eap_send_request(ppp_pcb *pcb) { + struct pbuf *p; + u_char *outp; + u_char *lenloc; + int outlen; + int len; + const char *str; +#ifdef USE_SRP + struct t_server *ts; + u_char clear[8], cipher[8], dig[SHA_DIGESTSIZE], *optr, *cp; + int i, j; + struct b64state b64; + SHA1_CTX ctxt; +#endif /* USE_SRP */ + + /* Handle both initial auth and restart */ + if (pcb->eap.es_server.ea_state < eapIdentify && + pcb->eap.es_server.ea_state != eapInitial) { + pcb->eap.es_server.ea_state = eapIdentify; +#if PPP_REMOTENAME + if (pcb->settings.explicit_remote && pcb->remote_name) { + /* + * If we already know the peer's + * unauthenticated name, then there's no + * reason to ask. Go to next state instead. + */ + int len = (int)strlen(pcb->remote_name); + if (len > MAXNAMELEN) { + len = MAXNAMELEN; + } + MEMCPY(pcb->eap.es_server.ea_peer, pcb->remote_name, len); + pcb->eap.es_server.ea_peer[len] = '\0'; + pcb->eap.es_server.ea_peerlen = len; + eap_figure_next_state(pcb, 0); + } +#endif /* PPP_REMOTENAME */ + } + + if (pcb->settings.eap_max_transmits > 0 && + pcb->eap.es_server.ea_requests >= pcb->settings.eap_max_transmits) { + if (pcb->eap.es_server.ea_responses > 0) + ppp_error("EAP: too many Requests sent"); + else + ppp_error("EAP: no response to Requests"); + eap_send_failure(pcb); + return; + } + + p = pbuf_alloc(PBUF_RAW, (u16_t)(PPP_CTRL_PBUF_MAX_SIZE), PPP_CTRL_PBUF_TYPE); + if(NULL == p) + return; + if(p->tot_len != p->len) { + pbuf_free(p); + return; + } + + outp = (u_char*)p->payload; + + MAKEHEADER(outp, PPP_EAP); + + PUTCHAR(EAP_REQUEST, outp); + PUTCHAR(pcb->eap.es_server.ea_id, outp); + lenloc = outp; + INCPTR(2, outp); + + switch (pcb->eap.es_server.ea_state) { + case eapIdentify: + PUTCHAR(EAPT_IDENTITY, outp); + str = "Name"; + len = strlen(str); + MEMCPY(outp, str, len); + INCPTR(len, outp); + break; + + case eapMD5Chall: + PUTCHAR(EAPT_MD5CHAP, outp); + /* + * pick a random challenge length between + * EAP_MIN_CHALLENGE_LENGTH and EAP_MAX_CHALLENGE_LENGTH + */ + pcb->eap.es_challen = EAP_MIN_CHALLENGE_LENGTH + + magic_pow(EAP_MIN_MAX_POWER_OF_TWO_CHALLENGE_LENGTH); + PUTCHAR(pcb->eap.es_challen, outp); + magic_random_bytes(pcb->eap.es_challenge, pcb->eap.es_challen); + MEMCPY(outp, pcb->eap.es_challenge, pcb->eap.es_challen); + INCPTR(pcb->eap.es_challen, outp); + MEMCPY(outp, pcb->eap.es_server.ea_name, pcb->eap.es_server.ea_namelen); + INCPTR(pcb->eap.es_server.ea_namelen, outp); + break; + +#ifdef USE_SRP + case eapSRP1: + PUTCHAR(EAPT_SRP, outp); + PUTCHAR(EAPSRP_CHALLENGE, outp); + + PUTCHAR(pcb->eap.es_server.ea_namelen, outp); + MEMCPY(outp, pcb->eap.es_server.ea_name, pcb->eap.es_server.ea_namelen); + INCPTR(pcb->eap.es_server.ea_namelen, outp); + + ts = (struct t_server *)pcb->eap.es_server.ea_session; + assert(ts != NULL); + PUTCHAR(ts->s.len, outp); + MEMCPY(outp, ts->s.data, ts->s.len); + INCPTR(ts->s.len, outp); + + if (ts->g.len == 1 && ts->g.data[0] == 2) { + PUTCHAR(0, outp); + } else { + PUTCHAR(ts->g.len, outp); + MEMCPY(outp, ts->g.data, ts->g.len); + INCPTR(ts->g.len, outp); + } + + if (ts->n.len != sizeof (wkmodulus) || + BCMP(ts->n.data, wkmodulus, sizeof (wkmodulus)) != 0) { + MEMCPY(outp, ts->n.data, ts->n.len); + INCPTR(ts->n.len, outp); + } + break; + + case eapSRP2: + PUTCHAR(EAPT_SRP, outp); + PUTCHAR(EAPSRP_SKEY, outp); + + ts = (struct t_server *)pcb->eap.es_server.ea_session; + assert(ts != NULL); + MEMCPY(outp, ts->B.data, ts->B.len); + INCPTR(ts->B.len, outp); + break; + + case eapSRP3: + PUTCHAR(EAPT_SRP, outp); + 
PUTCHAR(EAPSRP_SVALIDATOR, outp); + PUTLONG(SRPVAL_EBIT, outp); + ts = (struct t_server *)pcb->eap.es_server.ea_session; + assert(ts != NULL); + MEMCPY(outp, t_serverresponse(ts), SHA_DIGESTSIZE); + INCPTR(SHA_DIGESTSIZE, outp); + + if (pncrypt_setkey(0)) { + /* Generate pseudonym */ + optr = outp; + cp = (unsigned char *)pcb->eap.es_server.ea_peer; + if ((j = i = pcb->eap.es_server.ea_peerlen) > 7) + j = 7; + clear[0] = i; + MEMCPY(clear + 1, cp, j); + i -= j; + cp += j; + /* FIXME: if we want to do SRP, we need to find a way to pass the PolarSSL des_context instead of using static memory */ + if (!DesEncrypt(clear, cipher)) { + ppp_dbglog("no DES here; not generating pseudonym"); + break; + } + BZERO(&b64, sizeof (b64)); + outp++; /* space for pseudonym length */ + outp += b64enc(&b64, cipher, 8, outp); + while (i >= 8) { + /* FIXME: if we want to do SRP, we need to find a way to pass the PolarSSL des_context instead of using static memory */ + (void) DesEncrypt(cp, cipher); + outp += b64enc(&b64, cipher, 8, outp); + cp += 8; + i -= 8; + } + if (i > 0) { + MEMCPY(clear, cp, i); + cp += i; + magic_random_bytes(cp, 8-i); + /* FIXME: if we want to do SRP, we need to find a way to pass the PolarSSL des_context instead of using static memory */ + (void) DesEncrypt(clear, cipher); + outp += b64enc(&b64, cipher, 8, outp); + } + outp += b64flush(&b64, outp); + + /* Set length and pad out to next 20 octet boundary */ + i = outp - optr - 1; + *optr = i; + i %= SHA_DIGESTSIZE; + if (i != 0) { + magic_random_bytes(outp, SHA_DIGESTSIZE-i); + INCPTR(SHA_DIGESTSIZE-i, outp); + } + + /* Obscure the pseudonym with SHA1 hash */ + SHA1Init(&ctxt); + SHA1Update(&ctxt, &pcb->eap.es_server.ea_id, 1); + SHA1Update(&ctxt, pcb->eap.es_server.ea_skey, + SESSION_KEY_LEN); + SHA1Update(&ctxt, pcb->eap.es_server.ea_peer, + pcb->eap.es_server.ea_peerlen); + while (optr < outp) { + SHA1Final(dig, &ctxt); + cp = dig; + while (cp < dig + SHA_DIGESTSIZE) + *optr++ ^= *cp++; + SHA1Init(&ctxt); + SHA1Update(&ctxt, &pcb->eap.es_server.ea_id, 1); + SHA1Update(&ctxt, pcb->eap.es_server.ea_skey, + SESSION_KEY_LEN); + SHA1Update(&ctxt, optr - SHA_DIGESTSIZE, + SHA_DIGESTSIZE); + } + } + break; + + case eapSRP4: + PUTCHAR(EAPT_SRP, outp); + PUTCHAR(EAPSRP_LWRECHALLENGE, outp); + pcb->eap.es_challen = EAP_MIN_CHALLENGE_LENGTH + + magic_pow(EAP_MIN_MAX_POWER_OF_TWO_CHALLENGE_LENGTH); + magic_random_bytes(pcb->eap.es_challenge, pcb->eap.es_challen); + MEMCPY(outp, pcb->eap.es_challenge, pcb->eap.es_challen); + INCPTR(pcb->eap.es_challen, outp); + break; +#endif /* USE_SRP */ + + default: + return; + } + + outlen = (outp - (unsigned char*)p->payload) - PPP_HDRLEN; + PUTSHORT(outlen, lenloc); + + pbuf_realloc(p, outlen + PPP_HDRLEN); + ppp_write(pcb, p); + + pcb->eap.es_server.ea_requests++; + + if (pcb->settings.eap_timeout_time > 0) + TIMEOUT(eap_server_timeout, pcb, pcb->settings.eap_timeout_time); +} + +/* + * eap_authpeer - Authenticate our peer (behave as server). + * + * Start server state and send first request. This is called only + * after eap_lowerup. + */ +void eap_authpeer(ppp_pcb *pcb, const char *localname) { + + /* Save the name we're given. */ + pcb->eap.es_server.ea_name = localname; + pcb->eap.es_server.ea_namelen = strlen(localname); + + pcb->eap.es_savedtime = pcb->settings.eap_timeout_time; + + /* Lower layer up yet? 
*/ + if (pcb->eap.es_server.ea_state == eapInitial || + pcb->eap.es_server.ea_state == eapPending) { + pcb->eap.es_server.ea_state = eapPending; + return; + } + + pcb->eap.es_server.ea_state = eapPending; + + /* ID number not updated here intentionally; hashed into M1 */ + eap_send_request(pcb); +} + +/* + * eap_server_timeout - Retransmission timer for sending Requests + * expired. + */ +static void eap_server_timeout(void *arg) { + ppp_pcb *pcb = (ppp_pcb*)arg; + + if (!eap_server_active(pcb)) + return; + + /* EAP ID number must not change on timeout. */ + eap_send_request(pcb); +} + +/* + * When it's time to send rechallenge the peer, this timeout is + * called. Once the rechallenge is successful, the response handler + * will restart the timer. If it fails, then the link is dropped. + */ +static void eap_rechallenge(void *arg) { + ppp_pcb *pcb = (ppp_pcb*)arg; + + if (pcb->eap.es_server.ea_state != eapOpen && + pcb->eap.es_server.ea_state != eapSRP4) + return; + + pcb->eap.es_server.ea_requests = 0; + pcb->eap.es_server.ea_state = eapIdentify; + eap_figure_next_state(pcb, 0); + pcb->eap.es_server.ea_id++; + eap_send_request(pcb); +} + +static void srp_lwrechallenge(void *arg) { + ppp_pcb *pcb = (ppp_pcb*)arg; + + if (pcb->eap.es_server.ea_state != eapOpen || + pcb->eap.es_server.ea_type != EAPT_SRP) + return; + + pcb->eap.es_server.ea_requests = 0; + pcb->eap.es_server.ea_state = eapSRP4; + pcb->eap.es_server.ea_id++; + eap_send_request(pcb); +} +#endif /* PPP_SERVER */ + +/* + * eap_lowerup - The lower layer is now up. + * + * This is called before either eap_authpeer or eap_authwithpeer. See + * link_established() in auth.c. All that's necessary here is to + * return to closed state so that those two routines will do the right + * thing. + */ +static void eap_lowerup(ppp_pcb *pcb) { + pcb->eap.es_client.ea_state = eapClosed; +#if PPP_SERVER + pcb->eap.es_server.ea_state = eapClosed; +#endif /* PPP_SERVER */ +} + +/* + * eap_lowerdown - The lower layer is now down. + * + * Cancel all timeouts and return to initial state. + */ +static void eap_lowerdown(ppp_pcb *pcb) { + + if (eap_client_active(pcb) && pcb->settings.eap_req_time > 0) { + UNTIMEOUT(eap_client_timeout, pcb); + } +#if PPP_SERVER + if (eap_server_active(pcb)) { + if (pcb->settings.eap_timeout_time > 0) { + UNTIMEOUT(eap_server_timeout, pcb); + } + } else { + if ((pcb->eap.es_server.ea_state == eapOpen || + pcb->eap.es_server.ea_state == eapSRP4) && + pcb->eap.es_rechallenge > 0) { + UNTIMEOUT(eap_rechallenge, (void *)pcb); + } + if (pcb->eap.es_server.ea_state == eapOpen && + pcb->eap.es_lwrechallenge > 0) { + UNTIMEOUT(srp_lwrechallenge, (void *)pcb); + } + } + + pcb->eap.es_client.ea_state = pcb->eap.es_server.ea_state = eapInitial; + pcb->eap.es_client.ea_requests = pcb->eap.es_server.ea_requests = 0; +#endif /* PPP_SERVER */ +} + +/* + * eap_protrej - Peer doesn't speak this protocol. + * + * This shouldn't happen. If it does, it represents authentication + * failure. + */ +static void eap_protrej(ppp_pcb *pcb) { + + if (eap_client_active(pcb)) { + ppp_error("EAP authentication failed due to Protocol-Reject"); + auth_withpeer_fail(pcb, PPP_EAP); + } +#if PPP_SERVER + if (eap_server_active(pcb)) { + ppp_error("EAP authentication of peer failed on Protocol-Reject"); + auth_peer_fail(pcb, PPP_EAP); + } +#endif /* PPP_SERVER */ + eap_lowerdown(pcb); +} + +/* + * Format and send a regular EAP Response message. 
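+ * Note: the layout built below is the standard EAP packet from RFC 2284:
+ * a 4-octet header (Code = Response, Identifier, 2-octet Length) followed
+ * by a 1-octet Type and the Type-Data in str, hence
+ * msglen = EAP_HEADERLEN + 1 + lenstr.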
+ */ +static void eap_send_response(ppp_pcb *pcb, u_char id, u_char typenum, const u_char *str, int lenstr) { + struct pbuf *p; + u_char *outp; + int msglen; + + msglen = EAP_HEADERLEN + sizeof (u_char) + lenstr; + p = pbuf_alloc(PBUF_RAW, (u16_t)(PPP_HDRLEN + msglen), PPP_CTRL_PBUF_TYPE); + if(NULL == p) + return; + if(p->tot_len != p->len) { + pbuf_free(p); + return; + } + + outp = (u_char*)p->payload; + + MAKEHEADER(outp, PPP_EAP); + + PUTCHAR(EAP_RESPONSE, outp); + PUTCHAR(id, outp); + pcb->eap.es_client.ea_id = id; + PUTSHORT(msglen, outp); + PUTCHAR(typenum, outp); + if (lenstr > 0) { + MEMCPY(outp, str, lenstr); + } + + ppp_write(pcb, p); +} + +/* + * Format and send an MD5-Challenge EAP Response message. + */ +static void eap_chap_response(ppp_pcb *pcb, u_char id, u_char *hash, const char *name, int namelen) { + struct pbuf *p; + u_char *outp; + int msglen; + + msglen = EAP_HEADERLEN + 2 * sizeof (u_char) + MD5_SIGNATURE_SIZE + + namelen; + p = pbuf_alloc(PBUF_RAW, (u16_t)(PPP_HDRLEN + msglen), PPP_CTRL_PBUF_TYPE); + if(NULL == p) + return; + if(p->tot_len != p->len) { + pbuf_free(p); + return; + } + + outp = (u_char*)p->payload; + + MAKEHEADER(outp, PPP_EAP); + + PUTCHAR(EAP_RESPONSE, outp); + PUTCHAR(id, outp); + pcb->eap.es_client.ea_id = id; + PUTSHORT(msglen, outp); + PUTCHAR(EAPT_MD5CHAP, outp); + PUTCHAR(MD5_SIGNATURE_SIZE, outp); + MEMCPY(outp, hash, MD5_SIGNATURE_SIZE); + INCPTR(MD5_SIGNATURE_SIZE, outp); + if (namelen > 0) { + MEMCPY(outp, name, namelen); + } + + ppp_write(pcb, p); +} + +#ifdef USE_SRP +/* + * Format and send a SRP EAP Response message. + */ +static void +eap_srp_response(esp, id, subtypenum, str, lenstr) +eap_state *esp; +u_char id; +u_char subtypenum; +u_char *str; +int lenstr; +{ + ppp_pcb *pcb = &ppp_pcb_list[pcb->eap.es_unit]; + struct pbuf *p; + u_char *outp; + int msglen; + + msglen = EAP_HEADERLEN + 2 * sizeof (u_char) + lenstr; + p = pbuf_alloc(PBUF_RAW, (u16_t)(PPP_HDRLEN + msglen), PPP_CTRL_PBUF_TYPE); + if(NULL == p) + return; + if(p->tot_len != p->len) { + pbuf_free(p); + return; + } + + outp = p->payload; + + MAKEHEADER(outp, PPP_EAP); + + PUTCHAR(EAP_RESPONSE, outp); + PUTCHAR(id, outp); + pcb->eap.es_client.ea_id = id; + PUTSHORT(msglen, outp); + PUTCHAR(EAPT_SRP, outp); + PUTCHAR(subtypenum, outp); + if (lenstr > 0) { + MEMCPY(outp, str, lenstr); + } + + ppp_write(pcb, p); +} + +/* + * Format and send a SRP EAP Client Validator Response message. 
+ */ +static void +eap_srpval_response(esp, id, flags, str) +eap_state *esp; +u_char id; +u32_t flags; +u_char *str; +{ + ppp_pcb *pcb = &ppp_pcb_list[pcb->eap.es_unit]; + struct pbuf *p; + u_char *outp; + int msglen; + + msglen = EAP_HEADERLEN + 2 * sizeof (u_char) + sizeof (u32_t) + + SHA_DIGESTSIZE; + p = pbuf_alloc(PBUF_RAW, (u16_t)(PPP_HDRLEN + msglen), PPP_CTRL_PBUF_TYPE); + if(NULL == p) + return; + if(p->tot_len != p->len) { + pbuf_free(p); + return; + } + + outp = p->payload; + + MAKEHEADER(outp, PPP_EAP); + + PUTCHAR(EAP_RESPONSE, outp); + PUTCHAR(id, outp); + pcb->eap.es_client.ea_id = id; + PUTSHORT(msglen, outp); + PUTCHAR(EAPT_SRP, outp); + PUTCHAR(EAPSRP_CVALIDATOR, outp); + PUTLONG(flags, outp); + MEMCPY(outp, str, SHA_DIGESTSIZE); + + ppp_write(pcb, p); +} +#endif /* USE_SRP */ + +static void eap_send_nak(ppp_pcb *pcb, u_char id, u_char type) { + struct pbuf *p; + u_char *outp; + int msglen; + + msglen = EAP_HEADERLEN + 2 * sizeof (u_char); + p = pbuf_alloc(PBUF_RAW, (u16_t)(PPP_HDRLEN + msglen), PPP_CTRL_PBUF_TYPE); + if(NULL == p) + return; + if(p->tot_len != p->len) { + pbuf_free(p); + return; + } + + outp = (u_char*)p->payload; + + MAKEHEADER(outp, PPP_EAP); + + PUTCHAR(EAP_RESPONSE, outp); + PUTCHAR(id, outp); + pcb->eap.es_client.ea_id = id; + PUTSHORT(msglen, outp); + PUTCHAR(EAPT_NAK, outp); + PUTCHAR(type, outp); + + ppp_write(pcb, p); +} + +#ifdef USE_SRP +static char * +name_of_pn_file() +{ + char *user, *path, *file; + struct passwd *pw; + size_t pl; + static bool pnlogged = 0; + + pw = getpwuid(getuid()); + if (pw == NULL || (user = pw->pw_dir) == NULL || user[0] == 0) { + errno = EINVAL; + return (NULL); + } + file = _PATH_PSEUDONYM; + pl = strlen(user) + strlen(file) + 2; + path = malloc(pl); + if (path == NULL) + return (NULL); + (void) slprintf(path, pl, "%s/%s", user, file); + if (!pnlogged) { + ppp_dbglog("pseudonym file: %s", path); + pnlogged = 1; + } + return (path); +} + +static int +open_pn_file(modebits) +mode_t modebits; +{ + char *path; + int fd, err; + + if ((path = name_of_pn_file()) == NULL) + return (-1); + fd = open(path, modebits, S_IRUSR | S_IWUSR); + err = errno; + free(path); + errno = err; + return (fd); +} + +static void +remove_pn_file() +{ + char *path; + + if ((path = name_of_pn_file()) != NULL) { + (void) unlink(path); + (void) free(path); + } +} + +static void +write_pseudonym(esp, inp, len, id) +eap_state *esp; +u_char *inp; +int len, id; +{ + u_char val; + u_char *datp, *digp; + SHA1_CTX ctxt; + u_char dig[SHA_DIGESTSIZE]; + int dsize, fd, olen = len; + + /* + * Do the decoding by working backwards. This eliminates the need + * to save the decoded output in a separate buffer. 
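+ *
+ * (The obscuring applied by the server is a SHA1-based XOR mask keyed by
+ * the Request Identifier and the SRP session key, seeded with the client
+ * name and chained block to block, which is why it can be stripped in
+ * place working from the end of the buffer toward the front. Note also
+ * that these pseudonym helpers rely on a POSIX environment (getpwuid(),
+ * open(), unlink(), a home directory for _PATH_PSEUDONYM); they are only
+ * compiled when USE_SRP is defined, which the lwIP port does not do by
+ * default.)
+ *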
+ */ + val = id; + while (len > 0) { + if ((dsize = len % SHA_DIGESTSIZE) == 0) + dsize = SHA_DIGESTSIZE; + len -= dsize; + datp = inp + len; + SHA1Init(&ctxt); + SHA1Update(&ctxt, &val, 1); + SHA1Update(&ctxt, pcb->eap.es_client.ea_skey, SESSION_KEY_LEN); + if (len > 0) { + SHA1Update(&ctxt, datp, SHA_DIGESTSIZE); + } else { + SHA1Update(&ctxt, pcb->eap.es_client.ea_name, + pcb->eap.es_client.ea_namelen); + } + SHA1Final(dig, &ctxt); + for (digp = dig; digp < dig + SHA_DIGESTSIZE; digp++) + *datp++ ^= *digp; + } + + /* Now check that the result is sane */ + if (olen <= 0 || *inp + 1 > olen) { + ppp_dbglog("EAP: decoded pseudonym is unusable <%.*B>", olen, inp); + return; + } + + /* Save it away */ + fd = open_pn_file(O_WRONLY | O_CREAT | O_TRUNC); + if (fd < 0) { + ppp_dbglog("EAP: error saving pseudonym: %m"); + return; + } + len = write(fd, inp + 1, *inp); + if (close(fd) != -1 && len == *inp) { + ppp_dbglog("EAP: saved pseudonym"); + pcb->eap.es_usedpseudo = 0; + } else { + ppp_dbglog("EAP: failed to save pseudonym"); + remove_pn_file(); + } +} +#endif /* USE_SRP */ + +/* + * eap_request - Receive EAP Request message (client mode). + */ +static void eap_request(ppp_pcb *pcb, u_char *inp, int id, int len) { + u_char typenum; + u_char vallen; + int secret_len; + char secret[MAXSECRETLEN]; + char rhostname[MAXNAMELEN]; + lwip_md5_context mdContext; + u_char hash[MD5_SIGNATURE_SIZE]; +#ifdef USE_SRP + struct t_client *tc; + struct t_num sval, gval, Nval, *Ap, Bval; + u_char vals[2]; + SHA1_CTX ctxt; + u_char dig[SHA_DIGESTSIZE]; + int fd; +#endif /* USE_SRP */ + + /* + * Note: we update es_client.ea_id *only if* a Response + * message is being generated. Otherwise, we leave it the + * same for duplicate detection purposes. + */ + + pcb->eap.es_client.ea_requests++; + if (pcb->settings.eap_allow_req != 0 && + pcb->eap.es_client.ea_requests > pcb->settings.eap_allow_req) { + ppp_info("EAP: received too many Request messages"); + if (pcb->settings.eap_req_time > 0) { + UNTIMEOUT(eap_client_timeout, pcb); + } + auth_withpeer_fail(pcb, PPP_EAP); + return; + } + + if (len <= 0) { + ppp_error("EAP: empty Request message discarded"); + return; + } + + GETCHAR(typenum, inp); + len--; + + switch (typenum) { + case EAPT_IDENTITY: + if (len > 0) + ppp_info("EAP: Identity prompt \"%.*q\"", len, inp); +#ifdef USE_SRP + if (pcb->eap.es_usepseudo && + (pcb->eap.es_usedpseudo == 0 || + (pcb->eap.es_usedpseudo == 1 && + id == pcb->eap.es_client.ea_id))) { + pcb->eap.es_usedpseudo = 1; + /* Try to get a pseudonym */ + if ((fd = open_pn_file(O_RDONLY)) >= 0) { + strcpy(rhostname, SRP_PSEUDO_ID); + len = read(fd, rhostname + SRP_PSEUDO_LEN, + sizeof (rhostname) - SRP_PSEUDO_LEN); + /* XXX NAI unsupported */ + if (len > 0) { + eap_send_response(pcb, id, typenum, + rhostname, len + SRP_PSEUDO_LEN); + } + (void) close(fd); + if (len > 0) + break; + } + } + /* Stop using pseudonym now. */ + if (pcb->eap.es_usepseudo && pcb->eap.es_usedpseudo != 2) { + remove_pn_file(); + pcb->eap.es_usedpseudo = 2; + } +#endif /* USE_SRP */ + eap_send_response(pcb, id, typenum, (const u_char*)pcb->eap.es_client.ea_name, + pcb->eap.es_client.ea_namelen); + break; + + case EAPT_NOTIFICATION: + if (len > 0) + ppp_info("EAP: Notification \"%.*q\"", len, inp); + eap_send_response(pcb, id, typenum, NULL, 0); + break; + + case EAPT_NAK: + /* + * Avoid the temptation to send Response Nak in reply + * to Request Nak here. It can only lead to trouble. 
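+ *
+ * (RFC 3748 defines Nak (Type 3) only as a Response type, sent when the
+ * Type proposed in a Request is unacceptable; a Nak arriving inside a
+ * Request has no defined meaning, so it is logged and dropped here.)
+ *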
+ */ + ppp_warn("EAP: unexpected Nak in Request; ignored"); + /* Return because we're waiting for something real. */ + return; + + case EAPT_MD5CHAP: + if (len < 1) { + ppp_error("EAP: received MD5-Challenge with no data"); + /* Bogus request; wait for something real. */ + return; + } + GETCHAR(vallen, inp); + len--; + if (vallen < 8 || vallen > len) { + ppp_error("EAP: MD5-Challenge with bad length %d (8..%d)", + vallen, len); + /* Try something better. */ + eap_send_nak(pcb, id, EAPT_SRP); + break; + } + + /* Not so likely to happen. */ + if (vallen >= len + sizeof (rhostname)) { + ppp_dbglog("EAP: trimming really long peer name down"); + MEMCPY(rhostname, inp + vallen, sizeof (rhostname) - 1); + rhostname[sizeof (rhostname) - 1] = '\0'; + } else { + MEMCPY(rhostname, inp + vallen, len - vallen); + rhostname[len - vallen] = '\0'; + } + +#if PPP_REMOTENAME + /* In case the remote doesn't give us his name. */ + if (pcb->settings.explicit_remote || + (pcb->settings.remote_name[0] != '\0' && vallen == len)) + strlcpy(rhostname, pcb->settings.remote_name, sizeof (rhostname)); +#endif /* PPP_REMOTENAME */ + + /* + * Get the secret for authenticating ourselves with + * the specified host. + */ + if (!get_secret(pcb, pcb->eap.es_client.ea_name, + rhostname, secret, &secret_len, 0)) { + ppp_dbglog("EAP: no MD5 secret for auth to %q", rhostname); + eap_send_nak(pcb, id, EAPT_SRP); + break; + } + lwip_md5_init(&mdContext); + lwip_md5_starts(&mdContext); + typenum = id; + lwip_md5_update(&mdContext, &typenum, 1); + lwip_md5_update(&mdContext, (u_char *)secret, secret_len); + BZERO(secret, sizeof (secret)); + lwip_md5_update(&mdContext, inp, vallen); + lwip_md5_finish(&mdContext, hash); + lwip_md5_free(&mdContext); + eap_chap_response(pcb, id, hash, pcb->eap.es_client.ea_name, + pcb->eap.es_client.ea_namelen); + break; + +#ifdef USE_SRP + case EAPT_SRP: + if (len < 1) { + ppp_error("EAP: received empty SRP Request"); + /* Bogus request; wait for something real. */ + return; + } + + /* Get subtype */ + GETCHAR(vallen, inp); + len--; + switch (vallen) { + case EAPSRP_CHALLENGE: + tc = NULL; + if (pcb->eap.es_client.ea_session != NULL) { + tc = (struct t_client *)pcb->eap.es_client. + ea_session; + /* + * If this is a new challenge, then start + * over with a new client session context. + * Otherwise, just resend last response. + */ + if (id != pcb->eap.es_client.ea_id) { + t_clientclose(tc); + pcb->eap.es_client.ea_session = NULL; + tc = NULL; + } + } + /* No session key just yet */ + pcb->eap.es_client.ea_skey = NULL; + if (tc == NULL) { + int rhostnamelen; + + GETCHAR(vallen, inp); + len--; + if (vallen >= len) { + ppp_error("EAP: badly-formed SRP Challenge" + " (name)"); + /* Ignore badly-formed messages */ + return; + } + MEMCPY(rhostname, inp, vallen); + rhostname[vallen] = '\0'; + INCPTR(vallen, inp); + len -= vallen; + + /* + * In case the remote doesn't give us his name, + * use configured name. 
+ */ + if (explicit_remote || + (remote_name[0] != '\0' && vallen == 0)) { + strlcpy(rhostname, remote_name, + sizeof (rhostname)); + } + + rhostnamelen = (int)strlen(rhostname); + if (rhostnamelen > MAXNAMELEN) { + rhostnamelen = MAXNAMELEN; + } + MEMCPY(pcb->eap.es_client.ea_peer, rhostname, rhostnamelen); + pcb->eap.es_client.ea_peer[rhostnamelen] = '\0'; + pcb->eap.es_client.ea_peerlen = rhostnamelen; + + GETCHAR(vallen, inp); + len--; + if (vallen >= len) { + ppp_error("EAP: badly-formed SRP Challenge" + " (s)"); + /* Ignore badly-formed messages */ + return; + } + sval.data = inp; + sval.len = vallen; + INCPTR(vallen, inp); + len -= vallen; + + GETCHAR(vallen, inp); + len--; + if (vallen > len) { + ppp_error("EAP: badly-formed SRP Challenge" + " (g)"); + /* Ignore badly-formed messages */ + return; + } + /* If no generator present, then use value 2 */ + if (vallen == 0) { + gval.data = (u_char *)"\002"; + gval.len = 1; + } else { + gval.data = inp; + gval.len = vallen; + } + INCPTR(vallen, inp); + len -= vallen; + + /* + * If no modulus present, then use well-known + * value. + */ + if (len == 0) { + Nval.data = (u_char *)wkmodulus; + Nval.len = sizeof (wkmodulus); + } else { + Nval.data = inp; + Nval.len = len; + } + tc = t_clientopen(pcb->eap.es_client.ea_name, + &Nval, &gval, &sval); + if (tc == NULL) { + eap_send_nak(pcb, id, EAPT_MD5CHAP); + break; + } + pcb->eap.es_client.ea_session = (void *)tc; + + /* Add Challenge ID & type to verifier */ + vals[0] = id; + vals[1] = EAPT_SRP; + t_clientaddexdata(tc, vals, 2); + } + Ap = t_clientgenexp(tc); + eap_srp_response(esp, id, EAPSRP_CKEY, Ap->data, + Ap->len); + break; + + case EAPSRP_SKEY: + tc = (struct t_client *)pcb->eap.es_client.ea_session; + if (tc == NULL) { + ppp_warn("EAP: peer sent Subtype 2 without 1"); + eap_send_nak(pcb, id, EAPT_MD5CHAP); + break; + } + if (pcb->eap.es_client.ea_skey != NULL) { + /* + * ID number should not change here. Warn + * if it does (but otherwise ignore). + */ + if (id != pcb->eap.es_client.ea_id) { + ppp_warn("EAP: ID changed from %d to %d " + "in SRP Subtype 2 rexmit", + pcb->eap.es_client.ea_id, id); + } + } else { + if (get_srp_secret(pcb->eap.es_unit, + pcb->eap.es_client.ea_name, + pcb->eap.es_client.ea_peer, secret, 0) == 0) { + /* + * Can't work with this peer because + * the secret is missing. Just give + * up. + */ + eap_send_nak(pcb, id, EAPT_MD5CHAP); + break; + } + Bval.data = inp; + Bval.len = len; + t_clientpasswd(tc, secret); + BZERO(secret, sizeof (secret)); + pcb->eap.es_client.ea_skey = + t_clientgetkey(tc, &Bval); + if (pcb->eap.es_client.ea_skey == NULL) { + /* Server is rogue; stop now */ + ppp_error("EAP: SRP server is rogue"); + goto client_failure; + } + } + eap_srpval_response(esp, id, SRPVAL_EBIT, + t_clientresponse(tc)); + break; + + case EAPSRP_SVALIDATOR: + tc = (struct t_client *)pcb->eap.es_client.ea_session; + if (tc == NULL || pcb->eap.es_client.ea_skey == NULL) { + ppp_warn("EAP: peer sent Subtype 3 without 1/2"); + eap_send_nak(pcb, id, EAPT_MD5CHAP); + break; + } + /* + * If we're already open, then this ought to be a + * duplicate. Otherwise, check that the server is + * who we think it is. 
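+ *
+ * (For reference, the SRP-SHA1 exchange as implemented here runs:
+ * Request/Challenge -> Response/Client-Key, Request/Server-Key ->
+ * Response/Client-Validator (M1), Request/Server-Validator (M2) ->
+ * Response/Ack, then the usual EAP Success; Subtype 4 is the optional
+ * lightweight rechallenge used once the link is already open.)
+ *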
+ */ + if (pcb->eap.es_client.ea_state == eapOpen) { + if (id != pcb->eap.es_client.ea_id) { + ppp_warn("EAP: ID changed from %d to %d " + "in SRP Subtype 3 rexmit", + pcb->eap.es_client.ea_id, id); + } + } else { + len -= sizeof (u32_t) + SHA_DIGESTSIZE; + if (len < 0 || t_clientverify(tc, inp + + sizeof (u32_t)) != 0) { + ppp_error("EAP: SRP server verification " + "failed"); + goto client_failure; + } + GETLONG(pcb->eap.es_client.ea_keyflags, inp); + /* Save pseudonym if user wants it. */ + if (len > 0 && pcb->eap.es_usepseudo) { + INCPTR(SHA_DIGESTSIZE, inp); + write_pseudonym(esp, inp, len, id); + } + } + /* + * We've verified our peer. We're now mostly done, + * except for waiting on the regular EAP Success + * message. + */ + eap_srp_response(esp, id, EAPSRP_ACK, NULL, 0); + break; + + case EAPSRP_LWRECHALLENGE: + if (len < 4) { + ppp_warn("EAP: malformed Lightweight rechallenge"); + return; + } + SHA1Init(&ctxt); + vals[0] = id; + SHA1Update(&ctxt, vals, 1); + SHA1Update(&ctxt, pcb->eap.es_client.ea_skey, + SESSION_KEY_LEN); + SHA1Update(&ctxt, inp, len); + SHA1Update(&ctxt, pcb->eap.es_client.ea_name, + pcb->eap.es_client.ea_namelen); + SHA1Final(dig, &ctxt); + eap_srp_response(esp, id, EAPSRP_LWRECHALLENGE, dig, + SHA_DIGESTSIZE); + break; + + default: + ppp_error("EAP: unknown SRP Subtype %d", vallen); + eap_send_nak(pcb, id, EAPT_MD5CHAP); + break; + } + break; +#endif /* USE_SRP */ + + default: + ppp_info("EAP: unknown authentication type %d; Naking", typenum); + eap_send_nak(pcb, id, EAPT_SRP); + break; + } + + if (pcb->settings.eap_req_time > 0) { + UNTIMEOUT(eap_client_timeout, pcb); + TIMEOUT(eap_client_timeout, pcb, + pcb->settings.eap_req_time); + } + return; + +#ifdef USE_SRP +client_failure: + pcb->eap.es_client.ea_state = eapBadAuth; + if (pcb->settings.eap_req_time > 0) { + UNTIMEOUT(eap_client_timeout, (void *)esp); + } + pcb->eap.es_client.ea_session = NULL; + t_clientclose(tc); + auth_withpeer_fail(pcb, PPP_EAP); +#endif /* USE_SRP */ +} + +#if PPP_SERVER +/* + * eap_response - Receive EAP Response message (server mode). 
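+ *
+ * The server side walks pcb->eap.es_server.ea_state through
+ * eapIdentify -> eapMD5Chall -> eapOpen for the MD5-Challenge case
+ * (with the eapSRP1..eapSRP4 states in between when USE_SRP is
+ * enabled), and eap_figure_next_state() picks the next Request to send.
+ * Responses whose Identifier does not match the outstanding Request are
+ * treated as duplicates and dropped, which is why ea_id is only
+ * advanced when a new Request goes out.
+ *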
+ */ +static void eap_response(ppp_pcb *pcb, u_char *inp, int id, int len) { + u_char typenum; + u_char vallen; + int secret_len; + char secret[MAXSECRETLEN]; + char rhostname[MAXNAMELEN]; + lwip_md5_context mdContext; + u_char hash[MD5_SIGNATURE_SIZE]; +#ifdef USE_SRP + struct t_server *ts; + struct t_num A; + SHA1_CTX ctxt; + u_char dig[SHA_DIGESTSIZE]; +#endif /* USE_SRP */ + + if (pcb->eap.es_server.ea_id != id) { + ppp_dbglog("EAP: discarding Response %d; expected ID %d", id, + pcb->eap.es_server.ea_id); + return; + } + + pcb->eap.es_server.ea_responses++; + + if (len <= 0) { + ppp_error("EAP: empty Response message discarded"); + return; + } + + GETCHAR(typenum, inp); + len--; + + switch (typenum) { + case EAPT_IDENTITY: + if (pcb->eap.es_server.ea_state != eapIdentify) { + ppp_dbglog("EAP discarding unwanted Identify \"%.q\"", len, + inp); + break; + } + ppp_info("EAP: unauthenticated peer name \"%.*q\"", len, inp); + if (len > MAXNAMELEN) { + len = MAXNAMELEN; + } + MEMCPY(pcb->eap.es_server.ea_peer, inp, len); + pcb->eap.es_server.ea_peer[len] = '\0'; + pcb->eap.es_server.ea_peerlen = len; + eap_figure_next_state(pcb, 0); + break; + + case EAPT_NOTIFICATION: + ppp_dbglog("EAP unexpected Notification; response discarded"); + break; + + case EAPT_NAK: + if (len < 1) { + ppp_info("EAP: Nak Response with no suggested protocol"); + eap_figure_next_state(pcb, 1); + break; + } + + GETCHAR(vallen, inp); + len--; + + if ( +#if PPP_REMOTENAME + !pcb->explicit_remote && +#endif /* PPP_REMOTENAME */ + pcb->eap.es_server.ea_state == eapIdentify){ + /* Peer cannot Nak Identify Request */ + eap_figure_next_state(pcb, 1); + break; + } + + switch (vallen) { + case EAPT_SRP: + /* Run through SRP validator selection again. */ + pcb->eap.es_server.ea_state = eapIdentify; + eap_figure_next_state(pcb, 0); + break; + + case EAPT_MD5CHAP: + pcb->eap.es_server.ea_state = eapMD5Chall; + break; + + default: + ppp_dbglog("EAP: peer requesting unknown Type %d", vallen); + switch (pcb->eap.es_server.ea_state) { + case eapSRP1: + case eapSRP2: + case eapSRP3: + pcb->eap.es_server.ea_state = eapMD5Chall; + break; + case eapMD5Chall: + case eapSRP4: + pcb->eap.es_server.ea_state = eapIdentify; + eap_figure_next_state(pcb, 0); + break; + default: + break; + } + break; + } + break; + + case EAPT_MD5CHAP: + if (pcb->eap.es_server.ea_state != eapMD5Chall) { + ppp_error("EAP: unexpected MD5-Response"); + eap_figure_next_state(pcb, 1); + break; + } + if (len < 1) { + ppp_error("EAP: received MD5-Response with no data"); + eap_figure_next_state(pcb, 1); + break; + } + GETCHAR(vallen, inp); + len--; + if (vallen != 16 || vallen > len) { + ppp_error("EAP: MD5-Response with bad length %d", vallen); + eap_figure_next_state(pcb, 1); + break; + } + + /* Not so likely to happen. */ + if (vallen >= len + sizeof (rhostname)) { + ppp_dbglog("EAP: trimming really long peer name down"); + MEMCPY(rhostname, inp + vallen, sizeof (rhostname) - 1); + rhostname[sizeof (rhostname) - 1] = '\0'; + } else { + MEMCPY(rhostname, inp + vallen, len - vallen); + rhostname[len - vallen] = '\0'; + } + +#if PPP_REMOTENAME + /* In case the remote doesn't give us his name. */ + if (explicit_remote || + (remote_name[0] != '\0' && vallen == len)) + strlcpy(rhostname, remote_name, sizeof (rhostname)); +#endif /* PPP_REMOTENAME */ + + /* + * Get the secret for authenticating the specified + * host. 
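+ *
+ * The value being verified below is the CHAP-style digest
+ *
+ *     Value = MD5(Identifier || secret || Challenge)
+ *
+ * computed over the one-octet Identifier of the outstanding Request,
+ * the shared secret, and the challenge we sent (es_challenge); it must
+ * match what eap_chap_response() produced on the peer. As a standalone
+ * sketch using the same lwip_md5_* wrappers, with illustrative variable
+ * names only:
+ *
+ *     u_char expected[MD5_SIGNATURE_SIZE];
+ *     lwip_md5_context ctx;
+ *
+ *     lwip_md5_init(&ctx);
+ *     lwip_md5_starts(&ctx);
+ *     lwip_md5_update(&ctx, &request_id, 1);        (1-octet Identifier)
+ *     lwip_md5_update(&ctx, secret, secret_len);    (shared secret)
+ *     lwip_md5_update(&ctx, challenge, challen);    (Challenge octets)
+ *     lwip_md5_finish(&ctx, expected);
+ *     lwip_md5_free(&ctx);
+ *     ok = (memcmp(expected, received_value, MD5_SIGNATURE_SIZE) == 0);
+ *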
+ */ + if (!get_secret(pcb, rhostname, + pcb->eap.es_server.ea_name, secret, &secret_len, 1)) { + ppp_dbglog("EAP: no MD5 secret for auth of %q", rhostname); + eap_send_failure(pcb); + break; + } + lwip_md5_init(&mdContext); + lwip_md5_starts(&mdContext); + lwip_md5_update(&mdContext, &pcb->eap.es_server.ea_id, 1); + lwip_md5_update(&mdContext, (u_char *)secret, secret_len); + BZERO(secret, sizeof (secret)); + lwip_md5_update(&mdContext, pcb->eap.es_challenge, pcb->eap.es_challen); + lwip_md5_finish(&mdContext, hash); + lwip_md5_free(&mdContext); + if (BCMP(hash, inp, MD5_SIGNATURE_SIZE) != 0) { + eap_send_failure(pcb); + break; + } + pcb->eap.es_server.ea_type = EAPT_MD5CHAP; + eap_send_success(pcb); + eap_figure_next_state(pcb, 0); + if (pcb->eap.es_rechallenge != 0) + TIMEOUT(eap_rechallenge, pcb, pcb->eap.es_rechallenge); + break; + +#ifdef USE_SRP + case EAPT_SRP: + if (len < 1) { + ppp_error("EAP: empty SRP Response"); + eap_figure_next_state(pcb, 1); + break; + } + GETCHAR(typenum, inp); + len--; + switch (typenum) { + case EAPSRP_CKEY: + if (pcb->eap.es_server.ea_state != eapSRP1) { + ppp_error("EAP: unexpected SRP Subtype 1 Response"); + eap_figure_next_state(pcb, 1); + break; + } + A.data = inp; + A.len = len; + ts = (struct t_server *)pcb->eap.es_server.ea_session; + assert(ts != NULL); + pcb->eap.es_server.ea_skey = t_servergetkey(ts, &A); + if (pcb->eap.es_server.ea_skey == NULL) { + /* Client's A value is bogus; terminate now */ + ppp_error("EAP: bogus A value from client"); + eap_send_failure(pcb); + } else { + eap_figure_next_state(pcb, 0); + } + break; + + case EAPSRP_CVALIDATOR: + if (pcb->eap.es_server.ea_state != eapSRP2) { + ppp_error("EAP: unexpected SRP Subtype 2 Response"); + eap_figure_next_state(pcb, 1); + break; + } + if (len < sizeof (u32_t) + SHA_DIGESTSIZE) { + ppp_error("EAP: M1 length %d < %d", len, + sizeof (u32_t) + SHA_DIGESTSIZE); + eap_figure_next_state(pcb, 1); + break; + } + GETLONG(pcb->eap.es_server.ea_keyflags, inp); + ts = (struct t_server *)pcb->eap.es_server.ea_session; + assert(ts != NULL); + if (t_serververify(ts, inp)) { + ppp_info("EAP: unable to validate client identity"); + eap_send_failure(pcb); + break; + } + eap_figure_next_state(pcb, 0); + break; + + case EAPSRP_ACK: + if (pcb->eap.es_server.ea_state != eapSRP3) { + ppp_error("EAP: unexpected SRP Subtype 3 Response"); + eap_send_failure(esp); + break; + } + pcb->eap.es_server.ea_type = EAPT_SRP; + eap_send_success(pcb, esp); + eap_figure_next_state(pcb, 0); + if (pcb->eap.es_rechallenge != 0) + TIMEOUT(eap_rechallenge, pcb, + pcb->eap.es_rechallenge); + if (pcb->eap.es_lwrechallenge != 0) + TIMEOUT(srp_lwrechallenge, pcb, + pcb->eap.es_lwrechallenge); + break; + + case EAPSRP_LWRECHALLENGE: + if (pcb->eap.es_server.ea_state != eapSRP4) { + ppp_info("EAP: unexpected SRP Subtype 4 Response"); + return; + } + if (len != SHA_DIGESTSIZE) { + ppp_error("EAP: bad Lightweight rechallenge " + "response"); + return; + } + SHA1Init(&ctxt); + vallen = id; + SHA1Update(&ctxt, &vallen, 1); + SHA1Update(&ctxt, pcb->eap.es_server.ea_skey, + SESSION_KEY_LEN); + SHA1Update(&ctxt, pcb->eap.es_challenge, pcb->eap.es_challen); + SHA1Update(&ctxt, pcb->eap.es_server.ea_peer, + pcb->eap.es_server.ea_peerlen); + SHA1Final(dig, &ctxt); + if (BCMP(dig, inp, SHA_DIGESTSIZE) != 0) { + ppp_error("EAP: failed Lightweight rechallenge"); + eap_send_failure(pcb); + break; + } + pcb->eap.es_server.ea_state = eapOpen; + if (pcb->eap.es_lwrechallenge != 0) + TIMEOUT(srp_lwrechallenge, esp, + pcb->eap.es_lwrechallenge); + 
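+ /*
+  * Note: several of these USE_SRP branches (eap_send_failure(esp),
+  * eap_send_success(pcb, esp), the TIMEOUT above) still pass the old
+  * pppd "esp" state pointer, which does not exist in these pcb-based
+  * functions, so the SRP paths would not compile if USE_SRP were
+  * enabled; they are kept as reference code, and USE_SRP is not
+  * defined in the lwIP port by default.
+  */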
break; + } + break; +#endif /* USE_SRP */ + + default: + /* This can't happen. */ + ppp_error("EAP: unknown Response type %d; ignored", typenum); + return; + } + + if (pcb->settings.eap_timeout_time > 0) { + UNTIMEOUT(eap_server_timeout, pcb); + } + + if (pcb->eap.es_server.ea_state != eapBadAuth && + pcb->eap.es_server.ea_state != eapOpen) { + pcb->eap.es_server.ea_id++; + eap_send_request(pcb); + } +} +#endif /* PPP_SERVER */ + +/* + * eap_success - Receive EAP Success message (client mode). + */ +static void eap_success(ppp_pcb *pcb, u_char *inp, int id, int len) { + LWIP_UNUSED_ARG(id); + + if (pcb->eap.es_client.ea_state != eapOpen && !eap_client_active(pcb)) { + ppp_dbglog("EAP unexpected success message in state %s (%d)", + eap_state_name(pcb->eap.es_client.ea_state), + pcb->eap.es_client.ea_state); + return; + } + + if (pcb->settings.eap_req_time > 0) { + UNTIMEOUT(eap_client_timeout, pcb); + } + + if (len > 0) { + /* This is odd. The spec doesn't allow for this. */ + PRINTMSG(inp, len); + } + + pcb->eap.es_client.ea_state = eapOpen; + auth_withpeer_success(pcb, PPP_EAP, 0); +} + +/* + * eap_failure - Receive EAP Failure message (client mode). + */ +static void eap_failure(ppp_pcb *pcb, u_char *inp, int id, int len) { + LWIP_UNUSED_ARG(id); + + if (!eap_client_active(pcb)) { + ppp_dbglog("EAP unexpected failure message in state %s (%d)", + eap_state_name(pcb->eap.es_client.ea_state), + pcb->eap.es_client.ea_state); + } + + if (pcb->settings.eap_req_time > 0) { + UNTIMEOUT(eap_client_timeout, pcb); + } + + if (len > 0) { + /* This is odd. The spec doesn't allow for this. */ + PRINTMSG(inp, len); + } + + pcb->eap.es_client.ea_state = eapBadAuth; + + ppp_error("EAP: peer reports authentication failure"); + auth_withpeer_fail(pcb, PPP_EAP); +} + +/* + * eap_input - Handle received EAP message. + */ +static void eap_input(ppp_pcb *pcb, u_char *inp, int inlen) { + u_char code, id; + int len; + + /* + * Parse header (code, id and length). If packet too short, + * drop it. + */ + if (inlen < EAP_HEADERLEN) { + ppp_error("EAP: packet too short: %d < %d", inlen, EAP_HEADERLEN); + return; + } + GETCHAR(code, inp); + GETCHAR(id, inp); + GETSHORT(len, inp); + if (len < EAP_HEADERLEN || len > inlen) { + ppp_error("EAP: packet has illegal length field %d (%d..%d)", len, + EAP_HEADERLEN, inlen); + return; + } + len -= EAP_HEADERLEN; + + /* Dispatch based on message code */ + switch (code) { + case EAP_REQUEST: + eap_request(pcb, inp, id, len); + break; + +#if PPP_SERVER + case EAP_RESPONSE: + eap_response(pcb, inp, id, len); + break; +#endif /* PPP_SERVER */ + + case EAP_SUCCESS: + eap_success(pcb, inp, id, len); + break; + + case EAP_FAILURE: + eap_failure(pcb, inp, id, len); + break; + + default: /* XXX Need code reject */ + /* Note: it's not legal to send EAP Nak here. */ + ppp_warn("EAP: unknown code %d received", code); + break; + } +} + +#if PRINTPKT_SUPPORT +/* + * eap_printpkt - print the contents of an EAP packet. 
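+ *
+ * eap_printpkt is only used for debug tracing of the packets handled
+ * above; none of the handlers in this file are called directly by the
+ * application. From an application's point of view, EAP is selected
+ * like any other authenticator once the link is created. A rough
+ * sketch for a PPPoS link (assuming EAP_SUPPORT is enabled in
+ * lwipopts.h; the netif, output and status callbacks are
+ * application-provided and not shown):
+ *
+ *     ppp_pcb *ppp = pppos_create(&ppp_netif, pppos_output_cb,
+ *                                 ppp_link_status_cb, NULL);
+ *     ppp_set_auth(ppp, PPPAUTHTYPE_EAP, "client-name", "shared-secret");
+ *     ppp_connect(ppp, 0);
+ *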
+ */ +static const char* const eap_codenames[] = { + "Request", "Response", "Success", "Failure" +}; + +static const char* const eap_typenames[] = { + "Identity", "Notification", "Nak", "MD5-Challenge", + "OTP", "Generic-Token", NULL, NULL, + "RSA", "DSS", "KEA", "KEA-Validate", + "TLS", "Defender", "Windows 2000", "Arcot", + "Cisco", "Nokia", "SRP" +}; + +static int eap_printpkt(const u_char *inp, int inlen, void (*printer) (void *, const char *, ...), void *arg) { + int code, id, len, rtype, vallen; + const u_char *pstart; + u32_t uval; + + if (inlen < EAP_HEADERLEN) + return (0); + pstart = inp; + GETCHAR(code, inp); + GETCHAR(id, inp); + GETSHORT(len, inp); + if (len < EAP_HEADERLEN || len > inlen) + return (0); + + if (code >= 1 && code <= (int)LWIP_ARRAYSIZE(eap_codenames)) + printer(arg, " %s", eap_codenames[code-1]); + else + printer(arg, " code=0x%x", code); + printer(arg, " id=0x%x", id); + len -= EAP_HEADERLEN; + switch (code) { + case EAP_REQUEST: + if (len < 1) { + printer(arg, " "); + break; + } + GETCHAR(rtype, inp); + len--; + if (rtype >= 1 && rtype <= (int)LWIP_ARRAYSIZE(eap_typenames)) + printer(arg, " %s", eap_typenames[rtype-1]); + else + printer(arg, " type=0x%x", rtype); + switch (rtype) { + case EAPT_IDENTITY: + case EAPT_NOTIFICATION: + if (len > 0) { + printer(arg, " "); + INCPTR(len, inp); + len = 0; + } else { + printer(arg, " "); + } + break; + + case EAPT_MD5CHAP: + if (len <= 0) + break; + GETCHAR(vallen, inp); + len--; + if (vallen > len) + goto truncated; + printer(arg, " ", vallen, inp); + INCPTR(vallen, inp); + len -= vallen; + if (len > 0) { + printer(arg, " "); + INCPTR(len, inp); + len = 0; + } else { + printer(arg, " "); + } + break; + + case EAPT_SRP: + if (len < 3) + goto truncated; + GETCHAR(vallen, inp); + len--; + printer(arg, "-%d", vallen); + switch (vallen) { + case EAPSRP_CHALLENGE: + GETCHAR(vallen, inp); + len--; + if (vallen >= len) + goto truncated; + if (vallen > 0) { + printer(arg, " "); + } else { + printer(arg, " "); + } + INCPTR(vallen, inp); + len -= vallen; + GETCHAR(vallen, inp); + len--; + if (vallen >= len) + goto truncated; + printer(arg, " ", vallen, inp); + INCPTR(vallen, inp); + len -= vallen; + GETCHAR(vallen, inp); + len--; + if (vallen > len) + goto truncated; + if (vallen == 0) { + printer(arg, " "); + } else { + printer(arg, " ", vallen, inp); + } + INCPTR(vallen, inp); + len -= vallen; + if (len == 0) { + printer(arg, " "); + } else { + printer(arg, " ", len, inp); + INCPTR(len, inp); + len = 0; + } + break; + + case EAPSRP_SKEY: + printer(arg, " ", len, inp); + INCPTR(len, inp); + len = 0; + break; + + case EAPSRP_SVALIDATOR: + if (len < (int)sizeof (u32_t)) + break; + GETLONG(uval, inp); + len -= sizeof (u32_t); + if (uval & SRPVAL_EBIT) { + printer(arg, " E"); + uval &= ~SRPVAL_EBIT; + } + if (uval != 0) { + printer(arg, " f<%X>", uval); + } + if ((vallen = len) > SHA_DIGESTSIZE) + vallen = SHA_DIGESTSIZE; + printer(arg, " ", len, inp, + len < SHA_DIGESTSIZE ? "?" 
: ""); + INCPTR(vallen, inp); + len -= vallen; + if (len > 0) { + printer(arg, " ", len, inp); + INCPTR(len, inp); + len = 0; + } + break; + + case EAPSRP_LWRECHALLENGE: + printer(arg, " ", len, inp); + INCPTR(len, inp); + len = 0; + break; + default: + break; + } + break; + default: + break; + } + break; + + case EAP_RESPONSE: + if (len < 1) + break; + GETCHAR(rtype, inp); + len--; + if (rtype >= 1 && rtype <= (int)LWIP_ARRAYSIZE(eap_typenames)) + printer(arg, " %s", eap_typenames[rtype-1]); + else + printer(arg, " type=0x%x", rtype); + switch (rtype) { + case EAPT_IDENTITY: + if (len > 0) { + printer(arg, " "); + INCPTR(len, inp); + len = 0; + } + break; + + case EAPT_NAK: + if (len <= 0) { + printer(arg, " "); + break; + } + GETCHAR(rtype, inp); + len--; + printer(arg, " = 1 && rtype < (int)LWIP_ARRAYSIZE(eap_typenames)) + printer(arg, " (%s)", eap_typenames[rtype-1]); + printer(arg, ">"); + break; + + case EAPT_MD5CHAP: + if (len <= 0) { + printer(arg, " "); + break; + } + GETCHAR(vallen, inp); + len--; + if (vallen > len) + goto truncated; + printer(arg, " ", vallen, inp); + INCPTR(vallen, inp); + len -= vallen; + if (len > 0) { + printer(arg, " "); + INCPTR(len, inp); + len = 0; + } else { + printer(arg, " "); + } + break; + + case EAPT_SRP: + if (len < 1) + goto truncated; + GETCHAR(vallen, inp); + len--; + printer(arg, "-%d", vallen); + switch (vallen) { + case EAPSRP_CKEY: + printer(arg, " ", len, inp); + INCPTR(len, inp); + len = 0; + break; + + case EAPSRP_CVALIDATOR: + if (len < (int)sizeof (u32_t)) + break; + GETLONG(uval, inp); + len -= sizeof (u32_t); + if (uval & SRPVAL_EBIT) { + printer(arg, " E"); + uval &= ~SRPVAL_EBIT; + } + if (uval != 0) { + printer(arg, " f<%X>", uval); + } + printer(arg, " ", len, inp, + len == SHA_DIGESTSIZE ? "" : "?"); + INCPTR(len, inp); + len = 0; + break; + + case EAPSRP_ACK: + break; + + case EAPSRP_LWRECHALLENGE: + printer(arg, " ", len, inp, + len == SHA_DIGESTSIZE ? "" : "?"); + if ((vallen = len) > SHA_DIGESTSIZE) + vallen = SHA_DIGESTSIZE; + INCPTR(vallen, inp); + len -= vallen; + break; + default: + break; + } + break; + default: + break; + } + break; + + case EAP_SUCCESS: /* No payload expected for these! */ + case EAP_FAILURE: + default: + break; + + truncated: + printer(arg, " "); + break; + } + + if (len > 8) + printer(arg, "%8B...", inp); + else if (len > 0) + printer(arg, "%.*B", len, inp); + INCPTR(len, inp); + + return (inp - pstart); +} +#endif /* PRINTPKT_SUPPORT */ + +#endif /* PPP_SUPPORT && EAP_SUPPORT */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/netif/ppp/ecp.c b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/netif/ppp/ecp.c new file mode 100644 index 0000000..a22eda8 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/netif/ppp/ecp.c @@ -0,0 +1,191 @@ +/* + * ecp.c - PPP Encryption Control Protocol. + * + * Copyright (c) 2002 Google, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. 
+ * + * 3. The name(s) of the authors of this software must not be used to + * endorse or promote products derived from this software without + * prior written permission. + * + * THE AUTHORS OF THIS SOFTWARE DISCLAIM ALL WARRANTIES WITH REGARD TO + * THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY + * AND FITNESS, IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY + * SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN + * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING + * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + * + * Derived from ccp.c, which is: + * + * Copyright (c) 1994-2002 Paul Mackerras. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. The name(s) of the authors of this software must not be used to + * endorse or promote products derived from this software without + * prior written permission. + * + * 3. Redistributions of any form whatsoever must retain the following + * acknowledgment: + * "This product includes software developed by Paul Mackerras + * ". + * + * THE AUTHORS OF THIS SOFTWARE DISCLAIM ALL WARRANTIES WITH REGARD TO + * THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY + * AND FITNESS, IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY + * SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN + * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING + * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#include "netif/ppp/ppp_opts.h" +#if PPP_SUPPORT && ECP_SUPPORT /* don't build if not configured for use in lwipopts.h */ + +#include + +#include "netif/ppp/ppp_impl.h" + +#include "netif/ppp/fsm.h" +#include "netif/ppp/ecp.h" + +#if PPP_OPTIONS +static option_t ecp_option_list[] = { + { "noecp", o_bool, &ecp_protent.enabled_flag, + "Disable ECP negotiation" }, + { "-ecp", o_bool, &ecp_protent.enabled_flag, + "Disable ECP negotiation", OPT_ALIAS }, + + { NULL } +}; +#endif /* PPP_OPTIONS */ + +/* + * Protocol entry points from main code. 
+ */ +static void ecp_init (int unit); +/* +static void ecp_open (int unit); +static void ecp_close (int unit, char *); +static void ecp_lowerup (int unit); +static void ecp_lowerdown (int); +static void ecp_input (int unit, u_char *pkt, int len); +static void ecp_protrej (int unit); +*/ +#if PRINTPKT_SUPPORT +static int ecp_printpkt (const u_char *pkt, int len, + void (*printer) (void *, char *, ...), + void *arg); +#endif /* PRINTPKT_SUPPORT */ +/* +static void ecp_datainput (int unit, u_char *pkt, int len); +*/ + +const struct protent ecp_protent = { + PPP_ECP, + ecp_init, + NULL, /* ecp_input, */ + NULL, /* ecp_protrej, */ + NULL, /* ecp_lowerup, */ + NULL, /* ecp_lowerdown, */ + NULL, /* ecp_open, */ + NULL, /* ecp_close, */ +#if PRINTPKT_SUPPORT + ecp_printpkt, +#endif /* PRINTPKT_SUPPORT */ +#if PPP_DATAINPUT + NULL, /* ecp_datainput, */ +#endif /* PPP_DATAINPUT */ +#if PRINTPKT_SUPPORT + "ECP", + "Encrypted", +#endif /* PRINTPKT_SUPPORT */ +#if PPP_OPTIONS + ecp_option_list, + NULL, +#endif /* PPP_OPTIONS */ +#if DEMAND_SUPPORT + NULL, + NULL +#endif /* DEMAND_SUPPORT */ +}; + +fsm ecp_fsm[NUM_PPP]; +ecp_options ecp_wantoptions[NUM_PPP]; /* what to request the peer to use */ +ecp_options ecp_gotoptions[NUM_PPP]; /* what the peer agreed to do */ +ecp_options ecp_allowoptions[NUM_PPP]; /* what we'll agree to do */ +ecp_options ecp_hisoptions[NUM_PPP]; /* what we agreed to do */ + +static const fsm_callbacks ecp_callbacks = { + NULL, /* ecp_resetci, */ + NULL, /* ecp_cilen, */ + NULL, /* ecp_addci, */ + NULL, /* ecp_ackci, */ + NULL, /* ecp_nakci, */ + NULL, /* ecp_rejci, */ + NULL, /* ecp_reqci, */ + NULL, /* ecp_up, */ + NULL, /* ecp_down, */ + NULL, + NULL, + NULL, + NULL, + NULL, /* ecp_extcode, */ + "ECP" +}; + +/* + * ecp_init - initialize ECP. + */ +static void +ecp_init(unit) + int unit; +{ + fsm *f = &ecp_fsm[unit]; + + f->unit = unit; + f->protocol = PPP_ECP; + f->callbacks = &ecp_callbacks; + fsm_init(f); + +#if 0 /* Not necessary, everything is cleared in ppp_new() */ + memset(&ecp_wantoptions[unit], 0, sizeof(ecp_options)); + memset(&ecp_gotoptions[unit], 0, sizeof(ecp_options)); + memset(&ecp_allowoptions[unit], 0, sizeof(ecp_options)); + memset(&ecp_hisoptions[unit], 0, sizeof(ecp_options)); +#endif /* 0 */ + +} + + +#if PRINTPKT_SUPPORT +static int +ecp_printpkt(p, plen, printer, arg) + const u_char *p; + int plen; + void (*printer) (void *, char *, ...); + void *arg; +{ + return 0; +} +#endif /* PRINTPKT_SUPPORT */ + +#endif /* PPP_SUPPORT && ECP_SUPPORT */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/netif/ppp/eui64.c b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/netif/ppp/eui64.c new file mode 100644 index 0000000..15af855 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/netif/ppp/eui64.c @@ -0,0 +1,56 @@ +/* + * eui64.c - EUI64 routines for IPv6CP. + * + * Copyright (c) 1999 Tommi Komulainen. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * 3. The name(s) of the authors of this software must not be used to + * endorse or promote products derived from this software without + * prior written permission. + * + * 4. Redistributions of any form whatsoever must retain the following + * acknowledgment: + * "This product includes software developed by Tommi Komulainen + * ". + * + * THE AUTHORS OF THIS SOFTWARE DISCLAIM ALL WARRANTIES WITH REGARD TO + * THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY + * AND FITNESS, IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY + * SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN + * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING + * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + * + * $Id: eui64.c,v 1.6 2002/12/04 23:03:32 paulus Exp $ + */ + +#include "netif/ppp/ppp_opts.h" +#if PPP_SUPPORT && PPP_IPV6_SUPPORT /* don't build if not configured for use in lwipopts.h */ + +#include "netif/ppp/ppp_impl.h" +#include "netif/ppp/eui64.h" + +/* + * eui64_ntoa - Make an ascii representation of an interface identifier + */ +char *eui64_ntoa(eui64_t e) { + static char buf[20]; + + sprintf(buf, "%02x%02x:%02x%02x:%02x%02x:%02x%02x", + e.e8[0], e.e8[1], e.e8[2], e.e8[3], + e.e8[4], e.e8[5], e.e8[6], e.e8[7]); + return buf; +} + +#endif /* PPP_SUPPORT && PPP_IPV6_SUPPORT */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/netif/ppp/fsm.c b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/netif/ppp/fsm.c new file mode 100644 index 0000000..9d32e7c --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/netif/ppp/fsm.c @@ -0,0 +1,799 @@ +/* + * fsm.c - {Link, IP} Control Protocol Finite State Machine. + * + * Copyright (c) 1984-2000 Carnegie Mellon University. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * 3. The name "Carnegie Mellon University" must not be used to + * endorse or promote products derived from this software without + * prior written permission. For permission or any legal + * details, please contact + * Office of Technology Transfer + * Carnegie Mellon University + * 5000 Forbes Avenue + * Pittsburgh, PA 15213-3890 + * (412) 268-4387, fax: (412) 268-7395 + * tech-transfer@andrew.cmu.edu + * + * 4. Redistributions of any form whatsoever must retain the following + * acknowledgment: + * "This product includes software developed by Computing Services + * at Carnegie Mellon University (http://www.cmu.edu/computing/)." 
+ * + * CARNEGIE MELLON UNIVERSITY DISCLAIMS ALL WARRANTIES WITH REGARD TO + * THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY + * AND FITNESS, IN NO EVENT SHALL CARNEGIE MELLON UNIVERSITY BE LIABLE + * FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN + * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING + * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#include "netif/ppp/ppp_opts.h" +#if PPP_SUPPORT /* don't build if not configured for use in lwipopts.h */ + +/* + * @todo: + * Randomize fsm id on link/init. + * Deal with variable outgoing MTU. + */ + +#if 0 /* UNUSED */ +#include +#include +#include +#endif /* UNUSED */ + +#include "netif/ppp/ppp_impl.h" + +#include "netif/ppp/fsm.h" + +static void fsm_timeout (void *); +static void fsm_rconfreq(fsm *f, u_char id, u_char *inp, int len); +static void fsm_rconfack(fsm *f, int id, u_char *inp, int len); +static void fsm_rconfnakrej(fsm *f, int code, int id, u_char *inp, int len); +static void fsm_rtermreq(fsm *f, int id, u_char *p, int len); +static void fsm_rtermack(fsm *f); +static void fsm_rcoderej(fsm *f, u_char *inp, int len); +static void fsm_sconfreq(fsm *f, int retransmit); + +#define PROTO_NAME(f) ((f)->callbacks->proto_name) + +/* + * fsm_init - Initialize fsm. + * + * Initialize fsm state. + */ +void fsm_init(fsm *f) { + ppp_pcb *pcb = f->pcb; + f->state = PPP_FSM_INITIAL; + f->flags = 0; + f->id = 0; /* XXX Start with random id? */ + f->maxnakloops = pcb->settings.fsm_max_nak_loops; + f->term_reason_len = 0; +} + + +/* + * fsm_lowerup - The lower layer is up. + */ +void fsm_lowerup(fsm *f) { + switch( f->state ){ + case PPP_FSM_INITIAL: + f->state = PPP_FSM_CLOSED; + break; + + case PPP_FSM_STARTING: + if( f->flags & OPT_SILENT ) + f->state = PPP_FSM_STOPPED; + else { + /* Send an initial configure-request */ + fsm_sconfreq(f, 0); + f->state = PPP_FSM_REQSENT; + } + break; + + default: + FSMDEBUG(("%s: Up event in state %d!", PROTO_NAME(f), f->state)); + /* no break */ + } +} + + +/* + * fsm_lowerdown - The lower layer is down. + * + * Cancel all timeouts and inform upper layers. + */ +void fsm_lowerdown(fsm *f) { + switch( f->state ){ + case PPP_FSM_CLOSED: + f->state = PPP_FSM_INITIAL; + break; + + case PPP_FSM_STOPPED: + f->state = PPP_FSM_STARTING; + if( f->callbacks->starting ) + (*f->callbacks->starting)(f); + break; + + case PPP_FSM_CLOSING: + f->state = PPP_FSM_INITIAL; + UNTIMEOUT(fsm_timeout, f); /* Cancel timeout */ + break; + + case PPP_FSM_STOPPING: + case PPP_FSM_REQSENT: + case PPP_FSM_ACKRCVD: + case PPP_FSM_ACKSENT: + f->state = PPP_FSM_STARTING; + UNTIMEOUT(fsm_timeout, f); /* Cancel timeout */ + break; + + case PPP_FSM_OPENED: + if( f->callbacks->down ) + (*f->callbacks->down)(f); + f->state = PPP_FSM_STARTING; + break; + + default: + FSMDEBUG(("%s: Down event in state %d!", PROTO_NAME(f), f->state)); + /* no break */ + } +} + + +/* + * fsm_open - Link is allowed to come up. 
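+ *
+ * Each control protocol (LCP, IPCP, ...) embeds one of these machines and
+ * drives it with the RFC 1661 events: fsm_lowerup()/fsm_lowerdown() from
+ * the layer below, fsm_open()/fsm_close() for the administrative
+ * Open/Close events, and fsm_input() for every received packet, while the
+ * per-protocol behaviour lives in its fsm_callbacks. A condensed sketch of
+ * that wiring, modelled on ipcp.c later in this patch ("xcp" and PPP_XCP
+ * are hypothetical):
+ *
+ *     static fsm xcp_fsm;
+ *     static const fsm_callbacks xcp_callbacks = {
+ *         xcp_resetci, xcp_cilen, xcp_addci, xcp_ackci, xcp_nakci,
+ *         xcp_rejci, xcp_reqci, xcp_up, xcp_down, NULL, xcp_finished,
+ *         NULL, NULL, NULL, "XCP"
+ *     };
+ *
+ *     static void xcp_init(ppp_pcb *pcb) {
+ *         fsm *f = &xcp_fsm;
+ *         f->pcb = pcb;
+ *         f->protocol = PPP_XCP;         (hypothetical protocol number)
+ *         f->callbacks = &xcp_callbacks;
+ *         fsm_init(f);
+ *     }
+ *
+ *     ... then fsm_lowerup(&xcp_fsm), fsm_open(&xcp_fsm) and
+ *     fsm_input(&xcp_fsm, pkt, len) as the corresponding events occur.
+ *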
+ */ +void fsm_open(fsm *f) { + switch( f->state ){ + case PPP_FSM_INITIAL: + f->state = PPP_FSM_STARTING; + if( f->callbacks->starting ) + (*f->callbacks->starting)(f); + break; + + case PPP_FSM_CLOSED: + if( f->flags & OPT_SILENT ) + f->state = PPP_FSM_STOPPED; + else { + /* Send an initial configure-request */ + fsm_sconfreq(f, 0); + f->state = PPP_FSM_REQSENT; + } + break; + + case PPP_FSM_CLOSING: + f->state = PPP_FSM_STOPPING; + /* fall through */ + /* no break */ + case PPP_FSM_STOPPED: + case PPP_FSM_OPENED: + if( f->flags & OPT_RESTART ){ + fsm_lowerdown(f); + fsm_lowerup(f); + } + break; + default: + break; + } +} + +/* + * terminate_layer - Start process of shutting down the FSM + * + * Cancel any timeout running, notify upper layers we're done, and + * send a terminate-request message as configured. + */ +static void terminate_layer(fsm *f, int nextstate) { + ppp_pcb *pcb = f->pcb; + + if( f->state != PPP_FSM_OPENED ) + UNTIMEOUT(fsm_timeout, f); /* Cancel timeout */ + else if( f->callbacks->down ) + (*f->callbacks->down)(f); /* Inform upper layers we're down */ + + /* Init restart counter and send Terminate-Request */ + f->retransmits = pcb->settings.fsm_max_term_transmits; + fsm_sdata(f, TERMREQ, f->reqid = ++f->id, + (const u_char *) f->term_reason, f->term_reason_len); + + if (f->retransmits == 0) { + /* + * User asked for no terminate requests at all; just close it. + * We've already fired off one Terminate-Request just to be nice + * to the peer, but we're not going to wait for a reply. + */ + f->state = nextstate == PPP_FSM_CLOSING ? PPP_FSM_CLOSED : PPP_FSM_STOPPED; + if( f->callbacks->finished ) + (*f->callbacks->finished)(f); + return; + } + + TIMEOUT(fsm_timeout, f, pcb->settings.fsm_timeout_time); + --f->retransmits; + + f->state = nextstate; +} + +/* + * fsm_close - Start closing connection. + * + * Cancel timeouts and either initiate close or possibly go directly to + * the PPP_FSM_CLOSED state. + */ +void fsm_close(fsm *f, const char *reason) { + f->term_reason = reason; + f->term_reason_len = (reason == NULL? 0: (u8_t)LWIP_MIN(strlen(reason), 0xFF) ); + switch( f->state ){ + case PPP_FSM_STARTING: + f->state = PPP_FSM_INITIAL; + break; + case PPP_FSM_STOPPED: + f->state = PPP_FSM_CLOSED; + break; + case PPP_FSM_STOPPING: + f->state = PPP_FSM_CLOSING; + break; + + case PPP_FSM_REQSENT: + case PPP_FSM_ACKRCVD: + case PPP_FSM_ACKSENT: + case PPP_FSM_OPENED: + terminate_layer(f, PPP_FSM_CLOSING); + break; + default: + break; + } +} + + +/* + * fsm_timeout - Timeout expired. + */ +static void fsm_timeout(void *arg) { + fsm *f = (fsm *) arg; + ppp_pcb *pcb = f->pcb; + + switch (f->state) { + case PPP_FSM_CLOSING: + case PPP_FSM_STOPPING: + if( f->retransmits <= 0 ){ + /* + * We've waited for an ack long enough. Peer probably heard us. + */ + f->state = (f->state == PPP_FSM_CLOSING)? 
PPP_FSM_CLOSED: PPP_FSM_STOPPED; + if( f->callbacks->finished ) + (*f->callbacks->finished)(f); + } else { + /* Send Terminate-Request */ + fsm_sdata(f, TERMREQ, f->reqid = ++f->id, + (const u_char *) f->term_reason, f->term_reason_len); + TIMEOUT(fsm_timeout, f, pcb->settings.fsm_timeout_time); + --f->retransmits; + } + break; + + case PPP_FSM_REQSENT: + case PPP_FSM_ACKRCVD: + case PPP_FSM_ACKSENT: + if (f->retransmits <= 0) { + ppp_warn("%s: timeout sending Config-Requests", PROTO_NAME(f)); + f->state = PPP_FSM_STOPPED; + if( (f->flags & OPT_PASSIVE) == 0 && f->callbacks->finished ) + (*f->callbacks->finished)(f); + + } else { + /* Retransmit the configure-request */ + if (f->callbacks->retransmit) + (*f->callbacks->retransmit)(f); + fsm_sconfreq(f, 1); /* Re-send Configure-Request */ + if( f->state == PPP_FSM_ACKRCVD ) + f->state = PPP_FSM_REQSENT; + } + break; + + default: + FSMDEBUG(("%s: Timeout event in state %d!", PROTO_NAME(f), f->state)); + /* no break */ + } +} + + +/* + * fsm_input - Input packet. + */ +void fsm_input(fsm *f, u_char *inpacket, int l) { + u_char *inp; + u_char code, id; + int len; + + /* + * Parse header (code, id and length). + * If packet too short, drop it. + */ + inp = inpacket; + if (l < HEADERLEN) { + FSMDEBUG(("fsm_input(%x): Rcvd short header.", f->protocol)); + return; + } + GETCHAR(code, inp); + GETCHAR(id, inp); + GETSHORT(len, inp); + if (len < HEADERLEN) { + FSMDEBUG(("fsm_input(%x): Rcvd illegal length.", f->protocol)); + return; + } + if (len > l) { + FSMDEBUG(("fsm_input(%x): Rcvd short packet.", f->protocol)); + return; + } + len -= HEADERLEN; /* subtract header length */ + + if( f->state == PPP_FSM_INITIAL || f->state == PPP_FSM_STARTING ){ + FSMDEBUG(("fsm_input(%x): Rcvd packet in state %d.", + f->protocol, f->state)); + return; + } + + /* + * Action depends on code. + */ + switch (code) { + case CONFREQ: + fsm_rconfreq(f, id, inp, len); + break; + + case CONFACK: + fsm_rconfack(f, id, inp, len); + break; + + case CONFNAK: + case CONFREJ: + fsm_rconfnakrej(f, code, id, inp, len); + break; + + case TERMREQ: + fsm_rtermreq(f, id, inp, len); + break; + + case TERMACK: + fsm_rtermack(f); + break; + + case CODEREJ: + fsm_rcoderej(f, inp, len); + break; + + default: + if( !f->callbacks->extcode + || !(*f->callbacks->extcode)(f, code, id, inp, len) ) + fsm_sdata(f, CODEREJ, ++f->id, inpacket, len + HEADERLEN); + break; + } +} + + +/* + * fsm_rconfreq - Receive Configure-Request. + */ +static void fsm_rconfreq(fsm *f, u_char id, u_char *inp, int len) { + int code, reject_if_disagree; + + switch( f->state ){ + case PPP_FSM_CLOSED: + /* Go away, we're closed */ + fsm_sdata(f, TERMACK, id, NULL, 0); + return; + case PPP_FSM_CLOSING: + case PPP_FSM_STOPPING: + return; + + case PPP_FSM_OPENED: + /* Go down and restart negotiation */ + if( f->callbacks->down ) + (*f->callbacks->down)(f); /* Inform upper layers */ + fsm_sconfreq(f, 0); /* Send initial Configure-Request */ + f->state = PPP_FSM_REQSENT; + break; + + case PPP_FSM_STOPPED: + /* Negotiation started by our peer */ + fsm_sconfreq(f, 0); /* Send initial Configure-Request */ + f->state = PPP_FSM_REQSENT; + break; + default: + break; + } + + /* + * Pass the requested configuration options + * to protocol-specific code for checking. 
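+ *
+ * The reqci callback parses the peer's option list in place, rewrites
+ * inp/len into the body of the reply, and returns CONFACK, CONFNAK or
+ * CONFREJ; reject_if_disagree is set once the peer has been Nak'd
+ * maxnakloops times, telling the callback to Reject rather than Nak
+ * forever. A trivial, purely hypothetical callback that accepts an
+ * option-free request and rejects everything else would be:
+ *
+ *     static int xcp_reqci(fsm *f, u_char *inp, int *len, int reject_if_disagree) {
+ *         LWIP_UNUSED_ARG(f);
+ *         LWIP_UNUSED_ARG(inp);
+ *         LWIP_UNUSED_ARG(reject_if_disagree);
+ *         if (*len == 0)
+ *             return CONFACK;      (empty request: nothing to object to)
+ *         return CONFREJ;          (leave *len as-is: echo the options back)
+ *     }
+ *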
+ */ + if (f->callbacks->reqci){ /* Check CI */ + reject_if_disagree = (f->nakloops >= f->maxnakloops); + code = (*f->callbacks->reqci)(f, inp, &len, reject_if_disagree); + } else if (len) + code = CONFREJ; /* Reject all CI */ + else + code = CONFACK; + + /* send the Ack, Nak or Rej to the peer */ + fsm_sdata(f, code, id, inp, len); + + if (code == CONFACK) { + if (f->state == PPP_FSM_ACKRCVD) { + UNTIMEOUT(fsm_timeout, f); /* Cancel timeout */ + f->state = PPP_FSM_OPENED; + if (f->callbacks->up) + (*f->callbacks->up)(f); /* Inform upper layers */ + } else + f->state = PPP_FSM_ACKSENT; + f->nakloops = 0; + + } else { + /* we sent CONFACK or CONFREJ */ + if (f->state != PPP_FSM_ACKRCVD) + f->state = PPP_FSM_REQSENT; + if( code == CONFNAK ) + ++f->nakloops; + } +} + + +/* + * fsm_rconfack - Receive Configure-Ack. + */ +static void fsm_rconfack(fsm *f, int id, u_char *inp, int len) { + ppp_pcb *pcb = f->pcb; + + if (id != f->reqid || f->seen_ack) /* Expected id? */ + return; /* Nope, toss... */ + if( !(f->callbacks->ackci? (*f->callbacks->ackci)(f, inp, len): + (len == 0)) ){ + /* Ack is bad - ignore it */ + ppp_error("Received bad configure-ack: %P", inp, len); + return; + } + f->seen_ack = 1; + f->rnakloops = 0; + + switch (f->state) { + case PPP_FSM_CLOSED: + case PPP_FSM_STOPPED: + fsm_sdata(f, TERMACK, id, NULL, 0); + break; + + case PPP_FSM_REQSENT: + f->state = PPP_FSM_ACKRCVD; + f->retransmits = pcb->settings.fsm_max_conf_req_transmits; + break; + + case PPP_FSM_ACKRCVD: + /* Huh? an extra valid Ack? oh well... */ + UNTIMEOUT(fsm_timeout, f); /* Cancel timeout */ + fsm_sconfreq(f, 0); + f->state = PPP_FSM_REQSENT; + break; + + case PPP_FSM_ACKSENT: + UNTIMEOUT(fsm_timeout, f); /* Cancel timeout */ + f->state = PPP_FSM_OPENED; + f->retransmits = pcb->settings.fsm_max_conf_req_transmits; + if (f->callbacks->up) + (*f->callbacks->up)(f); /* Inform upper layers */ + break; + + case PPP_FSM_OPENED: + /* Go down and restart negotiation */ + if (f->callbacks->down) + (*f->callbacks->down)(f); /* Inform upper layers */ + fsm_sconfreq(f, 0); /* Send initial Configure-Request */ + f->state = PPP_FSM_REQSENT; + break; + default: + break; + } +} + + +/* + * fsm_rconfnakrej - Receive Configure-Nak or Configure-Reject. + */ +static void fsm_rconfnakrej(fsm *f, int code, int id, u_char *inp, int len) { + int ret; + int treat_as_reject; + + if (id != f->reqid || f->seen_ack) /* Expected id? */ + return; /* Nope, toss... */ + + if (code == CONFNAK) { + ++f->rnakloops; + treat_as_reject = (f->rnakloops >= f->maxnakloops); + if (f->callbacks->nakci == NULL + || !(ret = f->callbacks->nakci(f, inp, len, treat_as_reject))) { + ppp_error("Received bad configure-nak: %P", inp, len); + return; + } + } else { + f->rnakloops = 0; + if (f->callbacks->rejci == NULL + || !(ret = f->callbacks->rejci(f, inp, len))) { + ppp_error("Received bad configure-rej: %P", inp, len); + return; + } + } + + f->seen_ack = 1; + + switch (f->state) { + case PPP_FSM_CLOSED: + case PPP_FSM_STOPPED: + fsm_sdata(f, TERMACK, id, NULL, 0); + break; + + case PPP_FSM_REQSENT: + case PPP_FSM_ACKSENT: + /* They didn't agree to what we wanted - try another request */ + UNTIMEOUT(fsm_timeout, f); /* Cancel timeout */ + if (ret < 0) + f->state = PPP_FSM_STOPPED; /* kludge for stopping CCP */ + else + fsm_sconfreq(f, 0); /* Send Configure-Request */ + break; + + case PPP_FSM_ACKRCVD: + /* Got a Nak/reject when we had already had an Ack?? oh well... 
*/ + UNTIMEOUT(fsm_timeout, f); /* Cancel timeout */ + fsm_sconfreq(f, 0); + f->state = PPP_FSM_REQSENT; + break; + + case PPP_FSM_OPENED: + /* Go down and restart negotiation */ + if (f->callbacks->down) + (*f->callbacks->down)(f); /* Inform upper layers */ + fsm_sconfreq(f, 0); /* Send initial Configure-Request */ + f->state = PPP_FSM_REQSENT; + break; + default: + break; + } +} + + +/* + * fsm_rtermreq - Receive Terminate-Req. + */ +static void fsm_rtermreq(fsm *f, int id, u_char *p, int len) { + ppp_pcb *pcb = f->pcb; + + switch (f->state) { + case PPP_FSM_ACKRCVD: + case PPP_FSM_ACKSENT: + f->state = PPP_FSM_REQSENT; /* Start over but keep trying */ + break; + + case PPP_FSM_OPENED: + if (len > 0) { + ppp_info("%s terminated by peer (%0.*v)", PROTO_NAME(f), len, p); + } else + ppp_info("%s terminated by peer", PROTO_NAME(f)); + f->retransmits = 0; + f->state = PPP_FSM_STOPPING; + if (f->callbacks->down) + (*f->callbacks->down)(f); /* Inform upper layers */ + TIMEOUT(fsm_timeout, f, pcb->settings.fsm_timeout_time); + break; + default: + break; + } + + fsm_sdata(f, TERMACK, id, NULL, 0); +} + + +/* + * fsm_rtermack - Receive Terminate-Ack. + */ +static void fsm_rtermack(fsm *f) { + switch (f->state) { + case PPP_FSM_CLOSING: + UNTIMEOUT(fsm_timeout, f); + f->state = PPP_FSM_CLOSED; + if( f->callbacks->finished ) + (*f->callbacks->finished)(f); + break; + case PPP_FSM_STOPPING: + UNTIMEOUT(fsm_timeout, f); + f->state = PPP_FSM_STOPPED; + if( f->callbacks->finished ) + (*f->callbacks->finished)(f); + break; + + case PPP_FSM_ACKRCVD: + f->state = PPP_FSM_REQSENT; + break; + + case PPP_FSM_OPENED: + if (f->callbacks->down) + (*f->callbacks->down)(f); /* Inform upper layers */ + fsm_sconfreq(f, 0); + f->state = PPP_FSM_REQSENT; + break; + default: + break; + } +} + + +/* + * fsm_rcoderej - Receive an Code-Reject. + */ +static void fsm_rcoderej(fsm *f, u_char *inp, int len) { + u_char code, id; + + if (len < HEADERLEN) { + FSMDEBUG(("fsm_rcoderej: Rcvd short Code-Reject packet!")); + return; + } + GETCHAR(code, inp); + GETCHAR(id, inp); + ppp_warn("%s: Rcvd Code-Reject for code %d, id %d", PROTO_NAME(f), code, id); + + if( f->state == PPP_FSM_ACKRCVD ) + f->state = PPP_FSM_REQSENT; +} + + +/* + * fsm_protreject - Peer doesn't speak this protocol. + * + * Treat this as a catastrophic error (RXJ-). + */ +void fsm_protreject(fsm *f) { + switch( f->state ){ + case PPP_FSM_CLOSING: + UNTIMEOUT(fsm_timeout, f); /* Cancel timeout */ + /* fall through */ + /* no break */ + case PPP_FSM_CLOSED: + f->state = PPP_FSM_CLOSED; + if( f->callbacks->finished ) + (*f->callbacks->finished)(f); + break; + + case PPP_FSM_STOPPING: + case PPP_FSM_REQSENT: + case PPP_FSM_ACKRCVD: + case PPP_FSM_ACKSENT: + UNTIMEOUT(fsm_timeout, f); /* Cancel timeout */ + /* fall through */ + /* no break */ + case PPP_FSM_STOPPED: + f->state = PPP_FSM_STOPPED; + if( f->callbacks->finished ) + (*f->callbacks->finished)(f); + break; + + case PPP_FSM_OPENED: + terminate_layer(f, PPP_FSM_STOPPING); + break; + + default: + FSMDEBUG(("%s: Protocol-reject event in state %d!", + PROTO_NAME(f), f->state)); + /* no break */ + } +} + + +/* + * fsm_sconfreq - Send a Configure-Request. 
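+ *
+ * Every packet this module emits has the same shape: the PPP header for
+ * f->protocol, then Code (CONFREQ, CONFACK, ..., CODEREJ), Identifier,
+ * a two-octet Length covering Code/Id/Length/Data, and the data itself
+ * (for a Configure-Request, the option list produced by the addci
+ * callback). fsm_sdata() further below is the generic sender for all of
+ * them; for example, the Terminate-Request issued by terminate_layer()
+ * is just:
+ *
+ *     fsm_sdata(f, TERMREQ, f->reqid = ++f->id,
+ *               (const u_char *)f->term_reason, f->term_reason_len);
+ *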
+ */ +static void fsm_sconfreq(fsm *f, int retransmit) { + ppp_pcb *pcb = f->pcb; + struct pbuf *p; + u_char *outp; + int cilen; + + if( f->state != PPP_FSM_REQSENT && f->state != PPP_FSM_ACKRCVD && f->state != PPP_FSM_ACKSENT ){ + /* Not currently negotiating - reset options */ + if( f->callbacks->resetci ) + (*f->callbacks->resetci)(f); + f->nakloops = 0; + f->rnakloops = 0; + } + + if( !retransmit ){ + /* New request - reset retransmission counter, use new ID */ + f->retransmits = pcb->settings.fsm_max_conf_req_transmits; + f->reqid = ++f->id; + } + + f->seen_ack = 0; + + /* + * Make up the request packet + */ + if( f->callbacks->cilen && f->callbacks->addci ){ + cilen = (*f->callbacks->cilen)(f); + if( cilen > pcb->peer_mru - HEADERLEN ) + cilen = pcb->peer_mru - HEADERLEN; + } else + cilen = 0; + + p = pbuf_alloc(PBUF_RAW, (u16_t)(cilen + HEADERLEN + PPP_HDRLEN), PPP_CTRL_PBUF_TYPE); + if(NULL == p) + return; + if(p->tot_len != p->len) { + pbuf_free(p); + return; + } + + /* send the request to our peer */ + outp = (u_char*)p->payload; + MAKEHEADER(outp, f->protocol); + PUTCHAR(CONFREQ, outp); + PUTCHAR(f->reqid, outp); + PUTSHORT(cilen + HEADERLEN, outp); + if (cilen != 0) { + (*f->callbacks->addci)(f, outp, &cilen); + LWIP_ASSERT("cilen == p->len - HEADERLEN - PPP_HDRLEN", cilen == p->len - HEADERLEN - PPP_HDRLEN); + } + + ppp_write(pcb, p); + + /* start the retransmit timer */ + --f->retransmits; + TIMEOUT(fsm_timeout, f, pcb->settings.fsm_timeout_time); +} + + +/* + * fsm_sdata - Send some data. + * + * Used for all packets sent to our peer by this module. + */ +void fsm_sdata(fsm *f, u_char code, u_char id, const u_char *data, int datalen) { + ppp_pcb *pcb = f->pcb; + struct pbuf *p; + u_char *outp; + int outlen; + + /* Adjust length to be smaller than MTU */ + if (datalen > pcb->peer_mru - HEADERLEN) + datalen = pcb->peer_mru - HEADERLEN; + outlen = datalen + HEADERLEN; + + p = pbuf_alloc(PBUF_RAW, (u16_t)(outlen + PPP_HDRLEN), PPP_CTRL_PBUF_TYPE); + if(NULL == p) + return; + if(p->tot_len != p->len) { + pbuf_free(p); + return; + } + + outp = (u_char*)p->payload; + if (datalen) /* && data != outp + PPP_HDRLEN + HEADERLEN) -- was only for fsm_sconfreq() */ + MEMCPY(outp + PPP_HDRLEN + HEADERLEN, data, datalen); + MAKEHEADER(outp, f->protocol); + PUTCHAR(code, outp); + PUTCHAR(id, outp); + PUTSHORT(outlen, outp); + ppp_write(pcb, p); +} + +#endif /* PPP_SUPPORT */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/netif/ppp/ipcp.c b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/netif/ppp/ipcp.c new file mode 100644 index 0000000..3fba320 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/netif/ppp/ipcp.c @@ -0,0 +1,2418 @@ +/* + * ipcp.c - PPP IP Control Protocol. + * + * Copyright (c) 1984-2000 Carnegie Mellon University. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * 3. 
The name "Carnegie Mellon University" must not be used to + * endorse or promote products derived from this software without + * prior written permission. For permission or any legal + * details, please contact + * Office of Technology Transfer + * Carnegie Mellon University + * 5000 Forbes Avenue + * Pittsburgh, PA 15213-3890 + * (412) 268-4387, fax: (412) 268-7395 + * tech-transfer@andrew.cmu.edu + * + * 4. Redistributions of any form whatsoever must retain the following + * acknowledgment: + * "This product includes software developed by Computing Services + * at Carnegie Mellon University (http://www.cmu.edu/computing/)." + * + * CARNEGIE MELLON UNIVERSITY DISCLAIMS ALL WARRANTIES WITH REGARD TO + * THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY + * AND FITNESS, IN NO EVENT SHALL CARNEGIE MELLON UNIVERSITY BE LIABLE + * FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN + * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING + * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#include "netif/ppp/ppp_opts.h" +#if PPP_SUPPORT && PPP_IPV4_SUPPORT /* don't build if not configured for use in lwipopts.h */ + +/* + * @todo: + */ + +#if 0 /* UNUSED */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#endif /* UNUSED */ + +#include "netif/ppp/ppp_impl.h" + +#include "netif/ppp/fsm.h" +#include "netif/ppp/ipcp.h" + +#if 0 /* UNUSED */ +/* global vars */ +u32_t netmask = 0; /* IP netmask to set on interface */ +#endif /* UNUSED */ + +#if 0 /* UNUSED */ +bool disable_defaultip = 0; /* Don't use hostname for default IP adrs */ +#endif /* UNUSED */ + +#if 0 /* moved to ppp_settings */ +bool noremoteip = 0; /* Let him have no IP address */ +#endif /* moved to ppp_setting */ + +#if 0 /* UNUSED */ +/* Hook for a plugin to know when IP protocol has come up */ +void (*ip_up_hook) (void) = NULL; + +/* Hook for a plugin to know when IP protocol has come down */ +void (*ip_down_hook) (void) = NULL; + +/* Hook for a plugin to choose the remote IP address */ +void (*ip_choose_hook) (u32_t *) = NULL; +#endif /* UNUSED */ + +#if PPP_NOTIFY +/* Notifiers for when IPCP goes up and down */ +struct notifier *ip_up_notifier = NULL; +struct notifier *ip_down_notifier = NULL; +#endif /* PPP_NOTIFY */ + +/* local vars */ +#if 0 /* moved to ppp_pcb */ +static int default_route_set[NUM_PPP]; /* Have set up a default route */ +static int proxy_arp_set[NUM_PPP]; /* Have created proxy arp entry */ +static int ipcp_is_up; /* have called np_up() */ +static int ipcp_is_open; /* haven't called np_finished() */ +static bool ask_for_local; /* request our address from peer */ +#endif /* moved to ppp_pcb */ +#if 0 /* UNUSED */ +static char vj_value[8]; /* string form of vj option value */ +static char netmask_str[20]; /* string form of netmask value */ +#endif /* UNUSED */ + +/* + * Callbacks for fsm code. 
(CI = Configuration Information) + */ +static void ipcp_resetci(fsm *f); /* Reset our CI */ +static int ipcp_cilen(fsm *f); /* Return length of our CI */ +static void ipcp_addci(fsm *f, u_char *ucp, int *lenp); /* Add our CI */ +static int ipcp_ackci(fsm *f, u_char *p, int len); /* Peer ack'd our CI */ +static int ipcp_nakci(fsm *f, u_char *p, int len, int treat_as_reject);/* Peer nak'd our CI */ +static int ipcp_rejci(fsm *f, u_char *p, int len); /* Peer rej'd our CI */ +static int ipcp_reqci(fsm *f, u_char *inp, int *len, int reject_if_disagree); /* Rcv CI */ +static void ipcp_up(fsm *f); /* We're UP */ +static void ipcp_down(fsm *f); /* We're DOWN */ +static void ipcp_finished(fsm *f); /* Don't need lower layer */ + +static const fsm_callbacks ipcp_callbacks = { /* IPCP callback routines */ + ipcp_resetci, /* Reset our Configuration Information */ + ipcp_cilen, /* Length of our Configuration Information */ + ipcp_addci, /* Add our Configuration Information */ + ipcp_ackci, /* ACK our Configuration Information */ + ipcp_nakci, /* NAK our Configuration Information */ + ipcp_rejci, /* Reject our Configuration Information */ + ipcp_reqci, /* Request peer's Configuration Information */ + ipcp_up, /* Called when fsm reaches OPENED state */ + ipcp_down, /* Called when fsm leaves OPENED state */ + NULL, /* Called when we want the lower layer up */ + ipcp_finished, /* Called when we want the lower layer down */ + NULL, /* Called when Protocol-Reject received */ + NULL, /* Retransmission is necessary */ + NULL, /* Called to handle protocol-specific codes */ + "IPCP" /* String name of protocol */ +}; + +/* + * Command-line options. + */ +#if PPP_OPTIONS +static int setvjslots (char **); +static int setdnsaddr (char **); +static int setwinsaddr (char **); +static int setnetmask (char **); +int setipaddr (char *, char **, int); + +static void printipaddr (option_t *, void (*)(void *, char *,...),void *); + +static option_t ipcp_option_list[] = { + { "noip", o_bool, &ipcp_protent.enabled_flag, + "Disable IP and IPCP" }, + { "-ip", o_bool, &ipcp_protent.enabled_flag, + "Disable IP and IPCP", OPT_ALIAS }, + + { "novj", o_bool, &ipcp_wantoptions[0].neg_vj, + "Disable VJ compression", OPT_A2CLR, &ipcp_allowoptions[0].neg_vj }, + { "-vj", o_bool, &ipcp_wantoptions[0].neg_vj, + "Disable VJ compression", OPT_ALIAS | OPT_A2CLR, + &ipcp_allowoptions[0].neg_vj }, + + { "novjccomp", o_bool, &ipcp_wantoptions[0].cflag, + "Disable VJ connection-ID compression", OPT_A2CLR, + &ipcp_allowoptions[0].cflag }, + { "-vjccomp", o_bool, &ipcp_wantoptions[0].cflag, + "Disable VJ connection-ID compression", OPT_ALIAS | OPT_A2CLR, + &ipcp_allowoptions[0].cflag }, + + { "vj-max-slots", o_special, (void *)setvjslots, + "Set maximum VJ header slots", + OPT_PRIO | OPT_A2STRVAL | OPT_STATIC, vj_value }, + + { "ipcp-accept-local", o_bool, &ipcp_wantoptions[0].accept_local, + "Accept peer's address for us", 1 }, + { "ipcp-accept-remote", o_bool, &ipcp_wantoptions[0].accept_remote, + "Accept peer's address for it", 1 }, + + { "ipparam", o_string, &ipparam, + "Set ip script parameter", OPT_PRIO }, + + { "noipdefault", o_bool, &disable_defaultip, + "Don't use name for default IP adrs", 1 }, + + { "ms-dns", 1, (void *)setdnsaddr, + "DNS address for the peer's use" }, + { "ms-wins", 1, (void *)setwinsaddr, + "Nameserver for SMB over TCP/IP for peer" }, + + { "ipcp-restart", o_int, &ipcp_fsm[0].timeouttime, + "Set timeout for IPCP", OPT_PRIO }, + { "ipcp-max-terminate", o_int, &ipcp_fsm[0].maxtermtransmits, + "Set max #xmits for 
term-reqs", OPT_PRIO }, + { "ipcp-max-configure", o_int, &ipcp_fsm[0].maxconfreqtransmits, + "Set max #xmits for conf-reqs", OPT_PRIO }, + { "ipcp-max-failure", o_int, &ipcp_fsm[0].maxnakloops, + "Set max #conf-naks for IPCP", OPT_PRIO }, + + { "defaultroute", o_bool, &ipcp_wantoptions[0].default_route, + "Add default route", OPT_ENABLE|1, &ipcp_allowoptions[0].default_route }, + { "nodefaultroute", o_bool, &ipcp_allowoptions[0].default_route, + "disable defaultroute option", OPT_A2CLR, + &ipcp_wantoptions[0].default_route }, + { "-defaultroute", o_bool, &ipcp_allowoptions[0].default_route, + "disable defaultroute option", OPT_ALIAS | OPT_A2CLR, + &ipcp_wantoptions[0].default_route }, + + { "replacedefaultroute", o_bool, + &ipcp_wantoptions[0].replace_default_route, + "Replace default route", 1 + }, + { "noreplacedefaultroute", o_bool, + &ipcp_allowoptions[0].replace_default_route, + "Never replace default route", OPT_A2COPY, + &ipcp_wantoptions[0].replace_default_route }, + { "proxyarp", o_bool, &ipcp_wantoptions[0].proxy_arp, + "Add proxy ARP entry", OPT_ENABLE|1, &ipcp_allowoptions[0].proxy_arp }, + { "noproxyarp", o_bool, &ipcp_allowoptions[0].proxy_arp, + "disable proxyarp option", OPT_A2CLR, + &ipcp_wantoptions[0].proxy_arp }, + { "-proxyarp", o_bool, &ipcp_allowoptions[0].proxy_arp, + "disable proxyarp option", OPT_ALIAS | OPT_A2CLR, + &ipcp_wantoptions[0].proxy_arp }, + + { "usepeerdns", o_bool, &usepeerdns, + "Ask peer for DNS address(es)", 1 }, + + { "netmask", o_special, (void *)setnetmask, + "set netmask", OPT_PRIO | OPT_A2STRVAL | OPT_STATIC, netmask_str }, + + { "ipcp-no-addresses", o_bool, &ipcp_wantoptions[0].old_addrs, + "Disable old-style IP-Addresses usage", OPT_A2CLR, + &ipcp_allowoptions[0].old_addrs }, + { "ipcp-no-address", o_bool, &ipcp_wantoptions[0].neg_addr, + "Disable IP-Address usage", OPT_A2CLR, + &ipcp_allowoptions[0].neg_addr }, + + { "noremoteip", o_bool, &noremoteip, + "Allow peer to have no IP address", 1 }, + + { "nosendip", o_bool, &ipcp_wantoptions[0].neg_addr, + "Don't send our IP address to peer", OPT_A2CLR, + &ipcp_wantoptions[0].old_addrs}, + + { "IP addresses", o_wild, (void *) &setipaddr, + "set local and remote IP addresses", + OPT_NOARG | OPT_A2PRINTER, (void *) &printipaddr }, + + { NULL } +}; +#endif /* PPP_OPTIONS */ + +/* + * Protocol entry points from main code. 
+ */ +static void ipcp_init(ppp_pcb *pcb); +static void ipcp_open(ppp_pcb *pcb); +static void ipcp_close(ppp_pcb *pcb, const char *reason); +static void ipcp_lowerup(ppp_pcb *pcb); +static void ipcp_lowerdown(ppp_pcb *pcb); +static void ipcp_input(ppp_pcb *pcb, u_char *p, int len); +static void ipcp_protrej(ppp_pcb *pcb); +#if PRINTPKT_SUPPORT +static int ipcp_printpkt(const u_char *p, int plen, + void (*printer) (void *, const char *, ...), void *arg); +#endif /* PRINTPKT_SUPPORT */ +#if PPP_OPTIONS +static void ip_check_options (void); +#endif /* PPP_OPTIONS */ +#if DEMAND_SUPPORT +static int ip_demand_conf (int); +static int ip_active_pkt (u_char *, int); +#endif /* DEMAND_SUPPORT */ +#if 0 /* UNUSED */ +static void create_resolv (u32_t, u32_t); +#endif /* UNUSED */ + +const struct protent ipcp_protent = { + PPP_IPCP, + ipcp_init, + ipcp_input, + ipcp_protrej, + ipcp_lowerup, + ipcp_lowerdown, + ipcp_open, + ipcp_close, +#if PRINTPKT_SUPPORT + ipcp_printpkt, +#endif /* PRINTPKT_SUPPORT */ +#if PPP_DATAINPUT + NULL, +#endif /* PPP_DATAINPUT */ +#if PRINTPKT_SUPPORT + "IPCP", + "IP", +#endif /* PRINTPKT_SUPPORT */ +#if PPP_OPTIONS + ipcp_option_list, + ip_check_options, +#endif /* PPP_OPTIONS */ +#if DEMAND_SUPPORT + ip_demand_conf, + ip_active_pkt +#endif /* DEMAND_SUPPORT */ +}; + +static void ipcp_clear_addrs(ppp_pcb *pcb, u32_t ouraddr, u32_t hisaddr, u8_t replacedefaultroute); + +/* + * Lengths of configuration options. + */ +#define CILEN_VOID 2 +#define CILEN_COMPRESS 4 /* min length for compression protocol opt. */ +#define CILEN_VJ 6 /* length for RFC1332 Van-Jacobson opt. */ +#define CILEN_ADDR 6 /* new-style single address option */ +#define CILEN_ADDRS 10 /* old-style dual address option */ + + +#define CODENAME(x) ((x) == CONFACK ? "ACK" : \ + (x) == CONFNAK ? "NAK" : "REJ") + +#if 0 /* UNUSED, already defined by lwIP */ +/* + * Make a string representation of a network IP address. + */ +char * +ip_ntoa(ipaddr) +u32_t ipaddr; +{ + static char b[64]; + + slprintf(b, sizeof(b), "%I", ipaddr); + return b; +} +#endif /* UNUSED, already defined by lwIP */ + +/* + * Option parsing. + */ +#if PPP_OPTIONS +/* + * setvjslots - set maximum number of connection slots for VJ compression + */ +static int +setvjslots(argv) + char **argv; +{ + int value; + + if (!int_option(*argv, &value)) + return 0; + + if (value < 2 || value > 16) { + option_error("vj-max-slots value must be between 2 and 16"); + return 0; + } + ipcp_wantoptions [0].maxslotindex = + ipcp_allowoptions[0].maxslotindex = value - 1; + slprintf(vj_value, sizeof(vj_value), "%d", value); + return 1; +} + +/* + * setdnsaddr - set the dns address(es) + */ +static int +setdnsaddr(argv) + char **argv; +{ + u32_t dns; + struct hostent *hp; + + dns = inet_addr(*argv); + if (dns == (u32_t) -1) { + if ((hp = gethostbyname(*argv)) == NULL) { + option_error("invalid address parameter '%s' for ms-dns option", + *argv); + return 0; + } + dns = *(u32_t *)hp->h_addr; + } + + /* We take the last 2 values given, the 2nd-last as the primary + and the last as the secondary. If only one is given it + becomes both primary and secondary. */ + if (ipcp_allowoptions[0].dnsaddr[1] == 0) + ipcp_allowoptions[0].dnsaddr[0] = dns; + else + ipcp_allowoptions[0].dnsaddr[0] = ipcp_allowoptions[0].dnsaddr[1]; + + /* always set the secondary address value. 
*/ + ipcp_allowoptions[0].dnsaddr[1] = dns; + + return (1); +} + +/* + * setwinsaddr - set the wins address(es) + * This is primrarly used with the Samba package under UNIX or for pointing + * the caller to the existing WINS server on a Windows NT platform. + */ +static int +setwinsaddr(argv) + char **argv; +{ + u32_t wins; + struct hostent *hp; + + wins = inet_addr(*argv); + if (wins == (u32_t) -1) { + if ((hp = gethostbyname(*argv)) == NULL) { + option_error("invalid address parameter '%s' for ms-wins option", + *argv); + return 0; + } + wins = *(u32_t *)hp->h_addr; + } + + /* We take the last 2 values given, the 2nd-last as the primary + and the last as the secondary. If only one is given it + becomes both primary and secondary. */ + if (ipcp_allowoptions[0].winsaddr[1] == 0) + ipcp_allowoptions[0].winsaddr[0] = wins; + else + ipcp_allowoptions[0].winsaddr[0] = ipcp_allowoptions[0].winsaddr[1]; + + /* always set the secondary address value. */ + ipcp_allowoptions[0].winsaddr[1] = wins; + + return (1); +} + +/* + * setipaddr - Set the IP address + * If doit is 0, the call is to check whether this option is + * potentially an IP address specification. + * Not static so that plugins can call it to set the addresses + */ +int +setipaddr(arg, argv, doit) + char *arg; + char **argv; + int doit; +{ + struct hostent *hp; + char *colon; + u32_t local, remote; + ipcp_options *wo = &ipcp_wantoptions[0]; + static int prio_local = 0, prio_remote = 0; + + /* + * IP address pair separated by ":". + */ + if ((colon = strchr(arg, ':')) == NULL) + return 0; + if (!doit) + return 1; + + /* + * If colon first character, then no local addr. + */ + if (colon != arg && option_priority >= prio_local) { + *colon = '\0'; + if ((local = inet_addr(arg)) == (u32_t) -1) { + if ((hp = gethostbyname(arg)) == NULL) { + option_error("unknown host: %s", arg); + return 0; + } + local = *(u32_t *)hp->h_addr; + } + if (bad_ip_adrs(local)) { + option_error("bad local IP address %s", ip_ntoa(local)); + return 0; + } + if (local != 0) + wo->ouraddr = local; + *colon = ':'; + prio_local = option_priority; + } + + /* + * If colon last character, then no remote addr. + */ + if (*++colon != '\0' && option_priority >= prio_remote) { + if ((remote = inet_addr(colon)) == (u32_t) -1) { + if ((hp = gethostbyname(colon)) == NULL) { + option_error("unknown host: %s", colon); + return 0; + } + remote = *(u32_t *)hp->h_addr; + if (remote_name[0] == 0) + strlcpy(remote_name, colon, sizeof(remote_name)); + } + if (bad_ip_adrs(remote)) { + option_error("bad remote IP address %s", ip_ntoa(remote)); + return 0; + } + if (remote != 0) + wo->hisaddr = remote; + prio_remote = option_priority; + } + + return 1; +} + +static void +printipaddr(opt, printer, arg) + option_t *opt; + void (*printer) (void *, char *, ...); + void *arg; +{ + ipcp_options *wo = &ipcp_wantoptions[0]; + + if (wo->ouraddr != 0) + printer(arg, "%I", wo->ouraddr); + printer(arg, ":"); + if (wo->hisaddr != 0) + printer(arg, "%I", wo->hisaddr); +} + +/* + * setnetmask - set the netmask to be used on the interface. + */ +static int +setnetmask(argv) + char **argv; +{ + u32_t mask; + int n; + char *p; + + /* + * Unfortunately, if we use inet_addr, we can't tell whether + * a result of all 1s is an error or a valid 255.255.255.255. 
+ */ + p = *argv; + n = parse_dotted_ip(p, &mask); + + mask = lwip_htonl(mask); + + if (n == 0 || p[n] != 0 || (netmask & ~mask) != 0) { + option_error("invalid netmask value '%s'", *argv); + return 0; + } + + netmask = mask; + slprintf(netmask_str, sizeof(netmask_str), "%I", mask); + + return (1); +} + +int +parse_dotted_ip(p, vp) + char *p; + u32_t *vp; +{ + int n; + u32_t v, b; + char *endp, *p0 = p; + + v = 0; + for (n = 3;; --n) { + b = strtoul(p, &endp, 0); + if (endp == p) + return 0; + if (b > 255) { + if (n < 3) + return 0; + /* accept e.g. 0xffffff00 */ + *vp = b; + return endp - p0; + } + v |= b << (n * 8); + p = endp; + if (n == 0) + break; + if (*p != '.') + return 0; + ++p; + } + *vp = v; + return p - p0; +} +#endif /* PPP_OPTIONS */ + +/* + * ipcp_init - Initialize IPCP. + */ +static void ipcp_init(ppp_pcb *pcb) { + fsm *f = &pcb->ipcp_fsm; + + ipcp_options *wo = &pcb->ipcp_wantoptions; + ipcp_options *ao = &pcb->ipcp_allowoptions; + + f->pcb = pcb; + f->protocol = PPP_IPCP; + f->callbacks = &ipcp_callbacks; + fsm_init(f); + + /* + * Some 3G modems use repeated IPCP NAKs as a way of stalling + * until they can contact a server on the network, so we increase + * the default number of NAKs we accept before we start treating + * them as rejects. + */ + f->maxnakloops = 100; + +#if 0 /* Not necessary, everything is cleared in ppp_new() */ + memset(wo, 0, sizeof(*wo)); + memset(ao, 0, sizeof(*ao)); +#endif /* 0 */ + + wo->neg_addr = wo->old_addrs = 1; +#if VJ_SUPPORT + wo->neg_vj = 1; + wo->vj_protocol = IPCP_VJ_COMP; + wo->maxslotindex = MAX_STATES - 1; /* really max index */ + wo->cflag = 1; +#endif /* VJ_SUPPORT */ + +#if 0 /* UNUSED */ + /* wanting default route by default */ + wo->default_route = 1; +#endif /* UNUSED */ + + ao->neg_addr = ao->old_addrs = 1; +#if VJ_SUPPORT + /* max slots and slot-id compression are currently hardwired in */ + /* ppp_if.c to 16 and 1, this needs to be changed (among other */ + /* things) gmc */ + + ao->neg_vj = 1; + ao->maxslotindex = MAX_STATES - 1; + ao->cflag = 1; +#endif /* #if VJ_SUPPORT */ + +#if 0 /* UNUSED */ + /* + * XXX These control whether the user may use the proxyarp + * and defaultroute options. + */ + ao->proxy_arp = 1; + ao->default_route = 1; +#endif /* UNUSED */ +} + + +/* + * ipcp_open - IPCP is allowed to come up. + */ +static void ipcp_open(ppp_pcb *pcb) { + fsm *f = &pcb->ipcp_fsm; + fsm_open(f); + pcb->ipcp_is_open = 1; +} + + +/* + * ipcp_close - Take IPCP down. + */ +static void ipcp_close(ppp_pcb *pcb, const char *reason) { + fsm *f = &pcb->ipcp_fsm; + fsm_close(f, reason); +} + + +/* + * ipcp_lowerup - The lower layer is up. + */ +static void ipcp_lowerup(ppp_pcb *pcb) { + fsm *f = &pcb->ipcp_fsm; + fsm_lowerup(f); +} + + +/* + * ipcp_lowerdown - The lower layer is down. + */ +static void ipcp_lowerdown(ppp_pcb *pcb) { + fsm *f = &pcb->ipcp_fsm; + fsm_lowerdown(f); +} + + +/* + * ipcp_input - Input IPCP packet. + */ +static void ipcp_input(ppp_pcb *pcb, u_char *p, int len) { + fsm *f = &pcb->ipcp_fsm; + fsm_input(f, p, len); +} + + +/* + * ipcp_protrej - A Protocol-Reject was received for IPCP. + * + * Pretend the lower layer went down, so we shut up. + */ +static void ipcp_protrej(ppp_pcb *pcb) { + fsm *f = &pcb->ipcp_fsm; + fsm_lowerdown(f); +} + + +/* + * ipcp_resetci - Reset our CI. + * Called by fsm_sconfreq, Send Configure Request. 
+ */ +static void ipcp_resetci(fsm *f) { + ppp_pcb *pcb = f->pcb; + ipcp_options *wo = &pcb->ipcp_wantoptions; + ipcp_options *go = &pcb->ipcp_gotoptions; + ipcp_options *ao = &pcb->ipcp_allowoptions; + + wo->req_addr = (wo->neg_addr || wo->old_addrs) && + (ao->neg_addr || ao->old_addrs); + if (wo->ouraddr == 0) + wo->accept_local = 1; + if (wo->hisaddr == 0) + wo->accept_remote = 1; +#if LWIP_DNS + wo->req_dns1 = wo->req_dns2 = pcb->settings.usepeerdns; /* Request DNS addresses from the peer */ +#endif /* LWIP_DNS */ + *go = *wo; + if (!pcb->ask_for_local) + go->ouraddr = 0; +#if 0 /* UNUSED */ + if (ip_choose_hook) { + ip_choose_hook(&wo->hisaddr); + if (wo->hisaddr) { + wo->accept_remote = 0; + } + } +#endif /* UNUSED */ + BZERO(&pcb->ipcp_hisoptions, sizeof(ipcp_options)); +} + + +/* + * ipcp_cilen - Return length of our CI. + * Called by fsm_sconfreq, Send Configure Request. + */ +static int ipcp_cilen(fsm *f) { + ppp_pcb *pcb = f->pcb; + ipcp_options *go = &pcb->ipcp_gotoptions; +#if VJ_SUPPORT + ipcp_options *wo = &pcb->ipcp_wantoptions; +#endif /* VJ_SUPPORT */ + ipcp_options *ho = &pcb->ipcp_hisoptions; + +#define LENCIADDRS(neg) (neg ? CILEN_ADDRS : 0) +#if VJ_SUPPORT +#define LENCIVJ(neg, old) (neg ? (old? CILEN_COMPRESS : CILEN_VJ) : 0) +#endif /* VJ_SUPPORT */ +#define LENCIADDR(neg) (neg ? CILEN_ADDR : 0) +#if LWIP_DNS +#define LENCIDNS(neg) LENCIADDR(neg) +#endif /* LWIP_DNS */ +#if 0 /* UNUSED - WINS */ +#define LENCIWINS(neg) LENCIADDR(neg) +#endif /* UNUSED - WINS */ + + /* + * First see if we want to change our options to the old + * forms because we have received old forms from the peer. + */ + if (go->neg_addr && go->old_addrs && !ho->neg_addr && ho->old_addrs) + go->neg_addr = 0; + +#if VJ_SUPPORT + if (wo->neg_vj && !go->neg_vj && !go->old_vj) { + /* try an older style of VJ negotiation */ + /* use the old style only if the peer did */ + if (ho->neg_vj && ho->old_vj) { + go->neg_vj = 1; + go->old_vj = 1; + go->vj_protocol = ho->vj_protocol; + } + } +#endif /* VJ_SUPPORT */ + + return (LENCIADDRS(!go->neg_addr && go->old_addrs) + +#if VJ_SUPPORT + LENCIVJ(go->neg_vj, go->old_vj) + +#endif /* VJ_SUPPORT */ + LENCIADDR(go->neg_addr) + +#if LWIP_DNS + LENCIDNS(go->req_dns1) + + LENCIDNS(go->req_dns2) + +#endif /* LWIP_DNS */ +#if 0 /* UNUSED - WINS */ + LENCIWINS(go->winsaddr[0]) + + LENCIWINS(go->winsaddr[1]) + +#endif /* UNUSED - WINS */ + 0); +} + + +/* + * ipcp_addci - Add our desired CIs to a packet. + * Called by fsm_sconfreq, Send Configure Request. + */ +static void ipcp_addci(fsm *f, u_char *ucp, int *lenp) { + ppp_pcb *pcb = f->pcb; + ipcp_options *go = &pcb->ipcp_gotoptions; + int len = *lenp; + +#define ADDCIADDRS(opt, neg, val1, val2) \ + if (neg) { \ + if (len >= CILEN_ADDRS) { \ + u32_t l; \ + PUTCHAR(opt, ucp); \ + PUTCHAR(CILEN_ADDRS, ucp); \ + l = lwip_ntohl(val1); \ + PUTLONG(l, ucp); \ + l = lwip_ntohl(val2); \ + PUTLONG(l, ucp); \ + len -= CILEN_ADDRS; \ + } else \ + go->old_addrs = 0; \ + } + +#if VJ_SUPPORT +#define ADDCIVJ(opt, neg, val, old, maxslotindex, cflag) \ + if (neg) { \ + int vjlen = old? 
CILEN_COMPRESS : CILEN_VJ; \ + if (len >= vjlen) { \ + PUTCHAR(opt, ucp); \ + PUTCHAR(vjlen, ucp); \ + PUTSHORT(val, ucp); \ + if (!old) { \ + PUTCHAR(maxslotindex, ucp); \ + PUTCHAR(cflag, ucp); \ + } \ + len -= vjlen; \ + } else \ + neg = 0; \ + } +#endif /* VJ_SUPPORT */ + +#define ADDCIADDR(opt, neg, val) \ + if (neg) { \ + if (len >= CILEN_ADDR) { \ + u32_t l; \ + PUTCHAR(opt, ucp); \ + PUTCHAR(CILEN_ADDR, ucp); \ + l = lwip_ntohl(val); \ + PUTLONG(l, ucp); \ + len -= CILEN_ADDR; \ + } else \ + neg = 0; \ + } + +#if LWIP_DNS +#define ADDCIDNS(opt, neg, addr) \ + if (neg) { \ + if (len >= CILEN_ADDR) { \ + u32_t l; \ + PUTCHAR(opt, ucp); \ + PUTCHAR(CILEN_ADDR, ucp); \ + l = lwip_ntohl(addr); \ + PUTLONG(l, ucp); \ + len -= CILEN_ADDR; \ + } else \ + neg = 0; \ + } +#endif /* LWIP_DNS */ + +#if 0 /* UNUSED - WINS */ +#define ADDCIWINS(opt, addr) \ + if (addr) { \ + if (len >= CILEN_ADDR) { \ + u32_t l; \ + PUTCHAR(opt, ucp); \ + PUTCHAR(CILEN_ADDR, ucp); \ + l = lwip_ntohl(addr); \ + PUTLONG(l, ucp); \ + len -= CILEN_ADDR; \ + } else \ + addr = 0; \ + } +#endif /* UNUSED - WINS */ + + ADDCIADDRS(CI_ADDRS, !go->neg_addr && go->old_addrs, go->ouraddr, + go->hisaddr); + +#if VJ_SUPPORT + ADDCIVJ(CI_COMPRESSTYPE, go->neg_vj, go->vj_protocol, go->old_vj, + go->maxslotindex, go->cflag); +#endif /* VJ_SUPPORT */ + + ADDCIADDR(CI_ADDR, go->neg_addr, go->ouraddr); + +#if LWIP_DNS + ADDCIDNS(CI_MS_DNS1, go->req_dns1, go->dnsaddr[0]); + + ADDCIDNS(CI_MS_DNS2, go->req_dns2, go->dnsaddr[1]); +#endif /* LWIP_DNS */ + +#if 0 /* UNUSED - WINS */ + ADDCIWINS(CI_MS_WINS1, go->winsaddr[0]); + + ADDCIWINS(CI_MS_WINS2, go->winsaddr[1]); +#endif /* UNUSED - WINS */ + + *lenp -= len; +} + + +/* + * ipcp_ackci - Ack our CIs. + * Called by fsm_rconfack, Receive Configure ACK. + * + * Returns: + * 0 - Ack was bad. + * 1 - Ack was good. + */ +static int ipcp_ackci(fsm *f, u_char *p, int len) { + ppp_pcb *pcb = f->pcb; + ipcp_options *go = &pcb->ipcp_gotoptions; + u_short cilen, citype; + u32_t cilong; +#if VJ_SUPPORT + u_short cishort; + u_char cimaxslotindex, cicflag; +#endif /* VJ_SUPPORT */ + + /* + * CIs must be in exactly the same order that we sent... + * Check packet length and CI length at each step. + * If we find any deviations, then this packet is bad. + */ + +#define ACKCIADDRS(opt, neg, val1, val2) \ + if (neg) { \ + u32_t l; \ + if ((len -= CILEN_ADDRS) < 0) \ + goto bad; \ + GETCHAR(citype, p); \ + GETCHAR(cilen, p); \ + if (cilen != CILEN_ADDRS || \ + citype != opt) \ + goto bad; \ + GETLONG(l, p); \ + cilong = lwip_htonl(l); \ + if (val1 != cilong) \ + goto bad; \ + GETLONG(l, p); \ + cilong = lwip_htonl(l); \ + if (val2 != cilong) \ + goto bad; \ + } + +#if VJ_SUPPORT +#define ACKCIVJ(opt, neg, val, old, maxslotindex, cflag) \ + if (neg) { \ + int vjlen = old? 
CILEN_COMPRESS : CILEN_VJ; \ + if ((len -= vjlen) < 0) \ + goto bad; \ + GETCHAR(citype, p); \ + GETCHAR(cilen, p); \ + if (cilen != vjlen || \ + citype != opt) \ + goto bad; \ + GETSHORT(cishort, p); \ + if (cishort != val) \ + goto bad; \ + if (!old) { \ + GETCHAR(cimaxslotindex, p); \ + if (cimaxslotindex != maxslotindex) \ + goto bad; \ + GETCHAR(cicflag, p); \ + if (cicflag != cflag) \ + goto bad; \ + } \ + } +#endif /* VJ_SUPPORT */ + +#define ACKCIADDR(opt, neg, val) \ + if (neg) { \ + u32_t l; \ + if ((len -= CILEN_ADDR) < 0) \ + goto bad; \ + GETCHAR(citype, p); \ + GETCHAR(cilen, p); \ + if (cilen != CILEN_ADDR || \ + citype != opt) \ + goto bad; \ + GETLONG(l, p); \ + cilong = lwip_htonl(l); \ + if (val != cilong) \ + goto bad; \ + } + +#if LWIP_DNS +#define ACKCIDNS(opt, neg, addr) \ + if (neg) { \ + u32_t l; \ + if ((len -= CILEN_ADDR) < 0) \ + goto bad; \ + GETCHAR(citype, p); \ + GETCHAR(cilen, p); \ + if (cilen != CILEN_ADDR || citype != opt) \ + goto bad; \ + GETLONG(l, p); \ + cilong = lwip_htonl(l); \ + if (addr != cilong) \ + goto bad; \ + } +#endif /* LWIP_DNS */ + +#if 0 /* UNUSED - WINS */ +#define ACKCIWINS(opt, addr) \ + if (addr) { \ + u32_t l; \ + if ((len -= CILEN_ADDR) < 0) \ + goto bad; \ + GETCHAR(citype, p); \ + GETCHAR(cilen, p); \ + if (cilen != CILEN_ADDR || citype != opt) \ + goto bad; \ + GETLONG(l, p); \ + cilong = lwip_htonl(l); \ + if (addr != cilong) \ + goto bad; \ + } +#endif /* UNUSED - WINS */ + + ACKCIADDRS(CI_ADDRS, !go->neg_addr && go->old_addrs, go->ouraddr, + go->hisaddr); + +#if VJ_SUPPORT + ACKCIVJ(CI_COMPRESSTYPE, go->neg_vj, go->vj_protocol, go->old_vj, + go->maxslotindex, go->cflag); +#endif /* VJ_SUPPORT */ + + ACKCIADDR(CI_ADDR, go->neg_addr, go->ouraddr); + +#if LWIP_DNS + ACKCIDNS(CI_MS_DNS1, go->req_dns1, go->dnsaddr[0]); + + ACKCIDNS(CI_MS_DNS2, go->req_dns2, go->dnsaddr[1]); +#endif /* LWIP_DNS */ + +#if 0 /* UNUSED - WINS */ + ACKCIWINS(CI_MS_WINS1, go->winsaddr[0]); + + ACKCIWINS(CI_MS_WINS2, go->winsaddr[1]); +#endif /* UNUSED - WINS */ + + /* + * If there are any remaining CIs, then this packet is bad. + */ + if (len != 0) + goto bad; + return (1); + +bad: + IPCPDEBUG(("ipcp_ackci: received bad Ack!")); + return (0); +} + +/* + * ipcp_nakci - Peer has sent a NAK for some of our CIs. + * This should not modify any state if the Nak is bad + * or if IPCP is in the OPENED state. + * Calback from fsm_rconfnakrej - Receive Configure-Nak or Configure-Reject. + * + * Returns: + * 0 - Nak was bad. + * 1 - Nak was good. + */ +static int ipcp_nakci(fsm *f, u_char *p, int len, int treat_as_reject) { + ppp_pcb *pcb = f->pcb; + ipcp_options *go = &pcb->ipcp_gotoptions; + u_char citype, cilen, *next; +#if VJ_SUPPORT + u_char cimaxslotindex, cicflag; + u_short cishort; +#endif /* VJ_SUPPORT */ + u32_t ciaddr1, ciaddr2, l; +#if LWIP_DNS + u32_t cidnsaddr; +#endif /* LWIP_DNS */ + ipcp_options no; /* options we've seen Naks for */ + ipcp_options try_; /* options to request next time */ + + BZERO(&no, sizeof(no)); + try_ = *go; + + /* + * Any Nak'd CIs must be in exactly the same order that we sent. + * Check packet length and CI length at each step. + * If we find any deviations, then this packet is bad. 
+ */ +#define NAKCIADDRS(opt, neg, code) \ + if ((neg) && \ + (cilen = p[1]) == CILEN_ADDRS && \ + len >= cilen && \ + p[0] == opt) { \ + len -= cilen; \ + INCPTR(2, p); \ + GETLONG(l, p); \ + ciaddr1 = lwip_htonl(l); \ + GETLONG(l, p); \ + ciaddr2 = lwip_htonl(l); \ + no.old_addrs = 1; \ + code \ + } + +#if VJ_SUPPORT +#define NAKCIVJ(opt, neg, code) \ + if (go->neg && \ + ((cilen = p[1]) == CILEN_COMPRESS || cilen == CILEN_VJ) && \ + len >= cilen && \ + p[0] == opt) { \ + len -= cilen; \ + INCPTR(2, p); \ + GETSHORT(cishort, p); \ + no.neg = 1; \ + code \ + } +#endif /* VJ_SUPPORT */ + +#define NAKCIADDR(opt, neg, code) \ + if (go->neg && \ + (cilen = p[1]) == CILEN_ADDR && \ + len >= cilen && \ + p[0] == opt) { \ + len -= cilen; \ + INCPTR(2, p); \ + GETLONG(l, p); \ + ciaddr1 = lwip_htonl(l); \ + no.neg = 1; \ + code \ + } + +#if LWIP_DNS +#define NAKCIDNS(opt, neg, code) \ + if (go->neg && \ + ((cilen = p[1]) == CILEN_ADDR) && \ + len >= cilen && \ + p[0] == opt) { \ + len -= cilen; \ + INCPTR(2, p); \ + GETLONG(l, p); \ + cidnsaddr = lwip_htonl(l); \ + no.neg = 1; \ + code \ + } +#endif /* LWIP_DNS */ + + /* + * Accept the peer's idea of {our,his} address, if different + * from our idea, only if the accept_{local,remote} flag is set. + */ + NAKCIADDRS(CI_ADDRS, !go->neg_addr && go->old_addrs, + if (treat_as_reject) { + try_.old_addrs = 0; + } else { + if (go->accept_local && ciaddr1) { + /* take his idea of our address */ + try_.ouraddr = ciaddr1; + } + if (go->accept_remote && ciaddr2) { + /* take his idea of his address */ + try_.hisaddr = ciaddr2; + } + } + ); + +#if VJ_SUPPORT + /* + * Accept the peer's value of maxslotindex provided that it + * is less than what we asked for. Turn off slot-ID compression + * if the peer wants. Send old-style compress-type option if + * the peer wants. + */ + NAKCIVJ(CI_COMPRESSTYPE, neg_vj, + if (treat_as_reject) { + try_.neg_vj = 0; + } else if (cilen == CILEN_VJ) { + GETCHAR(cimaxslotindex, p); + GETCHAR(cicflag, p); + if (cishort == IPCP_VJ_COMP) { + try_.old_vj = 0; + if (cimaxslotindex < go->maxslotindex) + try_.maxslotindex = cimaxslotindex; + if (!cicflag) + try_.cflag = 0; + } else { + try_.neg_vj = 0; + } + } else { + if (cishort == IPCP_VJ_COMP || cishort == IPCP_VJ_COMP_OLD) { + try_.old_vj = 1; + try_.vj_protocol = cishort; + } else { + try_.neg_vj = 0; + } + } + ); +#endif /* VJ_SUPPORT */ + + NAKCIADDR(CI_ADDR, neg_addr, + if (treat_as_reject) { + try_.neg_addr = 0; + try_.old_addrs = 0; + } else if (go->accept_local && ciaddr1) { + /* take his idea of our address */ + try_.ouraddr = ciaddr1; + } + ); + +#if LWIP_DNS + NAKCIDNS(CI_MS_DNS1, req_dns1, + if (treat_as_reject) { + try_.req_dns1 = 0; + } else { + try_.dnsaddr[0] = cidnsaddr; + } + ); + + NAKCIDNS(CI_MS_DNS2, req_dns2, + if (treat_as_reject) { + try_.req_dns2 = 0; + } else { + try_.dnsaddr[1] = cidnsaddr; + } + ); +#endif /* #if LWIP_DNS */ + + /* + * There may be remaining CIs, if the peer is requesting negotiation + * on an option that we didn't include in our request packet. + * If they want to negotiate about IP addresses, we comply. + * If they want us to ask for compression, we refuse. + * If they want us to ask for ms-dns, we do that, since some + * peers get huffy if we don't. 
+ */ + while (len >= CILEN_VOID) { + GETCHAR(citype, p); + GETCHAR(cilen, p); + if ( cilen < CILEN_VOID || (len -= cilen) < 0 ) + goto bad; + next = p + cilen - 2; + + switch (citype) { +#if VJ_SUPPORT + case CI_COMPRESSTYPE: + if (go->neg_vj || no.neg_vj || + (cilen != CILEN_VJ && cilen != CILEN_COMPRESS)) + goto bad; + no.neg_vj = 1; + break; +#endif /* VJ_SUPPORT */ + case CI_ADDRS: + if ((!go->neg_addr && go->old_addrs) || no.old_addrs + || cilen != CILEN_ADDRS) + goto bad; + try_.neg_addr = 0; + GETLONG(l, p); + ciaddr1 = lwip_htonl(l); + if (ciaddr1 && go->accept_local) + try_.ouraddr = ciaddr1; + GETLONG(l, p); + ciaddr2 = lwip_htonl(l); + if (ciaddr2 && go->accept_remote) + try_.hisaddr = ciaddr2; + no.old_addrs = 1; + break; + case CI_ADDR: + if (go->neg_addr || no.neg_addr || cilen != CILEN_ADDR) + goto bad; + try_.old_addrs = 0; + GETLONG(l, p); + ciaddr1 = lwip_htonl(l); + if (ciaddr1 && go->accept_local) + try_.ouraddr = ciaddr1; + if (try_.ouraddr != 0) + try_.neg_addr = 1; + no.neg_addr = 1; + break; +#if LWIP_DNS + case CI_MS_DNS1: + if (go->req_dns1 || no.req_dns1 || cilen != CILEN_ADDR) + goto bad; + GETLONG(l, p); + try_.dnsaddr[0] = lwip_htonl(l); + try_.req_dns1 = 1; + no.req_dns1 = 1; + break; + case CI_MS_DNS2: + if (go->req_dns2 || no.req_dns2 || cilen != CILEN_ADDR) + goto bad; + GETLONG(l, p); + try_.dnsaddr[1] = lwip_htonl(l); + try_.req_dns2 = 1; + no.req_dns2 = 1; + break; +#endif /* LWIP_DNS */ +#if 0 /* UNUSED - WINS */ + case CI_MS_WINS1: + case CI_MS_WINS2: + if (cilen != CILEN_ADDR) + goto bad; + GETLONG(l, p); + ciaddr1 = lwip_htonl(l); + if (ciaddr1) + try_.winsaddr[citype == CI_MS_WINS2] = ciaddr1; + break; +#endif /* UNUSED - WINS */ + default: + break; + } + p = next; + } + + /* + * OK, the Nak is good. Now we can update state. + * If there are any remaining options, we ignore them. + */ + if (f->state != PPP_FSM_OPENED) + *go = try_; + + return 1; + +bad: + IPCPDEBUG(("ipcp_nakci: received bad Nak!")); + return 0; +} + + +/* + * ipcp_rejci - Reject some of our CIs. + * Callback from fsm_rconfnakrej. + */ +static int ipcp_rejci(fsm *f, u_char *p, int len) { + ppp_pcb *pcb = f->pcb; + ipcp_options *go = &pcb->ipcp_gotoptions; + u_char cilen; +#if VJ_SUPPORT + u_char cimaxslotindex, ciflag; + u_short cishort; +#endif /* VJ_SUPPORT */ + u32_t cilong; + ipcp_options try_; /* options to request next time */ + + try_ = *go; + /* + * Any Rejected CIs must be in exactly the same order that we sent. + * Check packet length and CI length at each step. + * If we find any deviations, then this packet is bad. + */ +#define REJCIADDRS(opt, neg, val1, val2) \ + if ((neg) && \ + (cilen = p[1]) == CILEN_ADDRS && \ + len >= cilen && \ + p[0] == opt) { \ + u32_t l; \ + len -= cilen; \ + INCPTR(2, p); \ + GETLONG(l, p); \ + cilong = lwip_htonl(l); \ + /* Check rejected value. */ \ + if (cilong != val1) \ + goto bad; \ + GETLONG(l, p); \ + cilong = lwip_htonl(l); \ + /* Check rejected value. */ \ + if (cilong != val2) \ + goto bad; \ + try_.old_addrs = 0; \ + } + +#if VJ_SUPPORT +#define REJCIVJ(opt, neg, val, old, maxslot, cflag) \ + if (go->neg && \ + p[1] == (old? CILEN_COMPRESS : CILEN_VJ) && \ + len >= p[1] && \ + p[0] == opt) { \ + len -= p[1]; \ + INCPTR(2, p); \ + GETSHORT(cishort, p); \ + /* Check rejected value. 
*/ \ + if (cishort != val) \ + goto bad; \ + if (!old) { \ + GETCHAR(cimaxslotindex, p); \ + if (cimaxslotindex != maxslot) \ + goto bad; \ + GETCHAR(ciflag, p); \ + if (ciflag != cflag) \ + goto bad; \ + } \ + try_.neg = 0; \ + } +#endif /* VJ_SUPPORT */ + +#define REJCIADDR(opt, neg, val) \ + if (go->neg && \ + (cilen = p[1]) == CILEN_ADDR && \ + len >= cilen && \ + p[0] == opt) { \ + u32_t l; \ + len -= cilen; \ + INCPTR(2, p); \ + GETLONG(l, p); \ + cilong = lwip_htonl(l); \ + /* Check rejected value. */ \ + if (cilong != val) \ + goto bad; \ + try_.neg = 0; \ + } + +#if LWIP_DNS +#define REJCIDNS(opt, neg, dnsaddr) \ + if (go->neg && \ + ((cilen = p[1]) == CILEN_ADDR) && \ + len >= cilen && \ + p[0] == opt) { \ + u32_t l; \ + len -= cilen; \ + INCPTR(2, p); \ + GETLONG(l, p); \ + cilong = lwip_htonl(l); \ + /* Check rejected value. */ \ + if (cilong != dnsaddr) \ + goto bad; \ + try_.neg = 0; \ + } +#endif /* LWIP_DNS */ + +#if 0 /* UNUSED - WINS */ +#define REJCIWINS(opt, addr) \ + if (addr && \ + ((cilen = p[1]) == CILEN_ADDR) && \ + len >= cilen && \ + p[0] == opt) { \ + u32_t l; \ + len -= cilen; \ + INCPTR(2, p); \ + GETLONG(l, p); \ + cilong = lwip_htonl(l); \ + /* Check rejected value. */ \ + if (cilong != addr) \ + goto bad; \ + try_.winsaddr[opt == CI_MS_WINS2] = 0; \ + } +#endif /* UNUSED - WINS */ + + REJCIADDRS(CI_ADDRS, !go->neg_addr && go->old_addrs, + go->ouraddr, go->hisaddr); + +#if VJ_SUPPORT + REJCIVJ(CI_COMPRESSTYPE, neg_vj, go->vj_protocol, go->old_vj, + go->maxslotindex, go->cflag); +#endif /* VJ_SUPPORT */ + + REJCIADDR(CI_ADDR, neg_addr, go->ouraddr); + +#if LWIP_DNS + REJCIDNS(CI_MS_DNS1, req_dns1, go->dnsaddr[0]); + + REJCIDNS(CI_MS_DNS2, req_dns2, go->dnsaddr[1]); +#endif /* LWIP_DNS */ + +#if 0 /* UNUSED - WINS */ + REJCIWINS(CI_MS_WINS1, go->winsaddr[0]); + + REJCIWINS(CI_MS_WINS2, go->winsaddr[1]); +#endif /* UNUSED - WINS */ + + /* + * If there are any remaining CIs, then this packet is bad. + */ + if (len != 0) + goto bad; + /* + * Now we can update state. + */ + if (f->state != PPP_FSM_OPENED) + *go = try_; + return 1; + +bad: + IPCPDEBUG(("ipcp_rejci: received bad Reject!")); + return 0; +} + + +/* + * ipcp_reqci - Check the peer's requested CIs and send appropriate response. + * Callback from fsm_rconfreq, Receive Configure Request + * + * Returns: CONFACK, CONFNAK or CONFREJ and input packet modified + * appropriately. If reject_if_disagree is non-zero, doesn't return + * CONFNAK; returns CONFREJ if it can't return CONFACK. + * + * inp = Requested CIs + * len = Length of requested CIs + */ +static int ipcp_reqci(fsm *f, u_char *inp, int *len, int reject_if_disagree) { + ppp_pcb *pcb = f->pcb; + ipcp_options *wo = &pcb->ipcp_wantoptions; + ipcp_options *ho = &pcb->ipcp_hisoptions; + ipcp_options *ao = &pcb->ipcp_allowoptions; + u_char *cip, *next; /* Pointer to current and next CIs */ + u_short cilen, citype; /* Parsed len, type */ +#if VJ_SUPPORT + u_short cishort; /* Parsed short value */ +#endif /* VJ_SUPPORT */ + u32_t tl, ciaddr1, ciaddr2;/* Parsed address values */ + int rc = CONFACK; /* Final packet return code */ + int orc; /* Individual option return code */ + u_char *p; /* Pointer to next char to parse */ + u_char *ucp = inp; /* Pointer to current output char */ + int l = *len; /* Length left */ +#if VJ_SUPPORT + u_char maxslotindex, cflag; +#endif /* VJ_SUPPORT */ +#if LWIP_DNS + int d; +#endif /* LWIP_DNS */ + + /* + * Reset all his options. + */ + BZERO(ho, sizeof(*ho)); + + /* + * Process all his options. 
+ */ + next = inp; + while (l) { + orc = CONFACK; /* Assume success */ + cip = p = next; /* Remember begining of CI */ + if (l < 2 || /* Not enough data for CI header or */ + p[1] < 2 || /* CI length too small or */ + p[1] > l) { /* CI length too big? */ + IPCPDEBUG(("ipcp_reqci: bad CI length!")); + orc = CONFREJ; /* Reject bad CI */ + cilen = l; /* Reject till end of packet */ + l = 0; /* Don't loop again */ + goto endswitch; + } + GETCHAR(citype, p); /* Parse CI type */ + GETCHAR(cilen, p); /* Parse CI length */ + l -= cilen; /* Adjust remaining length */ + next += cilen; /* Step to next CI */ + + switch (citype) { /* Check CI type */ + case CI_ADDRS: + if (!ao->old_addrs || ho->neg_addr || + cilen != CILEN_ADDRS) { /* Check CI length */ + orc = CONFREJ; /* Reject CI */ + break; + } + + /* + * If he has no address, or if we both have his address but + * disagree about it, then NAK it with our idea. + * In particular, if we don't know his address, but he does, + * then accept it. + */ + GETLONG(tl, p); /* Parse source address (his) */ + ciaddr1 = lwip_htonl(tl); + if (ciaddr1 != wo->hisaddr + && (ciaddr1 == 0 || !wo->accept_remote)) { + orc = CONFNAK; + if (!reject_if_disagree) { + DECPTR(sizeof(u32_t), p); + tl = lwip_ntohl(wo->hisaddr); + PUTLONG(tl, p); + } + } else if (ciaddr1 == 0 && wo->hisaddr == 0) { + /* + * If neither we nor he knows his address, reject the option. + */ + orc = CONFREJ; + wo->req_addr = 0; /* don't NAK with 0.0.0.0 later */ + break; + } + + /* + * If he doesn't know our address, or if we both have our address + * but disagree about it, then NAK it with our idea. + */ + GETLONG(tl, p); /* Parse desination address (ours) */ + ciaddr2 = lwip_htonl(tl); + if (ciaddr2 != wo->ouraddr) { + if (ciaddr2 == 0 || !wo->accept_local) { + orc = CONFNAK; + if (!reject_if_disagree) { + DECPTR(sizeof(u32_t), p); + tl = lwip_ntohl(wo->ouraddr); + PUTLONG(tl, p); + } + } else { + wo->ouraddr = ciaddr2; /* accept peer's idea */ + } + } + + ho->old_addrs = 1; + ho->hisaddr = ciaddr1; + ho->ouraddr = ciaddr2; + break; + + case CI_ADDR: + if (!ao->neg_addr || ho->old_addrs || + cilen != CILEN_ADDR) { /* Check CI length */ + orc = CONFREJ; /* Reject CI */ + break; + } + + /* + * If he has no address, or if we both have his address but + * disagree about it, then NAK it with our idea. + * In particular, if we don't know his address, but he does, + * then accept it. + */ + GETLONG(tl, p); /* Parse source address (his) */ + ciaddr1 = lwip_htonl(tl); + if (ciaddr1 != wo->hisaddr + && (ciaddr1 == 0 || !wo->accept_remote)) { + orc = CONFNAK; + if (!reject_if_disagree) { + DECPTR(sizeof(u32_t), p); + tl = lwip_ntohl(wo->hisaddr); + PUTLONG(tl, p); + } + } else if (ciaddr1 == 0 && wo->hisaddr == 0) { + /* + * Don't ACK an address of 0.0.0.0 - reject it instead. 
+ */ + orc = CONFREJ; + wo->req_addr = 0; /* don't NAK with 0.0.0.0 later */ + break; + } + + ho->neg_addr = 1; + ho->hisaddr = ciaddr1; + break; + +#if LWIP_DNS + case CI_MS_DNS1: + case CI_MS_DNS2: + /* Microsoft primary or secondary DNS request */ + d = citype == CI_MS_DNS2; + + /* If we do not have a DNS address then we cannot send it */ + if (ao->dnsaddr[d] == 0 || + cilen != CILEN_ADDR) { /* Check CI length */ + orc = CONFREJ; /* Reject CI */ + break; + } + GETLONG(tl, p); + if (lwip_htonl(tl) != ao->dnsaddr[d]) { + DECPTR(sizeof(u32_t), p); + tl = lwip_ntohl(ao->dnsaddr[d]); + PUTLONG(tl, p); + orc = CONFNAK; + } + break; +#endif /* LWIP_DNS */ + +#if 0 /* UNUSED - WINS */ + case CI_MS_WINS1: + case CI_MS_WINS2: + /* Microsoft primary or secondary WINS request */ + d = citype == CI_MS_WINS2; + + /* If we do not have a DNS address then we cannot send it */ + if (ao->winsaddr[d] == 0 || + cilen != CILEN_ADDR) { /* Check CI length */ + orc = CONFREJ; /* Reject CI */ + break; + } + GETLONG(tl, p); + if (lwip_htonl(tl) != ao->winsaddr[d]) { + DECPTR(sizeof(u32_t), p); + tl = lwip_ntohl(ao->winsaddr[d]); + PUTLONG(tl, p); + orc = CONFNAK; + } + break; +#endif /* UNUSED - WINS */ + +#if VJ_SUPPORT + case CI_COMPRESSTYPE: + if (!ao->neg_vj || + (cilen != CILEN_VJ && cilen != CILEN_COMPRESS)) { + orc = CONFREJ; + break; + } + GETSHORT(cishort, p); + + if (!(cishort == IPCP_VJ_COMP || + (cishort == IPCP_VJ_COMP_OLD && cilen == CILEN_COMPRESS))) { + orc = CONFREJ; + break; + } + + ho->neg_vj = 1; + ho->vj_protocol = cishort; + if (cilen == CILEN_VJ) { + GETCHAR(maxslotindex, p); + if (maxslotindex > ao->maxslotindex) { + orc = CONFNAK; + if (!reject_if_disagree){ + DECPTR(1, p); + PUTCHAR(ao->maxslotindex, p); + } + } + GETCHAR(cflag, p); + if (cflag && !ao->cflag) { + orc = CONFNAK; + if (!reject_if_disagree){ + DECPTR(1, p); + PUTCHAR(wo->cflag, p); + } + } + ho->maxslotindex = maxslotindex; + ho->cflag = cflag; + } else { + ho->old_vj = 1; + ho->maxslotindex = MAX_STATES - 1; + ho->cflag = 1; + } + break; +#endif /* VJ_SUPPORT */ + + default: + orc = CONFREJ; + break; + } +endswitch: + if (orc == CONFACK && /* Good CI */ + rc != CONFACK) /* but prior CI wasnt? */ + continue; /* Don't send this one */ + + if (orc == CONFNAK) { /* Nak this CI? */ + if (reject_if_disagree) /* Getting fed up with sending NAKs? */ + orc = CONFREJ; /* Get tough if so */ + else { + if (rc == CONFREJ) /* Rejecting prior CI? */ + continue; /* Don't send this one */ + if (rc == CONFACK) { /* Ack'd all prior CIs? */ + rc = CONFNAK; /* Not anymore... */ + ucp = inp; /* Backup */ + } + } + } + + if (orc == CONFREJ && /* Reject this CI */ + rc != CONFREJ) { /* but no prior ones? */ + rc = CONFREJ; + ucp = inp; /* Backup */ + } + + /* Need to move CI? */ + if (ucp != cip) + MEMCPY(ucp, cip, cilen); /* Move it */ + + /* Update output pointer */ + INCPTR(cilen, ucp); + } + + /* + * If we aren't rejecting this packet, and we want to negotiate + * their address, and they didn't send their address, then we + * send a NAK with a CI_ADDR option appended. We assume the + * input buffer is long enough that we can append the extra + * option safely. 
+ */ + if (rc != CONFREJ && !ho->neg_addr && !ho->old_addrs && + wo->req_addr && !reject_if_disagree && !pcb->settings.noremoteip) { + if (rc == CONFACK) { + rc = CONFNAK; + ucp = inp; /* reset pointer */ + wo->req_addr = 0; /* don't ask again */ + } + PUTCHAR(CI_ADDR, ucp); + PUTCHAR(CILEN_ADDR, ucp); + tl = lwip_ntohl(wo->hisaddr); + PUTLONG(tl, ucp); + } + + *len = ucp - inp; /* Compute output length */ + IPCPDEBUG(("ipcp: returning Configure-%s", CODENAME(rc))); + return (rc); /* Return final code */ +} + + +#if 0 /* UNUSED */ +/* + * ip_check_options - check that any IP-related options are OK, + * and assign appropriate defaults. + */ +static void +ip_check_options() +{ + struct hostent *hp; + u32_t local; + ipcp_options *wo = &ipcp_wantoptions[0]; + + /* + * Default our local IP address based on our hostname. + * If local IP address already given, don't bother. + */ + if (wo->ouraddr == 0 && !disable_defaultip) { + /* + * Look up our hostname (possibly with domain name appended) + * and take the first IP address as our local IP address. + * If there isn't an IP address for our hostname, too bad. + */ + wo->accept_local = 1; /* don't insist on this default value */ + if ((hp = gethostbyname(hostname)) != NULL) { + local = *(u32_t *)hp->h_addr; + if (local != 0 && !bad_ip_adrs(local)) + wo->ouraddr = local; + } + } + ask_for_local = wo->ouraddr != 0 || !disable_defaultip; +} +#endif /* UNUSED */ + +#if DEMAND_SUPPORT +/* + * ip_demand_conf - configure the interface as though + * IPCP were up, for use with dial-on-demand. + */ +static int +ip_demand_conf(u) + int u; +{ + ppp_pcb *pcb = &ppp_pcb_list[u]; + ipcp_options *wo = &ipcp_wantoptions[u]; + + if (wo->hisaddr == 0 && !pcb->settings.noremoteip) { + /* make up an arbitrary address for the peer */ + wo->hisaddr = lwip_htonl(0x0a707070 + ifunit); + wo->accept_remote = 1; + } + if (wo->ouraddr == 0) { + /* make up an arbitrary address for us */ + wo->ouraddr = lwip_htonl(0x0a404040 + ifunit); + wo->accept_local = 1; + ask_for_local = 0; /* don't tell the peer this address */ + } + if (!sifaddr(pcb, wo->ouraddr, wo->hisaddr, get_mask(wo->ouraddr))) + return 0; + if (!sifup(pcb)) + return 0; + if (!sifnpmode(pcb, PPP_IP, NPMODE_QUEUE)) + return 0; +#if 0 /* UNUSED */ + if (wo->default_route) + if (sifdefaultroute(pcb, wo->ouraddr, wo->hisaddr, + wo->replace_default_route)) + default_route_set[u] = 1; +#endif /* UNUSED */ +#if 0 /* UNUSED - PROXY ARP */ + if (wo->proxy_arp) + if (sifproxyarp(pcb, wo->hisaddr)) + proxy_arp_set[u] = 1; +#endif /* UNUSED - PROXY ARP */ + + ppp_notice("local IP address %I", wo->ouraddr); + if (wo->hisaddr) + ppp_notice("remote IP address %I", wo->hisaddr); + + return 1; +} +#endif /* DEMAND_SUPPORT */ + +/* + * ipcp_up - IPCP has come UP. + * + * Configure the IP network interface appropriately and bring it up. + */ +static void ipcp_up(fsm *f) { + ppp_pcb *pcb = f->pcb; + u32_t mask; + ipcp_options *ho = &pcb->ipcp_hisoptions; + ipcp_options *go = &pcb->ipcp_gotoptions; + ipcp_options *wo = &pcb->ipcp_wantoptions; + + IPCPDEBUG(("ipcp: up")); + + /* + * We must have a non-zero IP address for both ends of the link. 
+ */ + if (!ho->neg_addr && !ho->old_addrs) + ho->hisaddr = wo->hisaddr; + + if (!(go->neg_addr || go->old_addrs) && (wo->neg_addr || wo->old_addrs) + && wo->ouraddr != 0) { + ppp_error("Peer refused to agree to our IP address"); + ipcp_close(f->pcb, "Refused our IP address"); + return; + } + if (go->ouraddr == 0) { + ppp_error("Could not determine local IP address"); + ipcp_close(f->pcb, "Could not determine local IP address"); + return; + } + if (ho->hisaddr == 0 && !pcb->settings.noremoteip) { + ho->hisaddr = lwip_htonl(0x0a404040); + ppp_warn("Could not determine remote IP address: defaulting to %I", + ho->hisaddr); + } +#if 0 /* UNUSED */ + script_setenv("IPLOCAL", ip_ntoa(go->ouraddr), 0); + if (ho->hisaddr != 0) + script_setenv("IPREMOTE", ip_ntoa(ho->hisaddr), 1); +#endif /* UNUSED */ + +#if LWIP_DNS + if (!go->req_dns1) + go->dnsaddr[0] = 0; + if (!go->req_dns2) + go->dnsaddr[1] = 0; +#if 0 /* UNUSED */ + if (go->dnsaddr[0]) + script_setenv("DNS1", ip_ntoa(go->dnsaddr[0]), 0); + if (go->dnsaddr[1]) + script_setenv("DNS2", ip_ntoa(go->dnsaddr[1]), 0); +#endif /* UNUSED */ + if (pcb->settings.usepeerdns && (go->dnsaddr[0] || go->dnsaddr[1])) { + sdns(pcb, go->dnsaddr[0], go->dnsaddr[1]); +#if 0 /* UNUSED */ + script_setenv("USEPEERDNS", "1", 0); + create_resolv(go->dnsaddr[0], go->dnsaddr[1]); +#endif /* UNUSED */ + } +#endif /* LWIP_DNS */ + + /* + * Check that the peer is allowed to use the IP address it wants. + */ + if (ho->hisaddr != 0) { + u32_t addr = lwip_ntohl(ho->hisaddr); + if ((addr >> IP_CLASSA_NSHIFT) == IP_LOOPBACKNET + || IP_MULTICAST(addr) || IP_BADCLASS(addr) + /* + * For now, consider that PPP in server mode with peer required + * to authenticate must provide the peer IP address, reject any + * IP address wanted by peer different than the one we wanted. + */ +#if PPP_SERVER && PPP_AUTH_SUPPORT + || (pcb->settings.auth_required && wo->hisaddr != ho->hisaddr) +#endif /* PPP_SERVER && PPP_AUTH_SUPPORT */ + ) { + ppp_error("Peer is not authorized to use remote address %I", ho->hisaddr); + ipcp_close(pcb, "Unauthorized remote IP address"); + return; + } + } +#if 0 /* Unused */ + /* Upstream checking code */ + if (ho->hisaddr != 0 && !auth_ip_addr(f->unit, ho->hisaddr)) { + ppp_error("Peer is not authorized to use remote address %I", ho->hisaddr); + ipcp_close(f->unit, "Unauthorized remote IP address"); + return; + } +#endif /* Unused */ + +#if VJ_SUPPORT + /* set tcp compression */ + sifvjcomp(pcb, ho->neg_vj, ho->cflag, ho->maxslotindex); +#endif /* VJ_SUPPORT */ + +#if DEMAND_SUPPORT + /* + * If we are doing dial-on-demand, the interface is already + * configured, so we put out any saved-up packets, then set the + * interface to pass IP packets. 
+ */ + if (demand) { + if (go->ouraddr != wo->ouraddr || ho->hisaddr != wo->hisaddr) { + ipcp_clear_addrs(f->unit, wo->ouraddr, wo->hisaddr, + wo->replace_default_route); + if (go->ouraddr != wo->ouraddr) { + ppp_warn("Local IP address changed to %I", go->ouraddr); + script_setenv("OLDIPLOCAL", ip_ntoa(wo->ouraddr), 0); + wo->ouraddr = go->ouraddr; + } else + script_unsetenv("OLDIPLOCAL"); + if (ho->hisaddr != wo->hisaddr && wo->hisaddr != 0) { + ppp_warn("Remote IP address changed to %I", ho->hisaddr); + script_setenv("OLDIPREMOTE", ip_ntoa(wo->hisaddr), 0); + wo->hisaddr = ho->hisaddr; + } else + script_unsetenv("OLDIPREMOTE"); + + /* Set the interface to the new addresses */ + mask = get_mask(go->ouraddr); + if (!sifaddr(pcb, go->ouraddr, ho->hisaddr, mask)) { +#if PPP_DEBUG + ppp_warn("Interface configuration failed"); +#endif /* PPP_DEBUG */ + ipcp_close(f->unit, "Interface configuration failed"); + return; + } + + /* assign a default route through the interface if required */ + if (ipcp_wantoptions[f->unit].default_route) + if (sifdefaultroute(pcb, go->ouraddr, ho->hisaddr, + wo->replace_default_route)) + default_route_set[f->unit] = 1; + +#if 0 /* UNUSED - PROXY ARP */ + /* Make a proxy ARP entry if requested. */ + if (ho->hisaddr != 0 && ipcp_wantoptions[f->unit].proxy_arp) + if (sifproxyarp(pcb, ho->hisaddr)) + proxy_arp_set[f->unit] = 1; +#endif /* UNUSED - PROXY ARP */ + + } + demand_rexmit(PPP_IP,go->ouraddr); + sifnpmode(pcb, PPP_IP, NPMODE_PASS); + + } else +#endif /* DEMAND_SUPPORT */ + { + /* + * Set IP addresses and (if specified) netmask. + */ + mask = get_mask(go->ouraddr); + +#if !(defined(SVR4) && (defined(SNI) || defined(__USLC__))) + if (!sifaddr(pcb, go->ouraddr, ho->hisaddr, mask)) { +#if PPP_DEBUG + ppp_warn("Interface configuration failed"); +#endif /* PPP_DEBUG */ + ipcp_close(f->pcb, "Interface configuration failed"); + return; + } +#endif + + /* bring the interface up for IP */ + if (!sifup(pcb)) { +#if PPP_DEBUG + ppp_warn("Interface failed to come up"); +#endif /* PPP_DEBUG */ + ipcp_close(f->pcb, "Interface configuration failed"); + return; + } + +#if (defined(SVR4) && (defined(SNI) || defined(__USLC__))) + if (!sifaddr(pcb, go->ouraddr, ho->hisaddr, mask)) { +#if PPP_DEBUG + ppp_warn("Interface configuration failed"); +#endif /* PPP_DEBUG */ + ipcp_close(f->unit, "Interface configuration failed"); + return; + } +#endif +#if DEMAND_SUPPORT + sifnpmode(pcb, PPP_IP, NPMODE_PASS); +#endif /* DEMAND_SUPPORT */ + +#if 0 /* UNUSED */ + /* assign a default route through the interface if required */ + if (wo->default_route) + if (sifdefaultroute(pcb, go->ouraddr, ho->hisaddr, + wo->replace_default_route)) + pcb->default_route_set = 1; +#endif /* UNUSED */ + +#if 0 /* UNUSED - PROXY ARP */ + /* Make a proxy ARP entry if requested. 
*/ + if (ho->hisaddr != 0 && wo->proxy_arp) + if (sifproxyarp(pcb, ho->hisaddr)) + pcb->proxy_arp_set = 1; +#endif /* UNUSED - PROXY ARP */ + + wo->ouraddr = go->ouraddr; + + ppp_notice("local IP address %I", go->ouraddr); + if (ho->hisaddr != 0) + ppp_notice("remote IP address %I", ho->hisaddr); +#if LWIP_DNS + if (go->dnsaddr[0]) + ppp_notice("primary DNS address %I", go->dnsaddr[0]); + if (go->dnsaddr[1]) + ppp_notice("secondary DNS address %I", go->dnsaddr[1]); +#endif /* LWIP_DNS */ + } + +#if PPP_STATS_SUPPORT + reset_link_stats(f->unit); +#endif /* PPP_STATS_SUPPORT */ + + np_up(pcb, PPP_IP); + pcb->ipcp_is_up = 1; + +#if PPP_NOTIFY + notify(ip_up_notifier, 0); +#endif /* PPP_NOTIFY */ +#if 0 /* UNUSED */ + if (ip_up_hook) + ip_up_hook(); +#endif /* UNUSED */ +} + + +/* + * ipcp_down - IPCP has gone DOWN. + * + * Take the IP network interface down, clear its addresses + * and delete routes through it. + */ +static void ipcp_down(fsm *f) { + ppp_pcb *pcb = f->pcb; + ipcp_options *ho = &pcb->ipcp_hisoptions; + ipcp_options *go = &pcb->ipcp_gotoptions; + + IPCPDEBUG(("ipcp: down")); +#if PPP_STATS_SUPPORT + /* XXX a bit IPv4-centric here, we only need to get the stats + * before the interface is marked down. */ + /* XXX more correct: we must get the stats before running the notifiers, + * at least for the radius plugin */ + update_link_stats(f->unit); +#endif /* PPP_STATS_SUPPORT */ +#if PPP_NOTIFY + notify(ip_down_notifier, 0); +#endif /* PPP_NOTIFY */ +#if 0 /* UNUSED */ + if (ip_down_hook) + ip_down_hook(); +#endif /* UNUSED */ + if (pcb->ipcp_is_up) { + pcb->ipcp_is_up = 0; + np_down(pcb, PPP_IP); + } +#if VJ_SUPPORT + sifvjcomp(pcb, 0, 0, 0); +#endif /* VJ_SUPPORT */ + +#if PPP_STATS_SUPPORT + print_link_stats(); /* _after_ running the notifiers and ip_down_hook(), + * because print_link_stats() sets link_stats_valid + * to 0 (zero) */ +#endif /* PPP_STATS_SUPPORT */ + +#if DEMAND_SUPPORT + /* + * If we are doing dial-on-demand, set the interface + * to queue up outgoing packets (for now). + */ + if (demand) { + sifnpmode(pcb, PPP_IP, NPMODE_QUEUE); + } else +#endif /* DEMAND_SUPPORT */ + { +#if DEMAND_SUPPORT + sifnpmode(pcb, PPP_IP, NPMODE_DROP); +#endif /* DEMAND_SUPPORT */ + sifdown(pcb); + ipcp_clear_addrs(pcb, go->ouraddr, + ho->hisaddr, 0); +#if LWIP_DNS + cdns(pcb, go->dnsaddr[0], go->dnsaddr[1]); +#endif /* LWIP_DNS */ + } +} + + +/* + * ipcp_clear_addrs() - clear the interface addresses, routes, + * proxy arp entries, etc. + */ +static void ipcp_clear_addrs(ppp_pcb *pcb, u32_t ouraddr, u32_t hisaddr, u8_t replacedefaultroute) { + LWIP_UNUSED_ARG(replacedefaultroute); + +#if 0 /* UNUSED - PROXY ARP */ + if (pcb->proxy_arp_set) { + cifproxyarp(pcb, hisaddr); + pcb->proxy_arp_set = 0; + } +#endif /* UNUSED - PROXY ARP */ +#if 0 /* UNUSED */ + /* If replacedefaultroute, sifdefaultroute will be called soon + * with replacedefaultroute set and that will overwrite the current + * default route. This is the case only when doing demand, otherwise + * during demand, this cifdefaultroute would restore the old default + * route which is not what we want in this case. In the non-demand + * case, we'll delete the default route and restore the old if there + * is one saved by an sifdefaultroute with replacedefaultroute. + */ + if (!replacedefaultroute && pcb->default_route_set) { + cifdefaultroute(pcb, ouraddr, hisaddr); + pcb->default_route_set = 0; + } +#endif /* UNUSED */ + cifaddr(pcb, ouraddr, hisaddr); +} + + +/* + * ipcp_finished - possibly shut down the lower layers. 
+ */ +static void ipcp_finished(fsm *f) { + ppp_pcb *pcb = f->pcb; + if (pcb->ipcp_is_open) { + pcb->ipcp_is_open = 0; + np_finished(pcb, PPP_IP); + } +} + + +#if 0 /* UNUSED */ +/* + * create_resolv - create the replacement resolv.conf file + */ +static void +create_resolv(peerdns1, peerdns2) + u32_t peerdns1, peerdns2; +{ + +} +#endif /* UNUSED */ + +#if PRINTPKT_SUPPORT +/* + * ipcp_printpkt - print the contents of an IPCP packet. + */ +static const char* const ipcp_codenames[] = { + "ConfReq", "ConfAck", "ConfNak", "ConfRej", + "TermReq", "TermAck", "CodeRej" +}; + +static int ipcp_printpkt(const u_char *p, int plen, + void (*printer) (void *, const char *, ...), void *arg) { + int code, id, len, olen; + const u_char *pstart, *optend; +#if VJ_SUPPORT + u_short cishort; +#endif /* VJ_SUPPORT */ + u32_t cilong; + + if (plen < HEADERLEN) + return 0; + pstart = p; + GETCHAR(code, p); + GETCHAR(id, p); + GETSHORT(len, p); + if (len < HEADERLEN || len > plen) + return 0; + + if (code >= 1 && code <= (int)LWIP_ARRAYSIZE(ipcp_codenames)) + printer(arg, " %s", ipcp_codenames[code-1]); + else + printer(arg, " code=0x%x", code); + printer(arg, " id=0x%x", id); + len -= HEADERLEN; + switch (code) { + case CONFREQ: + case CONFACK: + case CONFNAK: + case CONFREJ: + /* print option list */ + while (len >= 2) { + GETCHAR(code, p); + GETCHAR(olen, p); + p -= 2; + if (olen < 2 || olen > len) { + break; + } + printer(arg, " <"); + len -= olen; + optend = p + olen; + switch (code) { + case CI_ADDRS: + if (olen == CILEN_ADDRS) { + p += 2; + GETLONG(cilong, p); + printer(arg, "addrs %I", lwip_htonl(cilong)); + GETLONG(cilong, p); + printer(arg, " %I", lwip_htonl(cilong)); + } + break; +#if VJ_SUPPORT + case CI_COMPRESSTYPE: + if (olen >= CILEN_COMPRESS) { + p += 2; + GETSHORT(cishort, p); + printer(arg, "compress "); + switch (cishort) { + case IPCP_VJ_COMP: + printer(arg, "VJ"); + break; + case IPCP_VJ_COMP_OLD: + printer(arg, "old-VJ"); + break; + default: + printer(arg, "0x%x", cishort); + } + } + break; +#endif /* VJ_SUPPORT */ + case CI_ADDR: + if (olen == CILEN_ADDR) { + p += 2; + GETLONG(cilong, p); + printer(arg, "addr %I", lwip_htonl(cilong)); + } + break; +#if LWIP_DNS + case CI_MS_DNS1: + case CI_MS_DNS2: + p += 2; + GETLONG(cilong, p); + printer(arg, "ms-dns%d %I", (code == CI_MS_DNS1? 1: 2), + htonl(cilong)); + break; +#endif /* LWIP_DNS */ +#if 0 /* UNUSED - WINS */ + case CI_MS_WINS1: + case CI_MS_WINS2: + p += 2; + GETLONG(cilong, p); + printer(arg, "ms-wins %I", lwip_htonl(cilong)); + break; +#endif /* UNUSED - WINS */ + default: + break; + } + while (p < optend) { + GETCHAR(code, p); + printer(arg, " %.2x", code); + } + printer(arg, ">"); + } + break; + + case TERMACK: + case TERMREQ: + if (len > 0 && *p >= ' ' && *p < 0x7f) { + printer(arg, " "); + ppp_print_string(p, len, printer, arg); + p += len; + len = 0; + } + break; + default: + break; + } + + /* print the rest of the bytes in the packet */ + for (; len > 0; --len) { + GETCHAR(code, p); + printer(arg, " %.2x", code); + } + + return p - pstart; +} +#endif /* PRINTPKT_SUPPORT */ + +#if DEMAND_SUPPORT +/* + * ip_active_pkt - see if this IP packet is worth bringing the link up for. + * We don't bring the link up for IP fragments or for TCP FIN packets + * with no data. 
+ */ +#define IP_HDRLEN 20 /* bytes */ +#define IP_OFFMASK 0x1fff +#ifndef IPPROTO_TCP +#define IPPROTO_TCP 6 +#endif +#define TCP_HDRLEN 20 +#define TH_FIN 0x01 + +/* + * We use these macros because the IP header may be at an odd address, + * and some compilers might use word loads to get th_off or ip_hl. + */ + +#define net_short(x) (((x)[0] << 8) + (x)[1]) +#define get_iphl(x) (((unsigned char *)(x))[0] & 0xF) +#define get_ipoff(x) net_short((unsigned char *)(x) + 6) +#define get_ipproto(x) (((unsigned char *)(x))[9]) +#define get_tcpoff(x) (((unsigned char *)(x))[12] >> 4) +#define get_tcpflags(x) (((unsigned char *)(x))[13]) + +static int +ip_active_pkt(pkt, len) + u_char *pkt; + int len; +{ + u_char *tcp; + int hlen; + + len -= PPP_HDRLEN; + pkt += PPP_HDRLEN; + if (len < IP_HDRLEN) + return 0; + if ((get_ipoff(pkt) & IP_OFFMASK) != 0) + return 0; + if (get_ipproto(pkt) != IPPROTO_TCP) + return 1; + hlen = get_iphl(pkt) * 4; + if (len < hlen + TCP_HDRLEN) + return 0; + tcp = pkt + hlen; + if ((get_tcpflags(tcp) & TH_FIN) != 0 && len == hlen + get_tcpoff(tcp) * 4) + return 0; + return 1; +} +#endif /* DEMAND_SUPPORT */ + +#endif /* PPP_SUPPORT && PPP_IPV4_SUPPORT */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/netif/ppp/ipv6cp.c b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/netif/ppp/ipv6cp.c new file mode 100644 index 0000000..e7d81c9 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/netif/ppp/ipv6cp.c @@ -0,0 +1,1533 @@ +/* + * ipv6cp.c - PPP IPV6 Control Protocol. + * + * Copyright (c) 1999 Tommi Komulainen. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * 3. The name(s) of the authors of this software must not be used to + * endorse or promote products derived from this software without + * prior written permission. + * + * 4. Redistributions of any form whatsoever must retain the following + * acknowledgment: + * "This product includes software developed by Tommi Komulainen + * ". + * + * THE AUTHORS OF THIS SOFTWARE DISCLAIM ALL WARRANTIES WITH REGARD TO + * THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY + * AND FITNESS, IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY + * SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN + * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING + * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + * + */ + +/* Original version, based on RFC2023 : + + Copyright (c) 1995, 1996, 1997 Francis.Dupont@inria.fr, INRIA Rocquencourt, + Alain.Durand@imag.fr, IMAG, + Jean-Luc.Richier@imag.fr, IMAG-LSR. + + Copyright (c) 1998, 1999 Francis.Dupont@inria.fr, GIE DYADE, + Alain.Durand@imag.fr, IMAG, + Jean-Luc.Richier@imag.fr, IMAG-LSR. + + Ce travail a été fait au sein du GIE DYADE (Groupement d'Intérêt + Économique ayant pour membres BULL S.A. et l'INRIA). 
+ + Ce logiciel informatique est disponible aux conditions + usuelles dans la recherche, c'est-à-dire qu'il peut + être utilisé, copié, modifié, distribué à l'unique + condition que ce texte soit conservé afin que + l'origine de ce logiciel soit reconnue. + + Le nom de l'Institut National de Recherche en Informatique + et en Automatique (INRIA), de l'IMAG, ou d'une personne morale + ou physique ayant participé à l'élaboration de ce logiciel ne peut + être utilisé sans son accord préalable explicite. + + Ce logiciel est fourni tel quel sans aucune garantie, + support ou responsabilité d'aucune sorte. + Ce logiciel est dérivé de sources d'origine + "University of California at Berkeley" et + "Digital Equipment Corporation" couvertes par des copyrights. + + L'Institut d'Informatique et de Mathématiques Appliquées de Grenoble (IMAG) + est une fédération d'unités mixtes de recherche du CNRS, de l'Institut National + Polytechnique de Grenoble et de l'Université Joseph Fourier regroupant + sept laboratoires dont le laboratoire Logiciels, Systèmes, Réseaux (LSR). + + This work has been done in the context of GIE DYADE (joint R & D venture + between BULL S.A. and INRIA). + + This software is available with usual "research" terms + with the aim of retain credits of the software. + Permission to use, copy, modify and distribute this software for any + purpose and without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies, + and the name of INRIA, IMAG, or any contributor not be used in advertising + or publicity pertaining to this material without the prior explicit + permission. The software is provided "as is" without any + warranties, support or liabilities of any kind. + This software is derived from source code from + "University of California at Berkeley" and + "Digital Equipment Corporation" protected by copyrights. + + Grenoble's Institute of Computer Science and Applied Mathematics (IMAG) + is a federation of seven research units funded by the CNRS, National + Polytechnic Institute of Grenoble and University Joseph Fourier. + The research unit in Software, Systems, Networks (LSR) is member of IMAG. +*/ + +/* + * Derived from : + * + * + * ipcp.c - PPP IP Control Protocol. + * + * Copyright (c) 1984-2000 Carnegie Mellon University. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * 3. The name "Carnegie Mellon University" must not be used to + * endorse or promote products derived from this software without + * prior written permission. For permission or any legal + * details, please contact + * Office of Technology Transfer + * Carnegie Mellon University + * 5000 Forbes Avenue + * Pittsburgh, PA 15213-3890 + * (412) 268-4387, fax: (412) 268-7395 + * tech-transfer@andrew.cmu.edu + * + * 4. Redistributions of any form whatsoever must retain the following + * acknowledgment: + * "This product includes software developed by Computing Services + * at Carnegie Mellon University (http://www.cmu.edu/computing/)." 
+ * + * CARNEGIE MELLON UNIVERSITY DISCLAIMS ALL WARRANTIES WITH REGARD TO + * THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY + * AND FITNESS, IN NO EVENT SHALL CARNEGIE MELLON UNIVERSITY BE LIABLE + * FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN + * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING + * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + * + * $Id: ipv6cp.c,v 1.21 2005/08/25 23:59:34 paulus Exp $ + */ + +/* + * @todo: + * + * Proxy Neighbour Discovery. + * + * Better defines for selecting the ordering of + * interface up / set address. + */ + +#include "netif/ppp/ppp_opts.h" +#if PPP_SUPPORT && PPP_IPV6_SUPPORT /* don't build if not configured for use in lwipopts.h */ + +#if 0 /* UNUSED */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#endif /* UNUSED */ + +#include "netif/ppp/ppp_impl.h" +#include "netif/ppp/fsm.h" +#include "netif/ppp/ipcp.h" +#include "netif/ppp/ipv6cp.h" +#include "netif/ppp/magic.h" + +/* global vars */ +#if 0 /* UNUSED */ +int no_ifaceid_neg = 0; +#endif /* UNUSED */ + +/* + * Callbacks for fsm code. (CI = Configuration Information) + */ +static void ipv6cp_resetci(fsm *f); /* Reset our CI */ +static int ipv6cp_cilen(fsm *f); /* Return length of our CI */ +static void ipv6cp_addci(fsm *f, u_char *ucp, int *lenp); /* Add our CI */ +static int ipv6cp_ackci(fsm *f, u_char *p, int len); /* Peer ack'd our CI */ +static int ipv6cp_nakci(fsm *f, u_char *p, int len, int treat_as_reject); /* Peer nak'd our CI */ +static int ipv6cp_rejci(fsm *f, u_char *p, int len); /* Peer rej'd our CI */ +static int ipv6cp_reqci(fsm *f, u_char *inp, int *len, int reject_if_disagree); /* Rcv CI */ +static void ipv6cp_up(fsm *f); /* We're UP */ +static void ipv6cp_down(fsm *f); /* We're DOWN */ +static void ipv6cp_finished(fsm *f); /* Don't need lower layer */ + +static const fsm_callbacks ipv6cp_callbacks = { /* IPV6CP callback routines */ + ipv6cp_resetci, /* Reset our Configuration Information */ + ipv6cp_cilen, /* Length of our Configuration Information */ + ipv6cp_addci, /* Add our Configuration Information */ + ipv6cp_ackci, /* ACK our Configuration Information */ + ipv6cp_nakci, /* NAK our Configuration Information */ + ipv6cp_rejci, /* Reject our Configuration Information */ + ipv6cp_reqci, /* Request peer's Configuration Information */ + ipv6cp_up, /* Called when fsm reaches OPENED state */ + ipv6cp_down, /* Called when fsm leaves OPENED state */ + NULL, /* Called when we want the lower layer up */ + ipv6cp_finished, /* Called when we want the lower layer down */ + NULL, /* Called when Protocol-Reject received */ + NULL, /* Retransmission is necessary */ + NULL, /* Called to handle protocol-specific codes */ + "IPV6CP" /* String name of protocol */ +}; + +#if PPP_OPTIONS +/* + * Command-line options. 
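/*
 * Editor's note: a hypothetical sketch (names invented, not the lwIP API) of
 * the callback-table pattern behind ipv6cp_callbacks above: the generic fsm
 * engine never references IPV6CP directly, it only calls through whatever
 * function pointers the protocol registered, so every control protocol plugs
 * into the same state machine.
 */
#include <stdio.h>

struct cp_callbacks_demo {
    void (*resetci)(void);            /* reset our configuration information */
    void (*up)(void);                 /* fsm reached the OPENED state        */
    const char *name;
};

static void demo_resetci(void) { printf("reset CI\n"); }
static void demo_up(void)      { printf("protocol is up\n"); }

static const struct cp_callbacks_demo demo_callbacks = {
    demo_resetci, demo_up, "DEMO"
};

static void fsm_reached_opened(const struct cp_callbacks_demo *cb)
{
    cb->resetci();                    /* engine only sees the table          */
    cb->up();
}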
+ */ +static int setifaceid(char **arg)); +static void printifaceid(option_t *, + void (*)(void *, char *, ...), void *)); + +static option_t ipv6cp_option_list[] = { + { "ipv6", o_special, (void *)setifaceid, + "Set interface identifiers for IPV6", + OPT_A2PRINTER, (void *)printifaceid }, + + { "+ipv6", o_bool, &ipv6cp_protent.enabled_flag, + "Enable IPv6 and IPv6CP", OPT_PRIO | 1 }, + { "noipv6", o_bool, &ipv6cp_protent.enabled_flag, + "Disable IPv6 and IPv6CP", OPT_PRIOSUB }, + { "-ipv6", o_bool, &ipv6cp_protent.enabled_flag, + "Disable IPv6 and IPv6CP", OPT_PRIOSUB | OPT_ALIAS }, + + { "ipv6cp-accept-local", o_bool, &ipv6cp_allowoptions[0].accept_local, + "Accept peer's interface identifier for us", 1 }, + + { "ipv6cp-use-ipaddr", o_bool, &ipv6cp_allowoptions[0].use_ip, + "Use (default) IPv4 address as interface identifier", 1 }, + + { "ipv6cp-use-persistent", o_bool, &ipv6cp_wantoptions[0].use_persistent, + "Use uniquely-available persistent value for link local address", 1 }, + + { "ipv6cp-restart", o_int, &ipv6cp_fsm[0].timeouttime, + "Set timeout for IPv6CP", OPT_PRIO }, + { "ipv6cp-max-terminate", o_int, &ipv6cp_fsm[0].maxtermtransmits, + "Set max #xmits for term-reqs", OPT_PRIO }, + { "ipv6cp-max-configure", o_int, &ipv6cp_fsm[0].maxconfreqtransmits, + "Set max #xmits for conf-reqs", OPT_PRIO }, + { "ipv6cp-max-failure", o_int, &ipv6cp_fsm[0].maxnakloops, + "Set max #conf-naks for IPv6CP", OPT_PRIO }, + + { NULL } +}; +#endif /* PPP_OPTIONS */ + +/* + * Protocol entry points from main code. + */ +static void ipv6cp_init(ppp_pcb *pcb); +static void ipv6cp_open(ppp_pcb *pcb); +static void ipv6cp_close(ppp_pcb *pcb, const char *reason); +static void ipv6cp_lowerup(ppp_pcb *pcb); +static void ipv6cp_lowerdown(ppp_pcb *pcb); +static void ipv6cp_input(ppp_pcb *pcb, u_char *p, int len); +static void ipv6cp_protrej(ppp_pcb *pcb); +#if PPP_OPTIONS +static void ipv6_check_options(void); +#endif /* PPP_OPTIONS */ +#if DEMAND_SUPPORT +static int ipv6_demand_conf(int u); +#endif /* DEMAND_SUPPORT */ +#if PRINTPKT_SUPPORT +static int ipv6cp_printpkt(const u_char *p, int plen, + void (*printer)(void *, const char *, ...), void *arg); +#endif /* PRINTPKT_SUPPORT */ +#if DEMAND_SUPPORT +static int ipv6_active_pkt(u_char *pkt, int len); +#endif /* DEMAND_SUPPORT */ + +const struct protent ipv6cp_protent = { + PPP_IPV6CP, + ipv6cp_init, + ipv6cp_input, + ipv6cp_protrej, + ipv6cp_lowerup, + ipv6cp_lowerdown, + ipv6cp_open, + ipv6cp_close, +#if PRINTPKT_SUPPORT + ipv6cp_printpkt, +#endif /* PRINTPKT_SUPPORT */ +#if PPP_DATAINPUT + NULL, +#endif /* PPP_DATAINPUT */ +#if PRINTPKT_SUPPORT + "IPV6CP", + "IPV6", +#endif /* PRINTPKT_SUPPORT */ +#if PPP_OPTIONS + ipv6cp_option_list, + ipv6_check_options, +#endif /* PPP_OPTIONS */ +#if DEMAND_SUPPORT + ipv6_demand_conf, + ipv6_active_pkt +#endif /* DEMAND_SUPPORT */ +}; + +static void ipv6cp_clear_addrs(ppp_pcb *pcb, eui64_t ourid, eui64_t hisid); +#if 0 /* UNUSED */ +static void ipv6cp_script(char *)); +static void ipv6cp_script_done(void *)); +#endif /* UNUSED */ + +/* + * Lengths of configuration options. + */ +#define CILEN_VOID 2 +#define CILEN_COMPRESS 4 /* length for RFC2023 compress opt. */ +#define CILEN_IFACEID 10 /* RFC2472, interface identifier */ + +#define CODENAME(x) ((x) == CONFACK ? "ACK" : \ + (x) == CONFNAK ? "NAK" : "REJ") + +#if 0 /* UNUSED */ +/* + * This state variable is used to ensure that we don't + * run an ipcp-up/down script while one is already running. 
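/*
 * Editor's note: a standalone sketch (not lwIP code) of what the
 * CILEN_IFACEID == 10 define above encodes: an RFC 2472 Interface-Identifier
 * option is one type byte, one length byte and an 8-byte EUI-64, and the
 * length byte counts all ten bytes including its own header.
 */
#include <string.h>

static int put_ifaceid_option(unsigned char *out, int room,
                              unsigned char opt_type, const unsigned char id[8])
{
    if (room < 10)
        return 0;                     /* no space: caller drops the option   */
    out[0] = opt_type;
    out[1] = 10;                      /* CILEN_IFACEID: 2-byte header + 8    */
    memcpy(out + 2, id, 8);
    return 10;
}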
+ */ +static enum script_state { + s_down, + s_up, +} ipv6cp_script_state; +static pid_t ipv6cp_script_pid; +#endif /* UNUSED */ + +static char *llv6_ntoa(eui64_t ifaceid); + +#if PPP_OPTIONS +/* + * setifaceid - set the interface identifiers manually + */ +static int +setifaceid(argv) + char **argv; +{ + char *comma, *arg, c; + ipv6cp_options *wo = &ipv6cp_wantoptions[0]; + struct in6_addr addr; + static int prio_local, prio_remote; + +#define VALIDID(a) ( (((a).s6_addr32[0] == 0) && ((a).s6_addr32[1] == 0)) && \ + (((a).s6_addr32[2] != 0) || ((a).s6_addr32[3] != 0)) ) + + arg = *argv; + if ((comma = strchr(arg, ',')) == NULL) + comma = arg + strlen(arg); + + /* + * If comma first character, then no local identifier + */ + if (comma != arg) { + c = *comma; + *comma = '\0'; + + if (inet_pton(AF_INET6, arg, &addr) == 0 || !VALIDID(addr)) { + option_error("Illegal interface identifier (local): %s", arg); + return 0; + } + + if (option_priority >= prio_local) { + eui64_copy(addr.s6_addr32[2], wo->ourid); + wo->opt_local = 1; + prio_local = option_priority; + } + *comma = c; + } + + /* + * If comma last character, the no remote identifier + */ + if (*comma != 0 && *++comma != '\0') { + if (inet_pton(AF_INET6, comma, &addr) == 0 || !VALIDID(addr)) { + option_error("Illegal interface identifier (remote): %s", comma); + return 0; + } + if (option_priority >= prio_remote) { + eui64_copy(addr.s6_addr32[2], wo->hisid); + wo->opt_remote = 1; + prio_remote = option_priority; + } + } + + if (override_value("+ipv6", option_priority, option_source)) + ipv6cp_protent.enabled_flag = 1; + return 1; +} + +static void +printifaceid(opt, printer, arg) + option_t *opt; + void (*printer)(void *, char *, ...)); + void *arg; +{ + ipv6cp_options *wo = &ipv6cp_wantoptions[0]; + + if (wo->opt_local) + printer(arg, "%s", llv6_ntoa(wo->ourid)); + printer(arg, ","); + if (wo->opt_remote) + printer(arg, "%s", llv6_ntoa(wo->hisid)); +} +#endif /* PPP_OPTIONS */ + +/* + * Make a string representation of a network address. + */ +static char * +llv6_ntoa(eui64_t ifaceid) +{ + static char b[26]; + + sprintf(b, "fe80::%02x%02x:%02x%02x:%02x%02x:%02x%02x", + ifaceid.e8[0], ifaceid.e8[1], ifaceid.e8[2], ifaceid.e8[3], + ifaceid.e8[4], ifaceid.e8[5], ifaceid.e8[6], ifaceid.e8[7]); + + return b; +} + + +/* + * ipv6cp_init - Initialize IPV6CP. + */ +static void ipv6cp_init(ppp_pcb *pcb) { + fsm *f = &pcb->ipv6cp_fsm; + ipv6cp_options *wo = &pcb->ipv6cp_wantoptions; + ipv6cp_options *ao = &pcb->ipv6cp_allowoptions; + + f->pcb = pcb; + f->protocol = PPP_IPV6CP; + f->callbacks = &ipv6cp_callbacks; + fsm_init(f); + +#if 0 /* Not necessary, everything is cleared in ppp_new() */ + memset(wo, 0, sizeof(*wo)); + memset(ao, 0, sizeof(*ao)); +#endif /* 0 */ + + wo->accept_local = 1; + wo->neg_ifaceid = 1; + ao->neg_ifaceid = 1; + +#ifdef IPV6CP_COMP + wo->neg_vj = 1; + ao->neg_vj = 1; + wo->vj_protocol = IPV6CP_COMP; +#endif + +} + + +/* + * ipv6cp_open - IPV6CP is allowed to come up. + */ +static void ipv6cp_open(ppp_pcb *pcb) { + fsm_open(&pcb->ipv6cp_fsm); +} + + +/* + * ipv6cp_close - Take IPV6CP down. + */ +static void ipv6cp_close(ppp_pcb *pcb, const char *reason) { + fsm_close(&pcb->ipv6cp_fsm, reason); +} + + +/* + * ipv6cp_lowerup - The lower layer is up. + */ +static void ipv6cp_lowerup(ppp_pcb *pcb) { + fsm_lowerup(&pcb->ipv6cp_fsm); +} + + +/* + * ipv6cp_lowerdown - The lower layer is down. + */ +static void ipv6cp_lowerdown(ppp_pcb *pcb) { + fsm_lowerdown(&pcb->ipv6cp_fsm); +} + + +/* + * ipv6cp_input - Input IPV6CP packet. 
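/*
 * Editor's note: llv6_ntoa() above formats into a 26-byte static buffer
 * ("fe80::" + 16 hex digits + 3 colons + NUL), so each call overwrites the
 * previous result and the function is not reentrant. A caller-supplied-buffer
 * variant (hypothetical helper, not part of lwIP) would look like this:
 */
#include <stdio.h>

static char *ll_from_ifaceid(const unsigned char id[8], char *buf, size_t len)
{
    snprintf(buf, len, "fe80::%02x%02x:%02x%02x:%02x%02x:%02x%02x",
             id[0], id[1], id[2], id[3], id[4], id[5], id[6], id[7]);
    return buf;
}

/* usage: char b[26]; puts(ll_from_ifaceid(id, b, sizeof(b))); */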
+ */ +static void ipv6cp_input(ppp_pcb *pcb, u_char *p, int len) { + fsm_input(&pcb->ipv6cp_fsm, p, len); +} + + +/* + * ipv6cp_protrej - A Protocol-Reject was received for IPV6CP. + * + * Pretend the lower layer went down, so we shut up. + */ +static void ipv6cp_protrej(ppp_pcb *pcb) { + fsm_lowerdown(&pcb->ipv6cp_fsm); +} + + +/* + * ipv6cp_resetci - Reset our CI. + */ +static void ipv6cp_resetci(fsm *f) { + ppp_pcb *pcb = f->pcb; + ipv6cp_options *wo = &pcb->ipv6cp_wantoptions; + ipv6cp_options *go = &pcb->ipv6cp_gotoptions; + ipv6cp_options *ao = &pcb->ipv6cp_allowoptions; + + wo->req_ifaceid = wo->neg_ifaceid && ao->neg_ifaceid; + + if (!wo->opt_local) { + eui64_magic_nz(wo->ourid); + } + + *go = *wo; + eui64_zero(go->hisid); /* last proposed interface identifier */ +} + + +/* + * ipv6cp_cilen - Return length of our CI. + */ +static int ipv6cp_cilen(fsm *f) { + ppp_pcb *pcb = f->pcb; + ipv6cp_options *go = &pcb->ipv6cp_gotoptions; + +#ifdef IPV6CP_COMP +#define LENCIVJ(neg) (neg ? CILEN_COMPRESS : 0) +#endif /* IPV6CP_COMP */ +#define LENCIIFACEID(neg) (neg ? CILEN_IFACEID : 0) + + return (LENCIIFACEID(go->neg_ifaceid) + +#ifdef IPV6CP_COMP + LENCIVJ(go->neg_vj) + +#endif /* IPV6CP_COMP */ + 0); +} + + +/* + * ipv6cp_addci - Add our desired CIs to a packet. + */ +static void ipv6cp_addci(fsm *f, u_char *ucp, int *lenp) { + ppp_pcb *pcb = f->pcb; + ipv6cp_options *go = &pcb->ipv6cp_gotoptions; + int len = *lenp; + +#ifdef IPV6CP_COMP +#define ADDCIVJ(opt, neg, val) \ + if (neg) { \ + int vjlen = CILEN_COMPRESS; \ + if (len >= vjlen) { \ + PUTCHAR(opt, ucp); \ + PUTCHAR(vjlen, ucp); \ + PUTSHORT(val, ucp); \ + len -= vjlen; \ + } else \ + neg = 0; \ + } +#endif /* IPV6CP_COMP */ + +#define ADDCIIFACEID(opt, neg, val1) \ + if (neg) { \ + int idlen = CILEN_IFACEID; \ + if (len >= idlen) { \ + PUTCHAR(opt, ucp); \ + PUTCHAR(idlen, ucp); \ + eui64_put(val1, ucp); \ + len -= idlen; \ + } else \ + neg = 0; \ + } + + ADDCIIFACEID(CI_IFACEID, go->neg_ifaceid, go->ourid); + +#ifdef IPV6CP_COMP + ADDCIVJ(CI_COMPRESSTYPE, go->neg_vj, go->vj_protocol); +#endif /* IPV6CP_COMP */ + + *lenp -= len; +} + + +/* + * ipv6cp_ackci - Ack our CIs. + * + * Returns: + * 0 - Ack was bad. + * 1 - Ack was good. + */ +static int ipv6cp_ackci(fsm *f, u_char *p, int len) { + ppp_pcb *pcb = f->pcb; + ipv6cp_options *go = &pcb->ipv6cp_gotoptions; + u_short cilen, citype; +#ifdef IPV6CP_COMP + u_short cishort; +#endif /* IPV6CP_COMP */ + eui64_t ifaceid; + + /* + * CIs must be in exactly the same order that we sent... + * Check packet length and CI length at each step. + * If we find any deviations, then this packet is bad. + */ + +#ifdef IPV6CP_COMP +#define ACKCIVJ(opt, neg, val) \ + if (neg) { \ + int vjlen = CILEN_COMPRESS; \ + if ((len -= vjlen) < 0) \ + goto bad; \ + GETCHAR(citype, p); \ + GETCHAR(cilen, p); \ + if (cilen != vjlen || \ + citype != opt) \ + goto bad; \ + GETSHORT(cishort, p); \ + if (cishort != val) \ + goto bad; \ + } +#endif /* IPV6CP_COMP */ + +#define ACKCIIFACEID(opt, neg, val1) \ + if (neg) { \ + int idlen = CILEN_IFACEID; \ + if ((len -= idlen) < 0) \ + goto bad; \ + GETCHAR(citype, p); \ + GETCHAR(cilen, p); \ + if (cilen != idlen || \ + citype != opt) \ + goto bad; \ + eui64_get(ifaceid, p); \ + if (! eui64_equals(val1, ifaceid)) \ + goto bad; \ + } + + ACKCIIFACEID(CI_IFACEID, go->neg_ifaceid, go->ourid); + +#ifdef IPV6CP_COMP + ACKCIVJ(CI_COMPRESSTYPE, go->neg_vj, go->vj_protocol); +#endif /* IPV6CP_COMP */ + + /* + * If there are any remaining CIs, then this packet is bad. 
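/*
 * Editor's note: a standalone sketch (not lwIP code) of the ADDCI* macro
 * pattern used in ipv6cp_addci() above: an option is appended to the
 * outgoing Configure-Request only if it still fits; otherwise its "neg" flag
 * is cleared so that option simply stops being requested.
 */
static int add_short_option(unsigned char **pp, int *room,
                            unsigned char type, unsigned short val, int *neg)
{
    unsigned char *p = *pp;

    if (!*neg)
        return 0;
    if (*room < 4) {                  /* type + len + 2-byte value           */
        *neg = 0;                     /* no room left: drop this option      */
        return 0;
    }
    p[0] = type;
    p[1] = 4;
    p[2] = (unsigned char)(val >> 8); /* value in network byte order,        */
    p[3] = (unsigned char)(val);      /* as PUTSHORT does                    */
    *pp    = p + 4;
    *room -= 4;
    return 1;
}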
+ */ + if (len != 0) + goto bad; + return (1); + +bad: + IPV6CPDEBUG(("ipv6cp_ackci: received bad Ack!")); + return (0); +} + +/* + * ipv6cp_nakci - Peer has sent a NAK for some of our CIs. + * This should not modify any state if the Nak is bad + * or if IPV6CP is in the OPENED state. + * + * Returns: + * 0 - Nak was bad. + * 1 - Nak was good. + */ +static int ipv6cp_nakci(fsm *f, u_char *p, int len, int treat_as_reject) { + ppp_pcb *pcb = f->pcb; + ipv6cp_options *go = &pcb->ipv6cp_gotoptions; + u_char citype, cilen, *next; +#ifdef IPV6CP_COMP + u_short cishort; +#endif /* IPV6CP_COMP */ + eui64_t ifaceid; + ipv6cp_options no; /* options we've seen Naks for */ + ipv6cp_options try_; /* options to request next time */ + + BZERO(&no, sizeof(no)); + try_ = *go; + + /* + * Any Nak'd CIs must be in exactly the same order that we sent. + * Check packet length and CI length at each step. + * If we find any deviations, then this packet is bad. + */ +#define NAKCIIFACEID(opt, neg, code) \ + if (go->neg && \ + len >= (cilen = CILEN_IFACEID) && \ + p[1] == cilen && \ + p[0] == opt) { \ + len -= cilen; \ + INCPTR(2, p); \ + eui64_get(ifaceid, p); \ + no.neg = 1; \ + code \ + } + +#ifdef IPV6CP_COMP +#define NAKCIVJ(opt, neg, code) \ + if (go->neg && \ + ((cilen = p[1]) == CILEN_COMPRESS) && \ + len >= cilen && \ + p[0] == opt) { \ + len -= cilen; \ + INCPTR(2, p); \ + GETSHORT(cishort, p); \ + no.neg = 1; \ + code \ + } +#endif /* IPV6CP_COMP */ + + /* + * Accept the peer's idea of {our,his} interface identifier, if different + * from our idea, only if the accept_{local,remote} flag is set. + */ + NAKCIIFACEID(CI_IFACEID, neg_ifaceid, + if (treat_as_reject) { + try_.neg_ifaceid = 0; + } else if (go->accept_local) { + while (eui64_iszero(ifaceid) || + eui64_equals(ifaceid, go->hisid)) /* bad luck */ + eui64_magic(ifaceid); + try_.ourid = ifaceid; + IPV6CPDEBUG(("local LL address %s", llv6_ntoa(ifaceid))); + } + ); + +#ifdef IPV6CP_COMP + NAKCIVJ(CI_COMPRESSTYPE, neg_vj, + { + if (cishort == IPV6CP_COMP && !treat_as_reject) { + try_.vj_protocol = cishort; + } else { + try_.neg_vj = 0; + } + } + ); +#endif /* IPV6CP_COMP */ + + /* + * There may be remaining CIs, if the peer is requesting negotiation + * on an option that we didn't include in our request packet. + * If they want to negotiate about interface identifier, we comply. + * If they want us to ask for compression, we refuse. + */ + while (len >= CILEN_VOID) { + GETCHAR(citype, p); + GETCHAR(cilen, p); + if ( cilen < CILEN_VOID || (len -= cilen) < 0 ) + goto bad; + next = p + cilen - 2; + + switch (citype) { +#ifdef IPV6CP_COMP + case CI_COMPRESSTYPE: + if (go->neg_vj || no.neg_vj || + (cilen != CILEN_COMPRESS)) + goto bad; + no.neg_vj = 1; + break; +#endif /* IPV6CP_COMP */ + case CI_IFACEID: + if (go->neg_ifaceid || no.neg_ifaceid || cilen != CILEN_IFACEID) + goto bad; + try_.neg_ifaceid = 1; + eui64_get(ifaceid, p); + if (go->accept_local) { + while (eui64_iszero(ifaceid) || + eui64_equals(ifaceid, go->hisid)) /* bad luck */ + eui64_magic(ifaceid); + try_.ourid = ifaceid; + } + no.neg_ifaceid = 1; + break; + default: + break; + } + p = next; + } + + /* If there is still anything left, this packet is bad. */ + if (len != 0) + goto bad; + + /* + * OK, the Nak is good. Now we can update state. + */ + if (f->state != PPP_FSM_OPENED) + *go = try_; + + return 1; + +bad: + IPV6CPDEBUG(("ipv6cp_nakci: received bad Nak!")); + return 0; +} + + +/* + * ipv6cp_rejci - Reject some of our CIs. 
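/*
 * Editor's note: a standalone sketch (not lwIP code) of the "bad luck" retry
 * loop in ipv6cp_nakci()/ipv6cp_reqci() above: a proposed interface
 * identifier that is all-zero or collides with the other end's is replaced
 * by fresh random candidates until a usable one is found; rand() stands in
 * for lwIP's eui64_magic().
 */
#include <stdlib.h>
#include <string.h>

static void pick_ifaceid(unsigned char id[8], const unsigned char other[8])
{
    static const unsigned char zero[8] = { 0 };

    while (memcmp(id, zero, 8) == 0 || memcmp(id, other, 8) == 0) {
        for (int i = 0; i < 8; i++)
            id[i] = (unsigned char)(rand() & 0xff);  /* draw a new candidate */
    }
}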
+ */ +static int ipv6cp_rejci(fsm *f, u_char *p, int len) { + ppp_pcb *pcb = f->pcb; + ipv6cp_options *go = &pcb->ipv6cp_gotoptions; + u_char cilen; +#ifdef IPV6CP_COMP + u_short cishort; +#endif /* IPV6CP_COMP */ + eui64_t ifaceid; + ipv6cp_options try_; /* options to request next time */ + + try_ = *go; + /* + * Any Rejected CIs must be in exactly the same order that we sent. + * Check packet length and CI length at each step. + * If we find any deviations, then this packet is bad. + */ +#define REJCIIFACEID(opt, neg, val1) \ + if (go->neg && \ + len >= (cilen = CILEN_IFACEID) && \ + p[1] == cilen && \ + p[0] == opt) { \ + len -= cilen; \ + INCPTR(2, p); \ + eui64_get(ifaceid, p); \ + /* Check rejected value. */ \ + if (! eui64_equals(ifaceid, val1)) \ + goto bad; \ + try_.neg = 0; \ + } + +#ifdef IPV6CP_COMP +#define REJCIVJ(opt, neg, val) \ + if (go->neg && \ + p[1] == CILEN_COMPRESS && \ + len >= p[1] && \ + p[0] == opt) { \ + len -= p[1]; \ + INCPTR(2, p); \ + GETSHORT(cishort, p); \ + /* Check rejected value. */ \ + if (cishort != val) \ + goto bad; \ + try_.neg = 0; \ + } +#endif /* IPV6CP_COMP */ + + REJCIIFACEID(CI_IFACEID, neg_ifaceid, go->ourid); + +#ifdef IPV6CP_COMP + REJCIVJ(CI_COMPRESSTYPE, neg_vj, go->vj_protocol); +#endif /* IPV6CP_COMP */ + + /* + * If there are any remaining CIs, then this packet is bad. + */ + if (len != 0) + goto bad; + /* + * Now we can update state. + */ + if (f->state != PPP_FSM_OPENED) + *go = try_; + return 1; + +bad: + IPV6CPDEBUG(("ipv6cp_rejci: received bad Reject!")); + return 0; +} + + +/* + * ipv6cp_reqci - Check the peer's requested CIs and send appropriate response. + * + * Returns: CONFACK, CONFNAK or CONFREJ and input packet modified + * appropriately. If reject_if_disagree is non-zero, doesn't return + * CONFNAK; returns CONFREJ if it can't return CONFACK. + * + * inp = Requested CIs + * len = Length of requested CIs + * + */ +static int ipv6cp_reqci(fsm *f, u_char *inp, int *len, int reject_if_disagree) { + ppp_pcb *pcb = f->pcb; + ipv6cp_options *wo = &pcb->ipv6cp_wantoptions; + ipv6cp_options *ho = &pcb->ipv6cp_hisoptions; + ipv6cp_options *ao = &pcb->ipv6cp_allowoptions; + ipv6cp_options *go = &pcb->ipv6cp_gotoptions; + u_char *cip, *next; /* Pointer to current and next CIs */ + u_short cilen, citype; /* Parsed len, type */ +#ifdef IPV6CP_COMP + u_short cishort; /* Parsed short value */ +#endif /* IPV6CP_COMP */ + eui64_t ifaceid; /* Parsed interface identifier */ + int rc = CONFACK; /* Final packet return code */ + int orc; /* Individual option return code */ + u_char *p; /* Pointer to next char to parse */ + u_char *ucp = inp; /* Pointer to current output char */ + int l = *len; /* Length left */ + + /* + * Reset all his options. + */ + BZERO(ho, sizeof(*ho)); + + /* + * Process all his options. + */ + next = inp; + while (l) { + orc = CONFACK; /* Assume success */ + cip = p = next; /* Remember begining of CI */ + if (l < 2 || /* Not enough data for CI header or */ + p[1] < 2 || /* CI length too small or */ + p[1] > l) { /* CI length too big? 
*/ + IPV6CPDEBUG(("ipv6cp_reqci: bad CI length!")); + orc = CONFREJ; /* Reject bad CI */ + cilen = l; /* Reject till end of packet */ + l = 0; /* Don't loop again */ + goto endswitch; + } + GETCHAR(citype, p); /* Parse CI type */ + GETCHAR(cilen, p); /* Parse CI length */ + l -= cilen; /* Adjust remaining length */ + next += cilen; /* Step to next CI */ + + switch (citype) { /* Check CI type */ + case CI_IFACEID: + IPV6CPDEBUG(("ipv6cp: received interface identifier ")); + + if (!ao->neg_ifaceid || + cilen != CILEN_IFACEID) { /* Check CI length */ + orc = CONFREJ; /* Reject CI */ + break; + } + + /* + * If he has no interface identifier, or if we both have same + * identifier then NAK it with new idea. + * In particular, if we don't know his identifier, but he does, + * then accept it. + */ + eui64_get(ifaceid, p); + IPV6CPDEBUG(("(%s)", llv6_ntoa(ifaceid))); + if (eui64_iszero(ifaceid) && eui64_iszero(go->ourid)) { + orc = CONFREJ; /* Reject CI */ + break; + } + if (!eui64_iszero(wo->hisid) && + !eui64_equals(ifaceid, wo->hisid) && + eui64_iszero(go->hisid)) { + + orc = CONFNAK; + ifaceid = wo->hisid; + go->hisid = ifaceid; + DECPTR(sizeof(ifaceid), p); + eui64_put(ifaceid, p); + } else + if (eui64_iszero(ifaceid) || eui64_equals(ifaceid, go->ourid)) { + orc = CONFNAK; + if (eui64_iszero(go->hisid)) /* first time, try option */ + ifaceid = wo->hisid; + while (eui64_iszero(ifaceid) || + eui64_equals(ifaceid, go->ourid)) /* bad luck */ + eui64_magic(ifaceid); + go->hisid = ifaceid; + DECPTR(sizeof(ifaceid), p); + eui64_put(ifaceid, p); + } + + ho->neg_ifaceid = 1; + ho->hisid = ifaceid; + break; + +#ifdef IPV6CP_COMP + case CI_COMPRESSTYPE: + IPV6CPDEBUG(("ipv6cp: received COMPRESSTYPE ")); + if (!ao->neg_vj || + (cilen != CILEN_COMPRESS)) { + orc = CONFREJ; + break; + } + GETSHORT(cishort, p); + IPV6CPDEBUG(("(%d)", cishort)); + + if (!(cishort == IPV6CP_COMP)) { + orc = CONFREJ; + break; + } + + ho->neg_vj = 1; + ho->vj_protocol = cishort; + break; +#endif /* IPV6CP_COMP */ + + default: + orc = CONFREJ; + break; + } + +endswitch: + IPV6CPDEBUG((" (%s)\n", CODENAME(orc))); + + if (orc == CONFACK && /* Good CI */ + rc != CONFACK) /* but prior CI wasnt? */ + continue; /* Don't send this one */ + + if (orc == CONFNAK) { /* Nak this CI? */ + if (reject_if_disagree) /* Getting fed up with sending NAKs? */ + orc = CONFREJ; /* Get tough if so */ + else { + if (rc == CONFREJ) /* Rejecting prior CI? */ + continue; /* Don't send this one */ + if (rc == CONFACK) { /* Ack'd all prior CIs? */ + rc = CONFNAK; /* Not anymore... */ + ucp = inp; /* Backup */ + } + } + } + + if (orc == CONFREJ && /* Reject this CI */ + rc != CONFREJ) { /* but no prior ones? */ + rc = CONFREJ; + ucp = inp; /* Backup */ + } + + /* Need to move CI? */ + if (ucp != cip) + MEMCPY(ucp, cip, cilen); /* Move it */ + + /* Update output pointer */ + INCPTR(cilen, ucp); + } + + /* + * If we aren't rejecting this packet, and we want to negotiate + * their identifier and they didn't send their identifier, then we + * send a NAK with a CI_IFACEID option appended. We assume the + * input buffer is long enough that we can append the extra + * option safely. 
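/*
 * Editor's note: a standalone sketch (not lwIP code) of the response
 * precedence implemented by the rc/orc bookkeeping in ipv6cp_reqci() above:
 * the reply to a Configure-Request is ConfRej if any option was rejected,
 * else ConfNak if any was naked, else ConfAck -- which is why the output
 * pointer is "backed up" so only options of the final category are kept.
 */
enum cp_reply_demo { REPLY_ACK, REPLY_NAK, REPLY_REJ };   /* REJ > NAK > ACK */

static enum cp_reply_demo merge_reply(enum cp_reply_demo so_far,
                                      enum cp_reply_demo this_option)
{
    return (this_option > so_far) ? this_option : so_far;
}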
+ */ + if (rc != CONFREJ && !ho->neg_ifaceid && + wo->req_ifaceid && !reject_if_disagree) { + if (rc == CONFACK) { + rc = CONFNAK; + ucp = inp; /* reset pointer */ + wo->req_ifaceid = 0; /* don't ask again */ + } + PUTCHAR(CI_IFACEID, ucp); + PUTCHAR(CILEN_IFACEID, ucp); + eui64_put(wo->hisid, ucp); + } + + *len = ucp - inp; /* Compute output length */ + IPV6CPDEBUG(("ipv6cp: returning Configure-%s", CODENAME(rc))); + return (rc); /* Return final code */ +} + +#if PPP_OPTIONS +/* + * ipv6_check_options - check that any IP-related options are OK, + * and assign appropriate defaults. + */ +static void ipv6_check_options() { + ipv6cp_options *wo = &ipv6cp_wantoptions[0]; + + if (!ipv6cp_protent.enabled_flag) + return; + + /* + * Persistent link-local id is only used when user has not explicitly + * configure/hard-code the id + */ + if ((wo->use_persistent) && (!wo->opt_local) && (!wo->opt_remote)) { + + /* + * On systems where there are no Ethernet interfaces used, there + * may be other ways to obtain a persistent id. Right now, it + * will fall back to using magic [see eui64_magic] below when + * an EUI-48 from MAC address can't be obtained. Other possibilities + * include obtaining EEPROM serial numbers, or some other unique + * yet persistent number. On Sparc platforms, this is possible, + * but too bad there's no standards yet for x86 machines. + */ + if (ether_to_eui64(&wo->ourid)) { + wo->opt_local = 1; + } + } + + if (!wo->opt_local) { /* init interface identifier */ + if (wo->use_ip && eui64_iszero(wo->ourid)) { + eui64_setlo32(wo->ourid, lwip_ntohl(ipcp_wantoptions[0].ouraddr)); + if (!eui64_iszero(wo->ourid)) + wo->opt_local = 1; + } + + while (eui64_iszero(wo->ourid)) + eui64_magic(wo->ourid); + } + + if (!wo->opt_remote) { + if (wo->use_ip && eui64_iszero(wo->hisid)) { + eui64_setlo32(wo->hisid, lwip_ntohl(ipcp_wantoptions[0].hisaddr)); + if (!eui64_iszero(wo->hisid)) + wo->opt_remote = 1; + } + } + + if (demand && (eui64_iszero(wo->ourid) || eui64_iszero(wo->hisid))) { + option_error("local/remote LL address required for demand-dialling\n"); + exit(1); + } +} +#endif /* PPP_OPTIONS */ + +#if DEMAND_SUPPORT +/* + * ipv6_demand_conf - configure the interface as though + * IPV6CP were up, for use with dial-on-demand. + */ +static int ipv6_demand_conf(int u) { + ipv6cp_options *wo = &ipv6cp_wantoptions[u]; + + if (!sif6up(u)) + return 0; + + if (!sif6addr(u, wo->ourid, wo->hisid)) + return 0; + + if (!sifnpmode(u, PPP_IPV6, NPMODE_QUEUE)) + return 0; + + ppp_notice("ipv6_demand_conf"); + ppp_notice("local LL address %s", llv6_ntoa(wo->ourid)); + ppp_notice("remote LL address %s", llv6_ntoa(wo->hisid)); + + return 1; +} +#endif /* DEMAND_SUPPORT */ + + +/* + * ipv6cp_up - IPV6CP has come UP. + * + * Configure the IPv6 network interface appropriately and bring it up. + */ +static void ipv6cp_up(fsm *f) { + ppp_pcb *pcb = f->pcb; + ipv6cp_options *wo = &pcb->ipv6cp_wantoptions; + ipv6cp_options *ho = &pcb->ipv6cp_hisoptions; + ipv6cp_options *go = &pcb->ipv6cp_gotoptions; + + IPV6CPDEBUG(("ipv6cp: up")); + + /* + * We must have a non-zero LL address for both ends of the link. 
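/*
 * Editor's note: a standalone sketch (not lwIP code) of the usual EUI-48 ->
 * modified EUI-64 expansion that ether_to_eui64(), referenced in
 * ipv6_check_options() above, relies on when a MAC address is available:
 * split the MAC, insert ff:fe in the middle and invert the universal/local
 * bit of the first byte (RFC 4291, Appendix A).
 */
static void mac_to_eui64(const unsigned char mac[6], unsigned char id[8])
{
    id[0] = (unsigned char)(mac[0] ^ 0x02);  /* invert the U/L bit           */
    id[1] = mac[1];
    id[2] = mac[2];
    id[3] = 0xff;                            /* fixed ff:fe filler           */
    id[4] = 0xfe;
    id[5] = mac[3];
    id[6] = mac[4];
    id[7] = mac[5];
}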
+ */ + if (!ho->neg_ifaceid) + ho->hisid = wo->hisid; + +#if 0 /* UNUSED */ + if(!no_ifaceid_neg) { +#endif /* UNUSED */ + if (eui64_iszero(ho->hisid)) { + ppp_error("Could not determine remote LL address"); + ipv6cp_close(f->pcb, "Could not determine remote LL address"); + return; + } + if (eui64_iszero(go->ourid)) { + ppp_error("Could not determine local LL address"); + ipv6cp_close(f->pcb, "Could not determine local LL address"); + return; + } + if (eui64_equals(go->ourid, ho->hisid)) { + ppp_error("local and remote LL addresses are equal"); + ipv6cp_close(f->pcb, "local and remote LL addresses are equal"); + return; + } +#if 0 /* UNUSED */ + } +#endif /* UNUSED */ +#if 0 /* UNUSED */ + script_setenv("LLLOCAL", llv6_ntoa(go->ourid), 0); + script_setenv("LLREMOTE", llv6_ntoa(ho->hisid), 0); +#endif /* UNUSED */ + +#ifdef IPV6CP_COMP + /* set tcp compression */ + sif6comp(f->unit, ho->neg_vj); +#endif + +#if DEMAND_SUPPORT + /* + * If we are doing dial-on-demand, the interface is already + * configured, so we put out any saved-up packets, then set the + * interface to pass IPv6 packets. + */ + if (demand) { + if (! eui64_equals(go->ourid, wo->ourid) || + ! eui64_equals(ho->hisid, wo->hisid)) { + if (! eui64_equals(go->ourid, wo->ourid)) + warn("Local LL address changed to %s", + llv6_ntoa(go->ourid)); + if (! eui64_equals(ho->hisid, wo->hisid)) + warn("Remote LL address changed to %s", + llv6_ntoa(ho->hisid)); + ipv6cp_clear_addrs(f->pcb, go->ourid, ho->hisid); + + /* Set the interface to the new addresses */ + if (!sif6addr(f->pcb, go->ourid, ho->hisid)) { + if (debug) + warn("sif6addr failed"); + ipv6cp_close(f->unit, "Interface configuration failed"); + return; + } + + } + demand_rexmit(PPP_IPV6); + sifnpmode(f->unit, PPP_IPV6, NPMODE_PASS); + + } else +#endif /* DEMAND_SUPPORT */ + { + /* + * Set LL addresses + */ + if (!sif6addr(f->pcb, go->ourid, ho->hisid)) { + PPPDEBUG(LOG_DEBUG, ("sif6addr failed")); + ipv6cp_close(f->pcb, "Interface configuration failed"); + return; + } + + /* bring the interface up for IPv6 */ + if (!sif6up(f->pcb)) { + PPPDEBUG(LOG_DEBUG, ("sif6up failed (IPV6)")); + ipv6cp_close(f->pcb, "Interface configuration failed"); + return; + } +#if DEMAND_SUPPORT + sifnpmode(f->pcb, PPP_IPV6, NPMODE_PASS); +#endif /* DEMAND_SUPPORT */ + + ppp_notice("local LL address %s", llv6_ntoa(go->ourid)); + ppp_notice("remote LL address %s", llv6_ntoa(ho->hisid)); + } + + np_up(f->pcb, PPP_IPV6); + pcb->ipv6cp_is_up = 1; + +#if 0 /* UNUSED */ + /* + * Execute the ipv6-up script, like this: + * /etc/ppp/ipv6-up interface tty speed local-LL remote-LL + */ + if (ipv6cp_script_state == s_down && ipv6cp_script_pid == 0) { + ipv6cp_script_state = s_up; + ipv6cp_script(_PATH_IPV6UP); + } +#endif /* UNUSED */ +} + + +/* + * ipv6cp_down - IPV6CP has gone DOWN. + * + * Take the IPv6 network interface down, clear its addresses + * and delete routes through it. + */ +static void ipv6cp_down(fsm *f) { + ppp_pcb *pcb = f->pcb; + ipv6cp_options *go = &pcb->ipv6cp_gotoptions; + ipv6cp_options *ho = &pcb->ipv6cp_hisoptions; + + IPV6CPDEBUG(("ipv6cp: down")); +#if PPP_STATS_SUPPORT + update_link_stats(f->unit); +#endif /* PPP_STATS_SUPPORT */ + if (pcb->ipv6cp_is_up) { + pcb->ipv6cp_is_up = 0; + np_down(f->pcb, PPP_IPV6); + } +#ifdef IPV6CP_COMP + sif6comp(f->unit, 0); +#endif + +#if DEMAND_SUPPORT + /* + * If we are doing dial-on-demand, set the interface + * to queue up outgoing packets (for now). 
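/*
 * Editor's note: a standalone restatement (not lwIP code) of the three
 * checks ipv6cp_up() makes above before configuring the interface: both
 * interface identifiers must be known (non-zero) and must differ, otherwise
 * the two ends' fe80:: link-local addresses would be missing or identical.
 */
#include <string.h>

static int ll_ids_usable(const unsigned char ours[8], const unsigned char theirs[8])
{
    static const unsigned char zero[8] = { 0 };

    if (memcmp(theirs, zero, 8) == 0) return 0;  /* remote LL id unknown     */
    if (memcmp(ours,   zero, 8) == 0) return 0;  /* local LL id unknown      */
    if (memcmp(ours, theirs, 8) == 0) return 0;  /* both ends identical      */
    return 1;
}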
+ */ + if (demand) { + sifnpmode(f->pcb, PPP_IPV6, NPMODE_QUEUE); + } else +#endif /* DEMAND_SUPPORT */ + { +#if DEMAND_SUPPORT + sifnpmode(f->pcb, PPP_IPV6, NPMODE_DROP); +#endif /* DEMAND_SUPPORT */ + ipv6cp_clear_addrs(f->pcb, + go->ourid, + ho->hisid); + sif6down(f->pcb); + } + +#if 0 /* UNUSED */ + /* Execute the ipv6-down script */ + if (ipv6cp_script_state == s_up && ipv6cp_script_pid == 0) { + ipv6cp_script_state = s_down; + ipv6cp_script(_PATH_IPV6DOWN); + } +#endif /* UNUSED */ +} + + +/* + * ipv6cp_clear_addrs() - clear the interface addresses, routes, + * proxy neighbour discovery entries, etc. + */ +static void ipv6cp_clear_addrs(ppp_pcb *pcb, eui64_t ourid, eui64_t hisid) { + cif6addr(pcb, ourid, hisid); +} + + +/* + * ipv6cp_finished - possibly shut down the lower layers. + */ +static void ipv6cp_finished(fsm *f) { + np_finished(f->pcb, PPP_IPV6); +} + + +#if 0 /* UNUSED */ +/* + * ipv6cp_script_done - called when the ipv6-up or ipv6-down script + * has finished. + */ +static void +ipv6cp_script_done(arg) + void *arg; +{ + ipv6cp_script_pid = 0; + switch (ipv6cp_script_state) { + case s_up: + if (ipv6cp_fsm[0].state != PPP_FSM_OPENED) { + ipv6cp_script_state = s_down; + ipv6cp_script(_PATH_IPV6DOWN); + } + break; + case s_down: + if (ipv6cp_fsm[0].state == PPP_FSM_OPENED) { + ipv6cp_script_state = s_up; + ipv6cp_script(_PATH_IPV6UP); + } + break; + } +} + + +/* + * ipv6cp_script - Execute a script with arguments + * interface-name tty-name speed local-LL remote-LL. + */ +static void +ipv6cp_script(script) + char *script; +{ + char strspeed[32], strlocal[32], strremote[32]; + char *argv[8]; + + sprintf(strspeed, "%d", baud_rate); + strcpy(strlocal, llv6_ntoa(ipv6cp_gotoptions[0].ourid)); + strcpy(strremote, llv6_ntoa(ipv6cp_hisoptions[0].hisid)); + + argv[0] = script; + argv[1] = ifname; + argv[2] = devnam; + argv[3] = strspeed; + argv[4] = strlocal; + argv[5] = strremote; + argv[6] = ipparam; + argv[7] = NULL; + + ipv6cp_script_pid = run_program(script, argv, 0, ipv6cp_script_done, + NULL, 0); +} +#endif /* UNUSED */ + +#if PRINTPKT_SUPPORT +/* + * ipv6cp_printpkt - print the contents of an IPV6CP packet. 
+ */ +static const char* const ipv6cp_codenames[] = { + "ConfReq", "ConfAck", "ConfNak", "ConfRej", + "TermReq", "TermAck", "CodeRej" +}; + +static int ipv6cp_printpkt(const u_char *p, int plen, + void (*printer)(void *, const char *, ...), void *arg) { + int code, id, len, olen; + const u_char *pstart, *optend; +#ifdef IPV6CP_COMP + u_short cishort; +#endif /* IPV6CP_COMP */ + eui64_t ifaceid; + + if (plen < HEADERLEN) + return 0; + pstart = p; + GETCHAR(code, p); + GETCHAR(id, p); + GETSHORT(len, p); + if (len < HEADERLEN || len > plen) + return 0; + + if (code >= 1 && code <= (int)LWIP_ARRAYSIZE(ipv6cp_codenames)) + printer(arg, " %s", ipv6cp_codenames[code-1]); + else + printer(arg, " code=0x%x", code); + printer(arg, " id=0x%x", id); + len -= HEADERLEN; + switch (code) { + case CONFREQ: + case CONFACK: + case CONFNAK: + case CONFREJ: + /* print option list */ + while (len >= 2) { + GETCHAR(code, p); + GETCHAR(olen, p); + p -= 2; + if (olen < 2 || olen > len) { + break; + } + printer(arg, " <"); + len -= olen; + optend = p + olen; + switch (code) { +#ifdef IPV6CP_COMP + case CI_COMPRESSTYPE: + if (olen >= CILEN_COMPRESS) { + p += 2; + GETSHORT(cishort, p); + printer(arg, "compress "); + printer(arg, "0x%x", cishort); + } + break; +#endif /* IPV6CP_COMP */ + case CI_IFACEID: + if (olen == CILEN_IFACEID) { + p += 2; + eui64_get(ifaceid, p); + printer(arg, "addr %s", llv6_ntoa(ifaceid)); + } + break; + default: + break; + } + while (p < optend) { + GETCHAR(code, p); + printer(arg, " %.2x", code); + } + printer(arg, ">"); + } + break; + + case TERMACK: + case TERMREQ: + if (len > 0 && *p >= ' ' && *p < 0x7f) { + printer(arg, " "); + ppp_print_string(p, len, printer, arg); + p += len; + len = 0; + } + break; + default: + break; + } + + /* print the rest of the bytes in the packet */ + for (; len > 0; --len) { + GETCHAR(code, p); + printer(arg, " %.2x", code); + } + + return p - pstart; +} +#endif /* PRINTPKT_SUPPORT */ + +#if DEMAND_SUPPORT +/* + * ipv6_active_pkt - see if this IP packet is worth bringing the link up for. + * We don't bring the link up for IP fragments or for TCP FIN packets + * with no data. + */ +#define IP6_HDRLEN 40 /* bytes */ +#define IP6_NHDR_FRAG 44 /* fragment IPv6 header */ +#define TCP_HDRLEN 20 +#define TH_FIN 0x01 + +/* + * We use these macros because the IP header may be at an odd address, + * and some compilers might use word loads to get th_off or ip_hl. + */ + +#define get_ip6nh(x) (((unsigned char *)(x))[6]) +#define get_tcpoff(x) (((unsigned char *)(x))[12] >> 4) +#define get_tcpflags(x) (((unsigned char *)(x))[13]) + +static int ipv6_active_pkt(u_char *pkt, int len) { + u_char *tcp; + + len -= PPP_HDRLEN; + pkt += PPP_HDRLEN; + if (len < IP6_HDRLEN) + return 0; + if (get_ip6nh(pkt) == IP6_NHDR_FRAG) + return 0; + if (get_ip6nh(pkt) != IPPROTO_TCP) + return 1; + if (len < IP6_HDRLEN + TCP_HDRLEN) + return 0; + tcp = pkt + IP6_HDRLEN; + if ((get_tcpflags(tcp) & TH_FIN) != 0 && len == IP6_HDRLEN + get_tcpoff(tcp) * 4) + return 0; + return 1; +} +#endif /* DEMAND_SUPPORT */ + +#endif /* PPP_SUPPORT && PPP_IPV6_SUPPORT */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/netif/ppp/lcp.c b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/netif/ppp/lcp.c new file mode 100644 index 0000000..1316918 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/netif/ppp/lcp.c @@ -0,0 +1,2790 @@ +/* + * lcp.c - PPP Link Control Protocol. 
+ * + * Copyright (c) 1984-2000 Carnegie Mellon University. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * 3. The name "Carnegie Mellon University" must not be used to + * endorse or promote products derived from this software without + * prior written permission. For permission or any legal + * details, please contact + * Office of Technology Transfer + * Carnegie Mellon University + * 5000 Forbes Avenue + * Pittsburgh, PA 15213-3890 + * (412) 268-4387, fax: (412) 268-7395 + * tech-transfer@andrew.cmu.edu + * + * 4. Redistributions of any form whatsoever must retain the following + * acknowledgment: + * "This product includes software developed by Computing Services + * at Carnegie Mellon University (http://www.cmu.edu/computing/)." + * + * CARNEGIE MELLON UNIVERSITY DISCLAIMS ALL WARRANTIES WITH REGARD TO + * THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY + * AND FITNESS, IN NO EVENT SHALL CARNEGIE MELLON UNIVERSITY BE LIABLE + * FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN + * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING + * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#include "netif/ppp/ppp_opts.h" +#if PPP_SUPPORT /* don't build if not configured for use in lwipopts.h */ + +/* + * @todo: + */ + +#if 0 /* UNUSED */ +#include +#include +#include +#endif /* UNUSED */ + +#include "netif/ppp/ppp_impl.h" + +#include "netif/ppp/fsm.h" +#include "netif/ppp/lcp.h" +#if CHAP_SUPPORT +#include "netif/ppp/chap-new.h" +#endif /* CHAP_SUPPORT */ +#include "netif/ppp/magic.h" + +/* + * When the link comes up we want to be able to wait for a short while, + * or until seeing some input from the peer, before starting to send + * configure-requests. We do this by delaying the fsm_lowerup call. + */ +/* steal a bit in fsm flags word */ +#define DELAYED_UP 0x80 + +static void lcp_delayed_up(void *arg); + +/* + * LCP-related command-line options. 
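/*
 * Editor's note: a hypothetical sketch (names invented, not the lwIP API) of
 * the DELAYED_UP idea introduced above: one spare bit in the fsm flags word
 * marks "lowerup is pending"; whichever comes first -- the listen-time timer
 * or the first packet from the peer -- clears the bit and performs the
 * delayed fsm_lowerup() exactly once.
 */
#define DEMO_DELAYED_UP 0x80u

struct demo_fsm { unsigned flags; };

static void demo_fsm_lowerup(struct demo_fsm *f) { (void)f; /* real work here */ }

static void demo_delayed_up(struct demo_fsm *f)  /* timer expiry or first input */
{
    if (f->flags & DEMO_DELAYED_UP) {
        f->flags &= ~DEMO_DELAYED_UP;            /* run the delayed action once */
        demo_fsm_lowerup(f);
    }
}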
+ */ +#if 0 /* UNUSED */ +int lcp_echo_interval = 0; /* Interval between LCP echo-requests */ +int lcp_echo_fails = 0; /* Tolerance to unanswered echo-requests */ +#endif /* UNUSED */ + +#if 0 /* UNUSED */ +/* options */ +static u_int lcp_echo_interval = LCP_ECHOINTERVAL; /* Interval between LCP echo-requests */ +static u_int lcp_echo_fails = LCP_MAXECHOFAILS; /* Tolerance to unanswered echo-requests */ +#endif /* UNUSED */ + +#if 0 /* UNUSED */ +#if PPP_LCP_ADAPTIVE +bool lcp_echo_adaptive = 0; /* request echo only if the link was idle */ +#endif +bool lax_recv = 0; /* accept control chars in asyncmap */ +bool noendpoint = 0; /* don't send/accept endpoint discriminator */ +#endif /* UNUSED */ + +#if PPP_OPTIONS +static int noopt (char **); +#endif /* PPP_OPTIONS */ + +#ifdef HAVE_MULTILINK +static int setendpoint (char **); +static void printendpoint (option_t *, void (*)(void *, char *, ...), + void *); +#endif /* HAVE_MULTILINK */ + +#if PPP_OPTIONS +static option_t lcp_option_list[] = { + /* LCP options */ + { "-all", o_special_noarg, (void *)noopt, + "Don't request/allow any LCP options" }, + + { "noaccomp", o_bool, &lcp_wantoptions[0].neg_accompression, + "Disable address/control compression", + OPT_A2CLR, &lcp_allowoptions[0].neg_accompression }, + { "-ac", o_bool, &lcp_wantoptions[0].neg_accompression, + "Disable address/control compression", + OPT_ALIAS | OPT_A2CLR, &lcp_allowoptions[0].neg_accompression }, + + { "asyncmap", o_uint32, &lcp_wantoptions[0].asyncmap, + "Set asyncmap (for received packets)", + OPT_OR, &lcp_wantoptions[0].neg_asyncmap }, + { "-as", o_uint32, &lcp_wantoptions[0].asyncmap, + "Set asyncmap (for received packets)", + OPT_ALIAS | OPT_OR, &lcp_wantoptions[0].neg_asyncmap }, + { "default-asyncmap", o_uint32, &lcp_wantoptions[0].asyncmap, + "Disable asyncmap negotiation", + OPT_OR | OPT_NOARG | OPT_VAL(~0U) | OPT_A2CLR, + &lcp_allowoptions[0].neg_asyncmap }, + { "-am", o_uint32, &lcp_wantoptions[0].asyncmap, + "Disable asyncmap negotiation", + OPT_ALIAS | OPT_OR | OPT_NOARG | OPT_VAL(~0U) | OPT_A2CLR, + &lcp_allowoptions[0].neg_asyncmap }, + + { "nomagic", o_bool, &lcp_wantoptions[0].neg_magicnumber, + "Disable magic number negotiation (looped-back line detection)", + OPT_A2CLR, &lcp_allowoptions[0].neg_magicnumber }, + { "-mn", o_bool, &lcp_wantoptions[0].neg_magicnumber, + "Disable magic number negotiation (looped-back line detection)", + OPT_ALIAS | OPT_A2CLR, &lcp_allowoptions[0].neg_magicnumber }, + + { "mru", o_int, &lcp_wantoptions[0].mru, + "Set MRU (maximum received packet size) for negotiation", + OPT_PRIO, &lcp_wantoptions[0].neg_mru }, + { "default-mru", o_bool, &lcp_wantoptions[0].neg_mru, + "Disable MRU negotiation (use default 1500)", + OPT_PRIOSUB | OPT_A2CLR, &lcp_allowoptions[0].neg_mru }, + { "-mru", o_bool, &lcp_wantoptions[0].neg_mru, + "Disable MRU negotiation (use default 1500)", + OPT_ALIAS | OPT_PRIOSUB | OPT_A2CLR, &lcp_allowoptions[0].neg_mru }, + + { "mtu", o_int, &lcp_allowoptions[0].mru, + "Set our MTU", OPT_LIMITS, NULL, MAXMRU, MINMRU }, + + { "nopcomp", o_bool, &lcp_wantoptions[0].neg_pcompression, + "Disable protocol field compression", + OPT_A2CLR, &lcp_allowoptions[0].neg_pcompression }, + { "-pc", o_bool, &lcp_wantoptions[0].neg_pcompression, + "Disable protocol field compression", + OPT_ALIAS | OPT_A2CLR, &lcp_allowoptions[0].neg_pcompression }, + + { "passive", o_bool, &lcp_wantoptions[0].passive, + "Set passive mode", 1 }, + { "-p", o_bool, &lcp_wantoptions[0].passive, + "Set passive mode", OPT_ALIAS | 1 }, + + { 
"silent", o_bool, &lcp_wantoptions[0].silent, + "Set silent mode", 1 }, + + { "lcp-echo-failure", o_int, &lcp_echo_fails, + "Set number of consecutive echo failures to indicate link failure", + OPT_PRIO }, + { "lcp-echo-interval", o_int, &lcp_echo_interval, + "Set time in seconds between LCP echo requests", OPT_PRIO }, +#if PPP_LCP_ADAPTIVE + { "lcp-echo-adaptive", o_bool, &lcp_echo_adaptive, + "Suppress LCP echo requests if traffic was received", 1 }, +#endif + { "lcp-restart", o_int, &lcp_fsm[0].timeouttime, + "Set time in seconds between LCP retransmissions", OPT_PRIO }, + { "lcp-max-terminate", o_int, &lcp_fsm[0].maxtermtransmits, + "Set maximum number of LCP terminate-request transmissions", OPT_PRIO }, + { "lcp-max-configure", o_int, &lcp_fsm[0].maxconfreqtransmits, + "Set maximum number of LCP configure-request transmissions", OPT_PRIO }, + { "lcp-max-failure", o_int, &lcp_fsm[0].maxnakloops, + "Set limit on number of LCP configure-naks", OPT_PRIO }, + + { "receive-all", o_bool, &lax_recv, + "Accept all received control characters", 1 }, + +#ifdef HAVE_MULTILINK + { "mrru", o_int, &lcp_wantoptions[0].mrru, + "Maximum received packet size for multilink bundle", + OPT_PRIO, &lcp_wantoptions[0].neg_mrru }, + + { "mpshortseq", o_bool, &lcp_wantoptions[0].neg_ssnhf, + "Use short sequence numbers in multilink headers", + OPT_PRIO | 1, &lcp_allowoptions[0].neg_ssnhf }, + { "nompshortseq", o_bool, &lcp_wantoptions[0].neg_ssnhf, + "Don't use short sequence numbers in multilink headers", + OPT_PRIOSUB | OPT_A2CLR, &lcp_allowoptions[0].neg_ssnhf }, + + { "endpoint", o_special, (void *) setendpoint, + "Endpoint discriminator for multilink", + OPT_PRIO | OPT_A2PRINTER, (void *) printendpoint }, +#endif /* HAVE_MULTILINK */ + + { "noendpoint", o_bool, &noendpoint, + "Don't send or accept multilink endpoint discriminator", 1 }, + + {NULL} +}; +#endif /* PPP_OPTIONS */ + +/* + * Callbacks for fsm code. 
(CI = Configuration Information) + */ +static void lcp_resetci(fsm *f); /* Reset our CI */ +static int lcp_cilen(fsm *f); /* Return length of our CI */ +static void lcp_addci(fsm *f, u_char *ucp, int *lenp); /* Add our CI to pkt */ +static int lcp_ackci(fsm *f, u_char *p, int len); /* Peer ack'd our CI */ +static int lcp_nakci(fsm *f, u_char *p, int len, int treat_as_reject); /* Peer nak'd our CI */ +static int lcp_rejci(fsm *f, u_char *p, int len); /* Peer rej'd our CI */ +static int lcp_reqci(fsm *f, u_char *inp, int *lenp, int reject_if_disagree); /* Rcv peer CI */ +static void lcp_up(fsm *f); /* We're UP */ +static void lcp_down(fsm *f); /* We're DOWN */ +static void lcp_starting (fsm *); /* We need lower layer up */ +static void lcp_finished (fsm *); /* We need lower layer down */ +static int lcp_extcode(fsm *f, int code, int id, u_char *inp, int len); +static void lcp_rprotrej(fsm *f, u_char *inp, int len); + +/* + * routines to send LCP echos to peer + */ + +static void lcp_echo_lowerup(ppp_pcb *pcb); +static void lcp_echo_lowerdown(ppp_pcb *pcb); +static void LcpEchoTimeout(void *arg); +static void lcp_received_echo_reply(fsm *f, int id, u_char *inp, int len); +static void LcpSendEchoRequest(fsm *f); +static void LcpLinkFailure(fsm *f); +static void LcpEchoCheck(fsm *f); + +static const fsm_callbacks lcp_callbacks = { /* LCP callback routines */ + lcp_resetci, /* Reset our Configuration Information */ + lcp_cilen, /* Length of our Configuration Information */ + lcp_addci, /* Add our Configuration Information */ + lcp_ackci, /* ACK our Configuration Information */ + lcp_nakci, /* NAK our Configuration Information */ + lcp_rejci, /* Reject our Configuration Information */ + lcp_reqci, /* Request peer's Configuration Information */ + lcp_up, /* Called when fsm reaches OPENED state */ + lcp_down, /* Called when fsm leaves OPENED state */ + lcp_starting, /* Called when we want the lower layer up */ + lcp_finished, /* Called when we want the lower layer down */ + NULL, /* Called when Protocol-Reject received */ + NULL, /* Retransmission is necessary */ + lcp_extcode, /* Called to handle LCP-specific codes */ + "LCP" /* String name of protocol */ +}; + +/* + * Protocol entry points. + * Some of these are called directly. + */ + +static void lcp_init(ppp_pcb *pcb); +static void lcp_input(ppp_pcb *pcb, u_char *p, int len); +static void lcp_protrej(ppp_pcb *pcb); +#if PRINTPKT_SUPPORT +static int lcp_printpkt(const u_char *p, int plen, + void (*printer) (void *, const char *, ...), void *arg); +#endif /* PRINTPKT_SUPPORT */ + +const struct protent lcp_protent = { + PPP_LCP, + lcp_init, + lcp_input, + lcp_protrej, + lcp_lowerup, + lcp_lowerdown, + lcp_open, + lcp_close, +#if PRINTPKT_SUPPORT + lcp_printpkt, +#endif /* PRINTPKT_SUPPORT */ +#if PPP_DATAINPUT + NULL, +#endif /* PPP_DATAINPUT */ +#if PRINTPKT_SUPPORT + "LCP", + NULL, +#endif /* PRINTPKT_SUPPORT */ +#if PPP_OPTIONS + lcp_option_list, + NULL, +#endif /* PPP_OPTIONS */ +#if DEMAND_SUPPORT + NULL, + NULL +#endif /* DEMAND_SUPPORT */ +}; + +/* + * Length of each type of configuration option (in octets) + */ +#define CILEN_VOID 2 +#define CILEN_CHAR 3 +#define CILEN_SHORT 4 /* CILEN_VOID + 2 */ +#if CHAP_SUPPORT +#define CILEN_CHAP 5 /* CILEN_VOID + 2 + 1 */ +#endif /* CHAP_SUPPORT */ +#define CILEN_LONG 6 /* CILEN_VOID + 4 */ +#if LQR_SUPPORT +#define CILEN_LQR 8 /* CILEN_VOID + 2 + 4 */ +#endif /* LQR_SUPPORT */ +#define CILEN_CBCP 3 + +#define CODENAME(x) ((x) == CONFACK ? "ACK" : \ + (x) == CONFNAK ? 
"NAK" : "REJ") + +#if PPP_OPTIONS +/* + * noopt - Disable all options (why?). + */ +static int +noopt(argv) + char **argv; +{ + BZERO((char *) &lcp_wantoptions[0], sizeof (struct lcp_options)); + BZERO((char *) &lcp_allowoptions[0], sizeof (struct lcp_options)); + + return (1); +} +#endif /* PPP_OPTIONS */ + +#ifdef HAVE_MULTILINK +static int +setendpoint(argv) + char **argv; +{ + if (str_to_epdisc(&lcp_wantoptions[0].endpoint, *argv)) { + lcp_wantoptions[0].neg_endpoint = 1; + return 1; + } + option_error("Can't parse '%s' as an endpoint discriminator", *argv); + return 0; +} + +static void +printendpoint(opt, printer, arg) + option_t *opt; + void (*printer) (void *, char *, ...); + void *arg; +{ + printer(arg, "%s", epdisc_to_str(&lcp_wantoptions[0].endpoint)); +} +#endif /* HAVE_MULTILINK */ + +/* + * lcp_init - Initialize LCP. + */ +static void lcp_init(ppp_pcb *pcb) { + fsm *f = &pcb->lcp_fsm; + lcp_options *wo = &pcb->lcp_wantoptions; + lcp_options *ao = &pcb->lcp_allowoptions; + + f->pcb = pcb; + f->protocol = PPP_LCP; + f->callbacks = &lcp_callbacks; + + fsm_init(f); + + BZERO(wo, sizeof(*wo)); + wo->neg_mru = 1; + wo->mru = PPP_DEFMRU; + wo->neg_asyncmap = 1; + wo->neg_magicnumber = 1; + wo->neg_pcompression = 1; + wo->neg_accompression = 1; + + BZERO(ao, sizeof(*ao)); + ao->neg_mru = 1; + ao->mru = PPP_MAXMRU; + ao->neg_asyncmap = 1; +#if CHAP_SUPPORT + ao->neg_chap = 1; + ao->chap_mdtype = CHAP_MDTYPE_SUPPORTED; +#endif /* CHAP_SUPPORT */ +#if PAP_SUPPORT + ao->neg_upap = 1; +#endif /* PAP_SUPPORT */ +#if EAP_SUPPORT + ao->neg_eap = 1; +#endif /* EAP_SUPPORT */ + ao->neg_magicnumber = 1; + ao->neg_pcompression = 1; + ao->neg_accompression = 1; + ao->neg_endpoint = 1; +} + + +/* + * lcp_open - LCP is allowed to come up. + */ +void lcp_open(ppp_pcb *pcb) { + fsm *f = &pcb->lcp_fsm; + lcp_options *wo = &pcb->lcp_wantoptions; + + f->flags &= ~(OPT_PASSIVE | OPT_SILENT); + if (wo->passive) + f->flags |= OPT_PASSIVE; + if (wo->silent) + f->flags |= OPT_SILENT; + fsm_open(f); +} + + +/* + * lcp_close - Take LCP down. + */ +void lcp_close(ppp_pcb *pcb, const char *reason) { + fsm *f = &pcb->lcp_fsm; + int oldstate; + + if (pcb->phase != PPP_PHASE_DEAD +#ifdef HAVE_MULTILINK + && pcb->phase != PPP_PHASE_MASTER +#endif /* HAVE_MULTILINK */ + ) + new_phase(pcb, PPP_PHASE_TERMINATE); + + if (f->flags & DELAYED_UP) { + UNTIMEOUT(lcp_delayed_up, f); + f->state = PPP_FSM_STOPPED; + } + oldstate = f->state; + + fsm_close(f, reason); + if (oldstate == PPP_FSM_STOPPED && (f->flags & (OPT_PASSIVE|OPT_SILENT|DELAYED_UP))) { + /* + * This action is not strictly according to the FSM in RFC1548, + * but it does mean that the program terminates if you do a + * lcp_close() when a connection hasn't been established + * because we are in passive/silent mode or because we have + * delayed the fsm_lowerup() call and it hasn't happened yet. + */ + f->flags &= ~DELAYED_UP; + lcp_finished(f); + } +} + + +/* + * lcp_lowerup - The lower layer is up. + */ +void lcp_lowerup(ppp_pcb *pcb) { + lcp_options *wo = &pcb->lcp_wantoptions; + fsm *f = &pcb->lcp_fsm; + /* + * Don't use A/C or protocol compression on transmission, + * but accept A/C and protocol compressed packets + * if we are going to ask for A/C and protocol compression. + */ + if (ppp_send_config(pcb, PPP_MRU, 0xffffffff, 0, 0) < 0 + || ppp_recv_config(pcb, PPP_MRU, (pcb->settings.lax_recv? 
0: 0xffffffff), + wo->neg_pcompression, wo->neg_accompression) < 0) + return; + pcb->peer_mru = PPP_MRU; + + if (pcb->settings.listen_time != 0) { + f->flags |= DELAYED_UP; + TIMEOUTMS(lcp_delayed_up, f, pcb->settings.listen_time); + } else + fsm_lowerup(f); +} + + +/* + * lcp_lowerdown - The lower layer is down. + */ +void lcp_lowerdown(ppp_pcb *pcb) { + fsm *f = &pcb->lcp_fsm; + + if (f->flags & DELAYED_UP) { + f->flags &= ~DELAYED_UP; + UNTIMEOUT(lcp_delayed_up, f); + } else + fsm_lowerdown(f); +} + + +/* + * lcp_delayed_up - Bring the lower layer up now. + */ +static void lcp_delayed_up(void *arg) { + fsm *f = (fsm*)arg; + + if (f->flags & DELAYED_UP) { + f->flags &= ~DELAYED_UP; + fsm_lowerup(f); + } +} + + +/* + * lcp_input - Input LCP packet. + */ +static void lcp_input(ppp_pcb *pcb, u_char *p, int len) { + fsm *f = &pcb->lcp_fsm; + + if (f->flags & DELAYED_UP) { + f->flags &= ~DELAYED_UP; + UNTIMEOUT(lcp_delayed_up, f); + fsm_lowerup(f); + } + fsm_input(f, p, len); +} + +/* + * lcp_extcode - Handle a LCP-specific code. + */ +static int lcp_extcode(fsm *f, int code, int id, u_char *inp, int len) { + ppp_pcb *pcb = f->pcb; + lcp_options *go = &pcb->lcp_gotoptions; + u_char *magp; + + switch( code ){ + case PROTREJ: + lcp_rprotrej(f, inp, len); + break; + + case ECHOREQ: + if (f->state != PPP_FSM_OPENED) + break; + magp = inp; + PUTLONG(go->magicnumber, magp); + fsm_sdata(f, ECHOREP, id, inp, len); + break; + + case ECHOREP: + lcp_received_echo_reply(f, id, inp, len); + break; + + case DISCREQ: + case IDENTIF: + case TIMEREM: + break; + + default: + return 0; + } + return 1; +} + + +/* + * lcp_rprotrej - Receive an Protocol-Reject. + * + * Figure out which protocol is rejected and inform it. + */ +static void lcp_rprotrej(fsm *f, u_char *inp, int len) { + int i; + const struct protent *protp; + u_short prot; +#if PPP_PROTOCOLNAME + const char *pname; +#endif /* PPP_PROTOCOLNAME */ + + if (len < 2) { + LCPDEBUG(("lcp_rprotrej: Rcvd short Protocol-Reject packet!")); + return; + } + + GETSHORT(prot, inp); + + /* + * Protocol-Reject packets received in any state other than the LCP + * OPENED state SHOULD be silently discarded. + */ + if( f->state != PPP_FSM_OPENED ){ + LCPDEBUG(("Protocol-Reject discarded: LCP in state %d", f->state)); + return; + } + +#if PPP_PROTOCOLNAME + pname = protocol_name(prot); +#endif /* PPP_PROTOCOLNAME */ + + /* + * Upcall the proper Protocol-Reject routine. + */ + for (i = 0; (protp = protocols[i]) != NULL; ++i) + if (protp->protocol == prot) { +#if PPP_PROTOCOLNAME + if (pname != NULL) + ppp_dbglog("Protocol-Reject for '%s' (0x%x) received", pname, + prot); + else +#endif /* PPP_PROTOCOLNAME */ + ppp_dbglog("Protocol-Reject for 0x%x received", prot); + (*protp->protrej)(f->pcb); + return; + } + +#if PPP_PROTOCOLNAME + if (pname != NULL) + ppp_warn("Protocol-Reject for unsupported protocol '%s' (0x%x)", pname, + prot); + else +#endif /* #if PPP_PROTOCOLNAME */ + ppp_warn("Protocol-Reject for unsupported protocol 0x%x", prot); +} + + +/* + * lcp_protrej - A Protocol-Reject was received. + */ +/*ARGSUSED*/ +static void lcp_protrej(ppp_pcb *pcb) { + /* + * Can't reject LCP! + */ + ppp_error("Received Protocol-Reject for LCP!"); + fsm_protreject(&pcb->lcp_fsm); +} + + +/* + * lcp_sprotrej - Send a Protocol-Reject for some protocol. + */ +void lcp_sprotrej(ppp_pcb *pcb, u_char *p, int len) { + fsm *f = &pcb->lcp_fsm; + /* + * Send back the protocol and the information field of the + * rejected packet. We only get here if LCP is in the OPENED state. 
+ */ +#if 0 + p += 2; + len -= 2; +#endif + + fsm_sdata(f, PROTREJ, ++f->id, + p, len); +} + + +/* + * lcp_resetci - Reset our CI. + */ +static void lcp_resetci(fsm *f) { + ppp_pcb *pcb = f->pcb; + lcp_options *wo = &pcb->lcp_wantoptions; + lcp_options *go = &pcb->lcp_gotoptions; + lcp_options *ao = &pcb->lcp_allowoptions; + +#if PPP_AUTH_SUPPORT + + /* note: default value is true for allow options */ + if (pcb->settings.user && pcb->settings.passwd) { +#if PAP_SUPPORT + if (pcb->settings.refuse_pap) { + ao->neg_upap = 0; + } +#endif /* PAP_SUPPORT */ +#if CHAP_SUPPORT + if (pcb->settings.refuse_chap) { + ao->chap_mdtype &= ~MDTYPE_MD5; + } +#if MSCHAP_SUPPORT + if (pcb->settings.refuse_mschap) { + ao->chap_mdtype &= ~MDTYPE_MICROSOFT; + } + if (pcb->settings.refuse_mschap_v2) { + ao->chap_mdtype &= ~MDTYPE_MICROSOFT_V2; + } +#endif /* MSCHAP_SUPPORT */ + ao->neg_chap = (ao->chap_mdtype != MDTYPE_NONE); +#endif /* CHAP_SUPPORT */ +#if EAP_SUPPORT + if (pcb->settings.refuse_eap) { + ao->neg_eap = 0; + } +#endif /* EAP_SUPPORT */ + +#if PPP_SERVER + /* note: default value is false for wanted options */ + if (pcb->settings.auth_required) { +#if PAP_SUPPORT + if (!pcb->settings.refuse_pap) { + wo->neg_upap = 1; + } +#endif /* PAP_SUPPORT */ +#if CHAP_SUPPORT + if (!pcb->settings.refuse_chap) { + wo->chap_mdtype |= MDTYPE_MD5; + } +#if MSCHAP_SUPPORT + if (!pcb->settings.refuse_mschap) { + wo->chap_mdtype |= MDTYPE_MICROSOFT; + } + if (!pcb->settings.refuse_mschap_v2) { + wo->chap_mdtype |= MDTYPE_MICROSOFT_V2; + } +#endif /* MSCHAP_SUPPORT */ + wo->neg_chap = (wo->chap_mdtype != MDTYPE_NONE); +#endif /* CHAP_SUPPORT */ +#if EAP_SUPPORT + if (!pcb->settings.refuse_eap) { + wo->neg_eap = 1; + } +#endif /* EAP_SUPPORT */ + } +#endif /* PPP_SERVER */ + + } else { +#if PAP_SUPPORT + ao->neg_upap = 0; +#endif /* PAP_SUPPORT */ +#if CHAP_SUPPORT + ao->neg_chap = 0; + ao->chap_mdtype = MDTYPE_NONE; +#endif /* CHAP_SUPPORT */ +#if EAP_SUPPORT + ao->neg_eap = 0; +#endif /* EAP_SUPPORT */ + } + + PPPDEBUG(LOG_DEBUG, ("ppp: auth protocols:")); +#if PAP_SUPPORT + PPPDEBUG(LOG_DEBUG, (" PAP=%d", ao->neg_upap)); +#endif /* PAP_SUPPORT */ +#if CHAP_SUPPORT + PPPDEBUG(LOG_DEBUG, (" CHAP=%d CHAP_MD5=%d", ao->neg_chap, !!(ao->chap_mdtype&MDTYPE_MD5))); +#if MSCHAP_SUPPORT + PPPDEBUG(LOG_DEBUG, (" CHAP_MS=%d CHAP_MS2=%d", !!(ao->chap_mdtype&MDTYPE_MICROSOFT), !!(ao->chap_mdtype&MDTYPE_MICROSOFT_V2))); +#endif /* MSCHAP_SUPPORT */ +#endif /* CHAP_SUPPORT */ +#if EAP_SUPPORT + PPPDEBUG(LOG_DEBUG, (" EAP=%d", ao->neg_eap)); +#endif /* EAP_SUPPORT */ + PPPDEBUG(LOG_DEBUG, ("\n")); + +#endif /* PPP_AUTH_SUPPORT */ + + wo->magicnumber = magic(); + wo->numloops = 0; + *go = *wo; +#ifdef HAVE_MULTILINK + if (!multilink) { + go->neg_mrru = 0; +#endif /* HAVE_MULTILINK */ + go->neg_ssnhf = 0; + go->neg_endpoint = 0; +#ifdef HAVE_MULTILINK + } +#endif /* HAVE_MULTILINK */ + if (pcb->settings.noendpoint) + ao->neg_endpoint = 0; + pcb->peer_mru = PPP_MRU; +#if 0 /* UNUSED */ + auth_reset(pcb); +#endif /* UNUSED */ +} + + +/* + * lcp_cilen - Return length of our CI. + */ +static int lcp_cilen(fsm *f) { + ppp_pcb *pcb = f->pcb; + lcp_options *go = &pcb->lcp_gotoptions; + +#define LENCIVOID(neg) ((neg) ? CILEN_VOID : 0) +#if CHAP_SUPPORT +#define LENCICHAP(neg) ((neg) ? CILEN_CHAP : 0) +#endif /* CHAP_SUPPORT */ +#define LENCISHORT(neg) ((neg) ? CILEN_SHORT : 0) +#define LENCILONG(neg) ((neg) ? CILEN_LONG : 0) +#if LQR_SUPPORT +#define LENCILQR(neg) ((neg) ? 
CILEN_LQR: 0) +#endif /* LQR_SUPPORT */ +#define LENCICBCP(neg) ((neg) ? CILEN_CBCP: 0) + /* + * NB: we only ask for one of CHAP, UPAP, or EAP, even if we will + * accept more than one. We prefer EAP first, then CHAP, then + * PAP. + */ + return (LENCISHORT(go->neg_mru && go->mru != PPP_DEFMRU) + + LENCILONG(go->neg_asyncmap && go->asyncmap != 0xFFFFFFFF) + +#if EAP_SUPPORT + LENCISHORT(go->neg_eap) + +#endif /* EAP_SUPPORT */ +#if CHAP_SUPPORT /* cannot be improved, embedding a directive within macro arguments is not portable */ +#if EAP_SUPPORT + LENCICHAP(!go->neg_eap && go->neg_chap) + +#endif /* EAP_SUPPORT */ +#if !EAP_SUPPORT + LENCICHAP(go->neg_chap) + +#endif /* !EAP_SUPPORT */ +#endif /* CHAP_SUPPORT */ +#if PAP_SUPPORT /* cannot be improved, embedding a directive within macro arguments is not portable */ +#if EAP_SUPPORT && CHAP_SUPPORT + LENCISHORT(!go->neg_eap && !go->neg_chap && go->neg_upap) + +#endif /* EAP_SUPPORT && CHAP_SUPPORT */ +#if EAP_SUPPORT && !CHAP_SUPPORT + LENCISHORT(!go->neg_eap && go->neg_upap) + +#endif /* EAP_SUPPORT && !CHAP_SUPPORT */ +#if !EAP_SUPPORT && CHAP_SUPPORT + LENCISHORT(!go->neg_chap && go->neg_upap) + +#endif /* !EAP_SUPPORT && CHAP_SUPPORT */ +#if !EAP_SUPPORT && !CHAP_SUPPORT + LENCISHORT(go->neg_upap) + +#endif /* !EAP_SUPPORT && !CHAP_SUPPORT */ +#endif /* PAP_SUPPORT */ +#if LQR_SUPPORT + LENCILQR(go->neg_lqr) + +#endif /* LQR_SUPPORT */ + LENCICBCP(go->neg_cbcp) + + LENCILONG(go->neg_magicnumber) + + LENCIVOID(go->neg_pcompression) + + LENCIVOID(go->neg_accompression) + +#ifdef HAVE_MULTILINK + LENCISHORT(go->neg_mrru) + +#endif /* HAVE_MULTILINK */ + LENCIVOID(go->neg_ssnhf) + + (go->neg_endpoint? CILEN_CHAR + go->endpoint.length: 0)); +} + + +/* + * lcp_addci - Add our desired CIs to a packet. + */ +static void lcp_addci(fsm *f, u_char *ucp, int *lenp) { + ppp_pcb *pcb = f->pcb; + lcp_options *go = &pcb->lcp_gotoptions; + u_char *start_ucp = ucp; + +#define ADDCIVOID(opt, neg) \ + if (neg) { \ + PUTCHAR(opt, ucp); \ + PUTCHAR(CILEN_VOID, ucp); \ + } +#define ADDCISHORT(opt, neg, val) \ + if (neg) { \ + PUTCHAR(opt, ucp); \ + PUTCHAR(CILEN_SHORT, ucp); \ + PUTSHORT(val, ucp); \ + } +#if CHAP_SUPPORT +#define ADDCICHAP(opt, neg, val) \ + if (neg) { \ + PUTCHAR((opt), ucp); \ + PUTCHAR(CILEN_CHAP, ucp); \ + PUTSHORT(PPP_CHAP, ucp); \ + PUTCHAR((CHAP_DIGEST(val)), ucp); \ + } +#endif /* CHAP_SUPPORT */ +#define ADDCILONG(opt, neg, val) \ + if (neg) { \ + PUTCHAR(opt, ucp); \ + PUTCHAR(CILEN_LONG, ucp); \ + PUTLONG(val, ucp); \ + } +#if LQR_SUPPORT +#define ADDCILQR(opt, neg, val) \ + if (neg) { \ + PUTCHAR(opt, ucp); \ + PUTCHAR(CILEN_LQR, ucp); \ + PUTSHORT(PPP_LQR, ucp); \ + PUTLONG(val, ucp); \ + } +#endif /* LQR_SUPPORT */ +#define ADDCICHAR(opt, neg, val) \ + if (neg) { \ + PUTCHAR(opt, ucp); \ + PUTCHAR(CILEN_CHAR, ucp); \ + PUTCHAR(val, ucp); \ + } +#define ADDCIENDP(opt, neg, class, val, len) \ + if (neg) { \ + int i; \ + PUTCHAR(opt, ucp); \ + PUTCHAR(CILEN_CHAR + len, ucp); \ + PUTCHAR(class, ucp); \ + for (i = 0; i < len; ++i) \ + PUTCHAR(val[i], ucp); \ + } + + ADDCISHORT(CI_MRU, go->neg_mru && go->mru != PPP_DEFMRU, go->mru); + ADDCILONG(CI_ASYNCMAP, go->neg_asyncmap && go->asyncmap != 0xFFFFFFFF, + go->asyncmap); +#if EAP_SUPPORT + ADDCISHORT(CI_AUTHTYPE, go->neg_eap, PPP_EAP); +#endif /* EAP_SUPPORT */ +#if CHAP_SUPPORT /* cannot be improved, embedding a directive within macro arguments is not portable */ +#if EAP_SUPPORT + ADDCICHAP(CI_AUTHTYPE, !go->neg_eap && go->neg_chap, go->chap_mdtype); +#endif /* EAP_SUPPORT */ +#if 
!EAP_SUPPORT + ADDCICHAP(CI_AUTHTYPE, go->neg_chap, go->chap_mdtype); +#endif /* !EAP_SUPPORT */ +#endif /* CHAP_SUPPORT */ +#if PAP_SUPPORT /* cannot be improved, embedding a directive within macro arguments is not portable */ +#if EAP_SUPPORT && CHAP_SUPPORT + ADDCISHORT(CI_AUTHTYPE, !go->neg_eap && !go->neg_chap && go->neg_upap, PPP_PAP); +#endif /* EAP_SUPPORT && CHAP_SUPPORT */ +#if EAP_SUPPORT && !CHAP_SUPPORT + ADDCISHORT(CI_AUTHTYPE, !go->neg_eap && go->neg_upap, PPP_PAP); +#endif /* EAP_SUPPORT && !CHAP_SUPPORT */ +#if !EAP_SUPPORT && CHAP_SUPPORT + ADDCISHORT(CI_AUTHTYPE, !go->neg_chap && go->neg_upap, PPP_PAP); +#endif /* !EAP_SUPPORT && CHAP_SUPPORT */ +#if !EAP_SUPPORT && !CHAP_SUPPORT + ADDCISHORT(CI_AUTHTYPE, go->neg_upap, PPP_PAP); +#endif /* !EAP_SUPPORT && !CHAP_SUPPORT */ +#endif /* PAP_SUPPORT */ +#if LQR_SUPPORT + ADDCILQR(CI_QUALITY, go->neg_lqr, go->lqr_period); +#endif /* LQR_SUPPORT */ + ADDCICHAR(CI_CALLBACK, go->neg_cbcp, CBCP_OPT); + ADDCILONG(CI_MAGICNUMBER, go->neg_magicnumber, go->magicnumber); + ADDCIVOID(CI_PCOMPRESSION, go->neg_pcompression); + ADDCIVOID(CI_ACCOMPRESSION, go->neg_accompression); +#ifdef HAVE_MULTILINK + ADDCISHORT(CI_MRRU, go->neg_mrru, go->mrru); +#endif + ADDCIVOID(CI_SSNHF, go->neg_ssnhf); + ADDCIENDP(CI_EPDISC, go->neg_endpoint, go->endpoint.class_, + go->endpoint.value, go->endpoint.length); + + if (ucp - start_ucp != *lenp) { + /* this should never happen, because peer_mtu should be 1500 */ + ppp_error("Bug in lcp_addci: wrong length"); + } +} + + +/* + * lcp_ackci - Ack our CIs. + * This should not modify any state if the Ack is bad. + * + * Returns: + * 0 - Ack was bad. + * 1 - Ack was good. + */ +static int lcp_ackci(fsm *f, u_char *p, int len) { + ppp_pcb *pcb = f->pcb; + lcp_options *go = &pcb->lcp_gotoptions; + u_char cilen, citype, cichar; + u_short cishort; + u32_t cilong; + + /* + * CIs must be in exactly the same order that we sent. + * Check packet length and CI length at each step. + * If we find any deviations, then this packet is bad. 
+ */ +#define ACKCIVOID(opt, neg) \ + if (neg) { \ + if ((len -= CILEN_VOID) < 0) \ + goto bad; \ + GETCHAR(citype, p); \ + GETCHAR(cilen, p); \ + if (cilen != CILEN_VOID || \ + citype != opt) \ + goto bad; \ + } +#define ACKCISHORT(opt, neg, val) \ + if (neg) { \ + if ((len -= CILEN_SHORT) < 0) \ + goto bad; \ + GETCHAR(citype, p); \ + GETCHAR(cilen, p); \ + if (cilen != CILEN_SHORT || \ + citype != opt) \ + goto bad; \ + GETSHORT(cishort, p); \ + if (cishort != val) \ + goto bad; \ + } +#define ACKCICHAR(opt, neg, val) \ + if (neg) { \ + if ((len -= CILEN_CHAR) < 0) \ + goto bad; \ + GETCHAR(citype, p); \ + GETCHAR(cilen, p); \ + if (cilen != CILEN_CHAR || \ + citype != opt) \ + goto bad; \ + GETCHAR(cichar, p); \ + if (cichar != val) \ + goto bad; \ + } +#if CHAP_SUPPORT +#define ACKCICHAP(opt, neg, val) \ + if (neg) { \ + if ((len -= CILEN_CHAP) < 0) \ + goto bad; \ + GETCHAR(citype, p); \ + GETCHAR(cilen, p); \ + if (cilen != CILEN_CHAP || \ + citype != (opt)) \ + goto bad; \ + GETSHORT(cishort, p); \ + if (cishort != PPP_CHAP) \ + goto bad; \ + GETCHAR(cichar, p); \ + if (cichar != (CHAP_DIGEST(val))) \ + goto bad; \ + } +#endif /* CHAP_SUPPORT */ +#define ACKCILONG(opt, neg, val) \ + if (neg) { \ + if ((len -= CILEN_LONG) < 0) \ + goto bad; \ + GETCHAR(citype, p); \ + GETCHAR(cilen, p); \ + if (cilen != CILEN_LONG || \ + citype != opt) \ + goto bad; \ + GETLONG(cilong, p); \ + if (cilong != val) \ + goto bad; \ + } +#if LQR_SUPPORT +#define ACKCILQR(opt, neg, val) \ + if (neg) { \ + if ((len -= CILEN_LQR) < 0) \ + goto bad; \ + GETCHAR(citype, p); \ + GETCHAR(cilen, p); \ + if (cilen != CILEN_LQR || \ + citype != opt) \ + goto bad; \ + GETSHORT(cishort, p); \ + if (cishort != PPP_LQR) \ + goto bad; \ + GETLONG(cilong, p); \ + if (cilong != val) \ + goto bad; \ + } +#endif /* LQR_SUPPORT */ +#define ACKCIENDP(opt, neg, class, val, vlen) \ + if (neg) { \ + int i; \ + if ((len -= CILEN_CHAR + vlen) < 0) \ + goto bad; \ + GETCHAR(citype, p); \ + GETCHAR(cilen, p); \ + if (cilen != CILEN_CHAR + vlen || \ + citype != opt) \ + goto bad; \ + GETCHAR(cichar, p); \ + if (cichar != class) \ + goto bad; \ + for (i = 0; i < vlen; ++i) { \ + GETCHAR(cichar, p); \ + if (cichar != val[i]) \ + goto bad; \ + } \ + } + + ACKCISHORT(CI_MRU, go->neg_mru && go->mru != PPP_DEFMRU, go->mru); + ACKCILONG(CI_ASYNCMAP, go->neg_asyncmap && go->asyncmap != 0xFFFFFFFF, + go->asyncmap); +#if EAP_SUPPORT + ACKCISHORT(CI_AUTHTYPE, go->neg_eap, PPP_EAP); +#endif /* EAP_SUPPORT */ +#if CHAP_SUPPORT /* cannot be improved, embedding a directive within macro arguments is not portable */ +#if EAP_SUPPORT + ACKCICHAP(CI_AUTHTYPE, !go->neg_eap && go->neg_chap, go->chap_mdtype); +#endif /* EAP_SUPPORT */ +#if !EAP_SUPPORT + ACKCICHAP(CI_AUTHTYPE, go->neg_chap, go->chap_mdtype); +#endif /* !EAP_SUPPORT */ +#endif /* CHAP_SUPPORT */ +#if PAP_SUPPORT /* cannot be improved, embedding a directive within macro arguments is not portable */ +#if EAP_SUPPORT && CHAP_SUPPORT + ACKCISHORT(CI_AUTHTYPE, !go->neg_eap && !go->neg_chap && go->neg_upap, PPP_PAP); +#endif /* EAP_SUPPORT && CHAP_SUPPORT */ +#if EAP_SUPPORT && !CHAP_SUPPORT + ACKCISHORT(CI_AUTHTYPE, !go->neg_eap && go->neg_upap, PPP_PAP); +#endif /* EAP_SUPPORT && !CHAP_SUPPORT */ +#if !EAP_SUPPORT && CHAP_SUPPORT + ACKCISHORT(CI_AUTHTYPE, !go->neg_chap && go->neg_upap, PPP_PAP); +#endif /* !EAP_SUPPORT && CHAP_SUPPORT */ +#if !EAP_SUPPORT && !CHAP_SUPPORT + ACKCISHORT(CI_AUTHTYPE, go->neg_upap, PPP_PAP); +#endif /* !EAP_SUPPORT && !CHAP_SUPPORT */ +#endif /* PAP_SUPPORT */ 
+#if LQR_SUPPORT + ACKCILQR(CI_QUALITY, go->neg_lqr, go->lqr_period); +#endif /* LQR_SUPPORT */ + ACKCICHAR(CI_CALLBACK, go->neg_cbcp, CBCP_OPT); + ACKCILONG(CI_MAGICNUMBER, go->neg_magicnumber, go->magicnumber); + ACKCIVOID(CI_PCOMPRESSION, go->neg_pcompression); + ACKCIVOID(CI_ACCOMPRESSION, go->neg_accompression); +#ifdef HAVE_MULTILINK + ACKCISHORT(CI_MRRU, go->neg_mrru, go->mrru); +#endif /* HAVE_MULTILINK */ + ACKCIVOID(CI_SSNHF, go->neg_ssnhf); + ACKCIENDP(CI_EPDISC, go->neg_endpoint, go->endpoint.class_, + go->endpoint.value, go->endpoint.length); + + /* + * If there are any remaining CIs, then this packet is bad. + */ + if (len != 0) + goto bad; + return (1); +bad: + LCPDEBUG(("lcp_acki: received bad Ack!")); + return (0); +} + + +/* + * lcp_nakci - Peer has sent a NAK for some of our CIs. + * This should not modify any state if the Nak is bad + * or if LCP is in the OPENED state. + * + * Returns: + * 0 - Nak was bad. + * 1 - Nak was good. + */ +static int lcp_nakci(fsm *f, u_char *p, int len, int treat_as_reject) { + ppp_pcb *pcb = f->pcb; + lcp_options *go = &pcb->lcp_gotoptions; + lcp_options *wo = &pcb->lcp_wantoptions; + u_char citype, cichar, *next; + u_short cishort; + u32_t cilong; + lcp_options no; /* options we've seen Naks for */ + lcp_options try_; /* options to request next time */ + int looped_back = 0; + int cilen; + + BZERO(&no, sizeof(no)); + try_ = *go; + + /* + * Any Nak'd CIs must be in exactly the same order that we sent. + * Check packet length and CI length at each step. + * If we find any deviations, then this packet is bad. + */ +#define NAKCIVOID(opt, neg) \ + if (go->neg && \ + len >= CILEN_VOID && \ + p[1] == CILEN_VOID && \ + p[0] == opt) { \ + len -= CILEN_VOID; \ + INCPTR(CILEN_VOID, p); \ + no.neg = 1; \ + try_.neg = 0; \ + } +#if CHAP_SUPPORT +#define NAKCICHAP(opt, neg, code) \ + if (go->neg && \ + len >= CILEN_CHAP && \ + p[1] == CILEN_CHAP && \ + p[0] == opt) { \ + len -= CILEN_CHAP; \ + INCPTR(2, p); \ + GETSHORT(cishort, p); \ + GETCHAR(cichar, p); \ + no.neg = 1; \ + code \ + } +#endif /* CHAP_SUPPORT */ +#define NAKCICHAR(opt, neg, code) \ + if (go->neg && \ + len >= CILEN_CHAR && \ + p[1] == CILEN_CHAR && \ + p[0] == opt) { \ + len -= CILEN_CHAR; \ + INCPTR(2, p); \ + GETCHAR(cichar, p); \ + no.neg = 1; \ + code \ + } +#define NAKCISHORT(opt, neg, code) \ + if (go->neg && \ + len >= CILEN_SHORT && \ + p[1] == CILEN_SHORT && \ + p[0] == opt) { \ + len -= CILEN_SHORT; \ + INCPTR(2, p); \ + GETSHORT(cishort, p); \ + no.neg = 1; \ + code \ + } +#define NAKCILONG(opt, neg, code) \ + if (go->neg && \ + len >= CILEN_LONG && \ + p[1] == CILEN_LONG && \ + p[0] == opt) { \ + len -= CILEN_LONG; \ + INCPTR(2, p); \ + GETLONG(cilong, p); \ + no.neg = 1; \ + code \ + } +#if LQR_SUPPORT +#define NAKCILQR(opt, neg, code) \ + if (go->neg && \ + len >= CILEN_LQR && \ + p[1] == CILEN_LQR && \ + p[0] == opt) { \ + len -= CILEN_LQR; \ + INCPTR(2, p); \ + GETSHORT(cishort, p); \ + GETLONG(cilong, p); \ + no.neg = 1; \ + code \ + } +#endif /* LQR_SUPPORT */ +#define NAKCIENDP(opt, neg) \ + if (go->neg && \ + len >= CILEN_CHAR && \ + p[0] == opt && \ + p[1] >= CILEN_CHAR && \ + p[1] <= len) { \ + len -= p[1]; \ + INCPTR(p[1], p); \ + no.neg = 1; \ + try_.neg = 0; \ + } + + /* + * NOTE! There must be no assignments to individual fields of *go in + * the code below. Any such assignment is a BUG! + */ + /* + * We don't care if they want to send us smaller packets than + * we want. 
Therefore, accept any MRU less than what we asked for, + * but then ignore the new value when setting the MRU in the kernel. + * If they send us a bigger MRU than what we asked, accept it, up to + * the limit of the default MRU we'd get if we didn't negotiate. + */ + if (go->neg_mru && go->mru != PPP_DEFMRU) { + NAKCISHORT(CI_MRU, neg_mru, + if (cishort <= wo->mru || cishort <= PPP_DEFMRU) + try_.mru = cishort; + ); + } + + /* + * Add any characters they want to our (receive-side) asyncmap. + */ + if (go->neg_asyncmap && go->asyncmap != 0xFFFFFFFF) { + NAKCILONG(CI_ASYNCMAP, neg_asyncmap, + try_.asyncmap = go->asyncmap | cilong; + ); + } + + /* + * If they've nak'd our authentication-protocol, check whether + * they are proposing a different protocol, or a different + * hash algorithm for CHAP. + */ + if ((0 +#if CHAP_SUPPORT + || go->neg_chap +#endif /* CHAP_SUPPORT */ +#if PAP_SUPPORT + || go->neg_upap +#endif /* PAP_SUPPORT */ +#if EAP_SUPPORT + || go->neg_eap +#endif /* EAP_SUPPORT */ + ) + && len >= CILEN_SHORT + && p[0] == CI_AUTHTYPE && p[1] >= CILEN_SHORT && p[1] <= len) { + cilen = p[1]; + len -= cilen; +#if CHAP_SUPPORT + no.neg_chap = go->neg_chap; +#endif /* CHAP_SUPPORT */ +#if PAP_SUPPORT + no.neg_upap = go->neg_upap; +#endif /* PAP_SUPPORT */ +#if EAP_SUPPORT + no.neg_eap = go->neg_eap; +#endif /* EAP_SUPPORT */ + INCPTR(2, p); + GETSHORT(cishort, p); + +#if PAP_SUPPORT + if (cishort == PPP_PAP && cilen == CILEN_SHORT) { +#if EAP_SUPPORT + /* If we were asking for EAP, then we need to stop that. */ + if (go->neg_eap) + try_.neg_eap = 0; + else +#endif /* EAP_SUPPORT */ + +#if CHAP_SUPPORT + /* If we were asking for CHAP, then we need to stop that. */ + if (go->neg_chap) + try_.neg_chap = 0; + else +#endif /* CHAP_SUPPORT */ + + /* + * If we weren't asking for CHAP or EAP, then we were asking for + * PAP, in which case this Nak is bad. + */ + goto bad; + } else +#endif /* PAP_SUPPORT */ + +#if CHAP_SUPPORT + if (cishort == PPP_CHAP && cilen == CILEN_CHAP) { + GETCHAR(cichar, p); +#if EAP_SUPPORT + /* Stop asking for EAP, if we were. */ + if (go->neg_eap) { + try_.neg_eap = 0; + /* Try to set up to use their suggestion, if possible */ + if (CHAP_CANDIGEST(go->chap_mdtype, cichar)) + try_.chap_mdtype = CHAP_MDTYPE_D(cichar); + } else +#endif /* EAP_SUPPORT */ + if (go->neg_chap) { + /* + * We were asking for our preferred algorithm, they must + * want something different. + */ + if (cichar != CHAP_DIGEST(go->chap_mdtype)) { + if (CHAP_CANDIGEST(go->chap_mdtype, cichar)) { + /* Use their suggestion if we support it ... */ + try_.chap_mdtype = CHAP_MDTYPE_D(cichar); + } else { + /* ... otherwise, try our next-preferred algorithm. */ + try_.chap_mdtype &= ~(CHAP_MDTYPE(try_.chap_mdtype)); + if (try_.chap_mdtype == MDTYPE_NONE) /* out of algos */ + try_.neg_chap = 0; + } + } else { + /* + * Whoops, they Nak'd our algorithm of choice + * but then suggested it back to us. + */ + goto bad; + } + } else { + /* + * Stop asking for PAP if we were asking for it. + */ +#if PAP_SUPPORT + try_.neg_upap = 0; +#endif /* PAP_SUPPORT */ + } + + } else +#endif /* CHAP_SUPPORT */ + { + +#if EAP_SUPPORT + /* + * If we were asking for EAP, and they're Conf-Naking EAP, + * well, that's just strange. Nobody should do that. + */ + if (cishort == PPP_EAP && cilen == CILEN_SHORT && go->neg_eap) + ppp_dbglog("Unexpected Conf-Nak for EAP"); + + /* + * We don't recognize what they're suggesting. + * Stop asking for what we were asking for. 
+ */ + if (go->neg_eap) + try_.neg_eap = 0; + else +#endif /* EAP_SUPPORT */ + +#if CHAP_SUPPORT + if (go->neg_chap) + try_.neg_chap = 0; + else +#endif /* CHAP_SUPPORT */ + +#if PAP_SUPPORT + if(1) + try_.neg_upap = 0; + else +#endif /* PAP_SUPPORT */ + {} + + p += cilen - CILEN_SHORT; + } + } + +#if LQR_SUPPORT + /* + * If they can't cope with our link quality protocol, we'll have + * to stop asking for LQR. We haven't got any other protocol. + * If they Nak the reporting period, take their value XXX ? + */ + NAKCILQR(CI_QUALITY, neg_lqr, + if (cishort != PPP_LQR) + try_.neg_lqr = 0; + else + try_.lqr_period = cilong; + ); +#endif /* LQR_SUPPORT */ + + /* + * Only implementing CBCP...not the rest of the callback options + */ + NAKCICHAR(CI_CALLBACK, neg_cbcp, + try_.neg_cbcp = 0; + (void)cichar; /* if CHAP support is not compiled, cichar is set but not used, which makes some compilers complaining */ + ); + + /* + * Check for a looped-back line. + */ + NAKCILONG(CI_MAGICNUMBER, neg_magicnumber, + try_.magicnumber = magic(); + looped_back = 1; + ); + + /* + * Peer shouldn't send Nak for protocol compression or + * address/control compression requests; they should send + * a Reject instead. If they send a Nak, treat it as a Reject. + */ + NAKCIVOID(CI_PCOMPRESSION, neg_pcompression); + NAKCIVOID(CI_ACCOMPRESSION, neg_accompression); + +#ifdef HAVE_MULTILINK + /* + * Nak for MRRU option - accept their value if it is smaller + * than the one we want. + */ + if (go->neg_mrru) { + NAKCISHORT(CI_MRRU, neg_mrru, + if (treat_as_reject) + try_.neg_mrru = 0; + else if (cishort <= wo->mrru) + try_.mrru = cishort; + ); + } +#else /* HAVE_MULTILINK */ + LWIP_UNUSED_ARG(treat_as_reject); +#endif /* HAVE_MULTILINK */ + + /* + * Nak for short sequence numbers shouldn't be sent, treat it + * like a reject. + */ + NAKCIVOID(CI_SSNHF, neg_ssnhf); + + /* + * Nak of the endpoint discriminator option is not permitted, + * treat it like a reject. + */ + NAKCIENDP(CI_EPDISC, neg_endpoint); + + /* + * There may be remaining CIs, if the peer is requesting negotiation + * on an option that we didn't include in our request packet. + * If we see an option that we requested, or one we've already seen + * in this packet, then this packet is bad. + * If we wanted to respond by starting to negotiate on the requested + * option(s), we could, but we don't, because except for the + * authentication type and quality protocol, if we are not negotiating + * an option, it is because we were told not to. + * For the authentication type, the Nak from the peer means + * `let me authenticate myself with you' which is a bit pointless. + * For the quality protocol, the Nak means `ask me to send you quality + * reports', but if we didn't ask for them, we don't want them. + * An option we don't recognize represents the peer asking to + * negotiate some option we don't support, so ignore it. 
+ */ + while (len >= CILEN_VOID) { + GETCHAR(citype, p); + GETCHAR(cilen, p); + if (cilen < CILEN_VOID || (len -= cilen) < 0) + goto bad; + next = p + cilen - 2; + + switch (citype) { + case CI_MRU: + if ((go->neg_mru && go->mru != PPP_DEFMRU) + || no.neg_mru || cilen != CILEN_SHORT) + goto bad; + GETSHORT(cishort, p); + if (cishort < PPP_DEFMRU) { + try_.neg_mru = 1; + try_.mru = cishort; + } + break; + case CI_ASYNCMAP: + if ((go->neg_asyncmap && go->asyncmap != 0xFFFFFFFF) + || no.neg_asyncmap || cilen != CILEN_LONG) + goto bad; + break; + case CI_AUTHTYPE: + if (0 +#if CHAP_SUPPORT + || go->neg_chap || no.neg_chap +#endif /* CHAP_SUPPORT */ +#if PAP_SUPPORT + || go->neg_upap || no.neg_upap +#endif /* PAP_SUPPORT */ +#if EAP_SUPPORT + || go->neg_eap || no.neg_eap +#endif /* EAP_SUPPORT */ + ) + goto bad; + break; + case CI_MAGICNUMBER: + if (go->neg_magicnumber || no.neg_magicnumber || + cilen != CILEN_LONG) + goto bad; + break; + case CI_PCOMPRESSION: + if (go->neg_pcompression || no.neg_pcompression + || cilen != CILEN_VOID) + goto bad; + break; + case CI_ACCOMPRESSION: + if (go->neg_accompression || no.neg_accompression + || cilen != CILEN_VOID) + goto bad; + break; +#if LQR_SUPPORT + case CI_QUALITY: + if (go->neg_lqr || no.neg_lqr || cilen != CILEN_LQR) + goto bad; + break; +#endif /* LQR_SUPPORT */ +#ifdef HAVE_MULTILINK + case CI_MRRU: + if (go->neg_mrru || no.neg_mrru || cilen != CILEN_SHORT) + goto bad; + break; +#endif /* HAVE_MULTILINK */ + case CI_SSNHF: + if (go->neg_ssnhf || no.neg_ssnhf || cilen != CILEN_VOID) + goto bad; + try_.neg_ssnhf = 1; + break; + case CI_EPDISC: + if (go->neg_endpoint || no.neg_endpoint || cilen < CILEN_CHAR) + goto bad; + break; + default: + break; + } + p = next; + } + + /* + * OK, the Nak is good. Now we can update state. + * If there are any options left we ignore them. + */ + if (f->state != PPP_FSM_OPENED) { + if (looped_back) { + if (++try_.numloops >= pcb->settings.lcp_loopbackfail) { + ppp_notice("Serial line is looped back."); + pcb->err_code = PPPERR_LOOPBACK; + lcp_close(f->pcb, "Loopback detected"); + } + } else + try_.numloops = 0; + *go = try_; + } + + return 1; + +bad: + LCPDEBUG(("lcp_nakci: received bad Nak!")); + return 0; +} + + +/* + * lcp_rejci - Peer has Rejected some of our CIs. + * This should not modify any state if the Reject is bad + * or if LCP is in the OPENED state. + * + * Returns: + * 0 - Reject was bad. + * 1 - Reject was good. + */ +static int lcp_rejci(fsm *f, u_char *p, int len) { + ppp_pcb *pcb = f->pcb; + lcp_options *go = &pcb->lcp_gotoptions; + u_char cichar; + u_short cishort; + u32_t cilong; + lcp_options try_; /* options to request next time */ + + try_ = *go; + + /* + * Any Rejected CIs must be in exactly the same order that we sent. + * Check packet length and CI length at each step. + * If we find any deviations, then this packet is bad. + */ +#define REJCIVOID(opt, neg) \ + if (go->neg && \ + len >= CILEN_VOID && \ + p[1] == CILEN_VOID && \ + p[0] == opt) { \ + len -= CILEN_VOID; \ + INCPTR(CILEN_VOID, p); \ + try_.neg = 0; \ + } +#define REJCISHORT(opt, neg, val) \ + if (go->neg && \ + len >= CILEN_SHORT && \ + p[1] == CILEN_SHORT && \ + p[0] == opt) { \ + len -= CILEN_SHORT; \ + INCPTR(2, p); \ + GETSHORT(cishort, p); \ + /* Check rejected value. 
*/ \ + if (cishort != val) \ + goto bad; \ + try_.neg = 0; \ + } + +#if CHAP_SUPPORT && EAP_SUPPORT && PAP_SUPPORT +#define REJCICHAP(opt, neg, val) \ + if (go->neg && \ + len >= CILEN_CHAP && \ + p[1] == CILEN_CHAP && \ + p[0] == opt) { \ + len -= CILEN_CHAP; \ + INCPTR(2, p); \ + GETSHORT(cishort, p); \ + GETCHAR(cichar, p); \ + /* Check rejected value. */ \ + if ((cishort != PPP_CHAP) || (cichar != (CHAP_DIGEST(val)))) \ + goto bad; \ + try_.neg = 0; \ + try_.neg_eap = try_.neg_upap = 0; \ + } +#endif /* CHAP_SUPPORT && EAP_SUPPORT && PAP_SUPPORT */ + +#if CHAP_SUPPORT && !EAP_SUPPORT && PAP_SUPPORT +#define REJCICHAP(opt, neg, val) \ + if (go->neg && \ + len >= CILEN_CHAP && \ + p[1] == CILEN_CHAP && \ + p[0] == opt) { \ + len -= CILEN_CHAP; \ + INCPTR(2, p); \ + GETSHORT(cishort, p); \ + GETCHAR(cichar, p); \ + /* Check rejected value. */ \ + if ((cishort != PPP_CHAP) || (cichar != (CHAP_DIGEST(val)))) \ + goto bad; \ + try_.neg = 0; \ + try_.neg_upap = 0; \ + } +#endif /* CHAP_SUPPORT && !EAP_SUPPORT && PAP_SUPPORT */ + +#if CHAP_SUPPORT && EAP_SUPPORT && !PAP_SUPPORT +#define REJCICHAP(opt, neg, val) \ + if (go->neg && \ + len >= CILEN_CHAP && \ + p[1] == CILEN_CHAP && \ + p[0] == opt) { \ + len -= CILEN_CHAP; \ + INCPTR(2, p); \ + GETSHORT(cishort, p); \ + GETCHAR(cichar, p); \ + /* Check rejected value. */ \ + if ((cishort != PPP_CHAP) || (cichar != (CHAP_DIGEST(val)))) \ + goto bad; \ + try_.neg = 0; \ + try_.neg_eap = 0; \ + } +#endif /* CHAP_SUPPORT && EAP_SUPPORT && !PAP_SUPPORT */ + +#if CHAP_SUPPORT && !EAP_SUPPORT && !PAP_SUPPORT +#define REJCICHAP(opt, neg, val) \ + if (go->neg && \ + len >= CILEN_CHAP && \ + p[1] == CILEN_CHAP && \ + p[0] == opt) { \ + len -= CILEN_CHAP; \ + INCPTR(2, p); \ + GETSHORT(cishort, p); \ + GETCHAR(cichar, p); \ + /* Check rejected value. */ \ + if ((cishort != PPP_CHAP) || (cichar != (CHAP_DIGEST(val)))) \ + goto bad; \ + try_.neg = 0; \ + } +#endif /* CHAP_SUPPORT && !EAP_SUPPORT && !PAP_SUPPORT */ + +#define REJCILONG(opt, neg, val) \ + if (go->neg && \ + len >= CILEN_LONG && \ + p[1] == CILEN_LONG && \ + p[0] == opt) { \ + len -= CILEN_LONG; \ + INCPTR(2, p); \ + GETLONG(cilong, p); \ + /* Check rejected value. */ \ + if (cilong != val) \ + goto bad; \ + try_.neg = 0; \ + } +#if LQR_SUPPORT +#define REJCILQR(opt, neg, val) \ + if (go->neg && \ + len >= CILEN_LQR && \ + p[1] == CILEN_LQR && \ + p[0] == opt) { \ + len -= CILEN_LQR; \ + INCPTR(2, p); \ + GETSHORT(cishort, p); \ + GETLONG(cilong, p); \ + /* Check rejected value. */ \ + if (cishort != PPP_LQR || cilong != val) \ + goto bad; \ + try_.neg = 0; \ + } +#endif /* LQR_SUPPORT */ +#define REJCICBCP(opt, neg, val) \ + if (go->neg && \ + len >= CILEN_CBCP && \ + p[1] == CILEN_CBCP && \ + p[0] == opt) { \ + len -= CILEN_CBCP; \ + INCPTR(2, p); \ + GETCHAR(cichar, p); \ + /* Check rejected value. 
*/ \ + if (cichar != val) \ + goto bad; \ + try_.neg = 0; \ + } +#define REJCIENDP(opt, neg, class, val, vlen) \ + if (go->neg && \ + len >= CILEN_CHAR + vlen && \ + p[0] == opt && \ + p[1] == CILEN_CHAR + vlen) { \ + int i; \ + len -= CILEN_CHAR + vlen; \ + INCPTR(2, p); \ + GETCHAR(cichar, p); \ + if (cichar != class) \ + goto bad; \ + for (i = 0; i < vlen; ++i) { \ + GETCHAR(cichar, p); \ + if (cichar != val[i]) \ + goto bad; \ + } \ + try_.neg = 0; \ + } + + REJCISHORT(CI_MRU, neg_mru, go->mru); + REJCILONG(CI_ASYNCMAP, neg_asyncmap, go->asyncmap); +#if EAP_SUPPORT + REJCISHORT(CI_AUTHTYPE, neg_eap, PPP_EAP); + if (!go->neg_eap) { +#endif /* EAP_SUPPORT */ +#if CHAP_SUPPORT + REJCICHAP(CI_AUTHTYPE, neg_chap, go->chap_mdtype); + if (!go->neg_chap) { +#endif /* CHAP_SUPPORT */ +#if PAP_SUPPORT + REJCISHORT(CI_AUTHTYPE, neg_upap, PPP_PAP); +#endif /* PAP_SUPPORT */ +#if CHAP_SUPPORT + } +#endif /* CHAP_SUPPORT */ +#if EAP_SUPPORT + } +#endif /* EAP_SUPPORT */ +#if LQR_SUPPORT + REJCILQR(CI_QUALITY, neg_lqr, go->lqr_period); +#endif /* LQR_SUPPORT */ + REJCICBCP(CI_CALLBACK, neg_cbcp, CBCP_OPT); + REJCILONG(CI_MAGICNUMBER, neg_magicnumber, go->magicnumber); + REJCIVOID(CI_PCOMPRESSION, neg_pcompression); + REJCIVOID(CI_ACCOMPRESSION, neg_accompression); +#ifdef HAVE_MULTILINK + REJCISHORT(CI_MRRU, neg_mrru, go->mrru); +#endif /* HAVE_MULTILINK */ + REJCIVOID(CI_SSNHF, neg_ssnhf); + REJCIENDP(CI_EPDISC, neg_endpoint, go->endpoint.class_, + go->endpoint.value, go->endpoint.length); + + /* + * If there are any remaining CIs, then this packet is bad. + */ + if (len != 0) + goto bad; + /* + * Now we can update state. + */ + if (f->state != PPP_FSM_OPENED) + *go = try_; + return 1; + +bad: + LCPDEBUG(("lcp_rejci: received bad Reject!")); + return 0; +} + + +/* + * lcp_reqci - Check the peer's requested CIs and send appropriate response. + * + * Returns: CONFACK, CONFNAK or CONFREJ and input packet modified + * appropriately. If reject_if_disagree is non-zero, doesn't return + * CONFNAK; returns CONFREJ if it can't return CONFACK. + * + * inp = Requested CIs + * lenp = Length of requested CIs + */ +static int lcp_reqci(fsm *f, u_char *inp, int *lenp, int reject_if_disagree) { + ppp_pcb *pcb = f->pcb; + lcp_options *go = &pcb->lcp_gotoptions; + lcp_options *ho = &pcb->lcp_hisoptions; + lcp_options *ao = &pcb->lcp_allowoptions; + u_char *cip, *next; /* Pointer to current and next CIs */ + int cilen, citype, cichar; /* Parsed len, type, char value */ + u_short cishort; /* Parsed short value */ + u32_t cilong; /* Parse long value */ + int rc = CONFACK; /* Final packet return code */ + int orc; /* Individual option return code */ + u_char *p; /* Pointer to next char to parse */ + u_char *rejp; /* Pointer to next char in reject frame */ + struct pbuf *nakp; /* Nak buffer */ + u_char *nakoutp; /* Pointer to next char in Nak frame */ + int l = *lenp; /* Length left */ + + /* + * Reset all his options. + */ + BZERO(ho, sizeof(*ho)); + + /* + * Process all his options. + */ + next = inp; + nakp = pbuf_alloc(PBUF_RAW, (u16_t)(PPP_CTRL_PBUF_MAX_SIZE), PPP_CTRL_PBUF_TYPE); + if(NULL == nakp) + return 0; + if(nakp->tot_len != nakp->len) { + pbuf_free(nakp); + return 0; + } + + nakoutp = (u_char*)nakp->payload; + rejp = inp; + while (l) { + orc = CONFACK; /* Assume success */ + cip = p = next; /* Remember begining of CI */ + if (l < 2 || /* Not enough data for CI header or */ + p[1] < 2 || /* CI length too small or */ + p[1] > l) { /* CI length too big? 
*/ + LCPDEBUG(("lcp_reqci: bad CI length!")); + orc = CONFREJ; /* Reject bad CI */ + cilen = l; /* Reject till end of packet */ + l = 0; /* Don't loop again */ + citype = 0; + goto endswitch; + } + GETCHAR(citype, p); /* Parse CI type */ + GETCHAR(cilen, p); /* Parse CI length */ + l -= cilen; /* Adjust remaining length */ + next += cilen; /* Step to next CI */ + + switch (citype) { /* Check CI type */ + case CI_MRU: + if (!ao->neg_mru || /* Allow option? */ + cilen != CILEN_SHORT) { /* Check CI length */ + orc = CONFREJ; /* Reject CI */ + break; + } + GETSHORT(cishort, p); /* Parse MRU */ + + /* + * He must be able to receive at least our minimum. + * No need to check a maximum. If he sends a large number, + * we'll just ignore it. + */ + if (cishort < PPP_MINMRU) { + orc = CONFNAK; /* Nak CI */ + PUTCHAR(CI_MRU, nakoutp); + PUTCHAR(CILEN_SHORT, nakoutp); + PUTSHORT(PPP_MINMRU, nakoutp); /* Give him a hint */ + break; + } + ho->neg_mru = 1; /* Remember he sent MRU */ + ho->mru = cishort; /* And remember value */ + break; + + case CI_ASYNCMAP: + if (!ao->neg_asyncmap || + cilen != CILEN_LONG) { + orc = CONFREJ; + break; + } + GETLONG(cilong, p); + + /* + * Asyncmap must have set at least the bits + * which are set in lcp_allowoptions[unit].asyncmap. + */ + if ((ao->asyncmap & ~cilong) != 0) { + orc = CONFNAK; + PUTCHAR(CI_ASYNCMAP, nakoutp); + PUTCHAR(CILEN_LONG, nakoutp); + PUTLONG(ao->asyncmap | cilong, nakoutp); + break; + } + ho->neg_asyncmap = 1; + ho->asyncmap = cilong; + break; + + case CI_AUTHTYPE: + if (cilen < CILEN_SHORT || + !(0 +#if PAP_SUPPORT + || ao->neg_upap +#endif /* PAP_SUPPORT */ +#if CHAP_SUPPORT + || ao->neg_chap +#endif /* CHAP_SUPPORT */ +#if EAP_SUPPORT + || ao->neg_eap +#endif /* EAP_SUPPORT */ + )) { + /* + * Reject the option if we're not willing to authenticate. + */ + ppp_dbglog("No auth is possible"); + orc = CONFREJ; + break; + } + GETSHORT(cishort, p); + + /* + * Authtype must be PAP, CHAP, or EAP. + * + * Note: if more than one of ao->neg_upap, ao->neg_chap, and + * ao->neg_eap are set, and the peer sends a Configure-Request + * with two or more authenticate-protocol requests, then we will + * reject the second request. + * Whether we end up doing CHAP, UPAP, or EAP depends then on + * the ordering of the CIs in the peer's Configure-Request. 
+ */ + +#if PAP_SUPPORT + if (cishort == PPP_PAP) { + /* we've already accepted CHAP or EAP */ + if (0 +#if CHAP_SUPPORT + || ho->neg_chap +#endif /* CHAP_SUPPORT */ +#if EAP_SUPPORT + || ho->neg_eap +#endif /* EAP_SUPPORT */ + || cilen != CILEN_SHORT) { + LCPDEBUG(("lcp_reqci: rcvd AUTHTYPE PAP, rejecting...")); + orc = CONFREJ; + break; + } + if (!ao->neg_upap) { /* we don't want to do PAP */ + orc = CONFNAK; /* NAK it and suggest CHAP or EAP */ + PUTCHAR(CI_AUTHTYPE, nakoutp); +#if EAP_SUPPORT + if (ao->neg_eap) { + PUTCHAR(CILEN_SHORT, nakoutp); + PUTSHORT(PPP_EAP, nakoutp); + } else { +#endif /* EAP_SUPPORT */ +#if CHAP_SUPPORT + PUTCHAR(CILEN_CHAP, nakoutp); + PUTSHORT(PPP_CHAP, nakoutp); + PUTCHAR(CHAP_DIGEST(ao->chap_mdtype), nakoutp); +#endif /* CHAP_SUPPORT */ +#if EAP_SUPPORT + } +#endif /* EAP_SUPPORT */ + break; + } + ho->neg_upap = 1; + break; + } +#endif /* PAP_SUPPORT */ +#if CHAP_SUPPORT + if (cishort == PPP_CHAP) { + /* we've already accepted PAP or EAP */ + if ( +#if PAP_SUPPORT + ho->neg_upap || +#endif /* PAP_SUPPORT */ +#if EAP_SUPPORT + ho->neg_eap || +#endif /* EAP_SUPPORT */ + cilen != CILEN_CHAP) { + LCPDEBUG(("lcp_reqci: rcvd AUTHTYPE CHAP, rejecting...")); + orc = CONFREJ; + break; + } + if (!ao->neg_chap) { /* we don't want to do CHAP */ + orc = CONFNAK; /* NAK it and suggest EAP or PAP */ + PUTCHAR(CI_AUTHTYPE, nakoutp); + PUTCHAR(CILEN_SHORT, nakoutp); +#if EAP_SUPPORT + if (ao->neg_eap) { + PUTSHORT(PPP_EAP, nakoutp); + } else +#endif /* EAP_SUPPORT */ +#if PAP_SUPPORT + if(1) { + PUTSHORT(PPP_PAP, nakoutp); + } + else +#endif /* PAP_SUPPORT */ + {} + break; + } + GETCHAR(cichar, p); /* get digest type */ + if (!(CHAP_CANDIGEST(ao->chap_mdtype, cichar))) { + /* + * We can't/won't do the requested type, + * suggest something else. + */ + orc = CONFNAK; + PUTCHAR(CI_AUTHTYPE, nakoutp); + PUTCHAR(CILEN_CHAP, nakoutp); + PUTSHORT(PPP_CHAP, nakoutp); + PUTCHAR(CHAP_DIGEST(ao->chap_mdtype), nakoutp); + break; + } + ho->chap_mdtype = CHAP_MDTYPE_D(cichar); /* save md type */ + ho->neg_chap = 1; + break; + } +#endif /* CHAP_SUPPORT */ +#if EAP_SUPPORT + if (cishort == PPP_EAP) { + /* we've already accepted CHAP or PAP */ + if ( +#if CHAP_SUPPORT + ho->neg_chap || +#endif /* CHAP_SUPPORT */ +#if PAP_SUPPORT + ho->neg_upap || +#endif /* PAP_SUPPORT */ + cilen != CILEN_SHORT) { + LCPDEBUG(("lcp_reqci: rcvd AUTHTYPE EAP, rejecting...")); + orc = CONFREJ; + break; + } + if (!ao->neg_eap) { /* we don't want to do EAP */ + orc = CONFNAK; /* NAK it and suggest CHAP or PAP */ + PUTCHAR(CI_AUTHTYPE, nakoutp); +#if CHAP_SUPPORT + if (ao->neg_chap) { + PUTCHAR(CILEN_CHAP, nakoutp); + PUTSHORT(PPP_CHAP, nakoutp); + PUTCHAR(CHAP_DIGEST(ao->chap_mdtype), nakoutp); + } else +#endif /* CHAP_SUPPORT */ +#if PAP_SUPPORT + if(1) { + PUTCHAR(CILEN_SHORT, nakoutp); + PUTSHORT(PPP_PAP, nakoutp); + } else +#endif /* PAP_SUPPORT */ + {} + break; + } + ho->neg_eap = 1; + break; + } +#endif /* EAP_SUPPORT */ + + /* + * We don't recognize the protocol they're asking for. + * Nak it with something we're willing to do. + * (At this point we know ao->neg_upap || ao->neg_chap || + * ao->neg_eap.) 
+ */ + orc = CONFNAK; + PUTCHAR(CI_AUTHTYPE, nakoutp); + +#if EAP_SUPPORT + if (ao->neg_eap) { + PUTCHAR(CILEN_SHORT, nakoutp); + PUTSHORT(PPP_EAP, nakoutp); + } else +#endif /* EAP_SUPPORT */ +#if CHAP_SUPPORT + if (ao->neg_chap) { + PUTCHAR(CILEN_CHAP, nakoutp); + PUTSHORT(PPP_CHAP, nakoutp); + PUTCHAR(CHAP_DIGEST(ao->chap_mdtype), nakoutp); + } else +#endif /* CHAP_SUPPORT */ +#if PAP_SUPPORT + if(1) { + PUTCHAR(CILEN_SHORT, nakoutp); + PUTSHORT(PPP_PAP, nakoutp); + } else +#endif /* PAP_SUPPORT */ + {} + break; + +#if LQR_SUPPORT + case CI_QUALITY: + if (!ao->neg_lqr || + cilen != CILEN_LQR) { + orc = CONFREJ; + break; + } + + GETSHORT(cishort, p); + GETLONG(cilong, p); + + /* + * Check the protocol and the reporting period. + * XXX When should we Nak this, and what with? + */ + if (cishort != PPP_LQR) { + orc = CONFNAK; + PUTCHAR(CI_QUALITY, nakoutp); + PUTCHAR(CILEN_LQR, nakoutp); + PUTSHORT(PPP_LQR, nakoutp); + PUTLONG(ao->lqr_period, nakoutp); + break; + } + break; +#endif /* LQR_SUPPORT */ + + case CI_MAGICNUMBER: + if (!(ao->neg_magicnumber || go->neg_magicnumber) || + cilen != CILEN_LONG) { + orc = CONFREJ; + break; + } + GETLONG(cilong, p); + + /* + * He must have a different magic number. + */ + if (go->neg_magicnumber && + cilong == go->magicnumber) { + cilong = magic(); /* Don't put magic() inside macro! */ + orc = CONFNAK; + PUTCHAR(CI_MAGICNUMBER, nakoutp); + PUTCHAR(CILEN_LONG, nakoutp); + PUTLONG(cilong, nakoutp); + break; + } + ho->neg_magicnumber = 1; + ho->magicnumber = cilong; + break; + + + case CI_PCOMPRESSION: + if (!ao->neg_pcompression || + cilen != CILEN_VOID) { + orc = CONFREJ; + break; + } + ho->neg_pcompression = 1; + break; + + case CI_ACCOMPRESSION: + if (!ao->neg_accompression || + cilen != CILEN_VOID) { + orc = CONFREJ; + break; + } + ho->neg_accompression = 1; + break; + +#ifdef HAVE_MULTILINK + case CI_MRRU: + if (!ao->neg_mrru + || !multilink + || cilen != CILEN_SHORT) { + orc = CONFREJ; + break; + } + + GETSHORT(cishort, p); + /* possibly should insist on a minimum/maximum MRRU here */ + ho->neg_mrru = 1; + ho->mrru = cishort; + break; +#endif /* HAVE_MULTILINK */ + + case CI_SSNHF: + if (!ao->neg_ssnhf +#ifdef HAVE_MULTILINK + || !multilink +#endif /* HAVE_MULTILINK */ + || cilen != CILEN_VOID) { + orc = CONFREJ; + break; + } + ho->neg_ssnhf = 1; + break; + + case CI_EPDISC: + if (!ao->neg_endpoint || + cilen < CILEN_CHAR || + cilen > CILEN_CHAR + MAX_ENDP_LEN) { + orc = CONFREJ; + break; + } + GETCHAR(cichar, p); + cilen -= CILEN_CHAR; + ho->neg_endpoint = 1; + ho->endpoint.class_ = cichar; + ho->endpoint.length = cilen; + MEMCPY(ho->endpoint.value, p, cilen); + INCPTR(cilen, p); + break; + + default: + LCPDEBUG(("lcp_reqci: rcvd unknown option %d", citype)); + orc = CONFREJ; + break; + } + +endswitch: + if (orc == CONFACK && /* Good CI */ + rc != CONFACK) /* but prior CI wasnt? */ + continue; /* Don't send this one */ + + if (orc == CONFNAK) { /* Nak this CI? */ + if (reject_if_disagree /* Getting fed up with sending NAKs? */ + && citype != CI_MAGICNUMBER) { + orc = CONFREJ; /* Get tough if so */ + } else { + if (rc == CONFREJ) /* Rejecting prior CI? */ + continue; /* Don't send this one */ + rc = CONFNAK; + } + } + if (orc == CONFREJ) { /* Reject this CI */ + rc = CONFREJ; + if (cip != rejp) /* Need to move rejected CI? */ + MEMCPY(rejp, cip, cilen); /* Move it */ + INCPTR(cilen, rejp); /* Update output pointer */ + } + } + + /* + * If we wanted to send additional NAKs (for unsent CIs), the + * code would go here. 
The extra NAKs would go at *nakoutp. + * At present there are no cases where we want to ask the + * peer to negotiate an option. + */ + + switch (rc) { + case CONFACK: + *lenp = next - inp; + break; + case CONFNAK: + /* + * Copy the Nak'd options from the nak buffer to the caller's buffer. + */ + *lenp = nakoutp - (u_char*)nakp->payload; + MEMCPY(inp, nakp->payload, *lenp); + break; + case CONFREJ: + *lenp = rejp - inp; + break; + default: + break; + } + + pbuf_free(nakp); + LCPDEBUG(("lcp_reqci: returning CONF%s.", CODENAME(rc))); + return (rc); /* Return final code */ +} + + +/* + * lcp_up - LCP has come UP. + */ +static void lcp_up(fsm *f) { + ppp_pcb *pcb = f->pcb; + lcp_options *wo = &pcb->lcp_wantoptions; + lcp_options *ho = &pcb->lcp_hisoptions; + lcp_options *go = &pcb->lcp_gotoptions; + lcp_options *ao = &pcb->lcp_allowoptions; + int mtu, mru; + + if (!go->neg_magicnumber) + go->magicnumber = 0; + if (!ho->neg_magicnumber) + ho->magicnumber = 0; + + /* + * Set our MTU to the smaller of the MTU we wanted and + * the MRU our peer wanted. If we negotiated an MRU, + * set our MRU to the larger of value we wanted and + * the value we got in the negotiation. + * Note on the MTU: the link MTU can be the MRU the peer wanted, + * the interface MTU is set to the lowest of that, the + * MTU we want to use, and our link MRU. + */ + mtu = ho->neg_mru? ho->mru: PPP_MRU; + mru = go->neg_mru? LWIP_MAX(wo->mru, go->mru): PPP_MRU; +#ifdef HAVE_MULTILINK + if (!(multilink && go->neg_mrru && ho->neg_mrru)) +#endif /* HAVE_MULTILINK */ + netif_set_mtu(pcb, LWIP_MIN(LWIP_MIN(mtu, mru), ao->mru)); + ppp_send_config(pcb, mtu, + (ho->neg_asyncmap? ho->asyncmap: 0xffffffff), + ho->neg_pcompression, ho->neg_accompression); + ppp_recv_config(pcb, mru, + (pcb->settings.lax_recv? 0: go->neg_asyncmap? go->asyncmap: 0xffffffff), + go->neg_pcompression, go->neg_accompression); + + if (ho->neg_mru) + pcb->peer_mru = ho->mru; + + lcp_echo_lowerup(f->pcb); /* Enable echo messages */ + + link_established(pcb); +} + + +/* + * lcp_down - LCP has gone DOWN. + * + * Alert other protocols. + */ +static void lcp_down(fsm *f) { + ppp_pcb *pcb = f->pcb; + lcp_options *go = &pcb->lcp_gotoptions; + + lcp_echo_lowerdown(f->pcb); + + link_down(pcb); + + ppp_send_config(pcb, PPP_MRU, 0xffffffff, 0, 0); + ppp_recv_config(pcb, PPP_MRU, + (go->neg_asyncmap? go->asyncmap: 0xffffffff), + go->neg_pcompression, go->neg_accompression); + pcb->peer_mru = PPP_MRU; +} + + +/* + * lcp_starting - LCP needs the lower layer up. + */ +static void lcp_starting(fsm *f) { + ppp_pcb *pcb = f->pcb; + link_required(pcb); +} + + +/* + * lcp_finished - LCP has finished with the lower layer. + */ +static void lcp_finished(fsm *f) { + ppp_pcb *pcb = f->pcb; + link_terminated(pcb); +} + + +#if PRINTPKT_SUPPORT +/* + * lcp_printpkt - print the contents of an LCP packet. 
+ */ +static const char* const lcp_codenames[] = { + "ConfReq", "ConfAck", "ConfNak", "ConfRej", + "TermReq", "TermAck", "CodeRej", "ProtRej", + "EchoReq", "EchoRep", "DiscReq", "Ident", + "TimeRem" +}; + +static int lcp_printpkt(const u_char *p, int plen, + void (*printer) (void *, const char *, ...), void *arg) { + int code, id, len, olen, i; + const u_char *pstart, *optend; + u_short cishort; + u32_t cilong; + + if (plen < HEADERLEN) + return 0; + pstart = p; + GETCHAR(code, p); + GETCHAR(id, p); + GETSHORT(len, p); + if (len < HEADERLEN || len > plen) + return 0; + + if (code >= 1 && code <= (int)LWIP_ARRAYSIZE(lcp_codenames)) + printer(arg, " %s", lcp_codenames[code-1]); + else + printer(arg, " code=0x%x", code); + printer(arg, " id=0x%x", id); + len -= HEADERLEN; + switch (code) { + case CONFREQ: + case CONFACK: + case CONFNAK: + case CONFREJ: + /* print option list */ + while (len >= 2) { + GETCHAR(code, p); + GETCHAR(olen, p); + p -= 2; + if (olen < 2 || olen > len) { + break; + } + printer(arg, " <"); + len -= olen; + optend = p + olen; + switch (code) { + case CI_MRU: + if (olen == CILEN_SHORT) { + p += 2; + GETSHORT(cishort, p); + printer(arg, "mru %d", cishort); + } + break; + case CI_ASYNCMAP: + if (olen == CILEN_LONG) { + p += 2; + GETLONG(cilong, p); + printer(arg, "asyncmap 0x%x", cilong); + } + break; + case CI_AUTHTYPE: + if (olen >= CILEN_SHORT) { + p += 2; + printer(arg, "auth "); + GETSHORT(cishort, p); + switch (cishort) { +#if PAP_SUPPORT + case PPP_PAP: + printer(arg, "pap"); + break; +#endif /* PAP_SUPPORT */ +#if CHAP_SUPPORT + case PPP_CHAP: + printer(arg, "chap"); + if (p < optend) { + switch (*p) { + case CHAP_MD5: + printer(arg, " MD5"); + ++p; + break; +#if MSCHAP_SUPPORT + case CHAP_MICROSOFT: + printer(arg, " MS"); + ++p; + break; + + case CHAP_MICROSOFT_V2: + printer(arg, " MS-v2"); + ++p; + break; +#endif /* MSCHAP_SUPPORT */ + default: + break; + } + } + break; +#endif /* CHAP_SUPPORT */ +#if EAP_SUPPORT + case PPP_EAP: + printer(arg, "eap"); + break; +#endif /* EAP_SUPPORT */ + default: + printer(arg, "0x%x", cishort); + } + } + break; +#if LQR_SUPPORT + case CI_QUALITY: + if (olen >= CILEN_SHORT) { + p += 2; + printer(arg, "quality "); + GETSHORT(cishort, p); + switch (cishort) { + case PPP_LQR: + printer(arg, "lqr"); + break; + default: + printer(arg, "0x%x", cishort); + } + } + break; +#endif /* LQR_SUPPORT */ + case CI_CALLBACK: + if (olen >= CILEN_CHAR) { + p += 2; + printer(arg, "callback "); + GETCHAR(cishort, p); + switch (cishort) { + case CBCP_OPT: + printer(arg, "CBCP"); + break; + default: + printer(arg, "0x%x", cishort); + } + } + break; + case CI_MAGICNUMBER: + if (olen == CILEN_LONG) { + p += 2; + GETLONG(cilong, p); + printer(arg, "magic 0x%x", cilong); + } + break; + case CI_PCOMPRESSION: + if (olen == CILEN_VOID) { + p += 2; + printer(arg, "pcomp"); + } + break; + case CI_ACCOMPRESSION: + if (olen == CILEN_VOID) { + p += 2; + printer(arg, "accomp"); + } + break; + case CI_MRRU: + if (olen == CILEN_SHORT) { + p += 2; + GETSHORT(cishort, p); + printer(arg, "mrru %d", cishort); + } + break; + case CI_SSNHF: + if (olen == CILEN_VOID) { + p += 2; + printer(arg, "ssnhf"); + } + break; + case CI_EPDISC: +#ifdef HAVE_MULTILINK + if (olen >= CILEN_CHAR) { + struct epdisc epd; + p += 2; + GETCHAR(epd.class, p); + epd.length = olen - CILEN_CHAR; + if (epd.length > MAX_ENDP_LEN) + epd.length = MAX_ENDP_LEN; + if (epd.length > 0) { + MEMCPY(epd.value, p, epd.length); + p += epd.length; + } + printer(arg, "endpoint [%s]", epdisc_to_str(&epd)); + } 
+#else + printer(arg, "endpoint"); +#endif + break; + default: + break; + } + while (p < optend) { + GETCHAR(code, p); + printer(arg, " %.2x", code); + } + printer(arg, ">"); + } + break; + + case TERMACK: + case TERMREQ: + if (len > 0 && *p >= ' ' && *p < 0x7f) { + printer(arg, " "); + ppp_print_string(p, len, printer, arg); + p += len; + len = 0; + } + break; + + case ECHOREQ: + case ECHOREP: + case DISCREQ: + if (len >= 4) { + GETLONG(cilong, p); + printer(arg, " magic=0x%x", cilong); + len -= 4; + } + break; + + case IDENTIF: + case TIMEREM: + if (len >= 4) { + GETLONG(cilong, p); + printer(arg, " magic=0x%x", cilong); + len -= 4; + } + if (code == TIMEREM) { + if (len < 4) + break; + GETLONG(cilong, p); + printer(arg, " seconds=%u", cilong); + len -= 4; + } + if (len > 0) { + printer(arg, " "); + ppp_print_string(p, len, printer, arg); + p += len; + len = 0; + } + break; + default: + break; + } + + /* print the rest of the bytes in the packet */ + for (i = 0; i < len && i < 32; ++i) { + GETCHAR(code, p); + printer(arg, " %.2x", code); + } + if (i < len) { + printer(arg, " ..."); + p += len - i; + } + + return p - pstart; +} +#endif /* PRINTPKT_SUPPORT */ + +/* + * Time to shut down the link because there is nothing out there. + */ + +static void LcpLinkFailure(fsm *f) { + ppp_pcb *pcb = f->pcb; + if (f->state == PPP_FSM_OPENED) { + ppp_info("No response to %d echo-requests", pcb->lcp_echos_pending); + ppp_notice("Serial link appears to be disconnected."); + pcb->err_code = PPPERR_PEERDEAD; + lcp_close(pcb, "Peer not responding"); + } +} + +/* + * Timer expired for the LCP echo requests from this process. + */ + +static void LcpEchoCheck(fsm *f) { + ppp_pcb *pcb = f->pcb; + + LcpSendEchoRequest (f); + if (f->state != PPP_FSM_OPENED) + return; + + /* + * Start the timer for the next interval. + */ + if (pcb->lcp_echo_timer_running) + ppp_warn("assertion lcp_echo_timer_running==0 failed"); + TIMEOUT (LcpEchoTimeout, f, pcb->settings.lcp_echo_interval); + pcb->lcp_echo_timer_running = 1; +} + +/* + * LcpEchoTimeout - Timer expired on the LCP echo + */ + +static void LcpEchoTimeout(void *arg) { + fsm *f = (fsm*)arg; + ppp_pcb *pcb = f->pcb; + if (pcb->lcp_echo_timer_running != 0) { + pcb->lcp_echo_timer_running = 0; + LcpEchoCheck ((fsm *) arg); + } +} + +/* + * LcpEchoReply - LCP has received a reply to the echo + */ + +static void lcp_received_echo_reply(fsm *f, int id, u_char *inp, int len) { + ppp_pcb *pcb = f->pcb; + lcp_options *go = &pcb->lcp_gotoptions; + u32_t magic_val; + LWIP_UNUSED_ARG(id); + + /* Check the magic number - don't count replies from ourselves. */ + if (len < 4) { + ppp_dbglog("lcp: received short Echo-Reply, length %d", len); + return; + } + GETLONG(magic_val, inp); + if (go->neg_magicnumber + && magic_val == go->magicnumber) { + ppp_warn("appear to have received our own echo-reply!"); + return; + } + + /* Reset the number of outstanding echo frames */ + pcb->lcp_echos_pending = 0; +} + +/* + * LcpSendEchoRequest - Send an echo request frame to the peer + */ + +static void LcpSendEchoRequest(fsm *f) { + ppp_pcb *pcb = f->pcb; + lcp_options *go = &pcb->lcp_gotoptions; + u32_t lcp_magic; + u_char pkt[4], *pktp; + + /* + * Detect the failure of the peer at this point. 
+ */ + if (pcb->settings.lcp_echo_fails != 0) { + if (pcb->lcp_echos_pending >= pcb->settings.lcp_echo_fails) { + LcpLinkFailure(f); + pcb->lcp_echos_pending = 0; + } + } + +#if PPP_LCP_ADAPTIVE + /* + * If adaptive echos have been enabled, only send the echo request if + * no traffic was received since the last one. + */ + if (pcb->settings.lcp_echo_adaptive) { + static unsigned int last_pkts_in = 0; + +#if PPP_STATS_SUPPORT + update_link_stats(f->unit); + link_stats_valid = 0; +#endif /* PPP_STATS_SUPPORT */ + + if (link_stats.pkts_in != last_pkts_in) { + last_pkts_in = link_stats.pkts_in; + return; + } + } +#endif + + /* + * Make and send the echo request frame. + */ + if (f->state == PPP_FSM_OPENED) { + lcp_magic = go->magicnumber; + pktp = pkt; + PUTLONG(lcp_magic, pktp); + fsm_sdata(f, ECHOREQ, pcb->lcp_echo_number++, pkt, pktp - pkt); + ++pcb->lcp_echos_pending; + } +} + +/* + * lcp_echo_lowerup - Start the timer for the LCP frame + */ + +static void lcp_echo_lowerup(ppp_pcb *pcb) { + fsm *f = &pcb->lcp_fsm; + + /* Clear the parameters for generating echo frames */ + pcb->lcp_echos_pending = 0; + pcb->lcp_echo_number = 0; + pcb->lcp_echo_timer_running = 0; + + /* If a timeout interval is specified then start the timer */ + if (pcb->settings.lcp_echo_interval != 0) + LcpEchoCheck (f); +} + +/* + * lcp_echo_lowerdown - Stop the timer for the LCP frame + */ + +static void lcp_echo_lowerdown(ppp_pcb *pcb) { + fsm *f = &pcb->lcp_fsm; + + if (pcb->lcp_echo_timer_running != 0) { + UNTIMEOUT (LcpEchoTimeout, f); + pcb->lcp_echo_timer_running = 0; + } +} + +#endif /* PPP_SUPPORT */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/netif/ppp/magic.c b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/netif/ppp/magic.c new file mode 100644 index 0000000..32cfa26 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/netif/ppp/magic.c @@ -0,0 +1,294 @@ +/* + * magic.c - PPP Magic Number routines. + * + * Copyright (c) 1984-2000 Carnegie Mellon University. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * 3. The name "Carnegie Mellon University" must not be used to + * endorse or promote products derived from this software without + * prior written permission. For permission or any legal + * details, please contact + * Office of Technology Transfer + * Carnegie Mellon University + * 5000 Forbes Avenue + * Pittsburgh, PA 15213-3890 + * (412) 268-4387, fax: (412) 268-7395 + * tech-transfer@andrew.cmu.edu + * + * 4. Redistributions of any form whatsoever must retain the following + * acknowledgment: + * "This product includes software developed by Computing Services + * at Carnegie Mellon University (http://www.cmu.edu/computing/)." 
+ * + * CARNEGIE MELLON UNIVERSITY DISCLAIMS ALL WARRANTIES WITH REGARD TO + * THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY + * AND FITNESS, IN NO EVENT SHALL CARNEGIE MELLON UNIVERSITY BE LIABLE + * FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN + * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING + * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ +/***************************************************************************** +* randm.c - Random number generator program file. +* +* Copyright (c) 2003 by Marc Boucher, Services Informatiques (MBSI) inc. +* Copyright (c) 1998 by Global Election Systems Inc. +* +* The authors hereby grant permission to use, copy, modify, distribute, +* and license this software and its documentation for any purpose, provided +* that existing copyright notices are retained in all copies and that this +* notice and the following disclaimer are included verbatim in any +* distributions. No written agreement, license, or royalty fee is required +* for any of the authorized uses. +* +* THIS SOFTWARE IS PROVIDED BY THE CONTRIBUTORS *AS IS* AND ANY EXPRESS OR +* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES +* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +* IN NO EVENT SHALL THE CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT +* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF +* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +* +****************************************************************************** +* REVISION HISTORY +* +* 03-01-01 Marc Boucher +* Ported to lwIP. +* 98-06-03 Guy Lancaster , Global Election Systems Inc. +* Extracted from avos. +*****************************************************************************/ + +#include "netif/ppp/ppp_opts.h" +#if PPP_SUPPORT /* don't build if not configured for use in lwipopts.h */ + +#include "netif/ppp/ppp_impl.h" +#include "netif/ppp/magic.h" + +#if PPP_MD5_RANDM /* Using MD5 for better randomness if enabled */ + +#include "netif/ppp/pppcrypt.h" + +#define MD5_HASH_SIZE 16 +static char magic_randpool[MD5_HASH_SIZE]; /* Pool of randomness. */ +static long magic_randcount; /* Pseudo-random incrementer */ +static u32_t magic_randomseed; /* Seed used for random number generation. */ + +/* + * Churn the randomness pool on a random event. Call this early and often + * on random and semi-random system events to build randomness in time for + * usage. For randomly timed events, pass a null pointer and a zero length + * and this will use the system timer and other sources to add randomness. + * If new random data is available, pass a pointer to that and it will be + * included. + * + * Ref: Applied Cryptography 2nd Ed. by Bruce Schneier p. 
427 + */ +static void magic_churnrand(char *rand_data, u32_t rand_len) { + lwip_md5_context md5_ctx; + + /* LWIP_DEBUGF(LOG_INFO, ("magic_churnrand: %u@%P\n", rand_len, rand_data)); */ + lwip_md5_init(&md5_ctx); + lwip_md5_starts(&md5_ctx); + lwip_md5_update(&md5_ctx, (u_char *)magic_randpool, sizeof(magic_randpool)); + if (rand_data) { + lwip_md5_update(&md5_ctx, (u_char *)rand_data, rand_len); + } else { + struct { + /* INCLUDE fields for any system sources of randomness */ + u32_t jiffies; +#ifdef LWIP_RAND + u32_t rand; +#endif /* LWIP_RAND */ + } sys_data; + magic_randomseed += sys_jiffies(); + sys_data.jiffies = magic_randomseed; +#ifdef LWIP_RAND + sys_data.rand = LWIP_RAND(); +#endif /* LWIP_RAND */ + /* Load sys_data fields here. */ + lwip_md5_update(&md5_ctx, (u_char *)&sys_data, sizeof(sys_data)); + } + lwip_md5_finish(&md5_ctx, (u_char *)magic_randpool); + lwip_md5_free(&md5_ctx); +/* LWIP_DEBUGF(LOG_INFO, ("magic_churnrand: -> 0\n")); */ +} + +/* + * Initialize the random number generator. + */ +void magic_init(void) { + magic_churnrand(NULL, 0); +} + +/* + * Randomize our random seed value. + */ +void magic_randomize(void) { + magic_churnrand(NULL, 0); +} + +/* + * magic_random_bytes - Fill a buffer with random bytes. + * + * Use the random pool to generate random data. This degrades to pseudo + * random when used faster than randomness is supplied using magic_churnrand(). + * Note: It's important that there be sufficient randomness in magic_randpool + * before this is called for otherwise the range of the result may be + * narrow enough to make a search feasible. + * + * Ref: Applied Cryptography 2nd Ed. by Bruce Schneier p. 427 + * + * XXX Why does he not just call magic_churnrand() for each block? Probably + * so that you don't ever publish the seed which could possibly help + * predict future values. + * XXX Why don't we preserve md5 between blocks and just update it with + * magic_randcount each time? Probably there is a weakness but I wish that + * it was documented. + */ +void magic_random_bytes(unsigned char *buf, u32_t buf_len) { + lwip_md5_context md5_ctx; + u_char tmp[MD5_HASH_SIZE]; + u32_t n; + + while (buf_len > 0) { + lwip_md5_init(&md5_ctx); + lwip_md5_starts(&md5_ctx); + lwip_md5_update(&md5_ctx, (u_char *)magic_randpool, sizeof(magic_randpool)); + lwip_md5_update(&md5_ctx, (u_char *)&magic_randcount, sizeof(magic_randcount)); + lwip_md5_finish(&md5_ctx, tmp); + lwip_md5_free(&md5_ctx); + magic_randcount++; + n = LWIP_MIN(buf_len, MD5_HASH_SIZE); + MEMCPY(buf, tmp, n); + buf += n; + buf_len -= n; + } +} + +/* + * Return a new random number. + */ +u32_t magic(void) { + u32_t new_rand; + + magic_random_bytes((unsigned char *)&new_rand, sizeof(new_rand)); + + return new_rand; +} + +#else /* PPP_MD5_RANDM */ + +/*****************************/ +/*** LOCAL DATA STRUCTURES ***/ +/*****************************/ +#ifndef LWIP_RAND +static int magic_randomized; /* Set when truely randomized. */ +#endif /* LWIP_RAND */ +static u32_t magic_randomseed; /* Seed used for random number generation. */ + + +/***********************************/ +/*** PUBLIC FUNCTION DEFINITIONS ***/ +/***********************************/ + +/* + * Initialize the random number generator. + * + * Here we attempt to compute a random number seed but even if + * it isn't random, we'll randomize it later. + * + * The current method uses the fields from the real time clock, + * the idle process counter, the millisecond counter, and the + * hardware timer tick counter. 
When this is invoked + * in startup(), then the idle counter and timer values may + * repeat after each boot and the real time clock may not be + * operational. Thus we call it again on the first random + * event. + */ +void magic_init(void) { + magic_randomseed += sys_jiffies(); +#ifndef LWIP_RAND + /* Initialize the Borland random number generator. */ + srand((unsigned)magic_randomseed); +#endif /* LWIP_RAND */ +} + +/* + * magic_init - Initialize the magic number generator. + * + * Randomize our random seed value. Here we use the fact that + * this function is called at *truely random* times by the polling + * and network functions. Here we only get 16 bits of new random + * value but we use the previous value to randomize the other 16 + * bits. + */ +void magic_randomize(void) { +#ifndef LWIP_RAND + if (!magic_randomized) { + magic_randomized = !0; + magic_init(); + /* The initialization function also updates the seed. */ + } else { +#endif /* LWIP_RAND */ + magic_randomseed += sys_jiffies(); +#ifndef LWIP_RAND + } +#endif /* LWIP_RAND */ +} + +/* + * Return a new random number. + * + * Here we use the Borland rand() function to supply a pseudo random + * number which we make truely random by combining it with our own + * seed which is randomized by truely random events. + * Thus the numbers will be truely random unless there have been no + * operator or network events in which case it will be pseudo random + * seeded by the real time clock. + */ +u32_t magic(void) { +#ifdef LWIP_RAND + return LWIP_RAND() + magic_randomseed; +#else /* LWIP_RAND */ + return ((u32_t)rand() << 16) + (u32_t)rand() + magic_randomseed; +#endif /* LWIP_RAND */ +} + +/* + * magic_random_bytes - Fill a buffer with random bytes. + */ +void magic_random_bytes(unsigned char *buf, u32_t buf_len) { + u32_t new_rand, n; + + while (buf_len > 0) { + new_rand = magic(); + n = LWIP_MIN(buf_len, sizeof(new_rand)); + MEMCPY(buf, &new_rand, n); + buf += n; + buf_len -= n; + } +} +#endif /* PPP_MD5_RANDM */ + +/* + * Return a new random number between 0 and (2^pow)-1 included. + */ +u32_t magic_pow(u8_t pow) { + return magic() & ~(~0UL<. + * Copyright (c) 2002,2003,2004 Google, Inc. + * All rights reserved. + * + * License: + * Permission to use, copy, modify, and distribute this software and its + * documentation is hereby granted, provided that the above copyright + * notice appears in all copies. This software is provided without any + * warranty, express or implied. + * + * Changelog: + * 08/12/05 - Matt Domsch + * Only need extra skb padding on transmit, not receive. + * 06/18/04 - Matt Domsch , Oleg Makarenko + * Use Linux kernel 2.6 arc4 and sha1 routines rather than + * providing our own. 
+ * 2/15/04 - TS: added #include and testing for Kernel + * version before using + * MOD_DEC_USAGE_COUNT/MOD_INC_USAGE_COUNT which are + * deprecated in 2.6 + */ + +#include "netif/ppp/ppp_opts.h" +#if PPP_SUPPORT && MPPE_SUPPORT /* don't build if not configured for use in lwipopts.h */ + +#include + +#include "lwip/err.h" + +#include "netif/ppp/ppp_impl.h" +#include "netif/ppp/ccp.h" +#include "netif/ppp/mppe.h" +#include "netif/ppp/pppdebug.h" +#include "netif/ppp/pppcrypt.h" + +#define SHA1_SIGNATURE_SIZE 20 + +/* ppp_mppe_state.bits definitions */ +#define MPPE_BIT_A 0x80 /* Encryption table were (re)inititalized */ +#define MPPE_BIT_B 0x40 /* MPPC only (not implemented) */ +#define MPPE_BIT_C 0x20 /* MPPC only (not implemented) */ +#define MPPE_BIT_D 0x10 /* This is an encrypted frame */ + +#define MPPE_BIT_FLUSHED MPPE_BIT_A +#define MPPE_BIT_ENCRYPTED MPPE_BIT_D + +#define MPPE_BITS(p) ((p)[0] & 0xf0) +#define MPPE_CCOUNT(p) ((((p)[0] & 0x0f) << 8) + (p)[1]) +#define MPPE_CCOUNT_SPACE 0x1000 /* The size of the ccount space */ + +#define MPPE_OVHD 2 /* MPPE overhead/packet */ +#define SANITY_MAX 1600 /* Max bogon factor we will tolerate */ + +/* + * Perform the MPPE rekey algorithm, from RFC 3078, sec. 7.3. + * Well, not what's written there, but rather what they meant. + */ +static void mppe_rekey(ppp_mppe_state * state, int initial_key) +{ + lwip_sha1_context sha1_ctx; + u8_t sha1_digest[SHA1_SIGNATURE_SIZE]; + + /* + * Key Derivation, from RFC 3078, RFC 3079. + * Equivalent to Get_Key() for MS-CHAP as described in RFC 3079. + */ + lwip_sha1_init(&sha1_ctx); + lwip_sha1_starts(&sha1_ctx); + lwip_sha1_update(&sha1_ctx, state->master_key, state->keylen); + lwip_sha1_update(&sha1_ctx, mppe_sha1_pad1, SHA1_PAD_SIZE); + lwip_sha1_update(&sha1_ctx, state->session_key, state->keylen); + lwip_sha1_update(&sha1_ctx, mppe_sha1_pad2, SHA1_PAD_SIZE); + lwip_sha1_finish(&sha1_ctx, sha1_digest); + lwip_sha1_free(&sha1_ctx); + MEMCPY(state->session_key, sha1_digest, state->keylen); + + if (!initial_key) { + lwip_arc4_init(&state->arc4); + lwip_arc4_setup(&state->arc4, sha1_digest, state->keylen); + lwip_arc4_crypt(&state->arc4, state->session_key, state->keylen); + lwip_arc4_free(&state->arc4); + } + if (state->keylen == 8) { + /* See RFC 3078 */ + state->session_key[0] = 0xd1; + state->session_key[1] = 0x26; + state->session_key[2] = 0x9e; + } + lwip_arc4_init(&state->arc4); + lwip_arc4_setup(&state->arc4, state->session_key, state->keylen); +} + +/* + * Set key, used by MSCHAP before mppe_init() is actually called by CCP so we + * don't have to keep multiple copies of keys. + */ +void mppe_set_key(ppp_pcb *pcb, ppp_mppe_state *state, u8_t *key) { + LWIP_UNUSED_ARG(pcb); + MEMCPY(state->master_key, key, MPPE_MAX_KEY_LEN); +} + +/* + * Initialize (de)compressor state. + */ +void +mppe_init(ppp_pcb *pcb, ppp_mppe_state *state, u8_t options) +{ +#if PPP_DEBUG + const u8_t *debugstr = (const u8_t*)"mppe_comp_init"; + if (&pcb->mppe_decomp == state) { + debugstr = (const u8_t*)"mppe_decomp_init"; + } +#endif /* PPP_DEBUG */ + + /* Save keys. */ + MEMCPY(state->session_key, state->master_key, sizeof(state->master_key)); + + if (options & MPPE_OPT_128) + state->keylen = 16; + else if (options & MPPE_OPT_40) + state->keylen = 8; + else { + PPPDEBUG(LOG_DEBUG, ("%s[%d]: unknown key length\n", debugstr, + pcb->netif->num)); + lcp_close(pcb, "MPPE required but peer negotiation failed"); + return; + } + if (options & MPPE_OPT_STATEFUL) + state->stateful = 1; + + /* Generate the initial session key. 
*/ + mppe_rekey(state, 1); + +#if PPP_DEBUG + { + int i; + char mkey[sizeof(state->master_key) * 2 + 1]; + char skey[sizeof(state->session_key) * 2 + 1]; + + PPPDEBUG(LOG_DEBUG, ("%s[%d]: initialized with %d-bit %s mode\n", + debugstr, pcb->netif->num, (state->keylen == 16) ? 128 : 40, + (state->stateful) ? "stateful" : "stateless")); + + for (i = 0; i < (int)sizeof(state->master_key); i++) + sprintf(mkey + i * 2, "%02x", state->master_key[i]); + for (i = 0; i < (int)sizeof(state->session_key); i++) + sprintf(skey + i * 2, "%02x", state->session_key[i]); + PPPDEBUG(LOG_DEBUG, + ("%s[%d]: keys: master: %s initial session: %s\n", + debugstr, pcb->netif->num, mkey, skey)); + } +#endif /* PPP_DEBUG */ + + /* + * Initialize the coherency count. The initial value is not specified + * in RFC 3078, but we can make a reasonable assumption that it will + * start at 0. Setting it to the max here makes the comp/decomp code + * do the right thing (determined through experiment). + */ + state->ccount = MPPE_CCOUNT_SPACE - 1; + + /* + * Note that even though we have initialized the key table, we don't + * set the FLUSHED bit. This is contrary to RFC 3078, sec. 3.1. + */ + state->bits = MPPE_BIT_ENCRYPTED; +} + +/* + * We received a CCP Reset-Request (actually, we are sending a Reset-Ack), + * tell the compressor to rekey. Note that we MUST NOT rekey for + * every CCP Reset-Request; we only rekey on the next xmit packet. + * We might get multiple CCP Reset-Requests if our CCP Reset-Ack is lost. + * So, rekeying for every CCP Reset-Request is broken as the peer will not + * know how many times we've rekeyed. (If we rekey and THEN get another + * CCP Reset-Request, we must rekey again.) + */ +void mppe_comp_reset(ppp_pcb *pcb, ppp_mppe_state *state) +{ + LWIP_UNUSED_ARG(pcb); + state->bits |= MPPE_BIT_FLUSHED; +} + +/* + * Compress (encrypt) a packet. + * It's strange to call this a compressor, since the output is always + * MPPE_OVHD + 2 bytes larger than the input. + */ +err_t +mppe_compress(ppp_pcb *pcb, ppp_mppe_state *state, struct pbuf **pb, u16_t protocol) +{ + struct pbuf *n, *np; + u8_t *pl; + err_t err; + + LWIP_UNUSED_ARG(pcb); + + /* TCP stack requires that we don't change the packet payload, therefore we copy + * the whole packet before encryption. 
+ */ + np = pbuf_alloc(PBUF_RAW, MPPE_OVHD + sizeof(protocol) + (*pb)->tot_len, PBUF_RAM); + if (!np) { + return ERR_MEM; + } + + /* Hide MPPE header + protocol */ + pbuf_remove_header(np, MPPE_OVHD + sizeof(protocol)); + + if ((err = pbuf_copy(np, *pb)) != ERR_OK) { + pbuf_free(np); + return err; + } + + /* Reveal MPPE header + protocol */ + pbuf_add_header(np, MPPE_OVHD + sizeof(protocol)); + + *pb = np; + pl = (u8_t*)np->payload; + + state->ccount = (state->ccount + 1) % MPPE_CCOUNT_SPACE; + PPPDEBUG(LOG_DEBUG, ("mppe_compress[%d]: ccount %d\n", pcb->netif->num, state->ccount)); + /* FIXME: use PUT* macros */ + pl[0] = state->ccount>>8; + pl[1] = state->ccount; + + if (!state->stateful || /* stateless mode */ + ((state->ccount & 0xff) == 0xff) || /* "flag" packet */ + (state->bits & MPPE_BIT_FLUSHED)) { /* CCP Reset-Request */ + /* We must rekey */ + if (state->stateful) { + PPPDEBUG(LOG_DEBUG, ("mppe_compress[%d]: rekeying\n", pcb->netif->num)); + } + mppe_rekey(state, 0); + state->bits |= MPPE_BIT_FLUSHED; + } + pl[0] |= state->bits; + state->bits &= ~MPPE_BIT_FLUSHED; /* reset for next xmit */ + pl += MPPE_OVHD; + + /* Add protocol */ + /* FIXME: add PFC support */ + pl[0] = protocol >> 8; + pl[1] = protocol; + + /* Hide MPPE header */ + pbuf_remove_header(np, MPPE_OVHD); + + /* Encrypt packet */ + for (n = np; n != NULL; n = n->next) { + lwip_arc4_crypt(&state->arc4, (u8_t*)n->payload, n->len); + if (n->tot_len == n->len) { + break; + } + } + + /* Reveal MPPE header */ + pbuf_add_header(np, MPPE_OVHD); + + return ERR_OK; +} + +/* + * We received a CCP Reset-Ack. Just ignore it. + */ +void mppe_decomp_reset(ppp_pcb *pcb, ppp_mppe_state *state) +{ + LWIP_UNUSED_ARG(pcb); + LWIP_UNUSED_ARG(state); + return; +} + +/* + * Decompress (decrypt) an MPPE packet. + */ +err_t +mppe_decompress(ppp_pcb *pcb, ppp_mppe_state *state, struct pbuf **pb) +{ + struct pbuf *n0 = *pb, *n; + u8_t *pl; + u16_t ccount; + u8_t flushed; + + /* MPPE Header */ + if (n0->len < MPPE_OVHD) { + PPPDEBUG(LOG_DEBUG, + ("mppe_decompress[%d]: short pkt (%d)\n", + pcb->netif->num, n0->len)); + state->sanity_errors += 100; + goto sanity_error; + } + + pl = (u8_t*)n0->payload; + flushed = MPPE_BITS(pl) & MPPE_BIT_FLUSHED; + ccount = MPPE_CCOUNT(pl); + PPPDEBUG(LOG_DEBUG, ("mppe_decompress[%d]: ccount %d\n", + pcb->netif->num, ccount)); + + /* sanity checks -- terminate with extreme prejudice */ + if (!(MPPE_BITS(pl) & MPPE_BIT_ENCRYPTED)) { + PPPDEBUG(LOG_DEBUG, + ("mppe_decompress[%d]: ENCRYPTED bit not set!\n", + pcb->netif->num)); + state->sanity_errors += 100; + goto sanity_error; + } + if (!state->stateful && !flushed) { + PPPDEBUG(LOG_DEBUG, ("mppe_decompress[%d]: FLUSHED bit not set in " + "stateless mode!\n", pcb->netif->num)); + state->sanity_errors += 100; + goto sanity_error; + } + if (state->stateful && ((ccount & 0xff) == 0xff) && !flushed) { + PPPDEBUG(LOG_DEBUG, ("mppe_decompress[%d]: FLUSHED bit not set on " + "flag packet!\n", pcb->netif->num)); + state->sanity_errors += 100; + goto sanity_error; + } + + /* + * Check the coherency count. + */ + + if (!state->stateful) { + /* Discard late packet */ + if ((ccount - state->ccount) % MPPE_CCOUNT_SPACE > MPPE_CCOUNT_SPACE / 2) { + state->sanity_errors++; + goto sanity_error; + } + + /* RFC 3078, sec 8.1. Rekey for every packet. */ + while (state->ccount != ccount) { + mppe_rekey(state, 0); + state->ccount = (state->ccount + 1) % MPPE_CCOUNT_SPACE; + } + } else { + /* RFC 3078, sec 8.2. 
*/ + if (!state->discard) { + /* normal state */ + state->ccount = (state->ccount + 1) % MPPE_CCOUNT_SPACE; + if (ccount != state->ccount) { + /* + * (ccount > state->ccount) + * Packet loss detected, enter the discard state. + * Signal the peer to rekey (by sending a CCP Reset-Request). + */ + state->discard = 1; + ccp_resetrequest(pcb); + return ERR_BUF; + } + } else { + /* discard state */ + if (!flushed) { + /* ccp.c will be silent (no additional CCP Reset-Requests). */ + return ERR_BUF; + } else { + /* Rekey for every missed "flag" packet. */ + while ((ccount & ~0xff) != + (state->ccount & ~0xff)) { + mppe_rekey(state, 0); + state->ccount = + (state->ccount + + 256) % MPPE_CCOUNT_SPACE; + } + + /* reset */ + state->discard = 0; + state->ccount = ccount; + /* + * Another problem with RFC 3078 here. It implies that the + * peer need not send a Reset-Ack packet. But RFC 1962 + * requires it. Hopefully, M$ does send a Reset-Ack; even + * though it isn't required for MPPE synchronization, it is + * required to reset CCP state. + */ + } + } + if (flushed) + mppe_rekey(state, 0); + } + + /* Hide MPPE header */ + pbuf_remove_header(n0, MPPE_OVHD); + + /* Decrypt the packet. */ + for (n = n0; n != NULL; n = n->next) { + lwip_arc4_crypt(&state->arc4, (u8_t*)n->payload, n->len); + if (n->tot_len == n->len) { + break; + } + } + + /* good packet credit */ + state->sanity_errors >>= 1; + + return ERR_OK; + +sanity_error: + if (state->sanity_errors >= SANITY_MAX) { + /* + * Take LCP down if the peer is sending too many bogons. + * We don't want to do this for a single or just a few + * instances since it could just be due to packet corruption. + */ + lcp_close(pcb, "Too many MPPE errors"); + } + return ERR_BUF; +} + +#endif /* PPP_SUPPORT && MPPE_SUPPORT */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/netif/ppp/multilink.c b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/netif/ppp/multilink.c new file mode 100644 index 0000000..9153dea --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/netif/ppp/multilink.c @@ -0,0 +1,609 @@ +/* + * multilink.c - support routines for multilink. + * + * Copyright (c) 2000-2002 Paul Mackerras. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. The name(s) of the authors of this software must not be used to + * endorse or promote products derived from this software without + * prior written permission. + * + * 3. Redistributions of any form whatsoever must retain the following + * acknowledgment: + * "This product includes software developed by Paul Mackerras + * ". + * + * THE AUTHORS OF THIS SOFTWARE DISCLAIM ALL WARRANTIES WITH REGARD TO + * THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY + * AND FITNESS, IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY + * SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN + * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING + * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#include "netif/ppp/ppp_opts.h" +#if PPP_SUPPORT && defined(HAVE_MULTILINK) /* don't build if not configured for use in lwipopts.h */ + +/* Multilink support + * + * Multilink uses Samba TDB (Trivial Database Library), which + * we cannot port, because it needs a filesystem. + * + * We have to choose between doing a memory-shared TDB-clone, + * or dropping multilink support at all. + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "netif/ppp/ppp_impl.h" + +#include "netif/ppp/fsm.h" +#include "netif/ppp/lcp.h" +#include "netif/ppp/tdb.h" + +bool endpoint_specified; /* user gave explicit endpoint discriminator */ +char *bundle_id; /* identifier for our bundle */ +char *blinks_id; /* key for the list of links */ +bool doing_multilink; /* multilink was enabled and agreed to */ +bool multilink_master; /* we own the multilink bundle */ + +extern TDB_CONTEXT *pppdb; +extern char db_key[]; + +static void make_bundle_links (int append); +static void remove_bundle_link (void); +static void iterate_bundle_links (void (*func) (char *)); + +static int get_default_epdisc (struct epdisc *); +static int parse_num (char *str, const char *key, int *valp); +static int owns_unit (TDB_DATA pid, int unit); + +#define set_ip_epdisc(ep, addr) do { \ + ep->length = 4; \ + ep->value[0] = addr >> 24; \ + ep->value[1] = addr >> 16; \ + ep->value[2] = addr >> 8; \ + ep->value[3] = addr; \ +} while (0) + +#define LOCAL_IP_ADDR(addr) \ + (((addr) & 0xff000000) == 0x0a000000 /* 10.x.x.x */ \ + || ((addr) & 0xfff00000) == 0xac100000 /* 172.16.x.x */ \ + || ((addr) & 0xffff0000) == 0xc0a80000) /* 192.168.x.x */ + +#define process_exists(n) (kill((n), 0) == 0 || errno != ESRCH) + +void +mp_check_options() +{ + lcp_options *wo = &lcp_wantoptions[0]; + lcp_options *ao = &lcp_allowoptions[0]; + + doing_multilink = 0; + if (!multilink) + return; + /* if we're doing multilink, we have to negotiate MRRU */ + if (!wo->neg_mrru) { + /* mrru not specified, default to mru */ + wo->mrru = wo->mru; + wo->neg_mrru = 1; + } + ao->mrru = ao->mru; + ao->neg_mrru = 1; + + if (!wo->neg_endpoint && !noendpoint) { + /* get a default endpoint value */ + wo->neg_endpoint = get_default_epdisc(&wo->endpoint); + } +} + +/* + * Make a new bundle or join us to an existing bundle + * if we are doing multilink. + */ +int +mp_join_bundle() +{ + lcp_options *go = &lcp_gotoptions[0]; + lcp_options *ho = &lcp_hisoptions[0]; + lcp_options *ao = &lcp_allowoptions[0]; + int unit, pppd_pid; + int l, mtu; + char *p; + TDB_DATA key, pid, rec; + + if (doing_multilink) { + /* have previously joined a bundle */ + if (!go->neg_mrru || !ho->neg_mrru) { + notice("oops, didn't get multilink on renegotiation"); + lcp_close(pcb, "multilink required"); + return 0; + } + /* XXX should check the peer_authname and ho->endpoint + are the same as previously */ + return 0; + } + + if (!go->neg_mrru || !ho->neg_mrru) { + /* not doing multilink */ + if (go->neg_mrru) + notice("oops, multilink negotiated only for receive"); + mtu = ho->neg_mru? ho->mru: PPP_MRU; + if (mtu > ao->mru) + mtu = ao->mru; + if (demand) { + /* already have a bundle */ + cfg_bundle(0, 0, 0, 0); + netif_set_mtu(pcb, mtu); + return 0; + } + make_new_bundle(0, 0, 0, 0); + set_ifunit(1); + netif_set_mtu(pcb, mtu); + return 0; + } + + doing_multilink = 1; + + /* + * Find the appropriate bundle or join a new one. + * First we make up a name for the bundle. + * The length estimate is worst-case assuming every + * character has to be quoted. 
+ */ + l = 4 * strlen(peer_authname) + 10; + if (ho->neg_endpoint) + l += 3 * ho->endpoint.length + 8; + if (bundle_name) + l += 3 * strlen(bundle_name) + 2; + bundle_id = malloc(l); + if (bundle_id == 0) + novm("bundle identifier"); + + p = bundle_id; + p += slprintf(p, l-1, "BUNDLE=\"%q\"", peer_authname); + if (ho->neg_endpoint || bundle_name) + *p++ = '/'; + if (ho->neg_endpoint) + p += slprintf(p, bundle_id+l-p, "%s", + epdisc_to_str(&ho->endpoint)); + if (bundle_name) + p += slprintf(p, bundle_id+l-p, "/%v", bundle_name); + + /* Make the key for the list of links belonging to the bundle */ + l = p - bundle_id; + blinks_id = malloc(l + 7); + if (blinks_id == NULL) + novm("bundle links key"); + slprintf(blinks_id, l + 7, "BUNDLE_LINKS=%s", bundle_id + 7); + + /* + * For demand mode, we only need to configure the bundle + * and attach the link. + */ + mtu = LWIP_MIN(ho->mrru, ao->mru); + if (demand) { + cfg_bundle(go->mrru, ho->mrru, go->neg_ssnhf, ho->neg_ssnhf); + netif_set_mtu(pcb, mtu); + script_setenv("BUNDLE", bundle_id + 7, 1); + return 0; + } + + /* + * Check if the bundle ID is already in the database. + */ + unit = -1; + lock_db(); + key.dptr = bundle_id; + key.dsize = p - bundle_id; + pid = tdb_fetch(pppdb, key); + if (pid.dptr != NULL) { + /* bundle ID exists, see if the pppd record exists */ + rec = tdb_fetch(pppdb, pid); + if (rec.dptr != NULL && rec.dsize > 0) { + /* make sure the string is null-terminated */ + rec.dptr[rec.dsize-1] = 0; + /* parse the interface number */ + parse_num(rec.dptr, "IFNAME=ppp", &unit); + /* check the pid value */ + if (!parse_num(rec.dptr, "PPPD_PID=", &pppd_pid) + || !process_exists(pppd_pid) + || !owns_unit(pid, unit)) + unit = -1; + free(rec.dptr); + } + free(pid.dptr); + } + + if (unit >= 0) { + /* attach to existing unit */ + if (bundle_attach(unit)) { + set_ifunit(0); + script_setenv("BUNDLE", bundle_id + 7, 0); + make_bundle_links(1); + unlock_db(); + info("Link attached to %s", ifname); + return 1; + } + /* attach failed because bundle doesn't exist */ + } + + /* we have to make a new bundle */ + make_new_bundle(go->mrru, ho->mrru, go->neg_ssnhf, ho->neg_ssnhf); + set_ifunit(1); + netif_set_mtu(pcb, mtu); + script_setenv("BUNDLE", bundle_id + 7, 1); + make_bundle_links(pcb); + unlock_db(); + info("New bundle %s created", ifname); + multilink_master = 1; + return 0; +} + +void mp_exit_bundle() +{ + lock_db(); + remove_bundle_link(); + unlock_db(); +} + +static void sendhup(char *str) +{ + int pid; + + if (parse_num(str, "PPPD_PID=", &pid) && pid != getpid()) { + if (debug) + dbglog("sending SIGHUP to process %d", pid); + kill(pid, SIGHUP); + } +} + +void mp_bundle_terminated() +{ + TDB_DATA key; + + bundle_terminating = 1; + upper_layers_down(pcb); + notice("Connection terminated."); +#if PPP_STATS_SUPPORT + print_link_stats(); +#endif /* PPP_STATS_SUPPORT */ + if (!demand) { + remove_pidfiles(); + script_unsetenv("IFNAME"); + } + + lock_db(); + destroy_bundle(); + iterate_bundle_links(sendhup); + key.dptr = blinks_id; + key.dsize = strlen(blinks_id); + tdb_delete(pppdb, key); + unlock_db(); + + new_phase(PPP_PHASE_DEAD); + + doing_multilink = 0; + multilink_master = 0; +} + +static void make_bundle_links(int append) +{ + TDB_DATA key, rec; + char *p; + char entry[32]; + int l; + + key.dptr = blinks_id; + key.dsize = strlen(blinks_id); + slprintf(entry, sizeof(entry), "%s;", db_key); + p = entry; + if (append) { + rec = tdb_fetch(pppdb, key); + if (rec.dptr != NULL && rec.dsize > 0) { + rec.dptr[rec.dsize-1] = 0; + if (strstr(rec.dptr, 
db_key) != NULL) { + /* already in there? strange */ + warn("link entry already exists in tdb"); + return; + } + l = rec.dsize + strlen(entry); + p = malloc(l); + if (p == NULL) + novm("bundle link list"); + slprintf(p, l, "%s%s", rec.dptr, entry); + } else { + warn("bundle link list not found"); + } + if (rec.dptr != NULL) + free(rec.dptr); + } + rec.dptr = p; + rec.dsize = strlen(p) + 1; + if (tdb_store(pppdb, key, rec, TDB_REPLACE)) + error("couldn't %s bundle link list", + append? "update": "create"); + if (p != entry) + free(p); +} + +static void remove_bundle_link() +{ + TDB_DATA key, rec; + char entry[32]; + char *p, *q; + int l; + + key.dptr = blinks_id; + key.dsize = strlen(blinks_id); + slprintf(entry, sizeof(entry), "%s;", db_key); + + rec = tdb_fetch(pppdb, key); + if (rec.dptr == NULL || rec.dsize <= 0) { + if (rec.dptr != NULL) + free(rec.dptr); + return; + } + rec.dptr[rec.dsize-1] = 0; + p = strstr(rec.dptr, entry); + if (p != NULL) { + q = p + strlen(entry); + l = strlen(q) + 1; + memmove(p, q, l); + rec.dsize = p - rec.dptr + l; + if (tdb_store(pppdb, key, rec, TDB_REPLACE)) + error("couldn't update bundle link list (removal)"); + } + free(rec.dptr); +} + +static void iterate_bundle_links(void (*func)(char *)) +{ + TDB_DATA key, rec, pp; + char *p, *q; + + key.dptr = blinks_id; + key.dsize = strlen(blinks_id); + rec = tdb_fetch(pppdb, key); + if (rec.dptr == NULL || rec.dsize <= 0) { + error("bundle link list not found (iterating list)"); + if (rec.dptr != NULL) + free(rec.dptr); + return; + } + p = rec.dptr; + p[rec.dsize-1] = 0; + while ((q = strchr(p, ';')) != NULL) { + *q = 0; + key.dptr = p; + key.dsize = q - p; + pp = tdb_fetch(pppdb, key); + if (pp.dptr != NULL && pp.dsize > 0) { + pp.dptr[pp.dsize-1] = 0; + func(pp.dptr); + } + if (pp.dptr != NULL) + free(pp.dptr); + p = q + 1; + } + free(rec.dptr); +} + +static int +parse_num(str, key, valp) + char *str; + const char *key; + int *valp; +{ + char *p, *endp; + int i; + + p = strstr(str, key); + if (p != 0) { + p += strlen(key); + i = strtol(p, &endp, 10); + if (endp != p && (*endp == 0 || *endp == ';')) { + *valp = i; + return 1; + } + } + return 0; +} + +/* + * Check whether the pppd identified by `key' still owns ppp unit `unit'. + */ +static int +owns_unit(key, unit) + TDB_DATA key; + int unit; +{ + char ifkey[32]; + TDB_DATA kd, vd; + int ret = 0; + + slprintf(ifkey, sizeof(ifkey), "IFNAME=ppp%d", unit); + kd.dptr = ifkey; + kd.dsize = strlen(ifkey); + vd = tdb_fetch(pppdb, kd); + if (vd.dptr != NULL) { + ret = vd.dsize == key.dsize + && memcmp(vd.dptr, key.dptr, vd.dsize) == 0; + free(vd.dptr); + } + return ret; +} + +static int +get_default_epdisc(ep) + struct epdisc *ep; +{ + char *p; + struct hostent *hp; + u32_t addr; + + /* First try for an ethernet MAC address */ + p = get_first_ethernet(); + if (p != 0 && get_if_hwaddr(ep->value, p) >= 0) { + ep->class = EPD_MAC; + ep->length = 6; + return 1; + } + + /* see if our hostname corresponds to a reasonable IP address */ + hp = gethostbyname(hostname); + if (hp != NULL) { + addr = *(u32_t *)hp->h_addr; + if (!bad_ip_adrs(addr)) { + addr = lwip_ntohl(addr); + if (!LOCAL_IP_ADDR(addr)) { + ep->class = EPD_IP; + set_ip_epdisc(ep, addr); + return 1; + } + } + } + + return 0; +} + +/* + * epdisc_to_str - make a printable string from an endpoint discriminator. 
+ */ + +static char *endp_class_names[] = { + "null", "local", "IP", "MAC", "magic", "phone" +}; + +char * +epdisc_to_str(ep) + struct epdisc *ep; +{ + static char str[MAX_ENDP_LEN*3+8]; + u_char *p = ep->value; + int i, mask = 0; + char *q, c, c2; + + if (ep->class == EPD_NULL && ep->length == 0) + return "null"; + if (ep->class == EPD_IP && ep->length == 4) { + u32_t addr; + + GETLONG(addr, p); + slprintf(str, sizeof(str), "IP:%I", lwip_htonl(addr)); + return str; + } + + c = ':'; + c2 = '.'; + if (ep->class == EPD_MAC && ep->length == 6) + c2 = ':'; + else if (ep->class == EPD_MAGIC && (ep->length % 4) == 0) + mask = 3; + q = str; + if (ep->class <= EPD_PHONENUM) + q += slprintf(q, sizeof(str)-1, "%s", + endp_class_names[ep->class]); + else + q += slprintf(q, sizeof(str)-1, "%d", ep->class); + c = ':'; + for (i = 0; i < ep->length && i < MAX_ENDP_LEN; ++i) { + if ((i & mask) == 0) { + *q++ = c; + c = c2; + } + q += slprintf(q, str + sizeof(str) - q, "%.2x", ep->value[i]); + } + return str; +} + +static int hexc_val(int c) +{ + if (c >= 'a') + return c - 'a' + 10; + if (c >= 'A') + return c - 'A' + 10; + return c - '0'; +} + +int +str_to_epdisc(ep, str) + struct epdisc *ep; + char *str; +{ + int i, l; + char *p, *endp; + + for (i = EPD_NULL; i <= EPD_PHONENUM; ++i) { + int sl = strlen(endp_class_names[i]); + if (strncasecmp(str, endp_class_names[i], sl) == 0) { + str += sl; + break; + } + } + if (i > EPD_PHONENUM) { + /* not a class name, try a decimal class number */ + i = strtol(str, &endp, 10); + if (endp == str) + return 0; /* can't parse class number */ + str = endp; + } + ep->class = i; + if (*str == 0) { + ep->length = 0; + return 1; + } + if (*str != ':' && *str != '.') + return 0; + ++str; + + if (i == EPD_IP) { + u32_t addr; + i = parse_dotted_ip(str, &addr); + if (i == 0 || str[i] != 0) + return 0; + set_ip_epdisc(ep, addr); + return 1; + } + if (i == EPD_MAC && get_if_hwaddr(ep->value, str) >= 0) { + ep->length = 6; + return 1; + } + + p = str; + for (l = 0; l < MAX_ENDP_LEN; ++l) { + if (*str == 0) + break; + if (p <= str) + for (p = str; isxdigit(*p); ++p) + ; + i = p - str; + if (i == 0) + return 0; + ep->value[l] = hexc_val(*str++); + if ((i & 1) == 0) + ep->value[l] = (ep->value[l] << 4) + hexc_val(*str++); + if (*str == ':' || *str == '.') + ++str; + } + if (*str != 0 || (ep->class == EPD_MAC && l != 6)) + return 0; + ep->length = l; + return 1; +} + +#endif /* PPP_SUPPORT && HAVE_MULTILINK */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/netif/ppp/ppp.c b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/netif/ppp/ppp.c new file mode 100644 index 0000000..98d7b9e --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/netif/ppp/ppp.c @@ -0,0 +1,1628 @@ +/***************************************************************************** +* ppp.c - Network Point to Point Protocol program file. +* +* Copyright (c) 2003 by Marc Boucher, Services Informatiques (MBSI) inc. +* portions Copyright (c) 1997 by Global Election Systems Inc. +* +* The authors hereby grant permission to use, copy, modify, distribute, +* and license this software and its documentation for any purpose, provided +* that existing copyright notices are retained in all copies and that this +* notice and the following disclaimer are included verbatim in any +* distributions. No written agreement, license, or royalty fee is required +* for any of the authorized uses. 
+* +* THIS SOFTWARE IS PROVIDED BY THE CONTRIBUTORS *AS IS* AND ANY EXPRESS OR +* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES +* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +* IN NO EVENT SHALL THE CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT +* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF +* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +* +****************************************************************************** +* REVISION HISTORY +* +* 03-01-01 Marc Boucher +* Ported to lwIP. +* 97-11-05 Guy Lancaster , Global Election Systems Inc. +* Original. +*****************************************************************************/ + +/* + * ppp_defs.h - PPP definitions. + * + * if_pppvar.h - private structures and declarations for PPP. + * + * Copyright (c) 1994 The Australian National University. + * All rights reserved. + * + * Permission to use, copy, modify, and distribute this software and its + * documentation is hereby granted, provided that the above copyright + * notice appears in all copies. This software is provided without any + * warranty, express or implied. The Australian National University + * makes no representations about the suitability of this software for + * any purpose. + * + * IN NO EVENT SHALL THE AUSTRALIAN NATIONAL UNIVERSITY BE LIABLE TO ANY + * PARTY FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES + * ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF + * THE AUSTRALIAN NATIONAL UNIVERSITY HAVE BEEN ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * THE AUSTRALIAN NATIONAL UNIVERSITY SPECIFICALLY DISCLAIMS ANY WARRANTIES, + * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY + * AND FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS + * ON AN "AS IS" BASIS, AND THE AUSTRALIAN NATIONAL UNIVERSITY HAS NO + * OBLIGATION TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, + * OR MODIFICATIONS. + */ + +/* + * if_ppp.h - Point-to-Point Protocol definitions. + * + * Copyright (c) 1989 Carnegie Mellon University. + * All rights reserved. + * + * Redistribution and use in source and binary forms are permitted + * provided that the above copyright notice and this paragraph are + * duplicated in all such forms and that any documentation, + * advertising materials, and other materials related to such + * distribution and use acknowledge that the software was developed + * by Carnegie Mellon University. The name of the + * University may not be used to endorse or promote products derived + * from this software without specific prior written permission. + * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. 
+ */ + +/** + * @defgroup ppp PPP + * @ingroup netifs + * @verbinclude "ppp.txt" + */ + +#include "netif/ppp/ppp_opts.h" +#if PPP_SUPPORT /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/pbuf.h" +#include "lwip/stats.h" +#include "lwip/sys.h" +#include "lwip/tcpip.h" +#include "lwip/api.h" +#include "lwip/snmp.h" +#include "lwip/ip4.h" /* for ip4_input() */ +#if PPP_IPV6_SUPPORT +#include "lwip/ip6.h" /* for ip6_input() */ +#endif /* PPP_IPV6_SUPPORT */ +#include "lwip/dns.h" + +#include "netif/ppp/ppp_impl.h" +#include "netif/ppp/pppos.h" + +#include "netif/ppp/fsm.h" +#include "netif/ppp/lcp.h" +#include "netif/ppp/magic.h" + +#if PAP_SUPPORT +#include "netif/ppp/upap.h" +#endif /* PAP_SUPPORT */ +#if CHAP_SUPPORT +#include "netif/ppp/chap-new.h" +#endif /* CHAP_SUPPORT */ +#if EAP_SUPPORT +#include "netif/ppp/eap.h" +#endif /* EAP_SUPPORT */ +#if CCP_SUPPORT +#include "netif/ppp/ccp.h" +#endif /* CCP_SUPPORT */ +#if MPPE_SUPPORT +#include "netif/ppp/mppe.h" +#endif /* MPPE_SUPPORT */ +#if ECP_SUPPORT +#include "netif/ppp/ecp.h" +#endif /* EAP_SUPPORT */ +#if VJ_SUPPORT +#include "netif/ppp/vj.h" +#endif /* VJ_SUPPORT */ +#if PPP_IPV4_SUPPORT +#include "netif/ppp/ipcp.h" +#endif /* PPP_IPV4_SUPPORT */ +#if PPP_IPV6_SUPPORT +#include "netif/ppp/ipv6cp.h" +#endif /* PPP_IPV6_SUPPORT */ + +/*************************/ +/*** LOCAL DEFINITIONS ***/ +/*************************/ + +/* Memory pools */ +#if PPPOS_SUPPORT +LWIP_MEMPOOL_PROTOTYPE(PPPOS_PCB); +#endif +#if PPPOE_SUPPORT +LWIP_MEMPOOL_PROTOTYPE(PPPOE_IF); +#endif +#if PPPOL2TP_SUPPORT +LWIP_MEMPOOL_PROTOTYPE(PPPOL2TP_PCB); +#endif +#if LWIP_PPP_API && LWIP_MPU_COMPATIBLE +LWIP_MEMPOOL_PROTOTYPE(PPPAPI_MSG); +#endif +LWIP_MEMPOOL_DECLARE(PPP_PCB, MEMP_NUM_PPP_PCB, sizeof(ppp_pcb), "PPP_PCB") + +/* FIXME: add stats per PPP session */ +#if PPP_STATS_SUPPORT +static struct timeval start_time; /* Time when link was started. */ +static struct pppd_stats old_link_stats; +struct pppd_stats link_stats; +unsigned link_connect_time; +int link_stats_valid; +#endif /* PPP_STATS_SUPPORT */ + +/* + * PPP Data Link Layer "protocol" table. + * One entry per supported protocol. + * The last entry must be NULL. + */ +const struct protent* const protocols[] = { + &lcp_protent, +#if PAP_SUPPORT + &pap_protent, +#endif /* PAP_SUPPORT */ +#if CHAP_SUPPORT + &chap_protent, +#endif /* CHAP_SUPPORT */ +#if CBCP_SUPPORT + &cbcp_protent, +#endif /* CBCP_SUPPORT */ +#if PPP_IPV4_SUPPORT + &ipcp_protent, +#endif /* PPP_IPV4_SUPPORT */ +#if PPP_IPV6_SUPPORT + &ipv6cp_protent, +#endif /* PPP_IPV6_SUPPORT */ +#if CCP_SUPPORT + &ccp_protent, +#endif /* CCP_SUPPORT */ +#if ECP_SUPPORT + &ecp_protent, +#endif /* ECP_SUPPORT */ +#ifdef AT_CHANGE + &atcp_protent, +#endif /* AT_CHANGE */ +#if EAP_SUPPORT + &eap_protent, +#endif /* EAP_SUPPORT */ + NULL +}; + +/* Prototypes for procedures local to this file. 
*/ +static void ppp_do_connect(void *arg); +static err_t ppp_netif_init_cb(struct netif *netif); +#if PPP_IPV4_SUPPORT +static err_t ppp_netif_output_ip4(struct netif *netif, struct pbuf *pb, const ip4_addr_t *ipaddr); +#endif /* PPP_IPV4_SUPPORT */ +#if PPP_IPV6_SUPPORT +static err_t ppp_netif_output_ip6(struct netif *netif, struct pbuf *pb, const ip6_addr_t *ipaddr); +#endif /* PPP_IPV6_SUPPORT */ +static err_t ppp_netif_output(struct netif *netif, struct pbuf *pb, u16_t protocol); + +/***********************************/ +/*** PUBLIC FUNCTION DEFINITIONS ***/ +/***********************************/ +#if PPP_AUTH_SUPPORT +void ppp_set_auth(ppp_pcb *pcb, u8_t authtype, const char *user, const char *passwd) { + LWIP_ASSERT_CORE_LOCKED(); +#if PAP_SUPPORT + pcb->settings.refuse_pap = !(authtype & PPPAUTHTYPE_PAP); +#endif /* PAP_SUPPORT */ +#if CHAP_SUPPORT + pcb->settings.refuse_chap = !(authtype & PPPAUTHTYPE_CHAP); +#if MSCHAP_SUPPORT + pcb->settings.refuse_mschap = !(authtype & PPPAUTHTYPE_MSCHAP); + pcb->settings.refuse_mschap_v2 = !(authtype & PPPAUTHTYPE_MSCHAP_V2); +#endif /* MSCHAP_SUPPORT */ +#endif /* CHAP_SUPPORT */ +#if EAP_SUPPORT + pcb->settings.refuse_eap = !(authtype & PPPAUTHTYPE_EAP); +#endif /* EAP_SUPPORT */ + pcb->settings.user = user; + pcb->settings.passwd = passwd; +} +#endif /* PPP_AUTH_SUPPORT */ + +#if MPPE_SUPPORT +/* Set MPPE configuration */ +void ppp_set_mppe(ppp_pcb *pcb, u8_t flags) { + if (flags == PPP_MPPE_DISABLE) { + pcb->settings.require_mppe = 0; + return; + } + + pcb->settings.require_mppe = 1; + pcb->settings.refuse_mppe_stateful = !(flags & PPP_MPPE_ALLOW_STATEFUL); + pcb->settings.refuse_mppe_40 = !!(flags & PPP_MPPE_REFUSE_40); + pcb->settings.refuse_mppe_128 = !!(flags & PPP_MPPE_REFUSE_128); +} +#endif /* MPPE_SUPPORT */ + +#if PPP_NOTIFY_PHASE +void ppp_set_notify_phase_callback(ppp_pcb *pcb, ppp_notify_phase_cb_fn notify_phase_cb) { + pcb->notify_phase_cb = notify_phase_cb; + notify_phase_cb(pcb, pcb->phase, pcb->ctx_cb); +} +#endif /* PPP_NOTIFY_PHASE */ + +/* + * Initiate a PPP connection. + * + * This can only be called if PPP is in the dead phase. + * + * Holdoff is the time to wait (in seconds) before initiating + * the connection. + * + * If this port connects to a modem, the modem connection must be + * established before calling this. + */ +err_t ppp_connect(ppp_pcb *pcb, u16_t holdoff) { + LWIP_ASSERT_CORE_LOCKED(); + if (pcb->phase != PPP_PHASE_DEAD) { + return ERR_ALREADY; + } + + PPPDEBUG(LOG_DEBUG, ("ppp_connect[%d]: holdoff=%d\n", pcb->netif->num, holdoff)); + + magic_randomize(); + + if (holdoff == 0) { + ppp_do_connect(pcb); + return ERR_OK; + } + + new_phase(pcb, PPP_PHASE_HOLDOFF); + sys_timeout((u32_t)(holdoff*1000), ppp_do_connect, pcb); + return ERR_OK; +} + +#if PPP_SERVER +/* + * Listen for an incoming PPP connection. + * + * This can only be called if PPP is in the dead phase. + * + * If this port connects to a modem, the modem connection must be + * established before calling this. + */ +err_t ppp_listen(ppp_pcb *pcb) { + LWIP_ASSERT_CORE_LOCKED(); + if (pcb->phase != PPP_PHASE_DEAD) { + return ERR_ALREADY; + } + + PPPDEBUG(LOG_DEBUG, ("ppp_listen[%d]\n", pcb->netif->num)); + + magic_randomize(); + + if (pcb->link_cb->listen) { + new_phase(pcb, PPP_PHASE_INITIALIZE); + pcb->link_cb->listen(pcb, pcb->link_ctx_cb); + return ERR_OK; + } + return ERR_IF; +} +#endif /* PPP_SERVER */ + +/* + * Initiate the end of a PPP connection. + * Any outstanding packets in the queues are dropped. 
+ * + * Setting nocarrier to 1 close the PPP connection without initiating the + * shutdown procedure. Always using nocarrier = 0 is still recommended, + * this is going to take a little longer time if your link is down, but + * is a safer choice for the PPP state machine. + * + * Return 0 on success, an error code on failure. + */ +err_t +ppp_close(ppp_pcb *pcb, u8_t nocarrier) +{ + LWIP_ASSERT_CORE_LOCKED(); + + pcb->err_code = PPPERR_USER; + + /* holdoff phase, cancel the reconnection */ + if (pcb->phase == PPP_PHASE_HOLDOFF) { + sys_untimeout(ppp_do_connect, pcb); + new_phase(pcb, PPP_PHASE_DEAD); + } + + /* dead phase, nothing to do, call the status callback to be consistent */ + if (pcb->phase == PPP_PHASE_DEAD) { + pcb->link_status_cb(pcb, pcb->err_code, pcb->ctx_cb); + return ERR_OK; + } + + /* Already terminating, nothing to do */ + if (pcb->phase >= PPP_PHASE_TERMINATE) { + return ERR_INPROGRESS; + } + + /* LCP not open, close link protocol */ + if (pcb->phase < PPP_PHASE_ESTABLISH) { + new_phase(pcb, PPP_PHASE_DISCONNECT); + ppp_link_terminated(pcb); + return ERR_OK; + } + + /* + * Only accept carrier lost signal on the stable running phase in order + * to prevent changing the PPP phase FSM in transition phases. + * + * Always using nocarrier = 0 is still recommended, this is going to + * take a little longer time, but is a safer choice from FSM point of view. + */ + if (nocarrier && pcb->phase == PPP_PHASE_RUNNING) { + PPPDEBUG(LOG_DEBUG, ("ppp_close[%d]: carrier lost -> lcp_lowerdown\n", pcb->netif->num)); + lcp_lowerdown(pcb); + /* forced link termination, this will force link protocol to disconnect. */ + link_terminated(pcb); + return ERR_OK; + } + + /* Disconnect */ + PPPDEBUG(LOG_DEBUG, ("ppp_close[%d]: kill_link -> lcp_close\n", pcb->netif->num)); + /* LCP soft close request. */ + lcp_close(pcb, "User request"); + return ERR_OK; +} + +/* + * Release the control block. + * + * This can only be called if PPP is in the dead phase. + * + * You must use ppp_close() before if you wish to terminate + * an established PPP session. + * + * Return 0 on success, an error code on failure. + */ +err_t ppp_free(ppp_pcb *pcb) { + err_t err; + LWIP_ASSERT_CORE_LOCKED(); + if (pcb->phase != PPP_PHASE_DEAD) { + return ERR_CONN; + } + + PPPDEBUG(LOG_DEBUG, ("ppp_free[%d]\n", pcb->netif->num)); + + netif_remove(pcb->netif); + + err = pcb->link_cb->free(pcb, pcb->link_ctx_cb); + + LWIP_MEMPOOL_FREE(PPP_PCB, pcb); + return err; +} + +/* Get and set parameters for the given connection. + * Return 0 on success, an error code on failure. */ +err_t +ppp_ioctl(ppp_pcb *pcb, u8_t cmd, void *arg) +{ + LWIP_ASSERT_CORE_LOCKED(); + if (pcb == NULL) { + return ERR_VAL; + } + + switch(cmd) { + case PPPCTLG_UPSTATUS: /* Get the PPP up status. */ + if (!arg) { + goto fail; + } + *(int *)arg = (int)(0 +#if PPP_IPV4_SUPPORT + || pcb->if4_up +#endif /* PPP_IPV4_SUPPORT */ +#if PPP_IPV6_SUPPORT + || pcb->if6_up +#endif /* PPP_IPV6_SUPPORT */ + ); + return ERR_OK; + + case PPPCTLG_ERRCODE: /* Get the PPP error code. 
*/ + if (!arg) { + goto fail; + } + *(int *)arg = (int)(pcb->err_code); + return ERR_OK; + + default: + goto fail; + } + +fail: + return ERR_VAL; +} + + +/**********************************/ +/*** LOCAL FUNCTION DEFINITIONS ***/ +/**********************************/ + +static void ppp_do_connect(void *arg) { + ppp_pcb *pcb = (ppp_pcb*)arg; + + LWIP_ASSERT("pcb->phase == PPP_PHASE_DEAD || pcb->phase == PPP_PHASE_HOLDOFF", pcb->phase == PPP_PHASE_DEAD || pcb->phase == PPP_PHASE_HOLDOFF); + + new_phase(pcb, PPP_PHASE_INITIALIZE); + pcb->link_cb->connect(pcb, pcb->link_ctx_cb); +} + +/* + * ppp_netif_init_cb - netif init callback + */ +static err_t ppp_netif_init_cb(struct netif *netif) { + netif->name[0] = 'p'; + netif->name[1] = 'p'; +#if PPP_IPV4_SUPPORT + netif->output = ppp_netif_output_ip4; +#endif /* PPP_IPV4_SUPPORT */ +#if PPP_IPV6_SUPPORT + netif->output_ip6 = ppp_netif_output_ip6; +#endif /* PPP_IPV6_SUPPORT */ + netif->flags = NETIF_FLAG_UP; +#if LWIP_NETIF_HOSTNAME + /* @todo: Initialize interface hostname */ + /* netif_set_hostname(netif, "lwip"); */ +#endif /* LWIP_NETIF_HOSTNAME */ + return ERR_OK; +} + +#if PPP_IPV4_SUPPORT +/* + * Send an IPv4 packet on the given connection. + */ +static err_t ppp_netif_output_ip4(struct netif *netif, struct pbuf *pb, const ip4_addr_t *ipaddr) { + LWIP_UNUSED_ARG(ipaddr); + return ppp_netif_output(netif, pb, PPP_IP); +} +#endif /* PPP_IPV4_SUPPORT */ + +#if PPP_IPV6_SUPPORT +/* + * Send an IPv6 packet on the given connection. + */ +static err_t ppp_netif_output_ip6(struct netif *netif, struct pbuf *pb, const ip6_addr_t *ipaddr) { + LWIP_UNUSED_ARG(ipaddr); + return ppp_netif_output(netif, pb, PPP_IPV6); +} +#endif /* PPP_IPV6_SUPPORT */ + +static err_t ppp_netif_output(struct netif *netif, struct pbuf *pb, u16_t protocol) { + ppp_pcb *pcb = (ppp_pcb*)netif->state; + err_t err; + struct pbuf *fpb = NULL; + + /* Check that the link is up. */ + if (0 +#if PPP_IPV4_SUPPORT + || (protocol == PPP_IP && !pcb->if4_up) +#endif /* PPP_IPV4_SUPPORT */ +#if PPP_IPV6_SUPPORT + || (protocol == PPP_IPV6 && !pcb->if6_up) +#endif /* PPP_IPV6_SUPPORT */ + ) { + PPPDEBUG(LOG_ERR, ("ppp_netif_output[%d]: link not up\n", pcb->netif->num)); + goto err_rte_drop; + } + +#if MPPE_SUPPORT + /* If MPPE is required, refuse any IP packet until we are able to crypt them. */ + if (pcb->settings.require_mppe && pcb->ccp_transmit_method != CI_MPPE) { + PPPDEBUG(LOG_ERR, ("ppp_netif_output[%d]: MPPE required, not up\n", pcb->netif->num)); + goto err_rte_drop; + } +#endif /* MPPE_SUPPORT */ + +#if VJ_SUPPORT + /* + * Attempt Van Jacobson header compression if VJ is configured and + * this is an IP packet. + */ + if (protocol == PPP_IP && pcb->vj_enabled) { + switch (vj_compress_tcp(&pcb->vj_comp, &pb)) { + case TYPE_IP: + /* No change... 
+ protocol = PPP_IP; */ + break; + case TYPE_COMPRESSED_TCP: + /* vj_compress_tcp() returns a new allocated pbuf, indicate we should free + * our duplicated pbuf later */ + fpb = pb; + protocol = PPP_VJC_COMP; + break; + case TYPE_UNCOMPRESSED_TCP: + /* vj_compress_tcp() returns a new allocated pbuf, indicate we should free + * our duplicated pbuf later */ + fpb = pb; + protocol = PPP_VJC_UNCOMP; + break; + default: + PPPDEBUG(LOG_WARNING, ("ppp_netif_output[%d]: bad IP packet\n", pcb->netif->num)); + LINK_STATS_INC(link.proterr); + LINK_STATS_INC(link.drop); + MIB2_STATS_NETIF_INC(pcb->netif, ifoutdiscards); + return ERR_VAL; + } + } +#endif /* VJ_SUPPORT */ + +#if CCP_SUPPORT + switch (pcb->ccp_transmit_method) { + case 0: + break; /* Don't compress */ +#if MPPE_SUPPORT + case CI_MPPE: + if ((err = mppe_compress(pcb, &pcb->mppe_comp, &pb, protocol)) != ERR_OK) { + LINK_STATS_INC(link.memerr); + LINK_STATS_INC(link.drop); + MIB2_STATS_NETIF_INC(netif, ifoutdiscards); + goto err; + } + /* if VJ compressor returned a new allocated pbuf, free it */ + if (fpb) { + pbuf_free(fpb); + } + /* mppe_compress() returns a new allocated pbuf, indicate we should free + * our duplicated pbuf later */ + fpb = pb; + protocol = PPP_COMP; + break; +#endif /* MPPE_SUPPORT */ + default: + PPPDEBUG(LOG_ERR, ("ppp_netif_output[%d]: bad CCP transmit method\n", pcb->netif->num)); + goto err_rte_drop; /* Cannot really happen, we only negotiate what we are able to do */ + } +#endif /* CCP_SUPPORT */ + + err = pcb->link_cb->netif_output(pcb, pcb->link_ctx_cb, pb, protocol); + goto err; + +err_rte_drop: + err = ERR_RTE; + LINK_STATS_INC(link.rterr); + LINK_STATS_INC(link.drop); + MIB2_STATS_NETIF_INC(netif, ifoutdiscards); +err: + if (fpb) { + pbuf_free(fpb); + } + return err; +} + +/************************************/ +/*** PRIVATE FUNCTION DEFINITIONS ***/ +/************************************/ + +/* Initialize the PPP subsystem. */ +int ppp_init(void) +{ +#if PPPOS_SUPPORT + LWIP_MEMPOOL_INIT(PPPOS_PCB); +#endif +#if PPPOE_SUPPORT + LWIP_MEMPOOL_INIT(PPPOE_IF); +#endif +#if PPPOL2TP_SUPPORT + LWIP_MEMPOOL_INIT(PPPOL2TP_PCB); +#endif +#if LWIP_PPP_API && LWIP_MPU_COMPATIBLE + LWIP_MEMPOOL_INIT(PPPAPI_MSG); +#endif + + LWIP_MEMPOOL_INIT(PPP_PCB); + + /* + * Initialize magic number generator now so that protocols may + * use magic numbers in initialization. + */ + magic_init(); + + return 0; +} + +/* + * Create a new PPP control block. + * + * This initializes the PPP control block but does not + * attempt to negotiate the LCP session. + * + * Return a new PPP connection control block pointer + * on success or a null pointer on failure. + */ +ppp_pcb *ppp_new(struct netif *pppif, const struct link_callbacks *callbacks, void *link_ctx_cb, ppp_link_status_cb_fn link_status_cb, void *ctx_cb) { + ppp_pcb *pcb; + const struct protent *protp; + int i; + + /* PPP is single-threaded: without a callback, + * there is no way to know when the link is up. 
*/ + if (link_status_cb == NULL) { + return NULL; + } + + pcb = (ppp_pcb*)LWIP_MEMPOOL_ALLOC(PPP_PCB); + if (pcb == NULL) { + return NULL; + } + + memset(pcb, 0, sizeof(ppp_pcb)); + + /* default configuration */ +#if PAP_SUPPORT + pcb->settings.pap_timeout_time = UPAP_DEFTIMEOUT; + pcb->settings.pap_max_transmits = UPAP_DEFTRANSMITS; +#if PPP_SERVER + pcb->settings.pap_req_timeout = UPAP_DEFREQTIME; +#endif /* PPP_SERVER */ +#endif /* PAP_SUPPORT */ + +#if CHAP_SUPPORT + pcb->settings.chap_timeout_time = CHAP_DEFTIMEOUT; + pcb->settings.chap_max_transmits = CHAP_DEFTRANSMITS; +#if PPP_SERVER + pcb->settings.chap_rechallenge_time = CHAP_DEFRECHALLENGETIME; +#endif /* PPP_SERVER */ +#endif /* CHAP_SUPPPORT */ + +#if EAP_SUPPORT + pcb->settings.eap_req_time = EAP_DEFREQTIME; + pcb->settings.eap_allow_req = EAP_DEFALLOWREQ; +#if PPP_SERVER + pcb->settings.eap_timeout_time = EAP_DEFTIMEOUT; + pcb->settings.eap_max_transmits = EAP_DEFTRANSMITS; +#endif /* PPP_SERVER */ +#endif /* EAP_SUPPORT */ + + pcb->settings.lcp_loopbackfail = LCP_DEFLOOPBACKFAIL; + pcb->settings.lcp_echo_interval = LCP_ECHOINTERVAL; + pcb->settings.lcp_echo_fails = LCP_MAXECHOFAILS; + + pcb->settings.fsm_timeout_time = FSM_DEFTIMEOUT; + pcb->settings.fsm_max_conf_req_transmits = FSM_DEFMAXCONFREQS; + pcb->settings.fsm_max_term_transmits = FSM_DEFMAXTERMREQS; + pcb->settings.fsm_max_nak_loops = FSM_DEFMAXNAKLOOPS; + + pcb->netif = pppif; + MIB2_INIT_NETIF(pppif, snmp_ifType_ppp, 0); + if (!netif_add(pcb->netif, +#if LWIP_IPV4 + IP4_ADDR_ANY4, IP4_ADDR_BROADCAST, IP4_ADDR_ANY4, +#endif /* LWIP_IPV4 */ + (void *)pcb, ppp_netif_init_cb, NULL)) { + LWIP_MEMPOOL_FREE(PPP_PCB, pcb); + PPPDEBUG(LOG_ERR, ("ppp_new: netif_add failed\n")); + return NULL; + } + + pcb->link_cb = callbacks; + pcb->link_ctx_cb = link_ctx_cb; + pcb->link_status_cb = link_status_cb; + pcb->ctx_cb = ctx_cb; + + /* + * Initialize each protocol. + */ + for (i = 0; (protp = protocols[i]) != NULL; ++i) { + (*protp->init)(pcb); + } + + new_phase(pcb, PPP_PHASE_DEAD); + return pcb; +} + +/** Initiate LCP open request */ +void ppp_start(ppp_pcb *pcb) { + PPPDEBUG(LOG_DEBUG, ("ppp_start[%d]\n", pcb->netif->num)); + + /* Clean data not taken care by anything else, mostly shared data. */ +#if PPP_STATS_SUPPORT + link_stats_valid = 0; +#endif /* PPP_STATS_SUPPORT */ +#if MPPE_SUPPORT + pcb->mppe_keys_set = 0; + memset(&pcb->mppe_comp, 0, sizeof(pcb->mppe_comp)); + memset(&pcb->mppe_decomp, 0, sizeof(pcb->mppe_decomp)); +#endif /* MPPE_SUPPORT */ +#if VJ_SUPPORT + vj_compress_init(&pcb->vj_comp); +#endif /* VJ_SUPPORT */ + + /* Start protocol */ + new_phase(pcb, PPP_PHASE_ESTABLISH); + lcp_open(pcb); + lcp_lowerup(pcb); + PPPDEBUG(LOG_DEBUG, ("ppp_start[%d]: finished\n", pcb->netif->num)); +} + +/** Called when link failed to setup */ +void ppp_link_failed(ppp_pcb *pcb) { + PPPDEBUG(LOG_DEBUG, ("ppp_link_failed[%d]\n", pcb->netif->num)); + new_phase(pcb, PPP_PHASE_DEAD); + pcb->err_code = PPPERR_OPEN; + pcb->link_status_cb(pcb, pcb->err_code, pcb->ctx_cb); +} + +/** Called when link is normally down (i.e. it was asked to end) */ +void ppp_link_end(ppp_pcb *pcb) { + PPPDEBUG(LOG_DEBUG, ("ppp_link_end[%d]\n", pcb->netif->num)); + new_phase(pcb, PPP_PHASE_DEAD); + if (pcb->err_code == PPPERR_NONE) { + pcb->err_code = PPPERR_CONNECT; + } + pcb->link_status_cb(pcb, pcb->err_code, pcb->ctx_cb); +} + +/* + * Pass the processed input packet to the appropriate handler. 
+ * This function and all handlers run in the context of the tcpip_thread + */ +void ppp_input(ppp_pcb *pcb, struct pbuf *pb) { + u16_t protocol; +#if PPP_DEBUG && PPP_PROTOCOLNAME + const char *pname; +#endif /* PPP_DEBUG && PPP_PROTOCOLNAME */ + + magic_randomize(); + + if (pb->len < 2) { + PPPDEBUG(LOG_ERR, ("ppp_input[%d]: packet too short\n", pcb->netif->num)); + goto drop; + } + protocol = (((u8_t *)pb->payload)[0] << 8) | ((u8_t*)pb->payload)[1]; + +#if PRINTPKT_SUPPORT + ppp_dump_packet(pcb, "rcvd", (unsigned char *)pb->payload, pb->len); +#endif /* PRINTPKT_SUPPORT */ + + pbuf_remove_header(pb, sizeof(protocol)); + + LINK_STATS_INC(link.recv); + MIB2_STATS_NETIF_INC(pcb->netif, ifinucastpkts); + MIB2_STATS_NETIF_ADD(pcb->netif, ifinoctets, pb->tot_len); + + /* + * Toss all non-LCP packets unless LCP is OPEN. + */ + if (protocol != PPP_LCP && pcb->lcp_fsm.state != PPP_FSM_OPENED) { + ppp_dbglog("Discarded non-LCP packet when LCP not open"); + goto drop; + } + + /* + * Until we get past the authentication phase, toss all packets + * except LCP, LQR and authentication packets. + */ + if (pcb->phase <= PPP_PHASE_AUTHENTICATE + && !(protocol == PPP_LCP +#if LQR_SUPPORT + || protocol == PPP_LQR +#endif /* LQR_SUPPORT */ +#if PAP_SUPPORT + || protocol == PPP_PAP +#endif /* PAP_SUPPORT */ +#if CHAP_SUPPORT + || protocol == PPP_CHAP +#endif /* CHAP_SUPPORT */ +#if EAP_SUPPORT + || protocol == PPP_EAP +#endif /* EAP_SUPPORT */ + )) { + ppp_dbglog("discarding proto 0x%x in phase %d", protocol, pcb->phase); + goto drop; + } + +#if CCP_SUPPORT +#if MPPE_SUPPORT + /* + * MPPE is required and unencrypted data has arrived (this + * should never happen!). We should probably drop the link if + * the protocol is in the range of what should be encrypted. + * At the least, we drop this packet. + */ + if (pcb->settings.require_mppe && protocol != PPP_COMP && protocol < 0x8000) { + PPPDEBUG(LOG_ERR, ("ppp_input[%d]: MPPE required, received unencrypted data!\n", pcb->netif->num)); + goto drop; + } +#endif /* MPPE_SUPPORT */ + + if (protocol == PPP_COMP) { + u8_t *pl; + + switch (pcb->ccp_receive_method) { +#if MPPE_SUPPORT + case CI_MPPE: + if (mppe_decompress(pcb, &pcb->mppe_decomp, &pb) != ERR_OK) { + goto drop; + } + break; +#endif /* MPPE_SUPPORT */ + default: + PPPDEBUG(LOG_ERR, ("ppp_input[%d]: bad CCP receive method\n", pcb->netif->num)); + goto drop; /* Cannot really happen, we only negotiate what we are able to do */ + } + + /* Assume no PFC */ + if (pb->len < 2) { + goto drop; + } + + /* Extract and hide protocol (do PFC decompression if necessary) */ + pl = (u8_t*)pb->payload; + if (pl[0] & 0x01) { + protocol = pl[0]; + pbuf_remove_header(pb, 1); + } else { + protocol = (pl[0] << 8) | pl[1]; + pbuf_remove_header(pb, 2); + } + } +#endif /* CCP_SUPPORT */ + + switch(protocol) { + +#if PPP_IPV4_SUPPORT + case PPP_IP: /* Internet Protocol */ + PPPDEBUG(LOG_INFO, ("ppp_input[%d]: ip in pbuf len=%d\n", pcb->netif->num, pb->tot_len)); + ip4_input(pb, pcb->netif); + return; +#endif /* PPP_IPV4_SUPPORT */ + +#if PPP_IPV6_SUPPORT + case PPP_IPV6: /* Internet Protocol Version 6 */ + PPPDEBUG(LOG_INFO, ("ppp_input[%d]: ip6 in pbuf len=%d\n", pcb->netif->num, pb->tot_len)); + ip6_input(pb, pcb->netif); + return; +#endif /* PPP_IPV6_SUPPORT */ + +#if VJ_SUPPORT + case PPP_VJC_COMP: /* VJ compressed TCP */ + /* + * Clip off the VJ header and prepend the rebuilt TCP/IP header and + * pass the result to IP. 
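+ * (vj_uncompress_tcp() may have to reallocate the pbuf while rebuilding
+ * the full TCP/IP header, which is why it is handed &pb rather than pb.)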
+ */ + PPPDEBUG(LOG_INFO, ("ppp_input[%d]: vj_comp in pbuf len=%d\n", pcb->netif->num, pb->tot_len)); + if (pcb->vj_enabled && vj_uncompress_tcp(&pb, &pcb->vj_comp) >= 0) { + ip4_input(pb, pcb->netif); + return; + } + /* Something's wrong so drop it. */ + PPPDEBUG(LOG_WARNING, ("ppp_input[%d]: Dropping VJ compressed\n", pcb->netif->num)); + break; + + case PPP_VJC_UNCOMP: /* VJ uncompressed TCP */ + /* + * Process the TCP/IP header for VJ header compression and then pass + * the packet to IP. + */ + PPPDEBUG(LOG_INFO, ("ppp_input[%d]: vj_un in pbuf len=%d\n", pcb->netif->num, pb->tot_len)); + if (pcb->vj_enabled && vj_uncompress_uncomp(pb, &pcb->vj_comp) >= 0) { + ip4_input(pb, pcb->netif); + return; + } + /* Something's wrong so drop it. */ + PPPDEBUG(LOG_WARNING, ("ppp_input[%d]: Dropping VJ uncompressed\n", pcb->netif->num)); + break; +#endif /* VJ_SUPPORT */ + + default: { + int i; + const struct protent *protp; + + /* + * Upcall the proper protocol input routine. + */ + for (i = 0; (protp = protocols[i]) != NULL; ++i) { + if (protp->protocol == protocol) { + pb = pbuf_coalesce(pb, PBUF_RAW); + (*protp->input)(pcb, (u8_t*)pb->payload, pb->len); + goto out; + } +#if 0 /* UNUSED + * + * This is actually a (hacked?) way for the Linux kernel to pass a data + * packet to pppd. pppd in normal condition only do signaling + * (LCP, PAP, CHAP, IPCP, ...) and does not handle any data packet at all. + * + * We don't even need this interface, which is only there because of PPP + * interface limitation between Linux kernel and pppd. For MPPE, which uses + * CCP to negotiate although it is not really a (de)compressor, we added + * ccp_resetrequest() in CCP and MPPE input data flow is calling either + * ccp_resetrequest() or lcp_close() if the issue is, respectively, non-fatal + * or fatal, this is what ccp_datainput() really do. + */ + if (protocol == (protp->protocol & ~0x8000) + && protp->datainput != NULL) { + (*protp->datainput)(pcb, pb->payload, pb->len); + goto out; + } +#endif /* UNUSED */ + } + +#if PPP_DEBUG +#if PPP_PROTOCOLNAME + pname = protocol_name(protocol); + if (pname != NULL) { + ppp_warn("Unsupported protocol '%s' (0x%x) received", pname, protocol); + } else +#endif /* PPP_PROTOCOLNAME */ + ppp_warn("Unsupported protocol 0x%x received", protocol); +#endif /* PPP_DEBUG */ + if (pbuf_add_header(pb, sizeof(protocol))) { + PPPDEBUG(LOG_WARNING, ("ppp_input[%d]: Dropping (pbuf_add_header failed)\n", pcb->netif->num)); + goto drop; + } + lcp_sprotrej(pcb, (u8_t*)pb->payload, pb->len); + } + break; + } + +drop: + LINK_STATS_INC(link.drop); + MIB2_STATS_NETIF_INC(pcb->netif, ifindiscards); + +out: + pbuf_free(pb); +} + +/* + * Write a pbuf to a ppp link, only used from PPP functions + * to send PPP packets. + * + * IPv4 and IPv6 packets from lwIP are sent, respectively, + * with ppp_netif_output_ip4() and ppp_netif_output_ip6() + * functions (which are callbacks of the netif PPP interface). 
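+ *
+ * The pbuf passed in still begins with the 2-byte address and control
+ * field, which is why the packet dump below skips the first two bytes;
+ * link drivers such as PPPoE strip those bytes again before framing.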
+ */ +err_t ppp_write(ppp_pcb *pcb, struct pbuf *p) { +#if PRINTPKT_SUPPORT + ppp_dump_packet(pcb, "sent", (unsigned char *)p->payload+2, p->len-2); +#endif /* PRINTPKT_SUPPORT */ + return pcb->link_cb->write(pcb, pcb->link_ctx_cb, p); +} + +void ppp_link_terminated(ppp_pcb *pcb) { + PPPDEBUG(LOG_DEBUG, ("ppp_link_terminated[%d]\n", pcb->netif->num)); + pcb->link_cb->disconnect(pcb, pcb->link_ctx_cb); + PPPDEBUG(LOG_DEBUG, ("ppp_link_terminated[%d]: finished.\n", pcb->netif->num)); +} + + +/************************************************************************ + * Functions called by various PPP subsystems to configure + * the PPP interface or change the PPP phase. + */ + +/* + * new_phase - signal the start of a new phase of pppd's operation. + */ +void new_phase(ppp_pcb *pcb, int p) { + pcb->phase = p; + PPPDEBUG(LOG_DEBUG, ("ppp phase changed[%d]: phase=%d\n", pcb->netif->num, pcb->phase)); +#if PPP_NOTIFY_PHASE + if (pcb->notify_phase_cb != NULL) { + pcb->notify_phase_cb(pcb, p, pcb->ctx_cb); + } +#endif /* PPP_NOTIFY_PHASE */ +} + +/* + * ppp_send_config - configure the transmit-side characteristics of + * the ppp interface. + */ +int ppp_send_config(ppp_pcb *pcb, int mtu, u32_t accm, int pcomp, int accomp) { + LWIP_UNUSED_ARG(mtu); + /* pcb->mtu = mtu; -- set correctly with netif_set_mtu */ + + if (pcb->link_cb->send_config) { + pcb->link_cb->send_config(pcb, pcb->link_ctx_cb, accm, pcomp, accomp); + } + + PPPDEBUG(LOG_INFO, ("ppp_send_config[%d]\n", pcb->netif->num) ); + return 0; +} + +/* + * ppp_recv_config - configure the receive-side characteristics of + * the ppp interface. + */ +int ppp_recv_config(ppp_pcb *pcb, int mru, u32_t accm, int pcomp, int accomp) { + LWIP_UNUSED_ARG(mru); + + if (pcb->link_cb->recv_config) { + pcb->link_cb->recv_config(pcb, pcb->link_ctx_cb, accm, pcomp, accomp); + } + + PPPDEBUG(LOG_INFO, ("ppp_recv_config[%d]\n", pcb->netif->num)); + return 0; +} + +#if PPP_IPV4_SUPPORT +/* + * sifaddr - Config the interface IP addresses and netmask. + */ +int sifaddr(ppp_pcb *pcb, u32_t our_adr, u32_t his_adr, u32_t netmask) { + ip4_addr_t ip, nm, gw; + + ip4_addr_set_u32(&ip, our_adr); + ip4_addr_set_u32(&nm, netmask); + ip4_addr_set_u32(&gw, his_adr); + netif_set_addr(pcb->netif, &ip, &nm, &gw); + return 1; +} + +/******************************************************************** + * + * cifaddr - Clear the interface IP addresses, and delete routes + * through the interface if possible. + */ +int cifaddr(ppp_pcb *pcb, u32_t our_adr, u32_t his_adr) { + LWIP_UNUSED_ARG(our_adr); + LWIP_UNUSED_ARG(his_adr); + + netif_set_addr(pcb->netif, IP4_ADDR_ANY4, IP4_ADDR_BROADCAST, IP4_ADDR_ANY4); + return 1; +} + +#if 0 /* UNUSED - PROXY ARP */ +/******************************************************************** + * + * sifproxyarp - Make a proxy ARP entry for the peer. + */ + +int sifproxyarp(ppp_pcb *pcb, u32_t his_adr) { + LWIP_UNUSED_ARG(pcb); + LWIP_UNUSED_ARG(his_adr); + return 0; +} + +/******************************************************************** + * + * cifproxyarp - Delete the proxy ARP entry for the peer. 
+ */ + +int cifproxyarp(ppp_pcb *pcb, u32_t his_adr) { + LWIP_UNUSED_ARG(pcb); + LWIP_UNUSED_ARG(his_adr); + return 0; +} +#endif /* UNUSED - PROXY ARP */ + +#if LWIP_DNS +/* + * sdns - Config the DNS servers + */ +int sdns(ppp_pcb *pcb, u32_t ns1, u32_t ns2) { + ip_addr_t ns; + LWIP_UNUSED_ARG(pcb); + + ip_addr_set_ip4_u32_val(ns, ns1); + dns_setserver(0, &ns); + ip_addr_set_ip4_u32_val(ns, ns2); + dns_setserver(1, &ns); + return 1; +} + +/******************************************************************** + * + * cdns - Clear the DNS servers + */ +int cdns(ppp_pcb *pcb, u32_t ns1, u32_t ns2) { + const ip_addr_t *nsa; + ip_addr_t nsb; + LWIP_UNUSED_ARG(pcb); + + nsa = dns_getserver(0); + ip_addr_set_ip4_u32_val(nsb, ns1); + if (ip_addr_cmp(nsa, &nsb)) { + dns_setserver(0, IP_ADDR_ANY); + } + nsa = dns_getserver(1); + ip_addr_set_ip4_u32_val(nsb, ns2); + if (ip_addr_cmp(nsa, &nsb)) { + dns_setserver(1, IP_ADDR_ANY); + } + return 1; +} +#endif /* LWIP_DNS */ + +#if VJ_SUPPORT +/******************************************************************** + * + * sifvjcomp - config tcp header compression + */ +int sifvjcomp(ppp_pcb *pcb, int vjcomp, int cidcomp, int maxcid) { + pcb->vj_enabled = vjcomp; + pcb->vj_comp.compressSlot = cidcomp; + pcb->vj_comp.maxSlotIndex = maxcid; + PPPDEBUG(LOG_INFO, ("sifvjcomp[%d]: VJ compress enable=%d slot=%d max slot=%d\n", + pcb->netif->num, vjcomp, cidcomp, maxcid)); + return 0; +} +#endif /* VJ_SUPPORT */ + +/* + * sifup - Config the interface up and enable IP packets to pass. + */ +int sifup(ppp_pcb *pcb) { + pcb->if4_up = 1; + pcb->err_code = PPPERR_NONE; + netif_set_link_up(pcb->netif); + + PPPDEBUG(LOG_DEBUG, ("sifup[%d]: err_code=%d\n", pcb->netif->num, pcb->err_code)); + pcb->link_status_cb(pcb, pcb->err_code, pcb->ctx_cb); + return 1; +} + +/******************************************************************** + * + * sifdown - Disable the indicated protocol and config the interface + * down if there are no remaining protocols. + */ +int sifdown(ppp_pcb *pcb) { + + pcb->if4_up = 0; + + if (1 +#if PPP_IPV6_SUPPORT + /* set the interface down if IPv6 is down as well */ + && !pcb->if6_up +#endif /* PPP_IPV6_SUPPORT */ + ) { + /* make sure the netif link callback is called */ + netif_set_link_down(pcb->netif); + } + PPPDEBUG(LOG_DEBUG, ("sifdown[%d]: err_code=%d\n", pcb->netif->num, pcb->err_code)); + return 1; +} + +/******************************************************************** + * + * Return user specified netmask, modified by any mask we might determine + * for address `addr' (in network byte order). + * Here we scan through the system's list of interfaces, looking for + * any non-point-to-point interfaces which might appear to be on the same + * network as `addr'. If we find any, we OR in their netmask to the + * user-specified netmask. + */ +u32_t get_mask(u32_t addr) { +#if 0 + u32_t mask, nmask; + + addr = lwip_htonl(addr); + if (IP_CLASSA(addr)) { /* determine network mask for address class */ + nmask = IP_CLASSA_NET; + } else if (IP_CLASSB(addr)) { + nmask = IP_CLASSB_NET; + } else { + nmask = IP_CLASSC_NET; + } + + /* class D nets are disallowed by bad_ip_adrs */ + mask = PP_HTONL(0xffffff00UL) | lwip_htonl(nmask); + + /* XXX + * Scan through the system's network interfaces. + * Get each netmask and OR them into our mask. 
+ */ + /* return mask; */ + return mask; +#endif /* 0 */ + LWIP_UNUSED_ARG(addr); + return IPADDR_BROADCAST; +} +#endif /* PPP_IPV4_SUPPORT */ + +#if PPP_IPV6_SUPPORT +#define IN6_LLADDR_FROM_EUI64(ip6, eui64) do { \ + ip6.addr[0] = PP_HTONL(0xfe800000); \ + ip6.addr[1] = 0; \ + eui64_copy(eui64, ip6.addr[2]); \ + } while (0) + +/******************************************************************** + * + * sif6addr - Config the interface with an IPv6 link-local address + */ +int sif6addr(ppp_pcb *pcb, eui64_t our_eui64, eui64_t his_eui64) { + ip6_addr_t ip6; + LWIP_UNUSED_ARG(his_eui64); + + IN6_LLADDR_FROM_EUI64(ip6, our_eui64); + netif_ip6_addr_set(pcb->netif, 0, &ip6); + netif_ip6_addr_set_state(pcb->netif, 0, IP6_ADDR_PREFERRED); + /* FIXME: should we add an IPv6 static neighbor using his_eui64 ? */ + return 1; +} + +/******************************************************************** + * + * cif6addr - Remove IPv6 address from interface + */ +int cif6addr(ppp_pcb *pcb, eui64_t our_eui64, eui64_t his_eui64) { + LWIP_UNUSED_ARG(our_eui64); + LWIP_UNUSED_ARG(his_eui64); + + netif_ip6_addr_set_state(pcb->netif, 0, IP6_ADDR_INVALID); + netif_ip6_addr_set(pcb->netif, 0, IP6_ADDR_ANY6); + return 1; +} + +/* + * sif6up - Config the interface up and enable IPv6 packets to pass. + */ +int sif6up(ppp_pcb *pcb) { + + pcb->if6_up = 1; + pcb->err_code = PPPERR_NONE; + netif_set_link_up(pcb->netif); + + PPPDEBUG(LOG_DEBUG, ("sif6up[%d]: err_code=%d\n", pcb->netif->num, pcb->err_code)); + pcb->link_status_cb(pcb, pcb->err_code, pcb->ctx_cb); + return 1; +} + +/******************************************************************** + * + * sif6down - Disable the indicated protocol and config the interface + * down if there are no remaining protocols. + */ +int sif6down(ppp_pcb *pcb) { + + pcb->if6_up = 0; + + if (1 +#if PPP_IPV4_SUPPORT + /* set the interface down if IPv4 is down as well */ + && !pcb->if4_up +#endif /* PPP_IPV4_SUPPORT */ + ) { + /* make sure the netif link callback is called */ + netif_set_link_down(pcb->netif); + } + PPPDEBUG(LOG_DEBUG, ("sif6down[%d]: err_code=%d\n", pcb->netif->num, pcb->err_code)); + return 1; +} +#endif /* PPP_IPV6_SUPPORT */ + +#if DEMAND_SUPPORT +/* + * sifnpmode - Set the mode for handling packets for a given NP. + */ +int sifnpmode(ppp_pcb *pcb, int proto, enum NPmode mode) { + LWIP_UNUSED_ARG(pcb); + LWIP_UNUSED_ARG(proto); + LWIP_UNUSED_ARG(mode); + return 0; +} +#endif /* DEMAND_SUPPORT */ + +/* + * netif_set_mtu - set the MTU on the PPP network interface. + */ +void netif_set_mtu(ppp_pcb *pcb, int mtu) { + + pcb->netif->mtu = mtu; + PPPDEBUG(LOG_INFO, ("netif_set_mtu[%d]: mtu=%d\n", pcb->netif->num, mtu)); +} + +/* + * netif_get_mtu - get PPP interface MTU + */ +int netif_get_mtu(ppp_pcb *pcb) { + + return pcb->netif->mtu; +} + +#if CCP_SUPPORT +#if 0 /* unused */ +/* + * ccp_test - whether a given compression method is acceptable for use. + */ +int +ccp_test(ppp_pcb *pcb, u_char *opt_ptr, int opt_len, int for_transmit) +{ + LWIP_UNUSED_ARG(pcb); + LWIP_UNUSED_ARG(opt_ptr); + LWIP_UNUSED_ARG(opt_len); + LWIP_UNUSED_ARG(for_transmit); + return -1; +} +#endif /* unused */ + +/* + * ccp_set - inform about the current state of CCP. 
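+ *
+ * The receive/transmit method identifiers recorded here are what
+ * ppp_input() and ppp_netif_output() switch on to pick the (de)compressor,
+ * e.g. CI_MPPE once MPPE has been negotiated.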
+ */ +void +ccp_set(ppp_pcb *pcb, u8_t isopen, u8_t isup, u8_t receive_method, u8_t transmit_method) +{ + LWIP_UNUSED_ARG(isopen); + LWIP_UNUSED_ARG(isup); + pcb->ccp_receive_method = receive_method; + pcb->ccp_transmit_method = transmit_method; + PPPDEBUG(LOG_DEBUG, ("ccp_set[%d]: is_open=%d, is_up=%d, receive_method=%u, transmit_method=%u\n", + pcb->netif->num, isopen, isup, receive_method, transmit_method)); +} + +void +ccp_reset_comp(ppp_pcb *pcb) +{ + switch (pcb->ccp_transmit_method) { +#if MPPE_SUPPORT + case CI_MPPE: + mppe_comp_reset(pcb, &pcb->mppe_comp); + break; +#endif /* MPPE_SUPPORT */ + default: + break; + } +} + +void +ccp_reset_decomp(ppp_pcb *pcb) +{ + switch (pcb->ccp_receive_method) { +#if MPPE_SUPPORT + case CI_MPPE: + mppe_decomp_reset(pcb, &pcb->mppe_decomp); + break; +#endif /* MPPE_SUPPORT */ + default: + break; + } +} + +#if 0 /* unused */ +/* + * ccp_fatal_error - returns 1 if decompression was disabled as a + * result of an error detected after decompression of a packet, + * 0 otherwise. This is necessary because of patent nonsense. + */ +int +ccp_fatal_error(ppp_pcb *pcb) +{ + LWIP_UNUSED_ARG(pcb); + return 1; +} +#endif /* unused */ +#endif /* CCP_SUPPORT */ + +#if PPP_IDLETIMELIMIT +/******************************************************************** + * + * get_idle_time - return how long the link has been idle. + */ +int get_idle_time(ppp_pcb *pcb, struct ppp_idle *ip) { + /* FIXME: add idle time support and make it optional */ + LWIP_UNUSED_ARG(pcb); + LWIP_UNUSED_ARG(ip); + return 1; +} +#endif /* PPP_IDLETIMELIMIT */ + +#if DEMAND_SUPPORT +/******************************************************************** + * + * get_loop_output - get outgoing packets from the ppp device, + * and detect when we want to bring the real link up. + * Return value is 1 if we need to bring up the link, 0 otherwise. + */ +int get_loop_output(void) { + return 0; +} +#endif /* DEMAND_SUPPORT */ + +#if PPP_PROTOCOLNAME +/* List of protocol names, to make our messages a little more informative. 
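+ * For example, protocol_name(0xc021) yields "Link Control Protocol";
+ * unknown values yield NULL and the caller falls back to printing the
+ * raw protocol number.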
*/ +struct protocol_list { + u_short proto; + const char *name; +} const protocol_list[] = { + { 0x21, "IP" }, + { 0x23, "OSI Network Layer" }, + { 0x25, "Xerox NS IDP" }, + { 0x27, "DECnet Phase IV" }, + { 0x29, "Appletalk" }, + { 0x2b, "Novell IPX" }, + { 0x2d, "VJ compressed TCP/IP" }, + { 0x2f, "VJ uncompressed TCP/IP" }, + { 0x31, "Bridging PDU" }, + { 0x33, "Stream Protocol ST-II" }, + { 0x35, "Banyan Vines" }, + { 0x39, "AppleTalk EDDP" }, + { 0x3b, "AppleTalk SmartBuffered" }, + { 0x3d, "Multi-Link" }, + { 0x3f, "NETBIOS Framing" }, + { 0x41, "Cisco Systems" }, + { 0x43, "Ascom Timeplex" }, + { 0x45, "Fujitsu Link Backup and Load Balancing (LBLB)" }, + { 0x47, "DCA Remote Lan" }, + { 0x49, "Serial Data Transport Protocol (PPP-SDTP)" }, + { 0x4b, "SNA over 802.2" }, + { 0x4d, "SNA" }, + { 0x4f, "IP6 Header Compression" }, + { 0x51, "KNX Bridging Data" }, + { 0x53, "Encryption" }, + { 0x55, "Individual Link Encryption" }, + { 0x57, "IPv6" }, + { 0x59, "PPP Muxing" }, + { 0x5b, "Vendor-Specific Network Protocol" }, + { 0x61, "RTP IPHC Full Header" }, + { 0x63, "RTP IPHC Compressed TCP" }, + { 0x65, "RTP IPHC Compressed non-TCP" }, + { 0x67, "RTP IPHC Compressed UDP 8" }, + { 0x69, "RTP IPHC Compressed RTP 8" }, + { 0x6f, "Stampede Bridging" }, + { 0x73, "MP+" }, + { 0xc1, "NTCITS IPI" }, + { 0xfb, "single-link compression" }, + { 0xfd, "Compressed Datagram" }, + { 0x0201, "802.1d Hello Packets" }, + { 0x0203, "IBM Source Routing BPDU" }, + { 0x0205, "DEC LANBridge100 Spanning Tree" }, + { 0x0207, "Cisco Discovery Protocol" }, + { 0x0209, "Netcs Twin Routing" }, + { 0x020b, "STP - Scheduled Transfer Protocol" }, + { 0x020d, "EDP - Extreme Discovery Protocol" }, + { 0x0211, "Optical Supervisory Channel Protocol" }, + { 0x0213, "Optical Supervisory Channel Protocol" }, + { 0x0231, "Luxcom" }, + { 0x0233, "Sigma Network Systems" }, + { 0x0235, "Apple Client Server Protocol" }, + { 0x0281, "MPLS Unicast" }, + { 0x0283, "MPLS Multicast" }, + { 0x0285, "IEEE p1284.4 standard - data packets" }, + { 0x0287, "ETSI TETRA Network Protocol Type 1" }, + { 0x0289, "Multichannel Flow Treatment Protocol" }, + { 0x2063, "RTP IPHC Compressed TCP No Delta" }, + { 0x2065, "RTP IPHC Context State" }, + { 0x2067, "RTP IPHC Compressed UDP 16" }, + { 0x2069, "RTP IPHC Compressed RTP 16" }, + { 0x4001, "Cray Communications Control Protocol" }, + { 0x4003, "CDPD Mobile Network Registration Protocol" }, + { 0x4005, "Expand accelerator protocol" }, + { 0x4007, "ODSICP NCP" }, + { 0x4009, "DOCSIS DLL" }, + { 0x400B, "Cetacean Network Detection Protocol" }, + { 0x4021, "Stacker LZS" }, + { 0x4023, "RefTek Protocol" }, + { 0x4025, "Fibre Channel" }, + { 0x4027, "EMIT Protocols" }, + { 0x405b, "Vendor-Specific Protocol (VSP)" }, + { 0x8021, "Internet Protocol Control Protocol" }, + { 0x8023, "OSI Network Layer Control Protocol" }, + { 0x8025, "Xerox NS IDP Control Protocol" }, + { 0x8027, "DECnet Phase IV Control Protocol" }, + { 0x8029, "Appletalk Control Protocol" }, + { 0x802b, "Novell IPX Control Protocol" }, + { 0x8031, "Bridging NCP" }, + { 0x8033, "Stream Protocol Control Protocol" }, + { 0x8035, "Banyan Vines Control Protocol" }, + { 0x803d, "Multi-Link Control Protocol" }, + { 0x803f, "NETBIOS Framing Control Protocol" }, + { 0x8041, "Cisco Systems Control Protocol" }, + { 0x8043, "Ascom Timeplex" }, + { 0x8045, "Fujitsu LBLB Control Protocol" }, + { 0x8047, "DCA Remote Lan Network Control Protocol (RLNCP)" }, + { 0x8049, "Serial Data Control Protocol (PPP-SDCP)" }, + { 0x804b, "SNA over 802.2 Control 
Protocol" }, + { 0x804d, "SNA Control Protocol" }, + { 0x804f, "IP6 Header Compression Control Protocol" }, + { 0x8051, "KNX Bridging Control Protocol" }, + { 0x8053, "Encryption Control Protocol" }, + { 0x8055, "Individual Link Encryption Control Protocol" }, + { 0x8057, "IPv6 Control Protocol" }, + { 0x8059, "PPP Muxing Control Protocol" }, + { 0x805b, "Vendor-Specific Network Control Protocol (VSNCP)" }, + { 0x806f, "Stampede Bridging Control Protocol" }, + { 0x8073, "MP+ Control Protocol" }, + { 0x80c1, "NTCITS IPI Control Protocol" }, + { 0x80fb, "Single Link Compression Control Protocol" }, + { 0x80fd, "Compression Control Protocol" }, + { 0x8207, "Cisco Discovery Protocol Control" }, + { 0x8209, "Netcs Twin Routing" }, + { 0x820b, "STP - Control Protocol" }, + { 0x820d, "EDPCP - Extreme Discovery Protocol Ctrl Prtcl" }, + { 0x8235, "Apple Client Server Protocol Control" }, + { 0x8281, "MPLSCP" }, + { 0x8285, "IEEE p1284.4 standard - Protocol Control" }, + { 0x8287, "ETSI TETRA TNP1 Control Protocol" }, + { 0x8289, "Multichannel Flow Treatment Protocol" }, + { 0xc021, "Link Control Protocol" }, + { 0xc023, "Password Authentication Protocol" }, + { 0xc025, "Link Quality Report" }, + { 0xc027, "Shiva Password Authentication Protocol" }, + { 0xc029, "CallBack Control Protocol (CBCP)" }, + { 0xc02b, "BACP Bandwidth Allocation Control Protocol" }, + { 0xc02d, "BAP" }, + { 0xc05b, "Vendor-Specific Authentication Protocol (VSAP)" }, + { 0xc081, "Container Control Protocol" }, + { 0xc223, "Challenge Handshake Authentication Protocol" }, + { 0xc225, "RSA Authentication Protocol" }, + { 0xc227, "Extensible Authentication Protocol" }, + { 0xc229, "Mitsubishi Security Info Exch Ptcl (SIEP)" }, + { 0xc26f, "Stampede Bridging Authorization Protocol" }, + { 0xc281, "Proprietary Authentication Protocol" }, + { 0xc283, "Proprietary Authentication Protocol" }, + { 0xc481, "Proprietary Node ID Authentication Protocol" }, + { 0, NULL }, +}; + +/* + * protocol_name - find a name for a PPP protocol. + */ +const char * protocol_name(int proto) { + const struct protocol_list *lp; + + for (lp = protocol_list; lp->proto != 0; ++lp) { + if (proto == lp->proto) { + return lp->name; + } + } + return NULL; +} +#endif /* PPP_PROTOCOLNAME */ + +#if PPP_STATS_SUPPORT + +/* ---- Note on PPP Stats support ---- + * + * The one willing link stats support should add the get_ppp_stats() + * to fetch statistics from lwIP. + */ + +/* + * reset_link_stats - "reset" stats when link goes up. + */ +void reset_link_stats(int u) { + if (!get_ppp_stats(u, &old_link_stats)) { + return; + } + gettimeofday(&start_time, NULL); +} + +/* + * update_link_stats - get stats at link termination. + */ +void update_link_stats(int u) { + struct timeval now; + char numbuf[32]; + + if (!get_ppp_stats(u, &link_stats) || gettimeofday(&now, NULL) < 0) { + return; + } + link_connect_time = now.tv_sec - start_time.tv_sec; + link_stats_valid = 1; + + link_stats.bytes_in -= old_link_stats.bytes_in; + link_stats.bytes_out -= old_link_stats.bytes_out; + link_stats.pkts_in -= old_link_stats.pkts_in; + link_stats.pkts_out -= old_link_stats.pkts_out; +} + +void print_link_stats() { + /* + * Print connect time and statistics. 
+ */ + if (link_stats_valid) { + int t = (link_connect_time + 5) / 6; /* 1/10ths of minutes */ + info("Connect time %d.%d minutes.", t/10, t%10); + info("Sent %u bytes, received %u bytes.", link_stats.bytes_out, link_stats.bytes_in); + link_stats_valid = 0; + } +} +#endif /* PPP_STATS_SUPPORT */ + +#endif /* PPP_SUPPORT */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/netif/ppp/pppapi.c b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/netif/ppp/pppapi.c new file mode 100644 index 0000000..5d19f00 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/netif/ppp/pppapi.c @@ -0,0 +1,427 @@ +/** + * @file + * Point To Point Protocol Sequential API module + * + */ + +/* + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + */ + +#include "netif/ppp/ppp_opts.h" + +#if LWIP_PPP_API /* don't build if not configured for use in lwipopts.h */ + +#include "netif/ppp/pppapi.h" +#include "lwip/priv/tcpip_priv.h" +#include "netif/ppp/pppoe.h" +#include "netif/ppp/pppol2tp.h" +#include "netif/ppp/pppos.h" + +#if LWIP_MPU_COMPATIBLE +LWIP_MEMPOOL_DECLARE(PPPAPI_MSG, MEMP_NUM_PPP_API_MSG, sizeof(struct pppapi_msg), "PPPAPI_MSG") +#endif + +#define PPPAPI_VAR_REF(name) API_VAR_REF(name) +#define PPPAPI_VAR_DECLARE(name) API_VAR_DECLARE(struct pppapi_msg, name) +#define PPPAPI_VAR_ALLOC(name) API_VAR_ALLOC_POOL(struct pppapi_msg, PPPAPI_MSG, name, ERR_MEM) +#define PPPAPI_VAR_ALLOC_RETURN_NULL(name) API_VAR_ALLOC_POOL(struct pppapi_msg, PPPAPI_MSG, name, NULL) +#define PPPAPI_VAR_FREE(name) API_VAR_FREE_POOL(PPPAPI_MSG, name) + +/** + * Call ppp_set_default() inside the tcpip_thread context. + */ +static err_t +pppapi_do_ppp_set_default(struct tcpip_api_call_data *m) +{ + /* cast through void* to silence alignment warnings. 
+ * We know it works because the structs have been instantiated as struct pppapi_msg */ + struct pppapi_msg *msg = (struct pppapi_msg *)(void*)m; + + ppp_set_default(msg->msg.ppp); + return ERR_OK; +} + +/** + * Call ppp_set_default() in a thread-safe way by running that function inside the + * tcpip_thread context. + */ +err_t +pppapi_set_default(ppp_pcb *pcb) +{ + err_t err; + PPPAPI_VAR_DECLARE(msg); + PPPAPI_VAR_ALLOC(msg); + + PPPAPI_VAR_REF(msg).msg.ppp = pcb; + err = tcpip_api_call(pppapi_do_ppp_set_default, &PPPAPI_VAR_REF(msg).call); + PPPAPI_VAR_FREE(msg); + return err; +} + + +#if PPP_NOTIFY_PHASE +/** + * Call ppp_set_notify_phase_callback() inside the tcpip_thread context. + */ +static err_t +pppapi_do_ppp_set_notify_phase_callback(struct tcpip_api_call_data *m) +{ + /* cast through void* to silence alignment warnings. + * We know it works because the structs have been instantiated as struct pppapi_msg */ + struct pppapi_msg *msg = (struct pppapi_msg *)(void*)m; + + ppp_set_notify_phase_callback(msg->msg.ppp, msg->msg.msg.setnotifyphasecb.notify_phase_cb); + return ERR_OK; +} + +/** + * Call ppp_set_notify_phase_callback() in a thread-safe way by running that function inside the + * tcpip_thread context. + */ +err_t +pppapi_set_notify_phase_callback(ppp_pcb *pcb, ppp_notify_phase_cb_fn notify_phase_cb) +{ + err_t err; + PPPAPI_VAR_DECLARE(msg); + PPPAPI_VAR_ALLOC(msg); + + PPPAPI_VAR_REF(msg).msg.ppp = pcb; + PPPAPI_VAR_REF(msg).msg.msg.setnotifyphasecb.notify_phase_cb = notify_phase_cb; + err = tcpip_api_call(pppapi_do_ppp_set_notify_phase_callback, &PPPAPI_VAR_REF(msg).call); + PPPAPI_VAR_FREE(msg); + return err; +} +#endif /* PPP_NOTIFY_PHASE */ + + +#if PPPOS_SUPPORT +/** + * Call pppos_create() inside the tcpip_thread context. + */ +static err_t +pppapi_do_pppos_create(struct tcpip_api_call_data *m) +{ + /* cast through void* to silence alignment warnings. + * We know it works because the structs have been instantiated as struct pppapi_msg */ + struct pppapi_msg *msg = (struct pppapi_msg *)(void*)m; + + msg->msg.ppp = pppos_create(msg->msg.msg.serialcreate.pppif, msg->msg.msg.serialcreate.output_cb, + msg->msg.msg.serialcreate.link_status_cb, msg->msg.msg.serialcreate.ctx_cb); + return ERR_OK; +} + +/** + * Call pppos_create() in a thread-safe way by running that function inside the + * tcpip_thread context. + */ +ppp_pcb* +pppapi_pppos_create(struct netif *pppif, pppos_output_cb_fn output_cb, + ppp_link_status_cb_fn link_status_cb, void *ctx_cb) +{ + ppp_pcb* result; + PPPAPI_VAR_DECLARE(msg); + PPPAPI_VAR_ALLOC_RETURN_NULL(msg); + + PPPAPI_VAR_REF(msg).msg.ppp = NULL; + PPPAPI_VAR_REF(msg).msg.msg.serialcreate.pppif = pppif; + PPPAPI_VAR_REF(msg).msg.msg.serialcreate.output_cb = output_cb; + PPPAPI_VAR_REF(msg).msg.msg.serialcreate.link_status_cb = link_status_cb; + PPPAPI_VAR_REF(msg).msg.msg.serialcreate.ctx_cb = ctx_cb; + tcpip_api_call(pppapi_do_pppos_create, &PPPAPI_VAR_REF(msg).call); + result = PPPAPI_VAR_REF(msg).msg.ppp; + PPPAPI_VAR_FREE(msg); + return result; +} +#endif /* PPPOS_SUPPORT */ + + +#if PPPOE_SUPPORT +/** + * Call pppoe_create() inside the tcpip_thread context. + */ +static err_t +pppapi_do_pppoe_create(struct tcpip_api_call_data *m) +{ + /* cast through void* to silence alignment warnings. 
+ * We know it works because the structs have been instantiated as struct pppapi_msg */ + struct pppapi_msg *msg = (struct pppapi_msg *)(void*)m; + + msg->msg.ppp = pppoe_create(msg->msg.msg.ethernetcreate.pppif, msg->msg.msg.ethernetcreate.ethif, + msg->msg.msg.ethernetcreate.service_name, msg->msg.msg.ethernetcreate.concentrator_name, + msg->msg.msg.ethernetcreate.link_status_cb, msg->msg.msg.ethernetcreate.ctx_cb); + return ERR_OK; +} + +/** + * Call pppoe_create() in a thread-safe way by running that function inside the + * tcpip_thread context. + */ +ppp_pcb* +pppapi_pppoe_create(struct netif *pppif, struct netif *ethif, const char *service_name, + const char *concentrator_name, ppp_link_status_cb_fn link_status_cb, + void *ctx_cb) +{ + ppp_pcb* result; + PPPAPI_VAR_DECLARE(msg); + PPPAPI_VAR_ALLOC_RETURN_NULL(msg); + + PPPAPI_VAR_REF(msg).msg.ppp = NULL; + PPPAPI_VAR_REF(msg).msg.msg.ethernetcreate.pppif = pppif; + PPPAPI_VAR_REF(msg).msg.msg.ethernetcreate.ethif = ethif; + PPPAPI_VAR_REF(msg).msg.msg.ethernetcreate.service_name = service_name; + PPPAPI_VAR_REF(msg).msg.msg.ethernetcreate.concentrator_name = concentrator_name; + PPPAPI_VAR_REF(msg).msg.msg.ethernetcreate.link_status_cb = link_status_cb; + PPPAPI_VAR_REF(msg).msg.msg.ethernetcreate.ctx_cb = ctx_cb; + tcpip_api_call(pppapi_do_pppoe_create, &PPPAPI_VAR_REF(msg).call); + result = PPPAPI_VAR_REF(msg).msg.ppp; + PPPAPI_VAR_FREE(msg); + return result; +} +#endif /* PPPOE_SUPPORT */ + + +#if PPPOL2TP_SUPPORT +/** + * Call pppol2tp_create() inside the tcpip_thread context. + */ +static err_t +pppapi_do_pppol2tp_create(struct tcpip_api_call_data *m) +{ + /* cast through void* to silence alignment warnings. + * We know it works because the structs have been instantiated as struct pppapi_msg */ + struct pppapi_msg *msg = (struct pppapi_msg *)(void*)m; + + msg->msg.ppp = pppol2tp_create(msg->msg.msg.l2tpcreate.pppif, + msg->msg.msg.l2tpcreate.netif, API_EXPR_REF(msg->msg.msg.l2tpcreate.ipaddr), msg->msg.msg.l2tpcreate.port, +#if PPPOL2TP_AUTH_SUPPORT + msg->msg.msg.l2tpcreate.secret, + msg->msg.msg.l2tpcreate.secret_len, +#else /* PPPOL2TP_AUTH_SUPPORT */ + NULL, + 0, +#endif /* PPPOL2TP_AUTH_SUPPORT */ + msg->msg.msg.l2tpcreate.link_status_cb, msg->msg.msg.l2tpcreate.ctx_cb); + return ERR_OK; +} + +/** + * Call pppol2tp_create() in a thread-safe way by running that function inside the + * tcpip_thread context. 
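+ *
+ * As with the other pppapi_*() wrappers, the arguments are packed into a
+ * struct pppapi_msg (allocated from the PPPAPI_MSG pool when
+ * LWIP_MPU_COMPATIBLE is enabled) and the resulting pcb is read back once
+ * tcpip_api_call() has returned.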
+ */ +ppp_pcb* +pppapi_pppol2tp_create(struct netif *pppif, struct netif *netif, ip_addr_t *ipaddr, u16_t port, + const u8_t *secret, u8_t secret_len, + ppp_link_status_cb_fn link_status_cb, void *ctx_cb) +{ + ppp_pcb* result; + PPPAPI_VAR_DECLARE(msg); + PPPAPI_VAR_ALLOC_RETURN_NULL(msg); +#if !PPPOL2TP_AUTH_SUPPORT + LWIP_UNUSED_ARG(secret); + LWIP_UNUSED_ARG(secret_len); +#endif /* !PPPOL2TP_AUTH_SUPPORT */ + + PPPAPI_VAR_REF(msg).msg.ppp = NULL; + PPPAPI_VAR_REF(msg).msg.msg.l2tpcreate.pppif = pppif; + PPPAPI_VAR_REF(msg).msg.msg.l2tpcreate.netif = netif; + PPPAPI_VAR_REF(msg).msg.msg.l2tpcreate.ipaddr = PPPAPI_VAR_REF(ipaddr); + PPPAPI_VAR_REF(msg).msg.msg.l2tpcreate.port = port; +#if PPPOL2TP_AUTH_SUPPORT + PPPAPI_VAR_REF(msg).msg.msg.l2tpcreate.secret = secret; + PPPAPI_VAR_REF(msg).msg.msg.l2tpcreate.secret_len = secret_len; +#endif /* PPPOL2TP_AUTH_SUPPORT */ + PPPAPI_VAR_REF(msg).msg.msg.l2tpcreate.link_status_cb = link_status_cb; + PPPAPI_VAR_REF(msg).msg.msg.l2tpcreate.ctx_cb = ctx_cb; + tcpip_api_call(pppapi_do_pppol2tp_create, &PPPAPI_VAR_REF(msg).call); + result = PPPAPI_VAR_REF(msg).msg.ppp; + PPPAPI_VAR_FREE(msg); + return result; +} +#endif /* PPPOL2TP_SUPPORT */ + + +/** + * Call ppp_connect() inside the tcpip_thread context. + */ +static err_t +pppapi_do_ppp_connect(struct tcpip_api_call_data *m) +{ + /* cast through void* to silence alignment warnings. + * We know it works because the structs have been instantiated as struct pppapi_msg */ + struct pppapi_msg *msg = (struct pppapi_msg *)(void*)m; + + return ppp_connect(msg->msg.ppp, msg->msg.msg.connect.holdoff); +} + +/** + * Call ppp_connect() in a thread-safe way by running that function inside the + * tcpip_thread context. + */ +err_t +pppapi_connect(ppp_pcb *pcb, u16_t holdoff) +{ + err_t err; + PPPAPI_VAR_DECLARE(msg); + PPPAPI_VAR_ALLOC(msg); + + PPPAPI_VAR_REF(msg).msg.ppp = pcb; + PPPAPI_VAR_REF(msg).msg.msg.connect.holdoff = holdoff; + err = tcpip_api_call(pppapi_do_ppp_connect, &PPPAPI_VAR_REF(msg).call); + PPPAPI_VAR_FREE(msg); + return err; +} + + +#if PPP_SERVER +/** + * Call ppp_listen() inside the tcpip_thread context. + */ +static err_t +pppapi_do_ppp_listen(struct tcpip_api_call_data *m) +{ + /* cast through void* to silence alignment warnings. + * We know it works because the structs have been instantiated as struct pppapi_msg */ + struct pppapi_msg *msg = (struct pppapi_msg *)(void*)m; + + return ppp_listen(msg->msg.ppp); +} + +/** + * Call ppp_listen() in a thread-safe way by running that function inside the + * tcpip_thread context. + */ +err_t +pppapi_listen(ppp_pcb *pcb) +{ + err_t err; + PPPAPI_VAR_DECLARE(msg); + PPPAPI_VAR_ALLOC(msg); + + PPPAPI_VAR_REF(msg).msg.ppp = pcb; + err = tcpip_api_call(pppapi_do_ppp_listen, &PPPAPI_VAR_REF(msg).call); + PPPAPI_VAR_FREE(msg); + return err; +} +#endif /* PPP_SERVER */ + + +/** + * Call ppp_close() inside the tcpip_thread context. + */ +static err_t +pppapi_do_ppp_close(struct tcpip_api_call_data *m) +{ + /* cast through void* to silence alignment warnings. + * We know it works because the structs have been instantiated as struct pppapi_msg */ + struct pppapi_msg *msg = (struct pppapi_msg *)(void*)m; + + return ppp_close(msg->msg.ppp, msg->msg.msg.close.nocarrier); +} + +/** + * Call ppp_close() in a thread-safe way by running that function inside the + * tcpip_thread context. 
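+ *
+ * Typical shutdown sketch (assuming the pcb came from one of the
+ * pppapi_*_create() calls above): call pppapi_close(ppp, 0), wait for the
+ * link status callback to report that the session is down, then release
+ * the control block with pppapi_free(ppp).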
+ */ +err_t +pppapi_close(ppp_pcb *pcb, u8_t nocarrier) +{ + err_t err; + PPPAPI_VAR_DECLARE(msg); + PPPAPI_VAR_ALLOC(msg); + + PPPAPI_VAR_REF(msg).msg.ppp = pcb; + PPPAPI_VAR_REF(msg).msg.msg.close.nocarrier = nocarrier; + err = tcpip_api_call(pppapi_do_ppp_close, &PPPAPI_VAR_REF(msg).call); + PPPAPI_VAR_FREE(msg); + return err; +} + + +/** + * Call ppp_free() inside the tcpip_thread context. + */ +static err_t +pppapi_do_ppp_free(struct tcpip_api_call_data *m) +{ + /* cast through void* to silence alignment warnings. + * We know it works because the structs have been instantiated as struct pppapi_msg */ + struct pppapi_msg *msg = (struct pppapi_msg *)(void*)m; + + return ppp_free(msg->msg.ppp); +} + +/** + * Call ppp_free() in a thread-safe way by running that function inside the + * tcpip_thread context. + */ +err_t +pppapi_free(ppp_pcb *pcb) +{ + err_t err; + PPPAPI_VAR_DECLARE(msg); + PPPAPI_VAR_ALLOC(msg); + + PPPAPI_VAR_REF(msg).msg.ppp = pcb; + err = tcpip_api_call(pppapi_do_ppp_free, &PPPAPI_VAR_REF(msg).call); + PPPAPI_VAR_FREE(msg); + return err; +} + + +/** + * Call ppp_ioctl() inside the tcpip_thread context. + */ +static err_t +pppapi_do_ppp_ioctl(struct tcpip_api_call_data *m) +{ + /* cast through void* to silence alignment warnings. + * We know it works because the structs have been instantiated as struct pppapi_msg */ + struct pppapi_msg *msg = (struct pppapi_msg *)(void*)m; + + return ppp_ioctl(msg->msg.ppp, msg->msg.msg.ioctl.cmd, msg->msg.msg.ioctl.arg); +} + +/** + * Call ppp_ioctl() in a thread-safe way by running that function inside the + * tcpip_thread context. + */ +err_t +pppapi_ioctl(ppp_pcb *pcb, u8_t cmd, void *arg) +{ + err_t err; + PPPAPI_VAR_DECLARE(msg); + PPPAPI_VAR_ALLOC(msg); + + PPPAPI_VAR_REF(msg).msg.ppp = pcb; + PPPAPI_VAR_REF(msg).msg.msg.ioctl.cmd = cmd; + PPPAPI_VAR_REF(msg).msg.msg.ioctl.arg = arg; + err = tcpip_api_call(pppapi_do_ppp_ioctl, &PPPAPI_VAR_REF(msg).call); + PPPAPI_VAR_FREE(msg); + return err; +} + +#endif /* LWIP_PPP_API */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/netif/ppp/pppcrypt.c b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/netif/ppp/pppcrypt.c new file mode 100644 index 0000000..f2f36aa --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/netif/ppp/pppcrypt.c @@ -0,0 +1,66 @@ +/* + * pppcrypt.c - PPP/DES linkage for MS-CHAP and EAP SRP-SHA1 + * + * Extracted from chap_ms.c by James Carlson. + * + * Copyright (c) 1995 Eric Rosenquist. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * 3. The name(s) of the authors of this software must not be used to + * endorse or promote products derived from this software without + * prior written permission. 
+ * + * THE AUTHORS OF THIS SOFTWARE DISCLAIM ALL WARRANTIES WITH REGARD TO + * THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY + * AND FITNESS, IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY + * SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN + * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING + * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#include "netif/ppp/ppp_opts.h" +#if PPP_SUPPORT && MSCHAP_SUPPORT /* don't build if not necessary */ + +#include "netif/ppp/ppp_impl.h" + +#include "netif/ppp/pppcrypt.h" + + +static u_char pppcrypt_get_7bits(u_char *input, int startBit) { + unsigned int word; + + word = (unsigned)input[startBit / 8] << 8; + word |= (unsigned)input[startBit / 8 + 1]; + + word >>= 15 - (startBit % 8 + 7); + + return word & 0xFE; +} + +/* IN 56 bit DES key missing parity bits + * OUT 64 bit DES key with parity bits added + */ +void pppcrypt_56_to_64_bit_key(u_char *key, u_char * des_key) { + des_key[0] = pppcrypt_get_7bits(key, 0); + des_key[1] = pppcrypt_get_7bits(key, 7); + des_key[2] = pppcrypt_get_7bits(key, 14); + des_key[3] = pppcrypt_get_7bits(key, 21); + des_key[4] = pppcrypt_get_7bits(key, 28); + des_key[5] = pppcrypt_get_7bits(key, 35); + des_key[6] = pppcrypt_get_7bits(key, 42); + des_key[7] = pppcrypt_get_7bits(key, 49); +} + +#endif /* PPP_SUPPORT && MSCHAP_SUPPORT */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/netif/ppp/pppoe.c b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/netif/ppp/pppoe.c new file mode 100644 index 0000000..33b9627 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/netif/ppp/pppoe.c @@ -0,0 +1,1201 @@ +/***************************************************************************** +* pppoe.c - PPP Over Ethernet implementation for lwIP. +* +* Copyright (c) 2006 by Marc Boucher, Services Informatiques (MBSI) inc. +* +* The authors hereby grant permission to use, copy, modify, distribute, +* and license this software and its documentation for any purpose, provided +* that existing copyright notices are retained in all copies and that this +* notice and the following disclaimer are included verbatim in any +* distributions. No written agreement, license, or royalty fee is required +* for any of the authorized uses. +* +* THIS SOFTWARE IS PROVIDED BY THE CONTRIBUTORS *AS IS* AND ANY EXPRESS OR +* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES +* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +* IN NO EVENT SHALL THE CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT +* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF +* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +* +****************************************************************************** +* REVISION HISTORY +* +* 06-01-01 Marc Boucher +* Ported to lwIP. 
+*****************************************************************************/ + + + +/* based on NetBSD: if_pppoe.c,v 1.64 2006/01/31 23:50:15 martin Exp */ + +/*- + * Copyright (c) 2002 The NetBSD Foundation, Inc. + * All rights reserved. + * + * This code is derived from software contributed to The NetBSD Foundation + * by Martin Husemann . + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the NetBSD + * Foundation, Inc. and its contributors. + * 4. Neither the name of The NetBSD Foundation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS + * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include "netif/ppp/ppp_opts.h" +#if PPP_SUPPORT && PPPOE_SUPPORT /* don't build if not configured for use in lwipopts.h */ + +#if 0 /* UNUSED */ +#include +#include +#endif /* UNUSED */ + +#include "lwip/timeouts.h" +#include "lwip/memp.h" +#include "lwip/stats.h" +#include "lwip/snmp.h" + +#include "netif/ethernet.h" +#include "netif/ppp/ppp_impl.h" +#include "netif/ppp/lcp.h" +#include "netif/ppp/ipcp.h" +#include "netif/ppp/pppoe.h" + +/* Memory pool */ +LWIP_MEMPOOL_DECLARE(PPPOE_IF, MEMP_NUM_PPPOE_INTERFACES, sizeof(struct pppoe_softc), "PPPOE_IF") + +/* Add a 16 bit unsigned value to a buffer pointed to by PTR */ +#define PPPOE_ADD_16(PTR, VAL) \ + *(PTR)++ = (u8_t)((VAL) / 256); \ + *(PTR)++ = (u8_t)((VAL) % 256) + +/* Add a complete PPPoE header to the buffer pointed to by PTR */ +#define PPPOE_ADD_HEADER(PTR, CODE, SESS, LEN) \ + *(PTR)++ = PPPOE_VERTYPE; \ + *(PTR)++ = (CODE); \ + PPPOE_ADD_16(PTR, SESS); \ + PPPOE_ADD_16(PTR, LEN) + +#define PPPOE_DISC_TIMEOUT (5*1000) /* base for quick timeout calculation */ +#define PPPOE_SLOW_RETRY (60*1000) /* persistent retry interval */ +#define PPPOE_DISC_MAXPADI 4 /* retry PADI four times (quickly) */ +#define PPPOE_DISC_MAXPADR 2 /* retry PADR twice */ + +#ifdef PPPOE_SERVER +#error "PPPOE_SERVER is not yet supported under lwIP!" 
+/* from if_spppsubr.c */ +#define IFF_PASSIVE IFF_LINK0 /* wait passively for connection */ +#endif + +#define PPPOE_ERRORSTRING_LEN 64 + + +/* callbacks called from PPP core */ +static err_t pppoe_write(ppp_pcb *ppp, void *ctx, struct pbuf *p); +static err_t pppoe_netif_output(ppp_pcb *ppp, void *ctx, struct pbuf *p, u_short protocol); +static void pppoe_connect(ppp_pcb *ppp, void *ctx); +static void pppoe_disconnect(ppp_pcb *ppp, void *ctx); +static err_t pppoe_destroy(ppp_pcb *ppp, void *ctx); + +/* management routines */ +static void pppoe_abort_connect(struct pppoe_softc *); +#if 0 /* UNUSED */ +static void pppoe_clear_softc(struct pppoe_softc *, const char *); +#endif /* UNUSED */ + +/* internal timeout handling */ +static void pppoe_timeout(void *); + +/* sending actual protocol controll packets */ +static err_t pppoe_send_padi(struct pppoe_softc *); +static err_t pppoe_send_padr(struct pppoe_softc *); +#ifdef PPPOE_SERVER +static err_t pppoe_send_pado(struct pppoe_softc *); +static err_t pppoe_send_pads(struct pppoe_softc *); +#endif +static err_t pppoe_send_padt(struct netif *, u_int, const u8_t *); + +/* internal helper functions */ +static err_t pppoe_xmit(struct pppoe_softc *sc, struct pbuf *pb); +static struct pppoe_softc* pppoe_find_softc_by_session(u_int session, struct netif *rcvif); +static struct pppoe_softc* pppoe_find_softc_by_hunique(u8_t *token, size_t len, struct netif *rcvif); + +/** linked list of created pppoe interfaces */ +static struct pppoe_softc *pppoe_softc_list; + +/* Callbacks structure for PPP core */ +static const struct link_callbacks pppoe_callbacks = { + pppoe_connect, +#if PPP_SERVER + NULL, +#endif /* PPP_SERVER */ + pppoe_disconnect, + pppoe_destroy, + pppoe_write, + pppoe_netif_output, + NULL, + NULL +}; + +/* + * Create a new PPP Over Ethernet (PPPoE) connection. + * + * Return 0 on success, an error code on failure. 
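+ * (In this lwIP port the function actually returns a ppp_pcb pointer, or
+ * NULL if allocation fails.)
+ *
+ * Minimal usage sketch; ppp_netif, eth_netif and status_cb are placeholder
+ * names, not defined in this file:
+ *
+ *   ppp_pcb *ppp = pppoe_create(&ppp_netif, &eth_netif, NULL, NULL,
+ *                               status_cb, NULL);
+ *   if (ppp != NULL) {
+ *     ppp_connect(ppp, 0);
+ *   }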
+ */ +ppp_pcb *pppoe_create(struct netif *pppif, + struct netif *ethif, + const char *service_name, const char *concentrator_name, + ppp_link_status_cb_fn link_status_cb, void *ctx_cb) +{ + ppp_pcb *ppp; + struct pppoe_softc *sc; + LWIP_UNUSED_ARG(service_name); + LWIP_UNUSED_ARG(concentrator_name); + LWIP_ASSERT_CORE_LOCKED(); + + sc = (struct pppoe_softc *)LWIP_MEMPOOL_ALLOC(PPPOE_IF); + if (sc == NULL) { + return NULL; + } + + ppp = ppp_new(pppif, &pppoe_callbacks, sc, link_status_cb, ctx_cb); + if (ppp == NULL) { + LWIP_MEMPOOL_FREE(PPPOE_IF, sc); + return NULL; + } + + memset(sc, 0, sizeof(struct pppoe_softc)); + sc->pcb = ppp; + sc->sc_ethif = ethif; + /* put the new interface at the head of the list */ + sc->next = pppoe_softc_list; + pppoe_softc_list = sc; + return ppp; +} + +/* Called by PPP core */ +static err_t pppoe_write(ppp_pcb *ppp, void *ctx, struct pbuf *p) { + struct pppoe_softc *sc = (struct pppoe_softc *)ctx; + struct pbuf *ph; /* Ethernet + PPPoE header */ + err_t ret; +#if MIB2_STATS + u16_t tot_len; +#else /* MIB2_STATS */ + LWIP_UNUSED_ARG(ppp); +#endif /* MIB2_STATS */ + + /* skip address & flags */ + pbuf_remove_header(p, 2); + + ph = pbuf_alloc(PBUF_LINK, (u16_t)(PPPOE_HEADERLEN), PBUF_RAM); + if(!ph) { + LINK_STATS_INC(link.memerr); + LINK_STATS_INC(link.proterr); + MIB2_STATS_NETIF_INC(ppp->netif, ifoutdiscards); + pbuf_free(p); + return ERR_MEM; + } + + pbuf_remove_header(ph, PPPOE_HEADERLEN); /* hide PPPoE header */ + pbuf_cat(ph, p); +#if MIB2_STATS + tot_len = ph->tot_len; +#endif /* MIB2_STATS */ + + ret = pppoe_xmit(sc, ph); + if (ret != ERR_OK) { + LINK_STATS_INC(link.err); + MIB2_STATS_NETIF_INC(ppp->netif, ifoutdiscards); + return ret; + } + + MIB2_STATS_NETIF_ADD(ppp->netif, ifoutoctets, (u16_t)tot_len); + MIB2_STATS_NETIF_INC(ppp->netif, ifoutucastpkts); + LINK_STATS_INC(link.xmit); + return ERR_OK; +} + +/* Called by PPP core */ +static err_t pppoe_netif_output(ppp_pcb *ppp, void *ctx, struct pbuf *p, u_short protocol) { + struct pppoe_softc *sc = (struct pppoe_softc *)ctx; + struct pbuf *pb; + u8_t *pl; + err_t err; +#if MIB2_STATS + u16_t tot_len; +#else /* MIB2_STATS */ + LWIP_UNUSED_ARG(ppp); +#endif /* MIB2_STATS */ + + /* @todo: try to use pbuf_header() here! 
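+   * For now a small separate pbuf is allocated with room for the PPPoE
+   * header plus the 2-byte protocol field, the payload is chained behind
+   * it, and pppoe_xmit() fills in the PPPoE header in the reserved space.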
*/ + pb = pbuf_alloc(PBUF_LINK, PPPOE_HEADERLEN + sizeof(protocol), PBUF_RAM); + if(!pb) { + LINK_STATS_INC(link.memerr); + LINK_STATS_INC(link.proterr); + MIB2_STATS_NETIF_INC(ppp->netif, ifoutdiscards); + return ERR_MEM; + } + + pbuf_remove_header(pb, PPPOE_HEADERLEN); + + pl = (u8_t*)pb->payload; + PUTSHORT(protocol, pl); + + pbuf_chain(pb, p); +#if MIB2_STATS + tot_len = pb->tot_len; +#endif /* MIB2_STATS */ + + if( (err = pppoe_xmit(sc, pb)) != ERR_OK) { + LINK_STATS_INC(link.err); + MIB2_STATS_NETIF_INC(ppp->netif, ifoutdiscards); + return err; + } + + MIB2_STATS_NETIF_ADD(ppp->netif, ifoutoctets, tot_len); + MIB2_STATS_NETIF_INC(ppp->netif, ifoutucastpkts); + LINK_STATS_INC(link.xmit); + return ERR_OK; +} + +static err_t +pppoe_destroy(ppp_pcb *ppp, void *ctx) +{ + struct pppoe_softc *sc = (struct pppoe_softc *)ctx; + struct pppoe_softc **copp, *freep; + LWIP_UNUSED_ARG(ppp); + + sys_untimeout(pppoe_timeout, sc); + + /* remove interface from list */ + for (copp = &pppoe_softc_list; (freep = *copp); copp = &freep->next) { + if (freep == sc) { + *copp = freep->next; + break; + } + } + +#ifdef PPPOE_TODO + if (sc->sc_concentrator_name) { + mem_free(sc->sc_concentrator_name); + } + if (sc->sc_service_name) { + mem_free(sc->sc_service_name); + } +#endif /* PPPOE_TODO */ + LWIP_MEMPOOL_FREE(PPPOE_IF, sc); + + return ERR_OK; +} + +/* + * Find the interface handling the specified session. + * Note: O(number of sessions open), this is a client-side only, mean + * and lean implementation, so number of open sessions typically should + * be 1. + */ +static struct pppoe_softc* pppoe_find_softc_by_session(u_int session, struct netif *rcvif) { + struct pppoe_softc *sc; + + for (sc = pppoe_softc_list; sc != NULL; sc = sc->next) { + if (sc->sc_state == PPPOE_STATE_SESSION + && sc->sc_session == session + && sc->sc_ethif == rcvif) { + return sc; + } + } + return NULL; +} + +/* Check host unique token passed and return appropriate softc pointer, + * or NULL if token is bogus. 
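+ * The host-unique tag this driver sends is simply the pointer to its own
+ * pppoe_softc, echoed back by the peer; a valid token therefore has
+ * pointer size and must match an entry of pppoe_softc_list by plain
+ * pointer comparison.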
*/ +static struct pppoe_softc* pppoe_find_softc_by_hunique(u8_t *token, size_t len, struct netif *rcvif) { + struct pppoe_softc *sc, *t; + + if (len != sizeof sc) { + return NULL; + } + MEMCPY(&t, token, len); + + for (sc = pppoe_softc_list; sc != NULL; sc = sc->next) { + if (sc == t) { + break; + } + } + + if (sc == NULL) { + PPPDEBUG(LOG_DEBUG, ("pppoe: alien host unique tag, no session found\n")); + return NULL; + } + + /* should be safe to access *sc now */ + if (sc->sc_state < PPPOE_STATE_PADI_SENT || sc->sc_state >= PPPOE_STATE_SESSION) { + PPPDEBUG(LOG_DEBUG, ("%c%c%"U16_F": host unique tag found, but it belongs to a connection in state %d\n", + sc->sc_ethif->name[0], sc->sc_ethif->name[1], sc->sc_ethif->num, sc->sc_state)); + return NULL; + } + if (sc->sc_ethif != rcvif) { + PPPDEBUG(LOG_DEBUG, ("%c%c%"U16_F": wrong interface, not accepting host unique\n", + sc->sc_ethif->name[0], sc->sc_ethif->name[1], sc->sc_ethif->num)); + return NULL; + } + return sc; +} + +/* analyze and handle a single received packet while not in session state */ +void +pppoe_disc_input(struct netif *netif, struct pbuf *pb) +{ + u16_t tag, len, off; + u16_t session, plen; + struct pppoe_softc *sc; +#if PPP_DEBUG + const char *err_msg = NULL; +#endif /* PPP_DEBUG */ + u8_t *ac_cookie; + u16_t ac_cookie_len; +#ifdef PPPOE_SERVER + u8_t *hunique; + size_t hunique_len; +#endif + struct pppoehdr *ph; + struct pppoetag pt; + int err; + struct eth_hdr *ethhdr; + + /* don't do anything if there is not a single PPPoE instance */ + if (pppoe_softc_list == NULL) { + pbuf_free(pb); + return; + } + + pb = pbuf_coalesce(pb, PBUF_RAW); + + ethhdr = (struct eth_hdr *)pb->payload; + + ac_cookie = NULL; + ac_cookie_len = 0; +#ifdef PPPOE_SERVER + hunique = NULL; + hunique_len = 0; +#endif + session = 0; + off = sizeof(struct eth_hdr) + sizeof(struct pppoehdr); + if (pb->len < off) { + PPPDEBUG(LOG_DEBUG, ("pppoe: packet too short: %d\n", pb->len)); + goto done; + } + + ph = (struct pppoehdr *) (ethhdr + 1); + if (ph->vertype != PPPOE_VERTYPE) { + PPPDEBUG(LOG_DEBUG, ("pppoe: unknown version/type packet: 0x%x\n", ph->vertype)); + goto done; + } + session = lwip_ntohs(ph->session); + plen = lwip_ntohs(ph->plen); + + if (plen > (pb->len - off)) { + PPPDEBUG(LOG_DEBUG, ("pppoe: packet content does not fit: data available = %d, packet size = %u\n", + pb->len - off, plen)); + goto done; + } + if(pb->tot_len == pb->len) { + u16_t framelen = off + plen; + if (framelen < pb->len) { + /* ignore trailing garbage */ + pb->tot_len = pb->len = framelen; + } + } + tag = 0; + len = 0; + sc = NULL; + while (off + sizeof(pt) <= pb->len) { + MEMCPY(&pt, (u8_t*)pb->payload + off, sizeof(pt)); + tag = lwip_ntohs(pt.tag); + len = lwip_ntohs(pt.len); + if (off + sizeof(pt) + len > pb->len) { + PPPDEBUG(LOG_DEBUG, ("pppoe: tag 0x%x len 0x%x is too long\n", tag, len)); + goto done; + } + switch (tag) { + case PPPOE_TAG_EOL: + goto breakbreak; + case PPPOE_TAG_SNAME: + break; /* ignored */ + case PPPOE_TAG_ACNAME: + break; /* ignored */ + case PPPOE_TAG_HUNIQUE: + if (sc != NULL) { + break; + } +#ifdef PPPOE_SERVER + hunique = (u8_t*)pb->payload + off + sizeof(pt); + hunique_len = len; +#endif + sc = pppoe_find_softc_by_hunique((u8_t*)pb->payload + off + sizeof(pt), len, netif); + break; + case PPPOE_TAG_ACCOOKIE: + if (ac_cookie == NULL) { + if (len > PPPOE_MAX_AC_COOKIE_LEN) { + PPPDEBUG(LOG_DEBUG, ("pppoe: AC cookie is too long: len = %d, max = %d\n", len, PPPOE_MAX_AC_COOKIE_LEN)); + goto done; + } + ac_cookie = (u8_t*)pb->payload + off + 
sizeof(pt); + ac_cookie_len = len; + } + break; +#if PPP_DEBUG + case PPPOE_TAG_SNAME_ERR: + err_msg = "SERVICE NAME ERROR"; + break; + case PPPOE_TAG_ACSYS_ERR: + err_msg = "AC SYSTEM ERROR"; + break; + case PPPOE_TAG_GENERIC_ERR: + err_msg = "GENERIC ERROR"; + break; +#endif /* PPP_DEBUG */ + default: + break; + } +#if PPP_DEBUG + if (err_msg != NULL) { + char error_tmp[PPPOE_ERRORSTRING_LEN]; + u16_t error_len = LWIP_MIN(len, sizeof(error_tmp)-1); + strncpy(error_tmp, (char*)pb->payload + off + sizeof(pt), error_len); + error_tmp[error_len] = '\0'; + if (sc) { + PPPDEBUG(LOG_DEBUG, ("pppoe: %c%c%"U16_F": %s: %s\n", sc->sc_ethif->name[0], sc->sc_ethif->name[1], sc->sc_ethif->num, err_msg, error_tmp)); + } else { + PPPDEBUG(LOG_DEBUG, ("pppoe: %s: %s\n", err_msg, error_tmp)); + } + } +#endif /* PPP_DEBUG */ + off += sizeof(pt) + len; + } + +breakbreak:; + switch (ph->code) { + case PPPOE_CODE_PADI: +#ifdef PPPOE_SERVER + /* + * got service name, concentrator name, and/or host unique. + * ignore if we have no interfaces with IFF_PASSIVE|IFF_UP. + */ + if (LIST_EMPTY(&pppoe_softc_list)) { + goto done; + } + LIST_FOREACH(sc, &pppoe_softc_list, sc_list) { + if (!(sc->sc_sppp.pp_if.if_flags & IFF_UP)) { + continue; + } + if (!(sc->sc_sppp.pp_if.if_flags & IFF_PASSIVE)) { + continue; + } + if (sc->sc_state == PPPOE_STATE_INITIAL) { + break; + } + } + if (sc == NULL) { + /* PPPDEBUG(LOG_DEBUG, ("pppoe: free passive interface is not found\n")); */ + goto done; + } + if (hunique) { + if (sc->sc_hunique) { + mem_free(sc->sc_hunique); + } + sc->sc_hunique = mem_malloc(hunique_len); + if (sc->sc_hunique == NULL) { + goto done; + } + sc->sc_hunique_len = hunique_len; + MEMCPY(sc->sc_hunique, hunique, hunique_len); + } + MEMCPY(&sc->sc_dest, eh->ether_shost, sizeof sc->sc_dest); + sc->sc_state = PPPOE_STATE_PADO_SENT; + pppoe_send_pado(sc); + break; +#endif /* PPPOE_SERVER */ + case PPPOE_CODE_PADR: +#ifdef PPPOE_SERVER + /* + * get sc from ac_cookie if IFF_PASSIVE + */ + if (ac_cookie == NULL) { + /* be quiet if there is not a single pppoe instance */ + PPPDEBUG(LOG_DEBUG, ("pppoe: received PADR but not includes ac_cookie\n")); + goto done; + } + sc = pppoe_find_softc_by_hunique(ac_cookie, ac_cookie_len, netif); + if (sc == NULL) { + /* be quiet if there is not a single pppoe instance */ + if (!LIST_EMPTY(&pppoe_softc_list)) { + PPPDEBUG(LOG_DEBUG, ("pppoe: received PADR but could not find request for it\n")); + } + goto done; + } + if (sc->sc_state != PPPOE_STATE_PADO_SENT) { + PPPDEBUG(LOG_DEBUG, ("%c%c%"U16_F": received unexpected PADR\n", sc->sc_ethif->name[0], sc->sc_ethif->name[1], sc->sc_ethif->num)); + goto done; + } + if (hunique) { + if (sc->sc_hunique) { + mem_free(sc->sc_hunique); + } + sc->sc_hunique = mem_malloc(hunique_len); + if (sc->sc_hunique == NULL) { + goto done; + } + sc->sc_hunique_len = hunique_len; + MEMCPY(sc->sc_hunique, hunique, hunique_len); + } + pppoe_send_pads(sc); + sc->sc_state = PPPOE_STATE_SESSION; + ppp_start(sc->pcb); /* notify upper layers */ + break; +#else + /* ignore, we are no access concentrator */ + goto done; +#endif /* PPPOE_SERVER */ + case PPPOE_CODE_PADO: + if (sc == NULL) { + /* be quiet if there is not a single pppoe instance */ + if (pppoe_softc_list != NULL) { + PPPDEBUG(LOG_DEBUG, ("pppoe: received PADO but could not find request for it\n")); + } + goto done; + } + if (sc->sc_state != PPPOE_STATE_PADI_SENT) { + PPPDEBUG(LOG_DEBUG, ("%c%c%"U16_F": received unexpected PADO\n", sc->sc_ethif->name[0], sc->sc_ethif->name[1], sc->sc_ethif->num)); + goto 
done; + } + if (ac_cookie) { + sc->sc_ac_cookie_len = ac_cookie_len; + MEMCPY(sc->sc_ac_cookie, ac_cookie, ac_cookie_len); + } + MEMCPY(&sc->sc_dest, ethhdr->src.addr, sizeof(sc->sc_dest.addr)); + sys_untimeout(pppoe_timeout, sc); + sc->sc_padr_retried = 0; + sc->sc_state = PPPOE_STATE_PADR_SENT; + if ((err = pppoe_send_padr(sc)) != 0) { + PPPDEBUG(LOG_DEBUG, ("pppoe: %c%c%"U16_F": failed to send PADR, error=%d\n", sc->sc_ethif->name[0], sc->sc_ethif->name[1], sc->sc_ethif->num, err)); + LWIP_UNUSED_ARG(err); /* if PPPDEBUG is disabled */ + } + sys_timeout(PPPOE_DISC_TIMEOUT * (1 + sc->sc_padr_retried), pppoe_timeout, sc); + break; + case PPPOE_CODE_PADS: + if (sc == NULL) { + goto done; + } + sc->sc_session = session; + sys_untimeout(pppoe_timeout, sc); + PPPDEBUG(LOG_DEBUG, ("pppoe: %c%c%"U16_F": session 0x%x connected\n", sc->sc_ethif->name[0], sc->sc_ethif->name[1], sc->sc_ethif->num, session)); + sc->sc_state = PPPOE_STATE_SESSION; + ppp_start(sc->pcb); /* notify upper layers */ + break; + case PPPOE_CODE_PADT: + /* Don't disconnect here, we let the LCP Echo/Reply find the fact + * that PPP session is down. Asking the PPP stack to end the session + * require strict checking about the PPP phase to prevent endless + * disconnection loops. + */ +#if 0 /* UNUSED */ + if (sc == NULL) { /* PADT frames are rarely sent with a hunique tag, this is actually almost always true */ + goto done; + } + pppoe_clear_softc(sc, "received PADT"); +#endif /* UNUSED */ + break; + default: + if(sc) { + PPPDEBUG(LOG_DEBUG, ("%c%c%"U16_F": unknown code (0x%"X16_F") session = 0x%"X16_F"\n", + sc->sc_ethif->name[0], sc->sc_ethif->name[1], sc->sc_ethif->num, + (u16_t)ph->code, session)); + } else { + PPPDEBUG(LOG_DEBUG, ("pppoe: unknown code (0x%"X16_F") session = 0x%"X16_F"\n", (u16_t)ph->code, session)); + } + break; + } + +done: + pbuf_free(pb); + return; +} + +void +pppoe_data_input(struct netif *netif, struct pbuf *pb) +{ + u16_t session, plen; + struct pppoe_softc *sc; + struct pppoehdr *ph; +#ifdef PPPOE_TERM_UNKNOWN_SESSIONS + u8_t shost[ETHER_ADDR_LEN]; +#endif + +#ifdef PPPOE_TERM_UNKNOWN_SESSIONS + MEMCPY(shost, ((struct eth_hdr *)pb->payload)->src.addr, sizeof(shost)); +#endif + if (pbuf_remove_header(pb, sizeof(struct eth_hdr)) != 0) { + /* bail out */ + PPPDEBUG(LOG_ERR, ("pppoe_data_input: pbuf_remove_header failed\n")); + LINK_STATS_INC(link.lenerr); + goto drop; + } + + if (pb->len < sizeof(*ph)) { + PPPDEBUG(LOG_DEBUG, ("pppoe_data_input: could not get PPPoE header\n")); + goto drop; + } + ph = (struct pppoehdr *)pb->payload; + + if (ph->vertype != PPPOE_VERTYPE) { + PPPDEBUG(LOG_DEBUG, ("pppoe (data): unknown version/type packet: 0x%x\n", ph->vertype)); + goto drop; + } + if (ph->code != 0) { + goto drop; + } + + session = lwip_ntohs(ph->session); + sc = pppoe_find_softc_by_session(session, netif); + if (sc == NULL) { +#ifdef PPPOE_TERM_UNKNOWN_SESSIONS + PPPDEBUG(LOG_DEBUG, ("pppoe: input for unknown session 0x%x, sending PADT\n", session)); + pppoe_send_padt(netif, session, shost); +#endif + goto drop; + } + + plen = lwip_ntohs(ph->plen); + + if (pbuf_remove_header(pb, PPPOE_HEADERLEN) != 0) { + /* bail out */ + PPPDEBUG(LOG_ERR, ("pppoe_data_input: pbuf_remove_header PPPOE_HEADERLEN failed\n")); + LINK_STATS_INC(link.lenerr); + goto drop; + } + + PPPDEBUG(LOG_DEBUG, ("pppoe_data_input: %c%c%"U16_F": pkthdr.len=%d, pppoe.len=%d\n", + sc->sc_ethif->name[0], sc->sc_ethif->name[1], sc->sc_ethif->num, + pb->len, plen)); + + if (pb->tot_len < plen) { + goto drop; + } + + /* Dispatch the packet 
thereby consuming it. */ + ppp_input(sc->pcb, pb); + return; + +drop: + pbuf_free(pb); +} + +static err_t +pppoe_output(struct pppoe_softc *sc, struct pbuf *pb) +{ + struct eth_hdr *ethhdr; + u16_t etype; + err_t res; + + /* make room for Ethernet header - should not fail */ + if (pbuf_add_header(pb, sizeof(struct eth_hdr)) != 0) { + /* bail out */ + PPPDEBUG(LOG_ERR, ("pppoe: %c%c%"U16_F": pppoe_output: could not allocate room for Ethernet header\n", sc->sc_ethif->name[0], sc->sc_ethif->name[1], sc->sc_ethif->num)); + LINK_STATS_INC(link.lenerr); + pbuf_free(pb); + return ERR_BUF; + } + ethhdr = (struct eth_hdr *)pb->payload; + etype = sc->sc_state == PPPOE_STATE_SESSION ? ETHTYPE_PPPOE : ETHTYPE_PPPOEDISC; + ethhdr->type = lwip_htons(etype); + MEMCPY(ðhdr->dest.addr, &sc->sc_dest.addr, sizeof(ethhdr->dest.addr)); + MEMCPY(ðhdr->src.addr, &sc->sc_ethif->hwaddr, sizeof(ethhdr->src.addr)); + + PPPDEBUG(LOG_DEBUG, ("pppoe: %c%c%"U16_F" (%x) state=%d, session=0x%x output -> %02"X16_F":%02"X16_F":%02"X16_F":%02"X16_F":%02"X16_F":%02"X16_F", len=%d\n", + sc->sc_ethif->name[0], sc->sc_ethif->name[1], sc->sc_ethif->num, etype, + sc->sc_state, sc->sc_session, + sc->sc_dest.addr[0], sc->sc_dest.addr[1], sc->sc_dest.addr[2], sc->sc_dest.addr[3], sc->sc_dest.addr[4], sc->sc_dest.addr[5], + pb->tot_len)); + + res = sc->sc_ethif->linkoutput(sc->sc_ethif, pb); + + pbuf_free(pb); + + return res; +} + +static err_t +pppoe_send_padi(struct pppoe_softc *sc) +{ + struct pbuf *pb; + u8_t *p; + int len; +#ifdef PPPOE_TODO + int l1 = 0, l2 = 0; /* XXX: gcc */ +#endif /* PPPOE_TODO */ + + /* calculate length of frame (excluding ethernet header + pppoe header) */ + len = 2 + 2 + 2 + 2 + sizeof sc; /* service name tag is required, host unique is send too */ +#ifdef PPPOE_TODO + if (sc->sc_service_name != NULL) { + l1 = (int)strlen(sc->sc_service_name); + len += l1; + } + if (sc->sc_concentrator_name != NULL) { + l2 = (int)strlen(sc->sc_concentrator_name); + len += 2 + 2 + l2; + } +#endif /* PPPOE_TODO */ + LWIP_ASSERT("sizeof(struct eth_hdr) + PPPOE_HEADERLEN + len <= 0xffff", + sizeof(struct eth_hdr) + PPPOE_HEADERLEN + len <= 0xffff); + + /* allocate a buffer */ + pb = pbuf_alloc(PBUF_LINK, (u16_t)(PPPOE_HEADERLEN + len), PBUF_RAM); + if (!pb) { + return ERR_MEM; + } + LWIP_ASSERT("pb->tot_len == pb->len", pb->tot_len == pb->len); + + p = (u8_t*)pb->payload; + /* fill in pkt */ + PPPOE_ADD_HEADER(p, PPPOE_CODE_PADI, 0, (u16_t)len); + PPPOE_ADD_16(p, PPPOE_TAG_SNAME); +#ifdef PPPOE_TODO + if (sc->sc_service_name != NULL) { + PPPOE_ADD_16(p, l1); + MEMCPY(p, sc->sc_service_name, l1); + p += l1; + } else +#endif /* PPPOE_TODO */ + { + PPPOE_ADD_16(p, 0); + } +#ifdef PPPOE_TODO + if (sc->sc_concentrator_name != NULL) { + PPPOE_ADD_16(p, PPPOE_TAG_ACNAME); + PPPOE_ADD_16(p, l2); + MEMCPY(p, sc->sc_concentrator_name, l2); + p += l2; + } +#endif /* PPPOE_TODO */ + PPPOE_ADD_16(p, PPPOE_TAG_HUNIQUE); + PPPOE_ADD_16(p, sizeof(sc)); + MEMCPY(p, &sc, sizeof sc); + + /* send pkt */ + return pppoe_output(sc, pb); +} + +static void +pppoe_timeout(void *arg) +{ + u32_t retry_wait; + int err; + struct pppoe_softc *sc = (struct pppoe_softc*)arg; + + PPPDEBUG(LOG_DEBUG, ("pppoe: %c%c%"U16_F": timeout\n", sc->sc_ethif->name[0], sc->sc_ethif->name[1], sc->sc_ethif->num)); + + switch (sc->sc_state) { + case PPPOE_STATE_PADI_SENT: + /* + * We have two basic ways of retrying: + * - Quick retry mode: try a few times in short sequence + * - Slow retry mode: we already had a connection successfully + * established and will try infinitely 
(without user + * intervention) + * We only enter slow retry mode if IFF_LINK1 (aka autodial) + * is not set. + */ + if (sc->sc_padi_retried < 0xff) { + sc->sc_padi_retried++; + } + if (!sc->pcb->settings.persist && sc->sc_padi_retried >= PPPOE_DISC_MAXPADI) { +#if 0 + if ((sc->sc_sppp.pp_if.if_flags & IFF_LINK1) == 0) { + /* slow retry mode */ + retry_wait = PPPOE_SLOW_RETRY; + } else +#endif + { + pppoe_abort_connect(sc); + return; + } + } + /* initialize for quick retry mode */ + retry_wait = LWIP_MIN(PPPOE_DISC_TIMEOUT * sc->sc_padi_retried, PPPOE_SLOW_RETRY); + if ((err = pppoe_send_padi(sc)) != 0) { + sc->sc_padi_retried--; + PPPDEBUG(LOG_DEBUG, ("pppoe: %c%c%"U16_F": failed to transmit PADI, error=%d\n", sc->sc_ethif->name[0], sc->sc_ethif->name[1], sc->sc_ethif->num, err)); + LWIP_UNUSED_ARG(err); /* if PPPDEBUG is disabled */ + } + sys_timeout(retry_wait, pppoe_timeout, sc); + break; + + case PPPOE_STATE_PADR_SENT: + sc->sc_padr_retried++; + if (sc->sc_padr_retried >= PPPOE_DISC_MAXPADR) { + MEMCPY(&sc->sc_dest, ethbroadcast.addr, sizeof(sc->sc_dest)); + sc->sc_state = PPPOE_STATE_PADI_SENT; + sc->sc_padr_retried = 0; + if ((err = pppoe_send_padi(sc)) != 0) { + PPPDEBUG(LOG_DEBUG, ("pppoe: %c%c%"U16_F": failed to send PADI, error=%d\n", sc->sc_ethif->name[0], sc->sc_ethif->name[1], sc->sc_ethif->num, err)); + LWIP_UNUSED_ARG(err); /* if PPPDEBUG is disabled */ + } + sys_timeout(PPPOE_DISC_TIMEOUT * (1 + sc->sc_padi_retried), pppoe_timeout, sc); + return; + } + if ((err = pppoe_send_padr(sc)) != 0) { + sc->sc_padr_retried--; + PPPDEBUG(LOG_DEBUG, ("pppoe: %c%c%"U16_F": failed to send PADR, error=%d\n", sc->sc_ethif->name[0], sc->sc_ethif->name[1], sc->sc_ethif->num, err)); + LWIP_UNUSED_ARG(err); /* if PPPDEBUG is disabled */ + } + sys_timeout(PPPOE_DISC_TIMEOUT * (1 + sc->sc_padr_retried), pppoe_timeout, sc); + break; + default: + return; /* all done, work in peace */ + } +} + +/* Start a connection (i.e. 
initiate discovery phase) */ +static void +pppoe_connect(ppp_pcb *ppp, void *ctx) +{ + err_t err; + struct pppoe_softc *sc = (struct pppoe_softc *)ctx; + lcp_options *lcp_wo; + lcp_options *lcp_ao; +#if PPP_IPV4_SUPPORT && VJ_SUPPORT + ipcp_options *ipcp_wo; + ipcp_options *ipcp_ao; +#endif /* PPP_IPV4_SUPPORT && VJ_SUPPORT */ + + sc->sc_session = 0; + sc->sc_ac_cookie_len = 0; + sc->sc_padi_retried = 0; + sc->sc_padr_retried = 0; + /* changed to real address later */ + MEMCPY(&sc->sc_dest, ethbroadcast.addr, sizeof(sc->sc_dest)); +#ifdef PPPOE_SERVER + /* wait PADI if IFF_PASSIVE */ + if ((sc->sc_sppp.pp_if.if_flags & IFF_PASSIVE)) { + return 0; + } +#endif + + lcp_wo = &ppp->lcp_wantoptions; + lcp_wo->mru = sc->sc_ethif->mtu-PPPOE_HEADERLEN-2; /* two byte PPP protocol discriminator, then IP data */ + lcp_wo->neg_asyncmap = 0; + lcp_wo->neg_pcompression = 0; + lcp_wo->neg_accompression = 0; + lcp_wo->passive = 0; + lcp_wo->silent = 0; + + lcp_ao = &ppp->lcp_allowoptions; + lcp_ao->mru = sc->sc_ethif->mtu-PPPOE_HEADERLEN-2; /* two byte PPP protocol discriminator, then IP data */ + lcp_ao->neg_asyncmap = 0; + lcp_ao->neg_pcompression = 0; + lcp_ao->neg_accompression = 0; + +#if PPP_IPV4_SUPPORT && VJ_SUPPORT + ipcp_wo = &ppp->ipcp_wantoptions; + ipcp_wo->neg_vj = 0; + ipcp_wo->old_vj = 0; + + ipcp_ao = &ppp->ipcp_allowoptions; + ipcp_ao->neg_vj = 0; + ipcp_ao->old_vj = 0; +#endif /* PPP_IPV4_SUPPORT && VJ_SUPPORT */ + + /* save state, in case we fail to send PADI */ + sc->sc_state = PPPOE_STATE_PADI_SENT; + if ((err = pppoe_send_padi(sc)) != 0) { + PPPDEBUG(LOG_DEBUG, ("pppoe: %c%c%"U16_F": failed to send PADI, error=%d\n", sc->sc_ethif->name[0], sc->sc_ethif->name[1], sc->sc_ethif->num, err)); + } + sys_timeout(PPPOE_DISC_TIMEOUT, pppoe_timeout, sc); +} + +/* disconnect */ +static void +pppoe_disconnect(ppp_pcb *ppp, void *ctx) +{ + struct pppoe_softc *sc = (struct pppoe_softc *)ctx; + + PPPDEBUG(LOG_DEBUG, ("pppoe: %c%c%"U16_F": disconnecting\n", sc->sc_ethif->name[0], sc->sc_ethif->name[1], sc->sc_ethif->num)); + if (sc->sc_state == PPPOE_STATE_SESSION) { + pppoe_send_padt(sc->sc_ethif, sc->sc_session, (const u8_t *)&sc->sc_dest); + } + + /* stop any timer, disconnect can be called while initiating is in progress */ + sys_untimeout(pppoe_timeout, sc); + sc->sc_state = PPPOE_STATE_INITIAL; +#ifdef PPPOE_SERVER + if (sc->sc_hunique) { + mem_free(sc->sc_hunique); + sc->sc_hunique = NULL; /* probably not necessary, if state is initial we shouldn't have to access hunique anyway */ + } + sc->sc_hunique_len = 0; /* probably not necessary, if state is initial we shouldn't have to access hunique anyway */ +#endif + ppp_link_end(ppp); /* notify upper layers */ + return; +} + +/* Connection attempt aborted */ +static void +pppoe_abort_connect(struct pppoe_softc *sc) +{ + PPPDEBUG(LOG_DEBUG, ("%c%c%"U16_F": could not establish connection\n", sc->sc_ethif->name[0], sc->sc_ethif->name[1], sc->sc_ethif->num)); + sc->sc_state = PPPOE_STATE_INITIAL; + ppp_link_failed(sc->pcb); /* notify upper layers */ +} + +/* Send a PADR packet */ +static err_t +pppoe_send_padr(struct pppoe_softc *sc) +{ + struct pbuf *pb; + u8_t *p; + size_t len; +#ifdef PPPOE_TODO + size_t l1 = 0; /* XXX: gcc */ +#endif /* PPPOE_TODO */ + + len = 2 + 2 + 2 + 2 + sizeof(sc); /* service name, host unique */ +#ifdef PPPOE_TODO + if (sc->sc_service_name != NULL) { /* service name tag maybe empty */ + l1 = strlen(sc->sc_service_name); + len += l1; + } +#endif /* PPPOE_TODO */ + if (sc->sc_ac_cookie_len > 0) { + len += 2 + 2 + 
sc->sc_ac_cookie_len; /* AC cookie */ + } + LWIP_ASSERT("sizeof(struct eth_hdr) + PPPOE_HEADERLEN + len <= 0xffff", + sizeof(struct eth_hdr) + PPPOE_HEADERLEN + len <= 0xffff); + pb = pbuf_alloc(PBUF_LINK, (u16_t)(PPPOE_HEADERLEN + len), PBUF_RAM); + if (!pb) { + return ERR_MEM; + } + LWIP_ASSERT("pb->tot_len == pb->len", pb->tot_len == pb->len); + p = (u8_t*)pb->payload; + PPPOE_ADD_HEADER(p, PPPOE_CODE_PADR, 0, len); + PPPOE_ADD_16(p, PPPOE_TAG_SNAME); +#ifdef PPPOE_TODO + if (sc->sc_service_name != NULL) { + PPPOE_ADD_16(p, l1); + MEMCPY(p, sc->sc_service_name, l1); + p += l1; + } else +#endif /* PPPOE_TODO */ + { + PPPOE_ADD_16(p, 0); + } + if (sc->sc_ac_cookie_len > 0) { + PPPOE_ADD_16(p, PPPOE_TAG_ACCOOKIE); + PPPOE_ADD_16(p, sc->sc_ac_cookie_len); + MEMCPY(p, sc->sc_ac_cookie, sc->sc_ac_cookie_len); + p += sc->sc_ac_cookie_len; + } + PPPOE_ADD_16(p, PPPOE_TAG_HUNIQUE); + PPPOE_ADD_16(p, sizeof(sc)); + MEMCPY(p, &sc, sizeof sc); + + return pppoe_output(sc, pb); +} + +/* send a PADT packet */ +static err_t +pppoe_send_padt(struct netif *outgoing_if, u_int session, const u8_t *dest) +{ + struct pbuf *pb; + struct eth_hdr *ethhdr; + err_t res; + u8_t *p; + + pb = pbuf_alloc(PBUF_LINK, (u16_t)(PPPOE_HEADERLEN), PBUF_RAM); + if (!pb) { + return ERR_MEM; + } + LWIP_ASSERT("pb->tot_len == pb->len", pb->tot_len == pb->len); + + if (pbuf_add_header(pb, sizeof(struct eth_hdr))) { + PPPDEBUG(LOG_ERR, ("pppoe: pppoe_send_padt: could not allocate room for PPPoE header\n")); + LINK_STATS_INC(link.lenerr); + pbuf_free(pb); + return ERR_BUF; + } + ethhdr = (struct eth_hdr *)pb->payload; + ethhdr->type = PP_HTONS(ETHTYPE_PPPOEDISC); + MEMCPY(ðhdr->dest.addr, dest, sizeof(ethhdr->dest.addr)); + MEMCPY(ðhdr->src.addr, &outgoing_if->hwaddr, sizeof(ethhdr->src.addr)); + + p = (u8_t*)(ethhdr + 1); + PPPOE_ADD_HEADER(p, PPPOE_CODE_PADT, session, 0); + + res = outgoing_if->linkoutput(outgoing_if, pb); + + pbuf_free(pb); + + return res; +} + +#ifdef PPPOE_SERVER +static err_t +pppoe_send_pado(struct pppoe_softc *sc) +{ + struct pbuf *pb; + u8_t *p; + size_t len; + + /* calc length */ + len = 0; + /* include ac_cookie */ + len += 2 + 2 + sizeof(sc); + /* include hunique */ + len += 2 + 2 + sc->sc_hunique_len; + pb = pbuf_alloc(PBUF_LINK, (u16_t)(PPPOE_HEADERLEN + len), PBUF_RAM); + if (!pb) { + return ERR_MEM; + } + LWIP_ASSERT("pb->tot_len == pb->len", pb->tot_len == pb->len); + p = (u8_t*)pb->payload; + PPPOE_ADD_HEADER(p, PPPOE_CODE_PADO, 0, len); + PPPOE_ADD_16(p, PPPOE_TAG_ACCOOKIE); + PPPOE_ADD_16(p, sizeof(sc)); + MEMCPY(p, &sc, sizeof(sc)); + p += sizeof(sc); + PPPOE_ADD_16(p, PPPOE_TAG_HUNIQUE); + PPPOE_ADD_16(p, sc->sc_hunique_len); + MEMCPY(p, sc->sc_hunique, sc->sc_hunique_len); + return pppoe_output(sc, pb); +} + +static err_t +pppoe_send_pads(struct pppoe_softc *sc) +{ + struct pbuf *pb; + u8_t *p; + size_t len, l1 = 0; /* XXX: gcc */ + + sc->sc_session = mono_time.tv_sec % 0xff + 1; + /* calc length */ + len = 0; + /* include hunique */ + len += 2 + 2 + 2 + 2 + sc->sc_hunique_len; /* service name, host unique*/ + if (sc->sc_service_name != NULL) { /* service name tag maybe empty */ + l1 = strlen(sc->sc_service_name); + len += l1; + } + pb = pbuf_alloc(PBUF_LINK, (u16_t)(PPPOE_HEADERLEN + len), PBUF_RAM); + if (!pb) { + return ERR_MEM; + } + LWIP_ASSERT("pb->tot_len == pb->len", pb->tot_len == pb->len); + p = (u8_t*)pb->payload; + PPPOE_ADD_HEADER(p, PPPOE_CODE_PADS, sc->sc_session, len); + PPPOE_ADD_16(p, PPPOE_TAG_SNAME); + if (sc->sc_service_name != NULL) { + PPPOE_ADD_16(p, l1); + MEMCPY(p, 
sc->sc_service_name, l1); + p += l1; + } else { + PPPOE_ADD_16(p, 0); + } + PPPOE_ADD_16(p, PPPOE_TAG_HUNIQUE); + PPPOE_ADD_16(p, sc->sc_hunique_len); + MEMCPY(p, sc->sc_hunique, sc->sc_hunique_len); + return pppoe_output(sc, pb); +} +#endif + +static err_t +pppoe_xmit(struct pppoe_softc *sc, struct pbuf *pb) +{ + u8_t *p; + size_t len; + + len = pb->tot_len; + + /* make room for PPPoE header - should not fail */ + if (pbuf_add_header(pb, PPPOE_HEADERLEN) != 0) { + /* bail out */ + PPPDEBUG(LOG_ERR, ("pppoe: %c%c%"U16_F": pppoe_xmit: could not allocate room for PPPoE header\n", sc->sc_ethif->name[0], sc->sc_ethif->name[1], sc->sc_ethif->num)); + LINK_STATS_INC(link.lenerr); + pbuf_free(pb); + return ERR_BUF; + } + + p = (u8_t*)pb->payload; + PPPOE_ADD_HEADER(p, 0, sc->sc_session, len); + + return pppoe_output(sc, pb); +} + +#if 0 /*def PFIL_HOOKS*/ +static int +pppoe_ifattach_hook(void *arg, struct pbuf **mp, struct netif *ifp, int dir) +{ + struct pppoe_softc *sc; + int s; + + if (mp != (struct pbuf **)PFIL_IFNET_DETACH) { + return 0; + } + + LIST_FOREACH(sc, &pppoe_softc_list, sc_list) { + if (sc->sc_ethif != ifp) { + continue; + } + if (sc->sc_sppp.pp_if.if_flags & IFF_UP) { + sc->sc_sppp.pp_if.if_flags &= ~(IFF_UP|IFF_RUNNING); + PPPDEBUG(LOG_DEBUG, ("%c%c%"U16_F": ethernet interface detached, going down\n", + sc->sc_ethif->name[0], sc->sc_ethif->name[1], sc->sc_ethif->num)); + } + sc->sc_ethif = NULL; + pppoe_clear_softc(sc, "ethernet interface detached"); + } + + return 0; +} +#endif + +#if 0 /* UNUSED */ +static void +pppoe_clear_softc(struct pppoe_softc *sc, const char *message) +{ + LWIP_UNUSED_ARG(message); + + /* stop timer */ + sys_untimeout(pppoe_timeout, sc); + PPPDEBUG(LOG_DEBUG, ("pppoe: %c%c%"U16_F": session 0x%x terminated, %s\n", sc->sc_ethif->name[0], sc->sc_ethif->name[1], sc->sc_ethif->num, sc->sc_session, message)); + sc->sc_state = PPPOE_STATE_INITIAL; + ppp_link_end(sc->pcb); /* notify upper layers - /!\ dangerous /!\ - see pppoe_disc_input() */ +} +#endif /* UNUSED */ +#endif /* PPP_SUPPORT && PPPOE_SUPPORT */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/netif/ppp/pppol2tp.c b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/netif/ppp/pppol2tp.c new file mode 100644 index 0000000..af366d4 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/netif/ppp/pppol2tp.c @@ -0,0 +1,1159 @@ +/** + * @file + * Network Point to Point Protocol over Layer 2 Tunneling Protocol program file. + * + */ + +/* + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + */ + +/* + * L2TP Support status: + * + * Supported: + * - L2TPv2 (PPP over L2TP, a.k.a. UDP tunnels) + * - LAC + * + * Not supported: + * - LNS (require PPP server support) + * - L2TPv3 ethernet pseudowires + * - L2TPv3 VLAN pseudowire + * - L2TPv3 PPP pseudowires + * - L2TPv3 IP encapsulation + * - L2TPv3 IP pseudowire + * - L2TP tunnel switching - http://tools.ietf.org/html/draft-ietf-l2tpext-tunnel-switching-08 + * - Multiple tunnels per UDP socket, as well as multiple sessions per tunnel + * - Hidden AVPs + */ + +#include "netif/ppp/ppp_opts.h" +#if PPP_SUPPORT && PPPOL2TP_SUPPORT /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/err.h" +#include "lwip/memp.h" +#include "lwip/netif.h" +#include "lwip/udp.h" +#include "lwip/snmp.h" + +#include "netif/ppp/ppp_impl.h" +#include "netif/ppp/lcp.h" +#include "netif/ppp/ipcp.h" +#include "netif/ppp/pppol2tp.h" +#include "netif/ppp/pppcrypt.h" +#include "netif/ppp/magic.h" + +/* Memory pool */ +LWIP_MEMPOOL_DECLARE(PPPOL2TP_PCB, MEMP_NUM_PPPOL2TP_INTERFACES, sizeof(pppol2tp_pcb), "PPPOL2TP_PCB") + +/* callbacks called from PPP core */ +static err_t pppol2tp_write(ppp_pcb *ppp, void *ctx, struct pbuf *p); +static err_t pppol2tp_netif_output(ppp_pcb *ppp, void *ctx, struct pbuf *p, u_short protocol); +static err_t pppol2tp_destroy(ppp_pcb *ppp, void *ctx); /* Destroy a L2TP control block */ +static void pppol2tp_connect(ppp_pcb *ppp, void *ctx); /* Be a LAC, connect to a LNS. */ +static void pppol2tp_disconnect(ppp_pcb *ppp, void *ctx); /* Disconnect */ + + /* Prototypes for procedures local to this file. */ +static void pppol2tp_input(void *arg, struct udp_pcb *pcb, struct pbuf *p, const ip_addr_t *addr, u16_t port); +static void pppol2tp_dispatch_control_packet(pppol2tp_pcb *l2tp, u16_t port, struct pbuf *p, u16_t ns, u16_t nr); +static void pppol2tp_timeout(void *arg); +static void pppol2tp_abort_connect(pppol2tp_pcb *l2tp); +static err_t pppol2tp_send_sccrq(pppol2tp_pcb *l2tp); +static err_t pppol2tp_send_scccn(pppol2tp_pcb *l2tp, u16_t ns); +static err_t pppol2tp_send_icrq(pppol2tp_pcb *l2tp, u16_t ns); +static err_t pppol2tp_send_iccn(pppol2tp_pcb *l2tp, u16_t ns); +static err_t pppol2tp_send_zlb(pppol2tp_pcb *l2tp, u16_t ns, u16_t nr); +static err_t pppol2tp_send_stopccn(pppol2tp_pcb *l2tp, u16_t ns); +static err_t pppol2tp_xmit(pppol2tp_pcb *l2tp, struct pbuf *pb); +static err_t pppol2tp_udp_send(pppol2tp_pcb *l2tp, struct pbuf *pb); + +/* Callbacks structure for PPP core */ +static const struct link_callbacks pppol2tp_callbacks = { + pppol2tp_connect, +#if PPP_SERVER + NULL, +#endif /* PPP_SERVER */ + pppol2tp_disconnect, + pppol2tp_destroy, + pppol2tp_write, + pppol2tp_netif_output, + NULL, + NULL +}; + + +/* Create a new L2TP session. 
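+ * (The PCB acts as a LAC connecting to the LNS given by ipaddr/port; secret, when non-NULL, is used for tunnel authentication if PPPOL2TP_AUTH_SUPPORT is enabled.)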
*/ +ppp_pcb *pppol2tp_create(struct netif *pppif, + struct netif *netif, const ip_addr_t *ipaddr, u16_t port, + const u8_t *secret, u8_t secret_len, + ppp_link_status_cb_fn link_status_cb, void *ctx_cb) { + ppp_pcb *ppp; + pppol2tp_pcb *l2tp; + struct udp_pcb *udp; +#if !PPPOL2TP_AUTH_SUPPORT + LWIP_UNUSED_ARG(secret); + LWIP_UNUSED_ARG(secret_len); +#endif /* !PPPOL2TP_AUTH_SUPPORT */ + + if (ipaddr == NULL) { + goto ipaddr_check_failed; + } + + l2tp = (pppol2tp_pcb *)LWIP_MEMPOOL_ALLOC(PPPOL2TP_PCB); + if (l2tp == NULL) { + goto memp_malloc_l2tp_failed; + } + + udp = udp_new_ip_type(IP_GET_TYPE(ipaddr)); + if (udp == NULL) { + goto udp_new_failed; + } + udp_recv(udp, pppol2tp_input, l2tp); + + ppp = ppp_new(pppif, &pppol2tp_callbacks, l2tp, link_status_cb, ctx_cb); + if (ppp == NULL) { + goto ppp_new_failed; + } + + memset(l2tp, 0, sizeof(pppol2tp_pcb)); + l2tp->phase = PPPOL2TP_STATE_INITIAL; + l2tp->ppp = ppp; + l2tp->udp = udp; + l2tp->netif = netif; + ip_addr_copy(l2tp->remote_ip, *ipaddr); + l2tp->remote_port = port; +#if PPPOL2TP_AUTH_SUPPORT + l2tp->secret = secret; + l2tp->secret_len = secret_len; +#endif /* PPPOL2TP_AUTH_SUPPORT */ + + return ppp; + +ppp_new_failed: + udp_remove(udp); +udp_new_failed: + LWIP_MEMPOOL_FREE(PPPOL2TP_PCB, l2tp); +memp_malloc_l2tp_failed: +ipaddr_check_failed: + return NULL; +} + +/* Called by PPP core */ +static err_t pppol2tp_write(ppp_pcb *ppp, void *ctx, struct pbuf *p) { + pppol2tp_pcb *l2tp = (pppol2tp_pcb *)ctx; + struct pbuf *ph; /* UDP + L2TP header */ + err_t ret; +#if MIB2_STATS + u16_t tot_len; +#else /* MIB2_STATS */ + LWIP_UNUSED_ARG(ppp); +#endif /* MIB2_STATS */ + + ph = pbuf_alloc(PBUF_TRANSPORT, (u16_t)(PPPOL2TP_OUTPUT_DATA_HEADER_LEN), PBUF_RAM); + if(!ph) { + LINK_STATS_INC(link.memerr); + LINK_STATS_INC(link.proterr); + MIB2_STATS_NETIF_INC(ppp->netif, ifoutdiscards); + pbuf_free(p); + return ERR_MEM; + } + + pbuf_remove_header(ph, PPPOL2TP_OUTPUT_DATA_HEADER_LEN); /* hide L2TP header */ + pbuf_cat(ph, p); +#if MIB2_STATS + tot_len = ph->tot_len; +#endif /* MIB2_STATS */ + + ret = pppol2tp_xmit(l2tp, ph); + if (ret != ERR_OK) { + LINK_STATS_INC(link.err); + MIB2_STATS_NETIF_INC(ppp->netif, ifoutdiscards); + return ret; + } + + MIB2_STATS_NETIF_ADD(ppp->netif, ifoutoctets, (u16_t)tot_len); + MIB2_STATS_NETIF_INC(ppp->netif, ifoutucastpkts); + LINK_STATS_INC(link.xmit); + return ERR_OK; +} + +/* Called by PPP core */ +static err_t pppol2tp_netif_output(ppp_pcb *ppp, void *ctx, struct pbuf *p, u_short protocol) { + pppol2tp_pcb *l2tp = (pppol2tp_pcb *)ctx; + struct pbuf *pb; + u8_t *pl; + err_t err; +#if MIB2_STATS + u16_t tot_len; +#else /* MIB2_STATS */ + LWIP_UNUSED_ARG(ppp); +#endif /* MIB2_STATS */ + + /* @todo: try to use pbuf_header() here! 
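+ * For now a separate header pbuf (L2TP header room plus the 2-byte protocol field) is allocated and chained in front of the payload.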
*/ + pb = pbuf_alloc(PBUF_TRANSPORT, PPPOL2TP_OUTPUT_DATA_HEADER_LEN + sizeof(protocol), PBUF_RAM); + if(!pb) { + LINK_STATS_INC(link.memerr); + LINK_STATS_INC(link.proterr); + MIB2_STATS_NETIF_INC(ppp->netif, ifoutdiscards); + return ERR_MEM; + } + + pbuf_remove_header(pb, PPPOL2TP_OUTPUT_DATA_HEADER_LEN); + + pl = (u8_t*)pb->payload; + PUTSHORT(protocol, pl); + + pbuf_chain(pb, p); +#if MIB2_STATS + tot_len = pb->tot_len; +#endif /* MIB2_STATS */ + + if( (err = pppol2tp_xmit(l2tp, pb)) != ERR_OK) { + LINK_STATS_INC(link.err); + MIB2_STATS_NETIF_INC(ppp->netif, ifoutdiscards); + return err; + } + + MIB2_STATS_NETIF_ADD(ppp->netif, ifoutoctets, tot_len); + MIB2_STATS_NETIF_INC(ppp->netif, ifoutucastpkts); + LINK_STATS_INC(link.xmit); + return ERR_OK; +} + +/* Destroy a L2TP control block */ +static err_t pppol2tp_destroy(ppp_pcb *ppp, void *ctx) { + pppol2tp_pcb *l2tp = (pppol2tp_pcb *)ctx; + LWIP_UNUSED_ARG(ppp); + + sys_untimeout(pppol2tp_timeout, l2tp); + udp_remove(l2tp->udp); + LWIP_MEMPOOL_FREE(PPPOL2TP_PCB, l2tp); + return ERR_OK; +} + +/* Be a LAC, connect to a LNS. */ +static void pppol2tp_connect(ppp_pcb *ppp, void *ctx) { + err_t err; + pppol2tp_pcb *l2tp = (pppol2tp_pcb *)ctx; + lcp_options *lcp_wo; + lcp_options *lcp_ao; +#if PPP_IPV4_SUPPORT && VJ_SUPPORT + ipcp_options *ipcp_wo; + ipcp_options *ipcp_ao; +#endif /* PPP_IPV4_SUPPORT && VJ_SUPPORT */ + + l2tp->tunnel_port = l2tp->remote_port; + l2tp->our_ns = 0; + l2tp->peer_nr = 0; + l2tp->peer_ns = 0; + l2tp->source_tunnel_id = 0; + l2tp->remote_tunnel_id = 0; + l2tp->source_session_id = 0; + l2tp->remote_session_id = 0; + /* l2tp->*_retried are cleared when used */ + + lcp_wo = &ppp->lcp_wantoptions; + lcp_wo->mru = PPPOL2TP_DEFMRU; + lcp_wo->neg_asyncmap = 0; + lcp_wo->neg_pcompression = 0; + lcp_wo->neg_accompression = 0; + lcp_wo->passive = 0; + lcp_wo->silent = 0; + + lcp_ao = &ppp->lcp_allowoptions; + lcp_ao->mru = PPPOL2TP_DEFMRU; + lcp_ao->neg_asyncmap = 0; + lcp_ao->neg_pcompression = 0; + lcp_ao->neg_accompression = 0; + +#if PPP_IPV4_SUPPORT && VJ_SUPPORT + ipcp_wo = &ppp->ipcp_wantoptions; + ipcp_wo->neg_vj = 0; + ipcp_wo->old_vj = 0; + + ipcp_ao = &ppp->ipcp_allowoptions; + ipcp_ao->neg_vj = 0; + ipcp_ao->old_vj = 0; +#endif /* PPP_IPV4_SUPPORT && VJ_SUPPORT */ + + /* Listen to a random source port, we need to do that instead of using udp_connect() + * because the L2TP LNS might answer with its own random source port (!= 1701) + */ +#if LWIP_IPV6 + if (IP_IS_V6_VAL(l2tp->udp->local_ip)) { + udp_bind(l2tp->udp, IP6_ADDR_ANY, 0); + } else +#endif /* LWIP_IPV6 */ + udp_bind(l2tp->udp, IP_ADDR_ANY, 0); + +#if PPPOL2TP_AUTH_SUPPORT + /* Generate random vector */ + if (l2tp->secret != NULL) { + magic_random_bytes(l2tp->secret_rv, sizeof(l2tp->secret_rv)); + } +#endif /* PPPOL2TP_AUTH_SUPPORT */ + + do { + l2tp->remote_tunnel_id = magic(); + } while(l2tp->remote_tunnel_id == 0); + /* save state, in case we fail to send SCCRQ */ + l2tp->sccrq_retried = 0; + l2tp->phase = PPPOL2TP_STATE_SCCRQ_SENT; + if ((err = pppol2tp_send_sccrq(l2tp)) != 0) { + PPPDEBUG(LOG_DEBUG, ("pppol2tp: failed to send SCCRQ, error=%d\n", err)); + } + sys_timeout(PPPOL2TP_CONTROL_TIMEOUT, pppol2tp_timeout, l2tp); +} + +/* Disconnect */ +static void pppol2tp_disconnect(ppp_pcb *ppp, void *ctx) { + pppol2tp_pcb *l2tp = (pppol2tp_pcb *)ctx; + + l2tp->our_ns++; + pppol2tp_send_stopccn(l2tp, l2tp->our_ns); + + /* stop any timer, disconnect can be called while initiating is in progress */ + sys_untimeout(pppol2tp_timeout, l2tp); + l2tp->phase = 
PPPOL2TP_STATE_INITIAL; + ppp_link_end(ppp); /* notify upper layers */ +} + +/* UDP Callback for incoming IPv4 L2TP frames */ +static void pppol2tp_input(void *arg, struct udp_pcb *pcb, struct pbuf *p, const ip_addr_t *addr, u16_t port) { + pppol2tp_pcb *l2tp = (pppol2tp_pcb*)arg; + u16_t hflags, hlen, len=0, tunnel_id=0, session_id=0, ns=0, nr=0, offset=0; + u8_t *inp; + LWIP_UNUSED_ARG(pcb); + + /* we can't unbound a UDP pcb, thus we can still receive UDP frames after the link is closed */ + if (l2tp->phase < PPPOL2TP_STATE_SCCRQ_SENT) { + goto free_and_return; + } + + if (!ip_addr_cmp(&l2tp->remote_ip, addr)) { + goto free_and_return; + } + + /* discard packet if port mismatch, but only if we received a SCCRP */ + if (l2tp->phase > PPPOL2TP_STATE_SCCRQ_SENT && l2tp->tunnel_port != port) { + goto free_and_return; + } + + /* printf("-----------\nL2TP INPUT, %d\n", p->len); */ + + /* L2TP header */ + if (p->len < sizeof(hflags) + sizeof(tunnel_id) + sizeof(session_id) ) { + goto packet_too_short; + } + + inp = (u8_t*)p->payload; + GETSHORT(hflags, inp); + + if (hflags & PPPOL2TP_HEADERFLAG_CONTROL) { + /* check mandatory flags for a control packet */ + if ( (hflags & PPPOL2TP_HEADERFLAG_CONTROL_MANDATORY) != PPPOL2TP_HEADERFLAG_CONTROL_MANDATORY ) { + PPPDEBUG(LOG_DEBUG, ("pppol2tp: mandatory header flags for control packet not set\n")); + goto free_and_return; + } + /* check forbidden flags for a control packet */ + if (hflags & PPPOL2TP_HEADERFLAG_CONTROL_FORBIDDEN) { + PPPDEBUG(LOG_DEBUG, ("pppol2tp: forbidden header flags for control packet found\n")); + goto free_and_return; + } + } else { + /* check mandatory flags for a data packet */ + if ( (hflags & PPPOL2TP_HEADERFLAG_DATA_MANDATORY) != PPPOL2TP_HEADERFLAG_DATA_MANDATORY) { + PPPDEBUG(LOG_DEBUG, ("pppol2tp: mandatory header flags for data packet not set\n")); + goto free_and_return; + } + } + + /* Expected header size */ + hlen = sizeof(hflags) + sizeof(tunnel_id) + sizeof(session_id); + if (hflags & PPPOL2TP_HEADERFLAG_LENGTH) { + hlen += sizeof(len); + } + if (hflags & PPPOL2TP_HEADERFLAG_SEQUENCE) { + hlen += sizeof(ns) + sizeof(nr); + } + if (hflags & PPPOL2TP_HEADERFLAG_OFFSET) { + hlen += sizeof(offset); + } + if (p->len < hlen) { + goto packet_too_short; + } + + if (hflags & PPPOL2TP_HEADERFLAG_LENGTH) { + GETSHORT(len, inp); + if (p->len < len || len < hlen) { + goto packet_too_short; + } + } + GETSHORT(tunnel_id, inp); + GETSHORT(session_id, inp); + if (hflags & PPPOL2TP_HEADERFLAG_SEQUENCE) { + GETSHORT(ns, inp); + GETSHORT(nr, inp); + } + if (hflags & PPPOL2TP_HEADERFLAG_OFFSET) { + GETSHORT(offset, inp) + if (offset > 4096) { /* don't be fooled with large offset which might overflow hlen */ + PPPDEBUG(LOG_DEBUG, ("pppol2tp: strange packet received, offset=%d\n", offset)); + goto free_and_return; + } + hlen += offset; + if (p->len < hlen) { + goto packet_too_short; + } + INCPTR(offset, inp); + } + + /* printf("HLEN = %d\n", hlen); */ + + /* skip L2TP header */ + if (pbuf_remove_header(p, hlen) != 0) { + goto free_and_return; + } + + /* printf("LEN=%d, TUNNEL_ID=%d, SESSION_ID=%d, NS=%d, NR=%d, OFFSET=%d\n", len, tunnel_id, session_id, ns, nr, offset); */ + PPPDEBUG(LOG_DEBUG, ("pppol2tp: input packet, len=%"U16_F", tunnel=%"U16_F", session=%"U16_F", ns=%"U16_F", nr=%"U16_F"\n", + len, tunnel_id, session_id, ns, nr)); + + /* Control packet */ + if (hflags & PPPOL2TP_HEADERFLAG_CONTROL) { + pppol2tp_dispatch_control_packet(l2tp, port, p, ns, nr); + goto free_and_return; + } + + /* Data packet */ + if(l2tp->phase != 
PPPOL2TP_STATE_DATA) { + goto free_and_return; + } + if(tunnel_id != l2tp->remote_tunnel_id) { + PPPDEBUG(LOG_DEBUG, ("pppol2tp: tunnel ID mismatch, assigned=%d, received=%d\n", l2tp->remote_tunnel_id, tunnel_id)); + goto free_and_return; + } + if(session_id != l2tp->remote_session_id) { + PPPDEBUG(LOG_DEBUG, ("pppol2tp: session ID mismatch, assigned=%d, received=%d\n", l2tp->remote_session_id, session_id)); + goto free_and_return; + } + /* + * skip address & flags if necessary + * + * RFC 2661 does not specify whether the PPP frame in the L2TP payload should + * have a HDLC header or not. We handle both cases for compatibility. + */ + if (p->len >= 2) { + GETSHORT(hflags, inp); + if (hflags == 0xff03) { + pbuf_remove_header(p, 2); + } + } + /* Dispatch the packet thereby consuming it. */ + ppp_input(l2tp->ppp, p); + return; + +packet_too_short: + PPPDEBUG(LOG_DEBUG, ("pppol2tp: packet too short: %d\n", p->len)); +free_and_return: + pbuf_free(p); +} + +/* L2TP Control packet entry point */ +static void pppol2tp_dispatch_control_packet(pppol2tp_pcb *l2tp, u16_t port, struct pbuf *p, u16_t ns, u16_t nr) { + u8_t *inp; + u16_t avplen, avpflags, vendorid, attributetype, messagetype=0; + err_t err; +#if PPPOL2TP_AUTH_SUPPORT + lwip_md5_context md5_ctx; + u8_t md5_hash[16]; + u8_t challenge_id = 0; +#endif /* PPPOL2TP_AUTH_SUPPORT */ + + /* printf("L2TP CTRL INPUT, ns=%d, nr=%d, len=%d\n", ns, nr, p->len); */ + + /* Drop unexpected packet */ + if (ns != l2tp->peer_ns) { + PPPDEBUG(LOG_DEBUG, ("pppol2tp: drop unexpected packet: received NS=%d, expected NS=%d\n", ns, l2tp->peer_ns)); + /* + * In order to ensure that all messages are acknowledged properly + * (particularly in the case of a lost ZLB ACK message), receipt + * of duplicate messages MUST be acknowledged. + * + * In this very special case we Ack a packet we previously received. + * Therefore our NS is the NR we just received. And our NR is the + * NS we just received plus one. 
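+ * (A ZLB acknowledgement carries no AVPs and does not consume an NS slot, see the ZLB handling below.)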
+ */ + if ((s16_t)(ns - l2tp->peer_ns) < 0) { + pppol2tp_send_zlb(l2tp, nr, ns+1); + } + return; + } + + l2tp->peer_nr = nr; + + /* Handle the special case of the ICCN acknowledge */ + if (l2tp->phase == PPPOL2TP_STATE_ICCN_SENT && (s16_t)(l2tp->peer_nr - l2tp->our_ns) > 0) { + l2tp->phase = PPPOL2TP_STATE_DATA; + sys_untimeout(pppol2tp_timeout, l2tp); + ppp_start(l2tp->ppp); /* notify upper layers */ + } + + /* ZLB packets */ + if (p->tot_len == 0) { + return; + } + /* A ZLB packet does not consume a NS slot thus we don't record the NS value for ZLB packets */ + l2tp->peer_ns = ns+1; + + p = pbuf_coalesce(p, PBUF_RAW); + inp = (u8_t*)p->payload; + /* Decode AVPs */ + while (p->len > 0) { + if (p->len < sizeof(avpflags) + sizeof(vendorid) + sizeof(attributetype) ) { + goto packet_too_short; + } + GETSHORT(avpflags, inp); + avplen = avpflags & PPPOL2TP_AVPHEADERFLAG_LENGTHMASK; + /* printf("AVPLEN = %d\n", avplen); */ + if (p->len < avplen || avplen < sizeof(avpflags) + sizeof(vendorid) + sizeof(attributetype)) { + goto packet_too_short; + } + GETSHORT(vendorid, inp); + GETSHORT(attributetype, inp); + avplen -= sizeof(avpflags) + sizeof(vendorid) + sizeof(attributetype); + + /* Message type must be the first AVP */ + if (messagetype == 0) { + if (attributetype != 0 || vendorid != 0 || avplen != sizeof(messagetype) ) { + PPPDEBUG(LOG_DEBUG, ("pppol2tp: message type must be the first AVP\n")); + return; + } + GETSHORT(messagetype, inp); + /* printf("Message type = %d\n", messagetype); */ + switch(messagetype) { + /* Start Control Connection Reply */ + case PPPOL2TP_MESSAGETYPE_SCCRP: + /* Only accept SCCRP packet if we sent a SCCRQ */ + if (l2tp->phase != PPPOL2TP_STATE_SCCRQ_SENT) { + goto send_zlb; + } + break; + /* Incoming Call Reply */ + case PPPOL2TP_MESSAGETYPE_ICRP: + /* Only accept ICRP packet if we sent a IRCQ */ + if (l2tp->phase != PPPOL2TP_STATE_ICRQ_SENT) { + goto send_zlb; + } + break; + /* Stop Control Connection Notification */ + case PPPOL2TP_MESSAGETYPE_STOPCCN: + pppol2tp_send_zlb(l2tp, l2tp->our_ns+1, l2tp->peer_ns); /* Ack the StopCCN before we switch to down state */ + if (l2tp->phase < PPPOL2TP_STATE_DATA) { + pppol2tp_abort_connect(l2tp); + } else if (l2tp->phase == PPPOL2TP_STATE_DATA) { + /* Don't disconnect here, we let the LCP Echo/Reply find the fact + * that PPP session is down. Asking the PPP stack to end the session + * require strict checking about the PPP phase to prevent endless + * disconnection loops. 
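+ * (This mirrors the PPPoE PADT handling in pppoe.c.)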
+ */ + } + return; + default: + break; + } + goto nextavp; + } + + /* Skip proprietary L2TP extensions */ + if (vendorid != 0) { + goto skipavp; + } + + switch (messagetype) { + /* Start Control Connection Reply */ + case PPPOL2TP_MESSAGETYPE_SCCRP: + switch (attributetype) { + case PPPOL2TP_AVPTYPE_TUNNELID: + if (avplen != sizeof(l2tp->source_tunnel_id) ) { + PPPDEBUG(LOG_DEBUG, ("pppol2tp: AVP Assign tunnel ID length check failed\n")); + return; + } + GETSHORT(l2tp->source_tunnel_id, inp); + PPPDEBUG(LOG_DEBUG, ("pppol2tp: Assigned tunnel ID %"U16_F"\n", l2tp->source_tunnel_id)); + goto nextavp; +#if PPPOL2TP_AUTH_SUPPORT + case PPPOL2TP_AVPTYPE_CHALLENGE: + if (avplen == 0) { + PPPDEBUG(LOG_DEBUG, ("pppol2tp: Challenge length check failed\n")); + return; + } + if (l2tp->secret == NULL) { + PPPDEBUG(LOG_DEBUG, ("pppol2tp: Received challenge from peer and no secret key available\n")); + pppol2tp_abort_connect(l2tp); + return; + } + /* Generate hash of ID, secret, challenge */ + lwip_md5_init(&md5_ctx); + lwip_md5_starts(&md5_ctx); + challenge_id = PPPOL2TP_MESSAGETYPE_SCCCN; + lwip_md5_update(&md5_ctx, &challenge_id, 1); + lwip_md5_update(&md5_ctx, l2tp->secret, l2tp->secret_len); + lwip_md5_update(&md5_ctx, inp, avplen); + lwip_md5_finish(&md5_ctx, l2tp->challenge_hash); + lwip_md5_free(&md5_ctx); + l2tp->send_challenge = 1; + goto skipavp; + case PPPOL2TP_AVPTYPE_CHALLENGERESPONSE: + if (avplen != PPPOL2TP_AVPTYPE_CHALLENGERESPONSE_SIZE) { + PPPDEBUG(LOG_DEBUG, ("pppol2tp: AVP Challenge Response length check failed\n")); + return; + } + /* Generate hash of ID, secret, challenge */ + lwip_md5_init(&md5_ctx); + lwip_md5_starts(&md5_ctx); + challenge_id = PPPOL2TP_MESSAGETYPE_SCCRP; + lwip_md5_update(&md5_ctx, &challenge_id, 1); + lwip_md5_update(&md5_ctx, l2tp->secret, l2tp->secret_len); + lwip_md5_update(&md5_ctx, l2tp->secret_rv, sizeof(l2tp->secret_rv)); + lwip_md5_finish(&md5_ctx, md5_hash); + lwip_md5_free(&md5_ctx); + if ( memcmp(inp, md5_hash, sizeof(md5_hash)) ) { + PPPDEBUG(LOG_DEBUG, ("pppol2tp: Received challenge response from peer and secret key do not match\n")); + pppol2tp_abort_connect(l2tp); + return; + } + goto skipavp; +#endif /* PPPOL2TP_AUTH_SUPPORT */ + default: + break; + } + break; + /* Incoming Call Reply */ + case PPPOL2TP_MESSAGETYPE_ICRP: + switch (attributetype) { + case PPPOL2TP_AVPTYPE_SESSIONID: + if (avplen != sizeof(l2tp->source_session_id) ) { + PPPDEBUG(LOG_DEBUG, ("pppol2tp: AVP Assign session ID length check failed\n")); + return; + } + GETSHORT(l2tp->source_session_id, inp); + PPPDEBUG(LOG_DEBUG, ("pppol2tp: Assigned session ID %"U16_F"\n", l2tp->source_session_id)); + goto nextavp; + default: + break; + } + break; + default: + break; + } + +skipavp: + INCPTR(avplen, inp); +nextavp: + /* printf("AVP Found, vendor=%d, attribute=%d, len=%d\n", vendorid, attributetype, avplen); */ + /* next AVP */ + if (pbuf_remove_header(p, avplen + sizeof(avpflags) + sizeof(vendorid) + sizeof(attributetype)) != 0) { + return; + } + } + + switch(messagetype) { + /* Start Control Connection Reply */ + case PPPOL2TP_MESSAGETYPE_SCCRP: + do { + l2tp->remote_session_id = magic(); + } while(l2tp->remote_session_id == 0); + l2tp->tunnel_port = port; /* LNS server might have chosen its own local port */ + l2tp->icrq_retried = 0; + l2tp->phase = PPPOL2TP_STATE_ICRQ_SENT; + l2tp->our_ns++; + if ((err = pppol2tp_send_scccn(l2tp, l2tp->our_ns)) != 0) { + PPPDEBUG(LOG_DEBUG, ("pppol2tp: failed to send SCCCN, error=%d\n", err)); + LWIP_UNUSED_ARG(err); /* if PPPDEBUG is disabled */ 
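+ /* Not fatal: the control timeout retransmits the SCCCN while it remains unacknowledged. */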
+ } + l2tp->our_ns++; + if ((err = pppol2tp_send_icrq(l2tp, l2tp->our_ns)) != 0) { + PPPDEBUG(LOG_DEBUG, ("pppol2tp: failed to send ICRQ, error=%d\n", err)); + LWIP_UNUSED_ARG(err); /* if PPPDEBUG is disabled */ + } + sys_untimeout(pppol2tp_timeout, l2tp); + sys_timeout(PPPOL2TP_CONTROL_TIMEOUT, pppol2tp_timeout, l2tp); + break; + /* Incoming Call Reply */ + case PPPOL2TP_MESSAGETYPE_ICRP: + l2tp->iccn_retried = 0; + l2tp->phase = PPPOL2TP_STATE_ICCN_SENT; + l2tp->our_ns++; + if ((err = pppol2tp_send_iccn(l2tp, l2tp->our_ns)) != 0) { + PPPDEBUG(LOG_DEBUG, ("pppol2tp: failed to send ICCN, error=%d\n", err)); + LWIP_UNUSED_ARG(err); /* if PPPDEBUG is disabled */ + } + sys_untimeout(pppol2tp_timeout, l2tp); + sys_timeout(PPPOL2TP_CONTROL_TIMEOUT, pppol2tp_timeout, l2tp); + break; + /* Unhandled packet, send ZLB ACK */ + default: + goto send_zlb; + } + return; + +send_zlb: + pppol2tp_send_zlb(l2tp, l2tp->our_ns+1, l2tp->peer_ns); + return; +packet_too_short: + PPPDEBUG(LOG_DEBUG, ("pppol2tp: packet too short: %d\n", p->len)); +} + +/* L2TP Timeout handler */ +static void pppol2tp_timeout(void *arg) { + pppol2tp_pcb *l2tp = (pppol2tp_pcb*)arg; + err_t err; + u32_t retry_wait; + + PPPDEBUG(LOG_DEBUG, ("pppol2tp: timeout\n")); + + switch (l2tp->phase) { + case PPPOL2TP_STATE_SCCRQ_SENT: + /* backoff wait */ + if (l2tp->sccrq_retried < 0xff) { + l2tp->sccrq_retried++; + } + if (!l2tp->ppp->settings.persist && l2tp->sccrq_retried >= PPPOL2TP_MAXSCCRQ) { + pppol2tp_abort_connect(l2tp); + return; + } + retry_wait = LWIP_MIN(PPPOL2TP_CONTROL_TIMEOUT * l2tp->sccrq_retried, PPPOL2TP_SLOW_RETRY); + PPPDEBUG(LOG_DEBUG, ("pppol2tp: sccrq_retried=%d\n", l2tp->sccrq_retried)); + if ((err = pppol2tp_send_sccrq(l2tp)) != 0) { + l2tp->sccrq_retried--; + PPPDEBUG(LOG_DEBUG, ("pppol2tp: failed to send SCCRQ, error=%d\n", err)); + LWIP_UNUSED_ARG(err); /* if PPPDEBUG is disabled */ + } + sys_timeout(retry_wait, pppol2tp_timeout, l2tp); + break; + + case PPPOL2TP_STATE_ICRQ_SENT: + l2tp->icrq_retried++; + if (l2tp->icrq_retried >= PPPOL2TP_MAXICRQ) { + pppol2tp_abort_connect(l2tp); + return; + } + PPPDEBUG(LOG_DEBUG, ("pppol2tp: icrq_retried=%d\n", l2tp->icrq_retried)); + if ((s16_t)(l2tp->peer_nr - l2tp->our_ns) < 0) { /* the SCCCN was not acknowledged */ + if ((err = pppol2tp_send_scccn(l2tp, l2tp->our_ns -1)) != 0) { + l2tp->icrq_retried--; + PPPDEBUG(LOG_DEBUG, ("pppol2tp: failed to send SCCCN, error=%d\n", err)); + LWIP_UNUSED_ARG(err); /* if PPPDEBUG is disabled */ + sys_timeout(PPPOL2TP_CONTROL_TIMEOUT, pppol2tp_timeout, l2tp); + break; + } + } + if ((err = pppol2tp_send_icrq(l2tp, l2tp->our_ns)) != 0) { + l2tp->icrq_retried--; + PPPDEBUG(LOG_DEBUG, ("pppol2tp: failed to send ICRQ, error=%d\n", err)); + LWIP_UNUSED_ARG(err); /* if PPPDEBUG is disabled */ + } + sys_timeout(PPPOL2TP_CONTROL_TIMEOUT, pppol2tp_timeout, l2tp); + break; + + case PPPOL2TP_STATE_ICCN_SENT: + l2tp->iccn_retried++; + if (l2tp->iccn_retried >= PPPOL2TP_MAXICCN) { + pppol2tp_abort_connect(l2tp); + return; + } + PPPDEBUG(LOG_DEBUG, ("pppol2tp: iccn_retried=%d\n", l2tp->iccn_retried)); + if ((err = pppol2tp_send_iccn(l2tp, l2tp->our_ns)) != 0) { + l2tp->iccn_retried--; + PPPDEBUG(LOG_DEBUG, ("pppol2tp: failed to send ICCN, error=%d\n", err)); + LWIP_UNUSED_ARG(err); /* if PPPDEBUG is disabled */ + } + sys_timeout(PPPOL2TP_CONTROL_TIMEOUT, pppol2tp_timeout, l2tp); + break; + + default: + return; /* all done, work in peace */ + } +} + +/* Connection attempt aborted */ +static void pppol2tp_abort_connect(pppol2tp_pcb *l2tp) { + 
PPPDEBUG(LOG_DEBUG, ("pppol2tp: could not establish connection\n")); + l2tp->phase = PPPOL2TP_STATE_INITIAL; + ppp_link_failed(l2tp->ppp); /* notify upper layers */ +} + +/* Initiate a new tunnel */ +static err_t pppol2tp_send_sccrq(pppol2tp_pcb *l2tp) { + struct pbuf *pb; + u8_t *p; + u16_t len; + + /* calculate UDP packet length */ + len = 12 +8 +8 +10 +10 +6+sizeof(PPPOL2TP_HOSTNAME)-1 +6+sizeof(PPPOL2TP_VENDORNAME)-1 +8 +8; +#if PPPOL2TP_AUTH_SUPPORT + if (l2tp->secret != NULL) { + len += 6 + sizeof(l2tp->secret_rv); + } +#endif /* PPPOL2TP_AUTH_SUPPORT */ + + /* allocate a buffer */ + pb = pbuf_alloc(PBUF_TRANSPORT, len, PBUF_RAM); + if (pb == NULL) { + return ERR_MEM; + } + LWIP_ASSERT("pb->tot_len == pb->len", pb->tot_len == pb->len); + + p = (u8_t*)pb->payload; + /* fill in pkt */ + /* L2TP control header */ + PUTSHORT(PPPOL2TP_HEADERFLAG_CONTROL_MANDATORY, p); + PUTSHORT(len, p); /* Length */ + PUTSHORT(0, p); /* Tunnel Id */ + PUTSHORT(0, p); /* Session Id */ + PUTSHORT(0, p); /* NS Sequence number - to peer */ + PUTSHORT(0, p); /* NR Sequence number - expected for peer */ + + /* AVP - Message type */ + PUTSHORT(PPPOL2TP_AVPHEADERFLAG_MANDATORY + 8, p); /* Mandatory flag + len field */ + PUTSHORT(0, p); /* Vendor ID */ + PUTSHORT(PPPOL2TP_AVPTYPE_MESSAGE, p); /* Attribute type: Message Type */ + PUTSHORT(PPPOL2TP_MESSAGETYPE_SCCRQ, p); /* Attribute value: Message type: SCCRQ */ + + /* AVP - L2TP Version */ + PUTSHORT(PPPOL2TP_AVPHEADERFLAG_MANDATORY + 8, p); /* Mandatory flag + len field */ + PUTSHORT(0, p); /* Vendor ID */ + PUTSHORT(PPPOL2TP_AVPTYPE_VERSION, p); /* Attribute type: Version */ + PUTSHORT(PPPOL2TP_VERSION, p); /* Attribute value: L2TP Version */ + + /* AVP - Framing capabilities */ + PUTSHORT(PPPOL2TP_AVPHEADERFLAG_MANDATORY + 10, p); /* Mandatory flag + len field */ + PUTSHORT(0, p); /* Vendor ID */ + PUTSHORT(PPPOL2TP_AVPTYPE_FRAMINGCAPABILITIES, p); /* Attribute type: Framing capabilities */ + PUTLONG(PPPOL2TP_FRAMINGCAPABILITIES, p); /* Attribute value: Framing capabilities */ + + /* AVP - Bearer capabilities */ + PUTSHORT(PPPOL2TP_AVPHEADERFLAG_MANDATORY + 10, p); /* Mandatory flag + len field */ + PUTSHORT(0, p); /* Vendor ID */ + PUTSHORT(PPPOL2TP_AVPTYPE_BEARERCAPABILITIES, p); /* Attribute type: Bearer capabilities */ + PUTLONG(PPPOL2TP_BEARERCAPABILITIES, p); /* Attribute value: Bearer capabilities */ + + /* AVP - Host name */ + PUTSHORT(PPPOL2TP_AVPHEADERFLAG_MANDATORY + 6+sizeof(PPPOL2TP_HOSTNAME)-1, p); /* Mandatory flag + len field */ + PUTSHORT(0, p); /* Vendor ID */ + PUTSHORT(PPPOL2TP_AVPTYPE_HOSTNAME, p); /* Attribute type: Hostname */ + MEMCPY(p, PPPOL2TP_HOSTNAME, sizeof(PPPOL2TP_HOSTNAME)-1); /* Attribute value: Hostname */ + INCPTR(sizeof(PPPOL2TP_HOSTNAME)-1, p); + + /* AVP - Vendor name */ + PUTSHORT(6+sizeof(PPPOL2TP_VENDORNAME)-1, p); /* len field */ + PUTSHORT(0, p); /* Vendor ID */ + PUTSHORT(PPPOL2TP_AVPTYPE_VENDORNAME, p); /* Attribute type: Vendor name */ + MEMCPY(p, PPPOL2TP_VENDORNAME, sizeof(PPPOL2TP_VENDORNAME)-1); /* Attribute value: Vendor name */ + INCPTR(sizeof(PPPOL2TP_VENDORNAME)-1, p); + + /* AVP - Assign tunnel ID */ + PUTSHORT(PPPOL2TP_AVPHEADERFLAG_MANDATORY + 8, p); /* Mandatory flag + len field */ + PUTSHORT(0, p); /* Vendor ID */ + PUTSHORT(PPPOL2TP_AVPTYPE_TUNNELID, p); /* Attribute type: Tunnel ID */ + PUTSHORT(l2tp->remote_tunnel_id, p); /* Attribute value: Tunnel ID */ + + /* AVP - Receive window size */ + PUTSHORT(PPPOL2TP_AVPHEADERFLAG_MANDATORY + 8, p); /* Mandatory flag + len field */ + PUTSHORT(0, p); /* 
Vendor ID */ + PUTSHORT(PPPOL2TP_AVPTYPE_RECEIVEWINDOWSIZE, p); /* Attribute type: Receive window size */ + PUTSHORT(PPPOL2TP_RECEIVEWINDOWSIZE, p); /* Attribute value: Receive window size */ + +#if PPPOL2TP_AUTH_SUPPORT + /* AVP - Challenge */ + if (l2tp->secret != NULL) { + PUTSHORT(PPPOL2TP_AVPHEADERFLAG_MANDATORY + 6 + sizeof(l2tp->secret_rv), p); /* Mandatory flag + len field */ + PUTSHORT(0, p); /* Vendor ID */ + PUTSHORT(PPPOL2TP_AVPTYPE_CHALLENGE, p); /* Attribute type: Challenge */ + MEMCPY(p, l2tp->secret_rv, sizeof(l2tp->secret_rv)); /* Attribute value: Random vector */ + INCPTR(sizeof(l2tp->secret_rv), p); + } +#endif /* PPPOL2TP_AUTH_SUPPORT */ + + return pppol2tp_udp_send(l2tp, pb); +} + +/* Complete tunnel establishment */ +static err_t pppol2tp_send_scccn(pppol2tp_pcb *l2tp, u16_t ns) { + struct pbuf *pb; + u8_t *p; + u16_t len; + + /* calculate UDP packet length */ + len = 12 +8; +#if PPPOL2TP_AUTH_SUPPORT + if (l2tp->send_challenge) { + len += 6 + sizeof(l2tp->challenge_hash); + } +#endif /* PPPOL2TP_AUTH_SUPPORT */ + + /* allocate a buffer */ + pb = pbuf_alloc(PBUF_TRANSPORT, len, PBUF_RAM); + if (pb == NULL) { + return ERR_MEM; + } + LWIP_ASSERT("pb->tot_len == pb->len", pb->tot_len == pb->len); + + p = (u8_t*)pb->payload; + /* fill in pkt */ + /* L2TP control header */ + PUTSHORT(PPPOL2TP_HEADERFLAG_CONTROL_MANDATORY, p); + PUTSHORT(len, p); /* Length */ + PUTSHORT(l2tp->source_tunnel_id, p); /* Tunnel Id */ + PUTSHORT(0, p); /* Session Id */ + PUTSHORT(ns, p); /* NS Sequence number - to peer */ + PUTSHORT(l2tp->peer_ns, p); /* NR Sequence number - expected for peer */ + + /* AVP - Message type */ + PUTSHORT(PPPOL2TP_AVPHEADERFLAG_MANDATORY + 8, p); /* Mandatory flag + len field */ + PUTSHORT(0, p); /* Vendor ID */ + PUTSHORT(PPPOL2TP_AVPTYPE_MESSAGE, p); /* Attribute type: Message Type */ + PUTSHORT(PPPOL2TP_MESSAGETYPE_SCCCN, p); /* Attribute value: Message type: SCCCN */ + +#if PPPOL2TP_AUTH_SUPPORT + /* AVP - Challenge response */ + if (l2tp->send_challenge) { + PUTSHORT(PPPOL2TP_AVPHEADERFLAG_MANDATORY + 6 + sizeof(l2tp->challenge_hash), p); /* Mandatory flag + len field */ + PUTSHORT(0, p); /* Vendor ID */ + PUTSHORT(PPPOL2TP_AVPTYPE_CHALLENGERESPONSE, p); /* Attribute type: Challenge response */ + MEMCPY(p, l2tp->challenge_hash, sizeof(l2tp->challenge_hash)); /* Attribute value: Computed challenge */ + INCPTR(sizeof(l2tp->challenge_hash), p); + } +#endif /* PPPOL2TP_AUTH_SUPPORT */ + + return pppol2tp_udp_send(l2tp, pb); +} + +/* Initiate a new session */ +static err_t pppol2tp_send_icrq(pppol2tp_pcb *l2tp, u16_t ns) { + struct pbuf *pb; + u8_t *p; + u16_t len; + u32_t serialnumber; + + /* calculate UDP packet length */ + len = 12 +8 +8 +10; + + /* allocate a buffer */ + pb = pbuf_alloc(PBUF_TRANSPORT, len, PBUF_RAM); + if (pb == NULL) { + return ERR_MEM; + } + LWIP_ASSERT("pb->tot_len == pb->len", pb->tot_len == pb->len); + + p = (u8_t*)pb->payload; + /* fill in pkt */ + /* L2TP control header */ + PUTSHORT(PPPOL2TP_HEADERFLAG_CONTROL_MANDATORY, p); + PUTSHORT(len, p); /* Length */ + PUTSHORT(l2tp->source_tunnel_id, p); /* Tunnel Id */ + PUTSHORT(0, p); /* Session Id */ + PUTSHORT(ns, p); /* NS Sequence number - to peer */ + PUTSHORT(l2tp->peer_ns, p); /* NR Sequence number - expected for peer */ + + /* AVP - Message type */ + PUTSHORT(PPPOL2TP_AVPHEADERFLAG_MANDATORY + 8, p); /* Mandatory flag + len field */ + PUTSHORT(0, p); /* Vendor ID */ + PUTSHORT(PPPOL2TP_AVPTYPE_MESSAGE, p); /* Attribute type: Message Type */ + PUTSHORT(PPPOL2TP_MESSAGETYPE_ICRQ, p); /* 
Attribute value: Message type: ICRQ */ + + /* AVP - Assign session ID */ + PUTSHORT(PPPOL2TP_AVPHEADERFLAG_MANDATORY + 8, p); /* Mandatory flag + len field */ + PUTSHORT(0, p); /* Vendor ID */ + PUTSHORT(PPPOL2TP_AVPTYPE_SESSIONID, p); /* Attribute type: Session ID */ + PUTSHORT(l2tp->remote_session_id, p); /* Attribute value: Session ID */ + + /* AVP - Call Serial Number */ + PUTSHORT(PPPOL2TP_AVPHEADERFLAG_MANDATORY + 10, p); /* Mandatory flag + len field */ + PUTSHORT(0, p); /* Vendor ID */ + PUTSHORT(PPPOL2TP_AVPTYPE_CALLSERIALNUMBER, p); /* Attribute type: Serial number */ + serialnumber = magic(); + PUTLONG(serialnumber, p); /* Attribute value: Serial number */ + + return pppol2tp_udp_send(l2tp, pb); +} + +/* Complete tunnel establishment */ +static err_t pppol2tp_send_iccn(pppol2tp_pcb *l2tp, u16_t ns) { + struct pbuf *pb; + u8_t *p; + u16_t len; + + /* calculate UDP packet length */ + len = 12 +8 +10 +10; + + /* allocate a buffer */ + pb = pbuf_alloc(PBUF_TRANSPORT, len, PBUF_RAM); + if (pb == NULL) { + return ERR_MEM; + } + LWIP_ASSERT("pb->tot_len == pb->len", pb->tot_len == pb->len); + + p = (u8_t*)pb->payload; + /* fill in pkt */ + /* L2TP control header */ + PUTSHORT(PPPOL2TP_HEADERFLAG_CONTROL_MANDATORY, p); + PUTSHORT(len, p); /* Length */ + PUTSHORT(l2tp->source_tunnel_id, p); /* Tunnel Id */ + PUTSHORT(l2tp->source_session_id, p); /* Session Id */ + PUTSHORT(ns, p); /* NS Sequence number - to peer */ + PUTSHORT(l2tp->peer_ns, p); /* NR Sequence number - expected for peer */ + + /* AVP - Message type */ + PUTSHORT(PPPOL2TP_AVPHEADERFLAG_MANDATORY + 8, p); /* Mandatory flag + len field */ + PUTSHORT(0, p); /* Vendor ID */ + PUTSHORT(PPPOL2TP_AVPTYPE_MESSAGE, p); /* Attribute type: Message Type */ + PUTSHORT(PPPOL2TP_MESSAGETYPE_ICCN, p); /* Attribute value: Message type: ICCN */ + + /* AVP - Framing type */ + PUTSHORT(PPPOL2TP_AVPHEADERFLAG_MANDATORY + 10, p); /* Mandatory flag + len field */ + PUTSHORT(0, p); /* Vendor ID */ + PUTSHORT(PPPOL2TP_AVPTYPE_FRAMINGTYPE, p); /* Attribute type: Framing type */ + PUTLONG(PPPOL2TP_FRAMINGTYPE, p); /* Attribute value: Framing type */ + + /* AVP - TX Connect speed */ + PUTSHORT(PPPOL2TP_AVPHEADERFLAG_MANDATORY + 10, p); /* Mandatory flag + len field */ + PUTSHORT(0, p); /* Vendor ID */ + PUTSHORT(PPPOL2TP_AVPTYPE_TXCONNECTSPEED, p); /* Attribute type: TX Connect speed */ + PUTLONG(PPPOL2TP_TXCONNECTSPEED, p); /* Attribute value: TX Connect speed */ + + return pppol2tp_udp_send(l2tp, pb); +} + +/* Send a ZLB ACK packet */ +static err_t pppol2tp_send_zlb(pppol2tp_pcb *l2tp, u16_t ns, u16_t nr) { + struct pbuf *pb; + u8_t *p; + u16_t len; + + /* calculate UDP packet length */ + len = 12; + + /* allocate a buffer */ + pb = pbuf_alloc(PBUF_TRANSPORT, len, PBUF_RAM); + if (pb == NULL) { + return ERR_MEM; + } + LWIP_ASSERT("pb->tot_len == pb->len", pb->tot_len == pb->len); + + p = (u8_t*)pb->payload; + /* fill in pkt */ + /* L2TP control header */ + PUTSHORT(PPPOL2TP_HEADERFLAG_CONTROL_MANDATORY, p); + PUTSHORT(len, p); /* Length */ + PUTSHORT(l2tp->source_tunnel_id, p); /* Tunnel Id */ + PUTSHORT(0, p); /* Session Id */ + PUTSHORT(ns, p); /* NS Sequence number - to peer */ + PUTSHORT(nr, p); /* NR Sequence number - expected for peer */ + + return pppol2tp_udp_send(l2tp, pb); +} + +/* Send a StopCCN packet */ +static err_t pppol2tp_send_stopccn(pppol2tp_pcb *l2tp, u16_t ns) { + struct pbuf *pb; + u8_t *p; + u16_t len; + + /* calculate UDP packet length */ + len = 12 +8 +8 +8; + + /* allocate a buffer */ + pb = pbuf_alloc(PBUF_TRANSPORT, len, 
PBUF_RAM); + if (pb == NULL) { + return ERR_MEM; + } + LWIP_ASSERT("pb->tot_len == pb->len", pb->tot_len == pb->len); + + p = (u8_t*)pb->payload; + /* fill in pkt */ + /* L2TP control header */ + PUTSHORT(PPPOL2TP_HEADERFLAG_CONTROL_MANDATORY, p); + PUTSHORT(len, p); /* Length */ + PUTSHORT(l2tp->source_tunnel_id, p); /* Tunnel Id */ + PUTSHORT(0, p); /* Session Id */ + PUTSHORT(ns, p); /* NS Sequence number - to peer */ + PUTSHORT(l2tp->peer_ns, p); /* NR Sequence number - expected for peer */ + + /* AVP - Message type */ + PUTSHORT(PPPOL2TP_AVPHEADERFLAG_MANDATORY + 8, p); /* Mandatory flag + len field */ + PUTSHORT(0, p); /* Vendor ID */ + PUTSHORT(PPPOL2TP_AVPTYPE_MESSAGE, p); /* Attribute type: Message Type */ + PUTSHORT(PPPOL2TP_MESSAGETYPE_STOPCCN, p); /* Attribute value: Message type: StopCCN */ + + /* AVP - Assign tunnel ID */ + PUTSHORT(PPPOL2TP_AVPHEADERFLAG_MANDATORY + 8, p); /* Mandatory flag + len field */ + PUTSHORT(0, p); /* Vendor ID */ + PUTSHORT(PPPOL2TP_AVPTYPE_TUNNELID, p); /* Attribute type: Tunnel ID */ + PUTSHORT(l2tp->remote_tunnel_id, p); /* Attribute value: Tunnel ID */ + + /* AVP - Result code */ + PUTSHORT(PPPOL2TP_AVPHEADERFLAG_MANDATORY + 8, p); /* Mandatory flag + len field */ + PUTSHORT(0, p); /* Vendor ID */ + PUTSHORT(PPPOL2TP_AVPTYPE_RESULTCODE, p); /* Attribute type: Result code */ + PUTSHORT(PPPOL2TP_RESULTCODE, p); /* Attribute value: Result code */ + + return pppol2tp_udp_send(l2tp, pb); +} + +static err_t pppol2tp_xmit(pppol2tp_pcb *l2tp, struct pbuf *pb) { + u8_t *p; + + /* make room for L2TP header - should not fail */ + if (pbuf_add_header(pb, PPPOL2TP_OUTPUT_DATA_HEADER_LEN) != 0) { + /* bail out */ + PPPDEBUG(LOG_ERR, ("pppol2tp: pppol2tp_pcb: could not allocate room for L2TP header\n")); + LINK_STATS_INC(link.lenerr); + pbuf_free(pb); + return ERR_BUF; + } + + p = (u8_t*)pb->payload; + PUTSHORT(PPPOL2TP_HEADERFLAG_DATA_MANDATORY, p); + PUTSHORT(l2tp->source_tunnel_id, p); /* Tunnel Id */ + PUTSHORT(l2tp->source_session_id, p); /* Session Id */ + + return pppol2tp_udp_send(l2tp, pb); +} + +static err_t pppol2tp_udp_send(pppol2tp_pcb *l2tp, struct pbuf *pb) { + err_t err; + if (l2tp->netif) { + err = udp_sendto_if(l2tp->udp, pb, &l2tp->remote_ip, l2tp->tunnel_port, l2tp->netif); + } else { + err = udp_sendto(l2tp->udp, pb, &l2tp->remote_ip, l2tp->tunnel_port); + } + pbuf_free(pb); + return err; +} + +#endif /* PPP_SUPPORT && PPPOL2TP_SUPPORT */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/netif/ppp/pppos.c b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/netif/ppp/pppos.c new file mode 100644 index 0000000..d10dd8e --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/netif/ppp/pppos.c @@ -0,0 +1,895 @@ +/** + * @file + * Network Point to Point Protocol over Serial file. + * + */ + +/* + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. 
The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + */ + +#include "netif/ppp/ppp_opts.h" +#if PPP_SUPPORT && PPPOS_SUPPORT /* don't build if not configured for use in lwipopts.h */ + +#include + +#include "lwip/arch.h" +#include "lwip/err.h" +#include "lwip/pbuf.h" +#include "lwip/sys.h" +#include "lwip/memp.h" +#include "lwip/netif.h" +#include "lwip/snmp.h" +#include "lwip/priv/tcpip_priv.h" +#include "lwip/api.h" +#include "lwip/ip4.h" /* for ip4_input() */ + +#include "netif/ppp/ppp_impl.h" +#include "netif/ppp/pppos.h" +#include "netif/ppp/vj.h" + +/* Memory pool */ +LWIP_MEMPOOL_DECLARE(PPPOS_PCB, MEMP_NUM_PPPOS_INTERFACES, sizeof(pppos_pcb), "PPPOS_PCB") + +/* callbacks called from PPP core */ +static err_t pppos_write(ppp_pcb *ppp, void *ctx, struct pbuf *p); +static err_t pppos_netif_output(ppp_pcb *ppp, void *ctx, struct pbuf *pb, u16_t protocol); +static void pppos_connect(ppp_pcb *ppp, void *ctx); +#if PPP_SERVER +static void pppos_listen(ppp_pcb *ppp, void *ctx); +#endif /* PPP_SERVER */ +static void pppos_disconnect(ppp_pcb *ppp, void *ctx); +static err_t pppos_destroy(ppp_pcb *ppp, void *ctx); +static void pppos_send_config(ppp_pcb *ppp, void *ctx, u32_t accm, int pcomp, int accomp); +static void pppos_recv_config(ppp_pcb *ppp, void *ctx, u32_t accm, int pcomp, int accomp); + +/* Prototypes for procedures local to this file. */ +#if PPP_INPROC_IRQ_SAFE +static void pppos_input_callback(void *arg); +#endif /* PPP_INPROC_IRQ_SAFE */ +static void pppos_input_free_current_packet(pppos_pcb *pppos); +static void pppos_input_drop(pppos_pcb *pppos); +static err_t pppos_output_append(pppos_pcb *pppos, err_t err, struct pbuf *nb, u8_t c, u8_t accm, u16_t *fcs); +static err_t pppos_output_last(pppos_pcb *pppos, err_t err, struct pbuf *nb, u16_t *fcs); + +/* Callbacks structure for PPP core */ +static const struct link_callbacks pppos_callbacks = { + pppos_connect, +#if PPP_SERVER + pppos_listen, +#endif /* PPP_SERVER */ + pppos_disconnect, + pppos_destroy, + pppos_write, + pppos_netif_output, + pppos_send_config, + pppos_recv_config +}; + +/* PPP's Asynchronous-Control-Character-Map. The mask array is used + * to select the specific bit for a character. */ +#define ESCAPE_P(accm, c) ((accm)[(c) >> 3] & 1 << (c & 0x07)) + +#if PPP_FCS_TABLE +/* + * FCS lookup table as calculated by genfcstab. 
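For reference, the ESCAPE_P() macro above treats the Async-Control-Character-Map as a 256-bit map packed into 32 bytes: bit (c & 7) of byte (c >> 3) says whether character c must be escaped. A minimal standalone sketch of that indexing (accm_set/accm_test are illustrative names, not lwIP functions):

```c
#include <stdint.h>
#include <stdio.h>

static void accm_set(uint8_t accm[32], uint8_t c) {
  accm[c >> 3] |= (uint8_t)(1u << (c & 0x07));
}

static int accm_test(const uint8_t accm[32], uint8_t c) {
  return (accm[c >> 3] >> (c & 0x07)) & 1;
}

int main(void) {
  uint8_t accm[32] = { 0 };
  /* PPPoS always escapes 0x7D (escape) and 0x7E (flag). Both fall into byte 15
   * of the map, bits 5 and 6, i.e. the value 0x60 that pppos_connect() and
   * pppos_listen() later write into in_accm[15]/out_accm[15]. */
  accm_set(accm, 0x7d);
  accm_set(accm, 0x7e);
  printf("accm[15]=0x%02x escape(0x7e)=%d escape(0x41)=%d\n",
         accm[15], accm_test(accm, 0x7e), accm_test(accm, 0x41));
  return 0;
}
```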
+ */ +static const u16_t fcstab[256] = { + 0x0000, 0x1189, 0x2312, 0x329b, 0x4624, 0x57ad, 0x6536, 0x74bf, + 0x8c48, 0x9dc1, 0xaf5a, 0xbed3, 0xca6c, 0xdbe5, 0xe97e, 0xf8f7, + 0x1081, 0x0108, 0x3393, 0x221a, 0x56a5, 0x472c, 0x75b7, 0x643e, + 0x9cc9, 0x8d40, 0xbfdb, 0xae52, 0xdaed, 0xcb64, 0xf9ff, 0xe876, + 0x2102, 0x308b, 0x0210, 0x1399, 0x6726, 0x76af, 0x4434, 0x55bd, + 0xad4a, 0xbcc3, 0x8e58, 0x9fd1, 0xeb6e, 0xfae7, 0xc87c, 0xd9f5, + 0x3183, 0x200a, 0x1291, 0x0318, 0x77a7, 0x662e, 0x54b5, 0x453c, + 0xbdcb, 0xac42, 0x9ed9, 0x8f50, 0xfbef, 0xea66, 0xd8fd, 0xc974, + 0x4204, 0x538d, 0x6116, 0x709f, 0x0420, 0x15a9, 0x2732, 0x36bb, + 0xce4c, 0xdfc5, 0xed5e, 0xfcd7, 0x8868, 0x99e1, 0xab7a, 0xbaf3, + 0x5285, 0x430c, 0x7197, 0x601e, 0x14a1, 0x0528, 0x37b3, 0x263a, + 0xdecd, 0xcf44, 0xfddf, 0xec56, 0x98e9, 0x8960, 0xbbfb, 0xaa72, + 0x6306, 0x728f, 0x4014, 0x519d, 0x2522, 0x34ab, 0x0630, 0x17b9, + 0xef4e, 0xfec7, 0xcc5c, 0xddd5, 0xa96a, 0xb8e3, 0x8a78, 0x9bf1, + 0x7387, 0x620e, 0x5095, 0x411c, 0x35a3, 0x242a, 0x16b1, 0x0738, + 0xffcf, 0xee46, 0xdcdd, 0xcd54, 0xb9eb, 0xa862, 0x9af9, 0x8b70, + 0x8408, 0x9581, 0xa71a, 0xb693, 0xc22c, 0xd3a5, 0xe13e, 0xf0b7, + 0x0840, 0x19c9, 0x2b52, 0x3adb, 0x4e64, 0x5fed, 0x6d76, 0x7cff, + 0x9489, 0x8500, 0xb79b, 0xa612, 0xd2ad, 0xc324, 0xf1bf, 0xe036, + 0x18c1, 0x0948, 0x3bd3, 0x2a5a, 0x5ee5, 0x4f6c, 0x7df7, 0x6c7e, + 0xa50a, 0xb483, 0x8618, 0x9791, 0xe32e, 0xf2a7, 0xc03c, 0xd1b5, + 0x2942, 0x38cb, 0x0a50, 0x1bd9, 0x6f66, 0x7eef, 0x4c74, 0x5dfd, + 0xb58b, 0xa402, 0x9699, 0x8710, 0xf3af, 0xe226, 0xd0bd, 0xc134, + 0x39c3, 0x284a, 0x1ad1, 0x0b58, 0x7fe7, 0x6e6e, 0x5cf5, 0x4d7c, + 0xc60c, 0xd785, 0xe51e, 0xf497, 0x8028, 0x91a1, 0xa33a, 0xb2b3, + 0x4a44, 0x5bcd, 0x6956, 0x78df, 0x0c60, 0x1de9, 0x2f72, 0x3efb, + 0xd68d, 0xc704, 0xf59f, 0xe416, 0x90a9, 0x8120, 0xb3bb, 0xa232, + 0x5ac5, 0x4b4c, 0x79d7, 0x685e, 0x1ce1, 0x0d68, 0x3ff3, 0x2e7a, + 0xe70e, 0xf687, 0xc41c, 0xd595, 0xa12a, 0xb0a3, 0x8238, 0x93b1, + 0x6b46, 0x7acf, 0x4854, 0x59dd, 0x2d62, 0x3ceb, 0x0e70, 0x1ff9, + 0xf78f, 0xe606, 0xd49d, 0xc514, 0xb1ab, 0xa022, 0x92b9, 0x8330, + 0x7bc7, 0x6a4e, 0x58d5, 0x495c, 0x3de3, 0x2c6a, 0x1ef1, 0x0f78 +}; +#define PPP_FCS(fcs, c) (((fcs) >> 8) ^ fcstab[((fcs) ^ (c)) & 0xff]) +#else /* PPP_FCS_TABLE */ +/* The HDLC polynomial: X**0 + X**5 + X**12 + X**16 (0x8408) */ +#define PPP_FCS_POLYNOMIAL 0x8408 +static u16_t +ppp_get_fcs(u8_t byte) +{ + unsigned int octet; + int bit; + octet = byte; + for (bit = 8; bit-- > 0; ) { + octet = (octet & 0x01) ? ((octet >> 1) ^ PPP_FCS_POLYNOMIAL) : (octet >> 1); + } + return octet & 0xffff; +} +#define PPP_FCS(fcs, c) (((fcs) >> 8) ^ ppp_get_fcs(((fcs) ^ (c)) & 0xff)) +#endif /* PPP_FCS_TABLE */ + +/* + * Values for FCS calculations. + */ +#define PPP_INITFCS 0xffff /* Initial FCS value */ +#define PPP_GOODFCS 0xf0b8 /* Good final FCS value */ + +#if PPP_INPROC_IRQ_SAFE +#define PPPOS_DECL_PROTECT(lev) SYS_ARCH_DECL_PROTECT(lev) +#define PPPOS_PROTECT(lev) SYS_ARCH_PROTECT(lev) +#define PPPOS_UNPROTECT(lev) SYS_ARCH_UNPROTECT(lev) +#else +#define PPPOS_DECL_PROTECT(lev) +#define PPPOS_PROTECT(lev) +#define PPPOS_UNPROTECT(lev) +#endif /* PPP_INPROC_IRQ_SAFE */ + + +/* + * Create a new PPP connection using the given serial I/O device. + * + * Return 0 on success, an error code on failure. 
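The PPP_FCS()/PPP_INITFCS/PPP_GOODFCS definitions above implement the RFC 1662 HDLC FCS-16. A standalone sketch of the table-free update path and of the trailer convention used later in this file (pppos_output_last() appends the one's complement low byte first; pppos_input() then expects the running FCS over payload plus trailer to equal PPP_GOODFCS). fcs_update and the sample frame bytes are illustrative, not lwIP identifiers:

```c
#include <stdint.h>
#include <stdio.h>

#define FCS_POLY 0x8408u
#define FCS_INIT 0xffffu
#define FCS_GOOD 0xf0b8u

/* Bit-by-bit FCS update, equivalent to the non-table PPP_FCS() path above. */
static uint16_t fcs_update(uint16_t fcs, uint8_t c) {
  unsigned int v = (fcs ^ c) & 0xff;
  int bit;
  for (bit = 0; bit < 8; bit++) {
    v = (v & 1) ? ((v >> 1) ^ FCS_POLY) : (v >> 1);
  }
  return (uint16_t)((fcs >> 8) ^ v);
}

int main(void) {
  /* addr, ctrl, LCP protocol, then a minimal LCP Configure-Request header */
  uint8_t frame[] = { 0xff, 0x03, 0xc0, 0x21, 0x01, 0x01, 0x00, 0x04 };
  uint16_t fcs = FCS_INIT;
  size_t i;
  for (i = 0; i < sizeof frame; i++) {
    fcs = fcs_update(fcs, frame[i]);
  }

  /* Transmit the one's complement, least significant octet first. */
  uint8_t lo = (uint8_t)(~fcs & 0xff);
  uint8_t hi = (uint8_t)((~fcs >> 8) & 0xff);

  /* A receiver running the FCS over payload + trailer lands on PPP_GOODFCS. */
  fcs = fcs_update(fcs, lo);
  fcs = fcs_update(fcs, hi);
  printf("final fcs = 0x%04x (expect 0x%04x)\n", fcs, FCS_GOOD);
  return 0;
}
```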
+ */ +ppp_pcb *pppos_create(struct netif *pppif, pppos_output_cb_fn output_cb, + ppp_link_status_cb_fn link_status_cb, void *ctx_cb) +{ + pppos_pcb *pppos; + ppp_pcb *ppp; + LWIP_ASSERT_CORE_LOCKED(); + + pppos = (pppos_pcb *)LWIP_MEMPOOL_ALLOC(PPPOS_PCB); + if (pppos == NULL) { + return NULL; + } + + ppp = ppp_new(pppif, &pppos_callbacks, pppos, link_status_cb, ctx_cb); + if (ppp == NULL) { + LWIP_MEMPOOL_FREE(PPPOS_PCB, pppos); + return NULL; + } + + memset(pppos, 0, sizeof(pppos_pcb)); + pppos->ppp = ppp; + pppos->output_cb = output_cb; + return ppp; +} + +/* Called by PPP core */ +static err_t +pppos_write(ppp_pcb *ppp, void *ctx, struct pbuf *p) +{ + pppos_pcb *pppos = (pppos_pcb *)ctx; + u8_t *s; + struct pbuf *nb; + u16_t n; + u16_t fcs_out; + err_t err; + LWIP_UNUSED_ARG(ppp); + + /* Grab an output buffer. Using PBUF_POOL here for tx is ok since the pbuf + gets freed by 'pppos_output_last' before this function returns and thus + cannot starve rx. */ + nb = pbuf_alloc(PBUF_RAW, 0, PBUF_POOL); + if (nb == NULL) { + PPPDEBUG(LOG_WARNING, ("pppos_write[%d]: alloc fail\n", ppp->netif->num)); + LINK_STATS_INC(link.memerr); + LINK_STATS_INC(link.drop); + MIB2_STATS_NETIF_INC(ppp->netif, ifoutdiscards); + pbuf_free(p); + return ERR_MEM; + } + + /* Set nb->tot_len to actual payload length */ + nb->tot_len = p->len; + + /* If the link has been idle, we'll send a fresh flag character to + * flush any noise. */ + err = ERR_OK; + if ((sys_now() - pppos->last_xmit) >= PPP_MAXIDLEFLAG) { + err = pppos_output_append(pppos, err, nb, PPP_FLAG, 0, NULL); + } + + /* Load output buffer. */ + fcs_out = PPP_INITFCS; + s = (u8_t*)p->payload; + n = p->len; + while (n-- > 0) { + err = pppos_output_append(pppos, err, nb, *s++, 1, &fcs_out); + } + + err = pppos_output_last(pppos, err, nb, &fcs_out); + if (err == ERR_OK) { + PPPDEBUG(LOG_INFO, ("pppos_write[%d]: len=%d\n", ppp->netif->num, p->len)); + } else { + PPPDEBUG(LOG_WARNING, ("pppos_write[%d]: output failed len=%d\n", ppp->netif->num, p->len)); + } + pbuf_free(p); + return err; +} + +/* Called by PPP core */ +static err_t +pppos_netif_output(ppp_pcb *ppp, void *ctx, struct pbuf *pb, u16_t protocol) +{ + pppos_pcb *pppos = (pppos_pcb *)ctx; + struct pbuf *nb, *p; + u16_t fcs_out; + err_t err; + LWIP_UNUSED_ARG(ppp); + + /* Grab an output buffer. Using PBUF_POOL here for tx is ok since the pbuf + gets freed by 'pppos_output_last' before this function returns and thus + cannot starve rx. */ + nb = pbuf_alloc(PBUF_RAW, 0, PBUF_POOL); + if (nb == NULL) { + PPPDEBUG(LOG_WARNING, ("pppos_netif_output[%d]: alloc fail\n", ppp->netif->num)); + LINK_STATS_INC(link.memerr); + LINK_STATS_INC(link.drop); + MIB2_STATS_NETIF_INC(ppp->netif, ifoutdiscards); + return ERR_MEM; + } + + /* Set nb->tot_len to actual payload length */ + nb->tot_len = pb->tot_len; + + /* If the link has been idle, we'll send a fresh flag character to + * flush any noise. */ + err = ERR_OK; + if ((sys_now() - pppos->last_xmit) >= PPP_MAXIDLEFLAG) { + err = pppos_output_append(pppos, err, nb, PPP_FLAG, 0, NULL); + } + + fcs_out = PPP_INITFCS; + if (!pppos->accomp) { + err = pppos_output_append(pppos, err, nb, PPP_ALLSTATIONS, 1, &fcs_out); + err = pppos_output_append(pppos, err, nb, PPP_UI, 1, &fcs_out); + } + if (!pppos->pcomp || protocol > 0xFF) { + err = pppos_output_append(pppos, err, nb, (protocol >> 8) & 0xFF, 1, &fcs_out); + } + err = pppos_output_append(pppos, err, nb, protocol & 0xFF, 1, &fcs_out); + + /* Load packet. 
*/ + for(p = pb; p; p = p->next) { + u16_t n = p->len; + u8_t *s = (u8_t*)p->payload; + + while (n-- > 0) { + err = pppos_output_append(pppos, err, nb, *s++, 1, &fcs_out); + } + } + + err = pppos_output_last(pppos, err, nb, &fcs_out); + if (err == ERR_OK) { + PPPDEBUG(LOG_INFO, ("pppos_netif_output[%d]: proto=0x%"X16_F", len = %d\n", ppp->netif->num, protocol, pb->tot_len)); + } else { + PPPDEBUG(LOG_WARNING, ("pppos_netif_output[%d]: output failed proto=0x%"X16_F", len = %d\n", ppp->netif->num, protocol, pb->tot_len)); + } + return err; +} + +static void +pppos_connect(ppp_pcb *ppp, void *ctx) +{ + pppos_pcb *pppos = (pppos_pcb *)ctx; + PPPOS_DECL_PROTECT(lev); + +#if PPP_INPROC_IRQ_SAFE + /* input pbuf left over from last session? */ + pppos_input_free_current_packet(pppos); +#endif /* PPP_INPROC_IRQ_SAFE */ + + /* reset PPPoS control block to its initial state */ + memset(&pppos->last_xmit, 0, sizeof(pppos_pcb) - offsetof(pppos_pcb, last_xmit)); + + /* + * Default the in and out accm so that escape and flag characters + * are always escaped. + */ + pppos->in_accm[15] = 0x60; /* no need to protect since RX is not running */ + pppos->out_accm[15] = 0x60; + PPPOS_PROTECT(lev); + pppos->open = 1; + PPPOS_UNPROTECT(lev); + + /* + * Start the connection and handle incoming events (packet or timeout). + */ + PPPDEBUG(LOG_INFO, ("pppos_connect: unit %d: connecting\n", ppp->netif->num)); + ppp_start(ppp); /* notify upper layers */ +} + +#if PPP_SERVER +static void +pppos_listen(ppp_pcb *ppp, void *ctx) +{ + pppos_pcb *pppos = (pppos_pcb *)ctx; + PPPOS_DECL_PROTECT(lev); + +#if PPP_INPROC_IRQ_SAFE + /* input pbuf left over from last session? */ + pppos_input_free_current_packet(pppos); +#endif /* PPP_INPROC_IRQ_SAFE */ + + /* reset PPPoS control block to its initial state */ + memset(&pppos->last_xmit, 0, sizeof(pppos_pcb) - offsetof(pppos_pcb, last_xmit)); + + /* + * Default the in and out accm so that escape and flag characters + * are always escaped. + */ + pppos->in_accm[15] = 0x60; /* no need to protect since RX is not running */ + pppos->out_accm[15] = 0x60; + PPPOS_PROTECT(lev); + pppos->open = 1; + PPPOS_UNPROTECT(lev); + + /* + * Wait for something to happen. + */ + PPPDEBUG(LOG_INFO, ("pppos_listen: unit %d: listening\n", ppp->netif->num)); + ppp_start(ppp); /* notify upper layers */ +} +#endif /* PPP_SERVER */ + +static void +pppos_disconnect(ppp_pcb *ppp, void *ctx) +{ + pppos_pcb *pppos = (pppos_pcb *)ctx; + PPPOS_DECL_PROTECT(lev); + + PPPOS_PROTECT(lev); + pppos->open = 0; + PPPOS_UNPROTECT(lev); + + /* If PPP_INPROC_IRQ_SAFE is used we cannot call + * pppos_input_free_current_packet() here because + * rx IRQ might still call pppos_input(). + */ +#if !PPP_INPROC_IRQ_SAFE + /* input pbuf left ? */ + pppos_input_free_current_packet(pppos); +#endif /* !PPP_INPROC_IRQ_SAFE */ + + ppp_link_end(ppp); /* notify upper layers */ +} + +static err_t +pppos_destroy(ppp_pcb *ppp, void *ctx) +{ + pppos_pcb *pppos = (pppos_pcb *)ctx; + LWIP_UNUSED_ARG(ppp); + +#if PPP_INPROC_IRQ_SAFE + /* input pbuf left ? */ + pppos_input_free_current_packet(pppos); +#endif /* PPP_INPROC_IRQ_SAFE */ + + LWIP_MEMPOOL_FREE(PPPOS_PCB, pppos); + return ERR_OK; +} + +#if !NO_SYS && !PPP_INPROC_IRQ_SAFE +/** Pass received raw characters to PPPoS to be decoded through lwIP TCPIP thread. + * + * This is one of the only functions that may be called outside of the TCPIP thread! 
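To show how pppos_create() and pppos_input_tcpip() fit together on an OS build (NO_SYS==0) such as this FreeRTOS/CMSIS project, here is a minimal wiring sketch. uart_write() and uart_read() are assumed application helpers, not lwIP APIs, and error handling is reduced to the essentials:

```c
#include "lwip/netif.h"
#include "netif/ppp/ppp.h"
#include "netif/ppp/pppos.h"

extern u32_t uart_write(u8_t *data, u32_t len);   /* assumed: blocking serial TX */
extern int   uart_read(u8_t *buf, int len);       /* assumed: blocking serial RX */

static struct netif ppp_netif;

/* PPPoS calls this with HDLC-framed bytes ready for the serial port. */
static u32_t ppp_output_cb(ppp_pcb *pcb, u8_t *data, u32_t len, void *ctx) {
  LWIP_UNUSED_ARG(pcb); LWIP_UNUSED_ARG(ctx);
  return uart_write(data, len);
}

static void ppp_status_cb(ppp_pcb *pcb, int err_code, void *ctx) {
  LWIP_UNUSED_ARG(pcb); LWIP_UNUSED_ARG(ctx);
  if (err_code == PPPERR_NONE) {
    /* negotiation finished, the netif now has an address */
  }
}

ppp_pcb *ppp_client_start(void) {
  ppp_pcb *ppp = pppos_create(&ppp_netif, ppp_output_cb, ppp_status_cb, NULL);
  if (ppp == NULL) {
    return NULL;
  }
  ppp_set_default(ppp);
  ppp_connect(ppp, 0);
  return ppp;
}

/* Serial RX loop: raw bytes go to pppos_input_tcpip(), which queues them to
 * the tcpip thread exactly as documented for the function below. */
void ppp_rx_loop(ppp_pcb *ppp) {
  u8_t buf[128];
  for (;;) {
    int n = uart_read(buf, (int)sizeof(buf));
    if (n > 0) {
      pppos_input_tcpip(ppp, buf, n);
    }
  }
}
```

In this kind of setup ppp_client_start() would typically run in the tcpip context (or go through the pppapi.h wrappers), while ppp_rx_loop() runs as its own task feeding the UART bytes in.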
+ * + * @param ppp PPP descriptor index, returned by pppos_create() + * @param s received data + * @param l length of received data + */ +err_t +pppos_input_tcpip(ppp_pcb *ppp, u8_t *s, int l) +{ + struct pbuf *p; + err_t err; + + p = pbuf_alloc(PBUF_RAW, l, PBUF_POOL); + if (!p) { + return ERR_MEM; + } + pbuf_take(p, s, l); + + err = tcpip_inpkt(p, ppp_netif(ppp), pppos_input_sys); + if (err != ERR_OK) { + pbuf_free(p); + } + return err; +} + +/* called from TCPIP thread */ +err_t pppos_input_sys(struct pbuf *p, struct netif *inp) { + ppp_pcb *ppp = (ppp_pcb*)inp->state; + struct pbuf *n; + LWIP_ASSERT_CORE_LOCKED(); + + for (n = p; n; n = n->next) { + pppos_input(ppp, (u8_t*)n->payload, n->len); + } + pbuf_free(p); + return ERR_OK; +} +#endif /* !NO_SYS && !PPP_INPROC_IRQ_SAFE */ + +/** PPPoS input helper struct, must be packed since it is stored + * to pbuf->payload, which might be unaligned. */ +#if PPP_INPROC_IRQ_SAFE +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +struct pppos_input_header { + PACK_STRUCT_FIELD(ppp_pcb *ppp); +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif +#endif /* PPP_INPROC_IRQ_SAFE */ + +/** Pass received raw characters to PPPoS to be decoded. + * + * @param ppp PPP descriptor index, returned by pppos_create() + * @param s received data + * @param l length of received data + */ +void +pppos_input(ppp_pcb *ppp, u8_t *s, int l) +{ + pppos_pcb *pppos = (pppos_pcb *)ppp->link_ctx_cb; + struct pbuf *next_pbuf; + u8_t cur_char; + u8_t escaped; + PPPOS_DECL_PROTECT(lev); +#if !PPP_INPROC_IRQ_SAFE + LWIP_ASSERT_CORE_LOCKED(); +#endif + + PPPDEBUG(LOG_DEBUG, ("pppos_input[%d]: got %d bytes\n", ppp->netif->num, l)); + while (l-- > 0) { + cur_char = *s++; + + PPPOS_PROTECT(lev); + /* ppp_input can disconnect the interface, we need to abort to prevent a memory + * leak if there are remaining bytes because pppos_connect and pppos_listen + * functions expect input buffer to be free. Furthermore there are no real + * reason to continue reading bytes if we are disconnected. + */ + if (!pppos->open) { + PPPOS_UNPROTECT(lev); + return; + } + escaped = ESCAPE_P(pppos->in_accm, cur_char); + PPPOS_UNPROTECT(lev); + /* Handle special characters. */ + if (escaped) { + /* Check for escape sequences. */ + /* XXX Note that this does not handle an escaped 0x5d character which + * would appear as an escape character. Since this is an ASCII ']' + * and there is no reason that I know of to escape it, I won't complicate + * the code to handle this case. GLL */ + if (cur_char == PPP_ESCAPE) { + pppos->in_escaped = 1; + /* Check for the flag character. */ + } else if (cur_char == PPP_FLAG) { + /* If this is just an extra flag character, ignore it. */ + if (pppos->in_state <= PDADDRESS) { + /* ignore it */; + /* If we haven't received the packet header, drop what has come in. */ + } else if (pppos->in_state < PDDATA) { + PPPDEBUG(LOG_WARNING, + ("pppos_input[%d]: Dropping incomplete packet %d\n", + ppp->netif->num, pppos->in_state)); + LINK_STATS_INC(link.lenerr); + pppos_input_drop(pppos); + /* If the fcs is invalid, drop the packet. 
*/ + } else if (pppos->in_fcs != PPP_GOODFCS) { + PPPDEBUG(LOG_INFO, + ("pppos_input[%d]: Dropping bad fcs 0x%"X16_F" proto=0x%"X16_F"\n", + ppp->netif->num, pppos->in_fcs, pppos->in_protocol)); + /* Note: If you get lots of these, check for UART frame errors or try different baud rate */ + LINK_STATS_INC(link.chkerr); + pppos_input_drop(pppos); + /* Otherwise it's a good packet so pass it on. */ + } else { + struct pbuf *inp; + /* Trim off the checksum. */ + if(pppos->in_tail->len > 2) { + pppos->in_tail->len -= 2; + + pppos->in_tail->tot_len = pppos->in_tail->len; + if (pppos->in_tail != pppos->in_head) { + pbuf_cat(pppos->in_head, pppos->in_tail); + } + } else { + pppos->in_tail->tot_len = pppos->in_tail->len; + if (pppos->in_tail != pppos->in_head) { + pbuf_cat(pppos->in_head, pppos->in_tail); + } + + pbuf_realloc(pppos->in_head, pppos->in_head->tot_len - 2); + } + + /* Dispatch the packet thereby consuming it. */ + inp = pppos->in_head; + /* Packet consumed, release our references. */ + pppos->in_head = NULL; + pppos->in_tail = NULL; +#if IP_FORWARD || LWIP_IPV6_FORWARD + /* hide the room for Ethernet forwarding header */ + pbuf_remove_header(inp, PBUF_LINK_ENCAPSULATION_HLEN + PBUF_LINK_HLEN); +#endif /* IP_FORWARD || LWIP_IPV6_FORWARD */ +#if PPP_INPROC_IRQ_SAFE + if(tcpip_try_callback(pppos_input_callback, inp) != ERR_OK) { + PPPDEBUG(LOG_ERR, ("pppos_input[%d]: tcpip_callback() failed, dropping packet\n", ppp->netif->num)); + pbuf_free(inp); + LINK_STATS_INC(link.drop); + MIB2_STATS_NETIF_INC(ppp->netif, ifindiscards); + } +#else /* PPP_INPROC_IRQ_SAFE */ + ppp_input(ppp, inp); +#endif /* PPP_INPROC_IRQ_SAFE */ + } + + /* Prepare for a new packet. */ + pppos->in_fcs = PPP_INITFCS; + pppos->in_state = PDADDRESS; + pppos->in_escaped = 0; + /* Other characters are usually control characters that may have + * been inserted by the physical layer so here we just drop them. */ + } else { + PPPDEBUG(LOG_WARNING, + ("pppos_input[%d]: Dropping ACCM char <%d>\n", ppp->netif->num, cur_char)); + } + /* Process other characters. */ + } else { + /* Unencode escaped characters. */ + if (pppos->in_escaped) { + pppos->in_escaped = 0; + cur_char ^= PPP_TRANS; + } + + /* Process character relative to current state. */ + switch(pppos->in_state) { + case PDIDLE: /* Idle state - waiting. */ + /* Drop the character if it's not 0xff + * we would have processed a flag character above. */ + if (cur_char != PPP_ALLSTATIONS) { + break; + } + /* no break */ + /* Fall through */ + + case PDSTART: /* Process start flag. */ + /* Prepare for a new packet. */ + pppos->in_fcs = PPP_INITFCS; + /* no break */ + /* Fall through */ + + case PDADDRESS: /* Process address field. */ + if (cur_char == PPP_ALLSTATIONS) { + pppos->in_state = PDCONTROL; + break; + } + /* no break */ + + /* Else assume compressed address and control fields so + * fall through to get the protocol... */ + /* Fall through */ + case PDCONTROL: /* Process control field. */ + /* If we don't get a valid control code, restart. */ + if (cur_char == PPP_UI) { + pppos->in_state = PDPROTOCOL1; + break; + } + /* no break */ + +#if 0 + else { + PPPDEBUG(LOG_WARNING, + ("pppos_input[%d]: Invalid control <%d>\n", ppp->netif->num, cur_char)); + pppos->in_state = PDSTART; + } +#endif + /* Fall through */ + + case PDPROTOCOL1: /* Process protocol field 1. */ + /* If the lower bit is set, this is the end of the protocol + * field. 
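The PDPROTOCOL1/PDPROTOCOL2 states below implement RFC 1661 protocol-field compression: protocol numbers always have an odd low octet, so an odd first octet is the whole (one-byte) protocol, otherwise a second octet follows. A standalone sketch of the same rule (decode_proto is an illustrative name):

```c
#include <stdint.h>
#include <stdio.h>

/* Returns how many octets of p were consumed, writing the protocol to *proto. */
static size_t decode_proto(const uint8_t *p, uint16_t *proto) {
  if (p[0] & 1) {                            /* odd -> compressed, single octet */
    *proto = p[0];
    return 1;
  }
  *proto = (uint16_t)((p[0] << 8) | p[1]);   /* even -> two-octet protocol */
  return 2;
}

int main(void) {
  uint16_t proto;
  const uint8_t ipv4[]   = { 0x00, 0x21 };   /* PPP_IP, uncompressed */
  const uint8_t ipv4_pfc[] = { 0x21 };       /* PPP_IP with PFC applied */
  size_t n = decode_proto(ipv4, &proto);
  printf("proto=0x%04x (%u octets)\n", proto, (unsigned)n);
  n = decode_proto(ipv4_pfc, &proto);
  printf("proto=0x%04x (%u octets)\n", proto, (unsigned)n);
  return 0;
}
```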
*/ + if (cur_char & 1) { + pppos->in_protocol = cur_char; + pppos->in_state = PDDATA; + } else { + pppos->in_protocol = (u16_t)cur_char << 8; + pppos->in_state = PDPROTOCOL2; + } + break; + case PDPROTOCOL2: /* Process protocol field 2. */ + pppos->in_protocol |= cur_char; + pppos->in_state = PDDATA; + break; + case PDDATA: /* Process data byte. */ + /* Make space to receive processed data. */ + if (pppos->in_tail == NULL || pppos->in_tail->len == PBUF_POOL_BUFSIZE) { + u16_t pbuf_alloc_len; + if (pppos->in_tail != NULL) { + pppos->in_tail->tot_len = pppos->in_tail->len; + if (pppos->in_tail != pppos->in_head) { + pbuf_cat(pppos->in_head, pppos->in_tail); + /* give up the in_tail reference now */ + pppos->in_tail = NULL; + } + } + /* If we haven't started a packet, we need a packet header. */ + pbuf_alloc_len = 0; +#if IP_FORWARD || LWIP_IPV6_FORWARD + /* If IP forwarding is enabled we are reserving PBUF_LINK_ENCAPSULATION_HLEN + * + PBUF_LINK_HLEN bytes so the packet is being allocated with enough header + * space to be forwarded (to Ethernet for example). + */ + if (pppos->in_head == NULL) { + pbuf_alloc_len = PBUF_LINK_ENCAPSULATION_HLEN + PBUF_LINK_HLEN; + } +#endif /* IP_FORWARD || LWIP_IPV6_FORWARD */ + next_pbuf = pbuf_alloc(PBUF_RAW, pbuf_alloc_len, PBUF_POOL); + if (next_pbuf == NULL) { + /* No free buffers. Drop the input packet and let the + * higher layers deal with it. Continue processing + * the received pbuf chain in case a new packet starts. */ + PPPDEBUG(LOG_ERR, ("pppos_input[%d]: NO FREE PBUFS!\n", ppp->netif->num)); + LINK_STATS_INC(link.memerr); + pppos_input_drop(pppos); + pppos->in_state = PDSTART; /* Wait for flag sequence. */ + break; + } + if (pppos->in_head == NULL) { + u8_t *payload = ((u8_t*)next_pbuf->payload) + pbuf_alloc_len; +#if PPP_INPROC_IRQ_SAFE + ((struct pppos_input_header*)payload)->ppp = ppp; + payload += sizeof(struct pppos_input_header); + next_pbuf->len += sizeof(struct pppos_input_header); +#endif /* PPP_INPROC_IRQ_SAFE */ + next_pbuf->len += sizeof(pppos->in_protocol); + *(payload++) = pppos->in_protocol >> 8; + *(payload) = pppos->in_protocol & 0xFF; + pppos->in_head = next_pbuf; + } + pppos->in_tail = next_pbuf; + } + /* Load character into buffer. */ + ((u8_t*)pppos->in_tail->payload)[pppos->in_tail->len++] = cur_char; + break; + default: + break; + } + + /* update the frame check sequence number. */ + pppos->in_fcs = PPP_FCS(pppos->in_fcs, cur_char); + } + } /* while (l-- > 0), all bytes processed */ +} + +#if PPP_INPROC_IRQ_SAFE +/* PPPoS input callback using one input pointer + */ +static void pppos_input_callback(void *arg) { + struct pbuf *pb = (struct pbuf*)arg; + ppp_pcb *ppp; + + ppp = ((struct pppos_input_header*)pb->payload)->ppp; + if(pbuf_remove_header(pb, sizeof(struct pppos_input_header))) { + LWIP_ASSERT("pbuf_remove_header failed\n", 0); + goto drop; + } + + /* Dispatch the packet thereby consuming it. */ + ppp_input(ppp, pb); + return; + +drop: + LINK_STATS_INC(link.drop); + MIB2_STATS_NETIF_INC(ppp->netif, ifindiscards); + pbuf_free(pb); +} +#endif /* PPP_INPROC_IRQ_SAFE */ + +static void +pppos_send_config(ppp_pcb *ppp, void *ctx, u32_t accm, int pcomp, int accomp) +{ + int i; + pppos_pcb *pppos = (pppos_pcb *)ctx; + LWIP_UNUSED_ARG(ppp); + + pppos->pcomp = pcomp; + pppos->accomp = accomp; + + /* Load the ACCM bits for the 32 control codes. 
*/ + for (i = 0; i < 32/8; i++) { + pppos->out_accm[i] = (u8_t)((accm >> (8 * i)) & 0xFF); + } + + PPPDEBUG(LOG_INFO, ("pppos_send_config[%d]: out_accm=%X %X %X %X\n", + pppos->ppp->netif->num, + pppos->out_accm[0], pppos->out_accm[1], pppos->out_accm[2], pppos->out_accm[3])); +} + +static void +pppos_recv_config(ppp_pcb *ppp, void *ctx, u32_t accm, int pcomp, int accomp) +{ + int i; + pppos_pcb *pppos = (pppos_pcb *)ctx; + PPPOS_DECL_PROTECT(lev); + LWIP_UNUSED_ARG(ppp); + LWIP_UNUSED_ARG(pcomp); + LWIP_UNUSED_ARG(accomp); + + /* Load the ACCM bits for the 32 control codes. */ + PPPOS_PROTECT(lev); + for (i = 0; i < 32 / 8; i++) { + pppos->in_accm[i] = (u8_t)(accm >> (i * 8)); + } + PPPOS_UNPROTECT(lev); + + PPPDEBUG(LOG_INFO, ("pppos_recv_config[%d]: in_accm=%X %X %X %X\n", + pppos->ppp->netif->num, + pppos->in_accm[0], pppos->in_accm[1], pppos->in_accm[2], pppos->in_accm[3])); +} + +/* + * Drop the input packet. + */ +static void +pppos_input_free_current_packet(pppos_pcb *pppos) +{ + if (pppos->in_head != NULL) { + if (pppos->in_tail && (pppos->in_tail != pppos->in_head)) { + pbuf_free(pppos->in_tail); + } + pbuf_free(pppos->in_head); + pppos->in_head = NULL; + } + pppos->in_tail = NULL; +} + +/* + * Drop the input packet and increase error counters. + */ +static void +pppos_input_drop(pppos_pcb *pppos) +{ + if (pppos->in_head != NULL) { +#if 0 + PPPDEBUG(LOG_INFO, ("pppos_input_drop: %d:%.*H\n", pppos->in_head->len, min(60, pppos->in_head->len * 2), pppos->in_head->payload)); +#endif + PPPDEBUG(LOG_INFO, ("pppos_input_drop: pbuf len=%d, addr %p\n", pppos->in_head->len, (void*)pppos->in_head)); + } + pppos_input_free_current_packet(pppos); +#if VJ_SUPPORT + vj_uncompress_err(&pppos->ppp->vj_comp); +#endif /* VJ_SUPPORT */ + + LINK_STATS_INC(link.drop); + MIB2_STATS_NETIF_INC(pppos->ppp->netif, ifindiscards); +} + +/* + * pppos_output_append - append given character to end of given pbuf. + * If out_accm is not 0 and the character needs to be escaped, do so. + * If pbuf is full, send the pbuf and reuse it. + * Return the current pbuf. + */ +static err_t +pppos_output_append(pppos_pcb *pppos, err_t err, struct pbuf *nb, u8_t c, u8_t accm, u16_t *fcs) +{ + if (err != ERR_OK) { + return err; + } + + /* Make sure there is room for the character and an escape code. + * Sure we don't quite fill the buffer if the character doesn't + * get escaped but is one character worth complicating this? */ + if ((PBUF_POOL_BUFSIZE - nb->len) < 2) { + u32_t l = pppos->output_cb(pppos->ppp, (u8_t*)nb->payload, nb->len, pppos->ppp->ctx_cb); + if (l != nb->len) { + return ERR_IF; + } + nb->len = 0; + } + + /* Update FCS before checking for special characters. */ + if (fcs) { + *fcs = PPP_FCS(*fcs, c); + } + + /* Copy to output buffer escaping special characters. */ + if (accm && ESCAPE_P(pppos->out_accm, c)) { + *((u8_t*)nb->payload + nb->len++) = PPP_ESCAPE; + *((u8_t*)nb->payload + nb->len++) = c ^ PPP_TRANS; + } else { + *((u8_t*)nb->payload + nb->len++) = c; + } + + return ERR_OK; +} + +static err_t +pppos_output_last(pppos_pcb *pppos, err_t err, struct pbuf *nb, u16_t *fcs) +{ + ppp_pcb *ppp = pppos->ppp; + + /* Add FCS and trailing flag. 
*/ + err = pppos_output_append(pppos, err, nb, ~(*fcs) & 0xFF, 1, NULL); + err = pppos_output_append(pppos, err, nb, (~(*fcs) >> 8) & 0xFF, 1, NULL); + err = pppos_output_append(pppos, err, nb, PPP_FLAG, 0, NULL); + + if (err != ERR_OK) { + goto failed; + } + + /* Send remaining buffer if not empty */ + if (nb->len > 0) { + u32_t l = pppos->output_cb(ppp, (u8_t*)nb->payload, nb->len, ppp->ctx_cb); + if (l != nb->len) { + err = ERR_IF; + goto failed; + } + } + + pppos->last_xmit = sys_now(); + MIB2_STATS_NETIF_ADD(ppp->netif, ifoutoctets, nb->tot_len); + MIB2_STATS_NETIF_INC(ppp->netif, ifoutucastpkts); + LINK_STATS_INC(link.xmit); + pbuf_free(nb); + return ERR_OK; + +failed: + pppos->last_xmit = 0; /* prepend PPP_FLAG to next packet */ + LINK_STATS_INC(link.err); + LINK_STATS_INC(link.drop); + MIB2_STATS_NETIF_INC(ppp->netif, ifoutdiscards); + pbuf_free(nb); + return err; +} + +#endif /* PPP_SUPPORT && PPPOS_SUPPORT */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/netif/ppp/upap.c b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/netif/ppp/upap.c new file mode 100644 index 0000000..709c2d6 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/netif/ppp/upap.c @@ -0,0 +1,677 @@ +/* + * upap.c - User/Password Authentication Protocol. + * + * Copyright (c) 1984-2000 Carnegie Mellon University. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * 3. The name "Carnegie Mellon University" must not be used to + * endorse or promote products derived from this software without + * prior written permission. For permission or any legal + * details, please contact + * Office of Technology Transfer + * Carnegie Mellon University + * 5000 Forbes Avenue + * Pittsburgh, PA 15213-3890 + * (412) 268-4387, fax: (412) 268-7395 + * tech-transfer@andrew.cmu.edu + * + * 4. Redistributions of any form whatsoever must retain the following + * acknowledgment: + * "This product includes software developed by Computing Services + * at Carnegie Mellon University (http://www.cmu.edu/computing/)." + * + * CARNEGIE MELLON UNIVERSITY DISCLAIMS ALL WARRANTIES WITH REGARD TO + * THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY + * AND FITNESS, IN NO EVENT SHALL CARNEGIE MELLON UNIVERSITY BE LIABLE + * FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN + * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING + * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#include "netif/ppp/ppp_opts.h" +#if PPP_SUPPORT && PAP_SUPPORT /* don't build if not configured for use in lwipopts.h */ + +/* + * @todo: + */ + +#if 0 /* UNUSED */ +#include +#include +#endif /* UNUSED */ + +#include "netif/ppp/ppp_impl.h" + +#include "netif/ppp/upap.h" + +#if PPP_OPTIONS +/* + * Command-line options. 
+ */ +static option_t pap_option_list[] = { + { "hide-password", o_bool, &hide_password, + "Don't output passwords to log", OPT_PRIO | 1 }, + { "show-password", o_bool, &hide_password, + "Show password string in debug log messages", OPT_PRIOSUB | 0 }, + + { "pap-restart", o_int, &upap[0].us_timeouttime, + "Set retransmit timeout for PAP", OPT_PRIO }, + { "pap-max-authreq", o_int, &upap[0].us_maxtransmits, + "Set max number of transmissions for auth-reqs", OPT_PRIO }, + { "pap-timeout", o_int, &upap[0].us_reqtimeout, + "Set time limit for peer PAP authentication", OPT_PRIO }, + + { NULL } +}; +#endif /* PPP_OPTIONS */ + +/* + * Protocol entry points. + */ +static void upap_init(ppp_pcb *pcb); +static void upap_lowerup(ppp_pcb *pcb); +static void upap_lowerdown(ppp_pcb *pcb); +static void upap_input(ppp_pcb *pcb, u_char *inpacket, int l); +static void upap_protrej(ppp_pcb *pcb); +#if PRINTPKT_SUPPORT +static int upap_printpkt(const u_char *p, int plen, void (*printer) (void *, const char *, ...), void *arg); +#endif /* PRINTPKT_SUPPORT */ + +const struct protent pap_protent = { + PPP_PAP, + upap_init, + upap_input, + upap_protrej, + upap_lowerup, + upap_lowerdown, + NULL, + NULL, +#if PRINTPKT_SUPPORT + upap_printpkt, +#endif /* PRINTPKT_SUPPORT */ +#if PPP_DATAINPUT + NULL, +#endif /* PPP_DATAINPUT */ +#if PRINTPKT_SUPPORT + "PAP", + NULL, +#endif /* PRINTPKT_SUPPORT */ +#if PPP_OPTIONS + pap_option_list, + NULL, +#endif /* PPP_OPTIONS */ +#if DEMAND_SUPPORT + NULL, + NULL +#endif /* DEMAND_SUPPORT */ +}; + +static void upap_timeout(void *arg); +#if PPP_SERVER +static void upap_reqtimeout(void *arg); +static void upap_rauthreq(ppp_pcb *pcb, u_char *inp, int id, int len); +#endif /* PPP_SERVER */ +static void upap_rauthack(ppp_pcb *pcb, u_char *inp, int id, int len); +static void upap_rauthnak(ppp_pcb *pcb, u_char *inp, int id, int len); +static void upap_sauthreq(ppp_pcb *pcb); +#if PPP_SERVER +static void upap_sresp(ppp_pcb *pcb, u_char code, u_char id, const char *msg, int msglen); +#endif /* PPP_SERVER */ + + +/* + * upap_init - Initialize a UPAP unit. + */ +static void upap_init(ppp_pcb *pcb) { + pcb->upap.us_user = NULL; + pcb->upap.us_userlen = 0; + pcb->upap.us_passwd = NULL; + pcb->upap.us_passwdlen = 0; + pcb->upap.us_clientstate = UPAPCS_INITIAL; +#if PPP_SERVER + pcb->upap.us_serverstate = UPAPSS_INITIAL; +#endif /* PPP_SERVER */ + pcb->upap.us_id = 0; +} + + +/* + * upap_authwithpeer - Authenticate us with our peer (start client). + * + * Set new state and send authenticate's. + */ +void upap_authwithpeer(ppp_pcb *pcb, const char *user, const char *password) { + + if(!user || !password) + return; + + /* Save the username and password we're given */ + pcb->upap.us_user = user; + pcb->upap.us_userlen = (u8_t)LWIP_MIN(strlen(user), 0xff); + pcb->upap.us_passwd = password; + pcb->upap.us_passwdlen = (u8_t)LWIP_MIN(strlen(password), 0xff); + pcb->upap.us_transmits = 0; + + /* Lower layer up yet? */ + if (pcb->upap.us_clientstate == UPAPCS_INITIAL || + pcb->upap.us_clientstate == UPAPCS_PENDING) { + pcb->upap.us_clientstate = UPAPCS_PENDING; + return; + } + + upap_sauthreq(pcb); /* Start protocol */ +} + +#if PPP_SERVER +/* + * upap_authpeer - Authenticate our peer (start server). + * + * Set new state. + */ +void upap_authpeer(ppp_pcb *pcb) { + + /* Lower layer up yet? 
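From the application side, upap_authwithpeer() is reached by enabling PAP on the pcb before connecting. A short sketch assuming the pcb came from pppos_create() as above; "myuser"/"mypassword" are placeholders:

```c
#include "netif/ppp/ppp.h"

void ppp_enable_pap(ppp_pcb *ppp) {
  /* Note: lwIP stores the pointers (see us_user/us_passwd above), it does not
   * copy the strings, so they must stay valid for the whole session. */
  ppp_set_auth(ppp, PPPAUTHTYPE_PAP, "myuser", "mypassword");
  ppp_connect(ppp, 0);
}
```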
*/ + if (pcb->upap.us_serverstate == UPAPSS_INITIAL || + pcb->upap.us_serverstate == UPAPSS_PENDING) { + pcb->upap.us_serverstate = UPAPSS_PENDING; + return; + } + + pcb->upap.us_serverstate = UPAPSS_LISTEN; + if (pcb->settings.pap_req_timeout > 0) + TIMEOUT(upap_reqtimeout, pcb, pcb->settings.pap_req_timeout); +} +#endif /* PPP_SERVER */ + +/* + * upap_timeout - Retransmission timer for sending auth-reqs expired. + */ +static void upap_timeout(void *arg) { + ppp_pcb *pcb = (ppp_pcb*)arg; + + if (pcb->upap.us_clientstate != UPAPCS_AUTHREQ) + return; + + if (pcb->upap.us_transmits >= pcb->settings.pap_max_transmits) { + /* give up in disgust */ + ppp_error("No response to PAP authenticate-requests"); + pcb->upap.us_clientstate = UPAPCS_BADAUTH; + auth_withpeer_fail(pcb, PPP_PAP); + return; + } + + upap_sauthreq(pcb); /* Send Authenticate-Request */ +} + + +#if PPP_SERVER +/* + * upap_reqtimeout - Give up waiting for the peer to send an auth-req. + */ +static void upap_reqtimeout(void *arg) { + ppp_pcb *pcb = (ppp_pcb*)arg; + + if (pcb->upap.us_serverstate != UPAPSS_LISTEN) + return; /* huh?? */ + + auth_peer_fail(pcb, PPP_PAP); + pcb->upap.us_serverstate = UPAPSS_BADAUTH; +} +#endif /* PPP_SERVER */ + + +/* + * upap_lowerup - The lower layer is up. + * + * Start authenticating if pending. + */ +static void upap_lowerup(ppp_pcb *pcb) { + + if (pcb->upap.us_clientstate == UPAPCS_INITIAL) + pcb->upap.us_clientstate = UPAPCS_CLOSED; + else if (pcb->upap.us_clientstate == UPAPCS_PENDING) { + upap_sauthreq(pcb); /* send an auth-request */ + } + +#if PPP_SERVER + if (pcb->upap.us_serverstate == UPAPSS_INITIAL) + pcb->upap.us_serverstate = UPAPSS_CLOSED; + else if (pcb->upap.us_serverstate == UPAPSS_PENDING) { + pcb->upap.us_serverstate = UPAPSS_LISTEN; + if (pcb->settings.pap_req_timeout > 0) + TIMEOUT(upap_reqtimeout, pcb, pcb->settings.pap_req_timeout); + } +#endif /* PPP_SERVER */ +} + + +/* + * upap_lowerdown - The lower layer is down. + * + * Cancel all timeouts. + */ +static void upap_lowerdown(ppp_pcb *pcb) { + + if (pcb->upap.us_clientstate == UPAPCS_AUTHREQ) /* Timeout pending? */ + UNTIMEOUT(upap_timeout, pcb); /* Cancel timeout */ +#if PPP_SERVER + if (pcb->upap.us_serverstate == UPAPSS_LISTEN && pcb->settings.pap_req_timeout > 0) + UNTIMEOUT(upap_reqtimeout, pcb); +#endif /* PPP_SERVER */ + + pcb->upap.us_clientstate = UPAPCS_INITIAL; +#if PPP_SERVER + pcb->upap.us_serverstate = UPAPSS_INITIAL; +#endif /* PPP_SERVER */ +} + + +/* + * upap_protrej - Peer doesn't speak this protocol. + * + * This shouldn't happen. In any case, pretend lower layer went down. + */ +static void upap_protrej(ppp_pcb *pcb) { + + if (pcb->upap.us_clientstate == UPAPCS_AUTHREQ) { + ppp_error("PAP authentication failed due to protocol-reject"); + auth_withpeer_fail(pcb, PPP_PAP); + } +#if PPP_SERVER + if (pcb->upap.us_serverstate == UPAPSS_LISTEN) { + ppp_error("PAP authentication of peer failed (protocol-reject)"); + auth_peer_fail(pcb, PPP_PAP); + } +#endif /* PPP_SERVER */ + upap_lowerdown(pcb); +} + + +/* + * upap_input - Input UPAP packet. + */ +static void upap_input(ppp_pcb *pcb, u_char *inpacket, int l) { + u_char *inp; + u_char code, id; + int len; + + /* + * Parse header (code, id and length). + * If packet too short, drop it. 
+ */ + inp = inpacket; + if (l < UPAP_HEADERLEN) { + UPAPDEBUG(("pap_input: rcvd short header.")); + return; + } + GETCHAR(code, inp); + GETCHAR(id, inp); + GETSHORT(len, inp); + if (len < UPAP_HEADERLEN) { + UPAPDEBUG(("pap_input: rcvd illegal length.")); + return; + } + if (len > l) { + UPAPDEBUG(("pap_input: rcvd short packet.")); + return; + } + len -= UPAP_HEADERLEN; + + /* + * Action depends on code. + */ + switch (code) { + case UPAP_AUTHREQ: +#if PPP_SERVER + upap_rauthreq(pcb, inp, id, len); +#endif /* PPP_SERVER */ + break; + + case UPAP_AUTHACK: + upap_rauthack(pcb, inp, id, len); + break; + + case UPAP_AUTHNAK: + upap_rauthnak(pcb, inp, id, len); + break; + + default: /* XXX Need code reject */ + break; + } +} + +#if PPP_SERVER +/* + * upap_rauth - Receive Authenticate. + */ +static void upap_rauthreq(ppp_pcb *pcb, u_char *inp, int id, int len) { + u_char ruserlen, rpasswdlen; + char *ruser; + char *rpasswd; + char rhostname[256]; + int retcode; + const char *msg; + int msglen; + + if (pcb->upap.us_serverstate < UPAPSS_LISTEN) + return; + + /* + * If we receive a duplicate authenticate-request, we are + * supposed to return the same status as for the first request. + */ + if (pcb->upap.us_serverstate == UPAPSS_OPEN) { + upap_sresp(pcb, UPAP_AUTHACK, id, "", 0); /* return auth-ack */ + return; + } + if (pcb->upap.us_serverstate == UPAPSS_BADAUTH) { + upap_sresp(pcb, UPAP_AUTHNAK, id, "", 0); /* return auth-nak */ + return; + } + + /* + * Parse user/passwd. + */ + if (len < 1) { + UPAPDEBUG(("pap_rauth: rcvd short packet.")); + return; + } + GETCHAR(ruserlen, inp); + len -= sizeof (u_char) + ruserlen + sizeof (u_char); + if (len < 0) { + UPAPDEBUG(("pap_rauth: rcvd short packet.")); + return; + } + ruser = (char *) inp; + INCPTR(ruserlen, inp); + GETCHAR(rpasswdlen, inp); + if (len < rpasswdlen) { + UPAPDEBUG(("pap_rauth: rcvd short packet.")); + return; + } + + rpasswd = (char *) inp; + + /* + * Check the username and password given. + */ + retcode = UPAP_AUTHNAK; + if (auth_check_passwd(pcb, ruser, ruserlen, rpasswd, rpasswdlen, &msg, &msglen)) { + retcode = UPAP_AUTHACK; + } + BZERO(rpasswd, rpasswdlen); + +#if 0 /* UNUSED */ + /* + * Check remote number authorization. A plugin may have filled in + * the remote number or added an allowed number, and rather than + * return an authenticate failure, is leaving it for us to verify. + */ + if (retcode == UPAP_AUTHACK) { + if (!auth_number()) { + /* We do not want to leak info about the pap result. */ + retcode = UPAP_AUTHNAK; /* XXX exit value will be "wrong" */ + warn("calling number %q is not authorized", remote_number); + } + } + + msglen = strlen(msg); + if (msglen > 255) + msglen = 255; +#endif /* UNUSED */ + + upap_sresp(pcb, retcode, id, msg, msglen); + + /* Null terminate and clean remote name. */ + ppp_slprintf(rhostname, sizeof(rhostname), "%.*v", ruserlen, ruser); + + if (retcode == UPAP_AUTHACK) { + pcb->upap.us_serverstate = UPAPSS_OPEN; + ppp_notice("PAP peer authentication succeeded for %q", rhostname); + auth_peer_success(pcb, PPP_PAP, 0, ruser, ruserlen); + } else { + pcb->upap.us_serverstate = UPAPSS_BADAUTH; + ppp_warn("PAP peer authentication failed for %q", rhostname); + auth_peer_fail(pcb, PPP_PAP); + } + + if (pcb->settings.pap_req_timeout > 0) + UNTIMEOUT(upap_reqtimeout, pcb); +} +#endif /* PPP_SERVER */ + +/* + * upap_rauthack - Receive Authenticate-Ack. 
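For clarity, the wire layout that upap_sauthreq() emits and upap_rauthreq() parses is: code(1), id(1), length(2, big-endian, covering the whole PAP packet), peer-id-len(1), peer-id, passwd-len(1), passwd. The PPP protocol header (0xC023, added by MAKEHEADER) is not part of that length. A standalone sketch building such a packet (pap_build_authreq is an illustrative name):

```c
#include <stdint.h>
#include <string.h>
#include <stdio.h>

static size_t pap_build_authreq(uint8_t *out, uint8_t id,
                                const char *user, const char *pass) {
  size_t ulen = strlen(user), plen = strlen(pass);
  size_t len = 4 + 1 + ulen + 1 + plen;   /* header(4) + two length octets + strings */
  out[0] = 1;                             /* UPAP_AUTHREQ */
  out[1] = id;
  out[2] = (uint8_t)(len >> 8);           /* length, big-endian */
  out[3] = (uint8_t)(len & 0xff);
  out[4] = (uint8_t)ulen;
  memcpy(out + 5, user, ulen);
  out[5 + ulen] = (uint8_t)plen;
  memcpy(out + 6 + ulen, pass, plen);
  return len;
}

int main(void) {
  uint8_t pkt[64];
  size_t n = pap_build_authreq(pkt, 1, "user", "secret");
  printf("PAP Authenticate-Request, %u bytes, length field=%u\n",
         (unsigned)n, (unsigned)((pkt[2] << 8) | pkt[3]));
  return 0;
}
```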
+ */ +static void upap_rauthack(ppp_pcb *pcb, u_char *inp, int id, int len) { + u_char msglen; + char *msg; + LWIP_UNUSED_ARG(id); + + if (pcb->upap.us_clientstate != UPAPCS_AUTHREQ) /* XXX */ + return; + + /* + * Parse message. + */ + if (len < 1) { + UPAPDEBUG(("pap_rauthack: ignoring missing msg-length.")); + } else { + GETCHAR(msglen, inp); + if (msglen > 0) { + len -= sizeof (u_char); + if (len < msglen) { + UPAPDEBUG(("pap_rauthack: rcvd short packet.")); + return; + } + msg = (char *) inp; + PRINTMSG(msg, msglen); + } + } + + pcb->upap.us_clientstate = UPAPCS_OPEN; + + auth_withpeer_success(pcb, PPP_PAP, 0); +} + + +/* + * upap_rauthnak - Receive Authenticate-Nak. + */ +static void upap_rauthnak(ppp_pcb *pcb, u_char *inp, int id, int len) { + u_char msglen; + char *msg; + LWIP_UNUSED_ARG(id); + + if (pcb->upap.us_clientstate != UPAPCS_AUTHREQ) /* XXX */ + return; + + /* + * Parse message. + */ + if (len < 1) { + UPAPDEBUG(("pap_rauthnak: ignoring missing msg-length.")); + } else { + GETCHAR(msglen, inp); + if (msglen > 0) { + len -= sizeof (u_char); + if (len < msglen) { + UPAPDEBUG(("pap_rauthnak: rcvd short packet.")); + return; + } + msg = (char *) inp; + PRINTMSG(msg, msglen); + } + } + + pcb->upap.us_clientstate = UPAPCS_BADAUTH; + + ppp_error("PAP authentication failed"); + auth_withpeer_fail(pcb, PPP_PAP); +} + + +/* + * upap_sauthreq - Send an Authenticate-Request. + */ +static void upap_sauthreq(ppp_pcb *pcb) { + struct pbuf *p; + u_char *outp; + int outlen; + + outlen = UPAP_HEADERLEN + 2 * sizeof (u_char) + + pcb->upap.us_userlen + pcb->upap.us_passwdlen; + p = pbuf_alloc(PBUF_RAW, (u16_t)(PPP_HDRLEN +outlen), PPP_CTRL_PBUF_TYPE); + if(NULL == p) + return; + if(p->tot_len != p->len) { + pbuf_free(p); + return; + } + + outp = (u_char*)p->payload; + MAKEHEADER(outp, PPP_PAP); + + PUTCHAR(UPAP_AUTHREQ, outp); + PUTCHAR(++pcb->upap.us_id, outp); + PUTSHORT(outlen, outp); + PUTCHAR(pcb->upap.us_userlen, outp); + MEMCPY(outp, pcb->upap.us_user, pcb->upap.us_userlen); + INCPTR(pcb->upap.us_userlen, outp); + PUTCHAR(pcb->upap.us_passwdlen, outp); + MEMCPY(outp, pcb->upap.us_passwd, pcb->upap.us_passwdlen); + + ppp_write(pcb, p); + + TIMEOUT(upap_timeout, pcb, pcb->settings.pap_timeout_time); + ++pcb->upap.us_transmits; + pcb->upap.us_clientstate = UPAPCS_AUTHREQ; +} + +#if PPP_SERVER +/* + * upap_sresp - Send a response (ack or nak). + */ +static void upap_sresp(ppp_pcb *pcb, u_char code, u_char id, const char *msg, int msglen) { + struct pbuf *p; + u_char *outp; + int outlen; + + outlen = UPAP_HEADERLEN + sizeof (u_char) + msglen; + p = pbuf_alloc(PBUF_RAW, (u16_t)(PPP_HDRLEN +outlen), PPP_CTRL_PBUF_TYPE); + if(NULL == p) + return; + if(p->tot_len != p->len) { + pbuf_free(p); + return; + } + + outp = (u_char*)p->payload; + MAKEHEADER(outp, PPP_PAP); + + PUTCHAR(code, outp); + PUTCHAR(id, outp); + PUTSHORT(outlen, outp); + PUTCHAR(msglen, outp); + MEMCPY(outp, msg, msglen); + + ppp_write(pcb, p); +} +#endif /* PPP_SERVER */ + +#if PRINTPKT_SUPPORT +/* + * upap_printpkt - print the contents of a PAP packet. 
+ */ +static const char* const upap_codenames[] = { + "AuthReq", "AuthAck", "AuthNak" +}; + +static int upap_printpkt(const u_char *p, int plen, void (*printer) (void *, const char *, ...), void *arg) { + int code, id, len; + int mlen, ulen, wlen; + const u_char *user, *pwd, *msg; + const u_char *pstart; + + if (plen < UPAP_HEADERLEN) + return 0; + pstart = p; + GETCHAR(code, p); + GETCHAR(id, p); + GETSHORT(len, p); + if (len < UPAP_HEADERLEN || len > plen) + return 0; + + if (code >= 1 && code <= (int)LWIP_ARRAYSIZE(upap_codenames)) + printer(arg, " %s", upap_codenames[code-1]); + else + printer(arg, " code=0x%x", code); + printer(arg, " id=0x%x", id); + len -= UPAP_HEADERLEN; + switch (code) { + case UPAP_AUTHREQ: + if (len < 1) + break; + ulen = p[0]; + if (len < ulen + 2) + break; + wlen = p[ulen + 1]; + if (len < ulen + wlen + 2) + break; + user = (const u_char *) (p + 1); + pwd = (const u_char *) (p + ulen + 2); + p += ulen + wlen + 2; + len -= ulen + wlen + 2; + printer(arg, " user="); + ppp_print_string(user, ulen, printer, arg); + printer(arg, " password="); +/* FIXME: require ppp_pcb struct as printpkt() argument */ +#if 0 + if (!pcb->settings.hide_password) +#endif + ppp_print_string(pwd, wlen, printer, arg); +#if 0 + else + printer(arg, ""); +#endif + break; + case UPAP_AUTHACK: + case UPAP_AUTHNAK: + if (len < 1) + break; + mlen = p[0]; + if (len < mlen + 1) + break; + msg = (const u_char *) (p + 1); + p += mlen + 1; + len -= mlen + 1; + printer(arg, " "); + ppp_print_string(msg, mlen, printer, arg); + break; + default: + break; + } + + /* print the rest of the bytes in the packet */ + for (; len > 0; --len) { + GETCHAR(code, p); + printer(arg, " %.2x", code); + } + + return p - pstart; +} +#endif /* PRINTPKT_SUPPORT */ + +#endif /* PPP_SUPPORT && PAP_SUPPORT */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/netif/ppp/utils.c b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/netif/ppp/utils.c new file mode 100644 index 0000000..be7a3b0 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/netif/ppp/utils.c @@ -0,0 +1,957 @@ +/* + * utils.c - various utility functions used in pppd. + * + * Copyright (c) 1999-2002 Paul Mackerras. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. The name(s) of the authors of this software must not be used to + * endorse or promote products derived from this software without + * prior written permission. + * + * 3. Redistributions of any form whatsoever must retain the following + * acknowledgment: + * "This product includes software developed by Paul Mackerras + * ". + * + * THE AUTHORS OF THIS SOFTWARE DISCLAIM ALL WARRANTIES WITH REGARD TO + * THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY + * AND FITNESS, IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY + * SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN + * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING + * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#include "netif/ppp/ppp_opts.h" +#if PPP_SUPPORT /* don't build if not configured for use in lwipopts.h */ + +#if 0 /* UNUSED */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#ifdef SVR4 +#include +#endif +#endif /* UNUSED */ + +#include "netif/ppp/ppp_impl.h" + +#include "netif/ppp/fsm.h" +#include "netif/ppp/lcp.h" + +#if defined(SUNOS4) +extern char *strerror(); +#endif + +static void ppp_logit(int level, const char *fmt, va_list args); +static void ppp_log_write(int level, char *buf); +#if PRINTPKT_SUPPORT +static void ppp_vslp_printer(void *arg, const char *fmt, ...); +static void ppp_format_packet(const u_char *p, int len, + void (*printer) (void *, const char *, ...), void *arg); + +struct buffer_info { + char *ptr; + int len; +}; +#endif /* PRINTPKT_SUPPORT */ + +/* + * ppp_strlcpy - like strcpy/strncpy, doesn't overflow destination buffer, + * always leaves destination null-terminated (for len > 0). + */ +size_t ppp_strlcpy(char *dest, const char *src, size_t len) { + size_t ret = strlen(src); + + if (len != 0) { + if (ret < len) + strcpy(dest, src); + else { + strncpy(dest, src, len - 1); + dest[len-1] = 0; + } + } + return ret; +} + +/* + * ppp_strlcat - like strcat/strncat, doesn't overflow destination buffer, + * always leaves destination null-terminated (for len > 0). + */ +size_t ppp_strlcat(char *dest, const char *src, size_t len) { + size_t dlen = strlen(dest); + + return dlen + ppp_strlcpy(dest + dlen, src, (len > dlen? len - dlen: 0)); +} + + +/* + * ppp_slprintf - format a message into a buffer. Like sprintf except we + * also specify the length of the output buffer, and we handle + * %m (error message), %v (visible string), + * %q (quoted string), %t (current time) and %I (IP address) formats. + * Doesn't do floating-point formats. + * Returns the number of chars put into buf. + */ +int ppp_slprintf(char *buf, int buflen, const char *fmt, ...) { + va_list args; + int n; + + va_start(args, fmt); + n = ppp_vslprintf(buf, buflen, fmt, args); + va_end(args); + return n; +} + +/* + * ppp_vslprintf - like ppp_slprintf, takes a va_list instead of a list of args. + */ +#define OUTCHAR(c) (buflen > 0? 
(--buflen, *buf++ = (c)): 0) + +int ppp_vslprintf(char *buf, int buflen, const char *fmt, va_list args) { + int c, i, n; + int width, prec, fillch; + int base, len, neg, quoted; + unsigned long val = 0; + const char *f; + char *str, *buf0; + const unsigned char *p; + char num[32]; +#if 0 /* need port */ + time_t t; +#endif /* need port */ + u32_t ip; + static char hexchars[] = "0123456789abcdef"; +#if PRINTPKT_SUPPORT + struct buffer_info bufinfo; +#endif /* PRINTPKT_SUPPORT */ + + buf0 = buf; + --buflen; + while (buflen > 0) { + for (f = fmt; *f != '%' && *f != 0; ++f) + ; + if (f > fmt) { + len = f - fmt; + if (len > buflen) + len = buflen; + memcpy(buf, fmt, len); + buf += len; + buflen -= len; + fmt = f; + } + if (*fmt == 0) + break; + c = *++fmt; + width = 0; + prec = -1; + fillch = ' '; + if (c == '0') { + fillch = '0'; + c = *++fmt; + } + if (c == '*') { + width = va_arg(args, int); + c = *++fmt; + } else { + while (lwip_isdigit(c)) { + width = width * 10 + c - '0'; + c = *++fmt; + } + } + if (c == '.') { + c = *++fmt; + if (c == '*') { + prec = va_arg(args, int); + c = *++fmt; + } else { + prec = 0; + while (lwip_isdigit(c)) { + prec = prec * 10 + c - '0'; + c = *++fmt; + } + } + } + str = 0; + base = 0; + neg = 0; + ++fmt; + switch (c) { + case 'l': + c = *fmt++; + switch (c) { + case 'd': + val = va_arg(args, long); + if ((long)val < 0) { + neg = 1; + val = (unsigned long)-(long)val; + } + base = 10; + break; + case 'u': + val = va_arg(args, unsigned long); + base = 10; + break; + default: + OUTCHAR('%'); + OUTCHAR('l'); + --fmt; /* so %lz outputs %lz etc. */ + continue; + } + break; + case 'd': + i = va_arg(args, int); + if (i < 0) { + neg = 1; + val = -i; + } else + val = i; + base = 10; + break; + case 'u': + val = va_arg(args, unsigned int); + base = 10; + break; + case 'o': + val = va_arg(args, unsigned int); + base = 8; + break; + case 'x': + case 'X': + val = va_arg(args, unsigned int); + base = 16; + break; +#if 0 /* unused (and wrong on LLP64 systems) */ + case 'p': + val = (unsigned long) va_arg(args, void *); + base = 16; + neg = 2; + break; +#endif /* unused (and wrong on LLP64 systems) */ + case 's': + str = va_arg(args, char *); + break; + case 'c': + num[0] = va_arg(args, int); + num[1] = 0; + str = num; + break; +#if 0 /* do we always have strerror() in embedded ? */ + case 'm': + str = strerror(errno); + break; +#endif /* do we always have strerror() in embedded ? 
*/ + case 'I': + ip = va_arg(args, u32_t); + ip = lwip_ntohl(ip); + ppp_slprintf(num, sizeof(num), "%d.%d.%d.%d", (ip >> 24) & 0xff, + (ip >> 16) & 0xff, (ip >> 8) & 0xff, ip & 0xff); + str = num; + break; +#if 0 /* need port */ + case 't': + time(&t); + str = ctime(&t); + str += 4; /* chop off the day name */ + str[15] = 0; /* chop off year and newline */ + break; +#endif /* need port */ + case 'v': /* "visible" string */ + case 'q': /* quoted string */ + quoted = c == 'q'; + p = va_arg(args, unsigned char *); + if (p == NULL) + p = (const unsigned char *)""; + if (fillch == '0' && prec >= 0) { + n = prec; + } else { + n = strlen((const char *)p); + if (prec >= 0 && n > prec) + n = prec; + } + while (n > 0 && buflen > 0) { + c = *p++; + --n; + if (!quoted && c >= 0x80) { + OUTCHAR('M'); + OUTCHAR('-'); + c -= 0x80; + } + if (quoted && (c == '"' || c == '\\')) + OUTCHAR('\\'); + if (c < 0x20 || (0x7f <= c && c < 0xa0)) { + if (quoted) { + OUTCHAR('\\'); + switch (c) { + case '\t': OUTCHAR('t'); break; + case '\n': OUTCHAR('n'); break; + case '\b': OUTCHAR('b'); break; + case '\f': OUTCHAR('f'); break; + default: + OUTCHAR('x'); + OUTCHAR(hexchars[c >> 4]); + OUTCHAR(hexchars[c & 0xf]); + } + } else { + if (c == '\t') + OUTCHAR(c); + else { + OUTCHAR('^'); + OUTCHAR(c ^ 0x40); + } + } + } else + OUTCHAR(c); + } + continue; +#if PRINTPKT_SUPPORT + case 'P': /* print PPP packet */ + bufinfo.ptr = buf; + bufinfo.len = buflen + 1; + p = va_arg(args, unsigned char *); + n = va_arg(args, int); + ppp_format_packet(p, n, ppp_vslp_printer, &bufinfo); + buf = bufinfo.ptr; + buflen = bufinfo.len - 1; + continue; +#endif /* PRINTPKT_SUPPORT */ + case 'B': + p = va_arg(args, unsigned char *); + for (n = prec; n > 0; --n) { + c = *p++; + if (fillch == ' ') + OUTCHAR(' '); + OUTCHAR(hexchars[(c >> 4) & 0xf]); + OUTCHAR(hexchars[c & 0xf]); + } + continue; + default: + *buf++ = '%'; + if (c != '%') + --fmt; /* so %z outputs %z etc. */ + --buflen; + continue; + } + if (base != 0) { + str = num + sizeof(num); + *--str = 0; + while (str > num + neg) { + *--str = hexchars[val % base]; + val = val / base; + if (--prec <= 0 && val == 0) + break; + } + switch (neg) { + case 1: + *--str = '-'; + break; + case 2: + *--str = 'x'; + *--str = '0'; + break; + default: + break; + } + len = num + sizeof(num) - 1 - str; + } else { + len = strlen(str); + if (prec >= 0 && len > prec) + len = prec; + } + if (width > 0) { + if (width > buflen) + width = buflen; + if ((n = width - len) > 0) { + buflen -= n; + for (; n > 0; --n) + *buf++ = fillch; + } + } + if (len > buflen) + len = buflen; + memcpy(buf, str, len); + buf += len; + buflen -= len; + } + *buf = 0; + return buf - buf0; +} + +#if PRINTPKT_SUPPORT +/* + * vslp_printer - used in processing a %P format + */ +static void ppp_vslp_printer(void *arg, const char *fmt, ...) { + int n; + va_list pvar; + struct buffer_info *bi; + + va_start(pvar, fmt); + bi = (struct buffer_info *) arg; + n = ppp_vslprintf(bi->ptr, bi->len, fmt, pvar); + va_end(pvar); + + bi->ptr += n; + bi->len -= n; +} +#endif /* PRINTPKT_SUPPORT */ + +#if 0 /* UNUSED */ +/* + * log_packet - format a packet and log it. + */ + +void +log_packet(p, len, prefix, level) + u_char *p; + int len; + char *prefix; + int level; +{ + init_pr_log(prefix, level); + ppp_format_packet(p, len, pr_log, &level); + end_pr_log(); +} +#endif /* UNUSED */ + +#if PRINTPKT_SUPPORT +/* + * ppp_format_packet - make a readable representation of a packet, + * calling `printer(arg, format, ...)' to output it. 
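The formatter above extends printf with pppd-style conversions: %I prints a u32_t IPv4 address (network order), %v/%q print visible/quoted strings, %B hex-dumps a buffer using the precision as the byte count, and %P pretty-prints a PPP packet. ppp_slprintf() is an internal helper declared in ppp_impl.h, so this is only an illustration of the format extensions, not a recommended public API; demo_slprintf is an illustrative name:

```c
#include <stdio.h>
#include "lwip/def.h"
#include "netif/ppp/ppp_impl.h"

void demo_slprintf(void) {
  char buf[64];
  u32_t ip = lwip_htonl(0xC0A80101UL);      /* 192.168.1.1 in network order */
  unsigned char id[] = { 0xde, 0xad, 0xbe, 0xef };

  ppp_slprintf(buf, sizeof(buf), "peer=%I", ip);
  printf("%s\n", buf);                      /* "peer=192.168.1.1" */

  ppp_slprintf(buf, sizeof(buf), "id=%.4B", id);
  printf("%s\n", buf);                      /* "id= de ad be ef" (default fill adds spaces) */
}
```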
+ */ +static void ppp_format_packet(const u_char *p, int len, + void (*printer) (void *, const char *, ...), void *arg) { + int i, n; + u_short proto; + const struct protent *protp; + + if (len >= 2) { + GETSHORT(proto, p); + len -= 2; + for (i = 0; (protp = protocols[i]) != NULL; ++i) + if (proto == protp->protocol) + break; + if (protp != NULL) { + printer(arg, "[%s", protp->name); + n = (*protp->printpkt)(p, len, printer, arg); + printer(arg, "]"); + p += n; + len -= n; + } else { + for (i = 0; (protp = protocols[i]) != NULL; ++i) + if (proto == (protp->protocol & ~0x8000)) + break; + if (protp != 0 && protp->data_name != 0) { + printer(arg, "[%s data]", protp->data_name); + if (len > 8) + printer(arg, "%.8B ...", p); + else + printer(arg, "%.*B", len, p); + len = 0; + } else + printer(arg, "[proto=0x%x]", proto); + } + } + + if (len > 32) + printer(arg, "%.32B ...", p); + else + printer(arg, "%.*B", len, p); +} +#endif /* PRINTPKT_SUPPORT */ + +#if 0 /* UNUSED */ +/* + * init_pr_log, end_pr_log - initialize and finish use of pr_log. + */ + +static char line[256]; /* line to be logged accumulated here */ +static char *linep; /* current pointer within line */ +static int llevel; /* level for logging */ + +void +init_pr_log(prefix, level) + const char *prefix; + int level; +{ + linep = line; + if (prefix != NULL) { + ppp_strlcpy(line, prefix, sizeof(line)); + linep = line + strlen(line); + } + llevel = level; +} + +void +end_pr_log() +{ + if (linep != line) { + *linep = 0; + ppp_log_write(llevel, line); + } +} + +/* + * pr_log - printer routine for outputting to log + */ +void +pr_log (void *arg, const char *fmt, ...) +{ + int l, n; + va_list pvar; + char *p, *eol; + char buf[256]; + + va_start(pvar, fmt); + n = ppp_vslprintf(buf, sizeof(buf), fmt, pvar); + va_end(pvar); + + p = buf; + eol = strchr(buf, '\n'); + if (linep != line) { + l = (eol == NULL)? n: eol - buf; + if (linep + l < line + sizeof(line)) { + if (l > 0) { + memcpy(linep, buf, l); + linep += l; + } + if (eol == NULL) + return; + p = eol + 1; + eol = strchr(p, '\n'); + } + *linep = 0; + ppp_log_write(llevel, line); + linep = line; + } + + while (eol != NULL) { + *eol = 0; + ppp_log_write(llevel, p); + p = eol + 1; + eol = strchr(p, '\n'); + } + + /* assumes sizeof(buf) <= sizeof(line) */ + l = buf + n - p; + if (l > 0) { + memcpy(line, p, n); + linep = line + l; + } +} +#endif /* UNUSED */ + +/* + * ppp_print_string - print a readable representation of a string using + * printer. + */ +void ppp_print_string(const u_char *p, int len, void (*printer) (void *, const char *, ...), void *arg) { + int c; + + printer(arg, "\""); + for (; len > 0; --len) { + c = *p++; + if (' ' <= c && c <= '~') { + if (c == '\\' || c == '"') + printer(arg, "\\"); + printer(arg, "%c", c); + } else { + switch (c) { + case '\n': + printer(arg, "\\n"); + break; + case '\r': + printer(arg, "\\r"); + break; + case '\t': + printer(arg, "\\t"); + break; + default: + printer(arg, "\\%.3o", (u8_t)c); + /* no break */ + } + } + } + printer(arg, "\""); +} + +/* + * ppp_logit - does the hard work for fatal et al. 
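+ * It formats the message into a local buffer with ppp_vslprintf() and hands
+ * the result to ppp_log_write().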
+ */ +static void ppp_logit(int level, const char *fmt, va_list args) { + char buf[1024]; + + ppp_vslprintf(buf, sizeof(buf), fmt, args); + ppp_log_write(level, buf); +} + +static void ppp_log_write(int level, char *buf) { + LWIP_UNUSED_ARG(level); /* necessary if PPPDEBUG is defined to an empty function */ + LWIP_UNUSED_ARG(buf); + PPPDEBUG(level, ("%s\n", buf) ); +#if 0 + if (log_to_fd >= 0 && (level != LOG_DEBUG || debug)) { + int n = strlen(buf); + + if (n > 0 && buf[n-1] == '\n') + --n; + if (write(log_to_fd, buf, n) != n + || write(log_to_fd, "\n", 1) != 1) + log_to_fd = -1; + } +#endif +} + +/* + * ppp_fatal - log an error message and die horribly. + */ +void ppp_fatal(const char *fmt, ...) { + va_list pvar; + + va_start(pvar, fmt); + ppp_logit(LOG_ERR, fmt, pvar); + va_end(pvar); + + LWIP_ASSERT("ppp_fatal", 0); /* as promised */ +} + +/* + * ppp_error - log an error message. + */ +void ppp_error(const char *fmt, ...) { + va_list pvar; + + va_start(pvar, fmt); + ppp_logit(LOG_ERR, fmt, pvar); + va_end(pvar); +#if 0 /* UNUSED */ + ++error_count; +#endif /* UNUSED */ +} + +/* + * ppp_warn - log a warning message. + */ +void ppp_warn(const char *fmt, ...) { + va_list pvar; + + va_start(pvar, fmt); + ppp_logit(LOG_WARNING, fmt, pvar); + va_end(pvar); +} + +/* + * ppp_notice - log a notice-level message. + */ +void ppp_notice(const char *fmt, ...) { + va_list pvar; + + va_start(pvar, fmt); + ppp_logit(LOG_NOTICE, fmt, pvar); + va_end(pvar); +} + +/* + * ppp_info - log an informational message. + */ +void ppp_info(const char *fmt, ...) { + va_list pvar; + + va_start(pvar, fmt); + ppp_logit(LOG_INFO, fmt, pvar); + va_end(pvar); +} + +/* + * ppp_dbglog - log a debug message. + */ +void ppp_dbglog(const char *fmt, ...) { + va_list pvar; + + va_start(pvar, fmt); + ppp_logit(LOG_DEBUG, fmt, pvar); + va_end(pvar); +} + +#if PRINTPKT_SUPPORT +/* + * ppp_dump_packet - print out a packet in readable form if it is interesting. + * Assumes len >= PPP_HDRLEN. + */ +void ppp_dump_packet(ppp_pcb *pcb, const char *tag, unsigned char *p, int len) { + int proto; + + /* + * don't print data packets, i.e. IPv4, IPv6, VJ, and compressed packets. + */ + proto = (p[0] << 8) + p[1]; + if (proto < 0xC000 && (proto & ~0x8000) == proto) + return; + + /* + * don't print valid LCP echo request/reply packets if the link is up. + */ + if (proto == PPP_LCP && pcb->phase == PPP_PHASE_RUNNING && len >= 2 + HEADERLEN) { + unsigned char *lcp = p + 2; + int l = (lcp[2] << 8) + lcp[3]; + + if ((lcp[0] == ECHOREQ || lcp[0] == ECHOREP) + && l >= HEADERLEN && l <= len - 2) + return; + } + + ppp_dbglog("%s %P", tag, p, len); +} +#endif /* PRINTPKT_SUPPORT */ + +#if 0 /* Unused */ + +/* + * complete_read - read a full `count' bytes from fd, + * unless end-of-file or an error other than EINTR is encountered. + */ +ssize_t +complete_read(int fd, void *buf, size_t count) +{ + size_t done; + ssize_t nb; + char *ptr = buf; + + for (done = 0; done < count; ) { + nb = read(fd, ptr, count - done); + if (nb < 0) { + if (errno == EINTR) + continue; + return -1; + } + if (nb == 0) + break; + done += nb; + ptr += nb; + } + return done; +} + +/* Procedures for locking the serial device using a lock file. 
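The lock file lives under LOCK_DIR and records the owning pid (ASCII or binary, depending on LOCK_BINARY); stale locks left by dead processes are detected with kill(pid, 0) and removed.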
*/ +#ifndef LOCK_DIR +#ifdef __linux__ +#define LOCK_DIR "/var/lock" +#else +#ifdef SVR4 +#define LOCK_DIR "/var/spool/locks" +#else +#define LOCK_DIR "/var/spool/lock" +#endif +#endif +#endif /* LOCK_DIR */ + +static char lock_file[MAXPATHLEN]; + +/* + * lock - create a lock file for the named device + */ +int +lock(dev) + char *dev; +{ +#ifdef LOCKLIB + int result; + + result = mklock (dev, (void *) 0); + if (result == 0) { + ppp_strlcpy(lock_file, dev, sizeof(lock_file)); + return 0; + } + + if (result > 0) + ppp_notice("Device %s is locked by pid %d", dev, result); + else + ppp_error("Can't create lock file %s", lock_file); + return -1; + +#else /* LOCKLIB */ + + char lock_buffer[12]; + int fd, pid, n; + +#ifdef SVR4 + struct stat sbuf; + + if (stat(dev, &sbuf) < 0) { + ppp_error("Can't get device number for %s: %m", dev); + return -1; + } + if ((sbuf.st_mode & S_IFMT) != S_IFCHR) { + ppp_error("Can't lock %s: not a character device", dev); + return -1; + } + ppp_slprintf(lock_file, sizeof(lock_file), "%s/LK.%03d.%03d.%03d", + LOCK_DIR, major(sbuf.st_dev), + major(sbuf.st_rdev), minor(sbuf.st_rdev)); +#else + char *p; + char lockdev[MAXPATHLEN]; + + if ((p = strstr(dev, "dev/")) != NULL) { + dev = p + 4; + strncpy(lockdev, dev, MAXPATHLEN-1); + lockdev[MAXPATHLEN-1] = 0; + while ((p = strrchr(lockdev, '/')) != NULL) { + *p = '_'; + } + dev = lockdev; + } else + if ((p = strrchr(dev, '/')) != NULL) + dev = p + 1; + + ppp_slprintf(lock_file, sizeof(lock_file), "%s/LCK..%s", LOCK_DIR, dev); +#endif + + while ((fd = open(lock_file, O_EXCL | O_CREAT | O_RDWR, 0644)) < 0) { + if (errno != EEXIST) { + ppp_error("Can't create lock file %s: %m", lock_file); + break; + } + + /* Read the lock file to find out who has the device locked. */ + fd = open(lock_file, O_RDONLY, 0); + if (fd < 0) { + if (errno == ENOENT) /* This is just a timing problem. */ + continue; + ppp_error("Can't open existing lock file %s: %m", lock_file); + break; + } +#ifndef LOCK_BINARY + n = read(fd, lock_buffer, 11); +#else + n = read(fd, &pid, sizeof(pid)); +#endif /* LOCK_BINARY */ + close(fd); + fd = -1; + if (n <= 0) { + ppp_error("Can't read pid from lock file %s", lock_file); + break; + } + + /* See if the process still exists. */ +#ifndef LOCK_BINARY + lock_buffer[n] = 0; + pid = atoi(lock_buffer); +#endif /* LOCK_BINARY */ + if (pid == getpid()) + return 1; /* somebody else locked it for us */ + if (pid == 0 + || (kill(pid, 0) == -1 && errno == ESRCH)) { + if (unlink (lock_file) == 0) { + ppp_notice("Removed stale lock on %s (pid %d)", dev, pid); + continue; + } + ppp_warn("Couldn't remove stale lock on %s", dev); + } else + ppp_notice("Device %s is locked by pid %d", dev, pid); + break; + } + + if (fd < 0) { + lock_file[0] = 0; + return -1; + } + + pid = getpid(); +#ifndef LOCK_BINARY + ppp_slprintf(lock_buffer, sizeof(lock_buffer), "%10d\n", pid); + write (fd, lock_buffer, 11); +#else + write(fd, &pid, sizeof (pid)); +#endif + close(fd); + return 0; + +#endif +} + +/* + * relock - called to update our lockfile when we are about to detach, + * thus changing our pid (we fork, the child carries on, and the parent dies). + * Note that this is called by the parent, with pid equal to the pid + * of the child. This avoids a potential race which would exist if + * we had the child rewrite the lockfile (the parent might die first, + * and another process could think the lock was stale if it checked + * between when the parent died and the child rewrote the lockfile). 
+ */ +int +relock(pid) + int pid; +{ +#ifdef LOCKLIB + /* XXX is there a way to do this? */ + return -1; +#else /* LOCKLIB */ + + int fd; + char lock_buffer[12]; + + if (lock_file[0] == 0) + return -1; + fd = open(lock_file, O_WRONLY, 0); + if (fd < 0) { + ppp_error("Couldn't reopen lock file %s: %m", lock_file); + lock_file[0] = 0; + return -1; + } + +#ifndef LOCK_BINARY + ppp_slprintf(lock_buffer, sizeof(lock_buffer), "%10d\n", pid); + write (fd, lock_buffer, 11); +#else + write(fd, &pid, sizeof(pid)); +#endif /* LOCK_BINARY */ + close(fd); + return 0; + +#endif /* LOCKLIB */ +} + +/* + * unlock - remove our lockfile + */ +void +unlock() +{ + if (lock_file[0]) { +#ifdef LOCKLIB + (void) rmlock(lock_file, (void *) 0); +#else + unlink(lock_file); +#endif + lock_file[0] = 0; + } +} + +#endif /* Unused */ + +#endif /* PPP_SUPPORT */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/netif/ppp/vj.c b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/netif/ppp/vj.c new file mode 100644 index 0000000..8ba94ba --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/netif/ppp/vj.c @@ -0,0 +1,685 @@ +/* + * Routines to compress and uncompess tcp packets (for transmission + * over low speed serial lines. + * + * Copyright (c) 1989 Regents of the University of California. + * All rights reserved. + * + * Redistribution and use in source and binary forms are permitted + * provided that the above copyright notice and this paragraph are + * duplicated in all such forms and that any documentation, + * advertising materials, and other materials related to such + * distribution and use acknowledge that the software was developed + * by the University of California, Berkeley. The name of the + * University may not be used to endorse or promote products derived + * from this software without specific prior written permission. + * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED + * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. + * + * Van Jacobson (van@helios.ee.lbl.gov), Dec 31, 1989: + * Initial distribution. + * + * Modified June 1993 by Paul Mackerras, paulus@cs.anu.edu.au, + * so that the entire packet being decompressed doesn't have + * to be in contiguous memory (just the compressed header). + * + * Modified March 1998 by Guy Lancaster, glanca@gesn.com, + * for a 16 bit processor. + */ + +#include "netif/ppp/ppp_opts.h" +#if PPP_SUPPORT && VJ_SUPPORT /* don't build if not configured for use in lwipopts.h */ + +#include "netif/ppp/ppp_impl.h" +#include "netif/ppp/pppdebug.h" + +#include "netif/ppp/vj.h" + +#include + +#if LINK_STATS +#define INCR(counter) ++comp->stats.counter +#else +#define INCR(counter) +#endif + +void +vj_compress_init(struct vjcompress *comp) +{ + u8_t i; + struct cstate *tstate = comp->tstate; + +#if MAX_SLOTS == 0 + memset((char *)comp, 0, sizeof(*comp)); +#endif + comp->maxSlotIndex = MAX_SLOTS - 1; + comp->compressSlot = 0; /* Disable slot ID compression by default. */ + for (i = MAX_SLOTS - 1; i > 0; --i) { + tstate[i].cs_id = i; + tstate[i].cs_next = &tstate[i - 1]; + } + tstate[0].cs_next = &tstate[MAX_SLOTS - 1]; + tstate[0].cs_id = 0; + comp->last_cs = &tstate[0]; + comp->last_recv = 255; + comp->last_xmit = 255; + comp->flags = VJF_TOSS; +} + + +/* ENCODE encodes a number that is known to be non-zero. 
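Values below 256 are written as a single byte; larger values are written as a zero marker followed by the 16-bit value, high byte first (e.g. ENCODE(300) emits 0x00 0x01 0x2C).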
ENCODEZ + * checks for zero (since zero has to be encoded in the long, 3 byte + * form). + */ +#define ENCODE(n) { \ + if ((u16_t)(n) >= 256) { \ + *cp++ = 0; \ + cp[1] = (u8_t)(n); \ + cp[0] = (u8_t)((n) >> 8); \ + cp += 2; \ + } else { \ + *cp++ = (u8_t)(n); \ + } \ +} +#define ENCODEZ(n) { \ + if ((u16_t)(n) >= 256 || (u16_t)(n) == 0) { \ + *cp++ = 0; \ + cp[1] = (u8_t)(n); \ + cp[0] = (u8_t)((n) >> 8); \ + cp += 2; \ + } else { \ + *cp++ = (u8_t)(n); \ + } \ +} + +#define DECODEL(f) { \ + if (*cp == 0) {\ + u32_t tmp_ = lwip_ntohl(f) + ((cp[1] << 8) | cp[2]); \ + (f) = lwip_htonl(tmp_); \ + cp += 3; \ + } else { \ + u32_t tmp_ = lwip_ntohl(f) + (u32_t)*cp++; \ + (f) = lwip_htonl(tmp_); \ + } \ +} + +#define DECODES(f) { \ + if (*cp == 0) {\ + u16_t tmp_ = lwip_ntohs(f) + (((u16_t)cp[1] << 8) | cp[2]); \ + (f) = lwip_htons(tmp_); \ + cp += 3; \ + } else { \ + u16_t tmp_ = lwip_ntohs(f) + (u16_t)*cp++; \ + (f) = lwip_htons(tmp_); \ + } \ +} + +#define DECODEU(f) { \ + if (*cp == 0) {\ + (f) = lwip_htons(((u16_t)cp[1] << 8) | cp[2]); \ + cp += 3; \ + } else { \ + (f) = lwip_htons((u16_t)*cp++); \ + } \ +} + +/* Helper structures for unaligned *u32_t and *u16_t accesses */ +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +struct vj_u32_t { + PACK_STRUCT_FIELD(u32_t v); +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif + +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +struct vj_u16_t { + PACK_STRUCT_FIELD(u16_t v); +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif + +/* + * vj_compress_tcp - Attempt to do Van Jacobson header compression on a + * packet. This assumes that nb and comp are not null and that the first + * buffer of the chain contains a valid IP header. + * Return the VJ type code indicating whether or not the packet was + * compressed. + */ +u8_t +vj_compress_tcp(struct vjcompress *comp, struct pbuf **pb) +{ + struct pbuf *np = *pb; + struct ip_hdr *ip = (struct ip_hdr *)np->payload; + struct cstate *cs = comp->last_cs->cs_next; + u16_t ilen = IPH_HL(ip); + u16_t hlen; + struct tcp_hdr *oth; + struct tcp_hdr *th; + u16_t deltaS, deltaA = 0; + u32_t deltaL; + u32_t changes = 0; + u8_t new_seq[16]; + u8_t *cp = new_seq; + + /* + * Check that the packet is IP proto TCP. + */ + if (IPH_PROTO(ip) != IP_PROTO_TCP) { + return (TYPE_IP); + } + + /* + * Bail if this is an IP fragment or if the TCP packet isn't + * `compressible' (i.e., ACK isn't set or some other control bit is + * set). + */ + if ((IPH_OFFSET(ip) & PP_HTONS(0x3fff)) || np->tot_len < 40) { + return (TYPE_IP); + } + th = (struct tcp_hdr *)&((struct vj_u32_t*)ip)[ilen]; + if ((TCPH_FLAGS(th) & (TCP_SYN|TCP_FIN|TCP_RST|TCP_ACK)) != TCP_ACK) { + return (TYPE_IP); + } + + /* Check that the TCP/IP headers are contained in the first buffer. */ + hlen = ilen + TCPH_HDRLEN(th); + hlen <<= 2; + if (np->len < hlen) { + PPPDEBUG(LOG_INFO, ("vj_compress_tcp: header len %d spans buffers\n", hlen)); + return (TYPE_IP); + } + + /* TCP stack requires that we don't change the packet payload, therefore we copy + * the whole packet before compression. */ + np = pbuf_clone(PBUF_RAW, PBUF_RAM, *pb); + if (!np) { + return (TYPE_IP); + } + + *pb = np; + ip = (struct ip_hdr *)np->payload; + + /* + * Packet is compressible -- we're going to send either a + * COMPRESSED_TCP or UNCOMPRESSED_TCP packet. 
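An uncompressed packet is the normal TCP/IP packet with the connection slot id written into the IP protocol field; a compressed one carries only a change mask (plus the slot id when it changes), the TCP checksum and the encoded deltas.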
Either way we need + * to locate (or create) the connection state. Special case the + * most recently used connection since it's most likely to be used + * again & we don't have to do any reordering if it's used. + */ + INCR(vjs_packets); + if (!ip4_addr_cmp(&ip->src, &cs->cs_ip.src) + || !ip4_addr_cmp(&ip->dest, &cs->cs_ip.dest) + || (*(struct vj_u32_t*)th).v != (((struct vj_u32_t*)&cs->cs_ip)[IPH_HL(&cs->cs_ip)]).v) { + /* + * Wasn't the first -- search for it. + * + * States are kept in a circularly linked list with + * last_cs pointing to the end of the list. The + * list is kept in lru order by moving a state to the + * head of the list whenever it is referenced. Since + * the list is short and, empirically, the connection + * we want is almost always near the front, we locate + * states via linear search. If we don't find a state + * for the datagram, the oldest state is (re-)used. + */ + struct cstate *lcs; + struct cstate *lastcs = comp->last_cs; + + do { + lcs = cs; cs = cs->cs_next; + INCR(vjs_searches); + if (ip4_addr_cmp(&ip->src, &cs->cs_ip.src) + && ip4_addr_cmp(&ip->dest, &cs->cs_ip.dest) + && (*(struct vj_u32_t*)th).v == (((struct vj_u32_t*)&cs->cs_ip)[IPH_HL(&cs->cs_ip)]).v) { + goto found; + } + } while (cs != lastcs); + + /* + * Didn't find it -- re-use oldest cstate. Send an + * uncompressed packet that tells the other side what + * connection number we're using for this conversation. + * Note that since the state list is circular, the oldest + * state points to the newest and we only need to set + * last_cs to update the lru linkage. + */ + INCR(vjs_misses); + comp->last_cs = lcs; + goto uncompressed; + + found: + /* + * Found it -- move to the front on the connection list. + */ + if (cs == lastcs) { + comp->last_cs = lcs; + } else { + lcs->cs_next = cs->cs_next; + cs->cs_next = lastcs->cs_next; + lastcs->cs_next = cs; + } + } + + oth = (struct tcp_hdr *)&((struct vj_u32_t*)&cs->cs_ip)[ilen]; + deltaS = ilen; + + /* + * Make sure that only what we expect to change changed. The first + * line of the `if' checks the IP protocol version, header length & + * type of service. The 2nd line checks the "Don't fragment" bit. + * The 3rd line checks the time-to-live and protocol (the protocol + * check is unnecessary but costless). The 4th line checks the TCP + * header length. The 5th line checks IP options, if any. The 6th + * line checks TCP options, if any. If any of these things are + * different between the previous & current datagram, we send the + * current datagram `uncompressed'. + */ + if ((((struct vj_u16_t*)ip)[0]).v != (((struct vj_u16_t*)&cs->cs_ip)[0]).v + || (((struct vj_u16_t*)ip)[3]).v != (((struct vj_u16_t*)&cs->cs_ip)[3]).v + || (((struct vj_u16_t*)ip)[4]).v != (((struct vj_u16_t*)&cs->cs_ip)[4]).v + || TCPH_HDRLEN(th) != TCPH_HDRLEN(oth) + || (deltaS > 5 && BCMP(ip + 1, &cs->cs_ip + 1, (deltaS - 5) << 2)) + || (TCPH_HDRLEN(th) > 5 && BCMP(th + 1, oth + 1, (TCPH_HDRLEN(th) - 5) << 2))) { + goto uncompressed; + } + + /* + * Figure out which of the changing fields changed. The + * receiver expects changes in the order: urgent, window, + * ack, seq (the order minimizes the number of temporaries + * needed in this section of code). + */ + if (TCPH_FLAGS(th) & TCP_URG) { + deltaS = lwip_ntohs(th->urgp); + ENCODEZ(deltaS); + changes |= NEW_U; + } else if (th->urgp != oth->urgp) { + /* argh! URG not set but urp changed -- a sensible + * implementation should never do this but RFC793 + * doesn't prohibit the change so we have to deal + * with it. 
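Falling back to an uncompressed packet also resynchronizes the receiver's saved copy of the header.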
*/ + goto uncompressed; + } + + if ((deltaS = (u16_t)(lwip_ntohs(th->wnd) - lwip_ntohs(oth->wnd))) != 0) { + ENCODE(deltaS); + changes |= NEW_W; + } + + if ((deltaL = lwip_ntohl(th->ackno) - lwip_ntohl(oth->ackno)) != 0) { + if (deltaL > 0xffff) { + goto uncompressed; + } + deltaA = (u16_t)deltaL; + ENCODE(deltaA); + changes |= NEW_A; + } + + if ((deltaL = lwip_ntohl(th->seqno) - lwip_ntohl(oth->seqno)) != 0) { + if (deltaL > 0xffff) { + goto uncompressed; + } + deltaS = (u16_t)deltaL; + ENCODE(deltaS); + changes |= NEW_S; + } + + switch(changes) { + case 0: + /* + * Nothing changed. If this packet contains data and the + * last one didn't, this is probably a data packet following + * an ack (normal on an interactive connection) and we send + * it compressed. Otherwise it's probably a retransmit, + * retransmitted ack or window probe. Send it uncompressed + * in case the other side missed the compressed version. + */ + if (IPH_LEN(ip) != IPH_LEN(&cs->cs_ip) && + lwip_ntohs(IPH_LEN(&cs->cs_ip)) == hlen) { + break; + } + /* no break */ + /* fall through */ + + case SPECIAL_I: + case SPECIAL_D: + /* + * actual changes match one of our special case encodings -- + * send packet uncompressed. + */ + goto uncompressed; + + case NEW_S|NEW_A: + if (deltaS == deltaA && deltaS == lwip_ntohs(IPH_LEN(&cs->cs_ip)) - hlen) { + /* special case for echoed terminal traffic */ + changes = SPECIAL_I; + cp = new_seq; + } + break; + + case NEW_S: + if (deltaS == lwip_ntohs(IPH_LEN(&cs->cs_ip)) - hlen) { + /* special case for data xfer */ + changes = SPECIAL_D; + cp = new_seq; + } + break; + default: + break; + } + + deltaS = (u16_t)(lwip_ntohs(IPH_ID(ip)) - lwip_ntohs(IPH_ID(&cs->cs_ip))); + if (deltaS != 1) { + ENCODEZ(deltaS); + changes |= NEW_I; + } + if (TCPH_FLAGS(th) & TCP_PSH) { + changes |= TCP_PUSH_BIT; + } + /* + * Grab the cksum before we overwrite it below. Then update our + * state with this packet's header. + */ + deltaA = lwip_ntohs(th->chksum); + MEMCPY(&cs->cs_ip, ip, hlen); + + /* + * We want to use the original packet as our compressed packet. + * (cp - new_seq) is the number of bytes we need for compressed + * sequence numbers. In addition we need one byte for the change + * mask, one for the connection id and two for the tcp checksum. + * So, (cp - new_seq) + 4 bytes of header are needed. hlen is how + * many bytes of the original packet to toss so subtract the two to + * get the new packet size. + */ + deltaS = (u16_t)(cp - new_seq); + if (!comp->compressSlot || comp->last_xmit != cs->cs_id) { + comp->last_xmit = cs->cs_id; + hlen -= deltaS + 4; + if (pbuf_remove_header(np, hlen)){ + /* Can we cope with this failing? Just assert for now */ + LWIP_ASSERT("pbuf_remove_header failed\n", 0); + } + cp = (u8_t*)np->payload; + *cp++ = (u8_t)(changes | NEW_C); + *cp++ = cs->cs_id; + } else { + hlen -= deltaS + 3; + if (pbuf_remove_header(np, hlen)) { + /* Can we cope with this failing? Just assert for now */ + LWIP_ASSERT("pbuf_remove_header failed\n", 0); + } + cp = (u8_t*)np->payload; + *cp++ = (u8_t)changes; + } + *cp++ = (u8_t)(deltaA >> 8); + *cp++ = (u8_t)deltaA; + MEMCPY(cp, new_seq, deltaS); + INCR(vjs_compressed); + return (TYPE_COMPRESSED_TCP); + + /* + * Update connection state cs & send uncompressed packet (that is, + * a regular ip/tcp packet but with the 'conversation id' we hope + * to use on future compressed packets in the protocol field). 
+ */ +uncompressed: + MEMCPY(&cs->cs_ip, ip, hlen); + IPH_PROTO_SET(ip, cs->cs_id); + comp->last_xmit = cs->cs_id; + return (TYPE_UNCOMPRESSED_TCP); +} + +/* + * Called when we may have missed a packet. + */ +void +vj_uncompress_err(struct vjcompress *comp) +{ + comp->flags |= VJF_TOSS; + INCR(vjs_errorin); +} + +/* + * "Uncompress" a packet of type TYPE_UNCOMPRESSED_TCP. + * Return 0 on success, -1 on failure. + */ +int +vj_uncompress_uncomp(struct pbuf *nb, struct vjcompress *comp) +{ + u32_t hlen; + struct cstate *cs; + struct ip_hdr *ip; + + ip = (struct ip_hdr *)nb->payload; + hlen = IPH_HL(ip) << 2; + if (IPH_PROTO(ip) >= MAX_SLOTS + || hlen + sizeof(struct tcp_hdr) > nb->len + || (hlen += TCPH_HDRLEN_BYTES((struct tcp_hdr *)&((char *)ip)[hlen])) + > nb->len + || hlen > MAX_HDR) { + PPPDEBUG(LOG_INFO, ("vj_uncompress_uncomp: bad cid=%d, hlen=%d buflen=%d\n", + IPH_PROTO(ip), hlen, nb->len)); + vj_uncompress_err(comp); + return -1; + } + cs = &comp->rstate[comp->last_recv = IPH_PROTO(ip)]; + comp->flags &=~ VJF_TOSS; + IPH_PROTO_SET(ip, IP_PROTO_TCP); + /* copy from/to bigger buffers checked above instead of cs->cs_ip and ip + just to help static code analysis to see this is correct ;-) */ + MEMCPY(&cs->cs_hdr, nb->payload, hlen); + cs->cs_hlen = (u16_t)hlen; + INCR(vjs_uncompressedin); + return 0; +} + +/* + * Uncompress a packet of type TYPE_COMPRESSED_TCP. + * The packet is composed of a buffer chain and the first buffer + * must contain an accurate chain length. + * The first buffer must include the entire compressed TCP/IP header. + * This procedure replaces the compressed header with the uncompressed + * header and returns the length of the VJ header. + */ +int +vj_uncompress_tcp(struct pbuf **nb, struct vjcompress *comp) +{ + u8_t *cp; + struct tcp_hdr *th; + struct cstate *cs; + struct vj_u16_t *bp; + struct pbuf *n0 = *nb; + u32_t tmp; + u32_t vjlen, hlen, changes; + + INCR(vjs_compressedin); + cp = (u8_t*)n0->payload; + changes = *cp++; + if (changes & NEW_C) { + /* + * Make sure the state index is in range, then grab the state. + * If we have a good state index, clear the 'discard' flag. + */ + if (*cp >= MAX_SLOTS) { + PPPDEBUG(LOG_INFO, ("vj_uncompress_tcp: bad cid=%d\n", *cp)); + goto bad; + } + + comp->flags &=~ VJF_TOSS; + comp->last_recv = *cp++; + } else { + /* + * this packet has an implicit state index. If we've + * had a line error since the last time we got an + * explicit state index, we have to toss the packet. + */ + if (comp->flags & VJF_TOSS) { + PPPDEBUG(LOG_INFO, ("vj_uncompress_tcp: tossing\n")); + INCR(vjs_tossed); + return (-1); + } + } + cs = &comp->rstate[comp->last_recv]; + hlen = IPH_HL(&cs->cs_ip) << 2; + th = (struct tcp_hdr *)&((u8_t*)&cs->cs_ip)[hlen]; + th->chksum = lwip_htons((*cp << 8) | cp[1]); + cp += 2; + if (changes & TCP_PUSH_BIT) { + TCPH_SET_FLAG(th, TCP_PSH); + } else { + TCPH_UNSET_FLAG(th, TCP_PSH); + } + + switch (changes & SPECIALS_MASK) { + case SPECIAL_I: + { + u32_t i = lwip_ntohs(IPH_LEN(&cs->cs_ip)) - cs->cs_hlen; + /* some compilers can't nest inline assembler.. */ + tmp = lwip_ntohl(th->ackno) + i; + th->ackno = lwip_htonl(tmp); + tmp = lwip_ntohl(th->seqno) + i; + th->seqno = lwip_htonl(tmp); + } + break; + + case SPECIAL_D: + /* some compilers can't nest inline assembler.. 
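SPECIAL_D means only the sequence number moved, by exactly the amount of data in the previous packet, so it is advanced here without reading any encoded delta.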
*/ + tmp = lwip_ntohl(th->seqno) + lwip_ntohs(IPH_LEN(&cs->cs_ip)) - cs->cs_hlen; + th->seqno = lwip_htonl(tmp); + break; + + default: + if (changes & NEW_U) { + TCPH_SET_FLAG(th, TCP_URG); + DECODEU(th->urgp); + } else { + TCPH_UNSET_FLAG(th, TCP_URG); + } + if (changes & NEW_W) { + DECODES(th->wnd); + } + if (changes & NEW_A) { + DECODEL(th->ackno); + } + if (changes & NEW_S) { + DECODEL(th->seqno); + } + break; + } + if (changes & NEW_I) { + DECODES(cs->cs_ip._id); + } else { + IPH_ID_SET(&cs->cs_ip, lwip_ntohs(IPH_ID(&cs->cs_ip)) + 1); + IPH_ID_SET(&cs->cs_ip, lwip_htons(IPH_ID(&cs->cs_ip))); + } + + /* + * At this point, cp points to the first byte of data in the + * packet. Fill in the IP total length and update the IP + * header checksum. + */ + vjlen = (u16_t)(cp - (u8_t*)n0->payload); + if (n0->len < vjlen) { + /* + * We must have dropped some characters (crc should detect + * this but the old slip framing won't) + */ + PPPDEBUG(LOG_INFO, ("vj_uncompress_tcp: head buffer %d too short %d\n", + n0->len, vjlen)); + goto bad; + } + +#if BYTE_ORDER == LITTLE_ENDIAN + tmp = n0->tot_len - vjlen + cs->cs_hlen; + IPH_LEN_SET(&cs->cs_ip, lwip_htons((u16_t)tmp)); +#else + IPH_LEN_SET(&cs->cs_ip, lwip_htons(n0->tot_len - vjlen + cs->cs_hlen)); +#endif + + /* recompute the ip header checksum */ + bp = (struct vj_u16_t*) &cs->cs_ip; + IPH_CHKSUM_SET(&cs->cs_ip, 0); + for (tmp = 0; hlen > 0; hlen -= 2) { + tmp += (*bp++).v; + } + tmp = (tmp & 0xffff) + (tmp >> 16); + tmp = (tmp & 0xffff) + (tmp >> 16); + IPH_CHKSUM_SET(&cs->cs_ip, (u16_t)(~tmp)); + + /* Remove the compressed header and prepend the uncompressed header. */ + if (pbuf_remove_header(n0, vjlen)) { + /* Can we cope with this failing? Just assert for now */ + LWIP_ASSERT("pbuf_remove_header failed\n", 0); + goto bad; + } + + if(LWIP_MEM_ALIGN(n0->payload) != n0->payload) { + struct pbuf *np; + +#if IP_FORWARD + /* If IP forwarding is enabled we are using a PBUF_LINK packet type so + * the packet is being allocated with enough header space to be + * forwarded (to Ethernet for example). + */ + np = pbuf_alloc(PBUF_LINK, n0->len + cs->cs_hlen, PBUF_POOL); +#else /* IP_FORWARD */ + np = pbuf_alloc(PBUF_RAW, n0->len + cs->cs_hlen, PBUF_POOL); +#endif /* IP_FORWARD */ + if(!np) { + PPPDEBUG(LOG_WARNING, ("vj_uncompress_tcp: realign failed\n")); + goto bad; + } + + if (pbuf_remove_header(np, cs->cs_hlen)) { + /* Can we cope with this failing? 
Just assert for now */ + LWIP_ASSERT("pbuf_remove_header failed\n", 0); + goto bad; + } + + pbuf_take(np, n0->payload, n0->len); + + if(n0->next) { + pbuf_chain(np, n0->next); + pbuf_dechain(n0); + } + pbuf_free(n0); + n0 = np; + } + + if (pbuf_add_header(n0, cs->cs_hlen)) { + struct pbuf *np; + + LWIP_ASSERT("vj_uncompress_tcp: cs->cs_hlen <= PBUF_POOL_BUFSIZE", cs->cs_hlen <= PBUF_POOL_BUFSIZE); + np = pbuf_alloc(PBUF_RAW, cs->cs_hlen, PBUF_POOL); + if(!np) { + PPPDEBUG(LOG_WARNING, ("vj_uncompress_tcp: prepend failed\n")); + goto bad; + } + pbuf_cat(np, n0); + n0 = np; + } + LWIP_ASSERT("n0->len >= cs->cs_hlen", n0->len >= cs->cs_hlen); + MEMCPY(n0->payload, &cs->cs_ip, cs->cs_hlen); + + *nb = n0; + + return vjlen; + +bad: + vj_uncompress_err(comp); + return (-1); +} + +#endif /* PPP_SUPPORT && VJ_SUPPORT */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/netif/slipif.c b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/netif/slipif.c new file mode 100644 index 0000000..037ed9c --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/netif/slipif.c @@ -0,0 +1,558 @@ +/** + * @file + * SLIP Interface + * + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of the Institute nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE INSTITUTE AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE INSTITUTE OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * This file is built upon the file: src/arch/rtxc/netif/sioslip.c + * + * Author: Magnus Ivarsson + * Simon Goldschmidt + */ + + +/** + * @defgroup slipif SLIP + * @ingroup netifs + * + * This is an arch independent SLIP netif. The specific serial hooks must be + * provided by another file. 
They are sio_open, sio_read/sio_tryread and sio_send + * + * Usage: This netif can be used in three ways:\n + * 1) For NO_SYS==0, an RX thread can be used which blocks on sio_read() + * until data is received.\n + * 2) In your main loop, call slipif_poll() to check for new RX bytes, + * completed packets are fed into netif->input().\n + * 3) Call slipif_received_byte[s]() from your serial RX ISR and + * slipif_process_rxqueue() from your main loop. ISR level decodes + * packets and puts completed packets on a queue which is fed into + * the stack from the main loop (needs SYS_LIGHTWEIGHT_PROT for + * pbuf_alloc to work on ISR level!). + * + */ + +#include "netif/slipif.h" +#include "lwip/opt.h" + +#include "lwip/def.h" +#include "lwip/pbuf.h" +#include "lwip/stats.h" +#include "lwip/snmp.h" +#include "lwip/sys.h" +#include "lwip/sio.h" + +#define SLIP_END 0xC0 /* 0300: start and end of every packet */ +#define SLIP_ESC 0xDB /* 0333: escape start (one byte escaped data follows) */ +#define SLIP_ESC_END 0xDC /* 0334: following escape: original byte is 0xC0 (END) */ +#define SLIP_ESC_ESC 0xDD /* 0335: following escape: original byte is 0xDB (ESC) */ + +/** Maximum packet size that is received by this netif */ +#ifndef SLIP_MAX_SIZE +#define SLIP_MAX_SIZE 1500 +#endif + +/** Define this to the interface speed for SNMP + * (sio_fd is the sio_fd_t returned by sio_open). + * The default value of zero means 'unknown'. + */ +#ifndef SLIP_SIO_SPEED +#define SLIP_SIO_SPEED(sio_fd) 0 +#endif + +enum slipif_recv_state { + SLIP_RECV_NORMAL, + SLIP_RECV_ESCAPE +}; + +struct slipif_priv { + sio_fd_t sd; + /* q is the whole pbuf chain for a packet, p is the current pbuf in the chain */ + struct pbuf *p, *q; + u8_t state; + u16_t i, recved; +#if SLIP_RX_FROM_ISR + struct pbuf *rxpackets; +#endif +}; + +/** + * Send a pbuf doing the necessary SLIP encapsulation + * + * Uses the serial layer's sio_send() + * + * @param netif the lwip network interface structure for this slipif + * @param p the pbuf chain packet to send + * @return always returns ERR_OK since the serial layer does not provide return values + */ +static err_t +slipif_output(struct netif *netif, struct pbuf *p) +{ + struct slipif_priv *priv; + struct pbuf *q; + u16_t i; + u8_t c; + + LWIP_ASSERT("netif != NULL", (netif != NULL)); + LWIP_ASSERT("netif->state != NULL", (netif->state != NULL)); + LWIP_ASSERT("p != NULL", (p != NULL)); + + LWIP_DEBUGF(SLIP_DEBUG, ("slipif_output: sending %"U16_F" bytes\n", p->tot_len)); + priv = (struct slipif_priv *)netif->state; + + /* Send pbuf out on the serial I/O device. */ + /* Start with packet delimiter. */ + sio_send(SLIP_END, priv->sd); + + for (q = p; q != NULL; q = q->next) { + for (i = 0; i < q->len; i++) { + c = ((u8_t *)q->payload)[i]; + switch (c) { + case SLIP_END: + /* need to escape this byte (0xC0 -> 0xDB, 0xDC) */ + sio_send(SLIP_ESC, priv->sd); + sio_send(SLIP_ESC_END, priv->sd); + break; + case SLIP_ESC: + /* need to escape this byte (0xDB -> 0xDB, 0xDD) */ + sio_send(SLIP_ESC, priv->sd); + sio_send(SLIP_ESC_ESC, priv->sd); + break; + default: + /* normal byte - no need for escaping */ + sio_send(c, priv->sd); + break; + } + } + } + /* End with packet delimiter. 
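The receiver treats END as a frame boundary and drops zero-length frames, so the leading and trailing delimiters never produce spurious packets.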
*/ + sio_send(SLIP_END, priv->sd); + return ERR_OK; +} + +#if LWIP_IPV4 +/** + * Send a pbuf doing the necessary SLIP encapsulation + * + * Uses the serial layer's sio_send() + * + * @param netif the lwip network interface structure for this slipif + * @param p the pbuf chain packet to send + * @param ipaddr the ip address to send the packet to (not used for slipif) + * @return always returns ERR_OK since the serial layer does not provide return values + */ +static err_t +slipif_output_v4(struct netif *netif, struct pbuf *p, const ip4_addr_t *ipaddr) +{ + LWIP_UNUSED_ARG(ipaddr); + return slipif_output(netif, p); +} +#endif /* LWIP_IPV4 */ + +#if LWIP_IPV6 +/** + * Send a pbuf doing the necessary SLIP encapsulation + * + * Uses the serial layer's sio_send() + * + * @param netif the lwip network interface structure for this slipif + * @param p the pbuf chain packet to send + * @param ipaddr the ip address to send the packet to (not used for slipif) + * @return always returns ERR_OK since the serial layer does not provide return values + */ +static err_t +slipif_output_v6(struct netif *netif, struct pbuf *p, const ip6_addr_t *ipaddr) +{ + LWIP_UNUSED_ARG(ipaddr); + return slipif_output(netif, p); +} +#endif /* LWIP_IPV6 */ + +/** + * Handle the incoming SLIP stream character by character + * + * @param netif the lwip network interface structure for this slipif + * @param c received character (multiple calls to this function will + * return a complete packet, NULL is returned before - used for polling) + * @return The IP packet when SLIP_END is received + */ +static struct pbuf * +slipif_rxbyte(struct netif *netif, u8_t c) +{ + struct slipif_priv *priv; + struct pbuf *t; + + LWIP_ASSERT("netif != NULL", (netif != NULL)); + LWIP_ASSERT("netif->state != NULL", (netif->state != NULL)); + + priv = (struct slipif_priv *)netif->state; + + switch (priv->state) { + case SLIP_RECV_NORMAL: + switch (c) { + case SLIP_END: + if (priv->recved > 0) { + /* Received whole packet. */ + /* Trim the pbuf to the size of the received packet. */ + pbuf_realloc(priv->q, priv->recved); + + LINK_STATS_INC(link.recv); + + LWIP_DEBUGF(SLIP_DEBUG, ("slipif: Got packet (%"U16_F" bytes)\n", priv->recved)); + t = priv->q; + priv->p = priv->q = NULL; + priv->i = priv->recved = 0; + return t; + } + return NULL; + case SLIP_ESC: + priv->state = SLIP_RECV_ESCAPE; + return NULL; + default: + break; + } /* end switch (c) */ + break; + case SLIP_RECV_ESCAPE: + /* un-escape END or ESC bytes, leave other bytes + (although that would be a protocol error) */ + switch (c) { + case SLIP_ESC_END: + c = SLIP_END; + break; + case SLIP_ESC_ESC: + c = SLIP_ESC; + break; + default: + break; + } + priv->state = SLIP_RECV_NORMAL; + break; + default: + break; + } /* end switch (priv->state) */ + + /* byte received, packet not yet completely received */ + if (priv->p == NULL) { + /* allocate a new pbuf */ + LWIP_DEBUGF(SLIP_DEBUG, ("slipif_input: alloc\n")); + priv->p = pbuf_alloc(PBUF_LINK, (PBUF_POOL_BUFSIZE - PBUF_LINK_HLEN - PBUF_LINK_ENCAPSULATION_HLEN), PBUF_POOL); + + if (priv->p == NULL) { + LINK_STATS_INC(link.drop); + LWIP_DEBUGF(SLIP_DEBUG, ("slipif_input: no new pbuf! 
(DROP)\n")); + /* don't process any further since we got no pbuf to receive to */ + return NULL; + } + + if (priv->q != NULL) { + /* 'chain' the pbuf to the existing chain */ + pbuf_cat(priv->q, priv->p); + } else { + /* p is the first pbuf in the chain */ + priv->q = priv->p; + } + } + + /* this automatically drops bytes if > SLIP_MAX_SIZE */ + if ((priv->p != NULL) && (priv->recved <= SLIP_MAX_SIZE)) { + ((u8_t *)priv->p->payload)[priv->i] = c; + priv->recved++; + priv->i++; + if (priv->i >= priv->p->len) { + /* on to the next pbuf */ + priv->i = 0; + if (priv->p->next != NULL && priv->p->next->len > 0) { + /* p is a chain, on to the next in the chain */ + priv->p = priv->p->next; + } else { + /* p is a single pbuf, set it to NULL so next time a new + * pbuf is allocated */ + priv->p = NULL; + } + } + } + return NULL; +} + +/** Like slipif_rxbyte, but passes completed packets to netif->input + * + * @param netif The lwip network interface structure for this slipif + * @param c received character + */ +static void +slipif_rxbyte_input(struct netif *netif, u8_t c) +{ + struct pbuf *p; + p = slipif_rxbyte(netif, c); + if (p != NULL) { + if (netif->input(p, netif) != ERR_OK) { + pbuf_free(p); + } + } +} + +#if SLIP_USE_RX_THREAD +/** + * The SLIP input thread. + * + * Feed the IP layer with incoming packets + * + * @param nf the lwip network interface structure for this slipif + */ +static void +slipif_loop_thread(void *nf) +{ + u8_t c; + struct netif *netif = (struct netif *)nf; + struct slipif_priv *priv = (struct slipif_priv *)netif->state; + + while (1) { + if (sio_read(priv->sd, &c, 1) > 0) { + slipif_rxbyte_input(netif, c); + } + } +} +#endif /* SLIP_USE_RX_THREAD */ + +/** + * @ingroup slipif + * SLIP netif initialization + * + * Call the arch specific sio_open and remember + * the opened device in the state field of the netif. + * + * @param netif the lwip network interface structure for this slipif + * @return ERR_OK if serial line could be opened, + * ERR_MEM if no memory could be allocated, + * ERR_IF is serial line couldn't be opened + * + * @note If netif->state is interpreted as an u8_t serial port number. + * + */ +err_t +slipif_init(struct netif *netif) +{ + struct slipif_priv *priv; + u8_t sio_num; + + LWIP_ASSERT("slipif needs an input callback", netif->input != NULL); + + /* netif->state contains serial port number */ + sio_num = LWIP_PTR_NUMERIC_CAST(u8_t, netif->state); + + LWIP_DEBUGF(SLIP_DEBUG, ("slipif_init: netif->num=%"U16_F"\n", (u16_t)sio_num)); + + /* Allocate private data */ + priv = (struct slipif_priv *)mem_malloc(sizeof(struct slipif_priv)); + if (!priv) { + return ERR_MEM; + } + + netif->name[0] = 's'; + netif->name[1] = 'l'; +#if LWIP_IPV4 + netif->output = slipif_output_v4; +#endif /* LWIP_IPV4 */ +#if LWIP_IPV6 + netif->output_ip6 = slipif_output_v6; +#endif /* LWIP_IPV6 */ + netif->mtu = SLIP_MAX_SIZE; + + /* Try to open the serial port. */ + priv->sd = sio_open(sio_num); + if (!priv->sd) { + /* Opening the serial port failed. */ + mem_free(priv); + return ERR_IF; + } + + /* Initialize private data */ + priv->p = NULL; + priv->q = NULL; + priv->state = SLIP_RECV_NORMAL; + priv->i = 0; + priv->recved = 0; +#if SLIP_RX_FROM_ISR + priv->rxpackets = NULL; +#endif + + netif->state = priv; + + /* initialize the snmp variables and counters inside the struct netif */ + MIB2_INIT_NETIF(netif, snmp_ifType_slip, SLIP_SIO_SPEED(priv->sd)); + +#if SLIP_USE_RX_THREAD + /* Create a thread to poll the serial line. 
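slipif_loop_thread() blocks in sio_read() and feeds every received byte into slipif_rxbyte_input(), which passes completed packets to netif->input().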
*/ + sys_thread_new(SLIPIF_THREAD_NAME, slipif_loop_thread, netif, + SLIPIF_THREAD_STACKSIZE, SLIPIF_THREAD_PRIO); +#endif /* SLIP_USE_RX_THREAD */ + return ERR_OK; +} + +/** + * @ingroup slipif + * Polls the serial device and feeds the IP layer with incoming packets. + * + * @param netif The lwip network interface structure for this slipif + */ +void +slipif_poll(struct netif *netif) +{ + u8_t c; + struct slipif_priv *priv; + + LWIP_ASSERT("netif != NULL", (netif != NULL)); + LWIP_ASSERT("netif->state != NULL", (netif->state != NULL)); + + priv = (struct slipif_priv *)netif->state; + + while (sio_tryread(priv->sd, &c, 1) > 0) { + slipif_rxbyte_input(netif, c); + } +} + +#if SLIP_RX_FROM_ISR +/** + * @ingroup slipif + * Feeds the IP layer with incoming packets that were receive + * + * @param netif The lwip network interface structure for this slipif + */ +void +slipif_process_rxqueue(struct netif *netif) +{ + struct slipif_priv *priv; + SYS_ARCH_DECL_PROTECT(old_level); + + LWIP_ASSERT("netif != NULL", (netif != NULL)); + LWIP_ASSERT("netif->state != NULL", (netif->state != NULL)); + + priv = (struct slipif_priv *)netif->state; + + SYS_ARCH_PROTECT(old_level); + while (priv->rxpackets != NULL) { + struct pbuf *p = priv->rxpackets; +#if SLIP_RX_QUEUE + /* dequeue packet */ + struct pbuf *q = p; + while ((q->len != q->tot_len) && (q->next != NULL)) { + q = q->next; + } + priv->rxpackets = q->next; + q->next = NULL; +#else /* SLIP_RX_QUEUE */ + priv->rxpackets = NULL; +#endif /* SLIP_RX_QUEUE */ + SYS_ARCH_UNPROTECT(old_level); + if (netif->input(p, netif) != ERR_OK) { + pbuf_free(p); + } + SYS_ARCH_PROTECT(old_level); + } + SYS_ARCH_UNPROTECT(old_level); +} + +/** Like slipif_rxbyte, but queues completed packets. + * + * @param netif The lwip network interface structure for this slipif + * @param data Received serial byte + */ +static void +slipif_rxbyte_enqueue(struct netif *netif, u8_t data) +{ + struct pbuf *p; + struct slipif_priv *priv = (struct slipif_priv *)netif->state; + SYS_ARCH_DECL_PROTECT(old_level); + + p = slipif_rxbyte(netif, data); + if (p != NULL) { + SYS_ARCH_PROTECT(old_level); + if (priv->rxpackets != NULL) { +#if SLIP_RX_QUEUE + /* queue multiple pbufs */ + struct pbuf *q = p; + while (q->next != NULL) { + q = q->next; + } + q->next = p; + } else { +#else /* SLIP_RX_QUEUE */ + pbuf_free(priv->rxpackets); + } + { +#endif /* SLIP_RX_QUEUE */ + priv->rxpackets = p; + } + SYS_ARCH_UNPROTECT(old_level); + } +} + +/** + * @ingroup slipif + * Process a received byte, completed packets are put on a queue that is + * fed into IP through slipif_process_rxqueue(). + * + * This function can be called from ISR if SYS_LIGHTWEIGHT_PROT is enabled. + * + * @param netif The lwip network interface structure for this slipif + * @param data received character + */ +void +slipif_received_byte(struct netif *netif, u8_t data) +{ + LWIP_ASSERT("netif != NULL", (netif != NULL)); + LWIP_ASSERT("netif->state != NULL", (netif->state != NULL)); + slipif_rxbyte_enqueue(netif, data); +} + +/** + * @ingroup slipif + * Process multiple received byte, completed packets are put on a queue that is + * fed into IP through slipif_process_rxqueue(). + * + * This function can be called from ISR if SYS_LIGHTWEIGHT_PROT is enabled. 
+ * + * @param netif The lwip network interface structure for this slipif + * @param data received character + * @param len Number of received characters + */ +void +slipif_received_bytes(struct netif *netif, u8_t *data, u8_t len) +{ + u8_t i; + u8_t *rxdata = data; + LWIP_ASSERT("netif != NULL", (netif != NULL)); + LWIP_ASSERT("netif->state != NULL", (netif->state != NULL)); + + for (i = 0; i < len; i++, rxdata++) { + slipif_rxbyte_enqueue(netif, *rxdata); + } +} +#endif /* SLIP_RX_FROM_ISR */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/netif/zepif.c b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/netif/zepif.c new file mode 100644 index 0000000..b7c9b9c --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/netif/zepif.c @@ -0,0 +1,300 @@ +/** + * @file + * + * @defgroup zepif ZEP - ZigBee Encapsulation Protocol + * @ingroup netifs + * A netif implementing the ZigBee Encapsulation Protocol (ZEP). + * This is used to tunnel 6LowPAN over UDP. + * + * Usage (there must be a default netif before!): + * @code{.c} + * netif_add(&zep_netif, NULL, NULL, NULL, NULL, zepif_init, tcpip_6lowpan_input); + * netif_create_ip6_linklocal_address(&zep_netif, 1); + * netif_set_up(&zep_netif); + * netif_set_link_up(&zep_netif); + * @endcode + */ + +/* + * Copyright (c) 2018 Simon Goldschmidt + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
+ * + * Author: Simon Goldschmidt + * + */ + +#include "netif/zepif.h" + +#if LWIP_IPV6 && LWIP_UDP + +#include "netif/lowpan6.h" +#include "lwip/udp.h" +#include "lwip/timeouts.h" +#include + +/** Define this to 1 to loop back TX packets for testing */ +#ifndef ZEPIF_LOOPBACK +#define ZEPIF_LOOPBACK 0 +#endif + +#define ZEP_MAX_DATA_LEN 127 + +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +struct zep_hdr { + PACK_STRUCT_FLD_8(u8_t prot_id[2]); + PACK_STRUCT_FLD_8(u8_t prot_version); + PACK_STRUCT_FLD_8(u8_t type); + PACK_STRUCT_FLD_8(u8_t channel_id); + PACK_STRUCT_FIELD(u16_t device_id); + PACK_STRUCT_FLD_8(u8_t crc_mode); + PACK_STRUCT_FLD_8(u8_t unknown_1); + PACK_STRUCT_FIELD(u32_t timestamp[2]); + PACK_STRUCT_FIELD(u32_t seq_num); + PACK_STRUCT_FLD_8(u8_t unknown_2[10]); + PACK_STRUCT_FLD_8(u8_t len); +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif + +struct zepif_state { + struct zepif_init init; + struct udp_pcb *pcb; + u32_t seqno; +}; + +static u8_t zep_lowpan_timer_running; + +/* Helper function that calls the 6LoWPAN timer and reschedules itself */ +static void +zep_lowpan_timer(void *arg) +{ + lowpan6_tmr(); + if (zep_lowpan_timer_running) { + sys_timeout(LOWPAN6_TMR_INTERVAL, zep_lowpan_timer, arg); + } +} + +/* Pass received pbufs into 6LowPAN netif */ +static void +zepif_udp_recv(void *arg, struct udp_pcb *pcb, struct pbuf *p, + const ip_addr_t *addr, u16_t port) +{ + err_t err; + struct netif *netif_lowpan6 = (struct netif *)arg; + struct zep_hdr *zep; + + LWIP_ASSERT("arg != NULL", arg != NULL); + LWIP_ASSERT("pcb != NULL", pcb != NULL); + LWIP_UNUSED_ARG(pcb); /* for LWIP_NOASSERT */ + LWIP_UNUSED_ARG(addr); + LWIP_UNUSED_ARG(port); + if (p == NULL) { + return; + } + + /* Parse and hide the ZEP header */ + if (p->len < sizeof(struct zep_hdr)) { + /* need the zep_hdr in one piece */ + goto err_return; + } + zep = (struct zep_hdr *)p->payload; + if (zep->prot_id[0] != 'E') { + goto err_return; + } + if (zep->prot_id[1] != 'X') { + goto err_return; + } + if (zep->prot_version != 2) { + /* we only support this version for now */ + goto err_return; + } + if (zep->type != 1) { + goto err_return; + } + if (zep->crc_mode != 1) { + goto err_return; + } + if (zep->len != p->tot_len - sizeof(struct zep_hdr)) { + goto err_return; + } + /* everything seems to be OK, hide the ZEP header */ + if (pbuf_remove_header(p, sizeof(struct zep_hdr))) { + goto err_return; + } + /* TODO Check CRC? */ + /* remove CRC trailer */ + pbuf_realloc(p, p->tot_len - 2); + + /* Call into 6LoWPAN code. 
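netif_lowpan6->input is the input callback passed to netif_add(), typically tcpip_6lowpan_input() as shown in the usage example at the top of this file.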
*/ + err = netif_lowpan6->input(p, netif_lowpan6); + if (err == ERR_OK) { + return; + } +err_return: + pbuf_free(p); +} + +/* Send 6LoWPAN TX packets as UDP broadcast */ +static err_t +zepif_linkoutput(struct netif *netif, struct pbuf *p) +{ + err_t err; + struct pbuf *q; + struct zep_hdr *zep; + struct zepif_state *state; + + LWIP_ASSERT("invalid netif", netif != NULL); + LWIP_ASSERT("invalid pbuf", p != NULL); + + if (p->tot_len > ZEP_MAX_DATA_LEN) { + return ERR_VAL; + } + LWIP_ASSERT("TODO: support chained pbufs", p->next == NULL); + + state = (struct zepif_state *)netif->state; + LWIP_ASSERT("state->pcb != NULL", state->pcb != NULL); + + q = pbuf_alloc(PBUF_TRANSPORT, sizeof(struct zep_hdr) + p->tot_len, PBUF_RAM); + if (q == NULL) { + return ERR_MEM; + } + zep = (struct zep_hdr *)q->payload; + memset(zep, 0, sizeof(struct zep_hdr)); + zep->prot_id[0] = 'E'; + zep->prot_id[1] = 'X'; + zep->prot_version = 2; + zep->type = 1; /* Data */ + zep->channel_id = 0; /* whatever */ + zep->device_id = lwip_htons(1); /* whatever */ + zep->crc_mode = 1; + zep->unknown_1 = 0xff; + zep->seq_num = lwip_htonl(state->seqno); + state->seqno++; + zep->len = (u8_t)p->tot_len; + + err = pbuf_take_at(q, p->payload, p->tot_len, sizeof(struct zep_hdr)); + if (err == ERR_OK) { +#if ZEPIF_LOOPBACK + zepif_udp_recv(netif, state->pcb, pbuf_clone(PBUF_RAW, PBUF_RAM, q), NULL, 0); +#endif + err = udp_sendto(state->pcb, q, state->init.zep_dst_ip_addr, state->init.zep_dst_udp_port); + } + pbuf_free(q); + + return err; +} + +/** + * @ingroup zepif + * Set up a raw 6LowPAN netif and surround it with input- and output + * functions for ZEP + */ +err_t +zepif_init(struct netif *netif) +{ + err_t err; + struct zepif_init *init_state = (struct zepif_init *)netif->state; + struct zepif_state *state = (struct zepif_state *)mem_malloc(sizeof(struct zepif_state)); + + LWIP_ASSERT("zepif needs an input callback", netif->input != NULL); + + if (state == NULL) { + return ERR_MEM; + } + memset(state, 0, sizeof(struct zepif_state)); + if (init_state != NULL) { + memcpy(&state->init, init_state, sizeof(struct zepif_init)); + } + if (state->init.zep_src_udp_port == 0) { + state->init.zep_src_udp_port = ZEPIF_DEFAULT_UDP_PORT; + } + if (state->init.zep_dst_udp_port == 0) { + state->init.zep_dst_udp_port = ZEPIF_DEFAULT_UDP_PORT; + } +#if LWIP_IPV4 + if (state->init.zep_dst_ip_addr == NULL) { + /* With IPv4 enabled, default to broadcasting packets if no address is set */ + state->init.zep_dst_ip_addr = IP_ADDR_BROADCAST; + } +#endif /* LWIP_IPV4 */ + + netif->state = NULL; + + state->pcb = udp_new_ip_type(IPADDR_TYPE_ANY); + if (state->pcb == NULL) { + err = ERR_MEM; + goto err_ret; + } + err = udp_bind(state->pcb, state->init.zep_src_ip_addr, state->init.zep_src_udp_port); + if (err != ERR_OK) { + goto err_ret; + } + if (state->init.zep_netif != NULL) { + udp_bind_netif(state->pcb, state->init.zep_netif); + } + LWIP_ASSERT("udp_bind(lowpan6_broadcast_pcb) failed", err == ERR_OK); + ip_set_option(state->pcb, SOF_BROADCAST); + udp_recv(state->pcb, zepif_udp_recv, netif); + + err = lowpan6_if_init(netif); + LWIP_ASSERT("lowpan6_if_init set a state", netif->state == NULL); + if (err == ERR_OK) { + netif->state = state; + netif->hwaddr_len = 6; + if (init_state != NULL) { + memcpy(netif->hwaddr, init_state->addr, 6); + } else { + u8_t i; + for (i = 0; i < 6; i++) { + netif->hwaddr[i] = i; + } + netif->hwaddr[0] &= 0xfc; + } + netif->linkoutput = zepif_linkoutput; + + if (!zep_lowpan_timer_running) { + sys_timeout(LOWPAN6_TMR_INTERVAL, 
zep_lowpan_timer, NULL); + zep_lowpan_timer_running = 1; + } + + return ERR_OK; + } + +err_ret: + if (state->pcb != NULL) { + udp_remove(state->pcb); + } + mem_free(state); + return err; +} + +#endif /* LWIP_IPV6 && LWIP_UDP */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/system/OS/sys_arch.c b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/system/OS/sys_arch.c new file mode 100644 index 0000000..a10e29f --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/system/OS/sys_arch.c @@ -0,0 +1,512 @@ +/* + * Copyright (c) 2001-2003 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ + +/* lwIP includes. */ +#include "lwip/debug.h" +#include "lwip/def.h" +#include "lwip/sys.h" +#include "lwip/mem.h" +#include "lwip/stats.h" + +#if !NO_SYS + +#include "cmsis_os.h" + +#if defined(LWIP_PROVIDE_ERRNO) +int errno; +#endif + +/*-----------------------------------------------------------------------------------*/ +// Creates an empty mailbox. +err_t sys_mbox_new(sys_mbox_t *mbox, int size) +{ +#if (osCMSIS < 0x20000U) + osMessageQDef(QUEUE, size, void *); + *mbox = osMessageCreate(osMessageQ(QUEUE), NULL); +#else + *mbox = osMessageQueueNew(size, sizeof(void *), NULL); +#endif +#if SYS_STATS + ++lwip_stats.sys.mbox.used; + if(lwip_stats.sys.mbox.max < lwip_stats.sys.mbox.used) + { + lwip_stats.sys.mbox.max = lwip_stats.sys.mbox.used; + } +#endif /* SYS_STATS */ + if(*mbox == NULL) + return ERR_MEM; + + return ERR_OK; +} + +/*-----------------------------------------------------------------------------------*/ +/* + Deallocates a mailbox. If there are messages still present in the + mailbox when the mailbox is deallocated, it is an indication of a + programming error in lwIP and the developer should be notified. +*/ +void sys_mbox_free(sys_mbox_t *mbox) +{ +#if (osCMSIS < 0x20000U) + if(osMessageWaiting(*mbox)) +#else + if(osMessageQueueGetCount(*mbox)) +#endif + { + /* Line for breakpoint. 
Should never break here! */ + portNOP(); +#if SYS_STATS + lwip_stats.sys.mbox.err++; +#endif /* SYS_STATS */ + + } +#if (osCMSIS < 0x20000U) + osMessageDelete(*mbox); +#else + osMessageQueueDelete(*mbox); +#endif +#if SYS_STATS + --lwip_stats.sys.mbox.used; +#endif /* SYS_STATS */ +} + +/*-----------------------------------------------------------------------------------*/ +// Posts the "msg" to the mailbox. +void sys_mbox_post(sys_mbox_t *mbox, void *data) +{ +#if (osCMSIS < 0x20000U) + while(osMessagePut(*mbox, (uint32_t)data, osWaitForever) != osOK); +#else + while(osMessageQueuePut(*mbox, &data, 0, osWaitForever) != osOK); +#endif +} + + +/*-----------------------------------------------------------------------------------*/ +// Try to post the "msg" to the mailbox. +err_t sys_mbox_trypost(sys_mbox_t *mbox, void *msg) +{ + err_t result; +#if (osCMSIS < 0x20000U) + if(osMessagePut(*mbox, (uint32_t)msg, 0) == osOK) +#else + if(osMessageQueuePut(*mbox, &msg, 0, 0) == osOK) +#endif + { + result = ERR_OK; + } + else + { + // could not post, queue must be full + result = ERR_MEM; + +#if SYS_STATS + lwip_stats.sys.mbox.err++; +#endif /* SYS_STATS */ + } + + return result; +} + + +/*-----------------------------------------------------------------------------------*/ +// Try to post the "msg" to the mailbox. +err_t sys_mbox_trypost_fromisr(sys_mbox_t *mbox, void *msg) +{ + return sys_mbox_trypost(mbox, msg); +} + +/*-----------------------------------------------------------------------------------*/ +/* + Blocks the thread until a message arrives in the mailbox, but does + not block the thread longer than "timeout" milliseconds (similar to + the sys_arch_sem_wait() function). The "msg" argument is a result + parameter that is set by the function (i.e., by doing "*msg = + ptr"). The "msg" parameter maybe NULL to indicate that the message + should be dropped. + + The return values are the same as for the sys_arch_sem_wait() function: + Number of milliseconds spent waiting or SYS_ARCH_TIMEOUT if there was a + timeout. + + Note that a function with a similar name, sys_mbox_fetch(), is + implemented by lwIP. +*/ +u32_t sys_arch_mbox_fetch(sys_mbox_t *mbox, void **msg, u32_t timeout) +{ +#if (osCMSIS < 0x20000U) + osEvent event; + uint32_t starttime = osKernelSysTick(); +#else + osStatus_t status; + uint32_t starttime = osKernelGetTickCount(); +#endif + if(timeout != 0) + { +#if (osCMSIS < 0x20000U) + event = osMessageGet (*mbox, timeout); + + if(event.status == osEventMessage) + { + *msg = (void *)event.value.v; + return (osKernelSysTick() - starttime); + } +#else + status = osMessageQueueGet(*mbox, msg, 0, timeout); + if (status == osOK) + { + return (osKernelGetTickCount() - starttime); + } +#endif + else + { + return SYS_ARCH_TIMEOUT; + } + } + else + { +#if (osCMSIS < 0x20000U) + event = osMessageGet (*mbox, osWaitForever); + *msg = (void *)event.value.v; + return (osKernelSysTick() - starttime); +#else + osMessageQueueGet(*mbox, msg, 0, osWaitForever ); + return (osKernelGetTickCount() - starttime); +#endif + } +} + +/*-----------------------------------------------------------------------------------*/ +/* + Similar to sys_arch_mbox_fetch, but if message is not ready immediately, we'll + return with SYS_MBOX_EMPTY. On success, 0 is returned. 
+*/ +u32_t sys_arch_mbox_tryfetch(sys_mbox_t *mbox, void **msg) +{ +#if (osCMSIS < 0x20000U) + osEvent event; + + event = osMessageGet (*mbox, 0); + + if(event.status == osEventMessage) + { + *msg = (void *)event.value.v; +#else + if (osMessageQueueGet(*mbox, msg, 0, 0) == osOK) + { +#endif + return ERR_OK; + } + else + { + return SYS_MBOX_EMPTY; + } +} +/*----------------------------------------------------------------------------------*/ +int sys_mbox_valid(sys_mbox_t *mbox) +{ + if (*mbox == SYS_MBOX_NULL) + return 0; + else + return 1; +} +/*-----------------------------------------------------------------------------------*/ +void sys_mbox_set_invalid(sys_mbox_t *mbox) +{ + *mbox = SYS_MBOX_NULL; +} + +/*-----------------------------------------------------------------------------------*/ +// Creates a new semaphore. The "count" argument specifies +// the initial state of the semaphore. +err_t sys_sem_new(sys_sem_t *sem, u8_t count) +{ +#if (osCMSIS < 0x20000U) + osSemaphoreDef(SEM); + *sem = osSemaphoreCreate (osSemaphore(SEM), 1); +#else + *sem = osSemaphoreNew(UINT16_MAX, count, NULL); +#endif + + if(*sem == NULL) + { +#if SYS_STATS + ++lwip_stats.sys.sem.err; +#endif /* SYS_STATS */ + return ERR_MEM; + } + + if(count == 0) // Means it can't be taken + { +#if (osCMSIS < 0x20000U) + osSemaphoreWait(*sem, 0); +#else + osSemaphoreAcquire(*sem, 0); +#endif + } + +#if SYS_STATS + ++lwip_stats.sys.sem.used; + if (lwip_stats.sys.sem.max < lwip_stats.sys.sem.used) { + lwip_stats.sys.sem.max = lwip_stats.sys.sem.used; + } +#endif /* SYS_STATS */ + + return ERR_OK; +} + +/*-----------------------------------------------------------------------------------*/ +/* + Blocks the thread while waiting for the semaphore to be + signaled. If the "timeout" argument is non-zero, the thread should + only be blocked for the specified time (measured in + milliseconds). + + If the timeout argument is non-zero, the return value is the number of + milliseconds spent waiting for the semaphore to be signaled. If the + semaphore wasn't signaled within the specified time, the return value is + SYS_ARCH_TIMEOUT. If the thread didn't have to wait for the semaphore + (i.e., it was already signaled), the function may return zero. + + Notice that lwIP implements a function with a similar name, + sys_sem_wait(), that uses the sys_arch_sem_wait() function. 
+*/ +u32_t sys_arch_sem_wait(sys_sem_t *sem, u32_t timeout) +{ +#if (osCMSIS < 0x20000U) + uint32_t starttime = osKernelSysTick(); +#else + uint32_t starttime = osKernelGetTickCount(); +#endif + if(timeout != 0) + { +#if (osCMSIS < 0x20000U) + if(osSemaphoreWait (*sem, timeout) == osOK) + { + return (osKernelSysTick() - starttime); +#else + if(osSemaphoreAcquire(*sem, timeout) == osOK) + { + return (osKernelGetTickCount() - starttime); +#endif + } + else + { + return SYS_ARCH_TIMEOUT; + } + } + else + { +#if (osCMSIS < 0x20000U) + while(osSemaphoreWait (*sem, osWaitForever) != osOK); + return (osKernelSysTick() - starttime); +#else + while(osSemaphoreAcquire(*sem, osWaitForever) != osOK); + return (osKernelGetTickCount() - starttime); +#endif + } +} + +/*-----------------------------------------------------------------------------------*/ +// Signals a semaphore +void sys_sem_signal(sys_sem_t *sem) +{ + osSemaphoreRelease(*sem); +} + +/*-----------------------------------------------------------------------------------*/ +// Deallocates a semaphore +void sys_sem_free(sys_sem_t *sem) +{ +#if SYS_STATS + --lwip_stats.sys.sem.used; +#endif /* SYS_STATS */ + + osSemaphoreDelete(*sem); +} +/*-----------------------------------------------------------------------------------*/ +int sys_sem_valid(sys_sem_t *sem) +{ + if (*sem == SYS_SEM_NULL) + return 0; + else + return 1; +} + +/*-----------------------------------------------------------------------------------*/ +void sys_sem_set_invalid(sys_sem_t *sem) +{ + *sem = SYS_SEM_NULL; +} + +/*-----------------------------------------------------------------------------------*/ +#if (osCMSIS < 0x20000U) +osMutexId lwip_sys_mutex; +osMutexDef(lwip_sys_mutex); +#else +osMutexId_t lwip_sys_mutex; +#endif +// Initialize sys arch +void sys_init(void) +{ +#if (osCMSIS < 0x20000U) + lwip_sys_mutex = osMutexCreate(osMutex(lwip_sys_mutex)); +#else + lwip_sys_mutex = osMutexNew(NULL); +#endif +} +/*-----------------------------------------------------------------------------------*/ + /* Mutexes*/ +/*-----------------------------------------------------------------------------------*/ +/*-----------------------------------------------------------------------------------*/ +#if LWIP_COMPAT_MUTEX == 0 +/* Create a new mutex*/ +err_t sys_mutex_new(sys_mutex_t *mutex) { + +#if (osCMSIS < 0x20000U) + osMutexDef(MUTEX); + *mutex = osMutexCreate(osMutex(MUTEX)); +#else + *mutex = osMutexNew(NULL); +#endif + + if(*mutex == NULL) + { +#if SYS_STATS + ++lwip_stats.sys.mutex.err; +#endif /* SYS_STATS */ + return ERR_MEM; + } + +#if SYS_STATS + ++lwip_stats.sys.mutex.used; + if (lwip_stats.sys.mutex.max < lwip_stats.sys.mutex.used) { + lwip_stats.sys.mutex.max = lwip_stats.sys.mutex.used; + } +#endif /* SYS_STATS */ + return ERR_OK; +} +/*-----------------------------------------------------------------------------------*/ +/* Deallocate a mutex*/ +void sys_mutex_free(sys_mutex_t *mutex) +{ +#if SYS_STATS + --lwip_stats.sys.mutex.used; +#endif /* SYS_STATS */ + + osMutexDelete(*mutex); +} +/*-----------------------------------------------------------------------------------*/ +/* Lock a mutex*/ +void sys_mutex_lock(sys_mutex_t *mutex) +{ +#if (osCMSIS < 0x20000U) + osMutexWait(*mutex, osWaitForever); +#else + osMutexAcquire(*mutex, osWaitForever); +#endif +} + +/*-----------------------------------------------------------------------------------*/ +/* Unlock a mutex*/ +void sys_mutex_unlock(sys_mutex_t *mutex) +{ + osMutexRelease(*mutex); +} +#endif /*LWIP_COMPAT_MUTEX*/ 
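The sys_mbox_*, sys_sem_* and sys_mutex_* functions in this sys_arch.c are lwIP's OS-abstraction layer mapped onto CMSIS-RTOS; the osCMSIS version check selects between the v1 calls (osMessageGet/osSemaphoreWait) and the v2 calls (osMessageQueueGet/osSemaphoreAcquire), and building with LWIP_COMPAT_MUTEX == 0 means lwIP uses these real mutex wrappers instead of emulating mutexes with binary semaphores. A minimal usage sketch of the mutex wrappers follows; it assumes only the "lwip/sys.h" header from this port, and the demo_* names are hypothetical illustrations, not code from this repository.

/* Sketch (hypothetical demo_* names), assuming "lwip/sys.h" from this port:
 * protect a shared counter with the sys_mutex_* wrappers defined above. */
#include "lwip/sys.h"

static sys_mutex_t demo_mutex;     /* RTOS mutex handle (see sys_arch.h typedefs) */
static volatile int demo_counter;

err_t demo_init(void)
{
  /* sys_mutex_new() creates the RTOS mutex and returns ERR_MEM on failure */
  return sys_mutex_new(&demo_mutex);
}

void demo_increment(void)
{
  sys_mutex_lock(&demo_mutex);     /* blocks until the mutex is acquired */
  demo_counter++;
  sys_mutex_unlock(&demo_mutex);   /* releases the mutex */
}

The same calling pattern applies to the sys_sem_* and sys_mbox_* wrappers, which lwIP's tcpip thread uses internally for signalling and message passing.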
+/*-----------------------------------------------------------------------------------*/ +// TODO +/*-----------------------------------------------------------------------------------*/ +/* + Starts a new thread with priority "prio" that will begin its execution in the + function "thread()". The "arg" argument will be passed as an argument to the + thread() function. The id of the new thread is returned. Both the id and + the priority are system dependent. +*/ +sys_thread_t sys_thread_new(const char *name, lwip_thread_fn thread , void *arg, int stacksize, int prio) +{ +#if (osCMSIS < 0x20000U) + const osThreadDef_t os_thread_def = { (char *)name, (os_pthread)thread, (osPriority)prio, 0, stacksize}; + return osThreadCreate(&os_thread_def, arg); +#else + const osThreadAttr_t attributes = { + .name = name, + .stack_size = stacksize, + .priority = (osPriority_t)prio, + }; + return osThreadNew(thread, arg, &attributes); +#endif +} + +/* + This optional function does a "fast" critical region protection and returns + the previous protection level. This function is only called during very short + critical regions. An embedded system which supports ISR-based drivers might + want to implement this function by disabling interrupts. Task-based systems + might want to implement this by using a mutex or disabling tasking. This + function should support recursive calls from the same task or interrupt. In + other words, sys_arch_protect() could be called while already protected. In + that case the return value indicates that it is already protected. + + sys_arch_protect() is only required if your port is supporting an operating + system. + + Note: This function is based on FreeRTOS API, because no equivalent CMSIS-RTOS + API is available +*/ +sys_prot_t sys_arch_protect(void) +{ +#if (osCMSIS < 0x20000U) + osMutexWait(lwip_sys_mutex, osWaitForever); +#else + osMutexAcquire(lwip_sys_mutex, osWaitForever); +#endif + return (sys_prot_t)1; +} + + +/* + This optional function does a "fast" set of critical region protection to the + value specified by pval. See the documentation for sys_arch_protect() for + more information. This function is only required if your port is supporting + an operating system. + + Note: This function is based on FreeRTOS API, because no equivalent CMSIS-RTOS + API is available +*/ +void sys_arch_unprotect(sys_prot_t pval) +{ + ( void ) pval; + osMutexRelease(lwip_sys_mutex); +} + +#endif /* !NO_SYS */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/system/arch/bpstruct.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/system/arch/bpstruct.h new file mode 100644 index 0000000..1e0dde0 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/system/arch/bpstruct.h @@ -0,0 +1,36 @@ +/* + * Copyright (c) 2001-2003 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. 
The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ + +#if defined(__IAR_SYSTEMS_ICC__) +#pragma pack(1) +#endif + diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/system/arch/cc.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/system/arch/cc.h new file mode 100644 index 0000000..05e26c5 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/system/arch/cc.h @@ -0,0 +1,88 @@ +/* + * Copyright (c) 2001-2003 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
+ * + * Author: Adam Dunkels + * + */ +#ifndef __CC_H__ +#define __CC_H__ + +#include "cpu.h" +#include +#include + +typedef int sys_prot_t; + +#define LWIP_PROVIDE_ERRNO + +#if defined (__GNUC__) & !defined (__CC_ARM) + +#define LWIP_TIMEVAL_PRIVATE 0 +#include + +#endif + +/* define compiler specific symbols */ +#if defined (__ICCARM__) + +#define PACK_STRUCT_BEGIN +#define PACK_STRUCT_STRUCT +#define PACK_STRUCT_END +#define PACK_STRUCT_FIELD(x) x +#define PACK_STRUCT_USE_INCLUDES + +#elif defined (__GNUC__) + +#define PACK_STRUCT_BEGIN +#define PACK_STRUCT_STRUCT __attribute__ ((__packed__)) +#define PACK_STRUCT_END +#define PACK_STRUCT_FIELD(x) x + +#elif defined (__CC_ARM) + +#define PACK_STRUCT_BEGIN __packed +#define PACK_STRUCT_STRUCT +#define PACK_STRUCT_END +#define PACK_STRUCT_FIELD(x) x + +#elif defined (__TASKING__) + +#define PACK_STRUCT_BEGIN +#define PACK_STRUCT_STRUCT +#define PACK_STRUCT_END +#define PACK_STRUCT_FIELD(x) x + +#endif + +#define LWIP_PLATFORM_ASSERT(x) do {printf("Assertion \"%s\" failed at line %d in %s\n", \ + x, __LINE__, __FILE__); } while(0) + +/* Define random number generator function */ +#define LWIP_RAND() ((u32_t)rand()) + +#endif /* __CC_H__ */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/system/arch/cpu.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/system/arch/cpu.h new file mode 100644 index 0000000..f0bfba9 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/system/arch/cpu.h @@ -0,0 +1,39 @@ +/* + * Copyright (c) 2001-2003 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
+ * + * Author: Adam Dunkels + * + */ +#ifndef __CPU_H__ +#define __CPU_H__ + +#ifndef BYTE_ORDER +#define BYTE_ORDER LITTLE_ENDIAN +#endif + +#endif /* __CPU_H__ */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/system/arch/epstruct.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/system/arch/epstruct.h new file mode 100644 index 0000000..41951f7 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/system/arch/epstruct.h @@ -0,0 +1,36 @@ +/* + * Copyright (c) 2001-2003 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ + +#if defined(__IAR_SYSTEMS_ICC__) +#pragma pack() +#endif + diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/system/arch/init.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/system/arch/init.h new file mode 100644 index 0000000..14b9515 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/system/arch/init.h @@ -0,0 +1,44 @@ +/* + * Copyright (c) 2001-2003 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ +#ifndef __ARCH_INIT_H__ +#define __ARCH_INIT_H__ + +#define TCPIP_INIT_DONE(arg) tcpip_init_done(arg) + +void tcpip_init_done(void *); +int wait_for_tcpip_init(void); + +#endif /* __ARCH_INIT_H__ */ + + + + diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/system/arch/lib.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/system/arch/lib.h new file mode 100644 index 0000000..9726dee --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/system/arch/lib.h @@ -0,0 +1,38 @@ +/* + * Copyright (c) 2001-2003 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
+ * + * Author: Adam Dunkels + * + */ +#ifndef __LIB_H__ +#define __LIB_H__ + +#include + + +#endif /* __LIB_H__ */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/system/arch/perf.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/system/arch/perf.h new file mode 100644 index 0000000..68afdb5 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/system/arch/perf.h @@ -0,0 +1,38 @@ +/* + * Copyright (c) 2001-2003 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ +#ifndef __PERF_H__ +#define __PERF_H__ + +#define PERF_START /* null definition */ +#define PERF_STOP(x) /* null definition */ + +#endif /* __PERF_H__ */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/system/arch/sys_arch.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/system/arch/sys_arch.h new file mode 100644 index 0000000..c0000fa --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/system/arch/sys_arch.h @@ -0,0 +1,72 @@ +/* + * Copyright (c) 2001-2003 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ +#ifndef __SYS_ARCH_H__ +#define __SYS_ARCH_H__ + +#include "lwip/opt.h" + +#if (NO_SYS != 0) +#error "NO_SYS need to be set to 0 to use threaded API" +#endif + +#include "cmsis_os.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#if (osCMSIS < 0x20000U) + +#define SYS_MBOX_NULL (osMessageQId)0 +#define SYS_SEM_NULL (osSemaphoreId)0 + +typedef osSemaphoreId sys_sem_t; +typedef osSemaphoreId sys_mutex_t; +typedef osMessageQId sys_mbox_t; +typedef osThreadId sys_thread_t; +#else + +#define SYS_MBOX_NULL (osMessageQueueId_t)0 +#define SYS_SEM_NULL (osSemaphoreId_t)0 + +typedef osSemaphoreId_t sys_sem_t; +typedef osSemaphoreId_t sys_mutex_t; +typedef osMessageQueueId_t sys_mbox_t; +typedef osThreadId_t sys_thread_t; +#endif + +#ifdef __cplusplus +} +#endif + +#endif /* __SYS_ARCH_H__ */ + diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/STM32F767ZITX_FLASH.ld b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/STM32F767ZITX_FLASH.ld new file mode 100644 index 0000000..7884aa7 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/STM32F767ZITX_FLASH.ld @@ -0,0 +1,186 @@ +/* +****************************************************************************** +** +** @file : LinkerScript.ld +** +** @author : Auto-generated by STM32CubeIDE +** +** Abstract : Linker script for NUCLEO-F767ZI Board embedding STM32F767ZITx Device from stm32f7 series +** 2048Kbytes FLASH +** 512Kbytes RAM +** +** Set heap size, stack size and stack location according +** to application requirements. +** +** Set memory bank area and size if external memory is used +** +** Target : STMicroelectronics STM32 +** +** Distribution: The file is distributed as is, without any warranty +** of any kind. +** +****************************************************************************** +** @attention +** +**
© Copyright (c) 2022 STMicroelectronics. +** All rights reserved.
+** +** This software component is licensed by ST under BSD 3-Clause license, +** the "License"; You may not use this file except in compliance with the +** License. You may obtain a copy of the License at: +** opensource.org/licenses/BSD-3-Clause +** +****************************************************************************** +*/ + +/* Entry Point */ +ENTRY(Reset_Handler) + +/* Highest address of the user mode stack */ +_estack = ORIGIN(RAM) + LENGTH(RAM); /* end of "RAM" Ram type memory */ + +_Min_Heap_Size = 0x200 ; /* required amount of heap */ +_Min_Stack_Size = 0x400 ; /* required amount of stack */ + +/* Memories definition */ +MEMORY +{ + RAM (xrw) : ORIGIN = 0x20000000, LENGTH = 512K + FLASH (rx) : ORIGIN = 0x8000000, LENGTH = 2048K +} + +/* Sections */ +SECTIONS +{ + /* The startup code into "FLASH" Rom type memory */ + .isr_vector : + { + . = ALIGN(4); + KEEP(*(.isr_vector)) /* Startup code */ + . = ALIGN(4); + } >FLASH + + /* The program code and other data into "FLASH" Rom type memory */ + .text : + { + . = ALIGN(4); + *(.text) /* .text sections (code) */ + *(.text*) /* .text* sections (code) */ + *(.glue_7) /* glue arm to thumb code */ + *(.glue_7t) /* glue thumb to arm code */ + *(.eh_frame) + + KEEP (*(.init)) + KEEP (*(.fini)) + + . = ALIGN(4); + _etext = .; /* define a global symbols at end of code */ + } >FLASH + + /* Constant data into "FLASH" Rom type memory */ + .rodata : + { + . = ALIGN(4); + *(.rodata) /* .rodata sections (constants, strings, etc.) */ + *(.rodata*) /* .rodata* sections (constants, strings, etc.) */ + . = ALIGN(4); + } >FLASH + + .ARM.extab : { + . = ALIGN(4); + *(.ARM.extab* .gnu.linkonce.armextab.*) + . = ALIGN(4); + } >FLASH + + .ARM : { + . = ALIGN(4); + __exidx_start = .; + *(.ARM.exidx*) + __exidx_end = .; + . = ALIGN(4); + } >FLASH + + .preinit_array : + { + . = ALIGN(4); + PROVIDE_HIDDEN (__preinit_array_start = .); + KEEP (*(.preinit_array*)) + PROVIDE_HIDDEN (__preinit_array_end = .); + . = ALIGN(4); + } >FLASH + + .init_array : + { + . = ALIGN(4); + PROVIDE_HIDDEN (__init_array_start = .); + KEEP (*(SORT(.init_array.*))) + KEEP (*(.init_array*)) + PROVIDE_HIDDEN (__init_array_end = .); + . = ALIGN(4); + } >FLASH + + .fini_array : + { + . = ALIGN(4); + PROVIDE_HIDDEN (__fini_array_start = .); + KEEP (*(SORT(.fini_array.*))) + KEEP (*(.fini_array*)) + PROVIDE_HIDDEN (__fini_array_end = .); + . = ALIGN(4); + } >FLASH + + /* Used by the startup to initialize data */ + _sidata = LOADADDR(.data); + + /* Initialized data sections into "RAM" Ram type memory */ + .data : + { + . = ALIGN(4); + _sdata = .; /* create a global symbol at data start */ + *(.data) /* .data sections */ + *(.data*) /* .data* sections */ + *(.RamFunc) /* .RamFunc sections */ + *(.RamFunc*) /* .RamFunc* sections */ + + . = ALIGN(4); + _edata = .; /* define a global symbol at data end */ + + } >RAM AT> FLASH + + /* Uninitialized data section into "RAM" Ram type memory */ + . = ALIGN(4); + .bss : + { + /* This is used by the startup in order to initialize the .bss section */ + _sbss = .; /* define a global symbol at bss start */ + __bss_start__ = _sbss; + *(.bss) + *(.bss*) + *(COMMON) + + . = ALIGN(4); + _ebss = .; /* define a global symbol at bss end */ + __bss_end__ = _ebss; + } >RAM + + /* User_heap_stack section, used to check that there is enough "RAM" Ram type memory left */ + ._user_heap_stack : + { + . = ALIGN(8); + PROVIDE ( end = . ); + PROVIDE ( _end = . ); + . = . + _Min_Heap_Size; + . = . + _Min_Stack_Size; + . 
= ALIGN(8); + } >RAM + + /* Remove information from the compiler libraries */ + /DISCARD/ : + { + libc.a ( * ) + libm.a ( * ) + libgcc.a ( * ) + } + + .ARM.attributes 0 : { *(.ARM.attributes) } +} diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/STM32F767ZITX_RAM.ld b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/STM32F767ZITX_RAM.ld new file mode 100644 index 0000000..e842696 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/STM32F767ZITX_RAM.ld @@ -0,0 +1,186 @@ +/* +****************************************************************************** +** +** @file : LinkerScript.ld (debug in RAM dedicated) +** +** @author : Auto-generated by STM32CubeIDE +** +** Abstract : Linker script for NUCLEO-F767ZI Board embedding STM32F767ZITx Device from stm32f7 series +** 2048Kbytes FLASH +** 512Kbytes RAM +** +** Set heap size, stack size and stack location according +** to application requirements. +** +** Set memory bank area and size if external memory is used +** +** Target : STMicroelectronics STM32 +** +** Distribution: The file is distributed as is, without any warranty +** of any kind. +** +****************************************************************************** +** @attention +** +**
© Copyright (c) 2022 STMicroelectronics. +** All rights reserved.
+** +** This software component is licensed by ST under BSD 3-Clause license, +** the "License"; You may not use this file except in compliance with the +** License. You may obtain a copy of the License at: +** opensource.org/licenses/BSD-3-Clause +** +****************************************************************************** +*/ + +/* Entry Point */ +ENTRY(Reset_Handler) + +/* Highest address of the user mode stack */ +_estack = ORIGIN(RAM) + LENGTH(RAM); /* end of "RAM" Ram type memory */ + +_Min_Heap_Size = 0x200; /* required amount of heap */ +_Min_Stack_Size = 0x400; /* required amount of stack */ + +/* Memories definition */ +MEMORY +{ + RAM (xrw) : ORIGIN = 0x20000000, LENGTH = 512K + FLASH (rx) : ORIGIN = 0x8000000, LENGTH = 2048K +} + +/* Sections */ +SECTIONS +{ + /* The startup code into "RAM" Ram type memory */ + .isr_vector : + { + . = ALIGN(4); + KEEP(*(.isr_vector)) /* Startup code */ + . = ALIGN(4); + } >RAM + + /* The program code and other data into "RAM" Ram type memory */ + .text : + { + . = ALIGN(4); + *(.text) /* .text sections (code) */ + *(.text*) /* .text* sections (code) */ + *(.glue_7) /* glue arm to thumb code */ + *(.glue_7t) /* glue thumb to arm code */ + *(.eh_frame) + *(.RamFunc) /* .RamFunc sections */ + *(.RamFunc*) /* .RamFunc* sections */ + + KEEP (*(.init)) + KEEP (*(.fini)) + + . = ALIGN(4); + _etext = .; /* define a global symbols at end of code */ + } >RAM + + /* Constant data into "RAM" Ram type memory */ + .rodata : + { + . = ALIGN(4); + *(.rodata) /* .rodata sections (constants, strings, etc.) */ + *(.rodata*) /* .rodata* sections (constants, strings, etc.) */ + . = ALIGN(4); + } >RAM + + .ARM.extab : { + . = ALIGN(4); + *(.ARM.extab* .gnu.linkonce.armextab.*) + . = ALIGN(4); + } >RAM + + .ARM : { + . = ALIGN(4); + __exidx_start = .; + *(.ARM.exidx*) + __exidx_end = .; + . = ALIGN(4); + } >RAM + + .preinit_array : + { + . = ALIGN(4); + PROVIDE_HIDDEN (__preinit_array_start = .); + KEEP (*(.preinit_array*)) + PROVIDE_HIDDEN (__preinit_array_end = .); + . = ALIGN(4); + } >RAM + + .init_array : + { + . = ALIGN(4); + PROVIDE_HIDDEN (__init_array_start = .); + KEEP (*(SORT(.init_array.*))) + KEEP (*(.init_array*)) + PROVIDE_HIDDEN (__init_array_end = .); + . = ALIGN(4); + } >RAM + + .fini_array : + { + . = ALIGN(4); + PROVIDE_HIDDEN (__fini_array_start = .); + KEEP (*(SORT(.fini_array.*))) + KEEP (*(.fini_array*)) + PROVIDE_HIDDEN (__fini_array_end = .); + . = ALIGN(4); + } >RAM + + /* Used by the startup to initialize data */ + _sidata = LOADADDR(.data); + + /* Initialized data sections into "RAM" Ram type memory */ + .data : + { + . = ALIGN(4); + _sdata = .; /* create a global symbol at data start */ + *(.data) /* .data sections */ + *(.data*) /* .data* sections */ + + . = ALIGN(4); + _edata = .; /* define a global symbol at data end */ + + } >RAM + + /* Uninitialized data section into "RAM" Ram type memory */ + . = ALIGN(4); + .bss : + { + /* This is used by the startup in order to initialize the .bss section */ + _sbss = .; /* define a global symbol at bss start */ + __bss_start__ = _sbss; + *(.bss) + *(.bss*) + *(COMMON) + + . = ALIGN(4); + _ebss = .; /* define a global symbol at bss end */ + __bss_end__ = _ebss; + } >RAM + + /* User_heap_stack section, used to check that there is enough "RAM" Ram type memory left */ + ._user_heap_stack : + { + . = ALIGN(8); + PROVIDE ( end = . ); + PROVIDE ( _end = . ); + . = . + _Min_Heap_Size; + . = . + _Min_Stack_Size; + . 
= ALIGN(8); + } >RAM + + /* Remove information from the compiler libraries */ + /DISCARD/ : + { + libc.a ( * ) + libm.a ( * ) + libgcc.a ( * ) + } + + .ARM.attributes 0 : { *(.ARM.attributes) } +} diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/micro-ros_embeddedrtps Debug.launch b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/micro-ros_embeddedrtps Debug.launch new file mode 100644 index 0000000..db11abe --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/micro-ros_embeddedrtps Debug.launch @@ -0,0 +1,79 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/micro-ros_embeddedrtps.ioc b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/micro-ros_embeddedrtps.ioc new file mode 100644 index 0000000..80f6593 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/micro-ros_embeddedrtps.ioc @@ -0,0 +1,357 @@ +#MicroXplorer Configuration settings - do not modify +ETH.IPParameters=MediaInterface,PHY_Name,PHY_Value,PhyAddress +ETH.MediaInterface=ETH_MEDIA_INTERFACE_RMII +ETH.PHY_Name=LAN8742A_PHY_ADDRESS +ETH.PHY_Value=0 +ETH.PhyAddress=0 +FREERTOS.FootprintOK=true +FREERTOS.IPParameters=Tasks01,configUSE_NEWLIB_REENTRANT,configTOTAL_HEAP_SIZE,configMAX_TASK_NAME_LEN,FootprintOK +FREERTOS.Tasks01=defaultTask,19,8000,StartDefaultTask,Default,NULL,Dynamic,NULL,NULL +FREERTOS.configMAX_TASK_NAME_LEN=30 +FREERTOS.configTOTAL_HEAP_SIZE=100000 +FREERTOS.configUSE_NEWLIB_REENTRANT=1 +File.Version=6 +KeepUserPlacement=false +LWIP.GATEWAY_ADDRESS=192.168.011.001 +LWIP.IPParameters=LWIP_MULTICAST_TX_OPTIONS,LWIP_IGMP,LWIP_DHCP,IP_ADDRESS,NETMASK_ADDRESS,GATEWAY_ADDRESS,MEMP_NUM_UDP_PCB,MEM_SIZE,TCPIP_THREAD_STACKSIZE,TCPIP_THREAD_PRIO,PBUF_POOL_SIZE +LWIP.IP_ADDRESS=192.168.011.002 +LWIP.LWIP_DHCP=0 +LWIP.LWIP_IGMP=1 +LWIP.LWIP_MULTICAST_TX_OPTIONS=1 +LWIP.MEMP_NUM_UDP_PCB=15 +LWIP.MEM_SIZE=30000 +LWIP.NETMASK_ADDRESS=255.255.255.000 +LWIP.PBUF_POOL_SIZE=20 +LWIP.TCPIP_THREAD_PRIO=20 +LWIP.TCPIP_THREAD_STACKSIZE=10000 +LWIP.Version=v2.1.2_Cube +Mcu.Family=STM32F7 +Mcu.IP0=CORTEX_M7 +Mcu.IP1=ETH +Mcu.IP2=FREERTOS +Mcu.IP3=LWIP +Mcu.IP4=NVIC +Mcu.IP5=RCC +Mcu.IP6=SYS +Mcu.IP7=USART3 +Mcu.IP8=USB_OTG_FS +Mcu.IPNb=9 +Mcu.Name=STM32F767ZITx +Mcu.Package=LQFP144 +Mcu.Pin0=PC13 +Mcu.Pin1=PC14/OSC32_IN +Mcu.Pin10=PC5 +Mcu.Pin11=PB0 +Mcu.Pin12=PB13 +Mcu.Pin13=PB14 +Mcu.Pin14=PD8 +Mcu.Pin15=PD9 +Mcu.Pin16=PG6 +Mcu.Pin17=PG7 +Mcu.Pin18=PA8 +Mcu.Pin19=PA9 +Mcu.Pin2=PC15/OSC32_OUT +Mcu.Pin20=PA10 +Mcu.Pin21=PA11 +Mcu.Pin22=PA12 +Mcu.Pin23=PA13 +Mcu.Pin24=PA14 +Mcu.Pin25=PG11 +Mcu.Pin26=PG13 +Mcu.Pin27=PB3 +Mcu.Pin28=PB7 +Mcu.Pin29=VP_FREERTOS_VS_CMSIS_V2 +Mcu.Pin3=PH0/OSC_IN +Mcu.Pin30=VP_LWIP_VS_Enabled +Mcu.Pin31=VP_SYS_VS_tim14 +Mcu.Pin4=PH1/OSC_OUT +Mcu.Pin5=PC1 +Mcu.Pin6=PA1 +Mcu.Pin7=PA2 +Mcu.Pin8=PA7 +Mcu.Pin9=PC4 +Mcu.PinsNb=32 +Mcu.ThirdPartyNb=0 +Mcu.UserConstants= +Mcu.UserName=STM32F767ZITx +MxCube.Version=6.3.0 +MxDb.Version=DB.6.0.30 +NVIC.BusFault_IRQn=true\:0\:0\:false\:false\:true\:false\:true\:false +NVIC.DebugMonitor_IRQn=true\:0\:0\:false\:false\:true\:false\:true\:false +NVIC.ETH_IRQn=true\:5\:0\:false\:false\:true\:true\:false\:true +NVIC.ForceEnableDMAVector=true +NVIC.HardFault_IRQn=true\:0\:0\:false\:false\:true\:false\:true\:false +NVIC.MemoryManagement_IRQn=true\:0\:0\:false\:false\:true\:false\:true\:false 
+NVIC.NonMaskableInt_IRQn=true\:0\:0\:false\:false\:true\:false\:true\:false +NVIC.PendSV_IRQn=true\:15\:0\:false\:false\:false\:true\:true\:false +NVIC.PriorityGroup=NVIC_PRIORITYGROUP_4 +NVIC.SVCall_IRQn=true\:0\:0\:false\:false\:false\:false\:true\:false +NVIC.SavedPendsvIrqHandlerGenerated=true +NVIC.SavedSvcallIrqHandlerGenerated=true +NVIC.SavedSystickIrqHandlerGenerated=true +NVIC.SysTick_IRQn=true\:15\:0\:false\:false\:false\:true\:true\:true +NVIC.TIM8_TRG_COM_TIM14_IRQn=true\:15\:0\:false\:false\:true\:false\:false\:true +NVIC.TimeBase=TIM8_TRG_COM_TIM14_IRQn +NVIC.TimeBaseIP=TIM14 +NVIC.UsageFault_IRQn=true\:0\:0\:false\:false\:true\:false\:true\:false +PA1.GPIOParameters=GPIO_Label +PA1.GPIO_Label=RMII_REF_CLK [LAN8742A-CZ-TR_REFCLK0] +PA1.Locked=true +PA1.Mode=RMII +PA1.Signal=ETH_REF_CLK +PA10.GPIOParameters=GPIO_Label +PA10.GPIO_Label=USB_ID +PA10.Locked=true +PA10.Signal=USB_OTG_FS_ID +PA11.GPIOParameters=GPIO_Label +PA11.GPIO_Label=USB_DM +PA11.Locked=true +PA11.Mode=Device_Only +PA11.Signal=USB_OTG_FS_DM +PA12.GPIOParameters=GPIO_Label +PA12.GPIO_Label=USB_DP +PA12.Locked=true +PA12.Mode=Device_Only +PA12.Signal=USB_OTG_FS_DP +PA13.GPIOParameters=GPIO_Label +PA13.GPIO_Label=TMS +PA13.Locked=true +PA13.Mode=Serial_Wire +PA13.Signal=SYS_JTMS-SWDIO +PA14.GPIOParameters=GPIO_Label +PA14.GPIO_Label=TCK +PA14.Locked=true +PA14.Mode=Serial_Wire +PA14.Signal=SYS_JTCK-SWCLK +PA2.GPIOParameters=GPIO_Label +PA2.GPIO_Label=RMII_MDIO [LAN8742A-CZ-TR_MDIO] +PA2.Locked=true +PA2.Mode=RMII +PA2.Signal=ETH_MDIO +PA7.GPIOParameters=GPIO_Label +PA7.GPIO_Label=RMII_CRS_DV [LAN8742A-CZ-TR_CRS_DV] +PA7.Locked=true +PA7.Mode=RMII +PA7.Signal=ETH_CRS_DV +PA8.GPIOParameters=GPIO_Label +PA8.GPIO_Label=USB_SOF [TP1] +PA8.Locked=true +PA8.Mode=Activate_SOF_FS +PA8.Signal=USB_OTG_FS_SOF +PA9.GPIOParameters=GPIO_Label +PA9.GPIO_Label=USB_VBUS +PA9.Locked=true +PA9.Mode=Activate_VBUS +PA9.Signal=USB_OTG_FS_VBUS +PB0.GPIOParameters=GPIO_Label +PB0.GPIO_Label=LD1 [Green] +PB0.Locked=true +PB0.Signal=GPIO_Output +PB13.GPIOParameters=GPIO_Label +PB13.GPIO_Label=RMII_TXD1 [LAN8742A-CZ-TR_TXD1] +PB13.Locked=true +PB13.Mode=RMII +PB13.Signal=ETH_TXD1 +PB14.GPIOParameters=GPIO_Label +PB14.GPIO_Label=LD3 [Red] +PB14.Locked=true +PB14.Signal=GPIO_Output +PB3.GPIOParameters=GPIO_Label +PB3.GPIO_Label=SWO +PB3.Locked=true +PB3.Signal=SYS_JTDO-SWO +PB7.GPIOParameters=GPIO_Label +PB7.GPIO_Label=LD2 [Blue] +PB7.Locked=true +PB7.Signal=GPIO_Output +PC1.GPIOParameters=GPIO_Label +PC1.GPIO_Label=RMII_MDC [LAN8742A-CZ-TR_MDC] +PC1.Locked=true +PC1.Mode=RMII +PC1.Signal=ETH_MDC +PC13.GPIOParameters=GPIO_Label +PC13.GPIO_Label=USER_Btn [B1] +PC13.Locked=true +PC13.Signal=GPXTI13 +PC14/OSC32_IN.Locked=true +PC14/OSC32_IN.Mode=LSE-External-Oscillator +PC14/OSC32_IN.Signal=RCC_OSC32_IN +PC15/OSC32_OUT.Locked=true +PC15/OSC32_OUT.Mode=LSE-External-Oscillator +PC15/OSC32_OUT.Signal=RCC_OSC32_OUT +PC4.GPIOParameters=GPIO_Label +PC4.GPIO_Label=RMII_RXD0 [LAN8742A-CZ-TR_RXD0] +PC4.Locked=true +PC4.Mode=RMII +PC4.Signal=ETH_RXD0 +PC5.GPIOParameters=GPIO_Label +PC5.GPIO_Label=RMII_RXD1 [LAN8742A-CZ-TR_RXD1] +PC5.Locked=true +PC5.Mode=RMII +PC5.Signal=ETH_RXD1 +PD8.GPIOParameters=GPIO_Label +PD8.GPIO_Label=STLK_RX [STM32F103CBT6_PA3] +PD8.Locked=true +PD8.Mode=Asynchronous +PD8.Signal=USART3_TX +PD9.GPIOParameters=GPIO_Label +PD9.GPIO_Label=STLK_TX [STM32F103CBT6_PA2] +PD9.Locked=true +PD9.Mode=Asynchronous +PD9.Signal=USART3_RX +PG11.GPIOParameters=GPIO_Label +PG11.GPIO_Label=RMII_TX_EN [LAN8742A-CZ-TR_TXEN] +PG11.Locked=true 
+PG11.Mode=RMII +PG11.Signal=ETH_TX_EN +PG13.GPIOParameters=GPIO_Label +PG13.GPIO_Label=RMII_TXD0 [LAN8742A-CZ-TR_TXD0] +PG13.Locked=true +PG13.Mode=RMII +PG13.Signal=ETH_TXD0 +PG6.GPIOParameters=GPIO_Label +PG6.GPIO_Label=USB_PowerSwitchOn [STMPS2151STR_EN] +PG6.Locked=true +PG6.Signal=GPIO_Output +PG7.GPIOParameters=GPIO_Label +PG7.GPIO_Label=USB_OverCurrent [STMPS2151STR_FAULT] +PG7.Locked=true +PG7.Signal=GPIO_Input +PH0/OSC_IN.GPIOParameters=GPIO_Label +PH0/OSC_IN.GPIO_Label=MCO [STM32F103CBT6_PA8] +PH0/OSC_IN.Locked=true +PH0/OSC_IN.Mode=HSE-External-Clock-Source +PH0/OSC_IN.Signal=RCC_OSC_IN +PH1/OSC_OUT.Locked=true +PH1/OSC_OUT.Mode=HSE-External-Clock-Source +PH1/OSC_OUT.Signal=RCC_OSC_OUT +PinOutPanel.RotationAngle=0 +ProjectManager.AskForMigrate=true +ProjectManager.BackupPrevious=false +ProjectManager.CompilerOptimize=6 +ProjectManager.ComputerToolchain=false +ProjectManager.CoupleFile=false +ProjectManager.CustomerFirmwarePackage= +ProjectManager.DefaultFWLocation=true +ProjectManager.DeletePrevious=true +ProjectManager.DeviceId=STM32F767ZITx +ProjectManager.FirmwarePackage=STM32Cube FW_F7 V1.16.2 +ProjectManager.FreePins=false +ProjectManager.HalAssertFull=false +ProjectManager.HeapSize=0x200 +ProjectManager.KeepUserCode=true +ProjectManager.LastFirmware=true +ProjectManager.LibraryCopy=1 +ProjectManager.MainLocation=Core/Src +ProjectManager.NoMain=false +ProjectManager.PreviousToolchain= +ProjectManager.ProjectBuild=false +ProjectManager.ProjectFileName=micro-ros_embeddedrtps.ioc +ProjectManager.ProjectName=micro-ros_embeddedrtps +ProjectManager.RegisterCallBack= +ProjectManager.StackSize=0x400 +ProjectManager.TargetToolchain=STM32CubeIDE +ProjectManager.ToolChainLocation= +ProjectManager.UnderRoot=true +ProjectManager.functionlistsort=1-MX_GPIO_Init-GPIO-false-HAL-true,2-SystemClock_Config-RCC-false-HAL-false,3-MX_ETH_Init-ETH-false-HAL-true,4-MX_USART3_UART_Init-USART3-false-HAL-true,5-MX_USB_OTG_FS_PCD_Init-USB_OTG_FS-false-HAL-true,0-MX_CORTEX_M7_Init-CORTEX_M7-false-HAL-true +RCC.48MHZClocksFreq_Value=24000000 +RCC.ADC12outputFreq_Value=72000000 +RCC.ADC34outputFreq_Value=72000000 +RCC.AHBFreq_Value=96000000 +RCC.APB1CLKDivider=RCC_HCLK_DIV2 +RCC.APB1Freq_Value=48000000 +RCC.APB1TimFreq_Value=96000000 +RCC.APB2Freq_Value=96000000 +RCC.APB2TimFreq_Value=96000000 +RCC.CECFreq_Value=32786.88524590164 +RCC.CortexFreq_Value=96000000 +RCC.DFSDMAudioFreq_Value=192000000 +RCC.DFSDMFreq_Value=96000000 +RCC.EthernetFreq_Value=96000000 +RCC.FCLKCortexFreq_Value=96000000 +RCC.FamilyName=M +RCC.HCLKFreq_Value=96000000 +RCC.HSE_VALUE=8000000 +RCC.HSI_VALUE=16000000 +RCC.I2C1Freq_Value=48000000 +RCC.I2C2Freq_Value=48000000 +RCC.I2C3Freq_Value=48000000 +RCC.I2C4Freq_Value=48000000 +RCC.I2SClocksFreq_Value=48000000 +RCC.I2SFreq_Value=192000000 
+RCC.IPParameters=48MHZClocksFreq_Value,ADC12outputFreq_Value,ADC34outputFreq_Value,AHBFreq_Value,APB1CLKDivider,APB1Freq_Value,APB1TimFreq_Value,APB2Freq_Value,APB2TimFreq_Value,CECFreq_Value,CortexFreq_Value,DFSDMAudioFreq_Value,DFSDMFreq_Value,EthernetFreq_Value,FCLKCortexFreq_Value,FamilyName,HCLKFreq_Value,HSE_VALUE,HSI_VALUE,I2C1Freq_Value,I2C2Freq_Value,I2C3Freq_Value,I2C4Freq_Value,I2SClocksFreq_Value,I2SFreq_Value,LCDTFTFreq_Value,LCDTFToutputFreq_Value,LPTIM1Freq_Value,LSI_VALUE,MCO1PinFreq_Value,MCO2PinFreq_Value,MCOFreq_Value,PLLCLKFreq_Value,PLLI2SPCLKFreq_Value,PLLI2SQCLKFreq_Value,PLLI2SRCLKFreq_Value,PLLI2SRoutputFreq_Value,PLLM,PLLMCOFreq_Value,PLLMUL,PLLN,PLLQ,PLLQCLKFreq_Value,PLLQoutputFreq_Value,PLLRFreq_Value,PLLSAIPCLKFreq_Value,PLLSAIQCLKFreq_Value,PLLSAIRCLKFreq_Value,PLLSAIoutputFreq_Value,PRESCALERUSB,RNGFreq_Value,RTCFreq_Value,RTCHSEDivFreq_Value,SAI1Freq_Value,SAI2Freq_Value,SDMMC2Freq_Value,SDMMCFreq_Value,SPDIFRXFreq_Value,SYSCLKFreq_VALUE,SYSCLKSource,SYSCLKSourceVirtual,TIM15Freq_Value,TIM16Freq_Value,TIM17Freq_Value,TIM1Freq_Value,TIM20Freq_Value,TIM2Freq_Value,TIM3Freq_Value,TIM8Freq_Value,UART4Freq_Value,UART5Freq_Value,UART7Freq_Value,UART8Freq_Value,USART1Freq_Value,USART2Freq_Value,USART3Freq_Value,USART6Freq_Value,USBFreq_Value,VCOI2SOutputFreq_Value,VCOInputFreq_Value,VCOOutput2Freq_Value,VCOOutputFreq_Value,VCOSAIOutputFreq_Value,VcooutputI2S,WatchDogFreq_Value +RCC.LCDTFTFreq_Value=96000000 +RCC.LCDTFToutputFreq_Value=96000000 +RCC.LPTIM1Freq_Value=48000000 +RCC.LSI_VALUE=32000 +RCC.MCO1PinFreq_Value=16000000 +RCC.MCO2PinFreq_Value=96000000 +RCC.MCOFreq_Value=72000000 +RCC.PLLCLKFreq_Value=96000000 +RCC.PLLI2SPCLKFreq_Value=192000000 +RCC.PLLI2SQCLKFreq_Value=192000000 +RCC.PLLI2SRCLKFreq_Value=192000000 +RCC.PLLI2SRoutputFreq_Value=192000000 +RCC.PLLM=4 +RCC.PLLMCOFreq_Value=72000000 +RCC.PLLMUL=RCC_PLL_MUL9 +RCC.PLLN=96 +RCC.PLLQ=4 +RCC.PLLQCLKFreq_Value=48000000 +RCC.PLLQoutputFreq_Value=48000000 +RCC.PLLRFreq_Value=96000000 +RCC.PLLSAIPCLKFreq_Value=192000000 +RCC.PLLSAIQCLKFreq_Value=192000000 +RCC.PLLSAIRCLKFreq_Value=192000000 +RCC.PLLSAIoutputFreq_Value=192000000 +RCC.PRESCALERUSB=RCC_USBCLKSOURCE_PLL_DIV1_5 +RCC.RNGFreq_Value=48000000 +RCC.RTCFreq_Value=32000 +RCC.RTCHSEDivFreq_Value=4000000 +RCC.SAI1Freq_Value=192000000 +RCC.SAI2Freq_Value=192000000 +RCC.SDMMC2Freq_Value=96000000 +RCC.SDMMCFreq_Value=96000000 +RCC.SPDIFRXFreq_Value=192000000 +RCC.SYSCLKFreq_VALUE=96000000 +RCC.SYSCLKSource=RCC_SYSCLKSOURCE_PLLCLK +RCC.SYSCLKSourceVirtual=RCC_SYSCLKSOURCE_PLLCLK +RCC.TIM15Freq_Value=72000000 +RCC.TIM16Freq_Value=72000000 +RCC.TIM17Freq_Value=72000000 +RCC.TIM1Freq_Value=72000000 +RCC.TIM20Freq_Value=72000000 +RCC.TIM2Freq_Value=72000000 +RCC.TIM3Freq_Value=72000000 +RCC.TIM8Freq_Value=72000000 +RCC.UART4Freq_Value=48000000 +RCC.UART5Freq_Value=48000000 +RCC.UART7Freq_Value=48000000 +RCC.UART8Freq_Value=48000000 +RCC.USART1Freq_Value=96000000 +RCC.USART2Freq_Value=48000000 +RCC.USART3Freq_Value=48000000 +RCC.USART6Freq_Value=96000000 +RCC.USBFreq_Value=48000000 +RCC.VCOI2SOutputFreq_Value=384000000 +RCC.VCOInputFreq_Value=2000000 +RCC.VCOOutput2Freq_Value=8000000 +RCC.VCOOutputFreq_Value=192000000 +RCC.VCOSAIOutputFreq_Value=384000000 +RCC.VcooutputI2S=48000000 +RCC.WatchDogFreq_Value=32000 +SH.GPXTI13.0=GPIO_EXTI13 +SH.GPXTI13.ConfNb=1 +USART3.IPParameters=VirtualMode-Asynchronous +USART3.VirtualMode-Asynchronous=VM_ASYNC +USB_OTG_FS.IPParameters=VirtualMode +USB_OTG_FS.VirtualMode=Device_Only +VP_FREERTOS_VS_CMSIS_V2.Mode=CMSIS_V2 
+VP_FREERTOS_VS_CMSIS_V2.Signal=FREERTOS_VS_CMSIS_V2 +VP_LWIP_VS_Enabled.Mode=Enabled +VP_LWIP_VS_Enabled.Signal=LWIP_VS_Enabled +VP_SYS_VS_tim14.Mode=TIM14 +VP_SYS_VS_tim14.Signal=SYS_VS_tim14 +board=NUCLEO-F767ZI +boardIOC=true +isbadioc=false diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/micro_ros_stm32cubemx_utils b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/micro_ros_stm32cubemx_utils new file mode 160000 index 0000000..86a429f --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/micro_ros_stm32cubemx_utils @@ -0,0 +1 @@ +Subproject commit 86a429f8e91ce2f0853e1da07daa362098afda10
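The .ioc above configures a static network setup: DHCP disabled (LWIP.LWIP_DHCP=0), board address 192.168.11.2, netmask 255.255.255.0, gateway 192.168.11.1, with IGMP and multicast TX options enabled, presumably because RTPS discovery relies on UDP multicast. The sketch below shows, assuming a standard lwIP IPv4 build, how such values are typically expressed as ip4_addr_t initialisations; it is illustrative only, with hypothetical demo_* names, and is not the CubeMX-generated lwip.c.

/* Illustrative sketch (hypothetical): the static address values from the .ioc
 * expressed as lwIP ip4_addr_t initialisation. */
#include "lwip/ip_addr.h"

static ip4_addr_t demo_ipaddr, demo_netmask, demo_gw;

static void demo_set_static_addresses(void)
{
  IP4_ADDR(&demo_ipaddr,  192, 168,  11, 2);   /* LWIP.IP_ADDRESS      */
  IP4_ADDR(&demo_netmask, 255, 255, 255, 0);   /* LWIP.NETMASK_ADDRESS */
  IP4_ADDR(&demo_gw,      192, 168,  11, 1);   /* LWIP.GATEWAY_ADDRESS */
}

The single defaultTask declared in FREERTOS.Tasks01 is also given a large stack (8000 entries), presumably to accommodate the RTPS middleware running inside it.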